diff --git a/meta/runtime.yml b/meta/runtime.yml index a816aba1..6b7c4978 100644 --- a/meta/runtime.yml +++ b/meta/runtime.yml @@ -39,3 +39,49 @@ action_groups: - cm - cluster_info - cluster + service: + - metadata: + extend_group: + - cm + - service_info + - service + role: + - metadata: + extend_group: + - cm + - service_role_info + - service_role + role_config_group: + - metadata: + extend_group: + - cm + - service_role_config_group_info + - service_role_config_group + host: + - metadata: + extend_group: + - cm + - host_info + - host + host_template: + - metadata: + extend_group: + - cm + - host_template_info + - host_template + parcel: + - metadata: + extend_group: + - cm + - parcel_info + - parcel + deployment: + - metadata: + extend_group: + - cluster + - service + - role + - role_config_group + - host + - host_template + - parcel diff --git a/plugins/module_utils/cluster_utils.py b/plugins/module_utils/cluster_utils.py index e11512c2..f3f90caa 100644 --- a/plugins/module_utils/cluster_utils.py +++ b/plugins/module_utils/cluster_utils.py @@ -51,6 +51,7 @@ def parse_cluster_result(cluster: ApiCluster) -> dict: return output +# TODO Convert to use cluster_name vs the ApiCluster object for broader usage in pytest fixtures def get_cluster_hosts(api_client: ApiClient, cluster: ApiCluster) -> list[ApiHost]: return ( ClustersResourceApi(api_client) diff --git a/plugins/module_utils/role_config_group_utils.py b/plugins/module_utils/role_config_group_utils.py index 99471f7f..d0213cd6 100644 --- a/plugins/module_utils/role_config_group_utils.py +++ b/plugins/module_utils/role_config_group_utils.py @@ -14,12 +14,20 @@ from ansible_collections.cloudera.cluster.plugins.module_utils.cm_utils import ( normalize_output, + ConfigListUpdates, +) +from ansible_collections.cloudera.cluster.plugins.module_utils.role_utils import ( + InvalidRoleTypeException, ) from cm_client import ( ApiClient, + ApiConfig, + ApiConfigList, ApiRoleConfigGroup, + ApiRoleConfigGroupList, RoleConfigGroupsResourceApi, + ServicesResourceApi, MgmtRoleConfigGroupsResourceApi, ) @@ -41,6 +49,7 @@ def parse_role_config_group_result(role_config_group: ApiRoleConfigGroup) -> dic - base (bool) - display_name (str) - config (dict) + - service_name (str) Args: role_config_group (ApiRoleConfigGroup): Role Config Group @@ -55,23 +64,105 @@ def parse_role_config_group_result(role_config_group: ApiRoleConfigGroup) -> dic return output -def get_base_role_config_group( - api_client: ApiClient, cluster_name: str, service_name: str, role_type: str +def create_role_config_group( + api_client: ApiClient, + cluster_name: str, + service_name: str, + name: str, + role_type: str, + display_name: str = None, + config: dict = None, ) -> ApiRoleConfigGroup: - rcg_api = RoleConfigGroupsResourceApi(api_client) - return next( - iter( - [ - r - for r in rcg_api.read_role_config_groups( - cluster_name, service_name - ).items - if r.role_type == role_type and r.base - ] - ), - None, + if ( + role_type.upper() + not in ServicesResourceApi(api_client) + .list_role_types( + cluster_name=cluster_name, + service_name=service_name, + ) + .items + ): + raise InvalidRoleTypeException( + f"Invalid role type '{role_type}' for service '{service_name}'" + ) + + role_config_group = ApiRoleConfigGroup( + name=name, + role_type=role_type.upper(), ) + if display_name: + role_config_group.display_name = display_name + + if config: + role_config_group.config = ApiConfigList( + items=[ApiConfig(name=k, value=v) for k, v in config.items()] + ) + + return role_config_group + + 
+def provision_role_config_groups( + api_client: ApiClient, + cluster_name: str, + service_name: str, + role_config_groups: list[ApiRoleConfigGroup], +) -> ApiRoleConfigGroup: + return RoleConfigGroupsResourceApi(api_client).create_role_config_groups( + cluster_name=cluster_name, + service_name=service_name, + body=ApiRoleConfigGroupList(items=role_config_groups), + ) + + +def update_role_config_group( + role_config_group: ApiRoleConfigGroup, + display_name: str = None, + config: dict = None, + purge: bool = False, +) -> tuple[ApiRoleConfigGroup, dict, dict]: + before, after = dict(), dict() + + # Check for display name changes + if display_name is not None and display_name != role_config_group.display_name: + before.update(display_name=role_config_group.display_name) + after.update(display_name=display_name) + role_config_group.display_name = display_name + + # Reconcile configurations + if config or purge: + if config is None: + config = dict() + + updates = ConfigListUpdates(role_config_group.config, config, purge) + + if updates.changed: + before.update(config=updates.diff["before"]) + after.update(config=updates.diff["after"]) + role_config_group.config = updates.config + + return (role_config_group, before, after) + + +# TODO Normalize the return value to be a list +def get_base_role_config_group( + api_client: ApiClient, cluster_name: str, service_name: str, role_type: str = None +) -> ApiRoleConfigGroup: + base_rcg_list = [ + r + for r in RoleConfigGroupsResourceApi(api_client) + .read_role_config_groups( + cluster_name=cluster_name, + service_name=service_name, + ) + .items + if (r.base and role_type is None) or (r.base and r.role_type == role_type) + ] + if role_type is not None: + return next(iter(base_rcg_list), None) + else: + return base_rcg_list + def get_mgmt_base_role_config_group( api_client: ApiClient, role_type: str diff --git a/plugins/module_utils/role_utils.py b/plugins/module_utils/role_utils.py index 07de2872..308fa42b 100644 --- a/plugins/module_utils/role_utils.py +++ b/plugins/module_utils/role_utils.py @@ -33,6 +33,7 @@ ApiRoleConfigGroupRef, ApiRoleNameList, ApiRoleState, + ServicesResourceApi, RoleCommandsResourceApi, RoleConfigGroupsResourceApi, RolesResourceApi, @@ -47,6 +48,22 @@ class RoleException(Exception): pass +class RoleHostNotFoundException(RoleException): + pass + + +class RoleConfigGroupNotFoundException(RoleException): + pass + + +class RoleMaintenanceStateException(RoleException): + pass + + +class InvalidRoleTypeException(RoleException): + pass + + ROLE_OUTPUT = [ "commission_state", "config_staleness_status", @@ -214,14 +231,6 @@ def read_cm_roles(api_client: ApiClient) -> ApiRoleList: return ApiRoleList(items=roles) -class HostNotFoundException(RoleException): - pass - - -class RoleConfigGroupNotFoundException(RoleException): - pass - - def create_role( api_client: ApiClient, role_type: str, @@ -233,6 +242,18 @@ def create_role( role_config_group: str = None, tags: dict = None, ) -> ApiRole: + if ( + role_type.upper() + not in ServicesResourceApi(api_client) + .list_role_types( + cluster_name=cluster_name, + service_name=service_name, + ) + .items + ): + raise InvalidRoleTypeException( + f"Invalid role type '{role_type}' for service '{service_name}'" + ) # Set up the role type role = ApiRole(type=str(role_type).upper()) @@ -240,7 +261,7 @@ def create_role( # Host assignment host_ref = get_host_ref(api_client, hostname, host_id) if host_ref is None: - raise HostNotFoundException( + raise RoleHostNotFoundException( f"Host not found: 
hostname='{hostname}', host_id='{host_id}'" ) else: @@ -315,10 +336,6 @@ def provision_service_role( raise RoleException(str(e)) -class MaintenanceStateException(RoleException): - pass - - def toggle_role_maintenance( api_client: ApiClient, role: ApiRole, maintenance: bool, check_mode: bool ) -> bool: @@ -340,7 +357,7 @@ def toggle_role_maintenance( ) if maintenance_cmd.success is False: - raise MaintenanceStateException( + raise RoleMaintenanceStateException( f"Unable to set Maintenance mode to '{maintenance}': {maintenance_cmd.result_message}" ) diff --git a/plugins/module_utils/service_utils.py b/plugins/module_utils/service_utils.py index 1a920f68..8ca2967a 100644 --- a/plugins/module_utils/service_utils.py +++ b/plugins/module_utils/service_utils.py @@ -19,20 +19,41 @@ from ansible_collections.cloudera.cluster.plugins.module_utils.cm_utils import ( normalize_output, resolve_parameter_updates, + wait_command, + wait_commands, + ConfigListUpdates, + TagUpdates, ) from ansible_collections.cloudera.cluster.plugins.module_utils.role_config_group_utils import ( + create_role_config_group, + get_base_role_config_group, parse_role_config_group_result, + update_role_config_group, ) from ansible_collections.cloudera.cluster.plugins.module_utils.role_utils import ( + create_role, + read_roles, + read_roles_by_type, parse_role_result, ) from cm_client import ( ApiClient, ApiConfig, + ApiConfigList, + ApiEntityTag, + ApiHost, + ApiRole, + ApiRoleConfigGroup, + ApiRoleConfigGroupList, + ApiRoleList, + ApiRoleNameList, ApiService, ApiServiceConfig, + ApiServiceList, + ApiServiceState, ClustersResourceApi, + HostsResourceApi, MgmtServiceResourceApi, MgmtRoleConfigGroupsResourceApi, MgmtRolesResourceApi, @@ -58,6 +79,18 @@ ] +class ServiceException(Exception): + pass + + +class ServiceMaintenanceStateException(ServiceException): + pass + + +class InvalidServiceTypeException(ServiceException): + pass + + def parse_service_result(service: ApiService) -> dict: # Retrieve only the cluster_name if it exists if service.cluster_ref is not None: @@ -74,16 +107,27 @@ def parse_service_result(service: ApiService) -> dict: # Parse the role config groups via util function if service.role_config_groups is not None: + parsed_rcgs = [ + parse_role_config_group_result(rcg) for rcg in service.role_config_groups + ] output.update( + # Remove service_name from output role_config_groups=[ - parse_role_config_group_result(rcg) - for rcg in service.role_config_groups + {k: v for k, v in rcg_dict.items() if k != "service_name"} + for rcg_dict in parsed_rcgs ] ) # Parse the roles via util function if service.roles is not None: - output.update(roles=[parse_role_result(r) for r in service.roles]) + parsed_roles = [parse_role_result(r) for r in service.roles] + output.update( + # Remove service_name from output + roles=[ + {k: v for k, v in role_dict.items() if k != "service_name"} + for role_dict in parsed_roles + ] + ) return output @@ -103,7 +147,6 @@ def read_service( """ service_api = ServicesResourceApi(api_client) rcg_api = RoleConfigGroupsResourceApi(api_client) - role_api = RolesResourceApi(api_client) service = service_api.read_service( cluster_name=cluster_name, service_name=service_name @@ -116,22 +159,201 @@ def read_service( ) # Gather each role config group configuration - for rcg in service.role_config_groups: - rcg.config = rcg_api.read_config( - cluster_name=cluster_name, - service_name=service_name, - role_config_group_name=rcg.name, + service.role_config_groups = rcg_api.read_role_config_groups( + 
cluster_name=cluster_name, + service_name=service_name, + ).items + + # Gather each role and its config + service.roles = read_roles( + api_client=api_client, + cluster_name=cluster_name, + service_name=service_name, + ).items + + return service + + +def read_services(api_client: ApiClient, cluster_name: str) -> list[ApiService]: + """Read the cluster services and gather each services' role config group and role dependents. + + Args: + api_client (ApiClient): _description_ + cluster_name (str): _description_ + + Returns: + ApiService: _description_ + """ + service_api = ServicesResourceApi(api_client) + rcg_api = RoleConfigGroupsResourceApi(api_client) + + services = list[ApiService]() + + discovered_services = service_api.read_services( + cluster_name=cluster_name, + ).items + + for service in discovered_services: + # Gather the service-wide configuration + service.config = service_api.read_service_config( + cluster_name=cluster_name, service_name=service.name + ) + + # Gather each role config group configuration + service.role_config_groups = rcg_api.read_role_config_groups( + cluster_name=cluster_name, + service_name=service.name, + ).items + + # Gather each role and its config + service.roles = read_roles( + api_client=api_client, + cluster_name=cluster_name, + service_name=service.name, + ).items + + # Add it to the output + services.append(service) + + return services + + +def create_service_model( + api_client: ApiClient, + name: str, + type: str, + cluster_name: str, + display_name: str = None, + config: dict = None, + tags: dict = None, +) -> ApiService: + if ( + type.upper() + not in ClustersResourceApi(api_client) + .list_service_types( + cluster_name=cluster_name, + ) + .items + ): + raise InvalidServiceTypeException( + f"Invalid service type '{type}' for cluster '{cluster_name}'" + ) + + # Set up the service basics + service = ApiService(name=name, type=str(type).upper()) + + if display_name: + service.display_name = display_name + + # Service-wide configurations + if config: + service.config = ApiConfigList( + items=[ApiConfig(name=k, value=v) for k, v in config.items()] + ) + + # Tags + if tags: + service.tags = [ApiEntityTag(k, v) for k, v in tags.items()] + + return service + + +def provision_service( + api_client: ApiClient, cluster_name: str, service: ApiService +) -> ApiService: + service_api = ServicesResourceApi(api_client) + + provisioned_service = next( + ( + iter( + service_api.create_services( + cluster_name=cluster_name, + body=ApiServiceList(items=[service]), + ).items ) + ), + None, + ) - # Gather each role configuration - for role in service.roles: - role.config = role_api.read_role_config( - cluster_name=cluster_name, - service_name=service_name, - role_name=role.name, + if provisioned_service is None: + return + + # Wait for any running commands like First Run + available_cmds = service_api.list_service_commands( + cluster_name=cluster_name, + service_name=provisioned_service.name, + ) + + running_cmds = service_api.list_active_commands( + cluster_name=cluster_name, + service_name=provisioned_service.name, + ) + + try: + wait_commands(api_client=api_client, commands=running_cmds) + return provisioned_service + except Exception as e: + raise ServiceException(str(e)) + + +def toggle_service_maintenance( + api_client: ApiClient, service: ApiService, maintenance: bool, check_mode: bool +) -> bool: + service_api = ServicesResourceApi(api_client) + changed = False + + if maintenance and not service.maintenance_mode: + changed = True + cmd = 
service_api.enter_maintenance_mode + elif not maintenance and service.maintenance_mode: + changed = True + cmd = service_api.exit_maintenance_mode + + if not check_mode and changed: + maintenance_cmd = cmd( + cluster_name=service.cluster_ref.cluster_name, + service_name=service.name, + ) + + if maintenance_cmd.success is False: + raise ServiceMaintenanceStateException( + f"Unable to set Maintenance mode to '{maintenance}': {maintenance_cmd.result_message}" ) - return service + return changed + + +def toggle_service_state( + api_client: ApiClient, service: ApiService, state: str, check_mode: bool +) -> ApiServiceState: + service_api = ServicesResourceApi(api_client) + changed = None + + if state == "started" and service.service_state not in [ApiServiceState.STARTED]: + changed = ApiServiceState.STARTED + + if service.service_state == ApiServiceState.NA: + cmd = service_api.first_run + else: + cmd = service_api.start_command + elif state == "stopped" and service.service_state not in [ + ApiServiceState.STOPPED, + ApiServiceState.NA, + ]: + changed = ApiServiceState.STOPPED + cmd = service_api.stop_command + elif state == "restarted": + changed = ApiServiceState.STARTED + cmd = service_api.restart_command + + if not check_mode and changed: + exec_cmd = cmd( + cluster_name=service.cluster_ref.cluster_name, + service_name=service.name, + ) + wait_command(api_client=api_client, command=exec_cmd) + + return changed def read_cm_service(api_client: ApiClient) -> ApiService: @@ -190,9 +412,367 @@ def changed(self) -> bool: return bool(self.config.items) -def get_service_hosts(api_client: ApiClient, service: ApiService): - return ( - ClustersResourceApi(api_client) - .list_hosts(cluster_name=service.cluster_ref.cluster_name) +def get_service_hosts(api_client: ApiClient, service: ApiService) -> list[ApiHost]: + host_api = HostsResourceApi(api_client) + seen_hosts = dict() + + for r in ( + RolesResourceApi(api_client) + .read_roles( + cluster_name=service.cluster_ref.cluster_name, + service_name=service.name, + ) .items - ) + ): + if r.host_ref.hostname not in seen_hosts: + seen_hosts[r.host_ref.hostname] = host_api.read_host(r.host_ref.host_id) + + return seen_hosts.values() + + +def reconcile_service_role_config_groups( + api_client: ApiClient, + service: ApiService, + role_config_groups: list[dict], + purge: bool, + check_mode: bool, +) -> tuple[dict, dict]: + # Map the current role config groups by name and by base role type + base_rcg_map, rcg_map = dict(), dict() + for rcg in service.role_config_groups: + if rcg.base: + base_rcg_map[rcg.role_type] = rcg + else: + rcg_map[rcg.name] = rcg + + addition_list = list[ApiRoleConfigGroup]() + diff_before, diff_after = list[dict](), list[dict]() + + rcg_api = RoleConfigGroupsResourceApi(api_client) + + for incoming_rcg in role_config_groups: + incoming_name = incoming_rcg["name"] + + # If it's a custom role config group + if incoming_name is not None: + # If the custom role config group exists, update it + current_rcg = rcg_map.pop(incoming_name, None) + if current_rcg is not None: + (updated_rcg, before, after) = update_role_config_group( + role_config_group=current_rcg, + display_name=incoming_rcg["display_name"], + config=incoming_rcg["config"], + purge=purge, + ) + + if before or after: + diff_before.append(current_rcg.to_dict()) + diff_after.append(updated_rcg.to_dict()) + + if not check_mode: + rcg_api.update_role_config_group( + cluster_name=service.cluster_ref.cluster_name, + service_name=service.name, + role_config_group_name=current_rcg.name, + 
body=updated_rcg, + ) + + # Else create the new custom role config group + else: + created_rcg = create_role_config_group( + api_client=api_client, + cluster_name=service.cluster_ref.cluster_name, + service_name=service.name, + role_type=incoming_rcg["role_type"], + display_name=incoming_rcg["display_name"], + config=incoming_rcg["config"], + ) + diff_before.append(dict()) + diff_after.append(created_rcg.to_dict()) + addition_list(created_rcg) + + # Else it's a base role config group + else: + current_rcg = base_rcg_map.pop(incoming_rcg["role_type"]) + (updated_rcg, before, after) = update_role_config_group( + role_config_group=current_rcg, + display_name=incoming_rcg["display_name"], + config=incoming_rcg["config"], + purge=purge, + ) + + if before or after: + diff_before.append(current_rcg.to_dict()) + diff_after.append(updated_rcg.to_dict()) + + if not check_mode: + rcg_api.update_role_config_group( + cluster_name=service.cluster_ref.cluster_name, + service_name=service.name, + role_config_group_name=current_rcg.name, + body=updated_rcg, + ) + + # Process role config group additions + if addition_list: + if not check_mode: + rcg_api.create_role_config_groups( + cluster_name=service.cluster_ref.cluster_name, + service_name=service.name, + body=ApiRoleConfigGroupList(items=addition_list), + ) + + # Process role config group deletions if purge is set + if purge: + # Reset any remaining base role config groups + for current_rcg in base_rcg_map.values(): + (updated_rcg, before, after) = update_role_config_group( + role_config_group=current_rcg, + purge=purge, + ) + + if before or after: + diff_before.append(current_rcg.to_dict()) + diff_after.append(updated_rcg.to_dict()) + + if not check_mode: + rcg_api.update_role_config_group( + cluster_name=service.cluster_ref.cluster_name, + service_name=service.name, + role_config_group_name=current_rcg.name, + body=updated_rcg, + ) + + # Reset to base and remove any remaining custom role config groups + for current_rcg in rcg_map.values(): + diff_before.append(current_rcg.to_dict()) + diff_after.append(dict()) + + existing_roles = rcg_api.read_roles( + cluster_name=service.cluster_ref.cluster_name, + service_name=service.name, + role_config_group_name=current_rcg.name, + ).items + + if existing_roles: + if not check_mode: + rcg_api.move_roles_to_base_group( + cluster_name=service.cluster_ref.cluster_name, + service_name=service.name, + body=ApiRoleNameList(items=[e.name for e in existing_roles]), + ) + + if not check_mode: + rcg_api.delete_role_config_group( + cluster_name=service.cluster_ref.cluster_name, + service_name=service.name, + role_config_group_name=current_rcg.name, + ) + + return (diff_before, diff_after) + + +def reconcile_service_roles( + api_client: ApiClient, + service: ApiService, + roles: list[dict], + purge: bool, + check_mode: bool, + # maintenance: bool, + # state: str, +) -> tuple[dict, dict]: + + diff_before, diff_after = list[dict](), list[dict]() + + role_api = RolesResourceApi(api_client) + rcg_api = RoleConfigGroupsResourceApi(api_client) + + for incoming_role in roles: + # Prepare for any per-entry changes + role_entry_before, role_entry_after = list(), list() + + # Prepare list for any new role instances + addition_list = list[ApiRole]() + + # Get all existing instances of type per host + current_role_instances = read_roles_by_type( + api_client=api_client, + cluster_name=service.cluster_ref.cluster_name, + service_name=service.name, + role_type=incoming_role["type"], + ).items + + # Get the base role config group for the 
type + base_rcg = get_base_role_config_group( + api_client=api_client, + cluster_name=service.cluster_ref.cluster_name, + service_name=service.name, + role_type=incoming_role["type"], + ) + + # Get the role config group, if defined, for use with all of the instance associations + if incoming_role.get("role_config_group", None) is not None: + incoming_rcg = rcg_api.read_role_config_group( + cluster_name=service.cluster_ref.cluster_name, + service_name=service.name, + role_config_group_name=incoming_role.get("role_config_group"), + ) + else: + incoming_rcg = None + + # Index the current role instances by hostname + instance_map = {r.host_ref.hostname: r for r in current_role_instances} + + # Reconcile existence of type/host + for h in incoming_role["hostnames"]: + # Prepare any role instance changes + instance_role_before, instance_role_after = dict(), dict() + + # Create new role - config, rcg, tags, and host + if h not in instance_map: + created_role = create_role( + api_client=api_client, + cluster_name=service.cluster_ref.cluster_name, + service_name=service.name, + role_type=incoming_role["type"], + hostname=h, + config=incoming_role.get("config", None), + role_config_group=incoming_role.get("role_config_group", None), + tags=incoming_role.get("tags", None), + ) + + # before is already an empty dict + instance_role_after = created_role.to_dict() + + addition_list.append(created_role) + + # Update existing role - config, tags, role config group + else: + current_role = instance_map.pop(h, None) + if current_role is not None: + # Reconcile role override configurations + incoming_config = incoming_role.get("config", None) + if incoming_config or purge: + if incoming_config is None: + incoming_config = dict() + + updates = ConfigListUpdates( + current_role.config, incoming_config, purge + ) + + if updates.changed: + instance_role_before.update(config=current_role.config) + instance_role_after.update(config=updates.config) + + current_role.config = updates.config + + if not check_mode: + role_api.update_role_config( + cluster_name=service.cluster_ref.cluster_name, + service_name=service.name, + role_name=current_role.name, + body=current_role.config, + ) + + # Reconcile role tags + incoming_tags = incoming_role.get("tags", None) + if incoming_tags or purge: + if incoming_tags is None: + incoming_tags = dict() + + tag_updates = TagUpdates( + current_role.tags, incoming_tags, purge + ) + + if tag_updates.changed: + instance_role_before.update(tags=tag_updates.deletions) + instance_role_after.update(tags=tag_updates.additions) + + if tag_updates.deletions: + if not check_mode: + role_api.delete_tags( + cluster_name=service.cluster_ref.cluster_name, + service_name=service.name, + role_name=current_role.name, + body=tag_updates.deletions, + ) + + if tag_updates.additions: + if not check_mode: + role_api.add_tags( + cluster_name=service.cluster_ref.cluster_name, + service_name=service.name, + role_name=current_role.name, + body=tag_updates.additions, + ) + + # Handle role config group associations + # If role config group is not present and the existing reference is not the base, reset to base + if ( + incoming_rcg is None + and current_role.role_config_group_ref.role_config_group_name + != base_rcg.name + ): + instance_role_before.update( + role_config_group=current_role.role_config_group_ref.role_config_group_name + ) + instance_role_after.update(role_config_group=base_rcg.name) + + if not check_mode: + rcg_api.move_roles_to_base_group( + cluster_name=service.cluster_ref.cluster_name, + 
service_name=service.name, + body=ApiRoleNameList(items=[current_role.name]), + ) + # Else if the role config group does not match the declared + elif ( + incoming_rcg is not None + and incoming_rcg.name + != current_role.role_config_group_ref.role_config_group_name + ): + instance_role_before.update( + role_config_group=current_role.role_config_group_ref.role_config_group_name + ) + instance_role_after.update(role_config_group=incoming_rcg.name) + + if not check_mode: + rcg_api.move_roles( + cluster_name=service.cluster_ref.cluster_name, + service_name=service.name, + role_config_group_name=incoming_rcg.name, + body=ApiRoleNameList(items=[current_role.name]), + ) + + # Record any deltas for the role entry + if instance_role_before or instance_role_after: + role_entry_before.append(instance_role_before) + role_entry_after.append(instance_role_after) + + # Process role instance additions + if addition_list: + if not check_mode: + role_api.create_roles( + cluster_name=service.cluster_ref.cluster_name, + service_name=service.name, + body=ApiRoleList(items=addition_list), + ) + + # Process role deletions if purge is set + if purge: + for deleted_role in instance_map.values(): + role_entry_before.append(deleted_role.to_dict()) + role_entry_after.append(dict()) + + if not check_mode: + role_api.delete_role( + cluster_name=service.cluster_ref.cluster_name, + service_name=service.name, + role_name=deleted_role.name, + ) + + # Add any changes for the role entry + if role_entry_before or role_entry_after: + diff_before.append(role_entry_before) + diff_after.append(role_entry_after) + + return (diff_before, diff_after) diff --git a/plugins/modules/service.py b/plugins/modules/service.py index ff373a7f..de18b7cb 100644 --- a/plugins/modules/service.py +++ b/plugins/modules/service.py @@ -1,7 +1,7 @@ #!/usr/bin/python # -*- coding: utf-8 -*- -# Copyright 2024 Cloudera, Inc. All Rights Reserved. +# Copyright 2025 Cloudera, Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -22,55 +22,142 @@ - Manage a service in a cluster. author: - "Webster Mudge (@wmudge)" -requirements: - - cm-client options: cluster: description: - - The associated cluster. + - The associated cluster of the service. type: str required: yes aliases: - cluster_name - service: + name: description: - - The service. + - The service to manage. + - This is a unique identifier within the cluster. type: str required: yes aliases: - service_name - - name + - service display_name: description: - The Cloudera Manager UI display name for the service. type: str + type: + description: + - The service type. + - Required if O(state) creates a new service. + type: str + aliases: + - service_type maintenance: description: - - Flag for whether the service should be in maintenance mode. + - Flag indicating if the service should be in maintenance mode. type: bool aliases: - maintenance_mode + purge: + description: + - Flag indicating if the declared service-wide configurations, tags, role config groups, and role assignments and configurations should be append-only or fully reconciled. + - If set, the module will actively remove undeclared entries, e.g. remove roles. + - To clear all service-wide configurations and tags, set O(tags={}) or O(config={}), i.e. an empty dictionary, and O(purge=True). + type: bool + default: False + config: + description: + - A set of service-wide configurations for the service. 
+ - To unset a configuration, use V(None) as its value. + - If O(purge=True), undeclared configurations will be removed. + type: dict tags: description: - A set of tags applied to the service. - - To unset a tag, use C(None) as its value. + - To unset a tag, use V(None) as its value. + - If O(purge=True), undeclared tags will be removed. type: dict - type: + roles: description: - - The service type. - - Required if I(state) creates a new service. - type: str - aliases: - - service_type - purge: + - List of service roles to provision directly to cluster hosts. + - If O(purge=True), undeclared roles for the service will be removed from the hosts. + type: list + elements: dict + options: + type: + description: + - The role instance type to provision on the designated cluster hosts. + type: str + required: yes + aliases: + - role_type + hostnames: + description: + - List of hostnames of the cluster hosts receiving the role type instance. + type: list + elements: str + required: yes + aliases: + - cluster_hosts + - cluster_hostnames + config: + description: + - A set of role override configurations for the role instance on the cluster hosts. + - To unset a configuration, use V(None) as its value. + - If O(purge=True), undeclared configurations will be removed. + type: dict + aliases: + - parameters + - params + role_config_group: + description: + - A named (custom) role config group to assign to the role instance on the cluster hosts. + - To unset the assignment, use V(None) as the value. + type: str + tags: + description: + - A set of tags applied to the role type instance on the cluster hosts. + - To unset a tag, use V(None) as its value. + - If O(purge=True), undeclared tags will be removed. + type: dict + role_config_groups: description: - - Flag for whether the declared service tags should append or overwrite any existing tags. - - To clear all tags, set I(tags={}), i.e. an empty dictionary, and I(purge=True). - type: bool - default: False + - List of base and named (custom) role config groups to declare and configure for the service. + - If O(purge=True), undeclared named (custom) role config groups will be removed and their + associated role instances reassigned to each role type's base role config group. (Base role + config groups cannot be removed.) + type: list + elements: dict + options: + name: + description: + - The name of a custom role config group. + type: str + aliases: + - role_config_group_name + - role_config_group + display_name: + description: + - The Cloudera Manager UI display name for the role config group. + type: str + role_type: + description: + - The role type of the base or named (custom) role config group. + type: str + required: yes + aliases: + - type + config: + description: + - A set of role config group configurations. + - To unset a configuration, use V(None) as its value. + - If O(purge=True), undeclared configurations will be removed. + type: dict + aliases: + - parameters + - params state: description: - The state of the service. + - Setting O(state=restarted) will always result in a V(changed=True) result. 
type: str default: present choices: @@ -90,6 +177,10 @@ support: full platform: platforms: all +requirements: + - cm-client +seealso: + - module: cloudera.cluster.service_info """ EXAMPLES = r""" @@ -163,6 +254,98 @@ tags: {} purge: yes +- name: Update (append) several service-wide configurations on a cluster service + cloudera.cluster.service: + host: example.cloudera.com + username: "jane_smith" + password: "S&peR4Ec*re" + cluster: example_cluster + service: example_ecs + config: + param_one: 1 + param_two: Two + +- name: Update (purge) the service-wide configurations on a cluster service + cloudera.cluster.service: + host: example.cloudera.com + username: "jane_smith" + password: "S&peR4Ec*re" + cluster: example_cluster + service: example_ecs + config: + param_one: 1 + param_three: three + purge: yes + +- name: Remove all the service-wide configurations on a cluster service + cloudera.cluster.service: + host: example.cloudera.com + username: "jane_smith" + password: "S&peR4Ec*re" + cluster: example_cluster + service: example_ecs + config: {} + purge: yes + +- name: Provision role instances on cluster hosts for a cluster service + cloudera.cluster.service: + host: example.cloudera.com + username: "jane_smith" + password: "S&peR4Ec*re" + cluster: example_cluster + service: example_ecs + roles: + - type: SERVER + hostnames: + - host1.example + - host2.example + config: + param_one: 1 + +- name: Provision role config groups (base and named) for a cluster service + cloudera.cluster.service: + host: example.cloudera.com + username: "jane_smith" + password: "S&peR4Ec*re" + cluster: example_cluster + service: example_ecs + role_config_group: + - name: custom_server_1 + display_name: Custom Server (1) + role_type: SERVER + config: + param_two: Two + - role_type: SERVER # This is the base role config group for SERVER + config: + param_three: three + +- name: Provision a cluster service with hosts, role config groups, and role assignments + cloudera.cluster.service: + host: example.cloudera.com + username: "jane_smith" + password: "S&peR4Ec*re" + cluster: example_cluster + service: example_ecs + roles: + - type: SERVER + hostnames: + - host1.example + config: + param_two: Twelve + role_config_group: custom_server_1 + - type: SERVER # Will use the base role config group for SERVER + hostnames: + - host2.example + role_config_group: + - name: custom_server_1 + display_name: Custom Server (1) + role_type: SERVER + config: + param_two: Two + - role_type: SERVER # This is the base role config group for SERVER + config: + param_three: three + - name: Remove a cluster service cloudera.cluster.service: host: example.cloudera.com @@ -291,25 +474,205 @@ description: Version of the service. type: str returned: when supported + config: + description: Service-wide configuration details about a cluster service. + type: dict + returned: when supported + role_config_groups: + description: List of base and custom role config groups for the cluster service. + type: list + elements: dict + contains: + name: + description: + - The unique name of this role config group. + type: str + returned: always + role_type: + description: + - The type of the roles in this group. + type: str + returned: always + base: + description: + - Flag indicating whether this is a base group. + type: bool + returned: always + display_name: + description: + - A user-friendly name of the role config group, as would have been shown in the web UI. 
+ type: str + returned: when supported + config: + description: Set of configurations for the role config group. + type: dict + returned: when supported + returned: when supported + roles: + description: List of provisioned role instances on cluster hosts for the cluster service. + type: list + elements: dict + contains: + name: + description: The cluster service role name. + type: str + returned: always + type: + description: The cluster service role type. + type: str + returned: always + sample: + - NAMENODE + - DATANODE + - TASKTRACKER + host_id: + description: The unique ID of the cluster host. + type: str + returned: always + hostname: + description: The hostname of the cluster host. + type: str + returned: always + role_state: + description: State of the cluster service role. + type: str + returned: always + sample: + - HISTORY_NOT_AVAILABLE + - UNKNOWN + - STARTING + - STARTED + - STOPPING + - STOPPED + - NA + commission_state: + description: Commission state of the cluster service role. + type: str + returned: always + health_summary: + description: The high-level health status of the cluster service role. + type: str + returned: always + sample: + - DISABLED + - HISTORY_NOT_AVAILABLE + - NOT_AVAILABLE + - GOOD + - CONCERNING + - BAD + config_staleness_status: + description: Status of configuration staleness for the cluster service role. + type: str + returned: always + sample: + - FRESH + - STALE_REFRESHABLE + - STALE + health_checks: + description: Lists all available health checks for cluster service role. + type: list + elements: dict + returned: when supported + contains: + name: + description: Unique name of this health check. + type: str + returned: always + summary: + description: The high-level health status of the health check. + type: str + returned: always + sample: + - DISABLED + - HISTORY_NOT_AVAILABLE + - NOT_AVAILABLE + - GOOD + - CONCERNING + - BAD + explanation: + description: The explanation of this health check. + type: str + returned: when supported + suppressed: + description: + - Whether this health check is suppressed. + - A suppressed health check is not considered when computing the role's overall health. + type: bool + returned: when supported + maintenance_mode: + description: Whether the cluster service role is in maintenance mode. + type: bool + returned: when supported + maintenance_owners: + description: The list of objects that trigger this service to be in maintenance mode. + type: list + elements: str + returned: when supported + sample: + - CLUSTER + - SERVICE + - ROLE + - HOST + - CONTROL_PLANE + role_config_group_name: + description: The name of the cluster service role config group, which uniquely identifies it in a Cloudera Manager installation. + type: str + returned: when supported + config: + description: Set of role configurations for the cluster service role. + type: dict + returned: when supported + tags: + description: The dictionary of tags for the cluster service role. + type: dict + returned: when supported + zoo_keeper_server_mode: + description: + - The Zookeeper server mode for this cluster service role. + - Note that for non-Zookeeper Server roles, this will be C(null). 
+ type: str + returned: when supported + returned: when supported """ -from ansible_collections.cloudera.cluster.plugins.module_utils.cm_utils import ( - ClouderaManagerMutableModule, - resolve_tag_updates, -) -from ansible_collections.cloudera.cluster.plugins.module_utils.service_utils import ( - parse_service_result, -) - from cm_client import ( - ApiEntityTag, ApiService, - ApiServiceList, ClustersResourceApi, + RoleConfigGroupsResourceApi, ServicesResourceApi, ) from cm_client.rest import ApiException +from ansible.module_utils.common.text.converters import to_native + +from ansible_collections.cloudera.cluster.plugins.module_utils.cm_utils import ( + ClouderaManagerMutableModule, + ConfigListUpdates, + TagUpdates, +) +from ansible_collections.cloudera.cluster.plugins.module_utils.role_config_group_utils import ( + create_role_config_group, + get_base_role_config_group, + provision_role_config_groups, + update_role_config_group, +) +from ansible_collections.cloudera.cluster.plugins.module_utils.role_utils import ( + create_role, + provision_service_role, + RoleException, +) +from ansible_collections.cloudera.cluster.plugins.module_utils.service_utils import ( + create_service_model, + parse_service_result, + provision_service, + read_service, + reconcile_service_role_config_groups, + reconcile_service_roles, + toggle_service_maintenance, + toggle_service_state, + ServiceMaintenanceStateException, +) + class ClusterService(ClouderaManagerMutableModule): def __init__(self, module): @@ -317,13 +680,16 @@ def __init__(self, module): # Set the parameters self.cluster = self.get_param("cluster") - self.service = self.get_param("service") - self.maintenance = self.get_param("maintenance") + self.name = self.get_param("name") self.display_name = self.get_param("display_name") - self.tags = self.get_param("tags") self.type = self.get_param("type") - self.state = self.get_param("state") + self.maintenance = self.get_param("maintenance") self.purge = self.get_param("purge") + self.config = self.get_param("config") + self.tags = self.get_param("tags") + self.roles = self.get_param("roles") + self.role_config_groups = self.get_param("role_config_groups") + self.state = self.get_param("state") # Initialize the return values self.changed = False @@ -343,194 +709,404 @@ def process(self): else: raise ex - api_instance = ServicesResourceApi(self.api_client) - existing = None + service_api = ServicesResourceApi(self.api_client) + current = None + # Try and retrieve the service by name try: - existing = api_instance.read_service( - self.cluster, self.service, view="full" + current = read_service( + api_client=self.api_client, + cluster_name=self.cluster, + service_name=self.name, ) except ApiException as ex: if ex.status != 404: raise ex if self.state == "absent": - if existing: - api_instance.delete_service(self.cluster, self.service) + if current: + self.changed = True - elif self.state in ["present", "started", "stopped"]: - if existing: + if self.module._diff: + self.diff = dict(before=parse_service_result(current), after=dict()) - # Handle maintenance mode - if ( - self.maintenance is not None - and self.maintenance != existing.maintenance_mode - ): - self.changed = True + if not self.module.check_mode: + service_api.delete_service(self.cluster, self.name) - if self.module._diff: - self.diff["before"].update( - maintenance_mode=existing.maintenance_mode + elif self.state in ["present", "restarted", "started", "stopped"]: + # If it is a new service + if not current: + self.changed = True + + if 
self.type is None: + self.module.fail_json(msg=f"missing required arguments: type") + + # Create and provision the service + service = create_service_model( + api_client=self.api_client, + name=self.name, + type=self.type, + cluster_name=self.cluster, + display_name=self.display_name, + config=self.config, + tags=self.tags, + # role_config_groups=self.role_config_groups, + # roles=self.roles, + ) + + if self.module._diff: + self.diff = dict( + before={}, + after=service.to_dict(), + ) + + if not self.module.check_mode: + current = provision_service( + api_client=self.api_client, + cluster_name=self.cluster, + service=service, + ) + + if not current: + self.module.fail_json( + msg="Unable to create new service", + service=to_native(service.to_dict()), ) - self.diff["after"].update(maintenance_mode=self.maintenance) - if not self.module.check_mode: - if self.maintenance: - maintenance_cmd = api_instance.enter_maintenance_mode( - self.cluster, self.service + # Create and provision the role config groups + if self.role_config_groups: + rcg_list = list() + base_rcg = None + + if self.module._diff: + before_rcg, after_rcg = list(), list() + + for requested_rcg in self.role_config_groups: + # Create any custom role config groups + if requested_rcg["name"] is not None: + custom_rcg = create_role_config_group( + api_client=self.api_client, + cluster_name=self.cluster, + service_name=current.name, + name=requested_rcg["name"], + role_type=requested_rcg["role_type"], + display_name=requested_rcg.get("display_name", None), + config=requested_rcg.get("config", None), ) + + rcg_list.append(custom_rcg) + + if self.module._diff: + before_rcg.append(dict()) + after_rcg.append(custom_rcg.to_dict()) + + # Else record the base role config group for modification else: - maintenance_cmd = api_instance.exit_maintenance_mode( - self.cluster, self.service + current_base_rcg = get_base_role_config_group( + api_client=self.api_client, + cluster_name=self.cluster, + service_name=current.name, + role_type=requested_rcg["role_type"], ) - if maintenance_cmd.success is False: - self.module.fail_json( - msg=f"Unable to set Maintenance mode to '{self.maintenance}': {maintenance_cmd.result_message}" + (base_rcg, before, after) = update_role_config_group( + role_config_group=current_base_rcg, + display_name=requested_rcg.get("display_name", None), + config=requested_rcg.get("config", None), + purge=self.purge, ) - # Tags - if self.tags: - (delta_add, delta_del) = resolve_tag_updates( - {t.name: t.value for t in existing.tags}, self.tags, self.purge - ) + if self.module._diff: + before_rcg.append(before) + after_rcg.append(after) - if delta_add or delta_del: - self.changed = True + if self.module._diff: + self.diff["before"]["role_config_groups"] = before_rcg + self.diff["after"]["role_config_groups"] = after_rcg + + if not self.module.check_mode: + provision_role_config_groups( + api_client=self.api_client, + cluster_name=self.cluster, + service_name=current.name, + role_config_groups=rcg_list, + ) + if base_rcg is not None: + RoleConfigGroupsResourceApi( + self.api_client + ).update_role_config_group( + cluster_name=self.cluster, + service_name=current.name, + role_config_group_name=base_rcg.name, + message=self.message, + body=base_rcg, + ) + + # Create and provision roles + if self.roles: + if self.module._diff: + role_entries_before, role_entries_after = list(), list() + + for requested_role in self.roles: if self.module._diff: - self.diff["before"].update(tags=delta_del) - self.diff["after"].update(tags=delta_add) + 
role_instances_before, role_instances_after = list(), list() - if not self.module.check_mode: - if delta_del: - api_instance.delete_tags( - self.cluster, - self.service, - body=[ - ApiEntityTag(k, v) for k, v in delta_del.items() - ], + for role_host in requested_role["hostnames"]: + try: + created_role = create_role( + api_client=self.api_client, + cluster_name=self.cluster, + service_name=current.name, + role_type=requested_role["type"], + hostname=role_host, + config=requested_role.get("config", None), + role_config_group=requested_role.get( + "role_config_group", None + ), + tags=requested_role.get("tags", None), ) - if delta_add: - api_instance.add_tags( - self.cluster, - self.service, - body=[ - ApiEntityTag(k, v) for k, v in delta_add.items() - ], + except RoleException as ex: + self.module.fail_json(msg=to_native(ex)) + + if self.module._diff: + role_instances_before.append(dict()) + role_instances_after.append(created_role.to_dict()) + + if not self.module.check_mode: + provisioned_role = provision_service_role( + api_client=self.api_client, + cluster_name=self.cluster, + service_name=current.name, + role=created_role, ) - # TODO Config + if not provisioned_role: + self.module.fail_json( + msg=f"Unable to create new role in service '{current.name}'", + role=to_native(provisioned_role.to_dict()), + ) - # Service details - # Currently, only display_name - delta = dict() + if self.module._diff: + role_entries_before.append(role_instances_before) + role_entries_after.append(role_instances_after) - if self.display_name and self.display_name != existing.display_name: - delta.update(display_name=self.display_name) + # Set the maintenance + self.handle_maintenance(current) - if delta: - self.changed = True + # Else the service exists, so address any changes + else: + if self.type and self.type.upper() != current.type: + self.module.fail_json( + msg="Service name already in use for type: " + current.type + ) - if self.module._diff: - self.diff["before"].update(display_name=existing.display_name) - self.diff["after"].update(display_name=self.display_name) + # Set the maintenance + self.handle_maintenance(current) - if not self.module.check_mode: - api_instance.update_service( - self.cluster, self.service, body=ApiService(**delta) - ) + # Handle service-wide configurations + if self.config or self.purge: + if self.config is None: + self.config = dict() - if self.state == "started" and existing.service_state != "STARTED": - self.changed = True + config_updates = ConfigListUpdates( + current.config, self.config, self.purge + ) - if self.module._diff: - self.diff["before"].update(service_state=existing.service_state) - self.diff["after"].update(service_state="STARTED") + if config_updates.changed: + self.changed = True - if not self.module.check_mode: - if existing.service_state == "NA": - self.wait_command( - api_instance.first_run(self.cluster, self.service) + if self.module._diff: + self.diff["before"].update( + config=config_updates.diff["before"] ) - else: - self.wait_command( - api_instance.start_command(self.cluster, self.service) + self.diff["after"].update( + config=config_updates.diff["after"] ) - elif self.state == "stopped" and existing.service_state not in [ - "STOPPED", - "NA", - ]: + if not self.module.check_mode: + service_api.update_service_config( + cluster_name=self.cluster, + service_name=self.name, + message=self.message, + body=config_updates.config, + ) + + # Handle tags + if self.tags or self.purge: + if self.tags is None: + self.tags = dict() + + tag_updates = 
TagUpdates(current.tags, self.tags, self.purge) + + if tag_updates.changed: + self.changed = True + + if self.module._diff: + self.diff["before"].update(tags=tag_updates.diff["before"]) + self.diff["after"].update(tags=tag_updates.diff["after"]) + + if not self.module.check_mode: + if tag_updates.deletions: + service_api.delete_tags( + cluster_name=self.cluster, + service_name=self.name, + body=tag_updates.deletions, + ) + + if tag_updates.additions: + service_api.add_tags( + cluster_name=self.cluster, + service_name=self.name, + body=tag_updates.additions, + ) + + # Handle service details (currently, only display_name) + if self.display_name and self.display_name != current.display_name: self.changed = True + current.display_name = self.display_name if self.module._diff: - self.diff["before"].update(service_state=existing.service_state) - self.diff["after"].update(service_state="STOPPED") + self.diff["before"].update(display_name=current.display_name) + self.diff["after"].update(display_name=self.display_name) if not self.module.check_mode: - self.wait_command( - api_instance.stop_command(self.cluster, self.service) + service_api.update_service( + cluster_name=self.cluster, + service_name=self.name, + body=current, ) - if self.changed: - self.output = parse_service_result( - api_instance.read_service( - self.cluster, self.service, view="full" - ) - ) - else: - self.output = parse_service_result(existing) - else: + # Handle role config groups + if self.role_config_groups or self.purge: + if self.role_config_groups is None: + self.role_config_groups = list() - # Service doesn't exist - if self.type is None: - self.module.fail_json( - msg=f"Service does not exist, missing required arguments: type" + (before_rcg, after_rcg) = reconcile_service_role_config_groups( + api_client=self.api_client, + service=current, + role_config_groups=self.role_config_groups, + purge=self.purge, + check_mode=self.module.check_mode, ) - payload = dict(name=self.service, type=str(self.type).upper()) - - if self.display_name: - payload.update(display_name=self.display_name) - - service_list = ApiServiceList([ApiService(**payload)]) + if before_rcg or after_rcg: + self.changed = True + if self.module._diff: + self.diff["before"].update(role_config_groups=before_rcg) + self.diff["after"].update(role_config_groups=after_rcg) - self.changed = True + # Handle roles + if self.roles or self.purge: + if self.roles is None: + self.roles = list() - if self.module._diff: - self.diff = dict( - before={}, - after=payload, + (before_role, after_role) = reconcile_service_roles( + api_client=self.api_client, + service=current, + roles=self.roles, + purge=self.purge, + check_mode=self.module.check_mode, + # state=self.state, + # maintenance=self.maintenance, ) - if not self.module.check_mode: - api_instance.create_services(self.cluster, body=service_list) + if before_role or after_role: + self.changed = True + if self.module._diff: + self.diff["before"].update(roles=before_role) + self.diff["after"].update(roles=after_role) - if self.state == "started": - self.wait_command( - api_instance.first_run(self.cluster, self.service) - ) + # Handle state changes + state_changed = toggle_service_state( + api_client=self.api_client, + service=current, + state=self.state, + check_mode=self.module.check_mode, + ) + + if state_changed is not None: + self.changed = True + if self.module._diff: + self.diff["before"].update(service_state=current.service_state) + self.diff["after"].update(service_state=state_changed) + # If there are changes, get a 
fresh read + if self.changed: self.output = parse_service_result( - api_instance.read_service(self.cluster, self.service, view="full") + read_service( + api_client=self.api_client, + cluster_name=self.cluster, + service_name=self.name, + ) ) + else: + self.output = parse_service_result(current) else: self.module.fail_json(msg=f"Invalid state: {self.state}") + def handle_maintenance(self, service: ApiService) -> None: + if self.maintenance is not None: + try: + state_changed = toggle_service_maintenance( + api_client=self.api_client, + service=service, + maintenance=self.maintenance, + check_mode=self.module.check_mode, + ) + except ServiceMaintenanceStateException as ex: + self.module.fail_json(msg=to_native(ex)) + + if state_changed: + self.changed = True + if self.module._diff: + self.diff["before"].update( + maintenance_mode=service.maintenance_mode + ) + self.diff["after"].update(maintenance_mode=self.maintenance) + def main(): module = ClouderaManagerMutableModule.ansible_module( argument_spec=dict( cluster=dict(required=True, aliases=["cluster_name"]), - service=dict(required=True, aliases=["service_name", "name"]), - maintenance=dict(type="bool", aliases=["maintenance_mode"]), + name=dict(required=True, aliases=["service_name", "service"]), display_name=dict(), - tags=dict(type=dict), - purge=dict(type="bool", default=False), type=dict(aliases=["service_type"]), + # version=dict(), + maintenance=dict(type="bool", aliases=["maintenance_mode"]), + purge=dict(type="bool", default=False), + config=dict(type="dict", aliases=["service_wide_config"]), + tags=dict(type="dict"), + roles=dict( + type="list", + elements="dict", + options=dict( + type=dict(required=True, aliases=["role_type"]), + hostnames=dict( + required=True, + type="list", + elements="str", + aliases=["cluster_hosts", "cluster_hostnames"], + ), + config=dict(type="dict", aliases=["parameters", "params"]), + role_config_group=dict(), + tags=dict(type="dict"), + ), + ), + role_config_groups=dict( + type="list", + elements="dict", + options=dict( + name=dict(aliases=["role_config_group_name", "role_config_group"]), + display_name=dict(), + role_type=dict(required=True, aliases=["type"]), + config=dict(type="dict", aliases=["params", "parameters"]), + ), + ), state=dict( - default="present", choices=["present", "absent", "started", "stopped"] + default="present", + choices=["present", "absent", "started", "stopped", "restarted"], ), ), supports_check_mode=True, diff --git a/plugins/modules/service_info.py b/plugins/modules/service_info.py index 5ca2858f..8821832a 100644 --- a/plugins/modules/service_info.py +++ b/plugins/modules/service_info.py @@ -1,6 +1,7 @@ +#!/usr/bin/python # -*- coding: utf-8 -*- -# Copyright 2024 Cloudera, Inc. All Rights Reserved. +# Copyright 2025 Cloudera, Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,33 +15,13 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from ansible_collections.cloudera.cluster.plugins.module_utils.cm_utils import ( - ClouderaManagerModule, -) -from ansible_collections.cloudera.cluster.plugins.module_utils.service_utils import ( - parse_service_result, -) - -from cm_client import ServicesResourceApi -from cm_client.rest import ApiException - - -ANSIBLE_METADATA = { - "metadata_version": "1.1", - "status": ["preview"], - "supported_by": "community", -} - DOCUMENTATION = r""" ---- module: service_info short_description: Retrieve information about the services of cluster description: - Gather information about services of a CDP cluster. author: - "Webster Mudge (@wmudge)" -requirements: - - cm_client options: cluster: description: @@ -49,34 +30,31 @@ required: yes aliases: - cluster_name - service: + name: description: - A service to retrieve. - If absent, the module will return all services. type: str aliases: - service_name - - name - view: - description: - - The view to materialize. - - C(healthcheck) is the equivalent to I(full_with_health_check_explanation). - - C(redacted) is the equivalent to I(export_redacted). - type: str - default: summary - choices: - - summary - - full - - healthcheck - - export - - redacted + - service extends_documentation_fragment: - cloudera.cluster.cm_options - cloudera.cluster.cm_endpoint +attributes: + check_mode: + support: full + diff_mode: + support: full + platform: + platforms: all +requirements: + - cm-client +seealso: + - module: cloudera.cluster.service """ EXAMPLES = r""" ---- - name: Gather details of the services of a cluster cloudera.cluster.service_info: host: "example.cloudera.host" @@ -95,7 +73,6 @@ """ RETURN = r""" ---- services: description: Details about the services of a cluster. type: list @@ -214,8 +191,182 @@ description: Version of the service. type: str returned: when supported + config: + description: Service-wide configuration details about a cluster service. + type: dict + returned: when supported + role_config_groups: + description: List of base and custom role config groups for the cluster service. + type: list + elements: dict + contains: + name: + description: + - The unique name of this role config group. + type: str + returned: always + role_type: + description: + - The type of the roles in this group. + type: str + returned: always + base: + description: + - Flag indicating whether this is a base group. + type: bool + returned: always + display_name: + description: + - A user-friendly name of the role config group, as would have been shown in the web UI. + type: str + returned: when supported + config: + description: Set of configurations for the role config group. + type: dict + returned: when supported + returned: when supported + roles: + description: List of provisioned role instances on cluster hosts for the cluster service. + type: list + elements: dict + contains: + name: + description: The cluster service role name. + type: str + returned: always + type: + description: The cluster service role type. + type: str + returned: always + sample: + - NAMENODE + - DATANODE + - TASKTRACKER + host_id: + description: The unique ID of the cluster host. + type: str + returned: always + hostname: + description: The hostname of the cluster host. + type: str + returned: always + role_state: + description: State of the cluster service role. + type: str + returned: always + sample: + - HISTORY_NOT_AVAILABLE + - UNKNOWN + - STARTING + - STARTED + - STOPPING + - STOPPED + - NA + commission_state: + description: Commission state of the cluster service role. 
+ type: str + returned: always + health_summary: + description: The high-level health status of the cluster service role. + type: str + returned: always + sample: + - DISABLED + - HISTORY_NOT_AVAILABLE + - NOT_AVAILABLE + - GOOD + - CONCERNING + - BAD + config_staleness_status: + description: Status of configuration staleness for the cluster service role. + type: str + returned: always + sample: + - FRESH + - STALE_REFRESHABLE + - STALE + health_checks: + description: Lists all available health checks for cluster service role. + type: list + elements: dict + returned: when supported + contains: + name: + description: Unique name of this health check. + type: str + returned: always + summary: + description: The high-level health status of the health check. + type: str + returned: always + sample: + - DISABLED + - HISTORY_NOT_AVAILABLE + - NOT_AVAILABLE + - GOOD + - CONCERNING + - BAD + explanation: + description: The explanation of this health check. + type: str + returned: when supported + suppressed: + description: + - Whether this health check is suppressed. + - A suppressed health check is not considered when computing the role's overall health. + type: bool + returned: when supported + maintenance_mode: + description: Whether the cluster service role is in maintenance mode. + type: bool + returned: when supported + maintenance_owners: + description: The list of objects that trigger this service to be in maintenance mode. + type: list + elements: str + returned: when supported + sample: + - CLUSTER + - SERVICE + - ROLE + - HOST + - CONTROL_PLANE + role_config_group_name: + description: The name of the cluster service role config group, which uniquely identifies it in a Cloudera Manager installation. + type: str + returned: when supported + config: + description: Set of role configurations for the cluster service role. + type: dict + returned: when supported + tags: + description: The dictionary of tags for the cluster service role. + type: dict + returned: when supported + zoo_keeper_server_mode: + description: + - The Zookeeper server mode for this cluster service role. + - Note that for non-Zookeeper Server roles, this will be C(null). 
+ type: str + returned: when supported + returned: when supported """ +from cm_client import ( + ClustersResourceApi, + ServicesResourceApi, +) +from cm_client.rest import ApiException + +from ansible_collections.cloudera.cluster.plugins.module_utils.cm_utils import ( + ClouderaManagerModule, +) +from ansible_collections.cloudera.cluster.plugins.module_utils.service_utils import ( + parse_service_result, + read_service, + read_services, +) + class ClusterServiceInfo(ClouderaManagerModule): def __init__(self, module): @@ -223,32 +374,35 @@ def __init__(self, module): # Set the parameters self.cluster = self.get_param("cluster") - self.service = self.get_param("service") + self.name = self.get_param("name") self.view = self.get_param("view") # Initialize the return values - self.services = [] + self.output = [] # Execute the logic self.process() @ClouderaManagerModule.handle_process def process(self): - api_instance = ServicesResourceApi(self.api_client) + try: + ClustersResourceApi(self.api_client).read_cluster(self.cluster) + except ApiException as ex: + if ex.status == 404: + self.module.fail_json(msg="Cluster does not exist: " + self.cluster) + else: + raise ex - if self.view == "healthcheck": - self.view = "full_with_health_check_explanation" - elif self.view == "redacted": - self.view = "export_redacted" + service_api = ServicesResourceApi(self.api_client) - if self.service: + if self.name: try: - self.services.append( + self.output.append( parse_service_result( - api_instance.read_service( + read_service( + api_client=self.api_client, cluster_name=self.cluster, - service_name=self.service, - view=self.view, + service_name=self.name, ) ) ) @@ -256,11 +410,12 @@ def process(self): if e.status != 404: raise e else: - self.services = [ + self.output = [ parse_service_result(s) - for s in api_instance.read_services( - cluster_name=self.cluster, view=self.view - ).items + for s in read_services( + api_client=self.api_client, + cluster_name=self.cluster, + ) ] @@ -268,11 +423,7 @@ def main(): module = ClouderaManagerModule.ansible_module( argument_spec=dict( cluster=dict(required=True, aliases=["cluster_name"]), - service=dict(aliases=["service_name", "name"]), - view=dict( - default="summary", - choices=["summary", "full", "healthcheck", "export", "redacted"], - ), + name=dict(aliases=["service_name", "service"]), ), supports_check_mode=True, ) @@ -281,7 +432,7 @@ def main(): output = dict( changed=False, - services=result.services, + services=result.output, ) if result.debug: diff --git a/plugins/modules/service_role.py b/plugins/modules/service_role.py index 953cd72b..c2e03a08 100644 --- a/plugins/modules/service_role.py +++ b/plugins/modules/service_role.py @@ -620,17 +620,17 @@ def process(self): if not self.module.check_mode: if tag_updates.deletions: role_api.delete_tags( - self.cluster, - self.name, - self.service, + cluster_name=self.cluster, + service_name=self.service, + role_name=self.name, body=tag_updates.deletions, ) if tag_updates.additions: role_api.add_tags( - self.cluster, - self.name, - self.service, + cluster_name=self.cluster, + service_name=self.service, + role_name=self.name, body=tag_updates.additions, ) diff --git a/tests/unit/__init__.py b/tests/unit/__init__.py index 9608ef8a..de4b7a6c 100644 --- a/tests/unit/__init__.py +++ b/tests/unit/__init__.py @@ -46,6 +46,8 @@ from cm_client.rest import ApiException from ansible_collections.cloudera.cluster.plugins.module_utils.cm_utils import ( + wait_command, + wait_commands, resolve_parameter_updates, ) from 
ansible_collections.cloudera.cluster.plugins.module_utils.host_utils import ( @@ -217,10 +219,43 @@ def deregister_service(api_client: ApiClient, registry: list[ApiService]) -> Non # Delete the services for s in registry: - service_api.delete_service( - cluster_name=s.cluster_ref.cluster_name, - service_name=s.name, - ) + try: + # Check for running commands and wait for them to finish + active_cmds = service_api.list_active_commands( + cluster_name=s.cluster_ref.cluster_name, + service_name=s.name, + ) + + wait_commands( + api_client=api_client, + commands=active_cmds, + ) + + # If the service is running, stop it + current = service_api.read_service( + cluster_name=s.cluster_ref.cluster_name, + service_name=s.name, + ) + + if current.service_state == ApiServiceState.STARTED: + stop_cmd = service_api.stop_command( + cluster_name=s.cluster_ref.cluster_name, + service_name=s.name, + ) + + wait_command( + api_client=api_client, + command=stop_cmd, + ) + + # Delete the service + service_api.delete_service( + cluster_name=s.cluster_ref.cluster_name, + service_name=s.name, + ) + except ApiException as e: + if e.status != 404: + raise e def register_role( @@ -355,21 +390,21 @@ def deregister_role_config_group( for rcg in registry: # Delete the custom role config groups if not rcg.base: - existing_roles = rcg_api.read_roles( - cluster_name=rcg.service_ref.cluster_name, - service_name=rcg.service_ref.service_name, - role_config_group_name=rcg.name, - ).items - - if existing_roles: - rcg_api.move_roles_to_base_group( + # The role might already be deleted, so ignore if not found + try: + existing_roles = rcg_api.read_roles( cluster_name=rcg.service_ref.cluster_name, service_name=rcg.service_ref.service_name, - body=ApiRoleNameList([r.name for r in existing_roles]), - ) + role_config_group_name=rcg.name, + ).items + + if existing_roles: + rcg_api.move_roles_to_base_group( + cluster_name=rcg.service_ref.cluster_name, + service_name=rcg.service_ref.service_name, + body=ApiRoleNameList([r.name for r in existing_roles]), + ) - # The role might already be deleted, so ignore if not found - try: rcg_api.delete_role_config_group( cluster_name=rcg.service_ref.cluster_name, service_name=rcg.service_ref.service_name, diff --git a/tests/unit/conftest.py b/tests/unit/conftest.py index 54c374d6..dd82d8ee 100644 --- a/tests/unit/conftest.py +++ b/tests/unit/conftest.py @@ -1113,86 +1113,6 @@ def monitor_command( raise Exception(command.result_message) -# @pytest.fixture(scope="module") -# def test_service(cm_api_client) -> Generator[Callable[[ApiCluster, ApiService], ApiService]]: -# service_api = ServicesResourceApi(cm_api_client) -# cm_api = ClustersResourceApi(cm_api_client) - -# services = [] - -# # Consider returning a class with basic functions like initialize? -# def _provision_service(cluster: ApiCluster, service: ApiService) -> ApiService: -# # Check the cluster hosts -# hosts = [ -# h -# for i, h in enumerate(cm_api.list_hosts(cluster_name=cluster.name).items) -# if i < 3 -# ] - -# if len(hosts) != 3: -# raise Exception( -# "Not enough available hosts to assign service roles; the cluster must have 3 or more hosts." 
-# ) - -# # Create the service -# created_service = service_api.create_services( -# cluster_name=cluster.name, body=ApiServiceList(items=[service]) -# ).items[0] - -# # Record the service -# services.append(created_service) - -# # Start the service -# first_run_cmd = service_api.first_run( -# cluster_name=cluster.name, -# service_name=created_service.name, -# ) -# wait_for_command(cm_api_client, first_run_cmd) - -# # Refresh the service -# created_service = service_api.read_service( -# cluster_name=cluster.name, service_name=created_service.name -# ) - -# # Establish the maintenance mode of the service -# if service.maintenance_mode: -# maintenance_cmd = service_api.enter_maintenance_mode( -# cluster_name=cluster.name, -# service_name=created_service.name -# ) -# wait_for_command(cm_api_client, maintenance_cmd) -# created_service = service_api.read_service( -# cluster_name=cluster.name, service_name=created_service.name -# ) - -# # Establish the state the of the service -# if created_service.service_state != service.service_state: -# if service.service_state == ApiServiceState.STOPPED: -# stop_cmd = service_api.stop_command( -# cluster_name=cluster.name, -# service_name=created_service.name, -# ) -# wait_for_command(cm_api_client, stop_cmd) -# created_service = service_api.read_service( -# cluster_name=cluster.name, service_name=created_service.name -# ) -# else: -# raise Exception("Unsupported service state for fixture: " + service.service_state) - -# # Return the provisioned service -# return created_service - -# # Yield the service to the tests -# yield _provision_service - -# # Delete the services -# for s in services: -# service_api.delete_service( -# cluster_name=s.cluster_ref.cluster_name, -# service_name=s.name, -# ) - - @pytest.fixture(scope="module") def service_factory( cm_api_client, diff --git a/tests/unit/plugins/modules/service/test_service.py b/tests/unit/plugins/modules/service/test_service.py index 2b78aab2..753da8d9 100644 --- a/tests/unit/plugins/modules/service/test_service.py +++ b/tests/unit/plugins/modules/service/test_service.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Cloudera, Inc. All Rights Reserved. +# Copyright 2025 Cloudera, Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
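The rewritten tests below drop the os.getenv-driven connection and cluster setup in favor of per-test fixtures that provision a live ZooKeeper service against the shared base_cluster fixture and clean it up through the register/deregister helpers added to tests/unit/__init__.py. A minimal sketch of that pattern, assuming the cm_api_client and base_cluster fixtures from conftest.py (the fixture and service names are placeholders):

import pytest

from cm_client import ApiService

from ansible_collections.cloudera.cluster.tests.unit import (
    deregister_service,
    register_service,
)


@pytest.fixture()
def example_service(cm_api_client, base_cluster):
    # Record every provisioned service so teardown can find it
    registry = list[ApiService]()

    created = register_service(
        api_client=cm_api_client,
        registry=registry,
        cluster=base_cluster,
        service=ApiService(name="example-zk", type="ZOOKEEPER"),
    )

    # Hand the live service to the test
    yield created

    # deregister_service waits out active commands, stops a running service,
    # and then deletes it
    deregister_service(api_client=cm_api_client, registry=registry)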
@@ -19,315 +19,895 @@ __metaclass__ = type import logging -import os import pytest -from ansible.module_utils.common.dict_transformations import recursive_diff +from pathlib import Path + +from cm_client import ( + ApiConfig, + ApiEntityTag, + ApiHostRef, + ApiRole, + ApiService, + ApiServiceConfig, + ApiServiceState, + ServicesResourceApi, +) from ansible_collections.cloudera.cluster.plugins.modules import service +from ansible_collections.cloudera.cluster.plugins.module_utils.cm_utils import ( + wait_command, +) +from ansible_collections.cloudera.cluster.plugins.module_utils.cluster_utils import ( + get_cluster_hosts, +) +from ansible_collections.cloudera.cluster.plugins.module_utils.role_utils import ( + create_role, + read_roles, +) from ansible_collections.cloudera.cluster.tests.unit import ( AnsibleExitJson, AnsibleFailJson, + deregister_service, + register_service, + deregister_role, + register_role, ) LOG = logging.getLogger(__name__) -@pytest.fixture -def conn(): - conn = dict(username=os.getenv("CM_USERNAME"), password=os.getenv("CM_PASSWORD")) - - if os.getenv("CM_HOST", None): - conn.update(host=os.getenv("CM_HOST")) - - if os.getenv("CM_PORT", None): - conn.update(port=os.getenv("CM_PORT")) - - if os.getenv("CM_ENDPOINT", None): - conn.update(url=os.getenv("CM_ENDPOINT")) - - if os.getenv("CM_PROXY", None): - conn.update(proxy=os.getenv("CM_PROXY")) - - return { - **conn, - "verify_tls": "no", - "debug": "no", - } - - -def test_missing_required(conn, module_args): - module_args(conn) - - with pytest.raises(AnsibleFailJson, match="cluster, service"): - service.main() - - -def test_missing_service(conn, module_args): - conn.update(service="example") - module_args(conn) - - with pytest.raises(AnsibleFailJson, match="cluster"): - service.main() - - -def test_missing_cluster(conn, module_args): - conn.update(cluster="example") - module_args(conn) - - with pytest.raises(AnsibleFailJson, match="service"): - service.main() - - -def test_present_invalid_cluster(conn, module_args): - conn.update( - cluster="example", - service="example", - ) - module_args(conn) +@pytest.fixture() +def zookeeper(cm_api_client, base_cluster, request): + # Keep track of the provisioned service(s) + service_registry = list[ApiService]() - with pytest.raises(AnsibleFailJson, match="Cluster does not exist"): - service.main() + # Get the current cluster hosts + hosts = get_cluster_hosts(cm_api_client, base_cluster) + id = Path(request.node.name).stem -def test_present_missing_type(conn, module_args): - conn.update( - cluster=os.getenv("CM_CLUSTER"), - service=os.getenv("CM_SERVICE"), - ) - module_args(conn) - - with pytest.raises(AnsibleFailJson, match="type"): - service.main() - - -def test_present_create_service(conn, module_args): - conn.update( - cluster=os.getenv("CM_CLUSTER"), - service=os.getenv("CM_SERVICE"), - display_name="Example Service", + zk_service = ApiService( + name=f"test-zk-{id}", type="ZOOKEEPER", + display_name=f"ZooKeeper ({id})", + # Add a SERVER role (so we can start the service -- a ZK requirement!) 
+ roles=[ApiRole(type="SERVER", host_ref=ApiHostRef(hosts[0].host_id))], ) - module_args(conn) - - with pytest.raises(AnsibleExitJson) as e: - service.main() - assert e.value.changed == True - - with pytest.raises(AnsibleExitJson) as e: - service.main() - - assert e.value.changed == False - - -def test_present_update_service(conn, module_args): - conn.update( - cluster=os.getenv("CM_CLUSTER"), - service=os.getenv("CM_SERVICE"), - display_name="Example Service by Ansible", + # Provision and yield the created service + yield register_service( + api_client=cm_api_client, + registry=service_registry, + cluster=base_cluster, + service=zk_service, ) - module_args(conn) - - with pytest.raises(AnsibleExitJson) as e: - service.main() - assert e.value.changed == True - - with pytest.raises(AnsibleExitJson) as e: - service.main() - - assert e.value.changed == False - - -def test_present_maintenance_mode(conn, module_args): - conn.update( - cluster=os.getenv("CM_CLUSTER"), - service=os.getenv("CM_SERVICE"), - maintenance="yes", + # Remove the created service + deregister_service(api_client=cm_api_client, registry=service_registry) + + +@pytest.fixture() +def server_role(cm_api_client, base_cluster, zookeeper): + # Keep track of the provisioned role(s) + role_registry = list[ApiRole]() + + existing_role_instances = [ + r.host_ref.hostname + for r in read_roles( + api_client=cm_api_client, + cluster_name=zookeeper.cluster_ref.cluster_name, + service_name=zookeeper.name, + type="SERVER", + ).items + ] + + hosts = [ + h + for h in get_cluster_hosts(cm_api_client, base_cluster) + if h.hostname not in existing_role_instances + ] + + second_role = create_role( + api_client=cm_api_client, + role_type="SERVER", + hostname=hosts[0].hostname, + cluster_name=zookeeper.cluster_ref.cluster_name, + service_name=zookeeper.name, ) - module_args(conn) - - with pytest.raises(AnsibleExitJson) as e: - service.main() - assert e.value.service["maintenance_mode"] == True - - with pytest.raises(AnsibleExitJson) as e: - service.main() + yield register_role( + api_client=cm_api_client, + registry=role_registry, + service=zookeeper, + role=second_role, + ) - assert e.value.service["maintenance_mode"] == True - assert e.value.changed == False + deregister_role(api_client=cm_api_client, registry=role_registry) - conn.update( - maintenance="no", - ) - module_args(conn) - with pytest.raises(AnsibleExitJson) as e: - service.main() +class TestServiceArgSpec: + def test_service_missing_required(self, conn, module_args): + module_args(conn) - assert e.value.service["maintenance_mode"] == False + with pytest.raises(AnsibleFailJson, match="cluster, name"): + service.main() - with pytest.raises(AnsibleExitJson) as e: - service.main() + def test_service_missing_name(self, conn, module_args): + module_args( + { + **conn, + "service": "example", + } + ) - assert e.value.service["maintenance_mode"] == False - assert e.value.changed == False + with pytest.raises(AnsibleFailJson, match="cluster"): + service.main() + def test_service_missing_cluster(self, conn, module_args): + module_args( + { + **conn, + "cluster": "example", + } + ) -def test_present_set_tags(conn, module_args): - conn.update( - cluster=os.getenv("CM_CLUSTER"), - service=os.getenv("CM_SERVICE"), - tags=dict( - test="Ansible", key="Value", empty_string="", blank_string=" ", none=None - ), - ) - module_args(conn) + with pytest.raises(AnsibleFailJson, match="name"): + service.main() + + def test_service_roles_missing_type(self, conn, module_args): + module_args( + { + **conn, + 
"cluster": "example", + "name": "example", + "roles": [ + { + "hostnames": "example", + } + ], + } + ) - with pytest.raises(AnsibleExitJson) as e: - service.main() + with pytest.raises(AnsibleFailJson, match="type found in roles"): + service.main() + + def test_service_roles_missing_hostnames(self, conn, module_args): + module_args( + { + **conn, + "cluster": "example", + "name": "example", + "roles": [ + { + "type": "example", + } + ], + } + ) - assert ( - recursive_diff(e.value.service["tags"], dict(test="Ansible", key="Value")) - is None - ) + with pytest.raises(AnsibleFailJson, match="hostnames found in roles"): + service.main() - with pytest.raises(AnsibleExitJson) as e: - service.main() - assert ( - recursive_diff(e.value.service["tags"], dict(test="Ansible", key="Value")) - is None - ) - assert e.value.changed == False +class TestServiceInvalidParameters: + def test_present_invalid_cluster(self, conn, module_args): + module_args({**conn, "cluster": "BOOM", "service": "example"}) + with pytest.raises(AnsibleFailJson, match="Cluster does not exist"): + service.main() -def test_present_append_tags(conn, module_args): - conn.update( - cluster=os.getenv("CM_CLUSTER"), - service=os.getenv("CM_SERVICE"), - tags=dict(more="Tags", key="Value"), - ) - module_args(conn) + def test_present_missing_type(self, conn, module_args, base_cluster): + module_args( + { + **conn, + "cluster": base_cluster.name, + "service": "test-zookeeper", + } + ) - with pytest.raises(AnsibleExitJson) as e: - service.main() + with pytest.raises(AnsibleFailJson, match="type"): + service.main() + + +class TestServiceProvision: + @pytest.fixture(autouse=True) + def zookeeper_reset(self, cm_api_client, base_cluster): + # Keep track of the existing ZOOKEEPER services + initial_services = set( + [ + s.name + for s in ServicesResourceApi(cm_api_client) + .read_services( + cluster_name=base_cluster.name, + ) + .items + ] + ) - assert ( - recursive_diff( - e.value.service["tags"], dict(test="Ansible", key="Value", more="Tags") + # Yield to the test + yield + + # Remove any added services + services_to_remove = [ + s + for s in ServicesResourceApi(cm_api_client) + .read_services( + cluster_name=base_cluster.name, + ) + .items + if s.name not in initial_services + ] + deregister_service(cm_api_client, services_to_remove) + + def test_service_provision_core(self, conn, module_args, base_cluster, request): + id = f"pytest-{Path(request.node.name)}" + + module_args( + { + **conn, + "cluster": base_cluster.name, + "name": id, + "type": "ZOOKEEPER", + "state": "present", + } ) - is None - ) - with pytest.raises(AnsibleExitJson) as e: - service.main() + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == True + assert e.value.service["name"] == id + assert e.value.service["type"] == "ZOOKEEPER" + assert e.value.service["display_name"] == id + assert e.value.service["config"] == dict() + assert e.value.service["tags"] == dict() + assert e.value.service["maintenance_mode"] == False + assert len(e.value.service["role_config_groups"]) == 2 # SERVER, GATEWAY bases + assert e.value.service["roles"] == list() + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == False + assert e.value.service["name"] == id + assert e.value.service["type"] == "ZOOKEEPER" + assert e.value.service["display_name"] == id + assert e.value.service["config"] == dict() + assert e.value.service["tags"] == dict() + assert e.value.service["maintenance_mode"] == False + assert 
len(e.value.service["role_config_groups"]) == 2 # SERVER, GATEWAY bases + assert e.value.service["roles"] == list() + + def test_service_provision_display_name( + self, conn, module_args, base_cluster, request + ): + id = f"pytest-{Path(request.node.name)}" + name = "Pytest ZooKeeper" + + module_args( + { + **conn, + "cluster": base_cluster.name, + "name": id, + "type": "ZOOKEEPER", + "display_name": name, + "state": "present", + } + ) - assert ( - recursive_diff( - e.value.service["tags"], dict(test="Ansible", key="Value", more="Tags") + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == True + assert e.value.service["name"] == id + assert e.value.service["type"] == "ZOOKEEPER" + assert e.value.service["display_name"] == name + assert e.value.service["config"] == dict() + assert e.value.service["tags"] == dict() + assert e.value.service["maintenance_mode"] == False + assert len(e.value.service["role_config_groups"]) == 2 # SERVER, GATEWAY bases + assert e.value.service["roles"] == list() + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == False + assert e.value.service["name"] == id + assert e.value.service["type"] == "ZOOKEEPER" + assert e.value.service["display_name"] == name + assert e.value.service["config"] == dict() + assert e.value.service["tags"] == dict() + assert e.value.service["maintenance_mode"] == False + assert len(e.value.service["role_config_groups"]) == 2 # SERVER, GATEWAY bases + assert e.value.service["roles"] == list() + + def test_service_provision_config(self, conn, module_args, base_cluster, request): + id = f"pytest-{Path(request.node.name)}" + + module_args( + { + **conn, + "cluster": base_cluster.name, + "name": id, + "type": "ZOOKEEPER", + "config": {"tickTime": 2001}, + "state": "present", + } ) - is None - ) - assert e.value.changed == False + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == True + assert e.value.service["name"] == id + assert e.value.service["type"] == "ZOOKEEPER" + assert e.value.service["display_name"] == id + assert e.value.service["config"]["tickTime"] == "2001" + assert e.value.service["tags"] == dict() + assert e.value.service["maintenance_mode"] == False + assert len(e.value.service["role_config_groups"]) == 2 # SERVER, GATEWAY bases + assert e.value.service["roles"] == list() + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == False + assert e.value.service["name"] == id + assert e.value.service["type"] == "ZOOKEEPER" + assert e.value.service["display_name"] == id + assert e.value.service["config"]["tickTime"] == "2001" + assert e.value.service["tags"] == dict() + assert e.value.service["maintenance_mode"] == False + assert len(e.value.service["role_config_groups"]) == 2 # SERVER, GATEWAY bases + assert e.value.service["roles"] == list() + + def test_service_provision_tags(self, conn, module_args, base_cluster, request): + id = f"pytest-{Path(request.node.name)}" + + module_args( + { + **conn, + "cluster": base_cluster.name, + "name": id, + "type": "ZOOKEEPER", + "tags": {"pytest": "example"}, + "state": "present", + } + ) -@pytest.mark.skip("Move to separate DIFF test suite.") -def test_update_tags_check_mode(conn, module_args): - conn.update( - cluster=os.getenv("CM_CLUSTER"), - service=os.getenv("CM_SERVICE"), - type="ZOOKEEPER", - tags=dict( - test="Ansible", - empty_string="", - none=None, - long_empty_string=" ", - ), - 
_ansible_check_mode=True, - _ansible_diff=True, - ) - module_args(conn) + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == True + assert e.value.service["name"] == id + assert e.value.service["type"] == "ZOOKEEPER" + assert e.value.service["display_name"] == id + assert e.value.service["config"] == dict() + assert e.value.service["tags"]["pytest"] == "example" + assert e.value.service["maintenance_mode"] == False + assert len(e.value.service["role_config_groups"]) == 2 # SERVER, GATEWAY bases + assert e.value.service["roles"] == list() + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == False + assert e.value.service["name"] == id + assert e.value.service["type"] == "ZOOKEEPER" + assert e.value.service["display_name"] == id + assert e.value.service["config"] == dict() + assert e.value.service["tags"]["pytest"] == "example" + assert e.value.service["maintenance_mode"] == False + assert len(e.value.service["role_config_groups"]) == 2 # SERVER, GATEWAY bases + assert e.value.service["roles"] == list() + + +class TestServiceModification: + @pytest.fixture() + def maintenance_enabled_zookeeper(self, cm_api_client, zookeeper) -> ApiService: + ServicesResourceApi(cm_api_client).enter_maintenance_mode( + cluster_name=zookeeper.cluster_ref.cluster_name, + service_name=zookeeper.name, + ) + return zookeeper + + def test_service_existing_type(self, conn, module_args, zookeeper): + module_args( + { + **conn, + "cluster": zookeeper.cluster_ref.cluster_name, + "name": zookeeper.name, + "type": "GATEWAY", + "state": "present", + } + ) - with pytest.raises(AnsibleExitJson) as e: - service.main() + with pytest.raises(AnsibleFailJson, match="already in use"): + service.main() + + def test_service_existing_display_name(self, conn, module_args, zookeeper): + module_args( + { + **conn, + "cluster": zookeeper.cluster_ref.cluster_name, + "name": zookeeper.name, + "display_name": "Example", + "state": "present", + } + ) - assert e.value.changed == True - assert e.value.diff["before"]["tags"] == dict() - assert e.value.diff["after"]["tags"] == dict(test="Ansible") + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == True + assert e.value.service["name"] == zookeeper.name + assert e.value.service["type"] == zookeeper.type + assert e.value.service["display_name"] == "Example" + assert e.value.service["config"] == dict() + assert e.value.service["tags"] == dict() + assert e.value.service["maintenance_mode"] == False + assert len(e.value.service["role_config_groups"]) == 2 # SERVER, GATEWAY bases + assert len(e.value.service["roles"]) == 1 # SERVER + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == False + assert e.value.service["name"] == zookeeper.name + assert e.value.service["type"] == zookeeper.type + assert e.value.service["display_name"] == "Example" + assert e.value.service["config"] == dict() + assert e.value.service["tags"] == dict() + assert e.value.service["maintenance_mode"] == False + assert len(e.value.service["role_config_groups"]) == 2 # SERVER, GATEWAY bases + assert len(e.value.service["roles"]) == 1 # SERVER + + def test_service_existing_maintenance_enabled(self, conn, module_args, zookeeper): + module_args( + { + **conn, + "cluster": zookeeper.cluster_ref.cluster_name, + "name": zookeeper.name, + "maintenance": True, + "state": "present", + } + ) + with pytest.raises(AnsibleExitJson) as e: + service.main() + 
+ assert e.value.changed == True + assert e.value.service["name"] == zookeeper.name + assert e.value.service["type"] == zookeeper.type + assert e.value.service["config"] == dict() + assert e.value.service["tags"] == dict() + assert e.value.service["maintenance_mode"] == True + assert len(e.value.service["role_config_groups"]) == 2 # SERVER, GATEWAY bases + assert len(e.value.service["roles"]) == 1 # SERVER + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == False + assert e.value.service["name"] == zookeeper.name + assert e.value.service["type"] == zookeeper.type + assert e.value.service["config"] == dict() + assert e.value.service["tags"] == dict() + assert e.value.service["maintenance_mode"] == True + assert len(e.value.service["role_config_groups"]) == 2 # SERVER, GATEWAY bases + assert len(e.value.service["roles"]) == 1 # SERVER + + def test_service_existing_maintenance_disabled( + self, conn, module_args, cm_api_client, zookeeper + ): + ServicesResourceApi(cm_api_client).enter_maintenance_mode( + cluster_name=zookeeper.cluster_ref.cluster_name, + service_name=zookeeper.name, + ) -def test_present_purge_tags(conn, module_args): - conn.update( - cluster=os.getenv("CM_CLUSTER"), - service=os.getenv("CM_SERVICE"), - tags=dict(purge="Ansible"), - purge=True, - ) - module_args(conn) + module_args( + { + **conn, + "cluster": zookeeper.cluster_ref.cluster_name, + "name": zookeeper.name, + "maintenance": False, + "state": "present", + } + ) - with pytest.raises(AnsibleExitJson) as e: - service.main() + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == True + assert e.value.service["name"] == zookeeper.name + assert e.value.service["type"] == zookeeper.type + assert e.value.service["config"] == dict() + assert e.value.service["tags"] == dict() + assert e.value.service["maintenance_mode"] == False + assert len(e.value.service["role_config_groups"]) == 2 # SERVER, GATEWAY bases + assert len(e.value.service["roles"]) == 1 # SERVER + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == False + assert e.value.service["name"] == zookeeper.name + assert e.value.service["type"] == zookeeper.type + assert e.value.service["config"] == dict() + assert e.value.service["tags"] == dict() + assert e.value.service["maintenance_mode"] == False + assert len(e.value.service["role_config_groups"]) == 2 # SERVER, GATEWAY bases + assert len(e.value.service["roles"]) == 1 # SERVER + + def test_service_existing_config( + self, conn, module_args, cm_api_client, zookeeper, request + ): + ServicesResourceApi(cm_api_client).update_service_config( + cluster_name=zookeeper.cluster_ref.cluster_name, + service_name=zookeeper.name, + message=f"{request.node.name}::set", + body=ApiServiceConfig( + items=[ + ApiConfig(name="tickTime", value="3001"), + ApiConfig(name="autopurgeSnapRetainCount", value="9"), + ] + ), + ) - assert recursive_diff(e.value.service["tags"], dict(purge="Ansible")) is None - assert e.value.changed == True + module_args( + { + **conn, + "cluster": zookeeper.cluster_ref.cluster_name, + "name": zookeeper.name, + "config": { + "tickTime": 2001, + "leaderServes": "no", + }, + "message": f"{request.node.name}::test", + "state": "present", + } + ) - with pytest.raises(AnsibleExitJson) as e: - service.main() + with pytest.raises(AnsibleExitJson) as e: + service.main() - assert recursive_diff(e.value.service["tags"], dict(purge="Ansible")) is None - assert 
e.value.changed == False + assert e.value.changed == True + assert e.value.service["name"] == zookeeper.name + assert e.value.service["type"] == zookeeper.type + assert e.value.service["config"] == dict( + tickTime="2001", leaderServes="no", autopurgeSnapRetainCount="9" + ) + assert e.value.service["tags"] == dict() + assert e.value.service["maintenance_mode"] == False + assert len(e.value.service["role_config_groups"]) == 2 # SERVER, GATEWAY bases + assert len(e.value.service["roles"]) == 1 # SERVER + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == False + assert e.value.service["name"] == zookeeper.name + assert e.value.service["type"] == zookeeper.type + assert e.value.service["config"] == dict( + tickTime="2001", leaderServes="no", autopurgeSnapRetainCount="9" + ) + assert e.value.service["tags"] == dict() + assert e.value.service["maintenance_mode"] == False + assert len(e.value.service["role_config_groups"]) == 2 # SERVER, GATEWAY bases + assert len(e.value.service["roles"]) == 1 # SERVER + + def test_service_existing_config_purge( + self, conn, module_args, cm_api_client, zookeeper, request + ): + ServicesResourceApi(cm_api_client).update_service_config( + cluster_name=zookeeper.cluster_ref.cluster_name, + service_name=zookeeper.name, + message=f"{request.node.name}::set", + body=ApiServiceConfig( + items=[ + ApiConfig(name="tickTime", value="3001"), + ApiConfig(name="autopurgeSnapRetainCount", value="9"), + ] + ), + ) + module_args( + { + **conn, + "cluster": zookeeper.cluster_ref.cluster_name, + "name": zookeeper.name, + "config": { + "tickTime": 2001, + "leaderServes": "no", + }, + "message": f"{request.node.name}::test", + "purge": True, + "state": "present", + } + ) -def test_started(conn, module_args): - conn.update( - cluster=os.getenv("CM_CLUSTER"), - service=os.getenv("CM_SERVICE"), - state="started", - _ansible_verbosity=3, - ) - module_args(conn) + with pytest.raises(AnsibleExitJson) as e: + service.main() - with pytest.raises(AnsibleExitJson): - service.main() + assert e.value.changed == True + assert e.value.service["name"] == zookeeper.name + assert e.value.service["type"] == zookeeper.type + assert e.value.service["config"] == dict( + tickTime="2001", + leaderServes="no", + ) + assert e.value.service["tags"] == dict() + assert e.value.service["maintenance_mode"] == False + assert len(e.value.service["role_config_groups"]) == 2 # SERVER, GATEWAY bases + assert len(e.value.service["roles"]) == 1 # SERVER + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == False + assert e.value.service["name"] == zookeeper.name + assert e.value.service["type"] == zookeeper.type + assert e.value.service["config"] == dict(tickTime="2001", leaderServes="no") + assert e.value.service["tags"] == dict() + assert e.value.service["maintenance_mode"] == False + assert len(e.value.service["role_config_groups"]) == 2 # SERVER, GATEWAY bases + assert len(e.value.service["roles"]) == 1 # SERVER + + def test_service_existing_tags(self, conn, module_args, cm_api_client, zookeeper): + ServicesResourceApi(cm_api_client).add_tags( + cluster_name=zookeeper.cluster_ref.cluster_name, + service_name=zookeeper.name, + body=[ + ApiEntityTag(name="tag_one", value="Existing"), + ApiEntityTag(name="tag_two", value="Existing"), + ], + ) - with pytest.raises(AnsibleExitJson) as e: - service.main() + module_args( + { + **conn, + "cluster": zookeeper.cluster_ref.cluster_name, + "name": zookeeper.name, + 
"tags": { + "tag_one": "Updated", + "tag_three": "Added", + }, + "state": "present", + } + ) - assert e.value.changed == False + with pytest.raises(AnsibleExitJson) as e: + service.main() + assert e.value.changed == True + assert e.value.service["name"] == zookeeper.name + assert e.value.service["type"] == zookeeper.type + assert e.value.service["config"] == dict() + assert e.value.service["tags"] == dict( + tag_one="Updated", tag_two="Existing", tag_three="Added" + ) + assert e.value.service["maintenance_mode"] == False + assert len(e.value.service["role_config_groups"]) == 2 # SERVER, GATEWAY bases + assert len(e.value.service["roles"]) == 1 # SERVER + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == False + assert e.value.service["name"] == zookeeper.name + assert e.value.service["type"] == zookeeper.type + assert e.value.service["config"] == dict() + assert e.value.service["tags"] == dict( + tag_one="Updated", tag_two="Existing", tag_three="Added" + ) + assert e.value.service["maintenance_mode"] == False + assert len(e.value.service["role_config_groups"]) == 2 # SERVER, GATEWAY bases + assert len(e.value.service["roles"]) == 1 # SERVER + + def test_service_existing_tags_purge( + self, conn, module_args, cm_api_client, zookeeper + ): + ServicesResourceApi(cm_api_client).add_tags( + cluster_name=zookeeper.cluster_ref.cluster_name, + service_name=zookeeper.name, + body=[ + ApiEntityTag(name="tag_one", value="Existing"), + ApiEntityTag(name="tag_two", value="Existing"), + ], + ) -def test_stopped(conn, module_args): - conn.update( - cluster=os.getenv("CM_CLUSTER"), - service=os.getenv("CM_SERVICE"), - state="stopped", - ) - module_args(conn) + module_args( + { + **conn, + "cluster": zookeeper.cluster_ref.cluster_name, + "name": zookeeper.name, + "tags": { + "tag_one": "Updated", + "tag_three": "Added", + }, + "purge": True, + "state": "present", + } + ) - with pytest.raises(AnsibleExitJson): - service.main() + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == True + assert e.value.service["name"] == zookeeper.name + assert e.value.service["type"] == zookeeper.type + assert e.value.service["config"] == dict() + assert e.value.service["tags"] == dict(tag_one="Updated", tag_three="Added") + assert e.value.service["maintenance_mode"] == False + assert len(e.value.service["role_config_groups"]) == 2 # SERVER, GATEWAY bases + assert len(e.value.service["roles"]) == 1 # SERVER + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == False + assert e.value.service["name"] == zookeeper.name + assert e.value.service["type"] == zookeeper.type + assert e.value.service["config"] == dict() + assert e.value.service["tags"] == dict(tag_one="Updated", tag_three="Added") + assert e.value.service["maintenance_mode"] == False + assert len(e.value.service["role_config_groups"]) == 2 # SERVER, GATEWAY bases + assert len(e.value.service["roles"]) == 1 # SERVER + + +class TestServiceStates: + def test_service_existing_state_started( + self, conn, module_args, cm_api_client, zookeeper + ): + if zookeeper.service_state not in [ + ApiServiceState.STOPPED, + ApiServiceState.STOPPING, + ]: + stop_cmd = ServicesResourceApi(cm_api_client).stop_command( + cluster_name=zookeeper.cluster_ref.cluster_name, + service_name=zookeeper.name, + ) + + wait_command(cm_api_client, stop_cmd) + + module_args( + { + **conn, + "cluster": zookeeper.cluster_ref.cluster_name, + "name": 
zookeeper.name, + "state": "started", + } + ) - with pytest.raises(AnsibleExitJson) as e: - service.main() + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == True + assert e.value.service["service_state"] == ApiServiceState.STARTED + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == False + assert e.value.service["service_state"] == ApiServiceState.STARTED + + def test_service_existing_state_stopped( + self, conn, module_args, cm_api_client, zookeeper + ): + if zookeeper.service_state not in [ + ApiServiceState.STARTED, + ApiServiceState.STARTING, + ]: + start_cmd = ServicesResourceApi(cm_api_client).start_command( + cluster_name=zookeeper.cluster_ref.cluster_name, + service_name=zookeeper.name, + ) + + wait_command(cm_api_client, start_cmd) + + module_args( + { + **conn, + "cluster": zookeeper.cluster_ref.cluster_name, + "name": zookeeper.name, + "state": "stopped", + } + ) - assert e.value.changed == False + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == True + assert e.value.service["service_state"] == ApiServiceState.STOPPED + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == False + assert e.value.service["service_state"] == ApiServiceState.STOPPED + + def test_service_existing_state_restarted( + self, conn, module_args, cm_api_client, zookeeper + ): + if zookeeper.service_state not in [ + ApiServiceState.STARTED, + ApiServiceState.STARTING, + ]: + start_cmd = ServicesResourceApi(cm_api_client).start_command( + cluster_name=zookeeper.cluster_ref.cluster_name, + service_name=zookeeper.name, + ) + + wait_command(cm_api_client, start_cmd) + + module_args( + { + **conn, + "cluster": zookeeper.cluster_ref.cluster_name, + "name": zookeeper.name, + "state": "restarted", + } + ) + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == True + assert e.value.service["service_state"] == ApiServiceState.STARTED + + # No idempotency due to the nature of the state + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == True + assert e.value.service["service_state"] == ApiServiceState.STARTED + + def test_service_existing_state_absent( + self, conn, module_args, cm_api_client, zookeeper + ): + if zookeeper.service_state not in [ + ApiServiceState.STARTED, + ApiServiceState.STARTING, + ]: + start_cmd = ServicesResourceApi(cm_api_client).start_command( + cluster_name=zookeeper.cluster_ref.cluster_name, + service_name=zookeeper.name, + ) + + wait_command(cm_api_client, start_cmd) + + module_args( + { + **conn, + "cluster": zookeeper.cluster_ref.cluster_name, + "name": zookeeper.name, + "state": "absent", + } + ) -def test_absent(conn, module_args): - conn.update( - cluster=os.getenv("CM_CLUSTER"), - service=os.getenv("CM_SERVICE"), - state="absent", - ) - module_args(conn) + with pytest.raises(AnsibleExitJson) as e: + service.main() - with pytest.raises(AnsibleExitJson): - service.main() + assert e.value.changed == True + assert not e.value.service - with pytest.raises(AnsibleExitJson) as e: - service.main() + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + service.main() - assert e.value.changed == False + assert e.value.changed == False + assert not e.value.service diff --git a/tests/unit/plugins/modules/service/test_service_rcgs.py b/tests/unit/plugins/modules/service/test_service_rcgs.py new file mode 100644 index 
00000000..296fcb44 --- /dev/null +++ b/tests/unit/plugins/modules/service/test_service_rcgs.py @@ -0,0 +1,715 @@ +# -*- coding: utf-8 -*- + +# Copyright 2025 Cloudera, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +import logging +import pytest + +from collections.abc import Generator + +from pathlib import Path + +from cm_client import ( + ApiConfig, + ApiConfigList, + ApiHostRef, + ApiRole, + ApiRoleConfigGroup, + ApiRoleNameList, + ApiService, + RoleConfigGroupsResourceApi, + ServicesResourceApi, +) + +from ansible_collections.cloudera.cluster.plugins.modules import service +from ansible_collections.cloudera.cluster.plugins.module_utils.cluster_utils import ( + get_cluster_hosts, +) +from ansible_collections.cloudera.cluster.plugins.module_utils.role_config_group_utils import ( + get_base_role_config_group, +) +from ansible_collections.cloudera.cluster.plugins.module_utils.role_utils import ( + create_role, + read_role, + read_roles, +) +from ansible_collections.cloudera.cluster.tests.unit import ( + AnsibleExitJson, + deregister_service, + register_service, + deregister_role, + register_role, + deregister_role_config_group, + register_role_config_group, +) + +LOG = logging.getLogger(__name__) + + +@pytest.fixture() +def zookeeper(cm_api_client, base_cluster, request): + # Keep track of the provisioned service(s) + service_registry = list[ApiService]() + + # Get the current cluster hosts + hosts = get_cluster_hosts(cm_api_client, base_cluster) + + id = Path(request.node.name).stem + + zk_service = ApiService( + name=f"test-zk-{id}", + type="ZOOKEEPER", + display_name=f"ZooKeeper ({id})", + # Add a SERVER role (so we can start the service -- a ZK requirement!) 
+ roles=[ApiRole(type="SERVER", host_ref=ApiHostRef(hosts[0].host_id))], + ) + + # Provision and yield the created service + yield register_service( + api_client=cm_api_client, + registry=service_registry, + cluster=base_cluster, + service=zk_service, + ) + + # Remove the created service + deregister_service(api_client=cm_api_client, registry=service_registry) + + +@pytest.fixture() +def server_role(cm_api_client, base_cluster, zookeeper): + # Keep track of the provisioned role(s) + role_registry = list[ApiRole]() + + existing_role_instances = [ + r.host_ref.hostname + for r in read_roles( + api_client=cm_api_client, + cluster_name=zookeeper.cluster_ref.cluster_name, + service_name=zookeeper.name, + type="SERVER", + ).items + ] + + hosts = [ + h + for h in get_cluster_hosts(cm_api_client, base_cluster) + if h.hostname not in existing_role_instances + ] + + second_role = create_role( + api_client=cm_api_client, + role_type="SERVER", + hostname=hosts[0].hostname, + cluster_name=zookeeper.cluster_ref.cluster_name, + service_name=zookeeper.name, + ) + + yield register_role( + api_client=cm_api_client, + registry=role_registry, + service=zookeeper, + role=second_role, + ) + + deregister_role(api_client=cm_api_client, registry=role_registry) + + +class TestServiceProvisionRoleConfigGroups: + @pytest.fixture(autouse=True) + def resettable_cluster(self, cm_api_client, base_cluster): + # Keep track of the existing ZOOKEEPER services + initial_services = set( + [ + s.name + for s in ServicesResourceApi(cm_api_client) + .read_services( + cluster_name=base_cluster.name, + ) + .items + ] + ) + + # Yield to the test + yield + + # Remove any added services + services_to_remove = [ + s + for s in ServicesResourceApi(cm_api_client) + .read_services( + cluster_name=base_cluster.name, + ) + .items + if s.name not in initial_services + ] + deregister_service(cm_api_client, services_to_remove) + + def test_service_provision_custom_rcg( + self, conn, module_args, base_cluster, request + ): + id = f"pytest-{Path(request.node.name)}" + + module_args( + { + **conn, + "cluster": base_cluster.name, + "name": id, + "type": "ZOOKEEPER", + "role_config_groups": [ + { + "name": id, + "type": "SERVER", + "config": { + "minSessionTimeout": 4601, + }, + } + ], + "state": "present", + } + ) + + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == True + assert e.value.service["name"] == id + assert e.value.service["type"] == "ZOOKEEPER" + assert e.value.service["display_name"] == id + assert e.value.service["config"] == dict() + assert e.value.service["tags"] == dict() + assert e.value.service["maintenance_mode"] == False + assert e.value.service["roles"] == list() + + assert len(e.value.service["role_config_groups"]) == 3 # custom + 2 bases + rcg = next( + iter([r for r in e.value.service["role_config_groups"] if not r["base"]]) + ) + assert rcg["name"] == id + assert rcg["role_type"] == "SERVER" + assert rcg["config"]["minSessionTimeout"] == "4601" + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == False + assert e.value.service["name"] == id + assert e.value.service["type"] == "ZOOKEEPER" + assert e.value.service["display_name"] == id + assert e.value.service["config"] == dict() + assert e.value.service["tags"] == dict() + assert e.value.service["maintenance_mode"] == False + assert e.value.service["roles"] == list() + + assert len(e.value.service["role_config_groups"]) == 3 + rcg = next( + iter([r for r in 
e.value.service["role_config_groups"] if not r["base"]]) + ) + assert rcg["name"] == id + assert rcg["role_type"] == "SERVER" + assert rcg["config"]["minSessionTimeout"] == "4601" + + def test_service_provision_base_rcg(self, conn, module_args, base_cluster, request): + id = f"pytest-{Path(request.node.name)}" + + module_args( + { + **conn, + "cluster": base_cluster.name, + "name": id, + "type": "ZOOKEEPER", + "role_config_groups": [ + { + "type": "SERVER", + "config": { + "minSessionTimeout": 4601, + }, + } + ], + "state": "present", + } + ) + + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == True + assert e.value.service["name"] == id + assert e.value.service["type"] == "ZOOKEEPER" + assert e.value.service["display_name"] == id + assert e.value.service["config"] == dict() + assert e.value.service["tags"] == dict() + assert e.value.service["maintenance_mode"] == False + assert e.value.service["roles"] == list() + + assert len(e.value.service["role_config_groups"]) == 2 # 2 bases + rcg = next( + iter( + [ + r + for r in e.value.service["role_config_groups"] + if r["base"] and r["role_type"] == "SERVER" + ] + ) + ) + assert rcg["role_type"] == "SERVER" + assert rcg["config"]["minSessionTimeout"] == "4601" + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == False + assert e.value.service["name"] == id + assert e.value.service["type"] == "ZOOKEEPER" + assert e.value.service["display_name"] == id + assert e.value.service["config"] == dict() + assert e.value.service["tags"] == dict() + assert e.value.service["maintenance_mode"] == False + assert e.value.service["roles"] == list() + + assert len(e.value.service["role_config_groups"]) == 2 + rcg = next( + iter( + [ + r + for r in e.value.service["role_config_groups"] + if r["base"] and r["role_type"] == "SERVER" + ] + ) + ) + assert rcg["role_type"] == "SERVER" + assert rcg["config"]["minSessionTimeout"] == "4601" + + +class TestServiceModificationRoleConfigGroups: + @pytest.fixture() + def base_rcg_server(self, cm_api_client, zookeeper) -> ApiRoleConfigGroup: + base_rcg = get_base_role_config_group( + api_client=cm_api_client, + cluster_name=zookeeper.cluster_ref.cluster_name, + service_name=zookeeper.name, + role_type="SERVER", + ) + + base_rcg.config = ApiConfigList( + items=[ + ApiConfig(name="minSessionTimeout", value="5500"), + ApiConfig(name="maxSessionTimeout", value="45000"), + ] + ) + + return RoleConfigGroupsResourceApi(cm_api_client).update_role_config_group( + cluster_name=zookeeper.cluster_ref.cluster_name, + service_name=zookeeper.name, + role_config_group_name=base_rcg.name, + body=base_rcg, + ) + + @pytest.fixture() + def base_rcg_gateway(self, cm_api_client, zookeeper) -> ApiRoleConfigGroup: + base_rcg = get_base_role_config_group( + api_client=cm_api_client, + cluster_name=zookeeper.cluster_ref.cluster_name, + service_name=zookeeper.name, + role_type="GATEWAY", + ) + + base_rcg.config = ApiConfigList( + items=[ApiConfig(name="client_config_priority", value="91")] + ) + + return RoleConfigGroupsResourceApi(cm_api_client).update_role_config_group( + cluster_name=zookeeper.cluster_ref.cluster_name, + service_name=zookeeper.name, + role_config_group_name=base_rcg.name, + body=base_rcg, + ) + + @pytest.fixture() + def custom_rcg_server( + self, cm_api_client, zookeeper, request + ) -> Generator[ApiRoleConfigGroup]: + id = Path(request.node.name).stem + + role_config_groups = list[ApiRoleConfigGroup]() + + yield 
register_role_config_group( + api_client=cm_api_client, + registry=role_config_groups, + service=zookeeper, + role_config_group=ApiRoleConfigGroup( + name=f"pytest-{id}", + role_type="SERVER", + config=ApiConfigList(items=[ApiConfig("minSessionTimeout", "4501")]), + display_name=f"Pytest ({id})", + ), + message=f"{Path(request.node.parent.name).stem}::{request.node.name}", + ) + + deregister_role_config_group( + api_client=cm_api_client, + registry=role_config_groups, + message=f"{Path(request.node.parent.name).stem}::{request.node.name}", + ) + + @pytest.fixture() + def server_role_custom_rcg( + self, cm_api_client, server_role, custom_rcg_server + ) -> ApiRole: + RoleConfigGroupsResourceApi(cm_api_client).move_roles( + cluster_name=server_role.service_ref.cluster_name, + service_name=server_role.service_ref.service_name, + role_config_group_name=custom_rcg_server.name, + body=ApiRoleNameList(items=[server_role.name]), + ) + return server_role + + def test_service_existing_base_rcg( + self, conn, module_args, zookeeper, base_rcg_server, base_rcg_gateway + ): + module_args( + { + **conn, + "cluster": zookeeper.cluster_ref.cluster_name, + "name": zookeeper.name, + "role_config_groups": [ + { + "type": base_rcg_server.role_type, + "config": { + "minSessionTimeout": 5501, + "maxSessionTimeout": 45001, + }, + } + ], + "state": "present", + } + ) + + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == True + + server_rcg = next( + iter( + [ + r + for r in e.value.service["role_config_groups"] + if r["base"] and r["role_type"] == "SERVER" + ] + ) + ) + assert server_rcg["config"]["minSessionTimeout"] == "5501" + assert server_rcg["config"]["maxSessionTimeout"] == "45001" + + gateway_rcg = next( + iter( + [ + r + for r in e.value.service["role_config_groups"] + if r["base"] and r["role_type"] == "GATEWAY" + ] + ) + ) + assert gateway_rcg["config"]["client_config_priority"] == "91" + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == False + + server_rcg = next( + iter( + [ + r + for r in e.value.service["role_config_groups"] + if r["base"] and r["role_type"] == "SERVER" + ] + ) + ) + assert server_rcg["config"]["minSessionTimeout"] == "5501" + assert server_rcg["config"]["maxSessionTimeout"] == "45001" + + gateway_rcg = next( + iter( + [ + r + for r in e.value.service["role_config_groups"] + if r["base"] and r["role_type"] == "GATEWAY" + ] + ) + ) + assert gateway_rcg["config"]["client_config_priority"] == "91" + + def test_service_existing_base_rcg_purge( + self, conn, module_args, zookeeper, base_rcg_server, base_rcg_gateway + ): + module_args( + { + **conn, + "cluster": zookeeper.cluster_ref.cluster_name, + "name": zookeeper.name, + "role_config_groups": [ + { + "type": base_rcg_server.role_type, + "config": { + "minSessionTimeout": 5501, + }, + } + ], + "purge": True, + "state": "present", + } + ) + + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == True + + server_rcg = next( + iter( + [ + r + for r in e.value.service["role_config_groups"] + if r["base"] and r["role_type"] == "SERVER" + ] + ) + ) + assert server_rcg["config"]["minSessionTimeout"] == "5501" + assert "maxSessionTimeout" not in server_rcg["config"] + + gateway_rcg = next( + iter( + [ + r + for r in e.value.service["role_config_groups"] + if r["base"] and r["role_type"] == "GATEWAY" + ] + ) + ) + assert "client_config_priority" not in gateway_rcg["config"] + + # Idempotency + with 
pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == False + + server_rcg = next( + iter( + [ + r + for r in e.value.service["role_config_groups"] + if r["base"] and r["role_type"] == "SERVER" + ] + ) + ) + assert server_rcg["config"]["minSessionTimeout"] == "5501" + assert "maxSessionTimeout" not in server_rcg["config"] + + gateway_rcg = next( + iter( + [ + r + for r in e.value.service["role_config_groups"] + if r["base"] and r["role_type"] == "GATEWAY" + ] + ) + ) + assert "client_config_priority" not in gateway_rcg["config"] + + def test_service_existing_custom_rcg( + self, conn, module_args, zookeeper, custom_rcg_server + ): + module_args( + { + **conn, + "cluster": zookeeper.cluster_ref.cluster_name, + "name": zookeeper.name, + "role_config_groups": [ + { + "name": custom_rcg_server.name, + "type": custom_rcg_server.role_type, + "config": { + "minSessionTimeout": 5501, + "maxSessionTimeout": 45001, + }, + } + ], + "state": "present", + } + ) + + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == True + + server_rcg = next( + iter( + [ + r + for r in e.value.service["role_config_groups"] + if r["name"] == custom_rcg_server.name + ] + ) + ) + assert server_rcg["config"]["minSessionTimeout"] == "5501" + assert server_rcg["config"]["maxSessionTimeout"] == "45001" + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == False + + server_rcg = next( + iter( + [ + r + for r in e.value.service["role_config_groups"] + if r["name"] == custom_rcg_server.name + ] + ) + ) + assert server_rcg["config"]["minSessionTimeout"] == "5501" + assert server_rcg["config"]["maxSessionTimeout"] == "45001" + + def test_service_existing_custom_rcg_purge( + self, conn, module_args, zookeeper, custom_rcg_server + ): + module_args( + { + **conn, + "cluster": zookeeper.cluster_ref.cluster_name, + "name": zookeeper.name, + "role_config_groups": [ + { + "name": custom_rcg_server.name, + "type": custom_rcg_server.role_type, + "config": { + "maxSessionTimeout": 45001, + }, + } + ], + "purge": True, + "state": "present", + } + ) + + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == True + + server_rcg = next( + iter( + [ + r + for r in e.value.service["role_config_groups"] + if r["name"] == custom_rcg_server.name + ] + ) + ) + assert server_rcg["config"]["maxSessionTimeout"] == "45001" + assert "minSessionTimeout" not in server_rcg["config"] + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == False + + server_rcg = next( + iter( + [ + r + for r in e.value.service["role_config_groups"] + if r["name"] == custom_rcg_server.name + ] + ) + ) + assert server_rcg["config"]["maxSessionTimeout"] == "45001" + assert "minSessionTimeout" not in server_rcg["config"] + + def test_service_existing_custom_rcg_purge_role_assoc( + self, conn, module_args, cm_api_client, zookeeper, server_role_custom_rcg + ): + module_args( + { + **conn, + "cluster": zookeeper.cluster_ref.cluster_name, + "name": zookeeper.name, + "purge": True, + "state": "present", + } + ) + + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == True + assert server_role_custom_rcg.name not in [ + rcg["name"] for rcg in e.value.service["role_config_groups"] + ] + + refreshed_role = read_role( + api_client=cm_api_client, + cluster_name=zookeeper.cluster_ref.cluster_name, + service_name=zookeeper.name, + 
role_name=server_role_custom_rcg.name, + ) + base_rcg = get_base_role_config_group( + api_client=cm_api_client, + cluster_name=zookeeper.cluster_ref.cluster_name, + service_name=zookeeper.name, + role_type=server_role_custom_rcg.type, + ) + assert ( + refreshed_role.role_config_group_ref.role_config_group_name == base_rcg.name + ) + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == False + assert server_role_custom_rcg.name not in [ + rcg["name"] for rcg in e.value.service["role_config_groups"] + ] + + refreshed_role = read_role( + api_client=cm_api_client, + cluster_name=zookeeper.cluster_ref.cluster_name, + service_name=zookeeper.name, + role_name=server_role_custom_rcg.name, + ) + base_rcg = get_base_role_config_group( + api_client=cm_api_client, + cluster_name=zookeeper.cluster_ref.cluster_name, + service_name=zookeeper.name, + role_type=server_role_custom_rcg.type, + ) + assert ( + refreshed_role.role_config_group_ref.role_config_group_name == base_rcg.name + ) diff --git a/tests/unit/plugins/modules/service/test_service_roles.py b/tests/unit/plugins/modules/service/test_service_roles.py new file mode 100644 index 00000000..4a150286 --- /dev/null +++ b/tests/unit/plugins/modules/service/test_service_roles.py @@ -0,0 +1,916 @@ +# -*- coding: utf-8 -*- + +# Copyright 2025 Cloudera, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
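+# Unit tests for the cloudera.cluster service module's role handling: provisioning
+# roles while creating a service, and reconciling roles, role config groups, role
+# configs, and tags on an existing service (with and without 'purge').
+#
+# Illustrative shape of the module arguments exercised below -- values here are
+# examples only and are not taken from any particular test:
+#
+#   {
+#       "cluster": "example-cluster",
+#       "name": "example-zookeeper",
+#       "type": "ZOOKEEPER",
+#       "roles": [{"type": "SERVER", "hostnames": ["host-1.example.internal"]}],
+#       "role_config_groups": [{"name": "CUSTOM_SERVER", "role_type": "SERVER"}],
+#       "state": "present",
+#   }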
+ +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +import logging +import pytest + +from collections.abc import Generator + +from pathlib import Path + +from cm_client import ( + ApiConfig, + ApiConfigList, + ApiCluster, + ApiEntityTag, + ApiHost, + ApiHostRef, + ApiRole, + ApiRoleConfigGroup, + ApiRoleNameList, + ApiService, + RoleConfigGroupsResourceApi, + RolesResourceApi, + ServicesResourceApi, +) + +from ansible_collections.cloudera.cluster.plugins.modules import service +from ansible_collections.cloudera.cluster.plugins.module_utils.cluster_utils import ( + get_cluster_hosts, +) +from ansible_collections.cloudera.cluster.plugins.module_utils.role_config_group_utils import ( + create_role_config_group, + get_base_role_config_group, + provision_role_config_groups, +) +from ansible_collections.cloudera.cluster.plugins.module_utils.role_utils import ( + create_role, + provision_service_role, + read_roles, +) +from ansible_collections.cloudera.cluster.plugins.module_utils.service_utils import ( + get_service_hosts, +) +from ansible_collections.cloudera.cluster.tests.unit import ( + AnsibleExitJson, + deregister_service, + register_service, +) + +LOG = logging.getLogger(__name__) + + +@pytest.fixture() +def cluster_hosts(cm_api_client, base_cluster) -> list[ApiHost]: + return get_cluster_hosts(cm_api_client, base_cluster) + + +class TestServiceProvisionRoles: + @pytest.fixture(autouse=True) + def resettable_cluster(self, cm_api_client, base_cluster) -> Generator[ApiCluster]: + # Keep track of the existing ZOOKEEPER services + initial_services = set( + [ + s.name + for s in ServicesResourceApi(cm_api_client) + .read_services( + cluster_name=base_cluster.name, + ) + .items + ] + ) + + # Yield to the test + yield base_cluster + + # Remove any added services + services_to_remove = [ + s + for s in ServicesResourceApi(cm_api_client) + .read_services( + cluster_name=base_cluster.name, + ) + .items + if s.name not in initial_services + ] + deregister_service(cm_api_client, services_to_remove) + + def test_service_provision_roles( + self, conn, module_args, cm_api_client, resettable_cluster, request + ): + service_name = f"pytest-{Path(request.node.name)}" + + available_hosts = get_cluster_hosts( + api_client=cm_api_client, cluster=resettable_cluster + ) + + module_args( + { + **conn, + "cluster": resettable_cluster.name, + "name": service_name, + "type": "ZOOKEEPER", + "roles": [ + { + "type": "SERVER", + "hostnames": [h.hostname for h in available_hosts], + } + ], + "state": "present", + } + ) + + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == True + assert e.value.service["name"] == service_name + assert e.value.service["type"] == "ZOOKEEPER" + assert e.value.service["display_name"] == service_name + assert e.value.service["config"] == dict() + assert e.value.service["tags"] == dict() + assert e.value.service["maintenance_mode"] == False + assert len(e.value.service["roles"]) == len(available_hosts) + assert len(e.value.service["role_config_groups"]) == 2 # SERVER, GATEWAY bases + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == False + assert e.value.service["name"] == service_name + assert e.value.service["type"] == "ZOOKEEPER" + assert e.value.service["display_name"] == service_name + assert e.value.service["config"] == dict() + assert e.value.service["tags"] == dict() + assert e.value.service["maintenance_mode"] == False + assert 
len(e.value.service["roles"]) == len(available_hosts) + assert len(e.value.service["role_config_groups"]) == 2 # SERVER, GATEWAY bases + + def test_service_provision_roles_custom_rcg( + self, conn, module_args, cm_api_client, resettable_cluster, request + ): + service_name = f"pytest-{Path(request.node.name)}" + + available_hosts = get_cluster_hosts( + api_client=cm_api_client, cluster=resettable_cluster + ) + + module_args( + { + **conn, + "cluster": resettable_cluster.name, + "name": service_name, + "type": "ZOOKEEPER", + "roles": [ + { + "type": "SERVER", + "hostnames": [h.hostname for h in available_hosts], + "role_config_group": "PYTEST_SERVER", + }, + ], + "role_config_groups": [ + { + "name": "PYTEST_SERVER", + "role_type": "SERVER", + }, + ], + "state": "present", + } + ) + + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == True + assert e.value.service["name"] == service_name + assert e.value.service["type"] == "ZOOKEEPER" + assert e.value.service["display_name"] == service_name + assert e.value.service["config"] == dict() + assert e.value.service["tags"] == dict() + assert e.value.service["maintenance_mode"] == False + assert len(e.value.service["roles"]) == len(available_hosts) + assert ( + len(e.value.service["role_config_groups"]) == 3 + ) # SERVER, GATEWAY bases + PYTEST_SERVER + + assert e.value.service["roles"][0]["role_config_group_name"] == "PYTEST_SERVER" + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == False + assert e.value.service["name"] == service_name + assert e.value.service["type"] == "ZOOKEEPER" + assert e.value.service["display_name"] == service_name + assert e.value.service["config"] == dict() + assert e.value.service["tags"] == dict() + assert e.value.service["maintenance_mode"] == False + assert len(e.value.service["roles"]) == len(available_hosts) + assert ( + len(e.value.service["role_config_groups"]) == 3 + ) # SERVER, GATEWAY bases + PYTEST_SERVER + + assert e.value.service["roles"][0]["role_config_group_name"] == "PYTEST_SERVER" + + def test_service_provision_roles_config( + self, conn, module_args, cm_api_client, resettable_cluster, request + ): + service_name = f"pytest-{Path(request.node.name)}" + + available_hosts = get_cluster_hosts( + api_client=cm_api_client, cluster=resettable_cluster + ) + + module_args( + { + **conn, + "cluster": resettable_cluster.name, + "name": service_name, + "type": "ZOOKEEPER", + "roles": [ + { + "type": "SERVER", + "hostnames": [h.hostname for h in available_hosts], + "config": { + "minSessionTimeout": 4801, + }, + }, + ], + "state": "present", + } + ) + + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == True + assert e.value.service["name"] == service_name + assert e.value.service["type"] == "ZOOKEEPER" + assert e.value.service["display_name"] == service_name + assert e.value.service["config"] == dict() + assert e.value.service["tags"] == dict() + assert e.value.service["maintenance_mode"] == False + assert len(e.value.service["roles"]) == len(available_hosts) + assert len(e.value.service["role_config_groups"]) == 2 # SERVER, GATEWAY bases + + assert e.value.service["roles"][0]["config"]["minSessionTimeout"] == "4801" + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == False + assert e.value.service["name"] == service_name + assert e.value.service["type"] == "ZOOKEEPER" + assert e.value.service["display_name"] == service_name 
+ assert e.value.service["config"] == dict() + assert e.value.service["tags"] == dict() + assert e.value.service["maintenance_mode"] == False + assert len(e.value.service["roles"]) == len(available_hosts) + assert len(e.value.service["role_config_groups"]) == 2 # SERVER, GATEWAY bases + + assert e.value.service["roles"][0]["config"]["minSessionTimeout"] == "4801" + + def test_service_provision_roles_tags( + self, conn, module_args, cm_api_client, resettable_cluster, request + ): + service_name = f"pytest-{Path(request.node.name)}" + + available_hosts = get_cluster_hosts( + api_client=cm_api_client, cluster=resettable_cluster + ) + + module_args( + { + **conn, + "cluster": resettable_cluster.name, + "name": service_name, + "type": "ZOOKEEPER", + "roles": [ + { + "type": "SERVER", + "hostnames": [h.hostname for h in available_hosts], + "tags": { + "pytest": "example", + }, + }, + ], + "state": "present", + } + ) + + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == True + assert e.value.service["name"] == service_name + assert e.value.service["type"] == "ZOOKEEPER" + assert e.value.service["display_name"] == service_name + assert e.value.service["config"] == dict() + assert e.value.service["tags"] == dict() + assert e.value.service["maintenance_mode"] == False + assert len(e.value.service["roles"]) == len(available_hosts) + assert len(e.value.service["role_config_groups"]) == 2 # SERVER, GATEWAY bases + + assert e.value.service["roles"][0]["tags"]["pytest"] == "example" + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == False + assert e.value.service["name"] == service_name + assert e.value.service["type"] == "ZOOKEEPER" + assert e.value.service["display_name"] == service_name + assert e.value.service["config"] == dict() + assert e.value.service["tags"] == dict() + assert e.value.service["maintenance_mode"] == False + assert len(e.value.service["roles"]) == len(available_hosts) + assert len(e.value.service["role_config_groups"]) == 2 # SERVER, GATEWAY bases + + assert e.value.service["roles"][0]["tags"]["pytest"] == "example" + + +class TestServiceModificationRoles: + @pytest.fixture() + def zookeeper(self, cm_api_client, base_cluster, request) -> Generator[ApiService]: + # Keep track of the provisioned service(s) + service_registry = list[ApiService]() + + # Get the current cluster hosts + hosts = get_cluster_hosts(cm_api_client, base_cluster) + + id = Path(request.node.name).stem + + zk_service = ApiService( + name=f"test-zk-{id}", + type="ZOOKEEPER", + display_name=f"ZooKeeper ({id})", + # Add a SERVER role (so we can start the service -- a ZK requirement!) 
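+# (The role is assigned to the first cluster host; the fixture assumes the base
+# cluster has at least one host available.)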
+ roles=[ApiRole(type="SERVER", host_ref=ApiHostRef(hosts[0].host_id))], + ) + + # Provision and yield the created service + yield register_service( + api_client=cm_api_client, + registry=service_registry, + cluster=base_cluster, + service=zk_service, + ) + + # Remove the created service + deregister_service(api_client=cm_api_client, registry=service_registry) + + @pytest.fixture() + def available_hosts(self, cm_api_client, cluster_hosts, zookeeper) -> list[ApiHost]: + service_host_ids = [ + h.host_id + for h in get_service_hosts( + api_client=cm_api_client, + service=zookeeper, + ) + ] + + return [h for h in cluster_hosts if h.host_id not in service_host_ids] + + @pytest.fixture() + def server_role(self, cm_api_client, base_cluster, zookeeper) -> ApiRole: + existing_role_instances = [ + r.host_ref.hostname + for r in read_roles( + api_client=cm_api_client, + cluster_name=zookeeper.cluster_ref.cluster_name, + service_name=zookeeper.name, + type="SERVER", + ).items + ] + + hosts = [ + h + for h in get_cluster_hosts(cm_api_client, base_cluster) + if h.hostname not in existing_role_instances + ] + + created_role = create_role( + api_client=cm_api_client, + role_type="SERVER", + hostname=hosts[0].hostname, + cluster_name=zookeeper.cluster_ref.cluster_name, + service_name=zookeeper.name, + ) + + provisioned_role = provision_service_role( + api_client=cm_api_client, + cluster_name=zookeeper.cluster_ref.cluster_name, + service_name=zookeeper.name, + role=created_role, + ) + + return provisioned_role + + @pytest.fixture() + def server_rcg(self, cm_api_client, zookeeper, request) -> ApiRoleConfigGroup: + custom_rcg = create_role_config_group( + api_client=cm_api_client, + cluster_name=zookeeper.cluster_ref.cluster_name, + service_name=zookeeper.name, + name=f"pytest-{Path(request.node.name).stem}", + role_type="SERVER", + config=dict(minSessionTimeout=6601), + ) + + provisioned_rcgs = provision_role_config_groups( + api_client=cm_api_client, + cluster_name=zookeeper.cluster_ref.cluster_name, + service_name=zookeeper.name, + role_config_groups=[custom_rcg], + ) + + return provisioned_rcgs.items[0] + + @pytest.fixture() + def server_rcg_role(self, cm_api_client, server_role, server_rcg) -> ApiRole: + moved_roles = RoleConfigGroupsResourceApi(cm_api_client).move_roles( + cluster_name=server_role.service_ref.cluster_name, + service_name=server_role.service_ref.service_name, + role_config_group_name=server_rcg.name, + body=ApiRoleNameList(items=[server_role.name]), + ) + + return moved_roles.items[0] + + def test_service_existing_role_rcg( + self, conn, module_args, cm_api_client, zookeeper, server_rcg + ): + existing_hosts = get_service_hosts( + api_client=cm_api_client, + service=zookeeper, + ) + + module_args( + { + **conn, + "cluster": zookeeper.cluster_ref.cluster_name, + "name": zookeeper.name, + "roles": [ + { + "type": "SERVER", + "hostnames": [h.hostname for h in existing_hosts], + "role_config_group": server_rcg.name, + } + ], + "state": "present", + } + ) + + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == True + assert len(e.value.service["roles"]) == len(existing_hosts) + assert ( + len(e.value.service["role_config_groups"]) == 3 + ) # SERVER, GATEWAY bases + server_rcg + + assert e.value.service["roles"][0]["role_config_group_name"] == server_rcg.name + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == False + assert len(e.value.service["roles"]) == len(existing_hosts) + assert ( + 
len(e.value.service["role_config_groups"]) == 3 + ) # SERVER, GATEWAY bases + server_rcg + + assert e.value.service["roles"][0]["role_config_group_name"] == server_rcg.name + + def test_service_existing_role_rcg_base( + self, conn, module_args, cm_api_client, zookeeper, server_rcg_role + ): + base_rcg = get_base_role_config_group( + api_client=cm_api_client, + cluster_name=zookeeper.cluster_ref.cluster_name, + service_name=zookeeper.name, + role_type=server_rcg_role.type, + ) + + module_args( + { + **conn, + "cluster": zookeeper.cluster_ref.cluster_name, + "name": zookeeper.name, + "roles": [ + { + "type": server_rcg_role.type, + "hostnames": [server_rcg_role.host_ref.hostname], + } + ], + "state": "present", + } + ) + + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == True + assert len(e.value.service["roles"]) == 2 # SERVER + service_rcg_role + assert ( + len(e.value.service["role_config_groups"]) == 3 + ) # SERVER, GATEWAY bases + server_rcg + + result_role = [ + r + for r in e.value.service["roles"] + if r["type"] == server_rcg_role.type + and r["hostname"] == server_rcg_role.host_ref.hostname + ][0] + assert result_role["role_config_group_name"] == base_rcg.name + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == False + assert len(e.value.service["roles"]) == 2 + assert ( + len(e.value.service["role_config_groups"]) == 3 + ) # SERVER, GATEWAY bases + server_rcg + + result_role = [ + r + for r in e.value.service["roles"] + if r["type"] == server_rcg_role.type + and r["hostname"] == server_rcg_role.host_ref.hostname + ][0] + assert result_role["role_config_group_name"] == base_rcg.name + + def test_service_existing_role_tags( + self, conn, module_args, cm_api_client, zookeeper, server_role + ): + RolesResourceApi(cm_api_client).add_tags( + cluster_name=zookeeper.cluster_ref.cluster_name, + service_name=zookeeper.name, + role_name=server_role.name, + body=[ + ApiEntityTag(name="tag_one", value="Existing"), + ApiEntityTag(name="tag_two", value="Existing"), + ], + ) + + module_args( + { + **conn, + "cluster": zookeeper.cluster_ref.cluster_name, + "name": zookeeper.name, + "roles": [ + { + "type": server_role.type, + "hostnames": [server_role.host_ref.hostname], + "tags": { + "tag_one": "Updated", + "tag_three": "Added", + }, + } + ], + "state": "present", + } + ) + + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == True + assert len(e.value.service["roles"]) == 2 # SERVER + service_role + assert len(e.value.service["role_config_groups"]) == 2 # SERVER, GATEWAY bases + + result_role = [ + r + for r in e.value.service["roles"] + if r["type"] == server_role.type + and r["hostname"] == server_role.host_ref.hostname + ][0] + assert result_role["tags"] == dict( + tag_one="Updated", tag_two="Existing", tag_three="Added" + ) + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == False + assert len(e.value.service["roles"]) == 2 # SERVER + service_role + assert len(e.value.service["role_config_groups"]) == 2 # SERVER, GATEWAY bases + + result_role = [ + r + for r in e.value.service["roles"] + if r["type"] == server_role.type + and r["hostname"] == server_role.host_ref.hostname + ][0] + assert result_role["tags"] == dict( + tag_one="Updated", tag_two="Existing", tag_three="Added" + ) + + def test_service_existing_role_tags_purge( + self, conn, module_args, cm_api_client, zookeeper, server_role + ): + 
RolesResourceApi(cm_api_client).add_tags( + cluster_name=zookeeper.cluster_ref.cluster_name, + service_name=zookeeper.name, + role_name=server_role.name, + body=[ + ApiEntityTag(name="tag_one", value="Existing"), + ApiEntityTag(name="tag_two", value="Existing"), + ], + ) + + module_args( + { + **conn, + "cluster": zookeeper.cluster_ref.cluster_name, + "name": zookeeper.name, + "roles": [ + { + "type": server_role.type, + "hostnames": [server_role.host_ref.hostname], + "tags": { + "tag_one": "Updated", + "tag_three": "Added", + }, + } + ], + "purge": True, + "state": "present", + } + ) + + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == True + assert len(e.value.service["roles"]) == 1 # service_role + assert len(e.value.service["role_config_groups"]) == 2 # SERVER, GATEWAY bases + + result_role = [ + r + for r in e.value.service["roles"] + if r["type"] == server_role.type + and r["hostname"] == server_role.host_ref.hostname + ][0] + assert result_role["tags"] == dict(tag_one="Updated", tag_three="Added") + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == False + assert len(e.value.service["roles"]) == 1 # service_role + assert len(e.value.service["role_config_groups"]) == 2 # SERVER, GATEWAY bases + + result_role = [ + r + for r in e.value.service["roles"] + if r["type"] == server_role.type + and r["hostname"] == server_role.host_ref.hostname + ][0] + assert result_role["tags"] == dict(tag_one="Updated", tag_three="Added") + + def test_service_existing_role_config( + self, conn, module_args, cm_api_client, zookeeper, server_role + ): + RolesResourceApi(cm_api_client).update_role_config( + cluster_name=zookeeper.cluster_ref.cluster_name, + service_name=zookeeper.name, + role_name=server_role.name, + body=ApiConfigList( + items=[ + ApiConfig(name="minSessionTimeout", value="5501"), + ApiConfig(name="maxSessionTimeout", value="45001"), + ] + ), + ) + + module_args( + { + **conn, + "cluster": zookeeper.cluster_ref.cluster_name, + "name": zookeeper.name, + "roles": [ + { + "type": server_role.type, + "hostnames": [server_role.host_ref.hostname], + "config": { + "minSessionTimeout": 5601, + "maxClientCnxns": 56, + }, + } + ], + "state": "present", + } + ) + + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == True + assert len(e.value.service["roles"]) == 2 # SERVER + service_role + assert len(e.value.service["role_config_groups"]) == 2 # SERVER, GATEWAY bases + + result_role = [ + r + for r in e.value.service["roles"] + if r["type"] == server_role.type + and r["hostname"] == server_role.host_ref.hostname + ][0] + assert ( + result_role["config"].items() + >= dict( + minSessionTimeout="5601", maxSessionTimeout="45001", maxClientCnxns="56" + ).items() + ) + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == False + assert len(e.value.service["roles"]) == 2 # SERVER + service_role + assert len(e.value.service["role_config_groups"]) == 2 # SERVER, GATEWAY bases + + result_role = [ + r + for r in e.value.service["roles"] + if r["type"] == server_role.type + and r["hostname"] == server_role.host_ref.hostname + ][0] + assert ( + result_role["config"].items() + >= dict( + minSessionTimeout="5601", maxSessionTimeout="45001", maxClientCnxns="56" + ).items() + ) + + def test_service_existing_role_config_purge( + self, conn, module_args, cm_api_client, zookeeper, server_role + ): + 
RolesResourceApi(cm_api_client).update_role_config( + cluster_name=zookeeper.cluster_ref.cluster_name, + service_name=zookeeper.name, + role_name=server_role.name, + body=ApiConfigList( + items=[ + ApiConfig(name="minSessionTimeout", value="5501"), + ApiConfig(name="maxSessionTimeout", value="45001"), + ] + ), + ) + + module_args( + { + **conn, + "cluster": zookeeper.cluster_ref.cluster_name, + "name": zookeeper.name, + "roles": [ + { + "type": server_role.type, + "hostnames": [server_role.host_ref.hostname], + "config": { + "minSessionTimeout": 5601, + "maxClientCnxns": 56, + }, + } + ], + "purge": True, + "state": "present", + } + ) + + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == True + assert len(e.value.service["roles"]) == 1 # service_role + assert len(e.value.service["role_config_groups"]) == 2 # SERVER, GATEWAY bases + + result_role = [ + r + for r in e.value.service["roles"] + if r["type"] == server_role.type + and r["hostname"] == server_role.host_ref.hostname + ][0] + assert ( + result_role["config"].items() + == dict(minSessionTimeout="5601", maxClientCnxns="56").items() + ) + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == False + assert len(e.value.service["roles"]) == 1 # service_role + assert len(e.value.service["role_config_groups"]) == 2 # SERVER, GATEWAY bases + + result_role = [ + r + for r in e.value.service["roles"] + if r["type"] == server_role.type + and r["hostname"] == server_role.host_ref.hostname + ][0] + assert ( + result_role["config"].items() + == dict(minSessionTimeout="5601", maxClientCnxns="56").items() + ) + + def test_service_existing_role_add( + self, conn, module_args, zookeeper, available_hosts + ): + module_args( + { + **conn, + "cluster": zookeeper.cluster_ref.cluster_name, + "name": zookeeper.name, + "roles": [ + { + "type": "SERVER", + "hostnames": [available_hosts[0].hostname], + } + ], + "state": "present", + } + ) + + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == True + assert len(e.value.service["roles"]) == 2 # SERVER + new SERVER + assert len(e.value.service["role_config_groups"]) == 2 # SERVER, GATEWAY bases + assert available_hosts[0].hostname in [ + r["hostname"] for r in e.value.service["roles"] if r["type"] == "SERVER" + ] + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == False + assert len(e.value.service["roles"]) == 2 # SERVER + new SERVER + assert len(e.value.service["role_config_groups"]) == 2 # SERVER, GATEWAY bases + assert available_hosts[0].hostname in [ + r["hostname"] for r in e.value.service["roles"] if r["type"] == "SERVER" + ] + + def test_service_existing_role_purge( + self, conn, module_args, zookeeper, available_hosts + ): + module_args( + { + **conn, + "cluster": zookeeper.cluster_ref.cluster_name, + "name": zookeeper.name, + "roles": [ + { + "type": "SERVER", + "hostnames": [available_hosts[0].hostname], + "config": { + "serverId": 9, + }, + } + ], + "purge": True, + "state": "present", + } + ) + + with pytest.raises(AnsibleExitJson) as e: + service.main() + + assert e.value.changed == True + assert len(e.value.service["roles"]) == 1 # new SERVER + assert len(e.value.service["role_config_groups"]) == 2 # SERVER, GATEWAY bases + assert available_hosts[0].hostname in [ + r["hostname"] for r in e.value.service["roles"] if r["type"] == "SERVER" + ] + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + 
service.main() + + assert e.value.changed == False + assert len(e.value.service["roles"]) == 1 # new SERVER + assert len(e.value.service["role_config_groups"]) == 2 # SERVER, GATEWAY bases + assert available_hosts[0].hostname in [ + r["hostname"] for r in e.value.service["roles"] if r["type"] == "SERVER" + ] diff --git a/tests/unit/plugins/modules/service_info/test_service_info.py b/tests/unit/plugins/modules/service_info/test_service_info.py index 3a70af52..45a03fa5 100644 --- a/tests/unit/plugins/modules/service_info/test_service_info.py +++ b/tests/unit/plugins/modules/service_info/test_service_info.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Cloudera, Inc. All Rights Reserved. +# Copyright 2025 Cloudera, Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -22,74 +22,127 @@ import os import pytest +from pathlib import Path + +from cm_client import ( + ApiConfig, + ApiEntityTag, + ApiHost, + ApiHostRef, + ApiRole, + ApiService, + ApiServiceConfig, + ApiServiceState, + ServicesResourceApi, +) + from ansible_collections.cloudera.cluster.plugins.modules import service_info +from ansible_collections.cloudera.cluster.plugins.module_utils.cluster_utils import ( + get_cluster_hosts, +) +from ansible_collections.cloudera.cluster.plugins.module_utils.service_utils import ( + get_service_hosts, +) from ansible_collections.cloudera.cluster.tests.unit import ( AnsibleExitJson, AnsibleFailJson, + deregister_service, + register_service, ) LOG = logging.getLogger(__name__) -@pytest.fixture() -def conn(): - conn = dict(username=os.getenv("CM_USERNAME"), password=os.getenv("CM_PASSWORD")) +@pytest.fixture(scope="module") +def zookeeper(cm_api_client, base_cluster, request): + # Keep track of the provisioned service(s) + service_registry = list[ApiService]() + + # Get the current cluster hosts + hosts = get_cluster_hosts(cm_api_client, base_cluster) + + id = Path(request.node.name).stem + + zk_service = ApiService( + name=f"test-zk-{id}", + type="ZOOKEEPER", + display_name=f"ZooKeeper ({id})", + # Add a SERVER role (so we can start the service -- a ZK requirement!) 
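+# (Module-scoped fixture: a single ZooKeeper service with one SERVER role on the
+# first cluster host is shared by all tests in this file.)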
+ roles=[ApiRole(type="SERVER", host_ref=ApiHostRef(hosts[0].host_id))], + ) + + # Provision and yield the created service + yield register_service( + api_client=cm_api_client, + registry=service_registry, + cluster=base_cluster, + service=zk_service, + ) + + # Remove the created service + deregister_service(api_client=cm_api_client, registry=service_registry) - if os.getenv("CM_HOST", None): - conn.update(host=os.getenv("CM_HOST")) - if os.getenv("CM_PORT", None): - conn.update(port=os.getenv("CM_PORT")) +@pytest.fixture() +def cluster_hosts(cm_api_client, base_cluster) -> list[ApiHost]: + return get_cluster_hosts(cm_api_client, base_cluster) - if os.getenv("CM_ENDPOINT", None): - conn.update(url=os.getenv("CM_ENDPOINT")) - if os.getenv("CM_PROXY", None): - conn.update(proxy=os.getenv("CM_PROXY")) +@pytest.fixture() +def available_hosts(cm_api_client, cluster_hosts, zookeeper) -> list[ApiHost]: + service_host_ids = [ + h.host_id + for h in get_service_hosts( + api_client=cm_api_client, + service=zookeeper, + ) + ] - return { - **conn, - "verify_tls": "no", - "debug": "no", - } + return [h for h in cluster_hosts if h.host_id not in service_host_ids] def test_missing_required(conn, module_args): - module_args(conn) + module_args( + { + **conn, + } + ) with pytest.raises(AnsibleFailJson, match="cluster"): service_info.main() def test_missing_cluster(conn, module_args): - conn.update(service="example") - module_args(conn) + module_args( + { + **conn, + "service": "example", + } + ) with pytest.raises(AnsibleFailJson, match="cluster"): service_info.main() -def test_invalid_service(conn, module_args): +def test_invalid_cluster(conn, module_args): module_args( { **conn, - "cluster": os.getenv("CM_CLUSTER"), - "service": "BOOM", + "cluster": "invalid", + "service": "example", } ) - with pytest.raises(AnsibleExitJson) as e: + with pytest.raises(AnsibleFailJson, match="Cluster does not exist") as e: service_info.main() - assert len(e.value.services) == 0 - -def test_invalid_cluster(conn, module_args): +def test_invalid_service(conn, module_args, base_cluster): module_args( { **conn, - "cluster": "BOOM", - "service": os.getenv("CM_SERVICE"), + "cluster": base_cluster.name, + "service": "not_found", } ) @@ -99,30 +152,59 @@ def test_invalid_cluster(conn, module_args): assert len(e.value.services) == 0 -def test_view_all_services(conn, module_args): +def test_all_services( + conn, + module_args, + request, + base_cluster, + zookeeper, + available_hosts, + service_factory, +): + id = Path(request.node.name) + + # Add an additional ZooKeeper service + zookeeper_two = service_factory( + cluster=base_cluster, + service=ApiService( + name=f"test-zk-{id}", + type="ZOOKEEPER", + display_name=f"ZooKeeper ({id})", + # Add a SERVER role (so we can start the service -- a ZK requirement!) 
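+# (This second SERVER role is placed on a host not already used by the shared
+# zookeeper fixture -- see the available_hosts fixture above.)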
+ roles=[ + ApiRole(type="SERVER", host_ref=ApiHostRef(available_hosts[0].host_id)) + ], + ), + ) + module_args( { **conn, - "cluster": os.getenv("CM_CLUSTER"), + "cluster": zookeeper.cluster_ref.cluster_name, } ) with pytest.raises(AnsibleExitJson) as e: service_info.main() - assert len(e.value.services) > 0 + assert len(e.value.services) == 3 # 2 ZK and 1 core settings + service_names = [s["name"] for s in e.value.services] + assert zookeeper.name in service_names + assert zookeeper_two.name in service_names -def test_view_single_service(conn, module_args): +def test_named_service(conn, module_args, zookeeper): module_args( { **conn, - "cluster": os.getenv("CM_CLUSTER"), - "service": os.getenv("CM_SERVICE"), + "cluster": zookeeper.cluster_ref.cluster_name, + "service": zookeeper.name, } ) with pytest.raises(AnsibleExitJson) as e: service_info.main() - assert len(e.value.services) == 1 + assert len(e.value.services) == 1 # Single named ZK + service_names = [s["name"] for s in e.value.services] + assert zookeeper.name in service_names diff --git a/tests/unit/plugins/modules/service_role/test_service_role.py b/tests/unit/plugins/modules/service_role/test_service_role.py index 5fa7dc15..1a697ecb 100644 --- a/tests/unit/plugins/modules/service_role/test_service_role.py +++ b/tests/unit/plugins/modules/service_role/test_service_role.py @@ -39,8 +39,6 @@ RoleCommandsResourceApi, ) -from ansible.module_utils.common.dict_transformations import recursive_diff - from ansible_collections.cloudera.cluster.plugins.modules import service_role from ansible_collections.cloudera.cluster.plugins.module_utils.cm_utils import ( wait_bulk_commands,