diff --git a/plugins/module_utils/cluster_utils.py b/plugins/module_utils/cluster_utils.py index bf3cae1f..e11512c2 100644 --- a/plugins/module_utils/cluster_utils.py +++ b/plugins/module_utils/cluster_utils.py @@ -20,7 +20,12 @@ normalize_output, ) -from cm_client import ApiCluster +from cm_client import ( + ApiClient, + ApiCluster, + ApiHost, + ClustersResourceApi, +) CLUSTER_OUTPUT = [ @@ -44,3 +49,13 @@ def parse_cluster_result(cluster: ApiCluster) -> dict: output = dict(version=cluster.full_version) output.update(normalize_output(cluster.to_dict(), CLUSTER_OUTPUT)) return output + + +def get_cluster_hosts(api_client: ApiClient, cluster: ApiCluster) -> list[ApiHost]: + return ( + ClustersResourceApi(api_client) + .list_hosts( + cluster_name=cluster.name, + ) + .items + ) diff --git a/plugins/module_utils/cm_utils.py b/plugins/module_utils/cm_utils.py index ae0ece39..e56f40eb 100644 --- a/plugins/module_utils/cm_utils.py +++ b/plugins/module_utils/cm_utils.py @@ -32,10 +32,13 @@ from ansible.module_utils.common.text.converters import to_native, to_text from time import sleep from cm_client import ( + ApiBulkCommandList, ApiClient, ApiCommand, + ApiCommandList, ApiConfig, ApiConfigList, + ApiEntityTag, Configuration, ) from cm_client.rest import ApiException, RESTClientObject @@ -47,6 +50,43 @@ __maintainer__ = ["wmudge@cloudera.com"] +def wait_bulk_commands( + api_client: ApiClient, + commands: ApiBulkCommandList, + polling: int = 120, + delay: int = 10, +): + if commands.errors: + error_msg = "\n".join(commands.errors) + raise Exception(error_msg) + + for cmd in commands.items: + # Serial monitoring + wait_command(api_client, cmd, polling, delay) + + +def wait_commands( + api_client: ApiClient, commands: ApiCommandList, polling: int = 120, delay: int = 10 +): + for cmd in commands.items: + # Serial monitoring + wait_command(api_client, cmd, polling, delay) + + +def wait_command( + api_client: ApiClient, command: ApiCommand, polling: int = 120, delay: int = 10 +): + poll_count = 0 + while command.active: + if poll_count > polling: + raise Exception("Command timeout: " + str(command.id)) + sleep(delay) + poll_count += 1 + command = CommandsResourceApi(api_client).read_command(command.id) + if not command.success: + raise Exception(command.result_message) + + def normalize_output(entity: dict, filter: list) -> dict: output = {} for k in filter: @@ -131,8 +171,8 @@ def resolve_tag_updates( incoming_tags = { k: str(v) for k, v in incoming.items() - if (isinstance(v, str) and v.strip() != "") - or (not isinstance(v, str) and v is not None) + if (type(v) is str and v.strip() != "") + or (type(v) is not str and v is not None) } delta_add = {} @@ -151,6 +191,29 @@ def resolve_tag_updates( return (delta_add, delta_del) +class TagUpdates(object): + def __init__( + self, existing: list[ApiEntityTag], updates: dict, purge: bool + ) -> None: + (_additions, _deletions) = resolve_tag_updates( + current={t.name: t.value for t in existing}, + incoming=updates, + purge=purge, + ) + + self.diff = dict( + before=_deletions, + after=_additions, + ) + + self.additions = [ApiEntityTag(k, v) for k, v in _additions.items()] + self.deletions = [ApiEntityTag(k, v) for k, v in _deletions.items()] + + @property + def changed(self) -> bool: + return bool(self.additions) or bool(self.deletions) + + class ConfigListUpdates(object): def __init__(self, existing: ApiConfigList, updates: dict, purge: bool) -> None: current = {r.name: r.value for r in existing.items} @@ -507,7 +570,7 @@ def wait_command(self, command: 
ApiCommand, polling: int = 10, delay: int = 5): If the command exceeds the polling limit, it fails with a timeout error. If the command completes unsuccessfully, it fails with the command's result message. - Inputs: + Args: command (ApiCommand): The command object to monitor. polling (int, optional): The maximum number of polling attempts before timing out. Default is 10. delay (int, optional): The time (in seconds) to wait between polling attempts. Default is 5. @@ -515,7 +578,7 @@ def wait_command(self, command: ApiCommand, polling: int = 10, delay: int = 5): Raises: module.fail_json: If the command times out or fails. - Return: + Returns: None: The function returns successfully if the command completes and is marked as successful. """ poll_count = 0 @@ -530,6 +593,31 @@ def wait_command(self, command: ApiCommand, polling: int = 10, delay: int = 5): msg=to_text(command.result_message), command_id=to_text(command.id) ) + def wait_commands( + self, commands: ApiBulkCommandList, polling: int = 10, delay: int = 5 + ): + """ + Waits for a list of commands to complete, polling each status at regular intervals. + If a command exceeds the polling limit, it fails with a timeout error. + If a command completes unsuccessfully, it fails with the command's result message. + + Args: + commands (ApiBulkCommandList): Cloudera Manager bulk commands + + Raises: + module.fail_json: If the command times out or fails. + + Returns: + none: The function returns successfully if the commands complete and are marked as successful. + """ + if commands.errors: + error_msg = "\n".join(commands.errors) + self.module.fail_json(msg=error_msg) + + for c in commands.items: + # Not in parallel... + self.wait_command(c, polling, delay) + @staticmethod def ansible_module_internal(argument_spec={}, required_together=[], **kwargs): """ diff --git a/plugins/module_utils/host_utils.py b/plugins/module_utils/host_utils.py index 645cf466..293a17b5 100644 --- a/plugins/module_utils/host_utils.py +++ b/plugins/module_utils/host_utils.py @@ -28,6 +28,19 @@ def get_host( api_client: ApiClient, hostname: str = None, host_id: str = None ) -> ApiHost: + """Retrieve a Host by either hostname or host ID. + + Args: + api_client (ApiClient): Cloudera Manager API client. + hostname (str, optional): The cluster hostname. Defaults to None. + host_id (str, optional): The cluster host ID. Defaults to None. + + Raises: + ex: ApiException for all non-404 errors. + + Returns: + ApiHost: Host object. If not found, returns None. + """ if hostname: return next( ( @@ -50,7 +63,21 @@ def get_host( def get_host_ref( api_client: ApiClient, hostname: str = None, host_id: str = None ) -> ApiHostRef: + """Retrieve a Host Reference by either hostname or host ID. + + Args: + api_client (ApiClient): Cloudera Manager API client. + hostname (str, optional): The cluster hostname. Defaults to None. + host_id (str, optional): The cluster host ID. Defaults to None. + + Raises: + ex: ApiException for all non-404 errors. + + Returns: + ApiHostRef: Host reference object. If not found, returns None. + """ host = get_host(api_client, hostname, host_id) + if host is not None: return ApiHostRef(host.host_id, host.hostname) else: diff --git a/plugins/module_utils/role_utils.py b/plugins/module_utils/role_utils.py index 7746bda4..07de2872 100644 --- a/plugins/module_utils/role_utils.py +++ b/plugins/module_utils/role_utils.py @@ -12,8 +12,13 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_native + from ansible_collections.cloudera.cluster.plugins.module_utils.cm_utils import ( normalize_output, + wait_commands, + wait_bulk_commands, ) from ansible_collections.cloudera.cluster.plugins.module_utils.host_utils import ( get_host_ref, @@ -23,14 +28,25 @@ ApiClient, ApiConfig, ApiConfigList, + ApiEntityTag, ApiRoleList, ApiRoleConfigGroupRef, + ApiRoleNameList, + ApiRoleState, + RoleCommandsResourceApi, RoleConfigGroupsResourceApi, RolesResourceApi, MgmtRolesResourceApi, ) from cm_client import ApiRole + +class RoleException(Exception): + """General Exception type for Role management.""" + + pass + + ROLE_OUTPUT = [ "commission_state", "config_staleness_status", @@ -51,9 +67,10 @@ def parse_role_result(role: ApiRole) -> dict: - # Retrieve only the host_id, role_config_group, and service identifiers + # Retrieve only the host_id, hostname, role_config_group, and service identifiers output = dict( host_id=role.host_ref.host_id, + hostname=role.host_ref.hostname, role_config_group_name=role.role_config_group_ref.role_config_group_name, service_name=role.service_ref.service_name, ) @@ -70,11 +87,25 @@ def get_mgmt_roles(api_client: ApiClient, role_type: str) -> ApiRoleList: def read_role( - api_client: ApiClient, cluster_name: str, service_name: str, name: str + api_client: ApiClient, cluster_name: str, service_name: str, role_name: str ) -> ApiRole: + """Read a role for a cluster service and populates the role configuration. + + Args: + api_client (ApiClient): Cloudera Manager API client + cluster_name (str): Cluster name (identifier). + service_name (str): Service name (identifier). + role_name (str): Role name (identifier). + + Raises: + ApiException: + + Returns: + ApiRole: The Role object or None if the role is not found. + """ role_api = RolesResourceApi(api_client) role = role_api.read_role( - cluster_name=cluster_name, service_name=service_name, role_name=name + cluster_name=cluster_name, service_name=service_name, role_name=role_name ) if role is not None: role.config = role_api.read_role_config( @@ -84,17 +115,65 @@ def read_role( def read_roles( - api_client: ApiClient, cluster_name: str, service_name: str + api_client: ApiClient, + cluster_name: str, + service_name: str, + type: str = None, + hostname: str = None, + host_id: str = None, + view: str = None, ) -> ApiRoleList: + """Read roles for a cluster service. Optionally, filter by type, hostname, host ID. + + Args: + api_client (ApiClient): Cloudera Manager API client + cluster_name (str): Cluster name (identifier) + service_name (str): Service name (identifier) + type (str, optional): Role type. Defaults to None. + hostname (str, optional): Cluster hostname. Defaults to None. + host_id (str, optional): Cluster host ID. Defaults to None. + view (str, optional): View to retrieve. Defaults to None. 
+ + Raises: + ApiException: + + Returns: + ApiRoleList: List of Role objects + """ role_api = RolesResourceApi(api_client) - roles = role_api.read_roles(cluster_name, service_name).items + + payload = dict( + cluster_name=cluster_name, + service_name=service_name, + ) + + if view is not None: + payload.update(view=view) + + filter = ";".join( + [ + f"{f[0]}=={f[1]}" + for f in [ + ("type", type), + ("hostname", hostname), + ("hostId", host_id), + ] + if f[1] is not None + ] + ) + + if filter != "": + payload.update(filter=filter) + + roles = role_api.read_roles(**payload).items + + # Remove filter from core payload + payload.pop("filter", None) + for r in roles: - r.config = role_api.read_role_config( - api_client=api_client, - cluster_name=cluster_name, - service_name=service_name, - role_name=r.name, - ) + payload.update(role_name=r.name) + r.config = role_api.read_role_config(**payload) + return ApiRoleList(items=roles) @@ -109,7 +188,6 @@ def read_roles_by_type( ] for r in roles: r.config = role_api.read_role_config( - api_client=api_client, cluster_name=cluster_name, service_name=service_name, role_name=r.name, @@ -136,31 +214,28 @@ def read_cm_roles(api_client: ApiClient) -> ApiRoleList: return ApiRoleList(items=roles) -class HostNotFoundException(Exception): +class HostNotFoundException(RoleException): pass -class RoleConfigGroupNotFoundException(Exception): +class RoleConfigGroupNotFoundException(RoleException): pass def create_role( api_client: ApiClient, role_type: str, - hostname: str, - host_id: str, - name: str = None, + hostname: str = None, + host_id: str = None, config: dict = None, cluster_name: str = None, service_name: str = None, role_config_group: str = None, + tags: dict = None, ) -> ApiRole: - # Set up the role - role = ApiRole(type=str(role_type).upper()) - # Name - if name: - role.name = name # No name allows auto-generation + # Set up the role type + role = ApiRole(type=str(role_type).upper()) # Host assignment host_ref = get_host_ref(api_client, hostname, host_id) @@ -192,4 +267,111 @@ def create_role( items=[ApiConfig(name=k, value=v) for k, v in config.items()] ) + # Tags + if tags: + role.tags = [ApiEntityTag(k, v) for k, v in tags.items()] + return role + + +def provision_service_role( + api_client: ApiClient, cluster_name: str, service_name: str, role: ApiRole +) -> ApiRole: + role_api = RolesResourceApi(api_client) + + provisioned_role = next( + ( + iter( + role_api.create_roles( + cluster_name=cluster_name, + service_name=service_name, + body=ApiRoleList(items=[role]), + ).items + ) + ), + None, + ) + + if provisioned_role is None: + return + + # Wait for any running commands like Initialize + available_cmds = role_api.list_commands( + cluster_name=cluster_name, + service_name=service_name, + role_name=provisioned_role.name, + ) + + running_cmds = role_api.list_active_commands( + cluster_name=cluster_name, + service_name=service_name, + role_name=provisioned_role.name, + ) + + try: + wait_commands(api_client=api_client, commands=running_cmds) + return provisioned_role + except Exception as e: + raise RoleException(str(e)) + + +class MaintenanceStateException(RoleException): + pass + + +def toggle_role_maintenance( + api_client: ApiClient, role: ApiRole, maintenance: bool, check_mode: bool +) -> bool: + role_api = RolesResourceApi(api_client) + changed = False + + if maintenance and not role.maintenance_mode: + changed = True + cmd = role_api.enter_maintenance_mode + elif not maintenance and role.maintenance_mode: + changed = True + cmd = 
role_api.exit_maintenance_mode + + if not check_mode and changed: + maintenance_cmd = cmd( + cluster_name=role.service_ref.cluster_name, + service_name=role.service_ref.service_name, + role_name=role.name, + ) + + if maintenance_cmd.success is False: + raise MaintenanceStateException( + f"Unable to set Maintenance mode to '{maintenance}': {maintenance_cmd.result_message}" + ) + + return changed + + +def toggle_role_state( + api_client: ApiClient, role: ApiRole, state: str, check_mode: bool +) -> ApiRoleState: + role_cmd_api = RoleCommandsResourceApi(api_client) + changed = None + + if state == "started" and role.role_state not in [ApiRoleState.STARTED]: + changed = ApiRoleState.STARTED + cmd = role_cmd_api.start_command + elif state == "stopped" and role.role_state not in [ + ApiRoleState.STOPPED, + ApiRoleState.NA, + ]: + changed = ApiRoleState.STOPPED + cmd = role_cmd_api.stop_command + elif state == "restarted": + changed = ApiRoleState.STARTED + cmd = role_cmd_api.restart_command + + if not check_mode and changed: + exec_cmds = cmd( + cluster_name=role.service_ref.cluster_name, + service_name=role.service_ref.service_name, + body=ApiRoleNameList(items=[role.name]), + ) + wait_bulk_commands(api_client=api_client, commands=exec_cmds) + + return changed diff --git a/plugins/module_utils/service_utils.py b/plugins/module_utils/service_utils.py index 53e405d7..1a920f68 100644 --- a/plugins/module_utils/service_utils.py +++ b/plugins/module_utils/service_utils.py @@ -32,6 +32,7 @@ ApiConfig, ApiService, ApiServiceConfig, + ClustersResourceApi, MgmtServiceResourceApi, MgmtRoleConfigGroupsResourceApi, MgmtRolesResourceApi, @@ -187,3 +188,11 @@ def __init__(self, existing: ApiServiceConfig, updates: dict, purge: bool) -> No @property def changed(self) -> bool: return bool(self.config.items) + + +def get_service_hosts(api_client: ApiClient, service: ApiService): + return ( + ClustersResourceApi(api_client) + .list_hosts(cluster_name=service.cluster_ref.cluster_name) + .items + ) diff --git a/plugins/modules/cm_service_role.py b/plugins/modules/cm_service_role.py index 69feda02..bb3c98d6 100644 --- a/plugins/modules/cm_service_role.py +++ b/plugins/modules/cm_service_role.py @@ -492,7 +492,7 @@ def exec_role_command( self.diff["after"].update(role_state=value) if not self.module.check_mode: - self.handle_commands(cmd(body=ApiRoleNameList(items=[role.name]))) + self.wait_commands(cmd(body=ApiRoleNameList(items=[role.name]))) def handle_maintenance(self, role_api: MgmtRolesResourceApi, role: ApiRole) -> None: if self.maintenance is not None and self.maintenance != role.maintenance_mode: @@ -581,15 +581,6 @@ def deprovision_role(self, role_api: MgmtRolesResourceApi, role: ApiRole) -> Non if not self.module.check_mode: role_api.delete_role(role.name) - def handle_commands(self, commands: ApiBulkCommandList): - if commands.errors: - error_msg = "\n".join(commands.errors) - self.module.fail_json(msg=error_msg) - - for c in commands.items: - # Not in parallel, but should only be a single command - self.wait_command(c) - def main(): module = ClouderaManagerMutableModule.ansible_module( diff --git a/plugins/modules/service_role.py b/plugins/modules/service_role.py index d92ca533..953cd72b 100644 --- a/plugins/modules/service_role.py +++ b/plugins/modules/service_role.py @@ -1,6 +1,7 @@ +#!/usr/bin/python # -*- coding: utf-8 -*- -# Copyright 2024 Cloudera, Inc. All Rights Reserved. +# Copyright 2025 Cloudera, Inc. All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,45 +15,13 @@ # See the License for the specific language governing permissions and # limitations under the License. -from ansible_collections.cloudera.cluster.plugins.module_utils.cm_utils import ( - ClouderaManagerMutableModule, - resolve_tag_updates, -) -from ansible_collections.cloudera.cluster.plugins.module_utils.role_utils import ( - parse_role_result, -) - -from cm_client import ( - ApiEntityTag, - ApiHostRef, - ApiRole, - ApiRoleList, - ApiRoleNameList, - ClustersResourceApi, - HostsResourceApi, - RoleCommandsResourceApi, - RolesResourceApi, - ServicesResourceApi, -) -from cm_client.rest import ApiException - - -ANSIBLE_METADATA = { - "metadata_version": "1.1", - "status": ["preview"], - "supported_by": "community", -} - DOCUMENTATION = r""" ---- module: service_role short_description: Manage a service role in cluster description: - Manage a service role in a cluster. author: - "Webster Mudge (@wmudge)" -requirements: - - cm-client options: cluster: description: @@ -68,18 +37,27 @@ required: yes aliases: - service_name - role: + name: description: - - The role name. - - If not specified, the role name will be auto-generated on creation. + - The role name, i.e. the auto-generated identifier. + - Either O(name) or O(type) must be provided. type: str aliases: - role_name - - name + - role + type: + description: + - A role type for the role. + - Either O(name) or O(type) must be provided. + - Required to provision a new role. + type: str + aliases: + - role_type cluster_hostname: description: - The hostname of a cluster instance for the role. - Mutually exclusive with I(cluster_host_id). + - Either O(cluster_host_id) or O(cluster_hostname) must be provided if O(type) is present. type: str aliases: - cluster_host @@ -87,35 +65,42 @@ description: - The host ID of a cluster instance for the role. - Mutually exclusive with I(cluster_hostname). + - Either O(cluster_host_id) or O(cluster_hostname) must be provided if O(type) is present. type: str - type: - description: - - A role type for the role. - - Required if the I(state) creates a new role. - type: str - aliases: - - role_type maintenance: description: - Flag for whether the role should be in maintenance mode. type: bool aliases: - maintenance_mode + config: + description: + - The role configuration overrides to set. + - To unset a parameter, use V(None) as the value. + type: dict + aliases: + - params + - parameters tags: description: - A set of tags applied to the role. - - To unset a tag, use C(None) as its value. + - To unset a tag, use V(None) as its value. type: dict + role_config_group: + description: + - The role configuration group name to assign to the role. + - To assign the I(base) role configuration group, i.e. the default, set O(role_config_group=None). + type: str purge: description: - - Flag for whether the declared role tags should append or overwrite any existing tags. - - To clear all tags, set I(tags={}), i.e. an empty dictionary, and I(purge=True). + - Flag for whether the declared role configuration overrides and tags should append or overwrite any existing entries. + - To clear all configuration overrides or tags, set O(config={}) or O(tags={}), i.e. an empty dictionary, respectively, and set O(purge=True). type: bool default: False state: description: - The state of the role. 
- - Note, if the declared state is invalid for the role, for example, the role is a C(HDFS GATEWAY), the module will return an error. + - Note, if the declared state is invalid for the role type, for example, C(HDFS GATEWAY), the module will return an error. type: str default: present choices: @@ -125,9 +110,9 @@ - started - stopped extends_documentation_fragment: - - ansible.builtin.action_common_attributes - cloudera.cluster.cm_options - cloudera.cluster.cm_endpoint + - cloudera.cluster.message attributes: check_mode: support: full @@ -135,21 +120,14 @@ support: full platform: platforms: all +requirements: + - cm-client +seealso: + - module: cloudera.cluster.service_role_info """ EXAMPLES = r""" ---- -- name: Establish a service role (auto-generated name) - cloudera.cluster.service_role: - host: example.cloudera.com - username: "jane_smith" - password: "S&peR4Ec*re" - cluster: example-cluster - service: example-hdfs - type: GATEWAY - cluster_hostname: worker-01.cloudera.internal - -- name: Establish a service role (defined name) +- name: Provision a service role cloudera.cluster.service_role: host: example.cloudera.com username: "jane_smith" @@ -157,17 +135,16 @@ cluster: example-cluster service: example-hdfs type: GATEWAY - name: example-gateway cluster_hostname: worker-01.cloudera.internal -- name: Set a service role to maintenance mode +- name: Set a service role to maintenance mode (using role name) cloudera.cluster.service_role: host: example.cloudera.com username: "jane_smith" password: "S&peR4Ec*re" cluster: example-cluster service: example-hdfs - name: example-gateway + name: example-GATEWAY maintenance: yes - name: Update (append) tags to a service role @@ -177,7 +154,8 @@ password: "S&peR4Ec*re" cluster: example-cluster service: example-hdfs - name: example-gateway + type: GATEWAY + cluster_hostname: worker-01.cloudera.internal tags: tag_one: value_one tag_two: value_two @@ -188,7 +166,8 @@ username: "jane_smith" cluster: example-cluster service: example-hdfs - name: example-gateway + type: GATEWAY + cluster_hostname: worker-01.cloudera.internal tags: tag_three: value_three purge: yes @@ -200,7 +179,8 @@ password: "S&peR4Ec*re" cluster: example-cluster service: example-hdfs - name: example-gateway + type: GATEWAY + cluster_hostname: worker-01.cloudera.internal tags: {} purge: yes @@ -211,7 +191,8 @@ password: "S&peR4Ec*re" cluster: example-cluster service: example-hdfs - name: example-gateway + type: GATEWAY + cluster_hostname: worker-01.cloudera.internal state: started - name: Force a restart to a service role @@ -221,19 +202,10 @@ password: "S&peR4Ec*re" cluster: example-cluster service: example-hdfs - name: example-gateway + type: GATEWAY + cluster_hostname: worker-01.cloudera.internal state: restarted -- name: Start a service role - cloudera.cluster.service_role: - host: example.cloudera.com - username: "jane_smith" - password: "S&peR4Ec*re" - cluster: example-cluster - service: example-hdfs - name: example-gateway - state: started - - name: Remove a service role cloudera.cluster.service_role: host: example.cloudera.com @@ -241,15 +213,15 @@ password: "S&peR4Ec*re" cluster: example-cluster service: example-hdfs - name: example-gateway + type: GATEWAY state: absent """ RETURN = r""" ---- role: description: Details about the service role. type: dict + returned: always contains: name: description: The cluster service role name. @@ -356,6 +328,10 @@ description: The name of the cluster service role config group, which uniquely identifies it in a Cloudera Manager installation. 
type: str returned: when supported + config: + description: Set of role configurations for the cluster service role. + type: dict + returned: when supported tags: description: The dictionary of tags for the cluster service role. type: dict @@ -368,6 +344,39 @@ returned: when supported """ +from cm_client import ( + ApiRole, + ApiRoleNameList, + ClustersResourceApi, + RoleConfigGroupsResourceApi, + RolesResourceApi, + ServicesResourceApi, +) +from cm_client.rest import ApiException + +from ansible.module_utils.common.text.converters import to_native + +from ansible_collections.cloudera.cluster.plugins.module_utils.cm_utils import ( + ClouderaManagerMutableModule, + ConfigListUpdates, + TagUpdates, +) + +from ansible_collections.cloudera.cluster.plugins.module_utils.role_config_group_utils import ( + get_base_role_config_group, +) + +from ansible_collections.cloudera.cluster.plugins.module_utils.role_utils import ( + create_role, + parse_role_result, + provision_service_role, + read_role, + read_roles, + toggle_role_maintenance, + toggle_role_state, + RoleException, +) + class ClusterServiceRole(ClouderaManagerMutableModule): def __init__(self, module): @@ -376,10 +385,12 @@ def __init__(self, module): # Set the parameters self.cluster = self.get_param("cluster") self.service = self.get_param("service") - self.role = self.get_param("role") + self.name = self.get_param("name") self.cluster_hostname = self.get_param("cluster_hostname") self.cluster_host_id = self.get_param("cluster_host_id") self.maintenance = self.get_param("maintenance") + self.config = self.get_param("config") + self.role_config_group = self.get_param("role_config_group") self.tags = self.get_param("tags") self.type = self.get_param("type") self.state = self.get_param("state") @@ -395,6 +406,13 @@ def __init__(self, module): @ClouderaManagerMutableModule.handle_process def process(self): + if self.type: + if not self.cluster_hostname and not self.cluster_host_id: + self.module.fail_json( + msg="one of the following is required: %s" + % ", ".join(["cluster_hostname", "cluster_host_id"]) + ) + try: ClustersResourceApi(self.api_client).read_cluster(self.cluster) except ApiException as ex: @@ -413,310 +431,256 @@ def process(self): else: raise ex - api_instance = RolesResourceApi(self.api_client) - existing = None + role_api = RolesResourceApi(self.api_client) + current = None - if self.role: + # If given the role identifier, get it or fail (is a read-only variable) + if self.name: try: - existing = api_instance.read_role(self.cluster, self.role, self.service) + current = read_role( + api_client=self.api_client, + cluster_name=self.cluster, + service_name=self.service, + role_name=self.name, + ) except ApiException as ex: if ex.status != 404: raise ex + else: + return + # Else look up the role by type and host + else: + current = next( + iter( + read_roles( + api_client=self.api_client, + cluster_name=self.cluster, + service_name=self.service, + type=self.type, + hostname=self.cluster_hostname, + host_id=self.cluster_host_id, + ).items + ), + None, + ) if self.state == "absent": - if existing: + if current: self.changed = True + + if self.module._diff: + self.diff = dict(before=parse_role_result(current), after=dict()) + if not self.module.check_mode: - api_instance.delete_role(self.cluster, self.role, self.service) + role_api.delete_role(self.cluster, self.name, self.service) elif self.state in ["present", "restarted", "started", "stopped"]: + # If it is a new role + if not current: + self.changed = True - if existing: - if 
self.type and self.type != existing.type: - # Destroy and rebuild - self.changed = True + try: + role = create_role( + api_client=self.api_client, + cluster_name=self.cluster, + service_name=self.service, + role_type=self.type, + hostname=self.cluster_hostname, + host_id=self.cluster_host_id, + config=self.config, + role_config_group=self.role_config_group, + tags=self.tags, + ) + except RoleException as ex: + self.module.fail_json(msg=to_native(ex)) - if not self.module.check_mode: - api_instance.delete_role(self.cluster, self.role, self.service) - self.cluster_host_id = existing.host_ref.host_id - self.cluster_hostname = existing.host_ref.hostname - self.create_role(api_instance) - else: - # Update existing + if self.module._diff: + self.diff = dict( + before={}, + after=role.to_dict(), + ) + + if not self.module.check_mode: + current = provision_service_role( + api_client=self.api_client, + cluster_name=self.cluster, + service_name=self.service, + role=role, + ) + + if not current: + self.module.fail_json( + msg="Unable to create new role", + role=to_native(role.to_dict()), + ) + + self.handle_maintenance(current) + + # Else it exists, so address any changes + else: + self.handle_maintenance(current) + + # Handle role override configurations + if self.config or self.purge: + if self.config is None: + self.config = dict() + + config_updates = ConfigListUpdates( + current.config, self.config, self.purge + ) - # Maintenance - if ( - self.maintenance is not None - and self.maintenance != existing.maintenance_mode - ): + if config_updates.changed: self.changed = True if self.module._diff: self.diff["before"].update( - maintenance_mode=existing.maintenance_mode + config=config_updates.diff["before"] + ) + self.diff["after"].update( + config=config_updates.diff["after"] ) - self.diff["after"].update(maintenance_mode=self.maintenance) if not self.module.check_mode: - if self.maintenance: - maintenance_cmd = api_instance.enter_maintenance_mode( - self.cluster, self.role, self.service - ) - else: - maintenance_cmd = api_instance.exit_maintenance_mode( - self.cluster, self.role, self.service - ) - - if maintenance_cmd.success is False: - self.module.fail_json( - msg=f"Unable to set Maintenance mode to '{self.maintenance}': {maintenance_cmd.result_message}" - ) + role_api.update_role_config( + cluster_name=self.cluster, + service_name=self.service, + role_name=current.name, + message=self.message, + body=config_updates.config, + ) - # Tags - if self.tags: - (delta_add, delta_del) = resolve_tag_updates( - {t.name: t.value for t in existing.tags}, - self.tags, - self.purge, + # Handle role config group + if ( + self.role_config_group is None + or self.role_config_group + != current.role_config_group_ref.role_config_group_name + ): + # If None, move to the base role config group + if self.role_config_group is None: + base_rcg = get_base_role_config_group( + api_client=self.api_client, + cluster_name=self.cluster, + service_name=self.service, + role_type=current.type, ) - if delta_add or delta_del: + if ( + current.role_config_group_ref.role_config_group_name + != base_rcg.name + ): self.changed = True if self.module._diff: - self.diff["before"].update(tags=delta_del) - self.diff["after"].update(tags=delta_add) + self.diff["before"].update( + role_config_group=current.role_config_group_ref.role_config_group_name + ) + self.diff["after"].update(role_config_group=None) if not self.module.check_mode: - if delta_del: - api_instance.delete_tags( - self.cluster, - self.role, - self.service, - body=[ - 
ApiEntityTag(k, v) - for k, v in delta_del.items() - ], - ) - if delta_add: - api_instance.add_tags( - self.cluster, - self.role, - self.service, - body=[ - ApiEntityTag(k, v) - for k, v in delta_add.items() - ], - ) - - # TODO Config - - if self.state == "started" and existing.role_state != "STARTED": + RoleConfigGroupsResourceApi( + self.api_client + ).move_roles_to_base_group( + cluster_name=self.cluster, + service_name=self.service, + body=ApiRoleNameList(items=[current.name]), + ) + # Otherwise, move to the given role config group + else: self.changed = True - if self.module._diff: - self.diff["before"].update(role_state=existing.role_state) - self.diff["after"].update(role_state="STARTED") + self.diff["before"].update( + role_config_group=current.role_config_group_ref.role_config_group_name + ) + self.diff["after"].update( + role_config_group=self.role_config_group + ) if not self.module.check_mode: - self.start_role(self.role) - - elif self.state == "stopped" and existing.role_state not in [ - "STOPPED", - "NA", - ]: - self.changed = True + RoleConfigGroupsResourceApi(self.api_client).move_roles( + cluster_name=self.cluster, + service_name=self.service, + role_config_group_name=self.role_config_group, + body=ApiRoleNameList(items=[current.name]), + ) - if self.module._diff: - self.diff["before"].update(role_state=existing.role_state) - self.diff["after"].update(role_state="STOPPED") + # Handle tags + if self.tags or self.purge: + if self.tags is None: + self.tags = dict() - if not self.module.check_mode: - self.stop_role(self.role) + tag_updates = TagUpdates(current.tags, self.tags, self.purge) - elif self.state == "restarted": + if tag_updates.changed: self.changed = True if self.module._diff: - self.diff["before"].update(role_state=existing.role_state) - self.diff["after"].update(role_state="STARTED") + self.diff["before"].update(tags=tag_updates.diff["before"]) + self.diff["after"].update(tags=tag_updates.diff["after"]) if not self.module.check_mode: - restart_cmds = RoleCommandsResourceApi( - self.api_client - ).restart_command( - self.cluster, - self.service, - body=ApiRoleNameList(items=[self.role]), - ) - - if restart_cmds.errors: - error_msg = "\n".join(restart_cmds.errors) - self.module.fail_json(msg=error_msg) - - for c in restart_cmds.items: - # Not in parallel, but should only be a single command - self.wait_command(c) - - if self.changed: - self.output = parse_role_result( - api_instance.read_role( - self.cluster, self.role, self.service, view="full" - ) - ) - else: - self.output = parse_role_result(existing) - else: - # Role doesn't exist - self.create_role(api_instance) - - else: - self.module.fail_json(msg=f"Invalid state: {self.state}") - - def create_role(self, api_instance): - missing_params = [] - - if self.type is None: - missing_params.append("type") - - if self.cluster_hostname is None and self.cluster_host_id is None: - missing_params += ["cluster_hostname", "cluster_host_id"] - - if missing_params: - self.module.fail_json( - msg=f"Role does not exist, missing required arguments: {', '.join(sorted(missing_params)) }" - ) - - payload = ApiRole(type=str(self.type).upper()) - - # Name - if self.role: - payload.name = self.role - - # Host assignment - if self.cluster_host_id is None or self.cluster_hostname is None: - host = None - - if self.cluster_hostname: - host = next( - ( - h - for h in HostsResourceApi(self.api_client).read_hosts().items - if h.hostname == self.cluster_hostname - ), - None, - ) - else: - try: - host = 
HostsResourceApi(self.api_client).read_host( - self.cluster_host_id - ) - except ApiException as ex: - if ex.status != 404: - raise ex - - if host is None: - self.module.fail_json(msg="Invalid host reference") - - payload.host_ref = ApiHostRef(host.host_id, host.hostname) - else: - payload.host_ref = ApiHostRef(self.cluster_host_id, self.cluster_hostname) - - # Tags - if self.tags: - payload.tags = [ApiEntityTag(k, v) for k, v in self.tags.items()] - - # TODO Config - - self.changed = True + if tag_updates.deletions: + role_api.delete_tags( + self.cluster, + self.name, + self.service, + body=tag_updates.deletions, + ) - if self.module._diff: - self.diff = dict( - before={}, - after=payload.to_dict(), - ) + if tag_updates.additions: + role_api.add_tags( + self.cluster, + self.name, + self.service, + body=tag_updates.additions, + ) - if not self.module.check_mode: - created_role = next( - ( - iter( - api_instance.create_roles( - self.cluster, - self.service, - body=ApiRoleList([payload]), - ).items - ) - ), - {}, + # Handle state changes + state_changed = toggle_role_state( + api_client=self.api_client, + role=current, + state=self.state, + check_mode=self.module.check_mode, ) - refresh = False - - # Maintenance - if self.maintenance: - refresh = True - + if state_changed is not None: + self.changed = True if self.module._diff: - self.diff["after"].update(maintenance_mode=True) - - maintenance_cmd = api_instance.enter_maintenance_mode( - self.cluster, created_role.name, self.service - ) - - if maintenance_cmd.success is False: - self.module.fail_json( - msg=f"Unable to set Maintenance mode to '{self.maintenance}': {maintenance_cmd.result_message}" - ) - - if self.state in ["started", "restarted"]: - refresh = True - self.start_role(created_role.name) + self.diff["before"].update(role_state=current.role_state) + self.diff["after"].update(role_state=state_changed) - elif self.state == "stopped": - refresh = True - self.stop_role(created_role.name) - - if refresh: + # If there are changes, get a fresh read + if self.changed: self.output = parse_role_result( - api_instance.read_role( - self.cluster, - created_role.name, - self.service, - view="full", + read_role( + api_client=self.api_client, + cluster_name=self.cluster, + service_name=self.service, + role_name=current.name, ) ) else: - self.output = parse_role_result(created_role) - - def start_role(self, role_name: str): - start_cmds = RoleCommandsResourceApi(self.api_client).start_command( - self.cluster, - self.service, - body=ApiRoleNameList(items=[role_name]), - ) - - if start_cmds.errors: - error_msg = "\n".join(start_cmds.errors) - self.module.fail_json(msg=error_msg) + self.output = parse_role_result(current) - for c in start_cmds.items: - # Not in parallel, but should only be a single command - self.wait_command(c) - - def stop_role(self, role_name: str): - stop_cmds = RoleCommandsResourceApi(self.api_client).stop_command( - self.cluster, - self.service, - body=ApiRoleNameList(items=[role_name]), - ) + else: + self.module.fail_json(msg=f"Invalid state: {self.state}") - if stop_cmds.errors: - error_msg = "\n".join(stop_cmds.errors) - self.module.fail_json(msg=error_msg) + def handle_maintenance(self, role: ApiRole) -> None: + if self.maintenance is not None: + try: + state_changed = toggle_role_maintenance( + api_client=self.api_client, + role=role, + maintenance=self.maintenance, + check_mode=self.module.check_mode, + ) + except RoleException as ex: + self.module.fail_json(msg=to_native(ex)) - for c in stop_cmds.items: - # Not in 
parallel, but should only be a single command - self.wait_command(c) + if state_changed: + self.changed = True + if self.module._diff: + self.diff["before"].update(maintenance_mode=role.maintenance_mode) + self.diff["after"].update(maintenance_mode=self.maintenance) def main(): @@ -724,21 +688,27 @@ def main(): argument_spec=dict( cluster=dict(required=True, aliases=["cluster_name"]), service=dict(required=True, aliases=["service_name"]), - role=dict(aliases=["role_name", "name"]), + name=dict(aliases=["role_name", "role"]), + type=dict(aliases=["role_type"]), cluster_hostname=dict(aliases=["cluster_host"]), cluster_host_id=dict(), maintenance=dict(type="bool", aliases=["maintenance_mode"]), + config=dict(type=dict, aliases=["parameters", "params"]), tags=dict(type=dict), + role_config_group=dict(), purge=dict(type="bool", default=False), - type=dict(), state=dict( default="present", choices=["present", "absent", "restarted", "started", "stopped"], ), ), mutually_exclusive=[ + ["type", "name"], ["cluster_hostname", "cluster_host_id"], ], + required_one_of=[ + ["type", "name"], + ], supports_check_mode=True, ) diff --git a/plugins/modules/service_role_config_group.py b/plugins/modules/service_role_config_group.py index 3de6dfb3..a4105fdd 100644 --- a/plugins/modules/service_role_config_group.py +++ b/plugins/modules/service_role_config_group.py @@ -92,9 +92,6 @@ requirements: - cm-client seealso: - - module: cloudera.cluster.cluster - - module: cloudera.cluster.service - - module: cloudera.cluster.service_role - module: cloudera.cluster.service_role_config_group_info """ @@ -201,7 +198,6 @@ ApiConfigList, ApiRoleConfigGroup, ApiRoleConfigGroupList, - ApiRoleNameList, ClustersResourceApi, RoleConfigGroupsResourceApi, ServicesResourceApi, diff --git a/plugins/modules/service_role_info.py b/plugins/modules/service_role_info.py index 9581a8bb..439e87ea 100644 --- a/plugins/modules/service_role_info.py +++ b/plugins/modules/service_role_info.py @@ -1,6 +1,7 @@ -# -*- coding: utf-8 -*- +#!/usr/bin/python +# # -*- coding: utf-8 -*- -# Copyright 2024 Cloudera, Inc. All Rights Reserved. +# Copyright 2025 Cloudera, Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,36 +15,13 @@ # See the License for the specific language governing permissions and # limitations under the License. -import json - -from ansible_collections.cloudera.cluster.plugins.module_utils.cm_utils import ( - ClouderaManagerModule, -) - -from ansible_collections.cloudera.cluster.plugins.module_utils.role_utils import ( - parse_role_result, -) - -from cm_client import ClustersResourceApi, RolesResourceApi, ServicesResourceApi -from cm_client.rest import ApiException - - -ANSIBLE_METADATA = { - "metadata_version": "1.1", - "status": ["preview"], - "supported_by": "community", -} - DOCUMENTATION = r""" ---- module: service_role_info short_description: Retrieve information about the service roles of cluster description: - - Gather information about service roles of a CDP cluster. + - Gather information about one or all service roles of a CDP cluster. 
author: - "Webster Mudge (@wmudge)" -requirements: - - cm_client options: cluster: description: @@ -103,10 +81,13 @@ extends_documentation_fragment: - cloudera.cluster.cm_options - cloudera.cluster.cm_endpoint +requirements: + - cm_client +seealso: + - module: cloudera.cluster.service_role """ EXAMPLES = r""" ---- - name: Gather details of the roles for the 'yarn' service cloudera.cluster.service_role_info: host: "example.cloudera.host" @@ -144,11 +125,11 @@ """ RETURN = r""" ---- roles: description: Details about the roles of cluster service. type: list elements: dict + returned: always contains: name: description: The cluster service role name. @@ -166,6 +147,10 @@ description: The unique ID of the cluster host. type: str returned: always + hostname: + description: The hostname of the cluster host. + type: str + returned: always service_name: description: The name of the cluster service, which uniquely identifies it in a cluster. type: str @@ -267,6 +252,21 @@ returned: when supported """ +import json + +from ansible_collections.cloudera.cluster.plugins.module_utils.cm_utils import ( + ClouderaManagerModule, +) + +from ansible_collections.cloudera.cluster.plugins.module_utils.role_utils import ( + parse_role_result, + read_role, + read_roles, +) + +from cm_client import ClustersResourceApi, ServicesResourceApi +from cm_client.rest import ApiException + class ClusterServiceRoleInfo(ClouderaManagerModule): def __init__(self, module): @@ -307,8 +307,6 @@ def process(self): else: raise ex - api_instance = RolesResourceApi(self.api_client) - if self.view == "healthcheck": self.view = "full_with_health_check_explanation" elif self.view == "redacted": @@ -318,44 +316,28 @@ def process(self): try: self.roles.append( parse_role_result( - api_instance.read_role( + read_role( + api_client=self.api_client, cluster_name=self.cluster, - role_name=self.role, service_name=self.service, - view=self.view, + role_name=self.role, ) ) ) except ApiException as e: if e.status != 404: raise e - elif self.type or self.cluster_hostname or self.cluster_host_id: - filter = ";".join( - [ - f"{f[0]}=={f[1]}" - for f in [ - ("type", self.type), - ("hostname", self.cluster_hostname), - ("hostId", self.cluster_host_id), - ] - if f[1] is not None - ] - ) - + else: self.roles = [ parse_role_result(s) - for s in api_instance.read_roles( + for s in read_roles( + api_client=self.api_client, cluster_name=self.cluster, service_name=self.service, view=self.view, - filter=filter, - ).items - ] - else: - self.roles = [ - parse_role_result(s) - for s in api_instance.read_roles( - cluster_name=self.cluster, service_name=self.service, view=self.view + type=self.type, + hostname=self.cluster_hostname, + host_id=self.cluster_host_id, ).items ] diff --git a/tests/unit/__init__.py b/tests/unit/__init__.py index b8c926a5..9608ef8a 100644 --- a/tests/unit/__init__.py +++ b/tests/unit/__init__.py @@ -26,18 +26,21 @@ ApiHostRef, ApiRole, ApiRoleConfigGroup, + ApiRoleConfigGroupList, ApiRoleList, ApiRoleNameList, ApiRoleState, ApiService, ApiServiceConfig, ApiServiceList, + ApiServiceState, ClustersResourceApi, CommandsResourceApi, MgmtRolesResourceApi, MgmtRoleCommandsResourceApi, MgmtRoleConfigGroupsResourceApi, RoleConfigGroupsResourceApi, + RolesResourceApi, ServicesResourceApi, ) from cm_client.rest import ApiException @@ -50,6 +53,14 @@ ) from ansible_collections.cloudera.cluster.plugins.module_utils.role_utils import ( get_mgmt_roles, + provision_service_role, + read_role, + read_roles, + toggle_role_maintenance, + toggle_role_state, 
+) +from ansible_collections.cloudera.cluster.plugins.module_utils.role_config_group_utils import ( + get_base_role_config_group, ) @@ -89,7 +100,7 @@ def wait_for_command( raise Exception(f"CM command [{command.id}] failed: {command.result_message}") -def provision_service( +def yield_service( api_client: ApiClient, cluster: ApiCluster, service_name: str, service_type: str ) -> Generator[ApiService]: """Provisions a new cluster service as a generator. @@ -130,6 +141,274 @@ def provision_service( api.delete_service(cluster_name=cluster.name, service_name=service_name) +def register_service( + api_client: ApiClient, + registry: list[ApiService], + cluster: ApiCluster, + service: ApiService, +) -> ApiService: + service_api = ServicesResourceApi(api_client) + cm_api = ClustersResourceApi(api_client) + + # Check the cluster hosts + hosts = [ + h + for i, h in enumerate(cm_api.list_hosts(cluster_name=cluster.name).items) + if i < 3 + ] + + if len(hosts) != 3: + raise Exception( + "Not enough available hosts to assign service roles; the cluster must have 3 or more hosts." + ) + + # Create the service + created_service = service_api.create_services( + cluster_name=cluster.name, body=ApiServiceList(items=[service]) + ).items[0] + + # Record the service + registry.append(created_service) + + # Execute first run initialization + first_run_cmd = service_api.first_run( + cluster_name=cluster.name, + service_name=created_service.name, + ) + wait_for_command(api_client, first_run_cmd) + + # Refresh the service + created_service = service_api.read_service( + cluster_name=cluster.name, service_name=created_service.name + ) + + # Establish the maintenance mode of the service + if service.maintenance_mode: + maintenance_cmd = service_api.enter_maintenance_mode( + cluster_name=cluster.name, service_name=created_service.name + ) + wait_for_command(api_client, maintenance_cmd) + created_service = service_api.read_service( + cluster_name=cluster.name, service_name=created_service.name + ) + + # Establish the state the of the service + if service.service_state and created_service.service_state != service.service_state: + if service.service_state == ApiServiceState.STOPPED: + stop_cmd = service_api.stop_command( + cluster_name=cluster.name, + service_name=created_service.name, + ) + wait_for_command(api_client, stop_cmd) + created_service = service_api.read_service( + cluster_name=cluster.name, service_name=created_service.name + ) + else: + raise Exception( + "Unsupported service state for fixture: " + service.service_state + ) + + # Return the provisioned service + return created_service + + +def deregister_service(api_client: ApiClient, registry: list[ApiService]) -> None: + service_api = ServicesResourceApi(api_client) + + # Delete the services + for s in registry: + service_api.delete_service( + cluster_name=s.cluster_ref.cluster_name, + service_name=s.name, + ) + + +def register_role( + api_client: ApiClient, registry: list[ApiRole], service: ApiService, role: ApiRole +) -> ApiRole: + # Create the role + created_role = provision_service_role( + api_client=api_client, + cluster_name=service.cluster_ref.cluster_name, + service_name=service.name, + role=role, + ) + + # Record the role + registry.append(created_role) + + # Establish the maintenance mode of the role + toggle_role_maintenance( + api_client=api_client, + role=created_role, + maintenance=role.maintenance_mode, + check_mode=False, + ) + + # Establish the state the of the role + toggle_role_state( + api_client=api_client, + role=created_role, + 
state="stopped" if role.role_state == ApiRoleState.STOPPED else "started", + check_mode=False, + ) + + # Return the provisioned role + return created_role + + +def deregister_role(api_client: ApiClient, registry: list[ApiRole]) -> None: + role_api = RolesResourceApi(api_client) + + # Delete the roles + for r in registry: + # Refresh the role state (and check for existence) + try: + refreshed_role = read_role( + api_client=api_client, + cluster_name=r.service_ref.cluster_name, + service_name=r.service_ref.service_name, + role_name=r.name, + ) + + toggle_role_state( + api_client=api_client, + role=refreshed_role, + state="stopped", + check_mode=False, + ) + + role_api.delete_role( + cluster_name=refreshed_role.service_ref.cluster_name, + service_name=refreshed_role.service_ref.service_name, + role_name=refreshed_role.name, + ) + except ApiException as e: + if e.status != 404: + raise e + + +def register_role_config_group( + api_client: ApiClient, + registry: list[ApiRoleConfigGroup], + service: ApiService, + role_config_group: ApiRoleConfigGroup, + message: str, +) -> ApiRoleConfigGroup: + rcg_api = RoleConfigGroupsResourceApi(api_client) + + # If creating a custom Role Config Group + if role_config_group.name is not None: + # Create the Role Config Group + created_rcg = rcg_api.create_role_config_groups( + cluster_name=service.cluster_ref.cluster_name, + service_name=service.name, + body=ApiRoleConfigGroupList(items=[role_config_group]), + ).items[0] + + # Record the Role Config Group + registry.append(created_rcg) + + # Return the Role Config Group + return created_rcg + + # Else modify the base Role Config Group + else: + # Look up the base + base_rcg = get_base_role_config_group( + api_client=api_client, + cluster_name=service.cluster_ref.cluster_name, + service_name=service.name, + role_type=role_config_group.role_type, + ) + + # Retrieve its current configuration + base_rcg.config = rcg_api.read_config( + cluster_name=service.cluster_ref.cluster_name, + service_name=service.name, + role_config_group_name=base_rcg.name, + ) + + # Record the state of the current base + registry.append(base_rcg) + + # Add the existing base name to the incoming changes + role_config_group.name = base_rcg.name + + # Update the configuration + updated_base_rcg = rcg_api.update_role_config_group( + cluster_name=service.cluster_ref.cluster_name, + service_name=service.name, + role_config_group_name=base_rcg.name, # Use the base RCG's name + message=f"{message}::set", + body=role_config_group, + ) + + # Return the updated base Role Config Group + return updated_base_rcg + + +def deregister_role_config_group( + api_client: ApiClient, registry: list[ApiRoleConfigGroup], message: str +) -> None: + rcg_api = RoleConfigGroupsResourceApi(api_client) + for rcg in registry: + # Delete the custom role config groups + if not rcg.base: + existing_roles = rcg_api.read_roles( + cluster_name=rcg.service_ref.cluster_name, + service_name=rcg.service_ref.service_name, + role_config_group_name=rcg.name, + ).items + + if existing_roles: + rcg_api.move_roles_to_base_group( + cluster_name=rcg.service_ref.cluster_name, + service_name=rcg.service_ref.service_name, + body=ApiRoleNameList([r.name for r in existing_roles]), + ) + + # The role might already be deleted, so ignore if not found + try: + rcg_api.delete_role_config_group( + cluster_name=rcg.service_ref.cluster_name, + service_name=rcg.service_ref.service_name, + role_config_group_name=rcg.name, + ) + except ApiException as ex: + if ex.status != 404: + raise ex + + # Reset the 
base Role Config Groups + else: + # Read the current base + current_rcg = rcg_api.read_role_config_group( + cluster_name=rcg.service_ref.cluster_name, + service_name=rcg.service_ref.service_name, + role_config_group_name=rcg.name, + ) + + # Revert the changes + config_revert = resolve_parameter_updates( + {c.name: c.value for c in current_rcg.config.items}, + {c.name: c.value for c in rcg.config.items}, + True, + ) + + if config_revert: + rcg.config = ApiConfigList( + items=[ApiConfig(name=k, value=v) for k, v in config_revert.items()] + ) + + rcg_api.update_role_config_group( + cluster_name=rcg.service_ref.cluster_name, + service_name=rcg.service_ref.service_name, + role_config_group_name=rcg.name, + message=f"{message}::reset", + body=rcg, + ) + + def service_wide_config( api_client: ApiClient, service: ApiService, params: dict, message: str ) -> Generator[ApiService]: @@ -500,3 +779,16 @@ def set_role_config_group( message=f"{message}::reset", body=role_config_group, ) + + +def read_expected_roles( + api_client: ApiClient, cluster_name: str, service_name: str +) -> list[ApiRole]: + return ( + RolesResourceApi(api_client) + .read_roles( + cluster_name=cluster_name, + service_name=service_name, + ) + .items + ) diff --git a/tests/unit/conftest.py b/tests/unit/conftest.py index 1831c044..54c374d6 100644 --- a/tests/unit/conftest.py +++ b/tests/unit/conftest.py @@ -83,6 +83,12 @@ AnsibleFailJson, AnsibleExitJson, provision_cm_role, + deregister_role, + register_role, + deregister_service, + register_service, + deregister_role_config_group, + register_role_config_group, set_cm_role_config, set_cm_role_config_group, set_role_config_group, @@ -292,7 +298,7 @@ def base_cluster(cm_api_client, cms_session) -> Generator[ApiCluster]: """Provision a Cloudera on premise base cluster for the session. If the variable 'CM_CLUSTER' is present, will attempt to read and yield a reference to this cluster. Otherwise, will yield a new base cluster - with a single host, deleting the cluster once completed. + with three hosts, deleting the cluster once completed. 
Args: cm_api_client (ApiClient): CM API client @@ -332,23 +338,28 @@ def base_cluster(cm_api_client, cms_session) -> Generator[ApiCluster]: # Create the initial cluster config = ApiCluster( name=name, + display_name=f"Base ({name})", full_version=cdh_version, ) cluster_api.create_clusters(body=ApiClusterList(items=[config])) - # Get first free host and assign to the cluster - all_hosts = host_api.read_hosts() - host = next((h for h in all_hosts.items if not h.cluster_ref), None) + # Get three free hosts and assign to the cluster + hosts = [h for h in host_api.read_hosts().items if not h.cluster_ref] - if host is None: + if len(hosts) < 3: # Roll back the cluster and then raise an error cluster_api.delete_cluster(cluster_name=name) - raise Exception("No available hosts to allocate to new cluster") + + raise NoHostsFoundException( + "Not enough available hosts to assign to base cluster" + ) else: cluster_api.add_hosts( cluster_name=name, - body=ApiHostRefList(items=[ApiHostRef(host_id=host.host_id)]), + body=ApiHostRefList( + items=[ApiHostRef(host_id=h.host_id) for h in hosts[:3]] + ), ) # Find the first CDH parcel version and activate it @@ -387,6 +398,7 @@ def base_cluster(cm_api_client, cms_session) -> Generator[ApiCluster]: raise Exception(str(ae)) +# TODO Replace with new service factory fixture @pytest.fixture(scope="function") def zk_function(cm_api_client, base_cluster, request) -> Generator[ApiService]: """Create a new ZooKeeper service on the provided base cluster. @@ -448,6 +460,7 @@ def zk_function(cm_api_client, base_cluster, request) -> Generator[ApiService]: ) +# TODO Replace with new service factory fixture @pytest.fixture(scope="session") def zk_session(cm_api_client, base_cluster) -> Generator[ApiService]: """Create a new ZooKeeper service on the provided base cluster. @@ -465,13 +478,15 @@ def zk_session(cm_api_client, base_cluster) -> Generator[ApiService]: service_api = ServicesResourceApi(cm_api_client) cm_api = ClustersResourceApi(cm_api_client) - host = next( - (h for h in cm_api.list_hosts(cluster_name=base_cluster.name).items), None - ) + hosts = [ + h + for i, h in enumerate(cm_api.list_hosts(cluster_name=base_cluster.name).items) + if i < 3 + ] - if host is None: + if len(hosts) != 3: raise NoHostsFoundException( - "No available hosts to assign ZooKeeper service roles" + "Not enough available hosts to assign ZooKeeper service roles" ) payload = ApiService( @@ -480,7 +495,11 @@ def zk_session(cm_api_client, base_cluster) -> Generator[ApiService]: roles=[ ApiRole( type="SERVER", - host_ref=ApiHostRef(host.host_id, host.hostname), + host_ref=ApiHostRef(hosts[0].host_id, hosts[0].hostname), + ), + ApiRole( + type="SERVER", + host_ref=ApiHostRef(hosts[1].host_id, hosts[1].hostname), ), ], ) @@ -508,6 +527,7 @@ def zk_session(cm_api_client, base_cluster) -> Generator[ApiService]: ) +# TODO Convert into a service factory fixture @pytest.fixture(scope="session") def cms(cm_api_client: ApiClient, request) -> Generator[ApiService]: """Provisions Cloudera Manager Service. If the Cloudera Manager Service @@ -995,6 +1015,80 @@ def zk_role_config_group( ) +@pytest.fixture(scope="function") +def role_config_group_wrapper( + cm_api_client, request +) -> Callable[[ApiService, ApiRoleConfigGroup], Generator[ApiRoleConfigGroup]]: + """ + Returns a function that will create a role config group on the selected service, + yield the role config group, and then when the role config group is no longer in scope, + destroys the role config group. 
+    """
+
+    def wrapper(
+        service: ApiService, role_config_group: ApiRoleConfigGroup
+    ) -> Generator[ApiRoleConfigGroup]:
+        rcg_api = RoleConfigGroupsResourceApi(cm_api_client)
+        wrapped_rcg = None
+        if role_config_group.name is not None:
+            # If it exists, update it
+            try:
+                wrapped_rcg = rcg_api.read_role_config_group(
+                    cluster_name=service.cluster_ref.cluster_name,
+                    service_name=service.name,
+                    role_config_group_name=role_config_group.name,
+                )
+            except ApiException as ex:
+                if ex.status != 404:
+                    raise ex
+
+            # If it doesn't exist, create, yield, and destroy
+            if wrapped_rcg is None:
+                wrapped_rcg = rcg_api.create_role_config_groups(
+                    cluster_name=service.cluster_ref.cluster_name,
+                    service_name=service.name,
+                    body=ApiRoleConfigGroupList(items=[role_config_group]),
+                ).items[0]
+
+                yield wrapped_rcg
+
+                try:
+                    rcg_api.delete_role_config_group(
+                        cluster_name=service.cluster_ref.cluster_name,
+                        service_name=service.name,
+                        role_config_group_name=wrapped_rcg.name,
+                    )
+                except ApiException as ex:
+                    if ex.status != 404:
+                        raise ex
+
+                return
+        else:
+            wrapped_rcg = get_base_role_config_group(
+                api_client=cm_api_client,
+                cluster_name=service.cluster_ref.cluster_name,
+                service_name=service.name,
+                role_type=role_config_group.role_type,
+            )
+
+        wrapped_rcg.config = rcg_api.read_config(
+            cluster_name=service.cluster_ref.cluster_name,
+            service_name=service.name,
+            role_config_group_name=wrapped_rcg.name,
+        )
+
+        yield from set_role_config_group(
+            api_client=cm_api_client,
+            cluster_name=service.cluster_ref.cluster_name,
+            service_name=service.name,
+            role_config_group=wrapped_rcg,
+            update=role_config_group,
+            message=f"{Path(request.node.parent.name).stem}::{request.node.name}",
+        )
+
+    return wrapper
+
+
 def handle_commands(api_client: ApiClient, commands: ApiBulkCommandList):
     if commands.errors:
         error_msg = "\n".join(commands.errors)
@@ -1017,3 +1111,155 @@ def monitor_command(
     command = CommandsResourceApi(api_client).read_command(command.id)
     if not command.success:
         raise Exception(command.result_message)
+
+
+# @pytest.fixture(scope="module")
+# def test_service(cm_api_client) -> Generator[Callable[[ApiCluster, ApiService], ApiService]]:
+#     service_api = ServicesResourceApi(cm_api_client)
+#     cm_api = ClustersResourceApi(cm_api_client)
+
+#     services = []
+
+#     # Consider returning a class with basic functions like initialize?
+#     def _provision_service(cluster: ApiCluster, service: ApiService) -> ApiService:
+#         # Check the cluster hosts
+#         hosts = [
+#             h
+#             for i, h in enumerate(cm_api.list_hosts(cluster_name=cluster.name).items)
+#             if i < 3
+#         ]
+
+#         if len(hosts) != 3:
+#             raise Exception(
+#                 "Not enough available hosts to assign service roles; the cluster must have 3 or more hosts."
+# ) + +# # Create the service +# created_service = service_api.create_services( +# cluster_name=cluster.name, body=ApiServiceList(items=[service]) +# ).items[0] + +# # Record the service +# services.append(created_service) + +# # Start the service +# first_run_cmd = service_api.first_run( +# cluster_name=cluster.name, +# service_name=created_service.name, +# ) +# wait_for_command(cm_api_client, first_run_cmd) + +# # Refresh the service +# created_service = service_api.read_service( +# cluster_name=cluster.name, service_name=created_service.name +# ) + +# # Establish the maintenance mode of the service +# if service.maintenance_mode: +# maintenance_cmd = service_api.enter_maintenance_mode( +# cluster_name=cluster.name, +# service_name=created_service.name +# ) +# wait_for_command(cm_api_client, maintenance_cmd) +# created_service = service_api.read_service( +# cluster_name=cluster.name, service_name=created_service.name +# ) + +# # Establish the state the of the service +# if created_service.service_state != service.service_state: +# if service.service_state == ApiServiceState.STOPPED: +# stop_cmd = service_api.stop_command( +# cluster_name=cluster.name, +# service_name=created_service.name, +# ) +# wait_for_command(cm_api_client, stop_cmd) +# created_service = service_api.read_service( +# cluster_name=cluster.name, service_name=created_service.name +# ) +# else: +# raise Exception("Unsupported service state for fixture: " + service.service_state) + +# # Return the provisioned service +# return created_service + +# # Yield the service to the tests +# yield _provision_service + +# # Delete the services +# for s in services: +# service_api.delete_service( +# cluster_name=s.cluster_ref.cluster_name, +# service_name=s.name, +# ) + + +@pytest.fixture(scope="module") +def service_factory( + cm_api_client, +) -> Generator[Callable[[ApiCluster, ApiService], ApiService]]: + # Track the created services + services = list[ApiService]() + + # Yield the service factory function to the tests + def _wrapper(cluster: ApiCluster, service: ApiService) -> ApiService: + return register_service( + api_client=cm_api_client, + registry=services, + cluster=cluster, + service=service, + ) + + yield _wrapper + + # Delete the registered services + deregister_service(api_client=cm_api_client, registry=services) + + +@pytest.fixture(scope="function") +def role_factory(cm_api_client) -> Generator[Callable[[ApiService, ApiRole], ApiRole]]: + # Track the created roles + roles = list[ApiRole]() + + # Yield the role factory function to the tests + def _wrapper(service: ApiService, role: ApiRole) -> ApiRole: + return register_role( + api_client=cm_api_client, + registry=roles, + service=service, + role=role, + ) + + yield _wrapper + + # Delete any registered roles + deregister_role(api_client=cm_api_client, registry=roles) + + +@pytest.fixture(scope="function") +def role_config_group_factory( + cm_api_client, request +) -> Generator[Callable[[ApiService, ApiRoleConfigGroup], ApiRoleConfigGroup]]: + # Track the created or updated role config groups + role_config_groups = list[ApiRoleConfigGroup]() + + # Create the message + message = f"{Path(request.node.parent.name).stem}::{request.node.name}" + + # Yield the role factory function to the tests + def _wrapper( + service: ApiService, role_config_group: ApiRoleConfigGroup + ) -> ApiRoleConfigGroup: + return register_role_config_group( + api_client=cm_api_client, + registry=role_config_groups, + service=service, + role_config_group=role_config_group, + message=message, + ) + + 
yield _wrapper + + # Delete any registered role config groups + deregister_role_config_group( + api_client=cm_api_client, registry=role_config_groups, message=message + ) diff --git a/tests/unit/plugins/modules/service_config/test_service_config.py b/tests/unit/plugins/modules/service_config/test_service_config.py index cf767d40..b4368d6b 100644 --- a/tests/unit/plugins/modules/service_config/test_service_config.py +++ b/tests/unit/plugins/modules/service_config/test_service_config.py @@ -42,7 +42,7 @@ AnsibleExitJson, AnsibleFailJson, wait_for_command, - provision_service, + yield_service, service_wide_config, ) @@ -63,7 +63,7 @@ def zk_service(cm_api_client, base_cluster, request): + "_" + "".join(random.choices(string.ascii_lowercase, k=6)) ) - yield from provision_service( + yield from yield_service( api_client=cm_api_client, cluster=base_cluster, service_name=name, diff --git a/tests/unit/plugins/modules/service_role/test_service_role.py b/tests/unit/plugins/modules/service_role/test_service_role.py index 29378afc..5fa7dc15 100644 --- a/tests/unit/plugins/modules/service_role/test_service_role.py +++ b/tests/unit/plugins/modules/service_role/test_service_role.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Cloudera, Inc. All Rights Reserved. +# Copyright 2025 Cloudera, Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -19,437 +19,1120 @@ __metaclass__ = type import logging -import os import pytest +from pathlib import Path + +from cm_client import ( + ApiClient, + ApiConfig, + ApiConfigList, + ApiEntityTag, + ApiHostRef, + ApiRole, + ApiRoleConfigGroup, + ApiRoleNameList, + ApiRoleState, + ApiService, + RoleConfigGroupsResourceApi, + RolesResourceApi, + RoleCommandsResourceApi, +) + from ansible.module_utils.common.dict_transformations import recursive_diff from ansible_collections.cloudera.cluster.plugins.modules import service_role +from ansible_collections.cloudera.cluster.plugins.module_utils.cm_utils import ( + wait_bulk_commands, +) +from ansible_collections.cloudera.cluster.plugins.module_utils.cluster_utils import ( + get_cluster_hosts, +) +from ansible_collections.cloudera.cluster.plugins.module_utils.service_utils import ( + get_service_hosts, +) +from ansible_collections.cloudera.cluster.plugins.module_utils.role_utils import ( + create_role, + read_roles, +) from ansible_collections.cloudera.cluster.tests.unit import ( AnsibleExitJson, AnsibleFailJson, + deregister_service, + register_service, + deregister_role, + register_role, + deregister_role_config_group, + register_role_config_group, ) LOG = logging.getLogger(__name__) -@pytest.fixture -def conn(): - conn = dict(username=os.getenv("CM_USERNAME"), password=os.getenv("CM_PASSWORD")) +def gather_server_roles(api_client: ApiClient, service: ApiService) -> list[ApiRole]: + return read_roles( + api_client=api_client, + cluster_name=service.cluster_ref.cluster_name, + service_name=service.name, + type="SERVER", + ).items - if os.getenv("CM_HOST", None): - conn.update(host=os.getenv("CM_HOST")) - if os.getenv("CM_PORT", None): - conn.update(port=os.getenv("CM_PORT")) +@pytest.fixture(scope="module") +def zookeeper(cm_api_client, base_cluster, request): + # Keep track of the provisioned service(s) + service_registry = list[ApiService]() - if os.getenv("CM_ENDPOINT", None): - conn.update(url=os.getenv("CM_ENDPOINT")) + # Get the current cluster hosts + hosts = get_cluster_hosts(cm_api_client, 
base_cluster) - if os.getenv("CM_PROXY", None): - conn.update(proxy=os.getenv("CM_PROXY")) + id = Path(request.node.parent.name).stem - return { - **conn, - "verify_tls": "no", - "debug": "no", - } + zk_service = ApiService( + name=f"test-zk-{id}", + type="ZOOKEEPER", + display_name=f"ZooKeeper ({id})", + # Add a SERVER role (so we can start the service -- a ZK requirement!) + roles=[ApiRole(type="SERVER", host_ref=ApiHostRef(hosts[0].host_id))], + ) + # Provision and yield the created service + yield register_service( + api_client=cm_api_client, + registry=service_registry, + cluster=base_cluster, + service=zk_service, + ) -def test_missing_required(conn, module_args): - module_args(conn) + # Remove the created service + deregister_service(api_client=cm_api_client, registry=service_registry) - with pytest.raises(AnsibleFailJson, match="cluster, role, service"): - service_role.main() +@pytest.fixture() +def server_role(cm_api_client, zookeeper): + # Keep track of the provisioned role(s) + role_registry = list[ApiRole]() -def test_missing_service(conn, module_args): - conn.update(service="example") - module_args(conn) + existing_role_instances = [ + r.host_ref.hostname for r in gather_server_roles(cm_api_client, zookeeper) + ] - with pytest.raises(AnsibleFailJson, match="cluster, role"): - service_role.main() + hosts = [ + h + for h in get_service_hosts(cm_api_client, zookeeper) + if h.hostname not in existing_role_instances + ] + second_role = create_role( + api_client=cm_api_client, + role_type="SERVER", + hostname=hosts[0].hostname, + cluster_name=zookeeper.cluster_ref.cluster_name, + service_name=zookeeper.name, + ) -def test_missing_cluster(conn, module_args): - conn.update(cluster="example") - module_args(conn) + yield register_role( + api_client=cm_api_client, + registry=role_registry, + service=zookeeper, + role=second_role, + ) - with pytest.raises(AnsibleFailJson, match="role, service"): - service_role.main() + deregister_role(api_client=cm_api_client, registry=role_registry) -def test_missing_role(conn, module_args): - conn.update(role="example") - module_args(conn) +@pytest.fixture() +def server_role_reset(cm_api_client, zookeeper): + # Keep track of the existing SERVER roles + initial_roles = set([r.name for r in gather_server_roles(cm_api_client, zookeeper)]) - with pytest.raises(AnsibleFailJson, match="cluster, service"): - service_role.main() + # Yield to the test + yield + # Remove any added roles + roles_to_remove = [ + r + for r in gather_server_roles(cm_api_client, zookeeper) + if r.name not in initial_roles + ] + deregister_role(cm_api_client, roles_to_remove) -def test_present_invalid_cluster(conn, module_args): - conn.update(cluster="example", service="example", role="example") - module_args(conn) - with pytest.raises(AnsibleFailJson, match="Cluster does not exist"): - service_role.main() +class TestServiceRoleArgSpec: + def test_service_role_missing_required(self, conn, module_args): + module_args(conn) + with pytest.raises(AnsibleFailJson, match="cluster, service"): + service_role.main() -def test_present_invalid_service(conn, module_args): - conn.update( - cluster=os.getenv("CM_CLUSTER"), - service="example", - role="example", - ) - module_args(conn) + def test_service_role_missing_one_of(self, conn, module_args): + module_args( + { + **conn, + "cluster": "cluster", + "service": "service", + } + ) - with pytest.raises(AnsibleFailJson, match="Service does not exist"): - service_role.main() + with pytest.raises(AnsibleFailJson, match="type, name"): + 
service_role.main() + + def test_service_role_missing_required_by_type(self, conn, module_args): + module_args( + { + **conn, + "cluster": "cluster", + "service": "service", + "type": "type", + } + ) + with pytest.raises(AnsibleFailJson, match="cluster_hostname, cluster_host_id"): + service_role.main() + + def test_service_role_missing_required_by_type_exclusives(self, conn, module_args): + module_args( + { + **conn, + "cluster": "cluster", + "service": "service", + "type": "type", + "cluster_hostname": "hostname", + "cluster_host_id": "host_id", + } + ) -def test_present_create_missing_all_requirements(conn, module_args): - conn.update( - cluster=os.getenv("CM_CLUSTER"), - service=os.getenv("CM_SERVICE"), - role=os.getenv("CM_ROLE"), - ) - module_args(conn) + with pytest.raises( + AnsibleFailJson, + match="mutually exclusive: cluster_hostname\|cluster_host_id", + ): + service_role.main() + + +class TestServiceRoleInvalidParams: + def test_service_role_invalid_cluster(self, conn, module_args): + module_args( + { + **conn, + "cluster": "example", + "service": "example", + "type": "type", + "cluster_hostname": "hostname", + } + ) + + with pytest.raises(AnsibleFailJson, match="Cluster does not exist"): + service_role.main() - with pytest.raises( - AnsibleFailJson, - match="missing required arguments: cluster_host_id, cluster_hostname, type", + def test_service_role_invalid_service( + self, conn, module_args, cm_api_client, zookeeper ): - service_role.main() + expected_roles = gather_server_roles( + api_client=cm_api_client, + service=zookeeper, + ) + module_args( + { + **conn, + "cluster": zookeeper.cluster_ref.cluster_name, + "service": "example", + "type": expected_roles[0].type, + "cluster_hostname": expected_roles[0].host_ref.hostname, + } + ) -def test_present_create_missing_host(conn, module_args): - conn.update( - cluster=os.getenv("CM_CLUSTER"), - service=os.getenv("CM_SERVICE"), - role=os.getenv("CM_ROLE"), - cluster_hostname=os.getenv("CM_CLUSTER_HOSTNAME"), - ) - module_args(conn) + with pytest.raises(AnsibleFailJson, match="Service does not exist"): + service_role.main() - with pytest.raises(AnsibleFailJson, match="missing required arguments: type"): - service_role.main() + def test_service_role_invalid_type( + self, conn, module_args, cm_api_client, zookeeper + ): + expected_roles = gather_server_roles( + api_client=cm_api_client, + service=zookeeper, + ) + module_args( + { + **conn, + "cluster": zookeeper.cluster_ref.cluster_name, + "service": zookeeper.name, + "type": "example", + "cluster_hostname": expected_roles[0].host_ref.hostname, + } + ) -def test_present_create_missing_type(conn, module_args): - conn.update( - cluster=os.getenv("CM_CLUSTER"), - service=os.getenv("CM_SERVICE"), - role=os.getenv("CM_ROLE"), - type=os.getenv("CM_ROLE_TYPE"), - ) - module_args(conn) + with pytest.raises( + AnsibleFailJson, + match="Base role config group of type EXAMPLE not found in service", + ): + service_role.main() - with pytest.raises( - AnsibleFailJson, - match="missing required arguments: cluster_host_id, cluster_hostname", + def test_service_role_invalid_host( + self, conn, module_args, cm_api_client, zookeeper ): - service_role.main() + expected_roles = gather_server_roles( + api_client=cm_api_client, + service=zookeeper, + ) + module_args( + { + **conn, + "cluster": zookeeper.cluster_ref.cluster_name, + "service": zookeeper.name, + "type": expected_roles[0].type, + "cluster_hostname": "example", + } + ) -def test_role(conn, module_args): - conn.update( - 
cluster=os.getenv("CM_CLUSTER"), - service=os.getenv("CM_SERVICE"), - role=os.getenv("CM_ROLE"), - type=os.getenv("CM_ROLE_TYPE"), - cluster_hostname=os.getenv("CM_CLUSTER_HOSTNAME"), - ) - module_args(conn) + with pytest.raises(AnsibleFailJson, match="Host not found"): + service_role.main() + + def test_service_role_invalid_role_name(self, conn, module_args, zookeeper): + module_args( + { + **conn, + "cluster": zookeeper.cluster_ref.cluster_name, + "service": zookeeper.name, + "name": "example", + } + ) - with pytest.raises(AnsibleExitJson) as e: - service_role.main() + with pytest.raises(AnsibleExitJson) as e: + service_role.main() - assert e.value.changed == True + assert not e.value.role - # with pytest.raises(AnsibleExitJson) as e: - # cluster_service_role.main() - # assert e.value.changed == False +class TestServiceRoleProvision: + def test_service_role_provision_hostname( + self, conn, module_args, cm_api_client, zookeeper, server_role_reset + ): + existing_role_instances = [ + r.host_ref.hostname + for r in gather_server_roles( + api_client=cm_api_client, + service=zookeeper, + ) + ] + + hosts = [ + h + for h in get_service_hosts(cm_api_client, zookeeper) + if h.hostname not in existing_role_instances + ] + + module_args( + { + **conn, + "cluster": zookeeper.cluster_ref.cluster_name, + "service": zookeeper.name, + "type": "SERVER", + "cluster_hostname": hosts[0].hostname, + "state": "present", + } + ) + with pytest.raises(AnsibleExitJson) as e: + service_role.main() -def test_role_generated_name(conn, module_args): - conn.update( - cluster=os.getenv("CM_CLUSTER"), - service=os.getenv("CM_SERVICE"), - type=os.getenv("CM_ROLE_TYPE"), - cluster_hostname=os.getenv("CM_CLUSTER_HOSTNAME"), - ) - module_args(conn) + assert e.value.role["type"] == "SERVER" + assert e.value.role["hostname"] == hosts[0].hostname + assert e.value.role["role_state"] == ApiRoleState.STOPPED - with pytest.raises(AnsibleExitJson) as e: - service_role.main() + def test_service_role_provision_host_id( + self, conn, module_args, cm_api_client, zookeeper, server_role_reset + ): + existing_role_instances = [ + r.host_ref.hostname + for r in gather_server_roles( + api_client=cm_api_client, + service=zookeeper, + ) + ] + + hosts = [ + h + for h in get_service_hosts(cm_api_client, zookeeper) + if h.hostname not in existing_role_instances + ] + + module_args( + { + **conn, + "cluster": zookeeper.cluster_ref.cluster_name, + "service": zookeeper.name, + "type": "SERVER", + "cluster_host_id": hosts[0].host_id, + "state": "present", + } + ) - assert e.value.changed == True + with pytest.raises(AnsibleExitJson) as e: + service_role.main() + assert e.value.role["type"] == "SERVER" + assert e.value.role["host_id"] == hosts[0].host_id + assert e.value.role["role_state"] == ApiRoleState.STOPPED -def test_role_with_tags(conn, module_args): - conn.update( - cluster=os.getenv("CM_CLUSTER"), - service=os.getenv("CM_SERVICE"), - role=os.getenv("CM_ROLE"), - type=os.getenv("CM_ROLE_TYPE"), - cluster_hostname=os.getenv("CM_CLUSTER_HOSTNAME"), - tags=dict(foo="test"), - ) - module_args(conn) + def test_service_role_provision_config( + self, conn, module_args, cm_api_client, zookeeper, server_role_reset + ): + existing_role_instances = [ + r.host_ref.hostname + for r in gather_server_roles( + api_client=cm_api_client, + service=zookeeper, + ) + ] + + hosts = [ + h + for h in get_service_hosts(cm_api_client, zookeeper) + if h.hostname not in existing_role_instances + ] + + module_args( + { + **conn, + "cluster": 
zookeeper.cluster_ref.cluster_name, + "service": zookeeper.name, + "type": "SERVER", + "cluster_hostname": hosts[0].hostname, + "config": { + "minSessionTimeout": 4500, + }, + "state": "present", + } + ) - with pytest.raises(AnsibleExitJson) as e: - service_role.main() + with pytest.raises(AnsibleExitJson) as e: + service_role.main() + + assert e.value.role["type"] == "SERVER" + assert e.value.role["hostname"] == hosts[0].hostname + assert e.value.role["role_state"] == ApiRoleState.STOPPED + assert e.value.role["config"]["minSessionTimeout"] == "4500" + + def test_service_role_provision_role_config_group( + self, + conn, + module_args, + cm_api_client, + zookeeper, + role_config_group_factory, + server_role_reset, + request, + ): + id = Path(request.node.parent.name).stem + + rcg = role_config_group_factory( + service=zookeeper, + role_config_group=ApiRoleConfigGroup( + name=f"pytest-{id}", + role_type="SERVER", + config=ApiConfigList(items=[ApiConfig("minSessionTimeout", "4501")]), + display_name=f"Pytest ({id})", + ), + ) - assert e.value.changed == True + existing_role_instances = [ + r.host_ref.hostname + for r in gather_server_roles( + api_client=cm_api_client, + service=zookeeper, + ) + ] + + hosts = [ + h + for h in get_service_hosts(cm_api_client, zookeeper) + if h.hostname not in existing_role_instances + ] + + module_args( + { + **conn, + "cluster": zookeeper.cluster_ref.cluster_name, + "service": zookeeper.name, + "type": rcg.role_type, + "cluster_hostname": hosts[0].hostname, + "role_config_group": rcg.name, + "state": "present", + } + ) + with pytest.raises(AnsibleExitJson) as e: + service_role.main() -def test_role_with_maintenance(conn, module_args): - conn.update( - cluster=os.getenv("CM_CLUSTER"), - service=os.getenv("CM_SERVICE"), - role=os.getenv("CM_ROLE"), - type=os.getenv("CM_ROLE_TYPE"), - cluster_hostname=os.getenv("CM_CLUSTER_HOSTNAME"), - maintenance=True, - ) - module_args(conn) + assert e.value.role["type"] == "SERVER" + assert e.value.role["hostname"] == hosts[0].hostname + assert e.value.role["role_state"] == ApiRoleState.STOPPED + assert e.value.role["role_config_group_name"] == rcg.name + assert e.value.role["config"]["minSessionTimeout"] == "4501" - with pytest.raises(AnsibleExitJson) as e: - service_role.main() + def test_service_role_provision_tags( + self, conn, module_args, cm_api_client, zookeeper, server_role_reset + ): + existing_role_instances = [ + r.host_ref.hostname + for r in gather_server_roles( + api_client=cm_api_client, + service=zookeeper, + ) + ] + + hosts = [ + h + for h in get_service_hosts(cm_api_client, zookeeper) + if h.hostname not in existing_role_instances + ] + + module_args( + { + **conn, + "cluster": zookeeper.cluster_ref.cluster_name, + "service": zookeeper.name, + "type": "SERVER", + "cluster_hostname": hosts[0].hostname, + "tags": { + "pytest": "success", + }, + "state": "present", + } + ) - assert e.value.changed == True + with pytest.raises(AnsibleExitJson) as e: + service_role.main() + assert e.value.role["type"] == "SERVER" + assert e.value.role["hostname"] == hosts[0].hostname + assert e.value.role["role_state"] == ApiRoleState.STOPPED + assert e.value.role["tags"]["pytest"] == "success" -def test_role_started(conn, module_args): - conn.update( - cluster=os.getenv("CM_CLUSTER"), - service=os.getenv("CM_SERVICE"), - role=os.getenv("CM_ROLE"), - type=os.getenv("CM_ROLE_TYPE"), - cluster_hostname=os.getenv("CM_CLUSTER_HOSTNAME"), - state="started", - ) - module_args(conn) + def 
test_service_role_provision_enable_maintenance( + self, conn, module_args, cm_api_client, zookeeper, server_role_reset + ): + existing_role_instances = [ + r.host_ref.hostname + for r in gather_server_roles( + api_client=cm_api_client, + service=zookeeper, + ) + ] + + hosts = [ + h + for h in get_service_hosts(cm_api_client, zookeeper) + if h.hostname not in existing_role_instances + ] + + module_args( + { + **conn, + "cluster": zookeeper.cluster_ref.cluster_name, + "service": zookeeper.name, + "type": "SERVER", + "cluster_hostname": hosts[0].hostname, + "maintenance": True, + "state": "present", + } + ) - with pytest.raises(AnsibleExitJson) as e: - service_role.main() + with pytest.raises(AnsibleExitJson) as e: + service_role.main() - assert e.value.changed == True + assert e.value.role["type"] == "SERVER" + assert e.value.role["hostname"] == hosts[0].hostname + assert e.value.role["role_state"] == ApiRoleState.STOPPED + assert e.value.role["maintenance_mode"] == True + def test_service_role_provision_state_start( + self, conn, module_args, cm_api_client, zookeeper, server_role_reset + ): + existing_role_instances = [ + r.host_ref.hostname + for r in gather_server_roles( + api_client=cm_api_client, + service=zookeeper, + ) + ] + + hosts = [ + h + for h in get_service_hosts(cm_api_client, zookeeper) + if h.hostname not in existing_role_instances + ] + + module_args( + { + **conn, + "cluster": zookeeper.cluster_ref.cluster_name, + "service": zookeeper.name, + "type": "SERVER", + "cluster_hostname": hosts[0].hostname, + "state": "started", + } + ) -def test_role_type_update(conn, module_args): - conn.update( - cluster=os.getenv("CM_CLUSTER"), - service=os.getenv("CM_SERVICE"), - role=os.getenv("CM_ROLE"), - type="HTTPFS", - ) - module_args(conn) + with pytest.raises(AnsibleExitJson) as e: + service_role.main() - with pytest.raises(AnsibleExitJson) as e: - service_role.main() + assert e.value.role["type"] == "SERVER" + assert e.value.role["hostname"] == hosts[0].hostname + assert e.value.role["role_state"] == ApiRoleState.STARTED - assert e.value.changed == True + def test_service_role_provision_state_stopped( + self, conn, module_args, cm_api_client, zookeeper, server_role_reset + ): + existing_role_instances = [ + r.host_ref.hostname + for r in gather_server_roles( + api_client=cm_api_client, + service=zookeeper, + ) + ] + + hosts = [ + h + for h in get_service_hosts(cm_api_client, zookeeper) + if h.hostname not in existing_role_instances + ] + + module_args( + { + **conn, + "cluster": zookeeper.cluster_ref.cluster_name, + "service": zookeeper.name, + "type": "SERVER", + "cluster_hostname": hosts[0].hostname, + "state": "stopped", + } + ) - with pytest.raises(AnsibleExitJson) as e: - service_role.main() + with pytest.raises(AnsibleExitJson) as e: + service_role.main() - assert e.value.changed == False + assert e.value.role["type"] == "SERVER" + assert e.value.role["hostname"] == hosts[0].hostname + assert e.value.role["role_state"] == ApiRoleState.STOPPED + def test_service_role_provision_state_restarted( + self, conn, module_args, cm_api_client, zookeeper, server_role_reset + ): + existing_role_instances = [ + r.host_ref.hostname + for r in gather_server_roles( + api_client=cm_api_client, + service=zookeeper, + ) + ] + + hosts = [ + h + for h in get_service_hosts(cm_api_client, zookeeper) + if h.hostname not in existing_role_instances + ] + + module_args( + { + **conn, + "cluster": zookeeper.cluster_ref.cluster_name, + "service": zookeeper.name, + "type": "SERVER", + "cluster_hostname": 
hosts[0].hostname, + "state": "restarted", + } + ) -def test_role_maintenance_mode(conn, module_args): - conn.update( - cluster=os.getenv("CM_CLUSTER"), - service=os.getenv("CM_SERVICE"), - role=os.getenv("CM_ROLE"), - maintenance="yes", - ) - module_args(conn) + with pytest.raises(AnsibleExitJson) as e: + service_role.main() + + assert e.value.role["type"] == "SERVER" + assert e.value.role["hostname"] == hosts[0].hostname + assert e.value.role["role_state"] == ApiRoleState.STARTED + + +class TestServiceRoleModification: + @pytest.fixture() + def updated_server_role_config(self, cm_api_client, server_role): + RolesResourceApi(cm_api_client).update_role_config( + cluster_name=server_role.service_ref.cluster_name, + service_name=server_role.service_ref.service_name, + role_name=server_role.name, + body=ApiConfigList( + items=[ + ApiConfig( + "minSessionTimeout", + 5000, + ) + ] + ), + ) + return server_role + + @pytest.fixture() + def updated_server_role_tags(self, cm_api_client, server_role): + RolesResourceApi(cm_api_client).add_tags( + cluster_name=server_role.service_ref.cluster_name, + service_name=server_role.service_ref.service_name, + role_name=server_role.name, + body=[ApiEntityTag("existing", "tag")], + ) + return server_role + + @pytest.fixture() + def stopped_server_role(self, cm_api_client, server_role): + stop_cmds = RoleCommandsResourceApi(cm_api_client).stop_command( + cluster_name=server_role.service_ref.cluster_name, + service_name=server_role.service_ref.service_name, + body=ApiRoleNameList(items=[server_role.name]), + ) + wait_bulk_commands( + api_client=cm_api_client, + commands=stop_cmds, + ) + return server_role + + @pytest.fixture() + def custom_rcg_server_role(self, cm_api_client, zookeeper, request): + id = Path(request.node.name).stem + + role_config_groups = list[ApiRoleConfigGroup]() + + yield register_role_config_group( + api_client=cm_api_client, + registry=role_config_groups, + service=zookeeper, + role_config_group=ApiRoleConfigGroup( + name=f"pytest-{id}", + role_type="SERVER", + config=ApiConfigList(items=[ApiConfig("minSessionTimeout", "4501")]), + display_name=f"Pytest ({id})", + ), + message=f"{Path(request.node.parent.name).stem}::{request.node.name}", + ) - with pytest.raises(AnsibleExitJson) as e: - service_role.main() + deregister_role_config_group( + api_client=cm_api_client, + registry=role_config_groups, + message=f"{Path(request.node.parent.name).stem}::{request.node.name}", + ) - assert e.value.role["maintenance_mode"] == True + @pytest.fixture() + def updated_server_role_rcg( + self, cm_api_client, server_role, custom_rcg_server_role + ): + RoleConfigGroupsResourceApi(cm_api_client).move_roles( + cluster_name=server_role.service_ref.cluster_name, + service_name=server_role.service_ref.service_name, + role_config_group_name=custom_rcg_server_role.name, + body=ApiRoleNameList(items=[server_role.name]), + ) + return server_role - with pytest.raises(AnsibleExitJson) as e: - service_role.main() + def test_service_role_existing_name( + self, conn, module_args, zookeeper, server_role + ): + module_args( + { + **conn, + "cluster": zookeeper.cluster_ref.cluster_name, + "service": zookeeper.name, + "name": server_role.name, + "state": "present", + } + ) - assert e.value.role["maintenance_mode"] == True - assert e.value.changed == False + with pytest.raises(AnsibleExitJson) as e: + service_role.main() - conn.update( - maintenance="no", - ) - module_args(conn) + assert e.value.changed == False + assert e.value.role["type"] == server_role.type + assert 
e.value.role["hostname"] == server_role.host_ref.hostname + assert e.value.role["role_state"] == ApiRoleState.STARTED - with pytest.raises(AnsibleExitJson) as e: - service_role.main() + def test_service_role_existing_hostname( + self, conn, module_args, zookeeper, server_role + ): + module_args( + { + **conn, + "cluster": zookeeper.cluster_ref.cluster_name, + "service": zookeeper.name, + "type": server_role.type, + "cluster_hostname": server_role.host_ref.hostname, + "state": "present", + } + ) - assert e.value.role["maintenance_mode"] == False + with pytest.raises(AnsibleExitJson) as e: + service_role.main() - with pytest.raises(AnsibleExitJson) as e: - service_role.main() + assert e.value.changed == False + assert e.value.role["type"] == server_role.type + assert e.value.role["hostname"] == server_role.host_ref.hostname + assert e.value.role["role_state"] == ApiRoleState.STARTED - assert e.value.role["maintenance_mode"] == False - assert e.value.changed == False + def test_service_role_existing_hostid( + self, conn, module_args, zookeeper, server_role + ): + module_args( + { + **conn, + "cluster": zookeeper.cluster_ref.cluster_name, + "service": zookeeper.name, + "type": server_role.type, + "cluster_host_id": server_role.host_ref.host_id, + "state": "present", + } + ) + with pytest.raises(AnsibleExitJson) as e: + service_role.main() -def test_role_set_tags(conn, module_args): - conn.update( - cluster=os.getenv("CM_CLUSTER"), - service=os.getenv("CM_SERVICE"), - role=os.getenv("CM_ROLE"), - tags=dict( - test="Ansible", key="Value", empty_string="", blank_string=" ", none=None - ), - ) - module_args(conn) + assert e.value.changed == False + assert e.value.role["type"] == server_role.type + assert e.value.role["hostname"] == server_role.host_ref.hostname + assert e.value.role["role_state"] == ApiRoleState.STARTED - with pytest.raises(AnsibleExitJson) as e: - service_role.main() + def test_service_role_existing_enable_maintenance( + self, conn, module_args, zookeeper, server_role + ): + module_args( + { + **conn, + "cluster": zookeeper.cluster_ref.cluster_name, + "service": zookeeper.name, + "name": server_role.name, + "maintenance": True, + "state": "present", + } + ) - assert ( - recursive_diff(e.value.role["tags"], dict(test="Ansible", key="Value")) is None - ) + with pytest.raises(AnsibleExitJson) as e: + service_role.main() - with pytest.raises(AnsibleExitJson) as e: - service_role.main() + assert e.value.changed == True + assert e.value.role["type"] == server_role.type + assert e.value.role["hostname"] == server_role.host_ref.hostname + assert e.value.role["role_state"] == ApiRoleState.STARTED + assert e.value.role["maintenance_mode"] == True - assert ( - recursive_diff(e.value.role["tags"], dict(test="Ansible", key="Value")) is None - ) - assert e.value.changed == False + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + service_role.main() + assert e.value.changed == False + assert e.value.role["maintenance_mode"] == True -def test_role_append_tags(conn, module_args): - conn.update( - cluster=os.getenv("CM_CLUSTER"), - service=os.getenv("CM_SERVICE"), - role=os.getenv("CM_ROLE"), - tags=dict(more="Tags", key="Value"), - ) - module_args(conn) + def test_service_role_existing_config( + self, conn, module_args, zookeeper, updated_server_role_config + ): + module_args( + { + **conn, + "cluster": zookeeper.cluster_ref.cluster_name, + "service": zookeeper.name, + "name": updated_server_role_config.name, + "config": { + "minSessionTimeout": 5001, + "maxSessionTimeout": 50001, + }, 
+ "state": "present", + } + ) + + with pytest.raises(AnsibleExitJson) as e: + service_role.main() + + assert e.value.changed == True + assert e.value.role["type"] == updated_server_role_config.type + assert e.value.role["hostname"] == updated_server_role_config.host_ref.hostname + assert e.value.role["role_state"] == ApiRoleState.STARTED + assert e.value.role["config"]["minSessionTimeout"] == "5001" + assert e.value.role["config"]["maxSessionTimeout"] == "50001" + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + service_role.main() - with pytest.raises(AnsibleExitJson) as e: - service_role.main() + assert e.value.changed == False + assert e.value.role["config"]["minSessionTimeout"] == "5001" + assert e.value.role["config"]["maxSessionTimeout"] == "50001" - assert ( - recursive_diff( - e.value.role["tags"], dict(test="Ansible", key="Value", more="Tags") + def test_service_role_existing_config_purge( + self, conn, module_args, zookeeper, updated_server_role_config + ): + module_args( + { + **conn, + "cluster": zookeeper.cluster_ref.cluster_name, + "service": zookeeper.name, + "name": updated_server_role_config.name, + "config": { + "maxSessionTimeout": 50001, + }, + "purge": True, + "state": "present", + } ) - is None - ) - with pytest.raises(AnsibleExitJson) as e: - service_role.main() + with pytest.raises(AnsibleExitJson) as e: + service_role.main() + + assert e.value.changed == True + assert e.value.role["type"] == updated_server_role_config.type + assert e.value.role["hostname"] == updated_server_role_config.host_ref.hostname + assert e.value.role["role_state"] == ApiRoleState.STARTED + assert "minSessionTimeout" not in e.value.role["config"] + assert e.value.role["config"]["maxSessionTimeout"] == "50001" + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + service_role.main() - assert ( - recursive_diff( - e.value.role["tags"], dict(test="Ansible", key="Value", more="Tags") + assert e.value.changed == False + assert "minSessionTimeout" not in e.value.role["config"] + assert e.value.role["config"]["maxSessionTimeout"] == "50001" + + def test_service_role_existing_rcg( + self, conn, module_args, zookeeper, server_role, custom_rcg_server_role + ): + module_args( + { + **conn, + "cluster": zookeeper.cluster_ref.cluster_name, + "service": zookeeper.name, + "name": server_role.name, + "role_config_group": custom_rcg_server_role.name, + "state": "present", + } ) - is None - ) - assert e.value.changed == False + with pytest.raises(AnsibleExitJson) as e: + service_role.main() -@pytest.mark.skip("Move to separate DIFF test suite.") -def test_update_tags_check_mode(conn, module_args): - conn.update( - cluster=os.getenv("CM_CLUSTER"), - service=os.getenv("CM_SERVICE"), - type="ZOOKEEPER", - tags=dict( - test="Ansible", - empty_string="", - none=None, - long_empty_string=" ", - ), - _ansible_check_mode=True, - _ansible_diff=True, - ) - module_args(conn) + assert e.value.changed == True + assert e.value.role["type"] == server_role.type + assert e.value.role["hostname"] == server_role.host_ref.hostname + assert e.value.role["role_state"] == ApiRoleState.STARTED + assert e.value.role["config"]["minSessionTimeout"] == "4501" + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + service_role.main() + + assert e.value.changed == False + assert e.value.role["config"]["minSessionTimeout"] == "4501" - with pytest.raises(AnsibleExitJson) as e: - service_role.main() + def test_service_role_existing_rcg_base( + self, conn, module_args, zookeeper, updated_server_role_rcg + ): + 
module_args( + { + **conn, + "cluster": zookeeper.cluster_ref.cluster_name, + "service": zookeeper.name, + "name": updated_server_role_rcg.name, + "role_config_group": None, + "state": "present", + } + ) - assert e.value.changed == True - assert e.value.diff["before"]["tags"] == dict() - assert e.value.diff["after"]["tags"] == dict(test="Ansible") + with pytest.raises(AnsibleExitJson) as e: + service_role.main() + assert e.value.changed == True + assert e.value.role["type"] == updated_server_role_rcg.type + assert e.value.role["hostname"] == updated_server_role_rcg.host_ref.hostname + assert e.value.role["role_state"] == ApiRoleState.STARTED + assert "minSessionTimeout" not in e.value.role["config"] -def test_role_purge_tags(conn, module_args): - conn.update( - cluster=os.getenv("CM_CLUSTER"), - service=os.getenv("CM_SERVICE"), - role=os.getenv("CM_ROLE"), - tags=dict(purge="Ansible"), - purge=True, - ) - module_args(conn) + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + service_role.main() - with pytest.raises(AnsibleExitJson) as e: - service_role.main() + assert e.value.changed == False + assert "minSessionTimeout" not in e.value.role["config"] - assert recursive_diff(e.value.role["tags"], dict(purge="Ansible")) is None - assert e.value.changed == True + def test_service_role_existing_tags( + self, conn, module_args, zookeeper, updated_server_role_tags + ): + module_args( + { + **conn, + "cluster": zookeeper.cluster_ref.cluster_name, + "service": zookeeper.name, + "name": updated_server_role_tags.name, + "tags": { + "pytest": "tag", + }, + "state": "present", + } + ) - with pytest.raises(AnsibleExitJson) as e: - service_role.main() + with pytest.raises(AnsibleExitJson) as e: + service_role.main() - assert recursive_diff(e.value.role["tags"], dict(purge="Ansible")) is None - assert e.value.changed == False + assert e.value.changed == True + assert e.value.role["type"] == updated_server_role_tags.type + assert e.value.role["hostname"] == updated_server_role_tags.host_ref.hostname + assert e.value.role["role_state"] == ApiRoleState.STARTED + assert e.value.role["tags"]["existing"] == "tag" + assert e.value.role["tags"]["pytest"] == "tag" + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + service_role.main() -def test_started(conn, module_args): - conn.update( - cluster=os.getenv("CM_CLUSTER"), - service=os.getenv("CM_SERVICE"), - role=os.getenv("CM_ROLE"), - state="started", - _ansible_verbosity=3, - ) - module_args(conn) + assert e.value.changed == False + assert e.value.role["tags"]["existing"] == "tag" + assert e.value.role["tags"]["pytest"] == "tag" - with pytest.raises(AnsibleExitJson): - service_role.main() + def test_service_role_existing_tags_purge( + self, conn, module_args, zookeeper, updated_server_role_tags + ): + module_args( + { + **conn, + "cluster": zookeeper.cluster_ref.cluster_name, + "service": zookeeper.name, + "name": updated_server_role_tags.name, + "tags": { + "pytest": "tag", + }, + "purge": True, + "state": "present", + } + ) - with pytest.raises(AnsibleExitJson) as e: - service_role.main() + with pytest.raises(AnsibleExitJson) as e: + service_role.main() - assert e.value.changed == False + assert e.value.changed == True + assert e.value.role["type"] == updated_server_role_tags.type + assert e.value.role["hostname"] == updated_server_role_tags.host_ref.hostname + assert e.value.role["role_state"] == ApiRoleState.STARTED + assert "existing" not in e.value.role["tags"] + assert e.value.role["tags"]["pytest"] == "tag" + # Idempotency + with 
pytest.raises(AnsibleExitJson) as e: + service_role.main() -def test_stopped(conn, module_args): - conn.update( - cluster=os.getenv("CM_CLUSTER"), - service=os.getenv("CM_SERVICE"), - state="stopped", - ) - module_args(conn) + assert e.value.changed == False + assert "existing" not in e.value.role["tags"] + assert e.value.role["tags"]["pytest"] == "tag" - with pytest.raises(AnsibleExitJson): - service_role.main() + def test_service_role_existing_state_started( + self, conn, module_args, zookeeper, stopped_server_role + ): + module_args( + { + **conn, + "cluster": zookeeper.cluster_ref.cluster_name, + "service": zookeeper.name, + "name": stopped_server_role.name, + "state": "started", + } + ) - with pytest.raises(AnsibleExitJson) as e: - service_role.main() + with pytest.raises(AnsibleExitJson) as e: + service_role.main() - assert e.value.changed == False + assert e.value.changed == True + assert e.value.role["type"] == stopped_server_role.type + assert e.value.role["hostname"] == stopped_server_role.host_ref.hostname + assert e.value.role["role_state"] == ApiRoleState.STARTED + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + service_role.main() -def test_absent(conn, module_args): - conn.update( - cluster=os.getenv("CM_CLUSTER"), - service=os.getenv("CM_SERVICE"), - role=os.getenv("CM_ROLE"), - state="absent", - ) - module_args(conn) + assert e.value.changed == False + assert e.value.role["role_state"] == ApiRoleState.STARTED + + def test_service_role_existing_state_stopped( + self, conn, module_args, zookeeper, server_role + ): + module_args( + { + **conn, + "cluster": zookeeper.cluster_ref.cluster_name, + "service": zookeeper.name, + "name": server_role.name, + "state": "stopped", + } + ) + + with pytest.raises(AnsibleExitJson) as e: + service_role.main() + + assert e.value.changed == True + assert e.value.role["type"] == server_role.type + assert e.value.role["hostname"] == server_role.host_ref.hostname + assert e.value.role["role_state"] == ApiRoleState.STOPPED + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + service_role.main() + + assert e.value.changed == False + assert e.value.role["role_state"] == ApiRoleState.STOPPED + + def test_service_role_existing_state_restarted( + self, conn, module_args, zookeeper, server_role + ): + module_args( + { + **conn, + "cluster": zookeeper.cluster_ref.cluster_name, + "service": zookeeper.name, + "name": server_role.name, + "state": "restarted", + } + ) + + with pytest.raises(AnsibleExitJson) as e: + service_role.main() + + assert e.value.changed == True + assert e.value.role["type"] == server_role.type + assert e.value.role["hostname"] == server_role.host_ref.hostname + assert e.value.role["role_state"] == ApiRoleState.STARTED + + # Idempotency (rather, 'restarted' is not idempotent) + with pytest.raises(AnsibleExitJson) as e: + service_role.main() + + assert e.value.changed == True + assert e.value.role["role_state"] == ApiRoleState.STARTED + + def test_service_role_existing_state_absent( + self, conn, module_args, zookeeper, server_role + ): + module_args( + { + **conn, + "cluster": zookeeper.cluster_ref.cluster_name, + "service": zookeeper.name, + "name": server_role.name, + "state": "absent", + } + ) + + with pytest.raises(AnsibleExitJson) as e: + service_role.main() - with pytest.raises(AnsibleExitJson): - service_role.main() + assert e.value.changed == True + assert not e.value.role - with pytest.raises(AnsibleExitJson) as e: - service_role.main() + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + 
service_role.main() - assert e.value.changed == False + assert e.value.changed == False + assert not e.value.role diff --git a/tests/unit/plugins/modules/service_role_info/test_service_role_info.py b/tests/unit/plugins/modules/service_role_info/test_service_role_info.py index 390c8a3a..5008faf4 100644 --- a/tests/unit/plugins/modules/service_role_info/test_service_role_info.py +++ b/tests/unit/plugins/modules/service_role_info/test_service_role_info.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Cloudera, Inc. All Rights Reserved. +# Copyright 2025 Cloudera, Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -19,61 +19,136 @@ __metaclass__ = type import logging -import os import pytest +from pathlib import Path + +from cm_client import ( + ApiClient, + ApiHostRef, + ApiRole, + ApiService, +) + from ansible_collections.cloudera.cluster.plugins.modules import service_role_info +from ansible_collections.cloudera.cluster.plugins.module_utils.cluster_utils import ( + get_cluster_hosts, +) +from ansible_collections.cloudera.cluster.plugins.module_utils.service_utils import ( + get_service_hosts, +) +from ansible_collections.cloudera.cluster.plugins.module_utils.role_utils import ( + create_role, + read_roles, +) from ansible_collections.cloudera.cluster.tests.unit import ( AnsibleExitJson, AnsibleFailJson, + deregister_service, + register_service, + deregister_role, + register_role, ) LOG = logging.getLogger(__name__) -@pytest.fixture() -def conn(): - conn = dict(username=os.getenv("CM_USERNAME"), password=os.getenv("CM_PASSWORD")) +def gather_server_roles(api_client: ApiClient, service: ApiService): + return read_roles( + api_client=api_client, + cluster_name=service.cluster_ref.cluster_name, + service_name=service.name, + type="SERVER", + ).items + + +@pytest.fixture(scope="module") +def zookeeper(cm_api_client, base_cluster, request): + # Keep track of the provisioned service(s) + service_registry = list[ApiService]() + + # Get the current cluster hosts + hosts = get_cluster_hosts(cm_api_client, base_cluster) + + id = Path(request.node.parent.name).stem + + zk_service = ApiService( + name=f"test-zk-{id}", + type="ZOOKEEPER", + display_name=f"ZooKeeper ({id})", + # Add a SERVER role (so we can start the service -- a ZK requirement!) 
+ roles=[ApiRole(type="SERVER", host_ref=ApiHostRef(hosts[0].host_id))], + ) + + # Provision and yield the created service + yield register_service( + api_client=cm_api_client, + registry=service_registry, + cluster=base_cluster, + service=zk_service, + ) - if os.getenv("CM_HOST", None): - conn.update(host=os.getenv("CM_HOST")) + # Remove the created service + deregister_service(api_client=cm_api_client, registry=service_registry) - if os.getenv("CM_PORT", None): - conn.update(port=os.getenv("CM_PORT")) - if os.getenv("CM_ENDPOINT", None): - conn.update(url=os.getenv("CM_ENDPOINT")) +@pytest.fixture() +def server_role(cm_api_client, zookeeper): + # Keep track of the provisioned role(s) + role_registry = list[ApiRole]() + + existing_role_instances = [ + r.host_ref.hostname for r in gather_server_roles(cm_api_client, zookeeper) + ] + + hosts = [ + h + for h in get_service_hosts(cm_api_client, zookeeper) + if h.hostname not in existing_role_instances + ] + + second_role = create_role( + api_client=cm_api_client, + role_type="SERVER", + hostname=hosts[0].hostname, + cluster_name=zookeeper.cluster_ref.cluster_name, + service_name=zookeeper.name, + ) - if os.getenv("CM_PROXY", None): - conn.update(proxy=os.getenv("CM_PROXY")) + yield register_role( + api_client=cm_api_client, + registry=role_registry, + service=zookeeper, + role=second_role, + ) - return { - **conn, - "verify_tls": "no", - "debug": "no", - } + deregister_role(api_client=cm_api_client, registry=role_registry) -def test_missing_required(conn, module_args): - module_args(conn) +def test_service_role_info_missing_required(conn, module_args): + module_args({**conn}) with pytest.raises(AnsibleFailJson, match="cluster"): service_role_info.main() -def test_missing_cluster(conn, module_args): - conn.update(service="example") - module_args(conn) +def test_service_role_info_missing_cluster(conn, module_args): + module_args( + { + **conn, + "service": "example", + } + ) with pytest.raises(AnsibleFailJson, match="cluster"): service_role_info.main() -def test_invalid_service(conn, module_args): +def test_service_role_info_invalid_service(conn, module_args, zookeeper): module_args( { **conn, - "cluster": os.getenv("CM_CLUSTER"), + "cluster": zookeeper.cluster_ref.cluster_name, "service": "BOOM", } ) @@ -82,12 +157,12 @@ def test_invalid_service(conn, module_args): service_role_info.main() -def test_invalid_cluster(conn, module_args): +def test_service_role_info_invalid_cluster(conn, module_args, zookeeper): module_args( { **conn, "cluster": "BOOM", - "service": os.getenv("CM_SERVICE"), + "service": zookeeper.name, } ) @@ -95,28 +170,59 @@ def test_invalid_cluster(conn, module_args): service_role_info.main() -def test_view_all_service_roles(conn, module_args): +def test_service_role_info_all(conn, module_args, cm_api_client, zookeeper): + expected_roles = gather_server_roles( + api_client=cm_api_client, + service=zookeeper, + ) + + module_args( + { + **conn, + "cluster": zookeeper.cluster_ref.cluster_name, + "service": zookeeper.name, + } + ) + + with pytest.raises(AnsibleExitJson) as e: + service_role_info.main() + + assert len(e.value.roles) == len(expected_roles) + + +def test_service_role_info_all_full(conn, module_args, cm_api_client, zookeeper): + expected_roles = gather_server_roles( + api_client=cm_api_client, + service=zookeeper, + ) + module_args( { **conn, - "cluster": os.getenv("CM_CLUSTER"), - "service": os.getenv("CM_SERVICE"), + "cluster": zookeeper.cluster_ref.cluster_name, + "service": zookeeper.name, + "view": "full", } ) with 
pytest.raises(AnsibleExitJson) as e: service_role_info.main() - assert len(e.value.roles) > 0 + assert len(e.value.roles) == len(expected_roles) + +def test_service_role_info_by_name(conn, module_args, cm_api_client, zookeeper): + expected_roles = gather_server_roles( + api_client=cm_api_client, + service=zookeeper, + ) -def test_view_service_role(conn, module_args): module_args( { **conn, - "cluster": os.getenv("CM_CLUSTER"), - "service": os.getenv("CM_SERVICE"), - "role": "yarn-NODEMANAGER-b31d2abaf9e21d6610838c33f4892bf2", + "cluster": zookeeper.cluster_ref.cluster_name, + "service": zookeeper.name, + "role": expected_roles[0].name, } ) @@ -124,53 +230,83 @@ def test_view_service_role(conn, module_args): service_role_info.main() assert len(e.value.roles) == 1 + assert e.value.roles[0]["name"] == expected_roles[0].name + + +def test_service_role_info_by_type( + conn, module_args, cm_api_client, zookeeper, server_role +): + role_type = "SERVER" + expected_roles = [ + r + for r in gather_server_roles( + api_client=cm_api_client, + service=zookeeper, + ) + if r.type == role_type + ] -def test_view_service_roles_by_type(conn, module_args): module_args( { **conn, - "cluster": os.getenv("CM_CLUSTER"), - "service": os.getenv("CM_SERVICE"), - "type": "NODEMANAGER", + "cluster": zookeeper.cluster_ref.cluster_name, + "service": zookeeper.name, + "type": role_type, } ) with pytest.raises(AnsibleExitJson) as e: service_role_info.main() - assert len(e.value.roles) == 3 + assert len(e.value.roles) == len(expected_roles) -@pytest.mark.skip("Requires hostname") -def test_view_service_roles_by_hostname(conn, module_args): +def test_service_role_info_by_hostname( + conn, module_args, cm_api_client, zookeeper, server_role +): + expected_roles = gather_server_roles( + api_client=cm_api_client, + service=zookeeper, + ) + module_args( { **conn, - "cluster": os.getenv("CM_CLUSTER"), - "service": os.getenv("CM_SERVICE"), - "cluster_hostname": "test07-worker-01.cldr.internal", + "cluster": zookeeper.cluster_ref.cluster_name, + "service": zookeeper.name, + "cluster_hostname": expected_roles[0].host_ref.hostname, } ) with pytest.raises(AnsibleExitJson) as e: service_role_info.main() - assert len(e.value.roles) == 2 + assert len(e.value.roles) == 1 + assert e.value.roles[0]["host_id"] == expected_roles[0].host_ref.host_id + assert e.value.roles[0]["hostname"] == expected_roles[0].host_ref.hostname + +def test_service_role_info_by_host_id( + conn, module_args, cm_api_client, zookeeper, server_role +): + expected_roles = gather_server_roles( + api_client=cm_api_client, + service=zookeeper, + ) -@pytest.mark.skip("Requires host ID") -def test_view_service_roles_by_host_id(conn, module_args): module_args( { **conn, - "cluster": os.getenv("CM_CLUSTER"), - "service": os.getenv("CM_SERVICE"), - "cluster_host_id": "0b5fa17e-e316-4c86-8812-3108eb55b83d", + "cluster": zookeeper.cluster_ref.cluster_name, + "service": zookeeper.name, + "cluster_host_id": expected_roles[0].host_ref.host_id, } ) with pytest.raises(AnsibleExitJson) as e: service_role_info.main() - assert len(e.value.roles) == 4 + assert len(e.value.roles) == 1 + assert e.value.roles[0]["host_id"] == expected_roles[0].host_ref.host_id + assert e.value.roles[0]["hostname"] == expected_roles[0].host_ref.hostname diff --git a/tests/unit/plugins/modules/utils.py b/tests/unit/plugins/modules/utils.py index d737263b..2c97a428 100644 --- a/tests/unit/plugins/modules/utils.py +++ b/tests/unit/plugins/modules/utils.py @@ -16,6 +16,7 @@ from __future__ import 
absolute_import, division, print_function + __metaclass__ = type import json