diff --git a/.test.sh b/.test.sh
index 087d5aab..56dacb52 100755
--- a/.test.sh
+++ b/.test.sh
@@ -20,4 +20,3 @@
 mkdir -p .ansible/collections/ansible_collections/cloudera/cluster
 git archive $(git rev-parse --abbrev-ref HEAD) | tar -x -C .ansible/collections/ansible_collections/cloudera/cluster
 (cd .ansible/collections/ansible_collections/cloudera/cluster && pytest)
-
diff --git a/plugins/modules/service_role_config_group_info.py b/plugins/modules/service_role_config_group_info.py
index cc71314b..88f9ef65 100644
--- a/plugins/modules/service_role_config_group_info.py
+++ b/plugins/modules/service_role_config_group_info.py
@@ -1,6 +1,7 @@
+#!/usr/bin/python
 # -*- coding: utf-8 -*-

-# Copyright 2024 Cloudera, Inc. All Rights Reserved.
+# Copyright 2025 Cloudera, Inc. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -14,38 +15,13 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-from ansible_collections.cloudera.cluster.plugins.module_utils.cm_utils import (
-    ClouderaManagerModule,
-)
-
-from ansible_collections.cloudera.cluster.plugins.module_utils.role_config_group_utils import (
-    parse_role_config_group_result,
-)
-
-from cm_client import (
-    ClustersResourceApi,
-    RoleConfigGroupsResourceApi,
-    ServicesResourceApi,
-)
-from cm_client.rest import ApiException
-
-
-ANSIBLE_METADATA = {
-    "metadata_version": "1.1",
-    "status": ["preview"],
-    "supported_by": "community",
-}
-
 DOCUMENTATION = r"""
----
 module: service_role_config_group_info
 short_description: Retrieve information about a cluster service role config group or groups
 description:
   - Gather details about a role config group or groups of a service in a CDP cluster.
 author:
   - "Webster Mudge (@wmudge)"
-requirements:
-  - cm_client
 options:
   cluster:
     description:
@@ -61,23 +37,35 @@
     required: yes
     aliases:
       - service_name
-  role_config_group:
+  type:
+    description:
+      - The role type defining the role config group(s).
+      - If specified, the module will return all role config groups of the type.
+      - Mutually exclusive with O(name).
+    type: str
+    aliases:
+      - role_type
+  name:
     description:
       - The role config group to examine.
-      - If undefined, the module will return all role config groups for the service.
+      - If defined, the module will return the role config group.
       - If the role config group does not exist, the module will return an empty result.
     type: str
-    required: yes
     aliases:
       - role_config_group
-      - name
 extends_documentation_fragment:
   - cloudera.cluster.cm_options
   - cloudera.cluster.cm_endpoint
+attributes:
+  check_mode:
+    support: full
+requirements:
+  - cm-client
+seealso:
+  - module: cloudera.cluster.service_role_config_group
 """

 EXAMPLES = r"""
----
 - name: Gather the configuration details for a cluster service role
   cloudera.cluster.service_role_config_info:
     host: "example.cloudera.internal"
@@ -99,47 +87,56 @@
 """

 RETURN = r"""
----
 role_config_groups:
   description:
-    - List of service role config groups.
+    - List of cluster service role config groups.
   type: list
   elements: dict
   returned: always
   contains:
     name:
-      description:
-        - The unique name of this role config group.
+      description: Name (identifier) of the role config group.
       type: str
       returned: always
     role_type:
-      description:
-        - The type of the roles in this group.
+      description: The type of the roles in this role config group.
       type: str
       returned: always
     base:
-      description:
-        - Flag indicating whether this is a base group.
+      description: Flag indicating whether this is a base role config group.
       type: bool
       returned: always
     display_name:
-      description:
-        - A user-friendly name of the role config group, as would have been shown in the web UI.
+      description: A user-friendly name of the role config group, as would have been shown in the web UI.
       type: str
       returned: when supported
     service_name:
-      description:
-        - The service name associated with this role config group.
+      description: The service name associated with this role config group.
       type: str
       returned: always
     role_names:
-      description:
-        - List of role names associated with this role config group.
+      description: List of role names (identifiers) associated with this role config group.
       type: list
       elements: str
       returned: when supported
 """

+from cm_client import (
+    ClustersResourceApi,
+    RoleConfigGroupsResourceApi,
+    ServicesResourceApi,
+)
+from cm_client.rest import ApiException
+
+from ansible_collections.cloudera.cluster.plugins.module_utils.cm_utils import (
+    ClouderaManagerModule,
+)
+
+from ansible_collections.cloudera.cluster.plugins.module_utils.role_config_group_utils import (
+    parse_role_config_group_result,
+    get_base_role_config_group,
+)
+

 class ClusterServiceRoleConfigGroupInfo(ClouderaManagerModule):
     def __init__(self, module):
@@ -148,7 +145,8 @@ def __init__(self, module):
         # Set the parameters
         self.cluster = self.get_param("cluster")
         self.service = self.get_param("service")
-        self.role_config_group = self.get_param("role_config_group")
+        self.type = self.get_param("type")
+        self.name = self.get_param("name")

         # Initialize the return values
         self.output = []
@@ -176,30 +174,43 @@ def process(self):
         else:
             raise ex

-        api_instance = RoleConfigGroupsResourceApi(self.api_client)
+        rcg_api = RoleConfigGroupsResourceApi(self.api_client)
         results = []
-        if self.role_config_group:
+
+        # If given a specific RCG
+        if self.name:
             try:
                 results = [
-                    api_instance.read_role_config_group(
+                    rcg_api.read_role_config_group(
                         cluster_name=self.cluster,
-                        role_config_group_name=self.role_config_group,
+                        role_config_group_name=self.name,
                         service_name=self.service,
                     )
                 ]
             except ApiException as e:
                 if e.status != 404:
                     raise e
+        # Else if given a RCG type
+        elif self.type:
+            results = [
+                r
+                for r in rcg_api.read_role_config_groups(
+                    cluster_name=self.cluster,
+                    service_name=self.service,
+                ).items
+                if r.role_type == self.type
+            ]
+        # Else get all RCG entries for the given service
         else:
-            results = api_instance.read_role_config_groups(
+            results = rcg_api.read_role_config_groups(
                 cluster_name=self.cluster,
                 service_name=self.service,
             ).items

+        # Get role membership
         for r in results:
-            # Get role membership
-            roles = api_instance.read_roles(
+            roles = rcg_api.read_roles(
                 cluster_name=self.cluster,
                 service_name=self.service,
                 role_config_group_name=r.name,
@@ -218,8 +229,10 @@ def main():
         argument_spec=dict(
             cluster=dict(required=True, aliases=["cluster_name"]),
             service=dict(required=True, aliases=["service_name"]),
-            role_config_group=dict(aliases=["role_config_group", "name"]),
+            type=dict(aliases=["role_type"]),
+            name=dict(aliases=["role_config_group"]),
         ),
+        mutually_exclusive=[["type", "name"]],
         supports_check_mode=True,
     )
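The refactored process() above resolves role config groups in one of three ways: a direct read when the name parameter is given, a client-side filter on role_type when the type parameter is given, and a full listing otherwise. For illustration, a minimal standalone sketch of that lookup strategy using the same cm-client calls that appear in the diff; the endpoint, credentials, and the cluster/service/group names here are hypothetical placeholders, not values from this change:

    # Sketch only: mirrors the name/type/all branching in process() above.
    import cm_client
    from cm_client import RoleConfigGroupsResourceApi

    cm_client.configuration.username = "admin"  # hypothetical credentials
    cm_client.configuration.password = "admin"
    api_client = cm_client.ApiClient("https://cm.example.internal:7183/api/v49")

    rcg_api = RoleConfigGroupsResourceApi(api_client)

    # By name: a single direct read (the module maps a 404 to an empty result)
    by_name = [
        rcg_api.read_role_config_group(
            cluster_name="example-cluster",
            service_name="zookeeper",
            role_config_group_name="zookeeper-SERVER-BASE",
        )
    ]

    # By type: list all groups for the service, then filter client-side
    by_type = [
        r
        for r in rcg_api.read_role_config_groups(
            cluster_name="example-cluster", service_name="zookeeper"
        ).items
        if r.role_type == "SERVER"
    ]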
diff --git a/pyproject.toml b/pyproject.toml
index f06438c1..818a4b10 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -19,17 +19,15 @@ pattern = "version:\\s+(?P<version>[\\d\\.]+)"

 [tool.hatch.envs.default]
 python = "3.12"
-skip-install = true
+detached = true
 dependencies = [
     "pre-commit",
     "coverage[toml]",
     "pytest",
     "pytest-mock",
-    # "pytest-cov",
     "molecule",
     "molecule-plugins",
     "molecule-plugins[ec2]",
-    "tox-ansible",
     "ansible-core<2.17", # For RHEL 8 support
     "jmespath",
     "cm-client",
@@ -37,17 +35,13 @@ dependencies = [

 [tool.hatch.envs.lint]
 python = "3.12"
-skip-install = true
+detached = true
 extra-dependencies = ["ansible-lint"]

 [tool.hatch.envs.lint.scripts]
 run = "pre-commit run -a"

 [tool.pytest.ini_options]
-# addopts = [
-#     "--lf",
-#     "--nf",
-# ]
 testpaths = ["tests"]
 filterwarnings = [
     "ignore:AnsibleCollectionFinder has already been configured",
diff --git a/tests/unit/conftest.py b/tests/unit/conftest.py
index 3394ff15..e4bc0e38 100644
--- a/tests/unit/conftest.py
+++ b/tests/unit/conftest.py
@@ -46,6 +46,7 @@
     ApiRoleState,
     ApiService,
     ApiServiceConfig,
+    ApiServiceList,
     ApiServiceState,
     ClustersResourceApi,
     CommandsResourceApi,
@@ -57,6 +58,7 @@
     MgmtServiceResourceApi,
     ParcelResourceApi,
     ParcelsResourceApi,
+    ServicesResourceApi,
 )
 from cm_client.rest import ApiException, RESTClientObject
@@ -80,6 +82,14 @@
 )


+class NoHostsFoundException(Exception):
+    pass
+
+
+class ParcelNotFoundException(Exception):
+    pass
+
+
 @pytest.fixture(autouse=True)
 def skip_python():
     if sys.version_info < (3, 6):
@@ -198,15 +208,83 @@ def cm_api_client(conn) -> ApiClient:


 @pytest.fixture(scope="session")
-def base_cluster(cm_api_client, request):
-    """Provision a CDH Base cluster. If the variable 'CM_CLUSTER' is present,
-    will attempt to read and yield a reference to this cluster. Otherwise,
-    will yield a new base cluster with a single host, deleting the cluster
-    once completed.
+def cms_session(cm_api_client) -> Generator[ApiService]:
+    """
+    Provisions the Cloudera Manager Service. If the Cloudera Manager Service
+    is already present, will read and yield a reference to it. Otherwise, will
+    yield a newly created Cloudera Manager Service, deleting it after use.
+
+    If it does create a new Cloudera Manager Service, it will do so on the
+    first available host and will auto-configure the following roles:
+      - HOSTMONITOR
+      - SERVICEMONITOR
+      - EVENTSERVER
+      - ALERTPUBLISHER
+
+    It starts the Cloudera Manager Service, yields it, and removes the
+    service on teardown only if this fixture created it.

     Args:
-        cm_api_client (_type_): _description_
-        request (_type_): _description_
+        cm_api_client (ApiClient): CM API client
+
+    Yields:
+        Generator[ApiService]: Cloudera Manager Service
+    """
+
+    cms_api = MgmtServiceResourceApi(cm_api_client)
+
+    try:
+        # Return if the Cloudera Manager Service is already present
+        yield cms_api.read_service()
+
+        # Do nothing on teardown
+        return
+    except ApiException as ae:
+        if ae.status != 404 or "Cannot find management service." not in str(ae.body):
+            raise Exception(str(ae))
+
+    service_api = MgmtServiceResourceApi(cm_api_client)
+    host_api = HostsResourceApi(cm_api_client)
+
+    host = next((h for h in host_api.read_hosts().items if not h.cluster_ref), None)
+
+    if host is None:
+        raise Exception("No available hosts to assign Cloudera Manager Service roles")
+
+    name = "pytest-" + "".join(random.choices(string.ascii_lowercase, k=6))
+
+    service_api.setup_cms(
+        body=ApiService(
+            name=name,
+            type="MGMT",
+            roles=[
+                ApiRole(type="HOSTMONITOR"),
+                ApiRole(type="SERVICEMONITOR"),
+                ApiRole(type="EVENTSERVER"),
+                ApiRole(type="ALERTPUBLISHER"),
+            ],
+        )
+    )
+    service_api.auto_configure()
+
+    monitor_command(cm_api_client, service_api.start_command())
+
+    # Return the newly-minted CMS
+    yield service_api.read_service()
+
+    # Delete the created CMS
+    cms_api.delete_cms()
+
+
+@pytest.fixture(scope="session")
+def base_cluster(cm_api_client, cms_session) -> Generator[ApiCluster]:
+    """Provision a Cloudera on-premises base cluster for the session.
+    If the variable 'CM_CLUSTER' is present, will attempt to read and yield
+    a reference to this cluster. Otherwise, will yield a new base cluster
+    with a single host, deleting the cluster once completed.
+
+    Args:
+        cm_api_client (ApiClient): CM API client

     Raises:
         Exception: _description_
@@ -214,7 +292,7 @@ def base_cluster(cm_api_client, request):
         Exception: _description_

     Yields:
-        _type_: _description_
+        ApiCluster: The base cluster
     """

     cluster_api = ClustersResourceApi(cm_api_client)
@@ -230,7 +308,7 @@
         )

     name = (
-        Path(request.fixturename).stem
+        cms_session.name
         + "_"
         + "".join(random.choices(string.ascii_lowercase, k=6))
     )
@@ -269,9 +347,17 @@
             p
             for p in parcels.items
             if p.product == "CDH" and p.version.startswith(cdh_version)
-        )
+        ),
+        None,
     )

+    if cdh_parcel is None:
+        # Roll back the cluster and then raise an error
+        cluster_api.delete_cluster(cluster_name=name)
+        raise ParcelNotFoundException(
+            f"CDH Version {cdh_version} not found. Please check your parcel repo configuration."
+        )
+
     parcel = Parcel(
         parcel_api=parcel_api,
         product=cdh_parcel.product,
@@ -290,6 +376,67 @@
         raise Exception(str(ae))


+@pytest.fixture(scope="function")
+def zk_auto(cm_api_client, base_cluster, request) -> Generator[ApiService]:
+    """Create a new ZooKeeper service on the provided base cluster.
+    It starts the service, yields it, and removes the service afterwards if
+    the tests have not already done so.
+
+    Args:
+        cm_api_client (ApiClient): CM API client
+        base_cluster (ApiCluster): Provided base cluster
+        request (FixtureRequest): Fixture request
+
+    Yields:
+        Generator[ApiService]: The instantiated ZooKeeper service
+    """
+
+    service_api = ServicesResourceApi(cm_api_client)
+    cm_api = ClustersResourceApi(cm_api_client)
+
+    host = next(
+        (h for h in cm_api.list_hosts(cluster_name=base_cluster.name).items), None
+    )
+
+    if host is None:
+        raise NoHostsFoundException(
+            "No available hosts to assign ZooKeeper service roles"
+        )
+
+    payload = ApiService(
+        name="-".join(["zk", request.node.name]),
+        type="ZOOKEEPER",
+        roles=[
+            ApiRole(
+                type="SERVER",
+                host_ref=ApiHostRef(host.host_id, host.hostname),
+            ),
+        ],
+    )
+
+    service_results = service_api.create_services(
+        cluster_name=base_cluster.name, body=ApiServiceList(items=[payload])
+    )
+
+    first_run_cmd = service_api.first_run(
+        cluster_name=base_cluster.name,
+        service_name=service_results.items[0].name,
+    )
+
+    monitor_command(cm_api_client, first_run_cmd)
+
+    zk_service = service_api.read_service(
+        cluster_name=base_cluster.name, service_name=service_results.items[0].name
+    )
+
+    yield zk_service
+
+    service_api.delete_service(
+        cluster_name=base_cluster.name,
+        service_name=zk_service.name,
+    )
+
+
 @pytest.fixture(scope="session")
 def cms(cm_api_client: ApiClient, request) -> Generator[ApiService]:
     """Provisions Cloudera Manager Service. If the Cloudera Manager Service
@@ -714,7 +861,7 @@ def handle_commands(api_client: ApiClient, commands: ApiBulkCommandList):


 def monitor_command(
-    api_client: ApiClient, command: ApiCommand, polling: int = 10, delay: int = 15
+    api_client: ApiClient, command: ApiCommand, polling: int = 120, delay: int = 10
 ):
     poll_count = 0
     while command.active:
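The fixture chain above provisions infrastructure lazily: zk_auto pulls in base_cluster, which in turn pulls in cms_session, so a test only needs to request the fixtures it actually uses. A minimal sketch of a test consuming these fixtures; the test name and assertions are illustrative only, assuming the ApiService fields already used elsewhere in this diff:

    # Sketch only: requesting zk_auto transitively provisions the CMS and base cluster.
    def test_zookeeper_service_is_provisioned(base_cluster, zk_auto):
        assert zk_auto.type == "ZOOKEEPER"
        # cluster_ref is populated once the service is created within the cluster
        assert zk_auto.cluster_ref.cluster_name == base_cluster.name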
diff --git a/tests/unit/plugins/modules/service_role_config_group_info/test_service_role_config_group_info.py b/tests/unit/plugins/modules/service_role_config_group_info/test_service_role_config_group_info.py
index 440aa966..c23f0b63 100644
--- a/tests/unit/plugins/modules/service_role_config_group_info/test_service_role_config_group_info.py
+++ b/tests/unit/plugins/modules/service_role_config_group_info/test_service_role_config_group_info.py
@@ -1,6 +1,6 @@
 # -*- coding: utf-8 -*-

-# Copyright 2024 Cloudera, Inc. All Rights Reserved.
+# Copyright 2025 Cloudera, Inc. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -19,12 +19,14 @@
 __metaclass__ = type

 import logging
-import os
 import pytest

 from ansible_collections.cloudera.cluster.plugins.modules import (
     service_role_config_group_info,
 )
+from ansible_collections.cloudera.cluster.plugins.module_utils.role_config_group_utils import (
+    get_base_role_config_group,
+)
 from ansible_collections.cloudera.cluster.tests.unit import (
     AnsibleExitJson,
     AnsibleFailJson,
@@ -33,29 +35,6 @@
 LOG = logging.getLogger(__name__)


-@pytest.fixture()
-def conn():
-    conn = dict(username=os.getenv("CM_USERNAME"), password=os.getenv("CM_PASSWORD"))
-
-    if os.getenv("CM_HOST", None):
-        conn.update(host=os.getenv("CM_HOST"))
-
-    if os.getenv("CM_PORT", None):
-        conn.update(port=os.getenv("CM_PORT"))
-
-    if os.getenv("CM_ENDPOINT", None):
-        conn.update(url=os.getenv("CM_ENDPOINT"))
-
-    if os.getenv("CM_PROXY", None):
-        conn.update(proxy=os.getenv("CM_PROXY"))
-
-    return {
-        **conn,
-        "verify_tls": "no",
-        "debug": "no",
-    }
-
-
 def test_missing_required(conn, module_args):
     module_args(conn)

@@ -71,11 +50,11 @@ def test_missing_cluster(conn, module_args):
         service_role_config_group_info.main()


-def test_invalid_service(conn, module_args):
+def test_invalid_service(conn, module_args, base_cluster):
     module_args(
         {
             **conn,
-            "cluster": os.getenv("CM_CLUSTER"),
+            "cluster": base_cluster.name,
             "service": "BOOM",
         }
     )

@@ -84,12 +63,12 @@
         service_role_config_group_info.main()


-def test_invalid_cluster(conn, module_args):
+def test_invalid_cluster(conn, module_args, base_cluster):
     module_args(
         {
             **conn,
             "cluster": "BOOM",
-            "service": os.getenv("CM_SERVICE"),
+            "service": "ShouldNotReach",
         }
     )

@@ -97,66 +76,63 @@
         service_role_config_group_info.main()


-def test_view_all_role_config_groups(conn, module_args):
+def test_all_role_config_groups(conn, module_args, base_cluster, zk_auto):
     module_args(
         {
             **conn,
-            "cluster": os.getenv("CM_CLUSTER"),
-            "service": os.getenv("CM_SERVICE"),
+            "cluster": base_cluster.name,
+            "service": zk_auto.name,
         }
     )

     with pytest.raises(AnsibleExitJson) as e:
         service_role_config_group_info.main()

-    assert len(e.value.role_config_groups) > 0
+    # Should be only one BASE for the SERVER
+    assert len(e.value.role_config_groups) == 1
+    assert e.value.role_config_groups[0]["base"] is True


-def test_view_service_role(conn, module_args):
+def test_type_role_config_group(conn, module_args, base_cluster, zk_auto):
     module_args(
         {
             **conn,
-            "cluster": os.getenv("CM_CLUSTER"),
-            "service": os.getenv("CM_SERVICE"),
-            "name": "hdfs-GATEWAY-BASE",
+            "cluster": base_cluster.name,
+            "service": zk_auto.name,
+            "type": "SERVER",
         }
     )

     with pytest.raises(AnsibleExitJson) as e:
         service_role_config_group_info.main()

+    # Should be only one BASE for the SERVER
     assert len(e.value.role_config_groups) == 1
+    assert e.value.role_config_groups[0]["base"] is True


-@pytest.mark.skip("Requires hostname")
-def test_view_service_roles_by_hostname(conn, module_args):
-    module_args(
-        {
-            **conn,
-            "cluster": os.getenv("CM_CLUSTER"),
-            "service": os.getenv("CM_SERVICE"),
-            "cluster_hostname": "test07-worker-01.cldr.internal",
-        }
+def test_name_role_config_group(
+    conn, module_args, cm_api_client, base_cluster, zk_auto
+):
+    base_rcg = get_base_role_config_group(
+        api_client=cm_api_client,
+        cluster_name=base_cluster.name,
+        service_name=zk_auto.name,
+        role_type="SERVER",
     )

-    with pytest.raises(AnsibleExitJson) as e:
-        service_role_config_group_info.main()
-
-    assert len(e.value.roles) == 2
-
-
-@pytest.mark.skip("Requires host ID")
-def test_view_service_roles_by_host_id(conn, module_args):
     module_args(
         {
             **conn,
-            "cluster": os.getenv("CM_CLUSTER"),
-            "service": os.getenv("CM_SERVICE"),
-            "cluster_host_id": "0b5fa17e-e316-4c86-8812-3108eb55b83d",
+            "cluster": base_cluster.name,
+            "service": zk_auto.name,
+            "name": base_rcg.name,
         }
     )

     with pytest.raises(AnsibleExitJson) as e:
         service_role_config_group_info.main()

-    assert len(e.value.roles) == 4
+    # Should be only one BASE for the SERVER
+    assert len(e.value.role_config_groups) == 1
+    assert e.value.role_config_groups[0]["base"] is True
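For reference, get_base_role_config_group, imported from role_config_group_utils in both the module and the test above, is used here only to look up the name of a role type's base group. A plausible sketch of such a helper, inferred from its call sites in this diff; this is an assumption, not the collection's actual implementation:

    # Assumed shape only; the real helper lives in
    # plugins/module_utils/role_config_group_utils.py.
    from cm_client import RoleConfigGroupsResourceApi

    def get_base_role_config_group(api_client, cluster_name, service_name, role_type):
        rcg_api = RoleConfigGroupsResourceApi(api_client)
        return next(
            (
                rcg
                for rcg in rcg_api.read_role_config_groups(
                    cluster_name=cluster_name, service_name=service_name
                ).items
                if rcg.role_type == role_type and rcg.base
            ),
            None,
        )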