From 4df1dcd22835786be53c043f786a2d5395452de0 Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Tue, 17 Dec 2024 09:24:23 -0500 Subject: [PATCH 01/31] Update pytest marker for 'service_config' Signed-off-by: Webster Mudge --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index a36945c5..e46f1c79 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -53,7 +53,7 @@ filterwarnings = [ "ignore:'crypt' is deprecated and slated for removal in Python 3.13:DeprecationWarning", ] markers = [ - "prepare: Prepare Cloudera Manager and resources for tests", + "service_config: Prepare service-wide configurations for tests", ] [build-system] From 4d0dea74ec902872910e5a212ab19d7852fb65fe Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Tue, 17 Dec 2024 09:24:23 -0500 Subject: [PATCH 02/31] Update message for cms_service_config changes Signed-off-by: Webster Mudge --- tests/unit/conftest.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/unit/conftest.py b/tests/unit/conftest.py index 5fb502e1..268192db 100644 --- a/tests/unit/conftest.py +++ b/tests/unit/conftest.py @@ -295,7 +295,7 @@ def cms_service_config(cm_api_client, cms, request): for k, v in marker.args[0].items(): try: api.update_service_config( - message=f"{request.node.name}::set", + message=f"{Path(request.node.parent.name).stem}::{request.node.name}::set", body=ApiServiceConfig(items=[ApiConfig(name=k, value=v)]), ) except ApiException as ae: @@ -321,6 +321,6 @@ def cms_service_config(cm_api_client, cms, request): ) api.update_service_config( - message=f"{request.node.name}::reset", + message=f"{Path(request.node.parent.name).stem}::{request.node.name}::reset", body=ApiServiceConfig(items=reconciled), ) From c9798f10c8e3545ecc26cad7630623e909395673 Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Tue, 17 Dec 2024 09:24:24 -0500 Subject: [PATCH 03/31] Move role and role config group result parsing to separate modules. 
Update general output parsing function. Signed-off-by: Webster Mudge --- plugins/module_utils/cluster_utils.py | 4 +- plugins/module_utils/cm_utils.py | 50 +------------------ plugins/module_utils/data_context_utils.py | 6 +-- plugins/module_utils/parcel_utils.py | 4 +- .../module_utils/role_config_group_utils.py | 35 +++++++++++++ plugins/module_utils/role_utils.py | 48 ++++++++++++++++++ plugins/module_utils/service_utils.py | 9 +++- plugins/modules/service_role.py | 5 +- plugins/modules/service_role_config_group.py | 3 ++ .../service_role_config_group_config_info.py | 2 +- .../modules/service_role_config_group_info.py | 4 ++ plugins/modules/service_role_info.py | 4 ++ 12 files changed, 115 insertions(+), 59 deletions(-) create mode 100644 plugins/module_utils/role_config_group_utils.py create mode 100644 plugins/module_utils/role_utils.py diff --git a/plugins/module_utils/cluster_utils.py b/plugins/module_utils/cluster_utils.py index 203d4230..bf3cae1f 100644 --- a/plugins/module_utils/cluster_utils.py +++ b/plugins/module_utils/cluster_utils.py @@ -17,7 +17,7 @@ """ from ansible_collections.cloudera.cluster.plugins.module_utils.cm_utils import ( - _parse_output, + normalize_output, ) from cm_client import ApiCluster @@ -42,5 +42,5 @@ def parse_cluster_result(cluster: ApiCluster) -> dict: # Retrieve full_version as version output = dict(version=cluster.full_version) - output.update(_parse_output(cluster.to_dict(), CLUSTER_OUTPUT)) + output.update(normalize_output(cluster.to_dict(), CLUSTER_OUTPUT)) return output diff --git a/plugins/module_utils/cm_utils.py b/plugins/module_utils/cm_utils.py index a1667986..c892b229 100644 --- a/plugins/module_utils/cm_utils.py +++ b/plugins/module_utils/cm_utils.py @@ -35,8 +35,6 @@ ApiClient, ApiCommand, ApiConfigList, - ApiRole, - ApiRoleConfigGroup, Configuration, ) from cm_client.rest import ApiException, RESTClientObject @@ -47,34 +45,8 @@ __credits__ = ["frisch@cloudera.com"] __maintainer__ = ["wmudge@cloudera.com"] 
-ROLE_OUTPUT = [ - "commission_state", - "config_staleness_status", - "ha_status", - "health_checks", - "health_summary", - # "host_ref", - "maintenance_mode", - "maintenance_owners", - "name", - # "role_config_group_ref", - "role_state", - # "service_ref", - "tags", - "type", - "zoo_keeper_server_mode", -] - -ROLE_CONFIG_GROUP = [ - "name", - "role_type", - "base", - "display_name", - # "service_ref", -] - - -def _parse_output(entity: dict, filter: list) -> dict: + +def normalize_output(entity: dict, filter: list) -> dict: output = {} for k in filter: if k == "tags": @@ -85,24 +57,6 @@ def _parse_output(entity: dict, filter: list) -> dict: return output -def parse_role_result(role: ApiRole) -> dict: - # Retrieve only the host_id, role_config_group, and service identifiers - output = dict( - host_id=role.host_ref.host_id, - role_config_group_name=role.role_config_group_ref.role_config_group_name, - service_name=role.service_ref.service_name, - ) - output.update(_parse_output(role.to_dict(), ROLE_OUTPUT)) - return output - - -def parse_role_config_group_result(role_config_group: ApiRoleConfigGroup) -> dict: - # Retrieve only the service identifier - output = dict(service_name=role_config_group.service_ref.service_name) - output.update(_parse_output(role_config_group.to_dict(), ROLE_CONFIG_GROUP)) - return output - - def normalize_values(add: dict) -> dict: """Normalize parameter values. 
Strings have whitespace trimmed, integers are converted to strings, and Boolean values are converted their string representation diff --git a/plugins/module_utils/data_context_utils.py b/plugins/module_utils/data_context_utils.py index 4b3f54f7..be4f7c57 100644 --- a/plugins/module_utils/data_context_utils.py +++ b/plugins/module_utils/data_context_utils.py @@ -17,7 +17,7 @@ """ from ansible_collections.cloudera.cluster.plugins.module_utils.cm_utils import ( - _parse_output, + normalize_output, ) from cm_client import ApiDataContextList @@ -38,9 +38,9 @@ ] -def _parse_output(data: dict, keys: list) -> dict: +def normalize_output(data: dict, keys: list) -> dict: return {key: data[key] for key in keys if key in data} def parse_data_context_result(data_contexts: ApiDataContextList) -> list: - return [_parse_output(item, DATA_CONTEXT_OUTPUT) for item in data_contexts.items] + return [normalize_output(item, DATA_CONTEXT_OUTPUT) for item in data_contexts.items] diff --git a/plugins/module_utils/parcel_utils.py b/plugins/module_utils/parcel_utils.py index 88d13793..38a50c5a 100644 --- a/plugins/module_utils/parcel_utils.py +++ b/plugins/module_utils/parcel_utils.py @@ -23,7 +23,7 @@ from cm_client import ApiParcel, ParcelResourceApi from ansible_collections.cloudera.cluster.plugins.module_utils.cm_utils import ( - _parse_output, + normalize_output, ) @@ -138,5 +138,5 @@ def activate(self): def parse_parcel_result(parcel: ApiParcel) -> dict: # Retrieve only the cluster identifier output = dict(cluster_name=parcel.cluster_ref.cluster_name) - output.update(_parse_output(parcel.to_dict(), PARCEL)) + output.update(normalize_output(parcel.to_dict(), PARCEL)) return output diff --git a/plugins/module_utils/role_config_group_utils.py b/plugins/module_utils/role_config_group_utils.py new file mode 100644 index 00000000..b17e8160 --- /dev/null +++ b/plugins/module_utils/role_config_group_utils.py @@ -0,0 +1,35 @@ +# Copyright 2024 Cloudera, Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from ansible_collections.cloudera.cluster.plugins.module_utils.cm_utils import ( + normalize_output, +) + +from cm_client import ApiRoleConfigGroup + + +ROLE_CONFIG_GROUP = [ + "name", + "role_type", + "base", + "display_name", + # "service_ref", +] + + +def parse_role_config_group_result(role_config_group: ApiRoleConfigGroup) -> dict: + # Retrieve only the service identifier + output = dict(service_name=role_config_group.service_ref.service_name) + output.update(normalize_output(role_config_group.to_dict(), ROLE_CONFIG_GROUP)) + return output diff --git a/plugins/module_utils/role_utils.py b/plugins/module_utils/role_utils.py new file mode 100644 index 00000000..55bb463b --- /dev/null +++ b/plugins/module_utils/role_utils.py @@ -0,0 +1,48 @@ +# Copyright 2024 Cloudera, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from ansible_collections.cloudera.cluster.plugins.module_utils.cm_utils import ( + normalize_output, +) + +from cm_client import ApiRole + +ROLE_OUTPUT = [ + "commission_state", + "config_staleness_status", + "ha_status", + "health_checks", + "health_summary", + # "host_ref", + "maintenance_mode", + "maintenance_owners", + "name", + # "role_config_group_ref", + "role_state", + # "service_ref", + "tags", + "type", + "zoo_keeper_server_mode", +] + + +def parse_role_result(role: ApiRole) -> dict: + # Retrieve only the host_id, role_config_group, and service identifiers + output = dict( + host_id=role.host_ref.host_id, + role_config_group_name=role.role_config_group_ref.role_config_group_name, + service_name=role.service_ref.service_name, + ) + output.update(normalize_output(role.to_dict(), ROLE_OUTPUT)) + return output diff --git a/plugins/module_utils/service_utils.py b/plugins/module_utils/service_utils.py index c11a2d79..9e65bff3 100644 --- a/plugins/module_utils/service_utils.py +++ b/plugins/module_utils/service_utils.py @@ -17,7 +17,7 @@ """ from ansible_collections.cloudera.cluster.plugins.module_utils.cm_utils import ( - _parse_output, + normalize_output, resolve_parameter_updates, ) @@ -47,10 +47,15 @@ def parse_service_result(service: ApiService) -> dict: # Retrieve only the cluster_name output = dict(cluster_name=service.cluster_ref.cluster_name) - output.update(_parse_output(service.to_dict(), SERVICE_OUTPUT)) + output.update(normalize_output(service.to_dict(), SERVICE_OUTPUT)) return output +def parse_cm_service_result(service: ApiService) -> dict: + # Ignore cluster_name + return normalize_output(service.to_dict(), SERVICE_OUTPUT) + + class ServiceConfigUpdates(object): def __init__(self, existing: ApiServiceConfig, updates: dict, purge: bool) -> None: current = {r.name: r.value for r in existing.items} diff --git a/plugins/modules/service_role.py b/plugins/modules/service_role.py index f9bed4a1..d92ca533 100644 --- a/plugins/modules/service_role.py 
+++ b/plugins/modules/service_role.py @@ -16,9 +16,11 @@ from ansible_collections.cloudera.cluster.plugins.module_utils.cm_utils import ( ClouderaManagerMutableModule, - parse_role_result, resolve_tag_updates, ) +from ansible_collections.cloudera.cluster.plugins.module_utils.role_utils import ( + parse_role_result, +) from cm_client import ( ApiEntityTag, @@ -34,6 +36,7 @@ ) from cm_client.rest import ApiException + ANSIBLE_METADATA = { "metadata_version": "1.1", "status": ["preview"], diff --git a/plugins/modules/service_role_config_group.py b/plugins/modules/service_role_config_group.py index 5d1f4449..b54ffeef 100644 --- a/plugins/modules/service_role_config_group.py +++ b/plugins/modules/service_role_config_group.py @@ -16,6 +16,9 @@ from ansible_collections.cloudera.cluster.plugins.module_utils.cm_utils import ( ClouderaManagerMutableModule, +) + +from ansible_collections.cloudera.cluster.plugins.module_utils.role_config_group_utils import ( parse_role_config_group_result, ) diff --git a/plugins/modules/service_role_config_group_config_info.py b/plugins/modules/service_role_config_group_config_info.py index fc127dc8..ba25a6cb 100644 --- a/plugins/modules/service_role_config_group_config_info.py +++ b/plugins/modules/service_role_config_group_config_info.py @@ -16,7 +16,6 @@ from ansible_collections.cloudera.cluster.plugins.module_utils.cm_utils import ( ClouderaManagerModule, - parse_role_config_group_result, ) from cm_client import ( @@ -26,6 +25,7 @@ ) from cm_client.rest import ApiException + ANSIBLE_METADATA = { "metadata_version": "1.1", "status": ["preview"], diff --git a/plugins/modules/service_role_config_group_info.py b/plugins/modules/service_role_config_group_info.py index 46e95af4..cc71314b 100644 --- a/plugins/modules/service_role_config_group_info.py +++ b/plugins/modules/service_role_config_group_info.py @@ -16,6 +16,9 @@ from ansible_collections.cloudera.cluster.plugins.module_utils.cm_utils import ( ClouderaManagerModule, +) + +from 
ansible_collections.cloudera.cluster.plugins.module_utils.role_config_group_utils import ( parse_role_config_group_result, ) @@ -26,6 +29,7 @@ ) from cm_client.rest import ApiException + ANSIBLE_METADATA = { "metadata_version": "1.1", "status": ["preview"], diff --git a/plugins/modules/service_role_info.py b/plugins/modules/service_role_info.py index c0e1f63f..9581a8bb 100644 --- a/plugins/modules/service_role_info.py +++ b/plugins/modules/service_role_info.py @@ -18,12 +18,16 @@ from ansible_collections.cloudera.cluster.plugins.module_utils.cm_utils import ( ClouderaManagerModule, +) + +from ansible_collections.cloudera.cluster.plugins.module_utils.role_utils import ( parse_role_result, ) from cm_client import ClustersResourceApi, RolesResourceApi, ServicesResourceApi from cm_client.rest import ApiException + ANSIBLE_METADATA = { "metadata_version": "1.1", "status": ["preview"], From e72cb3b2fbc29c4a05169ef3893eb2513ffb447d Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Tue, 17 Dec 2024 09:24:24 -0500 Subject: [PATCH 04/31] Enable service-wide configuration management. Update return object. Signed-off-by: Webster Mudge --- plugins/modules/cm_service.py | 536 +++++++++++++++++++--------------- 1 file changed, 302 insertions(+), 234 deletions(-) diff --git a/plugins/modules/cm_service.py b/plugins/modules/cm_service.py index 921981cf..6528169d 100644 --- a/plugins/modules/cm_service.py +++ b/plugins/modules/cm_service.py @@ -1,3 +1,6 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright 2024 Cloudera, Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -12,24 +15,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from ansible_collections.cloudera.cluster.plugins.module_utils.cm_utils import ( - ClouderaManagerModule, -) - -from cm_client.rest import ApiException -from cm_client import MgmtRolesResourceApi -from cm_client import MgmtServiceResourceApi -from cm_client import MgmtRoleCommandsResourceApi -from cm_client import HostsResourceApi - -ANSIBLE_METADATA = { - "metadata_version": "1.1", - "status": ["preview"], - "supported_by": "community", -} - DOCUMENTATION = r""" ---- module: cm_service short_description: Manage Cloudera Manager service roles description: @@ -68,7 +54,6 @@ """ EXAMPLES = r""" ---- - name: Start Cloudera Manager service roles cloudera.cluster.cm_version: host: "10.10.10.10" @@ -114,8 +99,7 @@ """ RETURN = r""" ---- -cloudera_manager: +service: description: List of Cloudera Manager roles type: dict contains: @@ -185,241 +169,325 @@ returned: optional """ +import json + +from cm_client import ( + HostsResourceApi, + MgmtRolesResourceApi, + MgmtRoleConfigGroupsResourceApi, + MgmtRoleCommandsResourceApi, + MgmtServiceResourceApi, +) +from cm_client.rest import ApiException + +from ansible_collections.cloudera.cluster.plugins.module_utils.cm_utils import ( + ClouderaManagerMutableModule, +) +from ansible_collections.cloudera.cluster.plugins.module_utils.service_utils import ( + ServiceConfigUpdates, + parse_cm_service_result, +) +from ansible_collections.cloudera.cluster.plugins.module_utils.role_utils import ( + parse_role_result, +) +from ansible_collections.cloudera.cluster.plugins.module_utils.role_config_group_utils import ( + parse_role_config_group_result, +) + -class ClouderaService(ClouderaManagerModule): +class ClouderaManagerService(ClouderaManagerMutableModule): def __init__(self, module): - super(ClouderaService, self).__init__(module) + super(ClouderaManagerService, self).__init__(module) - self.role = self.get_param("role") + # Set the parameters + self.params = self.get_param("parameters") + self.roles = self.get_param("roles") 
self.state = self.get_param("state") self.purge = self.get_param("purge") + self.view = self.get_param("view") + + # Initialize the return value + self.changed = False + self.cm_service = {} + + if self.module._diff: + self.diff = dict(before=dict(), after=dict()) + else: + self.diff = {} + + # Execute the logic self.process() - @ClouderaManagerModule.handle_process + @ClouderaManagerMutableModule.handle_process def process(self): - try: - api_instance = MgmtServiceResourceApi(self.api_client) - role_api_instance = MgmtRolesResourceApi(self.api_client) - role_cmd_api_instance = MgmtRoleCommandsResourceApi(self.api_client) - mgmt_service_api_instance = MgmtServiceResourceApi(self.api_client) - host_api_instance = HostsResourceApi(self.api_client) - - get_host_infomation = host_api_instance.read_hosts().to_dict() - for item in get_host_infomation["items"]: - if self.host == item["hostname"]: - host_id = item["host_id"] - - if not self.purge: - available_roles_info = role_api_instance.read_roles().to_dict() - existing_roles = [] - for item in available_roles_info["items"]: - existing_roles.append(item["type"]) - - if self.state in ["present"]: - not_existing_roles = [] - for role in self.role: - if role not in existing_roles: - not_existing_roles.append(role) - if not_existing_roles: - body = { - "items": [ - {"type": role, "hostRef": {"hostId": host_id}} - for role in not_existing_roles - ] - } - role_api_instance.create_roles(body=body) - self.cm_service_output = role_api_instance.read_roles().to_dict() - self.changed = True - elif self.state in ["absent"]: - roles_to_remove = [ - role for role in self.role if role in existing_roles - ] - roles_to_remove_extended_info = [] - for role in roles_to_remove: - for item in available_roles_info["items"]: - if role == item["type"]: - roles_to_remove_extended_info.append(item["name"]) - if not roles_to_remove_extended_info: - self.cm_service_output = ( - role_api_instance.read_roles().to_dict() - ) - self.changed = False - 
else: - for role in roles_to_remove_extended_info: - role_api_instance.delete_role(role_name=role) - self.cm_service_output = ( - role_api_instance.read_roles().to_dict() + service_api = MgmtServiceResourceApi(self.api_client) + role_api = MgmtRolesResourceApi(self.api_client) + role_cmd_api = MgmtRoleCommandsResourceApi(self.api_client) + rcg_api = MgmtRoleConfigGroupsResourceApi(self.api_client) + host_api = HostsResourceApi(self.api_client) + + # Manage service-wide configurations + if self.params or self.purge: + try: + existing_params = service_api.read_service_config() + except ApiException as ex: + if ex.status == 404: + self.module.fail_json(msg=json.loads(ex.body)["message"]) + else: + raise ex + + service_wide = ServiceConfigUpdates( + existing_params, self.params, self.purge + ) + + if service_wide.changed: + self.changed = True + + if self.module._diff: + self.diff["before"].update(params=service_wide.diff["before"]) + self.diff["after"].update(params=service_wide.diff["after"]) + + if not self.module.check_mode: + service_api.update_service_config( + message=self.message, body=service_wide.config + ) + + # Manage roles + if self.roles: + try: + # Get a list of all host and find itself + # This is hardcoded, so needs to be broken into host + # assignment per-role + hosts = host_api.read_hosts() + for h in hosts.items(): + if self.host == h.hostname: + host_id = h.host_id + + # CHECK MODE + if not self.purge: + available_roles_info = role_api.read_roles().to_dict() + existing_roles = [] + for item in available_roles_info["items"]: + existing_roles.append(item["type"]) + + if self.state in ["present"]: + not_existing_roles = [] + for role in self.roles: + if role not in existing_roles: + not_existing_roles.append(role) + if not_existing_roles: + body = { + "items": [ + {"type": role, "hostRef": {"hostId": host_id}} + for role in not_existing_roles + ] + } + role_api.create_roles(body=body) + self.cm_service = parse_cm_service_result( + 
service_api.read_service() ) self.changed = True - elif self.state in ["started"]: - - matching_roles = [] - new_roles = [] - for role in self.role: - if role in existing_roles: - matching_roles.append(role) + elif self.state in ["absent"]: + roles_to_remove = [ + role for role in self.roles if role in existing_roles + ] + roles_to_remove_extended_info = [] + for role in roles_to_remove: + for item in available_roles_info["items"]: + if role == item["type"]: + roles_to_remove_extended_info.append(item["name"]) + if not roles_to_remove_extended_info: + self.cm_service = role_api.read_roles().to_dict() + self.changed = False + else: + for role in roles_to_remove_extended_info: + role_api.delete_role(role_name=role) + self.cm_service = role_api.read_roles().to_dict() + self.changed = True + + elif self.state in ["started"]: + + matching_roles = [] + new_roles = [] + for role in self.roles: + if role in existing_roles: + matching_roles.append(role) + else: + new_roles.append(role) + + new_roles_to_start = [] + if new_roles: + body = { + "items": [ + {"type": role, "hostRef": {"hostId": host_id}} + for role in new_roles + ] + } + newly_added_roles = role_api.create_roles( + body=body + ).to_dict() + + for role in newly_added_roles["items"]: + new_roles_to_start.append(role["name"]) + body = {"items": new_roles_to_start} + + existing_roles_state = [] + for role in matching_roles: + for item in available_roles_info["items"]: + if role == item["type"]: + existing_roles_state.append( + { + "type": item["type"], + "role_state": item["role_state"].lower(), + "name": item["name"], + } + ) + + existing_roles_to_start = [] + for role in existing_roles_state: + if role["role_state"] == "stopped": + existing_roles_to_start.append(role["name"]) + + all_roles_to_start = ( + new_roles_to_start + existing_roles_to_start + ) + body = {"items": all_roles_to_start} + + if all_roles_to_start: + start_roles_request = role_cmd_api.start_command( + body=body + ).to_dict() + command_id = 
start_roles_request["items"][0]["id"] + self.wait_for_command_state( + command_id=command_id, polling_interval=5 + ) + self.cm_service = role_api.read_roles().to_dict() + self.changed = True + else: + self.cm_service = role_api.read_roles().to_dict() + self.changed = False + + elif self.state in ["stopped"]: + matching_roles = [] + for role in self.roles: + if role in existing_roles: + matching_roles.append(role) + + matching_roles_state = [] + for role in matching_roles: + for item in available_roles_info["items"]: + if role == item["type"]: + matching_roles_state.append( + { + "type": item["type"], + "role_state": item["role_state"].lower(), + "name": item["name"], + } + ) + + roles_to_stop = [] + for role in matching_roles_state: + if role["role_state"] == "started": + roles_to_stop.append(role["name"]) + body = {"items": roles_to_stop} + + if roles_to_stop: + role_cmd_api.stop_command(body=body) + self.cm_service = role_api.read_roles().to_dict() + self.changed = True else: - new_roles.append(role) - - new_roles_to_start = [] - if new_roles: - body = { - "items": [ - {"type": role, "hostRef": {"hostId": host_id}} - for role in new_roles - ] - } - newly_added_roles = role_api_instance.create_roles( - body=body - ).to_dict() - - for role in newly_added_roles["items"]: - new_roles_to_start.append(role["name"]) - body = {"items": new_roles_to_start} - - existing_roles_state = [] - for role in matching_roles: - for item in available_roles_info["items"]: - if role == item["type"]: - existing_roles_state.append( - { - "type": item["type"], - "role_state": item["role_state"].lower(), - "name": item["name"], - } - ) - - existing_roles_to_start = [] - for role in existing_roles_state: - if role["role_state"] == "stopped": - existing_roles_to_start.append(role["name"]) - - all_roles_to_start = new_roles_to_start + existing_roles_to_start - body = {"items": all_roles_to_start} - - if all_roles_to_start: - start_roles_request = role_cmd_api_instance.start_command( - 
body=body - ).to_dict() - command_id = start_roles_request["items"][0]["id"] + self.cm_service = role_api.read_roles().to_dict() + self.changed = False + + elif self.state in ["restarted"]: + matching_roles = [] + for role in self.roles: + if role in existing_roles: + matching_roles.append(role) + + matching_roles_state = [] + for role in matching_roles: + for item in available_roles_info["items"]: + if role == item["type"]: + matching_roles_state.append( + { + "type": item["type"], + "role_state": item["role_state"].lower(), + "name": item["name"], + } + ) + + roles_to_restart = [] + for role in matching_roles_state: + roles_to_restart.append(role["name"]) + body = {"items": roles_to_restart} + + if roles_to_restart: + role_cmd_api.restart_command(body=body) + self.cm_service = role_api.read_roles().to_dict() + self.changed = True + + if self.purge: + service_api.delete_cms() + body = {"roles": [{"type": role} for role in self.roles]} + service_api.setup_cms(body=body) + self.cm_service = role_api.read_roles().to_dict() + + if self.state in ["started"]: + start_roles_request = service_api.start_command().to_dict() + command_id = start_roles_request["id"] self.wait_for_command_state( command_id=command_id, polling_interval=5 ) - self.cm_service_output = ( - role_api_instance.read_roles().to_dict() + self.cm_service = role_api.read_roles().to_dict() + self.changed = True + except ApiException as e: + if e.status == 404 or 400: + roles_dict = {"roles": [{"type": role} for role in self.roles]} + service_api.setup_cms(body=roles_dict) + + if self.state in ["started"]: + start_roles_request = service_api.start_command().to_dict() + command_id = start_roles_request["id"] + self.wait_for_command_state( + command_id=command_id, polling_interval=5 ) - self.changed = True + self.cm_service = role_api.read_roles().to_dict() else: - self.cm_service_output = ( - role_api_instance.read_roles().to_dict() - ) - self.changed = False - - elif self.state in ["stopped"]: - 
matching_roles = [] - for role in self.role: - if role in existing_roles: - matching_roles.append(role) - - matching_roles_state = [] - for role in matching_roles: - for item in available_roles_info["items"]: - if role == item["type"]: - matching_roles_state.append( - { - "type": item["type"], - "role_state": item["role_state"].lower(), - "name": item["name"], - } - ) - - roles_to_stop = [] - for role in matching_roles_state: - if role["role_state"] == "started": - roles_to_stop.append(role["name"]) - body = {"items": roles_to_stop} - - if roles_to_stop: - role_cmd_api_instance.stop_command(body=body) - self.cm_service_output = ( - role_api_instance.read_roles().to_dict() - ) - self.changed = True - else: - self.cm_service_output = ( - role_api_instance.read_roles().to_dict() - ) - self.changed = False - - elif self.state in ["restarted"]: - matching_roles = [] - for role in self.role: - if role in existing_roles: - matching_roles.append(role) - - matching_roles_state = [] - for role in matching_roles: - for item in available_roles_info["items"]: - if role == item["type"]: - matching_roles_state.append( - { - "type": item["type"], - "role_state": item["role_state"].lower(), - "name": item["name"], - } - ) - - roles_to_restart = [] - for role in matching_roles_state: - roles_to_restart.append(role["name"]) - body = {"items": roles_to_restart} - - if roles_to_restart: - role_cmd_api_instance.restart_command(body=body) - self.cm_service_output = ( - role_api_instance.read_roles().to_dict() - ) - self.changed = True - - if self.purge: - mgmt_service_api_instance.delete_cms() - body = {"roles": [{"type": role} for role in self.role]} - mgmt_service_api_instance.setup_cms(body=body) - self.cm_service_output = role_api_instance.read_roles().to_dict() - - if self.state in ["started"]: - start_roles_request = api_instance.start_command().to_dict() - command_id = start_roles_request["id"] - self.wait_for_command_state( - command_id=command_id, polling_interval=5 - ) - 
self.cm_service_output = role_api_instance.read_roles().to_dict() - self.changed = True - - except ApiException as e: - if e.status == 404 or 400: - roles_dict = {"roles": [{"type": role} for role in self.role]} - api_instance.setup_cms(body=roles_dict) + self.cm_service = role_api.read_roles().to_dict() + self.changed = True - if self.state in ["started"]: - start_roles_request = api_instance.start_command().to_dict() - command_id = start_roles_request["id"] - self.wait_for_command_state( - command_id=command_id, polling_interval=5 - ) - self.cm_service_output = role_api_instance.read_roles().to_dict() - else: - self.cm_service_output = role_api_instance.read_roles().to_dict() - self.changed = True + # Read and generate payload for Cloudera Manager Service + self.cm_service = parse_cm_service_result(service_api.read_service()) + self.cm_service.update( + config=[ + c.to_dict() + for c in service_api.read_service_config(view=self.view).items + ] + ) + self.cm_service.update( + roles=[parse_role_result(r) for r in role_api.read_roles().items] + ) + self.cm_service.update( + role_config_groups=[ + parse_role_config_group_result(rcg) + for rcg in rcg_api.read_role_config_groups().items + ] + ) def main(): - module = ClouderaManagerModule.ansible_module( + module = ClouderaManagerMutableModule.ansible_module( argument_spec=dict( - role=dict(required=True, type="list"), - purge=dict(required=False, type="bool", default=False), + parameters=dict(type="dict", aliases=["params"]), + roles=dict(type="list"), + purge=dict(type="bool", default=False), + view=dict( + default="summary", + choices=["summary", "full"], + ), state=dict( type="str", default="started", @@ -429,13 +497,13 @@ def main(): supports_check_mode=False, ) - result = ClouderaService(module) + result = ClouderaManagerService(module) changed = result.changed output = dict( changed=changed, - cloudera_manager=result.cm_service_output, + service=result.cm_service, ) if result.debug: From 
7612a7e76325097c70e25be4bfcc734f22ad62f7 Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Tue, 17 Dec 2024 09:24:24 -0500 Subject: [PATCH 05/31] Update to use pytest fixtures for Cloudera Manager Service Signed-off-by: Webster Mudge --- .../modules/cm_service/test_cm_service.py | 147 ++++++++++++++++-- 1 file changed, 134 insertions(+), 13 deletions(-) diff --git a/tests/unit/plugins/modules/cm_service/test_cm_service.py b/tests/unit/plugins/modules/cm_service/test_cm_service.py index 5614fe61..af679312 100644 --- a/tests/unit/plugins/modules/cm_service/test_cm_service.py +++ b/tests/unit/plugins/modules/cm_service/test_cm_service.py @@ -17,7 +17,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -import os + import logging import pytest @@ -30,23 +30,144 @@ LOG = logging.getLogger(__name__) -def test_pytest_cm_service(module_args): +def test_minimal(conn, module_args, cms): + module_args(conn) + + with pytest.raises(AnsibleExitJson): + cm_service.main() + + +@pytest.mark.service_config(dict(log_event_retry_frequency=10)) +def test_set_parameters(conn, module_args, cms_service_config): + module_args( + { + **conn, + "parameters": dict(mgmt_emit_sensitive_data_in_stderr=True), + "message": "test_cm_service::test_set_parameters", + # _ansible_check_mode=True, + # _ansible_diff=True, + } + ) + + expected = dict( + mgmt_emit_sensitive_data_in_stderr="True", log_event_retry_frequency="10" + ) + + with pytest.raises(AnsibleExitJson) as e: + cm_service.main() + + assert e.value.changed == True + assert ( + expected.items() + <= {c["name"]: c["value"] for c in e.value.service["config"]}.items() + ) + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service.main() + + assert e.value.changed == False + assert ( + expected.items() + <= {c["name"]: c["value"] for c in e.value.service["config"]}.items() + ) + + +@pytest.mark.service_config( + dict(mgmt_emit_sensitive_data_in_stderr=True, log_event_retry_frequency=10) +) 
+def test_unset_parameters(conn, module_args, cms_service_config): + module_args( + { + **conn, + "parameters": dict(mgmt_emit_sensitive_data_in_stderr=None), + "message": "test_cm_service::test_unset_parameters", + } + ) + + expected = dict(log_event_retry_frequency="10") + + with pytest.raises(AnsibleExitJson) as e: + cm_service.main() + + assert e.value.changed == True + assert ( + expected.items() + <= {c["name"]: c["value"] for c in e.value.service["config"]}.items() + ) + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service.main() + + assert e.value.changed == False + assert ( + expected.items() + <= {c["name"]: c["value"] for c in e.value.service["config"]}.items() + ) + + +@pytest.mark.service_config( + dict(mgmt_emit_sensitive_data_in_stderr=True, log_event_retry_frequency=10) +) +def test_set_parameters_with_purge(conn, module_args, cms_service_config): + module_args( + { + **conn, + "parameters": dict(mgmt_emit_sensitive_data_in_stderr=True), + "purge": True, + "message": "test_cm_service::test_set_parameters_with_purge", + # _ansible_check_mode=True, + # _ansible_diff=True, + } + ) + + expected = dict(mgmt_emit_sensitive_data_in_stderr="True") + + with pytest.raises(AnsibleExitJson) as e: + cm_service.main() + + assert e.value.changed == True + assert ( + expected.items() + <= {c["name"]: c["value"] for c in e.value.service["config"]}.items() + ) + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service.main() + + assert e.value.changed == False + assert ( + expected.items() + <= {c["name"]: c["value"] for c in e.value.service["config"]}.items() + ) + + +@pytest.mark.service_config( + dict(mgmt_emit_sensitive_data_in_stderr=True, log_event_retry_frequency=10) +) +def test_purge_all_parameters(conn, module_args, cms_service_config): module_args( { - "username": os.getenv("CM_USERNAME"), - "password": os.getenv("CM_PASSWORD"), - "host": os.getenv("CM_HOST"), - "port": "7180", - "verify_tls": "no", - "debug": "yes", - 
"state": "started", - "role": ["SERVICEMONITOR", "HOSTMONITOR", "EVENTSERVER", "ALERTPUBLISHER"], + **conn, + "parameters": dict(), + "purge": True, + "message": "test_cm_service::test_purge_all_parameters", + # _ansible_check_mode=True, + # _ansible_diff=True, } ) - # with pytest.raises(AnsibleFailJson, match=r"boom") as e: with pytest.raises(AnsibleExitJson) as e: cm_service.main() - # LOG.info(str(e.value)) - LOG.info(str(e.value.cloudera_manager)) + assert e.value.changed == True + assert len(e.value.service["config"]) == 0 + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service.main() + + assert e.value.changed == False + assert len(e.value.service["config"]) == 0 From 0b5c98f0b926f6f89684fcc5afd4e749499afc19 Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Tue, 17 Dec 2024 09:24:24 -0500 Subject: [PATCH 06/31] Update conn object from fixture Signed-off-by: Webster Mudge --- .../modules/cm_service_config/test_cm_service_config.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/tests/unit/plugins/modules/cm_service_config/test_cm_service_config.py b/tests/unit/plugins/modules/cm_service_config/test_cm_service_config.py index 9e208227..ad54716b 100644 --- a/tests/unit/plugins/modules/cm_service_config/test_cm_service_config.py +++ b/tests/unit/plugins/modules/cm_service_config/test_cm_service_config.py @@ -38,10 +38,12 @@ def test_missing_required(conn, module_args): def test_present_invalid_parameter(conn, module_args): - conn.update( - parameters=dict(example="Example"), + module_args( + { + **conn, + "parameters": dict(example="Example"), + } ) - module_args(conn) with pytest.raises( AnsibleFailJson, match="Unknown configuration attribute 'example'" From a91c59f5e412ff9a298d6b46bebcf90e34cbf0f7 Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Wed, 18 Dec 2024 12:14:28 -0500 Subject: [PATCH 07/31] Create generic configuration list reconcilation utility Signed-off-by: Webster Mudge --- 
plugins/module_utils/cm_utils.py | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/plugins/module_utils/cm_utils.py b/plugins/module_utils/cm_utils.py index c892b229..c55d523c 100644 --- a/plugins/module_utils/cm_utils.py +++ b/plugins/module_utils/cm_utils.py @@ -34,6 +34,7 @@ from cm_client import ( ApiClient, ApiCommand, + ApiConfig, ApiConfigList, Configuration, ) @@ -145,6 +146,25 @@ def resolve_tag_updates( return (delta_add, delta_del) +class ConfigListUpdates(object): + def __init__(self, existing: ApiConfigList, updates: dict, purge: bool) -> None: + current = {r.name: r.value for r in existing.items} + changeset = resolve_parameter_updates(current, updates, purge) + + self.diff = dict( + before={k: current[k] if k in current else None for k in changeset.keys()}, + after=changeset, + ) + + self.config = ApiConfigList( + items=[ApiConfig(name=k, value=v) for k, v in changeset.items()] + ) + + @property + def changed(self) -> bool: + return bool(self.config.items) + + class ClusterTemplate(object): IDEMPOTENT_IDS = frozenset( ["refName", "name", "clusterName", "hostName", "product"] From fdfbfb1ecbc469836e90eb77802f4903f0633ebb Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Wed, 18 Dec 2024 12:17:43 -0500 Subject: [PATCH 08/31] Add cm_service_role_config module Signed-off-by: Webster Mudge --- plugins/modules/cm_service_role_config.py | 322 +++++++++++++++ pyproject.toml | 3 + tests/unit/__init__.py | 84 ++++ tests/unit/conftest.py | 6 +- .../test_cm_service_role_config.py | 365 ++++++++++++++++++ 5 files changed, 778 insertions(+), 2 deletions(-) create mode 100644 plugins/modules/cm_service_role_config.py create mode 100644 tests/unit/plugins/modules/cm_service_role_config/test_cm_service_role_config.py diff --git a/plugins/modules/cm_service_role_config.py b/plugins/modules/cm_service_role_config.py new file mode 100644 index 00000000..fc6efbf3 --- /dev/null +++ b/plugins/modules/cm_service_role_config.py @@ -0,0 +1,322 @@ 
+#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright 2024 Cloudera, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +DOCUMENTATION = r""" +module: cm_service_role_config +short_description: Manage a service role configuration in cluster +description: + - Manage a service role configuration (role-specific) in a cluster. +author: + - "Webster Mudge (@wmudge)" +requirements: + - cm-client +options: + name: + description: + - A Cloudera Manager Service role name to manage. + - One of C(name) or C(type) is required. + type: str + aliases: + - role_name + - role + type: + description: + - A Cloudera Manager Service role type to manage. + - One of C(name) or C(type) is required. + type: str + aliases: + - role_type + parameters: + description: + - The role-specific configuration to set, i.e. role overrides. + - To unset a parameter, use C(None) as the value. + type: dict + required: yes + aliases: + - params + purge: + description: + - Flag for whether the declared parameters should append or overwrite any existing parameters. + - To clear all parameters, set I(parameters={}), i.e. an empty dictionary, and I(purge=True). + type: bool + default: False + view: + description: + - The view to return. 
+ type: str + default: summary + choices: + - summary + - full +extends_documentation_fragment: + - ansible.builtin.action_common_attributes + - cloudera.cluster.cm_options + - cloudera.cluster.cm_endpoint + - cloudera.cluster.purge + - cloudera.cluster.message +attributes: + check_mode: + support: full + diff_mode: + support: full + platform: + platforms: all +""" + +EXAMPLES = r""" +- name: Update (append) Cloudera manager Service Host Monitor role parameters + cloudera.cluster.cm_service_role_config: + host: example.cloudera.com + username: "jane_smith" + password: "S&peR4Ec*re" + type: HOSTMONITOR + parameters: + a_configuration: "schema://host:port" + another_configuration: 234 + +- name: Reset a Cloudera manager Service Host Monitor role parameter + cloudera.cluster.cm_service_role_config: + host: example.cloudera.com + username: "jane_smith" + password: "S&peR4Ec*re" + name: "a-non-default-role-name" + parameters: + more_configuration: None + +- name: Update (with purge) Cloudera manager Service Host Monitor role parameters + cloudera.cluster.cluster_service_role_config: + host: example.cloudera.com + username: "jane_smith" + password: "S&peR4Ec*re" + type: HOSTMONITOR + parameters: + config_one: None + config_two: ValueTwo + config_three: 2345 + +- name: Reset all Cloudera manager Service Host Monitor role parameters + cloudera.cluster.cluster_service_role_config: + host: example.cloudera.com + username: "jane_smith" + password: "S&peR4Ec*re" + type: HOSTMONITOR + parameters: {} + purge: yes +""" + +RETURN = r""" +config: + description: + - List of Cloudera Manager Service role configurations. + - Returns the C(summary) view of the resulting configuration. + type: list + elements: dict + returned: always + contains: + name: + description: + - The canonical name that identifies this configuration parameter. + type: str + returned: when supported + value: + description: + - The user-defined value. + - When absent, the default value (if any) will be used. 
+ - Can also be absent, when enumerating allowed configs. + type: str + returned: when supported + required: + description: + - Whether this configuration is required for the object. + - If any required configuration is not set, operations on the object may not work. + - Requires I(full) view. + type: bool + returned: when supported + default: + description: + - The default value. + - Requires I(full) view. + type: str + returned: when supported + display_name: + description: + - A user-friendly name of the parameters, as would have been shown in the web UI. + - Requires I(full) view. + type: str + returned: when supported + description: + description: + - A textual description of the parameter. + - Requires I(full) view. + type: str + returned: when supported + related_name: + description: + - If applicable, contains the related configuration variable used by the source project. + - Requires I(full) view. + type: str + returned: when supported + sensitive: + description: + - Whether this configuration is sensitive, i.e. contains information such as passwords, which might affect how the value of this configuration might be shared by the caller. + type: bool + returned: when supported + validate_state: + description: + - State of the configuration parameter after validation. + - Requires I(full) view. + type: str + returned: when supported + validation_message: + description: + - A message explaining the parameter's validation state. + - Requires I(full) view. + type: str + returned: when supported + validation_warnings_suppressed: + description: + - Whether validation warnings associated with this parameter are suppressed. + - In general, suppressed validation warnings are hidden in the Cloudera Manager UI. + - Configurations that do not produce warnings will not contain this field. + - Requires I(full) view. 
+ type: bool + returned: when supported +""" + +import json + +from ansible_collections.cloudera.cluster.plugins.module_utils.cm_utils import ( + ClouderaManagerMutableModule, + ConfigListUpdates, +) + +from cm_client import MgmtRolesResourceApi +from cm_client.rest import ApiException + + +class ClouderaManagerServiceRoleConfig(ClouderaManagerMutableModule): + def __init__(self, module): + super(ClouderaManagerServiceRoleConfig, self).__init__(module) + + # Set the parameters + self.name = self.get_param("name") + self.type = self.get_param("type") + self.params = self.get_param("parameters") + self.purge = self.get_param("purge") + self.view = self.get_param("view") + + # Initialize the return value + self.changed = False + self.diff = {} + self.config = [] + + # Execute the logic + self.process() + + @ClouderaManagerMutableModule.handle_process + def process(self): + refresh = True + role_api = MgmtRolesResourceApi(self.api_client) + + try: + if self.name is None: + role = next( + iter( + [r for r in role_api.read_roles().items if r.type == self.type] + ), + None, + ) + if role is None: + self.module.fail_json( + msg=f"Unable to find Cloudera Manager Service role type '{self.type}" + ) + else: + self.name = role.name + + # For some reason, the call to read_roles() doesn't retrieve the configuration + existing = role_api.read_role_config(self.name) + except ApiException as ex: + if ex.status == 404: + self.module.fail_json(msg=json.loads(ex.body)["message"]) + else: + raise ex + + updates = ConfigListUpdates(existing, self.params, self.purge) + + if updates.changed: + self.changed = True + + if self.module._diff: + self.diff = updates.diff + + if not self.module.check_mode: + self.config = [ + p.to_dict() + for p in role_api.update_role_config( + self.name, + message=self.message, + body=updates.config, + ).items + ] + + if self.view == "full": + refresh = False + + if refresh: + self.config = [ + p.to_dict() + for p in role_api.read_role_config(self.name, 
view=self.view).items + ] + + +def main(): + module = ClouderaManagerMutableModule.ansible_module( + argument_spec=dict( + name=dict(aliases=["role_name", "role"]), + type=dict(aliases=["role_type"]), + parameters=dict(type="dict", required=True, aliases=["params"]), + purge=dict(type="bool", default=False), + view=dict( + default="summary", + choices=["summary", "full"], + ), + ), + required_one_of=[ + ["name", "type"], + ], + supports_check_mode=True, + ) + + result = ClouderaManagerServiceRoleConfig(module) + + output = dict( + changed=result.changed, + config=result.config, + ) + + if module._diff: + output.update(diff=result.diff) + + if result.debug: + log = result.log_capture.getvalue() + output.update(debug=log, debug_lines=log.split("\n")) + + module.exit_json(**output) + + +if __name__ == "__main__": + main() diff --git a/pyproject.toml b/pyproject.toml index e46f1c79..716e06c4 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -51,9 +51,12 @@ testpaths = [ filterwarnings = [ "ignore:AnsibleCollectionFinder has already been configured", "ignore:'crypt' is deprecated and slated for removal in Python 3.13:DeprecationWarning", + "ignore:URLs without a scheme:DeprecationWarning", + "ignore:HTTPResponse.getheaders():DeprecationWarning", ] markers = [ "service_config: Prepare service-wide configurations for tests", + "role_config: Prepare role override configurations for tests", ] [build-system] diff --git a/tests/unit/__init__.py b/tests/unit/__init__.py index f593ed44..62a6e4a2 100644 --- a/tests/unit/__init__.py +++ b/tests/unit/__init__.py @@ -22,11 +22,15 @@ ApiCluster, ApiCommand, ApiConfig, + ApiConfigList, + ApiRole, + ApiRoleList, ApiService, ApiServiceConfig, ApiServiceList, ClustersResourceApi, CommandsResourceApi, + MgmtRolesResourceApi, ServicesResourceApi, ) from cm_client.rest import ApiException @@ -179,3 +183,83 @@ def service_wide_config( message=f"{message}::reset", body=ApiServiceConfig(items=reconciled), ) + + +def provision_cm_role( + 
api_client: ApiClient, role_name: str, role_type: str, host_id: str +) -> Generator[ApiRole]: + api = MgmtRolesResourceApi(api_client) + + role = ApiRole( + name=role_name, + type=role_type, + host_ref=dict(hostId=host_id), + ) + + yield next(iter(api.create_roles(body=ApiRoleList(items=[role])).items), None) + + api.delete_role(role_name=role_name) + + +def cm_role_config( + api_client: ApiClient, role: ApiRole, params: dict, message: str +) -> Generator[ApiRole]: + """Update a role configuration for a given role. Yields the + role, resetting the configuration to its prior state. Use with + 'yield from' within a pytest fixture. + + Args: + api_client (ApiClient): _description_ + role (ApiRole): _description_ + params (dict): _description_ + message (str): _description_ + + Raises: + Exception: _description_ + + Yields: + ApiRole: _description_ + """ + role_api = MgmtRolesResourceApi(api_client) + + # Retrieve all of the pre-setup configurations + pre = role_api.read_role_config(role.name) + + # Set the test configurations + # Do so serially, since a failed update due to defaults (see ApiException) will cause remaining + # configuration entries to not run. Long-term solution is to check-and-set, which is + # what the Ansible modules do... 
+ for k, v in params.items(): + try: + role_api.update_role_config( + role_name=role.name, + message=f"{message}::set", + body=ApiConfigList(items=[ApiConfig(name=k, value=v)]), + ) + except ApiException as ae: + if ae.status != 400 or "delete with template" not in str(ae.body): + raise Exception(str(ae)) + + # Yield the targeted role + yield role_api.read_role(role_name=role.name) + + # Retrieve all of the post-setup configurations + post = role_api.read_role_config(role_name=role.name) + + # Reconcile the configurations + pre_set = set([c.name for c in pre.items]) + + reconciled = pre.items.copy() + reconciled.extend( + [ + ApiConfig(name=k.name, value=None) + for k in post.items + if k.name not in pre_set + ] + ) + + role_api.update_role_config( + role_name=role.name, + message=f"{message}::reset", + body=ApiConfigList(items=reconciled), + ) diff --git a/tests/unit/conftest.py b/tests/unit/conftest.py index 268192db..41669cfe 100644 --- a/tests/unit/conftest.py +++ b/tests/unit/conftest.py @@ -26,6 +26,7 @@ import sys import yaml +from collections.abc import Generator from pathlib import Path from cm_client import ( @@ -40,6 +41,7 @@ ClustersResourceApi, Configuration, HostsResourceApi, + MgmtRolesResourceApi, MgmtServiceResourceApi, ParcelResourceApi, ParcelsResourceApi, @@ -250,7 +252,7 @@ def base_cluster(cm_api_client, request): @pytest.fixture(scope="session") -def cms(cm_api_client, request): +def cms(cm_api_client, request) -> Generator[ApiService]: """Provisions Cloudera Manager Service.""" api = MgmtServiceResourceApi(cm_api_client) @@ -275,7 +277,7 @@ def cms(cm_api_client, request): @pytest.fixture(scope="function") -def cms_service_config(cm_api_client, cms, request): +def cms_config(cm_api_client, cms, request) -> Generator[ApiService]: """Configures service-wide configurations for the Cloudera Manager Service""" marker = request.node.get_closest_marker("service_config") diff --git 
a/tests/unit/plugins/modules/cm_service_role_config/test_cm_service_role_config.py b/tests/unit/plugins/modules/cm_service_role_config/test_cm_service_role_config.py new file mode 100644 index 00000000..d230005a --- /dev/null +++ b/tests/unit/plugins/modules/cm_service_role_config/test_cm_service_role_config.py @@ -0,0 +1,365 @@ +# -*- coding: utf-8 -*- + +# Copyright 2024 Cloudera, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +import logging +import pytest + +from collections.abc import Generator +from pathlib import Path + +from cm_client import ( + ApiRole, + ClustersResourceApi, + MgmtRolesResourceApi, +) + +from ansible_collections.cloudera.cluster.plugins.modules import cm_service_role_config +from ansible_collections.cloudera.cluster.tests.unit import ( + AnsibleExitJson, + AnsibleFailJson, + provision_cm_role, + cm_role_config, +) + +LOG = logging.getLogger(__name__) + + +@pytest.fixture(scope="module") +def host_monitor(cm_api_client, cms, request) -> Generator[ApiRole]: + api = MgmtRolesResourceApi(cm_api_client) + + hm = next( + iter([r for r in api.read_roles().items if r.type == "HOSTMONITOR"]), None + ) + + if hm is not None: + yield hm + else: + cluster_api = ClustersResourceApi(cm_api_client) + + # Get first host of the cluster + hosts = cluster_api.list_hosts(cluster_name=cms.cluster_ref.cluster_name) + + if not hosts.items: + 
raise Exception( + "No available hosts to assign the Cloudera Manager Service role." + ) + else: + name = Path(request.fixturename).stem + yield from provision_cm_role( + cm_api_client, name, "HOSTMONITOR", hosts.items[0].hostId + ) + + +@pytest.fixture(scope="function") +def host_monitor_config(cm_api_client, host_monitor, request) -> Generator[ApiRole]: + marker = request.node.get_closest_marker("role_config") + + if marker is None: + raise Exception("No role_config marker found.") + + yield from cm_role_config( + api_client=cm_api_client, + role=host_monitor, + params=marker.args[0], + message=f"{Path(request.node.parent.name).stem}::{request.node.name}", + ) + + +def test_missing_required(conn, module_args): + module_args(conn) + + with pytest.raises(AnsibleFailJson, match="parameters"): + cm_service_role_config.main() + + +def test_missing_required_if(conn, module_args): + module_args( + { + **conn, + "parameters": dict(), + } + ) + + with pytest.raises(AnsibleFailJson, match="name, type"): + cm_service_role_config.main() + + +def test_present_invalid_parameter(conn, module_args, host_monitor): + module_args( + { + **conn, + "role": host_monitor.name, + "parameters": dict(example="Example"), + } + ) + + with pytest.raises( + AnsibleFailJson, match="Unknown configuration attribute 'example'" + ): + cm_service_role_config.main() + + +@pytest.mark.role_config( + dict(mgmt_num_descriptor_fetch_tries=11, process_start_secs=21) +) +def test_set_parameters(conn, module_args, host_monitor_config, request): + module_args( + { + **conn, + "role": host_monitor_config.name, + "parameters": dict(mgmt_num_descriptor_fetch_tries=32), + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + # _ansible_check_mode=True, + # _ansible_diff=True, + } + ) + + expected = dict(mgmt_num_descriptor_fetch_tries="32", process_start_secs="21") + + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config.main() + + assert e.value.changed == True + assert 
expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config.main() + + assert e.value.changed == False + assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + + +@pytest.mark.role_config( + dict(mgmt_num_descriptor_fetch_tries=11, process_start_secs=21) +) +def test_set_parameters_role_type(conn, module_args, host_monitor_config, request): + module_args( + { + **conn, + "role_type": host_monitor_config.type, + "parameters": dict(mgmt_num_descriptor_fetch_tries=32), + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + # _ansible_check_mode=True, + # _ansible_diff=True, + } + ) + + expected = dict(mgmt_num_descriptor_fetch_tries="32", process_start_secs="21") + + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config.main() + + assert e.value.changed == True + assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config.main() + + assert e.value.changed == False + assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + + +@pytest.mark.role_config( + dict(mgmt_num_descriptor_fetch_tries=11, process_start_secs=21) +) +def test_unset_parameters(conn, module_args, host_monitor_config, request): + module_args( + { + **conn, + "role": host_monitor_config.name, + "parameters": dict(mgmt_num_descriptor_fetch_tries=None), + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + } + ) + + expected = dict(process_start_secs="21") + + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config.main() + + assert e.value.changed == True + assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config.main() + + assert e.value.changed == False + 
assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + + +@pytest.mark.role_config( + dict(mgmt_num_descriptor_fetch_tries=11, process_start_secs=21) +) +def test_unset_parameters_role_type(conn, module_args, host_monitor_config, request): + module_args( + { + **conn, + "type": host_monitor_config.type, + "parameters": dict(mgmt_num_descriptor_fetch_tries=None), + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + } + ) + + expected = dict(process_start_secs="21") + + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config.main() + + assert e.value.changed == True + assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config.main() + + assert e.value.changed == False + assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + + +@pytest.mark.role_config( + dict(mgmt_num_descriptor_fetch_tries=11, process_start_secs=21) +) +def test_set_parameters_with_purge(conn, module_args, host_monitor_config, request): + module_args( + { + **conn, + "role": host_monitor_config.name, + "parameters": dict(mgmt_num_descriptor_fetch_tries=32), + "purge": True, + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + # _ansible_check_mode=True, + # _ansible_diff=True, + } + ) + + expected = dict(mgmt_num_descriptor_fetch_tries="32") + + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config.main() + + assert e.value.changed == True + assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config.main() + + assert e.value.changed == False + assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + + +@pytest.mark.role_config( + dict(mgmt_num_descriptor_fetch_tries=11, process_start_secs=21) +) +def 
test_set_parameters_with_purge_role_type( + conn, module_args, host_monitor_config, request +): + module_args( + { + **conn, + "role_type": host_monitor_config.type, + "parameters": dict(mgmt_num_descriptor_fetch_tries=32), + "purge": True, + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + # _ansible_check_mode=True, + # _ansible_diff=True, + } + ) + + expected = dict(mgmt_num_descriptor_fetch_tries="32") + + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config.main() + + assert e.value.changed == True + assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config.main() + + assert e.value.changed == False + assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + + +@pytest.mark.role_config( + dict(mgmt_num_descriptor_fetch_tries=11, process_start_secs=21) +) +def test_purge_all_parameters(conn, module_args, host_monitor_config, request): + module_args( + { + **conn, + "role": host_monitor_config.name, + "parameters": dict(), + "purge": True, + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + # _ansible_check_mode=True, + # _ansible_diff=True, + } + ) + + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config.main() + + assert e.value.changed == True + assert len(e.value.config) == 0 + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config.main() + + assert e.value.changed == False + assert len(e.value.config) == 0 + + +@pytest.mark.role_config( + dict(mgmt_num_descriptor_fetch_tries=11, process_start_secs=21) +) +def test_purge_all_parameters_role_type( + conn, module_args, host_monitor_config, request +): + module_args( + { + **conn, + "type": host_monitor_config.type, + "parameters": dict(), + "purge": True, + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + # _ansible_check_mode=True, + # 
_ansible_diff=True, + } + ) + + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config.main() + + assert e.value.changed == True + assert len(e.value.config) == 0 + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config.main() + + assert e.value.changed == False + assert len(e.value.config) == 0 From c0a82f9c24c610f29eedcb498a3c93f3b049a1a0 Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Wed, 18 Dec 2024 15:21:30 -0500 Subject: [PATCH 09/31] Add utilities for discovering base role config group for a given role type Signed-off-by: Webster Mudge --- .../module_utils/role_config_group_utils.py | 42 ++++++++++++++++++- 1 file changed, 40 insertions(+), 2 deletions(-) diff --git a/plugins/module_utils/role_config_group_utils.py b/plugins/module_utils/role_config_group_utils.py index b17e8160..50e82afd 100644 --- a/plugins/module_utils/role_config_group_utils.py +++ b/plugins/module_utils/role_config_group_utils.py @@ -16,8 +16,12 @@ normalize_output, ) -from cm_client import ApiRoleConfigGroup - +from cm_client import ( + ApiClient, + ApiRoleConfigGroup, + RoleConfigGroupsResourceApi, + MgmtRoleConfigGroupsResourceApi, +) ROLE_CONFIG_GROUP = [ "name", @@ -28,8 +32,42 @@ ] +class BaseRoleConfigGroupDiscoveryException(Exception): + pass + + def parse_role_config_group_result(role_config_group: ApiRoleConfigGroup) -> dict: # Retrieve only the service identifier output = dict(service_name=role_config_group.service_ref.service_name) output.update(normalize_output(role_config_group.to_dict(), ROLE_CONFIG_GROUP)) return output + + +def get_base_role_config_group( + api_client: ApiClient, cluster_name: str, service_name: str, role_type: str +) -> ApiRoleConfigGroup: + rcg_api = RoleConfigGroupsResourceApi(api_client) + rcgs = [ + r + for r in rcg_api.read_role_config_groups(cluster_name, service_name).items + if r.role_type == role_type and r.base + ] + if len(rcgs) != 1: + raise 
BaseRoleConfigGroupDiscoveryException(role_count=len(rcgs)) + else: + return rcgs[0] + + +def get_mgmt_base_role_config_group( + api_client: ApiClient, role_type: str +) -> ApiRoleConfigGroup: + rcg_api = MgmtRoleConfigGroupsResourceApi(api_client) + rcgs = [ + r + for r in rcg_api.read_role_config_groups().items + if r.role_type == role_type and r.base + ] + if len(rcgs) != 1: + raise BaseRoleConfigGroupDiscoveryException(role_count=len(rcgs)) + else: + return rcgs[0] From eab3d18e96eed9509dc0f6badecfd1c728b4b857 Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Wed, 18 Dec 2024 15:22:18 -0500 Subject: [PATCH 10/31] Add utilities for getting roles by role type Signed-off-by: Webster Mudge --- plugins/module_utils/role_utils.py | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/plugins/module_utils/role_utils.py b/plugins/module_utils/role_utils.py index 55bb463b..c3a962e5 100644 --- a/plugins/module_utils/role_utils.py +++ b/plugins/module_utils/role_utils.py @@ -16,6 +16,12 @@ normalize_output, ) +from cm_client import ( + ApiClient, + ApiRoleList, + RolesResourceApi, + MgmtRolesResourceApi, +) from cm_client import ApiRole ROLE_OUTPUT = [ @@ -46,3 +52,23 @@ def parse_role_result(role: ApiRole) -> dict: ) output.update(normalize_output(role.to_dict(), ROLE_OUTPUT)) return output + + +def get_mgmt_roles(api_client: ApiClient, role_type: str) -> ApiRoleList: + role_api = MgmtRolesResourceApi(api_client) + return ApiRoleList( + items=[r for r in role_api.read_roles().items if r.type == role_type] + ) + + +def get_roles( + api_client: ApiClient, cluster_name: str, service_name: str, role_type: str +) -> ApiRoleList: + role_api = RolesResourceApi(api_client) + return ApiRoleList( + items=[ + r + for r in role_api.read_roles(cluster_name, service_name).items + if r.type == role_type + ] + ) From 6191de9724c005edf770632c4909f4e4fb45ee2f Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Wed, 18 Dec 2024 15:24:38 -0500 Subject: [PATCH 11/31] 
Add utility to set Cloudera Manager Service role config group configurations for tests Signed-off-by: Webster Mudge --- tests/unit/__init__.py | 69 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 69 insertions(+) diff --git a/tests/unit/__init__.py b/tests/unit/__init__.py index 62a6e4a2..74d34146 100644 --- a/tests/unit/__init__.py +++ b/tests/unit/__init__.py @@ -24,6 +24,7 @@ ApiConfig, ApiConfigList, ApiRole, + ApiRoleConfigGroup, ApiRoleList, ApiService, ApiServiceConfig, @@ -31,6 +32,7 @@ ClustersResourceApi, CommandsResourceApi, MgmtRolesResourceApi, + MgmtRoleConfigGroupsResourceApi, ServicesResourceApi, ) from cm_client.rest import ApiException @@ -263,3 +265,70 @@ def cm_role_config( message=f"{message}::reset", body=ApiConfigList(items=reconciled), ) + + +def set_cm_role_config_group_config( + api_client: ApiClient, + role_config_group: ApiRoleConfigGroup, + params: dict, + message: str, +) -> Generator[ApiRoleConfigGroup]: + """Update a configuration for a given Cloudera Manager Service role config group. + Yields the role config group, resetting the configuration to its prior state. + Use with 'yield from' within a pytest fixture. + + Args: + api_client (ApiClient): _description_ + role_config_group (ApiRoleConfigGroup): _description_ + params (dict): _description_ + message (str): _description_ + + Raises: + Exception: _description_ + + Yields: + ApiRoleConfigGroup: _description_ + """ + rcg_api = MgmtRoleConfigGroupsResourceApi(api_client) + + # Retrieve all of the pre-setup configurations + pre = rcg_api.read_config(role_config_group.name) + + # Set the test configurations + # Do so serially, since a failed update due to defaults (see ApiException) will cause remaining + # configuration entries to not run. Long-term solution is to check-and-set, which is + # what the Ansible modules do... 
+ for k, v in params.items(): + try: + rcg_api.update_config( + role_config_group.name, + message=f"{message}::set", + body=ApiConfigList(items=[ApiConfig(name=k, value=v)]), + ) + except ApiException as ae: + if ae.status != 400 or "delete with template" not in str(ae.body): + raise Exception(str(ae)) + + # Yield the targeted role + yield rcg_api.read_role_config_group(role_config_group.name) + + # Retrieve all of the post-setup configurations + post = rcg_api.read_config(role_config_group.name) + + # Reconcile the configurations + pre_set = set([c.name for c in pre.items]) + + reconciled = pre.items.copy() + reconciled.extend( + [ + ApiConfig(name=k.name, value=None) + for k in post.items + if k.name not in pre_set + ] + ) + + rcg_api.update_config( + role_config_group.name, + message=f"{message}::reset", + body=ApiConfigList(items=reconciled), + ) From 8f6334e54d45798394e799258709c6e6d0da17df Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Wed, 18 Dec 2024 15:25:10 -0500 Subject: [PATCH 12/31] Add cm_service_role_config_group module and tests Signed-off-by: Webster Mudge --- .../cm_service_role_config_group_config.py | 308 ++++++++++++++ pyproject.toml | 1 + ...est_cm_service_role_config_group_config.py | 379 ++++++++++++++++++ 3 files changed, 688 insertions(+) create mode 100644 plugins/modules/cm_service_role_config_group_config.py create mode 100644 tests/unit/plugins/modules/cm_service_role_config_group_config/test_cm_service_role_config_group_config.py diff --git a/plugins/modules/cm_service_role_config_group_config.py b/plugins/modules/cm_service_role_config_group_config.py new file mode 100644 index 00000000..8e61090b --- /dev/null +++ b/plugins/modules/cm_service_role_config_group_config.py @@ -0,0 +1,308 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright 2024 Cloudera, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +DOCUMENTATION = r""" +module: cm_service_role_config_group_config +short_description: Manage the configuration of a Cloudera Manager Service role config group. +description: + - Manage the configuration details of a role config group of the Cloudera Manager Service. +author: + - "Webster Mudge (@wmudge)" +requirements: + - cm-client +options: + name: + description: + - A role config group name to manage. + - One of C(name) or C(type) is required. + type: str + aliases: + - role_config_group + type: + description: + - The role type of the role config group to manage. + - Retrieves the default role config group for the given role type. + - One of C(name) or C(type) is required. + type: str + aliases: + - role_type + parameters: + description: + - The role configuration to set. + - To unset a parameter, use C(None) as the value. + type: dict + required: yes + aliases: + - params + view: + description: + - The view to materialize. 
+ type: str + default: summary + choices: + - summary + - full +extends_documentation_fragment: + - cloudera.cluster.cm_options + - cloudera.cluster.cm_endpoint + - cloudera.cluster.purge + - cloudera.cluster.message +attributes: + check_mode: + support: full + diff_mode: + support: full + platform: + platforms: all +""" + +EXAMPLES = r""" +- name: Update (append) several role config group parameters for a Cloudera Manager Service role type + cloudera.cluster.cm_service_role_config_group_config: + host: example.cloudera.com + username: "jane_smith" + password: "S&peR4Ec*re" + type: HOSTMONITOR + parameters: + a_configuration: "schema://host:port" + another_configuration: 234 + +- name: Reset a role config group parameter for a Cloudera Manager Service role type + cloudera.cluster.cm_service_role_config_group_config: + host: example.cloudera.com + username: "jane_smith" + password: "S&peR4Ec*re" + type: HOSTMONITOR + parameters: + some_conf: None + +- name: Update (purge) role config group parameters (by name) for a Cloudera Manager Service role + cloudera.cluster.service_role_config_group_config: + host: example.cloudera.com + username: "jane_smith" + password: "S&peR4Ec*re" + name: "a-non-default-rcg" + parameters: + config_one: ValueOne + config_two: 4567 + purge: yes + +- name: Reset all role config group parameters for a Cloudera Manager Service role type + cloudera.cluster.service_role_config_group_config: + host: example.cloudera.com + username: "jane_smith" + password: "S&peR4Ec*re" + type: HOSTMONITOR + parameters: {} + purge: yes +""" + +RETURN = r""" +config: + description: + - List of configurations for a Cloudera Manager Service role config group. + type: list + elements: dict + returned: always + contains: + name: + description: + - The canonical name that identifies this configuration parameter. + type: str + returned: when supported + value: + description: + - The user-defined value. + - When absent, the default value (if any) will be used. 
+ - Can also be absent, when enumerating allowed configs. + type: str + returned: when supported + required: + description: + - Whether this configuration is required for the object. + - If any required configuration is not set, operations on the object may not work. + - Requires I(full) view. + type: bool + returned: when supported + default: + description: + - The default value. + - Requires I(full) view. + type: str + returned: when supported + display_name: + description: + - A user-friendly name of the parameters, as would have been shown in the web UI. + - Requires I(full) view. + type: str + returned: when supported + description: + description: + - A textual description of the parameter. + - Requires I(full) view. + type: str + returned: when supported + related_name: + description: + - If applicable, contains the related configuration variable used by the source project. + - Requires I(full) view. + type: str + returned: when supported + sensitive: + description: + - Whether this configuration is sensitive, i.e. contains information such as passwords, which might affect how the value of this configuration might be shared by the caller. + type: bool + returned: when supported + validate_state: + description: + - State of the configuration parameter after validation. + - Requires I(full) view. + type: str + returned: when supported + validation_message: + description: + - A message explaining the parameter's validation state. + - Requires I(full) view. + type: str + returned: when supported + validation_warnings_suppressed: + description: + - Whether validation warnings associated with this parameter are suppressed. + - In general, suppressed validation warnings are hidden in the Cloudera Manager UI. + - Configurations that do not produce warnings will not contain this field. + - Requires I(full) view. 
+ type: bool + returned: when supported +""" + +import json + +from ansible_collections.cloudera.cluster.plugins.module_utils.cm_utils import ( + ClouderaManagerMutableModule, + ConfigListUpdates, +) +from ansible_collections.cloudera.cluster.plugins.module_utils.role_config_group_utils import ( + BaseRoleConfigGroupDiscoveryException, + get_mgmt_base_role_config_group, +) + +from cm_client import MgmtRoleConfigGroupsResourceApi +from cm_client.rest import ApiException + + +class ClouderaManagerServiceRoleConfigGroupConfig(ClouderaManagerMutableModule): + def __init__(self, module): + super(ClouderaManagerServiceRoleConfigGroupConfig, self).__init__(module) + + # Set the parameters + self.name = self.get_param("name") + self.type = self.get_param("type") + self.params = self.get_param("parameters") + self.purge = self.get_param("purge") + self.view = self.get_param("view") + + # Initialize the return values + self.changed = False + self.diff = {} + self.config = [] + + # Execute the logic + self.process() + + @ClouderaManagerMutableModule.handle_process + def process(self): + refresh = True + rcg_api = MgmtRoleConfigGroupsResourceApi(self.api_client) + + try: + if self.name is None: + rcg = get_mgmt_base_role_config_group(self.api_client, self.type) + self.name = rcg.name + + existing = rcg_api.read_config(self.name) + except ApiException as ae: + if ae.status == 404: + self.module.fail_json(msg=json.loads(ae.body)["message"]) + else: + raise ae + except BaseRoleConfigGroupDiscoveryException as be: + self.module.fail_json( + msg=f"Unable to find Cloudera Manager Service base role config group for role type '{self.type}'" + ) + + updates = ConfigListUpdates(existing, self.params, self.purge) + + if updates.changed: + self.changed = True + + if self.module._diff: + self.diff = updates.diff + + if not self.module.check_mode: + self.config = [ + p.to_dict() + for p in rcg_api.update_config( + self.name, + message=self.message, + body=updates.config, + ).items + ] + + 
if self.view == "full": + refresh = False + + if refresh: + self.config = [ + p.to_dict() + for p in rcg_api.read_config(self.name, view=self.view).items + ] + + +def main(): + module = ClouderaManagerMutableModule.ansible_module( + argument_spec=dict( + name=dict(aliases=["role_config_group"]), + type=dict(aliases=["role_type"]), + parameters=dict(type="dict", required=True, aliases=["params"]), + purge=dict(type="bool", default=False), + view=dict( + default="summary", + choices=["summary", "full"], + ), + ), + required_one_of=[ + ["name", "type"], + ], + supports_check_mode=True, + ) + + result = ClouderaManagerServiceRoleConfigGroupConfig(module) + + output = dict( + changed=result.changed, + config=result.config, + ) + + if result.debug: + log = result.log_capture.getvalue() + output.update(debug=log, debug_lines=log.split("\n")) + + module.exit_json(**output) + + +if __name__ == "__main__": + main() diff --git a/pyproject.toml b/pyproject.toml index 716e06c4..86417da5 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -57,6 +57,7 @@ filterwarnings = [ markers = [ "service_config: Prepare service-wide configurations for tests", "role_config: Prepare role override configurations for tests", + "role_config_group_config: Prepare role config group configurations for tests", ] [build-system] diff --git a/tests/unit/plugins/modules/cm_service_role_config_group_config/test_cm_service_role_config_group_config.py b/tests/unit/plugins/modules/cm_service_role_config_group_config/test_cm_service_role_config_group_config.py new file mode 100644 index 00000000..3b740f3d --- /dev/null +++ b/tests/unit/plugins/modules/cm_service_role_config_group_config/test_cm_service_role_config_group_config.py @@ -0,0 +1,379 @@ +# -*- coding: utf-8 -*- + +# Copyright 2024 Cloudera, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +import logging +import pytest + +from collections.abc import Generator +from pathlib import Path + +from cm_client import ( + ApiRole, + ApiRoleConfigGroup, + ClustersResourceApi, + MgmtRolesResourceApi, + MgmtRoleConfigGroupsResourceApi, +) + +from ansible_collections.cloudera.cluster.plugins.modules import ( + cm_service_role_config_group_config, +) +from ansible_collections.cloudera.cluster.plugins.module_utils.role_config_group_utils import ( + get_mgmt_base_role_config_group, +) + +from ansible_collections.cloudera.cluster.tests.unit import ( + AnsibleExitJson, + AnsibleFailJson, + provision_cm_role, + set_cm_role_config_group_config, +) + +LOG = logging.getLogger(__name__) + + +@pytest.fixture(scope="module") +def host_monitor(cm_api_client, cms, request) -> Generator[ApiRole]: + api = MgmtRolesResourceApi(cm_api_client) + + hm = next( + iter([r for r in api.read_roles().items if r.type == "HOSTMONITOR"]), None + ) + + if hm is not None: + yield hm + else: + cluster_api = ClustersResourceApi(cm_api_client) + + # Get first host of the cluster + hosts = cluster_api.list_hosts(cluster_name=cms.cluster_ref.cluster_name) + + if not hosts.items: + raise Exception( + "No available hosts to assign the Cloudera Manager Service role." 
+ ) + else: + name = Path(request.fixturename).stem + yield from provision_cm_role( + cm_api_client, name, "HOSTMONITOR", hosts.items[0].hostId + ) + + +@pytest.fixture(scope="function") +def host_monitor_config( + cm_api_client, host_monitor, request +) -> Generator[ApiRoleConfigGroup]: + marker = request.node.get_closest_marker("role_config_group_config") + + if marker is None: + raise Exception("No role_config_group_config marker found.") + + rcg_api = MgmtRoleConfigGroupsResourceApi(cm_api_client) + + yield from set_cm_role_config_group_config( + api_client=cm_api_client, + role_config_group=rcg_api.read_role_config_group( + host_monitor.role_config_group_ref.role_config_group_name + ), + params=marker.args[0], + message=f"{Path(request.node.parent.name).stem}::{request.node.name}", + ) + + +def test_missing_required(conn, module_args): + module_args(conn) + + with pytest.raises(AnsibleFailJson, match="parameters"): + cm_service_role_config_group_config.main() + + +def test_missing_required_if(conn, module_args): + module_args( + { + **conn, + "parameters": dict(), + } + ) + + with pytest.raises(AnsibleFailJson, match="name, type"): + cm_service_role_config_group_config.main() + + +def test_present_invalid_parameter(conn, module_args, host_monitor): + module_args( + { + **conn, + "name": host_monitor.role_config_group_ref.role_config_group_name, + "parameters": dict(example="Example"), + } + ) + + with pytest.raises( + AnsibleFailJson, match="Unknown configuration attribute 'example'" + ): + cm_service_role_config_group_config.main() + + +@pytest.mark.role_config_group_config( + dict(mgmt_num_descriptor_fetch_tries=11, process_start_secs=21) +) +def test_set_parameters(conn, module_args, host_monitor_config, request): + module_args( + { + **conn, + "name": host_monitor_config.name, + "parameters": dict(mgmt_num_descriptor_fetch_tries=32), + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + # _ansible_check_mode=True, + # 
_ansible_diff=True, + } + ) + + expected = dict(mgmt_num_descriptor_fetch_tries="32", process_start_secs="21") + + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config_group_config.main() + + assert e.value.changed == True + assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config_group_config.main() + + assert e.value.changed == False + assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + + +@pytest.mark.role_config_group_config( + dict(mgmt_num_descriptor_fetch_tries=11, process_start_secs=21) +) +def test_set_parameters_role_type(conn, module_args, host_monitor_config, request): + module_args( + { + **conn, + "type": host_monitor_config.role_type, + "parameters": dict(mgmt_num_descriptor_fetch_tries=32), + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + # _ansible_check_mode=True, + # _ansible_diff=True, + } + ) + + expected = dict(mgmt_num_descriptor_fetch_tries="32", process_start_secs="21") + + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config_group_config.main() + + assert e.value.changed == True + assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config_group_config.main() + + assert e.value.changed == False + assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + + +@pytest.mark.role_config_group_config( + dict(mgmt_num_descriptor_fetch_tries=11, process_start_secs=21) +) +def test_unset_parameters(conn, module_args, host_monitor_config, request): + module_args( + { + **conn, + "name": host_monitor_config.name, + "parameters": dict(mgmt_num_descriptor_fetch_tries=None), + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + } + ) + + expected = dict(process_start_secs="21") + + with 
pytest.raises(AnsibleExitJson) as e: + cm_service_role_config_group_config.main() + + assert e.value.changed == True + assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config_group_config.main() + + assert e.value.changed == False + assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + + +@pytest.mark.role_config_group_config( + dict(mgmt_num_descriptor_fetch_tries=11, process_start_secs=21) +) +def test_unset_parameters_role_type(conn, module_args, host_monitor_config, request): + module_args( + { + **conn, + "type": host_monitor_config.role_type, + "parameters": dict(mgmt_num_descriptor_fetch_tries=None), + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + } + ) + + expected = dict(process_start_secs="21") + + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config_group_config.main() + + assert e.value.changed == True + assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config_group_config.main() + + assert e.value.changed == False + assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + + +@pytest.mark.role_config_group_config( + dict(mgmt_num_descriptor_fetch_tries=11, process_start_secs=21) +) +def test_set_parameters_with_purge(conn, module_args, host_monitor_config, request): + module_args( + { + **conn, + "name": host_monitor_config.name, + "parameters": dict(mgmt_num_descriptor_fetch_tries=32), + "purge": True, + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + # _ansible_check_mode=True, + # _ansible_diff=True, + } + ) + + expected = dict(mgmt_num_descriptor_fetch_tries="32") + + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config_group_config.main() + + assert e.value.changed == True + assert 
expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config_group_config.main() + + assert e.value.changed == False + assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + + +@pytest.mark.role_config_group_config( + dict(mgmt_num_descriptor_fetch_tries=11, process_start_secs=21) +) +def test_set_parameters_with_purge_role_type( + conn, module_args, host_monitor_config, request +): + module_args( + { + **conn, + "type": host_monitor_config.role_type, + "parameters": dict(mgmt_num_descriptor_fetch_tries=32), + "purge": True, + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + # _ansible_check_mode=True, + # _ansible_diff=True, + } + ) + + expected = dict(mgmt_num_descriptor_fetch_tries="32") + + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config_group_config.main() + + assert e.value.changed == True + assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config_group_config.main() + + assert e.value.changed == False + assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + + +@pytest.mark.role_config_group_config( + dict(mgmt_num_descriptor_fetch_tries=11, process_start_secs=21) +) +def test_purge_all_parameters(conn, module_args, host_monitor_config, request): + module_args( + { + **conn, + "name": host_monitor_config.name, + "parameters": dict(), + "purge": True, + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + # _ansible_check_mode=True, + # _ansible_diff=True, + } + ) + + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config_group_config.main() + + assert e.value.changed == True + assert len(e.value.config) == 0 + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + 
cm_service_role_config_group_config.main() + + assert e.value.changed == False + assert len(e.value.config) == 0 + + +@pytest.mark.role_config_group_config( + dict(mgmt_num_descriptor_fetch_tries=11, process_start_secs=21) +) +def test_purge_all_parameters_role_type( + conn, module_args, host_monitor_config, request +): + module_args( + { + **conn, + "type": host_monitor_config.role_type, + "parameters": dict(), + "purge": True, + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + # _ansible_check_mode=True, + # _ansible_diff=True, + } + ) + + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config_group_config.main() + + assert e.value.changed == True + assert len(e.value.config) == 0 + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config_group_config.main() + + assert e.value.changed == False + assert len(e.value.config) == 0 From a4e6394f2c54b1408db99206f55cf83381529067 Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Thu, 19 Dec 2024 14:55:27 -0500 Subject: [PATCH 13/31] Add 'config' dictionary to parsed Role Config Group results Signed-off-by: Webster Mudge --- plugins/module_utils/role_config_group_utils.py | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/plugins/module_utils/role_config_group_utils.py b/plugins/module_utils/role_config_group_utils.py index 50e82afd..8b1fa561 100644 --- a/plugins/module_utils/role_config_group_utils.py +++ b/plugins/module_utils/role_config_group_utils.py @@ -37,9 +37,25 @@ class BaseRoleConfigGroupDiscoveryException(Exception): def parse_role_config_group_result(role_config_group: ApiRoleConfigGroup) -> dict: + """Parse a Role Config Group into a normalized dictionary. 
+
+    Returns the following:
+      - name (str)
+      - role_type (str)
+      - base (bool)
+      - display_name (str)
+      - config (dict)
+
+    Args:
+        role_config_group (ApiRoleConfigGroup): Role Config Group
+
+    Returns:
+        dict: Normalized dictionary of returned values
+    """
     # Retrieve only the service identifier
     output = dict(service_name=role_config_group.service_ref.service_name)
     output.update(normalize_output(role_config_group.to_dict(), ROLE_CONFIG_GROUP))
+    output.update(config={c.name: c.value for c in role_config_group.config.items})
     return output

From 1a275f227ece9b1f2cd6ed4e8a1cfa2765d755de Mon Sep 17 00:00:00 2001
From: Webster Mudge
Date: Thu, 19 Dec 2024 14:57:05 -0500
Subject: [PATCH 14/31] Update utility function for Role Config Group tests to
 handle all parameters, not just configuration values

Signed-off-by: Webster Mudge
---
 tests/unit/__init__.py | 67 +++++++++++-------------------------------
 1 file changed, 17 insertions(+), 50 deletions(-)

diff --git a/tests/unit/__init__.py b/tests/unit/__init__.py
index 74d34146..1103f609 100644
--- a/tests/unit/__init__.py
+++ b/tests/unit/__init__.py
@@ -267,68 +267,35 @@
 )
 
 
-def set_cm_role_config_group_config(
+def set_cm_role_config_group(
     api_client: ApiClient,
     role_config_group: ApiRoleConfigGroup,
-    params: dict,
+    update: ApiRoleConfigGroup,
     message: str,
 ) -> Generator[ApiRoleConfigGroup]:
-    """Update a configuration for a given Cloudera Manager Service role config group.
-    Yields the role config group, resetting the configuration to its prior state.
-    Use with 'yield from' within a pytest fixture.
+    """
+    Update a configuration for a given Cloudera Manager Service role config group.
+    Yields the role config group and upon returning control, will reset the
+    configuration to its prior state.
+    Use with 'yield from' within a pytest fixture.
Args: - api_client (ApiClient): _description_ - role_config_group (ApiRoleConfigGroup): _description_ - params (dict): _description_ - message (str): _description_ - - Raises: - Exception: _description_ + api_client (ApiClient): CM API client + role_config_group (ApiRoleConfigGroup): The Role Config Group to manage + update (ApiRoleConfigGroup): The state to set + message (str): Transaction descriptor; will be appended with '::[re]set' Yields: - ApiRoleConfigGroup: _description_ + ApiRoleConfigGroup: The updated Role Config Group """ rcg_api = MgmtRoleConfigGroupsResourceApi(api_client) - # Retrieve all of the pre-setup configurations - pre = rcg_api.read_config(role_config_group.name) + pre = rcg_api.read_role_config_group(role_config_group.name) - # Set the test configurations - # Do so serially, since a failed update due to defaults (see ApiException) will cause remaining - # configuration entries to not run. Long-term solution is to check-and-set, which is - # what the Ansible modules do... 
- for k, v in params.items(): - try: - rcg_api.update_config( - role_config_group.name, - message=f"{message}::set", - body=ApiConfigList(items=[ApiConfig(name=k, value=v)]), - ) - except ApiException as ae: - if ae.status != 400 or "delete with template" not in str(ae.body): - raise Exception(str(ae)) - - # Yield the targeted role - yield rcg_api.read_role_config_group(role_config_group.name) - - # Retrieve all of the post-setup configurations - post = rcg_api.read_config(role_config_group.name) - - # Reconcile the configurations - pre_set = set([c.name for c in pre.items]) - - reconciled = pre.items.copy() - reconciled.extend( - [ - ApiConfig(name=k.name, value=None) - for k in post.items - if k.name not in pre_set - ] + yield rcg_api.update_role_config_group( + role_config_group.name, message=f"{message}::set", body=update ) - rcg_api.update_config( - role_config_group.name, - message=f"{message}::reset", - body=ApiConfigList(items=reconciled), + rcg_api.update_role_config_group( + role_config_group.name, message=f"{message}::reset", body=pre ) From 44af849c42e9a92cc0b8c0bcca4c2063465553c8 Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Thu, 19 Dec 2024 14:58:03 -0500 Subject: [PATCH 15/31] Move host_monitor and host_monitor_config fixtures and update pytest Marker Signed-off-by: Webster Mudge --- pyproject.toml | 1 + tests/unit/conftest.py | 53 ++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 54 insertions(+) diff --git a/pyproject.toml b/pyproject.toml index 86417da5..38f16eaa 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -58,6 +58,7 @@ markers = [ "service_config: Prepare service-wide configurations for tests", "role_config: Prepare role override configurations for tests", "role_config_group_config: Prepare role config group configurations for tests", + "role_config_group: Prepare a role config group for tests.", ] [build-system] diff --git a/tests/unit/conftest.py b/tests/unit/conftest.py index 41669cfe..3528132f 100644 --- 
a/tests/unit/conftest.py +++ b/tests/unit/conftest.py @@ -36,11 +36,14 @@ ApiConfig, ApiHostRef, ApiHostRefList, + ApiRole, + ApiRoleConfigGroup, ApiService, ApiServiceConfig, ClustersResourceApi, Configuration, HostsResourceApi, + MgmtRoleConfigGroupsResourceApi, MgmtRolesResourceApi, MgmtServiceResourceApi, ParcelResourceApi, @@ -58,6 +61,8 @@ from ansible_collections.cloudera.cluster.tests.unit import ( AnsibleFailJson, AnsibleExitJson, + provision_cm_role, + set_cm_role_config_group, ) @@ -326,3 +331,51 @@ def cms_config(cm_api_client, cms, request) -> Generator[ApiService]: message=f"{Path(request.node.parent.name).stem}::{request.node.name}::reset", body=ApiServiceConfig(items=reconciled), ) + + +@pytest.fixture(scope="module") +def host_monitor(cm_api_client, cms, request) -> Generator[ApiRole]: + api = MgmtRolesResourceApi(cm_api_client) + + hm = next( + iter([r for r in api.read_roles().items if r.type == "HOSTMONITOR"]), None + ) + + if hm is not None: + yield hm + else: + cluster_api = ClustersResourceApi(cm_api_client) + + # Get first host of the cluster + hosts = cluster_api.list_hosts(cluster_name=cms.cluster_ref.cluster_name) + + if not hosts.items: + raise Exception( + "No available hosts to assign the Cloudera Manager Service role." 
+ ) + else: + name = Path(request.fixturename).stem + yield from provision_cm_role( + cm_api_client, name, "HOSTMONITOR", hosts.items[0].hostId + ) + + +@pytest.fixture(scope="function") +def host_monitor_config( + cm_api_client, host_monitor, request +) -> Generator[ApiRoleConfigGroup]: + marker = request.node.get_closest_marker("role_config_group") + + if marker is None: + raise Exception("No role_config_group marker found.") + + rcg_api = MgmtRoleConfigGroupsResourceApi(cm_api_client) + + yield from set_cm_role_config_group( + api_client=cm_api_client, + role_config_group=rcg_api.read_role_config_group( + host_monitor.role_config_group_ref.role_config_group_name + ), + update=marker.args[0], + message=f"{Path(request.node.parent.name).stem}::{request.node.name}", + ) From 72418dcdc74ac8c237bb8c4eb7e6e051d9a8eeb4 Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Thu, 19 Dec 2024 14:58:59 -0500 Subject: [PATCH 16/31] Update to use reworked host_monitor_config fixture Signed-off-by: Webster Mudge --- ...est_cm_service_role_config_group_config.py | 164 ++++++++++-------- 1 file changed, 90 insertions(+), 74 deletions(-) diff --git a/tests/unit/plugins/modules/cm_service_role_config_group_config/test_cm_service_role_config_group_config.py b/tests/unit/plugins/modules/cm_service_role_config_group_config/test_cm_service_role_config_group_config.py index 3b740f3d..99df06c4 100644 --- a/tests/unit/plugins/modules/cm_service_role_config_group_config/test_cm_service_role_config_group_config.py +++ b/tests/unit/plugins/modules/cm_service_role_config_group_config/test_cm_service_role_config_group_config.py @@ -21,82 +21,26 @@ import logging import pytest -from collections.abc import Generator from pathlib import Path from cm_client import ( - ApiRole, + ApiConfig, + ApiConfigList, ApiRoleConfigGroup, - ClustersResourceApi, - MgmtRolesResourceApi, - MgmtRoleConfigGroupsResourceApi, ) from ansible_collections.cloudera.cluster.plugins.modules import ( 
cm_service_role_config_group_config, ) -from ansible_collections.cloudera.cluster.plugins.module_utils.role_config_group_utils import ( - get_mgmt_base_role_config_group, -) from ansible_collections.cloudera.cluster.tests.unit import ( AnsibleExitJson, AnsibleFailJson, - provision_cm_role, - set_cm_role_config_group_config, ) LOG = logging.getLogger(__name__) -@pytest.fixture(scope="module") -def host_monitor(cm_api_client, cms, request) -> Generator[ApiRole]: - api = MgmtRolesResourceApi(cm_api_client) - - hm = next( - iter([r for r in api.read_roles().items if r.type == "HOSTMONITOR"]), None - ) - - if hm is not None: - yield hm - else: - cluster_api = ClustersResourceApi(cm_api_client) - - # Get first host of the cluster - hosts = cluster_api.list_hosts(cluster_name=cms.cluster_ref.cluster_name) - - if not hosts.items: - raise Exception( - "No available hosts to assign the Cloudera Manager Service role." - ) - else: - name = Path(request.fixturename).stem - yield from provision_cm_role( - cm_api_client, name, "HOSTMONITOR", hosts.items[0].hostId - ) - - -@pytest.fixture(scope="function") -def host_monitor_config( - cm_api_client, host_monitor, request -) -> Generator[ApiRoleConfigGroup]: - marker = request.node.get_closest_marker("role_config_group_config") - - if marker is None: - raise Exception("No role_config_group_config marker found.") - - rcg_api = MgmtRoleConfigGroupsResourceApi(cm_api_client) - - yield from set_cm_role_config_group_config( - api_client=cm_api_client, - role_config_group=rcg_api.read_role_config_group( - host_monitor.role_config_group_ref.role_config_group_name - ), - params=marker.args[0], - message=f"{Path(request.node.parent.name).stem}::{request.node.name}", - ) - - def test_missing_required(conn, module_args): module_args(conn) @@ -131,8 +75,17 @@ def test_present_invalid_parameter(conn, module_args, host_monitor): cm_service_role_config_group_config.main() -@pytest.mark.role_config_group_config( - 
dict(mgmt_num_descriptor_fetch_tries=11, process_start_secs=21) +@pytest.mark.role_config_group( + ApiRoleConfigGroup( + config=ApiConfigList( + items=[ + ApiConfig(k, v) + for k, v in dict( + mgmt_num_descriptor_fetch_tries=11, process_start_secs=21 + ).items() + ] + ) + ) ) def test_set_parameters(conn, module_args, host_monitor_config, request): module_args( @@ -162,8 +115,17 @@ def test_set_parameters(conn, module_args, host_monitor_config, request): assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() -@pytest.mark.role_config_group_config( - dict(mgmt_num_descriptor_fetch_tries=11, process_start_secs=21) +@pytest.mark.role_config_group( + ApiRoleConfigGroup( + config=ApiConfigList( + items=[ + ApiConfig(k, v) + for k, v in dict( + mgmt_num_descriptor_fetch_tries=11, process_start_secs=21 + ).items() + ] + ) + ) ) def test_set_parameters_role_type(conn, module_args, host_monitor_config, request): module_args( @@ -193,8 +155,17 @@ def test_set_parameters_role_type(conn, module_args, host_monitor_config, reques assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() -@pytest.mark.role_config_group_config( - dict(mgmt_num_descriptor_fetch_tries=11, process_start_secs=21) +@pytest.mark.role_config_group( + ApiRoleConfigGroup( + config=ApiConfigList( + items=[ + ApiConfig(k, v) + for k, v in dict( + mgmt_num_descriptor_fetch_tries=11, process_start_secs=21 + ).items() + ] + ) + ) ) def test_unset_parameters(conn, module_args, host_monitor_config, request): module_args( @@ -222,8 +193,17 @@ def test_unset_parameters(conn, module_args, host_monitor_config, request): assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() -@pytest.mark.role_config_group_config( - dict(mgmt_num_descriptor_fetch_tries=11, process_start_secs=21) +@pytest.mark.role_config_group( + ApiRoleConfigGroup( + config=ApiConfigList( + items=[ + ApiConfig(k, v) + for k, v in dict( + 
mgmt_num_descriptor_fetch_tries=11, process_start_secs=21 + ).items() + ] + ) + ) ) def test_unset_parameters_role_type(conn, module_args, host_monitor_config, request): module_args( @@ -251,8 +231,17 @@ def test_unset_parameters_role_type(conn, module_args, host_monitor_config, requ assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() -@pytest.mark.role_config_group_config( - dict(mgmt_num_descriptor_fetch_tries=11, process_start_secs=21) +@pytest.mark.role_config_group( + ApiRoleConfigGroup( + config=ApiConfigList( + items=[ + ApiConfig(k, v) + for k, v in dict( + mgmt_num_descriptor_fetch_tries=11, process_start_secs=21 + ).items() + ] + ) + ) ) def test_set_parameters_with_purge(conn, module_args, host_monitor_config, request): module_args( @@ -283,8 +272,17 @@ def test_set_parameters_with_purge(conn, module_args, host_monitor_config, reque assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() -@pytest.mark.role_config_group_config( - dict(mgmt_num_descriptor_fetch_tries=11, process_start_secs=21) +@pytest.mark.role_config_group( + ApiRoleConfigGroup( + config=ApiConfigList( + items=[ + ApiConfig(k, v) + for k, v in dict( + mgmt_num_descriptor_fetch_tries=11, process_start_secs=21 + ).items() + ] + ) + ) ) def test_set_parameters_with_purge_role_type( conn, module_args, host_monitor_config, request @@ -317,8 +315,17 @@ def test_set_parameters_with_purge_role_type( assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() -@pytest.mark.role_config_group_config( - dict(mgmt_num_descriptor_fetch_tries=11, process_start_secs=21) +@pytest.mark.role_config_group( + ApiRoleConfigGroup( + config=ApiConfigList( + items=[ + ApiConfig(k, v) + for k, v in dict( + mgmt_num_descriptor_fetch_tries=11, process_start_secs=21 + ).items() + ] + ) + ) ) def test_purge_all_parameters(conn, module_args, host_monitor_config, request): module_args( @@ -347,8 +354,17 @@ def 
test_purge_all_parameters(conn, module_args, host_monitor_config, request): assert len(e.value.config) == 0 -@pytest.mark.role_config_group_config( - dict(mgmt_num_descriptor_fetch_tries=11, process_start_secs=21) +@pytest.mark.role_config_group( + ApiRoleConfigGroup( + config=ApiConfigList( + items=[ + ApiConfig(k, v) + for k, v in dict( + mgmt_num_descriptor_fetch_tries=11, process_start_secs=21 + ).items() + ] + ) + ) ) def test_purge_all_parameters_role_type( conn, module_args, host_monitor_config, request From c7602e5b07df19101b1d6a60d525725035d120ef Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Thu, 19 Dec 2024 15:01:10 -0500 Subject: [PATCH 17/31] Add cm_service_role_config_group module and tests Signed-off-by: Webster Mudge --- .../modules/cm_service_role_config_group.py | 351 ++++++++++++++++++ .../test_cm_service_role_config_group.py | 242 ++++++++++++ 2 files changed, 593 insertions(+) create mode 100644 plugins/modules/cm_service_role_config_group.py create mode 100644 tests/unit/plugins/modules/cm_service_role_config_group/test_cm_service_role_config_group.py diff --git a/plugins/modules/cm_service_role_config_group.py b/plugins/modules/cm_service_role_config_group.py new file mode 100644 index 00000000..0a264870 --- /dev/null +++ b/plugins/modules/cm_service_role_config_group.py @@ -0,0 +1,351 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright 2024 Cloudera, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +DOCUMENTATION = r""" +module: cm_service_role_config_group +short_description: Manage a Cloudera Manager Service role config group. +description: + - Manage a Cloudera Manager Service role config group. +author: + - "Webster Mudge (@wmudge)" +requirements: + - cm-client +options: + type: + description: + - The role type defining the role config group. + type: str + required: True + aliases: + - role_type + display_name: + description: + - The display name for this role config group in the Cloudera Manager UI. + config: + description: + - The role configuration to set. + - To unset a parameter, use C(None) as the value. + type: dict + aliases: + - params + - parameters + purge: + description: + - Flag indicating whether to reset configuration parameters to only the declared entries. + type: bool + default: False +extends_documentation_fragment: + - cloudera.cluster.cm_options + - cloudera.cluster.cm_endpoint + - cloudera.cluster.message +attributes: + check_mode: + support: full + diff_mode: + support: full + platform: + platforms: all +""" + +EXAMPLES = r""" +- name: Update the configuration of a Cloudera Manager Service role config group + cloudera.cluster.cm_service_role_config_group: + host: example.cloudera.com + username: "jane_smith" + password: "S&peR4Ec*re" + type: HOSTMONITOR + parameters: + some_parameter: True + +- name: Update the configuration of a Cloudera Manager Service role config group, purging undeclared parameters + cloudera.cluster.cm_service_role_config_group: + host: example.cloudera.com + username: "jane_smith" + password: "S&peR4Ec*re" + type: HOSTMONITOR + parameters: + another_parameter: 3456 + purge: yes + +- name: Reset the configuration of a Cloudera Manager Service role config group + cloudera.cluster.cm_service_role_config_group: + host: example.cloudera.com + username: "jane_smith" + password: "S&peR4Ec*re" + type: HOSTMONITOR + parameters: {} + purge: yes + +- name: Set the display name of a Cloudera Manager Service role 
config group + cloudera.cluster.cm_service_role_config_group: + host: example.cloudera.com + username: "jane_smith" + password: "S&peR4Ec*re" + type: HOSTMONITOR + display_name: A new name +""" + +RETURN = r""" +role_config_group: + description: + - A Cloudera Manager Service role config group. + type: dict + returned: always + contains: + name: + description: + - The unique name of this role config group. + type: str + returned: always + role_type: + description: + - The type of the roles in this group. + type: str + returned: always + base: + description: + - Flag indicating whether this is a base group. + type: bool + returned: always + display_name: + description: + - A user-friendly name of the role config group, as would have been shown in the web UI. + type: str + returned: when supported + service_name: + description: + - The service name associated with this role config group. + type: str + returned: always + role_names: + description: + - List of role names associated with this role config group. + type: list + elements: str + returned: when supported + config: + description: + - List of configurations. + type: list + elements: dict + returned: always + contains: + name: + description: + - The canonical name that identifies this configuration parameter. + type: str + returned: when supported + value: + description: + - The user-defined value. + - When absent, the default value (if any) will be used. + - Can also be absent, when enumerating allowed configs. + type: str + returned: when supported + required: + description: + - Whether this configuration is required for the object. + - If any required configuration is not set, operations on the object may not work. + - Requires I(full) view. + type: bool + returned: when supported + default: + description: + - The default value. + - Requires I(full) view. + type: str + returned: when supported + display_name: + description: + - A user-friendly name of the parameters, as would have been shown in the web UI. 
+ - Requires I(full) view. + type: str + returned: when supported + description: + description: + - A textual description of the parameter. + - Requires I(full) view. + type: str + returned: when supported + related_name: + description: + - If applicable, contains the related configuration variable used by the source project. + - Requires I(full) view. + type: str + returned: when supported + sensitive: + description: + - Whether this configuration is sensitive, i.e. contains information such as passwords, which might affect how the value of this configuration might be shared by the caller. + type: bool + returned: when supported + validate_state: + description: + - State of the configuration parameter after validation. + - Requires I(full) view. + type: str + returned: when supported + validation_message: + description: + - A message explaining the parameter's validation state. + - Requires I(full) view. + type: str + returned: when supported + validation_warnings_suppressed: + description: + - Whether validation warnings associated with this parameter are suppressed. + - In general, suppressed validation warnings are hidden in the Cloudera Manager UI. + - Configurations that do not produce warnings will not contain this field. + - Requires I(full) view. 
+ type: bool + returned: when supported +""" + +from ansible_collections.cloudera.cluster.plugins.module_utils.cm_utils import ( + ClouderaManagerMutableModule, + ConfigListUpdates, +) + +from ansible_collections.cloudera.cluster.plugins.module_utils.role_config_group_utils import ( + BaseRoleConfigGroupDiscoveryException, + parse_role_config_group_result, + get_mgmt_base_role_config_group, +) + +from cm_client import ( + ApiRoleConfigGroup, + MgmtRoleConfigGroupsResourceApi, + MgmtServiceResourceApi, +) +from cm_client.rest import ApiException + + +class ClouderaManagerServiceRoleConfigGroup(ClouderaManagerMutableModule): + def __init__(self, module): + super(ClouderaManagerServiceRoleConfigGroup, self).__init__(module) + + # Set the parameters + self.type = self.get_param("type") + self.display_name = self.get_param("display_name") + self.config = self.get_param("config", default=dict()) + self.purge = self.get_param("purge") + + # Initialize the return value + self.changed = False + self.diff = dict(before=dict(), after=dict()) + self.output = {} + + # Execute the logic + self.process() + + @ClouderaManagerMutableModule.handle_process + def process(self): + # Confirm that CMS is present + try: + MgmtServiceResourceApi(self.api_client).read_service() + except ApiException as ex: + if ex.status == 404: + self.module.fail_json(msg="Cloudera Management Service does not exist") + else: + raise ex + + rcg_api = MgmtRoleConfigGroupsResourceApi(self.api_client) + + # Retrieve the base RCG (the _only_ RCG for CMS roles) + try: + existing = get_mgmt_base_role_config_group(self.api_client, self.type) + except ApiException as ex: + if ex.status != 404: + raise ex + except BaseRoleConfigGroupDiscoveryException as be: + self.module.fail_json( + msg=f"Unable to find Cloudera Manager Service base role config group for role type '{self.type}'" + ) + + payload = ApiRoleConfigGroup() + + # Update display name + if self.display_name and self.display_name != existing.display_name: + 
self.changed = True + + if self.module._diff: + self.diff["before"].update(display_name=existing.display_name) + self.diff["after"].update(display_name=self.display_name) + + payload.display_name = self.display_name + + # Reconcile configurations + if self.config or self.purge: + updates = ConfigListUpdates(existing.config, self.config, self.purge) + + if updates.changed: + self.changed = True + + if self.module._diff: + self.diff["before"].update(config=updates.diff["before"]) + self.diff["after"].update(config=updates.diff["after"]) + + payload.config = updates.config + + # Execute changes if needed + if self.changed and not self.module.check_mode: + self.output = parse_role_config_group_result( + rcg_api.update_role_config_group( + existing.name, + message=self.message, + body=payload, + ) + ) + else: + self.output = parse_role_config_group_result(existing) + + # Report on any role associations + self.output.update( + role_names=[r.name for r in rcg_api.read_roles(existing.name).items] + ) + + +def main(): + module = ClouderaManagerMutableModule.ansible_module( + argument_spec=dict( + display_name=dict(), + type=dict(required=True, aliases=["role_type"]), + config=dict(type="dict", aliases=["params", "parameters"]), + purge=dict(type="bool", default=False), + ), + supports_check_mode=True, + ) + + result = ClouderaManagerServiceRoleConfigGroup(module) + + output = dict( + changed=result.changed, + role_config_group=result.output, + ) + + if module._diff: + output.update(diff=result.diff) + + if result.debug: + log = result.log_capture.getvalue() + output.update(debug=log, debug_lines=log.split("\n")) + + module.exit_json(**output) + + +if __name__ == "__main__": + main() diff --git a/tests/unit/plugins/modules/cm_service_role_config_group/test_cm_service_role_config_group.py b/tests/unit/plugins/modules/cm_service_role_config_group/test_cm_service_role_config_group.py new file mode 100644 index 00000000..2006d225 --- /dev/null +++ 
b/tests/unit/plugins/modules/cm_service_role_config_group/test_cm_service_role_config_group.py @@ -0,0 +1,242 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright 2024 Cloudera, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +import logging +import pytest + +from pathlib import Path + +from cm_client import ( + ApiConfig, + ApiConfigList, + ApiRoleConfigGroup, +) + +from ansible_collections.cloudera.cluster.plugins.modules import ( + cm_service_role_config_group, +) +from ansible_collections.cloudera.cluster.tests.unit import ( + AnsibleExitJson, + AnsibleFailJson, +) + +LOG = logging.getLogger(__name__) + + +@pytest.mark.role_config_group( + ApiRoleConfigGroup( + config=ApiConfigList( + items=[ + ApiConfig(k, v) + for k, v in dict( + mgmt_num_descriptor_fetch_tries=11, process_start_secs=21 + ).items() + ] + ) + ) +) +def test_cm_role_config_group_config_set( + conn, module_args, host_monitor_config, request +): + module_args( + { + **conn, + "type": host_monitor_config.role_type, + "parameters": dict(mgmt_num_descriptor_fetch_tries=32), + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + # _ansible_check_mode=True, + # _ansible_diff=True, + } + ) + + expected = dict(mgmt_num_descriptor_fetch_tries="32", process_start_secs="21") + + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config_group.main() + + 
assert e.value.changed == True + assert expected.items() <= e.value.role_config_group["config"].items() + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config_group.main() + + assert e.value.changed == False + assert expected.items() <= e.value.role_config_group["config"].items() + + +@pytest.mark.role_config_group( + ApiRoleConfigGroup( + config=ApiConfigList( + items=[ + ApiConfig(k, v) + for k, v in dict( + mgmt_num_descriptor_fetch_tries=11, process_start_secs=21 + ).items() + ] + ) + ) +) +def test_cm_role_config_group_config_unset( + conn, module_args, host_monitor_config, request +): + module_args( + { + **conn, + "type": host_monitor_config.role_type, + "parameters": dict(mgmt_num_descriptor_fetch_tries=None), + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + # _ansible_check_mode=True, + # _ansible_diff=True, + } + ) + + expected = dict(process_start_secs="21") + + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config_group.main() + + assert e.value.changed == True + assert expected.items() <= e.value.role_config_group["config"].items() + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config_group.main() + + assert e.value.changed == False + assert expected.items() <= e.value.role_config_group["config"].items() + + +@pytest.mark.role_config_group( + ApiRoleConfigGroup( + config=ApiConfigList( + items=[ + ApiConfig(k, v) + for k, v in dict( + mgmt_num_descriptor_fetch_tries=11, process_start_secs=21 + ).items() + ] + ) + ) +) +def test_cm_role_config_group_config_set_purge( + conn, module_args, host_monitor_config, request +): + module_args( + { + **conn, + "type": host_monitor_config.role_type, + "parameters": dict(mgmt_num_descriptor_fetch_tries=32), + "purge": True, + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + # _ansible_check_mode=True, + # _ansible_diff=True, + } + ) + + expected = 
dict(mgmt_num_descriptor_fetch_tries="32") + + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config_group.main() + + assert e.value.changed == True + assert expected.items() <= e.value.role_config_group["config"].items() + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config_group.main() + + assert e.value.changed == False + assert expected.items() <= e.value.role_config_group["config"].items() + + +@pytest.mark.role_config_group( + ApiRoleConfigGroup( + config=ApiConfigList( + items=[ + ApiConfig(k, v) + for k, v in dict( + mgmt_num_descriptor_fetch_tries=11, process_start_secs=21 + ).items() + ] + ) + ) +) +def test_cm_role_config_group_config_purge_all( + conn, module_args, host_monitor_config, request +): + module_args( + { + **conn, + "type": host_monitor_config.role_type, + "parameters": dict(), + "purge": True, + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + # _ansible_check_mode=True, + # _ansible_diff=True, + } + ) + + expected = dict() + + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config_group.main() + + assert e.value.changed == True + assert expected.items() <= e.value.role_config_group["config"].items() + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config_group.main() + + assert e.value.changed == False + assert expected.items() <= e.value.role_config_group["config"].items() + + +@pytest.mark.role_config_group(ApiRoleConfigGroup(display_name="Test")) +def test_cm_role_config_group_display_name_set( + conn, module_args, host_monitor_config, request +): + expected = "Updated Test" + + module_args( + { + **conn, + "type": host_monitor_config.role_type, + "display_name": expected, + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + # _ansible_check_mode=True, + # _ansible_diff=True, + } + ) + + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config_group.main() + + assert e.value.changed == 
True + assert expected == e.value.role_config_group["display_name"] + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config_group.main() + + assert e.value.changed == False + assert expected == e.value.role_config_group["display_name"] From f7835a853335e856b4a6f425e31090413e4c1666 Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Fri, 20 Dec 2024 14:36:11 -0500 Subject: [PATCH 18/31] Add host utilities Signed-off-by: Webster Mudge --- plugins/module_utils/host_utils.py | 57 ++++++++++++++++++++++++++++++ 1 file changed, 57 insertions(+) create mode 100644 plugins/module_utils/host_utils.py diff --git a/plugins/module_utils/host_utils.py b/plugins/module_utils/host_utils.py new file mode 100644 index 00000000..645cf466 --- /dev/null +++ b/plugins/module_utils/host_utils.py @@ -0,0 +1,57 @@ +# Copyright 2024 Cloudera, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +""" +A common functions for Cloudera Manager hosts +""" + +from cm_client import ( + ApiClient, + ApiHost, + ApiHostRef, + HostsResourceApi, +) +from cm_client.rest import ApiException + + +def get_host( + api_client: ApiClient, hostname: str = None, host_id: str = None +) -> ApiHost: + if hostname: + return next( + ( + h + for h in HostsResourceApi(api_client).read_hosts().items + if h.hostname == hostname + ), + None, + ) + else: + try: + return HostsResourceApi(api_client).read_host(host_id) + except ApiException as ex: + if ex.status != 404: + raise ex + else: + return None + + +def get_host_ref( + api_client: ApiClient, hostname: str = None, host_id: str = None +) -> ApiHostRef: + host = get_host(api_client, hostname, host_id) + if host is not None: + return ApiHostRef(host.host_id, host.hostname) + else: + return None From e4f5e9f8c915b2d3c683e062e675d944d8eafebe Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Fri, 20 Dec 2024 14:36:57 -0500 Subject: [PATCH 19/31] Add utility function for role config group retrieval Signed-off-by: Webster Mudge --- .../module_utils/role_config_group_utils.py | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/plugins/module_utils/role_config_group_utils.py b/plugins/module_utils/role_config_group_utils.py index 8b1fa561..a7b8ec70 100644 --- a/plugins/module_utils/role_config_group_utils.py +++ b/plugins/module_utils/role_config_group_utils.py @@ -19,6 +19,7 @@ from cm_client import ( ApiClient, ApiRoleConfigGroup, + ApiRoleConfigGroupRef, RoleConfigGroupsResourceApi, MgmtRoleConfigGroupsResourceApi, ) @@ -36,6 +37,10 @@ class BaseRoleConfigGroupDiscoveryException(Exception): pass +class RoleConfigGroupDiscoveryException(Exception): + pass + + def parse_role_config_group_result(role_config_group: ApiRoleConfigGroup) -> dict: """Parse a Role Config Group into a normalized dictionary. 
@@ -87,3 +92,16 @@ def get_mgmt_base_role_config_group( raise BaseRoleConfigGroupDiscoveryException(role_count=len(rcgs)) else: return rcgs[0] + + +def get_role_config_group( + api_client: ApiClient, cluster_name: str, service_name: str, name: str +) -> ApiRoleConfigGroup: + rcg_api = RoleConfigGroupsResourceApi(api_client) + + rcg = rcg_api.read_role_config_group(cluster_name, name, service_name) + + if rcg is None: + raise RoleConfigGroupDiscoveryException(name) + else: + return rcg From e287cbff745c1320790ec984c9232e914426411f Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Fri, 20 Dec 2024 14:37:38 -0500 Subject: [PATCH 20/31] Add config parameters to parse_role_result. Add utility for role data model creation. Signed-off-by: Webster Mudge --- plugins/module_utils/role_utils.py | 58 ++++++++++++++++++++++++++++++ 1 file changed, 58 insertions(+) diff --git a/plugins/module_utils/role_utils.py b/plugins/module_utils/role_utils.py index c3a962e5..479691bb 100644 --- a/plugins/module_utils/role_utils.py +++ b/plugins/module_utils/role_utils.py @@ -15,10 +15,19 @@ from ansible_collections.cloudera.cluster.plugins.module_utils.cm_utils import ( normalize_output, ) +from ansible_collections.cloudera.cluster.plugins.module_utils.host_utils import ( + get_host_ref, +) +from ansible_collections.cloudera.cluster.plugins.module_utils.role_config_group_utils import ( + get_role_config_group, +) from cm_client import ( ApiClient, + ApiConfig, + ApiConfigList, ApiRoleList, + ApiRoleConfigGroupRef, RolesResourceApi, MgmtRolesResourceApi, ) @@ -51,6 +60,7 @@ def parse_role_result(role: ApiRole) -> dict: service_name=role.service_ref.service_name, ) output.update(normalize_output(role.to_dict(), ROLE_OUTPUT)) + output.update(config={c.name: c.value for c in role.config.items}) return output @@ -72,3 +82,51 @@ def get_roles( if r.type == role_type ] ) + + +class RoleHostNotFoundException(Exception): + pass + + +def create_role( + api_client: ApiClient, + role_type: str, + 
hostname: str, + host_id: str, + name: str = None, + config: dict = None, + cluster_name: str = None, + service_name: str = None, + role_config_group: str = None, +) -> ApiRole: + # Set up the role + role = ApiRole(type=str(role_type).upper()) + + # Name + if name: + role.name = name # No name allows auto-generation + + # Host assignment + host_ref = get_host_ref(api_client, hostname, host_id) + if host_ref is None: + raise RoleHostNotFoundException( + f"Host not found: hostname='{hostname}', host_id='{host_id}'" + ) + else: + role.host_ref = host_ref + + # Role config group + if role_config_group: + role.role_config_group_ref = ApiRoleConfigGroupRef( + get_role_config_group( + api_client, cluster_name, service_name, role_config_group + ).name + ) + + # Role override configurations + if config: + role.config = ApiConfigList( + items=[ApiConfig(name=k, value=v) for k, v in config.items()] + ) + + return role From 6904d8930637325662b04858bfbdbafca03a3511 Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Fri, 20 Dec 2024 14:39:38 -0500 Subject: [PATCH 21/31] Rename Host Monitor fixtures. 
Add host_monitor_state for general role testing Signed-off-by: Webster Mudge --- tests/unit/conftest.py | 138 +++++++++++++++++- .../test_cm_service_role_config_group.py | 20 +-- ...est_cm_service_role_config_group_config.py | 4 +- 3 files changed, 145 insertions(+), 17 deletions(-) diff --git a/tests/unit/conftest.py b/tests/unit/conftest.py index 3528132f..d814a574 100644 --- a/tests/unit/conftest.py +++ b/tests/unit/conftest.py @@ -28,21 +28,29 @@ from collections.abc import Generator from pathlib import Path +from time import sleep from cm_client import ( + ApiBulkCommandList, ApiClient, ApiClusterList, ApiCluster, + ApiCommand, ApiConfig, + ApiConfigList, ApiHostRef, ApiHostRefList, ApiRole, ApiRoleConfigGroup, + ApiRoleNameList, + ApiRoleState, ApiService, ApiServiceConfig, ClustersResourceApi, + CommandsResourceApi, Configuration, HostsResourceApi, + MgmtRoleCommandsResourceApi, MgmtRoleConfigGroupsResourceApi, MgmtRolesResourceApi, MgmtServiceResourceApi, @@ -334,7 +342,7 @@ def cms_config(cm_api_client, cms, request) -> Generator[ApiService]: @pytest.fixture(scope="module") -def host_monitor(cm_api_client, cms, request) -> Generator[ApiRole]: +def host_monitor_role(cm_api_client, cms, request) -> Generator[ApiRole]: api = MgmtRolesResourceApi(cm_api_client) hm = next( @@ -361,21 +369,141 @@ def host_monitor(cm_api_client, cms, request) -> Generator[ApiRole]: @pytest.fixture(scope="function") -def host_monitor_config( - cm_api_client, host_monitor, request +def host_monitor_role_group_config( + cm_api_client, host_monitor_role, request ) -> Generator[ApiRoleConfigGroup]: marker = request.node.get_closest_marker("role_config_group") if marker is None: - raise Exception("No role_config_group marker found.") + raise Exception("No 'role_config_group' marker found.") rcg_api = MgmtRoleConfigGroupsResourceApi(cm_api_client) yield from set_cm_role_config_group( api_client=cm_api_client, role_config_group=rcg_api.read_role_config_group( - 
host_monitor.role_config_group_ref.role_config_group_name + host_monitor_role.role_config_group_ref.role_config_group_name ), update=marker.args[0], message=f"{Path(request.node.parent.name).stem}::{request.node.name}", ) + + +@pytest.fixture(scope="function") +def host_monitor_state(cm_api_client, host_monitor_role, request) -> Generator[ApiRole]: + marker = request.node.get_closest_marker("role") + + if marker is None: + raise Exception("No 'role' marker found.") + + role = marker.args[0] + + role_api = MgmtRolesResourceApi(cm_api_client) + cmd_api = MgmtRoleCommandsResourceApi(cm_api_client) + + # Get the current state + pre_role = role_api.read_role(host_monitor_role.name) + pre_role.config = role_api.read_role_config(host_monitor_role.name) + + # Set config + for c in role.config.items: + try: + role_api.update_role_config( + role_name=host_monitor_role.name, + message=f"{Path(request.node.parent.name).stem}::{request.node.name}::set", + body=ApiConfigList(items=[c]), + ) + except ApiException as ae: + if ae.status != 400 or "delete with template" not in str(ae.body): + raise Exception(str(ae)) + + # Update maintenance + if role.maintenance_mode: + role_api.enter_maintenance_mode(host_monitor_role.name) + else: + role_api.exit_maintenance_mode(host_monitor_role.name) + + # Update state + if role.role_state is not None: + if role.role_state in [ApiRoleState.STARTED]: + handle_commands( + cmd_api.stop_command( + body=ApiRoleNameList(items=[host_monitor_role.name]) + ) + ) + elif role.role_state in [ApiRoleState.STOPPED]: + handle_commands( + cmd_api.start_command( + body=ApiRoleNameList(items=[host_monitor_role.name]) + ) + ) + + # Yield the role + current_role = role_api.read_role(host_monitor_role.name) + current_role.config = role_api.read_role_config(host_monitor_role.name) + yield current_role + + # Retrieve the test changes + post_role = role_api.read_role(role_name=host_monitor_role.name) + post_role.config = 
role_api.read_role_config(role_name=host_monitor_role.name) + + # Reset state + if pre_role.role_state != post_role.role_state: + if pre_role.role_state in [ApiRoleState.STARTED]: + handle_commands( + cmd_api.start_command( + body=ApiRoleNameList(items=[host_monitor_role.name]) + ) + ) + elif pre_role.role_state in [ApiRoleState.STOPPED]: + handle_commands( + cmd_api.stop_command( + body=ApiRoleNameList(items=[host_monitor_role.name]) + ) + ) + + # Reset maintenance + if pre_role.maintenance_mode != post_role.maintenance_mode: + if pre_role.maintenance_mode: + role_api.enter_maintenance_mode(host_monitor_role.name) + else: + role_api.exit_maintenance_mode(host_monitor_role.name) + + # Reset config + pre_role_config_set = set([c.name for c in pre_role.config.items]) + + reconciled = pre_role.config.items.copy() + config_reset = [ + c for c in post_role.config.items if c.name not in pre_role_config_set + ] + reconciled.extend([ApiConfig(c.name, None) for c in config_reset]) + + role_api.update_role_config( + role_name=host_monitor_role.name, + message=f"{Path(request.node.parent.name).stem}::{request.node.name}::reset", + body=ApiConfigList(items=reconciled), + ) + + +def handle_commands(api_client: ApiClient, commands: ApiBulkCommandList): + if commands.errors: + error_msg = "\n".join(commands.errors) + raise Exception(error_msg) + + for cmd in commands.items: + # Serial monitoring + monitor_command(api_client, cmd) + + +def monitor_command( + api_client: ApiClient, command: ApiCommand, polling: int = 10, delay: int = 15 +): + poll_count = 0 + while command.active: + if poll_count > polling: + raise Exception("Command timeout: " + str(command.id)) + sleep(delay) + poll_count += 1 + command = CommandsResourceApi(api_client).read_command(command.id) + if not command.success: + raise Exception(command.result_message) diff --git a/tests/unit/plugins/modules/cm_service_role_config_group/test_cm_service_role_config_group.py 
b/tests/unit/plugins/modules/cm_service_role_config_group/test_cm_service_role_config_group.py index 2006d225..3d198107 100644 --- a/tests/unit/plugins/modules/cm_service_role_config_group/test_cm_service_role_config_group.py +++ b/tests/unit/plugins/modules/cm_service_role_config_group/test_cm_service_role_config_group.py @@ -54,12 +54,12 @@ ) ) def test_cm_role_config_group_config_set( - conn, module_args, host_monitor_config, request + conn, module_args, host_monitor_role_group_config, request ): module_args( { **conn, - "type": host_monitor_config.role_type, + "type": host_monitor_role_group_config.role_type, "parameters": dict(mgmt_num_descriptor_fetch_tries=32), "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", # _ansible_check_mode=True, @@ -96,12 +96,12 @@ def test_cm_role_config_group_config_set( ) ) def test_cm_role_config_group_config_unset( - conn, module_args, host_monitor_config, request + conn, module_args, host_monitor_role_group_config, request ): module_args( { **conn, - "type": host_monitor_config.role_type, + "type": host_monitor_role_group_config.role_type, "parameters": dict(mgmt_num_descriptor_fetch_tries=None), "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", # _ansible_check_mode=True, @@ -138,12 +138,12 @@ def test_cm_role_config_group_config_unset( ) ) def test_cm_role_config_group_config_set_purge( - conn, module_args, host_monitor_config, request + conn, module_args, host_monitor_role_group_config, request ): module_args( { **conn, - "type": host_monitor_config.role_type, + "type": host_monitor_role_group_config.role_type, "parameters": dict(mgmt_num_descriptor_fetch_tries=32), "purge": True, "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", @@ -181,12 +181,12 @@ def test_cm_role_config_group_config_set_purge( ) ) def test_cm_role_config_group_config_purge_all( - conn, module_args, host_monitor_config, request + conn, module_args, host_monitor_role_group_config, 
request ): module_args( { **conn, - "type": host_monitor_config.role_type, + "type": host_monitor_role_group_config.role_type, "parameters": dict(), "purge": True, "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", @@ -213,14 +213,14 @@ def test_cm_role_config_group_config_purge_all( @pytest.mark.role_config_group(ApiRoleConfigGroup(display_name="Test")) def test_cm_role_config_group_display_name_set( - conn, module_args, host_monitor_config, request + conn, module_args, host_monitor_role_group_config, request ): expected = "Updated Test" module_args( { **conn, - "type": host_monitor_config.role_type, + "type": host_monitor_role_group_config.role_type, "display_name": expected, "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", # _ansible_check_mode=True, diff --git a/tests/unit/plugins/modules/cm_service_role_config_group_config/test_cm_service_role_config_group_config.py b/tests/unit/plugins/modules/cm_service_role_config_group_config/test_cm_service_role_config_group_config.py index 99df06c4..14218d3c 100644 --- a/tests/unit/plugins/modules/cm_service_role_config_group_config/test_cm_service_role_config_group_config.py +++ b/tests/unit/plugins/modules/cm_service_role_config_group_config/test_cm_service_role_config_group_config.py @@ -60,11 +60,11 @@ def test_missing_required_if(conn, module_args): cm_service_role_config_group_config.main() -def test_present_invalid_parameter(conn, module_args, host_monitor): +def test_present_invalid_parameter(conn, module_args, host_monitor_role): module_args( { **conn, - "name": host_monitor.role_config_group_ref.role_config_group_name, + "name": host_monitor_role.role_config_group_ref.role_config_group_name, "parameters": dict(example="Example"), } ) From 26402b2be0457643b8f4f1dd20b8e9f379982314 Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Fri, 20 Dec 2024 14:40:18 -0500 Subject: [PATCH 22/31] Add cm_service_role and tests Signed-off-by: Webster Mudge --- 
plugins/modules/cm_service_role.py | 740 ++++++++++++++++++ .../cm_service_role/test_cm_service_role.py | 374 +++++++++ 2 files changed, 1114 insertions(+) create mode 100644 plugins/modules/cm_service_role.py create mode 100644 tests/unit/plugins/modules/cm_service_role/test_cm_service_role.py diff --git a/plugins/modules/cm_service_role.py b/plugins/modules/cm_service_role.py new file mode 100644 index 00000000..db5db3cd --- /dev/null +++ b/plugins/modules/cm_service_role.py @@ -0,0 +1,740 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright 2024 Cloudera, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +DOCUMENTATION = r""" +module: cm_service_role +short_description: Manage a Cloudera Manager Service role +description: + - Manage a Cloudera Manager Service role +author: + - "Webster Mudge (@wmudge)" +requirements: + - cm-client +options: + cms_hostname: + description: + - The hostname of a cluster instance for the role. + - Mutually exclusive with I(cluster_host_id). + type: str + aliases: + - cluster_host + cms_host_id: + description: + - The host ID of a cluster instance for the role. + - Mutually exclusive with I(cluster_hostname). + type: str + type: + description: + - A role type for the role. + - Required if the I(state) creates a new role. + type: str + aliases: + - role_type + role_config_group: + description: + - A role type for the role. + - Required if the I(state) creates a new role. 
+ type: str + aliases: + - role_type + config: + description: + - The role configuration to set, i.e. overrides. + - To unset a parameter, use C(None) as the value. + type: dict + aliases: + - params + - parameters + maintenance: + description: + - Flag for whether the role should be in maintenance mode. + type: bool + aliases: + - maintenance_mode + tags: + description: + - A set of tags applied to the role. + - To unset a tag, use C(None) as its value. + type: dict + purge: + description: + - Flag for whether the declared role tags should append or overwrite any existing tags. + - To clear all tags, set I(tags={}), i.e. an empty dictionary, and I(purge=True). + type: bool + default: False + state: + description: + - The state of the role. + - Note, if the declared state is invalid for the role, for example, the role is a C(HDFS GATEWAY), the module will return an error. + type: str + default: present + choices: + - present + - absent + - restarted + - started + - stopped +extends_documentation_fragment: + - cloudera.cluster.cm_options + - cloudera.cluster.cm_endpoint +attributes: + check_mode: + support: full + diff_mode: + support: full + platform: + platforms: all +""" + +EXAMPLES = r""" +- name: Establish a service role (auto-generated name) + cloudera.cluster.service_role: + host: example.cloudera.com + username: "jane_smith" + password: "S&peR4Ec*re" + cluster: example-cluster + service: example-hdfs + type: GATEWAY + cluster_hostname: worker-01.cloudera.internal + +- name: Establish a service role (defined name) + cloudera.cluster.service_role: + host: example.cloudera.com + username: "jane_smith" + password: "S&peR4Ec*re" + cluster: example-cluster + service: example-hdfs + type: GATEWAY + name: example-gateway + cluster_hostname: worker-01.cloudera.internal + +- name: Set a service role to maintenance mode + cloudera.cluster.service_role: + host: example.cloudera.com + username: "jane_smith" + password: "S&peR4Ec*re" + cluster: example-cluster + service: 
example-hdfs + name: example-gateway + maintenance: yes + +- name: Update (append) tags to a service role + cloudera.cluster.service_role: + host: example.cloudera.com + username: "jane_smith" + password: "S&peR4Ec*re" + cluster: example-cluster + service: example-hdfs + name: example-gateway + tags: + tag_one: value_one + tag_two: value_two + +- name: Set (purge) tags to a service role + cloudera.cluster.service_role: + host: example.cloudera.com + username: "jane_smith" + cluster: example-cluster + service: example-hdfs + name: example-gateway + tags: + tag_three: value_three + purge: yes + +- name: Remove all tags on a service role + cloudera.cluster.service_role: + host: example.cloudera.com + username: "jane_smith" + password: "S&peR4Ec*re" + cluster: example-cluster + service: example-hdfs + name: example-gateway + tags: {} + purge: yes + +- name: Start a service role + cloudera.cluster.service_role: + host: example.cloudera.com + username: "jane_smith" + password: "S&peR4Ec*re" + cluster: example-cluster + service: example-hdfs + name: example-gateway + state: started + +- name: Force a restart to a service role + cloudera.cluster.service_role: + host: example.cloudera.com + username: "jane_smith" + password: "S&peR4Ec*re" + cluster: example-cluster + service: example-hdfs + name: example-gateway + state: restarted + +- name: Start a service role + cloudera.cluster.service_role: + host: example.cloudera.com + username: "jane_smith" + password: "S&peR4Ec*re" + cluster: example-cluster + service: example-hdfs + name: example-gateway + state: started + +- name: Remove a service role + cloudera.cluster.service_role: + host: example.cloudera.com + username: "jane_smith" + password: "S&peR4Ec*re" + cluster: example-cluster + service: example-hdfs + name: example-gateway + state: absent +""" + +RETURN = r""" +role: + description: Details about the service role. + type: dict + contains: + name: + description: The cluster service role name. 
+ type: str + returned: always + type: + description: The cluster service role type. + type: str + returned: always + sample: + - NAMENODE + - DATANODE + - TASKTRACKER + host_id: + description: The unique ID of the cluster host. + type: str + returned: always + service_name: + description: The name of the cluster service, which uniquely identifies it in a cluster. + type: str + returned: always + role_state: + description: State of the cluster service role. + type: str + returned: always + sample: + - HISTORY_NOT_AVAILABLE + - UNKNOWN + - STARTING + - STARTED + - STOPPING + - STOPPED + - NA + commission_state: + description: Commission state of the cluster service role. + type: str + returned: always + health_summary: + description: The high-level health status of the cluster service role. + type: str + returned: always + sample: + - DISABLED + - HISTORY_NOT_AVAILABLE + - NOT_AVAILABLE + - GOOD + - CONCERNING + - BAD + config_staleness_status: + description: Status of configuration staleness for the cluster service role. + type: str + returned: always + sample: + - FRESH + - STALE_REFRESHABLE + - STALE + health_checks: + description: Lists all available health checks for cluster service role. + type: list + elements: dict + returned: when supported + contains: + name: + description: Unique name of this health check. + type: str + returned: always + summary: + description: The high-level health status of the health check. + type: str + returned: always + sample: + - DISABLED + - HISTORY_NOT_AVAILABLE + - NOT_AVAILABLE + - GOOD + - CONCERNING + - BAD + explanation: + description: The explanation of this health check. + type: str + returned: when supported + suppressed: + description: + - Whether this health check is suppressed. + - A suppressed health check is not considered when computing the role's overall health. + type: bool + returned: when supported + maintenance_mode: + description: Whether the cluster service role is in maintenance mode. 
+ type: bool + returned: when supported + maintenance_owners: + description: The list of objects that trigger this service to be in maintenance mode. + type: list + elements: str + returned: when supported + sample: + - CLUSTER + - SERVICE + - ROLE + - HOST + - CONTROL_PLANE + role_config_group_name: + description: The name of the cluster service role config group, which uniquely identifies it in a Cloudera Manager installation. + type: str + returned: when supported + tags: + description: The dictionary of tags for the cluster service role. + type: dict + returned: when supported + zoo_keeper_server_mode: + description: + - The Zookeeper server mode for this cluster service role. + - Note that for non-Zookeeper Server roles, this will be C(null). + type: str + returned: when supported +""" + +from ansible.module_utils.common.text.converters import to_native + +from ansible_collections.cloudera.cluster.plugins.module_utils.cm_utils import ( + ClouderaManagerMutableModule, + ConfigListUpdates, +) +from ansible_collections.cloudera.cluster.plugins.module_utils.role_utils import ( + create_role, + parse_role_result, +) + +from cm_client import ( + ApiBulkCommandList, + ApiRole, + ApiRoleList, + ApiRoleNameList, + ApiRoleState, + MgmtRolesResourceApi, + MgmtRoleCommandsResourceApi, + MgmtServiceResourceApi, +) +from cm_client.rest import ApiException + + +class ClouderaManagerServiceRole(ClouderaManagerMutableModule): + def __init__(self, module): + super(ClouderaManagerServiceRole, self).__init__(module) + + # Set the parameters + self.cluster_hostname = self.get_param("cluster_hostname") + self.cluster_host_id = self.get_param("cluster_host_id") + self.config = self.get_param("config") + self.maintenance = self.get_param("maintenance") + self.type = self.get_param("type") + self.state = self.get_param("state") + self.purge = self.get_param("purge") + + # Initialize the return values + self.changed = False + self.diff = dict(before={}, after={}) + self.output = {} + + 
# Execute the logic + self.process() + + @ClouderaManagerMutableModule.handle_process + def process(self): + # Confirm that CMS is present + try: + MgmtServiceResourceApi(self.api_client).read_service() + except ApiException as ex: + if ex.status == 404: + self.module.fail_json(msg="Cloudera Management Service does not exist") + else: + raise ex + + self.role_api = MgmtRolesResourceApi(self.api_client) + + current = None + + # Discover the role by its type + try: + current = next( + iter( + [r for r in self.role_api.read_roles().items if r.type == self.type] + ), + None, + ) + current.config = self.role_api.read_role_config(current.name) + except ApiException as ex: + if ex.status != 404: + raise ex + + # If deleting, do so and exit + if self.state == "absent": + if current: + self.deprovision_role(current) + + # Otherwise, manage the configuration and state + elif self.state in ["present", "restarted", "started", "stopped"]: + # If it is a new role + if not current: + new_role = create_role( + api_client=self.api_client, + role_type=self.type, + hostname=self.cluster_hostname, + host_id=self.cluster_host_id, + config=self.config, + ) + current = self.provision_role(new_role) + # # If it exists, but the type has changed, destroy and rebuild completely + # elif self.type and self.type != current.type: + # new_role = create_role( + # api_client=self.api_client, + # role_type=self.type, + # hostname=current.host_ref.hostname, + # host_id=current.host_ref.host_id, + # config=self.config + # ) + # current = self.reprovision_role(current, new_role) + # Else it exists, so address any changes + else: + # Handle role override configurations + if self.config or self.purge: + updates = ConfigListUpdates(current.config, self.config, self.purge) + + if updates.changed: + self.changed = True + + if self.module._diff: + self.diff["before"].update(config=updates.diff["before"]) + self.diff["after"].update(config=updates.diff["after"]) + + if not self.module.check_mode: + 
self.role_api.update_role_config( + current.name, + message=self.message, + body=updates.config, + ) + + # Handle maintenance mode + if ( + self.maintenance is not None + and self.maintenance != current.maintenance_mode + ): + self.changed = True + + if self.module._diff: + self.diff["before"].update( + maintenance_mode=current.maintenance_mode + ) + self.diff["after"].update(maintenance_mode=self.maintenance) + + if not self.module.check_mode: + if self.maintenance: + maintenance_cmd = self.role_api.enter_maintenance_mode( + current.name + ) + else: + maintenance_cmd = self.role_api.exit_maintenance_mode( + current.name + ) + + if maintenance_cmd.success is False: + self.module.fail_json( + msg=f"Unable to set Maintenance mode to '{self.maintenance}': {maintenance_cmd.result_message}" + ) + + # Handle the various states + if self.state == "started" and current.role_state not in [ + ApiRoleState.STARTED + ]: + self.changed = True + + if self.module._diff: + self.diff["before"].update(role_state=current.role_state) + self.diff["after"].update(role_state="STARTED") + + if not self.module.check_mode: + self.handle_commands( + MgmtRoleCommandsResourceApi(self.api_client).start_command( + body=ApiRoleNameList(items=[current.name]), + ) + ) + + elif self.state == "stopped" and current.role_state not in [ + ApiRoleState.STOPPED, + ApiRoleState.NA, + ]: + self.changed = True + + if self.module._diff: + self.diff["before"].update(role_state=current.role_state) + self.diff["after"].update(role_state="STOPPED") + + if not self.module.check_mode: + self.handle_commands( + MgmtRoleCommandsResourceApi(self.api_client).stop_command( + body=ApiRoleNameList(items=[current.name]), + ) + ) + + elif self.state == "restarted": + self.changed = True + + if self.module._diff: + self.diff["before"].update(role_state=current.role_state) + self.diff["after"].update(role_state="STARTED") + + if not self.module.check_mode: + self.handle_commands( + 
MgmtRoleCommandsResourceApi(self.api_client).restart_command( + body=ApiRoleNameList(items=[current.name]), + ) + ) + + # If there are changes, get a refresh read + if self.changed: + refresh = self.role_api.read_role(current.name) + refresh.config = self.role_api.read_role_config(current.name) + self.output = parse_role_result(refresh) + # Otherwise return the existing + else: + self.output = parse_role_result(current) + else: + self.module.fail_json(msg=f"Invalid state: {self.state}") + + def provision_role(self, role: ApiRole) -> ApiRole: + self.changed = True + + if self.module._diff: + self.diff = dict( + before={}, + after=role.to_dict(), + ) + + if not self.module.check_mode: + created_role = next( + ( + iter( + self.role_api.create_roles( + body=ApiRoleList(items=[role]), + ).items + ) + ), + {}, + ) + if not created_role: + self.module.fail_json( + msg="Unable to create new role", role=to_native(role.to_dict()) + ) + + def reprovision_role(self, existing_role: ApiRole, new_role: ApiRole) -> ApiRole: + self.changed = True + + if self.module._diff: + self.diff = dict( + before=existing_role.to_dict(), + after=new_role.to_dict(), + ) + + if not self.module.check_mode: + self.role_api.delete_role(existing_role.name) + + rebuilt_role = next( + ( + iter( + self.role_api.create_roles( + body=ApiRoleList(items=[new_role]), + ).items + ) + ), + {}, + ) + if not rebuilt_role: + self.module.fail_json( + msg="Unable to recreate role, " + existing_role.name, + role=to_native(rebuilt_role.to_dict()), + ) + + def deprovision_role(self, role: ApiRole): + self.changed = True + + if self.module._diff: + self.diff = dict(before=role.to_dict(), after=dict()) + + if not self.module.check_mode: + self.role_api.delete_role(role.name) + + # def xxxcreate_role(self) -> ApiRole: + # # Check for required creation parameters + # missing_params = [] + + # if self.type is None: + # missing_params.append("type") + + # if self.cluster_hostname is None and self.cluster_host_id is None: + 
# missing_params += ["cluster_hostname", "cluster_host_id"] + + # if missing_params: + # self.module.fail_json( + # msg=f"Unable to create new role, missing required arguments: {', '.join(sorted(missing_params)) }" + # ) + + # # Set up the role + # payload = ApiRole(type=str(self.type).upper()) + + # # Name + # if self.name: + # payload.name = self.name # No name allows auto-generation + + # # Host assignment + # host_ref = get_host_ref(self.api_client, self.cluster_hostname, self.cluster_host_id) + + # if host_ref is None: + # self.module.fail_json(msg="Invalid host reference") + # else: + # payload.host_ref = host_ref + + # # Role override configurations + # if self.config: + # payload.config = ApiConfigList(items=[ApiConfig(name=k, value=v) for k, v in self.config.items()]) + + # # Execute the creation + # self.changed = True + + # if self.module._diff: + # self.diff = dict( + # before={}, + # after=payload.to_dict(), + # ) + + # if not self.module.check_mode: + # created_role = next( + # ( + # iter( + # self.role_api.create_roles( + # body=ApiRoleList(items=[payload]), + # ).items + # ) + # ), + # {}, + # ) + + # # Maintenance + # if self.maintenance: + # if self.module._diff: + # self.diff["after"].update(maintenance_mode=True) + + # maintenance_cmd = self.role_api.enter_maintenance_mode( + # created_role.name + # ) + + # if maintenance_cmd.success is False: + # self.module.fail_json( + # msg=f"Unable to set Maintenance mode to '{self.maintenance}': {maintenance_cmd.result_message}" + # ) + + # if self.state in ["started", "restarted"]: + # self.handle_commands(MgmtRoleCommandsResourceApi(self.api_client).start_command( + # body=ApiRoleNameList(items=[created_role.name]), + # )) + + # elif self.state == "stopped": + # self.handle_commands(MgmtRoleCommandsResourceApi(self.api_client).stop_command( + # body=ApiRoleNameList(items=[created_role.name]), + # )) + + # if refresh: + # self.output = parse_role_result( + # self.role_api.read_role( + # self.cluster, + # 
created_role.name, + # self.service, + # view="full", + # ) + # ) + # else: + # self.output = parse_role_result(created_role) + + def handle_commands(self, commands: ApiBulkCommandList): + if commands.errors: + error_msg = "\n".join(commands.errors) + self.module.fail_json(msg=error_msg) + + for c in commands.items: + # Not in parallel, but should only be a single command + self.wait_command(c) + + +def main(): + module = ClouderaManagerMutableModule.ansible_module( + argument_spec=dict( + cluster_hostname=dict(aliases=["cluster_host"]), + cluster_host_id=dict(), + maintenance=dict(type="bool", aliases=["maintenance_mode"]), + config=dict(type="dict", aliases=["params", "parameters"]), + purge=dict(type="bool", default=False), + type=dict(required=True), + state=dict( + default="present", + choices=["present", "absent", "restarted", "started", "stopped"], + ), + ), + mutually_exclusive=[ + ["cluster_hostname", "cluster_host_id"], + ], + supports_check_mode=True, + ) + + result = ClouderaManagerServiceRole(module) + + output = dict( + changed=result.changed, + role=result.output, + ) + + if module._diff: + output.update(diff=result.diff) + + if result.debug: + log = result.log_capture.getvalue() + output.update(debug=log, debug_lines=log.split("\n")) + + module.exit_json(**output) + + +if __name__ == "__main__": + main() diff --git a/tests/unit/plugins/modules/cm_service_role/test_cm_service_role.py b/tests/unit/plugins/modules/cm_service_role/test_cm_service_role.py new file mode 100644 index 00000000..ed5ce806 --- /dev/null +++ b/tests/unit/plugins/modules/cm_service_role/test_cm_service_role.py @@ -0,0 +1,374 @@ +# -*- coding: utf-8 -*- + +# Copyright 2024 Cloudera, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +import logging +import pytest + +from collections.abc import Generator +from pathlib import Path + +from cm_client import ( + ApiConfig, + ApiConfigList, + ApiRole, + ClustersResourceApi, + MgmtRolesResourceApi, +) + +from ansible_collections.cloudera.cluster.plugins.modules import cm_service_role +from ansible_collections.cloudera.cluster.tests.unit import ( + AnsibleExitJson, + AnsibleFailJson, + provision_cm_role, + cm_role_config, +) + +LOG = logging.getLogger(__name__) + + +@pytest.fixture(scope="module") +def host_monitor(cm_api_client, cms, request) -> Generator[ApiRole]: + api = MgmtRolesResourceApi(cm_api_client) + + hm = next( + iter([r for r in api.read_roles().items if r.type == "HOSTMONITOR"]), None + ) + + if hm is not None: + yield hm + else: + cluster_api = ClustersResourceApi(cm_api_client) + + # Get first host of the cluster + hosts = cluster_api.list_hosts(cluster_name=cms.cluster_ref.cluster_name) + + if not hosts.items: + raise Exception( + "No available hosts to assign the Cloudera Manager Service role." 
+ ) + else: + name = Path(request.fixturename).stem + yield from provision_cm_role( + cm_api_client, name, "HOSTMONITOR", hosts.items[0].hostId + ) + + +@pytest.fixture(scope="function") +def host_monitor_config(cm_api_client, host_monitor, request) -> Generator[ApiRole]: + marker = request.node.get_closest_marker("role_config") + + if marker is None: + raise Exception("No role_config marker found.") + + yield from cm_role_config( + api_client=cm_api_client, + role=host_monitor, + params=marker.args[0], + message=f"{Path(request.node.parent.name).stem}::{request.node.name}", + ) + + +def test_missing_required(conn, module_args): + module_args(conn) + + with pytest.raises(AnsibleFailJson, match="parameters"): + cm_service_role.main() + + +def test_missing_required_if(conn, module_args): + module_args( + { + **conn, + "parameters": dict(), + } + ) + + with pytest.raises(AnsibleFailJson, match="name, type"): + cm_service_role.main() + + +def test_present_invalid_parameter(conn, module_args, host_monitor): + module_args( + { + **conn, + "role": host_monitor.name, + "parameters": dict(example="Example"), + } + ) + + with pytest.raises( + AnsibleFailJson, match="Unknown configuration attribute 'example'" + ): + cm_service_role.main() + + +@pytest.mark.role( + ApiRole( + config=ApiConfigList( + items=[ + ApiConfig("mgmt_num_descriptor_fetch_tries", 11), + ApiConfig("process_start_secs", 21), + ] + ) + ) +) +def test_set_parameters(conn, module_args, host_monitor_state, request): + module_args( + { + **conn, + "type": host_monitor_state.type, + "config": dict(mgmt_num_descriptor_fetch_tries=32), + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + # _ansible_check_mode=True, + # _ansible_diff=True, + } + ) + + expected = dict(mgmt_num_descriptor_fetch_tries="32", process_start_secs="21") + + with pytest.raises(AnsibleExitJson) as e: + cm_service_role.main() + + assert e.value.changed == True + assert expected.items() <= 
e.value.role["config"].items() + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service_role.main() + + assert e.value.changed == False + assert expected.items() <= e.value.role["config"].items() + + +@pytest.mark.role_config( + dict(mgmt_num_descriptor_fetch_tries=11, process_start_secs=21) +) +def test_set_parameters_role_type(conn, module_args, host_monitor_config, request): + module_args( + { + **conn, + "role_type": host_monitor_config.type, + "parameters": dict(mgmt_num_descriptor_fetch_tries=32), + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + # _ansible_check_mode=True, + # _ansible_diff=True, + } + ) + + expected = dict(mgmt_num_descriptor_fetch_tries="32", process_start_secs="21") + + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config.main() + + assert e.value.changed == True + assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config.main() + + assert e.value.changed == False + assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + + +@pytest.mark.role_config( + dict(mgmt_num_descriptor_fetch_tries=11, process_start_secs=21) +) +def test_unset_parameters(conn, module_args, host_monitor_config, request): + module_args( + { + **conn, + "role": host_monitor_config.name, + "parameters": dict(mgmt_num_descriptor_fetch_tries=None), + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + } + ) + + expected = dict(process_start_secs="21") + + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config.main() + + assert e.value.changed == True + assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config.main() + + assert e.value.changed == False + assert expected.items() <= {c["name"]: c["value"] for c in 
e.value.config}.items() + + +@pytest.mark.role_config( + dict(mgmt_num_descriptor_fetch_tries=11, process_start_secs=21) +) +def test_unset_parameters_role_type(conn, module_args, host_monitor_config, request): + module_args( + { + **conn, + "type": host_monitor_config.type, + "parameters": dict(mgmt_num_descriptor_fetch_tries=None), + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + } + ) + + expected = dict(process_start_secs="21") + + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config.main() + + assert e.value.changed == True + assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config.main() + + assert e.value.changed == False + assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + + +@pytest.mark.role_config( + dict(mgmt_num_descriptor_fetch_tries=11, process_start_secs=21) +) +def test_set_parameters_with_purge(conn, module_args, host_monitor_config, request): + module_args( + { + **conn, + "role": host_monitor_config.name, + "parameters": dict(mgmt_num_descriptor_fetch_tries=32), + "purge": True, + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + # _ansible_check_mode=True, + # _ansible_diff=True, + } + ) + + expected = dict(mgmt_num_descriptor_fetch_tries="32") + + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config.main() + + assert e.value.changed == True + assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config.main() + + assert e.value.changed == False + assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + + +@pytest.mark.role_config( + dict(mgmt_num_descriptor_fetch_tries=11, process_start_secs=21) +) +def test_set_parameters_with_purge_role_type( + conn, module_args, 
host_monitor_config, request +): + module_args( + { + **conn, + "role_type": host_monitor_config.type, + "parameters": dict(mgmt_num_descriptor_fetch_tries=32), + "purge": True, + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + # _ansible_check_mode=True, + # _ansible_diff=True, + } + ) + + expected = dict(mgmt_num_descriptor_fetch_tries="32") + + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config.main() + + assert e.value.changed == True + assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config.main() + + assert e.value.changed == False + assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + + +@pytest.mark.role_config( + dict(mgmt_num_descriptor_fetch_tries=11, process_start_secs=21) +) +def test_purge_all_parameters(conn, module_args, host_monitor_config, request): + module_args( + { + **conn, + "role": host_monitor_config.name, + "parameters": dict(), + "purge": True, + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + # _ansible_check_mode=True, + # _ansible_diff=True, + } + ) + + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config.main() + + assert e.value.changed == True + assert len(e.value.config) == 0 + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config.main() + + assert e.value.changed == False + assert len(e.value.config) == 0 + + +@pytest.mark.role_config( + dict(mgmt_num_descriptor_fetch_tries=11, process_start_secs=21) +) +def test_purge_all_parameters_role_type( + conn, module_args, host_monitor_config, request +): + module_args( + { + **conn, + "type": host_monitor_config.type, + "parameters": dict(), + "purge": True, + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + # _ansible_check_mode=True, + # _ansible_diff=True, + } + ) + + with 
pytest.raises(AnsibleExitJson) as e: + cm_service_role_config.main() + + assert e.value.changed == True + assert len(e.value.config) == 0 + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config.main() + + assert e.value.changed == False + assert len(e.value.config) == 0 From 092d3dcf6ee65ec5c66fac35bebbd3c83c0ebe46 Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Fri, 20 Dec 2024 14:40:30 -0500 Subject: [PATCH 23/31] Add 'role' marker Signed-off-by: Webster Mudge --- pyproject.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/pyproject.toml b/pyproject.toml index 38f16eaa..5f8c1bd0 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -59,6 +59,7 @@ markers = [ "role_config: Prepare role override configurations for tests", "role_config_group_config: Prepare role config group configurations for tests", "role_config_group: Prepare a role config group for tests.", + "role: Prepare a role for tests.", ] [build-system] From 904b9c2da2baf20b63c03819faf91e9e6fd86974 Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Sun, 29 Dec 2024 10:43:43 -0500 Subject: [PATCH 24/31] Consolidate host_monitor role Signed-off-by: Webster Mudge --- tests/unit/conftest.py | 46 +++++++++++++++++++ .../cm_service_role/test_cm_service_role.py | 42 ----------------- .../test_cm_service_role_config.py | 42 ----------------- 3 files changed, 46 insertions(+), 84 deletions(-) diff --git a/tests/unit/conftest.py b/tests/unit/conftest.py index d814a574..95bcd110 100644 --- a/tests/unit/conftest.py +++ b/tests/unit/conftest.py @@ -168,6 +168,10 @@ def cm_api_client(conn) -> ApiClient: # Handle redirects redirect = rest.GET(url).urllib3_response.geturl() + + if redirect == None: + raise Exception("Unable to establish connection to Cloudera Manager") + if redirect != "/": url = redirect @@ -341,6 +345,48 @@ def cms_config(cm_api_client, cms, request) -> Generator[ApiService]: ) +@pytest.fixture(scope="module") +def host_monitor(cm_api_client, cms, request) -> 
Generator[ApiRole]: + api = MgmtRolesResourceApi(cm_api_client) + + hm = next( + iter([r for r in api.read_roles().items if r.type == "HOSTMONITOR"]), None + ) + + if hm is not None: + yield hm + else: + cluster_api = ClustersResourceApi(cm_api_client) + + # Get first host of the cluster + hosts = cluster_api.list_hosts(cluster_name=cms.cluster_ref.cluster_name) + + if not hosts.items: + raise Exception( + "No available hosts to assign the Cloudera Manager Service role." + ) + else: + name = Path(request.fixturename).stem + yield from provision_cm_role( + cm_api_client, name, "HOSTMONITOR", hosts.items[0].hostId + ) + + +@pytest.fixture(scope="function") +def host_monitor_config(cm_api_client, host_monitor, request) -> Generator[ApiRole]: + marker = request.node.get_closest_marker("role_config") + + if marker is None: + raise Exception("No role_config marker found.") + + yield from cm_role_config( + api_client=cm_api_client, + role=host_monitor, + params=marker.args[0], + message=f"{Path(request.node.parent.name).stem}::{request.node.name}", + ) + + @pytest.fixture(scope="module") def host_monitor_role(cm_api_client, cms, request) -> Generator[ApiRole]: api = MgmtRolesResourceApi(cm_api_client) diff --git a/tests/unit/plugins/modules/cm_service_role/test_cm_service_role.py b/tests/unit/plugins/modules/cm_service_role/test_cm_service_role.py index ed5ce806..2ec761d9 100644 --- a/tests/unit/plugins/modules/cm_service_role/test_cm_service_role.py +++ b/tests/unit/plugins/modules/cm_service_role/test_cm_service_role.py @@ -43,48 +43,6 @@ LOG = logging.getLogger(__name__) -@pytest.fixture(scope="module") -def host_monitor(cm_api_client, cms, request) -> Generator[ApiRole]: - api = MgmtRolesResourceApi(cm_api_client) - - hm = next( - iter([r for r in api.read_roles().items if r.type == "HOSTMONITOR"]), None - ) - - if hm is not None: - yield hm - else: - cluster_api = ClustersResourceApi(cm_api_client) - - # Get first host of the cluster - hosts = 
cluster_api.list_hosts(cluster_name=cms.cluster_ref.cluster_name) - - if not hosts.items: - raise Exception( - "No available hosts to assign the Cloudera Manager Service role." - ) - else: - name = Path(request.fixturename).stem - yield from provision_cm_role( - cm_api_client, name, "HOSTMONITOR", hosts.items[0].hostId - ) - - -@pytest.fixture(scope="function") -def host_monitor_config(cm_api_client, host_monitor, request) -> Generator[ApiRole]: - marker = request.node.get_closest_marker("role_config") - - if marker is None: - raise Exception("No role_config marker found.") - - yield from cm_role_config( - api_client=cm_api_client, - role=host_monitor, - params=marker.args[0], - message=f"{Path(request.node.parent.name).stem}::{request.node.name}", - ) - - def test_missing_required(conn, module_args): module_args(conn) diff --git a/tests/unit/plugins/modules/cm_service_role_config/test_cm_service_role_config.py b/tests/unit/plugins/modules/cm_service_role_config/test_cm_service_role_config.py index d230005a..8f6c2337 100644 --- a/tests/unit/plugins/modules/cm_service_role_config/test_cm_service_role_config.py +++ b/tests/unit/plugins/modules/cm_service_role_config/test_cm_service_role_config.py @@ -41,48 +41,6 @@ LOG = logging.getLogger(__name__) -@pytest.fixture(scope="module") -def host_monitor(cm_api_client, cms, request) -> Generator[ApiRole]: - api = MgmtRolesResourceApi(cm_api_client) - - hm = next( - iter([r for r in api.read_roles().items if r.type == "HOSTMONITOR"]), None - ) - - if hm is not None: - yield hm - else: - cluster_api = ClustersResourceApi(cm_api_client) - - # Get first host of the cluster - hosts = cluster_api.list_hosts(cluster_name=cms.cluster_ref.cluster_name) - - if not hosts.items: - raise Exception( - "No available hosts to assign the Cloudera Manager Service role." 
- ) - else: - name = Path(request.fixturename).stem - yield from provision_cm_role( - cm_api_client, name, "HOSTMONITOR", hosts.items[0].hostId - ) - - -@pytest.fixture(scope="function") -def host_monitor_config(cm_api_client, host_monitor, request) -> Generator[ApiRole]: - marker = request.node.get_closest_marker("role_config") - - if marker is None: - raise Exception("No role_config marker found.") - - yield from cm_role_config( - api_client=cm_api_client, - role=host_monitor, - params=marker.args[0], - message=f"{Path(request.node.parent.name).stem}::{request.node.name}", - ) - - def test_missing_required(conn, module_args): module_args(conn) From 44e0fc07f2f23dff079a93cb087a9de9296a2531 Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Fri, 3 Jan 2025 16:53:37 -0500 Subject: [PATCH 25/31] Fix invalid fixture references Signed-off-by: Webster Mudge --- .../unit/plugins/modules/cm_service/test_cm_service.py | 8 ++++---- .../cm_service_config/test_cm_service_config.py | 8 ++++---- .../modules/service_config/test_service_config.py | 10 +++++----- 3 files changed, 13 insertions(+), 13 deletions(-) diff --git a/tests/unit/plugins/modules/cm_service/test_cm_service.py b/tests/unit/plugins/modules/cm_service/test_cm_service.py index af679312..21004145 100644 --- a/tests/unit/plugins/modules/cm_service/test_cm_service.py +++ b/tests/unit/plugins/modules/cm_service/test_cm_service.py @@ -38,7 +38,7 @@ def test_minimal(conn, module_args, cms): @pytest.mark.service_config(dict(log_event_retry_frequency=10)) -def test_set_parameters(conn, module_args, cms_service_config): +def test_set_parameters(conn, module_args, cms_config): module_args( { **conn, @@ -76,7 +76,7 @@ def test_set_parameters(conn, module_args, cms_service_config): @pytest.mark.service_config( dict(mgmt_emit_sensitive_data_in_stderr=True, log_event_retry_frequency=10) ) -def test_unset_parameters(conn, module_args, cms_service_config): +def test_unset_parameters(conn, module_args, cms_config): module_args( 
{ **conn, @@ -110,7 +110,7 @@ def test_unset_parameters(conn, module_args, cms_service_config): @pytest.mark.service_config( dict(mgmt_emit_sensitive_data_in_stderr=True, log_event_retry_frequency=10) ) -def test_set_parameters_with_purge(conn, module_args, cms_service_config): +def test_set_parameters_with_purge(conn, module_args, cms_config): module_args( { **conn, @@ -147,7 +147,7 @@ def test_set_parameters_with_purge(conn, module_args, cms_service_config): @pytest.mark.service_config( dict(mgmt_emit_sensitive_data_in_stderr=True, log_event_retry_frequency=10) ) -def test_purge_all_parameters(conn, module_args, cms_service_config): +def test_purge_all_parameters(conn, module_args, cms_config): module_args( { **conn, diff --git a/tests/unit/plugins/modules/cm_service_config/test_cm_service_config.py b/tests/unit/plugins/modules/cm_service_config/test_cm_service_config.py index ad54716b..39856c63 100644 --- a/tests/unit/plugins/modules/cm_service_config/test_cm_service_config.py +++ b/tests/unit/plugins/modules/cm_service_config/test_cm_service_config.py @@ -54,7 +54,7 @@ def test_present_invalid_parameter(conn, module_args): @pytest.mark.service_config( dict(mgmt_emit_sensitive_data_in_stderr=False, log_event_retry_frequency=10) ) -def test_set_parameters(conn, module_args, cms_service_config): +def test_set_parameters(conn, module_args, cms_config): module_args( { **conn, @@ -86,7 +86,7 @@ def test_set_parameters(conn, module_args, cms_service_config): @pytest.mark.service_config( dict(mgmt_emit_sensitive_data_in_stderr=True, log_event_retry_frequency=10) ) -def test_unset_parameters(conn, module_args, cms_service_config): +def test_unset_parameters(conn, module_args, cms_config): module_args( { **conn, @@ -114,7 +114,7 @@ def test_unset_parameters(conn, module_args, cms_service_config): @pytest.mark.service_config( dict(mgmt_emit_sensitive_data_in_stderr=True, log_event_retry_frequency=10) ) -def test_set_parameters_with_purge(conn, module_args, 
cms_service_config): +def test_set_parameters_with_purge(conn, module_args, cms_config): module_args( { **conn, @@ -145,7 +145,7 @@ def test_set_parameters_with_purge(conn, module_args, cms_service_config): @pytest.mark.service_config( dict(mgmt_emit_sensitive_data_in_stderr=True, log_event_retry_frequency=10) ) -def test_purge_all_parameters(conn, module_args, cms_service_config): +def test_purge_all_parameters(conn, module_args, cms_config): module_args( { **conn, diff --git a/tests/unit/plugins/modules/service_config/test_service_config.py b/tests/unit/plugins/modules/service_config/test_service_config.py index 707208a8..cf767d40 100644 --- a/tests/unit/plugins/modules/service_config/test_service_config.py +++ b/tests/unit/plugins/modules/service_config/test_service_config.py @@ -128,11 +128,11 @@ def test_present_invalid_cluster(conn, module_args): service_config.main() -def test_present_invalid_service(conn, module_args, target_service): +def test_present_invalid_service(conn, module_args, zk_service): module_args( { **conn, - "cluster": target_service.cluster_ref.cluster_name, + "cluster": zk_service.cluster_ref.cluster_name, "service": "example", "parameters": dict(example="Example"), } @@ -142,12 +142,12 @@ def test_present_invalid_service(conn, module_args, target_service): service_config.main() -def test_present_invalid_parameter(conn, module_args, target_service): +def test_present_invalid_parameter(conn, module_args, zk_service): module_args( { **conn, - "cluster": target_service.cluster_ref.cluster_name, - "service": target_service.name, + "cluster": zk_service.cluster_ref.cluster_name, + "service": zk_service.name, "parameters": dict(example="Example"), } ) From 159d0d0c881209d40ff03bc98b8f4e4a80517cc3 Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Fri, 3 Jan 2025 16:53:37 -0500 Subject: [PATCH 26/31] Update testing requirements.txt Signed-off-by: Webster Mudge --- tests/unit/requirements.txt | 9 ++++++++- 1 file changed, 8 insertions(+), 1 
deletion(-) diff --git a/tests/unit/requirements.txt b/tests/unit/requirements.txt index ece294ca..db7e089e 100644 --- a/tests/unit/requirements.txt +++ b/tests/unit/requirements.txt @@ -12,4 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. -cm-client +pre-commit +pytest +pytest-mock +ansible-core<2.17 # For RHEL 8 support +molecule +molecule-plugins +molecule-plugins[ec2] +tox-ansible From 516af16494d259ea4323bcfb7c4804e1e62b50ac Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Fri, 3 Jan 2025 16:53:37 -0500 Subject: [PATCH 27/31] Fix name of Cloudera Manager client library Signed-off-by: Webster Mudge --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 78e23e5b..49d92201 100644 --- a/requirements.txt +++ b/requirements.txt @@ -16,4 +16,4 @@ jmespath # For cm_service lookup -cm_client +cm-client From 4a446acfda87309a2d9c360dc7c879c639a17475 Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Fri, 3 Jan 2025 16:53:37 -0500 Subject: [PATCH 28/31] Add pytest comment and update formatting Signed-off-by: Webster Mudge --- pyproject.toml | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 5f8c1bd0..bae03de2 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -6,12 +6,13 @@ readme = "README.md" requires-python = ">=3.8" license = "Apache-2.0" keywords = [] -authors = [ - { name = "Webster Mudge", email = "wmudge@cloudera.com" }, -] +authors = [{ name = "Webster Mudge", email = "wmudge@cloudera.com" }] classifiers = [] dependencies = [] +[tool.hatch.build.targets.wheel] +bypass-selection = true + [tool.hatch.version] path = "galaxy.yml" pattern = "version:\\s+(?P[\\d\\.]+)" @@ -29,7 +30,7 @@ dependencies = [ "molecule-plugins", "molecule-plugins[ec2]", "tox-ansible", - "ansible-core<2.17", # For RHEL 8 support + "ansible-core<2.17", # For RHEL 8 support 
"jmespath", "cm-client", ] @@ -37,17 +38,17 @@ dependencies = [ [tool.hatch.envs.lint] python = "3.12" skip-install = true -extra-dependencies = [ - "ansible-lint", -] +extra-dependencies = ["ansible-lint"] [tool.hatch.envs.lint.scripts] run = "pre-commit run -a" [tool.pytest.ini_options] -testpaths = [ - "tests", -] +# addopts = [ +# "--lf", +# "--nf", +# ] +testpaths = ["tests"] filterwarnings = [ "ignore:AnsibleCollectionFinder has already been configured", "ignore:'crypt' is deprecated and slated for removal in Python 3.13:DeprecationWarning", From 3558133e4058db8e78728a5e8776920854a8fad5 Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Fri, 3 Jan 2025 16:53:37 -0500 Subject: [PATCH 29/31] Update docstrings and add missing imports for fixtures and fixture utilities Signed-off-by: Webster Mudge --- tests/unit/__init__.py | 33 ++++++++++++++++++++++----------- tests/unit/conftest.py | 3 ++- 2 files changed, 24 insertions(+), 12 deletions(-) diff --git a/tests/unit/__init__.py b/tests/unit/__init__.py index 1103f609..c68bf4ba 100644 --- a/tests/unit/__init__.py +++ b/tests/unit/__init__.py @@ -61,7 +61,7 @@ def __init__(self, kwargs): def wait_for_command( api_client: ApiClient, command: ApiCommand, polling: int = 120, delay: int = 5 ): - """Polls Cloudera Manager to wait for a Command to complete.""" + """Polls Cloudera Manager to wait for given Command to succeed or fail.""" poll_count = 0 while command.active: @@ -90,7 +90,7 @@ def provision_service( Exception: _description_ Yields: - ApiService: _description_ + Generator[ApiService]: _description_ """ api = ServicesResourceApi(api_client) @@ -132,7 +132,7 @@ def service_wide_config( Exception: _description_ Yields: - ApiService: _description_ + Generator[ApiService]: _description_ """ service_api = ServicesResourceApi(api_client) @@ -190,6 +190,18 @@ def service_wide_config( def provision_cm_role( api_client: ApiClient, role_name: str, role_type: str, host_id: str ) -> Generator[ApiRole]: + """Yield a 
newly-created Cloudera Manager Service role, deleting the + role after use. Use with 'yield from' within a pytest fixture. + + Args: + api_client (ApiClient): _description_ + role_name (str): _description_ + role_type (str): _description_ + host_id (str): _description_ + + Yields: + Generator[ApiRole]: _description_ + """ api = MgmtRolesResourceApi(api_client) role = ApiRole( @@ -203,7 +215,7 @@ def provision_cm_role( api.delete_role(role_name=role_name) -def cm_role_config( +def set_cm_role_config( api_client: ApiClient, role: ApiRole, params: dict, message: str ) -> Generator[ApiRole]: """Update a role configuration for a given role. Yields the @@ -220,7 +232,7 @@ def cm_role_config( Exception: _description_ Yields: - ApiRole: _description_ + Generator[ApiRole]: _description_ """ role_api = MgmtRolesResourceApi(api_client) @@ -273,11 +285,10 @@ def set_cm_role_config_group( update: ApiRoleConfigGroup, message: str, ) -> Generator[ApiRoleConfigGroup]: - """ - Update a configuration for a given Cloudera Manager Service role config group. - Yields the role config group and upon returning control, will reset the - configuration to its prior state. - Use with 'yield from' within a pytest fixture. + """Update a configuration for a given Cloudera Manager Service role config group. + Yields the role config group and upon returning control, will reset the + configuration to its prior state. + Use with 'yield from' within a pytest fixture. 
Args: api_client (ApiClient): CM API client @@ -286,7 +297,7 @@ def set_cm_role_config_group( message (str): Transaction descriptor; will be appended with '::[re]set' Yields: - ApiRoleConfigGroup: The updated Role Config Group + Generator[ApiRoleConfigGroup]: The updated Role Config Group """ rcg_api = MgmtRoleConfigGroupsResourceApi(api_client) diff --git a/tests/unit/conftest.py b/tests/unit/conftest.py index 95bcd110..a19c2b8d 100644 --- a/tests/unit/conftest.py +++ b/tests/unit/conftest.py @@ -70,6 +70,7 @@ AnsibleFailJson, AnsibleExitJson, provision_cm_role, + set_cm_role_config, set_cm_role_config_group, ) @@ -379,7 +380,7 @@ def host_monitor_config(cm_api_client, host_monitor, request) -> Generator[ApiRo if marker is None: raise Exception("No role_config marker found.") - yield from cm_role_config( + yield from set_cm_role_config( api_client=cm_api_client, role=host_monitor, params=marker.args[0], From 7c5aa310cbf6e00de5f07475cae35de639d06b4e Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Fri, 3 Jan 2025 16:53:38 -0500 Subject: [PATCH 30/31] Remove unused imports Signed-off-by: Webster Mudge --- .../test_cm_service_role_config.py | 9 --------- 1 file changed, 9 deletions(-) diff --git a/tests/unit/plugins/modules/cm_service_role_config/test_cm_service_role_config.py b/tests/unit/plugins/modules/cm_service_role_config/test_cm_service_role_config.py index 8f6c2337..d9581e4d 100644 --- a/tests/unit/plugins/modules/cm_service_role_config/test_cm_service_role_config.py +++ b/tests/unit/plugins/modules/cm_service_role_config/test_cm_service_role_config.py @@ -21,21 +21,12 @@ import logging import pytest -from collections.abc import Generator from pathlib import Path -from cm_client import ( - ApiRole, - ClustersResourceApi, - MgmtRolesResourceApi, -) - from ansible_collections.cloudera.cluster.plugins.modules import cm_service_role_config from ansible_collections.cloudera.cluster.tests.unit import ( AnsibleExitJson, AnsibleFailJson, - provision_cm_role, - 
cm_role_config, ) LOG = logging.getLogger(__name__) From b3c5556e8d5b684625efff8ce23ce39eaa73898a Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Fri, 3 Jan 2025 16:53:38 -0500 Subject: [PATCH 31/31] Update cm_service_role and tests to support role override configurations, host relocation, and states. Signed-off-by: Webster Mudge --- plugins/modules/cm_service_role.py | 292 ++++---------- tests/unit/__init__.py | 92 ++++- tests/unit/conftest.py | 89 ++++- .../cm_service_role/test_cm_service_role.py | 366 +++++++++++++----- 4 files changed, 518 insertions(+), 321 deletions(-) diff --git a/plugins/modules/cm_service_role.py b/plugins/modules/cm_service_role.py index db5db3cd..fe8688fe 100644 --- a/plugins/modules/cm_service_role.py +++ b/plugins/modules/cm_service_role.py @@ -1,7 +1,7 @@ #!/usr/bin/python # -*- coding: utf-8 -*- -# Copyright 2024 Cloudera, Inc. All Rights Reserved. +# Copyright 2025 Cloudera, Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -25,30 +25,25 @@ requirements: - cm-client options: - cms_hostname: + cluster_hostname: description: - The hostname of a cluster instance for the role. + - If the hostname is different than the existing host for the I(type), the role will be destroyed and rebuilt on the declared host. - Mutually exclusive with I(cluster_host_id). type: str aliases: - cluster_host - cms_host_id: + cluster_host_id: description: - The host ID of a cluster instance for the role. + - If the host ID is different than the existing host for the I(type), the role will be destroyed and rebuilt on the declared host. - Mutually exclusive with I(cluster_hostname). type: str type: description: - A role type for the role. - - Required if the I(state) creates a new role. - type: str - aliases: - - role_type - role_config_group: - description: - - A role type for the role. - - Required if the I(state) creates a new role. 
type: str + required: True aliases: - role_type config: @@ -65,21 +60,17 @@ type: bool aliases: - maintenance_mode - tags: - description: - - A set of tags applied to the role. - - To unset a tag, use C(None) as its value. - type: dict purge: description: - - Flag for whether the declared role tags should append or overwrite any existing tags. - - To clear all tags, set I(tags={}), i.e. an empty dictionary, and I(purge=True). + - Flag for whether the declared role configurations should append or overwrite any existing configurations. + - To clear all role configurations, set I(config={}), i.e. an empty dictionary, or omit entirely, and set I(purge=True). type: bool default: False state: description: - The state of the role. - - Note, if the declared state is invalid for the role, for example, the role is a C(HDFS GATEWAY), the module will return an error. + - Note, if the declared state is invalid for the role, the module will return an error. + - Note, I(restarted) will always force a change of state of the role. 
type: str default: present choices: @@ -101,139 +92,101 @@ """ EXAMPLES = r""" -- name: Establish a service role (auto-generated name) - cloudera.cluster.service_role: - host: example.cloudera.com - username: "jane_smith" - password: "S&peR4Ec*re" - cluster: example-cluster - service: example-hdfs - type: GATEWAY - cluster_hostname: worker-01.cloudera.internal - -- name: Establish a service role (defined name) - cloudera.cluster.service_role: +- name: Establish a Cloudera Manager Service role + cloudera.cluster.cm_service_role: host: example.cloudera.com username: "jane_smith" password: "S&peR4Ec*re" - cluster: example-cluster - service: example-hdfs - type: GATEWAY - name: example-gateway + type: HOSTMONITOR cluster_hostname: worker-01.cloudera.internal -- name: Set a service role to maintenance mode - cloudera.cluster.service_role: +- name: Set a Cloudera Manager Service role to maintenance mode + cloudera.cluster.cm_service_role: host: example.cloudera.com username: "jane_smith" password: "S&peR4Ec*re" - cluster: example-cluster - service: example-hdfs - name: example-gateway + type: HOSTMONITOR maintenance: yes -- name: Update (append) tags to a service role - cloudera.cluster.service_role: +- name: Update (append) role configurations to a Cloudera Manager Service role + cloudera.cluster.cm_service_role: host: example.cloudera.com username: "jane_smith" password: "S&peR4Ec*re" - cluster: example-cluster - service: example-hdfs - name: example-gateway - tags: - tag_one: value_one - tag_two: value_two + type: HOSTMONITOR + config: + some_config: value_one + another_config: value_two -- name: Set (purge) tags to a service role - cloudera.cluster.service_role: +- name: Set (purge) role configurations to a Cloudera Manager Service role + cloudera.cluster.cm_service_role: host: example.cloudera.com username: "jane_smith" - cluster: example-cluster - service: example-hdfs - name: example-gateway - tags: - tag_three: value_three + type: HOSTMONITOR + config: + 
yet_another_config: value_three purge: yes -- name: Remove all tags on a service role - cloudera.cluster.service_role: +- name: Remove all role configurations on a Cloudera Manager Service role + cloudera.cluster.cm_service_role: host: example.cloudera.com username: "jane_smith" password: "S&peR4Ec*re" - cluster: example-cluster - service: example-hdfs - name: example-gateway - tags: {} + type: HOSTMONITOR purge: yes -- name: Start a service role - cloudera.cluster.service_role: +- name: Start a Cloudera Manager Service role + cloudera.cluster.cm_service_role: host: example.cloudera.com username: "jane_smith" password: "S&peR4Ec*re" - cluster: example-cluster - service: example-hdfs - name: example-gateway + type: HOSTMONITOR state: started -- name: Force a restart to a service role - cloudera.cluster.service_role: +- name: Force a restart to a Cloudera Manager Service role + cloudera.cluster.cm_service_role: host: example.cloudera.com username: "jane_smith" password: "S&peR4Ec*re" - cluster: example-cluster - service: example-hdfs - name: example-gateway + type: HOSTMONITOR state: restarted -- name: Start a service role - cloudera.cluster.service_role: +- name: Remove a Cloudera Manager Service role + cloudera.cluster.cm_service_role: host: example.cloudera.com username: "jane_smith" password: "S&peR4Ec*re" - cluster: example-cluster - service: example-hdfs - name: example-gateway - state: started - -- name: Remove a service role - cloudera.cluster.service_role: - host: example.cloudera.com - username: "jane_smith" - password: "S&peR4Ec*re" - cluster: example-cluster - service: example-hdfs - name: example-gateway + type: HOSTMONITOR state: absent """ RETURN = r""" role: - description: Details about the service role. + description: Details about the Cloudera Manager Service role. type: dict contains: name: - description: The cluster service role name. + description: + - The Cloudera Manager Service role name. 
+ - Note, this is an auto-generated name and cannot be changed. type: str returned: always type: - description: The cluster service role type. + description: The Cloudera Manager Service role type. type: str returned: always sample: - - NAMENODE - - DATANODE - - TASKTRACKER + - HOSTMONITOR host_id: description: The unique ID of the cluster host. type: str returned: always service_name: - description: The name of the cluster service, which uniquely identifies it in a cluster. + description: The name of the Cloudera Manager Service, which uniquely identifies it in a deployment. type: str - returned: always + returned: when supported role_state: - description: State of the cluster service role. + description: State of the Cloudera Manager Service role. type: str returned: always sample: @@ -245,11 +198,11 @@ - STOPPED - NA commission_state: - description: Commission state of the cluster service role. + description: Commission state of the Cloudera Manager Service role. type: str returned: always health_summary: - description: The high-level health status of the cluster service role. + description: The high-level health status of the Cloudera Manager Service role. type: str returned: always sample: @@ -260,7 +213,7 @@ - CONCERNING - BAD config_staleness_status: - description: Status of configuration staleness for the cluster service role. + description: Status of configuration staleness for the Cloudera Manager Service role. type: str returned: always sample: @@ -268,7 +221,7 @@ - STALE_REFRESHABLE - STALE health_checks: - description: Lists all available health checks for cluster service role. + description: Lists all available health checks for Cloudera Manager Service role. type: list elements: dict returned: when supported @@ -299,7 +252,7 @@ type: bool returned: when supported maintenance_mode: - description: Whether the cluster service role is in maintenance mode. + description: Whether the Cloudera Manager Service role is in maintenance mode. 
type: bool returned: when supported maintenance_owners: @@ -314,16 +267,16 @@ - HOST - CONTROL_PLANE role_config_group_name: - description: The name of the cluster service role config group, which uniquely identifies it in a Cloudera Manager installation. + description: The name of the Cloudera Manager Service role config group, which uniquely identifies it in a Cloudera Manager installation. type: str returned: when supported tags: - description: The dictionary of tags for the cluster service role. + description: The dictionary of tags for the Cloudera Manager Service role. type: dict returned: when supported zoo_keeper_server_mode: description: - - The Zookeeper server mode for this cluster service role. + - The Zookeeper server mode for this Cloudera Manager Service role. - Note that for non-Zookeeper Server roles, this will be C(null). type: str returned: when supported @@ -397,7 +350,8 @@ def process(self): ), None, ) - current.config = self.role_api.read_role_config(current.name) + if current is not None: + current.config = self.role_api.read_role_config(current.name) except ApiException as ex: if ex.status != 404: raise ex @@ -419,16 +373,27 @@ def process(self): config=self.config, ) current = self.provision_role(new_role) - # # If it exists, but the type has changed, destroy and rebuild completely - # elif self.type and self.type != current.type: - # new_role = create_role( - # api_client=self.api_client, - # role_type=self.type, - # hostname=current.host_ref.hostname, - # host_id=current.host_ref.host_id, - # config=self.config - # ) - # current = self.reprovision_role(current, new_role) + # If it exists, but the host has changed, destroy and rebuild completely + elif ( + self.cluster_hostname is not None + and self.cluster_hostname != current.host_ref.hostname + ) or ( + self.cluster_host_id is not None + and self.cluster_host_id != current.host_ref.host_id + ): + if self.config: + new_config = self.config + else: + new_config = {c.name: c.value for c in 
current.config.items} + + new_role = create_role( + api_client=self.api_client, + role_type=current.type, + hostname=self.cluster_hostname, + host_id=self.cluster_host_id, + config=new_config, + ) + current = self.reprovision_role(current, new_role) # Else it exists, so address any changes else: # Handle role override configurations @@ -560,6 +525,7 @@ def provision_role(self, role: ApiRole) -> ApiRole: self.module.fail_json( msg="Unable to create new role", role=to_native(role.to_dict()) ) + return created_role def reprovision_role(self, existing_role: ApiRole, new_role: ApiRole) -> ApiRole: self.changed = True @@ -588,8 +554,11 @@ def reprovision_role(self, existing_role: ApiRole, new_role: ApiRole) -> ApiRole msg="Unable to recreate role, " + existing_role.name, role=to_native(rebuilt_role.to_dict()), ) + return rebuilt_role + else: + return existing_role - def deprovision_role(self, role: ApiRole): + def deprovision_role(self, role: ApiRole) -> None: self.changed = True if self.module._diff: @@ -598,97 +567,6 @@ def deprovision_role(self, role: ApiRole): if not self.module.check_mode: self.role_api.delete_role(role.name) - # def xxxcreate_role(self) -> ApiRole: - # # Check for required creation parameters - # missing_params = [] - - # if self.type is None: - # missing_params.append("type") - - # if self.cluster_hostname is None and self.cluster_host_id is None: - # missing_params += ["cluster_hostname", "cluster_host_id"] - - # if missing_params: - # self.module.fail_json( - # msg=f"Unable to create new role, missing required arguments: {', '.join(sorted(missing_params)) }" - # ) - - # # Set up the role - # payload = ApiRole(type=str(self.type).upper()) - - # # Name - # if self.name: - # payload.name = self.name # No name allows auto-generation - - # # Host assignment - # host_ref = get_host_ref(self.api_client, self.cluster_hostname, self.cluster_host_id) - - # if host_ref is None: - # self.module.fail_json(msg="Invalid host reference") - # else: - # 
payload.host_ref = host_ref - - # # Role override configurations - # if self.config: - # payload.config = ApiConfigList(items=[ApiConfig(name=k, value=v) for k, v in self.config.items()]) - - # # Execute the creation - # self.changed = True - - # if self.module._diff: - # self.diff = dict( - # before={}, - # after=payload.to_dict(), - # ) - - # if not self.module.check_mode: - # created_role = next( - # ( - # iter( - # self.role_api.create_roles( - # body=ApiRoleList(items=[payload]), - # ).items - # ) - # ), - # {}, - # ) - - # # Maintenance - # if self.maintenance: - # if self.module._diff: - # self.diff["after"].update(maintenance_mode=True) - - # maintenance_cmd = self.role_api.enter_maintenance_mode( - # created_role.name - # ) - - # if maintenance_cmd.success is False: - # self.module.fail_json( - # msg=f"Unable to set Maintenance mode to '{self.maintenance}': {maintenance_cmd.result_message}" - # ) - - # if self.state in ["started", "restarted"]: - # self.handle_commands(MgmtRoleCommandsResourceApi(self.api_client).start_command( - # body=ApiRoleNameList(items=[created_role.name]), - # )) - - # elif self.state == "stopped": - # self.handle_commands(MgmtRoleCommandsResourceApi(self.api_client).stop_command( - # body=ApiRoleNameList(items=[created_role.name]), - # )) - - # if refresh: - # self.output = parse_role_result( - # self.role_api.read_role( - # self.cluster, - # created_role.name, - # self.service, - # view="full", - # ) - # ) - # else: - # self.output = parse_role_result(created_role) - def handle_commands(self, commands: ApiBulkCommandList): if commands.errors: error_msg = "\n".join(commands.errors) @@ -707,7 +585,7 @@ def main(): maintenance=dict(type="bool", aliases=["maintenance_mode"]), config=dict(type="dict", aliases=["params", "parameters"]), purge=dict(type="bool", default=False), - type=dict(required=True), + type=dict(required=True, aliases=["role_type"]), state=dict( default="present", choices=["present", "absent", "restarted", "started", 
"stopped"], diff --git a/tests/unit/__init__.py b/tests/unit/__init__.py index c68bf4ba..1a2f8423 100644 --- a/tests/unit/__init__.py +++ b/tests/unit/__init__.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright 2024 Cloudera, Inc. +# Copyright 2025 Cloudera, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -26,17 +26,27 @@ ApiRole, ApiRoleConfigGroup, ApiRoleList, + ApiRoleNameList, + ApiRoleState, ApiService, ApiServiceConfig, ApiServiceList, ClustersResourceApi, CommandsResourceApi, MgmtRolesResourceApi, + MgmtRoleCommandsResourceApi, MgmtRoleConfigGroupsResourceApi, ServicesResourceApi, ) from cm_client.rest import ApiException +from ansible_collections.cloudera.cluster.plugins.module_utils.host_utils import ( + get_host_ref, +) +from ansible_collections.cloudera.cluster.plugins.module_utils.role_utils import ( + get_mgmt_roles, +) + class AnsibleExitJson(Exception): """Exception class to be raised by module.exit_json and caught by the test case""" @@ -215,6 +225,86 @@ def provision_cm_role( api.delete_role(role_name=role_name) +def set_cm_role( + api_client: ApiClient, cluster: ApiCluster, role: ApiRole +) -> Generator[ApiRole]: + """Set a net-new Cloudera Manager Service role. Yields the new role, + resetting to any existing role upon completion. Use with 'yield from' + within a pytest fixture. 
+ """ + role_api = MgmtRolesResourceApi(api_client) + role_cmd_api = MgmtRoleCommandsResourceApi(api_client) + + # Check for existing management role + pre_role = next( + iter([r for r in get_mgmt_roles(api_client, role.type).items]), None + ) + + if pre_role is not None: + # Get the current state + pre_role.config = role_api.read_role_config(role_name=pre_role.name) + + # Remove the prior role + role_api.delete_role(role_name=pre_role.name) + + if not role.host_ref: + cluster_api = ClustersResourceApi(api_client) + + # Get first host of the cluster + hosts = cluster_api.list_hosts(cluster_name=cluster.name) + + if not hosts.items: + raise Exception( + "No available hosts to assign the Cloudera Manager Service role." + ) + + role.host_ref = get_host_ref(api_client, host_id=hosts.items[0].host_id) + + # Create the role under test + current_role = next( + iter(role_api.create_roles(body=ApiRoleList(items=[role])).items), None + ) + current_role.config = role_api.read_role_config(role_name=current_role.name) + + if role.maintenance_mode: + role_api.enter_maintenance_mode(role_name=current_role.name) + + if role.role_state in [ApiRoleState.STARTING, ApiRoleState.STARTED]: + start_cmds = role_cmd_api.start_command( + body=ApiRoleNameList(items=[current_role.name]) + ) + if start_cmds.errors: + error_msg = "\n".join(start_cmds.errors) + raise Exception(error_msg) + + for cmd in start_cmds.items: + # Serial monitoring + wait_for_command(api_client=api_client, command=cmd) + + # Yield the role under test + yield current_role + + # Remove the role under test + current_role = role_api.delete_role(role_name=current_role.name) + + # Reinstate the previous role + if pre_role is not None: + role_api.create_roles(body=ApiRoleList(items=[pre_role])) + if pre_role.maintenance_mode: + role_api.enter_maintenance_mode(pre_role.name) + if pre_role.role_state in [ApiRoleState.STARTED, ApiRoleState.STARTING]: + restart_cmds = role_cmd_api.restart_command( + 
body=ApiRoleNameList(items=[pre_role.name]) + ) + if restart_cmds.errors: + error_msg = "\n".join(restart_cmds.errors) + raise Exception(error_msg) + + for cmd in restart_cmds.items: + # Serial monitoring + wait_for_command(api_client=api_client, command=cmd) + + def set_cm_role_config( api_client: ApiClient, role: ApiRole, params: dict, message: str ) -> Generator[ApiRole]: diff --git a/tests/unit/conftest.py b/tests/unit/conftest.py index a19c2b8d..cda5e8d9 100644 --- a/tests/unit/conftest.py +++ b/tests/unit/conftest.py @@ -42,6 +42,7 @@ ApiHostRefList, ApiRole, ApiRoleConfigGroup, + ApiRoleList, ApiRoleNameList, ApiRoleState, ApiService, @@ -66,6 +67,10 @@ Parcel, ) +from ansible_collections.cloudera.cluster.plugins.module_utils.role_utils import ( + get_mgmt_roles, +) + from ansible_collections.cloudera.cluster.tests.unit import ( AnsibleFailJson, AnsibleExitJson, @@ -194,7 +199,23 @@ def cm_api_client(conn) -> ApiClient: @pytest.fixture(scope="session") def base_cluster(cm_api_client, request): - """Provision a CDH Base cluster.""" + """Provision a CDH Base cluster. If the variable 'CM_CLUSTER' is present, + will attempt to read and yield a reference to this cluster. Otherwise, + will yield a new base cluster with a single host, deleting the cluster + once completed. + + Args: + cm_api_client (_type_): _description_ + request (_type_): _description_ + + Raises: + Exception: _description_ + Exception: _description_ + Exception: _description_ + + Yields: + _type_: _description_ + """ cluster_api = ClustersResourceApi(cm_api_client) @@ -270,14 +291,32 @@ def base_cluster(cm_api_client, request): @pytest.fixture(scope="session") -def cms(cm_api_client, request) -> Generator[ApiService]: - """Provisions Cloudera Manager Service.""" +def cms(cm_api_client: ApiClient, request) -> Generator[ApiService]: + """Provisions Cloudera Manager Service. If the Cloudera Manager Service + is present, will read and yield this reference. 
Otherwise, will + yield a new Cloudera Manager Service, deleting it after use. - api = MgmtServiceResourceApi(cm_api_client) + NOTE! A new Cloudera Manager Service will _not_ be provisioned if + there are any existing clusters within the deployment! Therefore, + you must only run this fixture to provision a net-new Cloudera Manager + Service on a bare deployment, i.e. Cloudera Manager and hosts only. + + Args: + cm_api_client (ApiClient): _description_ + request (_type_): _description_ + + Raises: + Exception: _description_ + + Yields: + Generator[ApiService]: _description_ + """ + + cms_api = MgmtServiceResourceApi(cm_api_client) # Return if the Cloudera Manager Service is already present try: - yield api.read_service() + yield cms_api.read_service() return except ApiException as ae: if ae.status != 404 or "Cannot find management service." not in str(ae.body): @@ -289,9 +328,12 @@ def cms(cm_api_client, request) -> Generator[ApiService]: type="MGMT", ) - yield api.setup_cms(body=service) + cm_service = cms_api.setup_cms(body=service) + cms_api.auto_assign_roles() - api.delete_cms() + yield cm_service + + cms_api.delete_cms() @pytest.fixture(scope="function") @@ -419,6 +461,7 @@ def host_monitor_role(cm_api_client, cms, request) -> Generator[ApiRole]: def host_monitor_role_group_config( cm_api_client, host_monitor_role, request ) -> Generator[ApiRoleConfigGroup]: + """Configures the base Role Config Group for the Host Monitor role of a Cloudera Manager Service.""" marker = request.node.get_closest_marker("role_config_group") if marker is None: @@ -532,6 +575,38 @@ def host_monitor_state(cm_api_client, host_monitor_role, request) -> Generator[A ) +@pytest.fixture(scope="function") +def host_monitor_cleared(cm_api_client, cms) -> Generator[None]: + role_api = MgmtRolesResourceApi(cm_api_client) + role_cmd_api = MgmtRoleCommandsResourceApi(cm_api_client) + + # Check for existing management role + pre_role = next( + iter([r for r in get_mgmt_roles(cm_api_client, 
"HOSTMONITOR").items]), None + ) + + if pre_role is not None: + # Get the current state + pre_role.config = role_api.read_role_config(role_name=pre_role.name) + + # Remove the prior role + role_api.delete_role(role_name=pre_role.name) + + # Yield now that the role has been removed + yield + + # Reinstate the previous role + if pre_role is not None: + role_api.create_roles(body=ApiRoleList(items=[pre_role])) + if pre_role.maintenance_mode: + role_api.enter_maintenance_mode(pre_role.name) + if pre_role.role_state in [ApiRoleState.STARTED, ApiRoleState.STARTING]: + restart_cmds = role_cmd_api.restart_command( + body=ApiRoleNameList(items=[pre_role.name]) + ) + handle_commands(api_client=cm_api_client, commands=restart_cmds) + + def handle_commands(api_client: ApiClient, commands: ApiBulkCommandList): if commands.errors: error_msg = "\n".join(commands.errors) diff --git a/tests/unit/plugins/modules/cm_service_role/test_cm_service_role.py b/tests/unit/plugins/modules/cm_service_role/test_cm_service_role.py index 2ec761d9..b1a9c98a 100644 --- a/tests/unit/plugins/modules/cm_service_role/test_cm_service_role.py +++ b/tests/unit/plugins/modules/cm_service_role/test_cm_service_role.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Cloudera, Inc. All Rights Reserved. +# Copyright 2025 Cloudera, Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -28,6 +28,8 @@ ApiConfig, ApiConfigList, ApiRole, + ApiRoleList, + ApiRoleState, ClustersResourceApi, MgmtRolesResourceApi, ) @@ -36,46 +38,142 @@ from ansible_collections.cloudera.cluster.tests.unit import ( AnsibleExitJson, AnsibleFailJson, - provision_cm_role, - cm_role_config, + set_cm_role, +) +from ansible_collections.cloudera.cluster.plugins.module_utils.host_utils import ( + get_host_ref, +) +from ansible_collections.cloudera.cluster.plugins.module_utils.role_utils import ( + get_mgmt_roles, ) LOG = logging.getLogger(__name__) +@pytest.fixture(scope="function") +def target_cm_role(cm_api_client, cms, base_cluster, request) -> Generator[ApiRole]: + marker = request.node.get_closest_marker("role") + + if marker is None: + role = ApiRole( + type="HOSTMONITOR", + ) + else: + role = marker.args[0] + role.type = "HOSTMONITOR" + + yield from set_cm_role(cm_api_client, base_cluster, role) + + +@pytest.fixture(scope="function") +def target_cm_role_cleared( + cm_api_client, base_cluster, host_monitor_cleared, request +) -> Generator[ApiRole]: + marker = request.node.get_closest_marker("role") + + if marker is None: + role = ApiRole( + type="HOSTMONITOR", + ) + else: + role = marker.args[0] + role.type = "HOSTMONITOR" + + role_api = MgmtRolesResourceApi(cm_api_client) + + if not role.host_ref: + cluster_api = ClustersResourceApi(cm_api_client) + + # Get first host of the cluster + hosts = cluster_api.list_hosts(cluster_name=base_cluster.name) + + if not hosts.items: + raise Exception( + "No available hosts to assign the Cloudera Manager Service role." 
+ ) + + role.host_ref = get_host_ref(cm_api_client, host_id=hosts.items[0].host_id) + + # Create and yield the role under test + current_role = next( + iter(role_api.create_roles(body=ApiRoleList(items=[role])).items), None + ) + current_role.config = role_api.read_role_config(role_name=current_role.name) + + yield current_role + + # Clear out any remaining roles + remaining_roles = get_mgmt_roles(cm_api_client, "HOSTMONITOR") + + for r in remaining_roles.items: + role_api.delete_role(role_name=r.name) + + def test_missing_required(conn, module_args): module_args(conn) - with pytest.raises(AnsibleFailJson, match="parameters"): + with pytest.raises(AnsibleFailJson, match="type"): cm_service_role.main() -def test_missing_required_if(conn, module_args): - module_args( - { - **conn, - "parameters": dict(), - } - ) +def test_mutually_exclusive(conn, module_args): + module_args({**conn, "cluster_hostname": "hostname", "cluster_host_id": "host_id"}) - with pytest.raises(AnsibleFailJson, match="name, type"): + with pytest.raises( + AnsibleFailJson, + match="parameters are mutually exclusive: cluster_hostname|cluster_host_id", + ): cm_service_role.main() -def test_present_invalid_parameter(conn, module_args, host_monitor): +@pytest.mark.role(ApiRole()) +def test_relocate_host( + conn, module_args, cm_api_client, base_cluster, target_cm_role_cleared, request +): + cluster_api = ClustersResourceApi(cm_api_client) + + # Get second host of the cluster + hosts = cluster_api.list_hosts(cluster_name=base_cluster.name) + + if not hosts.items: + raise Exception( + "No available hosts to assign the Cloudera Manager Service role." + ) + filtered_hosts = [ + h for h in hosts.items if h.host_id != target_cm_role_cleared.host_ref.host_id + ] + + if len(filtered_hosts) < 1: + raise Exception( + "Not enough hosts to reassign the Cloudera Manager Service role." 
+ ) + module_args( { **conn, - "role": host_monitor.name, - "parameters": dict(example="Example"), + "type": target_cm_role_cleared.type, + "cluster_hostname": filtered_hosts[0].hostname, + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + # _ansible_check_mode=True, + # _ansible_diff=True, } ) - with pytest.raises( - AnsibleFailJson, match="Unknown configuration attribute 'example'" - ): + expected = filtered_hosts[0].host_id + + with pytest.raises(AnsibleExitJson) as e: cm_service_role.main() + assert e.value.changed == True + assert expected == e.value.role["host_id"] + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service_role.main() + + assert e.value.changed == False + assert expected == e.value.role["host_id"] + @pytest.mark.role( ApiRole( @@ -87,19 +185,19 @@ def test_present_invalid_parameter(conn, module_args, host_monitor): ) ) ) -def test_set_parameters(conn, module_args, host_monitor_state, request): +def test_set_config(conn, module_args, target_cm_role, request): module_args( { **conn, - "type": host_monitor_state.type, - "config": dict(mgmt_num_descriptor_fetch_tries=32), + "type": target_cm_role.type, + "config": dict(mgmt_num_descriptor_fetch_tries=55), "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", # _ansible_check_mode=True, # _ansible_diff=True, } ) - expected = dict(mgmt_num_descriptor_fetch_tries="32", process_start_secs="21") + expected = dict(mgmt_num_descriptor_fetch_tries="55", process_start_secs="21") with pytest.raises(AnsibleExitJson) as e: cm_service_role.main() @@ -115,171 +213,206 @@ def test_set_parameters(conn, module_args, host_monitor_state, request): assert expected.items() <= e.value.role["config"].items() -@pytest.mark.role_config( - dict(mgmt_num_descriptor_fetch_tries=11, process_start_secs=21) +@pytest.mark.role( + ApiRole( + config=ApiConfigList( + items=[ + ApiConfig("mgmt_num_descriptor_fetch_tries", 12), + ApiConfig("process_start_secs", 22), + ] + 
) + ) ) -def test_set_parameters_role_type(conn, module_args, host_monitor_config, request): +def test_unset_config(conn, module_args, target_cm_role, request): module_args( { **conn, - "role_type": host_monitor_config.type, - "parameters": dict(mgmt_num_descriptor_fetch_tries=32), + "type": target_cm_role.type, + "config": dict(mgmt_num_descriptor_fetch_tries=None), "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", - # _ansible_check_mode=True, - # _ansible_diff=True, } ) - expected = dict(mgmt_num_descriptor_fetch_tries="32", process_start_secs="21") + expected = dict(process_start_secs="22") with pytest.raises(AnsibleExitJson) as e: - cm_service_role_config.main() + cm_service_role.main() assert e.value.changed == True - assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + assert expected.items() <= e.value.role["config"].items() # Idempotency with pytest.raises(AnsibleExitJson) as e: - cm_service_role_config.main() + cm_service_role.main() assert e.value.changed == False - assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + assert expected.items() <= e.value.role["config"].items() -@pytest.mark.role_config( - dict(mgmt_num_descriptor_fetch_tries=11, process_start_secs=21) +@pytest.mark.role( + ApiRole( + config=ApiConfigList( + items=[ + ApiConfig("mgmt_num_descriptor_fetch_tries", 13), + ApiConfig("process_start_secs", 23), + ] + ) + ) ) -def test_unset_parameters(conn, module_args, host_monitor_config, request): +def test_set_config_purge(conn, module_args, target_cm_role, request): module_args( { **conn, - "role": host_monitor_config.name, - "parameters": dict(mgmt_num_descriptor_fetch_tries=None), + "type": target_cm_role.type, + "config": dict(mgmt_num_descriptor_fetch_tries=33), + "purge": True, "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + # _ansible_check_mode=True, + # _ansible_diff=True, } ) - expected = dict(process_start_secs="21") 
+ expected = dict(mgmt_num_descriptor_fetch_tries="33") with pytest.raises(AnsibleExitJson) as e: - cm_service_role_config.main() + cm_service_role.main() assert e.value.changed == True - assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + assert expected.items() <= e.value.role["config"].items() # Idempotency with pytest.raises(AnsibleExitJson) as e: - cm_service_role_config.main() + cm_service_role.main() assert e.value.changed == False - assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + assert expected.items() <= e.value.role["config"].items() -@pytest.mark.role_config( - dict(mgmt_num_descriptor_fetch_tries=11, process_start_secs=21) +@pytest.mark.role( + ApiRole( + config=ApiConfigList( + items=[ + ApiConfig("mgmt_num_descriptor_fetch_tries", 14), + ApiConfig("process_start_secs", 24), + ] + ) + ) ) -def test_unset_parameters_role_type(conn, module_args, host_monitor_config, request): +def test_set_config_purge_all(conn, module_args, target_cm_role, request): module_args( { **conn, - "type": host_monitor_config.type, - "parameters": dict(mgmt_num_descriptor_fetch_tries=None), + "type": target_cm_role.type, + "config": dict(), + "purge": True, "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + # _ansible_check_mode=True, + # _ansible_diff=True, } ) - expected = dict(process_start_secs="21") - with pytest.raises(AnsibleExitJson) as e: - cm_service_role_config.main() + cm_service_role.main() assert e.value.changed == True - assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + assert len(e.value.role["config"]) == 0 # Idempotency with pytest.raises(AnsibleExitJson) as e: - cm_service_role_config.main() + cm_service_role.main() assert e.value.changed == False - assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + assert len(e.value.role["config"]) == 0 -@pytest.mark.role_config( - 
dict(mgmt_num_descriptor_fetch_tries=11, process_start_secs=21) -) -def test_set_parameters_with_purge(conn, module_args, host_monitor_config, request): +@pytest.mark.role(ApiRole(maintenance_mode=False)) +def test_maintenance_mode_enabled(conn, module_args, target_cm_role, request): module_args( { **conn, - "role": host_monitor_config.name, - "parameters": dict(mgmt_num_descriptor_fetch_tries=32), - "purge": True, + "type": target_cm_role.type, + "maintenance": True, "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", # _ansible_check_mode=True, # _ansible_diff=True, } ) - expected = dict(mgmt_num_descriptor_fetch_tries="32") - with pytest.raises(AnsibleExitJson) as e: - cm_service_role_config.main() + cm_service_role.main() assert e.value.changed == True - assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + assert e.value.role["maintenance_mode"] == True # Idempotency with pytest.raises(AnsibleExitJson) as e: - cm_service_role_config.main() + cm_service_role.main() assert e.value.changed == False - assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + assert e.value.role["maintenance_mode"] == True -@pytest.mark.role_config( - dict(mgmt_num_descriptor_fetch_tries=11, process_start_secs=21) -) -def test_set_parameters_with_purge_role_type( - conn, module_args, host_monitor_config, request -): +@pytest.mark.role(ApiRole(maintenance_mode=True)) +def test_maintenance_mode_disabled(conn, module_args, target_cm_role, request): module_args( { **conn, - "role_type": host_monitor_config.type, - "parameters": dict(mgmt_num_descriptor_fetch_tries=32), - "purge": True, + "type": target_cm_role.type, + "maintenance": False, "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", # _ansible_check_mode=True, # _ansible_diff=True, } ) - expected = dict(mgmt_num_descriptor_fetch_tries="32") + with pytest.raises(AnsibleExitJson) as e: + cm_service_role.main() + + assert 
e.value.changed == True
+    assert e.value.role["maintenance_mode"] == False
+
+    # Idempotency
+    with pytest.raises(AnsibleExitJson) as e:
+        cm_service_role.main()
+
+    assert e.value.changed == False
+    assert e.value.role["maintenance_mode"] == False
+
+
+@pytest.mark.role(ApiRole(role_state=ApiRoleState.STOPPED))
+def test_state_started(conn, module_args, target_cm_role, request):
+    module_args(
+        {
+            **conn,
+            "type": target_cm_role.type,
+            "state": "started",
+            "message": f"{Path(request.node.parent.name).stem}::{request.node.name}",
+            # _ansible_check_mode=True,
+            # _ansible_diff=True,
+        }
+    )
 
     with pytest.raises(AnsibleExitJson) as e:
-        cm_service_role_config.main()
+        cm_service_role.main()
 
     assert e.value.changed == True
-    assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items()
+    assert e.value.role["role_state"] == "STARTED"
 
     # Idempotency
     with pytest.raises(AnsibleExitJson) as e:
-        cm_service_role_config.main()
+        cm_service_role.main()
 
     assert e.value.changed == False
-    assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items()
+    assert e.value.role["role_state"] == "STARTED"
 
 
-@pytest.mark.role_config(
-    dict(mgmt_num_descriptor_fetch_tries=11, process_start_secs=21)
-)
-def test_purge_all_parameters(conn, module_args, host_monitor_config, request):
+@pytest.mark.role(ApiRole(role_state=ApiRoleState.STARTED))
+def test_state_stopped(conn, module_args, target_cm_role, request):
     module_args(
         {
             **conn,
-            "role": host_monitor_config.name,
-            "parameters": dict(),
-            "purge": True,
+            "type": target_cm_role.type,
+            "state": "stopped",
             "message": f"{Path(request.node.parent.name).stem}::{request.node.name}",
             # _ansible_check_mode=True,
             # _ansible_diff=True,
@@ -287,31 +420,26 @@ def test_purge_all_parameters(conn, module_args, host_monitor_config, request):
     )
 
     with pytest.raises(AnsibleExitJson) as e:
-        cm_service_role_config.main()
+        cm_service_role.main()
 
     assert e.value.changed == True
-    assert len(e.value.config) == 0
+
assert e.value.role["role_state"] == "STOPPED" # Idempotency with pytest.raises(AnsibleExitJson) as e: - cm_service_role_config.main() + cm_service_role.main() assert e.value.changed == False - assert len(e.value.config) == 0 + assert e.value.role["role_state"] == "STOPPED" -@pytest.mark.role_config( - dict(mgmt_num_descriptor_fetch_tries=11, process_start_secs=21) -) -def test_purge_all_parameters_role_type( - conn, module_args, host_monitor_config, request -): +@pytest.mark.role(ApiRole(role_state=ApiRoleState.STOPPED)) +def test_state_restarted(conn, module_args, target_cm_role, request): module_args( { **conn, - "type": host_monitor_config.type, - "parameters": dict(), - "purge": True, + "type": target_cm_role.type, + "state": "restarted", "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", # _ansible_check_mode=True, # _ansible_diff=True, @@ -319,14 +447,40 @@ def test_purge_all_parameters_role_type( ) with pytest.raises(AnsibleExitJson) as e: - cm_service_role_config.main() + cm_service_role.main() assert e.value.changed == True - assert len(e.value.config) == 0 + assert e.value.role["role_state"] == "STARTED" + + # Idempotency is not possible due to this state + with pytest.raises(AnsibleExitJson) as e: + cm_service_role.main() + + assert e.value.changed == True + assert e.value.role["role_state"] == "STARTED" + + +def test_state_absent(conn, module_args, target_cm_role_cleared, request): + module_args( + { + **conn, + "type": target_cm_role_cleared.type, + "state": "absent", + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + # _ansible_check_mode=True, + # _ansible_diff=True, + } + ) + + with pytest.raises(AnsibleExitJson) as e: + cm_service_role.main() + + assert e.value.changed == True + assert not e.value.role # Idempotency with pytest.raises(AnsibleExitJson) as e: - cm_service_role_config.main() + cm_service_role.main() assert e.value.changed == False - assert len(e.value.config) == 0 + assert not 
e.value.role