diff --git a/plugins/module_utils/cluster_utils.py b/plugins/module_utils/cluster_utils.py index 203d4230..bf3cae1f 100644 --- a/plugins/module_utils/cluster_utils.py +++ b/plugins/module_utils/cluster_utils.py @@ -17,7 +17,7 @@ """ from ansible_collections.cloudera.cluster.plugins.module_utils.cm_utils import ( - _parse_output, + normalize_output, ) from cm_client import ApiCluster @@ -42,5 +42,5 @@ def parse_cluster_result(cluster: ApiCluster) -> dict: # Retrieve full_version as version output = dict(version=cluster.full_version) - output.update(_parse_output(cluster.to_dict(), CLUSTER_OUTPUT)) + output.update(normalize_output(cluster.to_dict(), CLUSTER_OUTPUT)) return output diff --git a/plugins/module_utils/cm_utils.py b/plugins/module_utils/cm_utils.py index a1667986..c55d523c 100644 --- a/plugins/module_utils/cm_utils.py +++ b/plugins/module_utils/cm_utils.py @@ -34,9 +34,8 @@ from cm_client import ( ApiClient, ApiCommand, + ApiConfig, ApiConfigList, - ApiRole, - ApiRoleConfigGroup, Configuration, ) from cm_client.rest import ApiException, RESTClientObject @@ -47,34 +46,8 @@ __credits__ = ["frisch@cloudera.com"] __maintainer__ = ["wmudge@cloudera.com"] -ROLE_OUTPUT = [ - "commission_state", - "config_staleness_status", - "ha_status", - "health_checks", - "health_summary", - # "host_ref", - "maintenance_mode", - "maintenance_owners", - "name", - # "role_config_group_ref", - "role_state", - # "service_ref", - "tags", - "type", - "zoo_keeper_server_mode", -] - -ROLE_CONFIG_GROUP = [ - "name", - "role_type", - "base", - "display_name", - # "service_ref", -] - - -def _parse_output(entity: dict, filter: list) -> dict: + +def normalize_output(entity: dict, filter: list) -> dict: output = {} for k in filter: if k == "tags": @@ -85,24 +58,6 @@ def _parse_output(entity: dict, filter: list) -> dict: return output -def parse_role_result(role: ApiRole) -> dict: - # Retrieve only the host_id, role_config_group, and service identifiers - output = dict( - 
host_id=role.host_ref.host_id, - role_config_group_name=role.role_config_group_ref.role_config_group_name, - service_name=role.service_ref.service_name, - ) - output.update(_parse_output(role.to_dict(), ROLE_OUTPUT)) - return output - - -def parse_role_config_group_result(role_config_group: ApiRoleConfigGroup) -> dict: - # Retrieve only the service identifier - output = dict(service_name=role_config_group.service_ref.service_name) - output.update(_parse_output(role_config_group.to_dict(), ROLE_CONFIG_GROUP)) - return output - - def normalize_values(add: dict) -> dict: """Normalize parameter values. Strings have whitespace trimmed, integers are converted to strings, and Boolean values are converted their string representation @@ -191,6 +146,25 @@ def resolve_tag_updates( return (delta_add, delta_del) +class ConfigListUpdates(object): + def __init__(self, existing: ApiConfigList, updates: dict, purge: bool) -> None: + current = {r.name: r.value for r in existing.items} + changeset = resolve_parameter_updates(current, updates, purge) + + self.diff = dict( + before={k: current[k] if k in current else None for k in changeset.keys()}, + after=changeset, + ) + + self.config = ApiConfigList( + items=[ApiConfig(name=k, value=v) for k, v in changeset.items()] + ) + + @property + def changed(self) -> bool: + return bool(self.config.items) + + class ClusterTemplate(object): IDEMPOTENT_IDS = frozenset( ["refName", "name", "clusterName", "hostName", "product"] diff --git a/plugins/module_utils/data_context_utils.py b/plugins/module_utils/data_context_utils.py index 4b3f54f7..be4f7c57 100644 --- a/plugins/module_utils/data_context_utils.py +++ b/plugins/module_utils/data_context_utils.py @@ -17,7 +17,7 @@ """ from ansible_collections.cloudera.cluster.plugins.module_utils.cm_utils import ( - _parse_output, + normalize_output, ) from cm_client import ApiDataContextList @@ -38,9 +38,9 @@ ] -def _parse_output(data: dict, keys: list) -> dict: +def normalize_output(data: dict, keys: 
list) -> dict: return {key: data[key] for key in keys if key in data} def parse_data_context_result(data_contexts: ApiDataContextList) -> list: - return [_parse_output(item, DATA_CONTEXT_OUTPUT) for item in data_contexts.items] + return [normalize_output(item, DATA_CONTEXT_OUTPUT) for item in data_contexts.items] diff --git a/plugins/module_utils/parcel_utils.py b/plugins/module_utils/parcel_utils.py index 88d13793..38a50c5a 100644 --- a/plugins/module_utils/parcel_utils.py +++ b/plugins/module_utils/parcel_utils.py @@ -23,7 +23,7 @@ from cm_client import ApiParcel, ParcelResourceApi from ansible_collections.cloudera.cluster.plugins.module_utils.cm_utils import ( - _parse_output, + normalize_output, ) @@ -138,5 +138,5 @@ def activate(self): def parse_parcel_result(parcel: ApiParcel) -> dict: # Retrieve only the cluster identifier output = dict(cluster_name=parcel.cluster_ref.cluster_name) - output.update(_parse_output(parcel.to_dict(), PARCEL)) + output.update(normalize_output(parcel.to_dict(), PARCEL)) return output diff --git a/plugins/module_utils/role_config_group_utils.py b/plugins/module_utils/role_config_group_utils.py new file mode 100644 index 00000000..b17e8160 --- /dev/null +++ b/plugins/module_utils/role_config_group_utils.py @@ -0,0 +1,35 @@ +# Copyright 2024 Cloudera, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from ansible_collections.cloudera.cluster.plugins.module_utils.cm_utils import ( + normalize_output, +) + +from cm_client import ApiRoleConfigGroup + + +ROLE_CONFIG_GROUP = [ + "name", + "role_type", + "base", + "display_name", + # "service_ref", +] + + +def parse_role_config_group_result(role_config_group: ApiRoleConfigGroup) -> dict: + # Retrieve only the service identifier + output = dict(service_name=role_config_group.service_ref.service_name) + output.update(normalize_output(role_config_group.to_dict(), ROLE_CONFIG_GROUP)) + return output diff --git a/plugins/module_utils/role_utils.py b/plugins/module_utils/role_utils.py new file mode 100644 index 00000000..55bb463b --- /dev/null +++ b/plugins/module_utils/role_utils.py @@ -0,0 +1,48 @@ +# Copyright 2024 Cloudera, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from ansible_collections.cloudera.cluster.plugins.module_utils.cm_utils import ( + normalize_output, +) + +from cm_client import ApiRole + +ROLE_OUTPUT = [ + "commission_state", + "config_staleness_status", + "ha_status", + "health_checks", + "health_summary", + # "host_ref", + "maintenance_mode", + "maintenance_owners", + "name", + # "role_config_group_ref", + "role_state", + # "service_ref", + "tags", + "type", + "zoo_keeper_server_mode", +] + + +def parse_role_result(role: ApiRole) -> dict: + # Retrieve only the host_id, role_config_group, and service identifiers + output = dict( + host_id=role.host_ref.host_id, + role_config_group_name=role.role_config_group_ref.role_config_group_name, + service_name=role.service_ref.service_name, + ) + output.update(normalize_output(role.to_dict(), ROLE_OUTPUT)) + return output diff --git a/plugins/module_utils/service_utils.py b/plugins/module_utils/service_utils.py index c11a2d79..9e65bff3 100644 --- a/plugins/module_utils/service_utils.py +++ b/plugins/module_utils/service_utils.py @@ -17,7 +17,7 @@ """ from ansible_collections.cloudera.cluster.plugins.module_utils.cm_utils import ( - _parse_output, + normalize_output, resolve_parameter_updates, ) @@ -47,10 +47,15 @@ def parse_service_result(service: ApiService) -> dict: # Retrieve only the cluster_name output = dict(cluster_name=service.cluster_ref.cluster_name) - output.update(_parse_output(service.to_dict(), SERVICE_OUTPUT)) + output.update(normalize_output(service.to_dict(), SERVICE_OUTPUT)) return output +def parse_cm_service_result(service: ApiService) -> dict: + # Ignore cluster_name + return normalize_output(service.to_dict(), SERVICE_OUTPUT) + + class ServiceConfigUpdates(object): def __init__(self, existing: ApiServiceConfig, updates: dict, purge: bool) -> None: current = {r.name: r.value for r in existing.items} diff --git a/plugins/modules/cm_service.py b/plugins/modules/cm_service.py index 921981cf..6528169d 100644 --- a/plugins/modules/cm_service.py +++ 
b/plugins/modules/cm_service.py @@ -1,3 +1,6 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright 2024 Cloudera, Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -12,24 +15,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from ansible_collections.cloudera.cluster.plugins.module_utils.cm_utils import ( - ClouderaManagerModule, -) - -from cm_client.rest import ApiException -from cm_client import MgmtRolesResourceApi -from cm_client import MgmtServiceResourceApi -from cm_client import MgmtRoleCommandsResourceApi -from cm_client import HostsResourceApi - -ANSIBLE_METADATA = { - "metadata_version": "1.1", - "status": ["preview"], - "supported_by": "community", -} - DOCUMENTATION = r""" ---- module: cm_service short_description: Manage Cloudera Manager service roles description: @@ -68,7 +54,6 @@ """ EXAMPLES = r""" ---- - name: Start Cloudera Manager service roles cloudera.cluster.cm_version: host: "10.10.10.10" @@ -114,8 +99,7 @@ """ RETURN = r""" ---- -cloudera_manager: +service: description: List of Cloudera Manager roles type: dict contains: @@ -185,241 +169,325 @@ returned: optional """ +import json + +from cm_client import ( + HostsResourceApi, + MgmtRolesResourceApi, + MgmtRoleConfigGroupsResourceApi, + MgmtRoleCommandsResourceApi, + MgmtServiceResourceApi, +) +from cm_client.rest import ApiException + +from ansible_collections.cloudera.cluster.plugins.module_utils.cm_utils import ( + ClouderaManagerMutableModule, +) +from ansible_collections.cloudera.cluster.plugins.module_utils.service_utils import ( + ServiceConfigUpdates, + parse_cm_service_result, +) +from ansible_collections.cloudera.cluster.plugins.module_utils.role_utils import ( + parse_role_result, +) +from ansible_collections.cloudera.cluster.plugins.module_utils.role_config_group_utils import ( + parse_role_config_group_result, +) + -class ClouderaService(ClouderaManagerModule): +class 
ClouderaManagerService(ClouderaManagerMutableModule): def __init__(self, module): - super(ClouderaService, self).__init__(module) + super(ClouderaManagerService, self).__init__(module) - self.role = self.get_param("role") + # Set the parameters + self.params = self.get_param("parameters") + self.roles = self.get_param("roles") self.state = self.get_param("state") self.purge = self.get_param("purge") + self.view = self.get_param("view") + + # Initialize the return value + self.changed = False + self.cm_service = {} + + if self.module._diff: + self.diff = dict(before=dict(), after=dict()) + else: + self.diff = {} + + # Execute the logic self.process() - @ClouderaManagerModule.handle_process + @ClouderaManagerMutableModule.handle_process def process(self): - try: - api_instance = MgmtServiceResourceApi(self.api_client) - role_api_instance = MgmtRolesResourceApi(self.api_client) - role_cmd_api_instance = MgmtRoleCommandsResourceApi(self.api_client) - mgmt_service_api_instance = MgmtServiceResourceApi(self.api_client) - host_api_instance = HostsResourceApi(self.api_client) - - get_host_infomation = host_api_instance.read_hosts().to_dict() - for item in get_host_infomation["items"]: - if self.host == item["hostname"]: - host_id = item["host_id"] - - if not self.purge: - available_roles_info = role_api_instance.read_roles().to_dict() - existing_roles = [] - for item in available_roles_info["items"]: - existing_roles.append(item["type"]) - - if self.state in ["present"]: - not_existing_roles = [] - for role in self.role: - if role not in existing_roles: - not_existing_roles.append(role) - if not_existing_roles: - body = { - "items": [ - {"type": role, "hostRef": {"hostId": host_id}} - for role in not_existing_roles - ] - } - role_api_instance.create_roles(body=body) - self.cm_service_output = role_api_instance.read_roles().to_dict() - self.changed = True - elif self.state in ["absent"]: - roles_to_remove = [ - role for role in self.role if role in existing_roles - ] - 
roles_to_remove_extended_info = [] - for role in roles_to_remove: - for item in available_roles_info["items"]: - if role == item["type"]: - roles_to_remove_extended_info.append(item["name"]) - if not roles_to_remove_extended_info: - self.cm_service_output = ( - role_api_instance.read_roles().to_dict() - ) - self.changed = False - else: - for role in roles_to_remove_extended_info: - role_api_instance.delete_role(role_name=role) - self.cm_service_output = ( - role_api_instance.read_roles().to_dict() + service_api = MgmtServiceResourceApi(self.api_client) + role_api = MgmtRolesResourceApi(self.api_client) + role_cmd_api = MgmtRoleCommandsResourceApi(self.api_client) + rcg_api = MgmtRoleConfigGroupsResourceApi(self.api_client) + host_api = HostsResourceApi(self.api_client) + + # Manage service-wide configurations + if self.params or self.purge: + try: + existing_params = service_api.read_service_config() + except ApiException as ex: + if ex.status == 404: + self.module.fail_json(msg=json.loads(ex.body)["message"]) + else: + raise ex + + service_wide = ServiceConfigUpdates( + existing_params, self.params, self.purge + ) + + if service_wide.changed: + self.changed = True + + if self.module._diff: + self.diff["before"].update(params=service_wide.diff["before"]) + self.diff["after"].update(params=service_wide.diff["after"]) + + if not self.module.check_mode: + service_api.update_service_config( + message=self.message, body=service_wide.config + ) + + # Manage roles + if self.roles: + try: + # Get a list of all host and find itself + # This is hardcoded, so needs to be broken into host + # assignment per-role + hosts = host_api.read_hosts() + for h in hosts.items(): + if self.host == h.hostname: + host_id = h.host_id + + # CHECK MODE + if not self.purge: + available_roles_info = role_api.read_roles().to_dict() + existing_roles = [] + for item in available_roles_info["items"]: + existing_roles.append(item["type"]) + + if self.state in ["present"]: + not_existing_roles = [] + 
for role in self.roles: + if role not in existing_roles: + not_existing_roles.append(role) + if not_existing_roles: + body = { + "items": [ + {"type": role, "hostRef": {"hostId": host_id}} + for role in not_existing_roles + ] + } + role_api.create_roles(body=body) + self.cm_service = parse_cm_service_result( + service_api.read_service() ) self.changed = True - elif self.state in ["started"]: - - matching_roles = [] - new_roles = [] - for role in self.role: - if role in existing_roles: - matching_roles.append(role) + elif self.state in ["absent"]: + roles_to_remove = [ + role for role in self.roles if role in existing_roles + ] + roles_to_remove_extended_info = [] + for role in roles_to_remove: + for item in available_roles_info["items"]: + if role == item["type"]: + roles_to_remove_extended_info.append(item["name"]) + if not roles_to_remove_extended_info: + self.cm_service = role_api.read_roles().to_dict() + self.changed = False + else: + for role in roles_to_remove_extended_info: + role_api.delete_role(role_name=role) + self.cm_service = role_api.read_roles().to_dict() + self.changed = True + + elif self.state in ["started"]: + + matching_roles = [] + new_roles = [] + for role in self.roles: + if role in existing_roles: + matching_roles.append(role) + else: + new_roles.append(role) + + new_roles_to_start = [] + if new_roles: + body = { + "items": [ + {"type": role, "hostRef": {"hostId": host_id}} + for role in new_roles + ] + } + newly_added_roles = role_api.create_roles( + body=body + ).to_dict() + + for role in newly_added_roles["items"]: + new_roles_to_start.append(role["name"]) + body = {"items": new_roles_to_start} + + existing_roles_state = [] + for role in matching_roles: + for item in available_roles_info["items"]: + if role == item["type"]: + existing_roles_state.append( + { + "type": item["type"], + "role_state": item["role_state"].lower(), + "name": item["name"], + } + ) + + existing_roles_to_start = [] + for role in existing_roles_state: + if 
role["role_state"] == "stopped": + existing_roles_to_start.append(role["name"]) + + all_roles_to_start = ( + new_roles_to_start + existing_roles_to_start + ) + body = {"items": all_roles_to_start} + + if all_roles_to_start: + start_roles_request = role_cmd_api.start_command( + body=body + ).to_dict() + command_id = start_roles_request["items"][0]["id"] + self.wait_for_command_state( + command_id=command_id, polling_interval=5 + ) + self.cm_service = role_api.read_roles().to_dict() + self.changed = True + else: + self.cm_service = role_api.read_roles().to_dict() + self.changed = False + + elif self.state in ["stopped"]: + matching_roles = [] + for role in self.roles: + if role in existing_roles: + matching_roles.append(role) + + matching_roles_state = [] + for role in matching_roles: + for item in available_roles_info["items"]: + if role == item["type"]: + matching_roles_state.append( + { + "type": item["type"], + "role_state": item["role_state"].lower(), + "name": item["name"], + } + ) + + roles_to_stop = [] + for role in matching_roles_state: + if role["role_state"] == "started": + roles_to_stop.append(role["name"]) + body = {"items": roles_to_stop} + + if roles_to_stop: + role_cmd_api.stop_command(body=body) + self.cm_service = role_api.read_roles().to_dict() + self.changed = True else: - new_roles.append(role) - - new_roles_to_start = [] - if new_roles: - body = { - "items": [ - {"type": role, "hostRef": {"hostId": host_id}} - for role in new_roles - ] - } - newly_added_roles = role_api_instance.create_roles( - body=body - ).to_dict() - - for role in newly_added_roles["items"]: - new_roles_to_start.append(role["name"]) - body = {"items": new_roles_to_start} - - existing_roles_state = [] - for role in matching_roles: - for item in available_roles_info["items"]: - if role == item["type"]: - existing_roles_state.append( - { - "type": item["type"], - "role_state": item["role_state"].lower(), - "name": item["name"], - } - ) - - existing_roles_to_start = [] - for role 
in existing_roles_state: - if role["role_state"] == "stopped": - existing_roles_to_start.append(role["name"]) - - all_roles_to_start = new_roles_to_start + existing_roles_to_start - body = {"items": all_roles_to_start} - - if all_roles_to_start: - start_roles_request = role_cmd_api_instance.start_command( - body=body - ).to_dict() - command_id = start_roles_request["items"][0]["id"] + self.cm_service = role_api.read_roles().to_dict() + self.changed = False + + elif self.state in ["restarted"]: + matching_roles = [] + for role in self.roles: + if role in existing_roles: + matching_roles.append(role) + + matching_roles_state = [] + for role in matching_roles: + for item in available_roles_info["items"]: + if role == item["type"]: + matching_roles_state.append( + { + "type": item["type"], + "role_state": item["role_state"].lower(), + "name": item["name"], + } + ) + + roles_to_restart = [] + for role in matching_roles_state: + roles_to_restart.append(role["name"]) + body = {"items": roles_to_restart} + + if roles_to_restart: + role_cmd_api.restart_command(body=body) + self.cm_service = role_api.read_roles().to_dict() + self.changed = True + + if self.purge: + service_api.delete_cms() + body = {"roles": [{"type": role} for role in self.roles]} + service_api.setup_cms(body=body) + self.cm_service = role_api.read_roles().to_dict() + + if self.state in ["started"]: + start_roles_request = service_api.start_command().to_dict() + command_id = start_roles_request["id"] self.wait_for_command_state( command_id=command_id, polling_interval=5 ) - self.cm_service_output = ( - role_api_instance.read_roles().to_dict() + self.cm_service = role_api.read_roles().to_dict() + self.changed = True + except ApiException as e: + if e.status == 404 or 400: + roles_dict = {"roles": [{"type": role} for role in self.roles]} + service_api.setup_cms(body=roles_dict) + + if self.state in ["started"]: + start_roles_request = service_api.start_command().to_dict() + command_id = 
start_roles_request["id"] + self.wait_for_command_state( + command_id=command_id, polling_interval=5 ) - self.changed = True + self.cm_service = role_api.read_roles().to_dict() else: - self.cm_service_output = ( - role_api_instance.read_roles().to_dict() - ) - self.changed = False - - elif self.state in ["stopped"]: - matching_roles = [] - for role in self.role: - if role in existing_roles: - matching_roles.append(role) - - matching_roles_state = [] - for role in matching_roles: - for item in available_roles_info["items"]: - if role == item["type"]: - matching_roles_state.append( - { - "type": item["type"], - "role_state": item["role_state"].lower(), - "name": item["name"], - } - ) - - roles_to_stop = [] - for role in matching_roles_state: - if role["role_state"] == "started": - roles_to_stop.append(role["name"]) - body = {"items": roles_to_stop} - - if roles_to_stop: - role_cmd_api_instance.stop_command(body=body) - self.cm_service_output = ( - role_api_instance.read_roles().to_dict() - ) - self.changed = True - else: - self.cm_service_output = ( - role_api_instance.read_roles().to_dict() - ) - self.changed = False - - elif self.state in ["restarted"]: - matching_roles = [] - for role in self.role: - if role in existing_roles: - matching_roles.append(role) - - matching_roles_state = [] - for role in matching_roles: - for item in available_roles_info["items"]: - if role == item["type"]: - matching_roles_state.append( - { - "type": item["type"], - "role_state": item["role_state"].lower(), - "name": item["name"], - } - ) - - roles_to_restart = [] - for role in matching_roles_state: - roles_to_restart.append(role["name"]) - body = {"items": roles_to_restart} - - if roles_to_restart: - role_cmd_api_instance.restart_command(body=body) - self.cm_service_output = ( - role_api_instance.read_roles().to_dict() - ) - self.changed = True - - if self.purge: - mgmt_service_api_instance.delete_cms() - body = {"roles": [{"type": role} for role in self.role]} - 
mgmt_service_api_instance.setup_cms(body=body) - self.cm_service_output = role_api_instance.read_roles().to_dict() - - if self.state in ["started"]: - start_roles_request = api_instance.start_command().to_dict() - command_id = start_roles_request["id"] - self.wait_for_command_state( - command_id=command_id, polling_interval=5 - ) - self.cm_service_output = role_api_instance.read_roles().to_dict() - self.changed = True - - except ApiException as e: - if e.status == 404 or 400: - roles_dict = {"roles": [{"type": role} for role in self.role]} - api_instance.setup_cms(body=roles_dict) + self.cm_service = role_api.read_roles().to_dict() + self.changed = True - if self.state in ["started"]: - start_roles_request = api_instance.start_command().to_dict() - command_id = start_roles_request["id"] - self.wait_for_command_state( - command_id=command_id, polling_interval=5 - ) - self.cm_service_output = role_api_instance.read_roles().to_dict() - else: - self.cm_service_output = role_api_instance.read_roles().to_dict() - self.changed = True + # Read and generate payload for Cloudera Manager Service + self.cm_service = parse_cm_service_result(service_api.read_service()) + self.cm_service.update( + config=[ + c.to_dict() + for c in service_api.read_service_config(view=self.view).items + ] + ) + self.cm_service.update( + roles=[parse_role_result(r) for r in role_api.read_roles().items] + ) + self.cm_service.update( + role_config_groups=[ + parse_role_config_group_result(rcg) + for rcg in rcg_api.read_role_config_groups().items + ] + ) def main(): - module = ClouderaManagerModule.ansible_module( + module = ClouderaManagerMutableModule.ansible_module( argument_spec=dict( - role=dict(required=True, type="list"), - purge=dict(required=False, type="bool", default=False), + parameters=dict(type="dict", aliases=["params"]), + roles=dict(type="list"), + purge=dict(type="bool", default=False), + view=dict( + default="summary", + choices=["summary", "full"], + ), state=dict( type="str", 
default="started", @@ -429,13 +497,13 @@ def main(): supports_check_mode=False, ) - result = ClouderaService(module) + result = ClouderaManagerService(module) changed = result.changed output = dict( changed=changed, - cloudera_manager=result.cm_service_output, + service=result.cm_service, ) if result.debug: diff --git a/plugins/modules/cm_service_role_config.py b/plugins/modules/cm_service_role_config.py new file mode 100644 index 00000000..fc6efbf3 --- /dev/null +++ b/plugins/modules/cm_service_role_config.py @@ -0,0 +1,322 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright 2024 Cloudera, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +DOCUMENTATION = r""" +module: cm_service_role_config +short_description: Manage a service role configuration in cluster +description: + - Manage a service role configuration (role-specific) in a cluster. +author: + - "Webster Mudge (@wmudge)" +requirements: + - cm-client +options: + name: + description: + - A Cloudera Manager Service role name to manage. + - One of C(name) or C(type) is required. + type: str + aliases: + - role_name + - role + type: + description: + - A Cloudera Manager Service role type to manage. + - One of C(name) or C(type) is required. + type: str + aliases: + - role_type + parameters: + description: + - The role-specific configuration to set, i.e. role overrides. + - To unset a parameter, use C(None) as the value. 
+ type: dict + required: yes + aliases: + - params + purge: + description: + - Flag for whether the declared parameters should append or overwrite any existing parameters. + - To clear all parameters, set I(parameters={}), i.e. an empty dictionary, and I(purge=True). + type: bool + default: False + view: + description: + - The view to return. + type: str + default: summary + choices: + - summary + - full +extends_documentation_fragment: + - ansible.builtin.action_common_attributes + - cloudera.cluster.cm_options + - cloudera.cluster.cm_endpoint + - cloudera.cluster.purge + - cloudera.cluster.message +attributes: + check_mode: + support: full + diff_mode: + support: full + platform: + platforms: all +""" + +EXAMPLES = r""" +- name: Update (append) Cloudera manager Service Host Monitor role parameters + cloudera.cluster.cm_service_role_config: + host: example.cloudera.com + username: "jane_smith" + password: "S&peR4Ec*re" + type: HOSTMONITOR + parameters: + a_configuration: "schema://host:port" + another_configuration: 234 + +- name: Reset a Cloudera manager Service Host Monitor role parameter + cloudera.cluster.cm_service_role_config: + host: example.cloudera.com + username: "jane_smith" + password: "S&peR4Ec*re" + name: "a-non-default-role-name" + parameters: + more_configuration: None + +- name: Update (with purge) Cloudera manager Service Host Monitor role parameters + cloudera.cluster.cluster_service_role_config: + host: example.cloudera.com + username: "jane_smith" + password: "S&peR4Ec*re" + type: HOSTMONITOR + parameters: + config_one: None + config_two: ValueTwo + config_three: 2345 + +- name: Reset all Cloudera manager Service Host Monitor role parameters + cloudera.cluster.cluster_service_role_config: + host: example.cloudera.com + username: "jane_smith" + password: "S&peR4Ec*re" + type: HOSTMONITOR + parameters: {} + purge: yes +""" + +RETURN = r""" +config: + description: + - List of Cloudera Manager Service role configurations. 
+ - Returns the C(summary) view of the resulting configuration. + type: list + elements: dict + returned: always + contains: + name: + description: + - The canonical name that identifies this configuration parameter. + type: str + returned: when supported + value: + description: + - The user-defined value. + - When absent, the default value (if any) will be used. + - Can also be absent, when enumerating allowed configs. + type: str + returned: when supported + required: + description: + - Whether this configuration is required for the object. + - If any required configuration is not set, operations on the object may not work. + - Requires I(full) view. + type: bool + returned: when supported + default: + description: + - The default value. + - Requires I(full) view. + type: str + returned: when supported + display_name: + description: + - A user-friendly name of the parameters, as would have been shown in the web UI. + - Requires I(full) view. + type: str + returned: when supported + description: + description: + - A textual description of the parameter. + - Requires I(full) view. + type: str + returned: when supported + related_name: + description: + - If applicable, contains the related configuration variable used by the source project. + - Requires I(full) view. + type: str + returned: when supported + sensitive: + description: + - Whether this configuration is sensitive, i.e. contains information such as passwords, which might affect how the value of this configuration might be shared by the caller. + type: bool + returned: when supported + validate_state: + description: + - State of the configuration parameter after validation. + - Requires I(full) view. + type: str + returned: when supported + validation_message: + description: + - A message explaining the parameter's validation state. + - Requires I(full) view. 
+ type: str + returned: when supported + validation_warnings_suppressed: + description: + - Whether validation warnings associated with this parameter are suppressed. + - In general, suppressed validation warnings are hidden in the Cloudera Manager UI. + - Configurations that do not produce warnings will not contain this field. + - Requires I(full) view. + type: bool + returned: when supported +""" + +import json + +from ansible_collections.cloudera.cluster.plugins.module_utils.cm_utils import ( + ClouderaManagerMutableModule, + ConfigListUpdates, +) + +from cm_client import MgmtRolesResourceApi +from cm_client.rest import ApiException + + +class ClouderaManagerServiceRoleConfig(ClouderaManagerMutableModule): + def __init__(self, module): + super(ClouderaManagerServiceRoleConfig, self).__init__(module) + + # Set the parameters + self.name = self.get_param("name") + self.type = self.get_param("type") + self.params = self.get_param("parameters") + self.purge = self.get_param("purge") + self.view = self.get_param("view") + + # Initialize the return value + self.changed = False + self.diff = {} + self.config = [] + + # Execute the logic + self.process() + + @ClouderaManagerMutableModule.handle_process + def process(self): + refresh = True + role_api = MgmtRolesResourceApi(self.api_client) + + try: + if self.name is None: + role = next( + iter( + [r for r in role_api.read_roles().items if r.type == self.type] + ), + None, + ) + if role is None: + self.module.fail_json( + msg=f"Unable to find Cloudera Manager Service role type '{self.type}" + ) + else: + self.name = role.name + + # For some reason, the call to read_roles() doesn't retrieve the configuration + existing = role_api.read_role_config(self.name) + except ApiException as ex: + if ex.status == 404: + self.module.fail_json(msg=json.loads(ex.body)["message"]) + else: + raise ex + + updates = ConfigListUpdates(existing, self.params, self.purge) + + if updates.changed: + self.changed = True + + if self.module._diff: 
+ self.diff = updates.diff + + if not self.module.check_mode: + self.config = [ + p.to_dict() + for p in role_api.update_role_config( + self.name, + message=self.message, + body=updates.config, + ).items + ] + + if self.view == "full": + refresh = False + + if refresh: + self.config = [ + p.to_dict() + for p in role_api.read_role_config(self.name, view=self.view).items + ] + + +def main(): + module = ClouderaManagerMutableModule.ansible_module( + argument_spec=dict( + name=dict(aliases=["role_name", "role"]), + type=dict(aliases=["role_type"]), + parameters=dict(type="dict", required=True, aliases=["params"]), + purge=dict(type="bool", default=False), + view=dict( + default="summary", + choices=["summary", "full"], + ), + ), + required_one_of=[ + ["name", "type"], + ], + supports_check_mode=True, + ) + + result = ClouderaManagerServiceRoleConfig(module) + + output = dict( + changed=result.changed, + config=result.config, + ) + + if module._diff: + output.update(diff=result.diff) + + if result.debug: + log = result.log_capture.getvalue() + output.update(debug=log, debug_lines=log.split("\n")) + + module.exit_json(**output) + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/service_role.py b/plugins/modules/service_role.py index f9bed4a1..d92ca533 100644 --- a/plugins/modules/service_role.py +++ b/plugins/modules/service_role.py @@ -16,9 +16,11 @@ from ansible_collections.cloudera.cluster.plugins.module_utils.cm_utils import ( ClouderaManagerMutableModule, - parse_role_result, resolve_tag_updates, ) +from ansible_collections.cloudera.cluster.plugins.module_utils.role_utils import ( + parse_role_result, +) from cm_client import ( ApiEntityTag, @@ -34,6 +36,7 @@ ) from cm_client.rest import ApiException + ANSIBLE_METADATA = { "metadata_version": "1.1", "status": ["preview"], diff --git a/plugins/modules/service_role_config_group.py b/plugins/modules/service_role_config_group.py index 5d1f4449..b54ffeef 100644 --- 
a/plugins/modules/service_role_config_group.py +++ b/plugins/modules/service_role_config_group.py @@ -16,6 +16,9 @@ from ansible_collections.cloudera.cluster.plugins.module_utils.cm_utils import ( ClouderaManagerMutableModule, +) + +from ansible_collections.cloudera.cluster.plugins.module_utils.role_config_group_utils import ( parse_role_config_group_result, ) diff --git a/plugins/modules/service_role_config_group_config_info.py b/plugins/modules/service_role_config_group_config_info.py index fc127dc8..ba25a6cb 100644 --- a/plugins/modules/service_role_config_group_config_info.py +++ b/plugins/modules/service_role_config_group_config_info.py @@ -16,7 +16,6 @@ from ansible_collections.cloudera.cluster.plugins.module_utils.cm_utils import ( ClouderaManagerModule, - parse_role_config_group_result, ) from cm_client import ( @@ -26,6 +25,7 @@ ) from cm_client.rest import ApiException + ANSIBLE_METADATA = { "metadata_version": "1.1", "status": ["preview"], diff --git a/plugins/modules/service_role_config_group_info.py b/plugins/modules/service_role_config_group_info.py index 46e95af4..cc71314b 100644 --- a/plugins/modules/service_role_config_group_info.py +++ b/plugins/modules/service_role_config_group_info.py @@ -16,6 +16,9 @@ from ansible_collections.cloudera.cluster.plugins.module_utils.cm_utils import ( ClouderaManagerModule, +) + +from ansible_collections.cloudera.cluster.plugins.module_utils.role_config_group_utils import ( parse_role_config_group_result, ) @@ -26,6 +29,7 @@ ) from cm_client.rest import ApiException + ANSIBLE_METADATA = { "metadata_version": "1.1", "status": ["preview"], diff --git a/plugins/modules/service_role_info.py b/plugins/modules/service_role_info.py index c0e1f63f..9581a8bb 100644 --- a/plugins/modules/service_role_info.py +++ b/plugins/modules/service_role_info.py @@ -18,12 +18,16 @@ from ansible_collections.cloudera.cluster.plugins.module_utils.cm_utils import ( ClouderaManagerModule, +) + +from 
ansible_collections.cloudera.cluster.plugins.module_utils.role_utils import ( parse_role_result, ) from cm_client import ClustersResourceApi, RolesResourceApi, ServicesResourceApi from cm_client.rest import ApiException + ANSIBLE_METADATA = { "metadata_version": "1.1", "status": ["preview"], diff --git a/pyproject.toml b/pyproject.toml index a36945c5..716e06c4 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -51,9 +51,12 @@ testpaths = [ filterwarnings = [ "ignore:AnsibleCollectionFinder has already been configured", "ignore:'crypt' is deprecated and slated for removal in Python 3.13:DeprecationWarning", + "ignore:URLs without a scheme:DeprecationWarning", + "ignore:HTTPResponse.getheaders():DeprecationWarning", ] markers = [ - "prepare: Prepare Cloudera Manager and resources for tests", + "service_config: Prepare service-wide configurations for tests", + "role_config: Prepare role override configurations for tests", ] [build-system] diff --git a/tests/unit/__init__.py b/tests/unit/__init__.py index f593ed44..62a6e4a2 100644 --- a/tests/unit/__init__.py +++ b/tests/unit/__init__.py @@ -22,11 +22,15 @@ ApiCluster, ApiCommand, ApiConfig, + ApiConfigList, + ApiRole, + ApiRoleList, ApiService, ApiServiceConfig, ApiServiceList, ClustersResourceApi, CommandsResourceApi, + MgmtRolesResourceApi, ServicesResourceApi, ) from cm_client.rest import ApiException @@ -179,3 +183,83 @@ def service_wide_config( message=f"{message}::reset", body=ApiServiceConfig(items=reconciled), ) + + +def provision_cm_role( + api_client: ApiClient, role_name: str, role_type: str, host_id: str +) -> Generator[ApiRole]: + api = MgmtRolesResourceApi(api_client) + + role = ApiRole( + name=role_name, + type=role_type, + host_ref=dict(hostId=host_id), + ) + + yield next(iter(api.create_roles(body=ApiRoleList(items=[role])).items), None) + + api.delete_role(role_name=role_name) + + +def cm_role_config( + api_client: ApiClient, role: ApiRole, params: dict, message: str +) -> Generator[ApiRole]: + 
"""Update a role configuration for a given role. Yields the + role, resetting the configuration to its prior state. Use with + 'yield from' within a pytest fixture. + + Args: + api_client (ApiClient): _description_ + role (ApiRole): _description_ + params (dict): _description_ + message (str): _description_ + + Raises: + Exception: _description_ + + Yields: + ApiRole: _description_ + """ + role_api = MgmtRolesResourceApi(api_client) + + # Retrieve all of the pre-setup configurations + pre = role_api.read_role_config(role.name) + + # Set the test configurations + # Do so serially, since a failed update due to defaults (see ApiException) will cause remaining + # configuration entries to not run. Long-term solution is to check-and-set, which is + # what the Ansible modules do... + for k, v in params.items(): + try: + role_api.update_role_config( + role_name=role.name, + message=f"{message}::set", + body=ApiConfigList(items=[ApiConfig(name=k, value=v)]), + ) + except ApiException as ae: + if ae.status != 400 or "delete with template" not in str(ae.body): + raise Exception(str(ae)) + + # Yield the targeted role + yield role_api.read_role(role_name=role.name) + + # Retrieve all of the post-setup configurations + post = role_api.read_role_config(role_name=role.name) + + # Reconcile the configurations + pre_set = set([c.name for c in pre.items]) + + reconciled = pre.items.copy() + reconciled.extend( + [ + ApiConfig(name=k.name, value=None) + for k in post.items + if k.name not in pre_set + ] + ) + + role_api.update_role_config( + role_name=role.name, + message=f"{message}::reset", + body=ApiConfigList(items=reconciled), + ) diff --git a/tests/unit/conftest.py b/tests/unit/conftest.py index 5fb502e1..41669cfe 100644 --- a/tests/unit/conftest.py +++ b/tests/unit/conftest.py @@ -26,6 +26,7 @@ import sys import yaml +from collections.abc import Generator from pathlib import Path from cm_client import ( @@ -40,6 +41,7 @@ ClustersResourceApi, Configuration, HostsResourceApi, + 
MgmtRolesResourceApi, MgmtServiceResourceApi, ParcelResourceApi, ParcelsResourceApi, @@ -250,7 +252,7 @@ def base_cluster(cm_api_client, request): @pytest.fixture(scope="session") -def cms(cm_api_client, request): +def cms(cm_api_client, request) -> Generator[ApiService]: """Provisions Cloudera Manager Service.""" api = MgmtServiceResourceApi(cm_api_client) @@ -275,7 +277,7 @@ def cms(cm_api_client, request): @pytest.fixture(scope="function") -def cms_service_config(cm_api_client, cms, request): +def cms_config(cm_api_client, cms, request) -> Generator[ApiService]: """Configures service-wide configurations for the Cloudera Manager Service""" marker = request.node.get_closest_marker("service_config") @@ -295,7 +297,7 @@ def cms_service_config(cm_api_client, cms, request): for k, v in marker.args[0].items(): try: api.update_service_config( - message=f"{request.node.name}::set", + message=f"{Path(request.node.parent.name).stem}::{request.node.name}::set", body=ApiServiceConfig(items=[ApiConfig(name=k, value=v)]), ) except ApiException as ae: @@ -321,6 +323,6 @@ def cms_service_config(cm_api_client, cms, request): ) api.update_service_config( - message=f"{request.node.name}::reset", + message=f"{Path(request.node.parent.name).stem}::{request.node.name}::reset", body=ApiServiceConfig(items=reconciled), ) diff --git a/tests/unit/plugins/modules/cm_service/test_cm_service.py b/tests/unit/plugins/modules/cm_service/test_cm_service.py index 5614fe61..af679312 100644 --- a/tests/unit/plugins/modules/cm_service/test_cm_service.py +++ b/tests/unit/plugins/modules/cm_service/test_cm_service.py @@ -17,7 +17,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -import os + import logging import pytest @@ -30,23 +30,144 @@ LOG = logging.getLogger(__name__) -def test_pytest_cm_service(module_args): +def test_minimal(conn, module_args, cms): + module_args(conn) + + with pytest.raises(AnsibleExitJson): + cm_service.main() + + 
+@pytest.mark.service_config(dict(log_event_retry_frequency=10))
+def test_set_parameters(conn, module_args, cms_config):
+    module_args(
+        {
+            **conn,
+            "parameters": dict(mgmt_emit_sensitive_data_in_stderr=True),
+            "message": "test_cm_service::test_set_parameters",
+            # _ansible_check_mode=True,
+            # _ansible_diff=True,
+        }
+    )
+
+    expected = dict(
+        mgmt_emit_sensitive_data_in_stderr="True", log_event_retry_frequency="10"
+    )
+
+    with pytest.raises(AnsibleExitJson) as e:
+        cm_service.main()
+
+    assert e.value.changed == True
+    assert (
+        expected.items()
+        <= {c["name"]: c["value"] for c in e.value.service["config"]}.items()
+    )
+
+    # Idempotency
+    with pytest.raises(AnsibleExitJson) as e:
+        cm_service.main()
+
+    assert e.value.changed == False
+    assert (
+        expected.items()
+        <= {c["name"]: c["value"] for c in e.value.service["config"]}.items()
+    )
+
+
+@pytest.mark.service_config(
+    dict(mgmt_emit_sensitive_data_in_stderr=True, log_event_retry_frequency=10)
+)
+def test_unset_parameters(conn, module_args, cms_config):
+    module_args(
+        {
+            **conn,
+            "parameters": dict(mgmt_emit_sensitive_data_in_stderr=None),
+            "message": "test_cm_service::test_unset_parameters",
+        }
+    )
+
+    expected = dict(log_event_retry_frequency="10")
+
+    with pytest.raises(AnsibleExitJson) as e:
+        cm_service.main()
+
+    assert e.value.changed == True
+    assert (
+        expected.items()
+        <= {c["name"]: c["value"] for c in e.value.service["config"]}.items()
+    )
+
+    # Idempotency
+    with pytest.raises(AnsibleExitJson) as e:
+        cm_service.main()
+
+    assert e.value.changed == False
+    assert (
+        expected.items()
+        <= {c["name"]: c["value"] for c in e.value.service["config"]}.items()
+    )
+
+
+@pytest.mark.service_config(
+    dict(mgmt_emit_sensitive_data_in_stderr=True, log_event_retry_frequency=10)
+)
+def test_set_parameters_with_purge(conn, module_args, cms_config):
+    module_args(
+        {
+            **conn,
+            "parameters": dict(mgmt_emit_sensitive_data_in_stderr=True),
+            "purge": True,
"message": "test_cm_service::test_set_parameters_with_purge", + # _ansible_check_mode=True, + # _ansible_diff=True, + } + ) + + expected = dict(mgmt_emit_sensitive_data_in_stderr="True") + + with pytest.raises(AnsibleExitJson) as e: + cm_service.main() + + assert e.value.changed == True + assert ( + expected.items() + <= {c["name"]: c["value"] for c in e.value.service["config"]}.items() + ) + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service.main() + + assert e.value.changed == False + assert ( + expected.items() + <= {c["name"]: c["value"] for c in e.value.service["config"]}.items() + ) + + +@pytest.mark.service_config( + dict(mgmt_emit_sensitive_data_in_stderr=True, log_event_retry_frequency=10) +) +def test_purge_all_parameters(conn, module_args, cms_service_config): module_args( { - "username": os.getenv("CM_USERNAME"), - "password": os.getenv("CM_PASSWORD"), - "host": os.getenv("CM_HOST"), - "port": "7180", - "verify_tls": "no", - "debug": "yes", - "state": "started", - "role": ["SERVICEMONITOR", "HOSTMONITOR", "EVENTSERVER", "ALERTPUBLISHER"], + **conn, + "parameters": dict(), + "purge": True, + "message": "test_cm_service::test_purge_all_parameters", + # _ansible_check_mode=True, + # _ansible_diff=True, } ) - # with pytest.raises(AnsibleFailJson, match=r"boom") as e: with pytest.raises(AnsibleExitJson) as e: cm_service.main() - # LOG.info(str(e.value)) - LOG.info(str(e.value.cloudera_manager)) + assert e.value.changed == True + assert len(e.value.service["config"]) == 0 + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service.main() + + assert e.value.changed == False + assert len(e.value.service["config"]) == 0 diff --git a/tests/unit/plugins/modules/cm_service_config/test_cm_service_config.py b/tests/unit/plugins/modules/cm_service_config/test_cm_service_config.py index 9e208227..ad54716b 100644 --- a/tests/unit/plugins/modules/cm_service_config/test_cm_service_config.py +++ 
b/tests/unit/plugins/modules/cm_service_config/test_cm_service_config.py @@ -38,10 +38,12 @@ def test_missing_required(conn, module_args): def test_present_invalid_parameter(conn, module_args): - conn.update( - parameters=dict(example="Example"), + module_args( + { + **conn, + "parameters": dict(example="Example"), + } ) - module_args(conn) with pytest.raises( AnsibleFailJson, match="Unknown configuration attribute 'example'" diff --git a/tests/unit/plugins/modules/cm_service_role_config/test_cm_service_role_config.py b/tests/unit/plugins/modules/cm_service_role_config/test_cm_service_role_config.py new file mode 100644 index 00000000..d230005a --- /dev/null +++ b/tests/unit/plugins/modules/cm_service_role_config/test_cm_service_role_config.py @@ -0,0 +1,365 @@ +# -*- coding: utf-8 -*- + +# Copyright 2024 Cloudera, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +import logging +import pytest + +from collections.abc import Generator +from pathlib import Path + +from cm_client import ( + ApiRole, + ClustersResourceApi, + MgmtRolesResourceApi, +) + +from ansible_collections.cloudera.cluster.plugins.modules import cm_service_role_config +from ansible_collections.cloudera.cluster.tests.unit import ( + AnsibleExitJson, + AnsibleFailJson, + provision_cm_role, + cm_role_config, +) + +LOG = logging.getLogger(__name__) + + +@pytest.fixture(scope="module") +def host_monitor(cm_api_client, cms, request) -> Generator[ApiRole]: + api = MgmtRolesResourceApi(cm_api_client) + + hm = next( + iter([r for r in api.read_roles().items if r.type == "HOSTMONITOR"]), None + ) + + if hm is not None: + yield hm + else: + cluster_api = ClustersResourceApi(cm_api_client) + + # Get first host of the cluster + hosts = cluster_api.list_hosts(cluster_name=cms.cluster_ref.cluster_name) + + if not hosts.items: + raise Exception( + "No available hosts to assign the Cloudera Manager Service role." 
+            )
+        else:
+            name = Path(request.fixturename).stem
+            yield from provision_cm_role(
+                cm_api_client, name, "HOSTMONITOR", hosts.items[0].host_id
+            )
+
+
+@pytest.fixture(scope="function")
+def host_monitor_config(cm_api_client, host_monitor, request) -> Generator[ApiRole]:
+    marker = request.node.get_closest_marker("role_config")
+
+    if marker is None:
+        raise Exception("No role_config marker found.")
+
+    yield from cm_role_config(
+        api_client=cm_api_client,
+        role=host_monitor,
+        params=marker.args[0],
+        message=f"{Path(request.node.parent.name).stem}::{request.node.name}",
+    )
+
+
+def test_missing_required(conn, module_args):
+    module_args(conn)
+
+    with pytest.raises(AnsibleFailJson, match="parameters"):
+        cm_service_role_config.main()
+
+
+def test_missing_required_if(conn, module_args):
+    module_args(
+        {
+            **conn,
+            "parameters": dict(),
+        }
+    )
+
+    with pytest.raises(AnsibleFailJson, match="name, type"):
+        cm_service_role_config.main()
+
+
+def test_present_invalid_parameter(conn, module_args, host_monitor):
+    module_args(
+        {
+            **conn,
+            "role": host_monitor.name,
+            "parameters": dict(example="Example"),
+        }
+    )
+
+    with pytest.raises(
+        AnsibleFailJson, match="Unknown configuration attribute 'example'"
+    ):
+        cm_service_role_config.main()
+
+
+@pytest.mark.role_config(
+    dict(mgmt_num_descriptor_fetch_tries=11, process_start_secs=21)
+)
+def test_set_parameters(conn, module_args, host_monitor_config, request):
+    module_args(
+        {
+            **conn,
+            "role": host_monitor_config.name,
+            "parameters": dict(mgmt_num_descriptor_fetch_tries=32),
+            "message": f"{Path(request.node.parent.name).stem}::{request.node.name}",
+            # _ansible_check_mode=True,
+            # _ansible_diff=True,
+        }
+    )
+
+    expected = dict(mgmt_num_descriptor_fetch_tries="32", process_start_secs="21")
+
+    with pytest.raises(AnsibleExitJson) as e:
+        cm_service_role_config.main()
+
+    assert e.value.changed == True
+    assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items()
+
+    # 
Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config.main() + + assert e.value.changed == False + assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + + +@pytest.mark.role_config( + dict(mgmt_num_descriptor_fetch_tries=11, process_start_secs=21) +) +def test_set_parameters_role_type(conn, module_args, host_monitor_config, request): + module_args( + { + **conn, + "role_type": host_monitor_config.type, + "parameters": dict(mgmt_num_descriptor_fetch_tries=32), + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + # _ansible_check_mode=True, + # _ansible_diff=True, + } + ) + + expected = dict(mgmt_num_descriptor_fetch_tries="32", process_start_secs="21") + + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config.main() + + assert e.value.changed == True + assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config.main() + + assert e.value.changed == False + assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + + +@pytest.mark.role_config( + dict(mgmt_num_descriptor_fetch_tries=11, process_start_secs=21) +) +def test_unset_parameters(conn, module_args, host_monitor_config, request): + module_args( + { + **conn, + "role": host_monitor_config.name, + "parameters": dict(mgmt_num_descriptor_fetch_tries=None), + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + } + ) + + expected = dict(process_start_secs="21") + + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config.main() + + assert e.value.changed == True + assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config.main() + + assert e.value.changed == False + assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() 
+ + +@pytest.mark.role_config( + dict(mgmt_num_descriptor_fetch_tries=11, process_start_secs=21) +) +def test_unset_parameters_role_type(conn, module_args, host_monitor_config, request): + module_args( + { + **conn, + "type": host_monitor_config.type, + "parameters": dict(mgmt_num_descriptor_fetch_tries=None), + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + } + ) + + expected = dict(process_start_secs="21") + + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config.main() + + assert e.value.changed == True + assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config.main() + + assert e.value.changed == False + assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + + +@pytest.mark.role_config( + dict(mgmt_num_descriptor_fetch_tries=11, process_start_secs=21) +) +def test_set_parameters_with_purge(conn, module_args, host_monitor_config, request): + module_args( + { + **conn, + "role": host_monitor_config.name, + "parameters": dict(mgmt_num_descriptor_fetch_tries=32), + "purge": True, + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + # _ansible_check_mode=True, + # _ansible_diff=True, + } + ) + + expected = dict(mgmt_num_descriptor_fetch_tries="32") + + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config.main() + + assert e.value.changed == True + assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config.main() + + assert e.value.changed == False + assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + + +@pytest.mark.role_config( + dict(mgmt_num_descriptor_fetch_tries=11, process_start_secs=21) +) +def test_set_parameters_with_purge_role_type( + conn, module_args, host_monitor_config, request +): + 
module_args( + { + **conn, + "role_type": host_monitor_config.type, + "parameters": dict(mgmt_num_descriptor_fetch_tries=32), + "purge": True, + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + # _ansible_check_mode=True, + # _ansible_diff=True, + } + ) + + expected = dict(mgmt_num_descriptor_fetch_tries="32") + + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config.main() + + assert e.value.changed == True + assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config.main() + + assert e.value.changed == False + assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + + +@pytest.mark.role_config( + dict(mgmt_num_descriptor_fetch_tries=11, process_start_secs=21) +) +def test_purge_all_parameters(conn, module_args, host_monitor_config, request): + module_args( + { + **conn, + "role": host_monitor_config.name, + "parameters": dict(), + "purge": True, + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + # _ansible_check_mode=True, + # _ansible_diff=True, + } + ) + + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config.main() + + assert e.value.changed == True + assert len(e.value.config) == 0 + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config.main() + + assert e.value.changed == False + assert len(e.value.config) == 0 + + +@pytest.mark.role_config( + dict(mgmt_num_descriptor_fetch_tries=11, process_start_secs=21) +) +def test_purge_all_parameters_role_type( + conn, module_args, host_monitor_config, request +): + module_args( + { + **conn, + "type": host_monitor_config.type, + "parameters": dict(), + "purge": True, + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + # _ansible_check_mode=True, + # _ansible_diff=True, + } + ) + + with pytest.raises(AnsibleExitJson) as e: + 
cm_service_role_config.main() + + assert e.value.changed == True + assert len(e.value.config) == 0 + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config.main() + + assert e.value.changed == False + assert len(e.value.config) == 0