diff --git a/meta/runtime.yml b/meta/runtime.yml index 51339f2d..a816aba1 100644 --- a/meta/runtime.yml +++ b/meta/runtime.yml @@ -1,6 +1,6 @@ ---- +# -*- coding: utf-8 -*- -# Copyright 2023 Cloudera, Inc. All Rights Reserved. +# Copyright 2025 Cloudera, Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -26,6 +26,10 @@ action_groups: - cm_resource - cm_service_info - cm_service + - cm_service_role_config_group_info + - cm_service_role_config_group + - cm_service_role_info + - cm_service_role - cm_trial_license - cm_version_info - cm_endpoint_info diff --git a/plugins/module_utils/cluster_utils.py b/plugins/module_utils/cluster_utils.py index 203d4230..bf3cae1f 100644 --- a/plugins/module_utils/cluster_utils.py +++ b/plugins/module_utils/cluster_utils.py @@ -17,7 +17,7 @@ """ from ansible_collections.cloudera.cluster.plugins.module_utils.cm_utils import ( - _parse_output, + normalize_output, ) from cm_client import ApiCluster @@ -42,5 +42,5 @@ def parse_cluster_result(cluster: ApiCluster) -> dict: # Retrieve full_version as version output = dict(version=cluster.full_version) - output.update(_parse_output(cluster.to_dict(), CLUSTER_OUTPUT)) + output.update(normalize_output(cluster.to_dict(), CLUSTER_OUTPUT)) return output diff --git a/plugins/module_utils/cm_utils.py b/plugins/module_utils/cm_utils.py index a1667986..fad3f7e7 100644 --- a/plugins/module_utils/cm_utils.py +++ b/plugins/module_utils/cm_utils.py @@ -34,9 +34,8 @@ from cm_client import ( ApiClient, ApiCommand, + ApiConfig, ApiConfigList, - ApiRole, - ApiRoleConfigGroup, Configuration, ) from cm_client.rest import ApiException, RESTClientObject @@ -47,34 +46,8 @@ __credits__ = ["frisch@cloudera.com"] __maintainer__ = ["wmudge@cloudera.com"] -ROLE_OUTPUT = [ - "commission_state", - "config_staleness_status", - "ha_status", - "health_checks", - "health_summary", - # "host_ref", - "maintenance_mode", - "maintenance_owners", - "name", - # "role_config_group_ref", - "role_state", - # "service_ref", - "tags", - "type", - "zoo_keeper_server_mode", -] - -ROLE_CONFIG_GROUP = [ - "name", - "role_type", - "base", - "display_name", - # "service_ref", -] - - -def _parse_output(entity: dict, filter: list) -> dict: + +def normalize_output(entity: dict, filter: list) -> dict: output = {} for k in filter: if k == "tags": @@ -85,24 +58,6 @@ def _parse_output(entity: dict, filter: list) -> dict: return output -def parse_role_result(role: ApiRole) -> dict: - # Retrieve only the host_id, role_config_group, and service identifiers - output = dict( - host_id=role.host_ref.host_id, - role_config_group_name=role.role_config_group_ref.role_config_group_name, - service_name=role.service_ref.service_name, - ) - output.update(_parse_output(role.to_dict(), ROLE_OUTPUT)) - return output - - -def parse_role_config_group_result(role_config_group: ApiRoleConfigGroup) -> dict: - # Retrieve only the service identifier - output = dict(service_name=role_config_group.service_ref.service_name) - output.update(_parse_output(role_config_group.to_dict(), ROLE_CONFIG_GROUP)) - return output - - def normalize_values(add: dict) -> dict: """Normalize parameter values. 
Strings have whitespace trimmed, integers are converted to strings, and Boolean values are converted to their string representation @@ -149,6 +104,11 @@ def resolve_parameter_updates( diff = recursive_diff(current, normalize_values(incoming)) if diff is not None: + # TODO Lookup default for v=None to avoid issues with CM + # CM sometimes fails to find the default value for a parameter + # However, a view=full will return the default, so if we can + # change this method's signature to include that reference, we + # can short-circuit CM's problematic lookup of the default value. updates = { k: v for k, v in diff[1].items() @@ -191,6 +151,25 @@ def resolve_tag_updates( return (delta_add, delta_del) +class ConfigListUpdates(object): + def __init__(self, existing: ApiConfigList, updates: dict, purge: bool) -> None: + current = {r.name: r.value for r in existing.items} + changeset = resolve_parameter_updates(current, updates, purge) + + self.diff = dict( + before={k: current[k] if k in current else None for k in changeset.keys()}, + after=changeset, + ) + + self.config = ApiConfigList( + items=[ApiConfig(name=k, value=v) for k, v in changeset.items()] + ) + + @property + def changed(self) -> bool: + return bool(self.config.items) + + class ClusterTemplate(object): IDEMPOTENT_IDS = frozenset( ["refName", "name", "clusterName", "hostName", "product"] diff --git a/plugins/module_utils/data_context_utils.py b/plugins/module_utils/data_context_utils.py index 4b3f54f7..be4f7c57 100644 --- a/plugins/module_utils/data_context_utils.py +++ b/plugins/module_utils/data_context_utils.py @@ -17,7 +17,7 @@ """ from ansible_collections.cloudera.cluster.plugins.module_utils.cm_utils import ( - _parse_output, + normalize_output, ) from cm_client import ApiDataContextList @@ -38,9 +38,9 @@ ] -def _parse_output(data: dict, keys: list) -> dict: +def normalize_output(data: dict, keys: list) -> dict: return {key: data[key] for key in keys if key in data} def parse_data_context_result(data_contexts: ApiDataContextList) -> list: - return [_parse_output(item, DATA_CONTEXT_OUTPUT) for item in data_contexts.items] + return [normalize_output(item, DATA_CONTEXT_OUTPUT) for item in data_contexts.items] diff --git a/plugins/module_utils/host_utils.py b/plugins/module_utils/host_utils.py new file mode 100644 index 00000000..645cf466 --- /dev/null +++ b/plugins/module_utils/host_utils.py @@ -0,0 +1,57 @@ +# Copyright 2024 Cloudera, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
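Reviewer note: the new ConfigListUpdates helper in cm_utils.py above wraps resolve_parameter_updates so callers get both the ApiConfigList payload for the update call and a before/after view for Ansible's diff mode. A minimal usage sketch, assuming the helper is imported from the collection's cm_utils module; the parameter name and values here are hypothetical:

```python
from cm_client import ApiConfig, ApiConfigList

from ansible_collections.cloudera.cluster.plugins.module_utils.cm_utils import (
    ConfigListUpdates,
)

# Hypothetical existing configuration, e.g. read from a role config group
existing = ApiConfigList(items=[ApiConfig(name="process_start_secs", value="20")])

# Declared parameters; purge=True would also unset any undeclared parameters
updates = ConfigListUpdates(existing, dict(process_start_secs=45), purge=False)

if updates.changed:
    print(updates.diff)    # {'before': ..., 'after': ...} for --diff output
    print(updates.config)  # ApiConfigList payload for an update_*_config call
```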
+ +""" +A common functions for Cloudera Manager hosts +""" + +from cm_client import ( + ApiClient, + ApiHost, + ApiHostRef, + HostsResourceApi, +) +from cm_client.rest import ApiException + + +def get_host( + api_client: ApiClient, hostname: str = None, host_id: str = None +) -> ApiHost: + if hostname: + return next( + ( + h + for h in HostsResourceApi(api_client).read_hosts().items + if h.hostname == hostname + ), + None, + ) + else: + try: + return HostsResourceApi(api_client).read_host(host_id) + except ApiException as ex: + if ex.status != 404: + raise ex + else: + return None + + +def get_host_ref( + api_client: ApiClient, hostname: str = None, host_id: str = None +) -> ApiHostRef: + host = get_host(api_client, hostname, host_id) + if host is not None: + return ApiHostRef(host.host_id, host.hostname) + else: + return None diff --git a/plugins/module_utils/parcel_utils.py b/plugins/module_utils/parcel_utils.py index 88d13793..38a50c5a 100644 --- a/plugins/module_utils/parcel_utils.py +++ b/plugins/module_utils/parcel_utils.py @@ -23,7 +23,7 @@ from cm_client import ApiParcel, ParcelResourceApi from ansible_collections.cloudera.cluster.plugins.module_utils.cm_utils import ( - _parse_output, + normalize_output, ) @@ -138,5 +138,5 @@ def activate(self): def parse_parcel_result(parcel: ApiParcel) -> dict: # Retrieve only the cluster identifier output = dict(cluster_name=parcel.cluster_ref.cluster_name) - output.update(_parse_output(parcel.to_dict(), PARCEL)) + output.update(normalize_output(parcel.to_dict(), PARCEL)) return output diff --git a/plugins/module_utils/role_config_group_utils.py b/plugins/module_utils/role_config_group_utils.py new file mode 100644 index 00000000..99471f7f --- /dev/null +++ b/plugins/module_utils/role_config_group_utils.py @@ -0,0 +1,89 @@ +# Copyright 2025 Cloudera, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from ansible_collections.cloudera.cluster.plugins.module_utils.cm_utils import ( + normalize_output, +) + +from cm_client import ( + ApiClient, + ApiRoleConfigGroup, + RoleConfigGroupsResourceApi, + MgmtRoleConfigGroupsResourceApi, +) + +ROLE_CONFIG_GROUP = [ + "name", + "role_type", + "base", + "display_name", + # "service_ref", +] + + +def parse_role_config_group_result(role_config_group: ApiRoleConfigGroup) -> dict: + """Parse a Role Config Group into a normalized dictionary. 
+ + Returns the following: + - service_name (str) + - name (str) + - role_type (str) + - base (bool) + - display_name (str) + - config (dict) + + Args: + role_config_group (ApiRoleConfigGroup): Role Config Group + + Returns: + dict: Normalized dictionary of returned values + """ + # Retrieve only the service identifier + output = dict(service_name=role_config_group.service_ref.service_name) + output.update(normalize_output(role_config_group.to_dict(), ROLE_CONFIG_GROUP)) + output.update(config={c.name: c.value for c in role_config_group.config.items}) + return output + + +def get_base_role_config_group( + api_client: ApiClient, cluster_name: str, service_name: str, role_type: str +) -> ApiRoleConfigGroup: + rcg_api = RoleConfigGroupsResourceApi(api_client) + return next( + iter( + [ + r + for r in rcg_api.read_role_config_groups( + cluster_name, service_name + ).items + if r.role_type == role_type and r.base + ] + ), + None, + ) + + +def get_mgmt_base_role_config_group( + api_client: ApiClient, role_type: str +) -> ApiRoleConfigGroup: + rcg_api = MgmtRoleConfigGroupsResourceApi(api_client) + return next( + iter( + [ + r + for r in rcg_api.read_role_config_groups().items + if r.role_type == role_type and r.base + ] + ), + None, + ) diff --git a/plugins/module_utils/role_utils.py b/plugins/module_utils/role_utils.py new file mode 100644 index 00000000..7746bda4 --- /dev/null +++ b/plugins/module_utils/role_utils.py @@ -0,0 +1,195 @@ +# Copyright 2024 Cloudera, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
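For reviewers, a short sketch of how the new role_config_group_utils helpers compose, assuming they are imported from the module above; the endpoint, credentials, API version, and role type are placeholders, and the client bootstrapping follows the usual cm_client pattern:

```python
import cm_client

from ansible_collections.cloudera.cluster.plugins.module_utils.role_config_group_utils import (
    get_mgmt_base_role_config_group,
    parse_role_config_group_result,
)

# Hypothetical Cloudera Manager endpoint and credentials
cm_client.configuration.username = "admin"
cm_client.configuration.password = "admin"
api_client = cm_client.ApiClient("https://cm.example.com:7183/api/v54")

# Find the base (default) role config group for a Cloudera Manager service
# role type and normalize it into the dictionary shape the modules return
base = get_mgmt_base_role_config_group(api_client, "HOSTMONITOR")
if base is not None:
    print(parse_role_config_group_result(base))
```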
+ +from ansible_collections.cloudera.cluster.plugins.module_utils.cm_utils import ( + normalize_output, +) +from ansible_collections.cloudera.cluster.plugins.module_utils.host_utils import ( + get_host_ref, +) + +from cm_client import ( + ApiClient, + ApiConfig, + ApiConfigList, + ApiRole, + ApiRoleList, + ApiRoleConfigGroupRef, + RoleConfigGroupsResourceApi, + RolesResourceApi, + MgmtRolesResourceApi, +) + +ROLE_OUTPUT = [ + "commission_state", + "config_staleness_status", + "ha_status", + "health_checks", + "health_summary", + # "host_ref", + "maintenance_mode", + "maintenance_owners", + "name", + # "role_config_group_ref", + "role_state", + # "service_ref", + "tags", + "type", + "zoo_keeper_server_mode", +] + + +def parse_role_result(role: ApiRole) -> dict: + # Retrieve only the host_id, role_config_group, and service identifiers + output = dict( + host_id=role.host_ref.host_id, + role_config_group_name=role.role_config_group_ref.role_config_group_name, + service_name=role.service_ref.service_name, + ) + output.update(normalize_output(role.to_dict(), ROLE_OUTPUT)) + output.update(config={c.name: c.value for c in role.config.items}) + return output + + +def get_mgmt_roles(api_client: ApiClient, role_type: str) -> ApiRoleList: + role_api = MgmtRolesResourceApi(api_client) + return ApiRoleList( + items=[r for r in role_api.read_roles().items if r.type == role_type] + ) + + +def read_role( + api_client: ApiClient, cluster_name: str, service_name: str, name: str +) -> ApiRole: + role_api = RolesResourceApi(api_client) + role = role_api.read_role( + cluster_name=cluster_name, service_name=service_name, role_name=name + ) + if role is not None: + role.config = role_api.read_role_config( + cluster_name=cluster_name, service_name=service_name, role_name=role.name + ) + return role + + +def read_roles( + api_client: ApiClient, cluster_name: str, service_name: str +) -> ApiRoleList: + role_api = RolesResourceApi(api_client) + roles = role_api.read_roles(cluster_name, service_name).items + for r in roles: + r.config = role_api.read_role_config( + cluster_name=cluster_name, + service_name=service_name, + role_name=r.name, + ) + return ApiRoleList(items=roles) + + +def read_roles_by_type( + api_client: ApiClient, cluster_name: str, service_name: str, role_type: str +) -> ApiRoleList: + role_api = RolesResourceApi(api_client) + roles = [ + r + for r in role_api.read_roles(cluster_name, service_name).items + if r.type == role_type + ] + for r in roles: + r.config = role_api.read_role_config( + cluster_name=cluster_name, + service_name=service_name, + role_name=r.name, + ) + return ApiRoleList(items=roles) + + +def read_cm_role(api_client: ApiClient, role_type: str) -> ApiRole: + role_api = MgmtRolesResourceApi(api_client) + role = next( + iter([r for r in role_api.read_roles().items if r.type == role_type]), + None, + ) + if role is not None: + role.config = role_api.read_role_config(role.name) + return role + + +def read_cm_roles(api_client: ApiClient) -> ApiRoleList: + role_api = MgmtRolesResourceApi(api_client) + roles = role_api.read_roles().items + for r in roles: + r.config = role_api.read_role_config(role_name=r.name) + return ApiRoleList(items=roles) + + +class HostNotFoundException(Exception): + pass + + +class RoleConfigGroupNotFoundException(Exception): + pass + + +def create_role( + api_client: ApiClient, + role_type: str, + hostname: str, + host_id: str, + name: str = None, + config: dict = None, + cluster_name: str =
None, + service_name: str = None, + role_config_group: str = None, +) -> ApiRole: + # Set up the role + role = ApiRole(type=str(role_type).upper()) + + # Name + if name: + role.name = name # No name allows auto-generation + + # Host assignment + host_ref = get_host_ref(api_client, hostname, host_id) + if host_ref is None: + raise HostNotFoundException( + f"Host not found: hostname='{hostname}', host_id='{host_id}'" + ) + else: + role.host_ref = host_ref + + # Role config group + if role_config_group: + rcg_api = RoleConfigGroupsResourceApi(api_client) + rcg = rcg_api.read_role_config_group( + cluster_name=cluster_name, + service_name=service_name, + role_config_group_name=role_config_group, + ) + if rcg is None: + raise RoleConfigGroupNotFoundException( + f"Role config group not found: {role_config_group}" + ) + else: + role.role_config_group_ref = ApiRoleConfigGroupRef(rcg.name) + + # Role override configurations + if config: + role.config = ApiConfigList( + items=[ApiConfig(name=k, value=v) for k, v in config.items()] + ) + + return role diff --git a/plugins/module_utils/service_utils.py b/plugins/module_utils/service_utils.py index c11a2d79..53e405d7 100644 --- a/plugins/module_utils/service_utils.py +++ b/plugins/module_utils/service_utils.py @@ -17,14 +17,27 @@ """ from ansible_collections.cloudera.cluster.plugins.module_utils.cm_utils import ( - _parse_output, + normalize_output, resolve_parameter_updates, ) +from ansible_collections.cloudera.cluster.plugins.module_utils.role_config_group_utils import ( + parse_role_config_group_result, +) +from ansible_collections.cloudera.cluster.plugins.module_utils.role_utils import ( + parse_role_result, +) from cm_client import ( + ApiClient, ApiConfig, ApiService, ApiServiceConfig, + MgmtServiceResourceApi, + MgmtRoleConfigGroupsResourceApi, + MgmtRolesResourceApi, + RoleConfigGroupsResourceApi, + RolesResourceApi, + ServicesResourceApi, ) SERVICE_OUTPUT = [ @@ -45,20 +58,126 @@ def parse_service_result(service: ApiService) -> dict: - # Retrieve only the cluster_name - output = dict(cluster_name=service.cluster_ref.cluster_name) - output.update(_parse_output(service.to_dict(), SERVICE_OUTPUT)) + # Retrieve only the cluster_name if it exists + if service.cluster_ref is not None: + output = dict(cluster_name=service.cluster_ref.cluster_name) + else: + output = dict(cluster_name=None) + + # Parse the service itself + output.update(normalize_output(service.to_dict(), SERVICE_OUTPUT)) + + # Parse the service-wide configurations + if service.config is not None: + output.update(config={c.name: c.value for c in service.config.items}) + + # Parse the role config groups via util function + if service.role_config_groups is not None: + output.update( + role_config_groups=[ + parse_role_config_group_result(rcg) + for rcg in service.role_config_groups + ] + ) + + # Parse the roles via util function + if service.roles is not None: + output.update(roles=[parse_role_result(r) for r in service.roles]) + return output +def read_service( + api_client: ApiClient, cluster_name: str, service_name: str +) -> ApiService: + """Read a cluster service and its role config group and role dependents. 
+ + Args: + api_client (ApiClient): Cloudera Manager API client + cluster_name (str): Name of the cluster + service_name (str): Name of the service + + Returns: + ApiService: The service, with its service-wide configuration, role config groups, and roles populated + """ + service_api = ServicesResourceApi(api_client) + rcg_api = RoleConfigGroupsResourceApi(api_client) + role_api = RolesResourceApi(api_client) + + service = service_api.read_service( + cluster_name=cluster_name, service_name=service_name + ) + + if service is not None: + # Gather the service-wide configuration + service.config = service_api.read_service_config( + cluster_name=cluster_name, service_name=service_name + ) + + # Gather each role config group configuration + for rcg in service.role_config_groups: + rcg.config = rcg_api.read_config( + cluster_name=cluster_name, + service_name=service_name, + role_config_group_name=rcg.name, + ) + + # Gather each role configuration + for role in service.roles: + role.config = role_api.read_role_config( + cluster_name=cluster_name, + service_name=service_name, + role_name=role.name, + ) + + return service + + +def read_cm_service(api_client: ApiClient) -> ApiService: + """Read the Cloudera Manager service and its role config group and role dependents. + + Args: + api_client (ApiClient): Cloudera Manager API client + + Returns: + ApiService: The Cloudera Manager service, with its service-wide configuration, role config groups, and roles populated + """ + service_api = MgmtServiceResourceApi(api_client) + rcg_api = MgmtRoleConfigGroupsResourceApi(api_client) + role_api = MgmtRolesResourceApi(api_client) + + service = service_api.read_service() + + if service is not None: + # Gather the service-wide configuration + service.config = service_api.read_service_config() + + # Gather each role config group configuration + service.role_config_groups = [ + rcg for rcg in rcg_api.read_role_config_groups().items if rcg.config.items + ] + + # Gather each role configuration + service.roles = role_api.read_roles().items + for role in service.roles: + role.config = role_api.read_role_config(role_name=role.name) + + return service + + +class ServiceConfigUpdates(object): + def __init__(self, existing: ApiServiceConfig, updates: dict, purge: bool) -> None: + current = {r.name: r.value for r in existing.items} + changeset = resolve_parameter_updates(current, updates, purge) + + self.before = { + k: current[k] if k in current else None for k in changeset.keys() + } + self.after = changeset + + self.diff = dict( + before=self.before, + after=self.after, + ) + + self.config = ApiServiceConfig( diff --git a/plugins/modules/cm_service.py b/plugins/modules/cm_service.py index 921981cf..1ae5da6f 100644 --- a/plugins/modules/cm_service.py +++ b/plugins/modules/cm_service.py @@ -1,4 +1,7 @@ -# Copyright 2024 Cloudera, Inc. All Rights Reserved. +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright 2025 Cloudera, Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,430 +15,1037 @@ # See the License for the specific language governing permissions and # limitations under the License.
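The rewritten cm_service module below leans on the new service_utils helpers, which hydrate an ApiService with its service-wide configuration, role config groups, and roles in one pass. A rough sketch of the read path, under the same hypothetical endpoint and credentials as the earlier example:

```python
import cm_client

from ansible_collections.cloudera.cluster.plugins.module_utils.service_utils import (
    parse_service_result,
    read_cm_service,
)

# Hypothetical Cloudera Manager endpoint and credentials
cm_client.configuration.username = "admin"
cm_client.configuration.password = "admin"
api_client = cm_client.ApiClient("https://cm.example.com:7183/api/v54")

# Returns the ApiService with config, role config groups, and roles populated
service = read_cm_service(api_client)

# Normalize into the dictionary returned by cm_service and cm_service_info
print(parse_service_result(service))
```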
-from ansible_collections.cloudera.cluster.plugins.module_utils.cm_utils import ( - ClouderaManagerModule, -) - -from cm_client.rest import ApiException -from cm_client import MgmtRolesResourceApi -from cm_client import MgmtServiceResourceApi -from cm_client import MgmtRoleCommandsResourceApi -from cm_client import HostsResourceApi - -ANSIBLE_METADATA = { - "metadata_version": "1.1", - "status": ["preview"], - "supported_by": "community", -} - DOCUMENTATION = r""" ---- module: cm_service -short_description: Manage Cloudera Manager service roles +short_description: Manage Cloudera Manager service description: - - Create or remove one or more Cloudera Manager service roles. - - Start, stop or restart one or more Cloudera Manager service roles. + - Manage the Cloudera Manager service (CMS), its role config groups and roles, and its operations. author: - - "Ronald Suplina (@rsuplina)" + - Ronald Suplina (@rsuplina) + - Webster Mudge (@wmudge) options: - role: + config: description: - - A list of one or more service roles to be configured. + - The service-wide configuration to set. + - To unset a parameter, use V(None) as the value. + type: dict + aliases: + - params + - parameters + role_config_groups: + description: + - A list of one or more role config groups to manage. + - Each role config group is the I(base) group for its O(type). type: list - elements: str - required: True + elements: dict + suboptions: + type: + description: + - The role type defining the role config group. + type: str + required: yes + aliases: + - role_type + config: + description: + - The configuration for the role config group. + - To unset a configuration, use V(None) as the value. + - This configuration is applied to role instances. + - To override these configuration values, use role overrides. + type: dict + required: yes + aliases: + - params + - parameters + roles: + description: + - A list of one or more role instances to manage. + - Each role instance is the application and configuration of a role type to a host. + type: list + elements: dict + suboptions: + cluster_hostname: + description: + - The hostname of an instance for the role. + - If the hostname is different than that of the existing instance for the O(type), the role will be destroyed and rebuilt on the declared host. + - Mutually exclusive with O(cluster_host_id). + type: str + aliases: + - cluster_host + cluster_host_id: + description: + - The host ID of the instance for the role. + - If the host ID is different than that of the existing instance for the O(type), the role will be destroyed and rebuilt on the declared host. + - Mutually exclusive with O(cluster_hostname). + type: str + config: + description: + - The configuration for the role overrides. + - To unset a configuration, use V(None) as the value. + - This configuration is applied to the role, overriding any role config group or default values. + type: dict + aliases: + - params + - parameters + type: + description: + - The role type of the role to manage on the instance. + type: str + required: yes + aliases: + - role_type + maintenance: + description: + - Flag for whether the service should be in maintenance mode. + type: bool + aliases: + - maintenance_mode purge: description: - - Delete all current roles and setup only the roles provided + - Flag for whether the declared service-wide configurations, role config groups, and roles should update existing configuration or reset to match the declared state only.
+ - To clear configurations - service-wide, role config groups, and roles - set O(config={}), i.e. an empty dictionary, or omit entirely, and set O(purge=True). + - To clear role config groups and roles, set O(role_config_groups=[]) or O(roles=[]), i.e. an empty list, or omit entirely, and set O(purge=True). type: bool - required: False - default: False + required: no + default: no state: description: - - The desired state of roles + - The operating state of the service. + - The V(restarted) value will always restart the service and set RV(changed=True). type: str - default: 'started' + default: present choices: - - 'started' - - 'stopped' - - 'absent' - - 'present' - - 'restarted' - required: False - + - started + - stopped + - absent + - present + - restarted + required: no +extends_documentation_fragment: + - cloudera.cluster.cm_options + - cloudera.cluster.cm_endpoint + - cloudera.cluster.message +attributes: + check_mode: + support: full + diff_mode: + support: full + platform: + platforms: all requirements: - - cm_client + - cm-client +seealso: + - module: cloudera.cluster.cm_service_role + - module: cloudera.cluster.cm_service_role_config_group """ EXAMPLES = r""" ---- -- name: Start Cloudera Manager service roles - cloudera.cluster.cm_version: - host: "10.10.10.10" +- name: Define and start the Cloudera Manager service and its roles + cloudera.cluster.cm_service: + host: "cm.example.com" + username: "jane_smith" + password: "S&peR4Ec*re" + state: started + roles: + - type: SERVICEMONITOR + cluster_hostname: "services01.example.com" + - type: HOSTMONITOR + cluster_hostname: "services02.example.com" + - type: EVENTSERVER + cluster_hostname: "services02.example.com" + - type: ALERTPUBLISHER + cluster_hostname: "services01.example.com" + +- name: Set the service-wide configuration for Cloudera Manager service + cloudera.cluster.cm_service: + host: "cm.example.com" + username: "jane_smith" + password: "S&peR4Ec*re" + config: + mgmt_pause_duration_window: 10 + ldap_monitoring_enabled: no + +- name: Unset a service-wide configuration for Cloudera Manager service + cloudera.cluster.cm_service: + host: "cm.example.com" + username: "jane_smith" + password: "S&peR4Ec*re" + config: + ldap_monitoring_enabled: None + +- name: Set the role config group for the Host Monitor role + cloudera.cluster.cm_service: + host: "cm.example.com" + username: "jane_smith" + password: "S&peR4Ec*re" + role_config_groups: + - type: HOSTMONITOR + config: + mgmt_num_descriptor_fetch_tries: 25 + process_start_secs: 30 + +- name: Unset a configuration in the role config group for the Host Monitor role + cloudera.cluster.cm_service: + host: "cm.example.com" username: "jane_smith" password: "S&peR4Ec*re" - port: "7180" - purge: False - state: "started" - role: [ "SERVICEMONITOR" , "HOSTMONITOR", "EVENTSERVER", "ALERTPUBLISHER" ] - register: cm_output - -- name: Purge all roles then create and start new roles - cloudera.cluster.cm_version: - host: "10.10.10.10" + role_config_groups: + - type: HOSTMONITOR + config: + process_start_secs: None + +- name: Set the role overrides for the Host Monitor role instance + cloudera.cluster.cm_service: + host: "cm.example.com" username: "jane_smith" password: "S&peR4Ec*re" - port: "7180" - purge: True - state: "started" - role: [ "SERVICEMONITOR" , "HOSTMONITOR", "EVENTSERVER", "ALERTPUBLISHER" ] - register: cm_output - -- name: Stop two Cloudera Manager service roles - cloudera.cluster.cm_version: - host: "10.10.10.10" + roles: + - type: HOSTMONITOR + cluster_hostname:
"services02.example.com" + config: + mgmt_num_descriptor_fetch_tries: 30 + process_start_secs: 45 + +- name: Unset a role override for the Host Monitor role instance + cloudera.cluster.cm_service: + host: "cm.example.com" + username: "jane_smith" + password: "S&peR4Ec*re" + roles: + - type: HOSTMONITOR + cluster_hostname: "services02.example.com" + config: + process_start_secs: None + +- name: Update the service state to only the declared configuration + cloudera.cluster.cm_service + host: "cm.example.com" + username: "jane_smith" + password: "S&peR4Ec*re" + state: started + purge: yes + config: + mgmt_pause_duration_window: 10 + role_config_groups: + - type: HOSTMONITOR + config: + mgmt_num_descriptor_fetch_tries: 25 + process_start_secs: 30 + roles: + - type: SERVICEMONITOR + cluster_hostname: "services01.example.com" + - type: HOSTMONITOR + cluster_hostname: "services02.example.com" + config: + mgmt_num_descriptor_fetch_tries: 30 + - type: EVENTSERVER + cluster_hostname: "services02.example.com" + - type: ALERTPUBLISHER + cluster_hostname: "services01.example.com" + +- name: Stop the Cloudera Manager service + cloudera.cluster.cm_service + host: "cm.example.com" username: "jane_smith" password: "S&peR4Ec*re" - port: "7180" state: "stopped" - role: [ "EVENTSERVER", "ALERTPUBLISHER" ] - register: cm_output -- name: Remove Cloudera Manager service role - cloudera.cluster.cm_version: - host: "10.10.10.10" +- name: Remove the Cloudera Manager service and its roles and role config groups + cloudera.cluster.cm_service + host: "cm.example.com" username: "jane_smith" password: "S&peR4Ec*re" - port: "7180" - purge: False state: "absent" - role: [ "ALERTPUBLISHER" ] - register: cm_output """ RETURN = r""" ---- -cloudera_manager: - description: List of Cloudera Manager roles - type: dict - contains: +service: + description: The Cloudera Manager service. + type: dict + contains: + client_config_staleness_status: + description: Status of client configuration for the Cloudera Manager service. + type: str + returned: optional + cluster_name: + description: The associated cluster name. + type: str + returned: optional + config: + description: Service-wide configuration for the Cloudera Manager service. + type: dict + returned: optional + config_staleness_status: + description: Status of configuration staleness for the Cloudera Manager service. + type: str + returned: optional + sample: + - FRESH + - STALE_REFRESHABLE + - STALE + display_name: + description: Display name of the Cloudera Manager service. + type: str + returned: always + health_checks: + description: Lists all available health checks for the Cloudera Manager service. + type: list + elements: dict + returned: optional + contains: + explanation: + description: A descriptor for the health check. + type: str + returned: optional name: - description: The Cloudera Manager role name. - type: str - returned: optional - type: - description: The Cloudera Manager role type. - type: str - returned: optional - serviceRef: - description: Reference to a service. - type: str - returned: optional - service_url: - description: Role url for Cloudera Manager Role. - type: str - returned: optional - hostRef: - description: Reference to a host. - type: str - returned: optional - role_state: - description: State of the Cloudera Manager Role. - type: str - returned: optional - commissionState: - description: Commission state of the role. - type: str - returned: optional - health_summary: - description: Health of the Cloudera Manager Role. 
- type: str - returned: optional - roleConfigGroupRef: - description: Reference to role config groups. - type: str - returned: optional - configStalenessStatus: - description: Status of configuration staleness for Cloudera Manager Role. - type: str - returned: optional + description: Unique name for the health check. + type: str + returned: always + summary: + description: The summary status of the health check. + type: str + returned: always + sample: + - DISABLED + - HISTORY_NOT_AVAILABLE + - NOT_AVAILABLE + - GOOD + - CONCERNING + - BAD + suppressed: + description: + - Whether the health check is suppressed. + - A suppressed health check is not considered when computing the overall health. + type: bool + returned: always + health_summary: + description: Health of the Cloudera Manager service. + type: str + returned: always + sample: + - DISABLED + - HISTORY_NOT_AVAILABLE + - NOT_AVAILABLE + - GOOD + - CONCERNING + - BAD + maintenance_mode: + description: Whether maintenance mode is enabled for the Cloudera Manager service. + type: bool + returned: always + maintenance_owners: + description: List of objects that trigger the Cloudera Manager service to be in maintenance mode. + type: list + elements: str + returned: optional + sample: + - CLUSTER + - SERVICE + - ROLE + - HOST + - CONTROL_PLANE + name: + description: Name (identifier) of the Cloudera Manager service. + type: str + returned: always + role_config_groups: + description: List of role configuration groups for Cloudera Manager service. + type: list + elements: dict + returned: optional + contains: + base: + description: Whether the role config group is a base (default) group. + type: bool + returned: always + config: + description: Configuration for the role config group. + type: dict + returned: optional + display_name: + description: Display name for the role config group. + type: str + returned: always + name: + description: Name (identifier) of the role config group. + type: str + returned: always + role_type: + description: The type of roles in this group. + type: str + returned: always + service_name: + description: Name (identifier) of the associated service of the role config group. + type: str + returned: always + roles: + description: List of role instances for Cloudera Manager service. + type: list + elements: dict + returned: optional + contains: + commission_state: + description: Commission state of the Cloudera Manager service role. + type: str + returned: always + sample: + - COMMISSIONED + - DECOMMISSIONING + - DECOMMISSIONED + - UNKNOWN + - OFFLINING + - OFFLINED + config: + description: Role override configuration for the Cloudera Manager service. + type: dict + returned: optional + config_staleness_status: + description: Status of configuration staleness for the Cloudera Manager service role. + type: str + returned: always + sample: + - FRESH + - STALE_REFRESHABLE + - STALE + ha_status: + description: High-availability status for the Cloudera Manager service. + type: str + returned: optional + sample: + - ACTIVE + - STANDBY + - UNKNOWN health_checks: - description: Lists all available health checks for Cloudera Manager Service. - type: dict - returned: optional - role_instances_url: - description: Role instance url for Cloudera Manager Service. - type: str - returned: optional + description: List of all available health checks for Cloudera Manager service role. + type: list + elements: dict + returned: optional + contains: + explanation: + description: The explanation of this health check.
+ type: str + returned: optional + name: + description: Unique name of this health check. + type: str + returned: always + summary: + description: The high-level health status of the health check. + type: str + returned: always + sample: + - DISABLED + - HISTORY_NOT_AVAILABLE + - NOT_AVAILABLE + - GOOD + - CONCERNING + - BAD + suppressed: + description: + - Whether this health check is suppressed. + - A suppressed health check is not considered when computing the role's overall health. + type: bool + returned: optional + health_summary: + description: The high-level health status of the Cloudera Manager service role. + type: str + returned: always + sample: + - DISABLED + - HISTORY_NOT_AVAILABLE + - NOT_AVAILABLE + - GOOD + - CONCERNING + - BAD + host_id: + description: The unique ID of the cluster host. + type: str + returned: always maintenance_mode: - description: Maintance mode of Cloudera Manager Role. - type: bool - returned: optional + description: Whether the Cloudera Manager service role is in maintenance mode. + type: bool + returned: always maintenance_owners: - description: List of Maintance owners for Cloudera Manager Service. - type: list - returned: optional - entity_status: - description: Health status of entities for Cloudera Manager Role. - type: str - returned: optional + description: List of objects that trigger the Cloudera Manager service role to be in maintenance mode. + type: list + elements: str + returned: optional + sample: + - CLUSTER + - SERVICE + - ROLE + - HOST + - CONTROL_PLANE + name: + description: + - The Cloudera Manager service role name. + - Note, this is an auto-generated name and cannot be changed. + type: str + returned: always + role_config_group_name: + description: The name of the Cloudera Manager Service role config group, which uniquely identifies it in a Cloudera Manager installation. + type: str + returned: always + role_state: + description: State of the Cloudera Manager service role. + type: str + returned: always + sample: + - HISTORY_NOT_AVAILABLE + - UNKNOWN + - STARTING + - STARTED + - STOPPING + - STOPPED + - NA + service_name: + description: The name of the Cloudera Manager service, which uniquely identifies it in a deployment. + type: str + returned: always tags: - description: List of tags for Cloudera Manager Role. - type: list - returned: optional + description: Set of tags for the Cloudera Manager service role. + type: dict + returned: optional + type: + description: The Cloudera Manager service role type. + type: str + returned: always + sample: + - HOSTMONITOR + - ALERTPUBLISHER + - SERVICEMONITOR + - REPORTSMANAGER + - EVENTSERVER + zoo_keeper_server_mode: + description: + - The Zookeeper server mode for this Cloudera Manager service role. + - Note that for non-Zookeeper Server roles, this will be V(null). + type: str + returned: optional + service_state: + description: Run state of the Cloudera Manager service. + type: str + returned: always + sample: + - HISTORY_NOT_AVAILABLE + - UNKNOWN + - STARTING + - STARTED + - STOPPING + - STOPPED + - NA + service_version: + description: Version of Cloudera Manager service. + type: str + returned: always + tags: + description: List of tags for Cloudera Manager service. + type: list + returned: optional + type: + description: Type of the Cloudera Manager service, i.e. MGMT. 
+ type: str + returned: always + sample: + - MGMT """ +from collections.abc import Callable -class ClouderaService(ClouderaManagerModule): +from cm_client import ( + ApiBulkCommandList, + ApiCommand, + ApiConfigList, + ApiRoleList, + ApiRoleConfigGroup, + ApiService, + ApiServiceState, + MgmtRolesResourceApi, + MgmtRoleConfigGroupsResourceApi, + MgmtServiceResourceApi, +) +from cm_client.rest import ApiException + +from ansible.module_utils.common.text.converters import to_native + +from ansible_collections.cloudera.cluster.plugins.module_utils.cm_utils import ( + ClouderaManagerMutableModule, + ConfigListUpdates, +) +from ansible_collections.cloudera.cluster.plugins.module_utils.service_utils import ( + ServiceConfigUpdates, + parse_service_result, + read_cm_service, +) +from ansible_collections.cloudera.cluster.plugins.module_utils.role_config_group_utils import ( + get_mgmt_base_role_config_group, +) +from ansible_collections.cloudera.cluster.plugins.module_utils.role_utils import ( + create_role, +) + + +class ClouderaManagerService(ClouderaManagerMutableModule): def __init__(self, module): - super(ClouderaService, self).__init__(module) + super(ClouderaManagerService, self).__init__(module) - self.role = self.get_param("role") + # Set the parameters + self.maintenance = self.get_param("maintenance") + self.config = self.get_param("config") + self.role_config_groups = self.get_param("role_config_groups") + self.roles = self.get_param("roles") self.state = self.get_param("state") self.purge = self.get_param("purge") + + # Initialize the return value + self.changed = False + self.output = dict() + + if self.module._diff: + self.diff = dict(before=dict(), after=dict()) + self.before = dict() + self.after = dict() + else: + self.diff = dict() + + # Execute the logic self.process() - @ClouderaManagerModule.handle_process + @ClouderaManagerMutableModule.handle_process def process(self): + + service_api = MgmtServiceResourceApi(self.api_client) + role_api = MgmtRolesResourceApi(self.api_client) + rcg_api = MgmtRoleConfigGroupsResourceApi(self.api_client) + + current = None + + # Discover the CM service and retrieve its configured dependents try: - api_instance = MgmtServiceResourceApi(self.api_client) - role_api_instance = MgmtRolesResourceApi(self.api_client) - role_cmd_api_instance = MgmtRoleCommandsResourceApi(self.api_client) - mgmt_service_api_instance = MgmtServiceResourceApi(self.api_client) - host_api_instance = HostsResourceApi(self.api_client) - - get_host_infomation = host_api_instance.read_hosts().to_dict() - for item in get_host_infomation["items"]: - if self.host == item["hostname"]: - host_id = item["host_id"] - - if not self.purge: - available_roles_info = role_api_instance.read_roles().to_dict() - existing_roles = [] - for item in available_roles_info["items"]: - existing_roles.append(item["type"]) - - if self.state in ["present"]: - not_existing_roles = [] - for role in self.role: - if role not in existing_roles: - not_existing_roles.append(role) - if not_existing_roles: - body = { - "items": [ - {"type": role, "hostRef": {"hostId": host_id}} - for role in not_existing_roles - ] - } - role_api_instance.create_roles(body=body) - self.cm_service_output = role_api_instance.read_roles().to_dict() - self.changed = True + current = read_cm_service(self.api_client) + except ApiException as ex: + if ex.status != 404: + raise ex - elif self.state in ["absent"]: - roles_to_remove = [ - role for role in self.role if role in existing_roles - ] - roles_to_remove_extended_info = [] - for 
role in roles_to_remove: - for item in available_roles_info["items"]: - if role == item["type"]: - roles_to_remove_extended_info.append(item["name"]) - if not roles_to_remove_extended_info: - self.cm_service_output = ( - role_api_instance.read_roles().to_dict() - ) - self.changed = False + # If deleting, do so and exit + if self.state == "absent": + if current: + self.changed = True + + if self.module._diff: + self.before = parse_service_result(current) + + if not self.module.check_mode: + service_api.delete_cms() + + # Otherwise, manage the configurations of the service, its role config + # groups, its roles, and its state + elif self.state in ["present", "restarted", "started", "stopped"]: + # If it is a new service, create the initial service + if not current: + self.changed = True + new_service = ApiService(type="MGMT") + current = service_api.setup_cms(body=new_service) + current.config = service_api.read_service_config() + current.role_config_groups = [] + current.roles = [] + + # Handle maintenance mode + if ( + self.maintenance is not None + and self.maintenance != current.maintenance_mode + ): + self.changed = True + + if self.module._diff: + self.before.update(maintenance_mode=current.maintenance_mode) + self.after.update(maintenance_mode=self.maintenance) + + if not self.module.check_mode: + if self.maintenance: + maintenance_cmd = service_api.enter_maintenance_mode() else: - for role in roles_to_remove_extended_info: - role_api_instance.delete_role(role_name=role) - self.cm_service_output = ( - role_api_instance.read_roles().to_dict() + maintenance_cmd = service_api.exit_maintenance_mode() + + if maintenance_cmd.success is False: + self.module.fail_json( + msg=f"Unable to set Maintenance mode to '{self.maintenance}': {maintenance_cmd.result_message}" + ) + + # Handle service-wide changes + if self.config or self.purge: + if self.config is None: + self.config = dict() + + updates = ServiceConfigUpdates(current.config, self.config, self.purge) + + if updates.changed: + self.changed = True + + if self.module._diff: + self.before.update(config=updates.before) + self.after.update(config=updates.after) + + if not self.module.check_mode: + service_api.update_service_config( + message=self.message, body=updates.config ) + + # Manage role config groups (base only) + if self.role_config_groups or self.purge: + # Get existing role config groups (ApiRoleConfigGroup) + current_rcgs_map = { + rcg.role_type: rcg for rcg in current.role_config_groups + } + + # Get the incoming role config groups (dict) + if self.role_config_groups is None: + incoming_rcgs_map = dict() + else: + incoming_rcgs_map = { + rcg["type"]: rcg for rcg in self.role_config_groups + } + + # Create sets of each role config group by type + current_set = set(current_rcgs_map.keys()) + incoming_set = set(incoming_rcgs_map.keys()) + + # Update any existing role config groups + for rcg_type in current_set & incoming_set: + existing_rcg = current_rcgs_map[rcg_type] + incoming_rcg = incoming_rcgs_map[rcg_type] + + if incoming_rcg["config"] is None: + incoming_rcg["config"] = dict() + + # TODO Consolidate into util function; see cm_service_role_config_group:279-302 + payload = ApiRoleConfigGroup() + + # Update display name + incoming_display_name = incoming_rcg.get("display_name") + if ( + incoming_display_name is not None + and incoming_display_name != existing_rcg.display_name + ): self.changed = True - elif self.state in ["started"]: - - matching_roles = [] - new_roles = [] -
for role in self.role: - if role in existing_roles: - matching_roles.append(role) - else: - new_roles.append(role) - - new_roles_to_start = [] - if new_roles: - body = { - "items": [ - {"type": role, "hostRef": {"hostId": host_id}} - for role in new_roles - ] - } - newly_added_roles = role_api_instance.create_roles( - body=body - ).to_dict() - - for role in newly_added_roles["items"]: - new_roles_to_start.append(role["name"]) - body = {"items": new_roles_to_start} - - existing_roles_state = [] - for role in matching_roles: - for item in available_roles_info["items"]: - if role == item["type"]: - existing_roles_state.append( - { - "type": item["type"], - "role_state": item["role_state"].lower(), - "name": item["name"], - } - ) + payload.display_name = incoming_display_name - existing_roles_to_start = [] - for role in existing_roles_state: - if role["role_state"] == "stopped": - existing_roles_to_start.append(role["name"]) - - all_roles_to_start = new_roles_to_start + existing_roles_to_start - body = {"items": all_roles_to_start} - - if all_roles_to_start: - start_roles_request = role_cmd_api_instance.start_command( - body=body - ).to_dict() - command_id = start_roles_request["items"][0]["id"] - self.wait_for_command_state( - command_id=command_id, polling_interval=5 + # Reconcile configurations + if existing_rcg.config or self.purge: + updates = ConfigListUpdates( + existing_rcg.config, incoming_rcg["config"], self.purge + ) + + if updates.changed: + self.changed = True + + if self.module._diff: + rcg_diff = dict(before=dict(), after=dict()) + rcg_diff["before"].update(config=updates.diff["before"]) + rcg_diff["after"].update(config=updates.diff["after"]) + + payload.config = updates.config + + # Execute changes if needed + if ( + payload.display_name is not None or payload.config is not None + ) and not self.module.check_mode: + rcg_api.update_role_config_group( + existing_rcg.name, message=self.message, body=payload ) - self.cm_service_output = ( - role_api_instance.read_roles().to_dict() + + # Add any new role config groups + for rcg_type in incoming_set - current_set: + self.changed = True + + if self.module._diff: + rcg_diff = dict(before=dict(), after=dict()) + + existing_rcg = get_mgmt_base_role_config_group( + self.api_client, rcg_type + ) + incoming_rcg = incoming_rcgs_map[rcg_type] + + payload = ApiRoleConfigGroup() + + incoming_display_name = incoming_rcg.get("display_name") + if incoming_display_name is not None: + if self.module._diff: + rcg_diff["before"].update( + display_name=existing_rcg.display_name + ) + rcg_diff["after"].update(display_name=incoming_display_name) + payload.display_name = incoming_display_name + + incoming_rcg_config = incoming_rcg.get("config") + if incoming_rcg_config: + updates = ConfigListUpdates( + existing_rcg.config, incoming_rcg_config, self.purge ) - self.cm_service_output = ( - role_api_instance.read_roles().to_dict() + + if self.module._diff: + rcg_diff["before"].update(config=updates.diff["before"]) + rcg_diff["after"].update(config=updates.diff["after"]) + + payload.config = updates.config + else: + payload.config = ApiConfigList() + + if not self.module.check_mode: + rcg_api.update_role_config_group( + existing_rcg.name, message=self.message, body=payload ) + + # Remove any undeclared role config groups + if self.purge: + for rcg_type in current_set - incoming_set: self.changed = True - else: - self.cm_service_output = ( - role_api_instance.read_roles().to_dict() + + if self.module._diff: + rcg_diff = dict(before=dict(), after=dict()) + + existing_rcg = get_mgmt_base_role_config_group( + self.api_client, rcg_type ) - self.changed = False - - elif self.state in ["stopped"]: - matching_roles = [] - for
role in self.role: - if role in existing_roles: - matching_roles.append(role) - - matching_roles_state = [] - for role in matching_roles: - for item in available_roles_info["items"]: - if role == item["type"]: - matching_roles_state.append( - { - "type": item["type"], - "role_state": item["role_state"].lower(), - "name": item["name"], - } - ) roles_to_stop = [] - for role in matching_roles_state: - if role["role_state"] == "started": - roles_to_stop.append(role["name"]) - body = {"items": roles_to_stop} + payload = ApiRoleConfigGroup( + display_name=f"mgmt-{rcg_type}-BASE" + ) - if roles_to_stop: - role_cmd_api_instance.stop_command(body=body) - self.cm_service_output = ( - role_api_instance.read_roles().to_dict() + updates = ConfigListUpdates( + existing_rcg.config, dict(), self.purge ) + + if self.module._diff: + rcg_diff["before"].update(config=updates.diff["before"]) + rcg_diff["after"].update(config=updates.diff["after"]) + + payload.config = updates.config + + if not self.module.check_mode: + rcg_api.update_role_config_group( + existing_rcg.name, message=self.message, body=payload + ) + + # Manage roles + if self.roles or self.purge: + # Get existing roles (ApiRole) + current_roles_map = {r.type: r for r in current.roles} + + # Get incoming roles (dict) + if self.roles is None: + incoming_roles_map = dict() + else: + incoming_roles_map = {r["type"]: r for r in self.roles} + + # Create sets of the roles + current_set = set(current_roles_map.keys()) + incoming_set = set(incoming_roles_map.keys()) + + # Update any existing roles + for role_type in current_set & incoming_set: + existing_role = current_roles_map[role_type] + incoming_role = incoming_roles_map[role_type] + + if incoming_role["config"] is None: + incoming_role["config"] = dict() + + # If the host has changed, destroy and rebuild completely + incoming_hostname = incoming_role.get("cluster_hostname") + incoming_host_id = incoming_role.get("cluster_host_id") + if ( + incoming_hostname is not None + and incoming_hostname != existing_role.host_ref.hostname + ) or ( + incoming_host_id is not None + and incoming_host_id != existing_role.host_ref.host_id + ): self.changed = True + + # Use the new configuration or copy from the existing + new_config = ( + incoming_role["config"] + if incoming_role["config"] + else {c.name: c.value for c in existing_role.config.items} + ) + + new_role = create_role( + api_client=self.api_client, + role_type=existing_role.type, + hostname=incoming_hostname, + host_id=incoming_host_id, + config=new_config, + ) + + if not self.module.check_mode: + role_api.delete_role(existing_role.name) + + rebuilt_role = next( + ( + iter( + role_api.create_roles( + body=ApiRoleList(items=[new_role]) + ).items + ) + ), + {}, + ) + if not rebuilt_role: + self.module.fail_json( + msg="Unable to recreate role, " + + existing_role.name, + role=to_native(new_role.to_dict()), + ) + + # Else address any updates + else: + updates = ConfigListUpdates( + existing_role.config, + incoming_role["config"], + self.purge, + ) + + if
updates.changed: + self.changed = True + + if not self.module.check_mode: + role_api.update_role_config( + role_name=existing_role.name, + message=self.message, + body=updates.config, ) - roles_to_restart = [] - for role in matching_roles_state: - roles_to_restart.append(role["name"]) - body = {"items": roles_to_restart} + # Add any new roles + for role_type in incoming_set - current_set: + self.changed = True - if roles_to_restart: - role_cmd_api_instance.restart_command(body=body) - self.cm_service_output = ( - role_api_instance.read_roles().to_dict() + incoming_role = incoming_roles_map[role_type] + + new_role = create_role( + api_client=self.api_client, + role_type=incoming_role.get("type"), + hostname=incoming_role.get("cluster_hostname"), + host_id=incoming_role.get("cluster_host_id"), + config=incoming_role.get("config"), + ) + + if not self.module.check_mode: + created_role = next( + ( + iter( + role_api.create_roles( + body=ApiRoleList(items=[new_role]) + ).items + ) + ), + {}, ) + if not created_role: + self.module.fail_json( + msg="Unable to create new role", + role=to_native(new_role.to_dict()), + ) + + # Remove any undeclared roles if directed + if self.purge: + for role_type in current_set - incoming_set: self.changed = True - if self.purge: - mgmt_service_api_instance.delete_cms() - body = {"roles": [{"type": role} for role in self.role]} - mgmt_service_api_instance.setup_cms(body=body) - self.cm_service_output = role_api_instance.read_roles().to_dict() - - if self.state in ["started"]: - start_roles_request = api_instance.start_command().to_dict() - command_id = start_roles_request["id"] - self.wait_for_command_state( - command_id=command_id, polling_interval=5 - ) - self.cm_service_output = role_api_instance.read_roles().to_dict() - self.changed = True + existing_role = current_roles_map[role_type] - except ApiException as e: - if e.status == 404 or 400: - roles_dict = {"roles": [{"type": role} for role in self.role]} - api_instance.setup_cms(body=roles_dict) + if not self.module.check_mode: + role_api.delete_role(existing_role.name) - if self.state in ["started"]: - start_roles_request = api_instance.start_command().to_dict() - command_id = start_roles_request["id"] - self.wait_for_command_state( - command_id=command_id, polling_interval=5 - ) - self.cm_service_output = role_api_instance.read_roles().to_dict() - else: - self.cm_service_output = role_api_instance.read_roles().to_dict() - self.changed = True + # Handle various states + if self.state == "started" and current.service_state not in [ + ApiServiceState.STARTED + ]: + self.exec_service_command( + current, ApiServiceState.STARTED, service_api.start_command + ) + elif self.state == "stopped" and current.service_state not in [ + ApiServiceState.STOPPED, + ApiServiceState.NA, + ]: + self.exec_service_command( + current, ApiServiceState.STOPPED, service_api.stop_command + ) + elif self.state == "restarted": + self.exec_service_command( + current, ApiServiceState.STARTED, service_api.restart_command + ) + + # If there are changes, get a fresh read + if self.changed: + refresh = read_cm_service(self.api_client) + self.output = parse_service_result(refresh) + # Otherwise, return the existing + else: + self.output = parse_service_result(current) + else: + self.module.fail_json(msg=f"Invalid state: {self.state}") + + def exec_service_command( + self, service: ApiService, value: str, cmd: Callable[[], ApiCommand] + ): + self.changed = True + if self.module._diff: +
self.diff["before"].update(service_state=service.service_state) + self.diff["after"].update(service_state=value) + + if not self.module.check_mode: + self.wait_command(cmd()) + + def handle_commands(self, commands: ApiBulkCommandList): + if commands.errors: + error_msg = "\n".join(commands.errors) + self.module.fail_json(msg=error_msg) + + for c in commands.items: + # Not in parallel, but should only be a single command + self.wait_command(c) def main(): - module = ClouderaManagerModule.ansible_module( + module = ClouderaManagerMutableModule.ansible_module( argument_spec=dict( - role=dict(required=True, type="list"), - purge=dict(required=False, type="bool", default=False), + config=dict(type="dict", aliases=["params", "parameters"]), + role_config_groups=dict( + type="list", + elements="dict", + options=dict( + display_name=dict(), # TODO Remove display_name as an option + type=dict(required=True, aliases=["role_type"]), + config=dict( + required=True, type="dict", aliases=["params", "parameters"] + ), + ), + ), + roles=dict( + type="list", + elements="dict", + options=dict( + cluster_hostname=dict(aliases=["cluster_host"]), + cluster_host_id=dict(), + config=dict(type="dict", aliases=["params", "parameters"]), + type=dict(required=True, aliases=["role_type"]), + ), + mutually_exclusive=[["cluster_hostname", "cluster_host_id"]], + ), + maintenance=dict(type="bool", aliases=["maintenance_mode"]), + purge=dict(type="bool", default=False), state=dict( type="str", - default="started", + default="present", choices=["started", "stopped", "absent", "present", "restarted"], ), ), - supports_check_mode=False, + supports_check_mode=True, ) - result = ClouderaService(module) - - changed = result.changed + result = ClouderaManagerService(module) output = dict( - changed=changed, - cloudera_manager=result.cm_service_output, + changed=result.changed, + service=result.output, ) if result.debug: diff --git a/plugins/modules/cm_service_info.py b/plugins/modules/cm_service_info.py index 4c72d84f..6d67e19e 100644 --- a/plugins/modules/cm_service_info.py +++ b/plugins/modules/cm_service_info.py @@ -1,4 +1,7 @@ -# Copyright 2024 Cloudera, Inc. All Rights Reserved. +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright 2025 Cloudera, Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,34 +15,31 @@ # See the License for the specific language governing permissions and # limitations under the License. -from ansible_collections.cloudera.cluster.plugins.module_utils.cm_utils import ( - ClouderaManagerModule, -) - -from cm_client import MgmtServiceResourceApi - -ANSIBLE_METADATA = { - "metadata_version": "1.1", - "status": ["preview"], - "supported_by": "community", -} - DOCUMENTATION = r""" ---- module: cm_service_info -short_description: Retrieve information about the Cloudera Management Services +short_description: Retrieve information about the Cloudera Management service description: - Gather information about the Cloudera Manager service. 
 author:
-  - "Ronald Suplina (@rsuplina)"
+  - Ronald Suplina (@rsuplina)
+  - Webster Mudge (@wmudge)
+extends_documentation_fragment:
+  - cloudera.cluster.cm_options
+  - cloudera.cluster.cm_endpoint
+attributes:
+  check_mode:
+    support: full
 requirements:
-  - cm_client
+  - cm-client
+seealso:
+  - module: cloudera.cluster.cm_service
+  - module: cloudera.cluster.cm_service_role
+  - module: cloudera.cluster.cm_service_role_config_group
 """

 EXAMPLES = r"""
----
-- name: Gather details using an host
-  cloudera.cluster.cm_version:
+- name: Gather details of the Cloudera Manager service
+  cloudera.cluster.cm_service_info:
     host: "example.cloudera.host"
     username: "will_jordan"
     password: "S&peR4Ec*re"
@@ -47,116 +47,330 @@
 """

 RETURN = r"""
----
-cloudera_manager:
-  description: Details about Cloudera Manager Service
-  type: dict
-  contains:
+service:
+  description: The Cloudera Manager service.
+  type: dict
+  contains:
+    client_config_staleness_status:
+      description: Status of client configuration for the Cloudera Manager service.
+      type: str
+      returned: optional
+    cluster_name:
+      description: The associated cluster name.
+      type: str
+      returned: optional
+    config:
+      description: Service-wide configuration for the Cloudera Manager service.
+      type: dict
+      returned: optional
+    config_staleness_status:
+      description: Status of configuration staleness for the Cloudera Manager service.
+      type: str
+      returned: optional
+      sample:
+        - FRESH
+        - STALE_REFRESHABLE
+        - STALE
+    display_name:
+      description: Display name of the Cloudera Manager service.
+      type: str
+      returned: always
+    health_checks:
+      description: Lists all available health checks for the Cloudera Manager service.
+      type: list
+      elements: dict
+      returned: optional
+      contains:
+        explanation:
+          description: A descriptor for the health check.
+          type: str
+          returned: optional
         name:
-          description: The Cloudera Manager service name.
-          type: str
-          returned: optional
-        type:
-          description: The Cloudera Manager service type.
-          type: str
-          returned: optional
-        cluster_ref:
-          description: Reference to a cluster.
-          type: str
-          returned: optional
-        service_state:
-          description: State of the Cloudera Manager Service.
-          type: str
-          returned: optional
-        health_summary:
-          description: Health of the Cloudera Manager Service.
-          type: str
-          returned: optional
-        config_stale:
-          description: Configuration state of Cloudera Manager Service.
-          type: str
-          returned: optional
+          description: Unique name for the health check.
+          type: str
+          returned: always
+        summary:
+          description: The summary status of the health check.
+          type: str
+          returned: always
+          sample:
+            - DISABLED
+            - HISTORY_NOT_AVAILABLE
+            - NOT_AVAILABLE
+            - GOOD
+            - CONCERNING
+            - BAD
+        suppressed:
+          description:
+            - Whether the health check is suppressed.
+            - A suppressed health check is not considered when computing the overall health.
+          type: bool
+          returned: always
+    health_summary:
+      description: Health of the Cloudera Manager service.
+      type: str
+      returned: always
+      sample:
+        - DISABLED
+        - HISTORY_NOT_AVAILABLE
+        - NOT_AVAILABLE
+        - GOOD
+        - CONCERNING
+        - BAD
+    maintenance_mode:
+      description: Whether maintenance mode is enabled for the Cloudera Manager service.
+      type: bool
+      returned: always
+    maintenance_owners:
+      description: List of objects that trigger the Cloudera Manager service to be in maintenance mode.
+ type: list + elements: str + returned: optional + sample: + - CLUSTER + - SERVICE + - ROLE + - HOST + - CONTROL_PLANE + name: + description: Name (identifier) of the Cloudera Manager service. + type: str + returned: always + role_config_groups: + description: List of role configuration groups for Cloudera Manager service. + type: list + elements: dict + returned: optional + contains: + base: + description: Whether the role config group is a base (default) group. + type: bool + returned: always + config: + description: Configuration for the role config group. + type: dict + returned: optional + display_name: + description: Display name for the role config group. + type: str + returned: always + name: + description: Name (identifier) of the role config group. + type: str + returned: always + role_type: + description: The type of roles in this group. + type: str + returned: always + service_name: + description: Name (identifier) of the associated service of the role config group. + type: str + returned: always + roles: + description: List of role instances for Cloudera Manager service. + type: list + elements: dict + returned: optional + contains: + commission_state: + description: Commission state of the Cloudera Manager service role. + type: str + returned: always + sample: + - COMMISSIONED + - DECOMMISSIONING + - DECOMMISSIONED + - UNKNOWN + - OFFLINING + - OFFLINED + config: + description: Role override configuration for the Cloudera Manager service. + type: dict + returned: optional config_staleness_status: - description: Status of configuration staleness for Cloudera Manager Service. - type: str - returned: optional - client_config_staleness_status: - description: Status of Client configuration for Cloudera Manager Service. - type: str - returned: optional + description: Status of configuration staleness for the Cloudera Manager service role. + type: str + returned: always + sample: + - FRESH + - STALE_REFRESHABLE + - STALE + ha_status: + description: High-availability status for the Cloudera Manager service. + type: str + returned: optional + sample: + - ACTIVE + - STANDBY + - UNKNOWN health_checks: - description: Lists all available health checks for Cloudera Manager Service. - type: dict - returned: optional - service_url: - description: Service url for Cloudera Manager Service. - type: str - returned: optional - role_instances_url: - description: Role instance url for Cloudera Manager Service. - type: str - returned: optional + description: List of all available health checks for Cloudera Manager service role. + type: list + elements: dict + returned: optional + contains: + explanation: + description: The explanation of this health check. + type: str + returned: optional + name: + description: Unique name of this health check. + type: str + returned: always + summary: + description: The high-level health status of the health check. + type: str + returned: always + sample: + - DISABLED + - HISTORY_NOT_AVAILABLE + - NOT_AVAILABLE + - GOOD + - CONCERNING + - BAD + suppressed: + description: + - Whether this health check is suppressed. + - A suppressed health check is not considered when computing the role's overall health. + type: bool + returned: optional + health_summary: + description: The high-level health status of the Cloudera Manager service role. + type: str + returned: always + sample: + - DISABLED + - HISTORY_NOT_AVAILABLE + - NOT_AVAILABLE + - GOOD + - CONCERNING + - BAD + host_id: + description: The unique ID of the cluster host. 
+ type: str + returned: always maintenance_mode: - description: Maintance mode of Cloudera Manager Service. - type: bool - returned: optional + description: Whether the Cloudera Manager service role is in maintenance mode. + type: bool + returned: always maintenance_owners: - description: List of Maintance owners for Cloudera Manager Service. - type: list - returned: optional - config: - description: Configuration details for Cloudera Manager Service. - type: dict - returned: optional - roles: - description: Role list of Cloudera Manager Service. - type: dict - returned: optional - display_name: - description: Display name of Cloudera Manager Service. - type: dict - returned: optional - role_config_groups: - description: List of role configuration groups for Cloudera Manager Service. - type: list - returned: optional - replication_schedules: - description: List of replication schedules for Cloudera Manager Service. - type: list - returned: optional - snapshot_policies: - description: Snapshot policy for Cloudera Manager Service. - type: str - returned: optional - entity_status: - description: Health status of entities for Cloudera Manager Service. - type: str - returned: optional + description: List of objects that trigger the Cloudera Manager service role to be in maintenance mode. + type: list + elements: str + returned: optional + sample: + - CLUSTER + - SERVICE + - ROLE + - HOST + - CONTROL_PLANE + name: + description: + - The Cloudera Manager service role name. + - Note, this is an auto-generated name and cannot be changed. + type: str + returned: always + role_config_group_name: + description: The name of the Cloudera Manager Service role config group, which uniquely identifies it in a Cloudera Manager installation. + type: str + returned: always + role_state: + description: State of the Cloudera Manager service role. + type: str + returned: always + sample: + - HISTORY_NOT_AVAILABLE + - UNKNOWN + - STARTING + - STARTED + - STOPPING + - STOPPED + - NA + service_name: + description: The name of the Cloudera Manager service, which uniquely identifies it in a deployment. + type: str + returned: always tags: - description: List of tags for Cloudera Manager Service. - type: list - returned: optional - service_version: - description: Version of Cloudera Manager Service. - type: str - returned: optional + description: Set of tags for the Cloudera Manager service role. + type: dict + returned: optional + type: + description: The Cloudera Manager service role type. + type: str + returned: always + sample: + - HOSTMONITOR + - ALERTPUBLISHER + - SERVICEMONITOR + - REPORTSMANAGER + - EVENTSERVER + zoo_keeper_server_mode: + description: + - The Zookeeper server mode for this Cloudera Manager service role. + - Note that for non-Zookeeper Server roles, this will be V(null). + type: str + returned: optional + service_state: + description: Run state of the Cloudera Manager service. + type: str + returned: always + sample: + - HISTORY_NOT_AVAILABLE + - UNKNOWN + - STARTING + - STARTED + - STOPPING + - STOPPED + - NA + service_version: + description: Version of Cloudera Manager service. + type: str + returned: always + tags: + description: List of tags for Cloudera Manager service. + type: list + returned: optional + type: + description: Type of the Cloudera Manager service, i.e. MGMT. 
+      type: str
+      returned: always
+      sample:
+        - MGMT
 """

+from cm_client.rest import ApiException
+
+from ansible_collections.cloudera.cluster.plugins.module_utils.cm_utils import (
+    ClouderaManagerModule,
+)
+from ansible_collections.cloudera.cluster.plugins.module_utils.service_utils import (
+    parse_service_result,
+    read_cm_service,
+)
+
+
 class ClouderaServiceInfo(ClouderaManagerModule):
     def __init__(self, module):
         super(ClouderaServiceInfo, self).__init__(module)

         # Initialize the return values
-        self.cm_service_info = dict()
+        self.output = dict()

         # Execute the logic
         self.process()

     @ClouderaManagerModule.handle_process
     def process(self):
-        api_instance = MgmtServiceResourceApi(self.api_client)
-        self.cm_service_info = api_instance.read_service().to_dict()
+        result = None
+        try:
+            result = read_cm_service(self.api_client)
+        except ApiException as ex:
+            if ex.status != 404:
+                raise ex
+
+        if result is not None:
+            self.output = parse_service_result(result)


 def main():
@@ -166,7 +380,7 @@ def main():

     output = dict(
         changed=False,
-        cloudera_manager=result.cm_service_info,
+        service=result.output,
     )

     if result.debug:
diff --git a/plugins/modules/cm_service_role.py b/plugins/modules/cm_service_role.py
new file mode 100644
index 00000000..69feda02
--- /dev/null
+++ b/plugins/modules/cm_service_role.py
@@ -0,0 +1,632 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright 2025 Cloudera, Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+DOCUMENTATION = r"""
+module: cm_service_role
+short_description: Manage a Cloudera Manager Service role
+description:
+  - Manage a Cloudera Manager Service role.
+author:
+  - Webster Mudge (@wmudge)
+options:
+  cluster_hostname:
+    description:
+      - The hostname of an instance for the role.
+      - If the hostname is different from the existing host for the O(type), the role will be destroyed and rebuilt on the declared host.
+      - Mutually exclusive with O(cluster_host_id).
+    type: str
+    aliases:
+      - cluster_host
+  cluster_host_id:
+    description:
+      - The host ID of an instance for the role.
+      - If the host ID is different from the existing host for the O(type), the role will be destroyed and rebuilt on the declared host.
+      - Mutually exclusive with O(cluster_hostname).
+    type: str
+  type:
+    description:
+      - A role type for the role.
+    type: str
+    required: True
+    aliases:
+      - role_type
+  config:
+    description:
+      - The role configuration to set, i.e. role overrides, for the instance.
+      - To unset a parameter, use V(None) as the value.
+    type: dict
+    aliases:
+      - params
+      - parameters
+  maintenance:
+    description:
+      - Flag for whether the role should be in maintenance mode.
+    type: bool
+    aliases:
+      - maintenance_mode
+  purge:
+    description:
+      - Flag for whether the declared role configurations should append or overwrite any existing configurations.
+      - To clear all role configurations, set O(config={}), i.e. an empty dictionary, or omit entirely, and set O(purge=True).
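+      - See the examples below for append, purge, and reset patterns for role configurations.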
+    type: bool
+    default: False
+  state:
+    description:
+      - The state of the role.
+      - Note, if the declared state is invalid for the role, the module will return an error.
+      - Note, V(restarted) always forces a change of state of the role.
+    type: str
+    default: present
+    choices:
+      - present
+      - absent
+      - restarted
+      - started
+      - stopped
+extends_documentation_fragment:
+  - cloudera.cluster.cm_options
+  - cloudera.cluster.cm_endpoint
+  - cloudera.cluster.message
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: full
+  platform:
+    platforms: all
+requirements:
+  - cm-client
+seealso:
+  - module: cloudera.cluster.cm_service
+  - module: cloudera.cluster.cm_service_role_config_group
+"""
+
+EXAMPLES = r"""
+- name: Establish a Cloudera Manager Service role
+  cloudera.cluster.cm_service_role:
+    host: example.cloudera.com
+    username: "jane_smith"
+    password: "S&peR4Ec*re"
+    type: HOSTMONITOR
+    cluster_hostname: worker-01.cloudera.internal
+
+- name: Set a Cloudera Manager Service role to maintenance mode
+  cloudera.cluster.cm_service_role:
+    host: example.cloudera.com
+    username: "jane_smith"
+    password: "S&peR4Ec*re"
+    type: HOSTMONITOR
+    maintenance: yes
+
+- name: Update (append) role configurations to a Cloudera Manager Service role
+  cloudera.cluster.cm_service_role:
+    host: example.cloudera.com
+    username: "jane_smith"
+    password: "S&peR4Ec*re"
+    type: HOSTMONITOR
+    config:
+      some_config: value_one
+      another_config: value_two
+
+- name: Set (purge) role configurations to a Cloudera Manager Service role
+  cloudera.cluster.cm_service_role:
+    host: example.cloudera.com
+    username: "jane_smith"
+    password: "S&peR4Ec*re"
+    type: HOSTMONITOR
+    config:
+      yet_another_config: value_three
+    purge: yes
+
+- name: Remove all role configurations on a Cloudera Manager Service role
+  cloudera.cluster.cm_service_role:
+    host: example.cloudera.com
+    username: "jane_smith"
+    password: "S&peR4Ec*re"
+    type: HOSTMONITOR
+    purge: yes
+
+- name: Start a Cloudera Manager Service role
+  cloudera.cluster.cm_service_role:
+    host: example.cloudera.com
+    username: "jane_smith"
+    password: "S&peR4Ec*re"
+    type: HOSTMONITOR
+    state: started
+
+- name: Force a restart of a Cloudera Manager Service role
+  cloudera.cluster.cm_service_role:
+    host: example.cloudera.com
+    username: "jane_smith"
+    password: "S&peR4Ec*re"
+    type: HOSTMONITOR
+    state: restarted
+
+- name: Remove a Cloudera Manager Service role
+  cloudera.cluster.cm_service_role:
+    host: example.cloudera.com
+    username: "jane_smith"
+    password: "S&peR4Ec*re"
+    type: HOSTMONITOR
+    state: absent
+"""
+
+RETURN = r"""
+role:
+  description: Details about the Cloudera Manager service role.
+  type: dict
+  returned: always
+  contains:
+    commission_state:
+      description: Commission state of the Cloudera Manager service role.
+      type: str
+      returned: always
+      sample:
+        - COMMISSIONED
+        - DECOMMISSIONING
+        - DECOMMISSIONED
+        - UNKNOWN
+        - OFFLINING
+        - OFFLINED
+    config:
+      description: Role override configuration for the Cloudera Manager service.
+      type: dict
+      returned: optional
+    config_staleness_status:
+      description: Status of configuration staleness for the Cloudera Manager service role.
+      type: str
+      returned: always
+      sample:
+        - FRESH
+        - STALE_REFRESHABLE
+        - STALE
+    ha_status:
+      description: High-availability status for the Cloudera Manager service.
+      type: str
+      returned: optional
+      sample:
+        - ACTIVE
+        - STANDBY
+        - UNKNOWN
+    health_checks:
+      description: List of all available health checks for Cloudera Manager service role.
+ type: list + elements: dict + returned: optional + contains: + explanation: + description: The explanation of this health check. + type: str + returned: optional + name: + description: Unique name of this health check. + type: str + returned: always + summary: + description: The high-level health status of the health check. + type: str + returned: always + sample: + - DISABLED + - HISTORY_NOT_AVAILABLE + - NOT_AVAILABLE + - GOOD + - CONCERNING + - BAD + suppressed: + description: + - Whether this health check is suppressed. + - A suppressed health check is not considered when computing the role's overall health. + type: bool + returned: optional + health_summary: + description: The high-level health status of the Cloudera Manager service role. + type: str + returned: always + sample: + - DISABLED + - HISTORY_NOT_AVAILABLE + - NOT_AVAILABLE + - GOOD + - CONCERNING + - BAD + host_id: + description: The unique ID of the cluster host. + type: str + returned: always + maintenance_mode: + description: Whether the Cloudera Manager service role is in maintenance mode. + type: bool + returned: always + maintenance_owners: + description: List of objects that trigger the Cloudera Manager service role to be in maintenance mode. + type: list + elements: str + returned: optional + sample: + - CLUSTER + - SERVICE + - ROLE + - HOST + - CONTROL_PLANE + name: + description: + - The Cloudera Manager service role name. + - Note, this is an auto-generated name and cannot be changed. + type: str + returned: always + role_config_group_name: + description: The name of the Cloudera Manager Service role config group, which uniquely identifies it in a Cloudera Manager installation. + type: str + returned: always + role_state: + description: State of the Cloudera Manager service role. + type: str + returned: always + sample: + - HISTORY_NOT_AVAILABLE + - UNKNOWN + - STARTING + - STARTED + - STOPPING + - STOPPED + - NA + service_name: + description: The name of the Cloudera Manager service, which uniquely identifies it in a deployment. + type: str + returned: always + tags: + description: Set of tags for the Cloudera Manager service role. + type: dict + returned: optional + type: + description: The Cloudera Manager service role type. + type: str + returned: always + sample: + - HOSTMONITOR + - ALERTPUBLISHER + - SERVICEMONITOR + - REPORTSMANAGER + - EVENTSERVER + zoo_keeper_server_mode: + description: + - The Zookeeper server mode for this Cloudera Manager service role. + - Note that for non-Zookeeper Server roles, this will be V(null). 
+ type: str + returned: optional +""" + +from collections.abc import Callable + +from cm_client import ( + ApiBulkCommandList, + ApiCommand, + ApiRole, + ApiRoleList, + ApiRoleNameList, + ApiRoleState, + MgmtRolesResourceApi, + MgmtRoleCommandsResourceApi, + MgmtServiceResourceApi, +) +from cm_client.rest import ApiException + +from ansible.module_utils.common.text.converters import to_native + +from ansible_collections.cloudera.cluster.plugins.module_utils.cm_utils import ( + ClouderaManagerMutableModule, + ConfigListUpdates, +) +from ansible_collections.cloudera.cluster.plugins.module_utils.role_utils import ( + create_role, + parse_role_result, + read_cm_role, +) + + +class ClouderaManagerServiceRole(ClouderaManagerMutableModule): + def __init__(self, module): + super(ClouderaManagerServiceRole, self).__init__(module) + + # Set the parameters + self.cluster_hostname = self.get_param("cluster_hostname") + self.cluster_host_id = self.get_param("cluster_host_id") + self.config = self.get_param("config") + self.maintenance = self.get_param("maintenance") + self.type = self.get_param("type") + self.state = self.get_param("state") + self.purge = self.get_param("purge") + + # Initialize the return values + self.changed = False + self.diff = dict(before={}, after={}) + self.output = dict() + + # Execute the logic + self.process() + + @ClouderaManagerMutableModule.handle_process + def process(self): + + service_api = MgmtServiceResourceApi(self.api_client) + role_api = MgmtRolesResourceApi(self.api_client) + role_cmd_api = MgmtRoleCommandsResourceApi(self.api_client) + + # Confirm that CMS is present + try: + service_api.read_service() + except ApiException as ex: + if ex.status == 404: + self.module.fail_json(msg="Cloudera Management service does not exist") + else: + raise ex + + current = None + + # Discover the role by its type + try: + current = read_cm_role(api_client=self.api_client, role_type=self.type) + except ApiException as ex: + if ex.status != 404: + raise ex + + # If deleting, do so and exit + if self.state == "absent": + if current: + self.deprovision_role(role_api, current) + + # Otherwise, manage the configuration and state + elif self.state in ["present", "restarted", "started", "stopped"]: + # If it is a new role + if not current: + new_role = create_role( + api_client=self.api_client, + role_type=self.type, + hostname=self.cluster_hostname, + host_id=self.cluster_host_id, + config=self.config, + ) + current = self.provision_role(role_api, new_role) + self.handle_maintenance(role_api, current) + # Else if it exists, but the host has changed, destroy and rebuild completely + elif ( + current + and ( + self.cluster_hostname is not None + and self.cluster_hostname != current.host_ref.hostname + ) + or ( + self.cluster_host_id is not None + and self.cluster_host_id != current.host_ref.host_id + ) + ): + if self.config: + new_config = self.config + else: + new_config = {c.name: c.value for c in current.config.items} + + new_role = create_role( + api_client=self.api_client, + role_type=current.type, + hostname=self.cluster_hostname, + host_id=self.cluster_host_id, + config=new_config, + ) + current = self.reprovision_role(role_api, current, new_role) + self.handle_maintenance(role_api, current) + # Else it exists, so address any changes + else: + self.handle_maintenance(role_api, current) + + # Handle role override configurations + if self.config or self.purge: + if self.config is None: + self.config = dict() + + updates = ConfigListUpdates(current.config, self.config, self.purge) 
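+                # ConfigListUpdates (from module_utils.cm_utils) resolves the
+                # delta between the current role overrides and the declared
+                # config, honoring purge. For example,
+                # ConfigListUpdates(current.config, {"x": "1"}, False) exposes
+                # a changed flag, a before/after diff, and an ApiConfigList
+                # payload (.config) for the update call below.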
+
+                if updates.changed:
+                    self.changed = True
+
+                    if self.module._diff:
+                        self.diff["before"].update(config=updates.diff["before"])
+                        self.diff["after"].update(config=updates.diff["after"])
+
+                    if not self.module.check_mode:
+                        role_api.update_role_config(
+                            current.name,
+                            message=self.message,
+                            body=updates.config,
+                        )
+
+            # Handle the various states
+            if self.state == "started" and current.role_state not in [
+                ApiRoleState.STARTED
+            ]:
+                self.exec_role_command(
+                    current, ApiRoleState.STARTED, role_cmd_api.start_command
+                )
+            elif self.state == "stopped" and current.role_state not in [
+                ApiRoleState.STOPPED,
+                ApiRoleState.NA,
+            ]:
+                self.exec_role_command(
+                    current, ApiRoleState.STOPPED, role_cmd_api.stop_command
+                )
+            elif self.state == "restarted":
+                self.exec_role_command(
+                    current, ApiRoleState.STARTED, role_cmd_api.restart_command
+                )
+
+            # If there are changes, get a fresh read
+            if self.changed:
+                refresh = role_api.read_role(current.name)
+                refresh.config = role_api.read_role_config(current.name)
+                self.output = parse_role_result(refresh)
+            # Otherwise return the existing
+            else:
+                self.output = parse_role_result(current)
+        else:
+            self.module.fail_json(msg=f"Invalid state: {self.state}")
+
+    def exec_role_command(
+        self, role: ApiRole, value: str, cmd: Callable[[ApiRoleNameList], ApiCommand]
+    ):
+        self.changed = True
+        if self.module._diff:
+            self.diff["before"].update(role_state=role.role_state)
+            self.diff["after"].update(role_state=value)
+
+        if not self.module.check_mode:
+            self.handle_commands(cmd(body=ApiRoleNameList(items=[role.name])))
+
+    def handle_maintenance(self, role_api: MgmtRolesResourceApi, role: ApiRole) -> None:
+        if self.maintenance is not None and self.maintenance != role.maintenance_mode:
+            self.changed = True
+
+            if self.module._diff:
+                self.diff["before"].update(maintenance_mode=role.maintenance_mode)
+                self.diff["after"].update(maintenance_mode=self.maintenance)
+
+            if not self.module.check_mode:
+                if self.maintenance:
+                    maintenance_cmd = role_api.enter_maintenance_mode(role.name)
+                else:
+                    maintenance_cmd = role_api.exit_maintenance_mode(role.name)
+
+                if maintenance_cmd.success is False:
+                    self.module.fail_json(
+                        msg=f"Unable to set maintenance mode to '{self.maintenance}': {maintenance_cmd.result_message}"
+                    )
+
+    def provision_role(self, role_api: MgmtRolesResourceApi, role: ApiRole) -> ApiRole:
+        self.changed = True
+
+        if self.module._diff:
+            self.diff = dict(
+                before={},
+                after=role.to_dict(),
+            )
+
+        if not self.module.check_mode:
+            created_role = next(
+                (
+                    iter(
+                        role_api.create_roles(
+                            body=ApiRoleList(items=[role]),
+                        ).items
+                    )
+                ),
+                {},
+            )
+            if not created_role:
+                self.module.fail_json(
+                    msg="Unable to create new role", role=to_native(role.to_dict())
+                )
+            return created_role
+        else:
+            # In check mode, return the declared role so downstream logic can proceed
+            return role
+
+    def reprovision_role(
+        self, role_api: MgmtRolesResourceApi, existing_role: ApiRole, new_role: ApiRole
+    ) -> ApiRole:
+        self.changed = True
+
+        if self.module._diff:
+            self.diff = dict(
+                before=existing_role.to_dict(),
+                after=new_role.to_dict(),
+            )
+
+        if not self.module.check_mode:
+            role_api.delete_role(existing_role.name)
+
+            rebuilt_role = next(
+                (
+                    iter(
+                        role_api.create_roles(
+                            body=ApiRoleList(items=[new_role]),
+                        ).items
+                    )
+                ),
+                {},
+            )
+            if not rebuilt_role:
+                self.module.fail_json(
+                    msg="Unable to recreate role, " + existing_role.name,
+                    role=to_native(new_role.to_dict()),
+                )
+            return rebuilt_role
+        else:
+            return existing_role
+
+    def deprovision_role(self, role_api: MgmtRolesResourceApi, role: ApiRole) -> None:
+
self.changed = True
+
+        if self.module._diff:
+            self.diff = dict(before=parse_role_result(role), after=dict())
+
+        if not self.module.check_mode:
+            role_api.delete_role(role.name)
+
+    def handle_commands(self, commands: ApiBulkCommandList):
+        if commands.errors:
+            error_msg = "\n".join(commands.errors)
+            self.module.fail_json(msg=error_msg)
+
+        for c in commands.items:
+            # Not in parallel, but should only be a single command
+            self.wait_command(c)
+
+
+def main():
+    module = ClouderaManagerMutableModule.ansible_module(
+        argument_spec=dict(
+            cluster_hostname=dict(aliases=["cluster_host"]),
+            cluster_host_id=dict(),
+            maintenance=dict(type="bool", aliases=["maintenance_mode"]),
+            config=dict(type="dict", aliases=["params", "parameters"]),
+            purge=dict(type="bool", default=False),
+            type=dict(required=True, aliases=["role_type"]),
+            state=dict(
+                default="present",
+                choices=["present", "absent", "restarted", "started", "stopped"],
+            ),
+        ),
+        mutually_exclusive=[
+            ["cluster_hostname", "cluster_host_id"],
+        ],
+        supports_check_mode=True,
+    )
+
+    result = ClouderaManagerServiceRole(module)
+
+    output = dict(
+        changed=result.changed,
+        role=result.output,
+    )
+
+    if module._diff:
+        output.update(diff=result.diff)
+
+    if result.debug:
+        log = result.log_capture.getvalue()
+        output.update(debug=log, debug_lines=log.split("\n"))
+
+    module.exit_json(**output)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/plugins/modules/cm_service_role_config.py b/plugins/modules/cm_service_role_config.py
new file mode 100644
index 00000000..fc6efbf3
--- /dev/null
+++ b/plugins/modules/cm_service_role_config.py
@@ -0,0 +1,322 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright 2024 Cloudera, Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+DOCUMENTATION = r"""
+module: cm_service_role_config
+short_description: Manage a Cloudera Manager Service role configuration
+description:
+  - Manage the role-specific configuration, i.e. role overrides, of a Cloudera Manager Service role.
+author:
+  - "Webster Mudge (@wmudge)"
+requirements:
+  - cm-client
+options:
+  name:
+    description:
+      - A Cloudera Manager Service role name to manage.
+      - One of C(name) or C(type) is required.
+    type: str
+    aliases:
+      - role_name
+      - role
+  type:
+    description:
+      - A Cloudera Manager Service role type to manage.
+      - One of C(name) or C(type) is required.
+    type: str
+    aliases:
+      - role_type
+  parameters:
+    description:
+      - The role-specific configuration to set, i.e. role overrides.
+      - To unset a parameter, use C(None) as the value.
+    type: dict
+    required: yes
+    aliases:
+      - params
+  purge:
+    description:
+      - Flag for whether the declared parameters should append or overwrite any existing parameters.
+      - To clear all parameters, set I(parameters={}), i.e. an empty dictionary, and I(purge=True).
+    type: bool
+    default: False
+  view:
+    description:
+      - The view to return.
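+      - The I(full) view includes default values, descriptions, and validation details for each parameter.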
+    type: str
+    default: summary
+    choices:
+      - summary
+      - full
+extends_documentation_fragment:
+  - ansible.builtin.action_common_attributes
+  - cloudera.cluster.cm_options
+  - cloudera.cluster.cm_endpoint
+  - cloudera.cluster.purge
+  - cloudera.cluster.message
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: full
+  platform:
+    platforms: all
+"""
+
+EXAMPLES = r"""
+- name: Update (append) Cloudera Manager Service Host Monitor role parameters
+  cloudera.cluster.cm_service_role_config:
+    host: example.cloudera.com
+    username: "jane_smith"
+    password: "S&peR4Ec*re"
+    type: HOSTMONITOR
+    parameters:
+      a_configuration: "schema://host:port"
+      another_configuration: 234
+
+- name: Reset a Cloudera Manager Service Host Monitor role parameter
+  cloudera.cluster.cm_service_role_config:
+    host: example.cloudera.com
+    username: "jane_smith"
+    password: "S&peR4Ec*re"
+    name: "a-non-default-role-name"
+    parameters:
+      more_configuration: None
+
+- name: Update (with purge) Cloudera Manager Service Host Monitor role parameters
+  cloudera.cluster.cm_service_role_config:
+    host: example.cloudera.com
+    username: "jane_smith"
+    password: "S&peR4Ec*re"
+    type: HOSTMONITOR
+    parameters:
+      config_one: None
+      config_two: ValueTwo
+      config_three: 2345
+    purge: yes
+
+- name: Reset all Cloudera Manager Service Host Monitor role parameters
+  cloudera.cluster.cm_service_role_config:
+    host: example.cloudera.com
+    username: "jane_smith"
+    password: "S&peR4Ec*re"
+    type: HOSTMONITOR
+    parameters: {}
+    purge: yes
+"""
+
+RETURN = r"""
+config:
+  description:
+    - List of Cloudera Manager Service role configurations.
+    - Returns the C(summary) view of the resulting configuration.
+  type: list
+  elements: dict
+  returned: always
+  contains:
+    name:
+      description:
+        - The canonical name that identifies this configuration parameter.
+      type: str
+      returned: when supported
+    value:
+      description:
+        - The user-defined value.
+        - When absent, the default value (if any) will be used.
+        - Can also be absent, when enumerating allowed configs.
+      type: str
+      returned: when supported
+    required:
+      description:
+        - Whether this configuration is required for the object.
+        - If any required configuration is not set, operations on the object may not work.
+        - Requires I(full) view.
+      type: bool
+      returned: when supported
+    default:
+      description:
+        - The default value.
+        - Requires I(full) view.
+      type: str
+      returned: when supported
+    display_name:
+      description:
+        - A user-friendly name of the parameter, as would have been shown in the web UI.
+        - Requires I(full) view.
+      type: str
+      returned: when supported
+    description:
+      description:
+        - A textual description of the parameter.
+        - Requires I(full) view.
+      type: str
+      returned: when supported
+    related_name:
+      description:
+        - If applicable, contains the related configuration variable used by the source project.
+        - Requires I(full) view.
+      type: str
+      returned: when supported
+    sensitive:
+      description:
+        - Whether this configuration is sensitive, i.e. contains information such as passwords, which might affect how the value of this configuration might be shared by the caller.
+      type: bool
+      returned: when supported
+    validate_state:
+      description:
+        - State of the configuration parameter after validation.
+        - Requires I(full) view.
+      type: str
+      returned: when supported
+    validation_message:
+      description:
+        - A message explaining the parameter's validation state.
+        - Requires I(full) view.
+      type: str
+      returned: when supported
+    validation_warnings_suppressed:
+      description:
+        - Whether validation warnings associated with this parameter are suppressed.
+        - In general, suppressed validation warnings are hidden in the Cloudera Manager UI.
+        - Configurations that do not produce warnings will not contain this field.
+        - Requires I(full) view.
+      type: bool
+      returned: when supported
+"""
+
+import json
+
+from ansible_collections.cloudera.cluster.plugins.module_utils.cm_utils import (
+    ClouderaManagerMutableModule,
+    ConfigListUpdates,
+)
+
+from cm_client import MgmtRolesResourceApi
+from cm_client.rest import ApiException
+
+
+class ClouderaManagerServiceRoleConfig(ClouderaManagerMutableModule):
+    def __init__(self, module):
+        super(ClouderaManagerServiceRoleConfig, self).__init__(module)
+
+        # Set the parameters
+        self.name = self.get_param("name")
+        self.type = self.get_param("type")
+        self.params = self.get_param("parameters")
+        self.purge = self.get_param("purge")
+        self.view = self.get_param("view")
+
+        # Initialize the return value
+        self.changed = False
+        self.diff = {}
+        self.config = []
+
+        # Execute the logic
+        self.process()
+
+    @ClouderaManagerMutableModule.handle_process
+    def process(self):
+        refresh = True
+        role_api = MgmtRolesResourceApi(self.api_client)
+
+        try:
+            if self.name is None:
+                role = next(
+                    iter(
+                        [r for r in role_api.read_roles().items if r.type == self.type]
+                    ),
+                    None,
+                )
+                if role is None:
+                    self.module.fail_json(
+                        msg=f"Unable to find Cloudera Manager Service role type '{self.type}'"
+                    )
+                else:
+                    self.name = role.name
+
+            # For some reason, the call to read_roles() doesn't retrieve the configuration
+            existing = role_api.read_role_config(self.name)
+        except ApiException as ex:
+            if ex.status == 404:
+                self.module.fail_json(msg=json.loads(ex.body)["message"])
+            else:
+                raise ex
+
+        updates = ConfigListUpdates(existing, self.params, self.purge)
+
+        if updates.changed:
+            self.changed = True
+
+            if self.module._diff:
+                self.diff = updates.diff
+
+            if not self.module.check_mode:
+                self.config = [
+                    p.to_dict()
+                    for p in role_api.update_role_config(
+                        self.name,
+                        message=self.message,
+                        body=updates.config,
+                    ).items
+                ]
+
+                if self.view == "full":
+                    refresh = False
+
+        if refresh:
+            self.config = [
+                p.to_dict()
+                for p in role_api.read_role_config(self.name, view=self.view).items
+            ]
+
+
+def main():
+    module = ClouderaManagerMutableModule.ansible_module(
+        argument_spec=dict(
+            name=dict(aliases=["role_name", "role"]),
+            type=dict(aliases=["role_type"]),
+            parameters=dict(type="dict", required=True, aliases=["params"]),
+            purge=dict(type="bool", default=False),
+            view=dict(
+                default="summary",
+                choices=["summary", "full"],
+            ),
+        ),
+        required_one_of=[
+            ["name", "type"],
+        ],
+        supports_check_mode=True,
+    )
+
+    result = ClouderaManagerServiceRoleConfig(module)
+
+    output = dict(
+        changed=result.changed,
+        config=result.config,
+    )
+
+    if module._diff:
+        output.update(diff=result.diff)
+
+    if result.debug:
+        log = result.log_capture.getvalue()
+        output.update(debug=log, debug_lines=log.split("\n"))
+
+    module.exit_json(**output)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/plugins/modules/cm_service_role_config_group.py b/plugins/modules/cm_service_role_config_group.py
new file mode 100644
index 00000000..e2d0bb59
--- /dev/null
+++ b/plugins/modules/cm_service_role_config_group.py
@@ -0,0 +1,254 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright 2025 Cloudera, Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+DOCUMENTATION = r"""
+module: cm_service_role_config_group
+short_description: Manage a Cloudera Manager Service role config group.
+description:
+  - Manage a Cloudera Manager Service role config group.
+author:
+  - Webster Mudge (@wmudge)
+options:
+  type:
+    description:
+      - The role type defining the role config group.
+    type: str
+    required: yes
+    aliases:
+      - role_type
+  config:
+    description:
+      - The role configuration to set.
+      - To unset a parameter, use V(None) as the value.
+    type: dict
+    aliases:
+      - params
+      - parameters
+  purge:
+    description:
+      - Whether to reset configuration parameters to only the declared entries.
+    type: bool
+    default: no
+extends_documentation_fragment:
+  - cloudera.cluster.cm_options
+  - cloudera.cluster.cm_endpoint
+  - cloudera.cluster.message
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: full
+  platform:
+    platforms: all
+requirements:
+  - cm-client
+seealso:
+  - module: cloudera.cluster.cm_service
+"""
+
+EXAMPLES = r"""
+- name: Update the configuration of a Cloudera Manager service role config group
+  cloudera.cluster.cm_service_role_config_group:
+    host: example.cloudera.com
+    username: "jane_smith"
+    password: "S&peR4Ec*re"
+    type: HOSTMONITOR
+    config:
+      some_parameter: True
+
+- name: Update the configuration of a Cloudera Manager service role config group, purging undeclared parameters
+  cloudera.cluster.cm_service_role_config_group:
+    host: example.cloudera.com
+    username: "jane_smith"
+    password: "S&peR4Ec*re"
+    type: HOSTMONITOR
+    config:
+      another_parameter: 3456
+    purge: yes
+
+- name: Reset the configuration of a Cloudera Manager service role config group
+  cloudera.cluster.cm_service_role_config_group:
+    host: example.cloudera.com
+    username: "jane_smith"
+    password: "S&peR4Ec*re"
+    type: HOSTMONITOR
+    purge: yes
+"""
+
+RETURN = r"""
+role_config_group:
+  description: A Cloudera Manager service role config group.
+  type: dict
+  returned: always
+  contains:
+    base:
+      description: Whether the role config group is a base group.
+      type: bool
+      returned: always
+    config:
+      description: Set of configurations for the role config group.
+      type: dict
+      returned: optional
+    display_name:
+      description: Display name of the role config group.
+      type: str
+      returned: always
+    name:
+      description: Name (identifier) of the role config group.
+      type: str
+      returned: always
+    role_names:
+      description: List of role names (identifiers) associated with this role config group.
+      type: list
+      elements: str
+      returned: optional
+    role_type:
+      description: The type of the roles in this role config group.
+      type: str
+      returned: always
+    service_name:
+      description: Service name associated with this role config group.
+      type: str
+      returned: always
+"""
+
+from cm_client import (
+    ApiRoleConfigGroup,
+    MgmtRoleConfigGroupsResourceApi,
+    MgmtServiceResourceApi,
+)
+from cm_client.rest import ApiException
+
+from ansible_collections.cloudera.cluster.plugins.module_utils.cm_utils import (
+    ClouderaManagerMutableModule,
+    ConfigListUpdates,
+)
+from ansible_collections.cloudera.cluster.plugins.module_utils.role_config_group_utils import (
+    parse_role_config_group_result,
+    get_mgmt_base_role_config_group,
+)
+
+
+class ClouderaManagerServiceRoleConfigGroup(ClouderaManagerMutableModule):
+    def __init__(self, module):
+        super(ClouderaManagerServiceRoleConfigGroup, self).__init__(module)
+
+        # Set the parameters
+        self.type = self.get_param("type")
+        self.config = self.get_param("config")
+        self.purge = self.get_param("purge")
+
+        # Initialize the return value
+        self.changed = False
+        self.diff = dict(before=dict(), after=dict())
+        self.output = {}
+
+        # Execute the logic
+        self.process()
+
+    @ClouderaManagerMutableModule.handle_process
+    def process(self):
+        # Confirm that CMS is present
+        try:
+            MgmtServiceResourceApi(self.api_client).read_service()
+        except ApiException as ex:
+            if ex.status == 404:
+                self.module.fail_json(msg="Cloudera Management service does not exist")
+            else:
+                raise ex
+
+        rcg_api = MgmtRoleConfigGroupsResourceApi(self.api_client)
+
+        # Retrieve the base RCG (the _only_ RCG for CMS roles)
+        current = None
+        try:
+            current = get_mgmt_base_role_config_group(self.api_client, self.type)
+        except ApiException as ex:
+            if ex.status != 404:
+                raise ex
+
+        if current is None:
+            self.module.fail_json(
+                msg=f"Unable to find Cloudera Manager service base role config group for role type '{self.type}'"
+            )
+
+        # Reconcile configurations
+        if self.config or self.purge:
+            if self.config is None:
+                self.config = dict()
+
+            updates = ConfigListUpdates(current.config, self.config, self.purge)
+
+            if updates.changed:
+                self.changed = True
+
+                if self.module._diff:
+                    self.diff["before"].update(config=updates.diff["before"])
+                    self.diff["after"].update(config=updates.diff["after"])
+
+                # Execute changes if needed
+                if not self.module.check_mode:
+                    current = rcg_api.update_role_config_group(
+                        current.name,
+                        message=self.message,
+                        body=ApiRoleConfigGroup(
+                            name=current.name,
+                            role_type=current.role_type,
+                            config=updates.config,
+                            display_name=current.display_name,
+                        ),
+                    )
+
+        # Parse the results
+        self.output = parse_role_config_group_result(current)
+
+        # Report on any role associations
+        self.output.update(
+            role_names=[r.name for r in rcg_api.read_roles(current.name).items]
+        )
+
+
+def main():
+    module = ClouderaManagerMutableModule.ansible_module(
+        argument_spec=dict(
+            type=dict(required=True, aliases=["role_type"]),
+            config=dict(type="dict", aliases=["params", "parameters"]),
+            purge=dict(type="bool", default=False),
+        ),
+        supports_check_mode=True,
+    )
+
+    result = ClouderaManagerServiceRoleConfigGroup(module)
+
+    output = dict(
+        changed=result.changed,
+        role_config_group=result.output,
+    )
+
+    if module._diff:
+        output.update(diff=result.diff)
+
+    if result.debug:
+        log = result.log_capture.getvalue()
+        output.update(debug=log, debug_lines=log.split("\n"))
+
+    module.exit_json(**output)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/plugins/modules/cm_service_role_config_group_config.py b/plugins/modules/cm_service_role_config_group_config.py
new file mode 100644
index 00000000..c6beca9c
--- /dev/null
+++ b/plugins/modules/cm_service_role_config_group_config.py
@@ -0,0 +1,307 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright 2024 Cloudera, Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+DOCUMENTATION = r"""
+module: cm_service_role_config_group_config
+short_description: Manage the configuration of a Cloudera Manager Service role config group.
+description:
+  - Manage the configuration details of a role config group of the Cloudera Manager Service.
+author:
+  - "Webster Mudge (@wmudge)"
+requirements:
+  - cm-client
+options:
+  name:
+    description:
+      - A role config group name to manage.
+      - One of C(name) or C(type) is required.
+    type: str
+    aliases:
+      - role_config_group
+  type:
+    description:
+      - The role type of the role config group to manage.
+      - Retrieves the default role config group for the given role type.
+      - One of C(name) or C(type) is required.
+    type: str
+    aliases:
+      - role_type
+  parameters:
+    description:
+      - The role configuration to set.
+      - To unset a parameter, use C(None) as the value.
+    type: dict
+    required: yes
+    aliases:
+      - params
+  view:
+    description:
+      - The view to materialize.
+    type: str
+    default: summary
+    choices:
+      - summary
+      - full
+extends_documentation_fragment:
+  - cloudera.cluster.cm_options
+  - cloudera.cluster.cm_endpoint
+  - cloudera.cluster.purge
+  - cloudera.cluster.message
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: full
+  platform:
+    platforms: all
+"""
+
+EXAMPLES = r"""
+- name: Update (append) several role config group parameters for a Cloudera Manager Service role type
+  cloudera.cluster.cm_service_role_config_group_config:
+    host: example.cloudera.com
+    username: "jane_smith"
+    password: "S&peR4Ec*re"
+    type: HOSTMONITOR
+    parameters:
+      a_configuration: "schema://host:port"
+      another_configuration: 234
+
+- name: Reset a role config group parameter for a Cloudera Manager Service role type
+  cloudera.cluster.cm_service_role_config_group_config:
+    host: example.cloudera.com
+    username: "jane_smith"
+    password: "S&peR4Ec*re"
+    type: HOSTMONITOR
+    parameters:
+      some_conf: None
+
+- name: Update (purge) role config group parameters (by name) for a Cloudera Manager Service role
+  cloudera.cluster.cm_service_role_config_group_config:
+    host: example.cloudera.com
+    username: "jane_smith"
+    password: "S&peR4Ec*re"
+    name: "a-non-default-rcg"
+    parameters:
+      config_one: ValueOne
+      config_two: 4567
+    purge: yes
+
+- name: Reset all role config group parameters for a Cloudera Manager Service role type
+  cloudera.cluster.cm_service_role_config_group_config:
+    host: example.cloudera.com
+    username: "jane_smith"
+    password: "S&peR4Ec*re"
+    type: HOSTMONITOR
+    parameters: {}
+    purge: yes
+"""
+
+RETURN = r"""
+config:
+  description:
+    - List of configurations for a Cloudera Manager Service role config group.
+  type: list
+  elements: dict
+  returned: always
+  contains:
+    name:
+      description:
+        - The canonical name that identifies this configuration parameter.
+      type: str
+      returned: when supported
+    value:
+      description:
+        - The user-defined value.
+        - When absent, the default value (if any) will be used.
+        - Can also be absent, when enumerating allowed configs.
+      type: str
+      returned: when supported
+    required:
+      description:
+        - Whether this configuration is required for the object.
+        - If any required configuration is not set, operations on the object may not work.
+        - Requires I(full) view.
+      type: bool
+      returned: when supported
+    default:
+      description:
+        - The default value.
+        - Requires I(full) view.
+      type: str
+      returned: when supported
+    display_name:
+      description:
+        - A user-friendly name of the parameter, as would have been shown in the web UI.
+        - Requires I(full) view.
+      type: str
+      returned: when supported
+    description:
+      description:
+        - A textual description of the parameter.
+        - Requires I(full) view.
+      type: str
+      returned: when supported
+    related_name:
+      description:
+        - If applicable, contains the related configuration variable used by the source project.
+        - Requires I(full) view.
+      type: str
+      returned: when supported
+    sensitive:
+      description:
+        - Whether this configuration is sensitive, i.e. contains information such as passwords, which might affect how the value of this configuration might be shared by the caller.
+      type: bool
+      returned: when supported
+    validate_state:
+      description:
+        - State of the configuration parameter after validation.
+        - Requires I(full) view.
+      type: str
+      returned: when supported
+    validation_message:
+      description:
+        - A message explaining the parameter's validation state.
+        - Requires I(full) view.
+      type: str
+      returned: when supported
+    validation_warnings_suppressed:
+      description:
+        - Whether validation warnings associated with this parameter are suppressed.
+        - In general, suppressed validation warnings are hidden in the Cloudera Manager UI.
+        - Configurations that do not produce warnings will not contain this field.
+        - Requires I(full) view.
+      type: bool
+      returned: when supported
+"""
+
+import json
+
+from ansible_collections.cloudera.cluster.plugins.module_utils.cm_utils import (
+    ClouderaManagerMutableModule,
+    ConfigListUpdates,
+)
+from ansible_collections.cloudera.cluster.plugins.module_utils.role_config_group_utils import (
+    get_mgmt_base_role_config_group,
+)
+
+from cm_client import MgmtRoleConfigGroupsResourceApi
+from cm_client.rest import ApiException
+
+
+class ClouderaManagerServiceRoleConfigGroupConfig(ClouderaManagerMutableModule):
+    def __init__(self, module):
+        super(ClouderaManagerServiceRoleConfigGroupConfig, self).__init__(module)
+
+        # Set the parameters
+        self.name = self.get_param("name")
+        self.type = self.get_param("type")
+        self.params = self.get_param("parameters")
+        self.purge = self.get_param("purge")
+        self.view = self.get_param("view")
+
+        # Initialize the return values
+        self.changed = False
+        self.diff = {}
+        self.config = []
+
+        # Execute the logic
+        self.process()
+
+    @ClouderaManagerMutableModule.handle_process
+    def process(self):
+        refresh = True
+        rcg_api = MgmtRoleConfigGroupsResourceApi(self.api_client)
+
+        try:
+            if self.name is None:
+                rcg = get_mgmt_base_role_config_group(self.api_client, self.type)
+                if rcg is None:
+                    self.module.fail_json(
+                        msg=f"Unable to find Cloudera Manager Service base role config group for role type '{self.type}'"
+                    )
+                self.name = rcg.name
+
+            existing = rcg_api.read_config(self.name)
+        except ApiException as ae:
+            if ae.status == 404:
+                self.module.fail_json(msg=json.loads(ae.body)["message"])
+            else:
+                raise ae
+
+        updates = ConfigListUpdates(existing, self.params, self.purge)
+
+        if updates.changed:
+            self.changed = True
+
+            if self.module._diff:
+                self.diff = updates.diff
+
+            if not self.module.check_mode:
+                self.config = [
+                    p.to_dict()
+                    for p in rcg_api.update_config(
+                        self.name,
+                        message=self.message,
+                        body=updates.config,
+                    ).items
+                ]
+
+                if self.view == "full":
+                    refresh = False
+
+        if refresh:
+            self.config = [
+                p.to_dict()
+                for p in rcg_api.read_config(self.name, view=self.view).items
+            ]
+
+
+def main():
+    module = ClouderaManagerMutableModule.ansible_module(
+        argument_spec=dict(
+            name=dict(aliases=["role_config_group"]),
+            type=dict(aliases=["role_type"]),
+            parameters=dict(type="dict", required=True, aliases=["params"]),
+            purge=dict(type="bool", default=False),
+            view=dict(
+                default="summary",
+                choices=["summary", "full"],
+            ),
+        ),
+        required_one_of=[
+            ["name", "type"],
+        ],
+        supports_check_mode=True,
+    )
+
+    result = ClouderaManagerServiceRoleConfigGroupConfig(module)
+
+    output = dict(
+        changed=result.changed,
+        config=result.config,
+    )
+
+    if module._diff:
+        output.update(diff=result.diff)
+
+    if result.debug:
+        log = result.log_capture.getvalue()
+        output.update(debug=log, debug_lines=log.split("\n"))
+
+    module.exit_json(**output)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/plugins/modules/cm_service_role_config_group_info.py b/plugins/modules/cm_service_role_config_group_info.py
new file mode 100644
index 00000000..d4cffe5a
--- /dev/null
+++ b/plugins/modules/cm_service_role_config_group_info.py
@@ -0,0 +1,195 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright 2025 Cloudera, Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +DOCUMENTATION = r""" +module: cm_service_role_config_group_info +short_description: Retrieve information about Cloudera Management service role config groups. +description: + - Gather information about Cloudera Manager service role config groups. +author: + - Webster Mudge (@wmudge) +options: + type: + description: + - The role type defining the role config group. + type: str + aliases: + - role_type +extends_documentation_fragment: + - cloudera.cluster.cm_options + - cloudera.cluster.cm_endpoint +attributes: + check_mode: + support: full +requirements: + - cm-client +seealso: + - module: cloudera.cluster.cm_service_role_config_group +""" + +EXAMPLES = r""" +- name: Gather details of an individual Cloudera Manager service role config group. + cloudera.cluster.cm_service_role_config_group_info: + host: "example.cloudera.host" + username: "will_jordan" + password: "S&peR4Ec*re" + type: HOSTMONITOR + register: cm_output + +- name: Gather details of all Cloudera Manager service role config groups. + cloudera.cluster.cm_service_role_config_group_info: + host: "example.cloudera.host" + username: "will_jordan" + password: "S&peR4Ec*re" + register: cm_output +""" + +RETURN = r""" +role_config_groups: + description: List of Cloudera Manager service role config groups. + type: list + elements: dict + returned: always + contains: + base: + description: Whether the role config group is a base group. + type: bool + returned: always + config: + description: Set of configurations for the role config group. + type: dict + returned: optional + display_name: + description: Display name of the role config group. + type: str + returned: always + name: + description: Name (identifier) of the role config group. + type: str + returned: always + role_names: + description: List of role names (identifiers) associated with this role config group. + type: list + elements: str + returned: optional + role_type: + description: The type of the roles in this role config group. + type: str + returned: always + service_name: + description: Service name associated with this role config group. 
+      type: str
+      returned: always
+"""
+
+
+from cm_client import (
+    ApiRoleConfigGroup,
+    MgmtRoleConfigGroupsResourceApi,
+    MgmtServiceResourceApi,
+)
+from cm_client.rest import ApiException
+
+from ansible_collections.cloudera.cluster.plugins.module_utils.cm_utils import (
+    ClouderaManagerModule,
+)
+from ansible_collections.cloudera.cluster.plugins.module_utils.role_config_group_utils import (
+    parse_role_config_group_result,
+    get_mgmt_base_role_config_group,
+)
+
+
+class ClouderaServiceRoleConfigGroupInfo(ClouderaManagerModule):
+    def __init__(self, module):
+        super(ClouderaServiceRoleConfigGroupInfo, self).__init__(module)
+
+        # Set the parameters
+        self.type = self.get_param("type")
+
+        # Initialize the return values
+        self.output = list()
+
+        # Execute the logic
+        self.process()
+
+    @ClouderaManagerModule.handle_process
+    def process(self):
+        # Confirm that CMS is present
+        try:
+            MgmtServiceResourceApi(self.api_client).read_service()
+        except ApiException as ex:
+            if ex.status == 404:
+                self.module.fail_json(msg="Cloudera Management service does not exist")
+            else:
+                raise ex
+
+        rcg_api = MgmtRoleConfigGroupsResourceApi(self.api_client)
+
+        # Retrieve the base RCG (the _only_ RCG for CMS roles)
+        if self.type:
+            current = None
+            try:
+                current = get_mgmt_base_role_config_group(self.api_client, self.type)
+            except ApiException as ex:
+                if ex.status != 404:
+                    raise ex
+
+            if current is not None:
+                result = parse_role_config_group_result(current)
+                result.update(
+                    role_names=[r.name for r in rcg_api.read_roles(current.name).items]
+                )
+                self.output.append(result)
+        else:
+
+            def process_result(rcg: ApiRoleConfigGroup) -> dict:
+                result = parse_role_config_group_result(rcg)
+                result.update(
+                    role_names=[r.name for r in rcg_api.read_roles(rcg.name).items]
+                )
+                return result
+
+            self.output = [
+                process_result(r)
+                for r in rcg_api.read_role_config_groups().items
+                if r.base
+            ]
+
+
+def main():
+    module = ClouderaManagerModule.ansible_module(
+        argument_spec=dict(
+            type=dict(aliases=["role_type"]),
+        ),
+        supports_check_mode=True,
+    )
+
+    result = ClouderaServiceRoleConfigGroupInfo(module)
+
+    output = dict(
+        changed=False,
+        role_config_groups=result.output,
+    )
+
+    if result.debug:
+        log = result.log_capture.getvalue()
+        output.update(debug=log, debug_lines=log.split("\n"))
+
+    module.exit_json(**output)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/plugins/modules/cm_service_role_info.py b/plugins/modules/cm_service_role_info.py
new file mode 100644
index 00000000..a8aad2f5
--- /dev/null
+++ b/plugins/modules/cm_service_role_info.py
@@ -0,0 +1,290 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright 2025 Cloudera, Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+DOCUMENTATION = r"""
+module: cm_service_role_info
+short_description: Retrieve information about Cloudera Management service roles.
+description:
+  - Gather information about one or all Cloudera Manager service roles.
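+  - If O(type) is declared, only the role matching that type is returned; otherwise, all Cloudera Manager service roles are returned.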
+author: + - Webster Mudge (@wmudge) +options: + type: + description: + - The role type of the role. + type: str + aliases: + - role_type +extends_documentation_fragment: + - cloudera.cluster.cm_options + - cloudera.cluster.cm_endpoint +attributes: + check_mode: + support: full +requirements: + - cm-client +seealso: + - module: cloudera.cluster.cm_service_role +""" + +EXAMPLES = r""" +- name: Gather details of an individual Cloudera Manager service role. + cloudera.cluster.cm_service_role_info: + host: "example.cloudera.host" + username: "john_doe" + password: "S&peR4Ec*re" + type: HOSTMONITOR + register: cm_output + +- name: Gather details of all Cloudera Manager service roles. + cloudera.cluster.cm_service_role_info: + host: "example.cloudera.host" + username: "john_doe" + password: "S&peR4Ec*re" + register: cm_output +""" + +RETURN = r""" +roles: + description: List of Cloudera Manager service roles. + type: list + elements: dict + returned: always + contains: + commission_state: + description: Commission state of the Cloudera Manager service role. + type: str + returned: always + sample: + - COMMISSIONED + - DECOMMISSIONING + - DECOMMISSIONED + - UNKNOWN + - OFFLINING + - OFFLINED + config: + description: Role override configuration for the Cloudera Manager service. + type: dict + returned: optional + config_staleness_status: + description: Status of configuration staleness for the Cloudera Manager service role. + type: str + returned: always + sample: + - FRESH + - STALE_REFRESHABLE + - STALE + ha_status: + description: High-availability status for the Cloudera Manager service. + type: str + returned: optional + sample: + - ACTIVE + - STANDBY + - UNKNOWN + health_checks: + description: List of all available health checks for Cloudera Manager service role. + type: list + elements: dict + returned: optional + contains: + explanation: + description: The explanation of this health check. + type: str + returned: optional + name: + description: Unique name of this health check. + type: str + returned: always + summary: + description: The high-level health status of the health check. + type: str + returned: always + sample: + - DISABLED + - HISTORY_NOT_AVAILABLE + - NOT_AVAILABLE + - GOOD + - CONCERNING + - BAD + suppressed: + description: + - Whether this health check is suppressed. + - A suppressed health check is not considered when computing the role's overall health. + type: bool + returned: optional + health_summary: + description: The high-level health status of the Cloudera Manager service role. + type: str + returned: always + sample: + - DISABLED + - HISTORY_NOT_AVAILABLE + - NOT_AVAILABLE + - GOOD + - CONCERNING + - BAD + host_id: + description: The unique ID of the cluster host. + type: str + returned: always + maintenance_mode: + description: Whether the Cloudera Manager service role is in maintenance mode. + type: bool + returned: always + maintenance_owners: + description: List of objects that trigger the Cloudera Manager service role to be in maintenance mode. + type: list + elements: str + returned: optional + sample: + - CLUSTER + - SERVICE + - ROLE + - HOST + - CONTROL_PLANE + name: + description: + - The Cloudera Manager service role name. + - Note, this is an auto-generated name and cannot be changed. + type: str + returned: always + role_config_group_name: + description: The name of the Cloudera Manager Service role config group, which uniquely identifies it in a Cloudera Manager installation. 
+      type: str
+      returned: always
+    role_state:
+      description: State of the Cloudera Manager service role.
+      type: str
+      returned: always
+      sample:
+        - HISTORY_NOT_AVAILABLE
+        - UNKNOWN
+        - STARTING
+        - STARTED
+        - STOPPING
+        - STOPPED
+        - NA
+    service_name:
+      description: The name of the Cloudera Manager service, which uniquely identifies it in a deployment.
+      type: str
+      returned: always
+    tags:
+      description: Set of tags for the Cloudera Manager service role.
+      type: dict
+      returned: optional
+    type:
+      description: The Cloudera Manager service role type.
+      type: str
+      returned: always
+      sample:
+        - HOSTMONITOR
+        - ALERTPUBLISHER
+        - SERVICEMONITOR
+        - REPORTSMANAGER
+        - EVENTSERVER
+    zoo_keeper_server_mode:
+      description:
+        - The Zookeeper server mode for this Cloudera Manager service role.
+        - Note that for non-Zookeeper Server roles, this will be V(null).
+      type: str
+      returned: optional
+"""
+
+
+from cm_client import (
+    MgmtServiceResourceApi,
+)
+from cm_client.rest import ApiException
+
+from ansible_collections.cloudera.cluster.plugins.module_utils.cm_utils import (
+    ClouderaManagerModule,
+)
+from ansible_collections.cloudera.cluster.plugins.module_utils.role_utils import (
+    parse_role_result,
+    read_cm_role,
+    read_cm_roles,
+)
+
+
+class ClouderaServiceRoleInfo(ClouderaManagerModule):
+    def __init__(self, module):
+        super(ClouderaServiceRoleInfo, self).__init__(module)
+
+        # Set the parameters
+        self.type = self.get_param("type")
+
+        # Initialize the return values
+        self.output = list()
+
+        # Execute the logic
+        self.process()
+
+    @ClouderaManagerModule.handle_process
+    def process(self):
+        # Confirm that CMS is present
+        try:
+            MgmtServiceResourceApi(self.api_client).read_service()
+        except ApiException as ex:
+            if ex.status == 404:
+                self.module.fail_json(msg="Cloudera Management service does not exist")
+            else:
+                raise ex
+
+        # Retrieve the specified role by type
+        if self.type:
+            result = None
+
+            try:
+                result = read_cm_role(api_client=self.api_client, role_type=self.type)
+            except ApiException as ex:
+                if ex.status != 404:
+                    raise ex
+
+            if result is not None:
+                self.output.append(parse_role_result(result))
+        else:
+            self.output = [
+                parse_role_result(r)
+                for r in read_cm_roles(api_client=self.api_client).items
+            ]
+
+
+def main():
+    module = ClouderaManagerModule.ansible_module(
+        argument_spec=dict(
+            type=dict(aliases=["role_type"]),
+        ),
+        # Read-only module; check mode is fully supported, per the DOCUMENTATION attributes
+        supports_check_mode=True,
+    )
+
+    result = ClouderaServiceRoleInfo(module)
+
+    output = dict(
+        changed=False,
+        roles=result.output,
+    )
+
+    if result.debug:
+        log = result.log_capture.getvalue()
+        output.update(debug=log, debug_lines=log.split("\n"))
+
+    module.exit_json(**output)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/plugins/modules/service_role.py b/plugins/modules/service_role.py
index f9bed4a1..d92ca533 100644
--- a/plugins/modules/service_role.py
+++ b/plugins/modules/service_role.py
@@ -16,9 +16,11 @@

 from ansible_collections.cloudera.cluster.plugins.module_utils.cm_utils import (
     ClouderaManagerMutableModule,
-    parse_role_result,
     resolve_tag_updates,
 )
+from ansible_collections.cloudera.cluster.plugins.module_utils.role_utils import (
+    parse_role_result,
+)

 from cm_client import (
     ApiEntityTag,
@@ -34,6 +36,7 @@
 )
 from cm_client.rest import ApiException

+
 ANSIBLE_METADATA = {
     "metadata_version": "1.1",
     "status": ["preview"],
diff --git a/plugins/modules/service_role_config_group.py b/plugins/modules/service_role_config_group.py
index 5d1f4449..b54ffeef 100644
---
a/plugins/modules/service_role_config_group.py +++ b/plugins/modules/service_role_config_group.py @@ -16,6 +16,9 @@ from ansible_collections.cloudera.cluster.plugins.module_utils.cm_utils import ( ClouderaManagerMutableModule, +) + +from ansible_collections.cloudera.cluster.plugins.module_utils.role_config_group_utils import ( parse_role_config_group_result, ) diff --git a/plugins/modules/service_role_config_group_config_info.py b/plugins/modules/service_role_config_group_config_info.py index fc127dc8..ba25a6cb 100644 --- a/plugins/modules/service_role_config_group_config_info.py +++ b/plugins/modules/service_role_config_group_config_info.py @@ -16,7 +16,6 @@ from ansible_collections.cloudera.cluster.plugins.module_utils.cm_utils import ( ClouderaManagerModule, - parse_role_config_group_result, ) from cm_client import ( @@ -26,6 +25,7 @@ ) from cm_client.rest import ApiException + ANSIBLE_METADATA = { "metadata_version": "1.1", "status": ["preview"], diff --git a/plugins/modules/service_role_config_group_info.py b/plugins/modules/service_role_config_group_info.py index 46e95af4..cc71314b 100644 --- a/plugins/modules/service_role_config_group_info.py +++ b/plugins/modules/service_role_config_group_info.py @@ -16,6 +16,9 @@ from ansible_collections.cloudera.cluster.plugins.module_utils.cm_utils import ( ClouderaManagerModule, +) + +from ansible_collections.cloudera.cluster.plugins.module_utils.role_config_group_utils import ( parse_role_config_group_result, ) @@ -26,6 +29,7 @@ ) from cm_client.rest import ApiException + ANSIBLE_METADATA = { "metadata_version": "1.1", "status": ["preview"], diff --git a/plugins/modules/service_role_info.py b/plugins/modules/service_role_info.py index c0e1f63f..9581a8bb 100644 --- a/plugins/modules/service_role_info.py +++ b/plugins/modules/service_role_info.py @@ -18,12 +18,16 @@ from ansible_collections.cloudera.cluster.plugins.module_utils.cm_utils import ( ClouderaManagerModule, +) + +from ansible_collections.cloudera.cluster.plugins.module_utils.role_utils import ( parse_role_result, ) from cm_client import ClustersResourceApi, RolesResourceApi, ServicesResourceApi from cm_client.rest import ApiException + ANSIBLE_METADATA = { "metadata_version": "1.1", "status": ["preview"], diff --git a/pyproject.toml b/pyproject.toml index a36945c5..f06438c1 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -6,12 +6,13 @@ readme = "README.md" requires-python = ">=3.8" license = "Apache-2.0" keywords = [] -authors = [ - { name = "Webster Mudge", email = "wmudge@cloudera.com" }, -] +authors = [{ name = "Webster Mudge", email = "wmudge@cloudera.com" }] classifiers = [] dependencies = [] +[tool.hatch.build.targets.wheel] +bypass-selection = true + [tool.hatch.version] path = "galaxy.yml" pattern = "version:\\s+(?P[\\d\\.]+)" @@ -29,7 +30,7 @@ dependencies = [ "molecule-plugins", "molecule-plugins[ec2]", "tox-ansible", - "ansible-core<2.17", # For RHEL 8 support + "ansible-core<2.17", # For RHEL 8 support "jmespath", "cm-client", ] @@ -37,23 +38,30 @@ dependencies = [ [tool.hatch.envs.lint] python = "3.12" skip-install = true -extra-dependencies = [ - "ansible-lint", -] +extra-dependencies = ["ansible-lint"] [tool.hatch.envs.lint.scripts] run = "pre-commit run -a" [tool.pytest.ini_options] -testpaths = [ - "tests", -] +# addopts = [ +# "--lf", +# "--nf", +# ] +testpaths = ["tests"] filterwarnings = [ "ignore:AnsibleCollectionFinder has already been configured", "ignore:'crypt' is deprecated and slated for removal in Python 3.13:DeprecationWarning", + "ignore:URLs without 
a scheme:DeprecationWarning",
+    "ignore:HTTPResponse.getheaders():DeprecationWarning",
 ]
 markers = [
-    "prepare: Prepare Cloudera Manager and resources for tests",
+    "service_config: Prepare service-wide configurations for tests",
+    "role_config: Prepare role override configurations for tests",
+    "role_config_group_config: Prepare role config group configurations for tests",
+    "role_config_group: Prepare a role config group for tests",
+    "role: Prepare a role for tests",
+    "role_state: Prepare a role state for tests",
 ]

 [build-system]
diff --git a/requirements.txt b/requirements.txt
index 78e23e5b..49d92201 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -16,4 +16,4 @@
 jmespath

 # For cm_service lookup
-cm_client
+cm-client
diff --git a/tests/return-values-cm_service.yml b/tests/return-values-cm_service.yml
new file mode 100644
index 00000000..3c6469fe
--- /dev/null
+++ b/tests/return-values-cm_service.yml
@@ -0,0 +1,303 @@
+# Copyright 2025 Cloudera, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+service:
+  description: The Cloudera Manager service.
+  type: dict
+  contains:
+    client_config_staleness_status:
+      description: Status of client configuration for the Cloudera Manager service.
+      type: str
+      returned: optional
+    cluster_name:
+      description: The associated cluster name.
+      type: str
+      returned: optional
+    config:
+      description: Service-wide configuration for the Cloudera Manager service.
+      type: dict
+      returned: optional
+    config_staleness_status:
+      description: Status of configuration staleness for the Cloudera Manager service.
+      type: str
+      returned: optional
+      sample:
+        - FRESH
+        - STALE_REFRESHABLE
+        - STALE
+    display_name:
+      description: Display name of the Cloudera Manager service.
+      type: str
+      returned: always
+    health_checks:
+      description: Lists all available health checks for the Cloudera Manager service.
+      type: list
+      elements: dict
+      returned: optional
+      contains:
+        explanation:
+          description: A descriptor for the health check.
+          type: str
+          returned: optional
+        name:
+          description: Unique name for the health check.
+          type: str
+          returned: always
+        summary:
+          description: The summary status of the health check.
+          type: str
+          returned: always
+          sample:
+            - DISABLED
+            - HISTORY_NOT_AVAILABLE
+            - NOT_AVAILABLE
+            - GOOD
+            - CONCERNING
+            - BAD
+        suppressed:
+          description:
+            - Whether the health check is suppressed.
+            - A suppressed health check is not considered when computing the overall health.
+          type: bool
+          returned: always
+    health_summary:
+      description: Health of the Cloudera Manager service.
+      type: str
+      returned: always
+      sample:
+        - DISABLED
+        - HISTORY_NOT_AVAILABLE
+        - NOT_AVAILABLE
+        - GOOD
+        - CONCERNING
+        - BAD
+    maintenance_mode:
+      description: Whether maintenance mode is enabled for the Cloudera Manager service.
+      type: bool
+      returned: always
+    maintenance_owners:
+      description: List of objects that trigger the Cloudera Manager service to be in maintenance mode.
+ type: list + elements: str + returned: optional + sample: + - CLUSTER + - SERVICE + - ROLE + - HOST + - CONTROL_PLANE + name: + description: Name (identifier) of the Cloudera Manager service. + type: str + returned: always + role_config_groups: + description: List of role configuration groups for Cloudera Manager service. + type: list + elements: dict + returned: optional + contains: + base: + description: Whether the role config group is a base (default) group. + type: bool + returned: always + config: + description: Configuration for the role config group. + type: dict + returned: optional + display_name: + description: Display name for the role config group. + type: str + returned: always + name: + description: Name (identifier) of the role config group. + type: str + returned: always + role_type: + description: The type of roles in this group. + type: str + returned: always + service_name: + description: Name (identifier) of the associated service of the role config group. + type: str + returned: always + roles: + description: List of role instances for Cloudera Manager service. + type: list + elements: dict + returned: optional + contains: + commission_state: + description: Commission state of the Cloudera Manager service role. + type: str + returned: always + sample: + - COMMISSIONED + - DECOMMISSIONING + - DECOMMISSIONED + - UNKNOWN + - OFFLINING + - OFFLINED + config: + description: Role override configuration for the Cloudera Manager service. + type: dict + returned: optional + config_staleness_status: + description: Status of configuration staleness for the Cloudera Manager service role. + type: str + returned: always + sample: + - FRESH + - STALE_REFRESHABLE + - STALE + ha_status: + description: High-availability status for the Cloudera Manager service. + type: str + returned: optional + sample: + - ACTIVE + - STANDBY + - UNKNOWN + health_checks: + description: List of all available health checks for Cloudera Manager service role. + type: list + elements: dict + returned: optional + contains: + explanation: + description: The explanation of this health check. + type: str + returned: optional + name: + description: Unique name of this health check. + type: str + returned: always + summary: + description: The high-level health status of the health check. + type: str + returned: always + sample: + - DISABLED + - HISTORY_NOT_AVAILABLE + - NOT_AVAILABLE + - GOOD + - CONCERNING + - BAD + suppressed: + description: + - Whether this health check is suppressed. + - A suppressed health check is not considered when computing the role's overall health. + type: bool + returned: optional + health_summary: + description: The high-level health status of the Cloudera Manager service role. + type: str + returned: always + sample: + - DISABLED + - HISTORY_NOT_AVAILABLE + - NOT_AVAILABLE + - GOOD + - CONCERNING + - BAD + host_id: + description: The unique ID of the cluster host. + type: str + returned: always + maintenance_mode: + description: Whether the Cloudera Manager service role is in maintenance mode. + type: bool + returned: always + maintenance_owners: + description: List of objects that trigger the Cloudera Manager service role to be in maintenance mode. + type: list + elements: str + returned: optional + sample: + - CLUSTER + - SERVICE + - ROLE + - HOST + - CONTROL_PLANE + name: + description: + - The Cloudera Manager service role name. + - Note, this is an auto-generated name and cannot be changed. 
+ type: str + returned: always + role_config_group_name: + description: The name of the Cloudera Manager Service role config group, which uniquely identifies it in a Cloudera Manager installation. + type: str + returned: always + role_state: + description: State of the Cloudera Manager service role. + type: str + returned: always + sample: + - HISTORY_NOT_AVAILABLE + - UNKNOWN + - STARTING + - STARTED + - STOPPING + - STOPPED + - NA + service_name: + description: The name of the Cloudera Manager service, which uniquely identifies it in a deployment. + type: str + returned: always + tags: + description: Set of tags for the Cloudera Manager service role. + type: dict + returned: optional + type: + description: The Cloudera Manager service role type. + type: str + returned: always + sample: + - HOSTMONITOR + - ALERTPUBLISHER + - SERVICEMONITOR + - REPORTSMANAGER + - EVENTSERVER + zoo_keeper_server_mode: + description: + - The Zookeeper server mode for this Cloudera Manager service role. + - Note that for non-Zookeeper Server roles, this will be V(null). + type: str + returned: optional + service_state: + description: Run state of the Cloudera Manager service. + type: str + returned: always + sample: + - HISTORY_NOT_AVAILABLE + - UNKNOWN + - STARTING + - STARTED + - STOPPING + - STOPPED + - NA + service_version: + description: Version of Cloudera Manager service. + type: str + returned: always + tags: + description: List of tags for Cloudera Manager service. + type: list + returned: optional + type: + description: Type of the Cloudera Manager service, i.e. MGMT. + type: str + returned: always + sample: + - MGMT diff --git a/tests/return-values-cm_service_role.yml b/tests/return-values-cm_service_role.yml new file mode 100644 index 00000000..f91235a7 --- /dev/null +++ b/tests/return-values-cm_service_role.yml @@ -0,0 +1,157 @@ +# Copyright 2025 Cloudera, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +role: + description: Details about the Cloudera Manager service role. + type: dict + returned: always + contains: + commission_state: + description: Commission state of the Cloudera Manager service role. + type: str + returned: always + sample: + - COMMISSIONED + - DECOMMISSIONING + - DECOMMISSIONED + - UNKNOWN + - OFFLINING + - OFFLINED + config: + description: Role override configuration for the Cloudera Manager service. + type: dict + returned: optional + config_staleness_status: + description: Status of configuration staleness for the Cloudera Manager service role. + type: str + returned: always + sample: + - FRESH + - STALE_REFRESHABLE + - STALE + ha_status: + description: High-availability status for the Cloudera Manager service. + type: str + returned: optional + sample: + - ACTIVE + - STANDBY + - UNKNOWN + health_checks: + description: List of all available health checks for Cloudera Manager service role. + type: list + elements: dict + returned: optional + contains: + explanation: + description: The explanation of this health check. 
+ type: str + returned: optional + name: + description: Unique name of this health check. + type: str + returned: always + summary: + description: The high-level health status of the health check. + type: str + returned: always + sample: + - DISABLED + - HISTORY_NOT_AVAILABLE + - NOT_AVAILABLE + - GOOD + - CONCERNING + - BAD + suppressed: + description: + - Whether this health check is suppressed. + - A suppressed health check is not considered when computing the role's overall health. + type: bool + returned: optional + health_summary: + description: The high-level health status of the Cloudera Manager service role. + type: str + returned: always + sample: + - DISABLED + - HISTORY_NOT_AVAILABLE + - NOT_AVAILABLE + - GOOD + - CONCERNING + - BAD + host_id: + description: The unique ID of the cluster host. + type: str + returned: always + maintenance_mode: + description: Whether the Cloudera Manager service role is in maintenance mode. + type: bool + returned: always + maintenance_owners: + description: List of objects that trigger the Cloudera Manager service role to be in maintenance mode. + type: list + elements: str + returned: optional + sample: + - CLUSTER + - SERVICE + - ROLE + - HOST + - CONTROL_PLANE + name: + description: + - The Cloudera Manager service role name. + - Note, this is an auto-generated name and cannot be changed. + type: str + returned: always + role_config_group_name: + description: The name of the Cloudera Manager Service role config group, which uniquely identifies it in a Cloudera Manager installation. + type: str + returned: always + role_state: + description: State of the Cloudera Manager service role. + type: str + returned: always + sample: + - HISTORY_NOT_AVAILABLE + - UNKNOWN + - STARTING + - STARTED + - STOPPING + - STOPPED + - NA + service_name: + description: The name of the Cloudera Manager service, which uniquely identifies it in a deployment. + type: str + returned: always + tags: + description: Set of tags for the Cloudera Manager service role. + type: dict + returned: optional + type: + description: The Cloudera Manager service role type. + type: str + returned: always + sample: + - HOSTMONITOR + - ALERTPUBLISHER + - SERVICEMONITOR + - REPORTSMANAGER + - EVENTSERVER + zoo_keeper_server_mode: + description: + - The Zookeeper server mode for this Cloudera Manager service role. + - Note that for non-Zookeeper Server roles, this will be V(null). + type: str + returned: optional diff --git a/tests/return-values-cm_service_role_config_group.yml b/tests/return-values-cm_service_role_config_group.yml new file mode 100644 index 00000000..9b09b813 --- /dev/null +++ b/tests/return-values-cm_service_role_config_group.yml @@ -0,0 +1,48 @@ +# Copyright 2025 Cloudera, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +role_config_group: + description: A Cloudera Manager service role config group. + type: dict + returned: always + contains: + base: + description: Whether the role config group is a base group. 
+      type: bool
+      returned: always
+    config:
+      description: Set of configurations for the role config group.
+      type: dict
+      returned: optional
+    display_name:
+      description: Display name of the role config group.
+      type: str
+      returned: always
+    name:
+      description: Name (identifier) of the role config group.
+      type: str
+      returned: always
+    role_names:
+      description: List of role names (identifiers) associated with this role config group.
+      type: list
+      elements: str
+      returned: optional
+    role_type:
+      description: The type of the roles in this role config group.
+      type: str
+      returned: always
+    service_name:
+      description: Service name associated with this role config group.
+      type: str
+      returned: always
diff --git a/tests/unit/__init__.py b/tests/unit/__init__.py
index f593ed44..44b4ba0a 100644
--- a/tests/unit/__init__.py
+++ b/tests/unit/__init__.py
@@ -1,6 +1,6 @@
 # -*- coding: utf-8 -*-
 #
-# Copyright 2024 Cloudera, Inc.
+# Copyright 2025 Cloudera, Inc.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -22,15 +22,35 @@
     ApiCluster,
     ApiCommand,
     ApiConfig,
+    ApiConfigList,
+    ApiHostRef,
+    ApiRole,
+    ApiRoleConfigGroup,
+    ApiRoleList,
+    ApiRoleNameList,
+    ApiRoleState,
     ApiService,
     ApiServiceConfig,
     ApiServiceList,
     ClustersResourceApi,
     CommandsResourceApi,
+    MgmtRolesResourceApi,
+    MgmtRoleCommandsResourceApi,
+    MgmtRoleConfigGroupsResourceApi,
     ServicesResourceApi,
 )
 from cm_client.rest import ApiException

+from ansible_collections.cloudera.cluster.plugins.module_utils.cm_utils import (
+    resolve_parameter_updates,
+)
+from ansible_collections.cloudera.cluster.plugins.module_utils.host_utils import (
+    get_host_ref,
+)
+from ansible_collections.cloudera.cluster.plugins.module_utils.role_utils import (
+    get_mgmt_roles,
+)
+

 class AnsibleExitJson(Exception):
     """Exception class to be raised by module.exit_json and caught by the test case"""
@@ -55,7 +75,7 @@ def __init__(self, kwargs):

 def wait_for_command(
     api_client: ApiClient, command: ApiCommand, polling: int = 120, delay: int = 5
 ):
-    """Polls Cloudera Manager to wait for a Command to complete."""
+    """Polls Cloudera Manager to wait for a given Command to succeed or fail."""

     poll_count = 0
     while command.active:
@@ -84,7 +104,7 @@ def provision_service(
         Exception: _description_

     Yields:
-        ApiService: _description_
+        Generator[ApiService]: _description_
     """

     api = ServicesResourceApi(api_client)
@@ -126,7 +146,7 @@ def service_wide_config(
         Exception: _description_

     Yields:
-        ApiService: _description_
+        Generator[ApiService]: _description_
     """

     service_api = ServicesResourceApi(api_client)
@@ -179,3 +199,235 @@ def service_wide_config(
         message=f"{message}::reset",
         body=ApiServiceConfig(items=reconciled),
     )
+
+
+def provision_cm_role(
+    api_client: ApiClient, role_name: str, role_type: str, host_id: str
+) -> Generator[ApiRole]:
+    """Yield a newly-created Cloudera Manager Service role, deleting the
+    role after use. Use with 'yield from' within a pytest fixture.
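+
+    Example (a minimal sketch of intended usage; the fixture, role name, and
+    'host' fixture are illustrative, not part of the collection):
+
+        @pytest.fixture
+        def host_monitor(cm_api_client, host):
+            yield from provision_cm_role(
+                cm_api_client, "test-hostmonitor", "HOSTMONITOR", host.host_id
+            )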
+ + Args: + api_client (ApiClient): _description_ + role_name (str): _description_ + role_type (str): _description_ + host_id (str): _description_ + + Yields: + Generator[ApiRole]: _description_ + """ + api = MgmtRolesResourceApi(api_client) + + role = ApiRole( + name=role_name, + type=role_type, + host_ref=ApiHostRef(host_id=host_id), + ) + + provisioned_role = next( + iter(api.create_roles(body=ApiRoleList(items=[role])).items), None + ) + + yield provisioned_role + + try: + api.delete_role(role_name=provisioned_role.name) + except ApiException as ae: + if ae.status != 404: + raise ae + + +def set_cm_role( + api_client: ApiClient, cluster: ApiCluster, role: ApiRole +) -> Generator[ApiRole]: + """Set a net-new Cloudera Manager Service role. Yields the new role, + resetting to any existing role upon completion. Use with 'yield from' + within a pytest fixture. + """ + role_api = MgmtRolesResourceApi(api_client) + role_cmd_api = MgmtRoleCommandsResourceApi(api_client) + + # Check for existing management role + pre_role = next( + iter([r for r in get_mgmt_roles(api_client, role.type).items]), None + ) + + if pre_role is not None: + # Get the current state + pre_role.config = role_api.read_role_config(role_name=pre_role.name) + + # Remove the prior role + role_api.delete_role(role_name=pre_role.name) + + if not role.host_ref: + cluster_api = ClustersResourceApi(api_client) + + # Get first host of the cluster + hosts = cluster_api.list_hosts(cluster_name=cluster.name) + + if not hosts.items: + raise Exception( + "No available hosts to assign the Cloudera Manager Service role." + ) + + role.host_ref = get_host_ref(api_client, host_id=hosts.items[0].host_id) + + # Create the role under test + current_role = next( + iter(role_api.create_roles(body=ApiRoleList(items=[role])).items), None + ) + current_role.config = role_api.read_role_config(role_name=current_role.name) + + if role.maintenance_mode: + role_api.enter_maintenance_mode(role_name=current_role.name) + + if role.role_state in [ApiRoleState.STARTING, ApiRoleState.STARTED]: + start_cmds = role_cmd_api.start_command( + body=ApiRoleNameList(items=[current_role.name]) + ) + if start_cmds.errors: + error_msg = "\n".join(start_cmds.errors) + raise Exception(error_msg) + + for cmd in start_cmds.items: + # Serial monitoring + wait_for_command(api_client=api_client, command=cmd) + + # Yield the role under test + yield current_role + + # Remove the role under test + current_role = role_api.delete_role(role_name=current_role.name) + + # Reinstate the previous role + if pre_role is not None: + role_api.create_roles(body=ApiRoleList(items=[pre_role])) + if pre_role.maintenance_mode: + role_api.enter_maintenance_mode(pre_role.name) + if pre_role.role_state in [ApiRoleState.STARTED, ApiRoleState.STARTING]: + restart_cmds = role_cmd_api.restart_command( + body=ApiRoleNameList(items=[pre_role.name]) + ) + if restart_cmds.errors: + error_msg = "\n".join(restart_cmds.errors) + raise Exception(error_msg) + + for cmd in restart_cmds.items: + # Serial monitoring + wait_for_command(api_client=api_client, command=cmd) + + +def set_cm_role_config( + api_client: ApiClient, role: ApiRole, params: dict, message: str +) -> Generator[ApiRole]: + """Update a role configuration for a given role. Yields the + role, resetting the configuration to its prior state. Use with + 'yield from' within a pytest fixture. 
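+
+    Example (a minimal sketch; the 'host_monitor' fixture and the parameter
+    shown are illustrative):
+
+        @pytest.fixture
+        def host_monitor_config(cm_api_client, host_monitor, request):
+            yield from set_cm_role_config(
+                api_client=cm_api_client,
+                role=host_monitor,
+                params=dict(mgmt_num_descriptor_fetch_tries="15"),
+                message=request.node.name,
+            )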
+ + Args: + api_client (ApiClient): _description_ + role (ApiRole): _description_ + params (dict): _description_ + message (str): _description_ + + Raises: + Exception: _description_ + + Yields: + Generator[ApiRole]: _description_ + """ + role_api = MgmtRolesResourceApi(api_client) + + # Retrieve all of the pre-setup configurations + pre = role_api.read_role_config(role.name) + + # Set the test configurations + # Do so serially, since a failed update due to defaults (see ApiException) will cause remaining + # configuration entries to not run. Long-term solution is to check-and-set, which is + # what the Ansible modules do... + for k, v in params.items(): + try: + role_api.update_role_config( + role_name=role.name, + message=f"{message}::set", + body=ApiConfigList(items=[ApiConfig(name=k, value=v)]), + ) + except ApiException as ae: + if ae.status != 400 or "delete with template" not in str(ae.body): + raise Exception(str(ae)) + + # Yield the targeted role + yield role_api.read_role(role_name=role.name) + + # Retrieve all of the post-setup configurations + post = role_api.read_role_config(role_name=role.name) + + # Reconcile the configurations + pre_set = set([c.name for c in pre.items]) + + reconciled = pre.items.copy() + reconciled.extend( + [ + ApiConfig(name=k.name, value=None) + for k in post.items + if k.name not in pre_set + ] + ) + + role_api.update_role_config( + role_name=role.name, + message=f"{message}::reset", + body=ApiConfigList(items=reconciled), + ) + + +def set_cm_role_config_group( + api_client: ApiClient, + role_config_group: ApiRoleConfigGroup, + update: ApiRoleConfigGroup, + message: str, +) -> Generator[ApiRoleConfigGroup]: + """Update a configuration for a given Cloudera Manager Service role config group. + Yields the role config group and upon returning control, will reset the + configuration to its prior state. + Use with 'yield from' within a pytest fixture. 
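+
+    Example (a minimal sketch; 'existing_rcg' and the configuration value
+    are illustrative placeholders):
+
+        yield from set_cm_role_config_group(
+            api_client=cm_api_client,
+            role_config_group=existing_rcg,
+            update=ApiRoleConfigGroup(
+                config=ApiConfigList(
+                    items=[ApiConfig(name="process_start_secs", value="30")]
+                )
+            ),
+            message=request.node.name,
+        )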
+ + Args: + api_client (ApiClient): CM API client + role_config_group (ApiRoleConfigGroup): The Role Config Group to manage + update (ApiRoleConfigGroup): The state to set + message (str): Transaction descriptor; will be appended with '::[re]set' + + Yields: + Generator[ApiRoleConfigGroup]: The updated Role Config Group + """ + rcg_api = MgmtRoleConfigGroupsResourceApi(api_client) + + # Ensure the modification (not a replacement) of the existing role config group + update.name = role_config_group.name + + # Update the role config group + pre_rcg = rcg_api.update_role_config_group( + role_config_group.name, message=f"{message}::set", body=update + ) + + yield pre_rcg + + # Reread the role config group + post_rcg = rcg_api.read_role_config_group(role_config_group_name=pre_rcg.name) + + # Revert the changes + config_revert = resolve_parameter_updates( + {c.name: c.value for c in post_rcg.config.items}, + {c.name: c.value for c in role_config_group.config.items}, + True, + ) + + if config_revert: + role_config_group.config = ApiConfigList( + items=[ApiConfig(name=k, value=v) for k, v in config_revert.items()] + ) + + rcg_api.update_role_config_group( + role_config_group.name, message=f"{message}::reset", body=role_config_group + ) diff --git a/tests/unit/conftest.py b/tests/unit/conftest.py index 5fb502e1..3394ff15 100644 --- a/tests/unit/conftest.py +++ b/tests/unit/conftest.py @@ -26,20 +26,34 @@ import sys import yaml +from collections.abc import Generator from pathlib import Path +from time import sleep from cm_client import ( + ApiBulkCommandList, ApiClient, ApiClusterList, ApiCluster, + ApiCommand, ApiConfig, ApiHostRef, ApiHostRefList, + ApiRole, + ApiRoleConfigGroup, + ApiRoleList, + ApiRoleNameList, + ApiRoleState, ApiService, ApiServiceConfig, + ApiServiceState, ClustersResourceApi, + CommandsResourceApi, Configuration, HostsResourceApi, + MgmtRoleCommandsResourceApi, + MgmtRoleConfigGroupsResourceApi, + MgmtRolesResourceApi, MgmtServiceResourceApi, ParcelResourceApi, ParcelsResourceApi, @@ -53,9 +67,16 @@ Parcel, ) +from ansible_collections.cloudera.cluster.plugins.module_utils.role_utils import ( + get_mgmt_roles, +) + from ansible_collections.cloudera.cluster.tests.unit import ( AnsibleFailJson, AnsibleExitJson, + provision_cm_role, + set_cm_role_config, + set_cm_role_config_group, ) @@ -153,6 +174,10 @@ def cm_api_client(conn) -> ApiClient: # Handle redirects redirect = rest.GET(url).urllib3_response.geturl() + + if redirect == None: + raise Exception("Unable to establish connection to Cloudera Manager") + if redirect != "/": url = redirect @@ -174,7 +199,23 @@ def cm_api_client(conn) -> ApiClient: @pytest.fixture(scope="session") def base_cluster(cm_api_client, request): - """Provision a CDH Base cluster.""" + """Provision a CDH Base cluster. If the variable 'CM_CLUSTER' is present, + will attempt to read and yield a reference to this cluster. Otherwise, + will yield a new base cluster with a single host, deleting the cluster + once completed. 
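+
+    Example (a minimal sketch; the test body is illustrative):
+
+        def test_cluster_is_available(base_cluster):
+            assert base_cluster.name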
+ + Args: + cm_api_client (_type_): _description_ + request (_type_): _description_ + + Raises: + Exception: _description_ + Exception: _description_ + Exception: _description_ + + Yields: + _type_: _description_ + """ cluster_api = ClustersResourceApi(cm_api_client) @@ -250,14 +291,32 @@ def base_cluster(cm_api_client, request): @pytest.fixture(scope="session") -def cms(cm_api_client, request): - """Provisions Cloudera Manager Service.""" +def cms(cm_api_client: ApiClient, request) -> Generator[ApiService]: + """Provisions Cloudera Manager Service. If the Cloudera Manager Service + is present, will read and yield this reference. Otherwise, will + yield a new Cloudera Manager Service, deleting it after use. - api = MgmtServiceResourceApi(cm_api_client) + NOTE! A new Cloudera Manager Service will _not_ be provisioned if + there are any existing clusters within the deployment! Therefore, + you must only run this fixture to provision a net-new Cloudera Manager + Service on a bare deployment, i.e. Cloudera Manager and hosts only. + + Args: + cm_api_client (ApiClient): _description_ + request (_type_): _description_ + + Raises: + Exception: _description_ + + Yields: + Generator[ApiService]: _description_ + """ + + cms_api = MgmtServiceResourceApi(cm_api_client) # Return if the Cloudera Manager Service is already present try: - yield api.read_service() + yield cms_api.read_service() return except ApiException as ae: if ae.status != 404 or "Cannot find management service." not in str(ae.body): @@ -269,13 +328,174 @@ def cms(cm_api_client, request): type="MGMT", ) - yield api.setup_cms(body=service) + cm_service = cms_api.setup_cms(body=service) + + # Do not set up any roles -- just the CMS service itself + # cms_api.auto_assign_roles() + + yield cm_service + + cms_api.delete_cms() + + +@pytest.fixture(scope="function") +def cms_cleared(cm_api_client) -> Generator[None]: + """Clears any existing Cloudera Manager Service, yields, and upon + return, removes any new service and reinstates the existing service, + if present. 
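+
+    Example (a minimal sketch mirroring the module tests; the module call and
+    assertion are illustrative):
+
+        def test_state_present(conn, module_args, cms_cleared):
+            module_args({**conn})
+            with pytest.raises(AnsibleExitJson) as e:
+                cm_service.main()
+            assert e.value.changed == True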
+ + Args: + cm_api_client (_type_): _description_ + + Raises: + ae: _description_ + + Yields: + Generator[None]: _description_ + """ + service_api = MgmtServiceResourceApi(cm_api_client) + rcg_api = MgmtRoleConfigGroupsResourceApi(cm_api_client) + role_api = MgmtRolesResourceApi(cm_api_client) + + pre_service = None + + try: + pre_service = service_api.read_service() + except ApiException as ae: + if ae.status != 404: + raise ae + + if pre_service is not None: + # Get the current state + pre_service.config = service_api.read_service_config() + + # Get the role config groups' state + if pre_service.role_config_groups is not None: + for rcg in pre_service.role_config_groups: + rcg.config = rcg_api.read_config(rcg.name) + + # Get each of its roles' state + if pre_service.roles is not None: + for r in pre_service.roles: + r.config = role_api.read_role_config(role_name=r.name) + + # Remove the prior CMS + service_api.delete_cms() + + # Yield now that the prior CMS has been removed + yield + + # Remove any created CMS + try: + service_api.delete_cms() + except ApiException as ae: + if ae.status != 404: + raise ae + + # Reinstate the prior CMS + if pre_service is not None: + service_api.setup_cms(body=pre_service) + if pre_service.maintenance_mode: + maintenance_cmd = service_api.enter_maintenance_mode() + monitor_command(api_client=cm_api_client, command=maintenance_cmd) + if pre_service.service_state in [ + ApiServiceState.STARTED, + ApiServiceState.STARTING, + ]: + restart_cmd = service_api.restart_command() + monitor_command(api_client=cm_api_client, command=restart_cmd) + + +@pytest.fixture(scope="function") +def cms_auto(cm_api_client, cms_cleared) -> Generator[ApiService]: + """Create a new Cloudera Manager Service on the first available host and auto-configures + the following roles: + - HOSTMONITOR + - SERVICEMONITOR + - EVENTSERVER + - ALERTPUBLISHER + + It starts this Cloudera Manager Service, yields, and will remove this service if the tests + do not. (This fixture delegates to the 'cms_cleared' fixture.) + + Args: + cm_api_client (_type_): _description_ + cms_cleared (_type_): _description_ + + Yields: + Generator[ApiService]: _description_ + """ + service_api = MgmtServiceResourceApi(cm_api_client) + host_api = HostsResourceApi(cm_api_client) + + host = next((h for h in host_api.read_hosts().items if not h.cluster_ref), None) + + if host is None: + raise Exception("No available hosts to assign Cloudera Manager Service roles") + + service_api.setup_cms( + body=ApiService( + type="MGMT", + roles=[ + ApiRole(type="HOSTMONITOR"), + ApiRole(type="SERVICEMONITOR"), + ApiRole(type="EVENTSERVER"), + ApiRole(type="ALERTPUBLISHER"), + ], + ) + ) + service_api.auto_configure() + + monitor_command(cm_api_client, service_api.start_command()) + + yield service_api.read_service() + + +@pytest.fixture(scope="function") +def cms_auto_no_start(cm_api_client, cms_cleared) -> Generator[ApiService]: + """Create a new Cloudera Manager Service on the first available host and auto-configures + the following roles: + - HOSTMONITOR + - SERVICEMONITOR + - EVENTSERVER + - ALERTPUBLISHER + + It does not start this Cloudera Manager Service, yields, and will remove this service if + the tests do not. (This fixture delegates to the 'cms_cleared' fixture.) 
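+
+    Example (a minimal sketch mirroring the module tests; the assertion is
+    illustrative):
+
+        def test_state_started(conn, module_args, cms_auto_no_start):
+            module_args({**conn, "state": "started"})
+            with pytest.raises(AnsibleExitJson) as e:
+                cm_service.main()
+            assert e.value.service["service_state"] == ApiServiceState.STARTED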
+ + Args: + cm_api_client (_type_): _description_ + cms_cleared (_type_): _description_ + + Yields: + Generator[ApiService]: _description_ + """ + service_api = MgmtServiceResourceApi(cm_api_client) + host_api = HostsResourceApi(cm_api_client) + + host = next((h for h in host_api.read_hosts().items if not h.cluster_ref), None) + + if host is None: + raise Exception("No available hosts to assign Cloudera Manager Service roles") + + service_api.setup_cms( + body=ApiService( + type="MGMT", + roles=[ + ApiRole(type="HOSTMONITOR"), + ApiRole(type="SERVICEMONITOR"), + ApiRole(type="EVENTSERVER"), + ApiRole(type="ALERTPUBLISHER"), + ], + ) + ) + service_api.auto_configure() - api.delete_cms() + yield service_api.read_service() @pytest.fixture(scope="function") -def cms_service_config(cm_api_client, cms, request): +def cms_config(cm_api_client, cms, request) -> Generator[ApiService]: """Configures service-wide configurations for the Cloudera Manager Service""" marker = request.node.get_closest_marker("service_config") @@ -295,7 +515,7 @@ def cms_service_config(cm_api_client, cms, request): for k, v in marker.args[0].items(): try: api.update_service_config( - message=f"{request.node.name}::set", + message=f"{Path(request.node.parent.name).stem}::{request.node.name}::set", body=ApiServiceConfig(items=[ApiConfig(name=k, value=v)]), ) except ApiException as ae: @@ -321,6 +541,187 @@ def cms_service_config(cm_api_client, cms, request): ) api.update_service_config( - message=f"{request.node.name}::reset", + message=f"{Path(request.node.parent.name).stem}::{request.node.name}::reset", body=ApiServiceConfig(items=reconciled), ) + + +@pytest.fixture(scope="function") +def host_monitor(cm_api_client, cms, request) -> Generator[ApiRole]: + api = MgmtRolesResourceApi(cm_api_client) + + hm = next( + iter([r for r in api.read_roles().items if r.type == "HOSTMONITOR"]), None + ) + + if hm is not None: + yield hm + else: + host_api = HostsResourceApi(cm_api_client) + host = next((h for h in host_api.read_hosts().items if not h.cluster_ref), None) + + if host is None: + raise Exception( + "No available hosts to assign Cloudera Manager Service role" + ) + else: + name = Path(request.fixturename).stem + yield from provision_cm_role( + cm_api_client, name, "HOSTMONITOR", host.host_id + ) + + +@pytest.fixture(scope="function") +def host_monitor_config(cm_api_client, host_monitor, request) -> Generator[ApiRole]: + marker = request.node.get_closest_marker("role_config") + + if marker is None: + raise Exception("No role_config marker found.") + + yield from set_cm_role_config( + api_client=cm_api_client, + role=host_monitor, + params=marker.args[0], + message=f"{Path(request.node.parent.name).stem}::{request.node.name}", + ) + + +@pytest.fixture(scope="function") +def host_monitor_role_group_config( + cm_api_client, host_monitor, request +) -> Generator[ApiRoleConfigGroup]: + """Configures the base Role Config Group for the Host Monitor role of a Cloudera Manager Service.""" + marker = request.node.get_closest_marker("role_config_group") + + if marker is None: + raise Exception("No 'role_config_group' marker found.") + + rcg_api = MgmtRoleConfigGroupsResourceApi(cm_api_client) + rcg = rcg_api.read_role_config_group( + host_monitor.role_config_group_ref.role_config_group_name + ) + rcg.config = rcg_api.read_config(role_config_group_name=rcg.name) + + yield from set_cm_role_config_group( + api_client=cm_api_client, + role_config_group=rcg, + update=marker.args[0], + 
message=f"{Path(request.node.parent.name).stem}::{request.node.name}", + ) + + +@pytest.fixture(scope="function") +def host_monitor_cleared(cm_api_client, cms) -> Generator[None]: + role_api = MgmtRolesResourceApi(cm_api_client) + role_cmd_api = MgmtRoleCommandsResourceApi(cm_api_client) + + # Check for existing management role + pre_role = next( + iter([r for r in get_mgmt_roles(cm_api_client, "HOSTMONITOR").items]), None + ) + + if pre_role is not None: + # Get the current state + pre_role.config = role_api.read_role_config(role_name=pre_role.name) + + # Remove the prior role + role_api.delete_role(role_name=pre_role.name) + + # Yield now that the role has been removed + yield + + # Reinstate the previous role + if pre_role is not None: + role_api.create_roles(body=ApiRoleList(items=[pre_role])) + if pre_role.maintenance_mode: + role_api.enter_maintenance_mode(pre_role.name) + if pre_role.role_state in [ApiRoleState.STARTED, ApiRoleState.STARTING]: + restart_cmds = role_cmd_api.restart_command( + body=ApiRoleNameList(items=[pre_role.name]) + ) + handle_commands(api_client=cm_api_client, commands=restart_cmds) + + +@pytest.fixture(scope="function") +def host_monitor_state( + cm_api_client, host_monitor, request +) -> Generator[ApiRoleConfigGroup]: + marker = request.node.get_closest_marker("role_state") + + if marker is None: + raise Exception("No 'role_state' marker found.") + + role_state = marker.args[0] + + role_api = MgmtRolesResourceApi(cm_api_client) + cmd_api = MgmtRoleCommandsResourceApi(cm_api_client) + + # Get the current state + pre_role = role_api.read_role(host_monitor.name) + + # Set the role state + if pre_role.role_state != role_state: + if role_state in [ApiRoleState.STARTED]: + handle_commands( + api_client=cm_api_client, + commands=cmd_api.start_command( + body=ApiRoleNameList(items=[host_monitor.name]) + ), + ) + elif role_state in [ApiRoleState.STOPPED]: + handle_commands( + api_client=cm_api_client, + commands=cmd_api.stop_command( + body=ApiRoleNameList(items=[host_monitor.name]) + ), + ) + + # Yield the role + current_role = role_api.read_role(host_monitor.name) + current_role.config = role_api.read_role_config(host_monitor.name) + yield current_role + + # Retrieve the test changes + post_role = role_api.read_role(role_name=host_monitor.name) + post_role.config = role_api.read_role_config(role_name=host_monitor.name) + + # Reset state + if pre_role.role_state != post_role.role_state: + if pre_role.role_state in [ApiRoleState.STARTED]: + handle_commands( + api_client=cm_api_client, + commands=cmd_api.start_command( + body=ApiRoleNameList(items=[host_monitor.name]) + ), + ) + elif pre_role.role_state in [ApiRoleState.STOPPED]: + handle_commands( + api_client=cm_api_client, + commands=cmd_api.stop_command( + body=ApiRoleNameList(items=[host_monitor.name]) + ), + ) + + +def handle_commands(api_client: ApiClient, commands: ApiBulkCommandList): + if commands.errors: + error_msg = "\n".join(commands.errors) + raise Exception(error_msg) + + for cmd in commands.items: + # Serial monitoring + monitor_command(api_client, cmd) + + +def monitor_command( + api_client: ApiClient, command: ApiCommand, polling: int = 10, delay: int = 15 +): + poll_count = 0 + while command.active: + if poll_count > polling: + raise Exception("Command timeout: " + str(command.id)) + sleep(delay) + poll_count += 1 + command = CommandsResourceApi(api_client).read_command(command.id) + if not command.success: + raise Exception(command.result_message) diff --git 
a/tests/unit/plugins/modules/cm_service/test_cm_service.py b/tests/unit/plugins/modules/cm_service/test_cm_service.py index 5614fe61..5abace09 100644 --- a/tests/unit/plugins/modules/cm_service/test_cm_service.py +++ b/tests/unit/plugins/modules/cm_service/test_cm_service.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Cloudera, Inc. All Rights Reserved. +# Copyright 2025 Cloudera, Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -17,10 +17,18 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -import os + import logging import pytest +from pathlib import Path + +from cm_client import ( + ApiService, + ApiServiceState, + MgmtServiceResourceApi, +) + from ansible_collections.cloudera.cluster.plugins.modules import cm_service from ansible_collections.cloudera.cluster.tests.unit import ( AnsibleExitJson, @@ -30,23 +38,359 @@ LOG = logging.getLogger(__name__) -def test_pytest_cm_service(module_args): +def test_state_present(conn, module_args, cms_cleared, request): + module_args( + { + **conn, + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + } + ) + + with pytest.raises(AnsibleExitJson) as e: + cm_service.main() + + assert e.value.changed == True + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service.main() + + assert e.value.changed == False + + +def test_state_absent(conn, module_args, cm_api_client, cms_cleared, request): module_args( { - "username": os.getenv("CM_USERNAME"), - "password": os.getenv("CM_PASSWORD"), - "host": os.getenv("CM_HOST"), - "port": "7180", - "verify_tls": "no", - "debug": "yes", + **conn, + "state": "absent", + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + } + ) + + service_api = MgmtServiceResourceApi(cm_api_client) + service_api.setup_cms(body=ApiService(type="MGMT")) + + with pytest.raises(AnsibleExitJson) as e: + cm_service.main() + + assert e.value.changed == True + assert not e.value.service + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service.main() + + assert e.value.changed == False + assert not e.value.service + + +def test_state_absent_running_roles(conn, module_args, cms_auto, request): + module_args( + { + **conn, + "state": "absent", + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + } + ) + + with pytest.raises(AnsibleExitJson) as e: + cm_service.main() + + assert e.value.changed == True + assert not e.value.service + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service.main() + + assert e.value.changed == False + assert not e.value.service + + +def test_state_started(conn, module_args, cm_api_client, cms_auto_no_start, request): + module_args( + { + **conn, "state": "started", - "role": ["SERVICEMONITOR", "HOSTMONITOR", "EVENTSERVER", "ALERTPUBLISHER"], + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", } ) - # with pytest.raises(AnsibleFailJson, match=r"boom") as e: with pytest.raises(AnsibleExitJson) as e: cm_service.main() - # LOG.info(str(e.value)) - LOG.info(str(e.value.cloudera_manager)) + assert e.value.changed == True + assert e.value.service["service_state"] == ApiServiceState.STARTED + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service.main() + + assert e.value.changed == False + assert e.value.service["service_state"] == ApiServiceState.STARTED + + +def test_state_stopped(conn, 
module_args, cm_api_client, cms_auto, request): + module_args( + { + **conn, + "state": "stopped", + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + } + ) + + with pytest.raises(AnsibleExitJson) as e: + cm_service.main() + + assert e.value.changed == True + assert e.value.service["service_state"] == ApiServiceState.STOPPED + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service.main() + + assert e.value.changed == False + assert e.value.service["service_state"] == ApiServiceState.STOPPED + + +def test_state_restarted(conn, module_args, cm_api_client, cms_auto, request): + module_args( + { + **conn, + "state": "restarted", + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + } + ) + + with pytest.raises(AnsibleExitJson) as e: + cm_service.main() + + assert e.value.changed == True + assert e.value.service["service_state"] == ApiServiceState.STARTED + + # Idempotency (rather, demonstrate that restart always invokes a changed state) + with pytest.raises(AnsibleExitJson) as e: + cm_service.main() + + assert e.value.changed == True + assert e.value.service["service_state"] == ApiServiceState.STARTED + + +def test_new_maintenance_enabled(conn, module_args, cms_cleared, request): + module_args( + { + **conn, + "maintenance": True, + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + } + ) + + with pytest.raises(AnsibleExitJson) as e: + cm_service.main() + + assert e.value.changed == True + assert e.value.service["maintenance_mode"] == True + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service.main() + + assert e.value.changed == False + assert e.value.service["maintenance_mode"] == True + + +def test_new_config(conn, module_args, cms_cleared, request): + module_args( + { + **conn, + "parameters": dict(mgmt_emit_sensitive_data_in_stderr=True), + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + # _ansible_check_mode=True, + # _ansible_diff=True, + } + ) + + expected = dict(mgmt_emit_sensitive_data_in_stderr="True") + + with pytest.raises(AnsibleExitJson) as e: + cm_service.main() + + assert e.value.changed == True + assert expected.items() <= e.value.service["config"].items() + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service.main() + + assert e.value.changed == False + assert expected.items() <= e.value.service["config"].items() + + +def test_existing_maintenance_enabled(conn, module_args, cm_api_client, cms, request): + module_args( + { + **conn, + "maintenance": True, + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + } + ) + + service_api = MgmtServiceResourceApi(cm_api_client) + service_api.exit_maintenance_mode() + + with pytest.raises(AnsibleExitJson) as e: + cm_service.main() + + assert e.value.changed == True + assert e.value.service["maintenance_mode"] == True + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service.main() + + assert e.value.changed == False + assert e.value.service["maintenance_mode"] == True + + +def test_existing_maintenance_disabled(conn, module_args, cm_api_client, cms, request): + module_args( + { + **conn, + "maintenance": False, + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + } + ) + + service_api = MgmtServiceResourceApi(cm_api_client) + service_api.enter_maintenance_mode() + + with pytest.raises(AnsibleExitJson) as e: + cm_service.main() + + assert e.value.changed == True + assert e.value.service["maintenance_mode"] == 
False + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service.main() + + assert e.value.changed == False + assert e.value.service["maintenance_mode"] == False + + +@pytest.mark.service_config(dict(log_event_retry_frequency=10)) +def test_existing_set_parameters(conn, module_args, cms_config, request): + module_args( + { + **conn, + "parameters": dict(mgmt_emit_sensitive_data_in_stderr=True), + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + # _ansible_check_mode=True, + # _ansible_diff=True, + } + ) + + expected = dict( + mgmt_emit_sensitive_data_in_stderr="True", log_event_retry_frequency="10" + ) + + with pytest.raises(AnsibleExitJson) as e: + cm_service.main() + + assert e.value.changed == True + assert expected.items() <= e.value.service["config"].items() + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service.main() + + assert e.value.changed == False + assert expected.items() <= e.value.service["config"].items() + + +@pytest.mark.service_config( + dict(mgmt_emit_sensitive_data_in_stderr=True, log_event_retry_frequency=10) +) +def test_existing_unset_parameters(conn, module_args, cms_config, request): + module_args( + { + **conn, + "parameters": dict(mgmt_emit_sensitive_data_in_stderr=None), + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + } + ) + + expected = dict(log_event_retry_frequency="10") + + with pytest.raises(AnsibleExitJson) as e: + cm_service.main() + + assert e.value.changed == True + assert expected.items() <= e.value.service["config"].items() + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service.main() + + assert e.value.changed == False + assert expected.items() <= e.value.service["config"].items() + + +@pytest.mark.service_config( + dict(mgmt_emit_sensitive_data_in_stderr=True, log_event_retry_frequency=10) +) +def test_existing_set_parameters_with_purge(conn, module_args, cms_config, request): + module_args( + { + **conn, + "parameters": dict(mgmt_emit_sensitive_data_in_stderr=True), + "purge": True, + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + # _ansible_check_mode=True, + # _ansible_diff=True, + } + ) + + expected = dict(mgmt_emit_sensitive_data_in_stderr="True") + + with pytest.raises(AnsibleExitJson) as e: + cm_service.main() + + assert e.value.changed == True + assert expected.items() <= e.value.service["config"].items() + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service.main() + + assert e.value.changed == False + assert expected.items() <= e.value.service["config"].items() + + +@pytest.mark.service_config( + dict(mgmt_emit_sensitive_data_in_stderr=True, log_event_retry_frequency=10) +) +def test_existing_purge_all_parameters(conn, module_args, cms_config, request): + module_args( + { + **conn, + "parameters": dict(), + "purge": True, + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + # _ansible_check_mode=True, + # _ansible_diff=True, + } + ) + + with pytest.raises(AnsibleExitJson) as e: + cm_service.main() + + assert e.value.changed == True + assert len(e.value.service["config"]) == 0 + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service.main() + + assert e.value.changed == False + assert len(e.value.service["config"]) == 0 diff --git a/tests/unit/plugins/modules/cm_service/test_cm_service_role_config_groups.py b/tests/unit/plugins/modules/cm_service/test_cm_service_role_config_groups.py new file mode 100644 index 00000000..3f99ef5f --- 
/dev/null +++ b/tests/unit/plugins/modules/cm_service/test_cm_service_role_config_groups.py @@ -0,0 +1,261 @@ +# -*- coding: utf-8 -*- + +# Copyright 2025 Cloudera, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +import logging +import pytest + +from pathlib import Path + +from cm_client import ( + ApiConfig, + ApiConfigList, + ApiRoleConfigGroup, +) + +from ansible_collections.cloudera.cluster.plugins.modules import cm_service +from ansible_collections.cloudera.cluster.tests.unit import ( + AnsibleExitJson, +) + +LOG = logging.getLogger(__name__) + + +def test_new_role_config_group(conn, module_args, cms_cleared, request): + expected = dict(alert_mailserver_username="FooBar") + + module_args( + { + **conn, + "role_config_groups": [ + { + "type": "ALERTPUBLISHER", + "config": expected, + } + ], + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + } + ) + + with pytest.raises(AnsibleExitJson) as e: + cm_service.main() + + assert e.value.changed == True + assert len(e.value.service["role_config_groups"]) == 1 + assert ( + expected.items() <= e.value.service["role_config_groups"][0]["config"].items() + ) + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service.main() + + assert e.value.changed == False + assert len(e.value.service["role_config_groups"]) == 1 + assert ( + expected.items() <= e.value.service["role_config_groups"][0]["config"].items() + ) + + +@pytest.mark.role_config_group( + ApiRoleConfigGroup( + role_type="HOSTMONITOR", + config=ApiConfigList( + items=[ + ApiConfig(name="mgmt_num_descriptor_fetch_tries", value=16), + ApiConfig(name="process_start_secs", value=36), + ] + ), + ) +) +def test_existing_role_config_group_set( + conn, module_args, host_monitor_role_group_config, request +): + expected = dict(mgmt_num_descriptor_fetch_tries="16", process_start_secs="96") + + module_args( + { + **conn, + "role_config_groups": [ + { + "type": "HOSTMONITOR", + "config": dict(process_start_secs="96"), + } + ], + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + } + ) + + with pytest.raises(AnsibleExitJson) as e: + cm_service.main() + + assert e.value.changed == True + assert len(e.value.service["role_config_groups"]) == 1 + assert ( + expected.items() <= e.value.service["role_config_groups"][0]["config"].items() + ) + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service.main() + + assert e.value.changed == False + assert len(e.value.service["role_config_groups"]) == 1 + assert ( + expected.items() <= e.value.service["role_config_groups"][0]["config"].items() + ) + + +@pytest.mark.role_config_group( + ApiRoleConfigGroup( + role_type="HOSTMONITOR", + config=ApiConfigList( + items=[ + ApiConfig(name="mgmt_num_descriptor_fetch_tries", value=17), + ApiConfig(name="process_start_secs", value=37), + ] + ), + ) +) +def test_existing_role_config_group_unset( + conn, module_args, 
host_monitor_role_group_config, request +): + expected = dict( + mgmt_num_descriptor_fetch_tries="17", + ) + + module_args( + { + **conn, + "role_config_groups": [ + { + "type": "HOSTMONITOR", + "config": dict(process_start_secs=None), + } + ], + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + } + ) + + with pytest.raises(AnsibleExitJson) as e: + cm_service.main() + + assert e.value.changed == True + assert len(e.value.service["role_config_groups"]) == 1 + assert ( + expected.items() <= e.value.service["role_config_groups"][0]["config"].items() + ) + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service.main() + + assert e.value.changed == False + assert len(e.value.service["role_config_groups"]) == 1 + assert ( + expected.items() <= e.value.service["role_config_groups"][0]["config"].items() + ) + + +@pytest.mark.role_config_group( + ApiRoleConfigGroup( + role_type="HOSTMONITOR", + config=ApiConfigList( + items=[ + ApiConfig(name="mgmt_num_descriptor_fetch_tries", value=18), + ApiConfig(name="process_start_secs", value=38), + ] + ), + ) +) +def test_existing_role_config_group_purge( + conn, module_args, host_monitor_role_group_config, request +): + expected = dict( + mgmt_num_descriptor_fetch_tries="28", + ) + + module_args( + { + **conn, + "role_config_groups": [ + { + "type": "HOSTMONITOR", + "config": dict(mgmt_num_descriptor_fetch_tries=28), + } + ], + "purge": True, + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + } + ) + + with pytest.raises(AnsibleExitJson) as e: + cm_service.main() + + assert e.value.changed == True + assert len(e.value.service["role_config_groups"]) == 1 + assert ( + expected.items() <= e.value.service["role_config_groups"][0]["config"].items() + ) + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service.main() + + assert e.value.changed == False + assert len(e.value.service["role_config_groups"]) == 1 + assert ( + expected.items() <= e.value.service["role_config_groups"][0]["config"].items() + ) + + +@pytest.mark.role_config_group( + ApiRoleConfigGroup( + role_type="HOSTMONITOR", + config=ApiConfigList( + items=[ + ApiConfig(name="mgmt_num_descriptor_fetch_tries", value=18), + ApiConfig(name="process_start_secs", value=38), + ] + ), + ) +) +def test_existing_role_config_group_purge_all( + conn, module_args, host_monitor_role_group_config, request +): + module_args( + { + **conn, + "purge": True, + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + } + ) + + with pytest.raises(AnsibleExitJson) as e: + cm_service.main() + + assert e.value.changed == True + assert len(e.value.service["role_config_groups"]) == 0 + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service.main() + + assert e.value.changed == False + assert len(e.value.service["role_config_groups"]) == 0 diff --git a/tests/unit/plugins/modules/cm_service/test_cm_service_roles.py b/tests/unit/plugins/modules/cm_service/test_cm_service_roles.py new file mode 100644 index 00000000..2de5ee43 --- /dev/null +++ b/tests/unit/plugins/modules/cm_service/test_cm_service_roles.py @@ -0,0 +1,454 @@ +# -*- coding: utf-8 -*- + +# Copyright 2025 Cloudera, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +import logging +import pytest + +from pathlib import Path + +from cm_client import ( + HostsResourceApi, +) + +from ansible_collections.cloudera.cluster.plugins.modules import cm_service +from ansible_collections.cloudera.cluster.tests.unit import ( + AnsibleExitJson, + AnsibleFailJson, +) + +LOG = logging.getLogger(__name__) + + +def test_new_role(conn, module_args, cm_api_client, cms_cleared, request): + host_api = HostsResourceApi(cm_api_client) + host = next((h for h in host_api.read_hosts().items if not h.cluster_ref), None) + if host is None: + raise Exception("No available hosts to assign Cloudera Manager Service role") + + module_args( + { + **conn, + "roles": [ + { + "type": "HOSTMONITOR", + "cluster_host_id": host.host_id, + } + ], + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + # _ansible_check_mode=True, + # _ansible_diff=True, + } + ) + + with pytest.raises(AnsibleExitJson) as e: + cm_service.main() + + assert e.value.changed == True + assert len(e.value.service["roles"]) == 1 + assert not e.value.service["roles"][0]["config"] + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service.main() + + assert e.value.changed == False + assert len(e.value.service["roles"]) == 1 + assert not e.value.service["roles"][0]["config"] + + +def test_new_role_config(conn, module_args, cm_api_client, cms_cleared, request): + host_api = HostsResourceApi(cm_api_client) + host = next((h for h in host_api.read_hosts().items if not h.cluster_ref), None) + if host is None: + raise Exception("No available hosts to assign Cloudera Manager Service role") + + expected = dict(mgmt_num_descriptor_fetch_tries="15") + + module_args( + { + **conn, + "roles": [ + { + "type": "HOSTMONITOR", + "cluster_host_id": host.host_id, + "config": expected, + } + ], + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + # _ansible_check_mode=True, + # _ansible_diff=True, + } + ) + + with pytest.raises(AnsibleExitJson) as e: + cm_service.main() + + assert e.value.changed == True + assert len(e.value.service["roles"]) == 1 + assert expected.items() <= e.value.service["roles"][0]["config"].items() + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service.main() + + assert e.value.changed == False + assert len(e.value.service["roles"]) == 1 + assert expected.items() <= e.value.service["roles"][0]["config"].items() + + +def test_existing_role_new(conn, module_args, cm_api_client, cms, request): + host_api = HostsResourceApi(cm_api_client) + host = next((h for h in host_api.read_hosts().items if not h.cluster_ref), None) + if host is None: + raise Exception("No available hosts to assign Cloudera Manager Service role") + + module_args( + { + **conn, + "roles": [ + { + "type": "HOSTMONITOR", + "cluster_host_id": host.host_id, + } + ], + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + # _ansible_check_mode=True, + # _ansible_diff=True, + } + ) + + with pytest.raises(AnsibleExitJson) as e: + cm_service.main() + + 
assert e.value.changed == True
+    assert len(e.value.service["roles"]) == 1
+    assert not e.value.service["roles"][0]["config"]
+
+    # Idempotency
+    with pytest.raises(AnsibleExitJson) as e:
+        cm_service.main()
+
+    assert e.value.changed == False
+    assert len(e.value.service["roles"]) == 1
+    assert not e.value.service["roles"][0]["config"]
+
+
+def test_existing_role_new_config_set(conn, module_args, cm_api_client, cms, request):
+    host_api = HostsResourceApi(cm_api_client)
+    host = next((h for h in host_api.read_hosts().items if not h.cluster_ref), None)
+    if host is None:
+        raise Exception("No available hosts to assign Cloudera Manager Service role")
+
+    expected = dict(mgmt_num_descriptor_fetch_tries="15")
+
+    module_args(
+        {
+            **conn,
+            "roles": [
+                {
+                    "type": "HOSTMONITOR",
+                    "cluster_host_id": host.host_id,
+                    "config": expected,
+                }
+            ],
+            "message": f"{Path(request.node.parent.name).stem}::{request.node.name}",
+            # _ansible_check_mode=True,
+            # _ansible_diff=True,
+        }
+    )
+
+    with pytest.raises(AnsibleExitJson) as e:
+        cm_service.main()
+
+    assert e.value.changed == True
+    assert len(e.value.service["roles"]) == 1
+    assert expected.items() <= e.value.service["roles"][0]["config"].items()
+
+    # Idempotency
+    with pytest.raises(AnsibleExitJson) as e:
+        cm_service.main()
+
+    assert e.value.changed == False
+    assert len(e.value.service["roles"]) == 1
+    assert expected.items() <= e.value.service["roles"][0]["config"].items()
+
+
+@pytest.mark.role_config(
+    dict(mgmt_num_descriptor_fetch_tries=15, process_start_secs=35)
+)
+def test_existing_role_existing_config_set(
+    conn, module_args, cm_api_client, host_monitor_config, request
+):
+    # Update one parameter to a new value; the untouched parameter keeps its
+    # marker-provided value.
+    expected = dict(mgmt_num_descriptor_fetch_tries="55", process_start_secs="35")
+
+    module_args(
+        {
+            **conn,
+            "roles": [
+                {
+                    "type": "HOSTMONITOR",
+                    # "cluster_host_id": host.host_id,
+                    "config": {
+                        "mgmt_num_descriptor_fetch_tries": 55,
+                    },
+                }
+            ],
+            "message": f"{Path(request.node.parent.name).stem}::{request.node.name}",
+            # _ansible_check_mode=True,
+            # _ansible_diff=True,
+        }
+    )
+
+    with pytest.raises(AnsibleExitJson) as e:
+        cm_service.main()
+
+    assert e.value.changed == True
+    assert len(e.value.service["roles"]) == 1
+    assert expected.items() <= e.value.service["roles"][0]["config"].items()
+
+    # Idempotency
+    with pytest.raises(AnsibleExitJson) as e:
+        cm_service.main()
+
+    assert e.value.changed == False
+    assert len(e.value.service["roles"]) == 1
+    assert expected.items() <= e.value.service["roles"][0]["config"].items()
+
+
+@pytest.mark.role_config(
+    dict(mgmt_num_descriptor_fetch_tries=15, process_start_secs=35)
+)
+def test_existing_role_existing_config_unset(
+    conn, module_args, cm_api_client, host_monitor_config, request
+):
+    expected = dict(process_start_secs="35")
+
+    module_args(
+        {
+            **conn,
+            "roles": [
+                {
+                    "type": "HOSTMONITOR",
+                    # "cluster_host_id": host.host_id,
+                    "config": {
+                        "mgmt_num_descriptor_fetch_tries": None,
+                    },
+                }
+            ],
+            "message": f"{Path(request.node.parent.name).stem}::{request.node.name}",
+            # _ansible_check_mode=True,
+            # _ansible_diff=True,
+        }
+    )
+
+    with pytest.raises(AnsibleExitJson) as e:
+        cm_service.main()
+
+    assert e.value.changed == True
+    assert len(e.value.service["roles"]) == 1
+    assert expected.items() <= e.value.service["roles"][0]["config"].items()
+
+    # Idempotency
+    with pytest.raises(AnsibleExitJson) as e:
+        cm_service.main()
+
+    assert e.value.changed == False
+    assert len(e.value.service["roles"]) == 1
+    assert expected.items() <= e.value.service["roles"][0]["config"].items()
+
+
+@pytest.mark.role_config(
+
dict(mgmt_num_descriptor_fetch_tries=16, process_start_secs=36) +) +def test_existing_role_existing_config_purge( + conn, module_args, cm_api_client, host_monitor_config, request +): + expected = dict(process_start_secs="36") + + module_args( + { + **conn, + "roles": [ + { + "type": "HOSTMONITOR", + # "cluster_host_id": host.host_id, + "config": { + "process_start_secs": 36, + }, + } + ], + "purge": True, + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + # _ansible_check_mode=True, + # _ansible_diff=True, + } + ) + + with pytest.raises(AnsibleExitJson) as e: + cm_service.main() + + assert e.value.changed == True + assert len(e.value.service["roles"]) == 1 + assert expected.items() <= e.value.service["roles"][0]["config"].items() + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service.main() + + assert e.value.changed == False + assert len(e.value.service["roles"]) == 1 + assert expected.items() <= e.value.service["roles"][0]["config"].items() + + +@pytest.mark.role_config( + dict(mgmt_num_descriptor_fetch_tries=17, process_start_secs=37) +) +def test_existing_role_existing_config_purge_all( + conn, module_args, cm_api_client, host_monitor_config, request +): + module_args( + { + **conn, + "roles": [ + { + "type": "HOSTMONITOR", + # "cluster_host_id": host.host_id, + } + ], + "purge": True, + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + # _ansible_check_mode=True, + # _ansible_diff=True, + } + ) + + with pytest.raises(AnsibleExitJson) as e: + cm_service.main() + + assert e.value.changed == True + assert len(e.value.service["roles"]) == 1 + assert not e.value.service["roles"][0]["config"] + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service.main() + + assert e.value.changed == False + assert len(e.value.service["roles"]) == 1 + assert not e.value.service["roles"][0]["config"] + + +def test_existing_role_config_invalid(conn, module_args, cm_api_client, cms, request): + host_api = HostsResourceApi(cm_api_client) + host = next((h for h in host_api.read_hosts().items if not h.cluster_ref), None) + if host is None: + raise Exception("No available hosts to assign Cloudera Manager Service role") + + expected = dict(mgmt_emit_sensitive_data_in_stderr=True) + + module_args( + { + **conn, + "roles": [ + { + "type": "HOSTMONITOR", + "cluster_host_id": host.host_id, + "config": expected, + } + ], + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + # _ansible_check_mode=True, + # _ansible_diff=True, + } + ) + + with pytest.raises(AnsibleFailJson, match="Unknown configuration attribute"): + cm_service.main() + + +def test_existing_role_relocate( + conn, module_args, cm_api_client, host_monitor, request +): + host_api = HostsResourceApi(cm_api_client) + host = next( + ( + h + for h in host_api.read_hosts().items + if not h.cluster_ref and h.host_id != host_monitor.host_ref.host_id + ), + None, + ) + if host is None: + raise Exception("No available hosts to relocate Cloudera Manager Service role") + + module_args( + { + **conn, + "roles": [ + { + "type": "HOSTMONITOR", + "cluster_host_id": host.host_id, + } + ], + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + # _ansible_check_mode=True, + # _ansible_diff=True, + } + ) + + with pytest.raises(AnsibleExitJson) as e: + cm_service.main() + + assert e.value.changed == True + assert len(e.value.service["roles"]) == 1 + assert e.value.service["roles"][0]["host_id"] == host.host_id + + # Idempotency + with 
pytest.raises(AnsibleExitJson) as e: + cm_service.main() + + assert e.value.changed == False + assert len(e.value.service["roles"]) == 1 + assert e.value.service["roles"][0]["host_id"] == host.host_id + + +def test_existing_role_purge(conn, module_args, host_monitor, request): + module_args( + { + **conn, + "purge": True, + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + # _ansible_check_mode=True, + # _ansible_diff=True, + } + ) + + with pytest.raises(AnsibleExitJson) as e: + cm_service.main() + + assert e.value.changed == True + assert not e.value.service["roles"] + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service.main() + + assert e.value.changed == False + assert not e.value.service["roles"] diff --git a/tests/unit/plugins/modules/cm_service_config/test_cm_service_config.py b/tests/unit/plugins/modules/cm_service_config/test_cm_service_config.py index 9e208227..39856c63 100644 --- a/tests/unit/plugins/modules/cm_service_config/test_cm_service_config.py +++ b/tests/unit/plugins/modules/cm_service_config/test_cm_service_config.py @@ -38,10 +38,12 @@ def test_missing_required(conn, module_args): def test_present_invalid_parameter(conn, module_args): - conn.update( - parameters=dict(example="Example"), + module_args( + { + **conn, + "parameters": dict(example="Example"), + } ) - module_args(conn) with pytest.raises( AnsibleFailJson, match="Unknown configuration attribute 'example'" @@ -52,7 +54,7 @@ def test_present_invalid_parameter(conn, module_args): @pytest.mark.service_config( dict(mgmt_emit_sensitive_data_in_stderr=False, log_event_retry_frequency=10) ) -def test_set_parameters(conn, module_args, cms_service_config): +def test_set_parameters(conn, module_args, cms_config): module_args( { **conn, @@ -84,7 +86,7 @@ def test_set_parameters(conn, module_args, cms_service_config): @pytest.mark.service_config( dict(mgmt_emit_sensitive_data_in_stderr=True, log_event_retry_frequency=10) ) -def test_unset_parameters(conn, module_args, cms_service_config): +def test_unset_parameters(conn, module_args, cms_config): module_args( { **conn, @@ -112,7 +114,7 @@ def test_unset_parameters(conn, module_args, cms_service_config): @pytest.mark.service_config( dict(mgmt_emit_sensitive_data_in_stderr=True, log_event_retry_frequency=10) ) -def test_set_parameters_with_purge(conn, module_args, cms_service_config): +def test_set_parameters_with_purge(conn, module_args, cms_config): module_args( { **conn, @@ -143,7 +145,7 @@ def test_set_parameters_with_purge(conn, module_args, cms_service_config): @pytest.mark.service_config( dict(mgmt_emit_sensitive_data_in_stderr=True, log_event_retry_frequency=10) ) -def test_purge_all_parameters(conn, module_args, cms_service_config): +def test_purge_all_parameters(conn, module_args, cms_config): module_args( { **conn, diff --git a/tests/unit/plugins/modules/cm_service_info/test_cm_service_info.py b/tests/unit/plugins/modules/cm_service_info/test_cm_service_info.py index 0561dba6..eb679787 100644 --- a/tests/unit/plugins/modules/cm_service_info/test_cm_service_info.py +++ b/tests/unit/plugins/modules/cm_service_info/test_cm_service_info.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Cloudera, Inc. All Rights Reserved. +# Copyright 2025 Cloudera, Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -18,39 +18,32 @@ __metaclass__ = type -import os +import logging import pytest -import unittest from ansible_collections.cloudera.cluster.plugins.modules import cm_service_info -from ansible_collections.cloudera.cluster.tests.unit.plugins.modules.utils import ( +from ansible_collections.cloudera.cluster.tests.unit import ( AnsibleExitJson, - AnsibleFailJson, - ModuleTestCase, - setup_module_args, ) +LOG = logging.getLogger(__name__) -@unittest.skipUnless( - os.getenv("CM_USERNAME"), "Cloudera Manager access parameters not set" -) -class TestCMServiceInfo(ModuleTestCase): - def test_service_info(self): - setup_module_args( - { - "username": os.getenv("CM_USERNAME"), - "password": os.getenv("CM_PASSWORD"), - "host": os.getenv("CM_HOST"), - "port": "7180", - "verify_tls": "no", - "debug": "yes", - } - ) - - with pytest.raises(AnsibleExitJson) as e: - cm_service_info.main() - - -if __name__ == "__main__": - unittest.main() + +def test_read_service(conn, module_args, cms_auto): + module_args({**conn}) + + with pytest.raises(AnsibleExitJson) as e: + cm_service_info.main() + + assert e.value.changed == False + assert cms_auto.name == e.value.service["name"] + + +def test_read_service_nonexistent(conn, module_args): + module_args({**conn}) + + with pytest.raises(AnsibleExitJson) as e: + cm_service_info.main() + + assert not e.value.service diff --git a/tests/unit/plugins/modules/cm_service_role/test_cm_service_role.py b/tests/unit/plugins/modules/cm_service_role/test_cm_service_role.py new file mode 100644 index 00000000..1501ce85 --- /dev/null +++ b/tests/unit/plugins/modules/cm_service_role/test_cm_service_role.py @@ -0,0 +1,484 @@ +# -*- coding: utf-8 -*- + +# Copyright 2025 Cloudera, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +import logging +import pytest + +from pathlib import Path + +from cm_client import ( + ApiRoleState, + HostsResourceApi, + MgmtRolesResourceApi, +) + +from ansible_collections.cloudera.cluster.plugins.modules import cm_service_role +from ansible_collections.cloudera.cluster.tests.unit import ( + AnsibleExitJson, + AnsibleFailJson, +) + +LOG = logging.getLogger(__name__) + + +def test_missing_required(conn, module_args): + module_args(conn) + + with pytest.raises(AnsibleFailJson, match="type"): + cm_service_role.main() + + +def test_mutually_exclusive(conn, module_args): + module_args({**conn, "cluster_hostname": "hostname", "cluster_host_id": "host_id"}) + + with pytest.raises( + AnsibleFailJson, + match="parameters are mutually exclusive: cluster_hostname|cluster_host_id", + ): + cm_service_role.main() + + +def test_existing_relocate(conn, module_args, cm_api_client, host_monitor, request): + host_api = HostsResourceApi(cm_api_client) + host = next( + ( + h + for h in host_api.read_hosts().items + if not h.cluster_ref and h.host_id != host_monitor.host_ref.host_id + ), + None, + ) + if host is None: + raise Exception("No available hosts to relocate Cloudera Manager Service role") + + module_args( + { + **conn, + "type": host_monitor.type, + "cluster_host_id": host.host_id, + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + # _ansible_check_mode=True, + # _ansible_diff=True, + } + ) + + with pytest.raises(AnsibleExitJson) as e: + cm_service_role.main() + + assert e.value.changed == True + assert e.value.role["host_id"] == host.host_id + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service_role.main() + + assert e.value.changed == False + assert e.value.role["host_id"] == host.host_id + + +def test_new(conn, module_args, cm_api_client, cms, request): + host_api = HostsResourceApi(cm_api_client) + host = next((h for h in host_api.read_hosts().items if not h.cluster_ref), None) + + if host is None: + raise Exception("No available hosts to assign Cloudera Manager Service role") + + module_args( + { + **conn, + "type": "HOSTMONITOR", + "cluster_host_id": host.host_id, + "config": dict(mgmt_num_descriptor_fetch_tries=55), + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + # _ansible_check_mode=True, + # _ansible_diff=True, + } + ) + + expected = dict(mgmt_num_descriptor_fetch_tries="55") + + with pytest.raises(AnsibleExitJson) as e: + cm_service_role.main() + + assert e.value.changed == True + assert expected.items() <= e.value.role["config"].items() + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service_role.main() + + assert e.value.changed == False + assert expected.items() <= e.value.role["config"].items() + + +def test_new_maintenance_mode_enabled(conn, module_args, cm_api_client, cms, request): + host_api = HostsResourceApi(cm_api_client) + host = next((h for h in host_api.read_hosts().items if not h.cluster_ref), None) + + if host is None: + raise Exception("No available hosts to assign Cloudera Manager Service role") + + module_args( + { + **conn, + "type": "HOSTMONITOR", + "cluster_host_id": host.host_id, + "maintenance": True, + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + # _ansible_check_mode=True, + # _ansible_diff=True, + } + ) + + with pytest.raises(AnsibleExitJson) as e: + cm_service_role.main() + + assert e.value.changed == True + assert 
e.value.role["maintenance_mode"] == True + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service_role.main() + + assert e.value.changed == False + assert e.value.role["maintenance_mode"] == True + + +@pytest.mark.role_config( + dict(mgmt_num_descriptor_fetch_tries=11, process_start_secs=21) +) +def test_existing_set(conn, module_args, host_monitor_config, request): + module_args( + { + **conn, + "type": host_monitor_config.type, + "config": dict(mgmt_num_descriptor_fetch_tries=55), + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + # _ansible_check_mode=True, + # _ansible_diff=True, + } + ) + + expected = dict(mgmt_num_descriptor_fetch_tries="55", process_start_secs="21") + + with pytest.raises(AnsibleExitJson) as e: + cm_service_role.main() + + assert e.value.changed == True + assert expected.items() <= e.value.role["config"].items() + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service_role.main() + + assert e.value.changed == False + assert expected.items() <= e.value.role["config"].items() + + +@pytest.mark.role_config( + dict(mgmt_num_descriptor_fetch_tries=12, process_start_secs=22) +) +def test_existing_unset(conn, module_args, host_monitor_config, request): + module_args( + { + **conn, + "type": host_monitor_config.type, + "config": dict(mgmt_num_descriptor_fetch_tries=None), + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + } + ) + + expected = dict(process_start_secs="22") + + with pytest.raises(AnsibleExitJson) as e: + cm_service_role.main() + + assert e.value.changed == True + assert expected.items() <= e.value.role["config"].items() + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service_role.main() + + assert e.value.changed == False + assert expected.items() <= e.value.role["config"].items() + + +@pytest.mark.role_config( + dict(mgmt_num_descriptor_fetch_tries=13, process_start_secs=23) +) +def test_existing_purge(conn, module_args, host_monitor_config, request): + module_args( + { + **conn, + "type": host_monitor_config.type, + "config": dict(mgmt_num_descriptor_fetch_tries=33), + "purge": True, + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + # _ansible_check_mode=True, + # _ansible_diff=True, + } + ) + + expected = dict(mgmt_num_descriptor_fetch_tries="33") + + with pytest.raises(AnsibleExitJson) as e: + cm_service_role.main() + + assert e.value.changed == True + assert expected.items() <= e.value.role["config"].items() + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service_role.main() + + assert e.value.changed == False + assert expected.items() <= e.value.role["config"].items() + + +@pytest.mark.role_config( + dict(mgmt_num_descriptor_fetch_tries=14, process_start_secs=24) +) +def test_existing_purge_all(conn, module_args, host_monitor_config, request): + module_args( + { + **conn, + "type": host_monitor_config.type, + "purge": True, + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + # _ansible_check_mode=True, + # _ansible_diff=True, + } + ) + + with pytest.raises(AnsibleExitJson) as e: + cm_service_role.main() + + assert e.value.changed == True + assert len(e.value.role["config"]) == 0 + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service_role.main() + + assert e.value.changed == False + assert len(e.value.role["config"]) == 0 + + +def test_existing_maintenance_mode_enabled( + conn, module_args, cm_api_client, host_monitor, request +): + module_args( + { + 
**conn, + "type": host_monitor.type, + "maintenance": True, + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + # _ansible_check_mode=True, + # _ansible_diff=True, + } + ) + + role_api = MgmtRolesResourceApi(cm_api_client) + role_api.exit_maintenance_mode(host_monitor.name) + + with pytest.raises(AnsibleExitJson) as e: + cm_service_role.main() + + assert e.value.changed == True + assert e.value.role["maintenance_mode"] == True + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service_role.main() + + assert e.value.changed == False + assert e.value.role["maintenance_mode"] == True + + +def test_existing_maintenance_mode_disabled( + conn, module_args, cm_api_client, host_monitor, request +): + module_args( + { + **conn, + "type": host_monitor.type, + "maintenance": False, + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + # _ansible_check_mode=True, + # _ansible_diff=True, + } + ) + + # TODO Turn this into a fixture - host_monitor_maintenance + role_api = MgmtRolesResourceApi(cm_api_client) + role_api.enter_maintenance_mode(host_monitor.name) + + with pytest.raises(AnsibleExitJson) as e: + cm_service_role.main() + + assert e.value.changed == True + assert e.value.role["maintenance_mode"] == False + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service_role.main() + + assert e.value.changed == False + assert e.value.role["maintenance_mode"] == False + + +def test_existing_state_present(conn, module_args, host_monitor, request): + module_args( + { + **conn, + "type": host_monitor.type, + "state": "present", + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + # _ansible_check_mode=True, + # _ansible_diff=True, + } + ) + + with pytest.raises(AnsibleExitJson) as e: + cm_service_role.main() + + assert e.value.changed == False + assert e.value.role + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service_role.main() + + assert e.value.changed == False + assert e.value.role + + +@pytest.mark.role_state(ApiRoleState.STOPPED) +def test_existing_state_started( + conn, module_args, cms_auto, host_monitor_state, request +): + module_args( + { + **conn, + "type": host_monitor_state.type, + "state": "started", + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + # _ansible_check_mode=True, + # _ansible_diff=True, + } + ) + + with pytest.raises(AnsibleExitJson) as e: + cm_service_role.main() + + assert e.value.changed == True + assert e.value.role["role_state"] == ApiRoleState.STARTED + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service_role.main() + + assert e.value.changed == False + assert e.value.role["role_state"] == ApiRoleState.STARTED + + +@pytest.mark.role_state(ApiRoleState.STARTED) +def test_existing_state_stopped( + conn, module_args, cms_auto, host_monitor_state, request +): + module_args( + { + **conn, + "type": host_monitor_state.type, + "state": "stopped", + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + # _ansible_check_mode=True, + # _ansible_diff=True, + } + ) + + with pytest.raises(AnsibleExitJson) as e: + cm_service_role.main() + + assert e.value.changed == True + assert e.value.role["role_state"] == ApiRoleState.STOPPED + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service_role.main() + + assert e.value.changed == False + assert e.value.role["role_state"] == ApiRoleState.STOPPED + + +@pytest.mark.role_state(ApiRoleState.STARTED) +def 
test_existing_state_restarted( + conn, module_args, cms_auto, host_monitor_state, request +): + module_args( + { + **conn, + "type": host_monitor_state.type, + "state": "restarted", + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + # _ansible_check_mode=True, + # _ansible_diff=True, + } + ) + + with pytest.raises(AnsibleExitJson) as e: + cm_service_role.main() + + assert e.value.changed == True + assert e.value.role["role_state"] == ApiRoleState.STARTED + + # Idempotency (restart always forces a changed state) + with pytest.raises(AnsibleExitJson) as e: + cm_service_role.main() + + assert e.value.changed == True + assert e.value.role["role_state"] == ApiRoleState.STARTED + + +def test_existing_state_absent(conn, module_args, cms_auto, host_monitor, request): + module_args( + { + **conn, + "type": host_monitor.type, + "state": "absent", + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + # _ansible_check_mode=True, + # _ansible_diff=True, + } + ) + + with pytest.raises(AnsibleExitJson) as e: + cm_service_role.main() + + assert e.value.changed == True + assert not e.value.role + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service_role.main() + + assert e.value.changed == False + assert not e.value.role diff --git a/tests/unit/plugins/modules/cm_service_role_config/test_cm_service_role_config.py b/tests/unit/plugins/modules/cm_service_role_config/test_cm_service_role_config.py new file mode 100644 index 00000000..d9581e4d --- /dev/null +++ b/tests/unit/plugins/modules/cm_service_role_config/test_cm_service_role_config.py @@ -0,0 +1,314 @@ +# -*- coding: utf-8 -*- + +# Copyright 2024 Cloudera, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
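+
+# NOTE: The conn, module_args, host_monitor, and host_monitor_config fixtures
+# used below are assumed to come from the shared conftest.py, which is not
+# part of this diff. As a rough, illustrative sketch only (names and details
+# may differ from the real conftest), a marker-driven fixture such as
+# host_monitor_config could seed the Host Monitor role with the
+# @pytest.mark.role_config payload before each test runs:
+#
+#   @pytest.fixture()
+#   def host_monitor_config(cm_api_client, host_monitor, request):
+#       marker = request.node.get_closest_marker("role_config")
+#       params = marker.args[0] if marker else {}
+#       MgmtRolesResourceApi(cm_api_client).update_role_config(
+#           host_monitor.name,
+#           body=ApiConfigList(
+#               items=[ApiConfig(name=k, value=str(v)) for k, v in params.items()]
+#           ),
+#       )
+#       yield host_monitor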
+ +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +import logging +import pytest + +from pathlib import Path + +from ansible_collections.cloudera.cluster.plugins.modules import cm_service_role_config +from ansible_collections.cloudera.cluster.tests.unit import ( + AnsibleExitJson, + AnsibleFailJson, +) + +LOG = logging.getLogger(__name__) + + +def test_missing_required(conn, module_args): + module_args(conn) + + with pytest.raises(AnsibleFailJson, match="parameters"): + cm_service_role_config.main() + + +def test_missing_required_if(conn, module_args): + module_args( + { + **conn, + "parameters": dict(), + } + ) + + with pytest.raises(AnsibleFailJson, match="name, type"): + cm_service_role_config.main() + + +def test_present_invalid_parameter(conn, module_args, host_monitor): + module_args( + { + **conn, + "role": host_monitor.name, + "parameters": dict(example="Example"), + } + ) + + with pytest.raises( + AnsibleFailJson, match="Unknown configuration attribute 'example'" + ): + cm_service_role_config.main() + + +@pytest.mark.role_config( + dict(mgmt_num_descriptor_fetch_tries=11, process_start_secs=21) +) +def test_set_parameters(conn, module_args, host_monitor_config, request): + module_args( + { + **conn, + "role": host_monitor_config.name, + "parameters": dict(mgmt_num_descriptor_fetch_tries=32), + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + # _ansible_check_mode=True, + # _ansible_diff=True, + } + ) + + expected = dict(mgmt_num_descriptor_fetch_tries="32", process_start_secs="21") + + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config.main() + + assert e.value.changed == True + assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config.main() + + assert e.value.changed == False + assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + + +@pytest.mark.role_config( + dict(mgmt_num_descriptor_fetch_tries=11, process_start_secs=21) +) +def test_set_parameters_role_type(conn, module_args, host_monitor_config, request): + module_args( + { + **conn, + "role_type": host_monitor_config.type, + "parameters": dict(mgmt_num_descriptor_fetch_tries=32), + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + # _ansible_check_mode=True, + # _ansible_diff=True, + } + ) + + expected = dict(mgmt_num_descriptor_fetch_tries="32", process_start_secs="21") + + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config.main() + + assert e.value.changed == True + assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config.main() + + assert e.value.changed == False + assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + + +@pytest.mark.role_config( + dict(mgmt_num_descriptor_fetch_tries=11, process_start_secs=21) +) +def test_unset_parameters(conn, module_args, host_monitor_config, request): + module_args( + { + **conn, + "role": host_monitor_config.name, + "parameters": dict(mgmt_num_descriptor_fetch_tries=None), + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + } + ) + + expected = dict(process_start_secs="21") + + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config.main() + + assert e.value.changed == True + assert expected.items() <= {c["name"]: c["value"] for c in 
e.value.config}.items() + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config.main() + + assert e.value.changed == False + assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + + +@pytest.mark.role_config( + dict(mgmt_num_descriptor_fetch_tries=11, process_start_secs=21) +) +def test_unset_parameters_role_type(conn, module_args, host_monitor_config, request): + module_args( + { + **conn, + "type": host_monitor_config.type, + "parameters": dict(mgmt_num_descriptor_fetch_tries=None), + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + } + ) + + expected = dict(process_start_secs="21") + + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config.main() + + assert e.value.changed == True + assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config.main() + + assert e.value.changed == False + assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + + +@pytest.mark.role_config( + dict(mgmt_num_descriptor_fetch_tries=11, process_start_secs=21) +) +def test_set_parameters_with_purge(conn, module_args, host_monitor_config, request): + module_args( + { + **conn, + "role": host_monitor_config.name, + "parameters": dict(mgmt_num_descriptor_fetch_tries=32), + "purge": True, + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + # _ansible_check_mode=True, + # _ansible_diff=True, + } + ) + + expected = dict(mgmt_num_descriptor_fetch_tries="32") + + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config.main() + + assert e.value.changed == True + assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config.main() + + assert e.value.changed == False + assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + + +@pytest.mark.role_config( + dict(mgmt_num_descriptor_fetch_tries=11, process_start_secs=21) +) +def test_set_parameters_with_purge_role_type( + conn, module_args, host_monitor_config, request +): + module_args( + { + **conn, + "role_type": host_monitor_config.type, + "parameters": dict(mgmt_num_descriptor_fetch_tries=32), + "purge": True, + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + # _ansible_check_mode=True, + # _ansible_diff=True, + } + ) + + expected = dict(mgmt_num_descriptor_fetch_tries="32") + + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config.main() + + assert e.value.changed == True + assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config.main() + + assert e.value.changed == False + assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + + +@pytest.mark.role_config( + dict(mgmt_num_descriptor_fetch_tries=11, process_start_secs=21) +) +def test_purge_all_parameters(conn, module_args, host_monitor_config, request): + module_args( + { + **conn, + "role": host_monitor_config.name, + "parameters": dict(), + "purge": True, + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + # _ansible_check_mode=True, + # _ansible_diff=True, + } + ) + + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config.main() + + assert e.value.changed == True + assert len(e.value.config) == 0 
+ + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config.main() + + assert e.value.changed == False + assert len(e.value.config) == 0 + + +@pytest.mark.role_config( + dict(mgmt_num_descriptor_fetch_tries=11, process_start_secs=21) +) +def test_purge_all_parameters_role_type( + conn, module_args, host_monitor_config, request +): + module_args( + { + **conn, + "type": host_monitor_config.type, + "parameters": dict(), + "purge": True, + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + # _ansible_check_mode=True, + # _ansible_diff=True, + } + ) + + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config.main() + + assert e.value.changed == True + assert len(e.value.config) == 0 + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config.main() + + assert e.value.changed == False + assert len(e.value.config) == 0 diff --git a/tests/unit/plugins/modules/cm_service_role_config_group/test_cm_service_role_config_group.py b/tests/unit/plugins/modules/cm_service_role_config_group/test_cm_service_role_config_group.py new file mode 100644 index 00000000..059524c8 --- /dev/null +++ b/tests/unit/plugins/modules/cm_service_role_config_group/test_cm_service_role_config_group.py @@ -0,0 +1,211 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright 2024 Cloudera, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
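+
+# NOTE: The marker payloads below construct config entries positionally as
+# ApiConfig(k, v), which relies on name and value being the first two
+# arguments of the generated cm_client model's constructor. It is assumed to
+# be equivalent to the keyword form used elsewhere in this change set:
+#
+#   ApiConfig(name="mgmt_num_descriptor_fetch_tries", value=11)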
+ +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +import logging +import pytest + +from pathlib import Path + +from cm_client import ( + ApiConfig, + ApiConfigList, + ApiRoleConfigGroup, +) + +from ansible_collections.cloudera.cluster.plugins.modules import ( + cm_service_role_config_group, +) +from ansible_collections.cloudera.cluster.tests.unit import ( + AnsibleExitJson, + AnsibleFailJson, +) + +LOG = logging.getLogger(__name__) + + +@pytest.mark.role_config_group( + ApiRoleConfigGroup( + config=ApiConfigList( + items=[ + ApiConfig(k, v) + for k, v in dict( + mgmt_num_descriptor_fetch_tries=11, process_start_secs=21 + ).items() + ] + ) + ) +) +def test_cm_role_config_group_config_set( + conn, module_args, host_monitor_role_group_config, request +): + module_args( + { + **conn, + "type": host_monitor_role_group_config.role_type, + "parameters": dict(mgmt_num_descriptor_fetch_tries=32), + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + # _ansible_check_mode=True, + # _ansible_diff=True, + } + ) + + expected = dict(mgmt_num_descriptor_fetch_tries="32", process_start_secs="21") + + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config_group.main() + + assert e.value.changed == True + assert expected.items() <= e.value.role_config_group["config"].items() + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config_group.main() + + assert e.value.changed == False + assert expected.items() <= e.value.role_config_group["config"].items() + + +@pytest.mark.role_config_group( + ApiRoleConfigGroup( + config=ApiConfigList( + items=[ + ApiConfig(k, v) + for k, v in dict( + mgmt_num_descriptor_fetch_tries=11, process_start_secs=21 + ).items() + ] + ) + ) +) +def test_cm_role_config_group_config_unset( + conn, module_args, host_monitor_role_group_config, request +): + module_args( + { + **conn, + "type": host_monitor_role_group_config.role_type, + "parameters": dict(mgmt_num_descriptor_fetch_tries=None), + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + # _ansible_check_mode=True, + # _ansible_diff=True, + } + ) + + expected = dict(process_start_secs="21") + + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config_group.main() + + assert e.value.changed == True + assert expected.items() <= e.value.role_config_group["config"].items() + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config_group.main() + + assert e.value.changed == False + assert expected.items() <= e.value.role_config_group["config"].items() + + +@pytest.mark.role_config_group( + ApiRoleConfigGroup( + config=ApiConfigList( + items=[ + ApiConfig(k, v) + for k, v in dict( + mgmt_num_descriptor_fetch_tries=11, process_start_secs=21 + ).items() + ] + ) + ) +) +def test_cm_role_config_group_config_set_purge( + conn, module_args, host_monitor_role_group_config, request +): + module_args( + { + **conn, + "type": host_monitor_role_group_config.role_type, + "parameters": dict(mgmt_num_descriptor_fetch_tries=32), + "purge": True, + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + # _ansible_check_mode=True, + # _ansible_diff=True, + } + ) + + expected = dict(mgmt_num_descriptor_fetch_tries="32") + + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config_group.main() + + assert e.value.changed == True + assert expected.items() <= e.value.role_config_group["config"].items() + + # Idempotency + with pytest.raises(AnsibleExitJson) 
as e: + cm_service_role_config_group.main() + + assert e.value.changed == False + assert expected.items() <= e.value.role_config_group["config"].items() + + +@pytest.mark.role_config_group( + ApiRoleConfigGroup( + config=ApiConfigList( + items=[ + ApiConfig(k, v) + for k, v in dict( + mgmt_num_descriptor_fetch_tries=11, process_start_secs=21 + ).items() + ] + ) + ) +) +def test_cm_role_config_group_config_purge_all( + conn, module_args, host_monitor_role_group_config, request +): + module_args( + { + **conn, + "type": host_monitor_role_group_config.role_type, + "parameters": dict(), + "purge": True, + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + # _ansible_check_mode=True, + # _ansible_diff=True, + } + ) + + expected = dict() + + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config_group.main() + + assert e.value.changed == True + assert expected.items() <= e.value.role_config_group["config"].items() + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config_group.main() + + assert e.value.changed == False + assert expected.items() <= e.value.role_config_group["config"].items() diff --git a/tests/unit/plugins/modules/cm_service_role_config_group_config/test_cm_service_role_config_group_config.py b/tests/unit/plugins/modules/cm_service_role_config_group_config/test_cm_service_role_config_group_config.py new file mode 100644 index 00000000..b2898baf --- /dev/null +++ b/tests/unit/plugins/modules/cm_service_role_config_group_config/test_cm_service_role_config_group_config.py @@ -0,0 +1,395 @@ +# -*- coding: utf-8 -*- + +# Copyright 2024 Cloudera, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
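+
+# NOTE: Cloudera Manager reports configuration values as strings, so the
+# expectations below are written as strings (an integer 32 is asserted back
+# as "32"). A parameter set to None requests an unset, and purge=True removes
+# any parameter not named in the task. A minimal sketch of that resolution,
+# assuming the ConfigListUpdates helper in module_utils/cm_utils.py is used:
+#
+#   existing = ApiConfigList(
+#       items=[ApiConfig(name="process_start_secs", value="21")]
+#   )
+#   updates = ConfigListUpdates(
+#       existing, dict(mgmt_num_descriptor_fetch_tries=32), purge=False
+#   )
+#   updates.changed  # True, so there is a changeset to apply
+#   updates.config   # ApiConfigList payload for the CM API call
+#   updates.diff     # before/after dicts for Ansible --diff output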
+ +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +import logging +import pytest + +from pathlib import Path + +from cm_client import ( + ApiConfig, + ApiConfigList, + ApiRoleConfigGroup, +) + +from ansible_collections.cloudera.cluster.plugins.modules import ( + cm_service_role_config_group_config, +) + +from ansible_collections.cloudera.cluster.tests.unit import ( + AnsibleExitJson, + AnsibleFailJson, +) + +LOG = logging.getLogger(__name__) + + +def test_missing_required(conn, module_args): + module_args(conn) + + with pytest.raises(AnsibleFailJson, match="parameters"): + cm_service_role_config_group_config.main() + + +def test_missing_required_if(conn, module_args): + module_args( + { + **conn, + "parameters": dict(), + } + ) + + with pytest.raises(AnsibleFailJson, match="name, type"): + cm_service_role_config_group_config.main() + + +def test_present_invalid_parameter(conn, module_args, host_monitor): + module_args( + { + **conn, + "name": host_monitor.role_config_group_ref.role_config_group_name, + "parameters": dict(example="Example"), + } + ) + + with pytest.raises( + AnsibleFailJson, match="Unknown configuration attribute 'example'" + ): + cm_service_role_config_group_config.main() + + +@pytest.mark.role_config_group( + ApiRoleConfigGroup( + config=ApiConfigList( + items=[ + ApiConfig(k, v) + for k, v in dict( + mgmt_num_descriptor_fetch_tries=11, process_start_secs=21 + ).items() + ] + ) + ) +) +def test_set_parameters(conn, module_args, host_monitor_role_group_config, request): + module_args( + { + **conn, + "name": host_monitor_role_group_config.name, + "parameters": dict(mgmt_num_descriptor_fetch_tries=32), + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + # _ansible_check_mode=True, + # _ansible_diff=True, + } + ) + + expected = dict(mgmt_num_descriptor_fetch_tries="32", process_start_secs="21") + + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config_group_config.main() + + assert e.value.changed == True + assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config_group_config.main() + + assert e.value.changed == False + assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + + +@pytest.mark.role_config_group( + ApiRoleConfigGroup( + config=ApiConfigList( + items=[ + ApiConfig(k, v) + for k, v in dict( + mgmt_num_descriptor_fetch_tries=11, process_start_secs=21 + ).items() + ] + ) + ) +) +def test_set_parameters_role_type(conn, module_args, host_monitor_config, request): + module_args( + { + **conn, + "type": host_monitor_config.role_type, + "parameters": dict(mgmt_num_descriptor_fetch_tries=32), + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + # _ansible_check_mode=True, + # _ansible_diff=True, + } + ) + + expected = dict(mgmt_num_descriptor_fetch_tries="32", process_start_secs="21") + + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config_group_config.main() + + assert e.value.changed == True + assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config_group_config.main() + + assert e.value.changed == False + assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + + +@pytest.mark.role_config_group( + ApiRoleConfigGroup( + config=ApiConfigList( + items=[ + ApiConfig(k, v) + 
for k, v in dict( + mgmt_num_descriptor_fetch_tries=11, process_start_secs=21 + ).items() + ] + ) + ) +) +def test_unset_parameters(conn, module_args, host_monitor_config, request): + module_args( + { + **conn, + "name": host_monitor_config.name, + "parameters": dict(mgmt_num_descriptor_fetch_tries=None), + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + } + ) + + expected = dict(process_start_secs="21") + + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config_group_config.main() + + assert e.value.changed == True + assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config_group_config.main() + + assert e.value.changed == False + assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + + +@pytest.mark.role_config_group( + ApiRoleConfigGroup( + config=ApiConfigList( + items=[ + ApiConfig(k, v) + for k, v in dict( + mgmt_num_descriptor_fetch_tries=11, process_start_secs=21 + ).items() + ] + ) + ) +) +def test_unset_parameters_role_type(conn, module_args, host_monitor_config, request): + module_args( + { + **conn, + "type": host_monitor_config.role_type, + "parameters": dict(mgmt_num_descriptor_fetch_tries=None), + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + } + ) + + expected = dict(process_start_secs="21") + + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config_group_config.main() + + assert e.value.changed == True + assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config_group_config.main() + + assert e.value.changed == False + assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + + +@pytest.mark.role_config_group( + ApiRoleConfigGroup( + config=ApiConfigList( + items=[ + ApiConfig(k, v) + for k, v in dict( + mgmt_num_descriptor_fetch_tries=11, process_start_secs=21 + ).items() + ] + ) + ) +) +def test_set_parameters_with_purge(conn, module_args, host_monitor_config, request): + module_args( + { + **conn, + "name": host_monitor_config.name, + "parameters": dict(mgmt_num_descriptor_fetch_tries=32), + "purge": True, + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + # _ansible_check_mode=True, + # _ansible_diff=True, + } + ) + + expected = dict(mgmt_num_descriptor_fetch_tries="32") + + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config_group_config.main() + + assert e.value.changed == True + assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + + # Idempotency + with pytest.raises(AnsibleExitJson) as e: + cm_service_role_config_group_config.main() + + assert e.value.changed == False + assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() + + +@pytest.mark.role_config_group( + ApiRoleConfigGroup( + config=ApiConfigList( + items=[ + ApiConfig(k, v) + for k, v in dict( + mgmt_num_descriptor_fetch_tries=11, process_start_secs=21 + ).items() + ] + ) + ) +) +def test_set_parameters_with_purge_role_type( + conn, module_args, host_monitor_config, request +): + module_args( + { + **conn, + "type": host_monitor_config.role_type, + "parameters": dict(mgmt_num_descriptor_fetch_tries=32), + "purge": True, + "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", + # _ansible_check_mode=True, + # _ansible_diff=True, + } 
+    )
+
+    expected = dict(mgmt_num_descriptor_fetch_tries="32")
+
+    with pytest.raises(AnsibleExitJson) as e:
+        cm_service_role_config_group_config.main()
+
+    assert e.value.changed == True
+    assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items()
+
+    # Idempotency
+    with pytest.raises(AnsibleExitJson) as e:
+        cm_service_role_config_group_config.main()
+
+    assert e.value.changed == False
+    assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items()
+
+
+@pytest.mark.role_config_group(
+    ApiRoleConfigGroup(
+        config=ApiConfigList(
+            items=[
+                ApiConfig(k, v)
+                for k, v in dict(
+                    mgmt_num_descriptor_fetch_tries=11, process_start_secs=21
+                ).items()
+            ]
+        )
+    )
+)
+def test_purge_all_parameters(conn, module_args, host_monitor_config, request):
+    module_args(
+        {
+            **conn,
+            "name": host_monitor_config.name,
+            "parameters": dict(),
+            "purge": True,
+            "message": f"{Path(request.node.parent.name).stem}::{request.node.name}",
+            # _ansible_check_mode=True,
+            # _ansible_diff=True,
+        }
+    )
+
+    with pytest.raises(AnsibleExitJson) as e:
+        cm_service_role_config_group_config.main()
+
+    assert e.value.changed == True
+    assert len(e.value.config) == 0
+
+    # Idempotency
+    with pytest.raises(AnsibleExitJson) as e:
+        cm_service_role_config_group_config.main()
+
+    assert e.value.changed == False
+    assert len(e.value.config) == 0
+
+
+@pytest.mark.role_config_group(
+    ApiRoleConfigGroup(
+        config=ApiConfigList(
+            items=[
+                ApiConfig(k, v)
+                for k, v in dict(
+                    mgmt_num_descriptor_fetch_tries=11, process_start_secs=21
+                ).items()
+            ]
+        )
+    )
+)
+def test_purge_all_parameters_role_type(
+    conn, module_args, host_monitor_config, request
+):
+    module_args(
+        {
+            **conn,
+            "type": host_monitor_config.role_type,
+            "parameters": dict(),
+            "purge": True,
+            "message": f"{Path(request.node.parent.name).stem}::{request.node.name}",
+            # _ansible_check_mode=True,
+            # _ansible_diff=True,
+        }
+    )
+
+    with pytest.raises(AnsibleExitJson) as e:
+        cm_service_role_config_group_config.main()
+
+    assert e.value.changed == True
+    assert len(e.value.config) == 0
+
+    # Idempotency
+    with pytest.raises(AnsibleExitJson) as e:
+        cm_service_role_config_group_config.main()
+
+    assert e.value.changed == False
+    assert len(e.value.config) == 0
diff --git a/tests/unit/plugins/modules/cm_service_role_config_group_info/test_cm_service_role_config_group_info.py b/tests/unit/plugins/modules/cm_service_role_config_group_info/test_cm_service_role_config_group_info.py
new file mode 100644
index 00000000..c5d1605b
--- /dev/null
+++ b/tests/unit/plugins/modules/cm_service_role_config_group_info/test_cm_service_role_config_group_info.py
@@ -0,0 +1,83 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2025 Cloudera, Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
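+
+# Unit tests for the cm_service_role_config_group_info module, which reads the
+# role config groups of the Cloudera Management Service (summary comment added
+# for clarity).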
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+import logging
+import pytest
+
+
+from ansible_collections.cloudera.cluster.plugins.modules import (
+    cm_service_role_config_group_info,
+)
+from ansible_collections.cloudera.cluster.tests.unit import (
+    AnsibleExitJson,
+    AnsibleFailJson,
+)
+
+LOG = logging.getLogger(__name__)
+
+
+def test_read_role_config_groups(conn, module_args, cms_auto):
+    module_args({**conn})
+
+    with pytest.raises(AnsibleExitJson) as e:
+        cm_service_role_config_group_info.main()
+
+    assert e.value.changed == False
+    assert (
+        len(e.value.role_config_groups) == 9
+    )  # Gets all the base RCGs for all potential CM service roles
+
+
+def test_read_role_config_group(conn, module_args, cms_auto):
+    module_args(
+        {
+            **conn,
+            "type": "HOSTMONITOR",
+        }
+    )
+
+    with pytest.raises(AnsibleExitJson) as e:
+        cm_service_role_config_group_info.main()
+
+    assert e.value.changed == False
+    assert len(e.value.role_config_groups) == 1
+
+
+def test_read_role_config_group_nonexistent(conn, module_args, cms_auto):
+    module_args(
+        {
+            **conn,
+            "type": "DOESNOTEXIST",
+        }
+    )
+
+    with pytest.raises(AnsibleExitJson) as e:
+        cm_service_role_config_group_info.main()
+
+    assert len(e.value.role_config_groups) == 0
+
+
+def test_read_service_nonexistent(conn, module_args):
+    module_args({**conn})
+
+    with pytest.raises(
+        AnsibleFailJson, match="Cloudera Management service does not exist"
+    ) as e:
+        cm_service_role_config_group_info.main()
diff --git a/tests/unit/plugins/modules/cm_service_role_info/test_cm_service_role_info.py b/tests/unit/plugins/modules/cm_service_role_info/test_cm_service_role_info.py
new file mode 100644
index 00000000..dee3402d
--- /dev/null
+++ b/tests/unit/plugins/modules/cm_service_role_info/test_cm_service_role_info.py
@@ -0,0 +1,79 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2025 Cloudera, Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
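+
+# Unit tests for the cm_service_role_info module, which reads the roles of the
+# Cloudera Management Service, optionally filtered by role type (summary
+# comment added for clarity).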
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+import logging
+import pytest
+
+
+from ansible_collections.cloudera.cluster.plugins.modules import cm_service_role_info
+from ansible_collections.cloudera.cluster.tests.unit import (
+    AnsibleExitJson,
+    AnsibleFailJson,
+)
+
+LOG = logging.getLogger(__name__)
+
+
+def test_read_roles(conn, module_args, cms_auto):
+    module_args({**conn})
+
+    with pytest.raises(AnsibleExitJson) as e:
+        cm_service_role_info.main()
+
+    assert e.value.changed == False
+    assert len(e.value.roles) == 4
+
+
+def test_read_role(conn, module_args, cms_auto):
+    module_args(
+        {
+            **conn,
+            "type": "HOSTMONITOR",
+        }
+    )
+
+    with pytest.raises(AnsibleExitJson) as e:
+        cm_service_role_info.main()
+
+    assert e.value.changed == False
+    assert len(e.value.roles) == 1
+
+
+def test_read_role_nonexistent(conn, module_args, cms_auto):
+    module_args(
+        {
+            **conn,
+            "type": "DOESNOTEXIST",
+        }
+    )
+
+    with pytest.raises(AnsibleExitJson) as e:
+        cm_service_role_info.main()
+
+    assert len(e.value.roles) == 0
+
+
+def test_read_service_nonexistent(conn, module_args):
+    module_args({**conn})
+
+    with pytest.raises(
+        AnsibleFailJson, match="Cloudera Management service does not exist"
+    ) as e:
+        cm_service_role_info.main()
diff --git a/tests/unit/plugins/modules/service_config/test_service_config.py b/tests/unit/plugins/modules/service_config/test_service_config.py
index 707208a8..cf767d40 100644
--- a/tests/unit/plugins/modules/service_config/test_service_config.py
+++ b/tests/unit/plugins/modules/service_config/test_service_config.py
@@ -128,11 +128,11 @@ def test_present_invalid_cluster(conn, module_args):
         service_config.main()
 
 
-def test_present_invalid_service(conn, module_args, target_service):
+def test_present_invalid_service(conn, module_args, zk_service):
     module_args(
         {
             **conn,
-            "cluster": target_service.cluster_ref.cluster_name,
+            "cluster": zk_service.cluster_ref.cluster_name,
             "service": "example",
             "parameters": dict(example="Example"),
         }
     )
@@ -142,12 +142,12 @@
-def test_present_invalid_parameter(conn, module_args, target_service):
+def test_present_invalid_parameter(conn, module_args, zk_service):
     module_args(
         {
             **conn,
-            "cluster": target_service.cluster_ref.cluster_name,
-            "service": target_service.name,
+            "cluster": zk_service.cluster_ref.cluster_name,
+            "service": zk_service.name,
             "parameters": dict(example="Example"),
         }
     )
diff --git a/tests/unit/requirements.txt b/tests/unit/requirements.txt
index ece294ca..db7e089e 100644
--- a/tests/unit/requirements.txt
+++ b/tests/unit/requirements.txt
@@ -12,4 +12,11 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-cm-client
+pre-commit
+pytest
+pytest-mock
+ansible-core<2.17  # For RHEL 8 support
+molecule
+molecule-plugins
+molecule-plugins[ec2]
+tox-ansible