From d691f07c6f552870530a560564ce55fbcac73458 Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Wed, 18 Jun 2025 16:11:21 -0400 Subject: [PATCH 01/21] Update documentation for antsibull-docs linting Signed-off-by: Webster Mudge --- plugins/lookup/cm_license.py | 4 +- plugins/modules/cm_autotls.py | 6 +-- plugins/modules/cm_kerberos.py | 4 +- plugins/modules/cm_service.py | 13 ++++--- plugins/modules/cm_service_info.py | 4 +- plugins/modules/cm_service_role.py | 1 + .../modules/cm_service_role_config_group.py | 1 + .../cm_service_role_config_group_config.py | 1 + .../cm_service_role_config_group_info.py | 1 + plugins/modules/cm_service_role_info.py | 1 + plugins/modules/data_context.py | 4 ++ plugins/modules/external_account.py | 5 ++- plugins/modules/external_account_info.py | 3 +- plugins/modules/external_user_mappings.py | 4 ++ .../modules/external_user_mappings_info.py | 3 ++ plugins/modules/host.py | 8 ++-- plugins/modules/host_config.py | 4 ++ plugins/modules/host_info.py | 1 + plugins/modules/host_template.py | 38 +++++++++---------- plugins/modules/service.py | 4 +- plugins/modules/service_info.py | 1 + plugins/modules/service_role.py | 1 + plugins/modules/service_role_config_group.py | 1 + .../modules/service_role_config_group_info.py | 17 +-------- 24 files changed, 69 insertions(+), 61 deletions(-) diff --git a/plugins/lookup/cm_license.py b/plugins/lookup/cm_license.py index 85e77f1f..8b2877eb 100644 --- a/plugins/lookup/cm_license.py +++ b/plugins/lookup/cm_license.py @@ -78,7 +78,7 @@ description: - The contents of the license. type: dict - options: + contains: deactivation_date: description: Date of license deactivation. returned: always @@ -95,7 +95,7 @@ returned: always password: description: Computed password of the license. - return: always + returned: always start_date: description: Date of license activation. returned: always diff --git a/plugins/modules/cm_autotls.py b/plugins/modules/cm_autotls.py index 19ac868e..a0de1a45 100644 --- a/plugins/modules/cm_autotls.py +++ b/plugins/modules/cm_autotls.py @@ -54,7 +54,7 @@ interpret_as_filenames: description: - Whether specific parameters are interpreted as filenames local to the Cloudera Manager host. - - When V(true), the following parameter are filenames - O(cm_host_cert), O(cm_host_key), O(ca_cert), O(keystore_passwd), O(truststore_passwd), O(trusted_ca_certs), O(host_certs.host_cert) and O(host_certs.host_key). + - When V(true), the following parameters are filenames - O(cm_host_cert), O(cm_host_key), O(ca_cert), O(keystore_passwd), O(truststore_passwd), O(trusted_ca_certs), O(host_certs[].certificate) and O(host_certs[].key). type: bool required: false default: true @@ -80,7 +80,7 @@ connection_password: description: - The password used to authenticate with the hosts. - - Specify either this or a O(connection_password_private_key). + - Specify either this or a O(connection_private_key). type: str connection_private_key: description: @@ -158,7 +158,7 @@ platform: platforms: all notes: - - Using the C(cm_config) with O(purge=yes) will remove the Cloudera Manager configurations set by this module. + - Using the C(cm_config) module with O(cloudera.cluster.cm_config#module:purge=yes) will remove the Cloudera Manager configurations set by this module. - Requires C(cm_client).
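A usage sketch for the Auto-TLS options documented above — hypothetical values throughout, and it assumes the host/username/password connection options shared by this collection's other modules:

- name: Enable Auto-TLS, reading certificate material from files on the Cloudera Manager host
  cloudera.cluster.cm_autotls:
    host: example.cloudera.com
    username: "jane_smith"
    password: "S&peR4Ec*re"
    interpret_as_filenames: true  # cm_host_cert, cm_host_key, and ca_cert below are paths local to the CM host
    cm_host_cert: "/opt/security/pki/cm_host.pem"
    cm_host_key: "/opt/security/pki/cm_host.key"
    ca_cert: "/opt/security/pki/ca.pem"
    connection_private_key: "/root/.ssh/id_rsa"  # mutually exclusive with connection_password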
seealso: - module: cloudera.cluster.cm_config diff --git a/plugins/modules/cm_kerberos.py b/plugins/modules/cm_kerberos.py index eb437c22..c531d64e 100644 --- a/plugins/modules/cm_kerberos.py +++ b/plugins/modules/cm_kerberos.py @@ -116,7 +116,7 @@ description: - Custom Kerberos Keytab Retrieval Script. - Specify the path to a custom script, or executable, to retrieve a Kerberos keytab. - - The target script should accept two arguments: a destination path for the resulting keytab and the full principal name of the owner of the keytab. + - "The target script should accept two arguments: a destination path for the resulting keytab and the full principal name of the owner of the keytab." type: str required: false kdc_admin_user: @@ -134,7 +134,7 @@ - cloudera.cluster.cm_endpoint - cloudera.cluster.message notes: - - Using the C(cm_config) module with O(purge=yes) will remove the Cloudera Manager configurations set by this module. + - Using O(cloudera.cluster.cm_config#module:purge=yes) will remove the Cloudera Manager configurations set by this module. - Requires C(cm_client). seealso: - module: cloudera.cluster.cm_config diff --git a/plugins/modules/cm_service.py b/plugins/modules/cm_service.py index 0e6d4226..819cc175 100644 --- a/plugins/modules/cm_service.py +++ b/plugins/modules/cm_service.py @@ -35,7 +35,7 @@ role_config_groups: description: - A list of one or more role config groups to manage. - - Each role config group is the I(base) for the O(type). + - Each role config group is the I(base) for the O(role_config_groups[].type). type: list elements: dict suboptions: @@ -66,16 +66,16 @@ cluster_hostname: description: - The hostname of an instance for the role. - - If the hostname is different than that of the existing instance for the O(type), the role will be destroyed and rebuilt on the declared host. - - Mutually exclusive with O(cluster_host_id). + - If the hostname is different than that of the existing instance for the O(roles[].type), the role will be destroyed and rebuilt on the declared host. + - Mutually exclusive with O(roles[].cluster_host_id). type: str aliases: - cluster_host cluster_host_id: description: - The host ID of the instance for the role. - - If the host ID is different than that of the existing instance for the O(type), the role will be destroyed and rebuilt on the declared host. - - Mutually exclusive with O(cluster_hostname). + - If the host ID is different than that of the existing instance for the O(roles[].type), the role will be destroyed and rebuilt on the declared host. + - Mutually exclusive with O(roles[].cluster_hostname). type: str config: description: @@ -110,7 +110,7 @@ state: description: - The operating state of the service. - - The V(restarted) value will always restart the service and set RV(changed=True). + - The V(restarted) value will always restart the service and set RV(ignore:changed=True). 
type: str default: started choices: @@ -124,6 +124,7 @@ - cloudera.cluster.cm_options - cloudera.cluster.cm_endpoint - cloudera.cluster.message + - ansible.builtin.action_common_attributes attributes: check_mode: support: full diff --git a/plugins/modules/cm_service_info.py b/plugins/modules/cm_service_info.py index 10d07d28..226d8d3c 100644 --- a/plugins/modules/cm_service_info.py +++ b/plugins/modules/cm_service_info.py @@ -26,9 +26,7 @@ extends_documentation_fragment: - cloudera.cluster.cm_options - cloudera.cluster.cm_endpoint -attributes: - check_mode: - support: full + - ansible.builtin.action_common_attributes requirements: - cm-client seealso: diff --git a/plugins/modules/cm_service_role.py b/plugins/modules/cm_service_role.py index c2bd914d..820f8929 100644 --- a/plugins/modules/cm_service_role.py +++ b/plugins/modules/cm_service_role.py @@ -81,6 +81,7 @@ - cloudera.cluster.cm_options - cloudera.cluster.cm_endpoint - cloudera.cluster.message + - ansible.builtin.action_common_attributes attributes: check_mode: support: full diff --git a/plugins/modules/cm_service_role_config_group.py b/plugins/modules/cm_service_role_config_group.py index 76fd346e..5f7991a0 100644 --- a/plugins/modules/cm_service_role_config_group.py +++ b/plugins/modules/cm_service_role_config_group.py @@ -50,6 +50,7 @@ - cloudera.cluster.cm_options - cloudera.cluster.cm_endpoint - cloudera.cluster.message + - ansible.builtin.action_common_attributes attributes: check_mode: support: full diff --git a/plugins/modules/cm_service_role_config_group_config.py b/plugins/modules/cm_service_role_config_group_config.py index c6beca9c..6a79e3ca 100644 --- a/plugins/modules/cm_service_role_config_group_config.py +++ b/plugins/modules/cm_service_role_config_group_config.py @@ -61,6 +61,7 @@ - cloudera.cluster.cm_endpoint - cloudera.cluster.purge - cloudera.cluster.message + - ansible.builtin.action_common_attributes attributes: check_mode: support: full diff --git a/plugins/modules/cm_service_role_config_group_info.py b/plugins/modules/cm_service_role_config_group_info.py index fb3a35d9..a6dc301a 100644 --- a/plugins/modules/cm_service_role_config_group_info.py +++ b/plugins/modules/cm_service_role_config_group_info.py @@ -32,6 +32,7 @@ extends_documentation_fragment: - cloudera.cluster.cm_options - cloudera.cluster.cm_endpoint + - ansible.builtin.action_common_attributes attributes: check_mode: support: full diff --git a/plugins/modules/cm_service_role_info.py b/plugins/modules/cm_service_role_info.py index f95634dd..48ea9f9d 100644 --- a/plugins/modules/cm_service_role_info.py +++ b/plugins/modules/cm_service_role_info.py @@ -32,6 +32,7 @@ extends_documentation_fragment: - cloudera.cluster.cm_options - cloudera.cluster.cm_endpoint + - ansible.builtin.action_common_attributes attributes: check_mode: support: full diff --git a/plugins/modules/data_context.py b/plugins/modules/data_context.py index 843e1608..64ac389f 100644 --- a/plugins/modules/data_context.py +++ b/plugins/modules/data_context.py @@ -59,6 +59,10 @@ choices: - present - absent +extends_documentation_fragment: + - cloudera.cluster.cm_options + - cloudera.cluster.cm_endpoint + - ansible.builtin.action_common_attributes attributes: check_mode: support: full diff --git a/plugins/modules/external_account.py b/plugins/modules/external_account.py index 9023f8ae..a980e0fb 100644 --- a/plugins/modules/external_account.py +++ b/plugins/modules/external_account.py @@ -43,8 +43,8 @@ - BASICAUTH state: description: - - If I(state=present), the account will be created or 
updated. - - If I(state=absent), the account will be deleted. + - If O(state=present), the account will be created or updated. + - If O(state=absent), the account will be deleted. type: str required: no default: present @@ -109,6 +109,7 @@ - cloudera.cluster.cm_options - cloudera.cluster.cm_endpoint - cloudera.cluster.message + - ansible.builtin.action_common_attributes attributes: check_mode: support: full diff --git a/plugins/modules/external_account_info.py b/plugins/modules/external_account_info.py index 167d4adb..ffee00e8 100644 --- a/plugins/modules/external_account_info.py +++ b/plugins/modules/external_account_info.py @@ -22,8 +22,6 @@ - Provides details for a specific account or retrieves all external accounts configured in Cloudera Manager. author: - "Ronald Suplina (@rsuplina)" -requirements: - - cm_client options: name: description: @@ -45,6 +43,7 @@ - cloudera.cluster.cm_options - cloudera.cluster.cm_endpoint - cloudera.cluster.message + - ansible.builtin.action_common_attributes attributes: check_mode: support: full diff --git a/plugins/modules/external_user_mappings.py b/plugins/modules/external_user_mappings.py index 3ba9e8a9..735468f1 100644 --- a/plugins/modules/external_user_mappings.py +++ b/plugins/modules/external_user_mappings.py @@ -67,6 +67,10 @@ - If I(purge=False), the provided authorization roles will be added to the existing ones, and any duplicates will be ignored. type: bool default: False +extends_documentation_fragment: + - cloudera.cluster.cm_options + - cloudera.cluster.cm_endpoint + - ansible.builtin.action_common_attributes attributes: check_mode: support: full diff --git a/plugins/modules/external_user_mappings_info.py b/plugins/modules/external_user_mappings_info.py index afbd0dc4..944a3672 100644 --- a/plugins/modules/external_user_mappings_info.py +++ b/plugins/modules/external_user_mappings_info.py @@ -35,6 +35,9 @@ - The uuid of the external mapping. type: str required: no +extends_documentation_fragment: + - cloudera.cluster.cm_options + - cloudera.cluster.cm_endpoint """ EXAMPLES = r""" diff --git a/plugins/modules/host.py b/plugins/modules/host.py index 8ac17d56..edb07594 100644 --- a/plugins/modules/host.py +++ b/plugins/modules/host.py @@ -74,7 +74,7 @@ - Role configuration overrides for the host. type: list elements: dict - options: + suboptions: service: description: - The service of the role instance on the host. @@ -102,7 +102,7 @@ - Role config groups (and associated role instances) to apply to the host. type: list elements: dict - options: + suboptions: service: description: - The service of the role config group (and associated role instance) on the host. @@ -113,14 +113,14 @@ type: description: - The base role type of the role config group (and associated role instance) on the host. - - One of O(type) or O(name) is required. + - One of O(role_config_groups[].type) or O(role_config_groups[].name) is required. type: str aliases: - role_type name: description: - The name of the role config group (and associated role instance) on the host. - - One of O(type) or O(name) is required. + - One of O(role_config_groups[].type) or O(role_config_groups[].name) is required. 
type: str tags: description: diff --git a/plugins/modules/host_config.py b/plugins/modules/host_config.py index 20a712e4..a2eb13be 100644 --- a/plugins/modules/host_config.py +++ b/plugins/modules/host_config.py @@ -50,6 +50,10 @@ choices: - summary - full +extends_documentation_fragment: + - cloudera.cluster.cm_options + - cloudera.cluster.cm_endpoint + - ansible.builtin.action_common_attributes attributes: check_mode: support: full diff --git a/plugins/modules/host_info.py b/plugins/modules/host_info.py index 771a0b8c..d2bf4dfb 100644 --- a/plugins/modules/host_info.py +++ b/plugins/modules/host_info.py @@ -46,6 +46,7 @@ extends_documentation_fragment: - cloudera.cluster.cm_options - cloudera.cluster.cm_endpoint + - ansible.builtin.action_common_attributes attributes: check_mode: support: full diff --git a/plugins/modules/host_template.py b/plugins/modules/host_template.py index d106c07b..f60aebcd 100644 --- a/plugins/modules/host_template.py +++ b/plugins/modules/host_template.py @@ -50,7 +50,7 @@ name: description: - The name of the custom role config group for the specified service. - - Mutually exclusive with O(type). + - Mutually exclusive with O(role_config_groups[].type). type: str required: no service: @@ -59,30 +59,30 @@ type: str required: yes aliases: - - service_name + - service_name type: description: - The name of the role type of the base role config group for the specified service. - - Mutually exclusive with O(name). + - Mutually exclusive with O(role_config_groups[].name). type: str required: no aliases: - - role_type - purge: - description: - - Flag for whether the declared role config groups should append or overwrite any existing entries. - - To clear all configuration overrides or tags, set O(role_config_groups={}), i.e. an empty dictionary, and set O(purge=True). - type: bool - default: False - state: - description: - - The state of the host template. - type: str - required: no - choices: - - present - - absent - default: present + - role_type + purge: + description: + - Flag for whether the declared role config groups should append or overwrite any existing entries. + - To clear all role config group assignments, set O(role_config_groups=[]), i.e. an empty list, and set O(purge=True), as in the sketch below. + type: bool + default: False + state: + description: + - The state of the host template. + type: str + required: no + choices: + - present + - absent + default: present extends_documentation_fragment: - ansible.builtin.action_common_attributes - cloudera.cluster.cm_options diff --git a/plugins/modules/service.py b/plugins/modules/service.py index 34d5fb39..8866a175 100644 --- a/plugins/modules/service.py +++ b/plugins/modules/service.py @@ -90,7 +90,7 @@ - If O(purge=True), undeclared roles for the service will be removed from the hosts. type: list elements: dict - options: + suboptions: type: description: - The role instance type to provision on the designated cluster hosts. @@ -135,7 +135,7 @@ config groups cannot be removed.) type: list elements: dict - options: + suboptions: name: description: - The name of a custom role config group.
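Here is the host_template purge behavior documented above as a concrete sketch; the cluster and template names are hypothetical, and the connection options follow the pattern of the examples elsewhere in this patch:

- name: Clear all role config group assignments from a host template
  cloudera.cluster.host_template:
    host: example.cloudera.com
    username: "jane_smith"
    password: "S&peR4Ec*re"
    cluster: example-cluster
    name: example-host-template
    role_config_groups: []  # an empty list, combined with purge, removes every assignment
    purge: true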
diff --git a/plugins/modules/service_info.py b/plugins/modules/service_info.py index 136e061e..285cd242 100644 --- a/plugins/modules/service_info.py +++ b/plugins/modules/service_info.py @@ -41,6 +41,7 @@ extends_documentation_fragment: - cloudera.cluster.cm_options - cloudera.cluster.cm_endpoint + - ansible.builtin.action_common_attributes attributes: check_mode: support: full diff --git a/plugins/modules/service_role.py b/plugins/modules/service_role.py index 1fcc51a3..487f305e 100644 --- a/plugins/modules/service_role.py +++ b/plugins/modules/service_role.py @@ -122,6 +122,7 @@ - cloudera.cluster.cm_options - cloudera.cluster.cm_endpoint - cloudera.cluster.message + - ansible.builtin.action_common_attributes attributes: check_mode: support: full diff --git a/plugins/modules/service_role_config_group.py b/plugins/modules/service_role_config_group.py index 430b0e88..2bea1c61 100644 --- a/plugins/modules/service_role_config_group.py +++ b/plugins/modules/service_role_config_group.py @@ -91,6 +91,7 @@ - cloudera.cluster.cm_options - cloudera.cluster.cm_endpoint - cloudera.cluster.message + - ansible.builtin.action_common_attributes attributes: check_mode: support: full diff --git a/plugins/modules/service_role_config_group_info.py b/plugins/modules/service_role_config_group_info.py index 73324c71..cb70af5a 100644 --- a/plugins/modules/service_role_config_group_info.py +++ b/plugins/modules/service_role_config_group_info.py @@ -37,15 +37,6 @@ required: yes aliases: - service_name - type: - description: - - The role type defining the role config group(s). - - If specified, will return all role config groups for the type. - - Mutually exclusive with O(name). - type: str - aliases: - - role_type - name: type: description: - The role type defining the role config group(s). 
@@ -65,13 +56,7 @@ extends_documentation_fragment: - cloudera.cluster.cm_options - cloudera.cluster.cm_endpoint -attributes: - check_mode: - support: full -requirements: - - cm-client -seealso: - - module: cloudera.cluster.service_role_config_group + - ansible.builtin.action_common_attributes attributes: check_mode: support: full From 0300f72d9f2d3ca4bad8f51e508e2583d1864b64 Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Wed, 18 Jun 2025 17:34:28 -0400 Subject: [PATCH 02/21] Update for ansible-lint errors and warnings Signed-off-by: Webster Mudge --- .github/workflows/label_pr.yml | 2 +- .github/workflows/pre-commit.yml | 8 +- .github/workflows/publish_docs.yml | 3 +- .github/workflows/publish_galaxy.yml | 1 - .github/workflows/reset_pr.yml | 6 +- .github/workflows/validate_pr.yml | 8 +- .github/workflows/validate_pr_docs.yml | 5 +- .pre-commit-config.yaml | 1 + docs/links.yml | 2 +- galaxy.yml | 59 ++++++------- meta/runtime.yml | 3 +- plugins/filter/append_database_port.yml | 2 +- plugins/filter/cluster_service_role_hosts.yml | 2 +- plugins/filter/default_database_port.yml | 2 +- plugins/filter/extract_custom_role_groups.yml | 2 +- plugins/filter/extract_custom_roles.yml | 2 +- plugins/filter/extract_parcel_urls.yml | 2 +- .../extract_products_from_manifests.yml | 2 +- plugins/filter/extract_role_and_group.yml | 2 +- plugins/filter/filter_null_configs.yml | 2 +- plugins/filter/find_clusters.yml | 2 +- plugins/filter/flatten_dict_list.yml | 2 +- plugins/filter/format_database_type.yml | 2 +- .../filter/get_database_collation_mysql.yml | 2 +- .../filter/get_database_encoding_mysql.yml | 2 +- plugins/filter/get_major_version.yml | 2 +- plugins/filter/get_product_version.yml | 2 +- plugins/filter/to_ldap_type_enum.yml | 2 +- plugins/modules/assemble_cluster_template.py | 2 +- plugins/modules/cluster.py | 12 +-- plugins/modules/cluster_info.py | 1 - plugins/modules/cm_config_info.py | 4 +- plugins/modules/cm_resource.py | 9 +- plugins/modules/cm_service.py | 10 +-- plugins/modules/cm_service_config.py | 4 +- plugins/modules/cm_service_role.py | 6 +- plugins/modules/cm_service_role_config.py | 2 +- .../modules/cm_service_role_config_group.py | 6 +- .../cm_service_role_config_group_config.py | 4 +- plugins/modules/data_context.py | 10 +-- plugins/modules/data_context_info.py | 4 +- plugins/modules/external_user_mappings.py | 8 +- plugins/modules/host_config.py | 7 +- plugins/modules/host_config_info.py | 4 +- plugins/modules/host_template.py | 12 +-- plugins/modules/host_template_info.py | 4 +- plugins/modules/service.py | 10 +-- plugins/modules/service_config.py | 4 +- plugins/modules/service_role.py | 6 +- plugins/modules/service_role_config.py | 8 +- plugins/modules/service_role_config_group.py | 4 +- .../service_role_config_group_config.py | 4 +- plugins/modules/user.py | 2 +- plugins/modules/user_info.py | 1 - .../assemble_template/meta/argument_specs.yml | 3 +- roles/assemble_template/tasks/main.yml | 7 +- .../admin_password/check/tasks/main.yml | 9 +- .../admin_password/set/tasks/main.yml | 1 - roles/cloudera_manager/agent/tasks/main.yml | 3 +- .../agent_config/tasks/main.yml | 1 - .../api_client/handlers/main.yml | 1 - .../api_client/tasks/main.yml | 1 - .../cloudera_manager/api_hosts/tasks/main.yml | 1 - roles/cloudera_manager/autotls/tasks/main.yml | 3 +- .../autotls/tasks/patch_old_cm.yml | 2 +- .../cloudera_manager/common/defaults/main.yml | 2 +- .../cloudera_manager/common/handlers/main.yml | 1 - .../cloudera_manager/config/defaults/main.yml | 2 +- 
roles/cloudera_manager/config/tasks/main.yml | 1 - roles/cloudera_manager/csds/tasks/main.yml | 5 +- roles/cloudera_manager/daemons/tasks/main.yml | 3 +- .../database/defaults/main.yml | 3 +- .../database/handlers/main.yml | 1 - .../database/tasks/embedded.yml | 5 +- .../database/tasks/external.yml | 26 +++--- .../cloudera_manager/database/tasks/main.yml | 1 - .../external_account/tasks/main.yml | 1 - .../external_auth/defaults/main.yml | 2 +- .../external_auth/tasks/create_mapping.yml | 1 + .../external_auth/tasks/main.yml | 49 +++++------ .../external_auth/vars/freeipa.yml | 12 +-- .../external_auth/vars/main.yml | 12 +-- .../hosts_config/tasks/main.yml | 3 +- .../cloudera_manager/kerberos/tasks/main.yml | 3 +- .../license/tasks/enterprise.yml | 3 +- roles/cloudera_manager/license/tasks/main.yml | 1 - .../cloudera_manager/license/tasks/trial.yml | 3 +- .../preload_parcels/defaults/main.yml | 1 + .../preload_parcels/tasks/main.yml | 2 +- roles/cloudera_manager/repo/defaults/main.yml | 2 +- .../repo/tasks/main-Debian.yml | 16 ++-- .../repo/tasks/main-RedHat.yml | 3 +- roles/cloudera_manager/repo/tasks/main.yml | 4 +- roles/cloudera_manager/repo/vars/Debian.yml | 6 +- roles/cloudera_manager/repo/vars/RedHat.yml | 6 +- roles/cloudera_manager/server/tasks/main.yml | 5 +- .../server_tls/tasks/main.yml | 8 +- .../services_info/defaults/main.yml | 1 + .../services_info/tasks/main.yml | 25 +++--- .../session_timeout/tasks/main.yml | 1 - .../wait_for_heartbeat/tasks/main.yml | 1 - roles/config/cluster/base/tasks/main.yml | 1 - roles/config/cluster/base/vars/main.yml | 47 +++++----- roles/config/cluster/common/defaults/main.yml | 8 +- roles/config/cluster/ecs/tasks/main.yml | 1 - roles/config/cluster/ecs/vars/main.yml | 2 +- roles/config/cluster/kts/tasks/main.yml | 1 - roles/config/cluster/kts/vars/main.yml | 2 +- roles/config/services/kms/tasks/main.yml | 1 - roles/config/services/kms_tls/tasks/main.yml | 6 +- roles/config/services/mgmt/tasks/main.yml | 1 - roles/config/services/oozie_ui/tasks/main.yml | 2 +- .../tasks/main.yml | 9 +- .../solr_ranger_plugin/tasks/main.yml | 8 +- .../deployment/cluster/tasks/create_base.yml | 5 +- .../cluster/tasks/create_data_context.yml | 1 - roles/deployment/cluster/tasks/create_ecs.yml | 11 +-- roles/deployment/cluster/tasks/create_kts.yml | 42 +++++---- roles/deployment/cluster/tasks/fs2cs.yml | 5 +- roles/deployment/cluster/tasks/main.yml | 5 +- roles/deployment/cluster/tasks/nav2atlas.yml | 10 +-- .../deployment/cluster/tasks/update_base.yml | 6 +- .../tasks/update_role_config_group.yml | 4 +- .../deployment/cluster/tasks/upgrade_kts.yml | 7 +- roles/deployment/databases/tasks/main.yml | 1 - roles/deployment/databases/tasks/mariadb.yml | 9 +- roles/deployment/databases/tasks/mysql.yml | 9 +- .../deployment/databases/tasks/postgresql.yml | 5 +- roles/deployment/definition/defaults/main.yml | 18 ++-- roles/deployment/definition/tasks/main.yml | 1 - roles/deployment/groupby/tasks/main.yml | 11 ++- roles/deployment/repometa/tasks/parcels.yml | 5 +- .../repometa/tasks/prepare-Debian.yml | 1 - .../repometa/tasks/prepare-RedHat.yml | 1 - .../services/kms/tasks/create_kms.yml | 1 - roles/deployment/services/kms/tasks/main.yml | 3 +- .../deployment/services/kms_ha/tasks/main.yml | 4 +- .../kts_high_availability/tasks/main.yml | 4 +- roles/deployment/services/mgmt/tasks/main.yml | 1 - .../deployment/services/wxm/defaults/main.yml | 5 +- .../wxm/tasks/configure_telemetry.yml | 2 - roles/deployment/services/wxm/tasks/main.yml | 3 +- 
.../services/wxm/tasks/truststore_to_base.yml | 8 +- .../ca_server/molecule/default/verify.yml | 40 ++++----- .../ca_server/tasks/create_ca.yml | 21 ++--- roles/infrastructure/ca_server/tasks/main.yml | 5 +- .../custom_repo/defaults/main.yml | 2 +- .../custom_repo/tasks/install_parcels.yml | 11 ++- ...nstall_parcels_from_tars_on_controller.yml | 11 ++- .../infrastructure/custom_repo/tasks/main.yml | 5 +- .../tasks/rehost_files_from_download.yml | 2 +- roles/infrastructure/haproxy/tasks/main.yml | 3 +- .../krb5_client/handlers/main.yml | 1 + .../krb5_client/tasks/freeipa.yml | 1 - .../krb5_client/tasks/freeipa_autodns.yml | 8 +- .../krb5_client/tasks/freeipa_dbus_patch.yml | 4 +- .../krb5_client/tasks/pvc_configs.yml | 8 +- .../krb5_common/defaults/main.yml | 1 + roles/infrastructure/krb5_conf/tasks/mit.yml | 3 +- .../tasks/fix_freeipa_collection.yml | 12 +-- .../krb5_server/tasks/freeipa.yml | 4 +- .../infrastructure/krb5_server/tasks/mit.yml | 6 +- .../krb5_server/vars/RedHat-7.yml | 2 +- .../krb5_server/vars/RedHat-8.yml | 2 +- .../krb5_server/vars/Ubuntu.yml | 2 +- .../krb5_server/vars/default.yml | 2 +- roles/infrastructure/rdbms/handlers/main.yml | 1 - .../rdbms/tasks/mariadb-Debian.yml | 1 - .../rdbms/tasks/mariadb-RedHat.yml | 1 - .../rdbms/tasks/mysql-RedHat.yml | 2 - .../rdbms/tasks/postgresql-Debian.yml | 1 - .../rdbms/tasks/postgresql-RedHat.yml | 1 - .../rdbms/tasks/template_fix.yml | 7 +- .../infrastructure/rdbms/vars/postgresql.yml | 14 +-- .../operations/delete_cluster/tasks/main.yml | 5 +- roles/operations/delete_cms/tasks/main.yml | 1 - .../tasks/cluster_find_ranger.yml | 1 - .../refresh_ranger_kms_repo/tasks/main.yml | 1 - .../tasks/setup_cluster.yml | 5 +- .../operations/restart_cluster/tasks/main.yml | 1 - roles/operations/restart_stale/tasks/main.yml | 1 - .../restart_stale/tasks/restart.yml | 1 - roles/operations/stop_cluster/tasks/main.yml | 5 +- roles/prereqs/jdk/defaults/main.yml | 2 +- roles/prereqs/jdk/tasks/main.yml | 15 ++-- roles/prereqs/kerberos/tasks/main.yml | 1 - roles/prereqs/license/defaults/main.yml | 3 +- roles/prereqs/license/tasks/main.yml | 2 +- .../local_accounts_common/defaults/main.yml | 87 +++++++++---------- roles/prereqs/mysql_connector/tasks/main.yml | 13 ++- roles/prereqs/oracle_connector/tasks/main.yml | 14 ++- roles/prereqs/os/defaults/main.yml | 12 +-- roles/prereqs/os/handlers/main.yml | 1 - roles/prereqs/os/tasks/main-Debian.yml | 4 +- roles/prereqs/os/tasks/main-RedHat.yml | 20 ++--- roles/prereqs/os/tasks/main.yml | 17 ++-- roles/prereqs/os/tasks/rngd.yml | 7 +- .../postgresql_connector/tasks/main.yml | 15 ++-- roles/prereqs/pvc_ecs/tasks/main.yml | 9 +- roles/prereqs/user_accounts/tasks/main.yml | 3 +- .../user_accounts_ecs/defaults/main.yml | 9 +- .../prereqs/user_accounts_ecs/tasks/main.yml | 4 +- .../molecule/default/molecule.yml | 2 +- .../molecule/default/verify.yml | 6 +- .../tls_generate_csr/tasks/acls-ecs.yml | 11 ++- .../security/tls_generate_csr/tasks/acls.yml | 11 ++- .../security/tls_generate_csr/tasks/main.yml | 9 +- .../security/tls_install_certs/tasks/main.yml | 35 ++++---- roles/security/tls_nifi/tasks/main.yml | 1 - .../tls_signing/tasks/csr_signing_local.yml | 5 +- roles/security/tls_signing/tasks/main.yml | 1 - .../tls_signing/tasks/signing_freeipa.yml | 21 +++-- roles/teardown/meta/main.yml | 3 +- roles/teardown/tasks/main.yml | 13 ++- roles/teardown/tasks/teardown_cdsw.yml | 1 - .../tasks/teardown_cloudera_agent.yml | 1 + .../tasks/teardown_cloudera_server.yml | 3 +- roles/teardown/tasks/teardown_cluster.yml | 5 
+- roles/teardown/tasks/teardown_cms.yml | 1 - .../tasks/teardown_cms_role_directories.yml | 1 - .../tasks/teardown_cms_role_directory.yml | 1 - roles/teardown/tasks/teardown_database.yml | 13 ++- roles/teardown/tasks/teardown_ecs.yml | 4 +- roles/teardown/tasks/teardown_kms.yml | 1 - .../tasks/teardown_role_directories.yml | 1 - .../tasks/teardown_role_directory.yml | 1 - .../tasks/teardown_service_directories.yml | 1 - roles/teardown/vars/main.yml | 2 +- roles/verify/definition/tasks/main.yml | 29 +++---- roles/verify/inventory/tasks/main.yml | 17 ++-- .../parcels_and_roles/tasks/check_cluster.yml | 1 - .../tasks/check_cluster_config_roles.yml | 1 - .../tasks/check_template.yml | 1 - .../tasks/check_template_roles.yml | 1 - roles/verify/parcels_and_roles/tasks/main.yml | 1 - tests/config.yml | 3 +- .../unit/plugins/modules/cluster/example.yml | 1 + 237 files changed, 678 insertions(+), 784 deletions(-) diff --git a/.github/workflows/label_pr.yml b/.github/workflows/label_pr.yml index dfcd25a9..9ee76a5c 100644 --- a/.github/workflows/label_pr.yml +++ b/.github/workflows/label_pr.yml @@ -55,7 +55,7 @@ jobs: let fs = require('fs'); fs.writeFileSync(`${process.env.GITHUB_WORKSPACE}/pr_number.zip`, Buffer.from(download.data)); - - name: 'Unzip artifact' + - name: "Unzip artifact" run: unzip pr_number.zip - name: Read the PR number diff --git a/.github/workflows/pre-commit.yml b/.github/workflows/pre-commit.yml index 1b6688dc..80058b53 100644 --- a/.github/workflows/pre-commit.yml +++ b/.github/workflows/pre-commit.yml @@ -1,3 +1,4 @@ +--- # Copyright 2024 Cloudera, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,11 +19,10 @@ on: pull_request: push: branches: [main, devel] - jobs: pre-commit: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4 - - uses: actions/setup-python@v5 - - uses: pre-commit/action@v3.0.1 + - uses: actions/checkout@v4 + - uses: actions/setup-python@v5 + - uses: pre-commit/action@v3.0.1 diff --git a/.github/workflows/publish_docs.yml b/.github/workflows/publish_docs.yml index 53e98007..13ff9147 100644 --- a/.github/workflows/publish_docs.yml +++ b/.github/workflows/publish_docs.yml @@ -19,10 +19,9 @@ name: Publish documentation on: push: branches: - - 'main' + - "main" workflow_dispatch: - jobs: build-ansible-docs: name: Build Ansible Docs diff --git a/.github/workflows/publish_galaxy.yml b/.github/workflows/publish_galaxy.yml index b61ebc9c..3f39b8ba 100644 --- a/.github/workflows/publish_galaxy.yml +++ b/.github/workflows/publish_galaxy.yml @@ -19,7 +19,6 @@ name: Publish to Ansible Galaxy on: release: types: [published] - jobs: galaxy_release: runs-on: ubuntu-latest diff --git a/.github/workflows/reset_pr.yml b/.github/workflows/reset_pr.yml index 3c4c4735..897574b0 100644 --- a/.github/workflows/reset_pr.yml +++ b/.github/workflows/reset_pr.yml @@ -22,9 +22,9 @@ on: - synchronize - ready_for_review branches: - - 'release/**' - - 'devel' - - 'devel-pvc-base' + - "release/**" + - "devel" + - "devel-pvc-base" jobs: reset: diff --git a/.github/workflows/validate_pr.yml b/.github/workflows/validate_pr.yml index dab2ddac..3d947414 100644 --- a/.github/workflows/validate_pr.yml +++ b/.github/workflows/validate_pr.yml @@ -19,8 +19,8 @@ name: Validate Pull Request on: pull_request: branches: - - 'release/**' - - 'devel' + - "release/**" + - "devel" jobs: validate: @@ -32,8 +32,8 @@ jobs: - name: Setup Python and caching uses: actions/setup-python@v4 with: - python-version: '3.9' - cache: 'pip' + python-version: "3.9" + cache: "pip" - 
name: Set up Ansible and Ansible collections and roles run: | diff --git a/.github/workflows/validate_pr_docs.yml b/.github/workflows/validate_pr_docs.yml index e242ea7f..d589ee0e 100644 --- a/.github/workflows/validate_pr_docs.yml +++ b/.github/workflows/validate_pr_docs.yml @@ -19,11 +19,10 @@ name: Validate Pull Request documentation on: pull_request: branches: - - 'release/**' - - 'devel' + - "release/**" + - "devel" workflow_dispatch: - jobs: validate-docs: name: Validate Ansible Docs diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 9cda92b6..0916215b 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,3 +1,4 @@ +--- # Copyright 2024 Cloudera, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/docs/links.yml b/docs/links.yml index 7a8b2442..bc6fedb7 100644 --- a/docs/links.yml +++ b/docs/links.yml @@ -3,7 +3,7 @@ edit_on_github: repository: cloudera-labs/cloudera.cluster branch: main - path_prefix: '' + path_prefix: "" extra_links: - description: Submit a bug report diff --git a/galaxy.yml b/galaxy.yml index 72af1b5b..1cc27d8c 100644 --- a/galaxy.yml +++ b/galaxy.yml @@ -14,43 +14,44 @@ --- -namespace: cloudera -name: cluster -version: 4.5.0-rc1 -readme: README.md +namespace: cloudera +name: cluster +version: 4.5.0-rc1 +readme: README.md authors: -- Webster Mudge @wmudge -- Ronald Suplina @rsuplina -- Jim Enright @jimright + - Webster Mudge @wmudge + - Ronald Suplina @rsuplina + - Jim Enright @jimright description: > A set of roles, modules, and other plugins for interacting with the services - and endpoints provided by Cloudera on Premises and Cloudera Manager (CM). + and endpoints provided by Cloudera Manager (CM). license_file: LICENSE tags: -- cloudera -- cdp -- cdh -- private_cloud -- on_premise -- data_services -- cloudera_manager -- cm + - application + - cloud + - tools + - cloudera + - cdp + - cdh + - private_cloud + - on_premise + - data_services + - cloudera_manager + - cm dependencies: - 'ansible.posix': '1.3.0' - 'community.crypto': '2.2.1' - 'community.general': '4.5.0' + "ansible.posix": "1.3.0" + "community.crypto": "2.2.1" + "community.general": "4.5.0" -repository: https://github.com/cloudera-labs/cloudera.cluster -homepage: https://github.com/cloudera-labs/cloudera.cluster -issues: https://github.com/cloudera-labs/cloudera.cluster/issues -documentation: https://cloudera-labs.github.io/cloudera.cluster +repository: https://github.com/cloudera-labs/cloudera.cluster +homepage: https://github.com/cloudera-labs/cloudera.cluster +issues: https://github.com/cloudera-labs/cloudera.cluster/issues +documentation: https://cloudera-labs.github.io/cloudera.cluster build_ignore: -- '.*' -- docs -- docsrc -- site - -... + - ".*" + - docs + - docsrc + - site diff --git a/meta/runtime.yml b/meta/runtime.yml index 6b7c4978..536567bf 100644 --- a/meta/runtime.yml +++ b/meta/runtime.yml @@ -1,3 +1,4 @@ +--- # -*- coding: utf-8 -*- # Copyright 2025 Cloudera, Inc. All Rights Reserved. @@ -14,7 +15,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
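For reference, the galaxy.yml coordinates above translate into a consumer-side requirements file along these lines (a sketch; the version pin and source are illustrative):

---
collections:
  - name: cloudera.cluster
    version: 4.5.0-rc1
    source: https://galaxy.ansible.com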
-requires_ansible: ">=2.10" +requires_ansible: ">=2.15.0" action_groups: cm: diff --git a/plugins/filter/append_database_port.yml b/plugins/filter/append_database_port.yml index bb1b5b19..684b8941 100644 --- a/plugins/filter/append_database_port.yml +++ b/plugins/filter/append_database_port.yml @@ -1,8 +1,8 @@ +--- DOCUMENTATION: name: append_database_port short_description: append_database_port description: append_database_port EXAMPLES: - RETURN: diff --git a/plugins/filter/cluster_service_role_hosts.yml b/plugins/filter/cluster_service_role_hosts.yml index 69d91379..e2cb4a50 100644 --- a/plugins/filter/cluster_service_role_hosts.yml +++ b/plugins/filter/cluster_service_role_hosts.yml @@ -1,8 +1,8 @@ +--- DOCUMENTATION: name: cluster_service_role_hosts short_description: cluster_service_role_hosts description: cluster_service_role_hosts EXAMPLES: - RETURN: diff --git a/plugins/filter/default_database_port.yml b/plugins/filter/default_database_port.yml index ee07c9ea..a6e4aa00 100644 --- a/plugins/filter/default_database_port.yml +++ b/plugins/filter/default_database_port.yml @@ -1,8 +1,8 @@ +--- DOCUMENTATION: name: default_database_port short_description: default_database_port description: default_database_port EXAMPLES: - RETURN: diff --git a/plugins/filter/extract_custom_role_groups.yml b/plugins/filter/extract_custom_role_groups.yml index e5c0f4aa..108fbf15 100644 --- a/plugins/filter/extract_custom_role_groups.yml +++ b/plugins/filter/extract_custom_role_groups.yml @@ -1,8 +1,8 @@ +--- DOCUMENTATION: name: extract_custom_role_groups short_description: extract_custom_role_groups description: extract_custom_role_groups EXAMPLES: - RETURN: diff --git a/plugins/filter/extract_custom_roles.yml b/plugins/filter/extract_custom_roles.yml index bb6512a8..695345da 100644 --- a/plugins/filter/extract_custom_roles.yml +++ b/plugins/filter/extract_custom_roles.yml @@ -1,8 +1,8 @@ +--- DOCUMENTATION: name: extract_custom_roles short_description: extract_custom_roles description: extract_custom_roles EXAMPLES: - RETURN: diff --git a/plugins/filter/extract_parcel_urls.yml b/plugins/filter/extract_parcel_urls.yml index 5abdc4db..4e617638 100644 --- a/plugins/filter/extract_parcel_urls.yml +++ b/plugins/filter/extract_parcel_urls.yml @@ -1,8 +1,8 @@ +--- DOCUMENTATION: name: extract_parcel_urls short_description: extract_parcel_urls description: extract_parcel_urls EXAMPLES: - RETURN: diff --git a/plugins/filter/extract_products_from_manifests.yml b/plugins/filter/extract_products_from_manifests.yml index 1d05f48a..e834ea28 100644 --- a/plugins/filter/extract_products_from_manifests.yml +++ b/plugins/filter/extract_products_from_manifests.yml @@ -1,8 +1,8 @@ +--- DOCUMENTATION: name: extract_products_from_manifests short_description: extract_products_from_manifests description: extract_products_from_manifests EXAMPLES: - RETURN: diff --git a/plugins/filter/extract_role_and_group.yml b/plugins/filter/extract_role_and_group.yml index d172969b..181192c7 100644 --- a/plugins/filter/extract_role_and_group.yml +++ b/plugins/filter/extract_role_and_group.yml @@ -1,8 +1,8 @@ +--- DOCUMENTATION: name: extract_role_and_group short_description: extract_role_and_group description: extract_role_and_group EXAMPLES: - RETURN: diff --git a/plugins/filter/filter_null_configs.yml b/plugins/filter/filter_null_configs.yml index a8d4be3a..8c807050 100644 --- a/plugins/filter/filter_null_configs.yml +++ b/plugins/filter/filter_null_configs.yml @@ -1,8 +1,8 @@ +--- DOCUMENTATION: name: fill_null_configs short_description: 
fill_null_configs description: fill_null_configs EXAMPLES: - RETURN: diff --git a/plugins/filter/find_clusters.yml b/plugins/filter/find_clusters.yml index 99a0be9a..65d13152 100644 --- a/plugins/filter/find_clusters.yml +++ b/plugins/filter/find_clusters.yml @@ -1,8 +1,8 @@ +--- DOCUMENTATION: name: find_clusters short_description: find_clusters description: find_clusters EXAMPLES: - RETURN: diff --git a/plugins/filter/flatten_dict_list.yml b/plugins/filter/flatten_dict_list.yml index 39b6fe33..a9e0f8e0 100644 --- a/plugins/filter/flatten_dict_list.yml +++ b/plugins/filter/flatten_dict_list.yml @@ -1,8 +1,8 @@ +--- DOCUMENTATION: name: flatten_dict_list short_description: flatten_dict_list description: flatten_dict_list EXAMPLES: - RETURN: diff --git a/plugins/filter/format_database_type.yml b/plugins/filter/format_database_type.yml index 1cc588a5..54909885 100644 --- a/plugins/filter/format_database_type.yml +++ b/plugins/filter/format_database_type.yml @@ -1,8 +1,8 @@ +--- DOCUMENTATION: name: format_database_type short_description: format_database_type description: format_database_type EXAMPLES: - RETURN: diff --git a/plugins/filter/get_database_collation_mysql.yml b/plugins/filter/get_database_collation_mysql.yml index ce199519..6a299eb2 100644 --- a/plugins/filter/get_database_collation_mysql.yml +++ b/plugins/filter/get_database_collation_mysql.yml @@ -1,8 +1,8 @@ +--- DOCUMENTATION: name: get_database_collation_mysql short_description: get_database_collation_mysql description: get_database_collation_mysql EXAMPLES: - RETURN: diff --git a/plugins/filter/get_database_encoding_mysql.yml b/plugins/filter/get_database_encoding_mysql.yml index c20f2969..65146220 100644 --- a/plugins/filter/get_database_encoding_mysql.yml +++ b/plugins/filter/get_database_encoding_mysql.yml @@ -1,8 +1,8 @@ +--- DOCUMENTATION: name: get_database_encoding_mysql short_description: get_database_encoding_mysql description: get_database_encoding_mysql EXAMPLES: - RETURN: diff --git a/plugins/filter/get_major_version.yml b/plugins/filter/get_major_version.yml index ea2a5cb3..9febf3a0 100644 --- a/plugins/filter/get_major_version.yml +++ b/plugins/filter/get_major_version.yml @@ -1,8 +1,8 @@ +--- DOCUMENTATION: name: get_major_version short_description: get_major_version description: get_major_version EXAMPLES: - RETURN: diff --git a/plugins/filter/get_product_version.yml b/plugins/filter/get_product_version.yml index c084b1aa..966765ee 100644 --- a/plugins/filter/get_product_version.yml +++ b/plugins/filter/get_product_version.yml @@ -1,8 +1,8 @@ +--- DOCUMENTATION: name: get_product_version short_description: get_product_version description: get_product_version EXAMPLES: - RETURN: diff --git a/plugins/filter/to_ldap_type_enum.yml b/plugins/filter/to_ldap_type_enum.yml index acfc3895..1367f40c 100644 --- a/plugins/filter/to_ldap_type_enum.yml +++ b/plugins/filter/to_ldap_type_enum.yml @@ -1,8 +1,8 @@ +--- DOCUMENTATION: name: to_ldap_type_enum short_description: to_ldap_type_enum description: to_ldap_type_enum EXAMPLES: - RETURN: diff --git a/plugins/modules/assemble_cluster_template.py b/plugins/modules/assemble_cluster_template.py index 0c237160..e2c7b13b 100644 --- a/plugins/modules/assemble_cluster_template.py +++ b/plugins/modules/assemble_cluster_template.py @@ -123,7 +123,7 @@ cloudera.cluster.assemble_cluster_template: src: /tmp/examples dest: /opt/cloudera/cluster-template.json - remote_src: yes + remote_src: true """ RETURN = r"""#""" diff --git a/plugins/modules/cluster.py b/plugins/modules/cluster.py 
index 782edda3..851eb054 100644 --- a/plugins/modules/cluster.py +++ b/plugins/modules/cluster.py @@ -520,12 +520,12 @@ type: ZOOKEEPER display_name: Zookeeper config: - zookeeper_datadir_autocreate: yes + zookeeper_datadir_autocreate: true - name: hdfs-0 type: HDFS config: - zookeeper_service: zookeeper-0 - core_connector: core-settings-0 + zookeeper_service: zookeeper-0 + core_connector: core-settings-0 role_groups: - type: DATANODE config: @@ -595,7 +595,7 @@ port: "7180" name: example-cluster template: "./files/cluster-template.json" - add_repositories: yes + add_repositories: true - name: Create an ECS cluster cloudera.cluster.cluster: @@ -656,8 +656,8 @@ remote_repo_url: "https://test_website/cdp-pvc-ds/1.5.1" control_plane_config: ContainerInfo: - Mode: public - CopyDocker: false + Mode: public + CopyDocker: false Database: Mode: embedded EmbeddedDbStorage: 50 diff --git a/plugins/modules/cluster_info.py b/plugins/modules/cluster_info.py index 17bcb2fc..271d3a13 100644 --- a/plugins/modules/cluster_info.py +++ b/plugins/modules/cluster_info.py @@ -48,7 +48,6 @@ username: "jane_smith" password: "S&peR4Ec*re" port: "7180" - """ RETURN = r""" diff --git a/plugins/modules/cm_config_info.py b/plugins/modules/cm_config_info.py index 416f7685..c4a29bda 100644 --- a/plugins/modules/cm_config_info.py +++ b/plugins/modules/cm_config_info.py @@ -41,14 +41,14 @@ EXAMPLES = r""" - name: Retrieve the summary (default) settings - cloudera.cluster.cm_config_info + cloudera.cluster.cm_config_info: host: example.cloudera.com username: "jane_smith" password: "S&peR4Ec*re" register: summary - name: Retrieve the full settings - cloudera.cluster.cm_config_info + cloudera.cluster.cm_config_info: host: example.cloudera.com username: "jane_smith" password: "S&peR4Ec*re" diff --git a/plugins/modules/cm_resource.py b/plugins/modules/cm_resource.py index f6f3744a..880a1fcb 100644 --- a/plugins/modules/cm_resource.py +++ b/plugins/modules/cm_resource.py @@ -34,9 +34,9 @@ type: str required: True choices: - - DELETE - - POST - - PUT + - DELETE + - POST + - PUT body: description: - HTTP body for the CM API endpoint call. 
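A sketch of a body-bearing cm_resource call using the options documented above; the endpoint path and parameter name are hypothetical, as the module forwards both verbatim to the CM API:

- name: Update a Cloudera Manager configuration setting via PUT
  cloudera.cluster.cm_resource:
    host: example.cloudera.com
    username: "jane_smith"
    password: "S&peR4Ec*re"
    method: PUT
    path: "/cm/config"
    body:
      items:
        - name: "session_timeout"  # hypothetical configuration parameter
          value: "30"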
@@ -72,7 +72,8 @@ - name: "ROLE_LIMITED" - name: Delete a Cloudera Manager user using a custom SSL certificate - host: example.cloudera.com + cloudera.cluster.cm_resource: + host: example.cloudera.com username: "jane_smith" password: "S&peR4Ec*re" path: "/user/existing_user" diff --git a/plugins/modules/cm_service.py b/plugins/modules/cm_service.py index 819cc175..cb60598d 100644 --- a/plugins/modules/cm_service.py +++ b/plugins/modules/cm_service.py @@ -163,7 +163,7 @@ password: "S&peR4Ec*re" config: mgmt_pause_duration_window: 10 - ldap_monitoring_enabled: no + ldap_monitoring_enabled: false - name: Unset a service-wide configuration for Cloudera Manager service cloudera.cluster.cm_service: @@ -218,12 +218,12 @@ process_start_secs: None - name: Update the service state to only the declared configuration - cloudera.cluster.cm_service + cloudera.cluster.cm_service: host: "cm.example.com" username: "jane_smith" password: "S&peR4Ec*re" state: started - purge: yes + purge: true config: mgmt_pause_duration_window: 10 role_config_groups: @@ -244,14 +244,14 @@ cluster_hostname: "services01.example.com" - name: Stop the Cloudera Manager service - cloudera.cluster.cm_service + cloudera.cluster.cm_service: host: "cm.example.com" username: "jane_smith" password: "S&peR4Ec*re" state: "stopped" - name: Remove the Cloudera Manager service and its roles and role config groups - cloudera.cluster.cm_service + cloudera.cluster.cm_service: host: "cm.example.com" username: "jane_smith" password: "S&peR4Ec*re" diff --git a/plugins/modules/cm_service_config.py b/plugins/modules/cm_service_config.py index 436eba18..ea4992bb 100644 --- a/plugins/modules/cm_service_config.py +++ b/plugins/modules/cm_service_config.py @@ -84,7 +84,7 @@ parameters: config_one: ValueOne config_two: 4567 - purge: yes + purge: true - name: Reset all service-wide parameters cloudera.cluster.cm_service_config: @@ -94,7 +94,7 @@ cluster: example-cluster service: example-service parameters: {} - purge: yes + purge: true """ RETURN = r""" diff --git a/plugins/modules/cm_service_role.py b/plugins/modules/cm_service_role.py index 820f8929..6ecbafb7 100644 --- a/plugins/modules/cm_service_role.py +++ b/plugins/modules/cm_service_role.py @@ -111,7 +111,7 @@ username: "jane_smith" password: "S&peR4Ec*re" type: HOSTMONITOR - maintenance: yes + maintenance: true - name: Update (append) role configurations to a Cloudera Manager Service role cloudera.cluster.cm_service_role: @@ -130,7 +130,7 @@ type: HOSTMONITOR config: yet_another_config: value_three - purge: yes + purge: true - name: Remove all role configurations on a Cloudera Manager Service role cloudera.cluster.cm_service_role: @@ -138,7 +138,7 @@ username: "jane_smith" password: "S&peR4Ec*re" type: HOSTMONITOR - purge: yes + purge: true - name: Start a Cloudera Manager Service role cloudera.cluster.cm_service_role: diff --git a/plugins/modules/cm_service_role_config.py b/plugins/modules/cm_service_role_config.py index fc6efbf3..051258a0 100644 --- a/plugins/modules/cm_service_role_config.py +++ b/plugins/modules/cm_service_role_config.py @@ -115,7 +115,7 @@ password: "S&peR4Ec*re" type: HOSTMONITOR parameters: {} - purge: yes + purge: true """ RETURN = r""" diff --git a/plugins/modules/cm_service_role_config_group.py b/plugins/modules/cm_service_role_config_group.py index 5f7991a0..30557736 100644 --- a/plugins/modules/cm_service_role_config_group.py +++ b/plugins/modules/cm_service_role_config_group.py @@ -74,7 +74,7 @@ password: "S&peR4Ec*re" type: HOSTMONITOR config: - some_parameter: 
True + some_parameter: true - name: Update the configuration of a Cloudera Manager service role config group, purging undeclared parameters cloudera.cluster.cm_service_role_config_group: @@ -84,7 +84,7 @@ type: HOSTMONITOR config: another_parameter: 3456 - purge: yes + purge: true - name: Reset the configuration of a Cloudera Manager service role config group cloudera.cluster.cm_service_role_config_group: @@ -92,7 +92,7 @@ username: "jane_smith" password: "S&peR4Ec*re" type: HOSTMONITOR - purge: yes + purge: true """ RETURN = r""" diff --git a/plugins/modules/cm_service_role_config_group_config.py b/plugins/modules/cm_service_role_config_group_config.py index 6a79e3ca..0c1f6104 100644 --- a/plugins/modules/cm_service_role_config_group_config.py +++ b/plugins/modules/cm_service_role_config_group_config.py @@ -100,7 +100,7 @@ parameters: config_one: ValueOne config_two: 4567 - purge: yes + purge: true - name: Reset all role config group parameters for a Cloudera Manager Service role type cloudera.cluster.service_role_config_group_config: @@ -109,7 +109,7 @@ password: "S&peR4Ec*re" type: HOSTMONITOR parameters: {} - purge: yes + purge: true """ RETURN = r""" diff --git a/plugins/modules/data_context.py b/plugins/modules/data_context.py index 64ac389f..7f197ef2 100644 --- a/plugins/modules/data_context.py +++ b/plugins/modules/data_context.py @@ -72,17 +72,17 @@ EXAMPLES = r""" - name: Create a Data Context - cloudera.cluster.data_context + cloudera.cluster.data_context: host: example.cloudera.com username: "jane_smith" password: "S&peR4Ec*re" name: "base_services" cluster: "example_cluster" - services: ['hive','atlas','hdfs','ranger'] + services: ['hive', 'atlas', 'hdfs', 'ranger'] state: present - name: Delete a data context - cloudera.cluster.data_context + cloudera.cluster.data_context: host: example.cloudera.com username: "jane_smith" password: "S&peR4Ec*re" @@ -90,13 +90,13 @@ state: absent - name: Update an existing data context - cloudera.cluster.data_context + cloudera.cluster.data_context: host: example.cloudera.com username: "jane_smith" password: "S&peR4Ec*re" name: "base_services" cluster: "example_cluster" - services: ['hive','atlas','hdfs'] + services: ['hive', 'atlas', 'hdfs'] state: present """ diff --git a/plugins/modules/data_context_info.py b/plugins/modules/data_context_info.py index f59d6662..06c93340 100644 --- a/plugins/modules/data_context_info.py +++ b/plugins/modules/data_context_info.py @@ -37,14 +37,14 @@ EXAMPLES = r""" - name: Gather details about specific data context - cloudera.cluster.data_context_info + cloudera.cluster.data_context_info: host: example.cloudera.com username: "jane_smith" password: "S&peR4Ec*re" name: "SDX" - name: Gather details about all data contexts within the cluster - cloudera.cluster.data_context_info + cloudera.cluster.data_context_info: host: example.cloudera.com username: "jane_smith" password: "S&peR4Ec*re" diff --git a/plugins/modules/external_user_mappings.py b/plugins/modules/external_user_mappings.py index 735468f1..c73c3efd 100644 --- a/plugins/modules/external_user_mappings.py +++ b/plugins/modules/external_user_mappings.py @@ -97,7 +97,7 @@ name: "basic_user" state: "present" type: "LDAP" - auth_roles: ["ROLE_DASHBOARD_USER","ROLE_USER","ROLE_CLUSTER_CREATOR"] + auth_roles: ["ROLE_DASHBOARD_USER", "ROLE_USER", "ROLE_CLUSTER_CREATOR"] - name: Replace current permissions in external user mapping cloudera.cluster.external_user_mappings: @@ -108,7 +108,7 @@ state: "present" purge: "True" type: "LDAP" - auth_roles: 
["ROLE_DASHBOARD_USER","ROLE_USER"] + auth_roles: ["ROLE_DASHBOARD_USER", "ROLE_USER"] - name: Remove specified authorization roles from external user mapping cloudera.cluster.external_user_mappings: @@ -118,7 +118,7 @@ name: "default_user" state: "absent" type: "LDAP" - auth_roles: ["ROLE_DASHBOARD_USER","ROLE_USER"] + auth_roles: ["ROLE_DASHBOARD_USER", "ROLE_USER"] - name: Remove external user mapping cloudera.cluster.external_user_mappings: @@ -135,7 +135,7 @@ username: "jane_smith" password: "S&peR4Ec*re" name: "basic_user" - purge: True + purge: true auth_roles: [] """ diff --git a/plugins/modules/host_config.py b/plugins/modules/host_config.py index a2eb13be..229fd841 100644 --- a/plugins/modules/host_config.py +++ b/plugins/modules/host_config.py @@ -63,7 +63,7 @@ EXAMPLES = r""" - name: Update host configuration parameters - cloudera.cluster.host_config + cloudera.cluster.host_config: host: example.cloudera.com username: "jane_smith" password: "S&peR4Ec*re" @@ -73,16 +73,15 @@ port_configuration: 8777 - name: Reset all host configurations and update specified parameters - cloudera.cluster.host_config + cloudera.cluster.host_config: host: example.cloudera.com username: "jane_smith" password: "S&peR4Ec*re" name: example.cloudera.com - purge: yes + purge: true parameters: some_configuration_path: "/usr/bin/java" port_configuration: 8777 - """ RETURN = r""" diff --git a/plugins/modules/host_config_info.py b/plugins/modules/host_config_info.py index cac421b5..ac25c9ab 100644 --- a/plugins/modules/host_config_info.py +++ b/plugins/modules/host_config_info.py @@ -45,7 +45,7 @@ EXAMPLES = r""" - name: Gather the configuration details for a host - cloudera.cluster.host_config_info + cloudera.cluster.host_config_info: host: example.cloudera.com username: "jane_smith" password: "S&peR4Ec*re" @@ -53,7 +53,7 @@ view: summary - name: Gather the configuration details in 'full' for a host - cloudera.cluster.host_config_info + cloudera.cluster.host_config_info: host: example.cloudera.com username: "jane_smith" password: "S&peR4Ec*re" diff --git a/plugins/modules/host_template.py b/plugins/modules/host_template.py index f60aebcd..5e32c5ce 100644 --- a/plugins/modules/host_template.py +++ b/plugins/modules/host_template.py @@ -102,7 +102,7 @@ EXAMPLES = r""" - name: Provision a host template with a base role config group assignment - cloudera.cluster.host_template + cloudera.cluster.host_template: host: example.cloudera.com username: "jane_smith" password: "S&peR4Ec*re" @@ -113,7 +113,7 @@ service: hdfs-service-1 - name: Provision a host template with a named (custom) role config group assignment - cloudera.cluster.host_template + cloudera.cluster.host_template: host: example.cloudera.com username: "jane_smith" password: "S&peR4Ec*re" @@ -124,7 +124,7 @@ service: zookeeper-service-1 - name: Update (append) a role config group to a host template - cloudera.cluster.host_template + cloudera.cluster.host_template: host: example.cloudera.com username: "jane_smith" password: "S&peR4Ec*re" @@ -135,7 +135,7 @@ service: ozone-service-2 - name: Update (reset) the role config groups of a host template - cloudera.cluster.host_template + cloudera.cluster.host_template: host: example.cloudera.com username: "jane_smith" password: "S&peR4Ec*re" @@ -146,10 +146,10 @@ service: hdfs-service-1 - type: OZONE_DATANODE service: ozone-service-2 - purge: yes + purge: true - name: Remove a host template - cloudera.cluster.host_template + cloudera.cluster.host_template: host: example.cloudera.com username: "jane_smith" 
password: "S&peR4Ec*re" diff --git a/plugins/modules/host_template_info.py b/plugins/modules/host_template_info.py index a8f9259a..3dc82c75 100644 --- a/plugins/modules/host_template_info.py +++ b/plugins/modules/host_template_info.py @@ -55,7 +55,7 @@ EXAMPLES = r""" - name: Retrieve the details about a specific host template - cloudera.cluster.host_template_info + cloudera.cluster.host_template_info: host: example.cloudera.com username: "jane_smith" password: "S&peR4Ec*re" @@ -63,7 +63,7 @@ name: "example_host_template" - name: Retrieve the details about all host templates within the cluster - cloudera.cluster.host_template_info + cloudera.cluster.host_template_info: host: example.cloudera.com username: "jane_smith" password: "S&peR4Ec*re" diff --git a/plugins/modules/service.py b/plugins/modules/service.py index 8866a175..389249c0 100644 --- a/plugins/modules/service.py +++ b/plugins/modules/service.py @@ -229,7 +229,7 @@ password: "S&peR4Ec*re" cluster: example_cluster service: example_ecs - maintenance: yes + maintenance: true - name: Update (append) several tags on a cluster service cloudera.cluster.service: @@ -251,7 +251,7 @@ service: example_ecs tags: tag_three: value_three - purge: yes + purge: true - name: Remove all the tags on a cluster service cloudera.cluster.service: @@ -261,7 +261,7 @@ cluster: example_cluster service: example_ecs tags: {} - purge: yes + purge: true - name: Update (append) several service-wide configurations on a cluster service cloudera.cluster.service: @@ -284,7 +284,7 @@ config: param_one: 1 param_three: three - purge: yes + purge: true - name: Remove all the service-wide configurations on a cluster service cloudera.cluster.service: @@ -294,7 +294,7 @@ cluster: example_cluster service: example_ecs config: {} - purge: yes + purge: true - name: Provision role instances on cluster hosts for a cluster service cloudera.cluster.service: diff --git a/plugins/modules/service_config.py b/plugins/modules/service_config.py index ccf9357c..3fd6b699 100644 --- a/plugins/modules/service_config.py +++ b/plugins/modules/service_config.py @@ -103,7 +103,7 @@ parameters: config_one: ValueOne config_two: 4567 - purge: yes + purge: true - name: Reset all service-wide parameters cloudera.cluster.service_config: @@ -113,7 +113,7 @@ cluster: example-cluster service: example-service parameters: {} - purge: yes + purge: true """ RETURN = r""" diff --git a/plugins/modules/service_role.py b/plugins/modules/service_role.py index 487f305e..e69105a8 100644 --- a/plugins/modules/service_role.py +++ b/plugins/modules/service_role.py @@ -155,7 +155,7 @@ cluster: example-cluster service: example-hdfs name: example-GATEWAY - maintenance: yes + maintenance: true - name: Update (append) tags to a service role cloudera.cluster.service_role: @@ -180,7 +180,7 @@ cluster_hostname: worker-01.cloudera.internal tags: tag_three: value_three - purge: yes + purge: true - name: Remove all tags on a service role cloudera.cluster.service_role: @@ -192,7 +192,7 @@ type: GATEWAY cluster_hostname: worker-01.cloudera.internal tags: {} - purge: yes + purge: true - name: Start a service role cloudera.cluster.service_role: diff --git a/plugins/modules/service_role_config.py b/plugins/modules/service_role_config.py index ad159ecf..e88249b7 100644 --- a/plugins/modules/service_role_config.py +++ b/plugins/modules/service_role_config.py @@ -89,7 +89,7 @@ another_configuration: 234 - name: Reset a role parameter - cloudera.cluster.cluster_service_role_config: + cloudera.cluster.service_role_config: host:
example.cloudera.com username: "jane_smith" password: "S&peR4Ec*re" @@ -99,7 +99,7 @@ more_configuration: None - name: Update (purge) role parameters - cloudera.cluster.cluster_service_role_config: + cloudera.cluster.service_role_config: host: example.cloudera.com username: "jane_smith" password: "S&peR4Ec*re" @@ -111,14 +111,14 @@ config_three: 2345 - name: Reset all role parameters - cloudera.cluster.cluster_service_role_config: + cloudera.cluster.service_role_config: host: example.cloudera.com username: "jane_smith" password: "S&peR4Ec*re" cluster: example-cluster service: example-service parameters: {} - purge: yes + purge: true """ RETURN = r""" diff --git a/plugins/modules/service_role_config_group.py b/plugins/modules/service_role_config_group.py index 2bea1c61..2a7e06da 100644 --- a/plugins/modules/service_role_config_group.py +++ b/plugins/modules/service_role_config_group.py @@ -129,7 +129,7 @@ type: SERVER config: another_parameter: 12345 - purge: yes + purge: true - name: Update the base role config group for a role type cloudera.cluster.service_role_config_group: @@ -152,7 +152,7 @@ service: ZooKeeper name: Example-ZK-Server type: SERVER - purge: yes + purge: true """ RETURN = r""" diff --git a/plugins/modules/service_role_config_group_config.py b/plugins/modules/service_role_config_group_config.py index f56a7eb3..978179cc 100644 --- a/plugins/modules/service_role_config_group_config.py +++ b/plugins/modules/service_role_config_group_config.py @@ -110,7 +110,7 @@ parameters: config_one: ValueOne config_two: 4567 - purge: yes + purge: true - name: Reset all role config group parameters cloudera.cluster.service_role_config_group_config: @@ -120,7 +120,7 @@ cluster: example-cluster service: example-service parameters: {} - purge: yes + purge: true """ RETURN = r""" diff --git a/plugins/modules/user.py b/plugins/modules/user.py index 48c5d3fe..dce95c4e 100644 --- a/plugins/modules/user.py +++ b/plugins/modules/user.py @@ -84,7 +84,7 @@ password: "S&peR4Ec*re" account_name: "john" account_password: "Password123" - roles: ["Configurator","Dashboard User","Limited Operator"] + roles: ["Configurator", "Dashboard User", "Limited Operator"] state: "present" - name: Reduce permissions on user to a single role diff --git a/plugins/modules/user_info.py b/plugins/modules/user_info.py index 08cdd2a4..edf13bc7 100644 --- a/plugins/modules/user_info.py +++ b/plugins/modules/user_info.py @@ -48,7 +48,6 @@ username: "jane_smith" password: "S&peR4Ec*re" account_name: "john" - """ RETURN = r""" diff --git a/roles/assemble_template/meta/argument_specs.yml b/roles/assemble_template/meta/argument_specs.yml index f34ebdb3..8e032be9 100644 --- a/roles/assemble_template/meta/argument_specs.yml +++ b/roles/assemble_template/meta/argument_specs.yml @@ -18,7 +18,8 @@ argument_specs: main: short_description: "Discover and render files into a cluster template" description: - - Discovers fragment files in a specified directory, loops through fragment files rendering them through M(ansible.builtin.template), places them in a temporary directory, and then assembles a single, final cluster template. + - Discovers fragment files in a specified directory, loops through fragment files rendering them through M(ansible.builtin.template), places them in a + temporary directory, and then assembles a single, final cluster template. - Fragment files must be located on the Ansible controller since M(ansible.builtin.template) only executes on the controller. 
- This ensures that the template processing occurs on the Ansible controller machine. author: diff --git a/roles/assemble_template/tasks/main.yml b/roles/assemble_template/tasks/main.yml index c76ed466..286525a5 100644 --- a/roles/assemble_template/tasks/main.yml +++ b/roles/assemble_template/tasks/main.yml @@ -13,13 +13,12 @@ # limitations under the License. --- - - name: Discover fragment files ansible.builtin.find: paths: "{{ cluster_template_fragments_directory }}" patterns: "{{ cluster_template_fragments_regex | default(omit) }}" - use_regex: yes - recurse: yes + use_regex: true + recurse: true register: fragments delegate_to: localhost @@ -33,6 +32,7 @@ ansible.builtin.template: src: "{{ __fragment.path }}" dest: "{{ fragments_temp_directory.path }}/{{ __fragment.path | basename }}" + mode: "0644" loop: "{{ fragments.files }}" loop_control: loop_var: __fragment @@ -44,6 +44,7 @@ cloudera.cluster.assemble_cluster_template: src: "{{ fragments_temp_directory.path }}" dest: "{{ cluster_template_file }}" + mode: "0644" - name: Remove temporary directory ansible.builtin.file: diff --git a/roles/cloudera_manager/admin_password/check/tasks/main.yml b/roles/cloudera_manager/admin_password/check/tasks/main.yml index 19a6f325..2d5dc44a 100644 --- a/roles/cloudera_manager/admin_password/check/tasks/main.yml +++ b/roles/cloudera_manager/admin_password/check/tasks/main.yml @@ -13,7 +13,6 @@ # limitations under the License. --- - - name: Wait for Cloudera Manager Port to be up delegate_to: "{{ groups.cloudera_manager[0] if 'cloudera_manager' in groups else 'localhost' }}" ansible.builtin.wait_for: @@ -34,14 +33,14 @@ validate_certs: "{{ cloudera_manager_tls_validate_certs }}" url_username: "admin" url_password: "admin" - force_basic_auth: yes + force_basic_auth: true status_code: [200, 401] register: default_admin_password_check retries: 30 until: - default_admin_password_check.status is defined - default_admin_password_check.status != -1 - run_once: True + run_once: true when: - cloudera_manager_admin_password is defined - cloudera_manager_api_password == 'admin' @@ -50,7 +49,7 @@ delegate_to: "{{ groups.cloudera_manager[0] if 'cloudera_manager' in groups else 'localhost' }}" set_fact: cloudera_manager_api_password: "{{ cloudera_manager_admin_password }}" - run_once: True + run_once: true when: - cloudera_manager_admin_password is defined - cloudera_manager_api_password == 'admin' @@ -62,4 +61,4 @@ - delegate_to: "{{ groups.cloudera_manager[0] if 'cloudera_manager' in groups else 'localhost' }}" set_fact: cloudera_manager_api_password: "{{ cloudera_manager_api_password }}" - run_once: True + run_once: true diff --git a/roles/cloudera_manager/admin_password/set/tasks/main.yml b/roles/cloudera_manager/admin_password/set/tasks/main.yml index c4a0a5c8..0b298f3b 100644 --- a/roles/cloudera_manager/admin_password/set/tasks/main.yml +++ b/roles/cloudera_manager/admin_password/set/tasks/main.yml @@ -13,7 +13,6 @@ # limitations under the License. --- - - name: Update the Cloudera Manager admin password cloudera.cluster.cm_api: endpoint: /users/admin diff --git a/roles/cloudera_manager/agent/tasks/main.yml b/roles/cloudera_manager/agent/tasks/main.yml index b29da920..d67a2f2e 100644 --- a/roles/cloudera_manager/agent/tasks/main.yml +++ b/roles/cloudera_manager/agent/tasks/main.yml @@ -13,7 +13,6 @@ # limitations under the License. 
--- - - name: Gather the package facts ansible.builtin.package_facts: manager: auto @@ -39,5 +38,5 @@ ansible.builtin.package: lock_timeout: "{{ (ansible_os_family == 'RedHat') | ternary(60, omit) }}" name: cloudera-manager-agent - update_cache: yes + update_cache: true state: latest diff --git a/roles/cloudera_manager/agent_config/tasks/main.yml b/roles/cloudera_manager/agent_config/tasks/main.yml index ceeff95f..48c02487 100644 --- a/roles/cloudera_manager/agent_config/tasks/main.yml +++ b/roles/cloudera_manager/agent_config/tasks/main.yml @@ -13,7 +13,6 @@ # limitations under the License. --- - - name: Set Cloudera Manager agent 'server_host' in config.ini lineinfile: dest: "{{ cloudera_manager_agent_config_file }}" diff --git a/roles/cloudera_manager/api_client/handlers/main.yml b/roles/cloudera_manager/api_client/handlers/main.yml index 6531575d..a6f6d77b 100644 --- a/roles/cloudera_manager/api_client/handlers/main.yml +++ b/roles/cloudera_manager/api_client/handlers/main.yml @@ -13,7 +13,6 @@ # limitations under the License. --- - - name: restart cloudera management service cm_api: endpoint: /cm/service/commands/restart diff --git a/roles/cloudera_manager/api_client/tasks/main.yml b/roles/cloudera_manager/api_client/tasks/main.yml index 0ec9f28f..e5f9bb9f 100644 --- a/roles/cloudera_manager/api_client/tasks/main.yml +++ b/roles/cloudera_manager/api_client/tasks/main.yml @@ -13,7 +13,6 @@ # limitations under the License. --- - - set_fact: cloudera_manager_url: "{{ cloudera_manager_protocol }}://{{ cloudera_manager_host }}:{{ cloudera_manager_port }}" when: cloudera_manager_url is not defined diff --git a/roles/cloudera_manager/api_hosts/tasks/main.yml b/roles/cloudera_manager/api_hosts/tasks/main.yml index 64085f0f..70f7ed7b 100644 --- a/roles/cloudera_manager/api_hosts/tasks/main.yml +++ b/roles/cloudera_manager/api_hosts/tasks/main.yml @@ -13,7 +13,6 @@ # limitations under the License. --- - - name: Get the host identifiers and names from Cloudera Manager delegate_to: "{{ groups.cloudera_manager[0] if 'cloudera_manager' in groups else 'localhost' }}" cloudera.cluster.cm_api: diff --git a/roles/cloudera_manager/autotls/tasks/main.yml b/roles/cloudera_manager/autotls/tasks/main.yml index 83e5bf35..3e46fca5 100644 --- a/roles/cloudera_manager/autotls/tasks/main.yml +++ b/roles/cloudera_manager/autotls/tasks/main.yml @@ -13,7 +13,6 @@ # limitations under the License. 
--- - - name: Check Cloudera Manager version cloudera.cluster.cm_api: endpoint: /cm/version @@ -25,7 +24,7 @@ - name: Patch Cloudera Manager older than 7.3 include_tasks: - file: patch_old_cm + file: patch_old_cm.yml when: response.json.version is version('7.3.0', '<') - name: Check if password or key is used to connect to machines diff --git a/roles/cloudera_manager/autotls/tasks/patch_old_cm.yml b/roles/cloudera_manager/autotls/tasks/patch_old_cm.yml index 7db78d00..9973cdc3 100644 --- a/roles/cloudera_manager/autotls/tasks/patch_old_cm.yml +++ b/roles/cloudera_manager/autotls/tasks/patch_old_cm.yml @@ -5,7 +5,7 @@ dest: /opt/cloudera/cm-agent/lib/python2.7/site-packages/cmf/tools/cert.py_patch owner: cloudera-scm group: cloudera-scm - mode: '0644' + mode: "0644" - name: Backup cert.py shell: cp /opt/cloudera/cm-agent/lib/python2.7/site-packages/cmf/tools/cert.py /opt/cloudera/cm-agent/lib/python2.7/site-packages/cmf/tools/cert.py.backup diff --git a/roles/cloudera_manager/common/defaults/main.yml b/roles/cloudera_manager/common/defaults/main.yml index 17764193..41fa81a5 100644 --- a/roles/cloudera_manager/common/defaults/main.yml +++ b/roles/cloudera_manager/common/defaults/main.yml @@ -21,7 +21,7 @@ cloudera_manager_host_remote: "{{ hostvars[groups.cloudera_manager[0]].ansible_h cloudera_manager_host: "{{ cloudera_manager_host_remote if 'localhost' in inventory_hostname else cloudera_manager_host_local }}" # cloudera_manager_port: 7180 -cloudera_manager_database_embedded: False +cloudera_manager_database_embedded: false cloudera_manager_database_host: "{{ database_host }}" cloudera_manager_database_type: "{{ database_type }}" cloudera_manager_database_name: scm diff --git a/roles/cloudera_manager/common/handlers/main.yml b/roles/cloudera_manager/common/handlers/main.yml index 59d213c6..3ef52b0c 100644 --- a/roles/cloudera_manager/common/handlers/main.yml +++ b/roles/cloudera_manager/common/handlers/main.yml @@ -13,7 +13,6 @@ # limitations under the License. --- - - name: start cloudera-scm-server service: name: cloudera-scm-server diff --git a/roles/cloudera_manager/config/defaults/main.yml b/roles/cloudera_manager/config/defaults/main.yml index dea1125f..06934c02 100644 --- a/roles/cloudera_manager/config/defaults/main.yml +++ b/roles/cloudera_manager/config/defaults/main.yml @@ -14,7 +14,7 @@ --- api_config_endpoint: cm/config -api_config_keys_uppercase: True +api_config_keys_uppercase: true cm_api_defaults: PARCEL_DISTRIBUTE_RATE_LIMIT_KBS_PER_SECOND: 256000 api_configs: "{{ cm_api_defaults | combine(cloudera_manager_options | default({}), recursive=True) }}" diff --git a/roles/cloudera_manager/config/tasks/main.yml b/roles/cloudera_manager/config/tasks/main.yml index bf4b31e8..0f6406d7 100644 --- a/roles/cloudera_manager/config/tasks/main.yml +++ b/roles/cloudera_manager/config/tasks/main.yml @@ -13,7 +13,6 @@ # limitations under the License. --- - - name: Get existing configs delegate_to: "{{ groups.cloudera_manager[0] if 'cloudera_manager' in groups else 'localhost' }}" cloudera.cluster.cm_api: diff --git a/roles/cloudera_manager/csds/tasks/main.yml b/roles/cloudera_manager/csds/tasks/main.yml index c5ca205b..07a37ddd 100644 --- a/roles/cloudera_manager/csds/tasks/main.yml +++ b/roles/cloudera_manager/csds/tasks/main.yml @@ -13,14 +13,13 @@ # limitations under the License. 
--- - - name: Create CSD directory file: path: "{{ cloudera_manager_csd_directory }}" state: directory owner: cloudera-scm group: cloudera-scm - mode: 0755 + mode: "0755" - name: Download CSDs get_url: @@ -28,7 +27,7 @@ dest: "{{ cloudera_manager_csd_directory }}" url_username: "{{ cloudera_manager_repo_username | default(omit) }}" url_password: "{{ cloudera_manager_repo_password | default(omit) }}" - mode: 0644 + mode: "0644" loop: "{{ cloudera_manager_csds }}" loop_control: loop_var: __csd_item diff --git a/roles/cloudera_manager/daemons/tasks/main.yml b/roles/cloudera_manager/daemons/tasks/main.yml index f8d05b7c..50f9e73d 100644 --- a/roles/cloudera_manager/daemons/tasks/main.yml +++ b/roles/cloudera_manager/daemons/tasks/main.yml @@ -13,10 +13,9 @@ # limitations under the License. --- - - name: Install Cloudera Manager daemons package ansible.builtin.package: lock_timeout: "{{ (ansible_os_family == 'RedHat') | ternary(180, omit) }}" name: cloudera-manager-daemons - update_cache: yes + update_cache: true state: present diff --git a/roles/cloudera_manager/database/defaults/main.yml b/roles/cloudera_manager/database/defaults/main.yml index d43e8eaf..878d9ab2 100644 --- a/roles/cloudera_manager/database/defaults/main.yml +++ b/roles/cloudera_manager/database/defaults/main.yml @@ -14,5 +14,6 @@ --- -cloudera_manager_database_prepare_script: "{{ '/opt/cloudera/cm/schema/scm_prepare_database.sh' if cloudera_manager_version is version('6.0.0', '>=') else '/usr/share/cmf/schema/scm_prepare_database.sh' }}" +cloudera_manager_database_prepare_script: "{{ '/opt/cloudera/cm/schema/scm_prepare_database.sh' if cloudera_manager_version is version('6.0.0', '>=') else '/usr/share/cmf/schema/scm_prepare_database.sh' + }}" cloudera_manager_database_ranger_script: /opt/cloudera/cm/bin/gen_embedded_ranger_db.sh diff --git a/roles/cloudera_manager/database/handlers/main.yml b/roles/cloudera_manager/database/handlers/main.yml index 23da945f..df605c8b 100644 --- a/roles/cloudera_manager/database/handlers/main.yml +++ b/roles/cloudera_manager/database/handlers/main.yml @@ -13,7 +13,6 @@ # limitations under the License. --- - - name: start cloudera-scm-server-db service: name: cloudera-scm-server-db diff --git a/roles/cloudera_manager/database/tasks/embedded.yml b/roles/cloudera_manager/database/tasks/embedded.yml index fca93ba8..f7eb2513 100644 --- a/roles/cloudera_manager/database/tasks/embedded.yml +++ b/roles/cloudera_manager/database/tasks/embedded.yml @@ -13,13 +13,12 @@ # limitations under the License. --- - - name: Install Cloudera Manager embedded database ansible.builtin.package: lock_timeout: "{{ (ansible_os_family == 'RedHat') | ternary(60, omit) }}" name: - - cloudera-manager-server-db-2 + - cloudera-manager-server-db-2 state: present - changed_when: True + changed_when: true notify: - start cloudera-scm-server-db diff --git a/roles/cloudera_manager/database/tasks/external.yml b/roles/cloudera_manager/database/tasks/external.yml index 3bbb29af..e82f1204 100644 --- a/roles/cloudera_manager/database/tasks/external.yml +++ b/roles/cloudera_manager/database/tasks/external.yml @@ -13,18 +13,17 @@ # limitations under the License. 
--- - - name: Create Cloudera Manager database user for Postgres postgresql_user: name: "{{ cloudera_manager_database_user }}" password: "{{ cloudera_manager_database_password }}" delegate_to: "{{ cloudera_manager_database_host }}" connection: ssh - become: yes + become: true become_user: postgres when: - - cloudera_manager_database_type == 'postgresql' - - cloudera_manager_database_host in groups.db_server | default([]) + - cloudera_manager_database_type == 'postgresql' + - cloudera_manager_database_host in groups.db_server | default([]) - name: Create Cloudera Manager database postgresql_db: @@ -33,24 +32,24 @@ encoding: UTF-8 delegate_to: "{{ cloudera_manager_database_host }}" connection: ssh - become: yes + become: true become_user: postgres when: - - cloudera_manager_database_type == 'postgresql' - - cloudera_manager_database_host in groups.db_server | default([]) + - cloudera_manager_database_type == 'postgresql' + - cloudera_manager_database_host in groups.db_server | default([]) - name: Create Cloudera Manager database user for MySQL mysql_user: name: "{{ cloudera_manager_database_user }}" password: "{{ cloudera_manager_database_password }}" update_password: always - host: '%' + host: "%" priv: "{{ cloudera_manager_database_name }}.*:ALL" delegate_to: "{{ cloudera_manager_database_host }}" connection: ssh when: - - cloudera_manager_database_type == 'mysql' or cloudera_manager_database_type == 'mariadb' - - cloudera_manager_database_host in groups.db_server | default([]) + - cloudera_manager_database_type == 'mysql' or cloudera_manager_database_type == 'mariadb' + - cloudera_manager_database_host in groups.db_server | default([]) - name: Create Cloudera Manager database mysql_db: @@ -60,9 +59,8 @@ delegate_to: "{{ cloudera_manager_database_host }}" connection: ssh when: - - cloudera_manager_database_type == 'mysql' or cloudera_manager_database_type == 'mariadb' - - cloudera_manager_database_host in groups.db_server | default([]) - + - cloudera_manager_database_type == 'mysql' or cloudera_manager_database_type == 'mariadb' + - cloudera_manager_database_host in groups.db_server | default([]) - name: Prepare Cloudera Manager Server external database command: | @@ -71,4 +69,4 @@ {{ cloudera_manager_database_name }} {{ cloudera_manager_database_user }} {{ cloudera_manager_database_password }} - changed_when: False + changed_when: false diff --git a/roles/cloudera_manager/database/tasks/main.yml b/roles/cloudera_manager/database/tasks/main.yml index a80fec85..2c6b929f 100644 --- a/roles/cloudera_manager/database/tasks/main.yml +++ b/roles/cloudera_manager/database/tasks/main.yml @@ -13,7 +13,6 @@ # limitations under the License. --- - - name: Configure Cloudera Manager database (external) include_tasks: external.yml when: not cloudera_manager_database_embedded diff --git a/roles/cloudera_manager/external_account/tasks/main.yml b/roles/cloudera_manager/external_account/tasks/main.yml index fc539db2..e247efe5 100644 --- a/roles/cloudera_manager/external_account/tasks/main.yml +++ b/roles/cloudera_manager/external_account/tasks/main.yml @@ -13,7 +13,6 @@ # limitations under the License. 
--- - - name: Wait for Cloudera Manager Port to be up delegate_to: "{{ groups.cloudera_manager[0] if 'cloudera_manager' in groups else 'localhost' }}" ansible.builtin.wait_for: diff --git a/roles/cloudera_manager/external_auth/defaults/main.yml b/roles/cloudera_manager/external_auth/defaults/main.yml index 0947c1f2..dce53e20 100644 --- a/roles/cloudera_manager/external_auth/defaults/main.yml +++ b/roles/cloudera_manager/external_auth/defaults/main.yml @@ -14,4 +14,4 @@ --- -freeipa_activated: False +freeipa_activated: false diff --git a/roles/cloudera_manager/external_auth/tasks/create_mapping.yml b/roles/cloudera_manager/external_auth/tasks/create_mapping.yml index c2cca5be..8d84615c 100644 --- a/roles/cloudera_manager/external_auth/tasks/create_mapping.yml +++ b/roles/cloudera_manager/external_auth/tasks/create_mapping.yml @@ -1,3 +1,4 @@ +--- # Copyright 2023 Cloudera, Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/roles/cloudera_manager/external_auth/tasks/main.yml b/roles/cloudera_manager/external_auth/tasks/main.yml index a9fdbaef..d272f685 100644 --- a/roles/cloudera_manager/external_auth/tasks/main.yml +++ b/roles/cloudera_manager/external_auth/tasks/main.yml @@ -13,15 +13,13 @@ # limitations under the License. --- - - name: Conditionally load in variables for initializing IPA ansible.builtin.include_vars: file: freeipa.yml when: - - freeipa_activated - - cloudera_manager_external_auth is undefined - - cloudera_manager_version is version('6.0.0','>=') - + - freeipa_activated + - cloudera_manager_external_auth is undefined + - cloudera_manager_version is version('6.0.0','>=') - name: Select external auth provider details set_fact: @@ -32,20 +30,19 @@ include_role: name: cloudera.cluster.cloudera_manager.config vars: - api_config_keys_uppercase: True + api_config_keys_uppercase: true api_configs: "{{ lookup('template', 'external_auth_configs.j2') | from_yaml }}" when: auth_provider is defined and cloudera_manager_version is version('6.0.0','>=') - block: + - name: Get auth roles from Cloudera Manager + cloudera.cluster.cm_api: + endpoint: /authRoles + register: response - - name: Get auth roles from Cloudera Manager - cloudera.cluster.cm_api: - endpoint: /authRoles - register: response - - - name: Create auth role name to UUID mapping - set_fact: - auth_role_uuids: "{{ response.json['items'] | items2dict(key_name='name', value_name='uuid') }}" + - name: Create auth role name to UUID mapping + set_fact: + auth_role_uuids: "{{ response.json['items'] | items2dict(key_name='name', value_name='uuid') }}" ## BUG: Can't set all mappings in one API call because of OPSAPS-56242 # - name: Set Cloudera Manager external auth user to role mappings @@ -55,18 +52,18 @@ # method: POST # when: cloudera_manager_external_auth.role_mappings is defined - - name: Set Cloudera Manager external auth user to role mappings - include_tasks: create_mapping.yml - loop: "{{ cloudera_manager_external_auth.role_mappings }}" - loop_control: - loop_var: role_mapping + - name: Set Cloudera Manager external auth user to role mappings + include_tasks: create_mapping.yml + loop: "{{ cloudera_manager_external_auth.role_mappings }}" + loop_control: + loop_var: role_mapping - - name: Restart Cloudera Manager server - service: - name: cloudera-scm-server - state: restarted - become: yes - notify: - - wait cloudera-scm-server + - name: Restart Cloudera Manager server + service: + name: cloudera-scm-server + state: restarted + become: true + notify: + - wait 
cloudera-scm-server when: cloudera_manager_external_auth.role_mappings is defined and cloudera_manager_version is version('6.0.0','>=') diff --git a/roles/cloudera_manager/external_auth/vars/freeipa.yml b/roles/cloudera_manager/external_auth/vars/freeipa.yml index afad671b..1b8b1952 100644 --- a/roles/cloudera_manager/external_auth/vars/freeipa.yml +++ b/roles/cloudera_manager/external_auth/vars/freeipa.yml @@ -16,17 +16,17 @@ default_free_ipa_role_mappings: - group: admins - roles: [ ROLE_ADMIN ] + roles: [ROLE_ADMIN] - group: auditors - roles: [ ROLE_AUDITOR ] + roles: [ROLE_AUDITOR] - group: users - roles: [ ROLE_USER ] + roles: [ROLE_USER] cloudera_manager_external_auth: provider: "FreeIPA" - external_first: no - external_only: no - external_set: yes + external_first: false + external_only: false + external_set: true role_mappings: "{{ default_free_ipa_role_mappings }}" auth_providers: diff --git a/roles/cloudera_manager/external_auth/vars/main.yml b/roles/cloudera_manager/external_auth/vars/main.yml index ba87e6c9..76f3bce6 100644 --- a/roles/cloudera_manager/external_auth/vars/main.yml +++ b/roles/cloudera_manager/external_auth/vars/main.yml @@ -32,11 +32,11 @@ auth_role_display_names: default_free_ipa_role_mappings: - group: admins - roles: [ ROLE_ADMIN ] + roles: [ROLE_ADMIN] - group: auditors - roles: [ ROLE_AUDITOR ] + roles: [ROLE_AUDITOR] - group: users - roles: [ ROLE_USER ] + roles: [ROLE_USER] # when: # - freeipa_activated # - cloudera_manager_external_auth is undefined @@ -44,9 +44,9 @@ default_free_ipa_role_mappings: cloudera_manager_external_auth: provider: "FreeIPA" - external_first: no - external_only: no - external_set: yes + external_first: false + external_only: false + external_set: true role_mappings: "{{ default_free_ipa_role_mappings }}" # when: # - freeipa_activated diff --git a/roles/cloudera_manager/hosts_config/tasks/main.yml b/roles/cloudera_manager/hosts_config/tasks/main.yml index f04baa52..2ff56c41 100644 --- a/roles/cloudera_manager/hosts_config/tasks/main.yml +++ b/roles/cloudera_manager/hosts_config/tasks/main.yml @@ -1,5 +1,4 @@ --- - # Copyright 2023 Cloudera, Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -22,7 +21,7 @@ include_role: name: cloudera.cluster.cloudera_manager.config vars: - api_config_keys_uppercase: False + api_config_keys_uppercase: false api_config_endpoint: cm/allHosts/config api_configs: "{{ definition.hosts.configs }}" when: definition.hosts.configs is defined diff --git a/roles/cloudera_manager/kerberos/tasks/main.yml b/roles/cloudera_manager/kerberos/tasks/main.yml index ce1a3f05..eafa986c 100644 --- a/roles/cloudera_manager/kerberos/tasks/main.yml +++ b/roles/cloudera_manager/kerberos/tasks/main.yml @@ -13,12 +13,11 @@ # limitations under the License. --- - - name: Set Cloudera Manager Kerberos configs include_role: name: cloudera.cluster.cloudera_manager.config vars: - api_config_keys_uppercase: True + api_config_keys_uppercase: true api_configs: "{{ lookup('template', 'kerberos_configs.j2') | from_yaml }}" - name: Import KDC admin credentials diff --git a/roles/cloudera_manager/license/tasks/enterprise.yml b/roles/cloudera_manager/license/tasks/enterprise.yml index 5db30f85..2cbd28cf 100644 --- a/roles/cloudera_manager/license/tasks/enterprise.yml +++ b/roles/cloudera_manager/license/tasks/enterprise.yml @@ -13,7 +13,6 @@ # limitations under the License. 
--- - - name: Get current Cloudera license status cloudera.cluster.cm_api: endpoint: /cm/license @@ -44,7 +43,7 @@ ansible.builtin.assert: that: __cloudera_license_file.stat.exists fail_msg: "Expected to find Cloudera License file at {{ license_local_tmp_path }}" - quiet: yes + quiet: true - name: Post license file to Cloudera Manager API ansible.builtin.shell: > diff --git a/roles/cloudera_manager/license/tasks/main.yml b/roles/cloudera_manager/license/tasks/main.yml index a618cc81..a5f02542 100644 --- a/roles/cloudera_manager/license/tasks/main.yml +++ b/roles/cloudera_manager/license/tasks/main.yml @@ -13,7 +13,6 @@ # limitations under the License. --- - - name: Upload enterprise license include_tasks: enterprise.yml when: cloudera_manager_license_type == 'enterprise' diff --git a/roles/cloudera_manager/license/tasks/trial.yml b/roles/cloudera_manager/license/tasks/trial.yml index 9187f32f..d6c2a830 100644 --- a/roles/cloudera_manager/license/tasks/trial.yml +++ b/roles/cloudera_manager/license/tasks/trial.yml @@ -13,10 +13,9 @@ # limitations under the License. --- - - name: Begin Cloudera Manager trial license cloudera.cluster.cm_api: endpoint: /cm/trial/begin method: POST status_code: 200,204 - ignore_errors: True + ignore_errors: true diff --git a/roles/cloudera_manager/preload_parcels/defaults/main.yml b/roles/cloudera_manager/preload_parcels/defaults/main.yml index aca2a1ab..a774fef1 100644 --- a/roles/cloudera_manager/preload_parcels/defaults/main.yml +++ b/roles/cloudera_manager/preload_parcels/defaults/main.yml @@ -1,3 +1,4 @@ +--- # Copyright 2023 Cloudera, Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/roles/cloudera_manager/preload_parcels/tasks/main.yml b/roles/cloudera_manager/preload_parcels/tasks/main.yml index 769fa039..ed5b812a 100644 --- a/roles/cloudera_manager/preload_parcels/tasks/main.yml +++ b/roles/cloudera_manager/preload_parcels/tasks/main.yml @@ -32,7 +32,7 @@ dest: "/opt/cloudera/parcel-repo/{{ __parcel_download_item | urlsplit('path') | basename | replace('.sha1', '.sha') }}" - name: Track async downloads to completion [ This may take a while if your files are very large or far away ] - when : preload_parcels + when: preload_parcels loop: "{{ __infra_download_parcels_results.results }}" loop_control: loop_var: __download_async_item diff --git a/roles/cloudera_manager/repo/defaults/main.yml b/roles/cloudera_manager/repo/defaults/main.yml index 42ac1005..95ce1852 100644 --- a/roles/cloudera_manager/repo/defaults/main.yml +++ b/roles/cloudera_manager/repo/defaults/main.yml @@ -18,6 +18,6 @@ cloudera_manager_version: 7.6.1 cloudera_manager_distro_name: "{{ ansible_os_family | lower }}" cloudera_manager_distro_version: "{{ ansible_distribution_major_version }}" -install_repo_on_host: yes +install_repo_on_host: true set_custom_repo_as_archive_base_url: "{{ use_custom_repo_as_archive_base_url | default(True) }}" diff --git a/roles/cloudera_manager/repo/tasks/main-Debian.yml b/roles/cloudera_manager/repo/tasks/main-Debian.yml index 679d5dbe..c67f006c 100644 --- a/roles/cloudera_manager/repo/tasks/main-Debian.yml +++ b/roles/cloudera_manager/repo/tasks/main-Debian.yml @@ -13,22 +13,24 @@ # limitations under the License. 
--- - - name: Add credentials to repository URLs if required set_fact: - cloudera_manager_repo_url_with_creds: "{{ cloudera_manager_repo_url | regex_replace('^(?P<scheme>http[s]?://)','\\g<scheme>' + cloudera_manager_repo_username|string + ':' + cloudera_manager_repo_password|string + '@')}}" - cloudera_manager_repo_key_with_creds: "{{ cloudera_manager_repo_key | regex_replace('^(?P<scheme>http[s]?://)','\\g<scheme>' + cloudera_manager_repo_username|string + ':' + cloudera_manager_repo_password|string + '@')}}" - no_log: yes + cloudera_manager_repo_url_with_creds: "{{ cloudera_manager_repo_url | regex_replace('^(?P<scheme>http[s]?://)','\\g<scheme>' + cloudera_manager_repo_username|string + + ':' + cloudera_manager_repo_password|string + '@')}}" + cloudera_manager_repo_key_with_creds: "{{ cloudera_manager_repo_key | regex_replace('^(?P<scheme>http[s]?://)','\\g<scheme>' + cloudera_manager_repo_username|string + + ':' + cloudera_manager_repo_password|string + '@')}}" + no_log: true when: cloudera_manager_repo_username is defined - name: Add Cloudera Manager apt repository key apt_key: url: "{{ cloudera_manager_repo_key_with_creds | default(cloudera_manager_repo_key) }}" state: present - no_log: yes + no_log: true - name: Add Cloudera Manager apt repository apt_repository: - repo: "deb [arch=amd64] {{ cloudera_manager_repo_url_with_creds | default(cloudera_manager_repo_url) }} {{ cloudera_manager_repo_apt_codename }} {{ cloudera_manager_repo_apt_component }}" + repo: "deb [arch=amd64] {{ cloudera_manager_repo_url_with_creds | default(cloudera_manager_repo_url) }} {{ cloudera_manager_repo_apt_codename }} {{ cloudera_manager_repo_apt_component + }}" filename: cloudera-manager - no_log: yes + no_log: true diff --git a/roles/cloudera_manager/repo/tasks/main-RedHat.yml b/roles/cloudera_manager/repo/tasks/main-RedHat.yml index 4a801a80..428be4da 100644 --- a/roles/cloudera_manager/repo/tasks/main-RedHat.yml +++ b/roles/cloudera_manager/repo/tasks/main-RedHat.yml @@ -13,7 +13,6 @@ # limitations under the License. --- - - name: Add Cloudera Manager yum repository ansible.builtin.yum_repository: name: cloudera-manager @@ -21,7 +20,7 @@ baseurl: "{{ cloudera_manager_repo_url }}" gpgkey: "{{ cloudera_manager_repo_key }}" gpgcheck: "{{ cloudera_manager_repo_gpgcheck | default((cloudera_manager_version.split('.')[0] == '5' ) | ternary('no', 'yes')) }}" - enabled: yes + enabled: true username: "{{ cloudera_manager_repo_username | default('') }}" password: "{{ cloudera_manager_repo_password | default('') }}" diff --git a/roles/cloudera_manager/repo/tasks/main.yml b/roles/cloudera_manager/repo/tasks/main.yml index b957f147..b1765327 100644 --- a/roles/cloudera_manager/repo/tasks/main.yml +++ b/roles/cloudera_manager/repo/tasks/main.yml @@ -13,7 +13,6 @@ # limitations under the License. 
--- - - name: Include variables include_vars: file: "{{ ansible_os_family }}.yml" @@ -27,7 +26,8 @@ - name: Correct repo URL for Redhat with cm5 ansible.builtin.set_fact: - __cloudera_manager_repo_url_paywall: "{{ cloudera_archive_base_url | regex_replace('/?$','') }}/p/cm{{ __cloudera_manager_major_version }}/redhat/{{ ansible_distribution_major_version }}/x86_64/cm/{{ cloudera_manager_version }}" + __cloudera_manager_repo_url_paywall: "{{ cloudera_archive_base_url | regex_replace('/?$','') }}/p/cm{{ __cloudera_manager_major_version }}/redhat/{{ ansible_distribution_major_version + }}/x86_64/cm/{{ cloudera_manager_version }}" when: - ansible_os_family != "Debian" - cloudera_manager_version.split('.')[0] == "5" diff --git a/roles/cloudera_manager/repo/vars/Debian.yml b/roles/cloudera_manager/repo/vars/Debian.yml index 7aade359..1a022d52 100644 --- a/roles/cloudera_manager/repo/vars/Debian.yml +++ b/roles/cloudera_manager/repo/vars/Debian.yml @@ -14,8 +14,10 @@ --- __cloudera_manager_major_version: "{{ cloudera_manager_version.split('.')[0] }}" -__cloudera_manager_repo_url_trial: "{{ cloudera_archive_base_url }}/cm{{ __cloudera_manager_major_version }}/{{ cloudera_manager_version }}/{{ ansible_distribution | lower }}{{ ansible_distribution_version | replace('.','') }}/apt" -__cloudera_manager_repo_url_paywall: "{{ cloudera_archive_base_url }}/p/cm{{ __cloudera_manager_major_version }}/{{ cloudera_manager_version }}/{{ ansible_distribution | lower }}{{ ansible_distribution_version | replace('.','') }}/apt" +__cloudera_manager_repo_url_trial: "{{ cloudera_archive_base_url }}/cm{{ __cloudera_manager_major_version }}/{{ cloudera_manager_version }}/{{ ansible_distribution + | lower }}{{ ansible_distribution_version | replace('.','') }}/apt" +__cloudera_manager_repo_url_paywall: "{{ cloudera_archive_base_url }}/p/cm{{ __cloudera_manager_major_version }}/{{ cloudera_manager_version }}/{{ ansible_distribution + | lower }}{{ ansible_distribution_version | replace('.','') }}/apt" __cloudera_manager_repo_key_filename: archive.key __cloudera_manager_repo_key_trial: "{{ __cloudera_manager_repo_url_trial }}/{{ __cloudera_manager_repo_key_filename }}" __cloudera_manager_repo_key_paywall: "{{ __cloudera_manager_repo_url_paywall }}/{{ __cloudera_manager_repo_key_filename }}" diff --git a/roles/cloudera_manager/repo/vars/RedHat.yml b/roles/cloudera_manager/repo/vars/RedHat.yml index b0d2e1bf..fdefcf87 100644 --- a/roles/cloudera_manager/repo/vars/RedHat.yml +++ b/roles/cloudera_manager/repo/vars/RedHat.yml @@ -17,8 +17,10 @@ __cloudera_manager_major_version: "{{ cloudera_manager_version.split('.')[0] }}" __cloudera_manager_cm5_path: "{{ ansible_os_family | lower }}/{{ ansible_distribution_major_version }}/x86_64/cm/{{ cloudera_manager_version }}" __cloudera_manager_cm6_path: "{{ cloudera_manager_version }}/{{ cloudera_manager_distro_name }}{{ cloudera_manager_distro_version }}/yum" -__cloudera_manager_repo_url_trial: "{{ cloudera_archive_base_url | regex_replace('/?$','') }}/cm{{ __cloudera_manager_major_version }}/{{ cloudera_manager_version }}/{{ cloudera_manager_distro_name }}{{ cloudera_manager_distro_version }}/yum" -__cloudera_manager_repo_url_paywall: "{{ cloudera_archive_base_url | regex_replace('/?$','') }}/p/cm{{ __cloudera_manager_major_version }}/{{ (__cloudera_manager_major_version == '5' ) | ternary(__cloudera_manager_cm5_path, __cloudera_manager_cm6_path) }}" +__cloudera_manager_repo_url_trial: "{{ cloudera_archive_base_url | regex_replace('/?$','') }}/cm{{ __cloudera_manager_major_version 
}}/{{ cloudera_manager_version + }}/{{ cloudera_manager_distro_name }}{{ cloudera_manager_distro_version }}/yum" +__cloudera_manager_repo_url_paywall: "{{ cloudera_archive_base_url | regex_replace('/?$','') }}/p/cm{{ __cloudera_manager_major_version }}/{{ (__cloudera_manager_major_version + == '5' ) | ternary(__cloudera_manager_cm5_path, __cloudera_manager_cm6_path) }}" __cloudera_manager_repo_key_filename: "RPM-GPG-KEY-cloudera" __cloudera_manager_repo_key_trial: "{{ __cloudera_manager_repo_url_trial }}/{{ __cloudera_manager_repo_key_filename }}" diff --git a/roles/cloudera_manager/server/tasks/main.yml b/roles/cloudera_manager/server/tasks/main.yml index 81392923..eca6f7aa 100644 --- a/roles/cloudera_manager/server/tasks/main.yml +++ b/roles/cloudera_manager/server/tasks/main.yml @@ -13,7 +13,6 @@ # limitations under the License. --- - - name: Gather the package facts ansible.builtin.package_facts: manager: auto @@ -44,7 +43,7 @@ - name: Customize CMF_SERVER_ARGS lineinfile: path: /etc/default/cloudera-scm-server - regexp: '^CMF_SERVER_ARGS=' + regexp: "^CMF_SERVER_ARGS=" line: 'CMF_SERVER_ARGS="{{ cloudera_manager_cmf_server_args }}"' when: cloudera_manager_cmf_server_args is defined @@ -52,7 +51,7 @@ - name: Customize CMF_JAVA_OPTS lineinfile: path: /etc/default/cloudera-scm-server - regexp: '^export CMF_JAVA_OPTS=' + regexp: "^export CMF_JAVA_OPTS=" line: 'export CMF_JAVA_OPTS="{{ cloudera_manager_cmf_java_opts }}"' when: cloudera_manager_cmf_java_opts is defined diff --git a/roles/cloudera_manager/server_tls/tasks/main.yml b/roles/cloudera_manager/server_tls/tasks/main.yml index b9cc77f1..d50b9624 100644 --- a/roles/cloudera_manager/server_tls/tasks/main.yml +++ b/roles/cloudera_manager/server_tls/tasks/main.yml @@ -13,13 +13,12 @@ # limitations under the License. 
--- - - name: Set Cloudera Manager TLS configs include_role: name: cloudera.cluster.cloudera_manager.config when: tls | default(False) vars: - api_config_keys_uppercase: True + api_config_keys_uppercase: true api_configs: "{{ lookup('template', 'tls_configs.j2') | from_yaml }}" - fail: @@ -30,8 +29,9 @@ - name: Customize CMF_JAVA_OPTS lineinfile: path: /etc/default/cloudera-scm-server - regexp: '^export CMF_JAVA_OPTS=' - line: 'export CMF_JAVA_OPTS="{{ opts }} -Djavax.net.ssl.trustStore={{ tls_uber_truststore_path }} -Djavax.net.ssl.trustStorePassword={{ tls_truststore_password }}"' + regexp: "^export CMF_JAVA_OPTS=" + line: 'export CMF_JAVA_OPTS="{{ opts }} -Djavax.net.ssl.trustStore={{ tls_uber_truststore_path }} -Djavax.net.ssl.trustStorePassword={{ tls_truststore_password + }}"' vars: opts: "{{ cloudera_manager_cmf_java_opts | default(cloudera_manager_cmf_java_opts_default) }}" when: diff --git a/roles/cloudera_manager/services_info/defaults/main.yml b/roles/cloudera_manager/services_info/defaults/main.yml index 466cf679..0755cf99 100644 --- a/roles/cloudera_manager/services_info/defaults/main.yml +++ b/roles/cloudera_manager/services_info/defaults/main.yml @@ -1,3 +1,4 @@ +--- cluster_name: Default ranger_user: "{{ ranger_rangeradmin_user | default('admin') }}" ranger_password: "{{ ranger_rangeradmin_user_password | default(cloudera_manager_admin_password) }}" diff --git a/roles/cloudera_manager/services_info/tasks/main.yml b/roles/cloudera_manager/services_info/tasks/main.yml index 47159804..c02fb590 100644 --- a/roles/cloudera_manager/services_info/tasks/main.yml +++ b/roles/cloudera_manager/services_info/tasks/main.yml @@ -1,28 +1,27 @@ --- - - name: Get All services from CM cloudera.cluster.cm_api: endpoint: "/clusters/{{ cluster_name | urlencode() }}/services" register: cloudera_manager_all_services - no_log: yes # overly verbose + no_log: true # overly verbose - name: Get All Mgmt Roles from CM cloudera.cluster.cm_api: endpoint: "/cm/service/roles" register: cloudera_manager_mgmt_roles - no_log: yes # overly verbose + no_log: true # overly verbose - name: Get CM Hosts info cloudera.cluster.cm_api: endpoint: "/hosts" register: hosts_details - no_log: yes # overly verbose + no_log: true # overly verbose - name: Get CM deployment of services cloudera.cluster.cm_api: endpoint: "/cm/deployment" register: cm_deployment_services - no_log: yes # overly verbose + no_log: true # overly verbose - name: Get cluster parcel details cloudera.cluster.cm_api: @@ -113,7 +112,7 @@ cloudera.cluster.cm_api: endpoint: "/clusters/{{ cluster_name | urlencode() }}/services/{{ wxm_service_name | lower }}/roles" register: cloudera_manager_wxm_all_roles - no_log: yes # overly verbose + no_log: true # overly verbose - set_fact: wxm_api_server: "{{ cloudera_manager_wxm_all_roles.json | community.general.json_query(query) }}" @@ -124,7 +123,7 @@ cloudera.cluster.cm_api: endpoint: "/clusters/{{ cluster_name | urlencode() }}/services/{{ wxm_service_name | lower }}/roleConfigGroups/{{ wxm_service_name | lower }}-THUNDERHEAD_SIGMA_CONSOLE-BASE/config?view=full" register: cloudera_manager_wxm_all_rcgs - no_log: yes # overly verbose + no_log: true # overly verbose - set_fact: wxm_ssl_enabled: "{{ cloudera_manager_wxm_all_roles.json | community.general.json_query(query) }}" @@ -162,13 +161,13 @@ cloudera.cluster.cm_api: endpoint: "/clusters/{{ cluster_name | urlencode() }}/services/{{ ranger_service_name | lower }}/config?view=full" register: full_ranger_config - no_log: yes # overly verbose + no_log: true # overly 
verbose - name: Get Ranger Admin full config cloudera.cluster.cm_api: endpoint: "/clusters/{{ cluster_name | urlencode() }}/services/{{ ranger_service_name | lower }}/roleConfigGroups/{{ ranger_service_name | lower }}-RANGER_ADMIN-BASE/config?view=full" register: full_ranger_admin_config - no_log: yes # overly verbose + no_log: true # overly verbose - set_fact: ranger_ssl: "{{ full_ranger_admin_config.json | community.general.json_query(query) }}" @@ -219,7 +218,7 @@ cloudera.cluster.cm_api: endpoint: "/clusters/{{ cluster_name | urlencode() }}/services/{{ solr_service_name | lower }}/roles" register: solr_roles - no_log: yes # overly verbose + no_log: true # overly verbose - set_fact: solr_all_hosts: "{{ solr_roles.json | community.general.json_query(query) }}" @@ -230,13 +229,13 @@ cloudera.cluster.cm_api: endpoint: "/clusters/{{ cluster_name | urlencode() }}/services/{{ solr_service_name | lower }}/config?view=full" register: solr_full_config - no_log: yes # overly verbose + no_log: true # overly verbose - name: Get SolR full config for SOLR_SERVER-BASE cloudera.cluster.cm_api: endpoint: "/clusters/{{ cluster_name | urlencode() }}/services/{{ solr_service_name | lower }}/roleConfigGroups/{{ solr_service_name | lower }}-SOLR_SERVER-BASE/config?view=full" register: solr_full_config_base - no_log: yes # overly verbose + no_log: true # overly verbose # Additional solr configs # Set SoLR protocol @@ -298,7 +297,7 @@ cloudera.cluster.cm_api: endpoint: "/clusters/{{ cluster_name | urlencode() }}/services/{{ knox_service_name | lower }}/roleConfigGroups/{{ knox_service_name | lower }}-KNOX_GATEWAY-BASE/config?view=full" register: knox_full_config - no_log: yes # overly verbose + no_log: true # overly verbose - set_fact: gateway_descriptor_cdp_proxy: "{{ knox_full_config.json | community.general.json_query(query) }}" diff --git a/roles/cloudera_manager/session_timeout/tasks/main.yml b/roles/cloudera_manager/session_timeout/tasks/main.yml index 0965cba6..4b5722fa 100644 --- a/roles/cloudera_manager/session_timeout/tasks/main.yml +++ b/roles/cloudera_manager/session_timeout/tasks/main.yml @@ -1,5 +1,4 @@ --- - - name: Set session timeout to 30 days cloudera.cluster.cm_api: endpoint: /cm/config diff --git a/roles/cloudera_manager/wait_for_heartbeat/tasks/main.yml b/roles/cloudera_manager/wait_for_heartbeat/tasks/main.yml index 1ca80710..62910bc4 100644 --- a/roles/cloudera_manager/wait_for_heartbeat/tasks/main.yml +++ b/roles/cloudera_manager/wait_for_heartbeat/tasks/main.yml @@ -13,7 +13,6 @@ # limitations under the License. --- - - name: Read the Cloudera Manager agent UUID slurp: path: "{{ cloudera_manager_agent_lib_directory }}/uuid" diff --git a/roles/config/cluster/base/tasks/main.yml b/roles/config/cluster/base/tasks/main.yml index aeec29b9..690bd12b 100644 --- a/roles/config/cluster/base/tasks/main.yml +++ b/roles/config/cluster/base/tasks/main.yml @@ -13,7 +13,6 @@ # limitations under the License. 
--- - # This variable is used by other roles # please take care when changing it - set_fact: diff --git a/roles/config/cluster/base/vars/main.yml b/roles/config/cluster/base/vars/main.yml index 02cf6975..e30e2a2c 100644 --- a/roles/config/cluster/base/vars/main.yml +++ b/roles/config/cluster/base/vars/main.yml @@ -15,19 +15,19 @@ --- custom_config_templates: -# Explicit defaults - only run if we're neither updating services nor doing an upgrade + # Explicit defaults - only run if we're neither updating services nor doing an upgrade - template: configs/defaults.j2 condition: "{{ not(update_services|default(false)|bool or cdh_cdp_upgrade|default(false)|bool) }}" -# Custom configurations for databases + # Custom configurations for databases - template: configs/databases.j2 - template: configs/databases-7.1.0.j2 condition: "{{ cloudera_runtime_version is version('7.1.0','>=') and cloudera_runtime_version is version('7.1.9','<') }}" - template: configs/databases-7.1.9.j2 condition: "{{ cloudera_runtime_version is version('7.1.9','>=') }}" -# Custom configurations for Infra Solr + # Custom configurations for Infra Solr - template: configs/infra-solr.j2 condition: "{{ 'INFRA_SOLR' in cluster.services }}" -# Custom configurations for logging + # Custom configurations for logging - template: configs/logdirs.j2 - template: configs/logdirs-6.x.j2 condition: "{{ cloudera_runtime_version is version('6.0.0','>=') and cloudera_runtime_version is version('7.0.0','<') }}" @@ -36,26 +36,30 @@ custom_config_templates: - template: configs/logdirs-7.1.9.j2 condition: "{{ cloudera_runtime_version is version('7.1.9','>=') }}" - template: configs/logdirs-ranger-spooldirs.j2 - condition: "{{ cloudera_runtime_version is version('7.1.0','>=') and (cloudera_runtime_pre_upgrade is undefined or cloudera_runtime_pre_upgrade is version('7.1.0','>=')) and cloudera_runtime_version is version('7.1.9','<') }}" + condition: "{{ cloudera_runtime_version is version('7.1.0','>=') and (cloudera_runtime_pre_upgrade is undefined or cloudera_runtime_pre_upgrade is version('7.1.0','>=')) + and cloudera_runtime_version is version('7.1.9','<') }}" - template: configs/logdirs-ranger-spooldirs-7.1.9.j2 condition: "{{ cloudera_runtime_version is version('7.1.9','>=') }}" -# Custom configurations for out-of-memory behaviour, heap dumps etc + # Custom configurations for out-of-memory behaviour, heap dumps etc - template: configs/oom.j2 condition: "{{ cluster.oom is defined }}" - template: configs/oom-6.3.0.j2 condition: "{{ cluster.oom is defined and cloudera_runtime_version is version('6.0.0','>=') }}" - template: configs/oom-7.1.0.j2 condition: "{{ cluster.oom is defined and cloudera_runtime_version is version('7.1.0','>=') }}" -# Custom configurations for Kerberos + # Custom configurations for Kerberos - template: configs/kerberos-5.x.j2 - condition: "{{ cluster.security.kerberos | default(False) and (cloudera_manager_version is version('6.0.0','<') or cluster.type | default('base') == 'compute') }}" + condition: "{{ cluster.security.kerberos | default(False) and (cloudera_manager_version is version('6.0.0','<') or cluster.type | default('base') == 'compute') + }}" - template: configs/kerberos-6.x-7.x.j2 condition: "{{ cluster.security.kerberos | default(False) and cloudera_manager_version is version('6.0.0','>=') }}" - template: configs/kerberos-7.x.j2 - condition: "{{ cluster.security.kerberos | default(False) and cloudera_runtime_version is version('7.1.0','>=') and (cloudera_runtime_pre_upgrade is undefined or 
cloudera_runtime_pre_upgrade is version('7.1.0','>=')) }}" + condition: "{{ cluster.security.kerberos | default(False) and cloudera_runtime_version is version('7.1.0','>=') and (cloudera_runtime_pre_upgrade is undefined + or cloudera_runtime_pre_upgrade is version('7.1.0','>=')) }}" - template: configs/trusted-realms.j2 - condition: "{{ cluster.security.kerberos | default(False) and auth_providers | default({}) | dict2items | json_query('[?value.type == `KERBEROS`]') | length > 0 }}" -# Custom configurations for TLS + condition: "{{ cluster.security.kerberos | default(False) and auth_providers | default({}) | dict2items | json_query('[?value.type == `KERBEROS`]') | length > + 0 }}" + # Custom configurations for TLS - template: configs/tls.j2 condition: "{{ cluster.security.tls | default(False) }}" - template: configs/tls-6.x.j2 @@ -63,12 +67,13 @@ custom_config_templates: - template: configs/tls-7.1.0.j2 condition: "{{ cluster.security.tls | default(False) and cloudera_runtime_version is version('7.1.0','>=') }}" - template: configs/tls-7.1.4.j2 - condition: "{{ cluster.security.tls | default(False) and cloudera_runtime_version is version('7.1.4','>=') and (cloudera_runtime_pre_upgrade is undefined or cloudera_runtime_pre_upgrade is version('7.1.4','>=')) }}" + condition: "{{ cluster.security.tls | default(False) and cloudera_runtime_version is version('7.1.4','>=') and (cloudera_runtime_pre_upgrade is undefined or cloudera_runtime_pre_upgrade + is version('7.1.4','>=')) }}" - template: configs/tls-7.3.1.j2 condition: "{{ cluster.security.tls | default(False) and cloudera_manager_version is version('7.3.1', '>=') }}" - template: configs/tls-cm-7.j2 condition: "{{ cluster.security.tls | default(False) and cloudera_manager_version is version('7.1.0','>=') }}" -# Custom configurations for Cloudera Streams Processing components on CDH 6.x + # Custom configurations for Cloudera Streams Processing components on CDH 6.x - template: configs/schemaregistry.j2 condition: >- {{ cloudera_runtime_version is version('7.0.0','<') @@ -77,32 +82,32 @@ custom_config_templates: condition: >- {{ cloudera_runtime_version is version('7.0.0','<') and 'STREAMS_MESSAGING_MANAGER' in cluster.services }} -# Custom configurations for Phoenix + # Custom configurations for Phoenix - template: configs/phoenix.j2 condition: "{{ 'PHOENIX' in cluster.services }}" -# Custom configurations for Ranger + # Custom configurations for Ranger - template: configs/ranger.j2 condition: "{{ 'RANGER' in cluster.services }}" -# Custom configurations for Sentry + # Custom configurations for Sentry - template: configs/sentry.j2 condition: "{{ 'SENTRY' in cluster.services }}" -# Custom configurations for WorkloadXM + # Custom configurations for WorkloadXM - template: configs/wxm.j2 condition: "{{ 'WXM' in cluster.services }}" -# Custom configuration for /var/lib directory + # Custom configuration for /var/lib directory - template: configs/varlib-7.1.0.j2 condition: "{{ cluster.varlib_base is defined and cloudera_runtime_version is version('7.1.3','>=') }}" -# LDAP configuration + # LDAP configuration - template: configs/ldap.j2 condition: >- {{ service_auth_provider is defined and service_auth_provider in auth_providers | default({}) and auth_providers[service_auth_provider].type | default('LDAP') == "LDAP" }} -# Custom configuration for when adding services + # Custom configuration for when adding services - template: configs/inter-service-dependencies.j2 condition: "{{ (update_services | default(false) or 
(cdh_cdp_upgrade|default(false)|bool)) }}" -# Workarounds for bugs / known issues + # Workarounds for bugs / known issues - template: workarounds/OPSAPS-56076.j2 # TODO update fix version condition: >- diff --git a/roles/config/cluster/common/defaults/main.yml b/roles/config/cluster/common/defaults/main.yml index 6966d5ff..3b5b77e3 100644 --- a/roles/config/cluster/common/defaults/main.yml +++ b/roles/config/cluster/common/defaults/main.yml @@ -14,8 +14,8 @@ --- -cluster_template_dry_run: False -tls: False +cluster_template_dry_run: false +tls: false default_cluster_type: base @@ -23,7 +23,6 @@ pvc_type: "" kms_services: [KEYTRUSTEE, RANGER_KMS, RANGER_KMS_KTS] sdx_services: [ATLAS, HDFS, HIVE, RANGER, SENTRY] - default_cluster_base: name: Cluster data_contexts: @@ -38,13 +37,10 @@ default_cluster_kts: DB_ACTIVE: {} KEYTRUSTEE_PASSIVE_SERVER: {} DB_PASSIVE: {} - default_cluster_ecs: name: ECS services: [DOCKER, ECS] - ecs_databases: [ALERTS, CLASSIC_CLUSTERS, CLUSTER_ACCESS_MANAGER, CLUSTER_PROXY, DEX, DWX, ENV, LIFTIE, MLX, RESOURCEPOOL_MANAGER, UMS] - default_cluster_compute: base_cluster: data_context: SDX diff --git a/roles/config/cluster/ecs/tasks/main.yml b/roles/config/cluster/ecs/tasks/main.yml index aeec29b9..690bd12b 100644 --- a/roles/config/cluster/ecs/tasks/main.yml +++ b/roles/config/cluster/ecs/tasks/main.yml @@ -13,7 +13,6 @@ # limitations under the License. --- - # This variable is used by other roles # please take care when changing it - set_fact: diff --git a/roles/config/cluster/ecs/vars/main.yml b/roles/config/cluster/ecs/vars/main.yml index 8c40b01f..09d498ab 100644 --- a/roles/config/cluster/ecs/vars/main.yml +++ b/roles/config/cluster/ecs/vars/main.yml @@ -15,5 +15,5 @@ --- custom_config_templates: -# Custom configurations for ECS + # Custom configurations for ECS - template: configs/ecs.j2 diff --git a/roles/config/cluster/kts/tasks/main.yml b/roles/config/cluster/kts/tasks/main.yml index 625300d2..22adaafa 100644 --- a/roles/config/cluster/kts/tasks/main.yml +++ b/roles/config/cluster/kts/tasks/main.yml @@ -13,7 +13,6 @@ # limitations under the License. --- - - name: Retrieve repository metadata include_role: name: cloudera.cluster.deployment.repometa diff --git a/roles/config/cluster/kts/vars/main.yml b/roles/config/cluster/kts/vars/main.yml index 05853527..d0c5aa5d 100644 --- a/roles/config/cluster/kts/vars/main.yml +++ b/roles/config/cluster/kts/vars/main.yml @@ -15,6 +15,6 @@ --- custom_config_templates: -# Custom configurations for TLS + # Custom configurations for TLS - template: configs/tls.j2 condition: "{{ cluster.security.tls | default(False) }}" diff --git a/roles/config/services/kms/tasks/main.yml b/roles/config/services/kms/tasks/main.yml index 9eb00494..3a7283b8 100644 --- a/roles/config/services/kms/tasks/main.yml +++ b/roles/config/services/kms/tasks/main.yml @@ -13,7 +13,6 @@ # limitations under the License. 
--- - - name: Reset custom configuration dictionary set_fact: merged_configs: {} diff --git a/roles/config/services/kms_tls/tasks/main.yml b/roles/config/services/kms_tls/tasks/main.yml index 689b818c..9d7fbc41 100644 --- a/roles/config/services/kms_tls/tasks/main.yml +++ b/roles/config/services/kms_tls/tasks/main.yml @@ -4,7 +4,7 @@ endpoint: "/clusters/{{ cluster_name | urlencode() }}/services/{{ kms_service_name | lower }}/roleConfigGroups/{{ kms_service_name | lower }}-RANGER_KMS_SERVER_KTS-BASE/config" method: PUT body: "{{ lookup('file', 'kms_tls.json', errors='ignore' ) }}" - ignore_errors: yes + ignore_errors: true when: cloudera_manager_version is version('7.0.0','>=') - name: Push TLS settings for Keytrustee roleConfigGroups @@ -12,7 +12,7 @@ endpoint: "/clusters/{{ cluster_name | urlencode() }}/services/{{ kms_service_name | lower }}/roleConfigGroups/{{ kms_service_name | lower }}-KMS_KEYTRUSTEE-BASE/config" method: PUT body: "{{ lookup('file', 'kms_tls_cdh.json', errors='ignore' ) }}" - ignore_errors: yes + ignore_errors: true when: cloudera_manager_version is version('7.0.0','<') - name: Push TLS settings for Keytrustee config @@ -20,7 +20,7 @@ endpoint: "/clusters/{{ cluster_name | urlencode() }}/services/{{ kms_service_name | lower }}/config" method: PUT body: "{{ lookup('file', 'kms_tls_cdh_kms.json', errors='ignore' ) }}" - ignore_errors: yes + ignore_errors: true when: cloudera_manager_version is version('7.0.0','<') # Restart all clusters to be sure diff --git a/roles/config/services/mgmt/tasks/main.yml b/roles/config/services/mgmt/tasks/main.yml index a07aae3e..370bd50d 100644 --- a/roles/config/services/mgmt/tasks/main.yml +++ b/roles/config/services/mgmt/tasks/main.yml @@ -13,7 +13,6 @@ # limitations under the License. --- - # This variable is used by other roles # please take care when changing it - set_fact: diff --git a/roles/config/services/oozie_ui/tasks/main.yml b/roles/config/services/oozie_ui/tasks/main.yml index e7ef38e4..b5edc575 100644 --- a/roles/config/services/oozie_ui/tasks/main.yml +++ b/roles/config/services/oozie_ui/tasks/main.yml @@ -18,7 +18,7 @@ owner: oozie group: oozie state: directory - mode: '0755' + mode: "0755" ignore_errors: true - name: Install unzip diff --git a/roles/config/services/ranger_pvc_default_policies/tasks/main.yml b/roles/config/services/ranger_pvc_default_policies/tasks/main.yml index 828f1d02..07596116 100644 --- a/roles/config/services/ranger_pvc_default_policies/tasks/main.yml +++ b/roles/config/services/ranger_pvc_default_policies/tasks/main.yml @@ -1,5 +1,4 @@ --- - - name: Post Ranger policies declared in policies directory register: __ranger_pol_response uri: @@ -7,13 +6,13 @@ method: POST user: "{{ ranger_user }}" password: "{{ ranger_password }}" - return_content: yes + return_content: true body: "{{ lookup('template', '{{ item.src }}' ) }}" body_format: json status_code: 200 - validate_certs: no - force_basic_auth: yes - no_log: True + validate_certs: false + force_basic_auth: true + no_log: true with_filetree: "{{ role_path }}/policies" failed_when: - __ranger_pol_response is failed diff --git a/roles/config/services/solr_ranger_plugin/tasks/main.yml b/roles/config/services/solr_ranger_plugin/tasks/main.yml index aff71252..b778d298 100644 --- a/roles/config/services/solr_ranger_plugin/tasks/main.yml +++ b/roles/config/services/solr_ranger_plugin/tasks/main.yml @@ -6,13 +6,13 @@ method: POST user: "{{ ranger_user }}" password: "{{ ranger_password }}" - return_content: yes + return_content: true body: "{{ 
lookup('template', 'solr_plugin.json' ) }}" body_format: json status_code: 200 - validate_certs: no - force_basic_auth: yes - no_log: yes + validate_certs: false + force_basic_auth: true + no_log: true failed_when: - __ranger_solr_plugin is failed - "'Duplicate service name' not in __ranger_solr_plugin.json.msgDesc" diff --git a/roles/deployment/cluster/tasks/create_base.yml b/roles/deployment/cluster/tasks/create_base.yml index 4f2f6f21..c88bb85f 100644 --- a/roles/deployment/cluster/tasks/create_base.yml +++ b/roles/deployment/cluster/tasks/create_base.yml @@ -13,7 +13,6 @@ # limitations under the License. --- - - name: Generate complete base cluster configs include_role: name: cloudera.cluster.config.cluster.base @@ -28,7 +27,7 @@ template: src: cluster_template/main.j2 dest: /tmp/cluster_template_{{ cluster.name | replace(' ','_') }}.json - mode: 0600 + mode: "0600" #when: cluster_template_dry_run - name: Import cluster template for {{ cluster.name }} @@ -37,7 +36,7 @@ method: POST body: "{{ lookup('template', 'cluster_template/main.j2', convert_data=False) }}" register: cluster_template_result - ignore_errors: yes + ignore_errors: true when: not cluster_template_dry_run - name: Find cluster template command URL for troubleshooting diff --git a/roles/deployment/cluster/tasks/create_data_context.yml b/roles/deployment/cluster/tasks/create_data_context.yml index ea38378c..fff174a6 100644 --- a/roles/deployment/cluster/tasks/create_data_context.yml +++ b/roles/deployment/cluster/tasks/create_data_context.yml @@ -13,7 +13,6 @@ # limitations under the License. --- - - name: Create data contexts cloudera.cluster.cm_api: endpoint: /dataContexts diff --git a/roles/deployment/cluster/tasks/create_ecs.yml b/roles/deployment/cluster/tasks/create_ecs.yml index 55be5dc4..50f3aa06 100644 --- a/roles/deployment/cluster/tasks/create_ecs.yml +++ b/roles/deployment/cluster/tasks/create_ecs.yml @@ -13,7 +13,6 @@ # limitations under the License. 
--- - - name: Generate complete ecs cluster configs include_role: name: cloudera.cluster.config.cluster.ecs @@ -59,7 +58,8 @@ register: cm_config - set_fact: - parcel_repos: "{{ (cm_config.json | json_query('items[?name==`REMOTE_PARCEL_REPO_URLS`].value') | default(['']))[0].split(',') | default([]) | union(cluster.repositories) }}" + parcel_repos: "{{ (cm_config.json | json_query('items[?name==`REMOTE_PARCEL_REPO_URLS`].value') | default(['']))[0].split(',') | default([]) | union(cluster.repositories) + }}" - name: Update parcelrepos cloudera.cluster.cm_api: @@ -100,7 +100,7 @@ cloudera.cluster.cm_api: endpoint: /clusters/{{cluster.name | urlencode() }}/parcels/products/ECS/versions/{{ new_parcel_version }} register: parcels_response - until: parcels_response.json.stage in ("DISTRIBUTED", "ACTIVATED") + until: parcels_response.json.stage in ("DISTRIBUTED", "ACTIVATED") retries: "{{ parcel_poll_max_retries | default(30) }}" delay: "{{ parcel_poll_duration | default(60) }}" @@ -113,7 +113,7 @@ cloudera.cluster.cm_api: endpoint: /clusters/{{cluster.name | urlencode() }}/parcels/products/ECS/versions/{{ new_parcel_version }} register: parcels_response - until: parcels_response.json.stage in ("ACTIVATED") + until: parcels_response.json.stage in ("ACTIVATED") retries: "{{ parcel_poll_max_retries | default(30) }}" delay: "{{ parcel_poll_duration | default(60) }}" @@ -125,7 +125,8 @@ - name: Generate custom values - Embedded when: cluster.controlplane_config.Database.Mode == 'embedded' set_fact: - custom_values: "{{ lookup('template', 'cluster_template/ecs/controlPlaneValuesEmbedded.j2') | from_yaml | combine(cluster.controlplane_config, recursive=True) }}" + custom_values: "{{ lookup('template', 'cluster_template/ecs/controlPlaneValuesEmbedded.j2') | from_yaml | combine(cluster.controlplane_config, recursive=True) + }}" - name: Show custom values debug: diff --git a/roles/deployment/cluster/tasks/create_kts.yml b/roles/deployment/cluster/tasks/create_kts.yml index 89d293d3..742d2fbb 100644 --- a/roles/deployment/cluster/tasks/create_kts.yml +++ b/roles/deployment/cluster/tasks/create_kts.yml @@ -13,7 +13,6 @@ # limitations under the License. 
--- - - name: Generate complete kts cluster configs include_role: name: cloudera.cluster.config.cluster.kts @@ -25,26 +24,25 @@ #when: cluster_template_dry_run - block: - - - name: Import cluster template for {{ cluster.name }} - cloudera.cluster.cm_api: - endpoint: /cm/importClusterTemplate?addRepositories=true - method: POST - body: "{{ lookup('template', 'cluster_template/main.j2', convert_data=False) }}" - register: cluster_template_result - failed_when: '"Status code was 400" in cluster_template_result.msg' - - - set_fact: - first_run_failure: > - {{ cluster_template_result.json | - json_query('children.items[?resultMessage==`Failed to perform First Run of services.`]') }} - - # If we have installed a cluster with Key Trustee Server HA, first run will have failed (but this is ok) - # Stop the service now in preparation for remedial action - - name: Stop Key Trustee Server service when first run failed - cloudera.cluster.cm_api: - endpoint: /clusters/{{ cluster.name | urlencode() }}/services/keytrustee_server/commands/stop - method: POST - when: "'kts_passive' in groups and first_run_failure" + - name: Import cluster template for {{ cluster.name }} + cloudera.cluster.cm_api: + endpoint: /cm/importClusterTemplate?addRepositories=true + method: POST + body: "{{ lookup('template', 'cluster_template/main.j2', convert_data=False) }}" + register: cluster_template_result + failed_when: '"Status code was 400" in cluster_template_result.msg' + + - set_fact: + first_run_failure: > + {{ cluster_template_result.json | + json_query('children.items[?resultMessage==`Failed to perform First Run of services.`]') }} + + # If we have installed a cluster with Key Trustee Server HA, first run will have failed (but this is ok) + # Stop the service now in preparation for remedial action + - name: Stop Key Trustee Server service when first run failed + cloudera.cluster.cm_api: + endpoint: /clusters/{{ cluster.name | urlencode() }}/services/keytrustee_server/commands/stop + method: POST + when: "'kts_passive' in groups and first_run_failure" when: not cluster_template_dry_run diff --git a/roles/deployment/cluster/tasks/fs2cs.yml b/roles/deployment/cluster/tasks/fs2cs.yml index 9acf0a9f..f82d3f45 100644 --- a/roles/deployment/cluster/tasks/fs2cs.yml +++ b/roles/deployment/cluster/tasks/fs2cs.yml @@ -13,7 +13,6 @@ # limitations under the License. 
--- - - set_fact: ts_minus_one_week: "{{ '%Y-%m-%dT%H:%M:%S'| strftime(ansible_date_time.epoch|int - 604800) }}" @@ -21,7 +20,7 @@ cloudera.cluster.cm_api: endpoint: "/timeseries?query=SELECT%20fair_share_mb_cumulative,%20fair_share_vcores_cumulative%20WHERE%20queueName%20=%20root&from={{ ts_minus_one_week }}&desiredRollup=DAILY" method: GET - return_content: yes + return_content: true register: yarn_stats - set_fact: @@ -34,7 +33,7 @@ cloudera.cluster.cm_api: endpoint: /clusters/{{ cluster.name | urlencode() }}/services/yarn/config method: GET - return_content: yes + return_content: true register: yarn_conf - set_fact: diff --git a/roles/deployment/cluster/tasks/main.yml b/roles/deployment/cluster/tasks/main.yml index b15c67e8..469e2c83 100644 --- a/roles/deployment/cluster/tasks/main.yml +++ b/roles/deployment/cluster/tasks/main.yml @@ -16,14 +16,14 @@ - name: Include config cluster defaults for deployment ansible.builtin.include_role: name: cloudera.cluster.config.cluster.common - public: yes + public: true ## Nico - name: Apply "all hosts" configs include_role: name: cloudera.cluster.cloudera_manager.config vars: - api_config_keys_uppercase: False + api_config_keys_uppercase: false api_config_endpoint: cm/allHosts/config api_configs: "{{ definition.hosts.configs }}" when: definition.hosts.configs is defined @@ -99,7 +99,6 @@ - '"kts_active" in groups' - (deploy_only is defined and 'encryption' in deploy_only) or deploy_only is not defined - # Add deploy_only="encryption" to select kts from several clusters in clusters.yml - name: Upgrade Key Trustee server cluster include_tasks: upgrade_kts.yml diff --git a/roles/deployment/cluster/tasks/nav2atlas.yml b/roles/deployment/cluster/tasks/nav2atlas.yml index 8dc1f30d..b2933bc4 100644 --- a/roles/deployment/cluster/tasks/nav2atlas.yml +++ b/roles/deployment/cluster/tasks/nav2atlas.yml @@ -13,12 +13,11 @@ # limitations under the License. --- - - name: Get CM Service role description cloudera.cluster.cm_api: endpoint: /cm/service/roles/ method: GET - return_content: yes + return_content: true register: cm_roles - set_fact: @@ -31,7 +30,7 @@ cloudera.cluster.cm_api: endpoint: "/cm/service/roles/{{ nav_service }}/config" method: GET - return_content: yes + return_content: true register: nav_config - set_fact: @@ -49,7 +48,7 @@ ansible.builtin.file: state: directory path: "{{ nav2atlas_dir }}" - mode: 0700 + mode: "0700" owner: atlas group: atlas delegate_to: "{{ groups.atlas_atlas_server | first }}" @@ -68,7 +67,8 @@ delegate_to: "{{ groups.atlas_atlas_server | first }}" - set_fact: - atlas_migration_conf: "atlas.migration.data.filename={{ nav2atlas_dir }}/{{ cluster.name | replace(' ','_') }}-atlas-data.zip\natlas.migration.mode.batch.size=200\natlas.migration.mode.workers=8\natlas.patch.numWorkers=14\natlas.patch.batchSize=3000" + atlas_migration_conf: "atlas.migration.data.filename={{ nav2atlas_dir }}/{{ cluster.name | replace(' ','_') }}-atlas-data.zip\natlas.migration.mode.batch.size=200\n\ + atlas.migration.mode.workers=8\natlas.patch.numWorkers=14\natlas.patch.batchSize=3000" - name: Update Capacity Scheduler Config in CM cloudera.cluster.cm_api: diff --git a/roles/deployment/cluster/tasks/update_base.yml b/roles/deployment/cluster/tasks/update_base.yml index 208a0a0c..105c0498 100644 --- a/roles/deployment/cluster/tasks/update_base.yml +++ b/roles/deployment/cluster/tasks/update_base.yml @@ -13,7 +13,6 @@ # limitations under the License. 
--- - - name: Generate complete base cluster configs include_role: name: config/cluster/base @@ -41,7 +40,8 @@ register: cm_config - set_fact: - parcel_repos: "{{ (cm_config.json | json_query('items[?name==`REMOTE_PARCEL_REPO_URLS`].value') | default(['']))[0].split(',') | default([]) | union(cluster.repositories) }}" + parcel_repos: "{{ (cm_config.json | json_query('items[?name==`REMOTE_PARCEL_REPO_URLS`].value') | default(['']))[0].split(',') | default([]) | union(cluster.repositories) + }}" - name: Update parcelrepos cloudera.cluster.cm_api: @@ -82,7 +82,7 @@ cloudera.cluster.cm_api: endpoint: /clusters/{{cluster.name | urlencode() }}/parcels/products/CDH/versions/{{ new_parcel_version }} register: parcels_response - until: parcels_response.json.stage in ("DISTRIBUTED", "ACTIVATED") + until: parcels_response.json.stage in ("DISTRIBUTED", "ACTIVATED") retries: "{{ parcel_poll_max_retries | default(30) }}" delay: "{{ parcel_poll_duration | default(60) }}" diff --git a/roles/deployment/cluster/tasks/update_role_config_group.yml b/roles/deployment/cluster/tasks/update_role_config_group.yml index a5979d98..5afba460 100644 --- a/roles/deployment/cluster/tasks/update_role_config_group.yml +++ b/roles/deployment/cluster/tasks/update_role_config_group.yml @@ -13,10 +13,10 @@ # limitations under the License. --- - - name: Update service role configs cloudera.cluster.cm_api: - endpoint: /clusters/{{ cluster.name | urlencode() }}/services/{{ service | lower }}/roleConfigGroups/{{ service | lower }}-{{ role_type }}-BASE/config?message=Automated%20updates%20from%20Ansible + endpoint: /clusters/{{ cluster.name | urlencode() }}/services/{{ service | lower }}/roleConfigGroups/{{ service | lower }}-{{ role_type + }}-BASE/config?message=Automated%20updates%20from%20Ansible method: PUT body: "{{ lookup('template', 'services/roleConfigGroupConfig.j2', convert_data=False) }}" loop: "{{ role_mappings[service] }}" diff --git a/roles/deployment/cluster/tasks/upgrade_kts.yml b/roles/deployment/cluster/tasks/upgrade_kts.yml index 813d6e03..5b061f68 100644 --- a/roles/deployment/cluster/tasks/upgrade_kts.yml +++ b/roles/deployment/cluster/tasks/upgrade_kts.yml @@ -13,7 +13,6 @@ # limitations under the License. --- - - name: Generate complete kts cluster configs include_role: name: config/cluster/kts @@ -35,7 +34,8 @@ register: cm_config - set_fact: - parcel_repos: "{{ (cm_config.json | json_query('items[?name==`REMOTE_PARCEL_REPO_URLS`].value') | default(['']))[0].split(',') | default([]) | union(cluster.repositories) }}" + parcel_repos: "{{ (cm_config.json | json_query('items[?name==`REMOTE_PARCEL_REPO_URLS`].value') | default(['']))[0].split(',') | default([]) | union(cluster.repositories) + }}" - name: Update parcelrepos cloudera.cluster.cm_api: @@ -61,7 +61,8 @@ register: installed_parcels - set_fact: - installed_parcel_version: "{{ installed_parcels.json | json_query('items[?product==`KEYTRUSTEE_SERVER` && stage==`ACTIVATED`]') | cloudera.cluster.get_product_version('KEYTRUSTEE_SERVER') }}" + installed_parcel_version: "{{ installed_parcels.json | json_query('items[?product==`KEYTRUSTEE_SERVER` && stage==`ACTIVATED`]') | cloudera.cluster.get_product_version('KEYTRUSTEE_SERVER') + }}" - name: Download and upgrade KTS Parcel block: diff --git a/roles/deployment/databases/tasks/main.yml b/roles/deployment/databases/tasks/main.yml index 12c6ee27..3d5e0926 100644 --- a/roles/deployment/databases/tasks/main.yml +++ b/roles/deployment/databases/tasks/main.yml @@ -13,7 +13,6 @@ # limitations under the License. 
--- - - name: Create databases and users include_tasks: file: "{{ database_type }}.yml" diff --git a/roles/deployment/databases/tasks/mariadb.yml b/roles/deployment/databases/tasks/mariadb.yml index 792a91ea..11fe437a 100644 --- a/roles/deployment/databases/tasks/mariadb.yml +++ b/roles/deployment/databases/tasks/mariadb.yml @@ -13,13 +13,12 @@ # limitations under the License. --- - - name: Create databases mysql_db: name: "{{ databases[service].name }}" encoding: "{{ service | cloudera.cluster.get_database_encoding_mysql }}" collation: "{{ service | cloudera.cluster.get_database_collation_mysql }}" - become: yes + become: true loop: "{{ databases }}" loop_control: loop_var: service @@ -32,10 +31,10 @@ name: "{{ databases[service].user }}" password: "{{ databases[service].password }}" update_password: always - host: '%' + host: "%" priv: "{{ databases[service].name }}.*:ALL" - no_log: yes - become: yes + no_log: true + become: true loop: "{{ databases }}" loop_control: loop_var: service diff --git a/roles/deployment/databases/tasks/mysql.yml b/roles/deployment/databases/tasks/mysql.yml index 8e789cf8..f31c2e2c 100644 --- a/roles/deployment/databases/tasks/mysql.yml +++ b/roles/deployment/databases/tasks/mysql.yml @@ -13,13 +13,12 @@ # limitations under the License. --- - - name: Create databases mysql_db: name: "{{ databases[service].name }}" encoding: "{{ service | cloudera.cluster.get_database_encoding_mysql }}" collation: "{{ service | cloudera.cluster.get_database_collation_mysql }}" - become: yes + become: true loop: "{{ databases }}" loop_control: loop_var: service @@ -32,10 +31,10 @@ name: "{{ databases[service].user }}" password: "{{ databases[service].password }}" update_password: always - host: '%' + host: "%" priv: "{{ databases[service].name }}.*:ALL" - no_log: yes - become: yes + no_log: true + become: true loop: "{{ databases }}" loop_control: loop_var: service diff --git a/roles/deployment/databases/tasks/postgresql.yml b/roles/deployment/databases/tasks/postgresql.yml index f6b29756..48ce90f8 100644 --- a/roles/deployment/databases/tasks/postgresql.yml +++ b/roles/deployment/databases/tasks/postgresql.yml @@ -13,12 +13,11 @@ # limitations under the License. 
--- - - name: Create database roles postgresql_user: name: "{{ databases[item].user }}" password: "{{ databases[item].password }}" - become: yes + become: true become_user: postgres with_items: "{{ databases }}" delegate_to: "{{ databases[item].host }}" @@ -30,7 +29,7 @@ name: "{{ databases[item].name }}" owner: "{{ databases[item].user }}" encoding: UTF-8 - become: yes + become: true become_user: postgres with_items: "{{ databases }}" delegate_to: "{{ databases[item].host }}" diff --git a/roles/deployment/definition/defaults/main.yml b/roles/deployment/definition/defaults/main.yml index c2441025..3ee772cc 100644 --- a/roles/deployment/definition/defaults/main.yml +++ b/roles/deployment/definition/defaults/main.yml @@ -15,21 +15,20 @@ --- database_host: "{{ groups['db_server'][0] | default('cloudera_manager[0]') }}" - database_default_password: changeme database_tls: false database_type: postgresql database_version: "{{ default_database_versions[database_type][ansible_distribution_major_version] }}" default_database_versions: postgresql: - '7': 10 - '8': 12 + "7": 10 + "8": 12 mariadb: - '7': 10.2 - '8': 10.2 + "7": 10.2 + "8": 10.2 mysql: - '7': 5.7 - '8': 8.0 + "7": 5.7 + "8": 8.0 # Located in cloudera.cluster.infrastructure.krb5_common #krb5_realm: CLOUDERA.LOCAL @@ -39,7 +38,7 @@ default_database_versions: #krb5_enc_types: "aes256-cts aes128-cts" manual_tls_cert_distribution: false -local_temp_dir: '/tmp' +local_temp_dir: "/tmp" database_defaults: DAS: @@ -121,7 +120,7 @@ database_defaults: user: queryprocessor password: "{{ database_default_password }}" -#New in 7.1.9, postgresql only until CHF2 + #New in 7.1.9, postgresql only until CHF2 QUEUEMANAGER: host: "{{ database_host }}" port: "{{ database_type | cloudera.cluster.default_database_port }}" @@ -130,7 +129,6 @@ database_defaults: user: queuemanager password: "{{ database_default_password }}" - databases_cm_svcs: ACTIVITYMONITOR: host: "{{ database_host }}" diff --git a/roles/deployment/definition/tasks/main.yml b/roles/deployment/definition/tasks/main.yml index 84004f6f..c716f951 100644 --- a/roles/deployment/definition/tasks/main.yml +++ b/roles/deployment/definition/tasks/main.yml @@ -13,7 +13,6 @@ # limitations under the License. --- - - name: Generate host_template cluster map ansible.builtin.set_fact: _host_template_cluster_map: "{{ lookup('template', './template_cluster_map.j2') | from_yaml }}" diff --git a/roles/deployment/groupby/tasks/main.yml b/roles/deployment/groupby/tasks/main.yml index 3f4c9892..055a80c6 100644 --- a/roles/deployment/groupby/tasks/main.yml +++ b/roles/deployment/groupby/tasks/main.yml @@ -13,18 +13,17 @@ # limitations under the License. 
--- - - name: Group by host template group_by: key: "{{ 'host_template_' ~ host_template if host_template is defined else 'no_template' }}" - name: Find the correct host template block: - - fail: - msg: "Unable to host template {{ host_template }} in the cluster definition" - when: content | length == 0 - - set_fact: - host_template_content: "{{ content | first }}" + - fail: + msg: "Unable to find host template {{ host_template }} in the cluster definition" + when: content | length == 0 + - set_fact: + host_template_content: "{{ content | first }}" vars: query: "clusters[].host_templates[].\"{{ host_template }}\"" content: "{{ _pre_template_cluster | json_query(query) }}" diff --git a/roles/deployment/repometa/tasks/parcels.yml b/roles/deployment/repometa/tasks/parcels.yml index 1279b33d..a4f37231 100644 --- a/roles/deployment/repometa/tasks/parcels.yml +++ b/roles/deployment/repometa/tasks/parcels.yml @@ -13,13 +13,12 @@ # limitations under the License. --- - - name: Download parcel manifest information delegate_to: "{{ groups.cloudera_manager[0] if 'cloudera_manager' in groups else 'localhost' }}" uri: url: "{{ repository | regex_replace('/?$','') + '/manifest.json' }}" status_code: 200 - return_content: yes + return_content: true url_username: "{{ parcel_repo_username | default(omit) }}" url_password: "{{ parcel_repo_password | default(omit) }}" run_once: true @@ -27,7 +26,7 @@ loop: "{{ cluster.repositories }}" loop_control: loop_var: repository - check_mode: no + check_mode: false - debug: msg: "This Play Host OS Distro is {{ cluster_os_distribution }}" diff --git a/roles/deployment/repometa/tasks/prepare-Debian.yml b/roles/deployment/repometa/tasks/prepare-Debian.yml index 99ecc703..956fdb88 100644 --- a/roles/deployment/repometa/tasks/prepare-Debian.yml +++ b/roles/deployment/repometa/tasks/prepare-Debian.yml @@ -13,7 +13,6 @@ # limitations under the License. --- - - name: Set OS Distribution for parcel filtering when: cluster_os_distribution is undefined ansible.builtin.set_fact: diff --git a/roles/deployment/repometa/tasks/prepare-RedHat.yml b/roles/deployment/repometa/tasks/prepare-RedHat.yml index 01e3d4fa..224a062c 100644 --- a/roles/deployment/repometa/tasks/prepare-RedHat.yml +++ b/roles/deployment/repometa/tasks/prepare-RedHat.yml @@ -13,7 +13,6 @@ # limitations under the License. --- - - name: Set OS Distribution for parcel filtering when: cluster_os_distribution is undefined ansible.builtin.set_fact: diff --git a/roles/deployment/services/kms/tasks/create_kms.yml b/roles/deployment/services/kms/tasks/create_kms.yml index e167f63b..10cd6710 100644 --- a/roles/deployment/services/kms/tasks/create_kms.yml +++ b/roles/deployment/services/kms/tasks/create_kms.yml @@ -13,7 +13,6 @@ # limitations under the License. --- - - name: Get cluster parcel details cloudera.cluster.cm_api: endpoint: /clusters/{{ __kms_cluster.name | urlencode() }}/parcels diff --git a/roles/deployment/services/kms/tasks/main.yml b/roles/deployment/services/kms/tasks/main.yml index a597360f..bb70e401 100644 --- a/roles/deployment/services/kms/tasks/main.yml +++ b/roles/deployment/services/kms/tasks/main.yml @@ -13,11 +13,10 @@ # limitations under the License.
--- - - name: Get Key Trustee organisation auth secret shell: > keytrustee-orgtool --confdir {{ keytrustee_server_conf_dir }} list - become: yes + become: true delegate_to: "{{ groups.kts_active | first }}" connection: ssh register: orgtool_output diff --git a/roles/deployment/services/kms_ha/tasks/main.yml b/roles/deployment/services/kms_ha/tasks/main.yml index e2e8eb51..8f338888 100644 --- a/roles/deployment/services/kms_ha/tasks/main.yml +++ b/roles/deployment/services/kms_ha/tasks/main.yml @@ -19,7 +19,7 @@ owner: root group: root state: directory - mode: 0777 + mode: "0777" - name: Fetch keys from first KMS server delegate_to: "{{ groups.kms_servers | first }}" @@ -37,7 +37,7 @@ dest: "{{ kms_conf_dir }}" owner: "{{ kms_user }}" group: "{{ kms_group }}" - mode: 0600 + mode: "0600" loop: "{{ groups.kms_servers[1:] }}" loop_control: loop_var: __kms_host diff --git a/roles/deployment/services/kts_high_availability/tasks/main.yml b/roles/deployment/services/kts_high_availability/tasks/main.yml index 3a5d81fb..100d05fb 100644 --- a/roles/deployment/services/kts_high_availability/tasks/main.yml +++ b/roles/deployment/services/kts_high_availability/tasks/main.yml @@ -31,7 +31,7 @@ owner: root group: root state: directory - mode: 0777 + mode: "0777" # GnuPG 2.1+ uses .kbx for keyring, and retired secring / random_seed - name: Determine gnupg version @@ -53,7 +53,7 @@ dest: "{{ keytrustee_server_conf_dir }}" owner: keytrustee group: keytrustee - mode: 0600 + mode: "0600" - name: Delete temp directory file: diff --git a/roles/deployment/services/mgmt/tasks/main.yml b/roles/deployment/services/mgmt/tasks/main.yml index d88933fc..23e8b3f9 100644 --- a/roles/deployment/services/mgmt/tasks/main.yml +++ b/roles/deployment/services/mgmt/tasks/main.yml @@ -13,7 +13,6 @@ # limitations under the License. 
--- - - name: Generate mgmt configs include_role: name: cloudera.cluster.config.services.mgmt diff --git a/roles/deployment/services/wxm/defaults/main.yml b/roles/deployment/services/wxm/defaults/main.yml index 1eec45c5..f668b346 100644 --- a/roles/deployment/services/wxm/defaults/main.yml +++ b/roles/deployment/services/wxm/defaults/main.yml @@ -1,2 +1,3 @@ -altus_key_id: '' -altus_private_key: '' +--- +altus_key_id: "" +altus_private_key: "" diff --git a/roles/deployment/services/wxm/tasks/configure_telemetry.yml b/roles/deployment/services/wxm/tasks/configure_telemetry.yml index 5fee561f..036e591b 100644 --- a/roles/deployment/services/wxm/tasks/configure_telemetry.yml +++ b/roles/deployment/services/wxm/tasks/configure_telemetry.yml @@ -1,5 +1,4 @@ --- - # Add access key for Altus to base CM - name: Set Altus private Key into one line @@ -23,7 +22,6 @@ method: POST body: "{{ lookup('template', 'add_altus_key.json') }}" - # Get host Id of this host as it is required to add it as the TP host - set_fact: tp_host_id: "{{ hosts_details.json | community.general.json_query(query) }}" diff --git a/roles/deployment/services/wxm/tasks/main.yml b/roles/deployment/services/wxm/tasks/main.yml index 4b141105..6a832665 100644 --- a/roles/deployment/services/wxm/tasks/main.yml +++ b/roles/deployment/services/wxm/tasks/main.yml @@ -1,10 +1,9 @@ --- - - assert: that: - altus_private_key | length > 0 - altus_key_id | length > 0 - quiet: True + quiet: true fail_msg: >- Altus key id and private key must be provided to configure Telemetry service for WXM diff --git a/roles/deployment/services/wxm/tasks/truststore_to_base.yml b/roles/deployment/services/wxm/tasks/truststore_to_base.yml index cdd1bcf0..9d7b59e2 100644 --- a/roles/deployment/services/wxm/tasks/truststore_to_base.yml +++ b/roles/deployment/services/wxm/tasks/truststore_to_base.yml @@ -9,13 +9,15 @@ cloudera.cluster.cm_api: endpoint: "/certs/truststorePassword" register: cloudera_manager_truststore_password_api - no_log: True + no_log: true # Import CA from WXM cluster and import it into base truststore - name: Import truststore into Base truststore - shell: "echo 'yes' | keytool -import -alias wxm_truststore -keystore /var/lib/cloudera-scm-agent/agent-cert/cm-auto-global_truststore.jks -file /tmp/wxm_truststore.pem -storepass {{ cloudera_manager_truststore_password_api.content }}" + shell: "echo 'yes' | keytool -import -alias wxm_truststore -keystore /var/lib/cloudera-scm-agent/agent-cert/cm-auto-global_truststore.jks -file /tmp/wxm_truststore.pem + -storepass {{ cloudera_manager_truststore_password_api.content }}" ignore_errors: true - name: Import truststore into Base truststore - shell: "echo 'yes' | keytool -import -alias wxm_truststore -keystore /var/lib/cloudera-scm-agent/agent-cert/cm-auto-in_cluster_truststore.jks -file /tmp/wxm_truststore.pem -storepass {{ cloudera_manager_truststore_password_api.content }}" + shell: "echo 'yes' | keytool -import -alias wxm_truststore -keystore /var/lib/cloudera-scm-agent/agent-cert/cm-auto-in_cluster_truststore.jks -file /tmp/wxm_truststore.pem + -storepass {{ cloudera_manager_truststore_password_api.content }}" ignore_errors: true diff --git a/roles/infrastructure/ca_server/molecule/default/verify.yml b/roles/infrastructure/ca_server/molecule/default/verify.yml index e5ac8599..bf767768 100644 --- a/roles/infrastructure/ca_server/molecule/default/verify.yml +++ b/roles/infrastructure/ca_server/molecule/default/verify.yml @@ -13,32 +13,30 @@ # limitations under the License. 
--- - - name: Verify hosts: all - gather_facts: no + gather_facts: false tasks: + - name: Output Root CA cert details + shell: openssl x509 -in /ca/certs/ca.cert.pem -noout -text + register: root_ca_output - - name: Output Root CA cert details - shell: openssl x509 -in /ca/certs/ca.cert.pem -noout -text - register: root_ca_output - - - name: Check Root CA issuer - assert: - that: "'Issuer: C=US, O=Cloudera, Inc., OU=PS, CN=Root CA' in root_ca_output.stdout" + - name: Check Root CA issuer + assert: + that: "'Issuer: C=US, O=Cloudera, Inc., OU=PS, CN=Root CA' in root_ca_output.stdout" - - name: Check Root CA subject - assert: - that: "'Subject: C=US, O=Cloudera, Inc., OU=PS, CN=Root CA' in root_ca_output.stdout" + - name: Check Root CA subject + assert: + that: "'Subject: C=US, O=Cloudera, Inc., OU=PS, CN=Root CA' in root_ca_output.stdout" - - name: Output Intermediate CA cert details - shell: openssl x509 -in /ca/intermediate/certs/intermediate.cert.pem -noout -text - register: intermediate_ca_output + - name: Output Intermediate CA cert details + shell: openssl x509 -in /ca/intermediate/certs/intermediate.cert.pem -noout -text + register: intermediate_ca_output - - name: Check Intermediate CA issuer - assert: - that: "'Issuer: C=US, O=Cloudera, Inc., OU=PS, CN=Root CA' in intermediate_ca_output.stdout" + - name: Check Intermediate CA issuer + assert: + that: "'Issuer: C=US, O=Cloudera, Inc., OU=PS, CN=Root CA' in intermediate_ca_output.stdout" - - name: Check Intermediate CA subject - assert: - that: "'Subject: C=US, O=Cloudera, Inc., OU=PS, CN=Intermediate CA' in intermediate_ca_output.stdout" + - name: Check Intermediate CA subject + assert: + that: "'Subject: C=US, O=Cloudera, Inc., OU=PS, CN=Intermediate CA' in intermediate_ca_output.stdout" diff --git a/roles/infrastructure/ca_server/tasks/create_ca.yml b/roles/infrastructure/ca_server/tasks/create_ca.yml index 431312ec..e9d24cd0 100644 --- a/roles/infrastructure/ca_server/tasks/create_ca.yml +++ b/roles/infrastructure/ca_server/tasks/create_ca.yml @@ -1,3 +1,4 @@ +--- # Copyright 2023 Cloudera, Inc. All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); @@ -16,7 +17,7 @@ file: state: directory path: "{{ dir }}" - mode: 0700 + mode: "0700" owner: root loop: - "{{ ca_server_root_path }}" @@ -31,8 +32,8 @@ file: state: touch path: "{{ ca_server_root_path }}/index.txt" - mode: 0700 - changed_when: False + mode: "0700" + changed_when: false - name: Write serial shell: @@ -44,7 +45,7 @@ src: root.openssl.cnf.j2 dest: "{{ ca_server_root_path }}/openssl.cnf" owner: root - mode: 0644 + mode: "0644" - name: Generate root private key openssl_privatekey: @@ -52,7 +53,7 @@ size: "{{ ca_server_root_key_size }}" cipher: "{{ ca_server_root_key_cipher }}" passphrase: "{{ ca_server_root_key_password }}" - mode: 0400 + mode: "0400" - set_fact: subject_root: "/{{ lookup('template', 'root_dn.j2') | from_yaml | map('regex_replace', '/', '\\/') | join('/') }}" @@ -78,7 +79,7 @@ file: state: directory path: "{{ dir }}" - mode: 0700 + mode: "0700" owner: root loop: - "{{ ca_server_intermediate_path }}" @@ -94,8 +95,8 @@ file: state: touch path: "{{ ca_server_intermediate_path }}/index.txt" - mode: 0700 - changed_when: False + mode: "0700" + changed_when: false - name: Write serial shell: @@ -107,7 +108,7 @@ src: intermediate.openssl.cnf.j2 dest: "{{ ca_server_intermediate_path }}/openssl.cnf" owner: root - mode: 0644 + mode: "0644" - name: Generate intermediate private key openssl_privatekey: @@ -115,7 +116,7 @@ size: 4096 cipher: "{{ ca_server_root_key_cipher }}" passphrase: "{{ ca_server_intermediate_key_password }}" - mode: 0400 + mode: "0400" - name: Generate intermediate CSR shell: diff --git a/roles/infrastructure/ca_server/tasks/main.yml b/roles/infrastructure/ca_server/tasks/main.yml index c251ca64..08dad235 100644 --- a/roles/infrastructure/ca_server/tasks/main.yml +++ b/roles/infrastructure/ca_server/tasks/main.yml @@ -13,7 +13,6 @@ # limitations under the License. --- - - name: Include OS-specific variables include_vars: file: "{{ ansible_os_family }}.yml" @@ -82,9 +81,9 @@ - name: Ensure the intermediate CA config has the correct section ([ cloudera_req ]) lineinfile: path: "{{ ca_server_intermediate_path }}/openssl.cnf" - regexp: '\[\s*cloudera_req\s*\]' + regexp: "\\[\\s*cloudera_req\\s*\\]" state: absent - check_mode: yes + check_mode: true changed_when: false register: intermediate_conf_check when: intermediate_ca_conf_exists diff --git a/roles/infrastructure/custom_repo/defaults/main.yml b/roles/infrastructure/custom_repo/defaults/main.yml index a7cc8315..c5207c7e 100644 --- a/roles/infrastructure/custom_repo/defaults/main.yml +++ b/roles/infrastructure/custom_repo/defaults/main.yml @@ -17,6 +17,6 @@ local_temp_dir: /tmp repo_tar_local_dir: repo repo_tar_files: "{{ definition.repo_tar_files | default([]) }}" -keep_newer: yes +keep_newer: true custom_repo_rehost_files: "{{ definition.custom_repo_rehost_files | default([]) }}" diff --git a/roles/infrastructure/custom_repo/tasks/install_parcels.yml b/roles/infrastructure/custom_repo/tasks/install_parcels.yml index 4443cc51..03616589 100644 --- a/roles/infrastructure/custom_repo/tasks/install_parcels.yml +++ b/roles/infrastructure/custom_repo/tasks/install_parcels.yml @@ -13,7 +13,6 @@ # limitations under the License. 
--- - - name: Work out temp and repo paths set_fact: temp_dir: "{{ local_temp_dir }}/{{ repo_tar_file | regex_replace('.tar.gz','') }}" @@ -27,22 +26,22 @@ - name: Find parcel files find: path: "{{ temp_dir }}" - patterns: '*.parcel,*.parcel.sha,*.parcel.sha1,.*.parcel.sha256,manifest.json' - recurse: yes + patterns: "*.parcel,*.parcel.sha,*.parcel.sha1,*.parcel.sha256,manifest.json" + recurse: true register: files - name: Create parcel repo directory file: path: "{{ repo_dir }}" state: directory - mode: 0755 + mode: "0755" - name: Copy parcel files into correct location copy: src: "{{ item.path }}" dest: "{{ repo_dir }}" - remote_src: yes - mode: 0644 + remote_src: true + mode: "0644" with_items: "{{ files.files }}" - name: Remove temp directory diff --git a/roles/infrastructure/custom_repo/tasks/install_parcels_from_tars_on_controller.yml b/roles/infrastructure/custom_repo/tasks/install_parcels_from_tars_on_controller.yml index 9dec3e00..39bdebfb 100644 --- a/roles/infrastructure/custom_repo/tasks/install_parcels_from_tars_on_controller.yml +++ b/roles/infrastructure/custom_repo/tasks/install_parcels_from_tars_on_controller.yml @@ -13,7 +13,6 @@ # limitations under the License. --- - - name: Work out temp and repo paths set_fact: temp_dir: "{{ local_temp_dir }}/{{ repo_tar_file | regex_replace('.tar.gz','') }}" @@ -28,22 +27,22 @@ - name: Find parcel files find: path: "{{ temp_dir }}" - patterns: '*.parcel,*.parcel.sha,*.parcel.sha1,.*.parcel.sha256,manifest.json' - recurse: yes + patterns: "*.parcel,*.parcel.sha,*.parcel.sha1,*.parcel.sha256,manifest.json" + recurse: true register: files - name: Create parcel repo directory file: path: "{{ repo_dir }}" state: directory - mode: 0755 + mode: "0755" - name: Copy parcel files into correct location copy: src: "{{ item.path }}" dest: "{{ repo_dir }}" - remote_src: yes - mode: 0644 + remote_src: true + mode: "0644" with_items: "{{ files.files }}" - name: Remove temp directory diff --git a/roles/infrastructure/custom_repo/tasks/main.yml b/roles/infrastructure/custom_repo/tasks/main.yml index 8aa6ad38..b5fb1f0c 100644 --- a/roles/infrastructure/custom_repo/tasks/main.yml +++ b/roles/infrastructure/custom_repo/tasks/main.yml @@ -13,7 +13,6 @@ # limitations under the License.
--- - - name: Include variables include_vars: file: "{{ ansible_os_family }}.yml" @@ -21,7 +20,7 @@ - name: Install {{ httpd_package }} ansible.builtin.package: lock_timeout: "{{ (ansible_os_family == 'RedHat') | ternary(60, omit) }}" - update_cache: yes + update_cache: true name: "{{ httpd_package }}" state: present @@ -40,5 +39,5 @@ - name: Start and enable httpd service: name: "{{ httpd_service }}" - enabled: yes + enabled: true state: restarted diff --git a/roles/infrastructure/custom_repo/tasks/rehost_files_from_download.yml b/roles/infrastructure/custom_repo/tasks/rehost_files_from_download.yml index 3520e8b8..c976ae0e 100644 --- a/roles/infrastructure/custom_repo/tasks/rehost_files_from_download.yml +++ b/roles/infrastructure/custom_repo/tasks/rehost_files_from_download.yml @@ -61,7 +61,7 @@ loop_var: __tmp_unpack_item ansible.builtin.unarchive: extra_opts: [--strip-components=1] - remote_src: yes + remote_src: true src: "/var/www/html{{ __tmp_unpack_item | urlsplit('path') }}" dest: "/var/www/html{{ __tmp_unpack_item | urlsplit('path') | regex_replace('^(.+)repo.+-(.+)\\.tar\\.gz$', '\\1\\2' + '/yum/') }}" keep_newer: "{{ keep_newer }}" diff --git a/roles/infrastructure/haproxy/tasks/main.yml b/roles/infrastructure/haproxy/tasks/main.yml index 4a94427b..d74fa39f 100644 --- a/roles/infrastructure/haproxy/tasks/main.yml +++ b/roles/infrastructure/haproxy/tasks/main.yml @@ -13,7 +13,6 @@ # limitations under the License. --- - - name: Install HAProxy ansible.builtin.package: lock_timeout: "{{ (ansible_os_family == 'RedHat') | ternary(60, omit) }}" @@ -23,5 +22,5 @@ - name: Enable HAProxy service: name: haproxy - enabled: yes + enabled: true state: restarted diff --git a/roles/infrastructure/krb5_client/handlers/main.yml b/roles/infrastructure/krb5_client/handlers/main.yml index e2ea5991..c41dffe7 100644 --- a/roles/infrastructure/krb5_client/handlers/main.yml +++ b/roles/infrastructure/krb5_client/handlers/main.yml @@ -1,3 +1,4 @@ +--- # Copyright 2023 Cloudera, Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/roles/infrastructure/krb5_client/tasks/freeipa.yml b/roles/infrastructure/krb5_client/tasks/freeipa.yml index f90ad950..a2135e15 100644 --- a/roles/infrastructure/krb5_client/tasks/freeipa.yml +++ b/roles/infrastructure/krb5_client/tasks/freeipa.yml @@ -1,5 +1,4 @@ --- - # Copyright 2023 Cloudera, Inc. All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/roles/infrastructure/krb5_client/tasks/freeipa_autodns.yml b/roles/infrastructure/krb5_client/tasks/freeipa_autodns.yml index 2dcef68e..ffcb5e4c 100644 --- a/roles/infrastructure/krb5_client/tasks/freeipa_autodns.yml +++ b/roles/infrastructure/krb5_client/tasks/freeipa_autodns.yml @@ -21,7 +21,7 @@ - name: Gather facts from KRB5 Server ansible.builtin.setup: gather_subset: - - 'default_ipv4' + - "default_ipv4" delegate_to: "{{ krb5_ip_collect_item }}" delegate_facts: true loop: "{{ groups['krb5_server'] }}" @@ -41,7 +41,7 @@ file: path: /etc/NetworkManager/conf.d/ state: directory - recurse: yes + recurse: true - name: Ensure dns configuration persists through reboot ansible.builtin.copy: @@ -49,7 +49,7 @@ [main] dns=none dest: /etc/NetworkManager/conf.d/disable-resolve.conf-managing.conf - backup: yes + backup: true - name: Disable nm-cloud-setup if present when: @@ -57,7 +57,7 @@ - ansible_os_family == 'RedHat' block: - name: Disable nm-cloud-setup if present - ignore_errors: yes + ignore_errors: true loop_control: loop_var: __nm_cloud_setup_disable_item loop: diff --git a/roles/infrastructure/krb5_client/tasks/freeipa_dbus_patch.yml b/roles/infrastructure/krb5_client/tasks/freeipa_dbus_patch.yml index e5602af8..a7705c97 100644 --- a/roles/infrastructure/krb5_client/tasks/freeipa_dbus_patch.yml +++ b/roles/infrastructure/krb5_client/tasks/freeipa_dbus_patch.yml @@ -12,8 +12,8 @@ - name: Ensure dbus is enabled and unmasked systemd: name: dbus - enabled: yes - masked: no + enabled: true + masked: false ignore_errors: true - name: Restart DBUS diff --git a/roles/infrastructure/krb5_client/tasks/pvc_configs.yml b/roles/infrastructure/krb5_client/tasks/pvc_configs.yml index 4bcb9746..ffbbfc04 100644 --- a/roles/infrastructure/krb5_client/tasks/pvc_configs.yml +++ b/roles/infrastructure/krb5_client/tasks/pvc_configs.yml @@ -16,7 +16,7 @@ - name: Add Renewable ticket lifetime blockinfile: dest: "/etc/krb5.conf" - insertafter: 'ticket_lifetime = 24h' + insertafter: "ticket_lifetime = 24h" block: | renew_lifetime = 7d max_life = 365d @@ -26,14 +26,14 @@ - name: Comment default_ccache_name in krb5.conf replace: dest: /etc/krb5.conf - regexp: 'default_ccache_name = KEYRING:persistent:%{uid}' - replace: '#default_ccache_name = KEYRING:persistent:%{uid}' + regexp: "default_ccache_name = KEYRING:persistent:%{uid}" + replace: "#default_ccache_name = KEYRING:persistent:%{uid}" ignore_errors: true - name: Adding enctypes for Hue blockinfile: dest: "/etc/krb5.conf" - insertafter: 'ticket_lifetime = 24h' + insertafter: "ticket_lifetime = 24h" block: | default_tgs_enctypes= des3-cbc-sha1 aes256-cts-hmac-sha1-96 arcfour-hmac aes128-cts-hmac-sha1-96 des-cbc-md5 default_tkt_enctypes = des3-cbc-sha1 aes256-cts-hmac-sha1-96 arcfour-hmac aes128-cts-hmac-sha1-96 des-cbc-md5 diff --git a/roles/infrastructure/krb5_common/defaults/main.yml b/roles/infrastructure/krb5_common/defaults/main.yml index e9ed4aed..99e8ae36 100644 --- a/roles/infrastructure/krb5_common/defaults/main.yml +++ b/roles/infrastructure/krb5_common/defaults/main.yml @@ -1,3 +1,4 @@ +--- # Copyright 2023 Cloudera, Inc. All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/roles/infrastructure/krb5_conf/tasks/mit.yml b/roles/infrastructure/krb5_conf/tasks/mit.yml index 4f379062..5827213d 100644 --- a/roles/infrastructure/krb5_conf/tasks/mit.yml +++ b/roles/infrastructure/krb5_conf/tasks/mit.yml @@ -13,10 +13,9 @@ # limitations under the License. --- - - name: Create krb5.conf template: src: "{{ krb5_conf_template | default('krb5.conf.j2') }}" dest: /etc/krb5.conf - backup: yes + backup: true when: not (skip_krb5_conf_distribution | default(False)) diff --git a/roles/infrastructure/krb5_server/tasks/fix_freeipa_collection.yml b/roles/infrastructure/krb5_server/tasks/fix_freeipa_collection.yml index 4eb895df..a577afb0 100644 --- a/roles/infrastructure/krb5_server/tasks/fix_freeipa_collection.yml +++ b/roles/infrastructure/krb5_server/tasks/fix_freeipa_collection.yml @@ -23,29 +23,29 @@ - name: Add a sleep before calling certmonger for Py27 lineinfile: path: /usr/lib/python2.7/site-packages/ipaserver/install/dogtaginstance.py - insertbefore: '.*find_ca_by_nickname.*' - line: ' time.sleep(20)' + insertbefore: ".*find_ca_by_nickname.*" + line: " time.sleep(20)" state: present ignore_errors: true - name: Raise timeout for CA wait for Py27 lineinfile: path: /usr/lib/python2.7/site-packages/ipalib/constants.py - regexp: '^CA_DBUS_TIMEOUT' + regexp: "^CA_DBUS_TIMEOUT" line: CA_DBUS_TIMEOUT = 360 ignore_errors: true - name: Add a sleep before calling certmonger for Py36 lineinfile: path: /usr/lib/python3.6/site-packages/ipaserver/install/dogtaginstance.py - insertbefore: '.*find_ca_by_nickname.*' - line: ' time.sleep(20)' + insertbefore: ".*find_ca_by_nickname.*" + line: " time.sleep(20)" state: present ignore_errors: true - name: Raise timeout for CA wait for Py36 lineinfile: path: /usr/lib/python3.6/site-packages/ipalib/constants.py - regexp: '^CA_DBUS_TIMEOUT' + regexp: "^CA_DBUS_TIMEOUT" line: CA_DBUS_TIMEOUT = 360 ignore_errors: true diff --git a/roles/infrastructure/krb5_server/tasks/freeipa.yml b/roles/infrastructure/krb5_server/tasks/freeipa.yml index f54603a1..9644b124 100644 --- a/roles/infrastructure/krb5_server/tasks/freeipa.yml +++ b/roles/infrastructure/krb5_server/tasks/freeipa.yml @@ -23,7 +23,7 @@ selinux: policy: targeted state: permissive - ignore_errors: yes + ignore_errors: true - name: Setup FreeIPA Server ansible.builtin.include_role: @@ -71,7 +71,7 @@ - name: Gather facts from ECS Server ansible.builtin.setup: gather_subset: - - 'default_ipv4' + - "default_ipv4" delegate_to: "{{ ecs_ip_collect_item }}" delegate_facts: true loop: "{{ groups['ecs_ecs_server'] }}" diff --git a/roles/infrastructure/krb5_server/tasks/mit.yml b/roles/infrastructure/krb5_server/tasks/mit.yml index 89abb44e..55c0cf00 100644 --- a/roles/infrastructure/krb5_server/tasks/mit.yml +++ b/roles/infrastructure/krb5_server/tasks/mit.yml @@ -27,7 +27,7 @@ template: src: "{{ ansible_os_family }}/kdc.conf.j2" dest: "{{ krb5_kdc_state_directory }}/kdc.conf" - backup: yes + backup: true - name: Create KDC database command: "/usr/sbin/kdb5_util create -s -P {{ krb5_kdc_master_password }}" @@ -38,7 +38,7 @@ template: src: kadm5.acl.j2 dest: "{{ krb5_kdc_state_directory }}/kadm5.acl" - backup: yes + backup: true - name: Create Cloudera Manager admin principal command: /usr/sbin/kadmin.local -q "addprinc -pw {{ krb5_kdc_admin_password }} {{ krb5_kdc_admin_user }}" @@ -47,6 +47,6 @@ service: name: "{{ item }}" state: restarted - enabled: yes + enabled: true with_items: - "{{ krb5_services }}" diff --git 
a/roles/infrastructure/krb5_server/vars/RedHat-7.yml b/roles/infrastructure/krb5_server/vars/RedHat-7.yml index bfeea14e..7711469c 100644 --- a/roles/infrastructure/krb5_server/vars/RedHat-7.yml +++ b/roles/infrastructure/krb5_server/vars/RedHat-7.yml @@ -1,2 +1,2 @@ --- -ipaserver_packages: [ "ipa-server", "libselinux-python" ] +ipaserver_packages: ["ipa-server", "libselinux-python"] diff --git a/roles/infrastructure/krb5_server/vars/RedHat-8.yml b/roles/infrastructure/krb5_server/vars/RedHat-8.yml index c791a5bb..b1a97f36 100644 --- a/roles/infrastructure/krb5_server/vars/RedHat-8.yml +++ b/roles/infrastructure/krb5_server/vars/RedHat-8.yml @@ -1,2 +1,2 @@ --- -ipaserver_packages: [ "@idm:DL1/server" ] +ipaserver_packages: ["@idm:DL1/server"] diff --git a/roles/infrastructure/krb5_server/vars/Ubuntu.yml b/roles/infrastructure/krb5_server/vars/Ubuntu.yml index 91668774..8ba2ed73 100644 --- a/roles/infrastructure/krb5_server/vars/Ubuntu.yml +++ b/roles/infrastructure/krb5_server/vars/Ubuntu.yml @@ -1,2 +1,2 @@ --- -ipaserver_packages: [ "freeipa-server" ] +ipaserver_packages: ["freeipa-server"] diff --git a/roles/infrastructure/krb5_server/vars/default.yml b/roles/infrastructure/krb5_server/vars/default.yml index 6324f7ec..52defc12 100644 --- a/roles/infrastructure/krb5_server/vars/default.yml +++ b/roles/infrastructure/krb5_server/vars/default.yml @@ -1,2 +1,2 @@ --- -ipaserver_packages: [ "ipa-server", "python3-libselinux" ] +ipaserver_packages: ["ipa-server", "python3-libselinux"] diff --git a/roles/infrastructure/rdbms/handlers/main.yml b/roles/infrastructure/rdbms/handlers/main.yml index 034c8b04..0af337e1 100644 --- a/roles/infrastructure/rdbms/handlers/main.yml +++ b/roles/infrastructure/rdbms/handlers/main.yml @@ -13,6 +13,5 @@ # limitations under the License. --- - - name: yum clean metadata ansible.builtin.command: yum clean metadata diff --git a/roles/infrastructure/rdbms/tasks/mariadb-Debian.yml b/roles/infrastructure/rdbms/tasks/mariadb-Debian.yml index 66724199..c37394bc 100644 --- a/roles/infrastructure/rdbms/tasks/mariadb-Debian.yml +++ b/roles/infrastructure/rdbms/tasks/mariadb-Debian.yml @@ -13,7 +13,6 @@ # limitations under the License. --- - - name: Install MariaDB apt key apt_key: url: https://mariadb.org/mariadb_release_signing_key.asc diff --git a/roles/infrastructure/rdbms/tasks/mariadb-RedHat.yml b/roles/infrastructure/rdbms/tasks/mariadb-RedHat.yml index da4ae4de..9f49fa26 100644 --- a/roles/infrastructure/rdbms/tasks/mariadb-RedHat.yml +++ b/roles/infrastructure/rdbms/tasks/mariadb-RedHat.yml @@ -13,7 +13,6 @@ # limitations under the License. --- - - name: Install MariaDB repository yum_repository: name: MariaDB diff --git a/roles/infrastructure/rdbms/tasks/mysql-RedHat.yml b/roles/infrastructure/rdbms/tasks/mysql-RedHat.yml index 4ad11e57..4af53d1a 100644 --- a/roles/infrastructure/rdbms/tasks/mysql-RedHat.yml +++ b/roles/infrastructure/rdbms/tasks/mysql-RedHat.yml @@ -13,7 +13,6 @@ # limitations under the License. --- - - name: Import GPG Key rpm_key: key: https://repo.mysql.com/RPM-GPG-KEY-mysql-2022 @@ -30,7 +29,6 @@ when: - not (skip_rdbms_repo_setup | default(False)) - - name: Install MySQL include_role: name: ansible-role-mysql diff --git a/roles/infrastructure/rdbms/tasks/postgresql-Debian.yml b/roles/infrastructure/rdbms/tasks/postgresql-Debian.yml index 21cb7b98..c14be714 100644 --- a/roles/infrastructure/rdbms/tasks/postgresql-Debian.yml +++ b/roles/infrastructure/rdbms/tasks/postgresql-Debian.yml @@ -13,7 +13,6 @@ # limitations under the License. 
--- - - name: Install PostgreSQL apt key apt_key: url: https://www.postgresql.org/media/keys/ACCC4CF8.asc diff --git a/roles/infrastructure/rdbms/tasks/postgresql-RedHat.yml b/roles/infrastructure/rdbms/tasks/postgresql-RedHat.yml index d5a06625..a7ff40f6 100644 --- a/roles/infrastructure/rdbms/tasks/postgresql-RedHat.yml +++ b/roles/infrastructure/rdbms/tasks/postgresql-RedHat.yml @@ -13,7 +13,6 @@ # limitations under the License. --- - - name: Install PostgreSQL common repository yum_repository: name: pgdg-common diff --git a/roles/infrastructure/rdbms/tasks/template_fix.yml b/roles/infrastructure/rdbms/tasks/template_fix.yml index 1a04815e..0899e217 100644 --- a/roles/infrastructure/rdbms/tasks/template_fix.yml +++ b/roles/infrastructure/rdbms/tasks/template_fix.yml @@ -13,23 +13,22 @@ # limitations under the License. --- - - name: Copy SQL to change template to UTF-8 copy: src: files/utf8-template.sql dest: "{{ local_temp_dir }}" owner: postgres group: postgres - mode: 0660 + mode: "0660" - name: Run SQL to change template to UTF-8 command: "psql -f {{ local_temp_dir }}/utf8-template.sql" - become: yes + become: true become_user: postgres - name: Remove SQL file file: path: "{{ local_temp_dir }}/utf8-template.sql" state: absent - become: yes + become: true become_user: postgres diff --git a/roles/infrastructure/rdbms/vars/postgresql.yml b/roles/infrastructure/rdbms/vars/postgresql.yml index a676b7c8..89ce9acd 100644 --- a/roles/infrastructure/rdbms/vars/postgresql.yml +++ b/roles/infrastructure/rdbms/vars/postgresql.yml @@ -15,9 +15,9 @@ --- postgresql_global_config_options: - option: log_directory - value: 'log' + value: "log" - option: listen_addresses - value: '*' + value: "*" - option: max_connections value: 300 - option: ssl @@ -30,8 +30,8 @@ postgresql_global_config_options: value: "{{ tls_chain_path if database_tls else None }}" postgresql_hba_entries: - - {type: local, database: all, user: postgres, auth_method: peer} - - {type: local, database: all, user: all, auth_method: peer} - - {type: host, database: all, user: all, address: '127.0.0.1/32', auth_method: md5} - - {type: host, database: all, user: all, address: '::1/128', auth_method: md5} - - {type: host, database: all, user: all, address: '0.0.0.0/0', auth_method: md5} + - { type: local, database: all, user: postgres, auth_method: peer } + - { type: local, database: all, user: all, auth_method: peer } + - { type: host, database: all, user: all, address: "127.0.0.1/32", auth_method: md5 } + - { type: host, database: all, user: all, address: "::1/128", auth_method: md5 } + - { type: host, database: all, user: all, address: "0.0.0.0/0", auth_method: md5 } diff --git a/roles/operations/delete_cluster/tasks/main.yml b/roles/operations/delete_cluster/tasks/main.yml index 9ed8bc0d..67abce9a 100644 --- a/roles/operations/delete_cluster/tasks/main.yml +++ b/roles/operations/delete_cluster/tasks/main.yml @@ -13,7 +13,6 @@ # limitations under the License. 
--- - - name: Check the cluster exists cloudera.cluster.cm_api: endpoint: /clusters/{{ cluster.name | urlencode() }} @@ -47,8 +46,8 @@ and services.json | json_query(stopped_query) | length == services.json | json_query(all_query) | length vars: - stopped_query: 'items[?(serviceState==`STOPPED` || serviceState==`NA`)]' - all_query: 'items[*]' + stopped_query: "items[?(serviceState==`STOPPED` || serviceState==`NA`)]" + all_query: "items[*]" retries: "{{ teardown_stop_cluster_poll_max_retries | default(30) }}" delay: "{{ teardown_stop_cluster_poll_duration | default(20) }}" diff --git a/roles/operations/delete_cms/tasks/main.yml b/roles/operations/delete_cms/tasks/main.yml index 4aa7ab92..357b5d1f 100644 --- a/roles/operations/delete_cms/tasks/main.yml +++ b/roles/operations/delete_cms/tasks/main.yml @@ -13,7 +13,6 @@ # limitations under the License. --- - - name: Check cms exists cloudera.cluster.cm_api: endpoint: /cm/service diff --git a/roles/operations/refresh_ranger_kms_repo/tasks/cluster_find_ranger.yml b/roles/operations/refresh_ranger_kms_repo/tasks/cluster_find_ranger.yml index 4f93f754..dcc75ecd 100644 --- a/roles/operations/refresh_ranger_kms_repo/tasks/cluster_find_ranger.yml +++ b/roles/operations/refresh_ranger_kms_repo/tasks/cluster_find_ranger.yml @@ -13,7 +13,6 @@ # limitations under the License. --- - - name: Check a cluster has been specified fail: msg: This task list expects a cluster var. diff --git a/roles/operations/refresh_ranger_kms_repo/tasks/main.yml b/roles/operations/refresh_ranger_kms_repo/tasks/main.yml index 3cb47078..f52e334e 100644 --- a/roles/operations/refresh_ranger_kms_repo/tasks/main.yml +++ b/roles/operations/refresh_ranger_kms_repo/tasks/main.yml @@ -13,7 +13,6 @@ # limitations under the License. --- - - name: Refresh the KMS repository include_tasks: setup_cluster.yml loop: "{{ definition.clusters }}" diff --git a/roles/operations/refresh_ranger_kms_repo/tasks/setup_cluster.yml b/roles/operations/refresh_ranger_kms_repo/tasks/setup_cluster.yml index 5ac9216f..ac943810 100644 --- a/roles/operations/refresh_ranger_kms_repo/tasks/setup_cluster.yml +++ b/roles/operations/refresh_ranger_kms_repo/tasks/setup_cluster.yml @@ -13,7 +13,6 @@ # limitations under the License. --- - - fail: msg: You must pass a cluster variable to this role when: __cluster_item is not defined @@ -26,7 +25,7 @@ url: "{{ ranger_api_url }}/service" user: "{{ ranger_keyadmin_username }}" password: "{{ ranger_keyadmin_password }}" - force_basic_auth: yes + force_basic_auth: true headers: Accept: "application/json" register: services @@ -43,7 +42,7 @@ url: "{{ ranger_api_url }}/service/{{ kms_service.id }}" user: "{{ ranger_keyadmin_username }}" password: "{{ ranger_keyadmin_password }}" - force_basic_auth: yes + force_basic_auth: true method: DELETE headers: Accept: "application/json" diff --git a/roles/operations/restart_cluster/tasks/main.yml b/roles/operations/restart_cluster/tasks/main.yml index c6077a57..97fd5948 100644 --- a/roles/operations/restart_cluster/tasks/main.yml +++ b/roles/operations/restart_cluster/tasks/main.yml @@ -1,5 +1,4 @@ --- - - name: Restart cluster cm_api: endpoint: /clusters/{{ cluster_to_restart }}/commands/restart diff --git a/roles/operations/restart_stale/tasks/main.yml b/roles/operations/restart_stale/tasks/main.yml index dc517bb5..44b691f4 100644 --- a/roles/operations/restart_stale/tasks/main.yml +++ b/roles/operations/restart_stale/tasks/main.yml @@ -13,7 +13,6 @@ # limitations under the License. 
--- - - name: Get clusters cloudera.cluster.cm_api: endpoint: /clusters diff --git a/roles/operations/restart_stale/tasks/restart.yml b/roles/operations/restart_stale/tasks/restart.yml index 655cfa6f..945003d9 100644 --- a/roles/operations/restart_stale/tasks/restart.yml +++ b/roles/operations/restart_stale/tasks/restart.yml @@ -13,7 +13,6 @@ # limitations under the License. --- - - name: Get cluster services cloudera.cluster.cm_api: endpoint: /clusters/{{ cluster.name | urlencode() }}/services diff --git a/roles/operations/stop_cluster/tasks/main.yml b/roles/operations/stop_cluster/tasks/main.yml index 9f591529..4695174f 100644 --- a/roles/operations/stop_cluster/tasks/main.yml +++ b/roles/operations/stop_cluster/tasks/main.yml @@ -13,7 +13,6 @@ # limitations under the License. --- - - name: Check the cluster exists cloudera.cluster.cm_api: endpoint: /clusters/{{ cluster.name | urlencode() }} @@ -45,7 +44,7 @@ and services.json | json_query(stopped_query) | length == services.json | json_query(all_query) | length vars: - stopped_query: 'items[?(serviceState==`STOPPED` || serviceState==`NA`)]' - all_query: 'items[*]' + stopped_query: "items[?(serviceState==`STOPPED` || serviceState==`NA`)]" + all_query: "items[*]" retries: "{{ teardown_stop_cluster_poll_max_retries | default(30) }}" delay: "{{ teardown_stop_cluster_poll_duration | default(20) }}" diff --git a/roles/prereqs/jdk/defaults/main.yml b/roles/prereqs/jdk/defaults/main.yml index d443f3f5..2994dfcd 100644 --- a/roles/prereqs/jdk/defaults/main.yml +++ b/roles/prereqs/jdk/defaults/main.yml @@ -22,4 +22,4 @@ jdk_java_security_paths: - /etc/java-8-openjdk/security - /etc/java-11-openjdk/security - /usr/lib64/jvm/java-1.8.0-openjdk-1.8.0/jre/lib/security -jdk_java_security_safe_replace: True +jdk_java_security_safe_replace: true diff --git a/roles/prereqs/jdk/tasks/main.yml b/roles/prereqs/jdk/tasks/main.yml index a5bfe69f..dc32ead5 100644 --- a/roles/prereqs/jdk/tasks/main.yml +++ b/roles/prereqs/jdk/tasks/main.yml @@ -13,7 +13,6 @@ # limitations under the License. 
--- - - name: Include variables include_vars: file: "{{ ansible_os_family }}.yml" @@ -27,18 +26,18 @@ ansible.builtin.package: lock_timeout: "{{ (ansible_os_family == 'RedHat') | ternary(60, omit) }}" name: - - "{{ jdk_package }}" + - "{{ jdk_package }}" state: present - update_cache: yes + update_cache: true - name: Add missing symlinks (if installed from Cloudera repo) block: - name: Find Java home directory find: paths: /usr/java - patterns: 'jdk*-cloudera' + patterns: "jdk*-cloudera" file_type: directory - recurse: no + recurse: false register: jdk_home - name: Create alternatives symlink for java alternatives: @@ -76,7 +75,7 @@ find: paths: "{{ jdk_java_security_paths }}" pattern: "java.security" - follow: yes + follow: true register: java_security - fail: @@ -88,7 +87,7 @@ - name: Enable Unlimited Strength Policy lineinfile: path: "{{ item.path }}" - regexp: '#?crypto.policy=' + regexp: "#?crypto.policy=" line: crypto.policy=unlimited with_items: "{{ java_security.files }}" when: installed_jdk_version is not match("11.*") @@ -96,7 +95,7 @@ - name: Apply workaround for Kerberos issues introduced in OpenJDK 1.8u242 and 11.0.6 (JDK-8215032) lineinfile: path: "{{ item.path }}" - regexp: '^sun.security.krb5.disableReferrals=' + regexp: "^sun.security.krb5.disableReferrals=" line: sun.security.krb5.disableReferrals=true with_items: "{{ java_security.files }}" when: > diff --git a/roles/prereqs/kerberos/tasks/main.yml b/roles/prereqs/kerberos/tasks/main.yml index 75ebc26d..e90e1d0d 100644 --- a/roles/prereqs/kerberos/tasks/main.yml +++ b/roles/prereqs/kerberos/tasks/main.yml @@ -13,7 +13,6 @@ # limitations under the License. --- - - name: Include variables include_vars: file: "{{ ansible_os_family }}.yml" diff --git a/roles/prereqs/license/defaults/main.yml b/roles/prereqs/license/defaults/main.yml index 7f2e2fe0..0b21ad94 100644 --- a/roles/prereqs/license/defaults/main.yml +++ b/roles/prereqs/license/defaults/main.yml @@ -1,3 +1,4 @@ +--- # Copyright 2023 Cloudera, Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -13,5 +14,5 @@ # limitations under the License. 
# Path to the license file on the Ansible controller -cloudera_manager_license_file: '' +cloudera_manager_license_file: "" license_local_tmp_path: /tmp/license.txt diff --git a/roles/prereqs/license/tasks/main.yml b/roles/prereqs/license/tasks/main.yml index 1f63999a..3963ae86 100644 --- a/roles/prereqs/license/tasks/main.yml +++ b/roles/prereqs/license/tasks/main.yml @@ -17,7 +17,7 @@ copy: src: "{{ cloudera_manager_license_file }}" dest: "{{ license_local_tmp_path }}" - mode: 0600 + mode: "0600" when: - cloudera_manager_license_file - "'cloudera_manager' in groups" diff --git a/roles/prereqs/local_accounts_common/defaults/main.yml b/roles/prereqs/local_accounts_common/defaults/main.yml index 8b4bb4cb..54c4abf4 100644 --- a/roles/prereqs/local_accounts_common/defaults/main.yml +++ b/roles/prereqs/local_accounts_common/defaults/main.yml @@ -15,7 +15,6 @@ --- skip_user_group_init: false local_accounts: - - user: accumulo home: /var/lib/accumulo comment: Accumulo @@ -28,15 +27,15 @@ local_accounts: - user: cloudera-scm home: /var/lib/cloudera-scm-server comment: Cloudera Manager - mode: '770' - keystore_acl: True - key_acl: True - key_password_acl: True + mode: "770" + keystore_acl: true + key_acl: true + key_password_acl: true - user: cruisecontrol home: /var/lib/cruise_control comment: Cruise Control - keystore_acl: True + keystore_acl: true - user: druid home: /var/lib/druid @@ -46,24 +45,24 @@ local_accounts: - user: flink home: /var/lib/flink comment: Flink - keystore_acl: True + keystore_acl: true - user: ssb home: /var/lib/ssb comment: SQL Stream Builder - keystore_acl: True - key_acl: True - key_password_acl: True + keystore_acl: true + key_acl: true + key_password_acl: true - user: flume home: /var/lib/flume-ng comment: Flume - keystore_acl: True + keystore_acl: true - user: hbase home: /var/lib/hbase comment: HBase - keystore_acl: True + keystore_acl: true - user: hdfs home: /var/lib/hadoop-hdfs @@ -73,59 +72,59 @@ local_accounts: - user: hive home: /var/lib/hive comment: Hive - keystore_acl: True + keystore_acl: true - user: httpfs home: /var/lib/hadoop-httpfs comment: Hadoop HTTPFS - keystore_acl: True + keystore_acl: true - user: hue home: /usr/lib/hue comment: Hue - key_acl: True - key_password_acl: True + key_acl: true + key_password_acl: true - user: impala home: /var/lib/impala comment: Impala extra_groups: [hive] - key_acl: True - key_password_acl: True + key_acl: true + key_password_acl: true - user: kafka home: /var/lib/kafka comment: Kafka - keystore_acl: True + keystore_acl: true - user: keytrustee home: /var/lib/keytrustee comment: KeyTrustee KMS - keystore_acl: True - key_acl: True - key_password_acl: True + keystore_acl: true + key_acl: true + key_password_acl: true - user: kms home: /var/lib/hadoop-kms comment: Hadoop KMS - keystore_acl: True + keystore_acl: true - user: knox home: /var/lib/knox comment: Knox extra_groups: [hadoop] - keystore_acl: True + keystore_acl: true - user: kudu home: /var/lib/kudu comment: Kudu - key_acl: True - key_password_acl: True + key_acl: true + key_password_acl: true - user: livy home: /var/lib/livy comment: Livy - keystore_acl: True + keystore_acl: true - user: mapred home: /var/lib/hadoop-mapreduce @@ -135,12 +134,12 @@ local_accounts: - user: nifi home: /var/lib/nifi command: NiFi - keystore_acl: True + keystore_acl: true - user: nifiregistry home: /var/lib/nifiregistry command: NiFi Registry - keystore_acl: True + keystore_acl: true - user: nifi home: /var/lib/nifi @@ -153,7 +152,7 @@ local_accounts: - user: oozie home: 
/var/lib/oozie comment: Oozie User - keystore_acl: True + keystore_acl: true - user: phoenix home: /var/lib/phoenix @@ -177,7 +176,7 @@ local_accounts: - user: schemaregistry home: /var/lib/schemaregistry comment: Schema Registry - keystore_acl: True + keystore_acl: true - user: sentry home: /var/lib/sentry @@ -186,12 +185,12 @@ local_accounts: - user: solr home: /var/lib/solr comment: Solr - keystore_acl: True + keystore_acl: true - user: spark home: /var/lib/spark comment: Spark - keystore_acl: True + keystore_acl: true - user: spark2 home: /var/lib/spark2 @@ -209,13 +208,13 @@ local_accounts: - user: streamsmsgmgr home: /var/lib/streams_messaging_manager comment: Streams Messaging Manager - keystore_acl: True - key_acl: True + keystore_acl: true + key_acl: true - user: streamsrepmgr home: /var/lib/streams_replication_manager comment: Streams Replication Manager - keystore_acl: True + keystore_acl: true - user: superset home: /var/lib/superset @@ -229,19 +228,19 @@ local_accounts: - user: zeppelin home: /var/lib/zeppelin comment: Zeppelin - keystore_acl: True + keystore_acl: true - user: zookeeper home: /var/lib/zookeeper comment: ZooKeeper - keystore_acl: True + keystore_acl: true postgres_accounts: - user: postgres home: /var/lib/pgsql uid: 26 comment: PostgreSQL Server - mode: '770' + mode: "770" shell: /bin/bash unencrypted_key_acl: "{{ database_tls }}" @@ -250,7 +249,7 @@ mariadb_accounts: home: /var/lib/mysql uid: 27 comment: MariaDB Server - mode: '770' + mode: "770" shell: /bin/bash unencrypted_key_acl: "{{ database_tls }}" @@ -258,7 +257,7 @@ ecs_accounts: - user: cloudera-scm home: /var/lib/cloudera-scm-server comment: Cloudera Manager - mode: '770' - keystore_acl: True - key_acl: True - key_password_acl: True + mode: "770" + keystore_acl: true + key_acl: true + key_password_acl: true diff --git a/roles/prereqs/mysql_connector/tasks/main.yml b/roles/prereqs/mysql_connector/tasks/main.yml index 5df02527..3388e424 100644 --- a/roles/prereqs/mysql_connector/tasks/main.yml +++ b/roles/prereqs/mysql_connector/tasks/main.yml @@ -13,14 +13,13 @@ # limitations under the License. --- - - name: Download MySQL Connector/J get_url: url: "{{ mysql_connector_url }}" dest: "{{ mysql_connector_download_dir }}/mysql-connector-java.zip" checksum: "{{ mysql_connector_checksum }}" - mode: 0644 - become: no + mode: "0644" + become: false run_once: true delegate_to: localhost @@ -28,7 +27,7 @@ file: path: /usr/share/java state: directory - mode: 0755 + mode: "0755" - name: Install unzip package ansible.builtin.package: @@ -49,8 +48,8 @@ copy: src: "{{ mysql_connector_local_path }}" dest: /usr/share/java/mysql-connector-java.jar - remote_src: yes - mode: 0644 + remote_src: true + mode: "0644" ignore_errors: "{{ ansible_check_mode }}" # MySql on rhel8 fix @@ -75,7 +74,7 @@ dest: /usr/include/mysql/my_config.h ignore_errors: "{{ ansible_check_mode }}" -## TODO Fix for RHEL8 + ## TODO Fix for RHEL8 - name: Install Mysql packages for python - PyMySQL shell: /usr/local/bin/pip install PyMySQL --force-reinstall --ignore-installed ignore_errors: true diff --git a/roles/prereqs/oracle_connector/tasks/main.yml b/roles/prereqs/oracle_connector/tasks/main.yml index e455a107..7dc79586 100644 --- a/roles/prereqs/oracle_connector/tasks/main.yml +++ b/roles/prereqs/oracle_connector/tasks/main.yml @@ -13,10 +13,8 @@ # limitations under the License. 
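Note on the connector tasks above: the artifact is fetched once on the controller (run_once plus delegate_to: localhost) and then copied out to every host. A minimal sketch of that staging flow; the URL and checksum values are placeholders:

- name: Stage a JDBC connector
  hosts: all
  become: true
  vars:
    connector_url: "https://repo.example.com/mysql-connector-java.zip" # placeholder
    connector_checksum: "sha256:0000000000000000000000000000000000000000000000000000000000000000" # placeholder
  tasks:
    - name: Download the connector once, on the Ansible controller
      ansible.builtin.get_url:
        url: "{{ connector_url }}"
        dest: /tmp/mysql-connector-java.zip
        checksum: "{{ connector_checksum }}"
        mode: "0644"
      become: false
      run_once: true
      delegate_to: localhost

    - name: Copy the connector to every host
      ansible.builtin.copy:
        src: /tmp/mysql-connector-java.zip
        dest: /usr/share/java/mysql-connector-java.zip
        mode: "0644"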
--- - - name: Setup the Oracle JDBC Driver block: - - name: Download Oracle Connector maven_artifact: group_id: "{{ oracle_connector_group_id }}" @@ -24,7 +22,7 @@ version: "{{ oracle_connector_version }}" dest: "{{ local_temp_dir }}/{{ oracle_connector_artifact_id }}-connector-java-{{ oracle_connector_version }}.jar" repository_url: "{{ oracle_connector_maven_url }}" - become: no + become: false run_once: true connection: local delegate_to: localhost @@ -33,13 +31,13 @@ file: path: /usr/share/java state: directory - mode: 0755 + mode: "0755" - name: Copy Oracle Connector jar file to correct location copy: src: "{{ local_temp_dir }}/{{ oracle_connector_artifact_id }}-connector-java-{{ oracle_connector_version }}.jar" dest: /usr/share/java/oracle-connector-java.jar - mode: 0644 + mode: "0644" when: - not (skip_oracle_jdbc_driver_distribution | default(False)) @@ -50,7 +48,7 @@ file: path: /usr/share/oracle/instantclient/lib state: directory - mode: 0755 + mode: "0755" when: - oracle_instantclient_basic_zip is defined - oracle_instantclient_sdk_zip is defined @@ -68,14 +66,14 @@ unarchive: src: "{{ oracle_instantclient_basic_zip }}" dest: /usr/share/oracle/instantclient/lib - extra_opts: [ "-j" ] + extra_opts: ["-j"] when: oracle_instantclient_basic_zip is defined - name: Unarchive sdk instantclient unarchive: src: "{{ oracle_instantclient_sdk_zip }}" dest: /usr/share/oracle/instantclient/lib - extra_opts: [ "-j" ] + extra_opts: ["-j"] when: oracle_instantclient_sdk_zip is defined - name: Install the libaio package diff --git a/roles/prereqs/os/defaults/main.yml b/roles/prereqs/os/defaults/main.yml index c511c6ed..e1820128 100644 --- a/roles/prereqs/os/defaults/main.yml +++ b/roles/prereqs/os/defaults/main.yml @@ -15,11 +15,11 @@ --- kernel_flags: - - { key: vm.swappiness, value: '1' } - - { key: vm.overcommit_memory, value: '1' } - - { key: net.ipv6.conf.all.disable_ipv6, value: '1'} - - { key: net.ipv6.conf.default.disable_ipv6, value: '1' } - - { key: net.ipv6.conf.lo.disable_ipv6, value: '1' } + - { key: vm.swappiness, value: "1" } + - { key: vm.overcommit_memory, value: "1" } + - { key: net.ipv6.conf.all.disable_ipv6, value: "1" } + - { key: net.ipv6.conf.default.disable_ipv6, value: "1" } + - { key: net.ipv6.conf.lo.disable_ipv6, value: "1" } unnecessary_services: - bluetooth @@ -28,6 +28,6 @@ unnecessary_services: - ip6tables - postfix - tuned - - firewalld # Added for ECS deployments on RedHat + - firewalld # Added for ECS deployments on RedHat selinux_state: permissive diff --git a/roles/prereqs/os/handlers/main.yml b/roles/prereqs/os/handlers/main.yml index cf746d74..dcdc8ba4 100644 --- a/roles/prereqs/os/handlers/main.yml +++ b/roles/prereqs/os/handlers/main.yml @@ -13,7 +13,6 @@ # limitations under the License. 
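Note on the Instant Client hunks above: unarchive passes extra_opts straight through to unzip, so "-j" junks the archive's internal paths and every file lands directly in dest. A sketch under that assumption; the local archive path is illustrative:

- name: Unpack an Instant Client archive
  hosts: all
  become: true
  tasks:
    - name: Unarchive, junking the internal directory layout
      ansible.builtin.unarchive:
        src: /tmp/instantclient-basic.zip # assumed local archive
        dest: /usr/share/oracle/instantclient/lib
        # "-j" is handed to unzip so files land directly in dest.
        extra_opts: ["-j"]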
--- - - name: restart rngd service: name: "{{ rngd_service }}" diff --git a/roles/prereqs/os/tasks/main-Debian.yml b/roles/prereqs/os/tasks/main-Debian.yml index a68c669f..9e6c6d65 100644 --- a/roles/prereqs/os/tasks/main-Debian.yml +++ b/roles/prereqs/os/tasks/main-Debian.yml @@ -15,7 +15,7 @@ --- - name: Refresh Package cache ansible.builtin.apt: - update_cache: yes + update_cache: true - name: Ensure pip is upgraded ansible.builtin.package: @@ -43,4 +43,4 @@ name: fs.protected_regular value: 0 state: present - reload: yes + reload: true diff --git a/roles/prereqs/os/tasks/main-RedHat.yml b/roles/prereqs/os/tasks/main-RedHat.yml index 1f570e26..81126420 100644 --- a/roles/prereqs/os/tasks/main-RedHat.yml +++ b/roles/prereqs/os/tasks/main-RedHat.yml @@ -27,7 +27,7 @@ ansible.builtin.package: lock_timeout: 180 name: python3 - update_cache: yes + update_cache: true state: present - name: Ensure pip3 is upgraded @@ -49,7 +49,7 @@ ansible.builtin.package: lock_timeout: 180 name: python2 - update_cache: yes + update_cache: true state: present - name: Ensure Python symlink available for Cloudera Manager and Ranger @@ -62,7 +62,7 @@ selinux: policy: targeted state: "{{ selinux_state }}" - ignore_errors: yes + ignore_errors: true - name: Disable Transparent Huge Pages until reboot shell: echo never > /sys/kernel/mm/transparent_hugepage/{{ item }} @@ -72,24 +72,24 @@ - name: Disable Transparent Huge Pages permanently lineinfile: - backup: yes + backup: true path: /etc/rc.d/rc.local line: echo never > /sys/kernel/mm/transparent_hugepage/{{ item }} - mode: 0755 + mode: "0755" with_items: - enabled - defrag - name: Disable Transparent Huge Pages in GRUB config lineinfile: - backup: yes + backup: true state: present path: /etc/default/grub - backrefs: yes + backrefs: true regexp: '^(GRUB_CMDLINE_LINUX=(?!.*hugepage)\"[^\"]+)(\".*)' - line: '\1 transparent_hugepage=never\2' - ignore_errors: yes + line: "\\1 transparent_hugepage=never\\2" + ignore_errors: true - name: Rebuild GRUB shell: grub2-mkconfig -o /boot/grub2/grub.cfg - ignore_errors: yes + ignore_errors: true diff --git a/roles/prereqs/os/tasks/main.yml b/roles/prereqs/os/tasks/main.yml index 378b2cf9..e83563b1 100644 --- a/roles/prereqs/os/tasks/main.yml +++ b/roles/prereqs/os/tasks/main.yml @@ -13,7 +13,6 @@ # limitations under the License. 
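Note on the GRUB hunk above: backrefs plus a negative lookahead make the edit append-once, so reruns do not stack duplicate kernel flags. A standalone sketch of the same pattern; the rebuild handler here is a hypothetical stand-in for the task in the role:

- name: Disable THP on the kernel command line
  hosts: all
  become: true
  tasks:
    - name: Append transparent_hugepage=never exactly once
      ansible.builtin.lineinfile:
        path: /etc/default/grub
        backup: true
        backrefs: true
        # The negative lookahead skips lines that already carry the flag.
        regexp: '^(GRUB_CMDLINE_LINUX=(?!.*hugepage)\"[^\"]+)(\".*)'
        line: "\\1 transparent_hugepage=never\\2"
      notify: Rebuild GRUB
  handlers:
    - name: Rebuild GRUB
      ansible.builtin.shell: grub2-mkconfig -o /boot/grub2/grub.cfg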
--- - - name: Include variables include_vars: file: "{{ ansible_os_family }}.yml" @@ -28,8 +27,8 @@ name: "{{ flag.key }}" value: "{{ flag.value }}" state: present - sysctl_set: yes - reload: yes + sysctl_set: true + reload: true loop: "{{ kernel_flags }}" loop_control: loop_var: flag @@ -43,8 +42,8 @@ ansible.builtin.service: name: "{{ item }}" state: stopped - enabled: no - ignore_errors: yes # fails sometimes with systemd on centos7 where ip6tables are not present + enabled: false + ignore_errors: true # fails sometimes with systemd on centos7 where ip6tables are not present when: "item + '.service' in ansible_facts.services" with_items: "{{ unnecessary_services }}" @@ -74,7 +73,7 @@ service: name: "{{ ntp_service }}" state: started - enabled: yes + enabled: true - name: Install nscd service ansible.builtin.package: @@ -86,13 +85,13 @@ service: name: "{{ nscd_service }}" state: started - enabled: yes + enabled: true - name: Disable nscd caches for services 'passwd', 'group', 'netgroup' replace: path: /etc/nscd.conf - regexp: '^(.*enable-cache.*(passwd|group|netgroup).*)yes$' - replace: '\1no' + regexp: "^(.*enable-cache.*(passwd|group|netgroup).*)yes$" + replace: "\\1no" notify: - restart nscd diff --git a/roles/prereqs/os/tasks/rngd.yml b/roles/prereqs/os/tasks/rngd.yml index 8888312d..633bcd4e 100644 --- a/roles/prereqs/os/tasks/rngd.yml +++ b/roles/prereqs/os/tasks/rngd.yml @@ -13,17 +13,16 @@ # limitations under the License. --- - - name: Install rngd package: name: "{{ rngd_package }}" state: present - update_cache: yes + update_cache: true - name: Enable rngd service: name: "{{ rngd_service }}" - enabled: yes + enabled: true - name: Configure rngd to use /dev/urandom (RHEL/CentOS 7) template: @@ -31,7 +30,7 @@ dest: /etc/systemd/system/rngd.service owner: root group: root - mode: 0644 + mode: "0644" notify: - restart rngd when: ansible_os_family == 'RedHat' and ansible_distribution_major_version|int >= 7 diff --git a/roles/prereqs/postgresql_connector/tasks/main.yml b/roles/prereqs/postgresql_connector/tasks/main.yml index 3fbf6a18..009a628b 100644 --- a/roles/prereqs/postgresql_connector/tasks/main.yml +++ b/roles/prereqs/postgresql_connector/tasks/main.yml @@ -13,14 +13,13 @@ # limitations under the License. --- - - name: Download PostgreSQL Connector get_url: url: "{{ postgresql_connector_url }}" dest: "{{ local_temp_dir }}/postgresql-connector-java.jar" checksum: "{{ postgresql_connector_checksum }}" - mode: 0644 - become: no + mode: "0644" + become: false run_once: true delegate_to: localhost @@ -28,22 +27,22 @@ file: path: /usr/share/java state: directory - mode: 0755 + mode: "0755" - name: Copy PostgreSQL Connector jar file to correct location copy: src: "{{ local_temp_dir }}/postgresql-connector-java.jar" dest: /usr/share/java/postgresql-connector-java.jar - mode: 0644 + mode: "0644" ignore_errors: "{{ ansible_check_mode }}" # SSB will need the python3-psycopg2 connector - name: Create python3-psycopg2 directory file: - path: "/usr/share/python3" - state: directory - mode: '777' + path: "/usr/share/python3" + state: directory + mode: "777" when: install_py3_psycopg2 == true - name: Install python3-psycopg2 diff --git a/roles/prereqs/pvc_ecs/tasks/main.yml b/roles/prereqs/pvc_ecs/tasks/main.yml index a9e9e13e..527a8eed 100644 --- a/roles/prereqs/pvc_ecs/tasks/main.yml +++ b/roles/prereqs/pvc_ecs/tasks/main.yml @@ -1,5 +1,4 @@ --- - # Copyright 2023 Cloudera, Inc. 
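Note on the kernel-flag loop above: each entry is applied and persisted through the sysctl module, with loop_control renaming the loop variable. A minimal sketch using the ansible.posix collection's sysctl module and a trimmed-down flags list:

- name: Apply kernel flags
  hosts: all
  become: true
  vars:
    kernel_flags:
      - { key: vm.swappiness, value: "1" }
      - { key: vm.overcommit_memory, value: "1" }
  tasks:
    - name: Set and persist each sysctl value
      ansible.posix.sysctl:
        name: "{{ flag.key }}"
        value: "{{ flag.value }}"
        state: present
        sysctl_set: true
        reload: true
      loop: "{{ kernel_flags }}"
      loop_control:
        loop_var: flag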
# # Licensed under the Apache License, Version 2.0 (the "License"); @@ -39,7 +38,7 @@ ansible.builtin.package: lock_timeout: 180 name: "{{ __iptables_item }}" - update_cache: yes + update_cache: true state: present loop: - iptables @@ -54,7 +53,7 @@ - name: Flush iptables ansible.builtin.iptables: - flush: yes + flush: true table: "{{ __iptables_flush_item }}" loop: - filter @@ -65,14 +64,14 @@ loop_control: loop_var: __iptables_flush_item - ## see https://docs.rke2.io/known_issues +## see https://docs.rke2.io/known_issues - name: Set NetworkManager to ignore any ECS calico & flannel interfaces ansible.builtin.copy: src: networkmanager.conf dest: /etc/NetworkManager/conf.d/rke2-canal.config owner: root group: root - mode: 0644 + mode: "0644" when: - ansible_distribution_major_version|int >= 7 - ansible_facts.services["NetworkManager.service"]['status'] != "not-found" diff --git a/roles/prereqs/user_accounts/tasks/main.yml b/roles/prereqs/user_accounts/tasks/main.yml index 97724c8b..d629ff57 100644 --- a/roles/prereqs/user_accounts/tasks/main.yml +++ b/roles/prereqs/user_accounts/tasks/main.yml @@ -13,7 +13,6 @@ # limitations under the License. --- - - block: - name: Create hadoop group group: @@ -45,7 +44,7 @@ groups: "{{ account.extra_groups | default([]) }}" uid: "{{ account.uid | default(omit) }}" shell: "{{ account.shell | default('/sbin/nologin') }}" - append: yes + append: true loop: "{{ local_accounts }}" loop_control: loop_var: account diff --git a/roles/prereqs/user_accounts_ecs/defaults/main.yml b/roles/prereqs/user_accounts_ecs/defaults/main.yml index 9a629bbe..59d83eed 100644 --- a/roles/prereqs/user_accounts_ecs/defaults/main.yml +++ b/roles/prereqs/user_accounts_ecs/defaults/main.yml @@ -15,11 +15,10 @@ --- skip_user_group_init: false local_accounts: - - user: cloudera-scm home: /var/lib/cloudera-scm-server comment: Cloudera Manager - mode: '770' - keystore_acl: True - key_acl: True - key_password_acl: True + mode: "770" + keystore_acl: true + key_acl: true + key_password_acl: true diff --git a/roles/prereqs/user_accounts_ecs/tasks/main.yml b/roles/prereqs/user_accounts_ecs/tasks/main.yml index ffda52a8..6486274a 100644 --- a/roles/prereqs/user_accounts_ecs/tasks/main.yml +++ b/roles/prereqs/user_accounts_ecs/tasks/main.yml @@ -13,9 +13,7 @@ # limitations under the License. 
--- - - block: - - name: Create hadoop group group: name: hadoop @@ -38,7 +36,7 @@ groups: "{{ account.extra_groups | default([]) }}" uid: "{{ account.uid | default(omit) }}" shell: "{{ account.shell | default('/sbin/nologin') }}" - append: yes + append: true loop: "{{ local_accounts }}" loop_control: loop_var: account diff --git a/roles/security/tls_generate_csr/molecule/default/molecule.yml b/roles/security/tls_generate_csr/molecule/default/molecule.yml index 6ae1073a..bb330ab9 100644 --- a/roles/security/tls_generate_csr/molecule/default/molecule.yml +++ b/roles/security/tls_generate_csr/molecule/default/molecule.yml @@ -25,7 +25,7 @@ platforms: provisioner: name: ansible options: - vvv: False + vvv: false scenario: converge_sequence: - converge diff --git a/roles/security/tls_generate_csr/molecule/default/verify.yml b/roles/security/tls_generate_csr/molecule/default/verify.yml index 8fa77931..d98d9da8 100644 --- a/roles/security/tls_generate_csr/molecule/default/verify.yml +++ b/roles/security/tls_generate_csr/molecule/default/verify.yml @@ -18,6 +18,6 @@ - name: Verify hosts: all tasks: - - name: Example assertion - assert: - that: true + - name: Example assertion + assert: + that: true diff --git a/roles/security/tls_generate_csr/tasks/acls-ecs.yml b/roles/security/tls_generate_csr/tasks/acls-ecs.yml index 72d7b47f..c16c68eb 100644 --- a/roles/security/tls_generate_csr/tasks/acls-ecs.yml +++ b/roles/security/tls_generate_csr/tasks/acls-ecs.yml @@ -13,7 +13,6 @@ # limitations under the License. --- - - name: Install acls package ansible.builtin.package: lock_timeout: "{{ (ansible_os_family == 'RedHat') | ternary(60, omit) }}" @@ -24,7 +23,7 @@ file: state: file path: "{{ tls_keystore_path }}" - mode: 0640 + mode: "0640" owner: root group: hadoop @@ -45,7 +44,7 @@ file: state: file path: "{{ tls_keystore_path_generic }}" - mode: 0640 + mode: "0640" owner: root group: hadoop @@ -66,7 +65,7 @@ file: state: file path: "{{ item }}" - mode: 0440 + mode: "0440" owner: root group: root loop: @@ -103,7 +102,7 @@ file: state: file path: "{{ tls_key_password_file }}" - mode: 0440 + mode: "0440" owner: root group: root @@ -124,7 +123,7 @@ file: state: file path: "{{ item }}" - mode: 0440 + mode: "0440" owner: root group: root loop: diff --git a/roles/security/tls_generate_csr/tasks/acls.yml b/roles/security/tls_generate_csr/tasks/acls.yml index d853471c..9d7247f9 100644 --- a/roles/security/tls_generate_csr/tasks/acls.yml +++ b/roles/security/tls_generate_csr/tasks/acls.yml @@ -13,7 +13,6 @@ # limitations under the License. 
--- - - name: Install acls package ansible.builtin.package: lock_timeout: "{{ (ansible_os_family == 'RedHat') | ternary(60, omit) }}" @@ -24,7 +23,7 @@ file: state: file path: "{{ tls_keystore_path }}" - mode: 0640 + mode: "0640" owner: root group: hadoop @@ -45,7 +44,7 @@ file: state: file path: "{{ tls_keystore_path_generic }}" - mode: 0640 + mode: "0640" owner: root group: hadoop @@ -66,7 +65,7 @@ file: state: file path: "{{ item }}" - mode: 0440 + mode: "0440" owner: root group: root loop: @@ -103,7 +102,7 @@ file: state: file path: "{{ tls_key_password_file }}" - mode: 0440 + mode: "0440" owner: root group: root @@ -124,7 +123,7 @@ file: state: file path: "{{ item }}" - mode: 0440 + mode: "0440" owner: root group: root loop: diff --git a/roles/security/tls_generate_csr/tasks/main.yml b/roles/security/tls_generate_csr/tasks/main.yml index ab20f4b5..6e5785ae 100644 --- a/roles/security/tls_generate_csr/tasks/main.yml +++ b/roles/security/tls_generate_csr/tasks/main.yml @@ -13,12 +13,11 @@ # limitations under the License. --- - - name: Prepare directories for TLS file: state: directory path: "{{ dir }}" - mode: 0755 + mode: "0755" owner: root loop: - "{{ base_dir_security }}" @@ -57,7 +56,7 @@ -keystore {{ tls_keystore_path }} -alias {{ keystore_alias | default(inventory_hostname) }} -storepass {{ tls_keystore_password }} | grep PrivateKeyEntry - changed_when: False + changed_when: false - name: Export temporary PKCS12 keystore shell: @@ -128,7 +127,7 @@ src: csr.cnf.j2 dest: "{{ tls_csr_config_path }}" owner: root - mode: 0644 + mode: "0644" - name: Generate CSR shell: @@ -146,4 +145,4 @@ fetch: src: "{{ tls_csr_path }}" dest: "{{ local_csrs_dir }}/" - flat: yes + flat: true diff --git a/roles/security/tls_install_certs/tasks/main.yml b/roles/security/tls_install_certs/tasks/main.yml index 920086eb..680bf1c9 100644 --- a/roles/security/tls_install_certs/tasks/main.yml +++ b/roles/security/tls_install_certs/tasks/main.yml @@ -13,7 +13,6 @@ # limitations under the License. 
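Note on the keytool task above: grep's exit code makes the task fail when no PrivateKeyEntry exists, while changed_when: false records the read-only check as unchanged. A sketch with placeholder keystore path and password:

- name: Verify the host keystore
  hosts: all
  become: true
  vars:
    tls_keystore_path: /opt/cloudera/security/pki/keystore.jks # placeholder
    tls_keystore_password: changeit # placeholder
  tasks:
    - name: Confirm a private key entry is present
      ansible.builtin.shell: >-
        keytool -list -keystore {{ tls_keystore_path }}
        -alias {{ inventory_hostname }}
        -storepass {{ tls_keystore_password }} | grep PrivateKeyEntry
      # A read-only inspection should never count as a change.
      changed_when: false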
--- - - name: Set fact for signed TLS certificates directory ansible.builtin.set_fact: tls_signed_certs_dir: "{{ local_certs_dir }}" @@ -53,8 +52,8 @@ fetch: src: "{{ cert.path }}" dest: "{{ tls_signed_certs_dir }}/{{ cert.alias }}.pem" - flat: yes - run_once: yes + flat: true + run_once: true delegate_to: "{{ cert.remote_host }}" loop: "{{ tls_ca_certs }}" loop_control: @@ -62,7 +61,7 @@ when: cert.remote_host is defined - name: Check if signed cert is available - become: no + become: false delegate_to: localhost stat: path: "{{ tls_signed_certs_dir }}/{{ inventory_hostname }}.pem" @@ -85,7 +84,7 @@ copy: src: "{{ tls_signed_certs_dir }}/{{ inventory_hostname }}.pem" dest: "{{ base_dir_security_pki }}/" - mode: 0644 + mode: "0644" when: not signed_cert_remote.stat.exists - name: Copy CA certs to hosts @@ -97,7 +96,7 @@ else cacert.path }} dest: "{{ base_dir_security_pki }}/{{ cacert.alias }}.pem" - mode: 0644 + mode: "0644" loop: "{{ tls_ca_certs }}" loop_control: loop_var: cacert @@ -118,7 +117,7 @@ src: "{{ tls_cert_path }}" dest: "{{ tls_cert_path_generic }}" state: hard - mode: 0644 + mode: "0644" owner: root group: root @@ -183,23 +182,23 @@ - name: Update OS trust stores block: - - copy: - src: "{{ base_dir_security_pki }}/{{ cacert.alias }}.pem" - dest: /etc/pki/ca-trust/source/anchors/ - mode: 0644 - remote_src: yes - loop: "{{ tls_ca_certs }}" - loop_control: - loop_var: cacert - - shell: - cmd: update-ca-trust extract + - copy: + src: "{{ base_dir_security_pki }}/{{ cacert.alias }}.pem" + dest: /etc/pki/ca-trust/source/anchors/ + mode: "0644" + remote_src: true + loop: "{{ tls_ca_certs }}" + loop_control: + loop_var: cacert + - shell: + cmd: update-ca-trust extract when: ansible_os_family == "RedHat" - name: Find system cacerts file find: paths: "{{ jdk_java_cacerts_paths }}" pattern: "cacerts" - follow: yes + follow: true file_type: any register: java_cacerts when: diff --git a/roles/security/tls_nifi/tasks/main.yml b/roles/security/tls_nifi/tasks/main.yml index 4f701dd7..ddfc4fe8 100644 --- a/roles/security/tls_nifi/tasks/main.yml +++ b/roles/security/tls_nifi/tasks/main.yml @@ -13,7 +13,6 @@ # limitations under the License. --- - - name: Ensure the NiFi home directory exists file: path: "{{ nifi_dir_path }}" diff --git a/roles/security/tls_signing/tasks/csr_signing_local.yml b/roles/security/tls_signing/tasks/csr_signing_local.yml index d66f4799..02acb4bb 100644 --- a/roles/security/tls_signing/tasks/csr_signing_local.yml +++ b/roles/security/tls_signing/tasks/csr_signing_local.yml @@ -13,12 +13,11 @@ # limitations under the License. --- - - name: Copy CSRs to CA server ansible.builtin.copy: src: "{{ local_csrs_dir }}/{{ inventory_hostname }}.csr" dest: "{{ ca_server_intermediate_path_csr }}/" - mode: 0644 + mode: "0644" delegate_to: "{{ groups.ca_server | first }}" connection: ssh @@ -46,6 +45,6 @@ ansible.builtin.fetch: src: "{{ ca_server_intermediate_path_certs }}/{{ inventory_hostname }}.pem" dest: "{{ local_certs_dir }}/" - flat: yes + flat: true delegate_to: "{{ groups.ca_server | first }}" connection: ssh diff --git a/roles/security/tls_signing/tasks/main.yml b/roles/security/tls_signing/tasks/main.yml index dba63eac..4cb2e9f2 100644 --- a/roles/security/tls_signing/tasks/main.yml +++ b/roles/security/tls_signing/tasks/main.yml @@ -13,7 +13,6 @@ # limitations under the License. 
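Note on the OS trust-store block above: on RedHat-family hosts the CA certificate is dropped into the anchors directory and the consolidated store is rebuilt with update-ca-trust. A sketch of that pair of steps; the certificate path is a placeholder already present on the host:

- name: Trust an internal CA OS-wide
  hosts: all
  become: true
  tasks:
    - name: Drop the CA certificate into the anchors directory
      ansible.builtin.copy:
        src: /opt/cloudera/security/pki/ca-cert.pem # placeholder path on the host
        dest: /etc/pki/ca-trust/source/anchors/
        mode: "0644"
        remote_src: true
      when: ansible_os_family == "RedHat"

    - name: Rebuild the consolidated trust store
      ansible.builtin.shell:
        cmd: update-ca-trust extract
      when: ansible_os_family == "RedHat"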
--- - - name: Sign CSRs with locally installed CA include_tasks: csr_signing_local.yml when: "'ca_server' in groups" diff --git a/roles/security/tls_signing/tasks/signing_freeipa.yml b/roles/security/tls_signing/tasks/signing_freeipa.yml index 30322805..bca23231 100644 --- a/roles/security/tls_signing/tasks/signing_freeipa.yml +++ b/roles/security/tls_signing/tasks/signing_freeipa.yml @@ -13,7 +13,6 @@ # limitations under the License. --- - # ZOOKEEPER-3832 # - name: Sign the private key # shell: @@ -33,14 +32,14 @@ - name: Sign the private key shell: cmd: | - kinit -kt /etc/krb5.keytab "host/{{ inventory_hostname }}" - trap kdestroy EXIT - ipa cert-request \ - "{{ base_dir_security_pki }}/{{ inventory_hostname }}.csr" \ - --principal "host/{{ inventory_hostname }}" \ - --certificate-out "{{ base_dir_security_pki }}/{{ inventory_hostname }}.pem" || ( - rm "{{ base_dir_security_pki }}/{{ inventory_hostname }}.pem" - exit 1 - ) - chmod 644 "{{ base_dir_security_pki }}/{{ inventory_hostname }}.pem" + kinit -kt /etc/krb5.keytab "host/{{ inventory_hostname }}" + trap kdestroy EXIT + ipa cert-request \ + "{{ base_dir_security_pki }}/{{ inventory_hostname }}.csr" \ + --principal "host/{{ inventory_hostname }}" \ + --certificate-out "{{ base_dir_security_pki }}/{{ inventory_hostname }}.pem" || ( + rm "{{ base_dir_security_pki }}/{{ inventory_hostname }}.pem" + exit 1 + ) + chmod 644 "{{ base_dir_security_pki }}/{{ inventory_hostname }}.pem" creates: "{{ base_dir_security_pki }}/{{ inventory_hostname }}.pem" diff --git a/roles/teardown/meta/main.yml b/roles/teardown/meta/main.yml index 51a44df1..0b9e5a4d 100644 --- a/roles/teardown/meta/main.yml +++ b/roles/teardown/meta/main.yml @@ -1,3 +1,4 @@ +--- # Copyright 2023 Cloudera, Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -11,8 +12,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - - dependencies: - role: cloudera.cluster.cloudera_manager.common - role: cloudera.cluster.deployment.definition diff --git a/roles/teardown/tasks/main.yml b/roles/teardown/tasks/main.yml index 60437a43..997c50b1 100644 --- a/roles/teardown/tasks/main.yml +++ b/roles/teardown/tasks/main.yml @@ -13,11 +13,10 @@ # limitations under the License. 
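Note on the FreeIPA signing task above: the creates: argument is what keeps the shell step idempotent; once the certificate file exists, the whole kinit/ipa cert-request sequence is skipped. A sketch using an assumed PKI directory:

- name: Request a host certificate from FreeIPA
  hosts: all
  become: true
  vars:
    pki_dir: /opt/cloudera/security/pki # assumed directory
  tasks:
    - name: Sign the CSR once per host
      ansible.builtin.shell:
        cmd: |
          kinit -kt /etc/krb5.keytab "host/{{ inventory_hostname }}"
          trap kdestroy EXIT
          ipa cert-request "{{ pki_dir }}/{{ inventory_hostname }}.csr" \
            --principal "host/{{ inventory_hostname }}" \
            --certificate-out "{{ pki_dir }}/{{ inventory_hostname }}.pem"
        # Once the certificate exists, the whole block is skipped.
        creates: "{{ pki_dir }}/{{ inventory_hostname }}.pem"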
--- - - name: Include config cluster defaults for deployment ansible.builtin.include_role: name: cloudera.cluster.config.cluster.common - public: yes + public: true - name: Ensure properly configured assert: @@ -83,7 +82,7 @@ stop_cluster_before_delete: true cluster: "{{ default_cluster_compute | combine(_cluster) }}" run_once: true - ignore_errors: '{{ ansible_check_mode }}' + ignore_errors: "{{ ansible_check_mode }}" loop: "{{ definition.clusters }}" loop_control: label: "{{ cluster.name }}" @@ -101,7 +100,7 @@ stop_cluster_before_delete: true cluster: "{{ default_cluster_base | combine(_cluster) }}" run_once: true - ignore_errors: '{{ ansible_check_mode }}' + ignore_errors: "{{ ansible_check_mode }}" loop: "{{ definition.clusters }}" loop_control: label: "{{ cluster.name }}" @@ -119,7 +118,7 @@ stop_cluster_before_delete: true cluster: "{{ default_cluster_kts | combine(_cluster) }}" run_once: true - ignore_errors: '{{ ansible_check_mode }}' + ignore_errors: "{{ ansible_check_mode }}" loop: "{{ definition.clusters }}" loop_control: label: "{{ cluster.name }}" @@ -140,7 +139,7 @@ when: - teardown_everything | default(false) or teardown_cms | default(false) - not (teardown_skip_cluster_deletion | default(false)) - ignore_errors: '{{ ansible_check_mode }}' + ignore_errors: "{{ ansible_check_mode }}" - name: Teardown Cloudera agent include_tasks: teardown_cloudera_agent.yml @@ -243,7 +242,7 @@ service: name: haproxy state: stopped - enabled: no + enabled: false when: - "'haproxy' in group_names" - teardown_everything | default(false) diff --git a/roles/teardown/tasks/teardown_cdsw.yml b/roles/teardown/tasks/teardown_cdsw.yml index 9ccec7e0..e0b939a8 100644 --- a/roles/teardown/tasks/teardown_cdsw.yml +++ b/roles/teardown/tasks/teardown_cdsw.yml @@ -13,7 +13,6 @@ # limitations under the License. --- - - name: Generate merged configs (base) include_role: name: cloudera.cluster.config.cluster.base diff --git a/roles/teardown/tasks/teardown_cloudera_agent.yml b/roles/teardown/tasks/teardown_cloudera_agent.yml index 10cfda01..e54e5f66 100644 --- a/roles/teardown/tasks/teardown_cloudera_agent.yml +++ b/roles/teardown/tasks/teardown_cloudera_agent.yml @@ -1,3 +1,4 @@ +--- # Copyright 2023 Cloudera, Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/roles/teardown/tasks/teardown_cloudera_server.yml b/roles/teardown/tasks/teardown_cloudera_server.yml index 53bc9ad8..7d1b2b42 100644 --- a/roles/teardown/tasks/teardown_cloudera_server.yml +++ b/roles/teardown/tasks/teardown_cloudera_server.yml @@ -1,3 +1,4 @@ +--- # Copyright 2023 Cloudera, Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -16,7 +17,7 @@ service: name: cloudera-scm-server state: stopped - enabled: no + enabled: false ignore_errors: true - name: Remove Cloudera manager package diff --git a/roles/teardown/tasks/teardown_cluster.yml b/roles/teardown/tasks/teardown_cluster.yml index 3566261b..2085a469 100644 --- a/roles/teardown/tasks/teardown_cluster.yml +++ b/roles/teardown/tasks/teardown_cluster.yml @@ -13,7 +13,6 @@ # limitations under the License. 
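Note on the include_role call above: public: true exposes the included role's defaults and vars to the rest of the play, which the later teardown tasks depend on. A minimal sketch; the debug variable is assumed to be defined by the included role:

- name: Load shared cluster config defaults
  hosts: localhost
  gather_facts: false
  tasks:
    - name: Include the common config role and expose its vars
      ansible.builtin.include_role:
        name: cloudera.cluster.config.cluster.common
        public: true

    - name: Later tasks can now read the role's defaults
      ansible.builtin.debug:
        var: default_cluster_base # assumed to come from the role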
--- - - name: Generate merged configs (base, compute) include_role: name: cloudera.cluster.config.cluster.base @@ -66,5 +65,5 @@ loop_var: database run_once: true when: - - cluster.services is defined - - database.key in cluster.services + - cluster.services is defined + - database.key in cluster.services diff --git a/roles/teardown/tasks/teardown_cms.yml b/roles/teardown/tasks/teardown_cms.yml index b2b83e9a..92bb1c63 100644 --- a/roles/teardown/tasks/teardown_cms.yml +++ b/roles/teardown/tasks/teardown_cms.yml @@ -13,7 +13,6 @@ # limitations under the License. --- - - name: Generate merged configs include_role: name: cloudera.cluster.config.services.mgmt diff --git a/roles/teardown/tasks/teardown_cms_role_directories.yml b/roles/teardown/tasks/teardown_cms_role_directories.yml index 857cd39b..2906a0e0 100644 --- a/roles/teardown/tasks/teardown_cms_role_directories.yml +++ b/roles/teardown/tasks/teardown_cms_role_directories.yml @@ -13,7 +13,6 @@ # limitations under the License. --- - - name: Remove service role directories include_tasks: teardown_cms_role_directory.yml loop: "{{ service.directories_confs }}" diff --git a/roles/teardown/tasks/teardown_cms_role_directory.yml b/roles/teardown/tasks/teardown_cms_role_directory.yml index 8a7caf7f..3ac34252 100644 --- a/roles/teardown/tasks/teardown_cms_role_directory.yml +++ b/roles/teardown/tasks/teardown_cms_role_directory.yml @@ -13,7 +13,6 @@ # limitations under the License. --- - - name: Remove service role directory file: path: "{{ directory }}" diff --git a/roles/teardown/tasks/teardown_database.yml b/roles/teardown/tasks/teardown_database.yml index 319ce563..b4d725a2 100644 --- a/roles/teardown/tasks/teardown_database.yml +++ b/roles/teardown/tasks/teardown_database.yml @@ -13,7 +13,6 @@ # limitations under the License. --- - - set_fact: has_oracle_client: >- {{ @@ -42,7 +41,7 @@ name: "{{ database.value.name }}" state: absent delegate_to: "{{ database.value.host }}" - become: yes + become: true when: - database.value.type in ['mariadb', 'mysql'] - database.value.host in groups.db_server @@ -52,7 +51,7 @@ name: "{{ database.value.name }}" state: absent delegate_to: "{{ database.value.host }}" - become: yes + become: true become_user: postgres when: - database.value.type == 'postgresql' @@ -63,7 +62,7 @@ name: "{{ database.value.user }}" state: absent delegate_to: "{{ database.value.host }}" - become: yes + become: true when: - database.value.type in ['mariadb', 'mysql'] - database.value.host in groups.db_server @@ -73,7 +72,7 @@ name: "{{ database.value.user }}" state: absent delegate_to: "{{ database.value.host }}" - become: yes + become: true become_user: postgres when: - database.value.type == 'postgresql' @@ -89,7 +88,7 @@ {{ lookup('file', 'oracle_drop.sql') }} EOF sqlplus "{{ database.value.user }}/{{ database.value.password }}@{{ database.value.host }}:{{ database.value.port | default(1521) }}/{{ database.value.name }}" @$ORACLE_TMP - delegate_to: "{{ teardown_oracle_client_host }}" - become: yes + delegate_to: "{{ teardown_oracle_client_host }}" + become: true become_user: "{{ teardown_oracle_user }}" when: has_oracle_client diff --git a/roles/teardown/tasks/teardown_ecs.yml b/roles/teardown/tasks/teardown_ecs.yml index 4d9b22b0..df296c53 100644 --- a/roles/teardown/tasks/teardown_ecs.yml +++ b/roles/teardown/tasks/teardown_ecs.yml @@ -13,7 +13,6 @@ # limitations under the License. 
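Note on the database teardown above: each drop runs on the database host via delegate_to, with become_user: postgres for PostgreSQL. A sketch using the community.postgresql module name; the host and database values are placeholders:

- name: Drop a service database
  hosts: localhost
  gather_facts: false
  tasks:
    - name: Remove the PostgreSQL database as the postgres user
      community.postgresql.postgresql_db:
        name: scm # placeholder database name
        state: absent
      delegate_to: db1.example.com # placeholder database host
      become: true
      become_user: postgres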
--- - #- name: Include config cluster defaults for deployment # ansible.builtin.include_role: # name: cloudera.cluster.config.cluster.base @@ -86,10 +85,9 @@ rm -rf /var/log/pods/* ignore_errors: true - - name: Flush and Delete IPTables ansible.builtin.iptables: - flush: yes + flush: true table: "{{ __iptables_flush_item }}" loop: - filter diff --git a/roles/teardown/tasks/teardown_kms.yml b/roles/teardown/tasks/teardown_kms.yml index 4ea4d729..09f72fd1 100644 --- a/roles/teardown/tasks/teardown_kms.yml +++ b/roles/teardown/tasks/teardown_kms.yml @@ -13,7 +13,6 @@ # limitations under the License. --- - - name: Remove the KMS directory file: path: /var/lib/kms-keytrustee diff --git a/roles/teardown/tasks/teardown_role_directories.yml b/roles/teardown/tasks/teardown_role_directories.yml index 0312aefe..808afdb0 100644 --- a/roles/teardown/tasks/teardown_role_directories.yml +++ b/roles/teardown/tasks/teardown_role_directories.yml @@ -13,7 +13,6 @@ # limitations under the License. --- - - name: Remove cluster role directories include_tasks: teardown_role_directory.yml loop: "{{ role.directories_confs }}" diff --git a/roles/teardown/tasks/teardown_role_directory.yml b/roles/teardown/tasks/teardown_role_directory.yml index fe3f88ef..47bb5c52 100644 --- a/roles/teardown/tasks/teardown_role_directory.yml +++ b/roles/teardown/tasks/teardown_role_directory.yml @@ -13,7 +13,6 @@ # limitations under the License. --- - - name: Remove cluster role directory file: path: "{{ directory }}" diff --git a/roles/teardown/tasks/teardown_service_directories.yml b/roles/teardown/tasks/teardown_service_directories.yml index bd1624ea..9c020c15 100644 --- a/roles/teardown/tasks/teardown_service_directories.yml +++ b/roles/teardown/tasks/teardown_service_directories.yml @@ -13,7 +13,6 @@ # limitations under the License. --- - - name: Remove all cluster role directories include_tasks: teardown_role_directories.yml loop: "{{ service.roles|dict2items(key_name='name', value_name='directories_confs') }}" diff --git a/roles/teardown/vars/main.yml b/roles/teardown/vars/main.yml index 53071c9b..81d0b43b 100644 --- a/roles/teardown/vars/main.yml +++ b/roles/teardown/vars/main.yml @@ -1,3 +1,4 @@ +--- # Copyright 2023 Cloudera, Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -100,7 +101,6 @@ base_cluster_directories: - zk_server_log_dir kts_cluster_directories: {} - cms_directories: ACTIVITYMONITOR: - mgmt_log_dir diff --git a/roles/verify/definition/tasks/main.yml b/roles/verify/definition/tasks/main.yml index d774b8dc..ee53aa8d 100644 --- a/roles/verify/definition/tasks/main.yml +++ b/roles/verify/definition/tasks/main.yml @@ -13,7 +13,6 @@ # limitations under the License. 
--- - # Inventory specific - block: - set_fact: @@ -104,7 +103,7 @@ that: "{{ krb5_kdc_host is not defined and 'krb5_server' not in groups }}" success_msg: "Kerberos is not configured on any cluster and the KDC host is not set" fail_msg: "The KDC host is configured but no cluster is configured to use Kerberos" - ignore_errors: yes + ignore_errors: true when: not expect_kerberos - name: Ensure that Kerberos is specified when used @@ -201,20 +200,20 @@ - block: - set_fact: kerberos_clusters: >- - {{ - definition - | json_query("clusters[?security.kerberos].name") - }} + {{ + definition + | json_query("clusters[?security.kerberos].name") + }} ranger_clusters: >- - {{ - definition - | json_query('clusters[?services] | [?contains(services, `RANGER`)].name') - }} + {{ + definition + | json_query('clusters[?services] | [?contains(services, `RANGER`)].name') + }} sentry_clusters: >- - {{ - definition - | json_query('clusters[?services] | [?contains(services, `SENTRY`)].name') - }} + {{ + definition + | json_query('clusters[?services] | [?contains(services, `SENTRY`)].name') + }} - name: Ensure that Kerberos is enabled alongside Ranger and Sentry assert: that: "{{ ranger_clusters | union(sentry_clusters) | difference(kerberos_clusters) | length == 0 }}" @@ -226,7 +225,7 @@ that: "{{ kerberos_clusters | difference(ranger_clusters | union(sentry_clusters)) | length == 0 }}" success_msg: "Ranger or Sentry is present on each cluster with Kerberos" fail_msg: "Ranger or Sentry should be present on each cluster with Kerberos" - ignore_errors: yes + ignore_errors: true when: kerberos_clusters | length > 0 ## ZooKeeper diff --git a/roles/verify/inventory/tasks/main.yml b/roles/verify/inventory/tasks/main.yml index d17ad5a3..62d2b468 100644 --- a/roles/verify/inventory/tasks/main.yml +++ b/roles/verify/inventory/tasks/main.yml @@ -13,7 +13,6 @@ # limitations under the License. --- - - name: Fail if inventory groups are empty fail: msg: Ensure that all inventory groups are non-empty @@ -36,15 +35,15 @@ - block: - set_fact: cluster_hosts: >- - {{ groups.cluster | default([]) - | union( - (groups.cloudera_manager | default([]) - | union( - groups.ecs_nodes | default([]) - ) + {{ groups.cluster | default([]) + | union( + (groups.cloudera_manager | default([]) + | union( + groups.ecs_nodes | default([]) ) - ) - }} + ) + ) + }} - name: Ensure that all hosts requiring TLS certificates have a FreeIPA client assert: diff --git a/roles/verify/parcels_and_roles/tasks/check_cluster.yml b/roles/verify/parcels_and_roles/tasks/check_cluster.yml index 4a35381b..924bd289 100644 --- a/roles/verify/parcels_and_roles/tasks/check_cluster.yml +++ b/roles/verify/parcels_and_roles/tasks/check_cluster.yml @@ -13,7 +13,6 @@ # limitations under the License. --- - - name: Retrieve repository metadata include_role: name: cloudera.cluster.deployment.repometa diff --git a/roles/verify/parcels_and_roles/tasks/check_cluster_config_roles.yml b/roles/verify/parcels_and_roles/tasks/check_cluster_config_roles.yml index 0cb77783..d2200e1a 100644 --- a/roles/verify/parcels_and_roles/tasks/check_cluster_config_roles.yml +++ b/roles/verify/parcels_and_roles/tasks/check_cluster_config_roles.yml @@ -13,7 +13,6 @@ # limitations under the License. 
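Note on the verification asserts above: cluster name sets are computed with json_query and compared with the difference filter, so an empty difference proves the required pairing. A self-contained sketch with an inline sample definition standing in for the real deployment definition:

- name: Verify Kerberos accompanies Ranger
  hosts: localhost
  gather_facts: false
  vars:
    definition: # inline sample definition
      clusters:
        - name: base
          security:
            kerberos: true
          services: [RANGER, ZOOKEEPER]
  tasks:
    - name: Compute the cluster name sets
      ansible.builtin.set_fact:
        kerberos_clusters: "{{ definition | json_query('clusters[?security.kerberos].name') }}"
        ranger_clusters: "{{ definition | json_query('clusters[?services] | [?contains(services, `RANGER`)].name') }}"

    - name: Every Ranger cluster must also enable Kerberos
      ansible.builtin.assert:
        that: ranger_clusters | difference(kerberos_clusters) | length == 0
        fail_msg: Kerberos is required on clusters running Ranger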
--- - - block: - set_fact: invalid_roles: >- diff --git a/roles/verify/parcels_and_roles/tasks/check_template.yml b/roles/verify/parcels_and_roles/tasks/check_template.yml index 5847d191..5b328a06 100644 --- a/roles/verify/parcels_and_roles/tasks/check_template.yml +++ b/roles/verify/parcels_and_roles/tasks/check_template.yml @@ -13,7 +13,6 @@ # limitations under the License. --- - - name: Host template being checked debug: msg: "{{ host_template.name }}" diff --git a/roles/verify/parcels_and_roles/tasks/check_template_roles.yml b/roles/verify/parcels_and_roles/tasks/check_template_roles.yml index 71b3b2b7..26602f87 100644 --- a/roles/verify/parcels_and_roles/tasks/check_template_roles.yml +++ b/roles/verify/parcels_and_roles/tasks/check_template_roles.yml @@ -13,7 +13,6 @@ # limitations under the License. --- - - block: - set_fact: invalid_roles: >- diff --git a/roles/verify/parcels_and_roles/tasks/main.yml b/roles/verify/parcels_and_roles/tasks/main.yml index d9490b66..08d2cf9c 100644 --- a/roles/verify/parcels_and_roles/tasks/main.yml +++ b/roles/verify/parcels_and_roles/tasks/main.yml @@ -13,7 +13,6 @@ # limitations under the License. --- - - name: Ensure cluster services and roles are valid include_tasks: check_cluster.yml loop: "{{ definition.clusters }}" diff --git a/tests/config.yml b/tests/config.yml index 2969e7a8..95865ea2 100644 --- a/tests/config.yml +++ b/tests/config.yml @@ -1,3 +1,4 @@ +--- # Copyright 2024 Cloudera, Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,4 +19,4 @@ # - https://github.com/ansible/ansible/blob/devel/test/lib/ansible_test/config/config.yml modules: - python_requires: '>=3.6' + python_requires: ">=3.6" diff --git a/tests/unit/plugins/modules/cluster/example.yml b/tests/unit/plugins/modules/cluster/example.yml index 2f128db5..d213e77c 100644 --- a/tests/unit/plugins/modules/cluster/example.yml +++ b/tests/unit/plugins/modules/cluster/example.yml @@ -1,3 +1,4 @@ +--- # Copyright 2024 Cloudera, Inc. 
#
# Licensed under the Apache License, Version 2.0 (the "License");

From b71bb1e6c8f82fc486b3fb1c04621cfacbb12c42 Mon Sep 17 00:00:00 2001
From: Webster Mudge
Date: Wed, 18 Jun 2025 17:34:57 -0400
Subject: [PATCH 03/21] Add initial ansible-lint ignore file

Signed-off-by: Webster Mudge

---
 .ansible-lint-ignore | 484 +++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 484 insertions(+)
 create mode 100644 .ansible-lint-ignore

diff --git a/.ansible-lint-ignore b/.ansible-lint-ignore
new file mode 100644
index 00000000..28070644
--- /dev/null
+++ b/.ansible-lint-ignore
@@ -0,0 +1,484 @@
+# This file contains ignored rule violations for ansible-lint
+
roles/assemble_template/defaults/main.yml var-naming[no-role-prefix]
roles/assemble_template/tasks/main.yml risky-file-permissions
+
# Scheduled for removal
roles/cloudera_manager/admin_password/check/tasks/main.yml fqcn[action-core]
roles/cloudera_manager/admin_password/check/tasks/main.yml name[missing]
roles/cloudera_manager/agent/meta/main.yml role-name[path]
roles/cloudera_manager/agent/tasks/main.yml fqcn[action-core]
roles/cloudera_manager/agent/tasks/main.yml package-latest
roles/cloudera_manager/agent_config/tasks/main.yml fqcn[action-core]
roles/cloudera_manager/api_client/handlers/main.yml name[casing]
roles/cloudera_manager/api_client/tasks/main.yml fqcn[action-core]
roles/cloudera_manager/api_client/tasks/main.yml name[missing]
roles/cloudera_manager/api_hosts/meta/main.yml role-name[path]
roles/cloudera_manager/api_hosts/tasks/main.yml fqcn[action-core]
roles/cloudera_manager/autotls/defaults/main.yml var-naming[no-role-prefix]
roles/cloudera_manager/autotls/defaults/main.yml var-naming[pattern]
roles/cloudera_manager/autotls/tasks/main.yml fqcn[action-core]
roles/cloudera_manager/autotls/tasks/main.yml ignore-errors
roles/cloudera_manager/autotls/tasks/main.yml jinja[spacing]
roles/cloudera_manager/autotls/tasks/main.yml name[missing]
roles/cloudera_manager/autotls/tasks/patch_old_cm.yml command-instead-of-shell
roles/cloudera_manager/autotls/tasks/patch_old_cm.yml fqcn[action-core]
roles/cloudera_manager/autotls/tasks/patch_old_cm.yml jinja[spacing]
roles/cloudera_manager/autotls/tasks/patch_old_cm.yml no-changed-when
roles/cloudera_manager/common/handlers/main.yml fqcn[action-core]
roles/cloudera_manager/common/handlers/main.yml name[casing]
roles/cloudera_manager/config/defaults/main.yml var-naming[no-role-prefix]
roles/cloudera_manager/config/tasks/main.yml fqcn[action-core]
roles/cloudera_manager/config/tasks/main.yml jinja[spacing]
roles/cloudera_manager/csds/tasks/main.yml fqcn[action-core]
roles/cloudera_manager/database/defaults/main.yml yaml[line-length]
roles/cloudera_manager/database/handlers/main.yml fqcn[action-core]
roles/cloudera_manager/database/handlers/main.yml name[casing]
roles/cloudera_manager/database/meta/main.yml role-name[path]
roles/cloudera_manager/database/tasks/external.yml fqcn[action-core]
roles/cloudera_manager/database/tasks/external.yml fqcn[action]
roles/cloudera_manager/database/tasks/main.yml fqcn[action-core]
roles/cloudera_manager/external_auth/defaults/main.yml var-naming[no-role-prefix]
roles/cloudera_manager/external_auth/tasks/main.yml fqcn[action-core]
roles/cloudera_manager/external_auth/tasks/main.yml key-order[task]
roles/cloudera_manager/external_auth/tasks/main.yml name[missing]
roles/cloudera_manager/external_auth/vars/freeipa.yml var-naming[no-role-prefix]
roles/cloudera_manager/external_auth/vars/main.yml var-naming[no-role-prefix]
+roles/cloudera_manager/hosts_config/tasks/main.yml fqcn[action-core] +roles/cloudera_manager/kerberos/tasks/main.yml fqcn[action-core] +roles/cloudera_manager/license/tasks/enterprise.yml command-instead-of-module +roles/cloudera_manager/license/tasks/enterprise.yml jinja[spacing] +roles/cloudera_manager/license/tasks/enterprise.yml no-changed-when +roles/cloudera_manager/license/tasks/main.yml fqcn[action-core] +roles/cloudera_manager/license/tasks/trial.yml ignore-errors +roles/cloudera_manager/preload_parcels/defaults/main.yml var-naming[no-role-prefix] +roles/cloudera_manager/preload_parcels/tasks/main.yml fqcn[action-core] +roles/cloudera_manager/preload_parcels/tasks/main.yml risky-file-permissions +roles/cloudera_manager/repo/defaults/main.yml var-naming[no-role-prefix] +roles/cloudera_manager/repo/tasks/main-Debian.yml fqcn[action-core] +roles/cloudera_manager/repo/tasks/main-Debian.yml jinja[spacing] +roles/cloudera_manager/repo/tasks/main-Debian.yml yaml[line-length] +roles/cloudera_manager/repo/tasks/main-RedHat.yml jinja[spacing] +roles/cloudera_manager/repo/tasks/main-RedHat.yml name[casing] +roles/cloudera_manager/repo/tasks/main-RedHat.yml no-changed-when +roles/cloudera_manager/repo/tasks/main.yml fqcn[action-core] +roles/cloudera_manager/repo/tasks/main.yml jinja[spacing] +roles/cloudera_manager/repo/tasks/main.yml yaml[line-length] +roles/cloudera_manager/repo/vars/Debian.yml jinja[spacing] +roles/cloudera_manager/repo/vars/Debian.yml yaml[line-length] +roles/cloudera_manager/repo/vars/RedHat.yml jinja[spacing] +roles/cloudera_manager/repo/vars/RedHat.yml yaml[line-length] +roles/cloudera_manager/server/tasks/main.yml fqcn[action-core] +roles/cloudera_manager/server/tasks/main.yml package-latest +roles/cloudera_manager/server_tls/tasks/main.yml fqcn[action-core] +roles/cloudera_manager/server_tls/tasks/main.yml name[missing] +roles/cloudera_manager/server_tls/tasks/main.yml yaml[line-length] +roles/cloudera_manager/services_info/defaults/main.yml var-naming[no-role-prefix] +roles/cloudera_manager/services_info/tasks/main.yml fqcn[action-core] +roles/cloudera_manager/services_info/tasks/main.yml jinja[invalid] +roles/cloudera_manager/services_info/tasks/main.yml name[missing] +roles/cloudera_manager/services_info/tasks/main.yml var-naming[no-reserved] +roles/cloudera_manager/services_info/tasks/main.yml yaml[line-length] +roles/cloudera_manager/wait_for_heartbeat/tasks/main.yml fqcn[action-core] +roles/config/cluster/base/tasks/main.yml fqcn[action-core] +roles/config/cluster/base/tasks/main.yml name[missing] +roles/config/cluster/base/vars/main.yml jinja[spacing] +roles/config/cluster/base/vars/main.yml var-naming[no-role-prefix] +roles/config/cluster/base/vars/main.yml yaml[line-length] +roles/config/cluster/common/defaults/main.yml var-naming[no-role-prefix] +roles/config/cluster/ecs/tasks/main.yml fqcn[action-core] +roles/config/cluster/ecs/tasks/main.yml name[missing] +roles/config/cluster/ecs/vars/main.yml var-naming[no-role-prefix] +roles/config/cluster/kts/tasks/main.yml fqcn[action-core] +roles/config/cluster/kts/vars/main.yml var-naming[no-role-prefix] +roles/config/services/hue_ticket_lifetime/tasks/main.yml fqcn[action-core] +roles/config/services/hue_ticket_lifetime/tasks/main.yml ignore-errors +roles/config/services/hue_ticket_lifetime/tasks/main.yml no-changed-when +roles/config/services/hue_ticket_lifetime/tasks/main.yml yaml[line-length] +roles/config/services/kms/tasks/main.yml fqcn[action-core] +roles/config/services/kms/vars/main.yml 
var-naming[no-role-prefix] +roles/config/services/kms_tls/tasks/main.yml fqcn[action-core] +roles/config/services/kms_tls/tasks/main.yml ignore-errors +roles/config/services/kms_tls/tasks/main.yml jinja[invalid] +roles/config/services/kms_tls/tasks/main.yml jinja[spacing] +roles/config/services/kms_tls/tasks/main.yml yaml[line-length] +roles/config/services/mgmt/tasks/main.yml fqcn[action-core] +roles/config/services/mgmt/tasks/main.yml name[missing] +roles/config/services/mgmt/vars/main.yml jinja[spacing] +roles/config/services/mgmt/vars/main.yml var-naming[no-role-prefix] +roles/config/services/oozie_ui/tasks/main.yml command-instead-of-module +roles/config/services/oozie_ui/tasks/main.yml command-instead-of-shell +roles/config/services/oozie_ui/tasks/main.yml fqcn[action-core] +roles/config/services/oozie_ui/tasks/main.yml ignore-errors +roles/config/services/oozie_ui/tasks/main.yml no-changed-when +roles/config/services/oozie_ui/tasks/main.yml package-latest +roles/config/services/oozie_ui/tasks/main.yml risky-file-permissions +roles/config/services/ranger_pvc_default_policies/tasks/main.yml fqcn[action-core] +roles/config/services/ranger_pvc_default_policies/tasks/main.yml jinja[spacing] +roles/config/services/ranger_pvc_default_policies/tasks/main.yml schema[moves] +roles/config/services/solr_knox/tasks/add_solr_knox_host.yml fqcn[action-core] +roles/config/services/solr_knox/tasks/add_solr_knox_host.yml jinja[spacing] +roles/config/services/solr_knox/tasks/add_solr_knox_host.yml name[missing] +roles/config/services/solr_knox/tasks/add_solr_knox_host.yml yaml[line-length] +roles/config/services/solr_knox/tasks/main.yml fqcn[action-core] +roles/config/services/solr_knox/tasks/main.yml name[casing] +roles/config/services/solr_ranger_plugin/tasks/main.yml fqcn[action-core] +roles/config/services/solr_ranger_plugin/tasks/main.yml jinja[spacing] +roles/config/services/solr_ranger_plugin/tasks/main.yml no-handler +roles/deployment/cluster/tasks/create_base.yml fqcn[action-core] +roles/deployment/cluster/tasks/create_base.yml jinja[spacing] +roles/deployment/cluster/tasks/create_base.yml role-name[path] +roles/deployment/cluster/tasks/create_base.yml yaml[comments] +roles/deployment/cluster/tasks/create_ecs.yml fqcn[action-core] +roles/deployment/cluster/tasks/create_ecs.yml jinja[invalid] +roles/deployment/cluster/tasks/create_ecs.yml jinja[spacing] +roles/deployment/cluster/tasks/create_ecs.yml name[missing] +roles/deployment/cluster/tasks/create_ecs.yml yaml[line-length] +roles/deployment/cluster/tasks/create_kts.yml fqcn[action-core] +roles/deployment/cluster/tasks/create_kts.yml jinja[invalid] +roles/deployment/cluster/tasks/create_kts.yml jinja[spacing] +roles/deployment/cluster/tasks/create_kts.yml key-order[task] +roles/deployment/cluster/tasks/create_kts.yml name[missing] +roles/deployment/cluster/tasks/create_kts.yml risky-file-permissions +roles/deployment/cluster/tasks/create_kts.yml yaml[comments] +roles/deployment/cluster/tasks/fs2cs.yml fqcn[action-core] +roles/deployment/cluster/tasks/fs2cs.yml jinja[invalid] +roles/deployment/cluster/tasks/fs2cs.yml jinja[spacing] +roles/deployment/cluster/tasks/fs2cs.yml name[missing] +roles/deployment/cluster/tasks/fs2cs.yml yaml[line-length] +roles/deployment/cluster/tasks/main.yml fqcn[action-core] +roles/deployment/cluster/tasks/main.yml jinja[invalid] +roles/deployment/cluster/tasks/main.yml name[missing] +roles/deployment/cluster/tasks/nav2atlas.yml fqcn[action-core] +roles/deployment/cluster/tasks/nav2atlas.yml jinja[invalid] 
+roles/deployment/cluster/tasks/nav2atlas.yml jinja[spacing] +roles/deployment/cluster/tasks/nav2atlas.yml name[missing] +roles/deployment/cluster/tasks/nav2atlas.yml yaml[line-length] +roles/deployment/cluster/tasks/update_base.yml fqcn[action-core] +roles/deployment/cluster/tasks/update_base.yml jinja[invalid] +roles/deployment/cluster/tasks/update_base.yml jinja[spacing] +roles/deployment/cluster/tasks/update_base.yml name[missing] +roles/deployment/cluster/tasks/update_base.yml role-name[path] +roles/deployment/cluster/tasks/update_base.yml var-naming[no-role-prefix] +roles/deployment/cluster/tasks/update_base.yml yaml[line-length] +roles/deployment/cluster/tasks/upgrade_kts.yml fqcn[action-core] +roles/deployment/cluster/tasks/upgrade_kts.yml jinja[invalid] +roles/deployment/cluster/tasks/upgrade_kts.yml jinja[spacing] +roles/deployment/cluster/tasks/upgrade_kts.yml key-order[task] +roles/deployment/cluster/tasks/upgrade_kts.yml name[missing] +roles/deployment/cluster/tasks/upgrade_kts.yml role-name[path] +roles/deployment/cluster/tasks/upgrade_kts.yml yaml[line-length] +roles/deployment/credential/tasks/main.yml fqcn[action-core] +roles/deployment/credential/tasks/main.yml jinja[spacing] +roles/deployment/credential/tasks/main.yml no-changed-when +roles/deployment/credential/tasks/main.yml risky-shell-pipe +roles/deployment/databases/tasks/main.yml fqcn[action-core] +roles/deployment/databases/tasks/mariadb.yml fqcn[action] +roles/deployment/databases/tasks/mysql.yml fqcn[action] +roles/deployment/databases/tasks/postgresql.yml fqcn[action] +roles/deployment/definition/defaults/main.yml var-naming[no-role-prefix] +roles/deployment/definition/defaults/main.yml yaml[comments] +roles/deployment/definition/tasks/main.yml jinja[invalid] +roles/deployment/groupby/tasks/main.yml fqcn[action-core] +roles/deployment/groupby/tasks/main.yml jinja[invalid] +roles/deployment/groupby/tasks/main.yml key-order[task] +roles/deployment/groupby/tasks/main.yml name[missing] +roles/deployment/groupby/tasks/main.yml var-naming[no-reserved] +roles/deployment/repometa/defaults/main.yml var-naming[no-role-prefix] +roles/deployment/repometa/tasks/main.yml fqcn[action-core] +roles/deployment/repometa/tasks/parcels.yml fqcn[action-core] +roles/deployment/repometa/tasks/parcels.yml jinja[spacing] +roles/deployment/repometa/tasks/parcels.yml name[missing] +roles/deployment/services/kms/tasks/create_kms.yml fqcn[action-core] +roles/deployment/services/kms/tasks/create_kms.yml jinja[invalid] +roles/deployment/services/kms/tasks/create_kms.yml name[missing] +roles/deployment/services/kms/tasks/create_kms.yml var-naming[no-reserved] +roles/deployment/services/kms/tasks/main.yml fqcn[action-core] +roles/deployment/services/kms/tasks/main.yml no-changed-when +roles/deployment/services/kms_ha/defaults/main.yml var-naming[no-role-prefix] +roles/deployment/services/kms_ha/tasks/main.yml fqcn[action-core] +roles/deployment/services/kts_common/defaults/main.yml var-naming[no-role-prefix] +roles/deployment/services/kts_high_availability/tasks/main.yml command-instead-of-shell +roles/deployment/services/kts_high_availability/tasks/main.yml fqcn[action-core] +roles/deployment/services/kts_high_availability/tasks/main.yml jinja[invalid] +roles/deployment/services/kts_high_availability/tasks/main.yml no-changed-when +roles/deployment/services/kts_high_availability/tasks/main.yml risky-shell-pipe +roles/deployment/services/mgmt/tasks/main.yml fqcn[action-core] +roles/deployment/services/wxm/defaults/main.yml 
var-naming[no-role-prefix] +roles/deployment/services/wxm/tasks/configure_telemetry.yml fqcn[action-core] +roles/deployment/services/wxm/tasks/configure_telemetry.yml ignore-errors +roles/deployment/services/wxm/tasks/configure_telemetry.yml jinja[invalid] +roles/deployment/services/wxm/tasks/configure_telemetry.yml name[missing] +roles/deployment/services/wxm/tasks/configure_telemetry.yml var-naming[no-reserved] +roles/deployment/services/wxm/tasks/main.yml fqcn[action-core] +roles/deployment/services/wxm/tasks/main.yml name[missing] +roles/deployment/services/wxm/tasks/truststore_to_base.yml fqcn[action-core] +roles/deployment/services/wxm/tasks/truststore_to_base.yml ignore-errors +roles/deployment/services/wxm/tasks/truststore_to_base.yml no-changed-when +roles/deployment/services/wxm/tasks/truststore_to_base.yml yaml[line-length] +roles/infrastructure/ca_common/defaults/main.yml var-naming[no-role-prefix] +roles/infrastructure/ca_server/molecule/default/converge.yml fqcn[action-core] +roles/infrastructure/ca_server/molecule/default/verify.yml command-instead-of-shell +roles/infrastructure/ca_server/molecule/default/verify.yml fqcn[action-core] +roles/infrastructure/ca_server/molecule/default/verify.yml no-changed-when +roles/infrastructure/ca_server/tasks/create_ca.yml fqcn[action-core] +roles/infrastructure/ca_server/tasks/create_ca.yml fqcn[action] +roles/infrastructure/ca_server/tasks/create_ca.yml name[missing] +roles/infrastructure/ca_server/tasks/main.yml fqcn[action-core] +roles/infrastructure/ca_server/tasks/main.yml name[missing] +roles/infrastructure/ca_server/vars/Debian.yml var-naming[no-role-prefix] +roles/infrastructure/ca_server/vars/RedHat.yml var-naming[no-role-prefix] +roles/infrastructure/custom_repo/defaults/main.yml var-naming[no-role-prefix] +roles/infrastructure/custom_repo/tasks/install_parcels.yml fqcn[action-core] +roles/infrastructure/custom_repo/tasks/install_parcels.yml jinja[spacing] +roles/infrastructure/custom_repo/tasks/install_parcels_from_tars_on_controller.yml fqcn[action-core] +roles/infrastructure/custom_repo/tasks/install_parcels_from_tars_on_controller.yml jinja[spacing] +roles/infrastructure/custom_repo/tasks/main.yml fqcn[action-core] +roles/infrastructure/custom_repo/tasks/rehost_files_from_download.yml fqcn[action-core] +roles/infrastructure/custom_repo/tasks/rehost_files_from_download.yml risky-file-permissions +roles/infrastructure/custom_repo/vars/Debian.yml var-naming[no-role-prefix] +roles/infrastructure/custom_repo/vars/RedHat.yml var-naming[no-role-prefix] +roles/infrastructure/haproxy/tasks/main.yml fqcn[action-core] +roles/infrastructure/haproxy/tasks/main.yml package-latest +roles/infrastructure/krb5_client/defaults/main.yml var-naming[no-role-prefix] +roles/infrastructure/krb5_client/handlers/main.yml fqcn[action-core] +roles/infrastructure/krb5_client/handlers/main.yml name[casing] +roles/infrastructure/krb5_client/tasks/freeipa.yml fqcn[action-core] +roles/infrastructure/krb5_client/tasks/freeipa.yml literal-compare +roles/infrastructure/krb5_client/tasks/freeipa_autodns.yml fqcn[action-core] +roles/infrastructure/krb5_client/tasks/freeipa_autodns.yml ignore-errors +roles/infrastructure/krb5_client/tasks/freeipa_autodns.yml name[missing] +roles/infrastructure/krb5_client/tasks/freeipa_autodns.yml no-changed-when +roles/infrastructure/krb5_client/tasks/freeipa_autodns.yml risky-file-permissions +roles/infrastructure/krb5_client/tasks/freeipa_dbus_patch.yml fqcn[action-core] 
+roles/infrastructure/krb5_client/tasks/freeipa_dbus_patch.yml ignore-errors
+roles/infrastructure/krb5_client/tasks/freeipa_dbus_patch.yml name[casing]
+roles/infrastructure/krb5_client/tasks/freeipa_dbus_patch.yml no-handler
+roles/infrastructure/krb5_client/tasks/freeipa_dbus_patch.yml risky-file-permissions
+roles/infrastructure/krb5_client/tasks/mit.yml fqcn[action-core]
+roles/infrastructure/krb5_client/tasks/pvc_configs.yml fqcn[action-core]
+roles/infrastructure/krb5_client/tasks/pvc_configs.yml ignore-errors
+roles/infrastructure/krb5_client/vars/Debian.yml var-naming[no-role-prefix]
+roles/infrastructure/krb5_client/vars/RedHat.yml var-naming[no-role-prefix]
+roles/infrastructure/krb5_client/vars/Suse.yml var-naming[no-role-prefix]
+roles/infrastructure/krb5_common/defaults/main.yml var-naming[no-role-prefix]
+roles/infrastructure/krb5_conf/tasks/mit.yml fqcn[action-core]
+roles/infrastructure/krb5_conf/tasks/mit.yml risky-file-permissions
+roles/infrastructure/krb5_server/defaults/main.yml var-naming[no-role-prefix]
+roles/infrastructure/krb5_server/tasks/fix_freeipa_collection.yml fqcn[action-core]
+roles/infrastructure/krb5_server/tasks/fix_freeipa_collection.yml ignore-errors
+roles/infrastructure/krb5_server/tasks/fix_freeipa_collection.yml name[casing]
+roles/infrastructure/krb5_server/tasks/fix_freeipa_collection.yml package-latest
+roles/infrastructure/krb5_server/tasks/freeipa.yml fqcn[action-core]
+roles/infrastructure/krb5_server/tasks/freeipa.yml fqcn[action]
+roles/infrastructure/krb5_server/tasks/freeipa.yml ignore-errors
+roles/infrastructure/krb5_server/tasks/freeipa.yml name[missing]
+roles/infrastructure/krb5_server/tasks/mit.yml fqcn[action-core]
+roles/infrastructure/krb5_server/tasks/mit.yml no-changed-when
+roles/infrastructure/krb5_server/tasks/mit.yml risky-file-permissions
+roles/infrastructure/krb5_server/vars/Debian.yml var-naming[no-role-prefix]
+roles/infrastructure/krb5_server/vars/RedHat-7.yml var-naming[no-role-prefix]
+roles/infrastructure/krb5_server/vars/RedHat-8.yml var-naming[no-role-prefix]
+roles/infrastructure/krb5_server/vars/RedHat.yml var-naming[no-role-prefix]
+roles/infrastructure/krb5_server/vars/Suse.yml var-naming[no-role-prefix]
+roles/infrastructure/krb5_server/vars/Ubuntu.yml var-naming[no-role-prefix]
+roles/infrastructure/krb5_server/vars/default.yml var-naming[no-role-prefix]
+roles/infrastructure/rdbms/handlers/main.yml name[casing]
+roles/infrastructure/rdbms/handlers/main.yml no-changed-when
+roles/infrastructure/rdbms/tasks/main.yml fqcn[action-core]
+roles/infrastructure/rdbms/tasks/mariadb-Debian.yml fqcn[action-core]
+roles/infrastructure/rdbms/tasks/mariadb-RedHat.yml fqcn[action-core]
+roles/infrastructure/rdbms/tasks/mysql-RedHat.yml fqcn[action-core]
+roles/infrastructure/rdbms/tasks/mysql-RedHat.yml jinja[spacing]
+roles/infrastructure/rdbms/tasks/postgresql-Debian.yml fqcn[action-core]
+roles/infrastructure/rdbms/tasks/postgresql-Debian.yml package-latest
+roles/infrastructure/rdbms/tasks/postgresql-RedHat.yml fqcn[action-core]
+roles/infrastructure/rdbms/tasks/postgresql-RedHat.yml name[casing]
+roles/infrastructure/rdbms/tasks/template_fix.yml fqcn[action-core]
+roles/infrastructure/rdbms/tasks/template_fix.yml no-changed-when
+roles/infrastructure/rdbms/vars/mariadb-Debian.yml var-naming[no-role-prefix]
+roles/infrastructure/rdbms/vars/mariadb.yml var-naming[no-role-prefix]
+roles/infrastructure/rdbms/vars/mysql-RedHat.yml var-naming[no-role-prefix]
+roles/infrastructure/rdbms/vars/mysql.yml var-naming[no-role-prefix]
+roles/infrastructure/rdbms/vars/postgresql-Debian.yml var-naming[no-role-prefix]
+roles/infrastructure/rdbms/vars/postgresql-RedHat.yml jinja[spacing]
+roles/infrastructure/rdbms/vars/postgresql-RedHat.yml var-naming[no-role-prefix]
+roles/infrastructure/rdbms/vars/postgresql.yml var-naming[no-role-prefix]
+roles/operations/delete_cluster/meta/main.yml role-name[path]
+roles/operations/delete_cluster/tasks/main.yml fqcn[action-core]
+roles/operations/delete_cluster/tasks/main.yml jinja[invalid]
+roles/operations/delete_cluster/tasks/main.yml jinja[spacing]
+roles/operations/delete_cluster/tasks/main.yml key-order[task]
+roles/operations/delete_cluster/tasks/main.yml name[missing]
+roles/operations/refresh_ranger_kms_repo/tasks/cluster_find_ranger.yml fqcn[action-core]
+roles/operations/refresh_ranger_kms_repo/tasks/cluster_find_ranger.yml jinja[invalid]
+roles/operations/refresh_ranger_kms_repo/tasks/cluster_find_ranger.yml jinja[spacing]
+roles/operations/refresh_ranger_kms_repo/tasks/cluster_find_ranger.yml name[missing]
+roles/operations/refresh_ranger_kms_repo/tasks/cluster_find_ranger.yml var-naming[no-reserved]
+roles/operations/refresh_ranger_kms_repo/tasks/main.yml fqcn[action-core]
+roles/operations/refresh_ranger_kms_repo/tasks/setup_cluster.yml fqcn[action-core]
+roles/operations/refresh_ranger_kms_repo/tasks/setup_cluster.yml jinja[invalid]
+roles/operations/refresh_ranger_kms_repo/tasks/setup_cluster.yml name[missing]
+roles/operations/restart_cluster_services/tasks/main.yml fqcn[action-core]
+roles/operations/restart_cluster_services/tasks/service_restart.yml fqcn[action-core]
+roles/operations/restart_cluster_services/tasks/service_restart.yml jinja[invalid]
+roles/operations/restart_cluster_services/tasks/service_restart.yml var-naming[no-reserved]
+roles/operations/restart_stale/tasks/main.yml fqcn[action-core]
+roles/operations/restart_stale/tasks/restart.yml fqcn[action-core]
+roles/operations/restart_stale/tasks/restart.yml jinja[invalid]
+roles/operations/restart_stale/tasks/restart.yml name[missing]
+roles/operations/stop_cluster/meta/main.yml role-name[path]
+roles/prereqs/jdk/defaults/main.yml var-naming[no-role-prefix]
+roles/prereqs/jdk/tasks/main.yml fqcn[action-core]
+roles/prereqs/jdk/tasks/main.yml fqcn[action]
+roles/prereqs/jdk/tasks/main.yml jinja[spacing]
+roles/prereqs/jdk/tasks/main.yml name[missing]
+roles/prereqs/jdk/tasks/main.yml no-changed-when
+roles/prereqs/jdk/tasks/main.yml risky-shell-pipe
+roles/prereqs/jdk/vars/Debian.yml var-naming[no-role-prefix]
+roles/prereqs/jdk/vars/RedHat.yml var-naming[no-role-prefix]
+roles/prereqs/jdk/vars/Suse.yml var-naming[no-role-prefix]
+roles/prereqs/kerberos/tasks/main.yml fqcn[action-core]
+roles/prereqs/kerberos/vars/Debian.yml var-naming[no-role-prefix]
+roles/prereqs/kerberos/vars/RedHat.yml var-naming[no-role-prefix]
+roles/prereqs/kerberos/vars/Suse.yml var-naming[no-role-prefix]
+roles/prereqs/license/defaults/main.yml var-naming[no-role-prefix]
+roles/prereqs/license/tasks/main.yml fqcn[action-core]
+roles/prereqs/local_accounts_common/defaults/main.yml var-naming[no-role-prefix]
+roles/prereqs/mysql_connector/defaults/main.yml var-naming[no-role-prefix]
+roles/prereqs/mysql_connector/tasks/main.yml command-instead-of-shell
+roles/prereqs/mysql_connector/tasks/main.yml fqcn[action-core]
+roles/prereqs/mysql_connector/tasks/main.yml ignore-errors
+roles/prereqs/mysql_connector/tasks/main.yml no-changed-when
+roles/prereqs/mysql_connector/tasks/main.yml risky-file-permissions
+roles/prereqs/oracle_connector/defaults/main.yml var-naming[no-role-prefix]
+roles/prereqs/oracle_connector/tasks/main.yml fqcn[action-core]
+roles/prereqs/oracle_connector/tasks/main.yml fqcn[action]
+roles/prereqs/oracle_connector/tasks/main.yml key-order[task]
+roles/prereqs/os/defaults/main.yml var-naming[no-role-prefix]
+roles/prereqs/os/handlers/main.yml fqcn[action-core]
+roles/prereqs/os/handlers/main.yml name[casing]
+roles/prereqs/os/tasks/main-Debian.yml fqcn[action-core]
+roles/prereqs/os/tasks/main-Debian.yml fqcn[action]
+roles/prereqs/os/tasks/main-Debian.yml jinja[spacing]
+roles/prereqs/os/tasks/main-Debian.yml name[missing]
+roles/prereqs/os/tasks/main-Debian.yml package-latest
+roles/prereqs/os/tasks/main-RedHat.yml command-instead-of-shell
+roles/prereqs/os/tasks/main-RedHat.yml fqcn[action-core]
+roles/prereqs/os/tasks/main-RedHat.yml fqcn[action]
+roles/prereqs/os/tasks/main-RedHat.yml ignore-errors
+roles/prereqs/os/tasks/main-RedHat.yml name[casing]
+roles/prereqs/os/tasks/main-RedHat.yml no-changed-when
+roles/prereqs/os/tasks/main.yml fqcn[action-core]
+roles/prereqs/os/tasks/main.yml fqcn[action]
+roles/prereqs/os/tasks/main.yml name[casing]
+roles/prereqs/os/tasks/main.yml name[template]
+roles/prereqs/os/tasks/rngd.yml fqcn[action-core]
+roles/prereqs/os/vars/Debian.yml var-naming[no-role-prefix]
+roles/prereqs/os/vars/RedHat.yml var-naming[no-role-prefix]
+roles/prereqs/os/vars/Suse.yml var-naming[no-role-prefix]
+roles/prereqs/postgresql_connector/defaults/main.yml var-naming[no-role-prefix]
+roles/prereqs/postgresql_connector/tasks/main.yml command-instead-of-shell
+roles/prereqs/postgresql_connector/tasks/main.yml fqcn[action-core]
+roles/prereqs/postgresql_connector/tasks/main.yml literal-compare
+roles/prereqs/postgresql_connector/tasks/main.yml no-changed-when
+roles/prereqs/pvc_ecs/tasks/main.yml no-changed-when
+roles/prereqs/pvc_ecs/tasks/main.yml package-latest
+roles/prereqs/user_accounts/tasks/main.yml fqcn[action-core]
+roles/prereqs/user_accounts/tasks/main.yml key-order[task]
+roles/prereqs/user_accounts/tasks/main.yml name[missing]
+roles/prereqs/user_accounts_ecs/defaults/main.yml var-naming[no-role-prefix]
+roles/prereqs/user_accounts_ecs/tasks/main.yml fqcn[action-core]
+roles/prereqs/user_accounts_ecs/tasks/main.yml key-order[task]
+roles/prereqs/user_accounts_ecs/tasks/main.yml name[missing]
+roles/security/tls_clean/tasks/main.yml fqcn[action-core]
+roles/security/tls_generate_csr/defaults/main.yml var-naming[no-role-prefix]
+roles/security/tls_generate_csr/molecule/default/converge.yml fqcn[action-core]
+roles/security/tls_generate_csr/molecule/default/prepare.yml fqcn[action-core]
+roles/security/tls_generate_csr/molecule/default/verify.yml fqcn[action-core]
+roles/security/tls_generate_csr/tasks/acls-ecs.yml fqcn[action-core]
+roles/security/tls_generate_csr/tasks/acls-ecs.yml fqcn[action]
+roles/security/tls_generate_csr/tasks/acls-ecs.yml jinja[invalid]
+roles/security/tls_generate_csr/tasks/acls.yml fqcn[action-core]
+roles/security/tls_generate_csr/tasks/acls.yml fqcn[action]
+roles/security/tls_generate_csr/tasks/acls.yml jinja[invalid]
+roles/security/tls_generate_csr/tasks/main.yml fqcn[action-core]
+roles/security/tls_generate_csr/tasks/main.yml name[missing]
+roles/security/tls_generate_csr/tasks/main.yml risky-file-permissions
+roles/security/tls_generate_csr/tasks/main.yml risky-shell-pipe
+roles/security/tls_install_certs/defaults/main.yml var-naming[no-role-prefix]
+roles/security/tls_install_certs/tasks/main.yml command-instead-of-shell
+roles/security/tls_install_certs/tasks/main.yml fqcn[action-core]
+roles/security/tls_install_certs/tasks/main.yml fqcn[action]
+roles/security/tls_install_certs/tasks/main.yml jinja[invalid]
+roles/security/tls_install_certs/tasks/main.yml key-order[task]
+roles/security/tls_install_certs/tasks/main.yml name[missing]
+roles/security/tls_install_certs/tasks/main.yml no-changed-when
+roles/security/tls_nifi/defaults/main.yml var-naming[no-role-prefix]
+roles/security/tls_nifi/tasks/main.yml fqcn[action-core]
+roles/security/tls_nifi/tasks/main.yml risky-file-permissions
+roles/security/tls_signing/defaults/main.yml var-naming[no-role-prefix]
+roles/security/tls_signing/tasks/main.yml fqcn[action-core]
+roles/security/tls_signing/tasks/signing_freeipa.yml fqcn[action-core]
+roles/teardown/tasks/main.yml fqcn[action-core]
+roles/teardown/tasks/main.yml jinja[invalid]
+roles/teardown/tasks/main.yml jinja[spacing]
+roles/teardown/tasks/main.yml name[missing]
+roles/teardown/tasks/main.yml risky-file-permissions
+roles/teardown/tasks/main.yml var-naming[no-reserved]
+roles/teardown/tasks/teardown_cdsw.yml command-instead-of-shell
+roles/teardown/tasks/teardown_cdsw.yml fqcn[action-core]
+roles/teardown/tasks/teardown_cdsw.yml jinja[invalid]
+roles/teardown/tasks/teardown_cdsw.yml no-changed-when
+roles/teardown/tasks/teardown_cloudera_agent.yml fqcn[action-core]
+roles/teardown/tasks/teardown_cloudera_agent.yml ignore-errors
+roles/teardown/tasks/teardown_cloudera_agent.yml no-changed-when
+roles/teardown/tasks/teardown_cloudera_server.yml fqcn[action-core]
+roles/teardown/tasks/teardown_cloudera_server.yml ignore-errors
+roles/teardown/tasks/teardown_cluster.yml fqcn[action-core]
+roles/teardown/tasks/teardown_cluster.yml jinja[spacing]
+roles/teardown/tasks/teardown_cluster.yml key-order[task]
+roles/teardown/tasks/teardown_cluster.yml name[missing]
+roles/teardown/tasks/teardown_cms.yml fqcn[action-core]
+roles/teardown/tasks/teardown_cms.yml jinja[spacing]
+roles/teardown/tasks/teardown_cms_role_directories.yml fqcn[action-core]
+roles/teardown/tasks/teardown_cms_role_directory.yml fqcn[action-core]
+roles/teardown/tasks/teardown_database.yml fqcn[action-core]
+roles/teardown/tasks/teardown_database.yml fqcn[action]
+roles/teardown/tasks/teardown_database.yml ignore-errors
+roles/teardown/tasks/teardown_database.yml name[missing]
+roles/teardown/tasks/teardown_database.yml no-changed-when
+roles/teardown/tasks/teardown_database.yml yaml[line-length]
+roles/teardown/tasks/teardown_ecs.yml command-instead-of-shell
+roles/teardown/tasks/teardown_ecs.yml fqcn[action-core]
+roles/teardown/tasks/teardown_ecs.yml ignore-errors
+roles/teardown/tasks/teardown_ecs.yml no-changed-when
+roles/teardown/tasks/teardown_ecs.yml yaml[comments]
+roles/teardown/tasks/teardown_kms.yml fqcn[action-core]
+roles/teardown/tasks/teardown_role_directories.yml fqcn[action-core]
+roles/teardown/tasks/teardown_role_directory.yml fqcn[action-core]
+roles/teardown/tasks/teardown_service_directories.yml fqcn[action-core]
+roles/teardown/tasks/teardown_service_directories.yml jinja[spacing]
+roles/teardown/vars/main.yml var-naming[no-role-prefix]
+roles/verify/definition/tasks/main.yml fqcn[action-core]
+roles/verify/definition/tasks/main.yml ignore-errors
+roles/verify/definition/tasks/main.yml jinja[invalid]
+roles/verify/definition/tasks/main.yml key-order[task]
+roles/verify/definition/tasks/main.yml literal-compare
+roles/verify/definition/tasks/main.yml name[missing]
+roles/verify/inventory/tasks/main.yml fqcn[action-core]
+roles/verify/inventory/tasks/main.yml jinja[spacing]
+roles/verify/inventory/tasks/main.yml key-order[task]
+roles/verify/inventory/tasks/main.yml name[missing]
+roles/verify/parcels_and_roles/tasks/check_cluster.yml fqcn[action-core]
+roles/verify/parcels_and_roles/tasks/check_cluster.yml jinja[invalid]
+roles/verify/parcels_and_roles/tasks/check_cluster.yml key-order[task]
+roles/verify/parcels_and_roles/tasks/check_cluster.yml name[missing]
+roles/verify/parcels_and_roles/tasks/check_cluster_config_roles.yml fqcn[action-core]
+roles/verify/parcels_and_roles/tasks/check_cluster_config_roles.yml name[missing]
+roles/verify/parcels_and_roles/tasks/check_template.yml fqcn[action-core]
+roles/verify/parcels_and_roles/tasks/check_template.yml name[missing]
+roles/verify/parcels_and_roles/tasks/check_template_roles.yml fqcn[action-core]
+roles/verify/parcels_and_roles/tasks/check_template_roles.yml jinja[spacing]
+roles/verify/parcels_and_roles/tasks/check_template_roles.yml name[missing]
+roles/verify/parcels_and_roles/tasks/main.yml fqcn[action-core]

From ce3d9ed684528ef48afadbf93f91bc64784303f8 Mon Sep 17 00:00:00 2001
From: Webster Mudge
Date: Mon, 23 Jun 2025 14:11:45 -0400
Subject: [PATCH 04/21] Initial changelog configuration

Signed-off-by: Webster Mudge
---
 changelogs/config.yaml | 37 +++++++++++++++++++++++++++++++++++++
 1 file changed, 37 insertions(+)
 create mode 100644 changelogs/config.yaml

diff --git a/changelogs/config.yaml b/changelogs/config.yaml
new file mode 100644
index 00000000..746418be
--- /dev/null
+++ b/changelogs/config.yaml
@@ -0,0 +1,37 @@
+add_plugin_period: true
+changelog_nice_yaml: false
+changelog_sort: alphanumerical
+changes_file: changelog.yaml
+changes_format: combined
+ignore_other_fragment_extensions: true
+keep_fragments: false
+mention_ancestor: true
+new_plugins_after_name: removed_features
+notesdir: fragments
+output:
+- file: CHANGELOG.rst
+  format: rst
+prelude_section_name: release_summary
+prelude_section_title: Release Summary
+sanitize_changelog: true
+sections:
+- - major_changes
+  - Major Changes
+- - minor_changes
+  - Minor Changes
+- - breaking_changes
+  - Breaking Changes / Porting Guide
+- - deprecated_features
+  - Deprecated Features
+- - removed_features
+  - Removed Features (previously deprecated)
+- - security_fixes
+  - Security Fixes
+- - bugfixes
+  - Bugfixes
+- - known_issues
+  - Known Issues
+title: Cloudera.Cluster
+trivial_section_name: trivial
+use_fqcn: true
+vcs: auto

From d2c568a23b9250730ba6b84fc2e5c494bcf15d9a Mon Sep 17 00:00:00 2001
From: Webster Mudge
Date: Mon, 23 Jun 2025 14:11:53 -0400
Subject: [PATCH 05/21] Update docsbuild to use Hatch

Signed-off-by: Webster Mudge
---
 docs/docsite/config.yml | 15 +++++++++++++++
 docs/docsite/extra-docs.yml | 6 ++++++
 docs/{ => docsite}/links.yml | 4 ++--
 docsbuild/build.sh | 4 ++--
 docsbuild/requirements.txt | 5 ++++-
 pyproject.toml | 16 ++++++++++++++++
 6 files changed, 45 insertions(+), 5 deletions(-)
 create mode 100644 docs/docsite/config.yml
 create mode 100644 docs/docsite/extra-docs.yml
 rename docs/{ => docsite}/links.yml (79%)

diff --git a/docs/docsite/config.yml b/docs/docsite/config.yml
new file mode 100644
index 00000000..0d1260cf
--- /dev/null
+++ b/docs/docsite/config.yml
@@ -0,0 +1,15 @@
+---
+# Whether the collection uses flatmapping to flatten subdirectories in
+# `plugins/*/`.
+flatmap: true
+
+# List of environment variables that are defined by `.. envvar::` directives
+# in the extra docsite RST files.
+envvar_directives: []
+
+# Changelog configuration (added in antsibull-docs 2.10.0)
+changelog:
+  # Whether to write the changelog (taken from changelogs/changelog.yaml, see the
+  # antsibull-changelog documentation for more information) and link to it from the
+  # collection's index page.
+  write_changelog: false
diff --git a/docs/docsite/extra-docs.yml b/docs/docsite/extra-docs.yml
new file mode 100644
index 00000000..c10cfcd2
--- /dev/null
+++ b/docs/docsite/extra-docs.yml
@@ -0,0 +1,6 @@
+---
+
+sections:
+  - title: Guides
+    toctree:
+      - api-design
diff --git a/docs/links.yml b/docs/docsite/links.yml
similarity index 79%
rename from docs/links.yml
rename to docs/docsite/links.yml
index bc6fedb7..c2b1da7a 100644
--- a/docs/links.yml
+++ b/docs/docsite/links.yml
@@ -6,7 +6,7 @@ edit_on_github:
   path_prefix: ""
 
 extra_links:
-  - description: Submit a bug report
+  - description: Submit a Bug Report
     url: https://github.com/cloudera-labs/cloudera.cluster/issues/new?labels=bug
-  - description: Request a feature
+  - description: Request a Feature
     url: https://github.com/cloudera-labs/cloudera.cluster/issues/new?labels=enhancement
diff --git a/docsbuild/build.sh b/docsbuild/build.sh
index 9e627287..5251a6e7 100755
--- a/docsbuild/build.sh
+++ b/docsbuild/build.sh
@@ -1,7 +1,5 @@
 #!/usr/bin/env bash
 
-# Created with antsibull-docs 2.3.1.post0
-
 set -e
 
 pushd "$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
@@ -19,6 +17,8 @@ antsibull-docs \
     --dest-dir temp-rst \
     cloudera.cluster
 
+    # --cleanup everything \
+
 # Copy collection documentation into source directory
 rsync -cprv --delete-after temp-rst/ rst/
diff --git a/docsbuild/requirements.txt b/docsbuild/requirements.txt
index d4a6fbc4..ef0c3423 100644
--- a/docsbuild/requirements.txt
+++ b/docsbuild/requirements.txt
@@ -1,2 +1,5 @@
-# Created with antsibull-docs 2.3.1.post0
+antsibull-docs >= 2.0.0, < 3.0.0
+ansible-pygments
+sphinx
+sphinx-ansible-theme >= 0.9.0
diff --git a/pyproject.toml b/pyproject.toml
index 277d492f..462b018e 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -42,6 +42,22 @@ extra-dependencies = ["ansible-lint"]
 [tool.hatch.envs.lint.scripts]
 run = "pre-commit run -a"
 
+[tool.hatch.envs.docs]
+python = "3.12"
+detached = true
+extra-dependencies = [
+  # "antsibull-docs >= 2.0.0, < 3.0.0",
+  "ansible-pygments",
+  "sphinx",
+  "sphinx-ansible-theme >= 0.9.0",
+  "antsibull-docs @ git+https://github.com/cloudera-labs/antsibull-docs@cldr-docsite#egg=antsibull-docs",
+]
+
+[tool.hatch.envs.docs.scripts]
+doclint = "antsibull-docs lint-collection-docs --plugin-docs --validate-collection-refs=all --skip-rstcheck ."
+build = "docsbuild/build.sh"
+log = ""
+
 [tool.pytest.ini_options]
 testpaths = ["tests"]
 filterwarnings = [

From d46ce93ccfaa5747e1802362ab7f84dd0d4de097 Mon Sep 17 00:00:00 2001
From: Webster Mudge
Date: Mon, 23 Jun 2025 15:24:04 -0400
Subject: [PATCH 06/21] Updated version_added for current roles and plugins

Signed-off-by: Webster Mudge
---
 plugins/lookup/cm_license.py | 1 +
 plugins/lookup/cm_service.py | 1 +
 plugins/modules/cluster.py | 1 +
 plugins/modules/cluster_info.py | 1 +
 plugins/modules/cm_autotls.py | 1 +
 plugins/modules/cm_autotls_info.py | 1 +
 plugins/modules/cm_config.py | 1 +
 plugins/modules/cm_config_info.py | 1 +
 plugins/modules/cm_endpoint_info.py | 1 +
 plugins/modules/cm_kerberos.py | 1 +
 plugins/modules/cm_kerberos_info.py | 1 +
 plugins/modules/cm_license.py | 1 +
 plugins/modules/cm_license_info.py | 1 +
 plugins/modules/cm_resource.py | 1 +
 plugins/modules/cm_resource_info.py | 1 +
 plugins/modules/cm_service.py | 1 +
 plugins/modules/cm_service_config.py | 1 +
 plugins/modules/cm_service_info.py | 1 +
 plugins/modules/cm_service_role.py | 1 +
 plugins/modules/cm_service_role_config.py | 1 +
 plugins/modules/cm_service_role_config_group.py | 1 +
 plugins/modules/cm_service_role_config_group_config.py | 1 +
 plugins/modules/cm_service_role_config_group_info.py | 1 +
 plugins/modules/cm_service_role_info.py | 1 +
 plugins/modules/cm_trial_license.py | 1 +
 plugins/modules/cm_version_info.py | 1 +
 plugins/modules/data_context.py | 1 +
 plugins/modules/data_context_info.py | 1 +
 plugins/modules/external_account.py | 1 +
 plugins/modules/external_account_info.py | 1 +
 plugins/modules/external_user_mappings.py | 1 +
 plugins/modules/external_user_mappings_info.py | 1 +
 plugins/modules/host.py | 1 +
 plugins/modules/host_config.py | 1 +
 plugins/modules/host_config_info.py | 1 +
 plugins/modules/host_info.py | 1 +
 plugins/modules/host_template.py | 1 +
 plugins/modules/host_template_info.py | 1 +
 plugins/modules/parcel.py | 1 +
 plugins/modules/parcel_info.py | 1 +
 plugins/modules/service.py | 1 +
 plugins/modules/service_config.py | 1 +
 plugins/modules/service_config_info.py | 1 +
 plugins/modules/service_info.py | 1 +
 plugins/modules/service_role.py | 1 +
 plugins/modules/service_role_config.py | 1 +
 plugins/modules/service_role_config_group.py | 1 +
 plugins/modules/service_role_config_group_config.py | 1 +
 plugins/modules/service_role_config_group_config_info.py | 1 +
 plugins/modules/service_role_config_group_info.py | 1 +
 plugins/modules/service_role_config_info.py | 1 +
 plugins/modules/service_role_info.py | 1 +
 plugins/modules/service_type_info.py | 1 +
 plugins/modules/user.py | 1 +
 plugins/modules/user_info.py | 1 +
 roles/assemble_template/meta/argument_specs.yml | 1 +
 56 files changed, 56 insertions(+)

diff --git a/plugins/lookup/cm_license.py b/plugins/lookup/cm_license.py
index 8b2877eb..cf5bc6a4 100644
--- a/plugins/lookup/cm_license.py
+++ b/plugins/lookup/cm_license.py
@@ -25,6 +25,7 @@
 description:
   - Parses and verifies the contents of a Cloudera license.
   - Returns the license details, including the computed C(password).
+version_added: "5.0.0"
 options:
   _terms:
     description:
diff --git a/plugins/lookup/cm_service.py b/plugins/lookup/cm_service.py
index 6f1feed5..4aed2aea 100644
--- a/plugins/lookup/cm_service.py
+++ b/plugins/lookup/cm_service.py
@@ -28,6 +28,7 @@
   - Otherwise, the lookup entry will be an empty list.
   - If the cluster is not found or is ambigious, the lookup will return an error.
   - If the Cloudera Manager endpoint is not found or is not available, the lookup will return an error.
+version_added: "4.0.0"
 options:
   _terms:
     description:
diff --git a/plugins/modules/cluster.py b/plugins/modules/cluster.py
index 851eb054..375d476c 100644
--- a/plugins/modules/cluster.py
+++ b/plugins/modules/cluster.py
@@ -27,6 +27,7 @@
 author:
   - "Ronald Suplina (@rsuplina)"
   - "Webster Mudge (@wmudge)"
+version_added: "4.4.0"
 requirements:
   - cm-client
 options:
diff --git a/plugins/modules/cluster_info.py b/plugins/modules/cluster_info.py
index 271d3a13..ff974829 100644
--- a/plugins/modules/cluster_info.py
+++ b/plugins/modules/cluster_info.py
@@ -22,6 +22,7 @@
   - Retrieves details about one or more clusters managed by Cloudera Manager
 author:
   - "Ronald Suplina (@rsuplina)"
+version_added: "4.4.0"
 options:
   name:
     description:
diff --git a/plugins/modules/cm_autotls.py b/plugins/modules/cm_autotls.py
index a0de1a45..c259e8f4 100644
--- a/plugins/modules/cm_autotls.py
+++ b/plugins/modules/cm_autotls.py
@@ -24,6 +24,7 @@
   - Note that disabling Auto-TLS does not remove the TLS resources (keys, truststores, etc.) created during the enable process.
 author:
   - "Jim Enright (@jimright)"
+version_added: "5.0.0"
 requirements:
   - cm_client
 options:
diff --git a/plugins/modules/cm_autotls_info.py b/plugins/modules/cm_autotls_info.py
index 81bfb841..a67ef853 100644
--- a/plugins/modules/cm_autotls_info.py
+++ b/plugins/modules/cm_autotls_info.py
@@ -22,6 +22,7 @@
   - Retrieve Cloudera Manager configurations for Auto-TLS
 author:
   - "Jim Enright (@jimright)"
+version_added: "5.0.0"
 requirements:
   - cm_client
 extends_documentation_fragment:
diff --git a/plugins/modules/cm_config.py b/plugins/modules/cm_config.py
index 39a462e4..e825479c 100644
--- a/plugins/modules/cm_config.py
+++ b/plugins/modules/cm_config.py
@@ -22,6 +22,7 @@
   - Manage Cloudera Manager configuration settings.
 author:
   - "Webster Mudge (@wmudge)"
+version_added: "4.4.0"
 requirements:
   - cm_client
 options:
diff --git a/plugins/modules/cm_config_info.py b/plugins/modules/cm_config_info.py
index c4a29bda..ce10382b 100644
--- a/plugins/modules/cm_config_info.py
+++ b/plugins/modules/cm_config_info.py
@@ -23,6 +23,7 @@
   - The module supports C(check_mode).
 author:
   - "Webster Mudge (@wmudge)"
+version_added: "4.4.0"
 requirements:
   - cm_client
 options:
diff --git a/plugins/modules/cm_endpoint_info.py b/plugins/modules/cm_endpoint_info.py
index fbb94ba1..3342dbd8 100644
--- a/plugins/modules/cm_endpoint_info.py
+++ b/plugins/modules/cm_endpoint_info.py
@@ -23,6 +23,7 @@
   - The module supports C(check_mode).
 author:
   - "Webster Mudge (@wmudge)"
+version_added: "4.0.0"
 requirements:
   - cm_client
 extends_documentation_fragment:
diff --git a/plugins/modules/cm_kerberos.py b/plugins/modules/cm_kerberos.py
index c531d64e..0e299989 100644
--- a/plugins/modules/cm_kerberos.py
+++ b/plugins/modules/cm_kerberos.py
@@ -23,6 +23,7 @@
   - Imports the KDC Account Manager credentials needed by Cloudera Manager to create kerberos principals.
 author:
   - "Jim Enright (@jimright)"
+version_added: "5.0.0"
 requirements:
   - cm_client
 options:
diff --git a/plugins/modules/cm_kerberos_info.py b/plugins/modules/cm_kerberos_info.py
index d4130642..cb0307a5 100644
--- a/plugins/modules/cm_kerberos_info.py
+++ b/plugins/modules/cm_kerberos_info.py
@@ -22,6 +22,7 @@
   - Retrieve Cloudera Manager configurations for Kerberos
 author:
   - "Jim Enright (@jimright)"
+version_added: "5.0.0"
 requirements:
   - cm_client
 extends_documentation_fragment:
diff --git a/plugins/modules/cm_license.py b/plugins/modules/cm_license.py
index dc7ea3f1..8744eb47 100644
--- a/plugins/modules/cm_license.py
+++ b/plugins/modules/cm_license.py
@@ -23,6 +23,7 @@
   - Return information about the acivate license.
 author:
   - "Ronald Suplina (@rsuplina)"
+version_added: "4.4.0"
 requirements:
   - cm_client
 """
diff --git a/plugins/modules/cm_license_info.py b/plugins/modules/cm_license_info.py
index 20ab3b3c..adeb92de 100644
--- a/plugins/modules/cm_license_info.py
+++ b/plugins/modules/cm_license_info.py
@@ -22,6 +22,7 @@
   - Returns details about current active license.
 author:
   - "Ronald Suplina (@rsuplina)"
+version_added: "4.4.0"
 requirements:
   - cm_client
 """
diff --git a/plugins/modules/cm_resource.py b/plugins/modules/cm_resource.py
index 880a1fcb..4612a521 100644
--- a/plugins/modules/cm_resource.py
+++ b/plugins/modules/cm_resource.py
@@ -25,6 +25,7 @@
   - The module supports C(check_mode).
 author:
   - "Webster Mudge (@wmudge)"
+version_added: "4.0.0"
 requirements:
   - cm_client
 options:
diff --git a/plugins/modules/cm_resource_info.py b/plugins/modules/cm_resource_info.py
index a196ba89..6d3ac4f8 100644
--- a/plugins/modules/cm_resource_info.py
+++ b/plugins/modules/cm_resource_info.py
@@ -25,6 +25,7 @@
   - The module supports C(check_mode).
 author:
   - "Webster Mudge (@wmudge)"
+version_added: "4.0.0"
 requirements:
   - cm_client
 extends_documentation_fragment:
diff --git a/plugins/modules/cm_service.py b/plugins/modules/cm_service.py
index cb60598d..e6211385 100644
--- a/plugins/modules/cm_service.py
+++ b/plugins/modules/cm_service.py
@@ -23,6 +23,7 @@
 author:
   - Ronald Suplina (@rsuplina)
   - Webster Mudge (@wmudge)
+version_added: "4.4.0"
 options:
   config:
     description:
diff --git a/plugins/modules/cm_service_config.py b/plugins/modules/cm_service_config.py
index ea4992bb..2a42ee79 100644
--- a/plugins/modules/cm_service_config.py
+++ b/plugins/modules/cm_service_config.py
@@ -22,6 +22,7 @@
   - Manage a configuration (service-wide) for the Cloudera Manager service.
 author:
   - "Webster Mudge (@wmudge)"
+version_added: "4.4.0"
 requirements:
   - cm-client
 options:
diff --git a/plugins/modules/cm_service_info.py b/plugins/modules/cm_service_info.py
index 226d8d3c..a466ef64 100644
--- a/plugins/modules/cm_service_info.py
+++ b/plugins/modules/cm_service_info.py
@@ -23,6 +23,7 @@
 author:
   - Ronald Suplina (@rsuplina)
   - Webster Mudge (@wmudge)
+version_added: "4.4.0"
 extends_documentation_fragment:
   - cloudera.cluster.cm_options
   - cloudera.cluster.cm_endpoint
diff --git a/plugins/modules/cm_service_role.py b/plugins/modules/cm_service_role.py
index 6ecbafb7..a8844e99 100644
--- a/plugins/modules/cm_service_role.py
+++ b/plugins/modules/cm_service_role.py
@@ -22,6 +22,7 @@
   - Manage a Cloudera Manager Service role.
 author:
   - Webster Mudge (@wmudge)
+version_added: "5.0.0"
 options:
   cluster_hostname:
     description:
diff --git a/plugins/modules/cm_service_role_config.py b/plugins/modules/cm_service_role_config.py
index 051258a0..6c9fd1b7 100644
--- a/plugins/modules/cm_service_role_config.py
+++ b/plugins/modules/cm_service_role_config.py
@@ -22,6 +22,7 @@
   - Manage a service role configuration (role-specific) in a cluster.
 author:
   - "Webster Mudge (@wmudge)"
+version_added: "4.4.0"
 requirements:
   - cm-client
 options:
diff --git a/plugins/modules/cm_service_role_config_group.py b/plugins/modules/cm_service_role_config_group.py
index 30557736..b43c24e7 100644
--- a/plugins/modules/cm_service_role_config_group.py
+++ b/plugins/modules/cm_service_role_config_group.py
@@ -22,6 +22,7 @@
   - Manage a Cloudera Manager Service role config group.
 author:
   - Webster Mudge (@wmudge)
+version_added: "5.0.0"
 options:
   type:
     description:
diff --git a/plugins/modules/cm_service_role_config_group_config.py b/plugins/modules/cm_service_role_config_group_config.py
index 0c1f6104..30ebca12 100644
--- a/plugins/modules/cm_service_role_config_group_config.py
+++ b/plugins/modules/cm_service_role_config_group_config.py
@@ -22,6 +22,7 @@
   - Manage the configuration details of a role config group of the Cloudera Manager Service.
 author:
   - "Webster Mudge (@wmudge)"
+version_added: "4.4.0"
 requirements:
   - cm-client
 options:
diff --git a/plugins/modules/cm_service_role_config_group_info.py b/plugins/modules/cm_service_role_config_group_info.py
index a6dc301a..a75d4533 100644
--- a/plugins/modules/cm_service_role_config_group_info.py
+++ b/plugins/modules/cm_service_role_config_group_info.py
@@ -22,6 +22,7 @@
   - Gather information about Cloudera Manager service role config groups.
 author:
   - Webster Mudge (@wmudge)
+version_added: "5.0.0"
 options:
   type:
     description:
diff --git a/plugins/modules/cm_service_role_info.py b/plugins/modules/cm_service_role_info.py
index 48ea9f9d..94829b45 100644
--- a/plugins/modules/cm_service_role_info.py
+++ b/plugins/modules/cm_service_role_info.py
@@ -22,6 +22,7 @@
   - Gather information about one or all Cloudera Manager service roles.
 author:
   - Webster Mudge (@wmudge)
+version_added: "5.0.0"
 options:
   type:
     description:
diff --git a/plugins/modules/cm_trial_license.py b/plugins/modules/cm_trial_license.py
index 07424e1d..88e05dac 100644
--- a/plugins/modules/cm_trial_license.py
+++ b/plugins/modules/cm_trial_license.py
@@ -24,6 +24,7 @@
   - Return information about the trial license.
 author:
   - "Ronald Suplina (@rsuplina)"
+version_added: "4.4.0"
 requirements:
   - cm_client
 """
diff --git a/plugins/modules/cm_version_info.py b/plugins/modules/cm_version_info.py
index d711d4ba..de86105c 100644
--- a/plugins/modules/cm_version_info.py
+++ b/plugins/modules/cm_version_info.py
@@ -23,6 +23,7 @@
   - The module supports C(check_mode).
 author:
   - "Webster Mudge (@wmudge)"
+version_added: "4.0.0"
 requirements:
   - cm_client
 extends_documentation_fragment:
diff --git a/plugins/modules/data_context.py b/plugins/modules/data_context.py
index 7f197ef2..11099f5b 100644
--- a/plugins/modules/data_context.py
+++ b/plugins/modules/data_context.py
@@ -26,6 +26,7 @@
   - The module supports C(check_mode).
 author:
   - "Ronald Suplina (@rsuplina)"
+version_added: "5.0.0"
 requirements:
   - cm-client >= 54
 options:
diff --git a/plugins/modules/data_context_info.py b/plugins/modules/data_context_info.py
index 06c93340..89932979 100644
--- a/plugins/modules/data_context_info.py
+++ b/plugins/modules/data_context_info.py
@@ -22,6 +22,7 @@
   - Retrieve details of a specific data context or all data contexts within the Cloudera Manager.
 author:
   - "Ronald Suplina (@rsuplina)"
+version_added: "5.0.0"
 requirements:
   - cm_client
 options:
diff --git a/plugins/modules/external_account.py b/plugins/modules/external_account.py
index a980e0fb..a47da650 100644
--- a/plugins/modules/external_account.py
+++ b/plugins/modules/external_account.py
@@ -25,6 +25,7 @@
   - Supports I(check_mode).
 author:
   - "Ronald Suplina (@rsuplina)"
+version_added: "5.0.0"
 options:
   name:
     description:
diff --git a/plugins/modules/external_account_info.py b/plugins/modules/external_account_info.py
index ffee00e8..6d30107e 100644
--- a/plugins/modules/external_account_info.py
+++ b/plugins/modules/external_account_info.py
@@ -22,6 +22,7 @@
   - Provides details for a specific account or retrieves all external accounts configured in Cloudera Manager.
 author:
   - "Ronald Suplina (@rsuplina)"
+version_added: "5.0.0"
 options:
   name:
     description:
diff --git a/plugins/modules/external_user_mappings.py b/plugins/modules/external_user_mappings.py
index c73c3efd..8447b96b 100644
--- a/plugins/modules/external_user_mappings.py
+++ b/plugins/modules/external_user_mappings.py
@@ -26,6 +26,7 @@
   - The module supports C(check_mode).
 author:
   - "Ronald Suplina (@rsuplina)"
+version_added: "5.0.0"
 requirements:
   - cm_client
 options:
diff --git a/plugins/modules/external_user_mappings_info.py b/plugins/modules/external_user_mappings_info.py
index 944a3672..bc426f7c 100644
--- a/plugins/modules/external_user_mappings_info.py
+++ b/plugins/modules/external_user_mappings_info.py
@@ -22,6 +22,7 @@
   - Retrieve details of a specific or all external user mappings within the Cloudera Manager.
 author:
   - "Ronald Suplina (@rsuplina)"
+version_added: "5.0.0"
 requirements:
   - cm_client
 options:
diff --git a/plugins/modules/host.py b/plugins/modules/host.py
index edb07594..b93f4f3d 100644
--- a/plugins/modules/host.py
+++ b/plugins/modules/host.py
@@ -24,6 +24,7 @@
 author:
   - "Ronald Suplina (@rsuplina)"
   - "Webster Mudge (@wmudge)"
+version_added: "4.4.0"
 options:
   name:
     description:
diff --git a/plugins/modules/host_config.py b/plugins/modules/host_config.py
index 229fd841..dabb6487 100644
--- a/plugins/modules/host_config.py
+++ b/plugins/modules/host_config.py
@@ -23,6 +23,7 @@
   - The module supports C(check_mode).
 author:
   - "Ronald Suplina (@rsuplina)"
+version_added: "4.4.0"
 requirements:
   - cm_client
 options:
diff --git a/plugins/modules/host_config_info.py b/plugins/modules/host_config_info.py
index ac25c9ab..2a7c4195 100644
--- a/plugins/modules/host_config_info.py
+++ b/plugins/modules/host_config_info.py
@@ -22,6 +22,7 @@
   - Gather configuration information about a specific host.
 author:
   - "Ronald Suplina (@rsuplina)"
+version_added: "4.4.0"
 requirements:
   - cm_client
 options:
diff --git a/plugins/modules/host_info.py b/plugins/modules/host_info.py
index d2bf4dfb..bcff3bb5 100644
--- a/plugins/modules/host_info.py
+++ b/plugins/modules/host_info.py
@@ -23,6 +23,7 @@
 author:
   - "Ronald Suplina (@rsuplina)"
   - "Webster Mudge (@wmudge)"
+version_added: "4.4.0"
 options:
   cluster:
     description:
diff --git a/plugins/modules/host_template.py b/plugins/modules/host_template.py
index 5e32c5ce..eafa0ddd 100644
--- a/plugins/modules/host_template.py
+++ b/plugins/modules/host_template.py
@@ -23,6 +23,7 @@
 author:
   - "Webster Mudge (@wmudge)"
   - "Ronald Suplina (@rsuplina)"
+version_added: "5.0.0"
 options:
   cluster:
     description:
diff --git a/plugins/modules/host_template_info.py b/plugins/modules/host_template_info.py
index 3dc82c75..af24bb1b 100644
--- a/plugins/modules/host_template_info.py
+++ b/plugins/modules/host_template_info.py
@@ -23,6 +23,7 @@
 author:
   - "Ronald Suplina (@rsuplina)"
   - "Webster Mudge (@wmudge)"
+version_added: "5.0.0"
 options:
   cluster:
     description:
diff --git a/plugins/modules/parcel.py b/plugins/modules/parcel.py
index b9d03aaf..fbefa8ad 100644
--- a/plugins/modules/parcel.py
+++ b/plugins/modules/parcel.py
@@ -24,6 +24,7 @@
   - The module manages the transitions between these states, e.g. if a parcel is I(distributed) and I(state=downloaded), the module will deactivate the parcel from the cluster hosts.
 author:
   - "Ronald Suplina (@rsuplina)"
+version_added: "4.4.0"
 requirements:
   - cm-client
 options:
diff --git a/plugins/modules/parcel_info.py b/plugins/modules/parcel_info.py
index 92973b38..a029c25f 100644
--- a/plugins/modules/parcel_info.py
+++ b/plugins/modules/parcel_info.py
@@ -22,6 +22,7 @@
   - Gathers details about a single parcel or about all parcels on the cluster
 author:
   - "Ronald Suplina (@rsuplina)"
+version_added: "4.4.0"
 requirements:
   - cm-client
 options:
diff --git a/plugins/modules/service.py b/plugins/modules/service.py
index 389249c0..171b71d9 100644
--- a/plugins/modules/service.py
+++ b/plugins/modules/service.py
@@ -22,6 +22,7 @@
   - Manage a service in a cluster.
 author:
   - "Webster Mudge (@wmudge)"
+version_added: "4.4.0"
 options:
   cluster:
     description:
diff --git a/plugins/modules/service_config.py b/plugins/modules/service_config.py
index 3fd6b699..cd4f5225 100644
--- a/plugins/modules/service_config.py
+++ b/plugins/modules/service_config.py
@@ -22,6 +22,7 @@
   - Manage a configuration (service-wide) for a cluster service.
 author:
   - "Webster Mudge (@wmudge)"
+version_added: "4.4.0"
 requirements:
   - cm-client
 options:
diff --git a/plugins/modules/service_config_info.py b/plugins/modules/service_config_info.py
index 0eeec66f..4a58ff0d 100644
--- a/plugins/modules/service_config_info.py
+++ b/plugins/modules/service_config_info.py
@@ -22,6 +22,7 @@
   - Gather configuration information about a service of a CDP cluster.
 author:
   - "Webster Mudge (@wmudge)"
+version_added: "4.4.0"
 requirements:
   - cm_client
 options:
diff --git a/plugins/modules/service_info.py b/plugins/modules/service_info.py
index 285cd242..1ab0fd81 100644
--- a/plugins/modules/service_info.py
+++ b/plugins/modules/service_info.py
@@ -22,6 +22,7 @@
   - Gather information about services of a CDP cluster.
 author:
   - "Webster Mudge (@wmudge)"
+version_added: "4.4.0"
 options:
   cluster:
     description:
diff --git a/plugins/modules/service_role.py b/plugins/modules/service_role.py
index e69105a8..909081f6 100644
--- a/plugins/modules/service_role.py
+++ b/plugins/modules/service_role.py
@@ -22,6 +22,7 @@
   - Manage a service role in a cluster.
 author:
   - "Webster Mudge (@wmudge)"
+version_added: "4.4.0"
 options:
   cluster:
     description:
diff --git a/plugins/modules/service_role_config.py b/plugins/modules/service_role_config.py
index e88249b7..390458fe 100644
--- a/plugins/modules/service_role_config.py
+++ b/plugins/modules/service_role_config.py
@@ -22,6 +22,7 @@
   - Manage a service role configuration (role-specific) in a cluster.
 author:
   - "Webster Mudge (@wmudge)"
+version_added: "4.4.0"
 requirements:
   - cm-client
 options:
diff --git a/plugins/modules/service_role_config_group.py b/plugins/modules/service_role_config_group.py
index 2a7e06da..a82ae496 100644
--- a/plugins/modules/service_role_config_group.py
+++ b/plugins/modules/service_role_config_group.py
@@ -22,6 +22,7 @@
   - Manage a cluster service role config group.
 author:
   - "Webster Mudge (@wmudge)"
+version_added: "4.4.0"
 options:
   cluster:
     description:
diff --git a/plugins/modules/service_role_config_group_config.py b/plugins/modules/service_role_config_group_config.py
index 978179cc..ab20418d 100644
--- a/plugins/modules/service_role_config_group_config.py
+++ b/plugins/modules/service_role_config_group_config.py
@@ -22,6 +22,7 @@
   - Manage the configuration details of a role config group of a service in a CDP cluster.
 author:
   - "Webster Mudge (@wmudge)"
+version_added: "4.4.0"
 requirements:
   - cm-client
 options:
diff --git a/plugins/modules/service_role_config_group_config_info.py b/plugins/modules/service_role_config_group_config_info.py
index 1914b97a..eda118c1 100644
--- a/plugins/modules/service_role_config_group_config_info.py
+++ b/plugins/modules/service_role_config_group_config_info.py
@@ -22,6 +22,7 @@
   - Gather the configuration details of role config group of a service in a CDP cluster.
 author:
   - "Webster Mudge (@wmudge)"
+version_added: "4.4.0"
 requirements:
   - cm_client
 options:
diff --git a/plugins/modules/service_role_config_group_info.py b/plugins/modules/service_role_config_group_info.py
index cb70af5a..75f80489 100644
--- a/plugins/modules/service_role_config_group_info.py
+++ b/plugins/modules/service_role_config_group_info.py
@@ -22,6 +22,7 @@
   - Gather details about a role config group or groups of a service in a CDP cluster.
 author:
   - "Webster Mudge (@wmudge)"
+version_added: "4.4.0"
 options:
   cluster:
     description:
diff --git a/plugins/modules/service_role_config_info.py b/plugins/modules/service_role_config_info.py
index 5ddb21ed..1d9ad216 100644
--- a/plugins/modules/service_role_config_info.py
+++ b/plugins/modules/service_role_config_info.py
@@ -22,6 +22,7 @@
   - Gather configuration information about a service role of a CDP cluster.
 author:
   - "Webster Mudge (@wmudge)"
+version_added: "4.4.0"
 requirements:
   - cm_client
 options:
diff --git a/plugins/modules/service_role_info.py b/plugins/modules/service_role_info.py
index 46607135..7f5d9f71 100644
--- a/plugins/modules/service_role_info.py
+++ b/plugins/modules/service_role_info.py
@@ -22,6 +22,7 @@
   - Gather information about one or all service roles of a CDP cluster.
 author:
   - "Webster Mudge (@wmudge)"
+version_added: "4.4.0"
 options:
   cluster:
     description:
diff --git a/plugins/modules/service_type_info.py b/plugins/modules/service_type_info.py
index 9fa3b37c..bd5c1dfa 100644
--- a/plugins/modules/service_type_info.py
+++ b/plugins/modules/service_type_info.py
@@ -21,6 +21,7 @@
   - Gather the available service types of a CDP cluster.
 author:
   - "Webster Mudge (@wmudge)"
+version_added: "4.4.0"
 requirements:
   - cm_client
 options:
diff --git a/plugins/modules/user.py b/plugins/modules/user.py
index dce95c4e..e68adc2f 100644
--- a/plugins/modules/user.py
+++ b/plugins/modules/user.py
@@ -24,6 +24,7 @@
   - Enables the deletion of a user along with its associated roles if desired.
 author:
   - "Ronald Suplina (@rsuplina)"
+version_added: "4.4.0"
 requirements:
   - cm_client
 options:
diff --git a/plugins/modules/user_info.py b/plugins/modules/user_info.py
index edf13bc7..e4061a23 100644
--- a/plugins/modules/user_info.py
+++ b/plugins/modules/user_info.py
@@ -23,6 +23,7 @@
   - Includes information about authentication roles associated with each user.
 author:
   - "Ronald Suplina (@rsuplina)"
+version_added: "4.4.0"
 requirements:
   - cm_client
 options:
diff --git a/roles/assemble_template/meta/argument_specs.yml b/roles/assemble_template/meta/argument_specs.yml
index 8e032be9..044ac0b4 100644
--- a/roles/assemble_template/meta/argument_specs.yml
+++ b/roles/assemble_template/meta/argument_specs.yml
@@ -24,6 +24,7 @@ argument_specs:
       - This ensures that the template processing occurs on the Ansible controller machine.
     author:
       - "Ronald Suplina "
+    version_added: "4.3.0"
     options:
       cluster_template_fragments_directory:
        description: "Path to the directory containing fragment files for the cluster template"

From 661f4a28835108a8286f25469bd899a2c3e97720 Mon Sep 17 00:00:00 2001
From: Webster Mudge
Date: Mon, 23 Jun 2025 15:28:53 -0400
Subject: [PATCH 07/21] Update version_added to legacy filters

Signed-off-by: Webster Mudge
---
 plugins/filter/append_database_port.yml | 2 +-
 plugins/filter/cluster_service_role_hosts.yml | 2 +-
 plugins/filter/default_database_port.yml | 2 +-
 plugins/filter/extract_custom_role_groups.yml | 2 +-
 plugins/filter/extract_custom_roles.yml | 2 +-
 plugins/filter/extract_parcel_urls.yml | 2 +-
 plugins/filter/extract_products_from_manifests.yml | 2 +-
 plugins/filter/extract_role_and_group.yml | 2 +-
 plugins/filter/filter_null_configs.yml | 2 +-
 plugins/filter/find_clusters.yml | 2 +-
 plugins/filter/flatten_dict_list.yml | 1 +
 plugins/filter/format_database_type.yml | 2 +-
 plugins/filter/get_database_collation_mysql.yml | 2 +-
 plugins/filter/get_database_encoding_mysql.yml | 2 +-
 plugins/filter/get_major_version.yml | 2 +-
 plugins/filter/get_product_version.yml | 2 +-
 plugins/filter/to_ldap_type_enum.yml | 2 +-
 17 files changed, 17 insertions(+), 16 deletions(-)

diff --git a/plugins/filter/append_database_port.yml b/plugins/filter/append_database_port.yml
index 684b8941..322f40c2 100644
--- a/plugins/filter/append_database_port.yml
+++ b/plugins/filter/append_database_port.yml
@@ -3,6 +3,6 @@ DOCUMENTATION:
   name: append_database_port
   short_description: append_database_port
   description: append_database_port
-
+  version_added: "3.0.3"
 EXAMPLES:
 RETURN:
diff --git a/plugins/filter/cluster_service_role_hosts.yml b/plugins/filter/cluster_service_role_hosts.yml
index e2cb4a50..e43d3082 100644
--- a/plugins/filter/cluster_service_role_hosts.yml
+++ b/plugins/filter/cluster_service_role_hosts.yml
@@ -3,6 +3,6 @@ DOCUMENTATION:
   name: cluster_service_role_hosts
   short_description: cluster_service_role_hosts
   description: cluster_service_role_hosts
-
+  version_added: "3.0.3"
 EXAMPLES:
 RETURN:
diff --git a/plugins/filter/default_database_port.yml b/plugins/filter/default_database_port.yml
index a6e4aa00..2d082b52 100644
--- a/plugins/filter/default_database_port.yml
+++ b/plugins/filter/default_database_port.yml
@@ -3,6 +3,6 @@ DOCUMENTATION:
   name: default_database_port
   short_description: default_database_port
   description: default_database_port
-
+  version_added: "3.0.3"
 EXAMPLES:
 RETURN:
diff --git a/plugins/filter/extract_custom_role_groups.yml b/plugins/filter/extract_custom_role_groups.yml
index 108fbf15..715c6f95 100644
--- a/plugins/filter/extract_custom_role_groups.yml
+++ b/plugins/filter/extract_custom_role_groups.yml
@@ -3,6 +3,6 @@ DOCUMENTATION:
   name: extract_custom_role_groups
   short_description: extract_custom_role_groups
   description: extract_custom_role_groups
-
+  version_added: "3.0.4"
 EXAMPLES:
 RETURN:
diff --git a/plugins/filter/extract_custom_roles.yml b/plugins/filter/extract_custom_roles.yml
index 695345da..c2510f46 100644
--- a/plugins/filter/extract_custom_roles.yml
+++ b/plugins/filter/extract_custom_roles.yml
@@ -3,6 +3,6 @@ DOCUMENTATION:
   name: extract_custom_roles
   short_description: extract_custom_roles
   description: extract_custom_roles
-
+  version_added: "3.0.4"
 EXAMPLES:
 RETURN:
diff --git a/plugins/filter/extract_parcel_urls.yml b/plugins/filter/extract_parcel_urls.yml
index 4e617638..5b5bba51 100644
--- a/plugins/filter/extract_parcel_urls.yml
+++ b/plugins/filter/extract_parcel_urls.yml
@@ -3,6 +3,6 @@ DOCUMENTATION:
   name: extract_parcel_urls
   short_description: extract_parcel_urls
   description: extract_parcel_urls
-
+  version_added: "3.0.3"
 EXAMPLES:
 RETURN:
diff --git a/plugins/filter/extract_products_from_manifests.yml b/plugins/filter/extract_products_from_manifests.yml
index e834ea28..f7d162dc 100644
--- a/plugins/filter/extract_products_from_manifests.yml
+++ b/plugins/filter/extract_products_from_manifests.yml
@@ -3,6 +3,6 @@ DOCUMENTATION:
   name: extract_products_from_manifests
   short_description: extract_products_from_manifests
   description: extract_products_from_manifests
-
+  version_added: "3.0.3"
 EXAMPLES:
 RETURN:
diff --git a/plugins/filter/extract_role_and_group.yml b/plugins/filter/extract_role_and_group.yml
index 181192c7..749ba8f0 100644
--- a/plugins/filter/extract_role_and_group.yml
+++ b/plugins/filter/extract_role_and_group.yml
@@ -3,6 +3,6 @@ DOCUMENTATION:
   name: extract_role_and_group
   short_description: extract_role_and_group
   description: extract_role_and_group
-
+  version_added: "3.0.4"
 EXAMPLES:
 RETURN:
diff --git a/plugins/filter/filter_null_configs.yml b/plugins/filter/filter_null_configs.yml
index 8c807050..b586321d 100644
--- a/plugins/filter/filter_null_configs.yml
+++ b/plugins/filter/filter_null_configs.yml
@@ -3,6 +3,6 @@ DOCUMENTATION:
   name: fill_null_configs
   short_description: fill_null_configs
   description: fill_null_configs
-
+  version_added: "3.0.3"
 EXAMPLES:
 RETURN:
diff --git a/plugins/filter/find_clusters.yml b/plugins/filter/find_clusters.yml
index 65d13152..bdb2a286 100644
--- a/plugins/filter/find_clusters.yml
+++ b/plugins/filter/find_clusters.yml
@@ -3,6 +3,6 @@ DOCUMENTATION:
   name: find_clusters
   short_description: find_clusters
   description: find_clusters
-
+  version_added: "3.0.3"
 EXAMPLES:
 RETURN:
diff --git a/plugins/filter/flatten_dict_list.yml b/plugins/filter/flatten_dict_list.yml
index a9e0f8e0..872998e9 100644
--- a/plugins/filter/flatten_dict_list.yml
+++ b/plugins/filter/flatten_dict_list.yml
@@ -3,6 +3,7 @@ DOCUMENTATION:
   name: flatten_dict_list
   short_description: flatten_dict_list
   description: flatten_dict_list
+  version_added: "2.0.0"
 
 EXAMPLES:
 RETURN:
diff --git a/plugins/filter/format_database_type.yml b/plugins/filter/format_database_type.yml
index 54909885..1d269592 100644
--- a/plugins/filter/format_database_type.yml
+++ b/plugins/filter/format_database_type.yml
@@ -3,6 +3,6 @@ DOCUMENTATION:
   name: format_database_type
   short_description: format_database_type
   description: format_database_type
-
+  version_added: "3.0.3"
 EXAMPLES:
 RETURN:
diff --git a/plugins/filter/get_database_collation_mysql.yml b/plugins/filter/get_database_collation_mysql.yml
index 6a299eb2..f27aa11b 100644
--- a/plugins/filter/get_database_collation_mysql.yml
+++ b/plugins/filter/get_database_collation_mysql.yml
@@ -3,6 +3,6 @@ DOCUMENTATION:
   name: get_database_collation_mysql
   short_description: get_database_collation_mysql
   description: get_database_collation_mysql
-
+  version_added: "3.0.3"
 EXAMPLES:
 RETURN:
diff --git a/plugins/filter/get_database_encoding_mysql.yml b/plugins/filter/get_database_encoding_mysql.yml
index 65146220..aa436db8 100644
--- a/plugins/filter/get_database_encoding_mysql.yml
+++ b/plugins/filter/get_database_encoding_mysql.yml
@@ -3,6 +3,6 @@ DOCUMENTATION:
   name: get_database_encoding_mysql
   short_description: get_database_encoding_mysql
   description: get_database_encoding_mysql
-
+  version_added: "3.0.3"
 EXAMPLES:
 RETURN:
diff --git a/plugins/filter/get_major_version.yml b/plugins/filter/get_major_version.yml
index 9febf3a0..a20a24be 100644
--- a/plugins/filter/get_major_version.yml
+++ b/plugins/filter/get_major_version.yml
@@ -3,6 +3,6 @@ DOCUMENTATION:
   name: get_major_version
   short_description: get_major_version
   description: get_major_version
-
+  version_added: "3.0.3"
 EXAMPLES:
 RETURN:
diff --git a/plugins/filter/get_product_version.yml b/plugins/filter/get_product_version.yml
index 966765ee..70abeac0 100644
--- a/plugins/filter/get_product_version.yml
+++ b/plugins/filter/get_product_version.yml
@@ -3,6 +3,6 @@ DOCUMENTATION:
   name: get_product_version
   short_description: get_product_version
   description: get_product_version
-
+  version_added: "3.0.3"
 EXAMPLES:
 RETURN:
diff --git a/plugins/filter/to_ldap_type_enum.yml b/plugins/filter/to_ldap_type_enum.yml
index 1367f40c..e12863b6 100644
--- a/plugins/filter/to_ldap_type_enum.yml
+++ b/plugins/filter/to_ldap_type_enum.yml
@@ -3,6 +3,6 @@ DOCUMENTATION:
   name: to_ldap_type_enum
   short_description: to_ldap_type_enum
   description: to_ldap_type_enum
-
+  version_added: "3.0.3"
 EXAMPLES:
 RETURN:

From 74b505d7556786d4246d80fa96419e6ac36ccedb Mon Sep 17 00:00:00 2001
From: Webster Mudge
Date: Mon, 23 Jun 2025 16:47:23 -0400
Subject: [PATCH 08/21] Add antsichaut to docs env and update CHANGELOG for prior releases

Signed-off-by: Webster Mudge
---
 CHANGELOG.rst | 307 ++++++++++++++++++++++++++++
 changelogs/.plugin-cache.yaml | 374 ++++++++++++++++++++++++++++++++++
 changelogs/changelog.yaml | 336 ++++++++++++++++++++++++++++++
 pyproject.toml | 1 +
 4 files changed, 1018 insertions(+)
 create mode 100644 CHANGELOG.rst
 create mode 100644 changelogs/.plugin-cache.yaml
 create mode 100644 changelogs/changelog.yaml

diff --git a/CHANGELOG.rst b/CHANGELOG.rst
new file mode 100644
index 00000000..41bb9ea3
--- /dev/null
+++ b/CHANGELOG.rst
@@ -0,0 +1,307 @@
+==============================
+Cloudera.Cluster Release Notes
+==============================
+
+.. contents:: Topics
+
+v4.4.0
+======
+
+Minor Changes
+-------------
+
+- Add CM Service Info Module (https://github.com/cloudera-labs/cloudera.cluster/pull/190)
+- Add CM Service Module (https://github.com/cloudera-labs/cloudera.cluster/pull/194)
+- Add Cloudera Manager config modules (https://github.com/cloudera-labs/cloudera.cluster/pull/211)
+- Add Cluster Info Module (https://github.com/cloudera-labs/cloudera.cluster/pull/204)
+- Add Host Module (https://github.com/cloudera-labs/cloudera.cluster/pull/218)
+- Add Import_cluster_template module (https://github.com/cloudera-labs/cloudera.cluster/pull/197)
+- Add License/License_info Module (https://github.com/cloudera-labs/cloudera.cluster/pull/199)
+- Add Parcel Module (https://github.com/cloudera-labs/cloudera.cluster/pull/221)
+- Add cluster module (https://github.com/cloudera-labs/cloudera.cluster/pull/224)
+- Add cluster service and related resource modules (https://github.com/cloudera-labs/cloudera.cluster/pull/220)
+- Add cm_trial module (https://github.com/cloudera-labs/cloudera.cluster/pull/195)
+- Add or update API to support diff documentation (https://github.com/cloudera-labs/cloudera.cluster/pull/225)
+- Add workflow and steps to validate for and publishing to Ansible Galaxy (https://github.com/cloudera-labs/cloudera.cluster/pull/230)
+- Update cluster and cluster_info results object and API (https://github.com/cloudera-labs/cloudera.cluster/pull/228)
+- Update cluster state management (https://github.com/cloudera-labs/cloudera.cluster/pull/227)
+- Update parcel_info API output and move parsing function to parcel_utils (https://github.com/cloudera-labs/cloudera.cluster/pull/226)
+- Update to version 4.4.0 (https://github.com/cloudera-labs/cloudera.cluster/pull/231)
+- Updates required for publishing collection to Ansible Galaxy (https://github.com/cloudera-labs/cloudera.cluster/pull/229)
+
+Bugfixes
+--------
+
+- Remove deprecated ansible.builtin.command 'warn' parameter (https://github.com/cloudera-labs/cloudera.cluster/pull/196)
+- Removes blockers to running check mode & diff mode (https://github.com/cloudera-labs/cloudera.cluster/pull/166)
+- Update parcels.yml (https://github.com/cloudera-labs/cloudera.cluster/pull/189)
+- Update postgresql-RedHat.yml (https://github.com/cloudera-labs/cloudera.cluster/pull/188)
+
+New Modules
+-----------
+
+- cloudera.cluster.cluster - Manage the lifecycle and state of a cluster.
+- cloudera.cluster.cluster_info - Retrieve details about one or more clusters.
+- cloudera.cluster.cm_config - Manage the configuration of Cloudera Manager.
+- cloudera.cluster.cm_config_info - Retrieve the Cloudera Manager configuration.
+- cloudera.cluster.cm_license - Activate the license for Cloudera Manager.
+- cloudera.cluster.cm_license_info - Returns details about current license.
+- cloudera.cluster.cm_service - Manage Cloudera Manager service.
+- cloudera.cluster.cm_service_config - Manage the Cloudera Manager service configuration.
+- cloudera.cluster.cm_service_info - Retrieve information about the Cloudera Management service.
+- cloudera.cluster.cm_service_role_config - Manage a service role configuration in cluster.
+- cloudera.cluster.cm_service_role_config_group_config - Manage the configuration of a Cloudera Manager Service role config group.
+- cloudera.cluster.cm_trial_license - Activate the trial license of Cloudera Manager.
+- cloudera.cluster.host - Manage Cloudera Manager hosts.
+- cloudera.cluster.host_config - Manage a host configuration in Cloudera Manager.
+- cloudera.cluster.host_config_info - Retrieves the configuration details of a specific host. +- cloudera.cluster.host_info - Gather information about Cloudera Manager hosts. +- cloudera.cluster.parcel - Manage the state of parcels on a cluster. +- cloudera.cluster.parcel_info - Gather details about the parcels on the cluster. +- cloudera.cluster.service - Manage a service in cluster. +- cloudera.cluster.service_config - Manage a cluster service configuration. +- cloudera.cluster.service_config_info - Retrieve information about the configuration for a cluster service. +- cloudera.cluster.service_info - Retrieve information about the services of cluster. +- cloudera.cluster.service_role - Manage a service role in cluster. +- cloudera.cluster.service_role_config - Manage a service role configuration in cluster. +- cloudera.cluster.service_role_config_group - Manage a cluster service role config group. +- cloudera.cluster.service_role_config_group_config - Manage the configuration of a cluster service role config group. +- cloudera.cluster.service_role_config_group_config_info - Retrieve the configuration of a cluster service role config group. +- cloudera.cluster.service_role_config_group_info - Retrieve information about a cluster service role config group or groups. +- cloudera.cluster.service_role_config_info - Retrieve information about the configuration for a cluster service role. +- cloudera.cluster.service_role_info - Retrieve information about the service roles of cluster. +- cloudera.cluster.service_type_info - Retrieve the service types of a cluster. +- cloudera.cluster.user - Create, delete or update users within Cloudera Manager. +- cloudera.cluster.user_info - Retrieve user details and associated authentication roles. + +v4.3.0 +====== + +Minor Changes +------------- + +- Add assemble template role (https://github.com/cloudera-labs/cloudera.cluster/pull/167) +- Update logging and error handling for CM API modules (https://github.com/cloudera-labs/cloudera.cluster/pull/168) +- Update role API for assemble_template (https://github.com/cloudera-labs/cloudera.cluster/pull/183) +- ldap search filters - allow literal expression (https://github.com/cloudera-labs/cloudera.cluster/pull/163) + +Bugfixes +-------- + +- Add changes to run ansible.builtin.template locally (https://github.com/cloudera-labs/cloudera.cluster/pull/170) +- Allow complex expressions in external authentication LDAP search filters (https://github.com/cloudera-labs/cloudera.cluster/pull/171) +- Remove deprecated "warn" argument from shell and command module calls (https://github.com/cloudera-labs/cloudera.cluster/pull/182) + +New Roles +--------- + +- cloudera.cluster.assemble_template - Discover and render files into a cluster template. + +v4.2.0 +====== + +Minor Changes +------------- + +- Allow selection of cluster deployed from cluster.yml (https://github.com/cloudera-labs/cloudera.cluster/pull/151) +- Create module and action plugins for assemble_cluster_template (https://github.com/cloudera-labs/cloudera.cluster/pull/164) + +Bugfixes +-------- + +- Filter AWS_S3 service from host template validation check (https://github.com/cloudera-labs/cloudera.cluster/pull/161) +- Fix typo - Feature qmanagerdb (https://github.com/cloudera-labs/cloudera.cluster/pull/158) + +New Modules +----------- + +- cloudera.cluster.assemble_cluster_template - Merge Cloudera Manager cluster template fragments. 
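The `assemble_cluster_template` module listed above merges JSON cluster template fragments into a single document. A minimal usage sketch, assuming fragment files sit in a local `definitions/` directory and using the option names from the action plugin later in this series (`src`, `dest`, `regexp`); all values here are hypothetical:

```yaml
- name: Merge cluster template fragments into one template
  cloudera.cluster.assemble_cluster_template:
    src: definitions/           # directory of JSON fragments, merged in file-name order
    dest: /opt/cloudera/cluster-template.json
    regexp: '.*\.json'          # only merge fragments matching this pattern
```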
+ +v4.1.1 +====== + +Bugfixes +-------- + +- Remove extra quote from databases-7.1.0 config condition (https://github.com/cloudera-labs/cloudera.cluster/pull/159) + +v4.1.0 +====== + +Minor Changes +------------- + +- Adds 7.1.9 QueueManager for postgresql (https://github.com/cloudera-labs/cloudera.cluster/pull/152) +- CDH to CDP Upgrade : YARN Queues are not migrated (https://github.com/cloudera-labs/cloudera.cluster/pull/119) +- use spark_on_yarn_service dependency for hive in CDH only (https://github.com/cloudera-labs/cloudera.cluster/pull/123) + +v4.0.1 +====== + +Minor Changes +------------- + +- Update freeipa.ansible_freeipa collection version (https://github.com/cloudera-labs/cloudera.cluster/pull/134) + +Bugfixes +-------- + +- Move non-controller code in 'module_utils/cm_utils' (https://github.com/cloudera-labs/cloudera.cluster/pull/136) +- Update validate_pr.yml workflow to install latest ansible-core 2.12.* (https://github.com/cloudera-labs/cloudera.cluster/pull/138) + +v4.0.0 +====== + +Minor Changes +------------- + +- Add cm_service lookup (https://github.com/cloudera-labs/cloudera.cluster/pull/113) +- Add documentation build workflows (https://github.com/cloudera-labs/cloudera.cluster/pull/125) +- Add query processor to the list of CDP 7.x services (https://github.com/cloudera-labs/cloudera.cluster/pull/85) +- ECS 1.5.0 changes (https://github.com/cloudera-labs/cloudera.cluster/pull/110) +- Fixes for PvC running on PvC with sidecar FreeIPA (https://github.com/cloudera-labs/cloudera.cluster/pull/120) +- Update dependencies for optional functions (https://github.com/cloudera-labs/cloudera.cluster/pull/116) +- Update release/v4.0.0 (#130) (https://github.com/cloudera-labs/cloudera.cluster/pull/132) +- Update release/v4.0.0 (https://github.com/cloudera-labs/cloudera.cluster/pull/130) +- Update with collected CDP PVC changes (https://github.com/cloudera-labs/cloudera.cluster/pull/107) +- support CDP 7.1.9 / CM 7.11.3 deployment (https://github.com/cloudera-labs/cloudera.cluster/pull/127) + +Bugfixes +-------- + +- Add 'freeipa_enroll' optional parameter (https://github.com/cloudera-labs/cloudera.cluster/pull/129) +- Add Postgres default log_directory (https://github.com/cloudera-labs/cloudera.cluster/pull/114) +- Add missing cm_client library (https://github.com/cloudera-labs/cloudera.cluster/pull/121) +- Add status check for NetworkManager updates (https://github.com/cloudera-labs/cloudera.cluster/pull/115) +- Fix/#111 (https://github.com/cloudera-labs/cloudera.cluster/pull/112) + +New Plugins +----------- + +Lookup +~~~~~~ + +- cloudera.cluster.cm_service - Get the details for a service on a CDP Datahub cluster. + +New Modules +----------- + +- cloudera.cluster.cm_endpoint_info - Discover the Cloudera Manager API endpoint. +- cloudera.cluster.cm_resource - Create, update, and delete resources from the Cloudera Manager API endpoint. +- cloudera.cluster.cm_resource_info - Retrieve resources from the Cloudera Manager API endpoint. +- cloudera.cluster.cm_version_info - Gather information about Cloudera Manager. 
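The `cm_resource` and `cm_resource_info` modules listed above expose generic access to the Cloudera Manager REST API. A minimal read sketch, assuming the collection's usual connection options; the host, credentials, and path here are placeholders:

```yaml
- name: Retrieve all clusters from the Cloudera Manager API
  cloudera.cluster.cm_resource_info:
    host: cm.example.com        # placeholder Cloudera Manager host
    username: admin
    password: "{{ cm_password }}"
    path: /clusters             # API path relative to the endpoint root
  register: clusters
```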
+ +v3.4.2 +====== + +Bugfixes +-------- + +- Remove bindep requirements for community.general.ipa_user (https://github.com/cloudera-labs/cloudera.cluster/pull/105) +- Update ansible-builder installation file logic (https://github.com/cloudera-labs/cloudera.cluster/pull/106) + +v3.4.1 +====== + +Minor Changes +------------- + +- #81 add SAN support for certificates (https://github.com/cloudera-labs/cloudera.cluster/pull/82) +- #76 add LIVY for SPARK3 support (https://github.com/cloudera-labs/cloudera.cluster/pull/77) +- Cloudera Manager module framework (https://github.com/cloudera-labs/cloudera.cluster/pull/62) +- Fixes for RHEL8.6 support and custom_repo with Cloudera Manager (https://github.com/cloudera-labs/cloudera.cluster/pull/83) +- Moved host configs out of the cluster role (https://github.com/cloudera-labs/cloudera.cluster/pull/60) +- Pull Request workflow and ansible-builder support (https://github.com/cloudera-labs/cloudera.cluster/pull/104) +- Update collection version to 4.0.0-alpha1 (https://github.com/cloudera-labs/cloudera.cluster/pull/72) +- Updates for private IP installations (https://github.com/cloudera-labs/cloudera.cluster/pull/93) +- WIP PvC Prereqs and Control Plane merge (https://github.com/cloudera-labs/cloudera.cluster/pull/61) + +Bugfixes +-------- + +- #65 Fix SPARK3_ON_YARN inter-service dependency (https://github.com/cloudera-labs/cloudera.cluster/pull/66) +- #86 fix atlas_dir permissions (https://github.com/cloudera-labs/cloudera.cluster/pull/87) +- Avoid repeating CM password check (https://github.com/cloudera-labs/cloudera.cluster/pull/91) +- Remove body_format parameter for parcel manifest URI (https://github.com/cloudera-labs/cloudera.cluster/pull/98) +- Remove body_format parameter for parcel manifest URI (https://github.com/cloudera-labs/cloudera.cluster/pull/99) +- condition based on runtime version (https://github.com/cloudera-labs/cloudera.cluster/pull/75) +- database_port variable typo (https://github.com/cloudera-labs/cloudera.cluster/pull/68) + +v3.4.0 +====== + +Minor Changes +------------- + +- 2021 07 freeipa dep fix (https://github.com/cloudera-labs/cloudera.cluster/pull/40) +- Adding support for SQL Stream Builder deployment (https://github.com/cloudera-labs/cloudera.cluster/pull/48) +- Fix CA cipher and python2/3 install for newer OS targets like el8 (https://github.com/cloudera-labs/cloudera.cluster/pull/51) +- Pvc experiences (https://github.com/cloudera-labs/cloudera.cluster/pull/44) + +v3.3.0 +====== + +Minor Changes +------------- + +- Add Ozone data directories (https://github.com/cloudera-labs/cloudera.cluster/pull/54) +- Fixed MariaDB template evaluation used for TLS (https://github.com/cloudera-labs/cloudera.cluster/pull/45) +- Fixed handling of custom roleConfig Groups (https://github.com/cloudera-labs/cloudera.cluster/pull/46) +- Helpful errors (https://github.com/cloudera-labs/cloudera.cluster/pull/42) +- Improve CSD Download (https://github.com/cloudera-labs/cloudera.cluster/pull/53) +- Pin collection versions (https://github.com/cloudera-labs/cloudera.cluster/pull/52) +- Verify if the hostname reported by the agent heartbeat is correct. 
(https://github.com/cloudera-labs/cloudera.cluster/pull/50) +- removed invalid ranger configs (https://github.com/cloudera-labs/cloudera.cluster/pull/43) + +v3.2.0 +====== + +Minor Changes +------------- + +- Changes required for Core Settings clusters (https://github.com/cloudera-labs/cloudera.cluster/pull/41) + +v3.1.0 +====== + +Minor Changes +------------- + +- Add collection dependencies (https://github.com/cloudera-labs/cloudera.cluster/pull/6) +- Fix ansible-galaxy license statement (https://github.com/cloudera-labs/cloudera.cluster/pull/2) +- Home directory mode fix (https://github.com/cloudera-labs/cloudera.cluster/pull/8) +- Update include_role statements to use the full role name within the Collection as a best practice (https://github.com/cloudera-labs/cloudera.cluster/pull/11) + +v3.0.3 +====== + +New Plugins +----------- + +Filter +~~~~~~ + +- cloudera.cluster.append_database_port - append_database_port. +- cloudera.cluster.cluster_service_role_hosts - cluster_service_role_hosts. +- cloudera.cluster.default_database_port - default_database_port. +- cloudera.cluster.extract_parcel_urls - extract_parcel_urls. +- cloudera.cluster.extract_products_from_manifests - extract_products_from_manifests. +- cloudera.cluster.filter_null_configs - fill_null_configs. +- cloudera.cluster.find_clusters - find_clusters. +- cloudera.cluster.format_database_type - format_database_type. +- cloudera.cluster.get_database_collation_mysql - get_database_collation_mysql. +- cloudera.cluster.get_database_encoding_mysql - get_database_encoding_mysql. +- cloudera.cluster.get_major_version - get_major_version. +- cloudera.cluster.get_product_version - get_product_version. +- cloudera.cluster.to_ldap_type_enum - to_ldap_type_enum. + +v2.0.0 +====== + +New Plugins +----------- + +Filter +~~~~~~ + +- cloudera.cluster.flatten_dict_list - flatten_dict_list. 
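The filter plugins listed above wrap small string and data-structure transformations. A minimal sketch of one of them, assuming `get_major_version` extracts the leading component of a dotted runtime version string:

```yaml
- name: Derive the major version from a runtime version string
  ansible.builtin.debug:
    msg: "{{ '7.1.9' | cloudera.cluster.get_major_version }}"  # leading version component, per the assumption above
```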
diff --git a/changelogs/.plugin-cache.yaml b/changelogs/.plugin-cache.yaml new file mode 100644 index 00000000..82c00fb0 --- /dev/null +++ b/changelogs/.plugin-cache.yaml @@ -0,0 +1,374 @@ +objects: + role: + assemble_template: + description: Discover and render files into a cluster template + name: assemble_template + version_added: 4.3.0 +plugins: + become: {} + cache: {} + callback: {} + cliconf: {} + connection: {} + filter: + append_database_port: + description: append_database_port + name: append_database_port + version_added: 3.0.3 + cluster_service_role_hosts: + description: cluster_service_role_hosts + name: cluster_service_role_hosts + version_added: 3.0.3 + default_database_port: + description: default_database_port + name: default_database_port + version_added: 3.0.3 + extract_custom_role_groups: + description: extract_custom_role_groups + name: extract_custom_role_groups + version_added: 3.0.4 + extract_custom_roles: + description: extract_custom_roles + name: extract_custom_roles + version_added: 3.0.4 + extract_parcel_urls: + description: extract_parcel_urls + name: extract_parcel_urls + version_added: 3.0.3 + extract_products_from_manifests: + description: extract_products_from_manifests + name: extract_products_from_manifests + version_added: 3.0.3 + extract_role_and_group: + description: extract_role_and_group + name: extract_role_and_group + version_added: 3.0.4 + filter_null_configs: + description: fill_null_configs + name: filter_null_configs + version_added: 3.0.3 + find_clusters: + description: find_clusters + name: find_clusters + version_added: 3.0.3 + flatten_dict_list: + description: flatten_dict_list + name: flatten_dict_list + version_added: 2.0.0 + format_database_type: + description: format_database_type + name: format_database_type + version_added: 3.0.3 + get_database_collation_mysql: + description: get_database_collation_mysql + name: get_database_collation_mysql + version_added: 3.0.3 + get_database_encoding_mysql: + description: get_database_encoding_mysql + name: get_database_encoding_mysql + version_added: 3.0.3 + get_major_version: + description: get_major_version + name: get_major_version + version_added: 3.0.3 + get_product_version: + description: get_product_version + name: get_product_version + version_added: 3.0.3 + to_ldap_type_enum: + description: to_ldap_type_enum + name: to_ldap_type_enum + version_added: 3.0.3 + httpapi: {} + inventory: {} + lookup: + cm_license: + description: Get the details of a Cloudera license + name: cm_license + version_added: 5.0.0 + cm_service: + description: Get the details for a service on a CDP Datahub cluster + name: cm_service + version_added: 4.0.0 + module: + assemble_cluster_template: + description: Merge Cloudera Manager cluster template fragments + name: assemble_cluster_template + namespace: '' + version_added: 4.2.0 + cluster: + description: Manage the lifecycle and state of a cluster + name: cluster + namespace: '' + version_added: 4.4.0 + cluster_info: + description: Retrieve details about one or more clusters + name: cluster_info + namespace: '' + version_added: 4.4.0 + cm_autotls: + description: Manage and configure Auto-TLS and Cloudera Manager CA + name: cm_autotls + namespace: '' + version_added: 5.0.0 + cm_autotls_info: + description: Retrieve Cloudera Manager configurations for Auto-TLS + name: cm_autotls_info + namespace: '' + version_added: 5.0.0 + cm_config: + description: Manage the configuration of Cloudera Manager + name: cm_config + namespace: '' + version_added: 4.4.0 + cm_config_info: + 
description: Retrieve the Cloudera Manager configuration + name: cm_config_info + namespace: '' + version_added: 4.4.0 + cm_endpoint_info: + description: Discover the Cloudera Manager API endpoint + name: cm_endpoint_info + namespace: '' + version_added: 4.0.0 + cm_kerberos: + description: Manage and configure Kerberos Authentication for CDP + name: cm_kerberos + namespace: '' + version_added: 5.0.0 + cm_kerberos_info: + description: Retrieve Cloudera Manager configurations for Kerberos + name: cm_kerberos_info + namespace: '' + version_added: 5.0.0 + cm_license: + description: Activate the license for Cloudera Manager + name: cm_license + namespace: '' + version_added: 4.4.0 + cm_license_info: + description: Returns details about current license + name: cm_license_info + namespace: '' + version_added: 4.4.0 + cm_resource: + description: Create, update, and delete resources from the Cloudera Manager + API endpoint + name: cm_resource + namespace: '' + version_added: 4.0.0 + cm_resource_info: + description: Retrieve resources from the Cloudera Manager API endpoint + name: cm_resource_info + namespace: '' + version_added: 4.0.0 + cm_service: + description: Manage Cloudera Manager service + name: cm_service + namespace: '' + version_added: 4.4.0 + cm_service_config: + description: Manage the Cloudera Manager service configuration + name: cm_service_config + namespace: '' + version_added: 4.4.0 + cm_service_info: + description: Retrieve information about the Cloudera Management service + name: cm_service_info + namespace: '' + version_added: 4.4.0 + cm_service_role: + description: Manage a Cloudera Manager Service role + name: cm_service_role + namespace: '' + version_added: 5.0.0 + cm_service_role_config: + description: Manage a service role configuration in cluster + name: cm_service_role_config + namespace: '' + version_added: 4.4.0 + cm_service_role_config_group: + description: Manage a Cloudera Manager Service role config group. + name: cm_service_role_config_group + namespace: '' + version_added: 5.0.0 + cm_service_role_config_group_config: + description: Manage the configuration of a Cloudera Manager Service role config + group. + name: cm_service_role_config_group_config + namespace: '' + version_added: 4.4.0 + cm_service_role_config_group_info: + description: Retrieve information about Cloudera Management service role config + groups. + name: cm_service_role_config_group_info + namespace: '' + version_added: 5.0.0 + cm_service_role_info: + description: Retrieve information about Cloudera Management service roles. + name: cm_service_role_info + namespace: '' + version_added: 5.0.0 + cm_trial_license: + description: Activate the trial license of Cloudera Manager + name: cm_trial_license + namespace: '' + version_added: 4.4.0 + cm_version_info: + description: Gather information about Cloudera Manager + name: cm_version_info + namespace: '' + version_added: 4.0.0 + data_context: + description: Create, update, or delete a data context + name: data_context + namespace: '' + version_added: 5.0.0 + data_context_info: + description: Retrieve details of data contexts + name: data_context_info + namespace: '' + version_added: 5.0.0 + external_account: + description: Create, update, or delete an external module account + name: external_account + namespace: '' + version_added: 5.0.0 + external_account_info: + description: Retrieve external account details.
+ name: external_account_info + namespace: '' + version_added: 5.0.0 + external_user_mappings: + description: Create, update, or delete external user mappings + name: external_user_mappings + namespace: '' + version_added: 5.0.0 + external_user_mappings_info: + description: Retrieve details of external user mappings + name: external_user_mappings_info + namespace: '' + version_added: 5.0.0 + host: + description: Manage Cloudera Manager hosts + name: host + namespace: '' + version_added: 4.4.0 + host_config: + description: Manage a host configuration in Cloudera Manager + name: host_config + namespace: '' + version_added: 4.4.0 + host_config_info: + description: Retrieves the configuration details of a specific host. + name: host_config_info + namespace: '' + version_added: 4.4.0 + host_info: + description: Gather information about Cloudera Manager hosts + name: host_info + namespace: '' + version_added: 4.4.0 + host_template: + description: Manage a cluster host template + name: host_template + namespace: '' + version_added: 5.0.0 + host_template_info: + description: Retrieve details regarding a cluster's host templates. + name: host_template_info + namespace: '' + version_added: 5.0.0 + parcel: + description: Manage the state of parcels on a cluster + name: parcel + namespace: '' + version_added: 4.4.0 + parcel_info: + description: Gather details about the parcels on the cluster + name: parcel_info + namespace: '' + version_added: 4.4.0 + service: + description: Manage a service in cluster + name: service + namespace: '' + version_added: 4.4.0 + service_config: + description: Manage a cluster service configuration + name: service_config + namespace: '' + version_added: 4.4.0 + service_config_info: + description: Retrieve information about the configuration for a cluster service + name: service_config_info + namespace: '' + version_added: 4.4.0 + service_info: + description: Retrieve information about the services of cluster + name: service_info + namespace: '' + version_added: 4.4.0 + service_role: + description: Manage a service role in cluster + name: service_role + namespace: '' + version_added: 4.4.0 + service_role_config: + description: Manage a service role configuration in cluster + name: service_role_config + namespace: '' + version_added: 4.4.0 + service_role_config_group: + description: Manage a cluster service role config group. + name: service_role_config_group + namespace: '' + version_added: 4.4.0 + service_role_config_group_config: + description: Manage the configuration of a cluster service role config group. + name: service_role_config_group_config + namespace: '' + version_added: 4.4.0 + service_role_config_group_config_info: + description: Retrieve the configuration of a cluster service role config group. 
+ name: service_role_config_group_config_info + namespace: '' + version_added: 4.4.0 + service_role_config_group_info: + description: Retrieve information about a cluster service role config group + or groups + name: service_role_config_group_info + namespace: '' + version_added: 4.4.0 + service_role_config_info: + description: Retrieve information about the configuration for a cluster service + role + name: service_role_config_info + namespace: '' + version_added: 4.4.0 + service_role_info: + description: Retrieve information about the service roles of cluster + name: service_role_info + namespace: '' + version_added: 4.4.0 + service_type_info: + description: Retrieve the service types of a cluster + name: service_type_info + namespace: '' + version_added: 4.4.0 + user: + description: Create, delete or update users within Cloudera Manager + name: user + namespace: '' + version_added: 4.4.0 + user_info: + description: Retrieve user details and associated authentication roles. + name: user_info + namespace: '' + version_added: 4.4.0 + netconf: {} + shell: {} + strategy: {} + test: {} + vars: {} +version: 4.4.0 diff --git a/changelogs/changelog.yaml b/changelogs/changelog.yaml new file mode 100644 index 00000000..be21805a --- /dev/null +++ b/changelogs/changelog.yaml @@ -0,0 +1,336 @@ +--- +ancestor: +releases: + 4.4.0: + changes: + minor_changes: + - Update to version 4.4.0 (https://github.com/cloudera-labs/cloudera.cluster/pull/231) + - Add workflow and steps to validate for and publishing to Ansible Galaxy + (https://github.com/cloudera-labs/cloudera.cluster/pull/230) + - Updates required for publishing collection to Ansible Galaxy (https://github.com/cloudera-labs/cloudera.cluster/pull/229) + - Update cluster and cluster_info results object and API (https://github.com/cloudera-labs/cloudera.cluster/pull/228) + - Update cluster state management (https://github.com/cloudera-labs/cloudera.cluster/pull/227) + - Update parcel_info API output and move parsing function to parcel_utils + (https://github.com/cloudera-labs/cloudera.cluster/pull/226) + - Add or update API to support diff documentation (https://github.com/cloudera-labs/cloudera.cluster/pull/225) + - Add cluster module (https://github.com/cloudera-labs/cloudera.cluster/pull/224) + - Add Parcel Module (https://github.com/cloudera-labs/cloudera.cluster/pull/221) + - Add cluster service and related resource modules (https://github.com/cloudera-labs/cloudera.cluster/pull/220) + - Add Host Module (https://github.com/cloudera-labs/cloudera.cluster/pull/218) + - Add Cloudera Manager config modules (https://github.com/cloudera-labs/cloudera.cluster/pull/211) + - Add Cluster Info Module (https://github.com/cloudera-labs/cloudera.cluster/pull/204) + - Add License/License_info Module (https://github.com/cloudera-labs/cloudera.cluster/pull/199) + - Add Import_cluster_template module (https://github.com/cloudera-labs/cloudera.cluster/pull/197) + - Add cm_trial module (https://github.com/cloudera-labs/cloudera.cluster/pull/195) + - Add CM Service Module (https://github.com/cloudera-labs/cloudera.cluster/pull/194) + - Add CM Service Info Module (https://github.com/cloudera-labs/cloudera.cluster/pull/190) + bugfixes: + - Remove deprecated ansible.builtin.command 'warn' parameter (https://github.com/cloudera-labs/cloudera.cluster/pull/196) + - Update parcels.yml (https://github.com/cloudera-labs/cloudera.cluster/pull/189) + - Update postgresql-RedHat.yml (https://github.com/cloudera-labs/cloudera.cluster/pull/188) + - Removes blockers to running 
check mode & diff mode (https://github.com/cloudera-labs/cloudera.cluster/pull/166) + modules: + - description: Manage the lifecycle and state of a cluster. + name: cluster + namespace: '' + - description: Retrieve details about one or more clusters. + name: cluster_info + namespace: '' + - description: Manage the configuration of Cloudera Manager. + name: cm_config + namespace: '' + - description: Retrieve the Cloudera Manager configuration. + name: cm_config_info + namespace: '' + - description: Activate the license for Cloudera Manager. + name: cm_license + namespace: '' + - description: Returns details about current license. + name: cm_license_info + namespace: '' + - description: Manage Cloudera Manager service. + name: cm_service + namespace: '' + - description: Manage the Cloudera Manager service configuration. + name: cm_service_config + namespace: '' + - description: Retrieve information about the Cloudera Management service. + name: cm_service_info + namespace: '' + - description: Manage a service role configuration in cluster. + name: cm_service_role_config + namespace: '' + - description: Manage the configuration of a Cloudera Manager Service role config + group. + name: cm_service_role_config_group_config + namespace: '' + - description: Activate the trial license of Cloudera Manager. + name: cm_trial_license + namespace: '' + - description: Manage Cloudera Manager hosts. + name: host + namespace: '' + - description: Manage a host configuration in Cloudera Manager. + name: host_config + namespace: '' + - description: Retrieves the configuration details of a specific host. + name: host_config_info + namespace: '' + - description: Gather information about Cloudera Manager hosts. + name: host_info + namespace: '' + - description: Manage the state of parcels on a cluster. + name: parcel + namespace: '' + - description: Gather details about the parcels on the cluster. + name: parcel_info + namespace: '' + - description: Manage a service in cluster. + name: service + namespace: '' + - description: Manage a cluster service configuration. + name: service_config + namespace: '' + - description: Retrieve information about the configuration for a cluster service. + name: service_config_info + namespace: '' + - description: Retrieve information about the services of cluster. + name: service_info + namespace: '' + - description: Manage a service role in cluster. + name: service_role + namespace: '' + - description: Manage a service role configuration in cluster. + name: service_role_config + namespace: '' + - description: Manage a cluster service role config group. + name: service_role_config_group + namespace: '' + - description: Manage the configuration of a cluster service role config group. + name: service_role_config_group_config + namespace: '' + - description: Retrieve the configuration of a cluster service role config group. + name: service_role_config_group_config_info + namespace: '' + - description: Retrieve information about a cluster service role config group + or groups. + name: service_role_config_group_info + namespace: '' + - description: Retrieve information about the configuration for a cluster service + role. + name: service_role_config_info + namespace: '' + - description: Retrieve information about the service roles of cluster. + name: service_role_info + namespace: '' + - description: Retrieve the service types of a cluster. + name: service_type_info + namespace: '' + - description: Create, delete or update users within Cloudera Manager. 
+ name: user + namespace: '' + - description: Retrieve user details and associated authentication roles. + name: user_info + namespace: '' + release_date: '2024-05-21' + 4.3.0: + changes: + bugfixes: + - Remove deprecated "warn" argument from shell and command module calls (https://github.com/cloudera-labs/cloudera.cluster/pull/182) + - Allow complex expressions in external authentication LDAP search filters + (https://github.com/cloudera-labs/cloudera.cluster/pull/171) + - Add changes to run ansible.builtin.template locally (https://github.com/cloudera-labs/cloudera.cluster/pull/170) + minor_changes: + - Update role API for assemble_template (https://github.com/cloudera-labs/cloudera.cluster/pull/183) + - Update logging and error handling for CM API modules (https://github.com/cloudera-labs/cloudera.cluster/pull/168) + - Add assemble template role (https://github.com/cloudera-labs/cloudera.cluster/pull/167) + - ldap search filters - allow literal expression (https://github.com/cloudera-labs/cloudera.cluster/pull/163) + objects: + role: + - description: Discover and render files into a cluster template. + name: assemble_template + namespace: + release_date: '2023-12-21' + 4.2.0: + changes: + bugfixes: + - Filter AWS_S3 service from host template validation check (https://github.com/cloudera-labs/cloudera.cluster/pull/161) + - Fix typo - Feature qmanagerdb (https://github.com/cloudera-labs/cloudera.cluster/pull/158) + minor_changes: + - Create module and action plugins for assemble_cluster_template (https://github.com/cloudera-labs/cloudera.cluster/pull/164) + - Allow selection of cluster deployed from cluster.yml (https://github.com/cloudera-labs/cloudera.cluster/pull/151) + modules: + - description: Merge Cloudera Manager cluster template fragments. 
+ name: assemble_cluster_template + namespace: '' + release_date: '2023-11-20' + 4.1.1: + changes: + bugfixes: + - Remove extra quote from databases-7.1.0 config condition (https://github.com/cloudera-labs/cloudera.cluster/pull/159) + release_date: '2023-11-07' + 4.1.0: + changes: + minor_changes: + - Adds 7.1.9 QueueManager for postgresql (https://github.com/cloudera-labs/cloudera.cluster/pull/152) + - use spark_on_yarn_service dependency for hive in CDH only (https://github.com/cloudera-labs/cloudera.cluster/pull/123) + - 'CDH to CDP Upgrade : YARN Queues are not migrated (https://github.com/cloudera-labs/cloudera.cluster/pull/119)' + release_date: '2023-11-02' + 4.0.1: + changes: + bugfixes: + - Update validate_pr.yml workflow to install latest ansible-core 2.12.* (https://github.com/cloudera-labs/cloudera.cluster/pull/138) + - Move non-controller code in 'module_utils/cm_utils' (https://github.com/cloudera-labs/cloudera.cluster/pull/136) + minor_changes: + - Update freeipa.ansible_freeipa collection version (https://github.com/cloudera-labs/cloudera.cluster/pull/134) + release_date: '2023-10-05' + 4.0.0: + changes: + bugfixes: + - Add 'freeipa_enroll' optional parameter (https://github.com/cloudera-labs/cloudera.cluster/pull/129) + - Add missing cm_client library (https://github.com/cloudera-labs/cloudera.cluster/pull/121) + - Add status check for NetworkManager updates (https://github.com/cloudera-labs/cloudera.cluster/pull/115) + - Add Postgres default log_directory (https://github.com/cloudera-labs/cloudera.cluster/pull/114) + - Fix/#111 (https://github.com/cloudera-labs/cloudera.cluster/pull/112) + minor_changes: + - Update release/v4.0.0 (#130) (https://github.com/cloudera-labs/cloudera.cluster/pull/132) + - Update release/v4.0.0 (https://github.com/cloudera-labs/cloudera.cluster/pull/130) + - support CDP 7.1.9 / CM 7.11.3 deployment (https://github.com/cloudera-labs/cloudera.cluster/pull/127) + - Add documentation build workflows (https://github.com/cloudera-labs/cloudera.cluster/pull/125) + - Fixes for PvC running on PvC with sidecar FreeIPA (https://github.com/cloudera-labs/cloudera.cluster/pull/120) + - Update dependencies for optional functions (https://github.com/cloudera-labs/cloudera.cluster/pull/116) + - Add cm_service lookup (https://github.com/cloudera-labs/cloudera.cluster/pull/113) + - ECS 1.5.0 changes (https://github.com/cloudera-labs/cloudera.cluster/pull/110) + - Update with collected CDP PVC changes (https://github.com/cloudera-labs/cloudera.cluster/pull/107) + - Add query processor to the list of CDP 7.x services (https://github.com/cloudera-labs/cloudera.cluster/pull/85) + modules: + - description: Discover the Cloudera Manager API endpoint. + name: cm_endpoint_info + namespace: '' + - description: Create, update, and delete resources from the Cloudera Manager + API endpoint. + name: cm_resource + namespace: '' + - description: Retrieve resources from the Cloudera Manager API endpoint. + name: cm_resource_info + namespace: '' + - description: Gather information about Cloudera Manager. + name: cm_version_info + namespace: '' + plugins: + lookup: + - description: Get the details for a service on a CDP Datahub cluster. 
+ name: cm_service + namespace: + release_date: '2023-09-28' + 3.4.2: + changes: + bugfixes: + - Update ansible-builder installation file logic (https://github.com/cloudera-labs/cloudera.cluster/pull/106) + - Remove bindep requirements for community.general.ipa_user (https://github.com/cloudera-labs/cloudera.cluster/pull/105) + release_date: '2023-02-03' + 3.4.1: + changes: + bugfixes: + - Remove body_format parameter for parcel manifest URI (https://github.com/cloudera-labs/cloudera.cluster/pull/99) + - Remove body_format parameter for parcel manifest URI (https://github.com/cloudera-labs/cloudera.cluster/pull/98) + - Avoid repeating CM password check (https://github.com/cloudera-labs/cloudera.cluster/pull/91) + - '#86 fix atlas_dir permissions (https://github.com/cloudera-labs/cloudera.cluster/pull/87)' + - condition based on runtime version (https://github.com/cloudera-labs/cloudera.cluster/pull/75) + - database_port variable typo (https://github.com/cloudera-labs/cloudera.cluster/pull/68) + - '#65 Fix SPARK3_ON_YARN inter-service dependency (https://github.com/cloudera-labs/cloudera.cluster/pull/66)' + minor_changes: + - Pull Request workflow and ansible-builder support (https://github.com/cloudera-labs/cloudera.cluster/pull/104) + - Updates for private IP installations (https://github.com/cloudera-labs/cloudera.cluster/pull/93) + - Fixes for RHEL8.6 support and custom_repo with Cloudera Manager (https://github.com/cloudera-labs/cloudera.cluster/pull/83) + - ' #81 add SAN support for certificates (https://github.com/cloudera-labs/cloudera.cluster/pull/82)' + - '#76 add LIVY for SPARK3 support (https://github.com/cloudera-labs/cloudera.cluster/pull/77)' + - Update collection version to 4.0.0-alpha1 (https://github.com/cloudera-labs/cloudera.cluster/pull/72) + - Cloudera Manager module framework (https://github.com/cloudera-labs/cloudera.cluster/pull/62) + - WIP PvC Prereqs and Control Plane merge (https://github.com/cloudera-labs/cloudera.cluster/pull/61) + - Moved host configs out of the cluster role (https://github.com/cloudera-labs/cloudera.cluster/pull/60) + release_date: '2023-02-01' + 3.4.0: + changes: + minor_changes: + - Fix CA cipher and python2/3 install for newer OS targets like el8 (https://github.com/cloudera-labs/cloudera.cluster/pull/51) + - Adding support for SQL Stream Builder deployment (https://github.com/cloudera-labs/cloudera.cluster/pull/48) + - Pvc experiences (https://github.com/cloudera-labs/cloudera.cluster/pull/44) + - 2021 07 freeipa dep fix (https://github.com/cloudera-labs/cloudera.cluster/pull/40) + release_date: '2022-08-02' + 3.3.0: + changes: + minor_changes: + - Add Ozone data directories (https://github.com/cloudera-labs/cloudera.cluster/pull/54) + - Improve CSD Download (https://github.com/cloudera-labs/cloudera.cluster/pull/53) + - Pin collection versions (https://github.com/cloudera-labs/cloudera.cluster/pull/52) + - Verify if the hostname reported by the agent heartbeat is correct. 
(https://github.com/cloudera-labs/cloudera.cluster/pull/50) + - Fixed handling of custom roleConfig Groups (https://github.com/cloudera-labs/cloudera.cluster/pull/46) + - Fixed MariaDB template evaluation used for TLS (https://github.com/cloudera-labs/cloudera.cluster/pull/45) + - removed invalid ranger configs (https://github.com/cloudera-labs/cloudera.cluster/pull/43) + - Helpful errors (https://github.com/cloudera-labs/cloudera.cluster/pull/42) + release_date: '2022-04-07' + 3.2.0: + changes: + minor_changes: + - Changes required for Core Settings clusters (https://github.com/cloudera-labs/cloudera.cluster/pull/41) + release_date: '2021-12-09' + 3.1.0: + changes: + minor_changes: + - Update include_role statements to use the full role name within the Collection + as a best practice (https://github.com/cloudera-labs/cloudera.cluster/pull/11) + - Home directory mode fix (https://github.com/cloudera-labs/cloudera.cluster/pull/8) + - Add collection dependencies (https://github.com/cloudera-labs/cloudera.cluster/pull/6) + - Fix ansible-galaxy license statement (https://github.com/cloudera-labs/cloudera.cluster/pull/2) + release_date: '2021-11-30' + 3.0.3: + plugins: + filter: + - description: append_database_port. + name: append_database_port + namespace: + - description: cluster_service_role_hosts. + name: cluster_service_role_hosts + namespace: + - description: default_database_port. + name: default_database_port + namespace: + - description: extract_parcel_urls. + name: extract_parcel_urls + namespace: + - description: extract_products_from_manifests. + name: extract_products_from_manifests + namespace: + - description: fill_null_configs. + name: filter_null_configs + namespace: + - description: find_clusters. + name: find_clusters + namespace: + - description: format_database_type. + name: format_database_type + namespace: + - description: get_database_collation_mysql. + name: get_database_collation_mysql + namespace: + - description: get_database_encoding_mysql. + name: get_database_encoding_mysql + namespace: + - description: get_major_version. + name: get_major_version + namespace: + - description: get_product_version. + name: get_product_version + namespace: + - description: to_ldap_type_enum. + name: to_ldap_type_enum + namespace: + release_date: '2021-11-10' + 2.0.0: + plugins: + filter: + - description: flatten_dict_list. + name: flatten_dict_list + namespace: + release_date: '2021-04-21' diff --git a/pyproject.toml b/pyproject.toml index 462b018e..799a9c61 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -51,6 +51,7 @@ extra-dependencies = [ "sphinx", "sphinx-ansible-theme >= 0.9.0", "antsibull-docs @ git+https://github.com/cloudera-labs/antsibull-docs@cldr-docsite#egg=antsibull-docs", + "antsichaut", ] [tool.hatch.envs.docs.scripts] From 6bdbae50c157b3c4bb38c4b0c07a7f0c931da245 Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Mon, 23 Jun 2025 16:50:55 -0400 Subject: [PATCH 09/21] Update version to 5.0.0-rc.1 Signed-off-by: Webster Mudge --- galaxy.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/galaxy.yml b/galaxy.yml index 1cc27d8c..e6571607 100644 --- a/galaxy.yml +++ b/galaxy.yml @@ -1,4 +1,4 @@ -# Copyright 2023 Cloudera, Inc. +# Copyright 2025 Cloudera, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -16,7 +16,7 @@ namespace: cloudera name: cluster -version: 4.5.0-rc1 +version: 5.0.0-rc.1 readme: README.md authors: - Webster Mudge @wmudge From 9c24a74306b18a29d2c71537ad1934cd954aa570 Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Mon, 23 Jun 2025 18:09:43 -0400 Subject: [PATCH 10/21] Update hatch scripts for changelog management and semantic versioning Signed-off-by: Webster Mudge --- pyproject.toml | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 799a9c61..d5a30c52 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -16,6 +16,8 @@ bypass-selection = true [tool.hatch.version] path = "galaxy.yml" pattern = "version:\\s+(?P[\\d\\.]+)" +scheme = "semver" +validate-bump = true [tool.hatch.envs.default] python = "3.12" @@ -55,9 +57,15 @@ extra-dependencies = [ ] [tool.hatch.envs.docs.scripts] -doclint = "antsibull-docs lint-collection-docs --plugin-docs --validate-collection-refs=all --skip-rstcheck ." +lint = "antsibull-docs lint-collection-docs --plugin-docs --validate-collection-refs=all --skip-rstcheck ." build = "docsbuild/build.sh" -log = "" +changelog = [ + # Read the version in galaxy.yml via hatch itself (normalizes release candidates, etc.) + # Use 'hatch version' to manage the version, i.e. 'hatch version major,rc' + "antsibull-changelog release --version $(hatch version)", + "antsichaut --since_version=latest", + "antsibull-changelog generate", +] [tool.pytest.ini_options] testpaths = ["tests"] @@ -77,5 +85,8 @@ markers = [ ] [build-system] -requires = ["hatchling"] +requires = [ + "hatchling", + "hatch-semver", +] build-backend = "hatchling.build" From 9ac4bd62b2f6c4756cec6a9a002f6d0b8bd2a799 Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Mon, 23 Jun 2025 18:10:07 -0400 Subject: [PATCH 11/21] Configure antsibull-docs to build the changelog Signed-off-by: Webster Mudge --- docs/docsite/config.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/docsite/config.yml b/docs/docsite/config.yml index 0d1260cf..d700a6b7 100644 --- a/docs/docsite/config.yml +++ b/docs/docsite/config.yml @@ -12,4 +12,4 @@ changelog: # Whether to write the changelog (taken from changelogs/changelog.yaml, see the # antsibull-changelog documentation for more information) and link to it from the # collection's index page. - write_changelog: false + write_changelog: true From 305de54f611ec618d396ce3d0f453f1b7c7e062a Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Mon, 23 Jun 2025 18:10:42 -0400 Subject: [PATCH 12/21] Update README with API docs and version management instructions Signed-off-by: Webster Mudge --- README.md | 40 +++++++++++++++++++++++++++++----------- 1 file changed, 29 insertions(+), 11 deletions(-) diff --git a/README.md b/README.md index 6cf98608..0d2e4d0e 100644 --- a/README.md +++ b/README.md @@ -159,24 +159,42 @@ ansible-galaxy collection build ## Building the API Documentation -To create a local copy of the API documentation, first make sure the collection is in your `ANSIBLE_COLLECTIONS_PATHS`. Then run the following: +To create a local copy of the API documentation, first make sure the collection is in your `ANSIBLE_COLLECTIONS_PATH`. 
```bash -# change into the /docsbuild directory -cd docsbuild +hatch run docs:build +``` -# install the build requirements (antsibull-docs); you may want to set up a -# dedicated virtual environment -pip install ansible-core https://github.com/cloudera-labs/antsibull-docs/archive/cldr-docsite.tar.gz +Your local documentation will be found at `docsbuild/build/html`. -# Install the collection's build dependencies -pip install requirements.txt +You can also lint the documentation with the following command: -# Then run the build script -./build.sh +```bash +hatch run docs:lint ``` -Your local documentation will be found at `docsbuild/build/html`. +## Preparing a New Version + +To prepare a version release, first set the following variables for `antsichaut`: + +```bash +export GITHUB_REPOSITORY=cloudera-labs/cloudera.cluster +export GITHUB_TOKEN=some_gh_token_value +``` + +Update the collection version using [`hatch version`](https://hatch.pypa.io/latest/version/). For example, to increment to the next minor release: + +```bash +hatch version minor +``` + +Then update the changelog to query the pull requests since the last release. + +```bash +hatch run docs:changelog +``` + +You can then examine (and update if needed) the resulting `changelog.yaml` and `CHANGELOG.rst` files before committing to the release branch. ## Tested Platforms From 5d14bf7fc8f3c193dbc006bce2b36ea21cf1f0a7 Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Mon, 23 Jun 2025 18:13:30 -0400 Subject: [PATCH 13/21] Update indentation for ansible-lint validation Signed-off-by: Webster Mudge --- changelogs/config.yaml | 36 ++++++++++++++++++------------------ 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/changelogs/config.yaml b/changelogs/config.yaml index 746418be..db9bef5d 100644 --- a/changelogs/config.yaml +++ b/changelogs/config.yaml @@ -9,28 +9,28 @@ mention_ancestor: true new_plugins_after_name: removed_features notesdir: fragments output: -- file: CHANGELOG.rst - format: rst + - file: CHANGELOG.rst + format: rst prelude_section_name: release_summary prelude_section_title: Release Summary sanitize_changelog: true sections: -- - major_changes - - Major Changes -- - minor_changes - - Minor Changes -- - breaking_changes - - Breaking Changes / Porting Guide -- - deprecated_features - - Deprecated Features -- - removed_features - - Removed Features (previously deprecated) -- - security_fixes - - Security Fixes -- - bugfixes - - Bugfixes -- - known_issues - - Known Issues + - - major_changes + - Major Changes + - - minor_changes + - Minor Changes + - - breaking_changes + - Breaking Changes / Porting Guide + - - deprecated_features + - Deprecated Features + - - removed_features + - Removed Features (previously deprecated) + - - security_fixes + - Security Fixes + - - bugfixes + - Bugfixes + - - known_issues + - Known Issues title: Cloudera.Cluster trivial_section_name: trivial use_fqcn: true From 5651419f94a8699471163514a8e2411d386eb741 Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Tue, 24 Jun 2025 17:08:19 -0400 Subject: [PATCH 14/21] Update pre-commit and hooks for ansible-lint, black, and multiple checks Signed-off-by: Webster Mudge --- .pre-commit-config.yaml | 25 +- plugins/action/assemble_cluster_template.py | 45 +++- plugins/action/cm_api.py | 19 +- plugins/filter/filters.py | 8 +- plugins/lookup/cm_license.py | 4 +- plugins/lookup/cm_service.py | 10 +- plugins/module_utils/cm_controller_utils.py | 17 +- plugins/module_utils/cm_utils.py | 64 +++-- plugins/module_utils/host_utils.py | 52 ++-- 
plugins/module_utils/parcel_utils.py | 25 +- .../module_utils/role_config_group_utils.py | 19 +- plugins/module_utils/role_utils.py | 58 +++-- plugins/module_utils/service_utils.py | 51 ++-- plugins/modules/assemble_cluster_template.py | 17 +- plugins/modules/cluster.py | 123 ++++++---- plugins/modules/cluster_info.py | 4 +- plugins/modules/cm_autotls.py | 8 +- plugins/modules/cm_config.py | 2 +- plugins/modules/cm_kerberos.py | 18 +- plugins/modules/cm_resource.py | 15 +- plugins/modules/cm_resource_info.py | 9 +- plugins/modules/cm_service.py | 70 ++++-- plugins/modules/cm_service_config.py | 3 +- plugins/modules/cm_service_role.py | 33 ++- plugins/modules/cm_service_role_config.py | 4 +- .../modules/cm_service_role_config_group.py | 4 +- .../cm_service_role_config_group_config.py | 2 +- .../cm_service_role_config_group_info.py | 4 +- plugins/modules/cm_trial_license.py | 3 +- plugins/modules/data_context.py | 24 +- plugins/modules/data_context_info.py | 6 +- plugins/modules/external_account.py | 8 +- plugins/modules/external_account_info.py | 4 +- plugins/modules/external_user_mappings.py | 12 +- .../modules/external_user_mappings_info.py | 6 +- plugins/modules/host.py | 30 +-- plugins/modules/host_config.py | 5 +- plugins/modules/host_config_info.py | 3 +- plugins/modules/host_template.py | 10 +- plugins/modules/host_template_info.py | 8 +- plugins/modules/parcel.py | 14 +- plugins/modules/service.py | 11 +- plugins/modules/service_config.py | 4 +- plugins/modules/service_config_info.py | 4 +- plugins/modules/service_info.py | 4 +- plugins/modules/service_role.py | 17 +- plugins/modules/service_role_config.py | 13 +- plugins/modules/service_role_config_group.py | 21 +- .../service_role_config_group_config.py | 13 +- .../service_role_config_group_config_info.py | 6 +- .../modules/service_role_config_group_info.py | 7 +- plugins/modules/service_role_config_info.py | 3 +- plugins/modules/service_role_info.py | 7 +- plugins/modules/user.py | 8 +- plugins/modules/user_info.py | 2 +- .../api_client/action_plugins/cm_api.py | 19 +- tests/unit/__init__.py | 103 +++++--- tests/unit/conftest.py | 101 +++++--- .../test_assemble_cluster_template_action.py | 4 +- .../test_assemble_cluster_template_module.py | 30 ++- .../modules/cluster/test_base_cluster.py | 5 +- .../modules/cm_autotls/test_cm_autotls.py | 2 +- .../modules/cm_kerberos/test_cm_kerberos.py | 10 +- .../modules/cm_license/test_cm_license.py | 2 +- .../cm_license_info/test_cm_license_info.py | 2 +- .../modules/cm_resource/test_cm_resource_i.py | 3 +- .../test_cm_resource_info_i.py | 16 +- .../modules/cm_service/test_cm_service.py | 37 +-- .../test_cm_service_role_config_groups.py | 54 ++-- .../cm_service/test_cm_service_roles.py | 80 +++--- .../test_cm_service_config.py | 24 +- .../cm_service_role/test_cm_service_role.py | 66 +++-- .../test_cm_service_role_config.py | 49 ++-- .../test_cm_service_role_config_group.py | 64 +++-- ...est_cm_service_role_config_group_config.py | 105 ++++---- .../test_cm_service_role_config_group_info.py | 7 +- .../test_cm_service_role_info.py | 7 +- .../modules/cm_trial/test_cm_trial_license.py | 2 +- .../cm_version_info/test_cm_version_info_i.py | 7 +- .../external_account/test_external_account.py | 14 +- tests/unit/plugins/modules/host/conftest.py | 12 +- tests/unit/plugins/modules/host/test_host.py | 69 ++++-- .../modules/host/test_host_clusters.py | 28 ++- .../modules/host/test_host_host_templates.py | 30 +-- .../plugins/modules/host/test_host_rcgs.py | 91 ++++--- 
.../modules/host/test_host_role_configs.py | 30 ++- .../modules/host_info/test_host_info.py | 12 +- .../host_template/test_host_template.py | 79 +++--- .../test_host_template_info.py | 8 +- .../plugins/modules/service/test_service.py | 124 +++++++--- .../modules/service/test_service_rcgs.py | 141 +++++++---- .../modules/service/test_service_roles.py | 158 ++++++++---- .../service_config/test_service_config.py | 17 +- .../test_service_config_info.py | 6 +- .../modules/service_info/test_service_info.py | 14 +- .../modules/service_role/test_service_role.py | 230 +++++++++++++----- .../test_service_role_config.py | 12 +- .../test_service_role_config_group.py | 111 +++++---- .../test_service_role_config_group_config.py | 18 +- ...t_service_role_config_group_config_info.py | 10 +- .../test_service_role_config_group_info.py | 31 ++- .../test_service_role_config_info.py | 6 +- .../test_service_role_info.py | 36 ++- .../test_service_type_info.py | 4 +- tests/unit/plugins/modules/utils.py | 4 +- 105 files changed, 1950 insertions(+), 1110 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 0916215b..1ae38ba5 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -15,13 +15,34 @@ repos: - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v3.2.0 + rev: v5.0.0 hooks: - id: trailing-whitespace - id: end-of-file-fixer - id: check-yaml + - id: check-toml + - id: check-json - id: check-added-large-files + - id: check-case-conflict + - id: check-docstring-first + - id: check-merge-conflict + - id: check-symlinks + - id: debug-statements + - id: detect-aws-credentials + - id: detect-private-key + - id: forbid-submodules + # - id: name-tests-test + - repo: https://github.com/asottile/add-trailing-comma.git + rev: v3.2.0 + hooks: + - id: add-trailing-comma + args: + - --py36-plus - repo: https://github.com/psf/black - rev: 22.10.0 + rev: 25.1.0 hooks: - id: black + - repo: https://github.com/ansible/ansible-lint + rev: v25.6.1 + hooks: + - id: ansible-lint diff --git a/plugins/action/assemble_cluster_template.py b/plugins/action/assemble_cluster_template.py index b8eacb33..769f2baf 100644 --- a/plugins/action/assemble_cluster_template.py +++ b/plugins/action/assemble_cluster_template.py @@ -41,18 +41,35 @@ class ActionModule(ActionBase): TRANSFERS_FILES = True def __init__( - self, task, connection, play_context, loader, templar, shared_loader_obj + self, + task, + connection, + play_context, + loader, + templar, + shared_loader_obj, ): super().__init__( - task, connection, play_context, loader, templar, shared_loader_obj + task, + connection, + play_context, + loader, + templar, + shared_loader_obj, ) self.TEMPLATE = ClusterTemplate( - warn_fn=self._display.warning, error_fn=self._display.error + warn_fn=self._display.warning, + error_fn=self._display.error, ) self.MERGED = {} def assemble_fragments( - self, assembled_file, src_path, regex=None, ignore_hidden=True, decrypt=True + self, + assembled_file, + src_path, + regex=None, + ignore_hidden=True, + decrypt=True, ): # By file name sort order for f in ( @@ -80,7 +97,8 @@ def assemble_fragments( self.MERGED = json.loads(fragment_file.read()) else: self.TEMPLATE.merge( - self.MERGED, json.loads(fragment_file.read()) + self.MERGED, + json.loads(fragment_file.read()), ) except json.JSONDecodeError as e: raise AnsibleActionFail( @@ -133,7 +151,7 @@ def run(self, tmp=None, task_vars=None): self._execute_module( module_name="cloudera.cluster.assemble_cluster_template", task_vars=task_vars, - ) + ), ) raise 
_AnsibleActionDone() else: @@ -152,12 +170,14 @@ def run(self, tmp=None, task_vars=None): compiled = re.compile(regexp) except re.error as e: raise AnsibleActionFail( - message=f"Regular expression, {regexp}, is invalid: {to_native(e)}" + message=f"Regular expression, {regexp}, is invalid: {to_native(e)}", ) # Assemble the src files into output file with tempfile.NamedTemporaryFile( - mode="w", encoding="utf-8", dir=C.DEFAULT_LOCAL_TMP + mode="w", + encoding="utf-8", + dir=C.DEFAULT_LOCAL_TMP, ) as assembled: self.assemble_fragments( assembled, @@ -172,7 +192,9 @@ def run(self, tmp=None, task_vars=None): dest = self._remote_expand_user(dest) dest_stat = self._execute_remote_stat( - dest, all_vars=task_vars, follow=follow + dest, + all_vars=task_vars, + follow=follow, ) # Prepare the task arguments for the called submodules @@ -201,7 +223,8 @@ def run(self, tmp=None, task_vars=None): # Define a temporary remote path for the remote copy remote_path = self._connection._shell.join_path( - self._connection._shell.tmpdir, "assembled_cluster_template" + self._connection._shell.tmpdir, + "assembled_cluster_template", ) # Transfer the file to the remote path @@ -214,7 +237,7 @@ def run(self, tmp=None, task_vars=None): submodule_args.update( dict( src=transfered, - ) + ), ) # Execute the copy diff --git a/plugins/action/cm_api.py b/plugins/action/cm_api.py index efa22600..a44ba9aa 100644 --- a/plugins/action/cm_api.py +++ b/plugins/action/cm_api.py @@ -70,7 +70,7 @@ def poll_command_status(self, task_vars, api_base_url, command_id): args = self.build_args( task_vars, additional_args=dict( - url=self.build_url(api_base_url, "/commands/" + str(command_id)) + url=self.build_url(api_base_url, "/commands/" + str(command_id)), ), ) result = self._execute_module( @@ -104,7 +104,7 @@ def run(self, tmp=None, task_vars=None): poll_duration = int(self._task.args.get("poll_duration") or 10) poll_max_failed_retries = int( - self._task.args.get("poll_max_failed_retries") or 3 + self._task.args.get("poll_max_failed_retries") or 3, ) # Add request body if necessary @@ -132,11 +132,14 @@ def run(self, tmp=None, task_vars=None): time.sleep(poll_duration) display.vv( "Waiting for {} command ({}) to complete...".format( - command_name, command_id - ) + command_name, + command_id, + ), ) command_status = self.poll_command_status( - task_vars, api_base_url, command_id + task_vars, + api_base_url, + command_id, ) if "json" in command_status: failed_polls = 0 @@ -147,8 +150,10 @@ def run(self, tmp=None, task_vars=None): response = {"success": False} display.vv( "Failed to poll command ({}) for status (attempt {} of {})...".format( - command_id, failed_polls, poll_max_failed_retries - ) + command_id, + failed_polls, + poll_max_failed_retries, + ), ) result.update(command_status) result["failed"] = not response["success"] diff --git a/plugins/filter/filters.py b/plugins/filter/filters.py index 050fce37..0c5d4656 100644 --- a/plugins/filter/filters.py +++ b/plugins/filter/filters.py @@ -78,7 +78,9 @@ def _flatten_dict_list(i, l, parents): return state def extract_products_from_manifests( - self, manifests, os_distribution: Optional[str] = None + self, + manifests, + os_distribution: Optional[str] = None, ): products = dict() for manifest in manifests: @@ -224,9 +226,9 @@ def extract_custom_roles(self, host_templates, service): def extract_custom_role_groups(self, host_templates): custom_role_groups = set([]) for role_mapping in host_templates.values(): - for (service, roles) in role_mapping.items(): + for service, roles in 
role_mapping.items(): for custom_role in filter(lambda x: "/" in x, roles): custom_role_groups.add( - "-".join([service.lower()] + custom_role.split("/")) + "-".join([service.lower()] + custom_role.split("/")), ) return list(custom_role_groups) diff --git a/plugins/lookup/cm_license.py b/plugins/lookup/cm_license.py index cf5bc6a4..9850b597 100644 --- a/plugins/lookup/cm_license.py +++ b/plugins/lookup/cm_license.py @@ -130,11 +130,11 @@ def run(self, terms, variables=None, **kwargs): license = self.parse(path=terms[0]) elif len(terms) > 1: raise AnsibleLookupError( - "Please specify a single path for the Cloudera license file." + "Please specify a single path for the Cloudera license file.", ) else: raise AnsibleLookupError( - "Please specify either the path to the Cloudera license or its contents in the 'contents' parameter." + "Please specify either the path to the Cloudera license or its contents in the 'contents' parameter.", ) msg = hashlib.sha256(to_bytes(license["name"] + license["uuid"])) diff --git a/plugins/lookup/cm_service.py b/plugins/lookup/cm_service.py index 4aed2aea..94fc16fb 100644 --- a/plugins/lookup/cm_service.py +++ b/plugins/lookup/cm_service.py @@ -111,7 +111,7 @@ def run(self, terms, variables=None, **kwargs): service["type"]: service for service in self.get( "/%s/clusters/%s/services" - % (self.get_option("version"), self.get_option("cluster")) + % (self.get_option("version"), self.get_option("cluster")), ) } @@ -119,9 +119,11 @@ def run(self, terms, variables=None, **kwargs): for term in LookupModule._flatten(terms): if term in all_services: results.append( - all_services[term] - if self.get_option("detailed") - else all_services[term]["name"] + ( + all_services[term] + if self.get_option("detailed") + else all_services[term]["name"] + ), ) else: if self.get_option("default") is not None: diff --git a/plugins/module_utils/cm_controller_utils.py b/plugins/module_utils/cm_controller_utils.py index ba539770..4d2901ba 100644 --- a/plugins/module_utils/cm_controller_utils.py +++ b/plugins/module_utils/cm_controller_utils.py @@ -37,12 +37,9 @@ __maintainer__ = ["wmudge@cloudera.com"] -""" -A common Ansible Lookup plugin for API access to Cloudera Manager. -""" - - class ClouderaManagerLookupBase(LookupBase): + """A common Ansible Lookup plugin for API access to Cloudera Manager.""" + def initialize_client(self): # Set up core CM API client parameters config = Configuration() @@ -99,7 +96,9 @@ def _discover_endpoint(self, config): port=self.get_option("port"), ) rendered = rest.pool_manager.request( - "GET", pre_rendered.url, headers=headers.copy() + "GET", + pre_rendered.url, + headers=headers.copy(), ) rendered_url = rendered.geturl() @@ -117,10 +116,10 @@ def get(self, path, query=None, field="items", body=None): path_params = [] header_params = {} header_params["Accept"] = self.api_client.select_header_accept( - ["application/json"] + ["application/json"], ) header_params["Content-Type"] = self.api_client.select_header_content_type( - ["application/json"] + ["application/json"], ) try: @@ -142,7 +141,7 @@ def get(self, path, query=None, field="items", body=None): else: raise AnsibleError( "Error interacting with CM resource. 
Status code: %s" - % to_text(results[1]) + % to_text(results[1]), ) except ApiException as ae: body = ae.body.decode("utf-8") diff --git a/plugins/module_utils/cm_utils.py b/plugins/module_utils/cm_utils.py index 4af537ab..495899b6 100644 --- a/plugins/module_utils/cm_utils.py +++ b/plugins/module_utils/cm_utils.py @@ -68,7 +68,10 @@ def wait_bulk_commands( def wait_commands( - api_client: ApiClient, commands: ApiCommandList, polling: int = 120, delay: int = 10 + api_client: ApiClient, + commands: ApiCommandList, + polling: int = 120, + delay: int = 10, ): for cmd in commands.items: # Serial monitoring @@ -208,14 +211,16 @@ def reconcile_config_list_updates( after = changeset reconciled_config = ApiConfigList( - items=[ApiConfig(name=k, value=v) for k, v in changeset.items()] + items=[ApiConfig(name=k, value=v) for k, v in changeset.items()], ) return (reconciled_config, before, after) def resolve_tag_updates( - current: dict, incoming: dict, purge: bool = False + current: dict, + incoming: dict, + purge: bool = False, ) -> tuple[dict, dict]: incoming_tags = { k: str(v) @@ -242,7 +247,10 @@ def resolve_tag_updates( class TagUpdates(object): def __init__( - self, existing: list[ApiEntityTag], updates: dict, purge: bool + self, + existing: list[ApiEntityTag], + updates: dict, + purge: bool, ) -> None: (_additions, _deletions) = resolve_tag_updates( current={t.name: t.value for t in existing}, @@ -274,7 +282,7 @@ def __init__(self, existing: ApiConfigList, updates: dict, purge: bool) -> None: ) self.config = ApiConfigList( - items=[ApiConfig(name=k, value=v) for k, v in changeset.items()] + items=[ApiConfig(name=k, value=v) for k, v in changeset.items()], ) @property @@ -284,7 +292,7 @@ def changed(self) -> bool: class ClusterTemplate(object): IDEMPOTENT_IDS = frozenset( - ["refName", "name", "clusterName", "hostName", "product"] + ["refName", "name", "clusterName", "hostName", "product"], ) UNIQUE_IDS = frozenset(["repositories"]) @@ -299,7 +307,7 @@ def merge(self, base: Union[dict, list], fragment: Union[dict, list]) -> bool: self._update_list(base, fragment) else: raise TypeError( - f"Base and fragment arguments must be the same type: base[{type(base)}], fragment[{type(fragment)}]" + f"Base and fragment arguments must be the same type: base[{type(base)}], fragment[{type(fragment)}]", ) def _update_dict(self, base, fragment, breadcrumbs="") -> None: @@ -326,7 +334,7 @@ def _update_dict(self, base, fragment, breadcrumbs="") -> None: # If the value is different, override if base[key] != value: self._warn( - f"Overriding value for key [{crumb}]], Old: [{base[key]}], New: [{value}]" + f"Overriding value for key [{crumb}]], Old: [{base[key]}], New: [{value}]", ) base[key] = value @@ -343,9 +351,9 @@ def _update_list(self, base, fragment, breadcrumbs="") -> None: [ id for id in set(entry.keys()).intersection( - self.IDEMPOTENT_IDS + self.IDEMPOTENT_IDS, ) - ] + ], ), None, ) @@ -360,7 +368,7 @@ def _update_list(self, base, fragment, breadcrumbs="") -> None: if isinstance(i, dict) and idempotent_key in i and i[idempotent_key] == entry[idempotent_key] - ] + ], ), None, ) @@ -410,7 +418,8 @@ def _add_log(err): self.module.fail_json(**_add_log(err)) except MaxRetryError as maxe: err = dict( - msg="Request error: " + to_text(maxe.reason), url=to_text(maxe.url) + msg="Request error: " + to_text(maxe.reason), + url=to_text(maxe.url), ) self.module.fail_json(**_add_log(err)) except HTTPError as he: @@ -523,10 +532,14 @@ def discover_endpoint(self, config): # Resolve redirects to establish HTTP scheme 
and port pre_rendered = Url( - scheme="https" if self.force_tls else "http", host=self.host, port=self.port + scheme="https" if self.force_tls else "http", + host=self.host, + port=self.port, ) rendered = rest.pool_manager.request( - "GET", pre_rendered.url, headers=headers.copy() + "GET", + pre_rendered.url, + headers=headers.copy(), ) # Normalize to handle redirects @@ -575,7 +588,7 @@ def wait_for_command_state(self, command_id, polling_interval): command_api_instance = CommandsResourceApi(self.api_client) while True: get_command_state = command_api_instance.read_command_with_http_info( - command_id=command_id + command_id=command_id, ) state = get_command_state[0].active if not state: @@ -588,10 +601,10 @@ def call_api(self, path, method, query=None, field="items", body=None): path_params = [] header_params = {} header_params["Accept"] = self.api_client.select_header_accept( - ["application/json"] + ["application/json"], ) header_params["Content-Type"] = self.api_client.select_header_content_type( - ["application/json"] + ["application/json"], ) results = self.api_client.call_api( @@ -639,11 +652,15 @@ def wait_command(self, command: ApiCommand, polling: int = 10, delay: int = 5): command = CommandsResourceApi(self.api_client).read_command(command.id) if not command.success: self.module.fail_json( - msg=to_text(command.result_message), command_id=to_text(command.id) + msg=to_text(command.result_message), + command_id=to_text(command.id), ) def wait_commands( - self, commands: ApiBulkCommandList, polling: int = 10, delay: int = 5 + self, + commands: ApiBulkCommandList, + polling: int = 10, + delay: int = 5, ): """ Waits for a list of commands to complete, polling each status at regular intervals. @@ -682,7 +699,10 @@ def ansible_module_internal(argument_spec={}, required_together=[], **kwargs): version=dict(type="str"), force_tls=dict(type="bool", default=False), verify_tls=dict( - required=False, type="bool", default=True, aliases=["tls"] + required=False, + type="bool", + default=True, + aliases=["tls"], ), ssl_ca_cert=dict(type="path", aliases=["tls_cert", "ssl_cert"]), username=dict(required=True, type="str", aliases=["user"]), @@ -700,7 +720,9 @@ def ansible_module_internal(argument_spec={}, required_together=[], **kwargs): aliases=["user_agent"], ), proxy_server=dict( - required=False, type="str", aliases=["proxy", "http_proxy"] + required=False, + type="str", + aliases=["proxy", "http_proxy"], ), ), required_together=required_together + [["username", "password"]], diff --git a/plugins/module_utils/host_utils.py b/plugins/module_utils/host_utils.py index e538d583..72793996 100644 --- a/plugins/module_utils/host_utils.py +++ b/plugins/module_utils/host_utils.py @@ -169,7 +169,9 @@ def get_host( def get_host_ref( - api_client: ApiClient, hostname: str = None, host_id: str = None + api_client: ApiClient, + hostname: str = None, + host_id: str = None, ) -> ApiHostRef: """Retrieve a Host Reference by either hostname or host ID. 
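The resolve_tag_updates() and TagUpdates reflow above preserves an existing contract: given the tags currently on an entity and the declared (incoming) tags, compute which tags to add or change and, when purge is set, which to delete. A minimal sketch of that contract, written here for illustration; the empty-string-means-delete convention and the exact purge semantics are assumptions rather than details confirmed by this patch:

    def resolve_tag_updates_sketch(current: dict, incoming: dict, purge: bool = False):
        # The CM API stores tag values as text, so normalize declared values.
        declared = {k: str(v) for k, v in incoming.items() if v is not None}
        # New tags, or tags whose value differs, become additions.
        additions = {k: v for k, v in declared.items() if current.get(k) != v}
        # With purge=True, tags missing from the declaration are removed;
        # an empty value is assumed here to signal deletion to the endpoint.
        deletions = {k: "" for k in current if purge and k not in declared}
        return (additions, deletions)

    # For example, with purge=True an undeclared tag is queued for removal:
    # resolve_tag_updates_sketch({"env": "dev", "tier": "web"}, {"env": "prod"}, True)
    # returns ({"env": "prod"}, {"tier": ""})
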
@@ -228,7 +230,7 @@ def create_host_model( # Configuration if config: host.config = ApiConfigList( - items=[ApiConfig(name=k, value=v) for k, v in config.items()] + items=[ApiConfig(name=k, value=v) for k, v in config.items()], ) # Tags @@ -263,7 +265,7 @@ def reconcile_host_role_configs( service_name=incoming_role_config["service"], type=incoming_role_config["type"], host_id=host.host_id, - ).items + ).items, ), None, ) @@ -271,7 +273,7 @@ def reconcile_host_role_configs( # If no existing role of service and type exists, raise an error if current_role is None: raise HostException( - f"No role of type, '{incoming_role_config['type']}', found for service, '{incoming_role_config['service']}', on cluster, '{host.cluster_ref.cluster_name}'" + f"No role of type, '{incoming_role_config['type']}', found for service, '{incoming_role_config['service']}', on cluster, '{host.cluster_ref.cluster_name}'", ) # Reconcile role override configurations @@ -338,7 +340,7 @@ def reconcile_host_role_config_groups( ) if base_rcg is None: raise HostException( - f"Base role config group for type, '{rcg['type']}', not found." + f"Base role config group for type, '{rcg['type']}', not found.", ) declared_rcgs[base_rcg.name] = base_rcg # Else, confirm the custom role config group and use its name @@ -350,7 +352,7 @@ def reconcile_host_role_config_groups( ) if custom_rcg is None: raise HostException( - f"Named role config group, '{rcg['name']}', not found." + f"Named role config group, '{rcg['name']}', not found.", ) declared_rcgs[custom_rcg.name] = custom_rcg @@ -390,7 +392,8 @@ def _read_cluster_rcgs( rcg.name: rcg for service in service_api.read_services(cluster_name=cluster.name).items for rcg in rcg_api.read_role_config_groups( - cluster_name=cluster.name, service_name=service.name + cluster_name=cluster.name, + service_name=service.name, ).items } @@ -409,12 +412,12 @@ def _read_host_rcgs( role_name=role_ref.role_name, ) if role.role_config_group_ref.role_config_group_name in cluster_rcgs: - current_rcgs[ - role.role_config_group_ref.role_config_group_name - ] = cluster_rcgs[role.role_config_group_ref.role_config_group_name] + current_rcgs[role.role_config_group_ref.role_config_group_name] = ( + cluster_rcgs[role.role_config_group_ref.role_config_group_name] + ) else: raise Exception( - f"Invalid role config group reference, '{role.role_config_group_ref.role_config_group_name}', on host, {host.hostname}" + f"Invalid role config group reference, '{role.role_config_group_ref.role_config_group_name}', on host, {host.hostname}", ) return current_rcgs @@ -482,11 +485,11 @@ def _reconcile_host_rcgs( if not del_roles: raise Exception( - f"Error reading role type, '{del_rcg.role_type}', for service, '{del_rcg.service_ref.service_name}', on cluster, '{del_rcg.service_ref.cluster_name}'" + f"Error reading role type, '{del_rcg.role_type}', for service, '{del_rcg.service_ref.service_name}', on cluster, '{del_rcg.service_ref.cluster_name}'", ) if len(del_roles) != 1: raise Exception( - f"Error, multiple instances for role type, '{del_rcg.role_type}', for service, '{del_rcg.service_ref.service_name}', on cluster, '{del_rcg.service_ref.cluster_name}'" + f"Error, multiple instances for role type, '{del_rcg.role_type}', for service, '{del_rcg.service_ref.service_name}', on cluster, '{del_rcg.service_ref.cluster_name}'", ) diff_before.append(del_roles[0].to_dict()) @@ -527,7 +530,7 @@ def reconcile_host_template_assignments( ] else: raise HostTemplateException( - f"Invalid role config group reference, 
'{rcg_ref.role_config_group_name}', in host template, {host_template.name}" + f"Invalid role config group reference, '{rcg_ref.role_config_group_name}', in host template, {host_template.name}", ) # Retrieve the associated role config groups from each installed role @@ -551,7 +554,7 @@ def reconcile_host_template_assignments( service_name=add_rcg.service_ref.service_name, role_type=add_rcg.role_type, role_config_group=add_rcg.name, - ).to_dict() + ).to_dict(), ) if not check_mode: @@ -568,7 +571,9 @@ def _apply(): host_template_name=host_template.name, start_roles=False, body=ApiHostRefList( - items=[ApiHostRef(host_id=host.host_id, hostname=host.hostname)] + items=[ + ApiHostRef(host_id=host.host_id, hostname=host.hostname), + ], ), ) wait_command( @@ -613,7 +618,10 @@ def _apply(): def toggle_host_role_states( - api_client: ApiClient, host: ApiHost, state: str, check_mode: bool + api_client: ApiClient, + host: ApiHost, + state: str, + check_mode: bool, ) -> tuple[list[dict], list[dict]]: service_api = ServicesResourceApi(api_client) @@ -644,7 +652,7 @@ def toggle_host_role_states( if state == "started" and role.role_state not in [ApiRoleState.STARTED]: before_roles.append(dict(name=role.name, role_state=role.role_state)) after_roles.append( - dict(name=role.name, role_state=ApiRoleState.STARTED) + dict(name=role.name, role_state=ApiRoleState.STARTED), ) changed_roles.append(role) cmd = role_api.start_command @@ -654,14 +662,14 @@ def toggle_host_role_states( ]: before_roles.append(dict(name=role.name, role_state=role.role_state)) after_roles.append( - dict(name=role.name, role_state=ApiRoleState.STOPPED) + dict(name=role.name, role_state=ApiRoleState.STOPPED), ) changed_roles.append(role) cmd = role_api.stop_command elif state == "restarted": before_roles.append(dict(name=role.name, role_state=role.role_state)) after_roles.append( - dict(name=role.name, role_state=ApiRoleState.STARTED) + dict(name=role.name, role_state=ApiRoleState.STARTED), ) changed_roles.append(role) cmd = role_api.restart_command @@ -700,7 +708,7 @@ def toggle_host_maintenance( if maintenance_cmd.success is False: raise HostMaintenanceStateException( - f"Unable to set Maintenance mode to '{maintenance}': {maintenance_cmd.result_message}" + f"Unable to set Maintenance mode to '{maintenance}': {maintenance_cmd.result_message}", ) return changed @@ -727,7 +735,7 @@ def detach_host( if current_roles and not purge: raise HostException( - f"Unable to detach from cluster, '{host.cluster_ref.cluster_name}', due to existing role instances." 
+ f"Unable to detach from cluster, '{host.cluster_ref.cluster_name}', due to existing role instances.", ) # Decommission the entirety of the host's roles diff --git a/plugins/module_utils/parcel_utils.py b/plugins/module_utils/parcel_utils.py index 93b418ef..c6627dd0 100644 --- a/plugins/module_utils/parcel_utils.py +++ b/plugins/module_utils/parcel_utils.py @@ -72,7 +72,7 @@ def __init__( cluster_name=self.cluster, product=self.product, version=self.version, - ).stage + ).stage, ).upper() ] @@ -85,18 +85,20 @@ def _wait(self, stage: STAGE) -> None: while end_time > time.time(): parcel_status = self.parcel_api.read_parcel( - cluster_name=self.cluster, product=self.product, version=self.version + cluster_name=self.cluster, + product=self.product, + version=self.version, ) if parcel_status.stage == stage.name: return else: self.log( - f"[RETRY] Waiting for parcel stage, {stage.name}, for cluster '{self.cluster}': Product {self.product}[{self.version}]" + f"[RETRY] Waiting for parcel stage, {stage.name}, for cluster '{self.cluster}': Product {self.product}[{self.version}]", ) time.sleep(self.delay) return Exception( - f"Failed to reach parcel stage, {stage.name}: timeout ({self.timeout} secs)" + f"Failed to reach parcel stage, {stage.name}: timeout ({self.timeout} secs)", ) def _exec(self, stage: STAGE, func) -> None: @@ -115,7 +117,7 @@ def _exec(self, stage: STAGE, func) -> None: except ApiException as e: if e.status == 400: self.log( - f"[RETRY] Attempting to execute parcel function, {func}, for cluster '{self.cluster}': Product {self.product}[{self.version}]" + f"[RETRY] Attempting to execute parcel function, {func}, for cluster '{self.cluster}': Product {self.product}[{self.version}]", ) time.sleep(self.delay) continue @@ -128,7 +130,8 @@ def remove(self): if self.current > self.STAGE.AVAILABLE_REMOTELY: self.download(self.STAGE.AVAILABLE_REMOTELY) self._exec( - self.STAGE.AVAILABLE_REMOTELY, self.parcel_api.remove_download_command + self.STAGE.AVAILABLE_REMOTELY, + self.parcel_api.remove_download_command, ) def download(self, target: STAGE = STAGE.DOWNLOADED): @@ -151,7 +154,8 @@ def distribute(self, target: STAGE = STAGE.DISTRIBUTED): elif self.current < self.STAGE.DISTRIBUTING: self.download(target) self._exec( - self.STAGE.DISTRIBUTED, self.parcel_api.start_distribution_command + self.STAGE.DISTRIBUTED, + self.parcel_api.start_distribution_command, ) def activate(self): @@ -181,7 +185,10 @@ def parse_parcel_result(parcel: ApiParcel) -> dict: def wait_parcel_staging( - api_client: ApiClient, cluster: ApiCluster, delay: int = 15, timeout: int = 3600 + api_client: ApiClient, + cluster: ApiCluster, + delay: int = 15, + timeout: int = 3600, ) -> None: parcels_api = ParcelsResourceApi(api_client) @@ -207,5 +214,5 @@ def wait_parcel_staging( time.sleep(delay) raise ParcelException( - f"Failed to reach stable parcel stages for cluster, '{cluster.name}': timeout ({timeout} secs)" + f"Failed to reach stable parcel stages for cluster, '{cluster.name}': timeout ({timeout} secs)", ) diff --git a/plugins/module_utils/role_config_group_utils.py b/plugins/module_utils/role_config_group_utils.py index 8d5ff29c..49562bde 100644 --- a/plugins/module_utils/role_config_group_utils.py +++ b/plugins/module_utils/role_config_group_utils.py @@ -85,7 +85,7 @@ def create_role_config_group( .items ): raise InvalidRoleTypeException( - f"Invalid role type '{role_type}' for service '{service_name}'" + f"Invalid role type '{role_type}' for service '{service_name}'", ) role_config_group = ApiRoleConfigGroup( @@ 
-98,7 +98,7 @@ def create_role_config_group( if config: role_config_group.config = ApiConfigList( - items=[ApiConfig(name=k, value=v) for k, v in config.items()] + items=[ApiConfig(name=k, value=v) for k, v in config.items()], ) return role_config_group @@ -138,7 +138,10 @@ def update_role_config_group( config = dict() (updated_config, config_before, config_after) = reconcile_config_list_updates( - role_config_group.config, config, purge, skip_redacted + role_config_group.config, + config, + purge, + skip_redacted, ) if config_before or config_after: @@ -151,7 +154,10 @@ def update_role_config_group( # TODO Normalize the return value to be a list def get_base_role_config_group( - api_client: ApiClient, cluster_name: str, service_name: str, role_type: str = None + api_client: ApiClient, + cluster_name: str, + service_name: str, + role_type: str = None, ) -> ApiRoleConfigGroup: base_rcg_list = [ r @@ -170,7 +176,8 @@ def get_base_role_config_group( def get_mgmt_base_role_config_group( - api_client: ApiClient, role_type: str + api_client: ApiClient, + role_type: str, ) -> ApiRoleConfigGroup: rcg_api = MgmtRoleConfigGroupsResourceApi(api_client) return next( @@ -179,7 +186,7 @@ def get_mgmt_base_role_config_group( r for r in rcg_api.read_role_config_groups().items if r.role_type == role_type and r.base - ] + ], ), None, ) diff --git a/plugins/module_utils/role_utils.py b/plugins/module_utils/role_utils.py index 8c283f19..38730449 100644 --- a/plugins/module_utils/role_utils.py +++ b/plugins/module_utils/role_utils.py @@ -103,12 +103,15 @@ def parse_role_result(role: ApiRole) -> dict: def get_mgmt_roles(api_client: ApiClient, role_type: str) -> ApiRoleList: role_api = MgmtRolesResourceApi(api_client) return ApiRoleList( - items=[r for r in role_api.read_roles().items if r.type == role_type] + items=[r for r in role_api.read_roles().items if r.type == role_type], ) def read_role( - api_client: ApiClient, cluster_name: str, service_name: str, role_name: str + api_client: ApiClient, + cluster_name: str, + service_name: str, + role_name: str, ) -> ApiRole: """Read a role for a cluster service and populates the role configuration. 
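Both get_base_role_config_group() and get_mgmt_base_role_config_group() above lean on the same invariant: each role type has exactly one role config group flagged as base, while custom groups share the role_type without the flag. A hedged usage sketch of that lookup, assuming an already-authenticated cm_client ApiClient (for instance, one constructed by ClouderaManagerModule) and placeholder cluster, service, and role-type values:

    from cm_client import RoleConfigGroupsResourceApi

    def find_base_rcg(api_client, cluster_name, service_name, role_type):
        # Each role type has exactly one base group (rcg.base is truthy);
        # custom groups share the role_type but are not flagged as base.
        rcg_api = RoleConfigGroupsResourceApi(api_client)
        return next(
            (
                rcg
                for rcg in rcg_api.read_role_config_groups(
                    cluster_name=cluster_name,  # placeholder: supply a real cluster
                    service_name=service_name,  # placeholder: supply a real service
                ).items
                if rcg.role_type == role_type and rcg.base
            ),
            None,  # mirror the utilities above: None when no base group is found
        )
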
@@ -126,11 +129,15 @@ def read_role( """ role_api = RolesResourceApi(api_client) role = role_api.read_role( - cluster_name=cluster_name, service_name=service_name, role_name=role_name + cluster_name=cluster_name, + service_name=service_name, + role_name=role_name, ) if role is not None: role.config = role_api.read_role_config( - cluster_name=cluster_name, service_name=service_name, role_name=role.name + cluster_name=cluster_name, + service_name=service_name, + role_name=role.name, ) return role @@ -180,7 +187,7 @@ def read_roles( ("hostId", host_id), ] if f[1] is not None - ] + ], ) if filter != "": @@ -199,7 +206,10 @@ def read_roles( def read_roles_by_type( - api_client: ApiClient, cluster_name: str, service_name: str, role_type: str + api_client: ApiClient, + cluster_name: str, + service_name: str, + role_type: str, ) -> ApiRoleList: role_api = RolesResourceApi(api_client) roles = [ @@ -258,7 +268,7 @@ def create_role( .items ): raise InvalidRoleTypeException( - f"Invalid role type '{role_type}' for service '{service_name}'" + f"Invalid role type '{role_type}' for service '{service_name}'", ) # Set up the role type @@ -287,7 +297,7 @@ def create_role( if host_ref is None: raise RoleHostNotFoundException( - f"Host not found: hostname='{hostname}', host_id='{host_id}'" + f"Host not found: hostname='{hostname}', host_id='{host_id}'", ) else: role.host_ref = ApiHostRef(host_id=host_ref.host_id, hostname=host_ref.hostname) @@ -302,7 +312,7 @@ def create_role( ) if rcg is None: raise RoleConfigGroupNotFoundException( - f"Role config group not found: {role_config_group}" + f"Role config group not found: {role_config_group}", ) else: role.role_config_group_ref = ApiRoleConfigGroupRef(rcg.name) @@ -310,7 +320,7 @@ def create_role( # Role override configurations if config: role.config = ApiConfigList( - items=[ApiConfig(name=k, value=v) for k, v in config.items()] + items=[ApiConfig(name=k, value=v) for k, v in config.items()], ) # Tags @@ -332,7 +342,7 @@ def create_mgmt_role_model( not in MgmtServiceResourceApi(api_client).list_role_types().items ): raise InvalidRoleTypeException( - f"Invalid role type '{role_type}' for Cloudera Management Service" + f"Invalid role type '{role_type}' for Cloudera Management Service", ) # Set up the role type @@ -361,24 +371,28 @@ def create_mgmt_role_model( if host_ref is None: raise RoleHostNotFoundException( - f"Host not found: hostname='{hostname}', host_id='{host_id}'" + f"Host not found: hostname='{hostname}', host_id='{host_id}'", ) else: mgmt_role.host_ref = ApiHostRef( - host_id=host_ref.host_id, hostname=host_ref.hostname + host_id=host_ref.host_id, + hostname=host_ref.hostname, ) # Role override configurations if config: mgmt_role.config = ApiConfigList( - items=[ApiConfig(name=k, value=v) for k, v in config.items()] + items=[ApiConfig(name=k, value=v) for k, v in config.items()], ) return mgmt_role def provision_service_role( - api_client: ApiClient, cluster_name: str, service_name: str, role: ApiRole + api_client: ApiClient, + cluster_name: str, + service_name: str, + role: ApiRole, ) -> ApiRole: role_api = RolesResourceApi(api_client) @@ -389,7 +403,7 @@ def provision_service_role( cluster_name=cluster_name, service_name=service_name, body=ApiRoleList(items=[role]), - ).items + ).items, ) ), None, @@ -419,7 +433,10 @@ def provision_service_role( def toggle_role_maintenance( - api_client: ApiClient, role: ApiRole, maintenance: bool, check_mode: bool + api_client: ApiClient, + role: ApiRole, + maintenance: bool, + check_mode: bool, ) -> bool: 
role_api = RolesResourceApi(api_client) changed = False @@ -440,14 +457,17 @@ def toggle_role_maintenance( if maintenance_cmd.success is False: raise RoleMaintenanceStateException( - f"Unable to set Maintenance mode to '{maintenance}': {maintenance_cmd.result_message}" + f"Unable to set Maintenance mode to '{maintenance}': {maintenance_cmd.result_message}", ) return changed def toggle_role_state( - api_client: ApiClient, role: ApiRole, state: str, check_mode: bool + api_client: ApiClient, + role: ApiRole, + state: str, + check_mode: bool, ) -> ApiRoleState: role_cmd_api = RoleCommandsResourceApi(api_client) changed = None diff --git a/plugins/module_utils/service_utils.py b/plugins/module_utils/service_utils.py index 274e17cd..ddfbde90 100644 --- a/plugins/module_utils/service_utils.py +++ b/plugins/module_utils/service_utils.py @@ -117,7 +117,7 @@ def parse_service_result(service: ApiService) -> dict: role_config_groups=[ {k: v for k, v in rcg_dict.items() if k != "service_name"} for rcg_dict in parsed_rcgs - ] + ], ) # Parse the roles via util function @@ -128,14 +128,16 @@ def parse_service_result(service: ApiService) -> dict: roles=[ {k: v for k, v in role_dict.items() if k != "service_name"} for role_dict in parsed_roles - ] + ], ) return output def read_service( - api_client: ApiClient, cluster_name: str, service_name: str + api_client: ApiClient, + cluster_name: str, + service_name: str, ) -> ApiService: """Read a cluster service and its role config group and role dependents. @@ -151,13 +153,15 @@ def read_service( rcg_api = RoleConfigGroupsResourceApi(api_client) service = service_api.read_service( - cluster_name=cluster_name, service_name=service_name + cluster_name=cluster_name, + service_name=service_name, ) if service is not None: # Gather the service-wide configuration service.config = service_api.read_service_config( - cluster_name=cluster_name, service_name=service_name + cluster_name=cluster_name, + service_name=service_name, ) # Gather each role config group configuration @@ -198,7 +202,8 @@ def read_services(api_client: ApiClient, cluster_name: str) -> list[ApiService]: for service in discovered_services: # Gather the service-wide configuration service.config = service_api.read_service_config( - cluster_name=cluster_name, service_name=service.name + cluster_name=cluster_name, + service_name=service.name, ) # Gather each role config group configuration @@ -238,7 +243,7 @@ def create_service_model( .items ): raise InvalidServiceTypeException( - f"Invalid service type '{type}' for cluster '{cluster_name}'" + f"Invalid service type '{type}' for cluster '{cluster_name}'", ) # Set up the service basics @@ -250,7 +255,7 @@ def create_service_model( # Service-wide configurations if config: service.config = ApiConfigList( - items=[ApiConfig(name=k, value=v) for k, v in config.items()] + items=[ApiConfig(name=k, value=v) for k, v in config.items()], ) # Tags @@ -261,7 +266,9 @@ def create_service_model( def provision_service( - api_client: ApiClient, cluster_name: str, service: ApiService + api_client: ApiClient, + cluster_name: str, + service: ApiService, ) -> ApiService: service_api = ServicesResourceApi(api_client) @@ -271,7 +278,7 @@ def provision_service( service_api.create_services( cluster_name=cluster_name, body=ApiServiceList(items=[service]), - ).items + ).items, ) ), None, @@ -299,7 +306,10 @@ def provision_service( def toggle_service_maintenance( - api_client: ApiClient, service: ApiService, maintenance: bool, check_mode: bool + api_client: ApiClient, + service: 
ApiService, + maintenance: bool, + check_mode: bool, ) -> bool: service_api = ServicesResourceApi(api_client) changed = False @@ -319,14 +329,17 @@ def toggle_service_maintenance( if maintenance_cmd.success is False: raise ServiceMaintenanceStateException( - f"Unable to set Maintenance mode to '{maintenance}': {maintenance_cmd.result_message}" + f"Unable to set Maintenance mode to '{maintenance}': {maintenance_cmd.result_message}", ) return changed def toggle_service_state( - api_client: ApiClient, service: ApiService, state: str, check_mode: bool + api_client: ApiClient, + service: ApiService, + state: str, + check_mode: bool, ) -> ApiServiceState: service_api = ServicesResourceApi(api_client) changed = None @@ -411,7 +424,7 @@ def _handle_config( after = changeset reconciled_config = ApiServiceConfig( - items=[ApiConfig(name=k, value=v) for k, v in changeset.items()] + items=[ApiConfig(name=k, value=v) for k, v in changeset.items()], ) return (reconciled_config, before, after) @@ -476,7 +489,7 @@ def __init__(self, existing: ApiServiceConfig, updates: dict, purge: bool) -> No ) self.config = ApiServiceConfig( - items=[ApiConfig(name=k, value=v) for k, v in changeset.items()] + items=[ApiConfig(name=k, value=v) for k, v in changeset.items()], ) @property @@ -772,7 +785,9 @@ def reconcile_service_roles( incoming_tags = dict() tag_updates = TagUpdates( - current_role.tags, incoming_tags, purge + current_role.tags, + incoming_tags, + purge, ) if tag_updates.changed: @@ -805,7 +820,7 @@ def reconcile_service_roles( != base_rcg.name ): instance_role_before.update( - role_config_group=current_role.role_config_group_ref.role_config_group_name + role_config_group=current_role.role_config_group_ref.role_config_group_name, ) instance_role_after.update(role_config_group=base_rcg.name) @@ -822,7 +837,7 @@ def reconcile_service_roles( != current_role.role_config_group_ref.role_config_group_name ): instance_role_before.update( - role_config_group=current_role.role_config_group_ref.role_config_group_name + role_config_group=current_role.role_config_group_ref.role_config_group_name, ) instance_role_after.update(role_config_group=incoming_rcg.name) diff --git a/plugins/modules/assemble_cluster_template.py b/plugins/modules/assemble_cluster_template.py index e2c7b13b..ead28c47 100644 --- a/plugins/modules/assemble_cluster_template.py +++ b/plugins/modules/assemble_cluster_template.py @@ -191,7 +191,8 @@ def assemble_fragments(self, assembled_file): self.merged = json.loads(fragment_file.read()) else: self.template.merge( - self.merged, json.loads(fragment_file.read()) + self.merged, + json.loads(fragment_file.read()), ) except json.JSONDecodeError as e: self.module.fail_json( @@ -218,13 +219,16 @@ def process(self): self.compiled = re.compile(self.regexp) except re.error as e: self.module.fail_json( - msg=f"Regular expression, {self.regexp} is invalid: {to_native(e)}" + msg=f"Regular expression, {self.regexp} is invalid: {to_native(e)}", ) # Assemble the src files into output file # No deletion on close; atomic_move "removes" the file with tempfile.NamedTemporaryFile( - mode="w", encoding="utf-8", dir=self.module.tmpdir, delete=False + mode="w", + encoding="utf-8", + dir=self.module.tmpdir, + delete=False, ) as assembled: # Process fragments into temporary file self.assemble_fragments(assembled) @@ -249,14 +253,17 @@ def process(self): self.output.update(backup_file=self.module.backup_local(self.dest)) self.module.atomic_move( - assembled.name, self.dest, unsafe_writes=self.unsafe_writes + assembled.name, 
+ self.dest, + unsafe_writes=self.unsafe_writes, ) self.changed = True # Notify file permissions self.changed = self.module.set_fs_attributes_if_different( - self.file_perms, self.changed + self.file_perms, + self.changed, ) # Finalize output diff --git a/plugins/modules/cluster.py b/plugins/modules/cluster.py index 375d476c..764de79e 100644 --- a/plugins/modules/cluster.py +++ b/plugins/modules/cluster.py @@ -901,7 +901,7 @@ def process(self): # Modify cluster if existing: self.module.warn( - "Module currently does not support reconcilation of cluster templates with existing clusters." + "Module currently does not support reconcilation of cluster templates with existing clusters.", ) refresh = False @@ -928,7 +928,7 @@ def process(self): if self.auto_tls: enable_tls_cmd = ( self.cluster_api.configure_auto_tls_services_command( - cluster_name=self.name + cluster_name=self.name, ) ) wait_command( @@ -975,7 +975,7 @@ def process(self): if self.auto_tls: enable_tls_cmd = ( self.cluster_api.configure_auto_tls_services_command( - cluster_name=self.name + cluster_name=self.name, ) ) wait_command( @@ -987,7 +987,8 @@ def process(self): cluster_name=self.name, ) wait_command( - api_client=self.api_client, command=disable_tls_cmd + api_client=self.api_client, + command=disable_tls_cmd, ) self.changed = True @@ -996,7 +997,9 @@ def process(self): if not existing or existing.entity_status == "NONE" or self.force: first_run = self.cluster_api.first_run(cluster_name=self.name) self.wait_command( - first_run, polling=self.timeout, delay=self.delay + first_run, + polling=self.timeout, + delay=self.delay, ) # Start the existing and previously initialized cluster else: @@ -1025,7 +1028,7 @@ def process(self): if self.auto_tls: enable_tls_cmd = ( self.cluster_api.configure_auto_tls_services_command( - cluster_name=self.name + cluster_name=self.name, ) ) wait_command( @@ -1037,7 +1040,8 @@ def process(self): cluster_name=self.name, ) wait_command( - api_client=self.api_client, command=disable_tls_cmd + api_client=self.api_client, + command=disable_tls_cmd, ) # Stop an existing cluster else: @@ -1047,7 +1051,7 @@ def process(self): if self.auto_tls: enable_tls_cmd = ( self.cluster_api.configure_auto_tls_services_command( - cluster_name=self.name + cluster_name=self.name, ) ) wait_command( @@ -1059,7 +1063,8 @@ def process(self): cluster_name=self.name, ) wait_command( - api_client=self.api_client, command=disable_tls_cmd + api_client=self.api_client, + command=disable_tls_cmd, ) if not self.module.check_mode: stop = self.cluster_api.stop_command(cluster_name=self.name) @@ -1083,7 +1088,7 @@ def process(self): if self.auto_tls: enable_tls_cmd = ( self.cluster_api.configure_auto_tls_services_command( - cluster_name=self.name + cluster_name=self.name, ) ) wait_command( @@ -1095,7 +1100,8 @@ def process(self): cluster_name=self.name, ) wait_command( - api_client=self.api_client, command=disable_tls_cmd + api_client=self.api_client, + command=disable_tls_cmd, ) self.changed = True @@ -1103,7 +1109,9 @@ def process(self): if self.force: first_run = self.cluster_api.first_run(cluster_name=self.name) self.wait_command( - first_run, polling=self.timeout, delay=self.delay + first_run, + polling=self.timeout, + delay=self.delay, ) restart = self.cluster_api.restart_command(cluster_name=self.name) self.wait_command(restart, polling=self.timeout, delay=self.delay) @@ -1111,7 +1119,7 @@ def process(self): if refresh: # Retrieve the updated cluster details self.output = parse_cluster_result( - 
self.cluster_api.read_cluster(cluster_name=self.name) + self.cluster_api.read_cluster(cluster_name=self.name), ) elif existing: self.output = parse_cluster_result(existing) @@ -1139,8 +1147,8 @@ def wait_for_active_cmd(self, cluster_name: str): active_cmd = next( iter( self.cluster_api.list_active_commands( - cluster_name=cluster_name - ).items + cluster_name=cluster_name, + ).items, ), None, ) @@ -1165,7 +1173,8 @@ def create_cluster_from_template(self, template_contents: dict): # Merge/overlay any explicit parameters over the template TEMPLATE = ClusterTemplate( - warn_fn=self.module.warn, error_fn=self.module.fail_json + warn_fn=self.module.warn, + error_fn=self.module.fail_json, ) TEMPLATE.merge(template_contents, explicit_params) payload.update(body=template_contents) @@ -1178,18 +1187,19 @@ def create_cluster_from_template(self, template_contents: dict): self.changed = True if not self.module.check_mode: import_template_request = self.cm_api.import_cluster_template( - **payload + **payload, ).to_dict() command_id = import_template_request["id"] self.wait_for_command_state( - command_id=command_id, polling_interval=self.delay + command_id=command_id, + polling_interval=self.delay, ) def create_cluster_from_parameters(self): if self.cluster_version is None: self.module.fail_json( - msg=f"Cluster must be created. Missing required parameter: cluster_version" + msg=f"Cluster must be created. Missing required parameter: cluster_version", ) # Configure the core cluster @@ -1231,13 +1241,15 @@ def create_cluster_from_parameters(self): name=ht["name"], role_config_group_refs=[ ApiRoleConfigGroupRef( - rcg["name"] - if rcg["name"] - else self.find_base_role_group_name( - service_name=rcg["service"], - service_type=rcg["service_type"], - role_type=rcg["type"], - ) + ( + rcg["name"] + if rcg["name"] + else self.find_base_role_group_name( + service_name=rcg["service"], + service_type=rcg["service_type"], + role_type=rcg["type"], + ) + ), ) for rcg in ht["role_groups"] ], @@ -1304,7 +1316,7 @@ def create_cluster_from_parameters(self): parcel.download() except ApiException as ae: self.module.fail_json( - msg="Error managing parcel states: " + to_native(ae) + msg="Error managing parcel states: " + to_native(ae), ) # Apply host templates @@ -1328,10 +1340,11 @@ def create_cluster_from_parameters(self): rcg.role_type, ) for s in self.service_api.read_services( - cluster_name=self.name + cluster_name=self.name, ).items # s.name for rcg in self.role_group_api.read_role_config_groups( - cluster_name=self.name, service_name=s.name + cluster_name=self.name, + service_name=s.name, ).items } @@ -1344,7 +1357,7 @@ def create_cluster_from_parameters(self): if rcg["name"] not in all_rcgs: self.module.fail_json( msg="Role config group '%s' not found on cluster." 
- % rcg["name"] + % rcg["name"], ) else: rcg_ref = all_rcgs[rcg["name"]] @@ -1358,7 +1371,7 @@ def create_cluster_from_parameters(self): if refs[0] and refs[1] == rcg["service"] and refs[2] == rcg["type"] - ] + ], ), None, ) @@ -1366,7 +1379,7 @@ def create_cluster_from_parameters(self): if rcg_name is None: self.module.fail_json( msg="Unable to find base role group, '%s [%s]', on cluster, '%s'" - % (rcg["service"], rcg["type"], self.name) + % (rcg["service"], rcg["type"], self.name), ) rcg_ref = all_rcgs[rcg_name] @@ -1376,7 +1389,7 @@ def create_cluster_from_parameters(self): cluster_name=self.name, service_name=rcg_ref[1], body=ApiRoleList( - items=[ApiRole(type=rcg_ref[2], host_ref=hostref)] + items=[ApiRole(type=rcg_ref[2], host_ref=hostref)], ), ) @@ -1387,7 +1400,7 @@ def create_cluster_from_parameters(self): role_config_group_name=rcg["name"], service_name=rcg_ref[1], body=ApiRoleNameList( - items=[direct_roles.items[0].name] + items=[direct_roles.items[0].name], ), ) @@ -1405,7 +1418,7 @@ def create_cluster_from_parameters(self): service_name=override["service"], filter="type==%s;hostId==%s" % (override["type"], hostref.host_id), - ).items + ).items, ), None, ) @@ -1420,13 +1433,13 @@ def create_cluster_from_parameters(self): items=[ ApiConfig(name=k, value=v) for k, v in override["config"].items() - ] + ], ), ) else: self.module.fail_json( msg="Role not found. No role type '%s' for service '%s' found on host '%s'" - % (override["type"], override["service"], hostref.hostname) + % (override["type"], override["service"], hostref.hostname), ) # Configure the experience cluster if self.control_plane: @@ -1445,7 +1458,8 @@ def create_cluster_from_parameters(self): self.control_plane_api.install_embedded_control_plane(body=body) ) self.wait_for_command_state( - command_id=setup_control_plane.id, polling_interval=self.delay + command_id=setup_control_plane.id, + polling_interval=self.delay, ) # Execute auto-role assignments @@ -1461,7 +1475,9 @@ def marshal_service(self, options: dict) -> ApiService: # Service-wide configuration if options["config"]: service.config = ApiServiceConfig( - items=[ApiConfig(name=k, value=v) for k, v in options["config"].items()] + items=[ + ApiConfig(name=k, value=v) for k, v in options["config"].items() + ], ) if options["role_groups"]: @@ -1484,7 +1500,7 @@ def marshal_service(self, options: dict) -> ApiService: items=[ ApiConfig(name=k, value=v) for k, v in body["config"].items() - ] + ], ) rcg_list.append(rcg) @@ -1509,18 +1525,21 @@ def marshal_hostrefs(self, hosts: dict) -> list[ApiHostRef]: and h.cluster_ref.cluster_name != self.name ): self.module.fail_json( - msg=f"Invalid host reference! Host {h.hostname} ({h.host_id}) already in use with cluster '{h.cluster_ref.cluster_name}'!" + msg=f"Invalid host reference! 
Host {h.hostname} ({h.host_id}) already in use with cluster '{h.cluster_ref.cluster_name}'!", ) results.append(ApiHostRef(host_id=h.host_id, hostname=h.hostname)) if len(results) != len(hosts.keys()): self.module.fail_json( msg="Did not find the following hosts: " - + ", ".join(set(hosts.keys() - set(results))) + + ", ".join(set(hosts.keys() - set(results))), ) return results def find_base_role_group_name( - self, role_type: str, service_name: str = None, service_type: str = None + self, + role_type: str, + service_name: str = None, + service_type: str = None, ) -> str: if service_name: @@ -1528,7 +1547,8 @@ def find_base_role_group_name( rcg for s in self.service_api.read_services(cluster_name=self.name).items for rcg in self.role_group_api.read_role_config_groups( - cluster_name=self.name, service_name=s.name + cluster_name=self.name, + service_name=s.name, ).items if s.name == service_name ] @@ -1537,7 +1557,8 @@ def find_base_role_group_name( rcg for s in self.service_api.read_services(cluster_name=self.name).items for rcg in self.role_group_api.read_role_config_groups( - cluster_name=self.name, service_name=s.name + cluster_name=self.name, + service_name=s.name, ).items if s.type == service_type ] @@ -1550,7 +1571,7 @@ def find_base_role_group_name( if base is None: self.module.fail_json( "Invalid role group; unable to discover base role group for service role, %s" - % role_type + % role_type, ) else: return base.name @@ -1615,7 +1636,8 @@ def main(): options=dict( name=dict(aliases=["ref", "ref_name"]), service=dict( - required=True, aliases=["service_name", "service_ref"] + required=True, + aliases=["service_name", "service_ref"], ), type=dict(aliases=["role_type"]), ), @@ -1632,7 +1654,8 @@ def main(): elements="dict", options=dict( service=dict( - required=True, aliases=["service_name", "service_ref"] + required=True, + aliases=["service_name", "service_ref"], ), type=dict(required=True, aliases=["role_type"]), config=dict(type="dict", required=True), @@ -1673,7 +1696,9 @@ def main(): remote_repo_url=dict(required=True, type="str"), datalake_cluster_name=dict(required=True, type="str"), control_plane_config=dict( - required=True, type="dict", aliases=["values_yaml"] + required=True, + type="dict", + aliases=["values_yaml"], ), ), ), diff --git a/plugins/modules/cluster_info.py b/plugins/modules/cluster_info.py index ff974829..c0a5a4b2 100644 --- a/plugins/modules/cluster_info.py +++ b/plugins/modules/cluster_info.py @@ -123,8 +123,8 @@ def process(self): if self.name: self.output = [ parse_cluster_result( - cluster_api_instance.read_cluster(cluster_name=self.name) - ) + cluster_api_instance.read_cluster(cluster_name=self.name), + ), ] else: self.output = [ diff --git a/plugins/modules/cm_autotls.py b/plugins/modules/cm_autotls.py index c259e8f4..bb970a44 100644 --- a/plugins/modules/cm_autotls.py +++ b/plugins/modules/cm_autotls.py @@ -174,7 +174,7 @@ password: "S&peR4Ec*re" state: present connection_user_name: clouduser - connection_private_key: "-----BEGIN RSA PRIVATE KEY-----\n[base-64 encoded key]\n-----END RSA PRIVATE KEY-----" + connection_private_key: "-----BEGIN YOUR KEY -----\n[base-64 encoded key]\n-----END YOUR KEY-----" - name: Disable Auto-TLS cloudera.cluster.cm_autotls: @@ -344,12 +344,12 @@ def process(self): trusted_ca_certs=self.trusted_ca_certs, host_certs=self.host_certs, configure_all_services=self.configure_all_services, - ) + ), ) if cmca_result.success is False: self.module.fail_json( - msg=f"Unable to enable AutoTLS: {cmca_result.result_message}" + 
msg=f"Unable to enable AutoTLS: {cmca_result.result_message}", ) # Retrieve cm_config again after enabling TLS @@ -382,7 +382,7 @@ def process(self): body = ApiConfigList( items=[ ApiConfig(name=k, value=v) for k, v in reset_params.items() - ] + ], ) cm_api_instance.update_config(body=body) diff --git a/plugins/modules/cm_config.py b/plugins/modules/cm_config.py index e825479c..7a1d0fb1 100644 --- a/plugins/modules/cm_config.py +++ b/plugins/modules/cm_config.py @@ -195,7 +195,7 @@ def process(self): items=[ cm_client.ApiConfig(name=k, value=v) for k, v in change_set.items() - ] + ], ) # Return 'summary' refresh = False diff --git a/plugins/modules/cm_kerberos.py b/plugins/modules/cm_kerberos.py index 0e299989..ace9bbe8 100644 --- a/plugins/modules/cm_kerberos.py +++ b/plugins/modules/cm_kerberos.py @@ -268,7 +268,7 @@ def __init__(self, module): self.ad_delete_on_regenerate = self.get_param("ad_delete_on_regenerate") self.ad_set_encryption_types = self.get_param("ad_set_encryption_types") self.kdc_account_creation_host_override = self.get_param( - "kdc_account_creation_host_override" + "kdc_account_creation_host_override", ) self.gen_keytab_script = self.get_param("gen_keytab_script") self.kdc_admin_user = self.get_param("kdc_admin_user") @@ -299,7 +299,7 @@ def process(self): or self.ad_set_encryption_types ): self.module.fail_json( - msg="Parameters 'ad_account_prefix', 'ad_kdc_domain', 'ad_delete_on_regenerate' or 'ad_set_encryption_types' can only be used with 'kdc_type = Active Directory'" + msg="Parameters 'ad_account_prefix', 'ad_kdc_domain', 'ad_delete_on_regenerate' or 'ad_set_encryption_types' can only be used with 'kdc_type = Active Directory'", ) # Convert encryption types to space separated string @@ -349,7 +349,7 @@ def process(self): body = ApiConfigList( items=[ ApiConfig(name=k, value=v) for k, v in change_set.items() - ] + ], ) cm_api_instance.update_config(message=self.message, body=body).items @@ -370,8 +370,9 @@ def process(self): creds_cmd_result = next( iter( self.wait_for_command_state( - command_id=cmd.id, polling_interval=self.delay - ) + command_id=cmd.id, + polling_interval=self.delay, + ), ), None, ) @@ -417,7 +418,8 @@ def process(self): ) # NOTE: Change set is always > 0 change_set = resolve_parameter_changeset( - current, {k.upper(): v for k, v in reset_params.items()} + current, + {k.upper(): v for k, v in reset_params.items()}, ) if change_set: @@ -433,14 +435,14 @@ def process(self): body = ApiConfigList( items=[ ApiConfig(name=k, value=v) for k, v in reset_params.items() - ] + ], ) cm_api_instance.update_config(body=body).items # Set output # Retrieve cm_config again after enabling Kerberos self.output.update( - cm_config=[r.to_dict() for r in self.get_cm_config()] + cm_config=[r.to_dict() for r in self.get_cm_config()], ) diff --git a/plugins/modules/cm_resource.py b/plugins/modules/cm_resource.py index 4612a521..d3dfe003 100644 --- a/plugins/modules/cm_resource.py +++ b/plugins/modules/cm_resource.py @@ -118,7 +118,11 @@ def __init__(self, module): def process(self): if not self.module.check_mode: self.resources = self.call_api( - self.path, self.method, self.query, self.field, self.body + self.path, + self.method, + self.query, + self.field, + self.body, ) @@ -128,11 +132,16 @@ def main(): method=dict(required=True, type="str", choices=["POST", "PUT", "DELETE"]), path=dict(required=True, type="str"), query=dict( - required=False, type="dict", aliases=["query_parameters", "parameters"] + required=False, + type="dict", + aliases=["query_parameters", 
"parameters"], ), body=dict(required=False, type="dict"), field=dict( - required=False, type="str", default="items", aliases=["return_field"] + required=False, + type="str", + default="items", + aliases=["return_field"], ), ), supports_check_mode=True, diff --git a/plugins/modules/cm_resource_info.py b/plugins/modules/cm_resource_info.py index 6d3ac4f8..1550680b 100644 --- a/plugins/modules/cm_resource_info.py +++ b/plugins/modules/cm_resource_info.py @@ -93,10 +93,15 @@ def main(): argument_spec=dict( path=dict(required=True, type="str"), query=dict( - required=False, type="dict", aliases=["query_parameters", "parameters"] + required=False, + type="dict", + aliases=["query_parameters", "parameters"], ), field=dict( - required=False, type="str", default="items", aliases=["return_field"] + required=False, + type="str", + default="items", + aliases=["return_field"], ), ), supports_check_mode=True, diff --git a/plugins/modules/cm_service.py b/plugins/modules/cm_service.py index e6211385..e2838be5 100644 --- a/plugins/modules/cm_service.py +++ b/plugins/modules/cm_service.py @@ -670,7 +670,7 @@ def process(self): if maintenance_cmd.success is False: self.module.fail_json( - msg=f"Unable to set Maintenance mode to '{self.maintenance}': {maintenance_cmd.result_message}" + msg=f"Unable to set Maintenance mode to '{self.maintenance}': {maintenance_cmd.result_message}", ) # Handle service-wide changes @@ -690,7 +690,8 @@ def process(self): if not self.module.check_mode: service_api.update_service_config( - message=self.message, body=updates.config + message=self.message, + body=updates.config, ) # Manage role config groups (base only) @@ -735,7 +736,9 @@ def process(self): # Reconcile configurations if existing_rcg.config or self.purge: updates = ConfigListUpdates( - existing_rcg.config, incoming_rcg["config"], self.purge + existing_rcg.config, + incoming_rcg["config"], + self.purge, ) if updates.changed: @@ -752,7 +755,9 @@ def process(self): payload.display_name is not None or payload.config is not None ) and not self.module.check_mode: rcg_api.update_role_config_group( - existing_rcg.name, message=self.message, body=payload + existing_rcg.name, + message=self.message, + body=payload, ) # Add any new role config groups @@ -763,7 +768,8 @@ def process(self): rcg_diff = dict(before=dict(), after=dict()) existing_rcg = get_mgmt_base_role_config_group( - self.api_client, rcg_type + self.api_client, + rcg_type, ) incoming_rcg = incoming_rcgs_map[rcg_type] @@ -773,7 +779,7 @@ def process(self): if incoming_display_name is not None: if self.module._diff: rcg_diff["before"].update( - display_name=existing_rcg.display_name + display_name=existing_rcg.display_name, ) rcg_diff["after"].update(display_name=incoming_display_name) payload.display_name = incoming_display_name @@ -781,7 +787,9 @@ def process(self): incoming_rcg_config = incoming_rcg.get("config") if incoming_rcg_config: updates = ConfigListUpdates( - existing_rcg.config, incoming_rcg_config, self.purge + existing_rcg.config, + incoming_rcg_config, + self.purge, ) if self.module._diff: @@ -794,7 +802,9 @@ def process(self): if not self.module.check_mode: rcg_api.update_role_config_group( - existing_rcg.name, message=self.message, body=payload + existing_rcg.name, + message=self.message, + body=payload, ) # Remove any undeclared role config groups @@ -806,15 +816,18 @@ def process(self): rcg_diff = dict(before=dict(), after=dict()) existing_rcg = get_mgmt_base_role_config_group( - self.api_client, rcg_type + self.api_client, + rcg_type, ) payload 
= ApiRoleConfigGroup( - display_name=f"mgmt-{rcg_type}-BASE" + display_name=f"mgmt-{rcg_type}-BASE", ) updates = ConfigListUpdates( - existing_rcg.config, dict(), self.purge + existing_rcg.config, + dict(), + self.purge, ) if self.module._diff: @@ -825,7 +838,9 @@ def process(self): if not self.module.check_mode: rcg_api.update_role_config_group( - existing_rcg.name, message=self.message, body=payload + existing_rcg.name, + message=self.message, + body=payload, ) # Manage roles @@ -885,8 +900,8 @@ def process(self): ( iter( role_api.create_roles( - body=ApiRoleList(items=[new_role]) - ).items + body=ApiRoleList(items=[new_role]), + ).items, ) ), {}, @@ -935,8 +950,8 @@ def process(self): ( iter( role_api.create_roles( - body=ApiRoleList(items=[new_role]) - ).items + body=ApiRoleList(items=[new_role]), + ).items, ) ), {}, @@ -959,21 +974,27 @@ def process(self): # Handle various states if self.state == "started" and current.service_state not in [ - ApiServiceState.STARTED + ApiServiceState.STARTED, ]: self.exec_service_command( - current, ApiServiceState.STARTED, service_api.start_command + current, + ApiServiceState.STARTED, + service_api.start_command, ) elif self.state == "stopped" and current.service_state not in [ ApiServiceState.STOPPED, ApiServiceState.NA, ]: self.exec_service_command( - current, ApiServiceState.STOPPED, service_api.stop_command + current, + ApiServiceState.STOPPED, + service_api.stop_command, ) elif self.state == "restarted": self.exec_service_command( - current, ApiServiceState.STARTED, service_api.restart_command + current, + ApiServiceState.STARTED, + service_api.restart_command, ) # If there are changes, get a fresh read @@ -987,7 +1008,10 @@ def process(self): self.module.fail_json(msg=f"Invalid state: {self.state}") def exec_service_command( - self, service: ApiService, value: str, cmd: Callable[[None], ApiCommand] + self, + service: ApiService, + value: str, + cmd: Callable[[None], ApiCommand], ): self.changed = True if self.module._diff: @@ -1018,7 +1042,9 @@ def main(): display_name=dict(), # TODO Remove display_name as an option type=dict(required=True, aliases=["role_type"]), config=dict( - required=True, type="dict", aliases=["params", "parameters"] + required=True, + type="dict", + aliases=["params", "parameters"], ), ), ), diff --git a/plugins/modules/cm_service_config.py b/plugins/modules/cm_service_config.py index 2a42ee79..dd89a232 100644 --- a/plugins/modules/cm_service_config.py +++ b/plugins/modules/cm_service_config.py @@ -235,7 +235,8 @@ def process(self): self.config = [ p.to_dict() for p in api_instance.update_service_config( - message=self.message, body=updates.config + message=self.message, + body=updates.config, ).items ] diff --git a/plugins/modules/cm_service_role.py b/plugins/modules/cm_service_role.py index a8844e99..4fc62026 100644 --- a/plugins/modules/cm_service_role.py +++ b/plugins/modules/cm_service_role.py @@ -456,21 +456,27 @@ def process(self): # Handle the various states if self.state == "started" and current.role_state not in [ - ApiRoleState.STARTED + ApiRoleState.STARTED, ]: self.exec_role_command( - current, ApiRoleState.STARTED, role_cmd_api.start_command + current, + ApiRoleState.STARTED, + role_cmd_api.start_command, ) elif self.state == "stopped" and current.role_state not in [ ApiRoleState.STOPPED, ApiRoleState.NA, ]: self.exec_role_command( - current, ApiRoleState.STOPPED, role_cmd_api.stop_command + current, + ApiRoleState.STOPPED, + role_cmd_api.stop_command, ) elif self.state == "restarted": 
self.exec_role_command( - current, ApiRoleState.STARTED, role_cmd_api.restart_command + current, + ApiRoleState.STARTED, + role_cmd_api.restart_command, ) # If there are changes, get a fresh read @@ -485,7 +491,10 @@ def process(self): self.module.fail_json(msg=f"Invalid state: {self.state}") def exec_role_command( - self, role: ApiRole, value: str, cmd: Callable[[ApiRoleNameList], ApiCommand] + self, + role: ApiRole, + value: str, + cmd: Callable[[ApiRoleNameList], ApiCommand], ): self.changed = True if self.module._diff: @@ -511,7 +520,7 @@ def handle_maintenance(self, role_api: MgmtRolesResourceApi, role: ApiRole) -> N if maintenance_cmd.success is False: self.module.fail_json( - msg=f"Unable to set Maintenance mode to '{self.maintenance}': {maintenance_cmd.result_message}" + msg=f"Unable to set Maintenance mode to '{self.maintenance}': {maintenance_cmd.result_message}", ) def provision_role(self, role_api: MgmtRolesResourceApi, role: ApiRole) -> ApiRole: @@ -529,19 +538,23 @@ def provision_role(self, role_api: MgmtRolesResourceApi, role: ApiRole) -> ApiRo iter( role_api.create_roles( body=ApiRoleList(items=[role]), - ).items + ).items, ) ), {}, ) if not created_role: self.module.fail_json( - msg="Unable to create new role", role=to_native(role.to_dict()) + msg="Unable to create new role", + role=to_native(role.to_dict()), ) return created_role def reprovision_role( - self, role_api: MgmtRolesResourceApi, existing_role: ApiRole, new_role: ApiRole + self, + role_api: MgmtRolesResourceApi, + existing_role: ApiRole, + new_role: ApiRole, ) -> ApiRole: self.changed = True @@ -559,7 +572,7 @@ def reprovision_role( iter( role_api.create_roles( body=ApiRoleList(items=[new_role]), - ).items + ).items, ) ), {}, diff --git a/plugins/modules/cm_service_role_config.py b/plugins/modules/cm_service_role_config.py index 6c9fd1b7..a49b8701 100644 --- a/plugins/modules/cm_service_role_config.py +++ b/plugins/modules/cm_service_role_config.py @@ -237,13 +237,13 @@ def process(self): if self.name is None: role = next( iter( - [r for r in role_api.read_roles().items if r.type == self.type] + [r for r in role_api.read_roles().items if r.type == self.type], ), None, ) if role is None: self.module.fail_json( - msg=f"Unable to find Cloudera Manager Service role type '{self.type}" + msg=f"Unable to find Cloudera Manager Service role type '{self.type}", ) else: self.name = role.name diff --git a/plugins/modules/cm_service_role_config_group.py b/plugins/modules/cm_service_role_config_group.py index b43c24e7..b6d12793 100644 --- a/plugins/modules/cm_service_role_config_group.py +++ b/plugins/modules/cm_service_role_config_group.py @@ -185,7 +185,7 @@ def process(self): current = get_mgmt_base_role_config_group(self.api_client, self.type) if current is None: self.module.fail_json( - msg=f"Unable to find Cloudera Manager service base role config group for role type '{self.type}'" + msg=f"Unable to find Cloudera Manager service base role config group for role type '{self.type}'", ) except ApiException as ex: if ex.status != 404: @@ -223,7 +223,7 @@ def process(self): # Report on any role associations self.output.update( - role_names=[r.name for r in rcg_api.read_roles(current.name).items] + role_names=[r.name for r in rcg_api.read_roles(current.name).items], ) diff --git a/plugins/modules/cm_service_role_config_group_config.py b/plugins/modules/cm_service_role_config_group_config.py index 30ebca12..96b9eecd 100644 --- a/plugins/modules/cm_service_role_config_group_config.py +++ 
b/plugins/modules/cm_service_role_config_group_config.py @@ -234,7 +234,7 @@ def process(self): rcg = get_mgmt_base_role_config_group(self.api_client, self.type) if rcg is None: self.module.fail_json( - msg=f"Unable to find Cloudera Manager Service base role config group for role type '{self.type}'" + msg=f"Unable to find Cloudera Manager Service base role config group for role type '{self.type}'", ) self.name = rcg.name diff --git a/plugins/modules/cm_service_role_config_group_info.py b/plugins/modules/cm_service_role_config_group_info.py index a75d4533..d246adec 100644 --- a/plugins/modules/cm_service_role_config_group_info.py +++ b/plugins/modules/cm_service_role_config_group_info.py @@ -151,7 +151,7 @@ def process(self): if current is not None: result = parse_role_config_group_result(current) result.update( - role_names=[r.name for r in rcg_api.read_roles(current.name).items] + role_names=[r.name for r in rcg_api.read_roles(current.name).items], ) self.output.append(result) else: @@ -159,7 +159,7 @@ def process(self): def process_result(rcg: ApiRoleConfigGroup) -> dict: result = parse_role_config_group_result(rcg) result.update( - role_names=[r.name for r in rcg_api.read_roles(rcg.name).items] + role_names=[r.name for r in rcg_api.read_roles(rcg.name).items], ) return result diff --git a/plugins/modules/cm_trial_license.py b/plugins/modules/cm_trial_license.py index 88e05dac..f45d1c84 100644 --- a/plugins/modules/cm_trial_license.py +++ b/plugins/modules/cm_trial_license.py @@ -103,7 +103,8 @@ def process(self): def main(): module = ClouderaManagerModule.ansible_module( - argument_spec=dict(), supports_check_mode=True + argument_spec=dict(), + supports_check_mode=True, ) result = ClouderaTrial(module) diff --git a/plugins/modules/data_context.py b/plugins/modules/data_context.py index 11099f5b..a4fb1b7b 100644 --- a/plugins/modules/data_context.py +++ b/plugins/modules/data_context.py @@ -209,13 +209,13 @@ def process(self): except ApiException as ex: if ex.status == 404: self.module.fail_json( - msg="Cluster does not exist: " + self.cluster_name + msg="Cluster does not exist: " + self.cluster_name, ) else: raise ex try: existing = data_context_api.read_data_context( - data_context_name=self.data_contex_name + data_context_name=self.data_contex_name, ).to_dict() except ApiException as ex: if ( @@ -244,11 +244,12 @@ def process(self): if not self.module.check_mode: update_data_context = data_context_api.update_data_context( body=ApiDataContext( - name=self.data_contex_name, services=services - ) + name=self.data_contex_name, + services=services, + ), ).to_dict() self.data_context_output = parse_data_context_result( - ApiDataContextList(items=[update_data_context]) + ApiDataContextList(items=[update_data_context]), ) self.changed = True else: @@ -261,12 +262,13 @@ def process(self): if not self.module.check_mode: create_data_context = data_context_api.create_data_context( body=ApiDataContext( - name=self.data_contex_name, services=services - ) + name=self.data_contex_name, + services=services, + ), ).to_dict() self.data_context_output = parse_data_context_result( - ApiDataContextList(items=[create_data_context]) + ApiDataContextList(items=[create_data_context]), ) self.changed = True @@ -274,7 +276,7 @@ def process(self): if existing: if not self.module.check_mode: data_context_api.delete_data_context( - data_context_name=self.data_contex_name + data_context_name=self.data_contex_name, ).to_dict() self.changed = True @@ -283,7 +285,9 @@ def main(): module = 
ClouderaManagerMutableModule.ansible_module( argument_spec=dict( name=dict( - required=True, type="str", aliases=["context_name", "data_context_name"] + required=True, + type="str", + aliases=["context_name", "data_context_name"], ), cluster=dict(required=False, type="str", aliases=["cluster_name"]), services=dict(required=False, type="list"), diff --git a/plugins/modules/data_context_info.py b/plugins/modules/data_context_info.py index 89932979..ce4f98bb 100644 --- a/plugins/modules/data_context_info.py +++ b/plugins/modules/data_context_info.py @@ -146,10 +146,10 @@ def process(self): if self.data_context_name: try: data_contex = data_context_api.read_data_context( - data_context_name=self.data_context_name + data_context_name=self.data_context_name, ).to_dict() self.data_context_info = parse_data_context_result( - ApiDataContextList(items=[data_contex]) + ApiDataContextList(items=[data_contex]), ) except ApiException as ex: if ex.status != 500: @@ -158,7 +158,7 @@ def process(self): data_contexts_info = data_context_api.read_data_contexts().to_dict() self.data_context_info = parse_data_context_result( - ApiDataContextList(items=data_contexts_info.get("items", [])) + ApiDataContextList(items=data_contexts_info.get("items", [])), ) diff --git a/plugins/modules/external_account.py b/plugins/modules/external_account.py index a47da650..8a9bc6f1 100644 --- a/plugins/modules/external_account.py +++ b/plugins/modules/external_account.py @@ -289,9 +289,9 @@ def process(self): items=[ ApiConfig(name=key, value=value) for key, value in self.params.items() - ] + ], ), - ) + ), ) self.changed = True else: @@ -315,9 +315,9 @@ def process(self): items=[ ApiConfig(name=key, value=value) for key, value in self.params.items() - ] + ], ), - ) + ), ) self.changed = True diff --git a/plugins/modules/external_account_info.py b/plugins/modules/external_account_info.py index 6d30107e..cc88d2b2 100644 --- a/plugins/modules/external_account_info.py +++ b/plugins/modules/external_account_info.py @@ -149,7 +149,7 @@ def process(self): try: if self.name: self.external_accounts = [ - api_instance.read_account(self.name).to_dict() + api_instance.read_account(self.name).to_dict(), ] elif self.type: @@ -160,7 +160,7 @@ def process(self): else: self.external_accounts = api_instance.read_accounts( - type_name="AWS_ACCESS_KEY_AUTH" + type_name="AWS_ACCESS_KEY_AUTH", ).to_dict()["items"] all_accounts = [] for account_type in account_types: diff --git a/plugins/modules/external_user_mappings.py b/plugins/modules/external_user_mappings.py index 8447b96b..e6935382 100644 --- a/plugins/modules/external_user_mappings.py +++ b/plugins/modules/external_user_mappings.py @@ -211,7 +211,7 @@ def process(self): for mapping in all_external_user_mappings.items: if self.name == mapping.name: existing = api_instance.read_external_user_mapping( - uuid=mapping.uuid + uuid=mapping.uuid, ).to_dict() break if self.uuid: @@ -247,7 +247,8 @@ def process(self): if not self.module.check_mode: self.external_user_mappings_output = ( api_instance.update_external_user_mapping( - uuid=mapping.uuid, body=update_existing_auth_roles + uuid=mapping.uuid, + body=update_existing_auth_roles, ) ).to_dict() self.changed = True @@ -263,7 +264,7 @@ def process(self): if not self.module.check_mode: self.external_user_mappings_output = ( api_instance.create_external_user_mappings( - body={"items": [external_user_mappings_body]} + body={"items": [external_user_mappings_body]}, ) ).to_dict()["items"] self.changed = True @@ -277,7 +278,7 @@ def process(self): 
incoming_auth_roles = set(self.auth_roles) roles_to_delete = existing_auth_roles.intersection( - incoming_auth_roles + incoming_auth_roles, ) if self.module._diff: self.diff.update( @@ -298,7 +299,8 @@ def process(self): if not self.module.check_mode: self.external_user_mappings_output = ( api_instance.update_external_user_mapping( - uuid=mapping.uuid, body=update_existing_auth_roles + uuid=mapping.uuid, + body=update_existing_auth_roles, ) ).to_dict() self.changed = True diff --git a/plugins/modules/external_user_mappings_info.py b/plugins/modules/external_user_mappings_info.py index bc426f7c..9d31040c 100644 --- a/plugins/modules/external_user_mappings_info.py +++ b/plugins/modules/external_user_mappings_info.py @@ -128,12 +128,12 @@ def process(self): if self.name == mapping.name: self.external_user_mappings_info_output = [ api_instance.read_external_user_mapping( - uuid=mapping.uuid - ).to_dict() + uuid=mapping.uuid, + ).to_dict(), ] elif self.uuid: self.external_user_mappings_info_output = [ - api_instance.read_external_user_mapping(uuid=self.uuid).to_dict() + api_instance.read_external_user_mapping(uuid=self.uuid).to_dict(), ] else: self.external_user_mappings_info_output = ( diff --git a/plugins/modules/host.py b/plugins/modules/host.py index b93f4f3d..041a2a79 100644 --- a/plugins/modules/host.py +++ b/plugins/modules/host.py @@ -485,7 +485,7 @@ def process(self): if not self.module.check_mode: current = host_api.create_hosts( - body=ApiHostList(items=[host]) + body=ApiHostList(items=[host]), ).items[0] if not current: @@ -505,7 +505,7 @@ def process(self): # Handle IP address configuration if self.ip_address and self.ip_address != current.ip_address: self.module.fail_json( - msg="Invalid host configuration. To update the host IP address, please remove and then add the host." + msg="Invalid host configuration. To update the host IP address, please remove and then add the host.", ) # Handle rack ID @@ -521,7 +521,8 @@ def process(self): # Currently, update_host() only handles rack_id, so executing here, not further in the logic if not self.module.check_mode: current = host_api.update_host( - host_id=current.host_id, body=current + host_id=current.host_id, + body=current, ) # Handle host configs @@ -595,7 +596,8 @@ def process(self): if self.module._diff: self.diff["before"].update( - cluster=current.cluster_ref.cluster_name, roles=[] + cluster=current.cluster_ref.cluster_name, + roles=[], ) self.diff["after"].update(cluster="", roles=[]) @@ -603,7 +605,7 @@ def process(self): if role.service_ref.cluster_name is not None: if self.module._diff: self.diff["before"]["roles"].append( - parse_role_result(role) + parse_role_result(role), ) if not self.module.check_mode: @@ -625,7 +627,7 @@ def process(self): except ApiException as ex: if ex.status == 404: self.module.fail_json( - msg=f"Cluster not found: {self.cluster}." 
+ msg=f"Cluster not found: {self.cluster}.", ) # Handle new cluster membership @@ -649,15 +651,15 @@ def process(self): ApiHostRef( host_id=current.host_id, hostname=current.hostname, - ) - ] + ), + ], ), ) break except ApiException as ae: if ae.status == 400: self.module.log( - f"[RETRY] Attempting to add host, {current.hostname}, to cluster, {cluster.name}" + f"[RETRY] Attempting to add host, {current.hostname}, to cluster, {cluster.name}", ) time.sleep(self.delay) continue @@ -685,8 +687,8 @@ def process(self): ApiHostRef( host_id=current.host_id, hostname=current.hostname, - ) - ] + ), + ], ), ) @@ -710,7 +712,7 @@ def process(self): except ApiException as ex: if ex.status == 404: self.module.fail_json( - msg=f"Host template, '{self.host_template}', does not exist on cluster, '{cluster.name}'" + msg=f"Host template, '{self.host_template}', does not exist on cluster, '{cluster.name}'", ) try: @@ -724,7 +726,7 @@ def process(self): ) except ApiException as ex: self.module.fail_json( - msg=f"Error whil reconciling host template assignments: {to_native(ex)}" + msg=f"Error whil reconciling host template assignments: {to_native(ex)}", ) if before_ht or after_ht: @@ -798,7 +800,7 @@ def process(self): api_client=self.api_client, host_id=current.host_id, view="full", - ) + ), ) else: self.output = parse_host_result(current) diff --git a/plugins/modules/host_config.py b/plugins/modules/host_config.py index dabb6487..062029ce 100644 --- a/plugins/modules/host_config.py +++ b/plugins/modules/host_config.py @@ -229,13 +229,14 @@ def process(self): body = ApiConfigList( items=[ ApiConfig(name=k, value=f"{v}") for k, v in change_set.items() - ] + ], ) self.host_config = [ p.to_dict() for p in api_instance.update_host_config( - host_id=self.hostname, body=body + host_id=self.hostname, + body=body, ).items ] else: diff --git a/plugins/modules/host_config_info.py b/plugins/modules/host_config_info.py index 2a7c4195..a3716407 100644 --- a/plugins/modules/host_config_info.py +++ b/plugins/modules/host_config_info.py @@ -175,7 +175,8 @@ def process(self): host_api_instance = HostsResourceApi(self.api_client) host_configs = host_api_instance.read_host_config( - host_id=self.hostname, view=self.view + host_id=self.hostname, + view=self.view, ) self.host_config_info = [s.to_dict() for s in host_configs.items] diff --git a/plugins/modules/host_template.py b/plugins/modules/host_template.py index eafa0ddd..5ba79fe3 100644 --- a/plugins/modules/host_template.py +++ b/plugins/modules/host_template.py @@ -280,7 +280,7 @@ def process(self): ) if base_rcg is None: self.module.fail_json( - msg=f"Role type '{rcg['type']}' not found for service '{rcg['service']}' in cluster '{self.cluster}'" + msg=f"Role type '{rcg['type']}' not found for service '{rcg['service']}' in cluster '{self.cluster}'", ) incoming_rcgs.append(base_rcg) @@ -291,7 +291,7 @@ def process(self): [ rcg.role_config_group_name for rcg in current.role_config_group_refs - ] + ], ) incoming_rcg_names = set([rcg.name for rcg in incoming_rcgs]) @@ -313,7 +313,8 @@ def process(self): updated_diff = dict(**current_diff) updated_diff.role_config_groups = updated_rcg_names self.diff.update( - before=current_diff, after=dict(updated_diff) + before=current_diff, + after=dict(updated_diff), ) current.role_config_group_refs = [ @@ -339,7 +340,8 @@ def process(self): if self.module._diff: self.diff.update( - before=dict(), after=parse_host_template(created_host_template) + before=dict(), + after=parse_host_template(created_host_template), ) if not 
self.module.check_mode: diff --git a/plugins/modules/host_template_info.py b/plugins/modules/host_template_info.py index af24bb1b..93325223 100644 --- a/plugins/modules/host_template_info.py +++ b/plugins/modules/host_template_info.py @@ -131,7 +131,7 @@ def process(self): except ApiException as ex: if ex.status == 404: self.module.fail_json( - msg="Cluster does not exist: " + self.cluster_name + msg="Cluster does not exist: " + self.cluster_name, ) else: raise ex @@ -145,8 +145,8 @@ def process(self): host_template_api.read_host_template( cluster_name=self.cluster_name, host_template_name=self.name, - ) - ) + ), + ), ) except ApiException as ex: if ex.status != 404: @@ -156,7 +156,7 @@ def process(self): self.output = [ parse_host_template(ht) for ht in host_template_api.read_host_templates( - cluster_name=self.cluster_name + cluster_name=self.cluster_name, ).items ] diff --git a/plugins/modules/parcel.py b/plugins/modules/parcel.py index fbefa8ad..1e5b3719 100644 --- a/plugins/modules/parcel.py +++ b/plugins/modules/parcel.py @@ -232,7 +232,7 @@ def process(self): except ApiException as ex: if ex.status == 404: self.module.fail_json( - msg=f"Parcel {self.parcel_name} (version: {self.parcel_version}) not found on cluster '{self.cluster}'" + msg=f"Parcel {self.parcel_name} (version: {self.parcel_version}) not found on cluster '{self.cluster}'", ) # Normalize self.state @@ -258,7 +258,7 @@ def process(self): cluster_name=self.cluster, product=self.parcel_name, version=self.parcel_version, - ) + ), ) @@ -269,10 +269,16 @@ def main(): name=dict(required=True, aliases=["parcel", "product"]), parcel_version=dict(required=True), delay=dict( - required=False, type="int", default=15, aliases=["polling_interval"] + required=False, + type="int", + default=15, + aliases=["polling_interval"], ), timeout=dict( - required=False, type="int", default=1200, aliases=["polling_timeout"] + required=False, + type="int", + default=1200, + aliases=["polling_timeout"], ), state=dict( default="present", diff --git a/plugins/modules/service.py b/plugins/modules/service.py index 171b71d9..64e26b78 100644 --- a/plugins/modules/service.py +++ b/plugins/modules/service.py @@ -848,7 +848,7 @@ def process(self): for base_rcg in base_rcg_list: RoleConfigGroupsResourceApi( - self.api_client + self.api_client, ).update_role_config_group( cluster_name=self.cluster, service_name=current.name, @@ -876,7 +876,8 @@ def process(self): hostname=role_host, config=requested_role.get("config", None), role_config_group=requested_role.get( - "role_config_group", None + "role_config_group", + None, ), tags=requested_role.get("tags", None), ) @@ -912,7 +913,7 @@ def process(self): else: if self.type and self.type.upper() != current.type: self.module.fail_json( - msg="Service name already in use for type: " + current.type + msg="Service name already in use for type: " + current.type, ) # Set the maintenance @@ -1049,7 +1050,7 @@ def process(self): api_client=self.api_client, cluster_name=self.cluster, service_name=self.name, - ) + ), ) else: self.output = parse_service_result(current) @@ -1072,7 +1073,7 @@ def handle_maintenance(self, service: ApiService) -> None: self.changed = True if self.module._diff: self.diff["before"].update( - maintenance_mode=service.maintenance_mode + maintenance_mode=service.maintenance_mode, ) self.diff["after"].update(maintenance_mode=self.maintenance) diff --git a/plugins/modules/service_config.py b/plugins/modules/service_config.py index cd4f5225..0e170414 100644 --- a/plugins/modules/service_config.py +++ 
b/plugins/modules/service_config.py @@ -278,7 +278,9 @@ def process(self): self.config = [ p.to_dict() for p in api_instance.read_service_config( - self.cluster, self.service, view=self.view + self.cluster, + self.service, + view=self.view, ).items ] diff --git a/plugins/modules/service_config_info.py b/plugins/modules/service_config_info.py index 4a58ff0d..64335946 100644 --- a/plugins/modules/service_config_info.py +++ b/plugins/modules/service_config_info.py @@ -192,7 +192,9 @@ def process(self): try: results = api_instance.read_service_config( - cluster_name=self.cluster, service_name=self.service, view=self.view + cluster_name=self.cluster, + service_name=self.service, + view=self.view, ) self.config = [s.to_dict() for s in results.items] diff --git a/plugins/modules/service_info.py b/plugins/modules/service_info.py index 1ab0fd81..f4a196a8 100644 --- a/plugins/modules/service_info.py +++ b/plugins/modules/service_info.py @@ -402,8 +402,8 @@ def process(self): api_client=self.api_client, cluster_name=self.cluster, service_name=self.name, - ) - ) + ), + ), ) except ApiException as e: if e.status != 404: diff --git a/plugins/modules/service_role.py b/plugins/modules/service_role.py index 909081f6..f719f3df 100644 --- a/plugins/modules/service_role.py +++ b/plugins/modules/service_role.py @@ -422,7 +422,7 @@ def process(self): if not self.cluster_hostname and not self.cluster_host_id: self.module.fail_json( msg="one of the following is required: %s" - % ", ".join(["cluster_hostname", "cluster_host_id"]) + % ", ".join(["cluster_hostname", "cluster_host_id"]), ) try: @@ -435,7 +435,8 @@ def process(self): try: ServicesResourceApi(self.api_client).read_service( - self.cluster, self.service + self.cluster, + self.service, ) except ApiException as ex: if ex.status == 404: @@ -471,7 +472,7 @@ def process(self): type=self.type, hostname=self.cluster_hostname, host_id=self.cluster_host_id, - ).items + ).items, ), None, ) @@ -587,13 +588,13 @@ def process(self): if self.module._diff: self.diff["before"].update( - role_config_group=current.role_config_group_ref.role_config_group_name + role_config_group=current.role_config_group_ref.role_config_group_name, ) self.diff["after"].update(role_config_group=None) if not self.module.check_mode: RoleConfigGroupsResourceApi( - self.api_client + self.api_client, ).move_roles_to_base_group( cluster_name=self.cluster, service_name=self.service, @@ -604,10 +605,10 @@ def process(self): self.changed = True if self.module._diff: self.diff["before"].update( - role_config_group=current.role_config_group_ref.role_config_group_name + role_config_group=current.role_config_group_ref.role_config_group_name, ) self.diff["after"].update( - role_config_group=self.role_config_group + role_config_group=self.role_config_group, ) if not self.module.check_mode: @@ -671,7 +672,7 @@ def process(self): cluster_name=self.cluster, service_name=self.service, role_name=current.name, - ) + ), ) else: self.output = parse_role_result(current) diff --git a/plugins/modules/service_role_config.py b/plugins/modules/service_role_config.py index 390458fe..e490b03d 100644 --- a/plugins/modules/service_role_config.py +++ b/plugins/modules/service_role_config.py @@ -249,7 +249,8 @@ def process(self): try: ServicesResourceApi(self.api_client).read_service( - self.cluster, self.service + self.cluster, + self.service, ) except ApiException as ex: if ex.status == 404: @@ -262,7 +263,9 @@ def process(self): try: existing = api_instance.read_role_config( - self.cluster, self.role, self.service + 
self.cluster, + self.role, + self.service, ) except ApiException as ex: if ex.status == 404: @@ -289,7 +292,7 @@ def process(self): if not self.module.check_mode: body = ApiConfigList( - items=[ApiConfig(name=k, value=v) for k, v in change_set.items()] + items=[ApiConfig(name=k, value=v) for k, v in change_set.items()], ) refresh = False @@ -309,7 +312,9 @@ def process(self): self.config = [ p.to_dict() for p in api_instance.read_role_config( - self.cluster, self.role, self.service + self.cluster, + self.role, + self.service, ).items ] diff --git a/plugins/modules/service_role_config_group.py b/plugins/modules/service_role_config_group.py index a82ae496..46b1c628 100644 --- a/plugins/modules/service_role_config_group.py +++ b/plugins/modules/service_role_config_group.py @@ -263,7 +263,8 @@ def process(self): try: ServicesResourceApi(self.api_client).read_service( - self.cluster, self.service + self.cluster, + self.service, ) except ApiException as ex: if ex.status == 404: @@ -285,7 +286,10 @@ def process(self): ) else: current = get_base_role_config_group( - self.api_client, self.cluster, self.service, self.role_type + self.api_client, + self.cluster, + self.service, + self.role_type, ) current_roles = rcg_api.read_roles( @@ -303,12 +307,12 @@ def process(self): if current.base: self.module.fail_json( - msg="Deletion failed. Role config group is a base (default) group." + msg="Deletion failed. Role config group is a base (default) group.", ) if current_roles: self.module.fail_json( - msg="Deletion failed. Role config group has existing role associations." + msg="Deletion failed. Role config group has existing role associations.", ) if self.module._diff: @@ -331,11 +335,12 @@ def process(self): # Check for role type changes if self.role_type and self.role_type != current.role_type: self.module.fail_json( - msg="Invalid role type. To change the role type of an existing role config group, please destroy and recreate the role config group with the designated role type." + msg="Invalid role type. 
To change the role type of an existing role config group, please destroy and recreate the role config group with the designated role type.", ) payload = ApiRoleConfigGroup( - name=current.name, role_type=current.role_type + name=current.name, + role_type=current.role_type, ) # Check for display name changes @@ -385,7 +390,7 @@ def process(self): else: if self.role_type is None: self.module.fail_json( - msg="Role config group needs to be created, but is missing required arguments: role_type" + msg="Role config group needs to be created, but is missing required arguments: role_type", ) self.changed = True @@ -437,7 +442,7 @@ def process(self): cluster_name=self.cluster, service_name=self.service, role_config_group_name=current.name, - ) + ), ) else: self.output = parse_role_config_group_result(current) diff --git a/plugins/modules/service_role_config_group_config.py b/plugins/modules/service_role_config_group_config.py index ab20418d..880321ac 100644 --- a/plugins/modules/service_role_config_group_config.py +++ b/plugins/modules/service_role_config_group_config.py @@ -251,7 +251,8 @@ def process(self): try: ServicesResourceApi(self.api_client).read_service( - self.cluster, self.service + self.cluster, + self.service, ) except ApiException as ex: if ex.status == 404: @@ -294,7 +295,7 @@ def process(self): if not self.module.check_mode: body = ApiConfigList( - items=[ApiConfig(name=k, value=v) for k, v in change_set.items()] + items=[ApiConfig(name=k, value=v) for k, v in change_set.items()], ) self.config = [ @@ -315,7 +316,10 @@ def process(self): self.config = [ p.to_dict() for p in api_instance.read_config( - self.cluster, self.role_config_group, self.service, view=self.view + self.cluster, + self.role_config_group, + self.service, + view=self.view, ).items ] @@ -326,7 +330,8 @@ def main(): cluster=dict(required=True, aliases=["cluster_name"]), service=dict(required=True, aliases=["service_name"]), role_config_group=dict( - required=True, aliases=["role_config_group", "name"] + required=True, + aliases=["role_config_group", "name"], ), parameters=dict(type="dict", required=True, aliases=["params"]), purge=dict(type="bool", default=False), diff --git a/plugins/modules/service_role_config_group_config_info.py b/plugins/modules/service_role_config_group_config_info.py index eda118c1..16e667f8 100644 --- a/plugins/modules/service_role_config_group_config_info.py +++ b/plugins/modules/service_role_config_group_config_info.py @@ -200,7 +200,8 @@ def process(self): try: ServicesResourceApi(self.api_client).read_service( - self.cluster, self.service + self.cluster, + self.service, ) except ApiException as ex: if ex.status == 404: @@ -230,7 +231,8 @@ def main(): cluster=dict(required=True, aliases=["cluster_name"]), service=dict(required=True, aliases=["service_name"]), role_config_group=dict( - required=True, aliases=["role_config_group", "name"] + required=True, + aliases=["role_config_group", "name"], ), view=dict( default="summary", diff --git a/plugins/modules/service_role_config_group_info.py b/plugins/modules/service_role_config_group_info.py index 75f80489..56053cf6 100644 --- a/plugins/modules/service_role_config_group_info.py +++ b/plugins/modules/service_role_config_group_info.py @@ -168,7 +168,8 @@ def process(self): try: ServicesResourceApi(self.api_client).read_service( - self.cluster, self.service + self.cluster, + self.service, ) except ApiException as ex: if ex.status == 404: @@ -188,7 +189,7 @@ def process(self): cluster_name=self.cluster, role_config_group_name=self.name, 
service_name=self.service, - ) + ), ] except ApiException as e: if e.status != 404: @@ -222,7 +223,7 @@ def process(self): { **parse_role_config_group_result(r), "role_names": [r.name for r in roles.items], - } + }, ) diff --git a/plugins/modules/service_role_config_info.py b/plugins/modules/service_role_config_info.py index 1d9ad216..fc803348 100644 --- a/plugins/modules/service_role_config_info.py +++ b/plugins/modules/service_role_config_info.py @@ -201,7 +201,8 @@ def process(self): try: ServicesResourceApi(self.api_client).read_service( - self.cluster, self.service + self.cluster, + self.service, ) except ApiException as ex: if ex.status == 404: diff --git a/plugins/modules/service_role_info.py b/plugins/modules/service_role_info.py index 7f5d9f71..94162d94 100644 --- a/plugins/modules/service_role_info.py +++ b/plugins/modules/service_role_info.py @@ -300,7 +300,8 @@ def process(self): try: ServicesResourceApi(self.api_client).read_service( - self.cluster, self.service + self.cluster, + self.service, ) except ApiException as ex: if ex.status == 404: @@ -322,8 +323,8 @@ def process(self): cluster_name=self.cluster, service_name=self.service, role_name=self.role, - ) - ) + ), + ), ) except ApiException as e: if e.status != 404: diff --git a/plugins/modules/user.py b/plugins/modules/user.py index e68adc2f..22ff3d8a 100644 --- a/plugins/modules/user.py +++ b/plugins/modules/user.py @@ -238,9 +238,9 @@ def process(self): name=self.account_name, auth_roles=auth_roles, password=self.account_password, - ) - ] - ) + ), + ], + ), ) self.user_output = api_instance.read_user2(self.account_name).to_dict() @@ -249,7 +249,7 @@ def process(self): if self.state == "absent": if existing: self.user_output = api_instance.delete_user2( - self.account_name + self.account_name, ).to_dict() self.changed = True diff --git a/plugins/modules/user_info.py b/plugins/modules/user_info.py index e4061a23..d05df5a9 100644 --- a/plugins/modules/user_info.py +++ b/plugins/modules/user_info.py @@ -98,7 +98,7 @@ def process(self): try: if self.account_name: self.user_info_output = [ - api_instance.read_user2(self.account_name).to_dict() + api_instance.read_user2(self.account_name).to_dict(), ] else: self.user_info_output = api_instance.read_users2().to_dict()["items"] diff --git a/roles/cloudera_manager/api_client/action_plugins/cm_api.py b/roles/cloudera_manager/api_client/action_plugins/cm_api.py index 658ab3a3..350343e5 100644 --- a/roles/cloudera_manager/api_client/action_plugins/cm_api.py +++ b/roles/cloudera_manager/api_client/action_plugins/cm_api.py @@ -70,7 +70,7 @@ def poll_command_status(self, task_vars, api_base_url, command_id): args = self.build_args( task_vars, additional_args=dict( - url=self.build_url(api_base_url, "/commands/" + str(command_id)) + url=self.build_url(api_base_url, "/commands/" + str(command_id)), ), ) result = self._execute_module( @@ -104,7 +104,7 @@ def run(self, tmp=None, task_vars=None): poll_duration = int(self._task.args.get("poll_duration") or 10) poll_max_failed_retries = int( - self._task.args.get("poll_max_failed_retries") or 3 + self._task.args.get("poll_max_failed_retries") or 3, ) # Add request body if necessary @@ -132,11 +132,14 @@ def run(self, tmp=None, task_vars=None): time.sleep(poll_duration) display.vv( "Waiting for {} command ({}) to complete...".format( - command_name, command_id - ) + command_name, + command_id, + ), ) command_status = self.poll_command_status( - task_vars, api_base_url, command_id + task_vars, + api_base_url, + command_id, ) if "json" in 
command_status: failed_polls = 0 @@ -147,8 +150,10 @@ def run(self, tmp=None, task_vars=None): response = {"success": False} display.vv( "Failed to poll command ({}) for status (attempt {} of {})...".format( - command_id, failed_polls, poll_max_failed_retries - ) + command_id, + failed_polls, + poll_max_failed_retries, + ), ) result.update(command_status) result["failed"] = not response["success"] diff --git a/tests/unit/__init__.py b/tests/unit/__init__.py index f412df87..14ce61a7 100644 --- a/tests/unit/__init__.py +++ b/tests/unit/__init__.py @@ -78,7 +78,7 @@ class AnsibleExitJson(Exception): def __init__(self, kwargs): super(AnsibleExitJson, self).__init__( - kwargs.get("msg", "General module success") + kwargs.get("msg", "General module success"), ) self.__dict__.update(kwargs) @@ -88,13 +88,16 @@ class AnsibleFailJson(Exception): def __init__(self, kwargs): super(AnsibleFailJson, self).__init__( - kwargs.get("msg", "General module failure") + kwargs.get("msg", "General module failure"), ) self.__dict__.update(kwargs) def wait_for_command( - api_client: ApiClient, command: ApiCommand, polling: int = 120, delay: int = 5 + api_client: ApiClient, + command: ApiCommand, + polling: int = 120, + delay: int = 5, ): """Polls Cloudera Manager to wait for given Command to succeed or fail.""" @@ -110,7 +113,10 @@ def wait_for_command( def yield_service( - api_client: ApiClient, cluster: ApiCluster, service_name: str, service_type: str + api_client: ApiClient, + cluster: ApiCluster, + service_name: str, + service_type: str, ) -> Generator[ApiService]: """Provisions a new cluster service as a generator. Use with 'yield from' to delegate within a pytest fixture. @@ -168,12 +174,13 @@ def register_service( if len(hosts) != 3: raise Exception( - "Not enough available hosts to assign service roles; the cluster must have 3 or more hosts." 
+ "Not enough available hosts to assign service roles; the cluster must have 3 or more hosts.", ) # Create the service created_service = service_api.create_services( - cluster_name=cluster.name, body=ApiServiceList(items=[service]) + cluster_name=cluster.name, + body=ApiServiceList(items=[service]), ).items[0] # Record the service @@ -188,17 +195,20 @@ def register_service( # Refresh the service created_service = service_api.read_service( - cluster_name=cluster.name, service_name=created_service.name + cluster_name=cluster.name, + service_name=created_service.name, ) # Establish the maintenance mode of the service if service.maintenance_mode: maintenance_cmd = service_api.enter_maintenance_mode( - cluster_name=cluster.name, service_name=created_service.name + cluster_name=cluster.name, + service_name=created_service.name, ) wait_for_command(api_client, maintenance_cmd) created_service = service_api.read_service( - cluster_name=cluster.name, service_name=created_service.name + cluster_name=cluster.name, + service_name=created_service.name, ) # Establish the state the of the service @@ -210,11 +220,12 @@ def register_service( ) wait_for_command(api_client, stop_cmd) created_service = service_api.read_service( - cluster_name=cluster.name, service_name=created_service.name + cluster_name=cluster.name, + service_name=created_service.name, ) else: raise Exception( - "Unsupported service state for fixture: " + service.service_state + "Unsupported service state for fixture: " + service.service_state, ) # Return the provisioned service @@ -266,7 +277,10 @@ def deregister_service(api_client: ApiClient, registry: list[ApiService]) -> Non def register_role( - api_client: ApiClient, registry: list[ApiRole], service: ApiService, role: ApiRole + api_client: ApiClient, + registry: list[ApiRole], + service: ApiService, + role: ApiRole, ) -> ApiRole: # Create the role created_role = provision_service_role( @@ -391,7 +405,9 @@ def register_role_config_group( def deregister_role_config_group( - api_client: ApiClient, registry: list[ApiRoleConfigGroup], message: str + api_client: ApiClient, + registry: list[ApiRoleConfigGroup], + message: str, ) -> None: rcg_api = RoleConfigGroupsResourceApi(api_client) for rcg in registry: @@ -439,7 +455,9 @@ def deregister_role_config_group( if config_revert: rcg.config = ApiConfigList( - items=[ApiConfig(name=k, value=v) for k, v in config_revert.items()] + items=[ + ApiConfig(name=k, value=v) for k, v in config_revert.items() + ], ) rcg_api.update_role_config_group( @@ -461,7 +479,8 @@ def register_host_template( # Create the host template created_host_template = host_template_api.create_host_templates( - cluster_name=cluster.name, body=ApiHostTemplateList(items=[host_template]) + cluster_name=cluster.name, + body=ApiHostTemplateList(items=[host_template]), ).items[0] # Record the host template @@ -490,7 +509,10 @@ def deregister_host_template( def service_wide_config( - api_client: ApiClient, service: ApiService, params: dict, message: str + api_client: ApiClient, + service: ApiService, + params: dict, + message: str, ) -> Generator[ApiService]: """Update a service-wide configuration for a given service. Yields the service, resetting the configuration to its prior state. 
Use with @@ -550,7 +572,7 @@ def service_wide_config( ApiConfig(name=k.name, value=None) for k in post.items if k.name not in pre_set - ] + ], ) service_api.update_service_config( @@ -562,7 +584,10 @@ def service_wide_config( def provision_cm_role( - api_client: ApiClient, role_name: str, role_type: str, host_id: str + api_client: ApiClient, + role_name: str, + role_type: str, + host_id: str, ) -> Generator[ApiRole]: """Yield a newly-created Cloudera Manager Service role, deleting the role after use. Use with 'yield from' within a pytest fixture. @@ -585,7 +610,8 @@ def provision_cm_role( ) provisioned_role = next( - iter(api.create_roles(body=ApiRoleList(items=[role])).items), None + iter(api.create_roles(body=ApiRoleList(items=[role])).items), + None, ) yield provisioned_role @@ -598,7 +624,9 @@ def provision_cm_role( def set_cm_role( - api_client: ApiClient, cluster: ApiCluster, role: ApiRole + api_client: ApiClient, + cluster: ApiCluster, + role: ApiRole, ) -> Generator[ApiRole]: """Set a net-new Cloudera Manager Service role. Yields the new role, resetting to any existing role upon completion. Use with 'yield from' @@ -609,7 +637,8 @@ def set_cm_role( # Check for existing management role pre_role = next( - iter([r for r in get_mgmt_roles(api_client, role.type).items]), None + iter([r for r in get_mgmt_roles(api_client, role.type).items]), + None, ) if pre_role is not None: @@ -627,14 +656,15 @@ def set_cm_role( if not hosts.items: raise Exception( - "No available hosts to assign the Cloudera Manager Service role." + "No available hosts to assign the Cloudera Manager Service role.", ) role.host_ref = get_host_ref(api_client, host_id=hosts.items[0].host_id) # Create the role under test current_role = next( - iter(role_api.create_roles(body=ApiRoleList(items=[role])).items), None + iter(role_api.create_roles(body=ApiRoleList(items=[role])).items), + None, ) current_role.config = role_api.read_role_config(role_name=current_role.name) @@ -643,7 +673,7 @@ def set_cm_role( if role.role_state in [ApiRoleState.STARTING, ApiRoleState.STARTED]: start_cmds = role_cmd_api.start_command( - body=ApiRoleNameList(items=[current_role.name]) + body=ApiRoleNameList(items=[current_role.name]), ) if start_cmds.errors: error_msg = "\n".join(start_cmds.errors) @@ -666,7 +696,7 @@ def set_cm_role( role_api.enter_maintenance_mode(pre_role.name) if pre_role.role_state in [ApiRoleState.STARTED, ApiRoleState.STARTING]: restart_cmds = role_cmd_api.restart_command( - body=ApiRoleNameList(items=[pre_role.name]) + body=ApiRoleNameList(items=[pre_role.name]), ) if restart_cmds.errors: error_msg = "\n".join(restart_cmds.errors) @@ -678,7 +708,10 @@ def set_cm_role( def set_cm_role_config( - api_client: ApiClient, role: ApiRole, params: dict, message: str + api_client: ApiClient, + role: ApiRole, + params: dict, + message: str, ) -> Generator[ApiRole]: """Update a role configuration for a given role. Yields the role, resetting the configuration to its prior state. 
Use with @@ -731,7 +764,7 @@ def set_cm_role_config( ApiConfig(name=k.name, value=None) for k in post.items if k.name not in pre_set - ] + ], ) role_api.update_role_config( @@ -768,7 +801,9 @@ def set_cm_role_config_group( # Update the role config group pre_rcg = rcg_api.update_role_config_group( - role_config_group.name, message=f"{message}::set", body=update + role_config_group.name, + message=f"{message}::set", + body=update, ) yield pre_rcg @@ -785,11 +820,13 @@ def set_cm_role_config_group( if config_revert: role_config_group.config = ApiConfigList( - items=[ApiConfig(name=k, value=v) for k, v in config_revert.items()] + items=[ApiConfig(name=k, value=v) for k, v in config_revert.items()], ) rcg_api.update_role_config_group( - role_config_group.name, message=f"{message}::reset", body=role_config_group + role_config_group.name, + message=f"{message}::reset", + body=role_config_group, ) @@ -849,7 +886,7 @@ def set_role_config_group( if config_revert: role_config_group.config = ApiConfigList( - items=[ApiConfig(name=k, value=v) for k, v in config_revert.items()] + items=[ApiConfig(name=k, value=v) for k, v in config_revert.items()], ) rcg_api.update_role_config_group( @@ -862,7 +899,9 @@ def set_role_config_group( def read_expected_roles( - api_client: ApiClient, cluster_name: str, service_name: str + api_client: ApiClient, + cluster_name: str, + service_name: str, ) -> list[ApiRole]: return ( RolesResourceApi(api_client) diff --git a/tests/unit/conftest.py b/tests/unit/conftest.py index 2e0039b5..80e4f30c 100644 --- a/tests/unit/conftest.py +++ b/tests/unit/conftest.py @@ -118,7 +118,7 @@ def skip_python(): if sys.version_info < (3, 6): pytest.skip( "Skipping on Python %s. cloudera.cloud supports Python 3.6 and higher." - % sys.version + % sys.version, ) @@ -220,7 +220,8 @@ def cm_api_client(conn) -> ApiClient: # Get version auth = config.auth_settings().get("basic") version = rest.GET( - f"{url}/api/version", headers={auth["key"]: auth["value"]} + f"{url}/api/version", + headers={auth["key"]: auth["value"]}, ).data # Set host @@ -287,7 +288,7 @@ def cms_session(cm_api_client) -> Generator[ApiService]: ApiRole(type="EVENTSERVER"), ApiRole(type="ALERTPUBLISHER"), ], - ) + ), ) service_api.auto_configure() @@ -328,7 +329,7 @@ def base_cluster(cm_api_client, cms_session) -> Generator[ApiCluster]: cdh_version = os.getenv("CDH_VERSION") else: raise Exception( - "No CDH_VERSION found. Please set this environment variable." + "No CDH_VERSION found. Please set this environment variable.", ) name = ( @@ -359,13 +360,13 @@ def base_cluster(cm_api_client, cms_session) -> Generator[ApiCluster]: cluster_api.delete_cluster(cluster_name=name) raise NoHostsFoundException( - "Not enough available hosts to assign to base cluster" + "Not enough available hosts to assign to base cluster", ) else: cluster_api.add_hosts( cluster_name=name, body=ApiHostRefList( - items=[ApiHostRef(host_id=h.host_id) for h in hosts[:3]] + items=[ApiHostRef(host_id=h.host_id) for h in hosts[:3]], ), ) @@ -384,7 +385,7 @@ def base_cluster(cm_api_client, cms_session) -> Generator[ApiCluster]: # Roll back the cluster and then raise an error cluster_api.delete_cluster(cluster_name=name) raise ParcelNotFoundException( - f"CDH Version {cdh_version} not found. Please check your parcel repo configuration." + f"CDH Version {cdh_version} not found. 
Please check your parcel repo configuration.", ) def _log(msg: str, args: dict = None) -> None: @@ -429,12 +430,13 @@ def zk_function(cm_api_client, base_cluster, request) -> Generator[ApiService]: cm_api = ClustersResourceApi(cm_api_client) host = next( - (h for h in cm_api.list_hosts(cluster_name=base_cluster.name).items), None + (h for h in cm_api.list_hosts(cluster_name=base_cluster.name).items), + None, ) if host is None: raise NoHostsFoundException( - "No available hosts to assign ZooKeeper service roles" + "No available hosts to assign ZooKeeper service roles", ) payload = ApiService( @@ -449,7 +451,8 @@ def zk_function(cm_api_client, base_cluster, request) -> Generator[ApiService]: ) service_results = service_api.create_services( - cluster_name=base_cluster.name, body=ApiServiceList(items=[payload]) + cluster_name=base_cluster.name, + body=ApiServiceList(items=[payload]), ) first_run_cmd = service_api.first_run( @@ -460,7 +463,8 @@ def zk_function(cm_api_client, base_cluster, request) -> Generator[ApiService]: monitor_command(cm_api_client, first_run_cmd) zk_service = service_api.read_service( - cluster_name=base_cluster.name, service_name=service_results.items[0].name + cluster_name=base_cluster.name, + service_name=service_results.items[0].name, ) yield zk_service @@ -497,7 +501,7 @@ def zk_session(cm_api_client, base_cluster) -> Generator[ApiService]: if len(hosts) != 3: raise NoHostsFoundException( - "Not enough available hosts to assign ZooKeeper service roles" + "Not enough available hosts to assign ZooKeeper service roles", ) payload = ApiService( @@ -516,7 +520,8 @@ def zk_session(cm_api_client, base_cluster) -> Generator[ApiService]: ) service_results = service_api.create_services( - cluster_name=base_cluster.name, body=ApiServiceList(items=[payload]) + cluster_name=base_cluster.name, + body=ApiServiceList(items=[payload]), ) first_run_cmd = service_api.first_run( @@ -527,7 +532,8 @@ def zk_session(cm_api_client, base_cluster) -> Generator[ApiService]: monitor_command(cm_api_client, first_run_cmd) zk_service = service_api.read_service( - cluster_name=base_cluster.name, service_name=service_results.items[0].name + cluster_name=base_cluster.name, + service_name=service_results.items[0].name, ) yield zk_service @@ -691,7 +697,7 @@ def cms_auto(cm_api_client, cms_cleared) -> Generator[ApiService]: ApiRole(type="EVENTSERVER"), ApiRole(type="ALERTPUBLISHER"), ], - ) + ), ) service_api.auto_configure() @@ -736,7 +742,7 @@ def cms_auto_no_start(cm_api_client, cms_cleared) -> Generator[ApiService]: ApiRole(type="EVENTSERVER"), ApiRole(type="ALERTPUBLISHER"), ], - ) + ), ) service_api.auto_configure() @@ -786,7 +792,7 @@ def cms_config(cm_api_client, cms, request) -> Generator[ApiService]: ApiConfig(name=k.name, value=None) for k in post.items if k.name not in pre_set - ] + ], ) api.update_service_config( @@ -800,7 +806,8 @@ def host_monitor(cm_api_client, cms, request) -> Generator[ApiRole]: api = MgmtRolesResourceApi(cm_api_client) hm = next( - iter([r for r in api.read_roles().items if r.type == "HOSTMONITOR"]), None + iter([r for r in api.read_roles().items if r.type == "HOSTMONITOR"]), + None, ) if hm is not None: @@ -811,12 +818,15 @@ def host_monitor(cm_api_client, cms, request) -> Generator[ApiRole]: if host is None: raise Exception( - "No available hosts to assign Cloudera Manager Service role" + "No available hosts to assign Cloudera Manager Service role", ) else: name = Path(request.fixturename).stem yield from provision_cm_role( - cm_api_client, name, "HOSTMONITOR", 
host.host_id + cm_api_client, + name, + "HOSTMONITOR", + host.host_id, ) @@ -837,7 +847,9 @@ def host_monitor_config(cm_api_client, host_monitor, request) -> Generator[ApiRo @pytest.fixture(scope="function") def host_monitor_role_group_config( - cm_api_client, host_monitor, request + cm_api_client, + host_monitor, + request, ) -> Generator[ApiRoleConfigGroup]: """Configures the base Role Config Group for the Host Monitor role of a Cloudera Manager Service.""" marker = request.node.get_closest_marker("role_config_group") @@ -847,7 +859,7 @@ def host_monitor_role_group_config( rcg_api = MgmtRoleConfigGroupsResourceApi(cm_api_client) rcg = rcg_api.read_role_config_group( - host_monitor.role_config_group_ref.role_config_group_name + host_monitor.role_config_group_ref.role_config_group_name, ) rcg.config = rcg_api.read_config(role_config_group_name=rcg.name) @@ -866,7 +878,8 @@ def host_monitor_cleared(cm_api_client, cms) -> Generator[None]: # Check for existing management role pre_role = next( - iter([r for r in get_mgmt_roles(cm_api_client, "HOSTMONITOR").items]), None + iter([r for r in get_mgmt_roles(cm_api_client, "HOSTMONITOR").items]), + None, ) if pre_role is not None: @@ -886,14 +899,16 @@ def host_monitor_cleared(cm_api_client, cms) -> Generator[None]: role_api.enter_maintenance_mode(pre_role.name) if pre_role.role_state in [ApiRoleState.STARTED, ApiRoleState.STARTING]: restart_cmds = role_cmd_api.restart_command( - body=ApiRoleNameList(items=[pre_role.name]) + body=ApiRoleNameList(items=[pre_role.name]), ) handle_commands(api_client=cm_api_client, commands=restart_cmds) @pytest.fixture(scope="function") def host_monitor_state( - cm_api_client, host_monitor, request + cm_api_client, + host_monitor, + request, ) -> Generator[ApiRoleConfigGroup]: marker = request.node.get_closest_marker("role_state") @@ -914,14 +929,14 @@ def host_monitor_state( handle_commands( api_client=cm_api_client, commands=cmd_api.start_command( - body=ApiRoleNameList(items=[host_monitor.name]) + body=ApiRoleNameList(items=[host_monitor.name]), ), ) elif role_state in [ApiRoleState.STOPPED]: handle_commands( api_client=cm_api_client, commands=cmd_api.stop_command( - body=ApiRoleNameList(items=[host_monitor.name]) + body=ApiRoleNameList(items=[host_monitor.name]), ), ) @@ -940,21 +955,23 @@ def host_monitor_state( handle_commands( api_client=cm_api_client, commands=cmd_api.start_command( - body=ApiRoleNameList(items=[host_monitor.name]) + body=ApiRoleNameList(items=[host_monitor.name]), ), ) elif pre_role.role_state in [ApiRoleState.STOPPED]: handle_commands( api_client=cm_api_client, commands=cmd_api.stop_command( - body=ApiRoleNameList(items=[host_monitor.name]) + body=ApiRoleNameList(items=[host_monitor.name]), ), ) @pytest.fixture(scope="function") def zk_role_config_group( - cm_api_client, zk_session, request + cm_api_client, + zk_session, + request, ) -> Generator[ApiRoleConfigGroup]: """ Creates or updates a Role Config Group of a ZooKeeper service, i.e. a SERVER role type group. 
@@ -1028,7 +1045,8 @@ def zk_role_config_group( @pytest.fixture(scope="function") def role_config_group_wrapper( - cm_api_client, request + cm_api_client, + request, ) -> Callable[[ApiService, ApiRoleConfigGroup], Generator[ApiRoleConfigGroup]]: """ Returns a function that will create a role config group on the selected service, @@ -1037,7 +1055,8 @@ def role_config_group_wrapper( """ def wrapper( - service: ApiService, role_config_group: ApiRoleConfigGroup + service: ApiService, + role_config_group: ApiRoleConfigGroup, ) -> Generator[ApiRoleConfigGroup]: rcg_api = RoleConfigGroupsResourceApi(cm_api_client) wrapped_rcg = None @@ -1111,7 +1130,10 @@ def handle_commands(api_client: ApiClient, commands: ApiBulkCommandList): def monitor_command( - api_client: ApiClient, command: ApiCommand, polling: int = 120, delay: int = 10 + api_client: ApiClient, + command: ApiCommand, + polling: int = 120, + delay: int = 10, ): poll_count = 0 while command.active: @@ -1168,7 +1190,8 @@ def _wrapper(service: ApiService, role: ApiRole) -> ApiRole: @pytest.fixture(scope="function") def role_config_group_factory( - cm_api_client, request + cm_api_client, + request, ) -> Generator[Callable[[ApiService, ApiRoleConfigGroup], ApiRoleConfigGroup]]: # Track the created or updated role config groups role_config_groups = list[ApiRoleConfigGroup]() @@ -1178,7 +1201,8 @@ def role_config_group_factory( # Yield the role factory function to the tests def _wrapper( - service: ApiService, role_config_group: ApiRoleConfigGroup + service: ApiService, + role_config_group: ApiRoleConfigGroup, ) -> ApiRoleConfigGroup: return register_role_config_group( api_client=cm_api_client, @@ -1192,7 +1216,9 @@ def _wrapper( # Delete any registered role config groups deregister_role_config_group( - api_client=cm_api_client, registry=role_config_groups, message=message + api_client=cm_api_client, + registry=role_config_groups, + message=message, ) @@ -1205,7 +1231,8 @@ def host_template_factory( # Yield the host template factory function to the tests def _wrapper( - cluster: ApiCluster, host_template: ApiHostTemplate + cluster: ApiCluster, + host_template: ApiHostTemplate, ) -> ApiHostTemplate: return register_host_template( api_client=cm_api_client, diff --git a/tests/unit/plugins/actions/assemble_cluster_template/test_assemble_cluster_template_action.py b/tests/unit/plugins/actions/assemble_cluster_template/test_assemble_cluster_template_action.py index 57447a76..e2bb7121 100644 --- a/tests/unit/plugins/actions/assemble_cluster_template/test_assemble_cluster_template_action.py +++ b/tests/unit/plugins/actions/assemble_cluster_template/test_assemble_cluster_template_action.py @@ -279,7 +279,9 @@ def find_in_tmp(fragment, decrypt): regexp = re.compile("^((?!ig).)*$") plugin.assemble_fragments( - dest_file.open(mode="w", encoding="utf-8"), src_dir, regex=regexp + dest_file.open(mode="w", encoding="utf-8"), + src_dir, + regex=regexp, ) results = json.load(dest_file.open(mode="r", encoding="utf-8")) diff --git a/tests/unit/plugins/modules/assemble_cluster_template/test_assemble_cluster_template_module.py b/tests/unit/plugins/modules/assemble_cluster_template/test_assemble_cluster_template_module.py index 5c054db2..ebd1ce86 100644 --- a/tests/unit/plugins/modules/assemble_cluster_template/test_assemble_cluster_template_module.py +++ b/tests/unit/plugins/modules/assemble_cluster_template/test_assemble_cluster_template_module.py @@ -73,7 +73,7 @@ def test_src_not_directory(module_args, tmp_path): { "dest": "foo.json", "src": str(invalid_src), - } 
+ }, ) with pytest.raises(AnsibleFailJson, match="not a directory"): @@ -91,7 +91,7 @@ def test_src_invalid_file(module_args, tmp_path): { "dest": "foo.json", "src": str(root_dir), - } + }, ) with pytest.raises(AnsibleFailJson, match="JSON parsing error"): @@ -113,7 +113,8 @@ def test_src_filtered(module_args, tmp_path): filtered = root_dir / "filtered.json" filtered.write_text( - json.dumps(content, indent=2, sort_keys=False), encoding="utf-8" + json.dumps(content, indent=2, sort_keys=False), + encoding="utf-8", ) results = root_dir / "results.json" @@ -357,7 +358,7 @@ def test_merge_list_idempotent_multiple_elements(module_args, tmp_path): assert len(output["test"]) == 4 assert output["test"] == expected_list( - [{"name": "Test"}, {"product": "Product"}, "two", "three"] + [{"name": "Test"}, {"product": "Product"}, "two", "three"], ) @@ -389,7 +390,7 @@ def test_merge_list_idempotent_multiple_keys(module_args, tmp_path): assert len(output["test"]) == 3 assert output["test"] == expected_list( - [{"name": "Test", "product": "Product"}, "two", "three"] + [{"name": "Test", "product": "Product"}, "two", "three"], ) @@ -421,7 +422,7 @@ def test_merge_list_idempotent_append(module_args, tmp_path): assert len(output["test"]) == 4 assert output["test"] == expected_list( - [{"name": "Test"}, "two", {"name": "Additional"}, "three"] + [{"name": "Test"}, "two", {"name": "Additional"}, "three"], ) @@ -652,7 +653,7 @@ def test_multiple_services(module_args, tmp_path): "https://archive.cloudera.com/", "https://archive.cloudera.com/schemaregistry", "https://archive.cloudera.com/atlas", - ] + ], ) assert len(output["products"]) == 2 @@ -660,13 +661,18 @@ def test_multiple_services(module_args, tmp_path): [ dict(product="CDH", version="1.2.3"), dict(product="FOO", version="9.8.7"), - ] + ], ) assert output["instantiator"]["clusterName"] == "ExampleCluster" assert len(output["instantiator"]["hosts"]) == 1 assert output["instantiator"]["hosts"] == expected_list( - [{"hostName": "host.example.com", "hostTemplateRefName": "ExampleHostTemplate"}] + [ + { + "hostName": "host.example.com", + "hostTemplateRefName": "ExampleHostTemplate", + }, + ], ) assert len(output["hostTemplates"]) == 2 @@ -688,7 +694,7 @@ def test_multiple_services(module_args, tmp_path): "atlas-GATEWAY-BASE", ], }, - ] + ], ) assert len(output["services"]) == 3 @@ -722,7 +728,7 @@ def test_multiple_services(module_args, tmp_path): "serviceType": "SCHEMAREGISTRY", "displayName": "Schema Registry", "serviceConfigs": [ - {"name": "database_host", "value": "host.example.com"} + {"name": "database_host", "value": "host.example.com"}, ], "roleConfigGroups": [ { @@ -736,7 +742,7 @@ def test_multiple_services(module_args, tmp_path): "value": "7790", }, ], - } + }, ], }, { diff --git a/tests/unit/plugins/modules/cluster/test_base_cluster.py b/tests/unit/plugins/modules/cluster/test_base_cluster.py index 09b501e7..6013dcda 100644 --- a/tests/unit/plugins/modules/cluster/test_base_cluster.py +++ b/tests/unit/plugins/modules/cluster/test_base_cluster.py @@ -105,7 +105,8 @@ def test_present_base_hosts_not_found(conn, module_args): module_args(conn) with pytest.raises( - AnsibleFailJson, match="Did not find the following hosts: should.not.find" + AnsibleFailJson, + match="Did not find the following hosts: should.not.find", ): cluster.main() @@ -433,7 +434,7 @@ def test_pytest_cluster_with_template(module_args): "template": "./files/cluster-template.json", "add_repositories": "True", "state": "present", - } + }, ) with pytest.raises(AnsibleExitJson) as e: 
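The unit tests above and below all rely on the harness convention defined in tests/unit/__init__.py earlier in this series: exit_json() and fail_json() are wrapped to raise AnsibleExitJson and AnsibleFailJson, so a test sets its inputs through the module_args fixture and asserts on the raised payload with pytest.raises. A minimal self-contained sketch of that pattern follows; fake_module_main and its parameters are hypothetical stand-ins for illustration only, not part of the collection.

    import pytest

    class AnsibleExitJson(Exception):
        # Raised in place of sys.exit() when a module calls exit_json().
        def __init__(self, kwargs):
            super().__init__(kwargs.get("msg", "General module success"))
            self.__dict__.update(kwargs)

    def fake_module_main(**params):
        # Hypothetical stand-in for a module's main(); a real module calls
        # AnsibleModule.exit_json(), which the test harness patches to raise.
        raise AnsibleExitJson(dict(changed=True, **params))

    def test_exit_payload():
        with pytest.raises(AnsibleExitJson) as e:
            fake_module_main(service="example")
        assert e.value.changed is True
        assert e.value.service == "example"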
diff --git a/tests/unit/plugins/modules/cm_autotls/test_cm_autotls.py b/tests/unit/plugins/modules/cm_autotls/test_cm_autotls.py
index 982cb5be..41d6ccbe 100644
--- a/tests/unit/plugins/modules/cm_autotls/test_cm_autotls.py
+++ b/tests/unit/plugins/modules/cm_autotls/test_cm_autotls.py
@@ -70,7 +70,7 @@ def test_enable_invalid_ssh(module_args, conn):
 
     # Update parameters to enable with invalid ssh key
     module_args(
-        {**conn, "connection_private_key": "invalid-ssh-key", "state": "present"}
+        {**conn, "connection_private_key": "invalid-ssh-key", "state": "present"},
     )
 
     with pytest.raises(AnsibleFailJson, match="Could not authenticate"):
diff --git a/tests/unit/plugins/modules/cm_kerberos/test_cm_kerberos.py b/tests/unit/plugins/modules/cm_kerberos/test_cm_kerberos.py
index 09104a9e..3ac6a2b9 100644
--- a/tests/unit/plugins/modules/cm_kerberos/test_cm_kerberos.py
+++ b/tests/unit/plugins/modules/cm_kerberos/test_cm_kerberos.py
@@ -73,7 +73,7 @@ def krb_disabled(cm_api_client, request) -> None:
     )
 
     body = ApiConfigList(
-        items=[ApiConfig(name=k, value=v) for k, v in reset_params.items()]
+        items=[ApiConfig(name=k, value=v) for k, v in reset_params.items()],
     )
 
     cm_api.update_config(
@@ -102,7 +102,7 @@ def krb_freeipa(cm_api_client, request, krb_disabled) -> None:
     )
 
     body = ApiConfigList(
-        items=[ApiConfig(name=k, value=v) for k, v in setup_params.items()]
+        items=[ApiConfig(name=k, value=v) for k, v in setup_params.items()],
     )
 
     cm_api.update_config(
@@ -147,7 +147,7 @@ def test_pytest_enable_kerberos(module_args, conn, krb_disabled, request):
             "krb_enc_types": ["aes256-cts", "aes128-cts", "rc4-hmac"],
             "security_realm": "CLDR.INTERNAL",
             "message": f"{Path(request.node.parent.name).stem}::{request.node.name}",
-        }
+        },
     )
 
     with pytest.raises(AnsibleExitJson) as e:
@@ -180,7 +180,7 @@ def test_enable_invalid_admin_password(module_args, conn, krb_disabled, request)
             "security_realm": "CLDR.INTERNAL",
             "kdc_admin_password": "wrongPass",
             "message": f"{Path(request.node.parent.name).stem}::{request.node.name}",
-        }
+        },
     )
 
     with pytest.raises(
@@ -226,7 +226,7 @@ def test_force_enable_kerberos(module_args, conn, krb_freeipa, request):
             "krb_enc_types": ["aes256-cts", "aes128-cts", "rc4-hmac"],
             "security_realm": "CLDR.INTERNAL",
             "message": f"{Path(request.node.parent.name).stem}::{request.node.name}",
-        }
+        },
     )
 
     with pytest.raises(AnsibleExitJson) as e:
diff --git a/tests/unit/plugins/modules/cm_license/test_cm_license.py b/tests/unit/plugins/modules/cm_license/test_cm_license.py
index ea2dd2a5..aae40e28 100644
--- a/tests/unit/plugins/modules/cm_license/test_cm_license.py
+++ b/tests/unit/plugins/modules/cm_license/test_cm_license.py
@@ -40,7 +40,7 @@ def test_pytest_cm_license(module_args):
             "verify_tls": "no",
             "debug": "no",
             "license": "./files/license.txt",
-        }
+        },
     )
 
     with pytest.raises(AnsibleExitJson) as e:
diff --git a/tests/unit/plugins/modules/cm_license_info/test_cm_license_info.py b/tests/unit/plugins/modules/cm_license_info/test_cm_license_info.py
index 4c309e1a..81d5adb5 100644
--- a/tests/unit/plugins/modules/cm_license_info/test_cm_license_info.py
+++ b/tests/unit/plugins/modules/cm_license_info/test_cm_license_info.py
@@ -39,7 +39,7 @@ def test_pytest_cm_license_info(module_args):
             "port": "7180",
             "verify_tls": "no",
             "debug": "no",
-        }
+        },
     )
 
     with pytest.raises(AnsibleExitJson) as e:
diff --git a/tests/unit/plugins/modules/cm_resource/test_cm_resource_i.py b/tests/unit/plugins/modules/cm_resource/test_cm_resource_i.py
index f50d853d..f3563dd8 100644
--- a/tests/unit/plugins/modules/cm_resource/test_cm_resource_i.py
+++ b/tests/unit/plugins/modules/cm_resource/test_cm_resource_i.py
@@ -32,7 +32,8 @@
 
 
 @unittest.skipUnless(
-    os.getenv("CM_USERNAME"), "Cloudera Manager access parameters not set"
+    os.getenv("CM_USERNAME"),
+    "Cloudera Manager access parameters not set",
 )
 class TestCMResourceIntegration(ModuleTestCase):
     def test_post(self):
diff --git a/tests/unit/plugins/modules/cm_resource_info/test_cm_resource_info_i.py b/tests/unit/plugins/modules/cm_resource_info/test_cm_resource_info_i.py
index ae574fc0..b78a5a42 100644
--- a/tests/unit/plugins/modules/cm_resource_info/test_cm_resource_info_i.py
+++ b/tests/unit/plugins/modules/cm_resource_info/test_cm_resource_info_i.py
@@ -33,7 +33,8 @@
 
 
 @unittest.skipUnless(
-    os.getenv("CM_USERNAME"), "Cloudera Manager access parameters not set"
+    os.getenv("CM_USERNAME"),
+    "Cloudera Manager access parameters not set",
 )
 class TestCMResourceInfoIntegration(ModuleTestCase):
     def test_list(self):
@@ -46,7 +47,7 @@ def test_list(self):
                 "verify_tls": "no",
                 "debug": "yes",
                 "path": "/clusters",
-            }
+            },
         )
 
         with pytest.raises(AnsibleExitJson) as e:
@@ -63,7 +64,7 @@ def test_item(self):
                 "verify_tls": "no",
                 "debug": "yes",
                 "path": "/cm/license",
-            }
+            },
         )
 
         with pytest.raises(AnsibleExitJson) as e:
@@ -80,14 +81,15 @@ def test_invalid_host(self):
                 "verify_tls": "no",
                 "debug": "yes",
                 "path": "/cm/license",
-            }
+            },
         )
 
         with pytest.raises(AnsibleFailJson) as e:
             cm_resource_info.main()
 
         self.assertRegexpMatches(
-            e.value.args[0]["msg"], "nodename nor servname provided, or not known"
+            e.value.args[0]["msg"],
+            "nodename nor servname provided, or not known",
         )
 
     def test_invalid_path(self):
@@ -99,7 +101,7 @@ def test_invalid_path(self):
                 "verify_tls": "no",
                 "debug": "yes",
                 "path": "/cm/licenseZ",
-            }
+            },
         )
 
         with pytest.raises(AnsibleFailJson) as e:
@@ -118,7 +120,7 @@ def test_query_params(self):
                 "path": "/tools/echo",
                 "query": {"message": "foobarbaz"},
                 "field": "message",
-            }
+            },
         )
 
         with pytest.raises(AnsibleExitJson) as e:
diff --git a/tests/unit/plugins/modules/cm_service/test_cm_service.py b/tests/unit/plugins/modules/cm_service/test_cm_service.py
index 5abace09..16bc7249 100644
--- a/tests/unit/plugins/modules/cm_service/test_cm_service.py
+++ b/tests/unit/plugins/modules/cm_service/test_cm_service.py
@@ -43,7 +43,7 @@ def test_state_present(conn, module_args, cms_cleared, request):
         {
             **conn,
             "message": f"{Path(request.node.parent.name).stem}::{request.node.name}",
-        }
+        },
     )
 
     with pytest.raises(AnsibleExitJson) as e:
@@ -64,7 +64,7 @@ def test_state_absent(conn, module_args, cm_api_client, cms_cleared, request):
             **conn,
             "state": "absent",
             "message": f"{Path(request.node.parent.name).stem}::{request.node.name}",
-        }
+        },
     )
 
     service_api = MgmtServiceResourceApi(cm_api_client)
@@ -90,7 +90,7 @@ def test_state_absent_running_roles(conn, module_args, cms_auto, request):
             **conn,
             "state": "absent",
             "message": f"{Path(request.node.parent.name).stem}::{request.node.name}",
-        }
+        },
     )
 
     with pytest.raises(AnsibleExitJson) as e:
@@ -113,7 +113,7 @@ def test_state_started(conn, module_args, cm_api_client, cms_auto_no_start, requ
             **conn,
             "state": "started",
             "message": f"{Path(request.node.parent.name).stem}::{request.node.name}",
-        }
+        },
     )
 
     with pytest.raises(AnsibleExitJson) as e:
@@ -136,7 +136,7 @@ def test_state_stopped(conn, module_args, cm_api_client, cms_auto, request):
             **conn,
             "state": "stopped",
             "message": f"{Path(request.node.parent.name).stem}::{request.node.name}",
-        }
+        },
     )
 
     with pytest.raises(AnsibleExitJson) as e:
@@ -159,7 +159,7 @@ def test_state_restarted(conn, module_args, cm_api_client, cms_auto, request):
             **conn,
             "state": "restarted",
             "message": f"{Path(request.node.parent.name).stem}::{request.node.name}",
-        }
+        },
     )
 
     with pytest.raises(AnsibleExitJson) as e:
@@ -182,7 +182,7 @@ def test_new_maintenance_enabled(conn, module_args, cms_cleared, request):
             **conn,
             "maintenance": True,
             "message": f"{Path(request.node.parent.name).stem}::{request.node.name}",
-        }
+        },
     )
 
     with pytest.raises(AnsibleExitJson) as e:
@@ -207,7 +207,7 @@ def test_new_config(conn, module_args, cms_cleared, request):
             "message": f"{Path(request.node.parent.name).stem}::{request.node.name}",
             # _ansible_check_mode=True,
             # _ansible_diff=True,
-        }
+        },
     )
 
     expected = dict(mgmt_emit_sensitive_data_in_stderr="True")
@@ -232,7 +232,7 @@ def test_existing_maintenance_enabled(conn, module_args, cm_api_client, cms, req
             **conn,
             "maintenance": True,
             "message": f"{Path(request.node.parent.name).stem}::{request.node.name}",
-        }
+        },
     )
 
     service_api = MgmtServiceResourceApi(cm_api_client)
@@ -258,7 +258,7 @@ def test_existing_maintenance_disabled(conn, module_args, cm_api_client, cms, re
             **conn,
             "maintenance": False,
             "message": f"{Path(request.node.parent.name).stem}::{request.node.name}",
-        }
+        },
     )
 
     service_api = MgmtServiceResourceApi(cm_api_client)
@@ -287,11 +287,12 @@ def test_existing_set_parameters(conn, module_args, cms_config, request):
             "message": f"{Path(request.node.parent.name).stem}::{request.node.name}",
             # _ansible_check_mode=True,
             # _ansible_diff=True,
-        }
+        },
     )
 
     expected = dict(
-        mgmt_emit_sensitive_data_in_stderr="True", log_event_retry_frequency="10"
+        mgmt_emit_sensitive_data_in_stderr="True",
+        log_event_retry_frequency="10",
     )
 
     with pytest.raises(AnsibleExitJson) as e:
@@ -309,7 +310,7 @@ def test_existing_set_parameters(conn, module_args, cms_config, request):
 
 
 @pytest.mark.service_config(
-    dict(mgmt_emit_sensitive_data_in_stderr=True, log_event_retry_frequency=10)
+    dict(mgmt_emit_sensitive_data_in_stderr=True, log_event_retry_frequency=10),
 )
 def test_existing_unset_parameters(conn, module_args, cms_config, request):
     module_args(
@@ -317,7 +318,7 @@ def test_existing_unset_parameters(conn, module_args, cms_config, request):
             **conn,
             "parameters": dict(mgmt_emit_sensitive_data_in_stderr=None),
             "message": f"{Path(request.node.parent.name).stem}::{request.node.name}",
-        }
+        },
     )
 
     expected = dict(log_event_retry_frequency="10")
@@ -337,7 +338,7 @@ def test_existing_unset_parameters(conn, module_args, cms_config, request):
 
 
 @pytest.mark.service_config(
-    dict(mgmt_emit_sensitive_data_in_stderr=True, log_event_retry_frequency=10)
+    dict(mgmt_emit_sensitive_data_in_stderr=True, log_event_retry_frequency=10),
 )
 def test_existing_set_parameters_with_purge(conn, module_args, cms_config, request):
     module_args(
@@ -348,7 +349,7 @@ def test_existing_set_parameters_with_purge(conn, module_args, cms_config, reque
             "message": f"{Path(request.node.parent.name).stem}::{request.node.name}",
             # _ansible_check_mode=True,
             # _ansible_diff=True,
-        }
+        },
     )
 
     expected = dict(mgmt_emit_sensitive_data_in_stderr="True")
@@ -368,7 +369,7 @@ def test_existing_set_parameters_with_purge(conn, module_args, cms_config, reque
 
 
 @pytest.mark.service_config(
-    dict(mgmt_emit_sensitive_data_in_stderr=True, log_event_retry_frequency=10)
+    dict(mgmt_emit_sensitive_data_in_stderr=True, log_event_retry_frequency=10),
 )
 def test_existing_purge_all_parameters(conn, module_args, cms_config, request):
     module_args(
@@ -379,7 +380,7 @@ def test_existing_purge_all_parameters(conn, module_args, cms_config, request):
             "message": f"{Path(request.node.parent.name).stem}::{request.node.name}",
             # _ansible_check_mode=True,
             # _ansible_diff=True,
-        }
+        },
     )
 
     with pytest.raises(AnsibleExitJson) as e:
diff --git a/tests/unit/plugins/modules/cm_service/test_cm_service_role_config_groups.py b/tests/unit/plugins/modules/cm_service/test_cm_service_role_config_groups.py
index 3f99ef5f..a8a6275f 100644
--- a/tests/unit/plugins/modules/cm_service/test_cm_service_role_config_groups.py
+++ b/tests/unit/plugins/modules/cm_service/test_cm_service_role_config_groups.py
@@ -47,10 +47,10 @@ def test_new_role_config_group(conn, module_args, cms_cleared, request):
                 {
                     "type": "ALERTPUBLISHER",
                     "config": expected,
-                }
+                },
             ],
             "message": f"{Path(request.node.parent.name).stem}::{request.node.name}",
-        }
+        },
     )
 
     with pytest.raises(AnsibleExitJson) as e:
@@ -80,12 +80,15 @@ def test_new_role_config_group(conn, module_args, cms_cleared, request):
             items=[
                 ApiConfig(name="mgmt_num_descriptor_fetch_tries", value=16),
                 ApiConfig(name="process_start_secs", value=36),
-            ]
+            ],
         ),
-    )
+    ),
 )
 def test_existing_role_config_group_set(
-    conn, module_args, host_monitor_role_group_config, request
+    conn,
+    module_args,
+    host_monitor_role_group_config,
+    request,
 ):
     expected = dict(mgmt_num_descriptor_fetch_tries="16", process_start_secs="96")
@@ -96,10 +99,10 @@ def test_existing_role_config_group_set(
                 {
                     "type": "HOSTMONITOR",
                     "config": dict(process_start_secs="96"),
-                }
+                },
             ],
             "message": f"{Path(request.node.parent.name).stem}::{request.node.name}",
-        }
+        },
     )
 
     with pytest.raises(AnsibleExitJson) as e:
@@ -129,12 +132,15 @@ def test_existing_role_config_group_set(
             items=[
                 ApiConfig(name="mgmt_num_descriptor_fetch_tries", value=17),
                 ApiConfig(name="process_start_secs", value=37),
-            ]
+            ],
         ),
-    )
+    ),
 )
 def test_existing_role_config_group_unset(
-    conn, module_args, host_monitor_role_group_config, request
+    conn,
+    module_args,
+    host_monitor_role_group_config,
+    request,
 ):
     expected = dict(
@@ -147,10 +153,10 @@ def test_existing_role_config_group_unset(
                 {
                     "type": "HOSTMONITOR",
                     "config": dict(process_start_secs=None),
-                }
+                },
             ],
             "message": f"{Path(request.node.parent.name).stem}::{request.node.name}",
-        }
+        },
     )
 
     with pytest.raises(AnsibleExitJson) as e:
@@ -180,12 +186,15 @@ def test_existing_role_config_group_unset(
             items=[
                 ApiConfig(name="mgmt_num_descriptor_fetch_tries", value=18),
                 ApiConfig(name="process_start_secs", value=38),
-            ]
+            ],
         ),
-    )
+    ),
 )
 def test_existing_role_config_group_purge(
-    conn, module_args, host_monitor_role_group_config, request
+    conn,
+    module_args,
+    host_monitor_role_group_config,
+    request,
 ):
     expected = dict(
@@ -198,11 +207,11 @@ def test_existing_role_config_group_purge(
                 {
                     "type": "HOSTMONITOR",
                     "config": dict(mgmt_num_descriptor_fetch_tries=28),
-                }
+                },
             ],
             "purge": True,
             "message": f"{Path(request.node.parent.name).stem}::{request.node.name}",
-        }
+        },
     )
 
     with pytest.raises(AnsibleExitJson) as e:
@@ -232,19 +241,22 @@ def test_existing_role_config_group_purge(
             items=[
                 ApiConfig(name="mgmt_num_descriptor_fetch_tries", value=18),
                 ApiConfig(name="process_start_secs", value=38),
-            ]
+            ],
         ),
-    )
+    ),
 )
 def test_existing_role_config_group_purge_all(
-    conn, module_args, host_monitor_role_group_config, request
+    conn,
+    module_args,
+    host_monitor_role_group_config,
+    request,
 ):
     module_args(
         {
            **conn,
            "purge": True,
            "message": f"{Path(request.node.parent.name).stem}::{request.node.name}",
f"{Path(request.node.parent.name).stem}::{request.node.name}", - } + }, ) with pytest.raises(AnsibleExitJson) as e: diff --git a/tests/unit/plugins/modules/cm_service/test_cm_service_roles.py b/tests/unit/plugins/modules/cm_service/test_cm_service_roles.py index 2de5ee43..3341ca3d 100644 --- a/tests/unit/plugins/modules/cm_service/test_cm_service_roles.py +++ b/tests/unit/plugins/modules/cm_service/test_cm_service_roles.py @@ -49,12 +49,12 @@ def test_new_role(conn, module_args, cm_api_client, cms_cleared, request): { "type": "HOSTMONITOR", "cluster_host_id": host.host_id, - } + }, ], "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", # _ansible_check_mode=True, # _ansible_diff=True, - } + }, ) with pytest.raises(AnsibleExitJson) as e: @@ -89,12 +89,12 @@ def test_new_role_config(conn, module_args, cm_api_client, cms_cleared, request) "type": "HOSTMONITOR", "cluster_host_id": host.host_id, "config": expected, - } + }, ], "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", # _ansible_check_mode=True, # _ansible_diff=True, - } + }, ) with pytest.raises(AnsibleExitJson) as e: @@ -126,12 +126,12 @@ def test_existing_role_new(conn, module_args, cm_api_client, cms, request): { "type": "HOSTMONITOR", "cluster_host_id": host.host_id, - } + }, ], "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", # _ansible_check_mode=True, # _ansible_diff=True, - } + }, ) with pytest.raises(AnsibleExitJson) as e: @@ -166,12 +166,12 @@ def test_existing_role_new_config_set(conn, module_args, cm_api_client, cms, req "type": "HOSTMONITOR", "cluster_host_id": host.host_id, "config": expected, - } + }, ], "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", # _ansible_check_mode=True, # _ansible_diff=True, - } + }, ) with pytest.raises(AnsibleExitJson) as e: @@ -191,10 +191,14 @@ def test_existing_role_new_config_set(conn, module_args, cm_api_client, cms, req @pytest.mark.role_config( - dict(mgmt_num_descriptor_fetch_tries=15, process_start_secs=35) + dict(mgmt_num_descriptor_fetch_tries=15, process_start_secs=35), ) def test_existing_role_existing_config_set( - conn, module_args, cm_api_client, host_monitor_config, request + conn, + module_args, + cm_api_client, + host_monitor_config, + request, ): expected = dict(process_start_secs="35") @@ -208,12 +212,12 @@ def test_existing_role_existing_config_set( "config": { "mgmt_num_descriptor_fetch_tries": None, }, - } + }, ], "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", # _ansible_check_mode=True, # _ansible_diff=True, - } + }, ) with pytest.raises(AnsibleExitJson) as e: @@ -233,10 +237,14 @@ def test_existing_role_existing_config_set( @pytest.mark.role_config( - dict(mgmt_num_descriptor_fetch_tries=15, process_start_secs=35) + dict(mgmt_num_descriptor_fetch_tries=15, process_start_secs=35), ) def test_existing_role_existing_config_unset( - conn, module_args, cm_api_client, host_monitor_config, request + conn, + module_args, + cm_api_client, + host_monitor_config, + request, ): expected = dict(process_start_secs="35") @@ -250,12 +258,12 @@ def test_existing_role_existing_config_unset( "config": { "mgmt_num_descriptor_fetch_tries": None, }, - } + }, ], "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", # _ansible_check_mode=True, # _ansible_diff=True, - } + }, ) with pytest.raises(AnsibleExitJson) as e: @@ -275,10 +283,14 @@ def test_existing_role_existing_config_unset( @pytest.mark.role_config( - 
dict(mgmt_num_descriptor_fetch_tries=16, process_start_secs=36) + dict(mgmt_num_descriptor_fetch_tries=16, process_start_secs=36), ) def test_existing_role_existing_config_purge( - conn, module_args, cm_api_client, host_monitor_config, request + conn, + module_args, + cm_api_client, + host_monitor_config, + request, ): expected = dict(process_start_secs="36") @@ -292,13 +304,13 @@ def test_existing_role_existing_config_purge( "config": { "process_start_secs": 36, }, - } + }, ], "purge": True, "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", # _ansible_check_mode=True, # _ansible_diff=True, - } + }, ) with pytest.raises(AnsibleExitJson) as e: @@ -318,10 +330,14 @@ def test_existing_role_existing_config_purge( @pytest.mark.role_config( - dict(mgmt_num_descriptor_fetch_tries=17, process_start_secs=37) + dict(mgmt_num_descriptor_fetch_tries=17, process_start_secs=37), ) def test_existing_role_existing_config_purge_all( - conn, module_args, cm_api_client, host_monitor_config, request + conn, + module_args, + cm_api_client, + host_monitor_config, + request, ): module_args( { @@ -330,13 +346,13 @@ def test_existing_role_existing_config_purge_all( { "type": "HOSTMONITOR", # "cluster_host_id": host.host_id, - } + }, ], "purge": True, "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", # _ansible_check_mode=True, # _ansible_diff=True, - } + }, ) with pytest.raises(AnsibleExitJson) as e: @@ -371,12 +387,12 @@ def test_existing_role_config_invalid(conn, module_args, cm_api_client, cms, req "type": "HOSTMONITOR", "cluster_host_id": host.host_id, "config": expected, - } + }, ], "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", # _ansible_check_mode=True, # _ansible_diff=True, - } + }, ) with pytest.raises(AnsibleFailJson, match="Unknown configuration attribute"): @@ -384,7 +400,11 @@ def test_existing_role_config_invalid(conn, module_args, cm_api_client, cms, req def test_existing_role_relocate( - conn, module_args, cm_api_client, host_monitor, request + conn, + module_args, + cm_api_client, + host_monitor, + request, ): host_api = HostsResourceApi(cm_api_client) host = next( @@ -405,12 +425,12 @@ def test_existing_role_relocate( { "type": "HOSTMONITOR", "cluster_host_id": host.host_id, - } + }, ], "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", # _ansible_check_mode=True, # _ansible_diff=True, - } + }, ) with pytest.raises(AnsibleExitJson) as e: @@ -437,7 +457,7 @@ def test_existing_role_purge(conn, module_args, host_monitor, request): "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", # _ansible_check_mode=True, # _ansible_diff=True, - } + }, ) with pytest.raises(AnsibleExitJson) as e: diff --git a/tests/unit/plugins/modules/cm_service_config/test_cm_service_config.py b/tests/unit/plugins/modules/cm_service_config/test_cm_service_config.py index 98df49b8..37fd8e93 100644 --- a/tests/unit/plugins/modules/cm_service_config/test_cm_service_config.py +++ b/tests/unit/plugins/modules/cm_service_config/test_cm_service_config.py @@ -45,17 +45,18 @@ def test_present_invalid_parameter(conn, module_args): { **conn, "parameters": dict(example="Example"), - } + }, ) with pytest.raises( - AnsibleFailJson, match="Unknown configuration attribute 'example'" + AnsibleFailJson, + match="Unknown configuration attribute 'example'", ): cm_service_config.main() @pytest.mark.service_config( - dict(mgmt_emit_sensitive_data_in_stderr=False, log_event_retry_frequency=10) + 
 def test_set_parameters(conn, module_args, cms_config):
     module_args(
@@ -65,11 +66,12 @@ def test_set_parameters(conn, module_args, cms_config):
             "message": "test_cm_service_config::test_set_parameters",
             # _ansible_check_mode=True,
             # _ansible_diff=True,
-        }
+        },
     )
 
     expected = dict(
-        mgmt_emit_sensitive_data_in_stderr="True", log_event_retry_frequency="10"
+        mgmt_emit_sensitive_data_in_stderr="True",
+        log_event_retry_frequency="10",
     )
 
     with pytest.raises(AnsibleExitJson) as e:
@@ -87,7 +89,7 @@ def test_set_parameters(conn, module_args, cms_config):
 
 
 @pytest.mark.service_config(
-    dict(mgmt_emit_sensitive_data_in_stderr=True, log_event_retry_frequency=10)
+    dict(mgmt_emit_sensitive_data_in_stderr=True, log_event_retry_frequency=10),
 )
 def test_unset_parameters(conn, module_args, cms_config):
     module_args(
@@ -95,7 +97,7 @@ def test_unset_parameters(conn, module_args, cms_config):
             **conn,
             "parameters": dict(mgmt_emit_sensitive_data_in_stderr=None),
             "message": "test_cm_service_config::test_unset_parameters",
-        }
+        },
     )
 
     expected = dict(log_event_retry_frequency="10")
@@ -115,7 +117,7 @@ def test_unset_parameters(conn, module_args, cms_config):
 
 
 @pytest.mark.service_config(
-    dict(mgmt_emit_sensitive_data_in_stderr=True, log_event_retry_frequency=10)
+    dict(mgmt_emit_sensitive_data_in_stderr=True, log_event_retry_frequency=10),
 )
 def test_set_parameters_with_purge(conn, module_args, cms_config):
     module_args(
@@ -126,7 +128,7 @@ def test_set_parameters_with_purge(conn, module_args, cms_config):
             "message": "test_cm_service_config::test_set_parameters_with_purge",
             # _ansible_check_mode=True,
             # _ansible_diff=True,
-        }
+        },
     )
 
     expected = dict(mgmt_emit_sensitive_data_in_stderr="True")
@@ -146,7 +148,7 @@ def test_set_parameters_with_purge(conn, module_args, cms_config):
 
 
 @pytest.mark.service_config(
-    dict(mgmt_emit_sensitive_data_in_stderr=True, log_event_retry_frequency=10)
+    dict(mgmt_emit_sensitive_data_in_stderr=True, log_event_retry_frequency=10),
 )
 def test_purge_all_parameters(conn, module_args, cms_config):
     module_args(
@@ -157,7 +159,7 @@ def test_purge_all_parameters(conn, module_args, cms_config):
             "message": "test_cm_service_config::test_purge_all_parameters",
             # _ansible_check_mode=True,
             # _ansible_diff=True,
-        }
+        },
     )
 
     with pytest.raises(AnsibleExitJson) as e:
diff --git a/tests/unit/plugins/modules/cm_service_role/test_cm_service_role.py b/tests/unit/plugins/modules/cm_service_role/test_cm_service_role.py
index 1501ce85..cdb03220 100644
--- a/tests/unit/plugins/modules/cm_service_role/test_cm_service_role.py
+++ b/tests/unit/plugins/modules/cm_service_role/test_cm_service_role.py
@@ -76,7 +76,7 @@ def test_existing_relocate(conn, module_args, cm_api_client, host_monitor, reque
             "message": f"{Path(request.node.parent.name).stem}::{request.node.name}",
             # _ansible_check_mode=True,
             # _ansible_diff=True,
-        }
+        },
     )
 
     with pytest.raises(AnsibleExitJson) as e:
@@ -109,7 +109,7 @@ def test_new(conn, module_args, cm_api_client, cms, request):
             "message": f"{Path(request.node.parent.name).stem}::{request.node.name}",
             # _ansible_check_mode=True,
             # _ansible_diff=True,
-        }
+        },
     )
 
     expected = dict(mgmt_num_descriptor_fetch_tries="55")
@@ -144,7 +144,7 @@ def test_new_maintenance_mode_enabled(conn, module_args, cm_api_client, cms, req
             "message": f"{Path(request.node.parent.name).stem}::{request.node.name}",
             # _ansible_check_mode=True,
             # _ansible_diff=True,
-        }
+        },
     )
 
     with pytest.raises(AnsibleExitJson) as e:
@@ -162,7 +162,7 @@ def test_new_maintenance_mode_enabled(conn, module_args, cm_api_client, cms, req
 
 
 @pytest.mark.role_config(
-    dict(mgmt_num_descriptor_fetch_tries=11, process_start_secs=21)
+    dict(mgmt_num_descriptor_fetch_tries=11, process_start_secs=21),
 )
 def test_existing_set(conn, module_args, host_monitor_config, request):
     module_args(
@@ -173,7 +173,7 @@ def test_existing_set(conn, module_args, host_monitor_config, request):
             "message": f"{Path(request.node.parent.name).stem}::{request.node.name}",
             # _ansible_check_mode=True,
             # _ansible_diff=True,
-        }
+        },
     )
 
     expected = dict(mgmt_num_descriptor_fetch_tries="55", process_start_secs="21")
@@ -193,7 +193,7 @@ def test_existing_set(conn, module_args, host_monitor_config, request):
 
 
 @pytest.mark.role_config(
-    dict(mgmt_num_descriptor_fetch_tries=12, process_start_secs=22)
+    dict(mgmt_num_descriptor_fetch_tries=12, process_start_secs=22),
 )
 def test_existing_unset(conn, module_args, host_monitor_config, request):
     module_args(
@@ -202,7 +202,7 @@ def test_existing_unset(conn, module_args, host_monitor_config, request):
             "type": host_monitor_config.type,
             "config": dict(mgmt_num_descriptor_fetch_tries=None),
             "message": f"{Path(request.node.parent.name).stem}::{request.node.name}",
-        }
+        },
     )
 
     expected = dict(process_start_secs="22")
@@ -222,7 +222,7 @@ def test_existing_unset(conn, module_args, host_monitor_config, request):
 
 
 @pytest.mark.role_config(
-    dict(mgmt_num_descriptor_fetch_tries=13, process_start_secs=23)
+    dict(mgmt_num_descriptor_fetch_tries=13, process_start_secs=23),
 )
 def test_existing_purge(conn, module_args, host_monitor_config, request):
     module_args(
@@ -234,7 +234,7 @@ def test_existing_purge(conn, module_args, host_monitor_config, request):
             "message": f"{Path(request.node.parent.name).stem}::{request.node.name}",
             # _ansible_check_mode=True,
             # _ansible_diff=True,
-        }
+        },
     )
 
     expected = dict(mgmt_num_descriptor_fetch_tries="33")
@@ -254,7 +254,7 @@ def test_existing_purge(conn, module_args, host_monitor_config, request):
 
 
 @pytest.mark.role_config(
-    dict(mgmt_num_descriptor_fetch_tries=14, process_start_secs=24)
+    dict(mgmt_num_descriptor_fetch_tries=14, process_start_secs=24),
 )
 def test_existing_purge_all(conn, module_args, host_monitor_config, request):
     module_args(
@@ -265,7 +265,7 @@ def test_existing_purge_all(conn, module_args, host_monitor_config, request):
             "message": f"{Path(request.node.parent.name).stem}::{request.node.name}",
             # _ansible_check_mode=True,
             # _ansible_diff=True,
-        }
+        },
     )
 
     with pytest.raises(AnsibleExitJson) as e:
@@ -283,7 +283,11 @@ def test_existing_purge_all(conn, module_args, host_monitor_config, request):
 
 
 def test_existing_maintenance_mode_enabled(
-    conn, module_args, cm_api_client, host_monitor, request
+    conn,
+    module_args,
+    cm_api_client,
+    host_monitor,
+    request,
 ):
     module_args(
         {
@@ -293,7 +297,7 @@ def test_existing_maintenance_mode_enabled(
             "message": f"{Path(request.node.parent.name).stem}::{request.node.name}",
             # _ansible_check_mode=True,
             # _ansible_diff=True,
-        }
+        },
     )
 
     role_api = MgmtRolesResourceApi(cm_api_client)
@@ -314,7 +318,11 @@ def test_existing_maintenance_mode_enabled(
 
 
 def test_existing_maintenance_mode_disabled(
-    conn, module_args, cm_api_client, host_monitor, request
+    conn,
+    module_args,
+    cm_api_client,
+    host_monitor,
+    request,
 ):
     module_args(
         {
@@ -324,7 +332,7 @@ def test_existing_maintenance_mode_disabled(
             "message": f"{Path(request.node.parent.name).stem}::{request.node.name}",
             # _ansible_check_mode=True,
             # _ansible_diff=True,
-        }
+        },
     )
 
     # TODO Turn this into a fixture - host_monitor_maintenance
@@ -354,7 +362,7 @@ def test_existing_state_present(conn, module_args, host_monitor, request):
             "message": f"{Path(request.node.parent.name).stem}::{request.node.name}",
             # _ansible_check_mode=True,
             # _ansible_diff=True,
-        }
+        },
     )
 
     with pytest.raises(AnsibleExitJson) as e:
@@ -373,7 +381,11 @@ def test_existing_state_present(conn, module_args, host_monitor, request):
 
 
 @pytest.mark.role_state(ApiRoleState.STOPPED)
 def test_existing_state_started(
-    conn, module_args, cms_auto, host_monitor_state, request
+    conn,
+    module_args,
+    cms_auto,
+    host_monitor_state,
+    request,
 ):
     module_args(
         {
@@ -383,7 +395,7 @@ def test_existing_state_started(
             "message": f"{Path(request.node.parent.name).stem}::{request.node.name}",
             # _ansible_check_mode=True,
             # _ansible_diff=True,
-        }
+        },
     )
 
     with pytest.raises(AnsibleExitJson) as e:
@@ -402,7 +414,11 @@ def test_existing_state_started(
 
 
 @pytest.mark.role_state(ApiRoleState.STARTED)
 def test_existing_state_stopped(
-    conn, module_args, cms_auto, host_monitor_state, request
+    conn,
+    module_args,
+    cms_auto,
+    host_monitor_state,
+    request,
 ):
     module_args(
         {
@@ -412,7 +428,7 @@ def test_existing_state_stopped(
             "message": f"{Path(request.node.parent.name).stem}::{request.node.name}",
             # _ansible_check_mode=True,
             # _ansible_diff=True,
-        }
+        },
     )
 
     with pytest.raises(AnsibleExitJson) as e:
@@ -431,7 +447,11 @@ def test_existing_state_stopped(
 
 
 @pytest.mark.role_state(ApiRoleState.STARTED)
 def test_existing_state_restarted(
-    conn, module_args, cms_auto, host_monitor_state, request
+    conn,
+    module_args,
+    cms_auto,
+    host_monitor_state,
+    request,
 ):
     module_args(
         {
@@ -441,7 +461,7 @@ def test_existing_state_restarted(
             "message": f"{Path(request.node.parent.name).stem}::{request.node.name}",
             # _ansible_check_mode=True,
             # _ansible_diff=True,
-        }
+        },
     )
 
     with pytest.raises(AnsibleExitJson) as e:
@@ -467,7 +487,7 @@ def test_existing_state_absent(conn, module_args, cms_auto, host_monitor, reques
             "message": f"{Path(request.node.parent.name).stem}::{request.node.name}",
             # _ansible_check_mode=True,
             # _ansible_diff=True,
-        }
+        },
     )
 
     with pytest.raises(AnsibleExitJson) as e:
diff --git a/tests/unit/plugins/modules/cm_service_role_config/test_cm_service_role_config.py b/tests/unit/plugins/modules/cm_service_role_config/test_cm_service_role_config.py
index b15a6e85..010c972e 100644
--- a/tests/unit/plugins/modules/cm_service_role_config/test_cm_service_role_config.py
+++ b/tests/unit/plugins/modules/cm_service_role_config/test_cm_service_role_config.py
@@ -47,7 +47,7 @@ def test_missing_required_if(conn, module_args):
         {
             **conn,
             "parameters": dict(),
-        }
+        },
     )
 
     with pytest.raises(AnsibleFailJson, match="name, type"):
@@ -60,17 +60,18 @@ def test_present_invalid_parameter(conn, module_args, host_monitor):
             **conn,
             "role": host_monitor.name,
             "parameters": dict(example="Example"),
-        }
+        },
     )
 
     with pytest.raises(
-        AnsibleFailJson, match="Unknown configuration attribute 'example'"
+        AnsibleFailJson,
+        match="Unknown configuration attribute 'example'",
     ):
         cm_service_role_config.main()
 
 
 @pytest.mark.role_config(
-    dict(mgmt_num_descriptor_fetch_tries=11, process_start_secs=21)
+    dict(mgmt_num_descriptor_fetch_tries=11, process_start_secs=21),
 )
 def test_set_parameters(conn, module_args, host_monitor_config, request):
     module_args(
@@ -81,7 +82,7 @@ def test_set_parameters(conn, module_args, host_monitor_config, request):
             "message": f"{Path(request.node.parent.name).stem}::{request.node.name}",
             # _ansible_check_mode=True,
             # _ansible_diff=True,
-        }
+        },
     )
 
     expected = dict(mgmt_num_descriptor_fetch_tries="32", process_start_secs="21")
@@ -101,7 +102,7 @@ def test_set_parameters(conn, module_args, host_monitor_config, request):
 
 
 @pytest.mark.role_config(
-    dict(mgmt_num_descriptor_fetch_tries=11, process_start_secs=21)
+    dict(mgmt_num_descriptor_fetch_tries=11, process_start_secs=21),
 )
 def test_set_parameters_role_type(conn, module_args, host_monitor_config, request):
     module_args(
@@ -112,7 +113,7 @@ def test_set_parameters_role_type(conn, module_args, host_monitor_config, reques
             "message": f"{Path(request.node.parent.name).stem}::{request.node.name}",
             # _ansible_check_mode=True,
             # _ansible_diff=True,
-        }
+        },
     )
 
     expected = dict(mgmt_num_descriptor_fetch_tries="32", process_start_secs="21")
@@ -132,7 +133,7 @@ def test_set_parameters_role_type(conn, module_args, host_monitor_config, reques
 
 
 @pytest.mark.role_config(
-    dict(mgmt_num_descriptor_fetch_tries=11, process_start_secs=21)
+    dict(mgmt_num_descriptor_fetch_tries=11, process_start_secs=21),
 )
 def test_unset_parameters(conn, module_args, host_monitor_config, request):
     module_args(
@@ -141,7 +142,7 @@ def test_unset_parameters(conn, module_args, host_monitor_config, request):
             "role": host_monitor_config.name,
             "parameters": dict(mgmt_num_descriptor_fetch_tries=None),
             "message": f"{Path(request.node.parent.name).stem}::{request.node.name}",
-        }
+        },
     )
 
     expected = dict(process_start_secs="21")
@@ -161,7 +162,7 @@ def test_unset_parameters(conn, module_args, host_monitor_config, request):
 
 
 @pytest.mark.role_config(
-    dict(mgmt_num_descriptor_fetch_tries=11, process_start_secs=21)
+    dict(mgmt_num_descriptor_fetch_tries=11, process_start_secs=21),
 )
 def test_unset_parameters_role_type(conn, module_args, host_monitor_config, request):
     module_args(
@@ -170,7 +171,7 @@ def test_unset_parameters_role_type(conn, module_args, host_monitor_config, requ
             "type": host_monitor_config.type,
             "parameters": dict(mgmt_num_descriptor_fetch_tries=None),
             "message": f"{Path(request.node.parent.name).stem}::{request.node.name}",
-        }
+        },
     )
 
     expected = dict(process_start_secs="21")
@@ -190,7 +191,7 @@ def test_unset_parameters_role_type(conn, module_args, host_monitor_config, requ
 
 
 @pytest.mark.role_config(
-    dict(mgmt_num_descriptor_fetch_tries=11, process_start_secs=21)
+    dict(mgmt_num_descriptor_fetch_tries=11, process_start_secs=21),
 )
 def test_set_parameters_with_purge(conn, module_args, host_monitor_config, request):
     module_args(
@@ -202,7 +203,7 @@ def test_set_parameters_with_purge(conn, module_args, host_monitor_config, reque
             "message": f"{Path(request.node.parent.name).stem}::{request.node.name}",
             # _ansible_check_mode=True,
             # _ansible_diff=True,
-        }
+        },
     )
 
     expected = dict(mgmt_num_descriptor_fetch_tries="32")
@@ -222,10 +223,13 @@ def test_set_parameters_with_purge(conn, module_args, host_monitor_config, reque
 
 
 @pytest.mark.role_config(
-    dict(mgmt_num_descriptor_fetch_tries=11, process_start_secs=21)
+    dict(mgmt_num_descriptor_fetch_tries=11, process_start_secs=21),
 )
 def test_set_parameters_with_purge_role_type(
-    conn, module_args, host_monitor_config, request
+    conn,
+    module_args,
+    host_monitor_config,
+    request,
 ):
     module_args(
         {
@@ -236,7 +240,7 @@ def test_set_parameters_with_purge_role_type(
             "message": f"{Path(request.node.parent.name).stem}::{request.node.name}",
             # _ansible_check_mode=True,
             # _ansible_diff=True,
-        }
+        },
     )
 
     expected = dict(mgmt_num_descriptor_fetch_tries="32")
@@ -256,7 +260,7 @@ def test_set_parameters_with_purge_role_type(
 
 
 @pytest.mark.role_config(
-    dict(mgmt_num_descriptor_fetch_tries=11, process_start_secs=21)
+    dict(mgmt_num_descriptor_fetch_tries=11, process_start_secs=21),
 )
 def test_purge_all_parameters(conn, module_args, host_monitor_config, request):
     module_args(
@@ -268,7 +272,7 @@ def test_purge_all_parameters(conn, module_args, host_monitor_config, request):
             "message": f"{Path(request.node.parent.name).stem}::{request.node.name}",
             # _ansible_check_mode=True,
             # _ansible_diff=True,
-        }
+        },
     )
 
     with pytest.raises(AnsibleExitJson) as e:
@@ -286,10 +290,13 @@ def test_purge_all_parameters(conn, module_args, host_monitor_config, request):
 
 
 @pytest.mark.role_config(
-    dict(mgmt_num_descriptor_fetch_tries=11, process_start_secs=21)
+    dict(mgmt_num_descriptor_fetch_tries=11, process_start_secs=21),
 )
 def test_purge_all_parameters_role_type(
-    conn, module_args, host_monitor_config, request
+    conn,
+    module_args,
+    host_monitor_config,
+    request,
 ):
     module_args(
         {
@@ -300,7 +307,7 @@ def test_purge_all_parameters_role_type(
             "message": f"{Path(request.node.parent.name).stem}::{request.node.name}",
             # _ansible_check_mode=True,
             # _ansible_diff=True,
-        }
+        },
     )
 
     with pytest.raises(AnsibleExitJson) as e:
diff --git a/tests/unit/plugins/modules/cm_service_role_config_group/test_cm_service_role_config_group.py b/tests/unit/plugins/modules/cm_service_role_config_group/test_cm_service_role_config_group.py
index 43847d23..683d6a01 100644
--- a/tests/unit/plugins/modules/cm_service_role_config_group/test_cm_service_role_config_group.py
+++ b/tests/unit/plugins/modules/cm_service_role_config_group/test_cm_service_role_config_group.py
@@ -47,14 +47,18 @@
         items=[
             ApiConfig(k, v)
             for k, v in dict(
-                mgmt_num_descriptor_fetch_tries=11, process_start_secs=21
+                mgmt_num_descriptor_fetch_tries=11,
+                process_start_secs=21,
             ).items()
-            ]
-        )
-    )
+            ],
+        ),
+    ),
 )
 def test_cm_role_config_group_config_set(
-    conn, module_args, host_monitor_role_group_config, request
+    conn,
+    module_args,
+    host_monitor_role_group_config,
+    request,
 ):
     module_args(
         {
@@ -64,7 +68,7 @@ def test_cm_role_config_group_config_set(
             "message": f"{Path(request.node.parent.name).stem}::{request.node.name}",
             # _ansible_check_mode=True,
             # _ansible_diff=True,
-        }
+        },
     )
 
     expected = dict(mgmt_num_descriptor_fetch_tries="32", process_start_secs="21")
@@ -89,14 +93,18 @@ def test_cm_role_config_group_config_set(
         items=[
             ApiConfig(k, v)
             for k, v in dict(
-                mgmt_num_descriptor_fetch_tries=11, process_start_secs=21
+                mgmt_num_descriptor_fetch_tries=11,
+                process_start_secs=21,
             ).items()
-            ]
-        )
-    )
+            ],
+        ),
+    ),
 )
 def test_cm_role_config_group_config_unset(
-    conn, module_args, host_monitor_role_group_config, request
+    conn,
+    module_args,
+    host_monitor_role_group_config,
+    request,
 ):
     module_args(
         {
@@ -106,7 +114,7 @@ def test_cm_role_config_group_config_unset(
             "message": f"{Path(request.node.parent.name).stem}::{request.node.name}",
             # _ansible_check_mode=True,
             # _ansible_diff=True,
-        }
+        },
     )
 
     expected = dict(process_start_secs="21")
@@ -131,14 +139,18 @@ def test_cm_role_config_group_config_unset(
         items=[
             ApiConfig(k, v)
             for k, v in dict(
-                mgmt_num_descriptor_fetch_tries=11, process_start_secs=21
+                mgmt_num_descriptor_fetch_tries=11,
+                process_start_secs=21,
             ).items()
-            ]
-        )
-    )
+            ],
+        ),
+    ),
 )
 def test_cm_role_config_group_config_set_purge(
-    conn, module_args, host_monitor_role_group_config, request
+    conn,
+    module_args,
+    host_monitor_role_group_config,
+    request,
 ):
     module_args(
         {
@@ -149,7 +161,7 @@ def test_cm_role_config_group_config_set_purge(
             "message": f"{Path(request.node.parent.name).stem}::{request.node.name}",
             # _ansible_check_mode=True,
             # _ansible_diff=True,
-        }
+        },
     )
 
     expected = dict(mgmt_num_descriptor_fetch_tries="32")
@@ -174,14 +186,18 @@ def test_cm_role_config_group_config_set_purge(
         items=[
             ApiConfig(k, v)
             for k, v in dict(
-                mgmt_num_descriptor_fetch_tries=11, process_start_secs=21
+                mgmt_num_descriptor_fetch_tries=11,
+                process_start_secs=21,
             ).items()
-            ]
-        )
-    )
+            ],
+        ),
+    ),
 )
 def test_cm_role_config_group_config_purge_all(
-    conn, module_args, host_monitor_role_group_config, request
+    conn,
+    module_args,
+    host_monitor_role_group_config,
+    request,
 ):
     module_args(
         {
@@ -192,7 +208,7 @@ def test_cm_role_config_group_config_purge_all(
             "message": f"{Path(request.node.parent.name).stem}::{request.node.name}",
             # _ansible_check_mode=True,
             # _ansible_diff=True,
-        }
+        },
     )
 
     expected = dict()
diff --git a/tests/unit/plugins/modules/cm_service_role_config_group_config/test_cm_service_role_config_group_config.py b/tests/unit/plugins/modules/cm_service_role_config_group_config/test_cm_service_role_config_group_config.py
index 8c5c87d3..77301add 100644
--- a/tests/unit/plugins/modules/cm_service_role_config_group_config/test_cm_service_role_config_group_config.py
+++ b/tests/unit/plugins/modules/cm_service_role_config_group_config/test_cm_service_role_config_group_config.py
@@ -56,7 +56,7 @@ def test_missing_required_if(conn, module_args):
         {
             **conn,
             "parameters": dict(),
-        }
+        },
     )
 
     with pytest.raises(AnsibleFailJson, match="name, type"):
@@ -69,11 +69,12 @@ def test_present_invalid_parameter(conn, module_args, host_monitor):
             **conn,
             "name": host_monitor.role_config_group_ref.role_config_group_name,
             "parameters": dict(example="Example"),
-        }
+        },
     )
 
     with pytest.raises(
-        AnsibleFailJson, match="Unknown configuration attribute 'example'"
+        AnsibleFailJson,
+        match="Unknown configuration attribute 'example'",
     ):
         cm_service_role_config_group_config.main()
@@ -84,11 +85,12 @@ def test_present_invalid_parameter(conn, module_args, host_monitor):
         items=[
             ApiConfig(k, v)
             for k, v in dict(
-                mgmt_num_descriptor_fetch_tries=11, process_start_secs=21
+                mgmt_num_descriptor_fetch_tries=11,
+                process_start_secs=21,
             ).items()
-            ]
-        )
-    )
+            ],
+        ),
+    ),
 )
 def test_set_parameters(conn, module_args, host_monitor_role_group_config, request):
     module_args(
@@ -99,7 +101,7 @@ def test_set_parameters(conn, module_args, host_monitor_role_group_config, reque
             "message": f"{Path(request.node.parent.name).stem}::{request.node.name}",
             # _ansible_check_mode=True,
             # _ansible_diff=True,
-        }
+        },
     )
 
     expected = dict(mgmt_num_descriptor_fetch_tries="32", process_start_secs="21")
@@ -124,11 +126,12 @@ def test_set_parameters(conn, module_args, host_monitor_role_group_config, reque
         items=[
             ApiConfig(k, v)
             for k, v in dict(
-                mgmt_num_descriptor_fetch_tries=11, process_start_secs=21
+                mgmt_num_descriptor_fetch_tries=11,
+                process_start_secs=21,
             ).items()
-            ]
-        )
-    )
+            ],
+        ),
+    ),
 )
 def test_set_parameters_role_type(conn, module_args, host_monitor_config, request):
     module_args(
@@ -139,7 +142,7 @@ def test_set_parameters_role_type(conn, module_args, host_monitor_config, reques
             "message": f"{Path(request.node.parent.name).stem}::{request.node.name}",
             # _ansible_check_mode=True,
             # _ansible_diff=True,
-        }
+        },
     )
 
     expected = dict(mgmt_num_descriptor_fetch_tries="32", process_start_secs="21")
@@ -164,11 +167,12 @@ def test_set_parameters_role_type(conn, module_args, host_monitor_config, reques
         items=[
             ApiConfig(k, v)
             for k, v in dict(
-                mgmt_num_descriptor_fetch_tries=11, process_start_secs=21
+                mgmt_num_descriptor_fetch_tries=11,
+                process_start_secs=21,
             ).items()
-            ]
-        )
-    )
+            ],
+        ),
+    ),
 )
 def test_unset_parameters(conn, module_args, host_monitor_config, request):
     module_args(
@@ -177,7 +181,7 @@ def test_unset_parameters(conn, module_args, host_monitor_config, request):
             "name": host_monitor_config.name,
             "parameters": dict(mgmt_num_descriptor_fetch_tries=None),
             "message": f"{Path(request.node.parent.name).stem}::{request.node.name}",
-        }
+        },
     )
 
     expected = dict(process_start_secs="21")
@@ -202,11 +206,12 @@ def test_unset_parameters(conn, module_args, host_monitor_config, request):
         items=[
             ApiConfig(k, v)
             for k, v in dict(
-                mgmt_num_descriptor_fetch_tries=11, process_start_secs=21
+                mgmt_num_descriptor_fetch_tries=11,
+                process_start_secs=21,
             ).items()
-            ]
-        )
-    )
+            ],
+        ),
+    ),
 )
 def test_unset_parameters_role_type(conn, module_args, host_monitor_config, request):
     module_args(
@@ -215,7 +220,7 @@ def test_unset_parameters_role_type(conn, module_args, host_monitor_config, requ
             "type": host_monitor_config.role_type,
             "parameters": dict(mgmt_num_descriptor_fetch_tries=None),
             "message": f"{Path(request.node.parent.name).stem}::{request.node.name}",
-        }
+        },
     )
 
     expected = dict(process_start_secs="21")
@@ -240,11 +245,12 @@ def test_unset_parameters_role_type(conn, module_args, host_monitor_config, requ
         items=[
             ApiConfig(k, v)
             for k, v in dict(
-                mgmt_num_descriptor_fetch_tries=11, process_start_secs=21
+                mgmt_num_descriptor_fetch_tries=11,
+                process_start_secs=21,
             ).items()
-            ]
-        )
-    )
+            ],
+        ),
+    ),
 )
 def test_set_parameters_with_purge(conn, module_args, host_monitor_config, request):
     module_args(
@@ -256,7 +262,7 @@ def test_set_parameters_with_purge(conn, module_args, host_monitor_config, reque
             "message": f"{Path(request.node.parent.name).stem}::{request.node.name}",
             # _ansible_check_mode=True,
             # _ansible_diff=True,
-        }
+        },
     )
 
     expected = dict(mgmt_num_descriptor_fetch_tries="32")
@@ -281,14 +287,18 @@ def test_set_parameters_with_purge(conn, module_args, host_monitor_config, reque
         items=[
             ApiConfig(k, v)
             for k, v in dict(
-                mgmt_num_descriptor_fetch_tries=11, process_start_secs=21
+                mgmt_num_descriptor_fetch_tries=11,
+                process_start_secs=21,
             ).items()
-            ]
-        )
-    )
+            ],
+        ),
+    ),
 )
 def test_set_parameters_with_purge_role_type(
-    conn, module_args, host_monitor_config, request
+    conn,
+    module_args,
+    host_monitor_config,
+    request,
 ):
     module_args(
         {
@@ -299,7 +309,7 @@ def test_set_parameters_with_purge_role_type(
             "message": f"{Path(request.node.parent.name).stem}::{request.node.name}",
             # _ansible_check_mode=True,
             # _ansible_diff=True,
-        }
+        },
     )
 
     expected = dict(mgmt_num_descriptor_fetch_tries="32")
@@ -324,11 +334,12 @@ def test_set_parameters_with_purge_role_type(
         items=[
             ApiConfig(k, v)
             for k, v in dict(
-                mgmt_num_descriptor_fetch_tries=11, process_start_secs=21
+                mgmt_num_descriptor_fetch_tries=11,
+                process_start_secs=21,
             ).items()
-            ]
-        )
-    )
+            ],
+        ),
+    ),
 )
 def test_purge_all_parameters(conn, module_args, host_monitor_config, request):
     module_args(
@@ -340,7 +351,7 @@ def test_purge_all_parameters(conn, module_args, host_monitor_config, request):
             "message": f"{Path(request.node.parent.name).stem}::{request.node.name}",
             # _ansible_check_mode=True,
             # _ansible_diff=True,
-        }
+        },
     )
 
     with pytest.raises(AnsibleExitJson) as e:
@@ -363,14 +374,18 @@ def test_purge_all_parameters(conn, module_args, host_monitor_config, request):
         items=[
             ApiConfig(k, v)
             for k, v in dict(
-                mgmt_num_descriptor_fetch_tries=11, process_start_secs=21
+                mgmt_num_descriptor_fetch_tries=11,
+                process_start_secs=21,
             ).items()
-            ]
-        )
-    )
+            ],
+        ),
+    ),
 )
 def test_purge_all_parameters_role_type(
-    conn, module_args, host_monitor_config, request
+    conn,
+    module_args,
+    host_monitor_config,
+    request,
 ):
     module_args(
         {
@@ -381,7 +396,7 @@ def test_purge_all_parameters_role_type(
             "message": f"{Path(request.node.parent.name).stem}::{request.node.name}",
             # _ansible_check_mode=True,
             # _ansible_diff=True,
-        }
+        },
     )
 
     with pytest.raises(AnsibleExitJson) as e:
diff --git a/tests/unit/plugins/modules/cm_service_role_config_group_info/test_cm_service_role_config_group_info.py b/tests/unit/plugins/modules/cm_service_role_config_group_info/test_cm_service_role_config_group_info.py
index c5d1605b..a9a5c293 100644
--- a/tests/unit/plugins/modules/cm_service_role_config_group_info/test_cm_service_role_config_group_info.py
+++ b/tests/unit/plugins/modules/cm_service_role_config_group_info/test_cm_service_role_config_group_info.py
@@ -50,7 +50,7 @@ def test_read_role_config_group(conn, module_args, cms_auto):
         {
             **conn,
             "type": "HOSTMONITOR",
-        }
+        },
     )
 
     with pytest.raises(AnsibleExitJson) as e:
@@ -65,7 +65,7 @@ def test_read_role_config_group_nonexistent(conn, module_args, cms_auto):
         {
             **conn,
             "type": "DOESNOTEXIST",
-        }
+        },
     )
 
     with pytest.raises(AnsibleExitJson) as e:
@@ -78,6 +78,7 @@ def test_read_service_nonexistent(conn, module_args):
     module_args({**conn})
 
     with pytest.raises(
-        AnsibleFailJson, match="Cloudera Management service does not exist"
+        AnsibleFailJson,
+        match="Cloudera Management service does not exist",
     ) as e:
         cm_service_role_config_group_info.main()
diff --git a/tests/unit/plugins/modules/cm_service_role_info/test_cm_service_role_info.py b/tests/unit/plugins/modules/cm_service_role_info/test_cm_service_role_info.py
index dee3402d..6aa895c4 100644
--- a/tests/unit/plugins/modules/cm_service_role_info/test_cm_service_role_info.py
+++ b/tests/unit/plugins/modules/cm_service_role_info/test_cm_service_role_info.py
@@ -46,7 +46,7 @@ def test_read_role(conn, module_args, cms_auto):
         {
             **conn,
             "type": "HOSTMONITOR",
-        }
+        },
     )
 
     with pytest.raises(AnsibleExitJson) as e:
@@ -61,7 +61,7 @@ def test_read_role_nonexistent(conn, module_args, cms_auto):
         {
             **conn,
             "type": "DOESNOTEXIST",
-        }
+        },
     )
 
     with pytest.raises(AnsibleExitJson) as e:
@@ -74,6 +74,7 @@ def test_read_service_nonexistent(conn, module_args):
     module_args({**conn})
 
     with pytest.raises(
-        AnsibleFailJson, match="Cloudera Management service does not exist"
+        AnsibleFailJson,
+        match="Cloudera Management service does not exist",
     ) as e:
         cm_service_role_info.main()
diff --git a/tests/unit/plugins/modules/cm_trial/test_cm_trial_license.py b/tests/unit/plugins/modules/cm_trial/test_cm_trial_license.py
index a3f10eab..f40b2a9d 100644
--- a/tests/unit/plugins/modules/cm_trial/test_cm_trial_license.py
+++ b/tests/unit/plugins/modules/cm_trial/test_cm_trial_license.py
@@ -39,7 +39,7 @@ def test_pytest_cm_trial_license(module_args):
             "port": "7180",
             "verify_tls": "no",
             "debug": "no",
-        }
+        },
     )
 
     with pytest.raises(AnsibleExitJson) as e:
diff --git a/tests/unit/plugins/modules/cm_version_info/test_cm_version_info_i.py b/tests/unit/plugins/modules/cm_version_info/test_cm_version_info_i.py
index c37fdb81..e68783c2 100644
--- a/tests/unit/plugins/modules/cm_version_info/test_cm_version_info_i.py
+++ b/tests/unit/plugins/modules/cm_version_info/test_cm_version_info_i.py
@@ -33,7 +33,8 @@
 
 
 @unittest.skipUnless(
-    os.getenv("CM_USERNAME"), "Cloudera Manager access parameters not set"
+    os.getenv("CM_USERNAME"),
+    "Cloudera Manager access parameters not set",
 )
 class TestCMVersionIntegration(ModuleTestCase):
     def test_host_discovery(self):
@@ -45,7 +46,7 @@ def test_host_discovery(self):
                 "port": "7180",
                 "verify_tls": "no",
                 "debug": "yes",
-            }
+            },
         )
 
         with pytest.raises(AnsibleExitJson) as e:
@@ -66,7 +67,7 @@ def test_direct_endpoint(self):
                 + os.getenv("CM_VERSION"),
                 "verify_tls": "no",
                 "debug": "yes",
-            }
+            },
         )
 
         with pytest.raises(AnsibleExitJson) as e:
diff --git a/tests/unit/plugins/modules/external_account/test_external_account.py b/tests/unit/plugins/modules/external_account/test_external_account.py
index f69c0563..ad53273b 100644
--- a/tests/unit/plugins/modules/external_account/test_external_account.py
+++ b/tests/unit/plugins/modules/external_account/test_external_account.py
@@ -42,7 +42,7 @@ def test_create_aws_keys(module_args, conn, request):
                 "aws_secret_key": "secret_key11",
             },
             "state": "present",
-        }
+        },
     )
     with pytest.raises(AnsibleExitJson) as e:
         external_account.main()
@@ -59,7 +59,7 @@ def test_create_aws_role(module_args, conn, request):
             "category": "AWS",
             "type": "AWS_IAM_ROLES_AUTH",
             "state": "present",
-        }
+        },
     )
     with pytest.raises(AnsibleExitJson) as e:
         external_account.main()
@@ -81,7 +81,7 @@ def test_create_azure_credentials(module_args, conn, request):
                 "adls_tenant_id": "Tenant_test",
             },
             "state": "present",
-        }
+        },
     )
     with pytest.raises(AnsibleExitJson) as e:
         external_account.main()
@@ -102,7 +102,7 @@ def test_create_external_basic_user(module_args, conn, request):
                 "password": "123456",
             },
             "state": "present",
-        }
+        },
     )
     with pytest.raises(AnsibleExitJson) as e:
         external_account.main()
@@ -124,7 +124,7 @@ def test_update_aws_keys_diff_enabled(module_args, conn, request):
             },
             "state": "present",
             "_ansible_diff": True,
-        }
+        },
     )
 
     with pytest.raises(AnsibleExitJson) as e:
@@ -146,7 +146,7 @@ def test_update_aws_keys(module_args, conn, request):
                 "aws_secret_key": "22222222",
             },
             "state": "present",
-        }
+        },
     )
 
     with pytest.raises(AnsibleExitJson) as e:
@@ -162,7 +162,7 @@ def test_remove_external_basic_user(module_args, conn, request):
             **conn,
             "name": "John",
             "state": "absent",
-        }
+        },
     )
     with pytest.raises(AnsibleExitJson) as e:
         external_account.main()
diff --git a/tests/unit/plugins/modules/host/conftest.py b/tests/unit/plugins/modules/host/conftest.py
index 1bb78120..48c872e0 100644
--- a/tests/unit/plugins/modules/host/conftest.py
+++ b/tests/unit/plugins/modules/host/conftest.py
@@ -238,7 +238,7 @@ def resettable_cluster(cm_api_client, base_cluster):
             items=[
                 ApiHostRef(host_id=prior_host.host_id, hostname=prior_host.hostname)
                 for prior_host in prior_hosts
-            ]
+            ],
         ),
     )
 
@@ -275,7 +275,9 @@ def _wrapper(host: ApiHost) -> ApiHost:
         else:
             # Tags
             tag_updates = TagUpdates(
-                target_host.tags, {t.name: t.value for t in previous_host.tags}, True
+                target_host.tags,
+                {t.name: t.value for t in previous_host.tags},
+                True,
             )
             if tag_updates.deletions:
                 host_api.delete_tags(
@@ -317,7 +319,7 @@ def _wrapper(host: ApiHost) -> ApiHost:
                 and target_host.cluster_ref is not None
             ):
                 decommission_cmd = host_api.remove_hosts_from_cluster(
-                    body=ApiHostsToRemoveArgs(hosts_to_remove=[target_host.hostname])
+                    body=ApiHostsToRemoveArgs(hosts_to_remove=[target_host.hostname]),
                 )
                 wait_command(
                     api_client=cm_api_client,
@@ -340,8 +342,8 @@ def _wrapper(host: ApiHost) -> ApiHost:
                         ApiHostRef(
                             host_id=target_host.host_id,
                             hostname=previous_host.hostname,
-                        )
-                    ]
+                        ),
+                    ],
                 ),
             )
 
diff --git a/tests/unit/plugins/modules/host/test_host.py b/tests/unit/plugins/modules/host/test_host.py
index f9fa85c8..36a28129 100644
--- a/tests/unit/plugins/modules/host/test_host.py
+++ b/tests/unit/plugins/modules/host/test_host.py
@@ -47,11 +47,12 @@ def test_host_missing_required(self, conn, module_args):
         module_args(
             {
                 **conn,
-            }
+            },
         )
 
         with pytest.raises(
-            AnsibleFailJson, match="one of the following is required: name, host_id"
+            AnsibleFailJson,
+            match="one of the following is required: name, host_id",
         ) as e:
             host.main()
@@ -61,7 +62,7 @@ def test_host_missing_host_template_cluster(self, conn, module_args):
                 **conn,
                 "name": "example",
                 "host_template": "example",
-            }
+            },
         )
 
         with pytest.raises(
@@ -81,7 +82,7 @@ def test_host_missing_role_config_groups_cluster(self, conn, module_args):
                         "type": "example",
                    },
                ],
-            }
+            },
         )
 
         with pytest.raises(
@@ -101,7 +102,7 @@ def test_host_missing_roles_cluster(self, conn, module_args):
                        "type": "example",
                    },
                ],
-            }
+            },
         )
 
         with pytest.raises(
@@ -119,7 +120,7 @@ def test_host_create_missing_ip_address(self, conn, module_args):
             {
                 **conn,
                 "name": "pytest-host",
-            }
+            },
         )
 
         with pytest.raises(
@@ -134,7 +135,7 @@ def test_host_create_ip_address(self, conn, module_args, detached_hosts):
                 **conn,
                 "name": "pytest-host",
                 "ip_address": detached_hosts[0].ip_address,
-            }
+            },
         )
 
         with pytest.raises(AnsibleFailJson, match="boom") as e:
@@ -144,7 +145,7 @@ def test_host_create_rack_id(self, conn, module_args):
         module_args(
             {
                 **conn,
-            }
+            },
         )
 
         with pytest.raises(AnsibleFailJson, match="boom") as e:
@@ -154,7 +155,7 @@ def test_host_create_host_template(self, conn, module_args):
         module_args(
             {
                 **conn,
-            }
+            },
         )
 
         with pytest.raises(AnsibleFailJson, match="boom") as e:
@@ -164,7 +165,7 @@ def test_host_create_tags(self, conn, module_args):
         module_args(
             {
                 **conn,
-            }
+            },
         )
 
         with pytest.raises(AnsibleFailJson, match="boom") as e:
@@ -174,7 +175,7 @@ def test_host_create_maintenance_enabled(self, conn, module_args):
         module_args(
             {
                 **conn,
-            }
+            },
         )
 
         with pytest.raises(AnsibleFailJson, match="boom") as e:
@@ -184,7 +185,9 @@ def test_host_create_maintenance_enabled(self, conn, module_args):
 
 class TestHostModification:
     @pytest.fixture()
     def maintenance_enabled_host(
-        self, cm_api_client, detached_hosts
+        self,
+        cm_api_client,
+        detached_hosts,
     ) -> Generator[ApiHost]:
         target_host = detached_hosts[0]
@@ -203,7 +206,9 @@ def maintenance_enabled_host(
 
     @pytest.fixture()
     def maintenance_disabled_host(
-        self, cm_api_client, detached_hosts
+        self,
+        cm_api_client,
+        detached_hosts,
     ) -> Generator[ApiHost]:
         target_host = detached_hosts[0]
@@ -228,7 +233,7 @@ def test_host_update_ip_address(self, conn, module_args, attached_hosts):
                 **conn,
                 "name": target_host.hostname,
                 "ip_address": "10.0.0.1",
-            }
+            },
         )
 
         with pytest.raises(AnsibleFailJson, match="To update the host IP address") as e:
@@ -242,7 +247,7 @@ def test_host_update_rack_id(self, conn, module_args, attached_hosts):
                 **conn,
                 "name": target_host.hostname,
                 "rack_id": "/pytest1",
-            }
+            },
         )
 
         with pytest.raises(AnsibleExitJson) as e:
@@ -289,7 +294,7 @@ def test_host_update_tags(
                     "tag_one": "Updated",
                     "tag_three": "Added",
                 },
-            }
+            },
         )
 
         with pytest.raises(AnsibleExitJson) as e:
@@ -297,7 +302,9 @@ def test_host_update_tags(
 
         assert e.value.changed == True
         assert e.value.host["tags"] == dict(
-            tag_one="Updated", tag_two="Existing", tag_three="Added"
+            tag_one="Updated",
+            tag_two="Existing",
+            tag_three="Added",
         )
 
         # Idempotency
         with pytest.raises(AnsibleExitJson) as e:
             host.main()
 
         assert e.value.changed == False
         assert e.value.host["tags"] == dict(
-            tag_one="Updated", tag_two="Existing", tag_three="Added"
+            tag_one="Updated",
+            tag_two="Existing",
+            tag_three="Added",
         )
 
     def test_host_update_tags_purge(
@@ -343,7 +352,7 @@ def test_host_update_tags_purge(
                 # Note that if using an attached host, be sure to include the cluster name
                 # or purge will detach the host from the cluster!
                 "purge": True,
-            }
+            },
         )
 
         with pytest.raises(AnsibleExitJson) as e:
@@ -382,7 +391,7 @@ def test_host_update_config(
                 items=[
                     ApiConfig(name="memory_overcommit_threshold", value="0.85"),
                     ApiConfig(name="host_memswap_window", value="16"),
-                ]
+                ],
             ),
         )
 
@@ -395,7 +404,7 @@ def test_host_update_config(
                     "host_network_frame_errors_window": "20",
                     "host_memswap_window": "20",
                 },
-            }
+            },
         )
 
         with pytest.raises(AnsibleExitJson) as e:
@@ -442,7 +451,7 @@ def test_host_update_config_purge(
                 items=[
                     ApiConfig(name="memory_overcommit_threshold", value="0.85"),
                     ApiConfig(name="host_memswap_window", value="16"),
-                ]
+                ],
             ),
         )
 
@@ -458,7 +467,7 @@ def test_host_update_config_purge(
                 "purge": True,
                 # Note that if using an attached host, be sure to set 'cluster' or it will
                 # be detached due to the 'purge' flag!
-            }
+            },
         )
 
         with pytest.raises(AnsibleExitJson) as e:
@@ -481,14 +490,17 @@
     )
 
     def test_host_update_maintenance_enabled(
-        self, conn, module_args, maintenance_disabled_host
+        self,
+        conn,
+        module_args,
+        maintenance_disabled_host,
     ):
         module_args(
             {
                 **conn,
                 "name": maintenance_disabled_host.hostname,
                 "maintenance": True,
-            }
+            },
         )
 
         with pytest.raises(
 
         assert e.value.host["maintenance_mode"] == True
 
     def test_host_update_maintenance_disabled(
-        self, conn, module_args, maintenance_enabled_host
+        self,
+        conn,
+        module_args,
+        maintenance_enabled_host,
     ):
         module_args(
             {
                 **conn,
                 "name": maintenance_enabled_host.hostname,
                 "maintenance": False,
-            }
+            },
         )
 
         with pytest.raises(
diff --git a/tests/unit/plugins/modules/host/test_host_clusters.py b/tests/unit/plugins/modules/host/test_host_clusters.py
index 311e1551..86d1f917 100644
--- a/tests/unit/plugins/modules/host/test_host_clusters.py
+++ b/tests/unit/plugins/modules/host/test_host_clusters.py
@@ -34,7 +34,11 @@
 class TestHostAttachedCluster:
     def test_host_attach_invalid_cluster(
-        self, conn, module_args, resettable_host, detached_hosts
+        self,
+        conn,
+        module_args,
+        resettable_host,
+        detached_hosts,
     ):
         target_host = resettable_host(random.choice(detached_hosts))
 
@@ -43,7 +47,7 @@ def test_host_attach_invalid_cluster(
                 **conn,
                 "name": target_host.hostname,
                 "cluster": "BOOM",
-            }
+            },
         )
 
         with pytest.raises(
@@ -53,7 +57,12 @@ def test_host_attach_invalid_cluster(
             host.main()
 
     def test_host_attach_cluster(
-        self, conn, module_args, base_cluster, resettable_host, detached_hosts
+        self,
+        conn,
+        module_args,
+        base_cluster,
+        resettable_host,
+        detached_hosts,
     ):
         target_host = resettable_host(random.choice(detached_hosts))
 
@@ -62,7 +71,7 @@ def test_host_attach_cluster(
                 **conn,
                 "name": target_host.hostname,
                 "cluster": base_cluster.name,
-            }
+            },
         )
 
         with pytest.raises(AnsibleExitJson) as e:
@@ -82,7 +91,12 @@ def test_host_attach_cluster(
 
 @pytest.mark.skip("Requires set up of two clusters")
 class TestHostMigrateClusters:
     def test_host_migrate_cluster(
-        self, conn, module_args, base_cluster, resettable_host, detached_hosts
+        self,
+        conn,
+        module_args,
+        base_cluster,
+        resettable_host,
+        detached_hosts,
     ):
         target_host = resettable_host(random.choice(detached_hosts))
 
@@ -91,7 +105,7 @@ def test_host_migrate_cluster(
                 **conn,
                 "name": target_host.hostname,
                 "cluster": base_cluster.name,
-            }
+            },
         )
 
         with pytest.raises(AnsibleExitJson) as e:
@@ -117,7 +131,7 @@ def test_host_detach(self, conn, module_args, attached_hosts, resettable_host):
                 **conn,
                 "name": target_host.hostname,
                 "purge": True,
-            }
+            },
         )
 
         with pytest.raises(AnsibleExitJson) as e:
diff --git a/tests/unit/plugins/modules/host/test_host_host_templates.py b/tests/unit/plugins/modules/host/test_host_host_templates.py
index cae56718..bcf23cc7 100644
--- a/tests/unit/plugins/modules/host/test_host_host_templates.py
+++ b/tests/unit/plugins/modules/host/test_host_host_templates.py
@@ -91,7 +91,7 @@ def test_host_update_host_template_new(
                 "name": target_host.hostname,
                 "cluster": target_host.cluster_ref.cluster_name,
                 "host_template": host_template.name,
-            }
+            },
         )
 
         with pytest.raises(AnsibleExitJson) as e:
@@ -101,7 +101,7 @@ def test_host_update_host_template_new(
 
         # Reread the host
         updated_host = HostsResourceApi(cm_api_client).read_host(
-            host_id=target_host.host_id
+            host_id=target_host.host_id,
         )
 
         # Retrieve the current running roles on the host
@@ -119,7 +119,7 @@ def test_host_update_host_template_new(
 
         # Reread the host
         updated_host = HostsResourceApi(cm_api_client).read_host(
-            host_id=target_host.host_id
+            host_id=target_host.host_id,
         )
 
         # Retrieve the current running roles on the host
@@ -187,7 +187,7 @@ def test_host_update_host_template_existing(
                 "name": target_host.hostname,
                 "cluster": target_host.cluster_ref.cluster_name,
                 "host_template": host_template.name,
-            }
+            },
         )
 
         with pytest.raises(AnsibleExitJson) as e:
@@ -197,7 +197,7 @@ def test_host_update_host_template_existing(
 
         # Reread the host
         updated_host = HostsResourceApi(cm_api_client).read_host(
-            host_id=target_host.host_id
+            host_id=target_host.host_id,
         )
 
         # Retrieve the current running roles on the host
@@ -207,12 +207,12 @@ def test_host_update_host_template_existing(
             [
                 target_rcg.name,
                 existing_role.role_config_group_ref.role_config_group_name,
-            ]
+            ],
         ) == set(
             [
                 role.role_config_group_ref.role_config_group_name
                 for role in current_roles
-            ]
+            ],
         )
 
         # Idempotency
@@ -223,7 +223,7 @@ def test_host_update_host_template_existing(
 
         # Reread the host
         updated_host = HostsResourceApi(cm_api_client).read_host(
-            host_id=target_host.host_id
+            host_id=target_host.host_id,
         )
 
         # Retrieve the current running roles on the host
@@ -233,12 +233,12 @@ def test_host_update_host_template_existing(
             [
                 target_rcg.name,
                 existing_role.role_config_group_ref.role_config_group_name,
-            ]
+            ],
         ) == set(
             [
                 role.role_config_group_ref.role_config_group_name
                 for role in current_roles
-            ]
+            ],
         )
 
     def test_host_update_host_template_purge(
@@ -300,7 +300,7 @@ def test_host_update_host_template_purge(
                 "cluster": target_host.cluster_ref.cluster_name,
                 "host_template": host_template.name,
                 "purge": True,
-            }
+            },
         )
 
         with pytest.raises(AnsibleExitJson) as e:
@@ -310,7 +310,7 @@ def test_host_update_host_template_purge(
 
         # Reread the host
         updated_host = HostsResourceApi(cm_api_client).read_host(
-            host_id=target_host.host_id
+            host_id=target_host.host_id,
         )
 
         # Retrieve the current running roles on the host
@@ -320,7 +320,7 @@ def test_host_update_host_template_purge(
             [
                 role.role_config_group_ref.role_config_group_name
                 for role in current_roles
-            ]
+            ],
         )
 
         # Idempotency
@@ -331,7 +331,7 @@ def test_host_update_host_template_purge(
 
         # Reread the host
         updated_host = HostsResourceApi(cm_api_client).read_host(
-            host_id=target_host.host_id
+            host_id=target_host.host_id,
         )
 
         # Retrieve the current running roles on the host
@@ -341,5 +341,5 @@ def test_host_update_host_template_purge(
             [
                 role.role_config_group_ref.role_config_group_name
                 for role in current_roles
-            ]
+            ],
         )
diff --git a/tests/unit/plugins/modules/host/test_host_rcgs.py b/tests/unit/plugins/modules/host/test_host_rcgs.py
index 8871f7d0..b05ad1a8 100644
--- a/tests/unit/plugins/modules/host/test_host_rcgs.py
+++ b/tests/unit/plugins/modules/host/test_host_rcgs.py
@@ -52,7 +52,11 @@
 class TestHostRoleConfigGroups:
     def test_host_update_role_config_group_invalid_service(
-        self, conn, module_args, available_hosts, zookeeper
+        self,
+        conn,
+        module_args,
+        available_hosts,
+        zookeeper,
     ):
         target_host = available_hosts[0]
 
@@ -69,14 +73,18 @@ def test_host_update_role_config_group_invalid_service(
                        "type": "Example",
                    },
                ],
-            }
+            },
         )
 
         with pytest.raises(AnsibleFailJson, match="Service 'BOOM' not found"):
             host.main()
 
     def test_host_update_role_config_group_invalid_type(
-        self, conn, module_args, available_hosts, zookeeper
+        self,
+        conn,
+        module_args,
+        available_hosts,
+        zookeeper,
     ):
         target_host = available_hosts[0]
 
@@ -92,11 +100,12 @@ def test_host_update_role_config_group_invalid_type(
                        "type": "BOOM",
                    },
                ],
-            }
+            },
         )
 
         with pytest.raises(
-            AnsibleFailJson, match="Base role config group for type, 'BOOM', not found"
+            AnsibleFailJson,
+            match="Base role config group for type, 'BOOM', not found",
         ):
             host.main()
 
@@ -137,11 +146,12 @@ def test_host_update_role_config_group_invalid_name(
                        "name": "BOOM",
                    },
                ],
-            }
+            },
         )
 
         with pytest.raises(
-            AnsibleFailJson, match="The role config group 'BOOM' does not exist"
+            AnsibleFailJson,
+            match="The role config group 'BOOM' does not exist",
         ):
             host.main()
 
@@ -183,7 +193,7 @@ def test_host_update_role_config_group_new_name(
                        "name": target_rcg.name,
                    },
                ],
-            }
+            },
         )
 
         with pytest.raises(AnsibleExitJson) as e:
@@ -193,7 +203,7 @@ def test_host_update_role_config_group_new_name(
 
         # Reread the host
         updated_host = HostsResourceApi(cm_api_client).read_host(
-            host_id=target_host.host_id
+            host_id=target_host.host_id,
         )
 
         # Retrieve the current running roles on the host
@@ -211,7 +221,7 @@ def test_host_update_role_config_group_new_name(
 
         # Reread the host
         updated_host = HostsResourceApi(cm_api_client).read_host(
-            host_id=target_host.host_id
+            host_id=target_host.host_id,
         )
 
         # Retrieve the current running roles on the host
@@ -222,7 +232,12 @@ def test_host_update_role_config_group_new_name(
         ]
 
     def test_host_update_role_config_group_new_base(
-        self, conn, module_args, cm_api_client, available_hosts, zookeeper
+        self,
+        conn,
+        module_args,
+        cm_api_client,
+        available_hosts,
+        zookeeper,
     ):
         target_rcg = get_base_role_config_group(
             api_client=cm_api_client,
@@ -246,7 +261,7 @@ def test_host_update_role_config_group_new_base(
                        "name": target_rcg.name,
                    },
                ],
-            }
+            },
         )
 
         with pytest.raises(AnsibleExitJson) as e:
@@ -256,7 +271,7 @@ def test_host_update_role_config_group_new_base(
 
         # Reread the host
         updated_host = HostsResourceApi(cm_api_client).read_host(
-            host_id=target_host.host_id
+            host_id=target_host.host_id,
         )
 
         # Retrieve the current running roles on the host
@@ -274,7 +289,7 @@ def test_host_update_role_config_group_new_base(
 
         # Reread the host
         updated_host = HostsResourceApi(cm_api_client).read_host(
-            host_id=target_host.host_id
+            host_id=target_host.host_id,
         )
 
         # Retrieve the current running roles on the host
@@ -341,7 +356,7 @@ def test_host_update_role_config_group_existing_name(
                        "name": target_rcg.name,
                    },
                ],
-            }
+            },
         )
 
         with pytest.raises(AnsibleExitJson) as e:
@@ -351,7 +366,7 @@ def test_host_update_role_config_group_existing_name(
 
         # Reread the host
         updated_host = HostsResourceApi(cm_api_client).read_host(
-            host_id=target_host.host_id
+            host_id=target_host.host_id,
         )
 
         # Retrieve
the current running roles on the host @@ -369,7 +384,7 @@ def test_host_update_role_config_group_existing_name( # Reread the host updated_host = HostsResourceApi(cm_api_client).read_host( - host_id=target_host.host_id + host_id=target_host.host_id, ) # Retrieve the current running roles on the host @@ -380,7 +395,13 @@ def test_host_update_role_config_group_existing_name( ] def test_host_update_role_config_group_existing_base( - self, conn, module_args, cm_api_client, available_hosts, base_cluster, zookeeper + self, + conn, + module_args, + cm_api_client, + available_hosts, + base_cluster, + zookeeper, ): # Get an existing, non-ZK SERVER host target_host = available_hosts[0] @@ -422,7 +443,7 @@ def test_host_update_role_config_group_existing_base( "name": target_rcg.name, }, ], - } + }, ) with pytest.raises(AnsibleExitJson) as e: @@ -432,7 +453,7 @@ def test_host_update_role_config_group_existing_base( # Reread the host updated_host = HostsResourceApi(cm_api_client).read_host( - host_id=target_host.host_id + host_id=target_host.host_id, ) # Retrieve the current running roles on the host @@ -450,7 +471,7 @@ def test_host_update_role_config_group_existing_base( # Reread the host updated_host = HostsResourceApi(cm_api_client).read_host( - host_id=target_host.host_id + host_id=target_host.host_id, ) # Retrieve the current running roles on the host @@ -518,7 +539,7 @@ def test_host_update_role_config_group_purge_name( }, ], "purge": True, - } + }, ) with pytest.raises(AnsibleExitJson) as e: @@ -528,7 +549,7 @@ def test_host_update_role_config_group_purge_name( # Reread the host updated_host = HostsResourceApi(cm_api_client).read_host( - host_id=target_host.host_id + host_id=target_host.host_id, ) # Retrieve the current running roles on the host @@ -538,7 +559,7 @@ def test_host_update_role_config_group_purge_name( [ role.role_config_group_ref.role_config_group_name for role in current_roles - ] + ], ) # Idempotency @@ -549,7 +570,7 @@ def test_host_update_role_config_group_purge_name( # Reread the host updated_host = HostsResourceApi(cm_api_client).read_host( - host_id=target_host.host_id + host_id=target_host.host_id, ) # Retrieve the current running roles on the host @@ -559,11 +580,17 @@ def test_host_update_role_config_group_purge_name( [ role.role_config_group_ref.role_config_group_name for role in current_roles - ] + ], ) def test_host_update_role_config_group_purge_base( - self, conn, module_args, cm_api_client, available_hosts, base_cluster, zookeeper + self, + conn, + module_args, + cm_api_client, + available_hosts, + base_cluster, + zookeeper, ): # Get an existing, non-ZK SERVER host target_host = available_hosts[0] @@ -606,7 +633,7 @@ def test_host_update_role_config_group_purge_base( }, ], "purge": True, - } + }, ) with pytest.raises(AnsibleExitJson) as e: @@ -616,7 +643,7 @@ def test_host_update_role_config_group_purge_base( # Reread the host updated_host = HostsResourceApi(cm_api_client).read_host( - host_id=target_host.host_id + host_id=target_host.host_id, ) # Retrieve the current running roles on the host @@ -626,7 +653,7 @@ def test_host_update_role_config_group_purge_base( [ role.role_config_group_ref.role_config_group_name for role in current_roles - ] + ], ) # Idempotency @@ -637,7 +664,7 @@ def test_host_update_role_config_group_purge_base( # Reread the host updated_host = HostsResourceApi(cm_api_client).read_host( - host_id=target_host.host_id + host_id=target_host.host_id, ) # Retrieve the current running roles on the host @@ -647,5 +674,5 @@ def 
test_host_update_role_config_group_purge_base( [ role.role_config_group_ref.role_config_group_name for role in current_roles - ] + ], ) diff --git a/tests/unit/plugins/modules/host/test_host_role_configs.py b/tests/unit/plugins/modules/host/test_host_role_configs.py index b7daccd7..56827b38 100644 --- a/tests/unit/plugins/modules/host/test_host_role_configs.py +++ b/tests/unit/plugins/modules/host/test_host_role_configs.py @@ -38,7 +38,13 @@ class TestHostRoleConfigs: def test_host_update_role_config_invalid_type( - self, conn, module_args, cm_api_client, available_hosts, zookeeper, role_factory + self, + conn, + module_args, + cm_api_client, + available_hosts, + zookeeper, + role_factory, ): role_model = create_role( api_client=cm_api_client, @@ -69,14 +75,20 @@ def test_host_update_role_config_invalid_type( }, }, ], - } + }, ) with pytest.raises(AnsibleFailJson, match="No role of type, 'BOOM'"): host.main() def test_host_update_role_config( - self, conn, module_args, cm_api_client, available_hosts, zookeeper, role_factory + self, + conn, + module_args, + cm_api_client, + available_hosts, + zookeeper, + role_factory, ): role_model = create_role( api_client=cm_api_client, @@ -111,7 +123,7 @@ def test_host_update_role_config( }, }, ], - } + }, ) with pytest.raises(AnsibleExitJson) as e: @@ -158,7 +170,13 @@ def test_host_update_role_config( ) def test_host_update_role_config_purge( - self, conn, module_args, cm_api_client, available_hosts, zookeeper, role_factory + self, + conn, + module_args, + cm_api_client, + available_hosts, + zookeeper, + role_factory, ): role_model = create_role( @@ -196,7 +214,7 @@ def test_host_update_role_config_purge( ], "purge": True, "cluster": existing_role.service_ref.cluster_name, - } + }, ) with pytest.raises(AnsibleExitJson) as e: diff --git a/tests/unit/plugins/modules/host_info/test_host_info.py b/tests/unit/plugins/modules/host_info/test_host_info.py index 850e827d..829f34a4 100644 --- a/tests/unit/plugins/modules/host_info/test_host_info.py +++ b/tests/unit/plugins/modules/host_info/test_host_info.py @@ -43,7 +43,7 @@ def test_host_info_host_id_invalid(conn, module_args): { **conn, "host_id": "BOOM", - } + }, ) with pytest.raises(AnsibleExitJson) as e: @@ -57,7 +57,7 @@ def test_host_info_name_invalid(conn, module_args): { **conn, "name": "BOOM", - } + }, ) with pytest.raises(AnsibleExitJson) as e: @@ -71,7 +71,7 @@ def test_host_info_cluster_invalid(conn, module_args): { **conn, "cluster": "BOOM", - } + }, ) with pytest.raises(AnsibleFailJson, match="Cluster does not exist: BOOM"): @@ -85,7 +85,7 @@ def test_host_info_host_id(conn, module_args, cm_api_client): { **conn, "host_id": all_hosts[0].host_id, - } + }, ) with pytest.raises(AnsibleExitJson) as e: @@ -102,7 +102,7 @@ def test_host_info_name(conn, module_args, cm_api_client): { **conn, "name": all_hosts[0].hostname, - } + }, ) with pytest.raises(AnsibleExitJson) as e: @@ -122,7 +122,7 @@ def test_host_info_cluster(conn, module_args, cm_api_client, base_cluster): { **conn, "cluster": base_cluster.name, - } + }, ) with pytest.raises(AnsibleExitJson) as e: diff --git a/tests/unit/plugins/modules/host_template/test_host_template.py b/tests/unit/plugins/modules/host_template/test_host_template.py index 3c391446..b805c24d 100644 --- a/tests/unit/plugins/modules/host_template/test_host_template.py +++ b/tests/unit/plugins/modules/host_template/test_host_template.py @@ -131,7 +131,9 @@ def resettable_host_templates(cm_api_client, base_cluster) -> Generator[None]: @pytest.fixture() def 
existing_host_template( - cm_api_client, zookeeper, request + cm_api_client, + zookeeper, + request, ) -> Generator[ApiHostTemplate]: host_template_api = HostTemplatesResourceApi(cm_api_client) @@ -153,8 +155,8 @@ def existing_host_template( role_config_group_refs=[ ApiRoleConfigGroupRef(role_config_group_name=base_rcg.name), ], - ) - ] + ), + ], ), ).items[0] @@ -176,11 +178,12 @@ def test_host_template_missing_cluster(conn, module_args): **conn, "name": "EXAMPLE", "role_config_groups": [], - } + }, ) with pytest.raises( - AnsibleFailJson, match="missing required arguments: cluster" + AnsibleFailJson, + match="missing required arguments: cluster", ) as e: host_template.main() @@ -191,7 +194,7 @@ def test_host_template_missing_name(conn, module_args): **conn, "cluster": "EXAMPLE", "role_config_groups": [], - } + }, ) with pytest.raises(AnsibleFailJson, match="missing required arguments: name") as e: @@ -204,7 +207,7 @@ def test_host_template_missing_role_config_groups_on_present(conn, module_args): **conn, "cluster": "EXAMPLE", "name": "EXAMPLE", - } + }, ) with pytest.raises( @@ -224,9 +227,9 @@ def test_host_template_provision_invalid_cluster(conn, module_args): { "service": "zookeeper", "type": "SERVER", - } + }, ], - } + }, ) with pytest.raises(AnsibleFailJson, match="Cluster does not exist: INVALID") as e: @@ -243,13 +246,14 @@ def test_host_template_provision_invalid_base_rcg_service(conn, module_args, zoo { "service": "INVALID", "type": "SERVER", - } + }, ], - } + }, ) with pytest.raises( - AnsibleFailJson, match="Service 'INVALID' not found in cluster" + AnsibleFailJson, + match="Service 'INVALID' not found in cluster", ) as e: host_template.main() @@ -264,9 +268,9 @@ def test_host_template_provision_invalid_base_rcg_name(conn, module_args, zookee { "service": zookeeper.name, "type": "INVALID", - } + }, ], - } + }, ) with pytest.raises( @@ -277,7 +281,11 @@ def test_host_template_provision_invalid_base_rcg_name(conn, module_args, zookee def test_host_template_provision_base_rcg( - conn, module_args, cm_api_client, zookeeper, request + conn, + module_args, + cm_api_client, + zookeeper, + request, ): id = f"pytest-{request.node.name}" @@ -297,9 +305,9 @@ def test_host_template_provision_base_rcg( { "service": zookeeper.name, "type": base_rcg.role_type, - } + }, ], - } + }, ) with pytest.raises(AnsibleExitJson) as e: @@ -320,7 +328,11 @@ def test_host_template_provision_base_rcg( def test_host_template_provision_custom_rcg( - conn, module_args, zookeeper, role_config_group_factory, request + conn, + module_args, + zookeeper, + role_config_group_factory, + request, ): id = f"pytest-{request.node.name}" @@ -338,9 +350,9 @@ def test_host_template_provision_custom_rcg( { "service": zookeeper.name, "name": custom_rcg.name, - } + }, ], - } + }, ) with pytest.raises(AnsibleExitJson) as e: @@ -384,9 +396,9 @@ def test_host_template_existing_duplicate_type( { "service": zookeeper.name, "name": custom_rcg.name, - } + }, ], - } + }, ) with pytest.raises( @@ -423,9 +435,9 @@ def test_host_template_existing_add( { "service": zookeeper.name, "name": custom_rcg.name, - } + }, ], - } + }, ) with pytest.raises(AnsibleExitJson) as e: @@ -443,7 +455,7 @@ def test_host_template_existing_add( [ rcg_ref.role_config_group_name for rcg_ref in updated_host_template.role_config_group_refs - ] + ], ) == set(e.value.host_template["role_config_groups"]) # Idempotency @@ -463,7 +475,7 @@ def test_host_template_existing_add( [ rcg_ref.role_config_group_name for rcg_ref in 
updated_host_template.role_config_group_refs - ] + ], ) == set(e.value.host_template["role_config_groups"]) @@ -494,10 +506,10 @@ def test_host_template_existing_purge( { "service": zookeeper.name, "name": custom_rcg.name, - } + }, ], "purge": True, - } + }, ) with pytest.raises(AnsibleExitJson) as e: @@ -515,7 +527,7 @@ def test_host_template_existing_purge( [ rcg_ref.role_config_group_name for rcg_ref in updated_host_template.role_config_group_refs - ] + ], ) == set(e.value.host_template["role_config_groups"]) # Idempotency @@ -535,12 +547,15 @@ def test_host_template_existing_purge( [ rcg_ref.role_config_group_name for rcg_ref in updated_host_template.role_config_group_refs - ] + ], ) == set(e.value.host_template["role_config_groups"]) def test_host_template_state_absent( - conn, module_args, cm_api_client, existing_host_template + conn, + module_args, + cm_api_client, + existing_host_template, ): host_template_api = HostTemplatesResourceApi(cm_api_client) @@ -550,7 +565,7 @@ def test_host_template_state_absent( "cluster": existing_host_template.cluster_ref.cluster_name, "name": existing_host_template.name, "state": "absent", - } + }, ) with pytest.raises(AnsibleExitJson) as e: diff --git a/tests/unit/plugins/modules/host_template_info/test_host_template_info.py b/tests/unit/plugins/modules/host_template_info/test_host_template_info.py index 1e1462d5..36a5bfe0 100644 --- a/tests/unit/plugins/modules/host_template_info/test_host_template_info.py +++ b/tests/unit/plugins/modules/host_template_info/test_host_template_info.py @@ -88,7 +88,7 @@ def test_host_template_info_missing_cluster(conn, module_args): module_args( { **conn, - } + }, ) with pytest.raises(AnsibleFailJson, match="missing required arguments: cluster"): @@ -128,7 +128,7 @@ def test_host_template_info_named( **conn, "cluster": base_cluster.name, "name": id, - } + }, ) with pytest.raises(AnsibleExitJson) as e: @@ -143,7 +143,7 @@ def test_host_template_info_not_found(conn, module_args, base_cluster): **conn, "cluster": base_cluster.name, "name": "not_found", - } + }, ) with pytest.raises(AnsibleExitJson) as e: @@ -184,7 +184,7 @@ def test_host_template_info_all( { **conn, "cluster": base_cluster.name, - } + }, ) with pytest.raises(AnsibleExitJson) as e: diff --git a/tests/unit/plugins/modules/service/test_service.py b/tests/unit/plugins/modules/service/test_service.py index 753da8d9..5689a4f3 100644 --- a/tests/unit/plugins/modules/service/test_service.py +++ b/tests/unit/plugins/modules/service/test_service.py @@ -138,7 +138,7 @@ def test_service_missing_name(self, conn, module_args): { **conn, "service": "example", - } + }, ) with pytest.raises(AnsibleFailJson, match="cluster"): @@ -149,7 +149,7 @@ def test_service_missing_cluster(self, conn, module_args): { **conn, "cluster": "example", - } + }, ) with pytest.raises(AnsibleFailJson, match="name"): @@ -164,9 +164,9 @@ def test_service_roles_missing_type(self, conn, module_args): "roles": [ { "hostnames": "example", - } + }, ], - } + }, ) with pytest.raises(AnsibleFailJson, match="type found in roles"): @@ -181,9 +181,9 @@ def test_service_roles_missing_hostnames(self, conn, module_args): "roles": [ { "type": "example", - } + }, ], - } + }, ) with pytest.raises(AnsibleFailJson, match="hostnames found in roles"): @@ -203,7 +203,7 @@ def test_present_missing_type(self, conn, module_args, base_cluster): **conn, "cluster": base_cluster.name, "service": "test-zookeeper", - } + }, ) with pytest.raises(AnsibleFailJson, match="type"): @@ -222,7 +222,7 @@ def zookeeper_reset(self, 
cm_api_client, base_cluster): cluster_name=base_cluster.name, ) .items - ] + ], ) # Yield to the test @@ -250,7 +250,7 @@ def test_service_provision_core(self, conn, module_args, base_cluster, request): "name": id, "type": "ZOOKEEPER", "state": "present", - } + }, ) with pytest.raises(AnsibleExitJson) as e: @@ -281,7 +281,11 @@ def test_service_provision_core(self, conn, module_args, base_cluster, request): assert e.value.service["roles"] == list() def test_service_provision_display_name( - self, conn, module_args, base_cluster, request + self, + conn, + module_args, + base_cluster, + request, ): id = f"pytest-{Path(request.node.name)}" name = "Pytest ZooKeeper" @@ -294,7 +298,7 @@ def test_service_provision_display_name( "type": "ZOOKEEPER", "display_name": name, "state": "present", - } + }, ) with pytest.raises(AnsibleExitJson) as e: @@ -335,7 +339,7 @@ def test_service_provision_config(self, conn, module_args, base_cluster, request "type": "ZOOKEEPER", "config": {"tickTime": 2001}, "state": "present", - } + }, ) with pytest.raises(AnsibleExitJson) as e: @@ -376,7 +380,7 @@ def test_service_provision_tags(self, conn, module_args, base_cluster, request): "type": "ZOOKEEPER", "tags": {"pytest": "example"}, "state": "present", - } + }, ) with pytest.raises(AnsibleExitJson) as e: @@ -424,7 +428,7 @@ def test_service_existing_type(self, conn, module_args, zookeeper): "name": zookeeper.name, "type": "GATEWAY", "state": "present", - } + }, ) with pytest.raises(AnsibleFailJson, match="already in use"): @@ -438,7 +442,7 @@ def test_service_existing_display_name(self, conn, module_args, zookeeper): "name": zookeeper.name, "display_name": "Example", "state": "present", - } + }, ) with pytest.raises(AnsibleExitJson) as e: @@ -476,7 +480,7 @@ def test_service_existing_maintenance_enabled(self, conn, module_args, zookeeper "name": zookeeper.name, "maintenance": True, "state": "present", - } + }, ) with pytest.raises(AnsibleExitJson) as e: @@ -505,7 +509,11 @@ def test_service_existing_maintenance_enabled(self, conn, module_args, zookeeper assert len(e.value.service["roles"]) == 1 # SERVER def test_service_existing_maintenance_disabled( - self, conn, module_args, cm_api_client, zookeeper + self, + conn, + module_args, + cm_api_client, + zookeeper, ): ServicesResourceApi(cm_api_client).enter_maintenance_mode( cluster_name=zookeeper.cluster_ref.cluster_name, @@ -519,7 +527,7 @@ def test_service_existing_maintenance_disabled( "name": zookeeper.name, "maintenance": False, "state": "present", - } + }, ) with pytest.raises(AnsibleExitJson) as e: @@ -548,7 +556,12 @@ def test_service_existing_maintenance_disabled( assert len(e.value.service["roles"]) == 1 # SERVER def test_service_existing_config( - self, conn, module_args, cm_api_client, zookeeper, request + self, + conn, + module_args, + cm_api_client, + zookeeper, + request, ): ServicesResourceApi(cm_api_client).update_service_config( cluster_name=zookeeper.cluster_ref.cluster_name, @@ -558,7 +571,7 @@ def test_service_existing_config( items=[ ApiConfig(name="tickTime", value="3001"), ApiConfig(name="autopurgeSnapRetainCount", value="9"), - ] + ], ), ) @@ -573,7 +586,7 @@ def test_service_existing_config( }, "message": f"{request.node.name}::test", "state": "present", - } + }, ) with pytest.raises(AnsibleExitJson) as e: @@ -583,7 +596,9 @@ def test_service_existing_config( assert e.value.service["name"] == zookeeper.name assert e.value.service["type"] == zookeeper.type assert e.value.service["config"] == dict( - tickTime="2001", leaderServes="no", 
autopurgeSnapRetainCount="9" + tickTime="2001", + leaderServes="no", + autopurgeSnapRetainCount="9", ) assert e.value.service["tags"] == dict() assert e.value.service["maintenance_mode"] == False @@ -598,7 +613,9 @@ def test_service_existing_config( assert e.value.service["name"] == zookeeper.name assert e.value.service["type"] == zookeeper.type assert e.value.service["config"] == dict( - tickTime="2001", leaderServes="no", autopurgeSnapRetainCount="9" + tickTime="2001", + leaderServes="no", + autopurgeSnapRetainCount="9", ) assert e.value.service["tags"] == dict() assert e.value.service["maintenance_mode"] == False @@ -606,7 +623,12 @@ def test_service_existing_config( assert len(e.value.service["roles"]) == 1 # SERVER def test_service_existing_config_purge( - self, conn, module_args, cm_api_client, zookeeper, request + self, + conn, + module_args, + cm_api_client, + zookeeper, + request, ): ServicesResourceApi(cm_api_client).update_service_config( cluster_name=zookeeper.cluster_ref.cluster_name, @@ -616,7 +638,7 @@ def test_service_existing_config_purge( items=[ ApiConfig(name="tickTime", value="3001"), ApiConfig(name="autopurgeSnapRetainCount", value="9"), - ] + ], ), ) @@ -632,7 +654,7 @@ def test_service_existing_config_purge( "message": f"{request.node.name}::test", "purge": True, "state": "present", - } + }, ) with pytest.raises(AnsibleExitJson) as e: @@ -683,7 +705,7 @@ def test_service_existing_tags(self, conn, module_args, cm_api_client, zookeeper "tag_three": "Added", }, "state": "present", - } + }, ) with pytest.raises(AnsibleExitJson) as e: @@ -694,7 +716,9 @@ def test_service_existing_tags(self, conn, module_args, cm_api_client, zookeeper assert e.value.service["type"] == zookeeper.type assert e.value.service["config"] == dict() assert e.value.service["tags"] == dict( - tag_one="Updated", tag_two="Existing", tag_three="Added" + tag_one="Updated", + tag_two="Existing", + tag_three="Added", ) assert e.value.service["maintenance_mode"] == False assert len(e.value.service["role_config_groups"]) == 2 # SERVER, GATEWAY bases @@ -709,14 +733,20 @@ def test_service_existing_tags(self, conn, module_args, cm_api_client, zookeeper assert e.value.service["type"] == zookeeper.type assert e.value.service["config"] == dict() assert e.value.service["tags"] == dict( - tag_one="Updated", tag_two="Existing", tag_three="Added" + tag_one="Updated", + tag_two="Existing", + tag_three="Added", ) assert e.value.service["maintenance_mode"] == False assert len(e.value.service["role_config_groups"]) == 2 # SERVER, GATEWAY bases assert len(e.value.service["roles"]) == 1 # SERVER def test_service_existing_tags_purge( - self, conn, module_args, cm_api_client, zookeeper + self, + conn, + module_args, + cm_api_client, + zookeeper, ): ServicesResourceApi(cm_api_client).add_tags( cluster_name=zookeeper.cluster_ref.cluster_name, @@ -738,7 +768,7 @@ def test_service_existing_tags_purge( }, "purge": True, "state": "present", - } + }, ) with pytest.raises(AnsibleExitJson) as e: @@ -769,7 +799,11 @@ def test_service_existing_tags_purge( class TestServiceStates: def test_service_existing_state_started( - self, conn, module_args, cm_api_client, zookeeper + self, + conn, + module_args, + cm_api_client, + zookeeper, ): if zookeeper.service_state not in [ ApiServiceState.STOPPED, @@ -788,7 +822,7 @@ def test_service_existing_state_started( "cluster": zookeeper.cluster_ref.cluster_name, "name": zookeeper.name, "state": "started", - } + }, ) with pytest.raises(AnsibleExitJson) as e: @@ -805,7 +839,11 @@ def 
test_service_existing_state_started( assert e.value.service["service_state"] == ApiServiceState.STARTED def test_service_existing_state_stopped( - self, conn, module_args, cm_api_client, zookeeper + self, + conn, + module_args, + cm_api_client, + zookeeper, ): if zookeeper.service_state not in [ ApiServiceState.STARTED, @@ -824,7 +862,7 @@ def test_service_existing_state_stopped( "cluster": zookeeper.cluster_ref.cluster_name, "name": zookeeper.name, "state": "stopped", - } + }, ) with pytest.raises(AnsibleExitJson) as e: @@ -841,7 +879,11 @@ def test_service_existing_state_stopped( assert e.value.service["service_state"] == ApiServiceState.STOPPED def test_service_existing_state_restarted( - self, conn, module_args, cm_api_client, zookeeper + self, + conn, + module_args, + cm_api_client, + zookeeper, ): if zookeeper.service_state not in [ ApiServiceState.STARTED, @@ -860,7 +902,7 @@ def test_service_existing_state_restarted( "cluster": zookeeper.cluster_ref.cluster_name, "name": zookeeper.name, "state": "restarted", - } + }, ) with pytest.raises(AnsibleExitJson) as e: @@ -877,7 +919,11 @@ def test_service_existing_state_restarted( assert e.value.service["service_state"] == ApiServiceState.STARTED def test_service_existing_state_absent( - self, conn, module_args, cm_api_client, zookeeper + self, + conn, + module_args, + cm_api_client, + zookeeper, ): if zookeeper.service_state not in [ ApiServiceState.STARTED, @@ -896,7 +942,7 @@ def test_service_existing_state_absent( "cluster": zookeeper.cluster_ref.cluster_name, "name": zookeeper.name, "state": "absent", - } + }, ) with pytest.raises(AnsibleExitJson) as e: diff --git a/tests/unit/plugins/modules/service/test_service_rcgs.py b/tests/unit/plugins/modules/service/test_service_rcgs.py index 296fcb44..2da1f0f6 100644 --- a/tests/unit/plugins/modules/service/test_service_rcgs.py +++ b/tests/unit/plugins/modules/service/test_service_rcgs.py @@ -143,7 +143,7 @@ def resettable_cluster(self, cm_api_client, base_cluster): cluster_name=base_cluster.name, ) .items - ] + ], ) # Yield to the test @@ -162,7 +162,11 @@ def resettable_cluster(self, cm_api_client, base_cluster): deregister_service(cm_api_client, services_to_remove) def test_service_provision_custom_rcg( - self, conn, module_args, base_cluster, request + self, + conn, + module_args, + base_cluster, + request, ): id = f"pytest-{Path(request.node.name)}" @@ -179,10 +183,10 @@ def test_service_provision_custom_rcg( "config": { "minSessionTimeout": 4601, }, - } + }, ], "state": "present", - } + }, ) with pytest.raises(AnsibleExitJson) as e: @@ -199,7 +203,7 @@ def test_service_provision_custom_rcg( assert len(e.value.service["role_config_groups"]) == 3 # custom + 2 bases rcg = next( - iter([r for r in e.value.service["role_config_groups"] if not r["base"]]) + iter([r for r in e.value.service["role_config_groups"] if not r["base"]]), ) assert rcg["name"] == id assert rcg["role_type"] == "SERVER" @@ -220,7 +224,7 @@ def test_service_provision_custom_rcg( assert len(e.value.service["role_config_groups"]) == 3 rcg = next( - iter([r for r in e.value.service["role_config_groups"] if not r["base"]]) + iter([r for r in e.value.service["role_config_groups"] if not r["base"]]), ) assert rcg["name"] == id assert rcg["role_type"] == "SERVER" @@ -241,10 +245,10 @@ def test_service_provision_base_rcg(self, conn, module_args, base_cluster, reque "config": { "minSessionTimeout": 4601, }, - } + }, ], "state": "present", - } + }, ) with pytest.raises(AnsibleExitJson) as e: @@ -266,8 +270,8 @@ def 
test_service_provision_base_rcg(self, conn, module_args, base_cluster, reque r for r in e.value.service["role_config_groups"] if r["base"] and r["role_type"] == "SERVER" - ] - ) + ], + ), ) assert rcg["role_type"] == "SERVER" assert rcg["config"]["minSessionTimeout"] == "4601" @@ -292,8 +296,8 @@ def test_service_provision_base_rcg(self, conn, module_args, base_cluster, reque r for r in e.value.service["role_config_groups"] if r["base"] and r["role_type"] == "SERVER" - ] - ) + ], + ), ) assert rcg["role_type"] == "SERVER" assert rcg["config"]["minSessionTimeout"] == "4601" @@ -313,7 +317,7 @@ def base_rcg_server(self, cm_api_client, zookeeper) -> ApiRoleConfigGroup: items=[ ApiConfig(name="minSessionTimeout", value="5500"), ApiConfig(name="maxSessionTimeout", value="45000"), - ] + ], ) return RoleConfigGroupsResourceApi(cm_api_client).update_role_config_group( @@ -333,7 +337,7 @@ def base_rcg_gateway(self, cm_api_client, zookeeper) -> ApiRoleConfigGroup: ) base_rcg.config = ApiConfigList( - items=[ApiConfig(name="client_config_priority", value="91")] + items=[ApiConfig(name="client_config_priority", value="91")], ) return RoleConfigGroupsResourceApi(cm_api_client).update_role_config_group( @@ -345,7 +349,10 @@ def base_rcg_gateway(self, cm_api_client, zookeeper) -> ApiRoleConfigGroup: @pytest.fixture() def custom_rcg_server( - self, cm_api_client, zookeeper, request + self, + cm_api_client, + zookeeper, + request, ) -> Generator[ApiRoleConfigGroup]: id = Path(request.node.name).stem @@ -372,7 +379,10 @@ def custom_rcg_server( @pytest.fixture() def server_role_custom_rcg( - self, cm_api_client, server_role, custom_rcg_server + self, + cm_api_client, + server_role, + custom_rcg_server, ) -> ApiRole: RoleConfigGroupsResourceApi(cm_api_client).move_roles( cluster_name=server_role.service_ref.cluster_name, @@ -383,7 +393,12 @@ def server_role_custom_rcg( return server_role def test_service_existing_base_rcg( - self, conn, module_args, zookeeper, base_rcg_server, base_rcg_gateway + self, + conn, + module_args, + zookeeper, + base_rcg_server, + base_rcg_gateway, ): module_args( { @@ -397,10 +412,10 @@ def test_service_existing_base_rcg( "minSessionTimeout": 5501, "maxSessionTimeout": 45001, }, - } + }, ], "state": "present", - } + }, ) with pytest.raises(AnsibleExitJson) as e: @@ -414,8 +429,8 @@ def test_service_existing_base_rcg( r for r in e.value.service["role_config_groups"] if r["base"] and r["role_type"] == "SERVER" - ] - ) + ], + ), ) assert server_rcg["config"]["minSessionTimeout"] == "5501" assert server_rcg["config"]["maxSessionTimeout"] == "45001" @@ -426,8 +441,8 @@ def test_service_existing_base_rcg( r for r in e.value.service["role_config_groups"] if r["base"] and r["role_type"] == "GATEWAY" - ] - ) + ], + ), ) assert gateway_rcg["config"]["client_config_priority"] == "91" @@ -443,8 +458,8 @@ def test_service_existing_base_rcg( r for r in e.value.service["role_config_groups"] if r["base"] and r["role_type"] == "SERVER" - ] - ) + ], + ), ) assert server_rcg["config"]["minSessionTimeout"] == "5501" assert server_rcg["config"]["maxSessionTimeout"] == "45001" @@ -455,13 +470,18 @@ def test_service_existing_base_rcg( r for r in e.value.service["role_config_groups"] if r["base"] and r["role_type"] == "GATEWAY" - ] - ) + ], + ), ) assert gateway_rcg["config"]["client_config_priority"] == "91" def test_service_existing_base_rcg_purge( - self, conn, module_args, zookeeper, base_rcg_server, base_rcg_gateway + self, + conn, + module_args, + zookeeper, + base_rcg_server, + base_rcg_gateway, ): 
module_args( { @@ -474,11 +494,11 @@ def test_service_existing_base_rcg_purge( "config": { "minSessionTimeout": 5501, }, - } + }, ], "purge": True, "state": "present", - } + }, ) with pytest.raises(AnsibleExitJson) as e: @@ -492,8 +512,8 @@ def test_service_existing_base_rcg_purge( r for r in e.value.service["role_config_groups"] if r["base"] and r["role_type"] == "SERVER" - ] - ) + ], + ), ) assert server_rcg["config"]["minSessionTimeout"] == "5501" assert "maxSessionTimeout" not in server_rcg["config"] @@ -504,8 +524,8 @@ def test_service_existing_base_rcg_purge( r for r in e.value.service["role_config_groups"] if r["base"] and r["role_type"] == "GATEWAY" - ] - ) + ], + ), ) assert "client_config_priority" not in gateway_rcg["config"] @@ -521,8 +541,8 @@ def test_service_existing_base_rcg_purge( r for r in e.value.service["role_config_groups"] if r["base"] and r["role_type"] == "SERVER" - ] - ) + ], + ), ) assert server_rcg["config"]["minSessionTimeout"] == "5501" assert "maxSessionTimeout" not in server_rcg["config"] @@ -533,13 +553,17 @@ def test_service_existing_base_rcg_purge( r for r in e.value.service["role_config_groups"] if r["base"] and r["role_type"] == "GATEWAY" - ] - ) + ], + ), ) assert "client_config_priority" not in gateway_rcg["config"] def test_service_existing_custom_rcg( - self, conn, module_args, zookeeper, custom_rcg_server + self, + conn, + module_args, + zookeeper, + custom_rcg_server, ): module_args( { @@ -554,10 +578,10 @@ def test_service_existing_custom_rcg( "minSessionTimeout": 5501, "maxSessionTimeout": 45001, }, - } + }, ], "state": "present", - } + }, ) with pytest.raises(AnsibleExitJson) as e: @@ -571,8 +595,8 @@ def test_service_existing_custom_rcg( r for r in e.value.service["role_config_groups"] if r["name"] == custom_rcg_server.name - ] - ) + ], + ), ) assert server_rcg["config"]["minSessionTimeout"] == "5501" assert server_rcg["config"]["maxSessionTimeout"] == "45001" @@ -589,14 +613,18 @@ def test_service_existing_custom_rcg( r for r in e.value.service["role_config_groups"] if r["name"] == custom_rcg_server.name - ] - ) + ], + ), ) assert server_rcg["config"]["minSessionTimeout"] == "5501" assert server_rcg["config"]["maxSessionTimeout"] == "45001" def test_service_existing_custom_rcg_purge( - self, conn, module_args, zookeeper, custom_rcg_server + self, + conn, + module_args, + zookeeper, + custom_rcg_server, ): module_args( { @@ -610,11 +638,11 @@ def test_service_existing_custom_rcg_purge( "config": { "maxSessionTimeout": 45001, }, - } + }, ], "purge": True, "state": "present", - } + }, ) with pytest.raises(AnsibleExitJson) as e: @@ -628,8 +656,8 @@ def test_service_existing_custom_rcg_purge( r for r in e.value.service["role_config_groups"] if r["name"] == custom_rcg_server.name - ] - ) + ], + ), ) assert server_rcg["config"]["maxSessionTimeout"] == "45001" assert "minSessionTimeout" not in server_rcg["config"] @@ -646,14 +674,19 @@ def test_service_existing_custom_rcg_purge( r for r in e.value.service["role_config_groups"] if r["name"] == custom_rcg_server.name - ] - ) + ], + ), ) assert server_rcg["config"]["maxSessionTimeout"] == "45001" assert "minSessionTimeout" not in server_rcg["config"] def test_service_existing_custom_rcg_purge_role_assoc( - self, conn, module_args, cm_api_client, zookeeper, server_role_custom_rcg + self, + conn, + module_args, + cm_api_client, + zookeeper, + server_role_custom_rcg, ): module_args( { @@ -662,7 +695,7 @@ def test_service_existing_custom_rcg_purge_role_assoc( "name": zookeeper.name, "purge": True, "state": 
"present", - } + }, ) with pytest.raises(AnsibleExitJson) as e: diff --git a/tests/unit/plugins/modules/service/test_service_roles.py b/tests/unit/plugins/modules/service/test_service_roles.py index 4a150286..df518a78 100644 --- a/tests/unit/plugins/modules/service/test_service_roles.py +++ b/tests/unit/plugins/modules/service/test_service_roles.py @@ -84,7 +84,7 @@ def resettable_cluster(self, cm_api_client, base_cluster) -> Generator[ApiCluste cluster_name=base_cluster.name, ) .items - ] + ], ) # Yield to the test @@ -103,12 +103,18 @@ def resettable_cluster(self, cm_api_client, base_cluster) -> Generator[ApiCluste deregister_service(cm_api_client, services_to_remove) def test_service_provision_roles( - self, conn, module_args, cm_api_client, resettable_cluster, request + self, + conn, + module_args, + cm_api_client, + resettable_cluster, + request, ): service_name = f"pytest-{Path(request.node.name)}" available_hosts = get_cluster_hosts( - api_client=cm_api_client, cluster=resettable_cluster + api_client=cm_api_client, + cluster=resettable_cluster, ) module_args( @@ -121,10 +127,10 @@ def test_service_provision_roles( { "type": "SERVER", "hostnames": [h.hostname for h in available_hosts], - } + }, ], "state": "present", - } + }, ) with pytest.raises(AnsibleExitJson) as e: @@ -155,12 +161,18 @@ def test_service_provision_roles( assert len(e.value.service["role_config_groups"]) == 2 # SERVER, GATEWAY bases def test_service_provision_roles_custom_rcg( - self, conn, module_args, cm_api_client, resettable_cluster, request + self, + conn, + module_args, + cm_api_client, + resettable_cluster, + request, ): service_name = f"pytest-{Path(request.node.name)}" available_hosts = get_cluster_hosts( - api_client=cm_api_client, cluster=resettable_cluster + api_client=cm_api_client, + cluster=resettable_cluster, ) module_args( @@ -183,7 +195,7 @@ def test_service_provision_roles_custom_rcg( }, ], "state": "present", - } + }, ) with pytest.raises(AnsibleExitJson) as e: @@ -222,12 +234,18 @@ def test_service_provision_roles_custom_rcg( assert e.value.service["roles"][0]["role_config_group_name"] == "PYTEST_SERVER" def test_service_provision_roles_config( - self, conn, module_args, cm_api_client, resettable_cluster, request + self, + conn, + module_args, + cm_api_client, + resettable_cluster, + request, ): service_name = f"pytest-{Path(request.node.name)}" available_hosts = get_cluster_hosts( - api_client=cm_api_client, cluster=resettable_cluster + api_client=cm_api_client, + cluster=resettable_cluster, ) module_args( @@ -246,7 +264,7 @@ def test_service_provision_roles_config( }, ], "state": "present", - } + }, ) with pytest.raises(AnsibleExitJson) as e: @@ -281,12 +299,18 @@ def test_service_provision_roles_config( assert e.value.service["roles"][0]["config"]["minSessionTimeout"] == "4801" def test_service_provision_roles_tags( - self, conn, module_args, cm_api_client, resettable_cluster, request + self, + conn, + module_args, + cm_api_client, + resettable_cluster, + request, ): service_name = f"pytest-{Path(request.node.name)}" available_hosts = get_cluster_hosts( - api_client=cm_api_client, cluster=resettable_cluster + api_client=cm_api_client, + cluster=resettable_cluster, ) module_args( @@ -305,7 +329,7 @@ def test_service_provision_roles_tags( }, ], "state": "present", - } + }, ) with pytest.raises(AnsibleExitJson) as e: @@ -449,7 +473,12 @@ def server_rcg_role(self, cm_api_client, server_role, server_rcg) -> ApiRole: return moved_roles.items[0] def test_service_existing_role_rcg( - self, conn, 
module_args, cm_api_client, zookeeper, server_rcg + self, + conn, + module_args, + cm_api_client, + zookeeper, + server_rcg, ): existing_hosts = get_service_hosts( api_client=cm_api_client, @@ -466,10 +495,10 @@ def test_service_existing_role_rcg( "type": "SERVER", "hostnames": [h.hostname for h in existing_hosts], "role_config_group": server_rcg.name, - } + }, ], "state": "present", - } + }, ) with pytest.raises(AnsibleExitJson) as e: @@ -496,7 +525,12 @@ def test_service_existing_role_rcg( assert e.value.service["roles"][0]["role_config_group_name"] == server_rcg.name def test_service_existing_role_rcg_base( - self, conn, module_args, cm_api_client, zookeeper, server_rcg_role + self, + conn, + module_args, + cm_api_client, + zookeeper, + server_rcg_role, ): base_rcg = get_base_role_config_group( api_client=cm_api_client, @@ -514,10 +548,10 @@ def test_service_existing_role_rcg_base( { "type": server_rcg_role.type, "hostnames": [server_rcg_role.host_ref.hostname], - } + }, ], "state": "present", - } + }, ) with pytest.raises(AnsibleExitJson) as e: @@ -556,7 +590,12 @@ def test_service_existing_role_rcg_base( assert result_role["role_config_group_name"] == base_rcg.name def test_service_existing_role_tags( - self, conn, module_args, cm_api_client, zookeeper, server_role + self, + conn, + module_args, + cm_api_client, + zookeeper, + server_role, ): RolesResourceApi(cm_api_client).add_tags( cluster_name=zookeeper.cluster_ref.cluster_name, @@ -581,10 +620,10 @@ def test_service_existing_role_tags( "tag_one": "Updated", "tag_three": "Added", }, - } + }, ], "state": "present", - } + }, ) with pytest.raises(AnsibleExitJson) as e: @@ -601,7 +640,9 @@ def test_service_existing_role_tags( and r["hostname"] == server_role.host_ref.hostname ][0] assert result_role["tags"] == dict( - tag_one="Updated", tag_two="Existing", tag_three="Added" + tag_one="Updated", + tag_two="Existing", + tag_three="Added", ) # Idempotency @@ -619,11 +660,18 @@ def test_service_existing_role_tags( and r["hostname"] == server_role.host_ref.hostname ][0] assert result_role["tags"] == dict( - tag_one="Updated", tag_two="Existing", tag_three="Added" + tag_one="Updated", + tag_two="Existing", + tag_three="Added", ) def test_service_existing_role_tags_purge( - self, conn, module_args, cm_api_client, zookeeper, server_role + self, + conn, + module_args, + cm_api_client, + zookeeper, + server_role, ): RolesResourceApi(cm_api_client).add_tags( cluster_name=zookeeper.cluster_ref.cluster_name, @@ -648,11 +696,11 @@ def test_service_existing_role_tags_purge( "tag_one": "Updated", "tag_three": "Added", }, - } + }, ], "purge": True, "state": "present", - } + }, ) with pytest.raises(AnsibleExitJson) as e: @@ -687,7 +735,12 @@ def test_service_existing_role_tags_purge( assert result_role["tags"] == dict(tag_one="Updated", tag_three="Added") def test_service_existing_role_config( - self, conn, module_args, cm_api_client, zookeeper, server_role + self, + conn, + module_args, + cm_api_client, + zookeeper, + server_role, ): RolesResourceApi(cm_api_client).update_role_config( cluster_name=zookeeper.cluster_ref.cluster_name, @@ -697,7 +750,7 @@ def test_service_existing_role_config( items=[ ApiConfig(name="minSessionTimeout", value="5501"), ApiConfig(name="maxSessionTimeout", value="45001"), - ] + ], ), ) @@ -714,10 +767,10 @@ def test_service_existing_role_config( "minSessionTimeout": 5601, "maxClientCnxns": 56, }, - } + }, ], "state": "present", - } + }, ) with pytest.raises(AnsibleExitJson) as e: @@ -736,7 +789,9 @@ def 
test_service_existing_role_config( assert ( result_role["config"].items() >= dict( - minSessionTimeout="5601", maxSessionTimeout="45001", maxClientCnxns="56" + minSessionTimeout="5601", + maxSessionTimeout="45001", + maxClientCnxns="56", ).items() ) @@ -757,12 +812,19 @@ def test_service_existing_role_config( assert ( result_role["config"].items() >= dict( - minSessionTimeout="5601", maxSessionTimeout="45001", maxClientCnxns="56" + minSessionTimeout="5601", + maxSessionTimeout="45001", + maxClientCnxns="56", ).items() ) def test_service_existing_role_config_purge( - self, conn, module_args, cm_api_client, zookeeper, server_role + self, + conn, + module_args, + cm_api_client, + zookeeper, + server_role, ): RolesResourceApi(cm_api_client).update_role_config( cluster_name=zookeeper.cluster_ref.cluster_name, @@ -772,7 +834,7 @@ def test_service_existing_role_config_purge( items=[ ApiConfig(name="minSessionTimeout", value="5501"), ApiConfig(name="maxSessionTimeout", value="45001"), - ] + ], ), ) @@ -789,11 +851,11 @@ def test_service_existing_role_config_purge( "minSessionTimeout": 5601, "maxClientCnxns": 56, }, - } + }, ], "purge": True, "state": "present", - } + }, ) with pytest.raises(AnsibleExitJson) as e: @@ -834,7 +896,11 @@ def test_service_existing_role_config_purge( ) def test_service_existing_role_add( - self, conn, module_args, zookeeper, available_hosts + self, + conn, + module_args, + zookeeper, + available_hosts, ): module_args( { @@ -845,10 +911,10 @@ def test_service_existing_role_add( { "type": "SERVER", "hostnames": [available_hosts[0].hostname], - } + }, ], "state": "present", - } + }, ) with pytest.raises(AnsibleExitJson) as e: @@ -873,7 +939,11 @@ def test_service_existing_role_add( ] def test_service_existing_role_purge( - self, conn, module_args, zookeeper, available_hosts + self, + conn, + module_args, + zookeeper, + available_hosts, ): module_args( { @@ -887,11 +957,11 @@ def test_service_existing_role_purge( "config": { "serverId": 9, }, - } + }, ], "purge": True, "state": "present", - } + }, ) with pytest.raises(AnsibleExitJson) as e: diff --git a/tests/unit/plugins/modules/service_config/test_service_config.py b/tests/unit/plugins/modules/service_config/test_service_config.py index 881396a1..025b5146 100644 --- a/tests/unit/plugins/modules/service_config/test_service_config.py +++ b/tests/unit/plugins/modules/service_config/test_service_config.py @@ -124,7 +124,7 @@ def test_present_invalid_cluster(conn, module_args): "cluster": "example", "service": "example", "parameters": dict(example="Example"), - } + }, ) with pytest.raises(AnsibleFailJson, match="Cluster does not exist"): @@ -138,7 +138,7 @@ def test_present_invalid_service(conn, module_args, zk_service): "cluster": zk_service.cluster_ref.cluster_name, "service": "example", "parameters": dict(example="Example"), - } + }, ) with pytest.raises(AnsibleFailJson, match="Service 'example' not found"): @@ -152,11 +152,12 @@ def test_present_invalid_parameter(conn, module_args, zk_service): "cluster": zk_service.cluster_ref.cluster_name, "service": zk_service.name, "parameters": dict(example="Example"), - } + }, ) with pytest.raises( - AnsibleFailJson, match="Unknown configuration attribute 'example'" + AnsibleFailJson, + match="Unknown configuration attribute 'example'", ): service_config.main() @@ -172,7 +173,7 @@ def test_set_parameters(conn, module_args, zk_service_config): "message": "test_service_config::test_set_parameters", # "_ansible_check_mode": True, # "_ansible_diff": True, - } + }, ) expected = 
dict(autopurgeSnapRetainCount="9", tickTime="1111") @@ -200,7 +201,7 @@ def test_unset_parameters(conn, module_args, zk_service_config): "service": zk_service_config.name, "parameters": dict(autopurgeSnapRetainCount=None), "message": "test_service_config::test_unset_parameters", - } + }, ) expected = dict(tickTime="1111") @@ -235,7 +236,7 @@ def test_set_parameters_with_purge(conn, module_args, zk_service_config): "message": "test_service_config::test_set_parameters_with_purge", # "_ansible_check_mode": True, # "_ansible_diff": True, - } + }, ) expected = dict(autopurgeSnapRetainCount="9") @@ -265,7 +266,7 @@ def test_purge_all_parameters(conn, module_args, zk_service_config): "message": "test_service_config::test_purge_all_parameters", # "_ansible_check_mode": True, # "_ansible_diff": True, - } + }, ) with pytest.raises(AnsibleExitJson) as e: diff --git a/tests/unit/plugins/modules/service_config_info/test_service_config_info.py b/tests/unit/plugins/modules/service_config_info/test_service_config_info.py index 860b575b..294d89d3 100644 --- a/tests/unit/plugins/modules/service_config_info/test_service_config_info.py +++ b/tests/unit/plugins/modules/service_config_info/test_service_config_info.py @@ -86,7 +86,7 @@ def test_view_default(conn, module_args): **conn, "cluster": os.getenv("CM_CLUSTER"), "service": os.getenv("CM_SERVICE"), - } + }, ) with pytest.raises(AnsibleExitJson) as e: @@ -101,7 +101,7 @@ def test_invalid_service(conn, module_args): **conn, "cluster": os.getenv("CM_CLUSTER"), "service": "BOOM", - } + }, ) with pytest.raises(AnsibleExitJson) as e: @@ -116,7 +116,7 @@ def test_invalid_cluster(conn, module_args): **conn, "cluster": "BOOM", "service": os.getenv("CM_SERVICE"), - } + }, ) with pytest.raises(AnsibleExitJson) as e: diff --git a/tests/unit/plugins/modules/service_info/test_service_info.py b/tests/unit/plugins/modules/service_info/test_service_info.py index 207d7491..734b32cd 100644 --- a/tests/unit/plugins/modules/service_info/test_service_info.py +++ b/tests/unit/plugins/modules/service_info/test_service_info.py @@ -100,7 +100,7 @@ def test_missing_required(conn, module_args): module_args( { **conn, - } + }, ) with pytest.raises(AnsibleFailJson, match="cluster"): @@ -112,7 +112,7 @@ def test_missing_cluster(conn, module_args): { **conn, "service": "example", - } + }, ) with pytest.raises(AnsibleFailJson, match="cluster"): @@ -125,7 +125,7 @@ def test_invalid_cluster(conn, module_args): **conn, "cluster": "invalid", "service": "example", - } + }, ) with pytest.raises(AnsibleFailJson, match="Cluster does not exist") as e: @@ -138,7 +138,7 @@ def test_invalid_service(conn, module_args, base_cluster): **conn, "cluster": base_cluster.name, "service": "not_found", - } + }, ) with pytest.raises(AnsibleExitJson) as e: @@ -167,7 +167,7 @@ def test_all_services( display_name=f"ZooKeeper ({id})", # Add a SERVER role (so we can start the service -- a ZK requirement!) 
roles=[ - ApiRole(type="SERVER", host_ref=ApiHostRef(available_hosts[0].host_id)) + ApiRole(type="SERVER", host_ref=ApiHostRef(available_hosts[0].host_id)), ], ), ) @@ -176,7 +176,7 @@ def test_all_services( { **conn, "cluster": zookeeper.cluster_ref.cluster_name, - } + }, ) with pytest.raises(AnsibleExitJson) as e: @@ -194,7 +194,7 @@ def test_named_service(conn, module_args, zookeeper): **conn, "cluster": zookeeper.cluster_ref.cluster_name, "service": zookeeper.name, - } + }, ) with pytest.raises(AnsibleExitJson) as e: diff --git a/tests/unit/plugins/modules/service_role/test_service_role.py b/tests/unit/plugins/modules/service_role/test_service_role.py index 1a697ecb..08749b03 100644 --- a/tests/unit/plugins/modules/service_role/test_service_role.py +++ b/tests/unit/plugins/modules/service_role/test_service_role.py @@ -169,7 +169,7 @@ def test_service_role_missing_one_of(self, conn, module_args): **conn, "cluster": "cluster", "service": "service", - } + }, ) with pytest.raises(AnsibleFailJson, match="type, name"): @@ -182,7 +182,7 @@ def test_service_role_missing_required_by_type(self, conn, module_args): "cluster": "cluster", "service": "service", "type": "type", - } + }, ) with pytest.raises(AnsibleFailJson, match="cluster_hostname, cluster_host_id"): @@ -197,7 +197,7 @@ def test_service_role_missing_required_by_type_exclusives(self, conn, module_arg "type": "type", "cluster_hostname": "hostname", "cluster_host_id": "host_id", - } + }, ) with pytest.raises( @@ -216,14 +216,18 @@ def test_service_role_invalid_cluster(self, conn, module_args): "service": "example", "type": "type", "cluster_hostname": "hostname", - } + }, ) with pytest.raises(AnsibleFailJson, match="Cluster does not exist"): service_role.main() def test_service_role_invalid_service( - self, conn, module_args, cm_api_client, zookeeper + self, + conn, + module_args, + cm_api_client, + zookeeper, ): expected_roles = gather_server_roles( api_client=cm_api_client, @@ -237,14 +241,18 @@ def test_service_role_invalid_service( "service": "example", "type": expected_roles[0].type, "cluster_hostname": expected_roles[0].host_ref.hostname, - } + }, ) with pytest.raises(AnsibleFailJson, match="Service does not exist"): service_role.main() def test_service_role_invalid_type( - self, conn, module_args, cm_api_client, zookeeper + self, + conn, + module_args, + cm_api_client, + zookeeper, ): expected_roles = gather_server_roles( api_client=cm_api_client, @@ -258,7 +266,7 @@ def test_service_role_invalid_type( "service": zookeeper.name, "type": "example", "cluster_hostname": expected_roles[0].host_ref.hostname, - } + }, ) with pytest.raises( @@ -268,7 +276,11 @@ def test_service_role_invalid_type( service_role.main() def test_service_role_invalid_host( - self, conn, module_args, cm_api_client, zookeeper + self, + conn, + module_args, + cm_api_client, + zookeeper, ): expected_roles = gather_server_roles( api_client=cm_api_client, @@ -282,7 +294,7 @@ def test_service_role_invalid_host( "service": zookeeper.name, "type": expected_roles[0].type, "cluster_hostname": "example", - } + }, ) with pytest.raises(AnsibleFailJson, match="Host not found"): @@ -295,7 +307,7 @@ def test_service_role_invalid_role_name(self, conn, module_args, zookeeper): "cluster": zookeeper.cluster_ref.cluster_name, "service": zookeeper.name, "name": "example", - } + }, ) with pytest.raises(AnsibleExitJson) as e: @@ -306,7 +318,12 @@ def test_service_role_invalid_role_name(self, conn, module_args, zookeeper): class TestServiceRoleProvision: def 
test_service_role_provision_hostname( - self, conn, module_args, cm_api_client, zookeeper, server_role_reset + self, + conn, + module_args, + cm_api_client, + zookeeper, + server_role_reset, ): existing_role_instances = [ r.host_ref.hostname @@ -330,7 +347,7 @@ def test_service_role_provision_hostname( "type": "SERVER", "cluster_hostname": hosts[0].hostname, "state": "present", - } + }, ) with pytest.raises(AnsibleExitJson) as e: @@ -341,7 +358,12 @@ def test_service_role_provision_hostname( assert e.value.role["role_state"] == ApiRoleState.STOPPED def test_service_role_provision_host_id( - self, conn, module_args, cm_api_client, zookeeper, server_role_reset + self, + conn, + module_args, + cm_api_client, + zookeeper, + server_role_reset, ): existing_role_instances = [ r.host_ref.hostname @@ -365,7 +387,7 @@ def test_service_role_provision_host_id( "type": "SERVER", "cluster_host_id": hosts[0].host_id, "state": "present", - } + }, ) with pytest.raises(AnsibleExitJson) as e: @@ -376,7 +398,12 @@ def test_service_role_provision_host_id( assert e.value.role["role_state"] == ApiRoleState.STOPPED def test_service_role_provision_config( - self, conn, module_args, cm_api_client, zookeeper, server_role_reset + self, + conn, + module_args, + cm_api_client, + zookeeper, + server_role_reset, ): existing_role_instances = [ r.host_ref.hostname @@ -403,7 +430,7 @@ def test_service_role_provision_config( "minSessionTimeout": 4500, }, "state": "present", - } + }, ) with pytest.raises(AnsibleExitJson) as e: @@ -459,7 +486,7 @@ def test_service_role_provision_role_config_group( "cluster_hostname": hosts[0].hostname, "role_config_group": rcg.name, "state": "present", - } + }, ) with pytest.raises(AnsibleExitJson) as e: @@ -472,7 +499,12 @@ def test_service_role_provision_role_config_group( assert e.value.role["config"]["minSessionTimeout"] == "4501" def test_service_role_provision_tags( - self, conn, module_args, cm_api_client, zookeeper, server_role_reset + self, + conn, + module_args, + cm_api_client, + zookeeper, + server_role_reset, ): existing_role_instances = [ r.host_ref.hostname @@ -499,7 +531,7 @@ def test_service_role_provision_tags( "pytest": "success", }, "state": "present", - } + }, ) with pytest.raises(AnsibleExitJson) as e: @@ -511,7 +543,12 @@ def test_service_role_provision_tags( assert e.value.role["tags"]["pytest"] == "success" def test_service_role_provision_enable_maintenance( - self, conn, module_args, cm_api_client, zookeeper, server_role_reset + self, + conn, + module_args, + cm_api_client, + zookeeper, + server_role_reset, ): existing_role_instances = [ r.host_ref.hostname @@ -536,7 +573,7 @@ def test_service_role_provision_enable_maintenance( "cluster_hostname": hosts[0].hostname, "maintenance": True, "state": "present", - } + }, ) with pytest.raises(AnsibleExitJson) as e: @@ -548,7 +585,12 @@ def test_service_role_provision_enable_maintenance( assert e.value.role["maintenance_mode"] == True def test_service_role_provision_state_start( - self, conn, module_args, cm_api_client, zookeeper, server_role_reset + self, + conn, + module_args, + cm_api_client, + zookeeper, + server_role_reset, ): existing_role_instances = [ r.host_ref.hostname @@ -572,7 +614,7 @@ def test_service_role_provision_state_start( "type": "SERVER", "cluster_hostname": hosts[0].hostname, "state": "started", - } + }, ) with pytest.raises(AnsibleExitJson) as e: @@ -583,7 +625,12 @@ def test_service_role_provision_state_start( assert e.value.role["role_state"] == ApiRoleState.STARTED def 
test_service_role_provision_state_stopped( - self, conn, module_args, cm_api_client, zookeeper, server_role_reset + self, + conn, + module_args, + cm_api_client, + zookeeper, + server_role_reset, ): existing_role_instances = [ r.host_ref.hostname @@ -607,7 +654,7 @@ def test_service_role_provision_state_stopped( "type": "SERVER", "cluster_hostname": hosts[0].hostname, "state": "stopped", - } + }, ) with pytest.raises(AnsibleExitJson) as e: @@ -618,7 +665,12 @@ def test_service_role_provision_state_stopped( assert e.value.role["role_state"] == ApiRoleState.STOPPED def test_service_role_provision_state_restarted( - self, conn, module_args, cm_api_client, zookeeper, server_role_reset + self, + conn, + module_args, + cm_api_client, + zookeeper, + server_role_reset, ): existing_role_instances = [ r.host_ref.hostname @@ -642,7 +694,7 @@ def test_service_role_provision_state_restarted( "type": "SERVER", "cluster_hostname": hosts[0].hostname, "state": "restarted", - } + }, ) with pytest.raises(AnsibleExitJson) as e: @@ -665,8 +717,8 @@ def updated_server_role_config(self, cm_api_client, server_role): ApiConfig( "minSessionTimeout", 5000, - ) - ] + ), + ], ), ) return server_role @@ -721,7 +773,10 @@ def custom_rcg_server_role(self, cm_api_client, zookeeper, request): @pytest.fixture() def updated_server_role_rcg( - self, cm_api_client, server_role, custom_rcg_server_role + self, + cm_api_client, + server_role, + custom_rcg_server_role, ): RoleConfigGroupsResourceApi(cm_api_client).move_roles( cluster_name=server_role.service_ref.cluster_name, @@ -732,7 +787,11 @@ def updated_server_role_rcg( return server_role def test_service_role_existing_name( - self, conn, module_args, zookeeper, server_role + self, + conn, + module_args, + zookeeper, + server_role, ): module_args( { @@ -741,7 +800,7 @@ def test_service_role_existing_name( "service": zookeeper.name, "name": server_role.name, "state": "present", - } + }, ) with pytest.raises(AnsibleExitJson) as e: @@ -753,7 +812,11 @@ def test_service_role_existing_name( assert e.value.role["role_state"] == ApiRoleState.STARTED def test_service_role_existing_hostname( - self, conn, module_args, zookeeper, server_role + self, + conn, + module_args, + zookeeper, + server_role, ): module_args( { @@ -763,7 +826,7 @@ def test_service_role_existing_hostname( "type": server_role.type, "cluster_hostname": server_role.host_ref.hostname, "state": "present", - } + }, ) with pytest.raises(AnsibleExitJson) as e: @@ -775,7 +838,11 @@ def test_service_role_existing_hostname( assert e.value.role["role_state"] == ApiRoleState.STARTED def test_service_role_existing_hostid( - self, conn, module_args, zookeeper, server_role + self, + conn, + module_args, + zookeeper, + server_role, ): module_args( { @@ -785,7 +852,7 @@ def test_service_role_existing_hostid( "type": server_role.type, "cluster_host_id": server_role.host_ref.host_id, "state": "present", - } + }, ) with pytest.raises(AnsibleExitJson) as e: @@ -797,7 +864,11 @@ def test_service_role_existing_hostid( assert e.value.role["role_state"] == ApiRoleState.STARTED def test_service_role_existing_enable_maintenance( - self, conn, module_args, zookeeper, server_role + self, + conn, + module_args, + zookeeper, + server_role, ): module_args( { @@ -807,7 +878,7 @@ def test_service_role_existing_enable_maintenance( "name": server_role.name, "maintenance": True, "state": "present", - } + }, ) with pytest.raises(AnsibleExitJson) as e: @@ -827,7 +898,11 @@ def test_service_role_existing_enable_maintenance( assert 
e.value.role["maintenance_mode"] == True def test_service_role_existing_config( - self, conn, module_args, zookeeper, updated_server_role_config + self, + conn, + module_args, + zookeeper, + updated_server_role_config, ): module_args( { @@ -840,7 +915,7 @@ def test_service_role_existing_config( "maxSessionTimeout": 50001, }, "state": "present", - } + }, ) with pytest.raises(AnsibleExitJson) as e: @@ -862,7 +937,11 @@ def test_service_role_existing_config( assert e.value.role["config"]["maxSessionTimeout"] == "50001" def test_service_role_existing_config_purge( - self, conn, module_args, zookeeper, updated_server_role_config + self, + conn, + module_args, + zookeeper, + updated_server_role_config, ): module_args( { @@ -875,7 +954,7 @@ def test_service_role_existing_config_purge( }, "purge": True, "state": "present", - } + }, ) with pytest.raises(AnsibleExitJson) as e: @@ -897,7 +976,12 @@ def test_service_role_existing_config_purge( assert e.value.role["config"]["maxSessionTimeout"] == "50001" def test_service_role_existing_rcg( - self, conn, module_args, zookeeper, server_role, custom_rcg_server_role + self, + conn, + module_args, + zookeeper, + server_role, + custom_rcg_server_role, ): module_args( { @@ -907,7 +991,7 @@ def test_service_role_existing_rcg( "name": server_role.name, "role_config_group": custom_rcg_server_role.name, "state": "present", - } + }, ) with pytest.raises(AnsibleExitJson) as e: @@ -927,7 +1011,11 @@ def test_service_role_existing_rcg( assert e.value.role["config"]["minSessionTimeout"] == "4501" def test_service_role_existing_rcg_base( - self, conn, module_args, zookeeper, updated_server_role_rcg + self, + conn, + module_args, + zookeeper, + updated_server_role_rcg, ): module_args( { @@ -937,7 +1025,7 @@ def test_service_role_existing_rcg_base( "name": updated_server_role_rcg.name, "role_config_group": None, "state": "present", - } + }, ) with pytest.raises(AnsibleExitJson) as e: @@ -957,7 +1045,11 @@ def test_service_role_existing_rcg_base( assert "minSessionTimeout" not in e.value.role["config"] def test_service_role_existing_tags( - self, conn, module_args, zookeeper, updated_server_role_tags + self, + conn, + module_args, + zookeeper, + updated_server_role_tags, ): module_args( { @@ -969,7 +1061,7 @@ def test_service_role_existing_tags( "pytest": "tag", }, "state": "present", - } + }, ) with pytest.raises(AnsibleExitJson) as e: @@ -991,7 +1083,11 @@ def test_service_role_existing_tags( assert e.value.role["tags"]["pytest"] == "tag" def test_service_role_existing_tags_purge( - self, conn, module_args, zookeeper, updated_server_role_tags + self, + conn, + module_args, + zookeeper, + updated_server_role_tags, ): module_args( { @@ -1004,7 +1100,7 @@ def test_service_role_existing_tags_purge( }, "purge": True, "state": "present", - } + }, ) with pytest.raises(AnsibleExitJson) as e: @@ -1026,7 +1122,11 @@ def test_service_role_existing_tags_purge( assert e.value.role["tags"]["pytest"] == "tag" def test_service_role_existing_state_started( - self, conn, module_args, zookeeper, stopped_server_role + self, + conn, + module_args, + zookeeper, + stopped_server_role, ): module_args( { @@ -1035,7 +1135,7 @@ def test_service_role_existing_state_started( "service": zookeeper.name, "name": stopped_server_role.name, "state": "started", - } + }, ) with pytest.raises(AnsibleExitJson) as e: @@ -1054,7 +1154,11 @@ def test_service_role_existing_state_started( assert e.value.role["role_state"] == ApiRoleState.STARTED def test_service_role_existing_state_stopped( - self, conn, 
module_args, zookeeper, server_role + self, + conn, + module_args, + zookeeper, + server_role, ): module_args( { @@ -1063,7 +1167,7 @@ def test_service_role_existing_state_stopped( "service": zookeeper.name, "name": server_role.name, "state": "stopped", - } + }, ) with pytest.raises(AnsibleExitJson) as e: @@ -1082,7 +1186,11 @@ def test_service_role_existing_state_stopped( assert e.value.role["role_state"] == ApiRoleState.STOPPED def test_service_role_existing_state_restarted( - self, conn, module_args, zookeeper, server_role + self, + conn, + module_args, + zookeeper, + server_role, ): module_args( { @@ -1091,7 +1199,7 @@ def test_service_role_existing_state_restarted( "service": zookeeper.name, "name": server_role.name, "state": "restarted", - } + }, ) with pytest.raises(AnsibleExitJson) as e: @@ -1110,7 +1218,11 @@ def test_service_role_existing_state_restarted( assert e.value.role["role_state"] == ApiRoleState.STARTED def test_service_role_existing_state_absent( - self, conn, module_args, zookeeper, server_role + self, + conn, + module_args, + zookeeper, + server_role, ): module_args( { @@ -1119,7 +1231,7 @@ def test_service_role_existing_state_absent( "service": zookeeper.name, "name": server_role.name, "state": "absent", - } + }, ) with pytest.raises(AnsibleExitJson) as e: diff --git a/tests/unit/plugins/modules/service_role_config/test_service_role_config.py b/tests/unit/plugins/modules/service_role_config/test_service_role_config.py index 9441e118..72e21717 100644 --- a/tests/unit/plugins/modules/service_role_config/test_service_role_config.py +++ b/tests/unit/plugins/modules/service_role_config/test_service_role_config.py @@ -137,7 +137,8 @@ def test_present_invalid_parameter(conn, module_args): module_args(conn) with pytest.raises( - AnsibleFailJson, match="Unknown configuration attribute 'example'" + AnsibleFailJson, + match="Unknown configuration attribute 'example'", ): service_role_config.main() @@ -183,7 +184,8 @@ def test_unset_parameters(conn, module_args): assert e.value.changed == True assert not {c["name"]: c["value"] for c in e.value.config}.get( - os.getenv("CM_ROLE_PARAM"), False + os.getenv("CM_ROLE_PARAM"), + False, ) with pytest.raises(AnsibleExitJson) as e: @@ -191,7 +193,8 @@ def test_unset_parameters(conn, module_args): assert e.value.changed == False assert not {c["name"]: c["value"] for c in e.value.config}.get( - os.getenv("CM_ROLE_PARAM"), False + os.getenv("CM_ROLE_PARAM"), + False, ) @@ -214,7 +217,8 @@ def test_set_parameters_with_purge(conn, module_args): os.getenv("CM_ROLE_PARAM2") ] == "false" assert not {c["name"]: c["value"] for c in e.value.config}.get( - os.getenv("CM_ROLE_PARAM"), False + os.getenv("CM_ROLE_PARAM"), + False, ) with pytest.raises(AnsibleExitJson) as e: diff --git a/tests/unit/plugins/modules/service_role_config_group/test_service_role_config_group.py b/tests/unit/plugins/modules/service_role_config_group/test_service_role_config_group.py index aa07a26d..c31523da 100644 --- a/tests/unit/plugins/modules/service_role_config_group/test_service_role_config_group.py +++ b/tests/unit/plugins/modules/service_role_config_group/test_service_role_config_group.py @@ -56,7 +56,7 @@ def test_missing_required(conn, module_args): "service": "SERVICE", # _ansible_check_mode=True, # _ansible_diff=True, - } + }, ) with pytest.raises(AnsibleFailJson, match="name, role_type"): @@ -70,7 +70,7 @@ def test_invalid_service(conn, module_args, base_cluster): "cluster": base_cluster.name, "service": "BOOM", "type": "BOOM", - } + }, ) with 
pytest.raises(AnsibleFailJson, match="Service does not exist: BOOM"): @@ -84,7 +84,7 @@ def test_invalid_cluster(conn, module_args, cms_session): "cluster": "BOOM", "service": "BOOM", "type": "BOOM", - } + }, ) with pytest.raises(AnsibleFailJson, match="Cluster does not exist: BOOM"): @@ -97,9 +97,9 @@ def test_invalid_cluster(conn, module_args, cms_session): items=[ ApiConfig(k, v) for k, v in dict(minSessionTimeout=2500, process_start_secs=25).items() - ] - ) - ) + ], + ), + ), ) def test_base_role_config_group_set(conn, module_args, zk_role_config_group, request): module_args( @@ -112,7 +112,7 @@ def test_base_role_config_group_set(conn, module_args, zk_role_config_group, req "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", # _ansible_check_mode=True, # _ansible_diff=True, - } + }, ) expected = dict(minSessionTimeout="3000", process_start_secs="25") @@ -137,9 +137,9 @@ def test_base_role_config_group_set(conn, module_args, zk_role_config_group, req items=[ ApiConfig(k, v) for k, v in dict(minSessionTimeout=2600, process_start_secs=26).items() - ] - ) - ) + ], + ), + ), ) def test_base_role_config_group_unset(conn, module_args, zk_role_config_group, request): module_args( @@ -152,7 +152,7 @@ def test_base_role_config_group_unset(conn, module_args, zk_role_config_group, r "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", # _ansible_check_mode=True, # _ansible_diff=True, - } + }, ) expected = dict(process_start_secs="26") @@ -177,9 +177,9 @@ def test_base_role_config_group_unset(conn, module_args, zk_role_config_group, r items=[ ApiConfig(k, v) for k, v in dict(minSessionTimeout=2700, process_start_secs=27).items() - ] - ) - ) + ], + ), + ), ) def test_base_role_config_group_purge(conn, module_args, zk_role_config_group, request): module_args( @@ -193,7 +193,7 @@ def test_base_role_config_group_purge(conn, module_args, zk_role_config_group, r "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", # _ansible_check_mode=True, # _ansible_diff=True, - } + }, ) expected = dict(minSessionTimeout="2701") @@ -218,12 +218,15 @@ def test_base_role_config_group_purge(conn, module_args, zk_role_config_group, r items=[ ApiConfig(k, v) for k, v in dict(minSessionTimeout=2800, process_start_secs=28).items() - ] - ) - ) + ], + ), + ), ) def test_base_role_config_group_purge_all( - conn, module_args, zk_role_config_group, request + conn, + module_args, + zk_role_config_group, + request, ): module_args( { @@ -236,7 +239,7 @@ def test_base_role_config_group_purge_all( "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", # _ansible_check_mode=True, # _ansible_diff=True, - } + }, ) expected = dict() @@ -256,7 +259,11 @@ def test_base_role_config_group_purge_all( def test_base_role_config_group_absent( - conn, module_args, cm_api_client, zk_session, request + conn, + module_args, + cm_api_client, + zk_session, + request, ): rcg = get_base_role_config_group( api_client=cm_api_client, @@ -275,7 +282,7 @@ def test_base_role_config_group_absent( "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", # _ansible_check_mode=True, # _ansible_diff=True, - } + }, ) with pytest.raises( @@ -297,7 +304,7 @@ def test_role_config_group_create(conn, module_args, zk_session, request): "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", # _ansible_check_mode=True, # _ansible_diff=True, - } + }, ) expected = dict(minSessionTimeout="3000") @@ -324,9 +331,9 @@ def test_role_config_group_create(conn, 
module_args, zk_session, request): items=[ ApiConfig(k, v) for k, v in dict(minSessionTimeout=2800, process_start_secs=28).items() - ] + ], ), - ) + ), ) def test_role_config_group_set(conn, module_args, zk_role_config_group, request): module_args( @@ -339,7 +346,7 @@ def test_role_config_group_set(conn, module_args, zk_role_config_group, request) "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", # _ansible_check_mode=True, # _ansible_diff=True, - } + }, ) expected = dict(minSessionTimeout="3000", process_start_secs="28") @@ -366,9 +373,9 @@ def test_role_config_group_set(conn, module_args, zk_role_config_group, request) items=[ ApiConfig(k, v) for k, v in dict(minSessionTimeout=2900, process_start_secs=29).items() - ] + ], ), - ) + ), ) def test_role_config_group_unset(conn, module_args, zk_role_config_group, request): module_args( @@ -381,7 +388,7 @@ def test_role_config_group_unset(conn, module_args, zk_role_config_group, reques "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", # _ansible_check_mode=True, # _ansible_diff=True, - } + }, ) expected = dict(process_start_secs="29") @@ -408,9 +415,9 @@ def test_role_config_group_unset(conn, module_args, zk_role_config_group, reques items=[ ApiConfig(k, v) for k, v in dict(minSessionTimeout=3100, process_start_secs=31).items() - ] + ], ), - ) + ), ) def test_role_config_group_purge(conn, module_args, zk_role_config_group, request): module_args( @@ -424,7 +431,7 @@ def test_role_config_group_purge(conn, module_args, zk_role_config_group, reques "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", # _ansible_check_mode=True, # _ansible_diff=True, - } + }, ) expected = dict(minSessionTimeout="3000") @@ -451,9 +458,9 @@ def test_role_config_group_purge(conn, module_args, zk_role_config_group, reques items=[ ApiConfig(k, v) for k, v in dict(minSessionTimeout=3200, process_start_secs=32).items() - ] + ], ), - ) + ), ) def test_role_config_group_purge_all(conn, module_args, zk_role_config_group, request): module_args( @@ -467,7 +474,7 @@ def test_role_config_group_purge_all(conn, module_args, zk_role_config_group, re "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", # _ansible_check_mode=True, # _ansible_diff=True, - } + }, ) expected = dict() @@ -494,9 +501,9 @@ def test_role_config_group_purge_all(conn, module_args, zk_role_config_group, re items=[ ApiConfig(k, v) for k, v in dict(minSessionTimeout=3100, process_start_secs=31).items() - ] + ], ), - ) + ), ) def test_role_config_group_absent(conn, module_args, zk_role_config_group, request): module_args( @@ -509,7 +516,7 @@ def test_role_config_group_absent(conn, module_args, zk_role_config_group, reque "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", # _ansible_check_mode=True, # _ansible_diff=True, - } + }, ) with pytest.raises(AnsibleExitJson) as e: @@ -531,10 +538,13 @@ def test_role_config_group_absent(conn, module_args, zk_role_config_group, reque name="Pytest Invalid Type", role_type="SERVER", config=ApiConfigList(items=[]), - ) + ), ) def test_role_config_group_invalid_type( - conn, module_args, zk_role_config_group, request + conn, + module_args, + zk_role_config_group, + request, ): module_args( { @@ -546,7 +556,7 @@ def test_role_config_group_invalid_type( "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", # _ansible_check_mode=True, # _ansible_diff=True, - } + }, ) with pytest.raises(AnsibleFailJson, match="Invalid role type") as e: @@ -558,10 
+568,13 @@ def test_role_config_group_invalid_type( name="Pytest Invalid Configuration", role_type="SERVER", config=ApiConfigList(items=[]), - ) + ), ) def test_role_config_group_invalid_config( - conn, module_args, zk_role_config_group, request + conn, + module_args, + zk_role_config_group, + request, ): module_args( { @@ -573,7 +586,7 @@ def test_role_config_group_invalid_config( "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", # _ansible_check_mode=True, # _ansible_diff=True, - } + }, ) with pytest.raises(AnsibleFailJson, match="Unknown configuration attribute") as e: @@ -585,10 +598,14 @@ def test_role_config_group_invalid_config( name="Pytest Absent", role_type="SERVER", config=ApiConfigList(items=[]), - ) + ), ) def test_role_config_group_existing_roles( - conn, module_args, cm_api_client, zk_role_config_group, request + conn, + module_args, + cm_api_client, + zk_role_config_group, + request, ): base_rcg = get_base_role_config_group( api_client=cm_api_client, @@ -621,7 +638,7 @@ def test_role_config_group_existing_roles( "message": f"{Path(request.node.parent.name).stem}::{request.node.name}", # _ansible_check_mode=True, # _ansible_diff=True, - } + }, ) with pytest.raises(AnsibleFailJson, match="existing role associations") as e: diff --git a/tests/unit/plugins/modules/service_role_config_group_config/test_service_role_config_group_config.py b/tests/unit/plugins/modules/service_role_config_group_config/test_service_role_config_group_config.py index 53b08067..938a2537 100644 --- a/tests/unit/plugins/modules/service_role_config_group_config/test_service_role_config_group_config.py +++ b/tests/unit/plugins/modules/service_role_config_group_config/test_service_role_config_group_config.py @@ -63,7 +63,8 @@ def test_missing_required(conn, module_args): module_args(conn) with pytest.raises( - AnsibleFailJson, match="cluster, parameters, role_config_group, service" + AnsibleFailJson, + match="cluster, parameters, role_config_group, service", ): service_role_config_group_config.main() @@ -178,7 +179,7 @@ def test_create_role_config_group_with_roles(conn, module_args): assert e.value.changed == True assert e.value.role_config_group["name"] == "hdfs-example2" assert e.value.role_config_group["roles"] == [ - "hdfs-DATANODE-a9a5b7d344404d8a304ff4b3779679a1" + "hdfs-DATANODE-a9a5b7d344404d8a304ff4b3779679a1", ] with pytest.raises(AnsibleExitJson) as e: @@ -187,7 +188,7 @@ def test_create_role_config_group_with_roles(conn, module_args): assert e.value.changed == False assert e.value.role_config_group["name"] == "hdfs-example2" assert e.value.role_config_group["roles"] == [ - "hdfs-DATANODE-a9a5b7d344404d8a304ff4b3779679a1" + "hdfs-DATANODE-a9a5b7d344404d8a304ff4b3779679a1", ] @@ -212,7 +213,7 @@ def test_update_role_membership(conn, module_args): [ "hdfs-DATANODE-a9a5b7d344404d8a304ff4b3779679a1", "hdfs-DATANODE-7f3a9da5805a46e3100bae67424355ac", - ] + ], ) with pytest.raises(AnsibleExitJson) as e: @@ -224,7 +225,7 @@ def test_update_role_membership(conn, module_args): [ "hdfs-DATANODE-a9a5b7d344404d8a304ff4b3779679a1", "hdfs-DATANODE-7f3a9da5805a46e3100bae67424355ac", - ] + ], ) @@ -247,7 +248,7 @@ def test_set_role_membership(conn, module_args): assert e.value.changed == True assert e.value.role_config_group["name"] == "hdfs-example2" assert e.value.role_config_group["roles"] == [ - "hdfs-DATANODE-7f3a9da5805a46e3100bae67424355ac" + "hdfs-DATANODE-7f3a9da5805a46e3100bae67424355ac", ] with pytest.raises(AnsibleExitJson) as e: @@ -256,7 +257,7 @@ def 
test_set_role_membership(conn, module_args): assert e.value.changed == False assert e.value.role_config_group["name"] == "hdfs-example2" assert e.value.role_config_group["roles"] == [ - "hdfs-DATANODE-7f3a9da5805a46e3100bae67424355ac" + "hdfs-DATANODE-7f3a9da5805a46e3100bae67424355ac", ] @@ -348,6 +349,7 @@ def test_remove_role_config_group_invalid_base(conn, module_args): module_args(conn) with pytest.raises( - AnsibleFailJson, match="Group 'hdfs-DATANODE-BASE' is a base group" + AnsibleFailJson, + match="Group 'hdfs-DATANODE-BASE' is a base group", ): service_role_config_group_config.main() diff --git a/tests/unit/plugins/modules/service_role_config_group_config_info/test_service_role_config_group_config_info.py b/tests/unit/plugins/modules/service_role_config_group_config_info/test_service_role_config_group_config_info.py index 2203919d..a2487d3a 100644 --- a/tests/unit/plugins/modules/service_role_config_group_config_info/test_service_role_config_group_config_info.py +++ b/tests/unit/plugins/modules/service_role_config_group_config_info/test_service_role_config_group_config_info.py @@ -97,7 +97,7 @@ def test_invalid_service(conn, module_args): "cluster": os.getenv("CM_CLUSTER"), "service": "BOOM", "role_config_group": os.getenv("CM_ROLE_CONFIG_GROUP"), - } + }, ) with pytest.raises(AnsibleFailJson, match="Service does not exist: BOOM"): @@ -111,7 +111,7 @@ def test_invalid_cluster(conn, module_args): "cluster": "BOOM", "service": os.getenv("CM_SERVICE"), "role_config_group": os.getenv("CM_ROLE_CONFIG_GROUP"), - } + }, ) with pytest.raises(AnsibleFailJson, match="Cluster does not exist: BOOM"): @@ -125,7 +125,7 @@ def test_invalid_role_config_group(conn, module_args): "cluster": os.getenv("CM_CLUSTER"), "service": os.getenv("CM_SERVICE"), "role_config_group": "BOOM", - } + }, ) with pytest.raises(AnsibleExitJson) as e: @@ -141,7 +141,7 @@ def test_summary(conn, module_args): "cluster": os.getenv("CM_CLUSTER"), "service": os.getenv("CM_SERVICE"), "role_config_group": os.getenv("CM_ROLE_CONFIG_GROUP"), - } + }, ) with pytest.raises(AnsibleExitJson) as e: @@ -158,7 +158,7 @@ def test_full(conn, module_args): "service": os.getenv("CM_SERVICE"), "role_config_group": os.getenv("CM_ROLE_CONFIG_GROUP"), "view": "full", - } + }, ) with pytest.raises(AnsibleExitJson) as e: diff --git a/tests/unit/plugins/modules/service_role_config_group_info/test_service_role_config_group_info.py b/tests/unit/plugins/modules/service_role_config_group_info/test_service_role_config_group_info.py index 76befc6c..f64a129f 100644 --- a/tests/unit/plugins/modules/service_role_config_group_info/test_service_role_config_group_info.py +++ b/tests/unit/plugins/modules/service_role_config_group_info/test_service_role_config_group_info.py @@ -67,7 +67,7 @@ def test_invalid_service(conn, module_args, base_cluster): **conn, "cluster": base_cluster.name, "service": "BOOM", - } + }, ) with pytest.raises(AnsibleFailJson, match="Service does not exist: BOOM"): @@ -80,7 +80,7 @@ def test_invalid_cluster(conn, module_args, cms_session): **conn, "cluster": "BOOM", "service": "ShouldNotReach", - } + }, ) with pytest.raises(AnsibleFailJson, match="Cluster does not exist: BOOM"): @@ -92,7 +92,7 @@ def test_invalid_cluster(conn, module_args, cms_session): name="Pytest All", role_type="SERVER", config=ApiConfigList(items=[]), - ) + ), ) def test_all_role_config_groups(conn, module_args, base_cluster, zk_role_config_group): module_args( @@ -100,7 +100,7 @@ def test_all_role_config_groups(conn, module_args, base_cluster, zk_role_config_ 
**conn, "cluster": base_cluster.name, "service": zk_role_config_group.service_ref.service_name, - } + }, ) with pytest.raises(AnsibleExitJson) as e: @@ -114,7 +114,7 @@ def test_all_role_config_groups(conn, module_args, base_cluster, zk_role_config_ name="Pytest Type", role_type="SERVER", config=ApiConfigList(items=[]), - ) + ), ) def test_type_role_config_group(conn, module_args, base_cluster, zk_role_config_group): module_args( @@ -123,7 +123,7 @@ def test_type_role_config_group(conn, module_args, base_cluster, zk_role_config_ "cluster": base_cluster.name, "service": zk_role_config_group.service_ref.service_name, "type": "SERVER", - } + }, ) with pytest.raises(AnsibleExitJson) as e: @@ -137,10 +137,14 @@ def test_type_role_config_group(conn, module_args, base_cluster, zk_role_config_ name="Pytest Base", role_type="SERVER", config=ApiConfigList(items=[]), - ) + ), ) def test_name_base_role_config_group( - conn, module_args, cm_api_client, base_cluster, zk_role_config_group + conn, + module_args, + cm_api_client, + base_cluster, + zk_role_config_group, ): base_rcg = get_base_role_config_group( api_client=cm_api_client, @@ -155,7 +159,7 @@ def test_name_base_role_config_group( "cluster": base_cluster.name, "service": zk_role_config_group.name, "name": base_rcg.name, - } + }, ) with pytest.raises(AnsibleExitJson) as e: @@ -171,10 +175,13 @@ def test_name_base_role_config_group( name="Pytest Non-Base", role_type="SERVER", config=ApiConfigList(items=[]), - ) + ), ) def test_name_base_role_config_group( - conn, module_args, base_cluster, zk_role_config_group + conn, + module_args, + base_cluster, + zk_role_config_group, ): module_args( { @@ -182,7 +189,7 @@ def test_name_base_role_config_group( "cluster": base_cluster.name, "service": zk_role_config_group.service_ref.service_name, "name": "Pytest Non-Base", - } + }, ) with pytest.raises(AnsibleExitJson) as e: diff --git a/tests/unit/plugins/modules/service_role_config_info/test_service_role_config_info.py b/tests/unit/plugins/modules/service_role_config_info/test_service_role_config_info.py index bdcfc446..f820fafa 100644 --- a/tests/unit/plugins/modules/service_role_config_info/test_service_role_config_info.py +++ b/tests/unit/plugins/modules/service_role_config_info/test_service_role_config_info.py @@ -89,7 +89,7 @@ def test_view_default(conn, module_args): "cluster": os.getenv("CM_CLUSTER"), "service": os.getenv("CM_SERVICE"), "role": os.getenv("CM_ROLE"), - } + }, ) with pytest.raises(AnsibleExitJson) as e: @@ -105,7 +105,7 @@ def test_invalid_service(conn, module_args): "cluster": os.getenv("CM_CLUSTER"), "service": "BOOM", "role": os.getenv("CM_ROLE"), - } + }, ) with pytest.raises(AnsibleFailJson, match="Service does not exist: BOOM"): @@ -119,7 +119,7 @@ def test_invalid_cluster(conn, module_args): "cluster": "BOOM", "service": os.getenv("CM_SERVICE"), "role": os.getenv("CM_ROLE"), - } + }, ) with pytest.raises(AnsibleFailJson, match="Cluster does not exist: BOOM"): diff --git a/tests/unit/plugins/modules/service_role_info/test_service_role_info.py b/tests/unit/plugins/modules/service_role_info/test_service_role_info.py index 5008faf4..b4a122de 100644 --- a/tests/unit/plugins/modules/service_role_info/test_service_role_info.py +++ b/tests/unit/plugins/modules/service_role_info/test_service_role_info.py @@ -137,7 +137,7 @@ def test_service_role_info_missing_cluster(conn, module_args): { **conn, "service": "example", - } + }, ) with pytest.raises(AnsibleFailJson, match="cluster"): @@ -150,7 +150,7 @@ def 
test_service_role_info_invalid_service(conn, module_args, zookeeper): **conn, "cluster": zookeeper.cluster_ref.cluster_name, "service": "BOOM", - } + }, ) with pytest.raises(AnsibleFailJson, match="Service 'BOOM' not found in cluster"): @@ -163,7 +163,7 @@ def test_service_role_info_invalid_cluster(conn, module_args, zookeeper): **conn, "cluster": "BOOM", "service": zookeeper.name, - } + }, ) with pytest.raises(AnsibleFailJson, match="Cluster does not exist: BOOM"): @@ -181,7 +181,7 @@ def test_service_role_info_all(conn, module_args, cm_api_client, zookeeper): **conn, "cluster": zookeeper.cluster_ref.cluster_name, "service": zookeeper.name, - } + }, ) with pytest.raises(AnsibleExitJson) as e: @@ -202,7 +202,7 @@ def test_service_role_info_all_full(conn, module_args, cm_api_client, zookeeper) "cluster": zookeeper.cluster_ref.cluster_name, "service": zookeeper.name, "view": "full", - } + }, ) with pytest.raises(AnsibleExitJson) as e: @@ -223,7 +223,7 @@ def test_service_role_info_by_name(conn, module_args, cm_api_client, zookeeper): "cluster": zookeeper.cluster_ref.cluster_name, "service": zookeeper.name, "role": expected_roles[0].name, - } + }, ) with pytest.raises(AnsibleExitJson) as e: @@ -234,7 +234,11 @@ def test_service_role_info_by_name(conn, module_args, cm_api_client, zookeeper): def test_service_role_info_by_type( - conn, module_args, cm_api_client, zookeeper, server_role + conn, + module_args, + cm_api_client, + zookeeper, + server_role, ): role_type = "SERVER" @@ -253,7 +257,7 @@ def test_service_role_info_by_type( "cluster": zookeeper.cluster_ref.cluster_name, "service": zookeeper.name, "type": role_type, - } + }, ) with pytest.raises(AnsibleExitJson) as e: @@ -263,7 +267,11 @@ def test_service_role_info_by_type( def test_service_role_info_by_hostname( - conn, module_args, cm_api_client, zookeeper, server_role + conn, + module_args, + cm_api_client, + zookeeper, + server_role, ): expected_roles = gather_server_roles( api_client=cm_api_client, @@ -276,7 +284,7 @@ def test_service_role_info_by_hostname( "cluster": zookeeper.cluster_ref.cluster_name, "service": zookeeper.name, "cluster_hostname": expected_roles[0].host_ref.hostname, - } + }, ) with pytest.raises(AnsibleExitJson) as e: @@ -288,7 +296,11 @@ def test_service_role_info_by_hostname( def test_service_role_info_by_host_id( - conn, module_args, cm_api_client, zookeeper, server_role + conn, + module_args, + cm_api_client, + zookeeper, + server_role, ): expected_roles = gather_server_roles( api_client=cm_api_client, @@ -301,7 +313,7 @@ def test_service_role_info_by_host_id( "cluster": zookeeper.cluster_ref.cluster_name, "service": zookeeper.name, "cluster_host_id": expected_roles[0].host_ref.host_id, - } + }, ) with pytest.raises(AnsibleExitJson) as e: diff --git a/tests/unit/plugins/modules/service_type_info/test_service_type_info.py b/tests/unit/plugins/modules/service_type_info/test_service_type_info.py index 4d7394ec..bdfc458d 100644 --- a/tests/unit/plugins/modules/service_type_info/test_service_type_info.py +++ b/tests/unit/plugins/modules/service_type_info/test_service_type_info.py @@ -42,7 +42,7 @@ def test_invalid_cluster(conn, module_args): { **conn, "cluster": "BOOM", - } + }, ) with pytest.raises(AnsibleExitJson) as e: @@ -56,7 +56,7 @@ def test_view_all_services_types(conn, module_args, base_cluster): { **conn, "cluster": base_cluster.name, - } + }, ) with pytest.raises(AnsibleExitJson) as e: diff --git a/tests/unit/plugins/modules/utils.py b/tests/unit/plugins/modules/utils.py index 2c97a428..a49f82af 100644 
--- a/tests/unit/plugins/modules/utils.py +++ b/tests/unit/plugins/modules/utils.py @@ -54,7 +54,9 @@ def fail_json(*args, **kwargs): class ModuleTestCase(unittest.TestCase): def setUp(self): self.mock_module = patch.multiple( - basic.AnsibleModule, exit_json=exit_json, fail_json=fail_json + basic.AnsibleModule, + exit_json=exit_json, + fail_json=fail_json, ) self.mock_module.start() self.mock_sleep = patch("time.sleep") From db46ca09f18c5f624b198296212d983518a7d6f3 Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Tue, 24 Jun 2025 17:09:58 -0400 Subject: [PATCH 15/21] Rename legacy auto-tls templates to conform to pre-commit checks Signed-off-by: Webster Mudge --- roles/cloudera_manager/autotls/tasks/main.yml | 8 ++++---- .../templates/{auto-tls-key.json => auto-tls-key.json.j2} | 0 .../autotls/templates/{auto-tls.json => auto-tls.json.j2} | 0 3 files changed, 4 insertions(+), 4 deletions(-) rename roles/cloudera_manager/autotls/templates/{auto-tls-key.json => auto-tls-key.json.j2} (100%) rename roles/cloudera_manager/autotls/templates/{auto-tls.json => auto-tls.json.j2} (100%) diff --git a/roles/cloudera_manager/autotls/tasks/main.yml b/roles/cloudera_manager/autotls/tasks/main.yml index 3e46fca5..f9afb535 100644 --- a/roles/cloudera_manager/autotls/tasks/main.yml +++ b/roles/cloudera_manager/autotls/tasks/main.yml @@ -33,14 +33,14 @@ - name: DEBUG Auto-TLS using password debug: - msg: "{{ lookup('template', 'auto-tls.json') }}" + msg: "{{ lookup('template', 'auto-tls.json.j2') }}" when: use_password and debug | default(false) - name: Enable Auto-TLS cm_api: endpoint: "/cm/commands/generateCmca" method: POST - body: "{{ lookup('template', 'auto-tls.json') }}" + body: "{{ lookup('template', 'auto-tls.json.j2') }}" timeout: 360 ignore_errors: true when: use_password @@ -52,14 +52,14 @@ - name: DEBUG Auto-TLS using key debug: - msg: "{{ lookup('template', 'auto-tls-key.json') }}" + msg: "{{ lookup('template', 'auto-tls-key.json.j2') }}" when: not use_password - name: Enable Auto-TLS cm_api: endpoint: "/cm/commands/generateCmca" method: POST - body: "{{ lookup('template', 'auto-tls-key.json') }}" + body: "{{ lookup('template', 'auto-tls-key.json.j2') }}" ignore_errors: true when: not use_password notify: diff --git a/roles/cloudera_manager/autotls/templates/auto-tls-key.json b/roles/cloudera_manager/autotls/templates/auto-tls-key.json.j2 similarity index 100% rename from roles/cloudera_manager/autotls/templates/auto-tls-key.json rename to roles/cloudera_manager/autotls/templates/auto-tls-key.json.j2 diff --git a/roles/cloudera_manager/autotls/templates/auto-tls.json b/roles/cloudera_manager/autotls/templates/auto-tls.json.j2 similarity index 100% rename from roles/cloudera_manager/autotls/templates/auto-tls.json rename to roles/cloudera_manager/autotls/templates/auto-tls.json.j2 From dd2807c385042e00fd8d719819b2afde22878e28 Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Tue, 24 Jun 2025 17:10:35 -0400 Subject: [PATCH 16/21] Remove lint environment and move lint command to default environment Signed-off-by: Webster Mudge --- pyproject.toml | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index d5a30c52..e84f133e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -34,15 +34,11 @@ dependencies = [ "jmespath", "cm-client", "python-gnupg", + "ansible-lint", ] -[tool.hatch.envs.lint] -python = "3.12" -detached = true -extra-dependencies = ["ansible-lint"] - -[tool.hatch.envs.lint.scripts] -run = "pre-commit run -a" 
+[tool.hatch.envs.default.scripts] +lint = "pre-commit run -a" [tool.hatch.envs.docs] python = "3.12" From 4ab40f481ac5378b29dd0afb428595ac48a101d9 Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Tue, 24 Jun 2025 17:13:31 -0400 Subject: [PATCH 17/21] Allow missing AWS credentials in pre-commit check Signed-off-by: Webster Mudge --- .pre-commit-config.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 1ae38ba5..48884ab3 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -29,6 +29,8 @@ repos: - id: check-symlinks - id: debug-statements - id: detect-aws-credentials + args: + - --allow-missing-credentials - id: detect-private-key - id: forbid-submodules # - id: name-tests-test From e91cd684e34fcd9a0c793c0a2fb656dd34ff8118 Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Fri, 27 Jun 2025 12:56:31 -0400 Subject: [PATCH 18/21] Update pre-commit hook labels Signed-off-by: Webster Mudge --- .pre-commit-config.yaml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 48884ab3..a8c32c3f 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -38,13 +38,16 @@ repos: rev: v3.2.0 hooks: - id: add-trailing-comma + name: ensure trailing commas args: - --py36-plus - repo: https://github.com/psf/black rev: 25.1.0 hooks: - id: black + name: lint python - repo: https://github.com/ansible/ansible-lint rev: v25.6.1 hooks: - id: ansible-lint + name: lint ansible From 8f29656f6f118075ef32c4c6834ea3fe26fef2a1 Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Fri, 27 Jun 2025 12:57:25 -0400 Subject: [PATCH 19/21] Update lint script in Hatch to check Ansible documentation Signed-off-by: Webster Mudge --- pyproject.toml | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index e84f133e..d4fe8ff8 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -35,10 +35,14 @@ dependencies = [ "cm-client", "python-gnupg", "ansible-lint", + "antsibull-docs @ git+https://github.com/cloudera-labs/antsibull-docs@cldr-docsite#egg=antsibull-docs", ] [tool.hatch.envs.default.scripts] -lint = "pre-commit run -a" +lint = [ + "pre-commit run -a", + "antsibull-docs lint-collection-docs --plugin-docs --validate-collection-refs=all --skip-rstcheck .", +] [tool.hatch.envs.docs] python = "3.12" @@ -48,12 +52,10 @@ extra-dependencies = [ "ansible-pygments", "sphinx", "sphinx-ansible-theme >= 0.9.0", - "antsibull-docs @ git+https://github.com/cloudera-labs/antsibull-docs@cldr-docsite#egg=antsibull-docs", "antsichaut", ] [tool.hatch.envs.docs.scripts] -lint = "antsibull-docs lint-collection-docs --plugin-docs --validate-collection-refs=all --skip-rstcheck ." build = "docsbuild/build.sh" changelog = [ # Read the version in galaxy.yml via hatch itself (normalizes release candidates, etc.) 
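Note on the test-suite churn in the earlier hunks: nearly all of it is the style enforced by the add-trailing-comma hook configured in .pre-commit-config.yaml above. A minimal, self-contained sketch of that style follows; the module_args function here is a hypothetical stand-in for the pytest fixture used in the real tests, not the actual helper.

# Sketch of the formatting the add-trailing-comma hook enforces.
# `module_args` is a hypothetical stand-in for the fixture in the tests above.
def module_args(args: dict) -> dict:
    return args

# Before the hook runs, a multi-line call ends without a trailing comma:
#     module_args(
#         {
#             "state": "present",
#         }
#     )
# The hook rewrites every multi-line literal and argument list to end with a
# trailing comma, so appending an argument later stays a one-line diff:
result = module_args(
    {
        "state": "present",
    },
)
assert result == {"state": "present"}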
From 013547e47e97ead13bd96d28ff3007c0e9353208 Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Fri, 27 Jun 2025 14:28:17 -0400 Subject: [PATCH 20/21] Fix Hatch default and docs environment dependencies Signed-off-by: Webster Mudge --- pyproject.toml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index d4fe8ff8..bffabfe6 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -35,7 +35,7 @@ dependencies = [ "cm-client", "python-gnupg", "ansible-lint", - "antsibull-docs @ git+https://github.com/cloudera-labs/antsibull-docs@cldr-docsite#egg=antsibull-docs", + "antsibull-docs >= 2.0.0, < 3.0.0", ] [tool.hatch.envs.default.scripts] @@ -48,7 +48,8 @@ lint = [ python = "3.12" detached = true extra-dependencies = [ - # "antsibull-docs >= 2.0.0, < 3.0.0", + "ansible-core<2.17", # For RHEL 8 support + "antsibull-docs @ git+https://github.com/cloudera-labs/antsibull-docs@cldr-docsite#egg=antsibull-docs", "ansible-pygments", "sphinx", "sphinx-ansible-theme >= 0.9.0", From 1eacb4040ba8f3031236262903167a4db1c5e182 Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Fri, 27 Jun 2025 14:32:24 -0400 Subject: [PATCH 21/21] Comment out non-existent document reference Signed-off-by: Webster Mudge --- docs/docsite/extra-docs.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/docsite/extra-docs.yml b/docs/docsite/extra-docs.yml index c10cfcd2..61cb0a88 100644 --- a/docs/docsite/extra-docs.yml +++ b/docs/docsite/extra-docs.yml @@ -1,6 +1,6 @@ --- sections: - - title: Guides - toctree: - - api-design +# - title: Guides +# toctree: +# - api-design
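For readers following the test hunks above: the pytest.raises(AnsibleExitJson) pattern works because tests/unit/plugins/modules/utils.py patches AnsibleModule.exit_json and fail_json to raise exceptions instead of exiting the process. The sketch below is a simplified, assumption-laden illustration of that mechanism; the real helper classes live in utils.py and carry the full module result, not just the fields shown here.

import pytest


class AnsibleExitJson(Exception):
    """Stand-in: raised in place of exiting when a module succeeds."""

    def __init__(self, **result):
        super().__init__(result.get("msg", ""))
        self.changed = result.get("changed", False)


def exit_json(*args, **kwargs):
    # Mirror AnsibleModule.exit_json: guarantee 'changed' is present, then
    # raise so the test can capture the result instead of the process exiting.
    kwargs.setdefault("changed", False)
    raise AnsibleExitJson(**kwargs)


def test_capture_module_exit():
    with pytest.raises(AnsibleExitJson) as e:
        exit_json(changed=True, msg="done")
    assert e.value.changed is True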