diff --git a/pyproject.toml b/pyproject.toml
new file mode 100644
index 00000000..a36945c5
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,61 @@
+[project]
+name = "cluster"
+dynamic = ["version"]
+description = "cloudera.cluster Ansible collection"
+readme = "README.md"
+requires-python = ">=3.8"
+license = "Apache-2.0"
+keywords = []
+authors = [
+    { name = "Webster Mudge", email = "wmudge@cloudera.com" },
+]
+classifiers = []
+dependencies = []
+
+[tool.hatch.version]
+path = "galaxy.yml"
+pattern = "version:\\s+(?P<version>[\\d\\.]+)"
+
+[tool.hatch.envs.default]
+python = "3.12"
+skip-install = true
+dependencies = [
+    "pre-commit",
+    "coverage[toml]",
+    "pytest",
+    "pytest-mock",
+    # "pytest-cov",
+    "molecule",
+    "molecule-plugins",
+    "molecule-plugins[ec2]",
+    "tox-ansible",
+    "ansible-core<2.17",  # For RHEL 8 support
+    "jmespath",
+    "cm-client",
+]
+
+[tool.hatch.envs.lint]
+python = "3.12"
+skip-install = true
+extra-dependencies = [
+    "ansible-lint",
+]
+
+[tool.hatch.envs.lint.scripts]
+run = "pre-commit run -a"
+
+[tool.pytest.ini_options]
+testpaths = [
+    "tests",
+]
+filterwarnings = [
+    "ignore:AnsibleCollectionFinder has already been configured",
+    "ignore:'crypt' is deprecated and slated for removal in Python 3.13:DeprecationWarning",
+]
+markers = [
+    "prepare: Prepare Cloudera Manager and resources for tests",
+]
+
+[build-system]
+requires = ["hatchling"]
+build-backend = "hatchling.build"
diff --git a/pytest.ini b/pytest.ini
deleted file mode 100644
index 03f164ec..00000000
--- a/pytest.ini
+++ /dev/null
@@ -1,23 +0,0 @@
-# Copyright 2023 Cloudera, Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-[pytest]
-filterwarnings =
-    ignore::DeprecationWarning
-    ignore:AnsibleCollectionFinder has already been configured:UserWarning
-
-; log_cli = 1
-; log_cli_level = INFO
-
-pythonpath = "../../../"
diff --git a/tests/unit/__init__.py b/tests/unit/__init__.py
index 6a16e733..f593ed44 100644
--- a/tests/unit/__init__.py
+++ b/tests/unit/__init__.py
@@ -1,3 +1,5 @@
+# -*- coding: utf-8 -*-
+#
 # Copyright 2024 Cloudera, Inc.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -12,6 +14,23 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+from collections.abc import Generator
+from time import sleep
+
+from cm_client import (
+    ApiClient,
+    ApiCluster,
+    ApiCommand,
+    ApiConfig,
+    ApiService,
+    ApiServiceConfig,
+    ApiServiceList,
+    ClustersResourceApi,
+    CommandsResourceApi,
+    ServicesResourceApi,
+)
+from cm_client.rest import ApiException
+
 
 class AnsibleExitJson(Exception):
     """Exception class to be raised by module.exit_json and caught by the test case"""
@@ -31,3 +50,132 @@ def __init__(self, kwargs):
             kwargs.get("msg", "General module failure")
         )
         self.__dict__.update(kwargs)
+
+
+def wait_for_command(
+    api_client: ApiClient, command: ApiCommand, polling: int = 120, delay: int = 5
+):
+    """Polls Cloudera Manager to wait for a Command to complete."""
+
+    poll_count = 0
+    while command.active:
+        if poll_count > polling:
+            raise Exception("CM command timeout")
+        sleep(delay)
+        poll_count += 1
+        command = CommandsResourceApi(api_client).read_command(command.id)
+    if not command.success:
+        raise Exception(f"CM command [{command.id}] failed: {command.result_message}")
+
+
+def provision_service(
+    api_client: ApiClient, cluster: ApiCluster, service_name: str, service_type: str
+) -> Generator[ApiService]:
+    """Provisions a new cluster service as a generator.
+    Use with 'yield from' to delegate within a pytest fixture.
+
+    Args:
+        api_client (ApiClient): Cloudera Manager API client
+        cluster (ApiCluster): Cluster in which to create the service
+        service_name (str): Name of the new service
+        service_type (str): Type of the new service, e.g. ZOOKEEPER
+
+    Raises:
+        Exception: If the service's first run command fails or times out
+
+    Yields:
+        ApiService: The provisioned service, which is deleted on teardown
+    """
+
+    api = ServicesResourceApi(api_client)
+    cluster_api = ClustersResourceApi(api_client)
+
+    service = ApiService(
+        name=service_name,
+        type=service_type,
+    )
+
+    api.create_services(cluster_name=cluster.name, body=ApiServiceList(items=[service]))
+    cluster_api.auto_assign_roles(cluster_name=cluster.name)
+
+    # configure = cluster_api.auto_configure(cluster_name=target_cluster.name)
+    wait_for_command(
+        api_client,
+        api.first_run(cluster_name=cluster.name, service_name=service_name),
+    )
+
+    yield api.read_service(cluster_name=cluster.name, service_name=service_name)
+
+    api.delete_service(cluster_name=cluster.name, service_name=service_name)
+
+
+def service_wide_config(
+    api_client: ApiClient, service: ApiService, params: dict, message: str
+) -> Generator[ApiService]:
+    """Update a service-wide configuration for a given service. Yields the
+    service, resetting the configuration to its prior state. Use with
+    'yield from' within a pytest fixture.
+
+    Args:
+        api_client (ApiClient): Cloudera Manager API client
+        service (ApiService): Service whose service-wide configuration is updated
+        params (dict): Configuration parameters to set for the test
+        message (str): Revision message for the configuration updates
+
+    Raises:
+        Exception: If a configuration update is rejected by Cloudera Manager
+
+    Yields:
+        ApiService: The targeted service
+    """
+    service_api = ServicesResourceApi(api_client)
+
+    # Retrieve all of the pre-setup configurations
+    pre = service_api.read_service_config(
+        cluster_name=service.cluster_ref.cluster_name,
+        service_name=service.name,
+    )
+
+    # Set the test configurations
+    # Do so serially, since a failed update due to defaults (see ApiException) will cause remaining
+    # configuration entries to not run. Long-term solution is to check-and-set, which is
+    # what the Ansible modules do...
+    for k, v in params.items():
+        try:
+            service_api.update_service_config(
+                cluster_name=service.cluster_ref.cluster_name,
+                service_name=service.name,
+                message=f"{message}::set",
+                body=ApiServiceConfig(items=[ApiConfig(name=k, value=v)]),
+            )
+        except ApiException as ae:
+            if ae.status != 400 or "delete with template" not in str(ae.body):
+                raise Exception(str(ae))
+
+    # Yield the targeted service
+    yield service
+
+    # Retrieve all of the post-setup configurations
+    post = service_api.read_service_config(
+        cluster_name=service.cluster_ref.cluster_name,
+        service_name=service.name,
+    )
+
+    # Reconcile the configurations
+    pre_set = set([c.name for c in pre.items])
+
+    reconciled = pre.items.copy()
+    reconciled.extend(
+        [
+            ApiConfig(name=k.name, value=None)
+            for k in post.items
+            if k.name not in pre_set
+        ]
+    )
+
+    service_api.update_service_config(
+        cluster_name=service.cluster_ref.cluster_name,
+        service_name=service.name,
+        message=f"{message}::reset",
+        body=ApiServiceConfig(items=reconciled),
+    )
diff --git a/tests/unit/conftest.py b/tests/unit/conftest.py
index d86218dd..3a2a103c 100644
--- a/tests/unit/conftest.py
+++ b/tests/unit/conftest.py
@@ -19,18 +19,35 @@
 __metaclass__ = type
 
 import json
-import sys
+import os
 import pytest
+import random
+import string
+import sys
 import yaml
 
+from pathlib import Path
+
+from cm_client import (
+    ApiClient,
+    ApiClusterList,
+    ApiCluster,
+    ApiHostRef,
+    ApiHostRefList,
+    ClustersResourceApi,
+    Configuration,
+    HostsResourceApi,
+    ParcelResourceApi,
+    ParcelsResourceApi,
+)
+from cm_client.rest import ApiException, RESTClientObject
+
 from ansible.module_utils import basic
 from ansible.module_utils.common.text.converters import to_bytes
 
-# Required for pytest discovery in VSCode, reasons unknown...
-try:
-    from ansible.plugins.action import ActionBase
-except ModuleNotFoundError:
-    pass
+from ansible_collections.cloudera.cluster.plugins.module_utils.parcel_utils import (
+    Parcel,
+)
 
 from ansible_collections.cloudera.cluster.tests.unit import (
     AnsibleFailJson,
@@ -84,3 +101,145 @@ def prep_args(args: str = ""):
         basic._ANSIBLE_ARGS = to_bytes(output)
 
     return prep_args
+
+
+@pytest.fixture(scope="session")
+def conn():
+    conn = dict(username=os.getenv("CM_USERNAME"), password=os.getenv("CM_PASSWORD"))
+
+    if os.getenv("CM_HOST", None):
+        conn.update(host=os.getenv("CM_HOST"))
+
+    if os.getenv("CM_PORT", None):
+        conn.update(port=os.getenv("CM_PORT"))
+
+    if os.getenv("CM_ENDPOINT", None):
+        conn.update(url=os.getenv("CM_ENDPOINT"))
+
+    if os.getenv("CM_PROXY", None):
+        conn.update(proxy=os.getenv("CM_PROXY"))
+
+    return {
+        **conn,
+        "verify_tls": "no",
+        "debug": "no",
+    }
+
+
+@pytest.fixture(scope="session")
+def cm_api_client(conn) -> ApiClient:
+    """Create a Cloudera Manager API client, resolving HTTP/S and version URL.
+
+    Args:
+        conn (dict): Connection details
+
+    Returns:
+        ApiClient: Cloudera Manager API client
+    """
+    config = Configuration()
+
+    config.username = conn["username"]
+    config.password = conn["password"]
+
+    if "url" in conn:
+        config.host = str(conn["url"]).rstrip(" /")
+    else:
+        rest = RESTClientObject()
+        url = f"{conn['host']}:{conn['port']}"
+
+        # Handle redirects
+        redirect = rest.GET(url).urllib3_response.geturl()
+        if redirect != "/":
+            url = redirect
+
+        url = url.rstrip(" /")
+
+        # Get version
+        auth = config.auth_settings().get("basic")
+        version = rest.GET(
+            f"{url}/api/version", headers={auth["key"]: auth["value"]}
+        ).data
+
+        # Set host
+        config.host = f"{url}/api/{version}"
+
+    client = ApiClient()
+    client.user_agent = "pytest"
+    return client
+
+
+@pytest.fixture(scope="session")
+def base_cluster(cm_api_client, request):
+    """Provision a CDH Base cluster."""
+
+    cluster_api = ClustersResourceApi(cm_api_client)
+
+    if os.getenv("CM_CLUSTER", None):
+        yield cluster_api.read_cluster(cluster_name=os.getenv("CM_CLUSTER"))
+    else:
+        if os.getenv("CDH_VERSION", None):
+            cdh_version = os.getenv("CDH_VERSION")
+        else:
+            raise Exception(
+                "No CDH_VERSION found. Please set this environment variable."
+            )
+
+        name = (
+            Path(request.fixturename).stem
+            + "_"
+            + "".join(random.choices(string.ascii_lowercase, k=6))
+        )
+
+        parcels_api = ParcelsResourceApi(cm_api_client)
+        parcel_api = ParcelResourceApi(cm_api_client)
+        host_api = HostsResourceApi(cm_api_client)
+
+        try:
+            # Create the initial cluster
+            config = ApiCluster(
+                name=name,
+                full_version=cdh_version,
+            )
+
+            cluster_api.create_clusters(body=ApiClusterList(items=[config]))
+
+            # Get first free host and assign to the cluster
+            all_hosts = host_api.read_hosts()
+            host = next((h for h in all_hosts.items if not h.cluster_ref), None)
+
+            if host is None:
+                # Roll back the cluster and then raise an error
+                cluster_api.delete_cluster(cluster_name=name)
+                raise Exception("No available hosts to allocate to new cluster")
+            else:
+                cluster_api.add_hosts(
+                    cluster_name=name,
+                    body=ApiHostRefList(items=[ApiHostRef(host_id=host.host_id)]),
+                )
+
+            # Find the first CDH parcel version and activate it
+            parcels = parcels_api.read_parcels(cluster_name=name)
+            cdh_parcel = next(
+                (
+                    p
+                    for p in parcels.items
+                    if p.product == "CDH" and p.version.startswith(cdh_version)
+                )
+            )
+
+            parcel = Parcel(
+                parcel_api=parcel_api,
+                product=cdh_parcel.product,
+                version=cdh_parcel.version,
+                cluster=name,
+            )
+
+            parcel.activate()
+
+            # Reread and return the cluster
+            yield cluster_api.read_cluster(cluster_name=name)
+
+            # Deprovision the cluster
+            cluster_api.delete_cluster(cluster_name=name)
+        except ApiException as ae:
+            raise Exception(str(ae))
diff --git a/tests/unit/plugins/modules/service_config/test_service_config.py b/tests/unit/plugins/modules/service_config/test_service_config.py
index 1750722c..17c446c6 100644
--- a/tests/unit/plugins/modules/service_config/test_service_config.py
+++ b/tests/unit/plugins/modules/service_config/test_service_config.py
@@ -21,37 +21,164 @@
 import logging
 import os
 import pytest
+import random
+import string
+
+from pathlib import Path
+
+from cm_client import (
+    ApiConfig,
+    ApiService,
+    ApiServiceConfig,
+    ApiServiceList,
+    ClustersResourceApi,
+    ServicesResourceApi,
+)
+from cm_client.rest import ApiException
 
 from ansible_collections.cloudera.cluster.plugins.modules import service_config
+
 from ansible_collections.cloudera.cluster.tests.unit import (
     AnsibleExitJson,
     AnsibleFailJson,
+    wait_for_command,
+    provision_service,
+    service_wide_config,
 )
 
 LOG = logging.getLogger(__name__)
 
 
+@pytest.fixture(scope="module")
+def zk_service(cm_api_client, base_cluster, request):
+    if os.getenv("CM_SERVICE_ZOOKEEPER", None):
+        api = ServicesResourceApi(cm_api_client)
+        yield api.read_service(
+            cluster_name=base_cluster.name,
+            service_name=os.getenv("CM_SERVICE_ZOOKEEPER"),
+        )
+    else:
+        name = (
+            Path(request.fixturename).stem
+            + "_"
+            + "".join(random.choices(string.ascii_lowercase, k=6))
+        )
+        yield from provision_service(
+            api_client=cm_api_client,
+            cluster=base_cluster,
+            service_name=name,
+            service_type="ZOOKEEPER",
+        )
+
+
+@pytest.fixture(scope="function")
+def zk_service_config(cm_api_client, zk_service, request):
+    marker = request.node.get_closest_marker("service_config")
+
+    if marker is None:
+        raise Exception("No service_config marker found.")
+
+    yield from service_wide_config(
+        api_client=cm_api_client,
+        service=zk_service,
+        params=marker.args[0],
+        message=f"test_service_config::{request.node.name}",
+    )
+
+
+@pytest.fixture(scope="module")
+def target_service(cm_api_client, base_cluster, request):
+    api = ServicesResourceApi(cm_api_client)
+
+    if os.getenv("CM_SERVICE_NAME", None):
+        yield api.read_service(
+            cluster_name=base_cluster.name, service_name=os.getenv("CM_SERVICE_NAME")
+        )
+    else:
+        cluster_api = ClustersResourceApi(cm_api_client)
+        name = Path(request.node.name).stem + "_zookeeper"
+
+        service = ApiService(
+            name=name,
+            type="ZOOKEEPER",
+        )
+
+        api.create_services(
+            cluster_name=base_cluster.name, body=ApiServiceList(items=[service])
+        )
+        cluster_api.auto_assign_roles(cluster_name=base_cluster.name)
+
+        # configure = cluster_api.auto_configure(cluster_name=target_cluster.name)
+        wait_for_command(
+            cm_api_client,
+            api.first_run(cluster_name=base_cluster.name, service_name=name),
+        )
+
+        yield api.read_service(cluster_name=base_cluster.name, service_name=name)
+
+        api.delete_service(cluster_name=base_cluster.name, service_name=name)
+
 
 @pytest.fixture
-def conn():
-    conn = dict(username=os.getenv("CM_USERNAME"), password=os.getenv("CM_PASSWORD"))
+def target_service_config(cm_api_client, target_service, request):
+    marker = request.node.get_closest_marker("prepare")
 
-    if os.getenv("CM_HOST", None):
-        conn.update(host=os.getenv("CM_HOST"))
+    if marker is None:
+        raise Exception("No prepare marker found.")
+    elif "service_config" not in marker.kwargs:
+        raise Exception("No 'service_config' parameter found.")
 
-    if os.getenv("CM_PORT", None):
-        conn.update(port=os.getenv("CM_PORT"))
+    service_api = ServicesResourceApi(cm_api_client)
 
-    if os.getenv("CM_ENDPOINT", None):
-        conn.update(url=os.getenv("CM_ENDPOINT"))
+    # Retrieve all of the pre-setup configurations
+    pre = service_api.read_service_config(
+        cluster_name=target_service.cluster_ref.cluster_name,
+        service_name=target_service.name,
+    )
 
-    if os.getenv("CM_PROXY", None):
-        conn.update(proxy=os.getenv("CM_PROXY"))
+    # Set the test configurations
+    # Do so serially, since a failed update due to defaults (see ApiException) will cause remaining
+    # configuration entries to not run. Long-term solution is to check-and-set, which is
+    # what the Ansible modules do...
+    for k, v in marker.kwargs["service_config"].items():
+        try:
+            service_api.update_service_config(
+                cluster_name=target_service.cluster_ref.cluster_name,
+                service_name=target_service.name,
+                message=f"test_service_config::{request.node.name}:set",
+                body=ApiServiceConfig(items=[ApiConfig(name=k, value=v)]),
+            )
+        except ApiException as ae:
+            if ae.status != 400 or "delete with template" not in str(ae.body):
+                raise Exception(str(ae))
+
+    # Return the targeted service and go run the test
+    yield target_service
+
+    # Retrieve all of the post-setup configurations
+    post = service_api.read_service_config(
+        cluster_name=target_service.cluster_ref.cluster_name,
+        service_name=target_service.name,
+    )
+
+    # Reconcile the configurations
+    pre_set = set([c.name for c in pre.items])
+
+    reconciled = pre.items.copy()
+    reconciled.extend(
+        [
+            ApiConfig(name=k.name, value=None)
+            for k in post.items
+            if k.name not in pre_set
+        ]
+    )
 
-    return {
-        **conn,
-        "verify_tls": "no",
-        "debug": "no",
-    }
+    service_api.update_service_config(
+        cluster_name=target_service.cluster_ref.cluster_name,
+        service_name=target_service.name,
+        message=f"test_service_config::{request.node.name}::reset",
+        body=ApiServiceConfig(items=reconciled),
+    )
 
 
 def test_missing_required(conn, module_args):
@@ -62,60 +189,63 @@ def test_missing_required(conn, module_args):
 
 
 def test_missing_service(conn, module_args):
-    conn.update(service="example")
-    module_args(conn)
+    module_args({**conn, "service": "example"})
 
     with pytest.raises(AnsibleFailJson, match="cluster, parameters"):
         service_config.main()
 
 
 def test_missing_cluster(conn, module_args):
-    conn.update(cluster="example")
-    module_args(conn)
+    module_args({**conn, "cluster": "example"})
 
     with pytest.raises(AnsibleFailJson, match="parameters, service"):
         service_config.main()
 
 
 def test_missing_parameters(conn, module_args):
-    conn.update(parameters=dict(test="example"))
-    module_args(conn)
+    module_args({**conn, "parameters": dict(test="example")})
 
     with pytest.raises(AnsibleFailJson, match="cluster, service"):
         service_config.main()
 
 
 def test_present_invalid_cluster(conn, module_args):
-    conn.update(
-        cluster="example",
-        service="example",
-        parameters=dict(example="Example"),
+    module_args(
+        {
+            **conn,
+            "cluster": "example",
+            "service": "example",
+            "parameters": dict(example="Example"),
+        }
     )
-    module_args(conn)
 
     with pytest.raises(AnsibleFailJson, match="Cluster does not exist"):
         service_config.main()
 
 
-def test_present_invalid_service(conn, module_args):
-    conn.update(
-        cluster=os.getenv("CM_CLUSTER"),
-        service="example",
-        parameters=dict(example="Example"),
+def test_present_invalid_service(conn, module_args, target_service):
+    module_args(
+        {
+            **conn,
+            "cluster": target_service.cluster_ref.cluster_name,
+            "service": "example",
+            "parameters": dict(example="Example"),
+        }
     )
-    module_args(conn)
 
     with pytest.raises(AnsibleFailJson, match="Service 'example' not found"):
         service_config.main()
 
 
-def test_present_invalid_parameter(conn, module_args):
-    conn.update(
-        cluster=os.getenv("CM_CLUSTER"),
-        service=os.getenv("CM_SERVICE"),
-        parameters=dict(example="Example"),
+def test_present_invalid_parameter(conn, module_args, target_service):
+    module_args(
+        {
+            **conn,
+            "cluster": target_service.cluster_ref.cluster_name,
+            "service": target_service.name,
+            "parameters": dict(example="Example"),
+        }
     )
-    module_args(conn)
 
     with pytest.raises(
         AnsibleFailJson, match="Unknown configuration attribute 'example'"
@@ -123,40 +253,49 @@ def test_present_invalid_parameter(conn, module_args):
         service_config.main()
 
 
-def test_set_parameters(conn, module_args):
-    conn.update(
-        cluster=os.getenv("CM_CLUSTER"),
-        service=os.getenv("CM_SERVICE"),
-        parameters=dict(autopurgeSnapRetainCount=9),
-        _ansible_check_mode=True,
-        _ansible_diff=True,
+@pytest.mark.service_config(dict(autopurgeSnapRetainCount=None, tickTime=1111))
+def test_set_parameters(conn, module_args, zk_service_config):
+    module_args(
+        {
+            **conn,
+            "cluster": zk_service_config.cluster_ref.cluster_name,
+            "service": zk_service_config.name,
+            "parameters": dict(autopurgeSnapRetainCount=9),
+            "message": "test_service_config::test_set_parameters",
+            # "_ansible_check_mode": True,
+            # "_ansible_diff": True,
+        }
     )
-    module_args(conn)
+
+    expected = dict(autopurgeSnapRetainCount="9", tickTime="1111")
 
     with pytest.raises(AnsibleExitJson) as e:
        service_config.main()
 
     assert e.value.changed == True
-    assert {c["name"]: c["value"] for c in e.value.config}[
-        "autopurgeSnapRetainCount"
-    ] == "9"
+    assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items()
 
+    # Idempotency
     with pytest.raises(AnsibleExitJson) as e:
         service_config.main()
 
     assert e.value.changed == False
-    assert {c["name"]: c["value"] for c in e.value.config}[
-        "autopurgeSnapRetainCount"
-    ] == "9"
-
-
-def test_unset_parameters(conn, module_args):
-    conn.update(
-        cluster=os.getenv("CM_CLUSTER"),
-        service=os.getenv("CM_SERVICE"),
-        parameters=dict(autopurgeSnapRetainCount=None),
+    assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items()
+
+
+@pytest.mark.service_config(dict(autopurgeSnapRetainCount=7, tickTime=1111))
+def test_unset_parameters(conn, module_args, zk_service_config):
+    module_args(
+        {
+            **conn,
+            "cluster": zk_service_config.cluster_ref.cluster_name,
+            "service": zk_service_config.name,
+            "parameters": dict(autopurgeSnapRetainCount=None),
+            "message": "test_service_config::test_unset_parameters",
+        }
     )
-    module_args(conn)
+
+    expected = dict(tickTime="1111")
 
     with pytest.raises(AnsibleExitJson) as e:
         service_config.main()
@@ -164,53 +303,62 @@ def test_unset_parameters(conn, module_args):
 
     assert e.value.changed == True
     results = {c["name"]: c["value"] for c in e.value.config}
     assert "autopurgeSnapRetainCount" not in results
+    assert expected.items() <= results.items()
 
+    # Idempotency
     with pytest.raises(AnsibleExitJson) as e:
         service_config.main()
 
     assert e.value.changed == False
     results = {c["name"]: c["value"] for c in e.value.config}
     assert "autopurgeSnapRetainCount" not in results
-
-
-def test_set_parameters_with_purge(conn, module_args):
-    conn.update(
-        cluster=os.getenv("CM_CLUSTER"),
-        service=os.getenv("CM_SERVICE"),
-        parameters=dict(autopurgeSnapRetainCount=9),
-        purge=True,
-        _ansible_check_mode=True,
-        _ansible_diff=True,
+    assert expected.items() <= results.items()
+
+
+@pytest.mark.service_config(dict(autopurgeSnapRetainCount=7, tickTime=1111))
+def test_set_parameters_with_purge(conn, module_args, zk_service_config):
+    module_args(
+        {
+            **conn,
+            "cluster": zk_service_config.cluster_ref.cluster_name,
+            "service": zk_service_config.name,
+            "parameters": dict(autopurgeSnapRetainCount=9),
+            "purge": True,
+            "message": "test_service_config::test_set_parameters_with_purge",
+            # "_ansible_check_mode": True,
+            # "_ansible_diff": True,
+        }
     )
-    module_args(conn)
+
+    expected = dict(autopurgeSnapRetainCount="9")
 
     with pytest.raises(AnsibleExitJson) as e:
         service_config.main()
 
     assert e.value.changed == True
-    assert {c["name"]: c["value"] for c in e.value.config}[
-        "autopurgeSnapRetainCount"
-    ] == "9"
+    assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items()
 
     with pytest.raises(AnsibleExitJson) as e:
         service_config.main()
 
     assert e.value.changed == False
-    assert {c["name"]: c["value"] for c in e.value.config}[
-        "autopurgeSnapRetainCount"
-    ] == "9"
-
-
-def test_purge_all_parameters(conn, module_args):
-    conn.update(
-        cluster=os.getenv("CM_CLUSTER"),
-        service=os.getenv("CM_SERVICE"),
-        parameters=dict(),
-        purge=True,
-        _ansible_check_mode=True,
-        _ansible_diff=True,
+    assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items()
+
+
+@pytest.mark.service_config(dict(autopurgeSnapRetainCount=8, tickTime=2222))
+def test_purge_all_parameters(conn, module_args, zk_service_config):
+    module_args(
+        {
+            **conn,
+            "cluster": zk_service_config.cluster_ref.cluster_name,
+            "service": zk_service_config.name,
+            "parameters": dict(),
+            "purge": True,
+            "message": "test_service_config::test_purge_all_parameters",
+            # "_ansible_check_mode": True,
+            # "_ansible_diff": True,
+        }
    )
-    module_args(conn)
 
     with pytest.raises(AnsibleExitJson) as e:
         service_config.main()