From 67239eede40fb58632bc8b8171350545f35b152c Mon Sep 17 00:00:00 2001
From: Webster Mudge <wmudge@cloudera.com>
Date: Wed, 11 Dec 2024 10:45:34 -0500
Subject: [PATCH 01/13] Add pyproject.toml for Hatch and pytest

Signed-off-by: Webster Mudge <wmudge@cloudera.com>
---
 pyproject.toml | 61 ++++++++++++++++++++++++++++++++++++++++++++++++++
 pytest.ini     | 23 -------------------
 2 files changed, 61 insertions(+), 23 deletions(-)
 create mode 100644 pyproject.toml
 delete mode 100644 pytest.ini

diff --git a/pyproject.toml b/pyproject.toml
new file mode 100644
index 00000000..d5ea7976
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,61 @@
+[project]
+name = "cluster"
+dynamic = ["version"]
+description = "cloudera.cluster Ansible collection"
+readme = "README.md"
+requires-python = ">=3.8"
+license = "Apache-2.0"
+keywords = []
+authors = [
+    { name = "Webster Mudge", email = "wmudge@cloudera.com" },
+]
+classifiers = []
+dependencies = []
+
+[tool.hatch.version]
+path = "galaxy.yml"
+pattern = "version:\\s+(?P<version>[\\d\\.]+)"
+
+[tool.hatch.envs.default]
+python = "3.12"
+skip-install = true
+dependencies = [
+    "pre-commit",
+    "coverage[toml]",
+    "pytest",
+    "pytest-mock",
+    # "pytest-cov",
+    "molecule",
+    "molecule-plugins",
+    "molecule-plugins[ec2]",
+    "tox-ansible",
+    "ansible-core<2.17", # For RHEL 8 support
+    "jmespath",
+    "cm-client",
+]
+
+[tool.hatch.envs.lint]
+python = "3.12"
+skip-install = true
+extra-dependencies = [
+    "ansible-lint",
+]
+
+[tool.hatch.envs.lint.scripts]
+run = "pre-commit run -a"
+
+[tool.pytest.ini_options]
+testpaths = [
+    "tests",
+]
+filterwarnings = [
+    "ignore:AnsibleCollectionFinder has already been configured",
+    "ignore:'crypt' is deprecated and slated for removal in Python 3.13:DeprecationWarning",
+]
+markers = [
+    "prep: Prepare Cloudera Manager and resources for tests",
+]
+
+[build-system]
+requires = ["hatchling"]
+build-backend = "hatchling.build"
diff --git a/pytest.ini b/pytest.ini
deleted file mode 100644
index 03f164ec..00000000
--- a/pytest.ini
+++ /dev/null
@@ -1,23 +0,0 @@
-# Copyright 2023 Cloudera, Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-[pytest]
-filterwarnings =
-    ignore::DeprecationWarning
-    ignore:AnsibleCollectionFinder has already been configured:UserWarning
-
-; log_cli = 1
-; log_cli_level = INFO
-
-pythonpath = "../../../"

From a78576a77b0df4b9460e8283b16fe3eeb898f3b0 Mon Sep 17 00:00:00 2001
From: Webster Mudge <wmudge@cloudera.com>
Date: Wed, 11 Dec 2024 10:47:57 -0500
Subject: [PATCH 02/13] Remove pytest discovery hack

Signed-off-by: Webster Mudge <wmudge@cloudera.com>
---
 tests/unit/conftest.py | 30 +++++++++++++++++++++++++-----
 1 file changed, 25 insertions(+), 5 deletions(-)

diff --git a/tests/unit/conftest.py b/tests/unit/conftest.py
index d86218dd..6829f307 100644
--- a/tests/unit/conftest.py
+++ b/tests/unit/conftest.py
@@ -26,11 +26,11 @@
 from ansible.module_utils import basic
 from ansible.module_utils.common.text.converters import to_bytes

-# Required for pytest discovery in VSCode, reasons unknown...
-try: - from ansible.plugins.action import ActionBase -except ModuleNotFoundError: - pass +# # Required for pytest discovery in VSCode, reasons unknown... +# try: +# from ansible.plugins.action import ActionBase +# except ModuleNotFoundError: +# pass from ansible_collections.cloudera.cluster.tests.unit import ( AnsibleFailJson, @@ -84,3 +84,23 @@ def prep_args(args: str = ""): basic._ANSIBLE_ARGS = to_bytes(output) return prep_args + + +# class AnsibleExitJson(Exception): +# """Exception class to be raised by module.exit_json and caught by the test case""" + +# def __init__(self, kwargs): +# super(AnsibleExitJson, self).__init__( +# kwargs.get("msg", "General module success") +# ) +# self.__dict__.update(kwargs) + + +# class AnsibleFailJson(Exception): +# """Exception class to be raised by module.fail_json and caught by the test case""" + +# def __init__(self, kwargs): +# super(AnsibleFailJson, self).__init__( +# kwargs.get("msg", "General module failure") +# ) +# self.__dict__.update(kwargs) From 477f2cdb3d854d714a0c37799766b8dba50a8696 Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Wed, 11 Dec 2024 10:54:05 -0500 Subject: [PATCH 03/13] Update service-wide cluster service configuration tests to use auto-generated cluster resources Signed-off-by: Webster Mudge --- .../service_config/test_service_config.py | 264 +++++++++++++++++- 1 file changed, 252 insertions(+), 12 deletions(-) diff --git a/tests/unit/plugins/modules/service_config/test_service_config.py b/tests/unit/plugins/modules/service_config/test_service_config.py index 1750722c..4c9a73bc 100644 --- a/tests/unit/plugins/modules/service_config/test_service_config.py +++ b/tests/unit/plugins/modules/service_config/test_service_config.py @@ -22,6 +22,30 @@ import os import pytest +from pathlib import Path +from time import sleep + +from cm_client import ( + ClustersResourceApi, + Configuration, + ApiClient, + ApiClusterList, + ApiCluster, + ApiCommand, + ApiConfig, + ParcelResourceApi, + ApiHostRefList, + ApiHostRef, + ApiParcel, + ApiParcelList, + ApiServiceList, + ApiService, + ApiServiceConfig, + CommandsResourceApi, + ServicesResourceApi, +) +from cm_client.rest import ApiException, RESTClientObject + from ansible_collections.cloudera.cluster.plugins.modules import service_config from ansible_collections.cloudera.cluster.tests.unit import ( AnsibleExitJson, @@ -31,7 +55,7 @@ LOG = logging.getLogger(__name__) -@pytest.fixture +@pytest.fixture(scope="session") def conn(): conn = dict(username=os.getenv("CM_USERNAME"), password=os.getenv("CM_PASSWORD")) @@ -54,6 +78,190 @@ def conn(): } +@pytest.fixture(scope="session") +def prep_client(conn): + """Create a Cloudera Manager API client, resolving HTTP/S and version URL. 
+ + Args: + conn (dict): Connection details + + Returns: + ApiClient: Cloudera Manager API client + """ + config = Configuration() + + config.username = conn["username"] + config.password = conn["password"] + + if "url" in conn: + config.host = str(conn["url"]).rstrip(" /") + else: + rest = RESTClientObject() + + # Handle redirects + url = rest.GET(conn["host"]).urllib3_response.geturl() + + # Get version + auth = config.auth_settings().get("basic") + version = rest.GET( + f"{url}api/version", headers={auth["key"]: auth["value"]} + ).data + + # Set host + config.host = f"{url}api/{version}" + + client = ApiClient() + client.user_agent = "pytest" + return client + + +@pytest.fixture( + scope="module", +) +def prep_cluster(prep_client, request): + """Create a 7.1.9 test cluster using the module name.""" + + marker = request.node.get_closest_marker("prep") + if marker is None: + raise Exception("Preparation marker not found.") + elif "version" not in marker.kwargs: + raise Exception("Cluster version parameter not found.") + elif "hosts" not in marker.kwargs: + raise Exception("Cluster hosts parameter not found.") + else: + version = marker.kwargs["version"] + hosts = marker.kwargs["hosts"] + + cluster_api = ClustersResourceApi(prep_client) + parcel_api = ParcelResourceApi(prep_client) + + try: + config = ApiCluster( + name=request.node.name, + full_version=version, + ) + # Create the cluster + clusters = cluster_api.create_clusters(body=ApiClusterList(items=[config])) + + # Activate the parcel(s) + from ansible_collections.cloudera.cluster.plugins.module_utils.parcel_utils import ( + Parcel, + ) + + parcel = Parcel( + parcel_api=parcel_api, + product="CDH", + version=version, + cluster=request.node.name, + ) + + cluster_api.add_hosts( + cluster_name=request.node.name, + body=ApiHostRefList(items=[ApiHostRef(hostname=h) for h in hosts]), + ) + yield clusters.items[0] + cluster_api.delete_cluster(cluster_name=request.node.name) + except ApiException as ae: + raise Exception(str(ae)) + + +@pytest.mark.skip +def test_wip_cluster(prep_cluster): + results = prep_cluster + print(results) + + +def wait_for_command( + api_client: ApiClient, command: ApiCommand, polling: int = 10, delay: int = 5 +): + poll_count = 0 + while command.active: + if poll_count > polling: + raise Exception("CM command timeout") + sleep(delay) + poll_count += 1 + command = CommandsResourceApi(api_client).read_command(command.id) + if not command.success: + raise Exception(f"CM command [{command.id}] failed: {command.result_message}") + + +@pytest.fixture(scope="module") +def prep_service(prep_client, request): + api = ServicesResourceApi(prep_client) + cluster_api = ClustersResourceApi(prep_client) + + name = Path(request.node.name).stem + "_zookeeper" + + service = ApiService( + name=name, + type="ZOOKEEPER", + ) + + api.create_services(cluster_name="TestOne", body=ApiServiceList(items=[service])) + cluster_api.auto_assign_roles(cluster_name="TestOne") + + # configure = cluster_api.auto_configure(cluster_name="TestOne") + wait_for_command( + prep_client, api.first_run(cluster_name="TestOne", service_name=name) + ) + + yield api.read_service(cluster_name="TestOne", service_name=name) + + api.delete_service(cluster_name="TestOne", service_name=name) + + +def test_wip_service(prep_service): + results = prep_service + print(results) + + +@pytest.fixture +def prep_service_config(prep_client, request): + marker = request.node.get_closest_marker("prep") + + if marker is None: + raise Exception("Unable to determine parameter to 
prepare") + elif len(marker.args) != 3: + raise Exception("Invalid number of values for parameter preparation") + else: + cluster = marker.args[0] + service = marker.args[1] + params = marker.args[2] + + api = ServicesResourceApi(prep_client) + + # Set the parameter + try: + api.update_service_config( + cluster_name=cluster, + service_name=service, + message=f"test_service_config::{request.node.name}:set", + body=ApiServiceConfig( + items=[ApiConfig(name=k, value=v) for k, v in params.items()] + ), + ) + except ApiException as ae: + if ae.status != 400 or "delete with template" not in str(ae.body): + raise Exception(str(ae)) + + # Go run the test + yield + + # Reset the parameter + try: + api.update_service_config( + cluster_name=cluster, + service_name=service, + message=f"test_service_config::{request.node.name}::reset", + body=ApiServiceConfig( + items=[ApiConfig(name=k, value=v) for k, v in params.items()] + ), + ) + except ApiException as ae: + if ae.status != 400 or "delete with template" not in str(ae.body): + raise Exception(str(ae)) + + def test_missing_required(conn, module_args): module_args(conn) @@ -94,7 +302,7 @@ def test_present_invalid_cluster(conn, module_args): module_args(conn) with pytest.raises(AnsibleFailJson, match="Cluster does not exist"): - service_config.main() + prep_service_config.main() def test_present_invalid_service(conn, module_args): @@ -123,13 +331,19 @@ def test_present_invalid_parameter(conn, module_args): service_config.main() -def test_set_parameters(conn, module_args): +@pytest.mark.prep( + os.getenv("CM_CLUSTER"), + os.getenv("CM_SERVICE"), + dict(autopurgeSnapRetainCount=None, tickTime=1111), +) +def test_set_parameters(conn, module_args, prep_service_config): conn.update( cluster=os.getenv("CM_CLUSTER"), service=os.getenv("CM_SERVICE"), parameters=dict(autopurgeSnapRetainCount=9), - _ansible_check_mode=True, - _ansible_diff=True, + message="test_service_config::test_set_parameters", + # _ansible_check_mode=True, + # _ansible_diff=True, ) module_args(conn) @@ -140,7 +354,9 @@ def test_set_parameters(conn, module_args): assert {c["name"]: c["value"] for c in e.value.config}[ "autopurgeSnapRetainCount" ] == "9" + assert len(e.value.config) == 2 + # Idempotency with pytest.raises(AnsibleExitJson) as e: service_config.main() @@ -148,13 +364,20 @@ def test_set_parameters(conn, module_args): assert {c["name"]: c["value"] for c in e.value.config}[ "autopurgeSnapRetainCount" ] == "9" + assert len(e.value.config) == 2 -def test_unset_parameters(conn, module_args): +@pytest.mark.prep( + os.getenv("CM_CLUSTER"), + os.getenv("CM_SERVICE"), + dict(autopurgeSnapRetainCount=7, tickTime=1111), +) +def test_unset_parameters(conn, module_args, prep_service_config): conn.update( cluster=os.getenv("CM_CLUSTER"), service=os.getenv("CM_SERVICE"), parameters=dict(autopurgeSnapRetainCount=None), + message="test_service_config::test_unset_parameters", ) module_args(conn) @@ -164,23 +387,32 @@ def test_unset_parameters(conn, module_args): assert e.value.changed == True results = {c["name"]: c["value"] for c in e.value.config} assert "autopurgeSnapRetainCount" not in results + assert len(e.value.config) == 1 + # Idempotency with pytest.raises(AnsibleExitJson) as e: service_config.main() assert e.value.changed == False results = {c["name"]: c["value"] for c in e.value.config} assert "autopurgeSnapRetainCount" not in results + assert len(e.value.config) == 1 -def test_set_parameters_with_purge(conn, module_args): +@pytest.mark.prep( + os.getenv("CM_CLUSTER"), + 
os.getenv("CM_SERVICE"), + dict(autopurgeSnapRetainCount=7, tickTime=1111), +) +def test_set_parameters_with_purge(conn, module_args, prep_service_config): conn.update( cluster=os.getenv("CM_CLUSTER"), service=os.getenv("CM_SERVICE"), parameters=dict(autopurgeSnapRetainCount=9), purge=True, - _ansible_check_mode=True, - _ansible_diff=True, + message="test_service_config::test_set_parameters_with_purge", + # _ansible_check_mode=True, + # _ansible_diff=True, ) module_args(conn) @@ -191,6 +423,7 @@ def test_set_parameters_with_purge(conn, module_args): assert {c["name"]: c["value"] for c in e.value.config}[ "autopurgeSnapRetainCount" ] == "9" + assert len(e.value.config) == 1 with pytest.raises(AnsibleExitJson) as e: service_config.main() @@ -199,16 +432,23 @@ def test_set_parameters_with_purge(conn, module_args): assert {c["name"]: c["value"] for c in e.value.config}[ "autopurgeSnapRetainCount" ] == "9" + assert len(e.value.config) == 1 -def test_purge_all_parameters(conn, module_args): +@pytest.mark.prep( + os.getenv("CM_CLUSTER"), + os.getenv("CM_SERVICE"), + dict(autopurgeSnapRetainCount=8, tickTime=2222), +) +def test_purge_all_parameters(conn, module_args, prep_service_config): conn.update( cluster=os.getenv("CM_CLUSTER"), service=os.getenv("CM_SERVICE"), parameters=dict(), purge=True, - _ansible_check_mode=True, - _ansible_diff=True, + message="test_service_config::test_purge_all_parameters", + # _ansible_check_mode=True, + # _ansible_diff=True, ) module_args(conn) From 33efb35f688a06549b9e42cb0636e6f94cedde7f Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Wed, 11 Dec 2024 12:36:25 -0500 Subject: [PATCH 04/13] Update marker name Signed-off-by: Webster Mudge --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index d5ea7976..a36945c5 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -53,7 +53,7 @@ filterwarnings = [ "ignore:'crypt' is deprecated and slated for removal in Python 3.13:DeprecationWarning", ] markers = [ - "prep: Prepare Cloudera Manager and resources for tests", + "prepare: Prepare Cloudera Manager and resources for tests", ] [build-system] From db89298b6465caf35fa7d557b9bfc36afe340358 Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Wed, 11 Dec 2024 12:37:53 -0500 Subject: [PATCH 05/13] Configure pytest fixtures to create target service for testing Signed-off-by: Webster Mudge --- .../service_config/test_service_config.py | 201 ++++++++---------- 1 file changed, 89 insertions(+), 112 deletions(-) diff --git a/tests/unit/plugins/modules/service_config/test_service_config.py b/tests/unit/plugins/modules/service_config/test_service_config.py index 4c9a73bc..c3b5dc47 100644 --- a/tests/unit/plugins/modules/service_config/test_service_config.py +++ b/tests/unit/plugins/modules/service_config/test_service_config.py @@ -79,7 +79,7 @@ def conn(): @pytest.fixture(scope="session") -def prep_client(conn): +def cm_api_client(conn): """Create a Cloudera Manager API client, resolving HTTP/S and version URL. 
Args: @@ -115,10 +115,8 @@ def prep_client(conn): return client -@pytest.fixture( - scope="module", -) -def prep_cluster(prep_client, request): +@pytest.fixture(scope="module") +def prep_cluster(cm_api_client, request): """Create a 7.1.9 test cluster using the module name.""" marker = request.node.get_closest_marker("prep") @@ -132,8 +130,8 @@ def prep_cluster(prep_client, request): version = marker.kwargs["version"] hosts = marker.kwargs["hosts"] - cluster_api = ClustersResourceApi(prep_client) - parcel_api = ParcelResourceApi(prep_client) + cluster_api = ClustersResourceApi(cm_api_client) + parcel_api = ParcelResourceApi(cm_api_client) try: config = ApiCluster( @@ -186,9 +184,9 @@ def wait_for_command( @pytest.fixture(scope="module") -def prep_service(prep_client, request): - api = ServicesResourceApi(prep_client) - cluster_api = ClustersResourceApi(prep_client) +def target_service(cm_api_client, request): + api = ServicesResourceApi(cm_api_client) + cluster_api = ClustersResourceApi(cm_api_client) name = Path(request.node.name).stem + "_zookeeper" @@ -202,7 +200,7 @@ def prep_service(prep_client, request): # configure = cluster_api.auto_configure(cluster_name="TestOne") wait_for_command( - prep_client, api.first_run(cluster_name="TestOne", service_name=name) + cm_api_client, api.first_run(cluster_name="TestOne", service_name=name) ) yield api.read_service(cluster_name="TestOne", service_name=name) @@ -210,56 +208,48 @@ def prep_service(prep_client, request): api.delete_service(cluster_name="TestOne", service_name=name) -def test_wip_service(prep_service): - results = prep_service - print(results) - - @pytest.fixture -def prep_service_config(prep_client, request): - marker = request.node.get_closest_marker("prep") +def target_service_config(cm_api_client, target_service, request): + marker = request.node.get_closest_marker("prepare") if marker is None: - raise Exception("Unable to determine parameter to prepare") - elif len(marker.args) != 3: - raise Exception("Invalid number of values for parameter preparation") - else: - cluster = marker.args[0] - service = marker.args[1] - params = marker.args[2] - - api = ServicesResourceApi(prep_client) - - # Set the parameter - try: - api.update_service_config( - cluster_name=cluster, - service_name=service, - message=f"test_service_config::{request.node.name}:set", - body=ApiServiceConfig( - items=[ApiConfig(name=k, value=v) for k, v in params.items()] - ), - ) - except ApiException as ae: - if ae.status != 400 or "delete with template" not in str(ae.body): - raise Exception(str(ae)) - - # Go run the test - yield + raise Exception("No prepare marker found.") + elif "service_config" not in marker.kwargs: + raise Exception("No 'service_config' parameter found.") + + service_api = ServicesResourceApi(cm_api_client) + + # Set the parameter(s) + # Do so serially, since a failed update due to defaults (see ApiException) will cause remaining + # configuration entries to not run. Long-term solution is to check-and-set, which is + # what the Ansible modules do... 
+ for k, v in marker.kwargs["service_config"].items(): + try: + service_api.update_service_config( + cluster_name=target_service.cluster_ref.cluster_name, + service_name=target_service.name, + message=f"test_service_config::{request.node.name}:set", + body=ApiServiceConfig(items=[ApiConfig(name=k, value=v)]), + ) + except ApiException as ae: + if ae.status != 400 or "delete with template" not in str(ae.body): + raise Exception(str(ae)) + + # Return the targeted service and go run the test + yield target_service # Reset the parameter - try: - api.update_service_config( - cluster_name=cluster, - service_name=service, - message=f"test_service_config::{request.node.name}::reset", - body=ApiServiceConfig( - items=[ApiConfig(name=k, value=v) for k, v in params.items()] - ), - ) - except ApiException as ae: - if ae.status != 400 or "delete with template" not in str(ae.body): - raise Exception(str(ae)) + for k, v in marker.kwargs["service_config"].items(): + try: + service_api.update_service_config( + cluster_name=target_service.cluster_ref.cluster_name, + service_name=target_service.name, + message=f"test_service_config::{request.node.name}::reset", + body=ApiServiceConfig(items=[ApiConfig(name=k, value=v)]), + ) + except ApiException as ae: + if ae.status != 400 or "delete with template" not in str(ae.body): + raise Exception(str(ae)) def test_missing_required(conn, module_args): @@ -270,60 +260,63 @@ def test_missing_required(conn, module_args): def test_missing_service(conn, module_args): - conn.update(service="example") - module_args(conn) + module_args({**conn, "service": "example"}) with pytest.raises(AnsibleFailJson, match="cluster, parameters"): service_config.main() def test_missing_cluster(conn, module_args): - conn.update(cluster="example") - module_args(conn) + module_args({**conn, "cluster": "example"}) with pytest.raises(AnsibleFailJson, match="parameters, service"): service_config.main() def test_missing_parameters(conn, module_args): - conn.update(parameters=dict(test="example")) - module_args(conn) + module_args({**conn, "parameters": dict(test="example")}) with pytest.raises(AnsibleFailJson, match="cluster, service"): service_config.main() def test_present_invalid_cluster(conn, module_args): - conn.update( - cluster="example", - service="example", - parameters=dict(example="Example"), + module_args( + { + **conn, + "cluster": "example", + "service": "example", + "parameters": dict(example="Example"), + } ) - module_args(conn) with pytest.raises(AnsibleFailJson, match="Cluster does not exist"): - prep_service_config.main() + service_config.main() -def test_present_invalid_service(conn, module_args): - conn.update( - cluster=os.getenv("CM_CLUSTER"), - service="example", - parameters=dict(example="Example"), +def test_present_invalid_service(conn, module_args, target_service): + module_args( + { + **conn, + "cluster": target_service.cluster_ref.cluster_name, + "service": "example", + "parameters": dict(example="Example"), + } ) - module_args(conn) with pytest.raises(AnsibleFailJson, match="Service 'example' not found"): service_config.main() -def test_present_invalid_parameter(conn, module_args): - conn.update( - cluster=os.getenv("CM_CLUSTER"), - service=os.getenv("CM_SERVICE"), - parameters=dict(example="Example"), +def test_present_invalid_parameter(conn, module_args, target_service): + module_args( + { + **conn, + "cluster": target_service.cluster_ref.cluster_name, + "service": target_service.name, + "parameters": dict(example="Example"), + } ) - module_args(conn) with 
pytest.raises( AnsibleFailJson, match="Unknown configuration attribute 'example'" @@ -331,15 +324,11 @@ def test_present_invalid_parameter(conn, module_args): service_config.main() -@pytest.mark.prep( - os.getenv("CM_CLUSTER"), - os.getenv("CM_SERVICE"), - dict(autopurgeSnapRetainCount=None, tickTime=1111), -) -def test_set_parameters(conn, module_args, prep_service_config): +@pytest.mark.prepare(service_config=dict(autopurgeSnapRetainCount=None, tickTime=1111)) +def test_set_parameters(conn, module_args, target_service_config): conn.update( - cluster=os.getenv("CM_CLUSTER"), - service=os.getenv("CM_SERVICE"), + cluster=target_service_config.cluster_ref.cluster_name, + service=target_service_config.name, parameters=dict(autopurgeSnapRetainCount=9), message="test_service_config::test_set_parameters", # _ansible_check_mode=True, @@ -367,15 +356,11 @@ def test_set_parameters(conn, module_args, prep_service_config): assert len(e.value.config) == 2 -@pytest.mark.prep( - os.getenv("CM_CLUSTER"), - os.getenv("CM_SERVICE"), - dict(autopurgeSnapRetainCount=7, tickTime=1111), -) -def test_unset_parameters(conn, module_args, prep_service_config): +@pytest.mark.prepare(service_config=dict(autopurgeSnapRetainCount=7, tickTime=1111)) +def test_unset_parameters(conn, module_args, target_service_config): conn.update( - cluster=os.getenv("CM_CLUSTER"), - service=os.getenv("CM_SERVICE"), + cluster=target_service_config.cluster_ref.cluster_name, + service=target_service_config.name, parameters=dict(autopurgeSnapRetainCount=None), message="test_service_config::test_unset_parameters", ) @@ -399,15 +384,11 @@ def test_unset_parameters(conn, module_args, prep_service_config): assert len(e.value.config) == 1 -@pytest.mark.prep( - os.getenv("CM_CLUSTER"), - os.getenv("CM_SERVICE"), - dict(autopurgeSnapRetainCount=7, tickTime=1111), -) -def test_set_parameters_with_purge(conn, module_args, prep_service_config): +@pytest.mark.prepare(service_config=dict(autopurgeSnapRetainCount=7, tickTime=1111)) +def test_set_parameters_with_purge(conn, module_args, target_service_config): conn.update( - cluster=os.getenv("CM_CLUSTER"), - service=os.getenv("CM_SERVICE"), + cluster=target_service_config.cluster_ref.cluster_name, + service=target_service_config.name, parameters=dict(autopurgeSnapRetainCount=9), purge=True, message="test_service_config::test_set_parameters_with_purge", @@ -435,15 +416,11 @@ def test_set_parameters_with_purge(conn, module_args, prep_service_config): assert len(e.value.config) == 1 -@pytest.mark.prep( - os.getenv("CM_CLUSTER"), - os.getenv("CM_SERVICE"), - dict(autopurgeSnapRetainCount=8, tickTime=2222), -) -def test_purge_all_parameters(conn, module_args, prep_service_config): +@pytest.mark.prepare(service_config=dict(autopurgeSnapRetainCount=8, tickTime=2222)) +def test_purge_all_parameters(conn, module_args, target_service_config): conn.update( - cluster=os.getenv("CM_CLUSTER"), - service=os.getenv("CM_SERVICE"), + cluster=target_service_config.cluster_ref.cluster_name, + service=target_service_config.name, parameters=dict(), purge=True, message="test_service_config::test_purge_all_parameters", From ec8e06ce722ba2c1ee0106fab1c346a8a6c8e22f Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Wed, 11 Dec 2024 13:19:07 -0500 Subject: [PATCH 06/13] Update module_args() for mutation tests Signed-off-by: Webster Mudge --- .../service_config/test_service_config.py | 72 ++++++++++--------- 1 file changed, 40 insertions(+), 32 deletions(-) diff --git 
a/tests/unit/plugins/modules/service_config/test_service_config.py b/tests/unit/plugins/modules/service_config/test_service_config.py index c3b5dc47..342863f0 100644 --- a/tests/unit/plugins/modules/service_config/test_service_config.py +++ b/tests/unit/plugins/modules/service_config/test_service_config.py @@ -326,15 +326,17 @@ def test_present_invalid_parameter(conn, module_args, target_service): @pytest.mark.prepare(service_config=dict(autopurgeSnapRetainCount=None, tickTime=1111)) def test_set_parameters(conn, module_args, target_service_config): - conn.update( - cluster=target_service_config.cluster_ref.cluster_name, - service=target_service_config.name, - parameters=dict(autopurgeSnapRetainCount=9), - message="test_service_config::test_set_parameters", - # _ansible_check_mode=True, - # _ansible_diff=True, + module_args( + { + **conn, + "cluster": target_service_config.cluster_ref.cluster_name, + "service": target_service_config.name, + "parameters": dict(autopurgeSnapRetainCount=9), + "message": "test_service_config::test_set_parameters", + # "_ansible_check_mode": True, + # "_ansible_diff": True, + } ) - module_args(conn) with pytest.raises(AnsibleExitJson) as e: service_config.main() @@ -358,13 +360,15 @@ def test_set_parameters(conn, module_args, target_service_config): @pytest.mark.prepare(service_config=dict(autopurgeSnapRetainCount=7, tickTime=1111)) def test_unset_parameters(conn, module_args, target_service_config): - conn.update( - cluster=target_service_config.cluster_ref.cluster_name, - service=target_service_config.name, - parameters=dict(autopurgeSnapRetainCount=None), - message="test_service_config::test_unset_parameters", + module_args( + { + **conn, + "cluster": target_service_config.cluster_ref.cluster_name, + "service": target_service_config.name, + "parameters": dict(autopurgeSnapRetainCount=None), + "message": "test_service_config::test_unset_parameters", + } ) - module_args(conn) with pytest.raises(AnsibleExitJson) as e: service_config.main() @@ -386,16 +390,18 @@ def test_unset_parameters(conn, module_args, target_service_config): @pytest.mark.prepare(service_config=dict(autopurgeSnapRetainCount=7, tickTime=1111)) def test_set_parameters_with_purge(conn, module_args, target_service_config): - conn.update( - cluster=target_service_config.cluster_ref.cluster_name, - service=target_service_config.name, - parameters=dict(autopurgeSnapRetainCount=9), - purge=True, - message="test_service_config::test_set_parameters_with_purge", - # _ansible_check_mode=True, - # _ansible_diff=True, + module_args( + { + **conn, + "cluster": target_service_config.cluster_ref.cluster_name, + "service": target_service_config.name, + "parameters": dict(autopurgeSnapRetainCount=9), + "purge": True, + "message": "test_service_config::test_set_parameters_with_purge", + # "_ansible_check_mode": True, + # "_ansible_diff": True, + } ) - module_args(conn) with pytest.raises(AnsibleExitJson) as e: service_config.main() @@ -418,16 +424,18 @@ def test_set_parameters_with_purge(conn, module_args, target_service_config): @pytest.mark.prepare(service_config=dict(autopurgeSnapRetainCount=8, tickTime=2222)) def test_purge_all_parameters(conn, module_args, target_service_config): - conn.update( - cluster=target_service_config.cluster_ref.cluster_name, - service=target_service_config.name, - parameters=dict(), - purge=True, - message="test_service_config::test_purge_all_parameters", - # _ansible_check_mode=True, - # _ansible_diff=True, + module_args( + { + **conn, + "cluster": 
target_service_config.cluster_ref.cluster_name, + "service": target_service_config.name, + "parameters": dict(), + "purge": True, + "message": "test_service_config::test_purge_all_parameters", + # "_ansible_check_mode": True, + # "_ansible_diff": True, + } ) - module_args(conn) with pytest.raises(AnsibleExitJson) as e: service_config.main() From 9293ff070e0bea2e619c3a1ed14e4d110f4e4844 Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Wed, 11 Dec 2024 16:17:25 -0500 Subject: [PATCH 07/13] Update to create target cluster for all session tests Signed-off-by: Webster Mudge --- .../service_config/test_service_config.py | 120 ++++++++++-------- 1 file changed, 69 insertions(+), 51 deletions(-) diff --git a/tests/unit/plugins/modules/service_config/test_service_config.py b/tests/unit/plugins/modules/service_config/test_service_config.py index 342863f0..4505f3cc 100644 --- a/tests/unit/plugins/modules/service_config/test_service_config.py +++ b/tests/unit/plugins/modules/service_config/test_service_config.py @@ -21,32 +21,38 @@ import logging import os import pytest +import random +import string from pathlib import Path from time import sleep from cm_client import ( - ClustersResourceApi, - Configuration, ApiClient, ApiClusterList, ApiCluster, ApiCommand, ApiConfig, - ParcelResourceApi, - ApiHostRefList, ApiHostRef, - ApiParcel, - ApiParcelList, - ApiServiceList, + ApiHostRefList, ApiService, ApiServiceConfig, + ApiServiceList, + ClustersResourceApi, CommandsResourceApi, + Configuration, + HostsResourceApi, + ParcelResourceApi, + ParcelsResourceApi, ServicesResourceApi, ) from cm_client.rest import ApiException, RESTClientObject from ansible_collections.cloudera.cluster.plugins.modules import service_config +from ansible_collections.cloudera.cluster.plugins.module_utils.parcel_utils import ( + Parcel, +) + from ansible_collections.cloudera.cluster.tests.unit import ( AnsibleExitJson, AnsibleFailJson, @@ -115,62 +121,71 @@ def cm_api_client(conn): return client -@pytest.fixture(scope="module") -def prep_cluster(cm_api_client, request): - """Create a 7.1.9 test cluster using the module name.""" +@pytest.fixture(scope="session") +def target_cluster(cm_api_client, request): + """Create a 7.1.9 test cluster.""" - marker = request.node.get_closest_marker("prep") - if marker is None: - raise Exception("Preparation marker not found.") - elif "version" not in marker.kwargs: - raise Exception("Cluster version parameter not found.") - elif "hosts" not in marker.kwargs: - raise Exception("Cluster hosts parameter not found.") - else: - version = marker.kwargs["version"] - hosts = marker.kwargs["hosts"] + name = ( + Path(request.fixturename).stem + + "_" + + "".join(random.choices(string.ascii_lowercase, k=6)) + ) + cdh_version = "7.1.9" cluster_api = ClustersResourceApi(cm_api_client) + parcels_api = ParcelsResourceApi(cm_api_client) parcel_api = ParcelResourceApi(cm_api_client) + host_api = HostsResourceApi(cm_api_client) try: + # TODO Query for the latest version available - is this possible? 
+ + # Create the initial cluster config = ApiCluster( - name=request.node.name, - full_version=version, + name=name, + full_version=cdh_version, ) - # Create the cluster - clusters = cluster_api.create_clusters(body=ApiClusterList(items=[config])) - # Activate the parcel(s) - from ansible_collections.cloudera.cluster.plugins.module_utils.parcel_utils import ( - Parcel, - ) + cluster_api.create_clusters(body=ApiClusterList(items=[config])) + + # Get first free host and assign to the cluster + all_hosts = host_api.read_hosts() + host = next((h for h in all_hosts.items if not h.cluster_ref), None) + + if host is None: + # Roll back the cluster and then raise an error + cluster_api.delete_cluster(cluster_name=name) + raise Exception("No available hosts to allocate to new cluster") + else: + cluster_api.add_hosts( + cluster_name=name, + body=ApiHostRefList(items=[ApiHostRef(host_id=host.host_id)]), + ) + + # Find the first CDH parcel version and activate it + parcels = parcels_api.read_parcels(cluster_name=name) + cdh_parcel = next((p for p in parcels.items if p.product == "CDH")) parcel = Parcel( parcel_api=parcel_api, - product="CDH", - version=version, - cluster=request.node.name, + product=cdh_parcel.product, + version=cdh_parcel.version, + cluster=name, ) - cluster_api.add_hosts( - cluster_name=request.node.name, - body=ApiHostRefList(items=[ApiHostRef(hostname=h) for h in hosts]), - ) - yield clusters.items[0] - cluster_api.delete_cluster(cluster_name=request.node.name) - except ApiException as ae: - raise Exception(str(ae)) + parcel.activate() + # Reread and return the cluster + yield cluster_api.read_cluster(cluster_name=name) -@pytest.mark.skip -def test_wip_cluster(prep_cluster): - results = prep_cluster - print(results) + # Deprovision the cluster + cluster_api.delete_cluster(cluster_name=name) + except ApiException as ae: + raise Exception(str(ae)) def wait_for_command( - api_client: ApiClient, command: ApiCommand, polling: int = 10, delay: int = 5 + api_client: ApiClient, command: ApiCommand, polling: int = 120, delay: int = 5 ): poll_count = 0 while command.active: @@ -184,7 +199,7 @@ def wait_for_command( @pytest.fixture(scope="module") -def target_service(cm_api_client, request): +def target_service(cm_api_client, target_cluster, request): api = ServicesResourceApi(cm_api_client) cluster_api = ClustersResourceApi(cm_api_client) @@ -195,17 +210,20 @@ def target_service(cm_api_client, request): type="ZOOKEEPER", ) - api.create_services(cluster_name="TestOne", body=ApiServiceList(items=[service])) - cluster_api.auto_assign_roles(cluster_name="TestOne") + api.create_services( + cluster_name=target_cluster.name, body=ApiServiceList(items=[service]) + ) + cluster_api.auto_assign_roles(cluster_name=target_cluster.name) - # configure = cluster_api.auto_configure(cluster_name="TestOne") + # configure = cluster_api.auto_configure(cluster_name=target_cluster.name) wait_for_command( - cm_api_client, api.first_run(cluster_name="TestOne", service_name=name) + cm_api_client, + api.first_run(cluster_name=target_cluster.name, service_name=name), ) - yield api.read_service(cluster_name="TestOne", service_name=name) + yield api.read_service(cluster_name=target_cluster.name, service_name=name) - api.delete_service(cluster_name="TestOne", service_name=name) + api.delete_service(cluster_name=target_cluster.name, service_name=name) @pytest.fixture From 170eb1e371a17bbbb1f6ec33c6e13c1236f08988 Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Wed, 11 Dec 2024 17:26:26 -0500 Subject: [PATCH 08/13] 
Refactor session fixtures to relocate for general test access Signed-off-by: Webster Mudge --- tests/unit/__init__.py | 24 +++ tests/unit/conftest.py | 168 +++++++++++++++--- .../service_config/test_service_config.py | 158 +--------------- 3 files changed, 174 insertions(+), 176 deletions(-) diff --git a/tests/unit/__init__.py b/tests/unit/__init__.py index 6a16e733..0913603e 100644 --- a/tests/unit/__init__.py +++ b/tests/unit/__init__.py @@ -1,3 +1,5 @@ +# -*- coding: utf-8 -*- +# # Copyright 2024 Cloudera, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -12,6 +14,14 @@ # See the License for the specific language governing permissions and # limitations under the License. +from time import sleep + +from cm_client import ( + ApiClient, + ApiCommand, + CommandsResourceApi, +) + class AnsibleExitJson(Exception): """Exception class to be raised by module.exit_json and caught by the test case""" @@ -31,3 +41,17 @@ def __init__(self, kwargs): kwargs.get("msg", "General module failure") ) self.__dict__.update(kwargs) + + +def wait_for_command( + api_client: ApiClient, command: ApiCommand, polling: int = 120, delay: int = 5 +): + poll_count = 0 + while command.active: + if poll_count > polling: + raise Exception("CM command timeout") + sleep(delay) + poll_count += 1 + command = CommandsResourceApi(api_client).read_command(command.id) + if not command.success: + raise Exception(f"CM command [{command.id}] failed: {command.result_message}") diff --git a/tests/unit/conftest.py b/tests/unit/conftest.py index 6829f307..e7b48475 100644 --- a/tests/unit/conftest.py +++ b/tests/unit/conftest.py @@ -19,18 +19,35 @@ __metaclass__ = type import json -import sys +import os import pytest +import random +import string +import sys import yaml +from pathlib import Path + +from cm_client import ( + ApiClient, + ApiClusterList, + ApiCluster, + ApiHostRef, + ApiHostRefList, + ClustersResourceApi, + Configuration, + HostsResourceApi, + ParcelResourceApi, + ParcelsResourceApi, +) +from cm_client.rest import ApiException, RESTClientObject + from ansible.module_utils import basic from ansible.module_utils.common.text.converters import to_bytes -# # Required for pytest discovery in VSCode, reasons unknown... -# try: -# from ansible.plugins.action import ActionBase -# except ModuleNotFoundError: -# pass +from ansible_collections.cloudera.cluster.plugins.module_utils.parcel_utils import ( + Parcel, +) from ansible_collections.cloudera.cluster.tests.unit import ( AnsibleFailJson, @@ -86,21 +103,132 @@ def prep_args(args: str = ""): return prep_args -# class AnsibleExitJson(Exception): -# """Exception class to be raised by module.exit_json and caught by the test case""" +@pytest.fixture(scope="session") +def conn(): + conn = dict(username=os.getenv("CM_USERNAME"), password=os.getenv("CM_PASSWORD")) + + if os.getenv("CM_HOST", None): + conn.update(host=os.getenv("CM_HOST")) + + if os.getenv("CM_PORT", None): + conn.update(port=os.getenv("CM_PORT")) + + if os.getenv("CM_ENDPOINT", None): + conn.update(url=os.getenv("CM_ENDPOINT")) + + if os.getenv("CM_PROXY", None): + conn.update(proxy=os.getenv("CM_PROXY")) + + return { + **conn, + "verify_tls": "no", + "debug": "no", + } + + +@pytest.fixture(scope="session") +def cm_api_client(conn): + """Create a Cloudera Manager API client, resolving HTTP/S and version URL. 
+ + Args: + conn (dict): Connection details + + Returns: + ApiClient: Cloudera Manager API client + """ + config = Configuration() + + config.username = conn["username"] + config.password = conn["password"] -# def __init__(self, kwargs): -# super(AnsibleExitJson, self).__init__( -# kwargs.get("msg", "General module success") -# ) -# self.__dict__.update(kwargs) + if "url" in conn: + config.host = str(conn["url"]).rstrip(" /") + else: + rest = RESTClientObject() + + # Handle redirects + url = rest.GET(conn["host"]).urllib3_response.geturl() + + # Get version + auth = config.auth_settings().get("basic") + version = rest.GET( + f"{url}api/version", headers={auth["key"]: auth["value"]} + ).data + + # Set host + config.host = f"{url}api/{version}" + + client = ApiClient() + client.user_agent = "pytest" + return client + + +@pytest.fixture(scope="session") +def target_cluster(cm_api_client, request): + """Create a test cluster.""" + + if os.getenv("CDH_VERSION", None): + cdh_version = os.getenv("CDH_VERSION") + else: + raise Exception("No CDH_VERSION found. Please set this environment variable.") + + name = ( + Path(request.fixturename).stem + + "_" + + "".join(random.choices(string.ascii_lowercase, k=6)) + ) + + cluster_api = ClustersResourceApi(cm_api_client) + parcels_api = ParcelsResourceApi(cm_api_client) + parcel_api = ParcelResourceApi(cm_api_client) + host_api = HostsResourceApi(cm_api_client) + + try: + # Create the initial cluster + config = ApiCluster( + name=name, + full_version=cdh_version, + ) + + cluster_api.create_clusters(body=ApiClusterList(items=[config])) + + # Get first free host and assign to the cluster + all_hosts = host_api.read_hosts() + host = next((h for h in all_hosts.items if not h.cluster_ref), None) + + if host is None: + # Roll back the cluster and then raise an error + cluster_api.delete_cluster(cluster_name=name) + raise Exception("No available hosts to allocate to new cluster") + else: + cluster_api.add_hosts( + cluster_name=name, + body=ApiHostRefList(items=[ApiHostRef(host_id=host.host_id)]), + ) + + # Find the first CDH parcel version and activate it + parcels = parcels_api.read_parcels(cluster_name=name) + cdh_parcel = next( + ( + p + for p in parcels.items + if p.product == "CDH" and p.version.startswith(cdh_version) + ) + ) + + parcel = Parcel( + parcel_api=parcel_api, + product=cdh_parcel.product, + version=cdh_parcel.version, + cluster=name, + ) + parcel.activate() -# class AnsibleFailJson(Exception): -# """Exception class to be raised by module.fail_json and caught by the test case""" + # Reread and return the cluster + yield cluster_api.read_cluster(cluster_name=name) -# def __init__(self, kwargs): -# super(AnsibleFailJson, self).__init__( -# kwargs.get("msg", "General module failure") -# ) -# self.__dict__.update(kwargs) + # Deprovision the cluster + cluster_api.delete_cluster(cluster_name=name) + except ApiException as ae: + raise Exception(str(ae)) diff --git a/tests/unit/plugins/modules/service_config/test_service_config.py b/tests/unit/plugins/modules/service_config/test_service_config.py index 4505f3cc..a8ab22f8 100644 --- a/tests/unit/plugins/modules/service_config/test_service_config.py +++ b/tests/unit/plugins/modules/service_config/test_service_config.py @@ -19,185 +19,31 @@ __metaclass__ = type import logging -import os import pytest -import random -import string from pathlib import Path -from time import sleep from cm_client import ( - ApiClient, - ApiClusterList, - ApiCluster, - ApiCommand, ApiConfig, - ApiHostRef, - ApiHostRefList, 
ApiService, ApiServiceConfig, ApiServiceList, ClustersResourceApi, - CommandsResourceApi, - Configuration, - HostsResourceApi, - ParcelResourceApi, - ParcelsResourceApi, ServicesResourceApi, ) -from cm_client.rest import ApiException, RESTClientObject +from cm_client.rest import ApiException from ansible_collections.cloudera.cluster.plugins.modules import service_config -from ansible_collections.cloudera.cluster.plugins.module_utils.parcel_utils import ( - Parcel, -) from ansible_collections.cloudera.cluster.tests.unit import ( AnsibleExitJson, AnsibleFailJson, + wait_for_command, ) LOG = logging.getLogger(__name__) -@pytest.fixture(scope="session") -def conn(): - conn = dict(username=os.getenv("CM_USERNAME"), password=os.getenv("CM_PASSWORD")) - - if os.getenv("CM_HOST", None): - conn.update(host=os.getenv("CM_HOST")) - - if os.getenv("CM_PORT", None): - conn.update(port=os.getenv("CM_PORT")) - - if os.getenv("CM_ENDPOINT", None): - conn.update(url=os.getenv("CM_ENDPOINT")) - - if os.getenv("CM_PROXY", None): - conn.update(proxy=os.getenv("CM_PROXY")) - - return { - **conn, - "verify_tls": "no", - "debug": "no", - } - - -@pytest.fixture(scope="session") -def cm_api_client(conn): - """Create a Cloudera Manager API client, resolving HTTP/S and version URL. - - Args: - conn (dict): Connection details - - Returns: - ApiClient: Cloudera Manager API client - """ - config = Configuration() - - config.username = conn["username"] - config.password = conn["password"] - - if "url" in conn: - config.host = str(conn["url"]).rstrip(" /") - else: - rest = RESTClientObject() - - # Handle redirects - url = rest.GET(conn["host"]).urllib3_response.geturl() - - # Get version - auth = config.auth_settings().get("basic") - version = rest.GET( - f"{url}api/version", headers={auth["key"]: auth["value"]} - ).data - - # Set host - config.host = f"{url}api/{version}" - - client = ApiClient() - client.user_agent = "pytest" - return client - - -@pytest.fixture(scope="session") -def target_cluster(cm_api_client, request): - """Create a 7.1.9 test cluster.""" - - name = ( - Path(request.fixturename).stem - + "_" - + "".join(random.choices(string.ascii_lowercase, k=6)) - ) - cdh_version = "7.1.9" - - cluster_api = ClustersResourceApi(cm_api_client) - parcels_api = ParcelsResourceApi(cm_api_client) - parcel_api = ParcelResourceApi(cm_api_client) - host_api = HostsResourceApi(cm_api_client) - - try: - # TODO Query for the latest version available - is this possible? 
- - # Create the initial cluster - config = ApiCluster( - name=name, - full_version=cdh_version, - ) - - cluster_api.create_clusters(body=ApiClusterList(items=[config])) - - # Get first free host and assign to the cluster - all_hosts = host_api.read_hosts() - host = next((h for h in all_hosts.items if not h.cluster_ref), None) - - if host is None: - # Roll back the cluster and then raise an error - cluster_api.delete_cluster(cluster_name=name) - raise Exception("No available hosts to allocate to new cluster") - else: - cluster_api.add_hosts( - cluster_name=name, - body=ApiHostRefList(items=[ApiHostRef(host_id=host.host_id)]), - ) - - # Find the first CDH parcel version and activate it - parcels = parcels_api.read_parcels(cluster_name=name) - cdh_parcel = next((p for p in parcels.items if p.product == "CDH")) - - parcel = Parcel( - parcel_api=parcel_api, - product=cdh_parcel.product, - version=cdh_parcel.version, - cluster=name, - ) - - parcel.activate() - - # Reread and return the cluster - yield cluster_api.read_cluster(cluster_name=name) - - # Deprovision the cluster - cluster_api.delete_cluster(cluster_name=name) - except ApiException as ae: - raise Exception(str(ae)) - - -def wait_for_command( - api_client: ApiClient, command: ApiCommand, polling: int = 120, delay: int = 5 -): - poll_count = 0 - while command.active: - if poll_count > polling: - raise Exception("CM command timeout") - sleep(delay) - poll_count += 1 - command = CommandsResourceApi(api_client).read_command(command.id) - if not command.success: - raise Exception(f"CM command [{command.id}] failed: {command.result_message}") - - @pytest.fixture(scope="module") def target_service(cm_api_client, target_cluster, request): api = ServicesResourceApi(cm_api_client) From a797c1e89a8b244b5dc5f2227ecea5922b29f3f3 Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Thu, 12 Dec 2024 10:45:14 -0500 Subject: [PATCH 09/13] Fix cm_api_client fixture redirect resolution Signed-off-by: Webster Mudge --- tests/unit/conftest.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/tests/unit/conftest.py b/tests/unit/conftest.py index e7b48475..51ab95e7 100644 --- a/tests/unit/conftest.py +++ b/tests/unit/conftest.py @@ -145,18 +145,23 @@ def cm_api_client(conn): config.host = str(conn["url"]).rstrip(" /") else: rest = RESTClientObject() + url = f"{conn['host']}:{conn['port']}" # Handle redirects - url = rest.GET(conn["host"]).urllib3_response.geturl() + redirect = rest.GET(url).urllib3_response.geturl() + if redirect != "/": + url = redirect + + url = url.rstrip(" /") # Get version auth = config.auth_settings().get("basic") version = rest.GET( - f"{url}api/version", headers={auth["key"]: auth["value"]} + f"{url}/api/version", headers={auth["key"]: auth["value"]} ).data # Set host - config.host = f"{url}api/{version}" + config.host = f"{url}/api/{version}" client = ApiClient() client.user_agent = "pytest" From 6cbb4db4d761fca35e516a294a4236cae1f5da4a Mon Sep 17 00:00:00 2001 From: Webster Mudge Date: Thu, 12 Dec 2024 14:32:09 -0500 Subject: [PATCH 10/13] Allow for existing cluster and service, reinstate existing service config Signed-off-by: Webster Mudge --- tests/unit/conftest.py | 114 +++++++++--------- .../service_config/test_service_config.py | 114 ++++++++++-------- 2 files changed, 126 insertions(+), 102 deletions(-) diff --git a/tests/unit/conftest.py b/tests/unit/conftest.py index 51ab95e7..5a492335 100644 --- a/tests/unit/conftest.py +++ b/tests/unit/conftest.py @@ -127,7 +127,7 @@ def conn(): 
@pytest.fixture(scope="session") -def cm_api_client(conn): +def cm_api_client(conn) -> ApiClient: """Create a Cloudera Manager API client, resolving HTTP/S and version URL. Args: @@ -172,68 +172,74 @@ def cm_api_client(conn): def target_cluster(cm_api_client, request): """Create a test cluster.""" - if os.getenv("CDH_VERSION", None): - cdh_version = os.getenv("CDH_VERSION") - else: - raise Exception("No CDH_VERSION found. Please set this environment variable.") + cluster_api = ClustersResourceApi(cm_api_client) - name = ( - Path(request.fixturename).stem - + "_" - + "".join(random.choices(string.ascii_lowercase, k=6)) - ) + if os.getenv("CM_CLUSTER_NAME", None): + yield cluster_api.read_cluster(cluster_name=os.getenv("CM_CLUSTER_NAME")) + else: + if os.getenv("CDH_VERSION", None): + cdh_version = os.getenv("CDH_VERSION") + else: + raise Exception( + "No CDH_VERSION found. Please set this environment variable." + ) - cluster_api = ClustersResourceApi(cm_api_client) - parcels_api = ParcelsResourceApi(cm_api_client) - parcel_api = ParcelResourceApi(cm_api_client) - host_api = HostsResourceApi(cm_api_client) - - try: - # Create the initial cluster - config = ApiCluster( - name=name, - full_version=cdh_version, + name = ( + Path(request.fixturename).stem + + "_" + + "".join(random.choices(string.ascii_lowercase, k=6)) ) - cluster_api.create_clusters(body=ApiClusterList(items=[config])) - - # Get first free host and assign to the cluster - all_hosts = host_api.read_hosts() - host = next((h for h in all_hosts.items if not h.cluster_ref), None) + parcels_api = ParcelsResourceApi(cm_api_client) + parcel_api = ParcelResourceApi(cm_api_client) + host_api = HostsResourceApi(cm_api_client) - if host is None: - # Roll back the cluster and then raise an error - cluster_api.delete_cluster(cluster_name=name) - raise Exception("No available hosts to allocate to new cluster") - else: - cluster_api.add_hosts( - cluster_name=name, - body=ApiHostRefList(items=[ApiHostRef(host_id=host.host_id)]), + try: + # Create the initial cluster + config = ApiCluster( + name=name, + full_version=cdh_version, ) - # Find the first CDH parcel version and activate it - parcels = parcels_api.read_parcels(cluster_name=name) - cdh_parcel = next( - ( - p - for p in parcels.items - if p.product == "CDH" and p.version.startswith(cdh_version) + cluster_api.create_clusters(body=ApiClusterList(items=[config])) + + # Get first free host and assign to the cluster + all_hosts = host_api.read_hosts() + host = next((h for h in all_hosts.items if not h.cluster_ref), None) + + if host is None: + # Roll back the cluster and then raise an error + cluster_api.delete_cluster(cluster_name=name) + raise Exception("No available hosts to allocate to new cluster") + else: + cluster_api.add_hosts( + cluster_name=name, + body=ApiHostRefList(items=[ApiHostRef(host_id=host.host_id)]), + ) + + # Find the first CDH parcel version and activate it + parcels = parcels_api.read_parcels(cluster_name=name) + cdh_parcel = next( + ( + p + for p in parcels.items + if p.product == "CDH" and p.version.startswith(cdh_version) + ) ) - ) - parcel = Parcel( - parcel_api=parcel_api, - product=cdh_parcel.product, - version=cdh_parcel.version, - cluster=name, - ) + parcel = Parcel( + parcel_api=parcel_api, + product=cdh_parcel.product, + version=cdh_parcel.version, + cluster=name, + ) - parcel.activate() + parcel.activate() - # Reread and return the cluster - yield cluster_api.read_cluster(cluster_name=name) + # Reread and return the cluster + yield 
cluster_api.read_cluster(cluster_name=name) - # Deprovision the cluster - cluster_api.delete_cluster(cluster_name=name) - except ApiException as ae: - raise Exception(str(ae)) + # Deprovision the cluster + cluster_api.delete_cluster(cluster_name=name) + except ApiException as ae: + raise Exception(str(ae)) diff --git a/tests/unit/plugins/modules/service_config/test_service_config.py b/tests/unit/plugins/modules/service_config/test_service_config.py index a8ab22f8..ee2070fe 100644 --- a/tests/unit/plugins/modules/service_config/test_service_config.py +++ b/tests/unit/plugins/modules/service_config/test_service_config.py @@ -19,6 +19,7 @@ __metaclass__ = type import logging +import os import pytest from pathlib import Path @@ -47,29 +48,34 @@ @pytest.fixture(scope="module") def target_service(cm_api_client, target_cluster, request): api = ServicesResourceApi(cm_api_client) - cluster_api = ClustersResourceApi(cm_api_client) - name = Path(request.node.name).stem + "_zookeeper" + if os.getenv("CM_SERVICE_NAME", None): + yield api.read_service( + cluster_name=target_cluster.name, service_name=os.getenv("CM_SERVICE_NAME") + ) + else: + cluster_api = ClustersResourceApi(cm_api_client) + name = Path(request.node.name).stem + "_zookeeper" - service = ApiService( - name=name, - type="ZOOKEEPER", - ) + service = ApiService( + name=name, + type="ZOOKEEPER", + ) - api.create_services( - cluster_name=target_cluster.name, body=ApiServiceList(items=[service]) - ) - cluster_api.auto_assign_roles(cluster_name=target_cluster.name) + api.create_services( + cluster_name=target_cluster.name, body=ApiServiceList(items=[service]) + ) + cluster_api.auto_assign_roles(cluster_name=target_cluster.name) - # configure = cluster_api.auto_configure(cluster_name=target_cluster.name) - wait_for_command( - cm_api_client, - api.first_run(cluster_name=target_cluster.name, service_name=name), - ) + # configure = cluster_api.auto_configure(cluster_name=target_cluster.name) + wait_for_command( + cm_api_client, + api.first_run(cluster_name=target_cluster.name, service_name=name), + ) - yield api.read_service(cluster_name=target_cluster.name, service_name=name) + yield api.read_service(cluster_name=target_cluster.name, service_name=name) - api.delete_service(cluster_name=target_cluster.name, service_name=name) + api.delete_service(cluster_name=target_cluster.name, service_name=name) @pytest.fixture @@ -83,7 +89,13 @@ def target_service_config(cm_api_client, target_service, request): service_api = ServicesResourceApi(cm_api_client) - # Set the parameter(s) + # Retrieve all of the pre-setup configurations + pre = service_api.read_service_config( + cluster_name=target_service.cluster_ref.cluster_name, + service_name=target_service.name, + ) + + # Set the test configurations # Do so serially, since a failed update due to defaults (see ApiException) will cause remaining # configuration entries to not run. Long-term solution is to check-and-set, which is # what the Ansible modules do... 
@@ -102,18 +114,30 @@ def target_service_config(cm_api_client, target_service, request): # Return the targeted service and go run the test yield target_service - # Reset the parameter - for k, v in marker.kwargs["service_config"].items(): - try: - service_api.update_service_config( - cluster_name=target_service.cluster_ref.cluster_name, - service_name=target_service.name, - message=f"test_service_config::{request.node.name}::reset", - body=ApiServiceConfig(items=[ApiConfig(name=k, value=v)]), - ) - except ApiException as ae: - if ae.status != 400 or "delete with template" not in str(ae.body): - raise Exception(str(ae)) + # Retrieve all of the post-setup configurations + post = service_api.read_service_config( + cluster_name=target_service.cluster_ref.cluster_name, + service_name=target_service.name, + ) + + # Reconcile the configurations + pre_set = set([c.name for c in pre.items]) + + reconciled = pre.items.copy() + reconciled.extend( + [ + ApiConfig(name=k.name, value=None) + for k in post.items + if k.name not in pre_set + ] + ) + + service_api.update_service_config( + cluster_name=target_service.cluster_ref.cluster_name, + service_name=target_service.name, + message=f"test_service_config::{request.node.name}::reset", + body=ApiServiceConfig(items=reconciled), + ) def test_missing_required(conn, module_args): @@ -202,24 +226,20 @@ def test_set_parameters(conn, module_args, target_service_config): } ) + expected = dict(autopurgeSnapRetainCount="9", tickTime="1111") + with pytest.raises(AnsibleExitJson) as e: service_config.main() assert e.value.changed == True - assert {c["name"]: c["value"] for c in e.value.config}[ - "autopurgeSnapRetainCount" - ] == "9" - assert len(e.value.config) == 2 + assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() # Idempotency with pytest.raises(AnsibleExitJson) as e: service_config.main() assert e.value.changed == False - assert {c["name"]: c["value"] for c in e.value.config}[ - "autopurgeSnapRetainCount" - ] == "9" - assert len(e.value.config) == 2 + assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() @pytest.mark.prepare(service_config=dict(autopurgeSnapRetainCount=7, tickTime=1111)) @@ -234,13 +254,15 @@ def test_unset_parameters(conn, module_args, target_service_config): } ) + expected = dict(tickTime="1111") + with pytest.raises(AnsibleExitJson) as e: service_config.main() assert e.value.changed == True results = {c["name"]: c["value"] for c in e.value.config} assert "autopurgeSnapRetainCount" not in results - assert len(e.value.config) == 1 + assert expected.items() <= results.items() # Idempotency with pytest.raises(AnsibleExitJson) as e: @@ -249,7 +271,7 @@ def test_unset_parameters(conn, module_args, target_service_config): assert e.value.changed == False results = {c["name"]: c["value"] for c in e.value.config} assert "autopurgeSnapRetainCount" not in results - assert len(e.value.config) == 1 + assert expected.items() <= results.items() @pytest.mark.prepare(service_config=dict(autopurgeSnapRetainCount=7, tickTime=1111)) @@ -267,23 +289,19 @@ def test_set_parameters_with_purge(conn, module_args, target_service_config): } ) + expected = dict(autopurgeSnapRetainCount="9") + with pytest.raises(AnsibleExitJson) as e: service_config.main() assert e.value.changed == True - assert {c["name"]: c["value"] for c in e.value.config}[ - "autopurgeSnapRetainCount" - ] == "9" - assert len(e.value.config) == 1 + assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items() with 
     with pytest.raises(AnsibleExitJson) as e:
         service_config.main()

     assert e.value.changed == False
-    assert {c["name"]: c["value"] for c in e.value.config}[
-        "autopurgeSnapRetainCount"
-    ] == "9"
-    assert len(e.value.config) == 1
+    assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items()


 @pytest.mark.prepare(service_config=dict(autopurgeSnapRetainCount=8, tickTime=2222))
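The teardown above replaces the old per-key rollback with a reconcile pass: every pre-test value is restored, and any key the test introduced is explicitly cleared. A minimal sketch of that set arithmetic, using a hypothetical Config dataclass in place of cm_client's ApiConfig and invented sample values (in these patches, a None value is what the reset path sends to clear a key):

    from dataclasses import dataclass
    from typing import List, Optional


    @dataclass
    class Config:
        name: str
        value: Optional[str]


    # Service-wide configuration before and after a test run (sample data)
    pre: List[Config] = [Config("tickTime", "2000")]
    post: List[Config] = [
        Config("tickTime", "9999"),
        Config("autopurgeSnapRetainCount", "9"),
    ]

    pre_set = {c.name for c in pre}

    # Restore original values; null out anything the test introduced
    reconciled = list(pre)
    reconciled.extend(Config(c.name, None) for c in post if c.name not in pre_set)

    assert [(c.name, c.value) for c in reconciled] == [
        ("tickTime", "2000"),
        ("autopurgeSnapRetainCount", None),
    ]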
From 0eff57a03eae57dbfab29be794555d06ba829d66 Mon Sep 17 00:00:00 2001
From: Webster Mudge
Date: Sat, 14 Dec 2024 10:44:33 -0500
Subject: [PATCH 11/13] Make utility functions for service and service_config
 for use with pytest fixtures

Signed-off-by: Webster Mudge
---
 tests/unit/__init__.py | 124 +++++++++++++++++++++++++++++++++++++++++
 1 file changed, 124 insertions(+)

diff --git a/tests/unit/__init__.py b/tests/unit/__init__.py
index 0913603e..f593ed44 100644
--- a/tests/unit/__init__.py
+++ b/tests/unit/__init__.py
@@ -14,13 +14,22 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

+from collections.abc import Generator
 from time import sleep

 from cm_client import (
     ApiClient,
+    ApiCluster,
     ApiCommand,
+    ApiConfig,
+    ApiService,
+    ApiServiceConfig,
+    ApiServiceList,
+    ClustersResourceApi,
     CommandsResourceApi,
+    ServicesResourceApi,
 )
+from cm_client.rest import ApiException


 class AnsibleExitJson(Exception):
@@ -46,6 +55,8 @@ def __init__(self, kwargs):
 def wait_for_command(
     api_client: ApiClient, command: ApiCommand, polling: int = 120, delay: int = 5
 ):
+    """Polls Cloudera Manager to wait for a Command to complete."""
+
     poll_count = 0
     while command.active:
         if poll_count > polling:
@@ -55,3 +66,116 @@ def wait_for_command(
     command = CommandsResourceApi(api_client).read_command(command.id)
     if not command.success:
         raise Exception(f"CM command [{command.id}] failed: {command.result_message}")
+
+
+def provision_service(
+    api_client: ApiClient, cluster: ApiCluster, service_name: str, service_type: str
+) -> Generator[ApiService]:
+    """Provisions a new cluster service as a generator.
+    Use with 'yield from' to delegate within a pytest fixture.
+
+    Args:
+        api_client (ApiClient): Cloudera Manager API client.
+        cluster (ApiCluster): Cluster on which to provision the service.
+        service_name (str): Name of the new service.
+        service_type (str): Type of the new service, e.g. ZOOKEEPER.
+
+    Raises:
+        Exception: If the service's first run command fails.
+
+    Yields:
+        ApiService: The provisioned service, deleted on generator resumption.
+    """
+
+    api = ServicesResourceApi(api_client)
+    cluster_api = ClustersResourceApi(api_client)
+
+    service = ApiService(
+        name=service_name,
+        type=service_type,
+    )
+
+    api.create_services(cluster_name=cluster.name, body=ApiServiceList(items=[service]))
+    cluster_api.auto_assign_roles(cluster_name=cluster.name)
+
+    # configure = cluster_api.auto_configure(cluster_name=target_cluster.name)
+    wait_for_command(
+        api_client,
+        api.first_run(cluster_name=cluster.name, service_name=service_name),
+    )
+
+    yield api.read_service(cluster_name=cluster.name, service_name=service_name)
+
+    api.delete_service(cluster_name=cluster.name, service_name=service_name)
+
+
+def service_wide_config(
+    api_client: ApiClient, service: ApiService, params: dict, message: str
+) -> Generator[ApiService]:
+    """Update a service-wide configuration for a given service. Yields the
+    service, resetting the configuration to its prior state. Use with
+    'yield from' within a pytest fixture.
+
+    Args:
+        api_client (ApiClient): Cloudera Manager API client.
+        service (ApiService): Service whose service-wide configuration is updated.
+        params (dict): Configuration parameters to set for the test.
+        message (str): Prefix for the configuration change messages.
+
+    Raises:
+        Exception: If a configuration update fails.
+
+    Yields:
+        ApiService: The service under test.
+    """
+    service_api = ServicesResourceApi(api_client)
+
+    # Retrieve all of the pre-setup configurations
+    pre = service_api.read_service_config(
+        cluster_name=service.cluster_ref.cluster_name,
+        service_name=service.name,
+    )
+
+    # Set the test configurations
+    # Do so serially, since a failed update due to defaults (see ApiException) will cause remaining
+    # configuration entries to not run. Long-term solution is to check-and-set, which is
+    # what the Ansible modules do...
+    for k, v in params.items():
+        try:
+            service_api.update_service_config(
+                cluster_name=service.cluster_ref.cluster_name,
+                service_name=service.name,
+                message=f"{message}::set",
+                body=ApiServiceConfig(items=[ApiConfig(name=k, value=v)]),
+            )
+        except ApiException as ae:
+            if ae.status != 400 or "delete with template" not in str(ae.body):
+                raise Exception(str(ae))
+
+    # Yield the targeted service
+    yield service
+
+    # Retrieve all of the post-setup configurations
+    post = service_api.read_service_config(
+        cluster_name=service.cluster_ref.cluster_name,
+        service_name=service.name,
+    )
+
+    # Reconcile the configurations
+    pre_set = set([c.name for c in pre.items])
+
+    reconciled = pre.items.copy()
+    reconciled.extend(
+        [
+            ApiConfig(name=k.name, value=None)
+            for k in post.items
+            if k.name not in pre_set
+        ]
+    )
+
+    service_api.update_service_config(
+        cluster_name=service.cluster_ref.cluster_name,
+        service_name=service.name,
+        message=f"{message}::reset",
+        body=ApiServiceConfig(items=reconciled),
+    )

From b4747b8c68b9fa91882afb1d8fe96b428fe3f575 Mon Sep 17 00:00:00 2001
From: Webster Mudge
Date: Sat, 14 Dec 2024 10:45:05 -0500
Subject: [PATCH 12/13] Update name for CDH Base cluster fixture

Signed-off-by: Webster Mudge
---
 tests/unit/conftest.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/tests/unit/conftest.py b/tests/unit/conftest.py
index 5a492335..3a2a103c 100644
--- a/tests/unit/conftest.py
+++ b/tests/unit/conftest.py
@@ -169,13 +169,13 @@ def cm_api_client(conn) -> ApiClient:


 @pytest.fixture(scope="session")
-def target_cluster(cm_api_client, request):
-    """Create a test cluster."""
+def base_cluster(cm_api_client, request):
+    """Provision a CDH Base cluster."""

     cluster_api = ClustersResourceApi(cm_api_client)

-    if os.getenv("CM_CLUSTER_NAME", None):
-        yield cluster_api.read_cluster(cluster_name=os.getenv("CM_CLUSTER_NAME"))
+    if os.getenv("CM_CLUSTER", None):
+        yield cluster_api.read_cluster(cluster_name=os.getenv("CM_CLUSTER"))
     else:
         if os.getenv("CDH_VERSION", None):
             cdh_version = os.getenv("CDH_VERSION")
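The renamed fixture keeps the same provision-or-reuse shape that the ZooKeeper fixture adopts in the next patch: honor an environment variable naming existing infrastructure, otherwise build a throwaway resource and tear down only what was created. A stripped-down sketch of the control flow, with a plain dict standing in for the ApiCluster object and the real ClustersResourceApi calls omitted:

    import os

    import pytest


    @pytest.fixture(scope="session")
    def base_cluster():
        existing = os.getenv("CM_CLUSTER")
        if existing:
            # Reuse a pre-built cluster; leave it running afterwards
            yield {"name": existing}
        else:
            # Stand-in for provisioning via ClustersResourceApi
            cluster = {"name": "pytest-managed"}
            yield cluster
            # Teardown would deprovision only this fixture's cluster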
From 612159eb11a8c5b099301040629a79de55910abf Mon Sep 17 00:00:00 2001
From: Webster Mudge
Date: Sat, 14 Dec 2024 10:46:38 -0500
Subject: [PATCH 13/13] Update ZK service and config fixtures to use utility
 functions

Signed-off-by: Webster Mudge
---
 .../service_config/test_service_config.py     | 87 ++++++++++++++-----
 1 file changed, 64 insertions(+), 23 deletions(-)

diff --git a/tests/unit/plugins/modules/service_config/test_service_config.py b/tests/unit/plugins/modules/service_config/test_service_config.py
index ee2070fe..17c446c6 100644
--- a/tests/unit/plugins/modules/service_config/test_service_config.py
+++ b/tests/unit/plugins/modules/service_config/test_service_config.py
@@ -21,6 +21,8 @@
 import logging
 import os
 import pytest
+import random
+import string

 from pathlib import Path
@@ -40,18 +42,57 @@
     AnsibleExitJson,
     AnsibleFailJson,
     wait_for_command,
+    provision_service,
+    service_wide_config,
 )

 LOG = logging.getLogger(__name__)


 @pytest.fixture(scope="module")
-def target_service(cm_api_client, target_cluster, request):
+def zk_service(cm_api_client, base_cluster, request):
+    if os.getenv("CM_SERVICE_ZOOKEEPER", None):
+        api = ServicesResourceApi(cm_api_client)
+        yield api.read_service(
+            cluster_name=base_cluster.name,
+            service_name=os.getenv("CM_SERVICE_ZOOKEEPER"),
+        )
+    else:
+        name = (
+            Path(request.fixturename).stem
+            + "_"
+            + "".join(random.choices(string.ascii_lowercase, k=6))
+        )
+        yield from provision_service(
+            api_client=cm_api_client,
+            cluster=base_cluster,
+            service_name=name,
+            service_type="ZOOKEEPER",
+        )
+
+
+@pytest.fixture(scope="function")
+def zk_service_config(cm_api_client, zk_service, request):
+    marker = request.node.get_closest_marker("service_config")
+
+    if marker is None:
+        raise Exception("No service_config marker found.")
+
+    yield from service_wide_config(
+        api_client=cm_api_client,
+        service=zk_service,
+        params=marker.args[0],
+        message=f"test_service_config::{request.node.name}",
+    )
+
+
+@pytest.fixture(scope="module")
+def target_service(cm_api_client, base_cluster, request):
     api = ServicesResourceApi(cm_api_client)

     if os.getenv("CM_SERVICE_NAME", None):
         yield api.read_service(
-            cluster_name=target_cluster.name, service_name=os.getenv("CM_SERVICE_NAME")
+            cluster_name=base_cluster.name, service_name=os.getenv("CM_SERVICE_NAME")
         )
     else:
         cluster_api = ClustersResourceApi(cm_api_client)
@@ -63,19 +104,19 @@ def target_service(cm_api_client, base_cluster, request):
         )

         api.create_services(
-            cluster_name=target_cluster.name, body=ApiServiceList(items=[service])
+            cluster_name=base_cluster.name, body=ApiServiceList(items=[service])
         )
-        cluster_api.auto_assign_roles(cluster_name=target_cluster.name)
+        cluster_api.auto_assign_roles(cluster_name=base_cluster.name)

         # configure = cluster_api.auto_configure(cluster_name=target_cluster.name)
         wait_for_command(
             cm_api_client,
-            api.first_run(cluster_name=target_cluster.name, service_name=name),
+            api.first_run(cluster_name=base_cluster.name, service_name=name),
         )

-        yield api.read_service(cluster_name=target_cluster.name, service_name=name)
+        yield api.read_service(cluster_name=base_cluster.name, service_name=name)

-        api.delete_service(cluster_name=target_cluster.name, service_name=name)
+        api.delete_service(cluster_name=base_cluster.name, service_name=name)


 @pytest.fixture
@@ -212,13 +253,13 @@ def test_present_invalid_parameter(conn, module_args, target_service):
         service_config.main()


-@pytest.mark.prepare(service_config=dict(autopurgeSnapRetainCount=None, tickTime=1111))
-def test_set_parameters(conn, module_args, target_service_config):
+@pytest.mark.service_config(dict(autopurgeSnapRetainCount=None, tickTime=1111))
+def test_set_parameters(conn, module_args, zk_service_config):
     module_args(
         {
             **conn,
-            "cluster": target_service_config.cluster_ref.cluster_name,
-            "service": target_service_config.name,
+            "cluster": zk_service_config.cluster_ref.cluster_name,
+            "service": zk_service_config.name,
             "parameters": dict(autopurgeSnapRetainCount=9),
             "message": "test_service_config::test_set_parameters",
             # "_ansible_check_mode": True,
@@ -242,13 +283,13 @@ def test_set_parameters(conn, module_args, zk_service_config):
     assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items()


-@pytest.mark.prepare(service_config=dict(autopurgeSnapRetainCount=7, tickTime=1111))
-def test_unset_parameters(conn, module_args, target_service_config):
+@pytest.mark.service_config(dict(autopurgeSnapRetainCount=7, tickTime=1111))
+def test_unset_parameters(conn, module_args, zk_service_config):
     module_args(
         {
             **conn,
-            "cluster": target_service_config.cluster_ref.cluster_name,
-            "service": target_service_config.name,
+            "cluster": zk_service_config.cluster_ref.cluster_name,
+            "service": zk_service_config.name,
             "parameters": dict(autopurgeSnapRetainCount=None),
             "message": "test_service_config::test_unset_parameters",
         }
@@ -274,13 +315,13 @@ def test_unset_parameters(conn, module_args, zk_service_config):
     assert expected.items() <= results.items()


-@pytest.mark.prepare(service_config=dict(autopurgeSnapRetainCount=7, tickTime=1111))
-def test_set_parameters_with_purge(conn, module_args, target_service_config):
+@pytest.mark.service_config(dict(autopurgeSnapRetainCount=7, tickTime=1111))
+def test_set_parameters_with_purge(conn, module_args, zk_service_config):
     module_args(
         {
             **conn,
-            "cluster": target_service_config.cluster_ref.cluster_name,
-            "service": target_service_config.name,
+            "cluster": zk_service_config.cluster_ref.cluster_name,
+            "service": zk_service_config.name,
             "parameters": dict(autopurgeSnapRetainCount=9),
             "purge": True,
             "message": "test_service_config::test_set_parameters_with_purge",
@@ -304,13 +345,13 @@ def test_set_parameters_with_purge(conn, module_args, zk_service_config):
     assert expected.items() <= {c["name"]: c["value"] for c in e.value.config}.items()


-@pytest.mark.prepare(service_config=dict(autopurgeSnapRetainCount=8, tickTime=2222))
-def test_purge_all_parameters(conn, module_args, target_service_config):
+@pytest.mark.service_config(dict(autopurgeSnapRetainCount=8, tickTime=2222))
+def test_purge_all_parameters(conn, module_args, zk_service_config):
     module_args(
         {
             **conn,
-            "cluster": target_service_config.cluster_ref.cluster_name,
-            "service": target_service_config.name,
+            "cluster": zk_service_config.cluster_ref.cluster_name,
+            "service": zk_service_config.name,
             "parameters": dict(),
             "purge": True,
             "message": "test_service_config::test_purge_all_parameters",
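With the final patch applied, a new service-wide configuration test reduces to a short recipe: seed the starting configuration with the service_config marker, take the zk_service_config fixture, and compare against the module's returned config. A hypothetical example in that style (the test name and parameter values are invented; the service_config marker would also need to be registered alongside "prep" in pyproject.toml to silence pytest's unknown-mark warning):

    import pytest

    from ansible_collections.cloudera.cluster.plugins.modules import service_config
    from ansible_collections.cloudera.cluster.tests.unit import AnsibleExitJson


    @pytest.mark.service_config(dict(tickTime=3000))
    def test_set_alongside_existing(conn, module_args, zk_service_config):
        module_args(
            {
                **conn,
                "cluster": zk_service_config.cluster_ref.cluster_name,
                "service": zk_service_config.name,
                "parameters": dict(autopurgeSnapRetainCount=9),
                "message": "test_service_config::test_set_alongside_existing",
            }
        )

        with pytest.raises(AnsibleExitJson) as e:
            service_config.main()

        # Both the marker-seeded and module-set values should be present
        results = {c["name"]: c["value"] for c in e.value.config}
        assert dict(tickTime="3000", autopurgeSnapRetainCount="9").items() <= results.items()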