diff --git a/CHANGES.txt b/CHANGES.txt index e66834b4..58205457 100644 --- a/CHANGES.txt +++ b/CHANGES.txt @@ -1,3 +1,8 @@ +10.5.0 (Sep 15, 2025) +- Changed the log level from error to debug when renewing the token for Streaming service in asyncio mode. +- Added new configuration for Fallback Treatments, which allows setting a treatment value and optional config to be returned in place of "control", either globally or by flag. Read more in our docs. +- Deprecated config parameter `redisErrors` as it is removed in redis lib since 6.0.0 version (https://github.com/redis/redis-py/releases/tag/v6.0.0). + 10.4.0 (Aug 4, 2025) - Added a new optional argument to the client `getTreatment` methods to allow passing additional evaluation options, such as a map of properties to append to the generated impressions sent to Split backend. Read more in our docs. diff --git a/setup.py b/setup.py index 1e1928fc..e2b4c74a 100644 --- a/setup.py +++ b/setup.py @@ -45,7 +45,7 @@ tests_require=TESTS_REQUIRES, extras_require={ 'test': TESTS_REQUIRES, - 'redis': ['redis>=2.10.5'], + 'redis': ['redis>=2.10.5,<7.0.0'], 'uwsgi': ['uwsgi>=2.0.0'], 'cpphash': ['mmh3cffi==0.2.1'], 'asyncio': ['aiohttp>=3.8.4', 'aiofiles>=23.1.0'], diff --git a/splitio/client/client.py b/splitio/client/client.py index 257c9b97..9e1ddffc 100644 --- a/splitio/client/client.py +++ b/splitio/client/client.py @@ -2,6 +2,7 @@ import logging import json from collections import namedtuple +import copy from splitio.engine.evaluator import Evaluator, CONTROL, EvaluationDataFactory, AsyncEvaluationDataFactory from splitio.engine.splitters import Splitter @@ -39,7 +40,7 @@ class ClientBase(object): # pylint: disable=too-many-instance-attributes 'impressions_disabled': False } - def __init__(self, factory, recorder, labels_enabled=True): + def __init__(self, factory, recorder, labels_enabled=True, fallback_treatment_calculator=None): """ Construct a Client instance. 
@@ -61,9 +62,10 @@ def __init__(self, factory, recorder, labels_enabled=True): self._feature_flag_storage = factory._get_storage('splits') # pylint: disable=protected-access self._segment_storage = factory._get_storage('segments') # pylint: disable=protected-access self._events_storage = factory._get_storage('events') # pylint: disable=protected-access - self._evaluator = Evaluator(self._splitter) + self._evaluator = Evaluator(self._splitter, fallback_treatment_calculator) self._telemetry_evaluation_producer = self._factory._telemetry_evaluation_producer self._telemetry_init_producer = self._factory._telemetry_init_producer + self._fallback_treatment_calculator = fallback_treatment_calculator @property def ready(self): @@ -203,11 +205,26 @@ def _validate_track(self, key, traffic_type, event_type, value=None, properties= def _get_properties(self, evaluation_options): return evaluation_options.properties if evaluation_options != None else None + def _get_fallback_treatment_with_config(self, feature): + fallback_treatment = self._fallback_treatment_calculator.resolve(feature, "") + return fallback_treatment.treatment, fallback_treatment.config + + def _get_fallback_eval_results(self, eval_result, feature): + result = copy.deepcopy(eval_result) + fallback_treatment = self._fallback_treatment_calculator.resolve(feature, result["impression"]["label"]) + result["impression"]["label"] = fallback_treatment.label + result["treatment"] = fallback_treatment.treatment + result["configurations"] = fallback_treatment.config + + return result + def _check_impression_label(self, result): + return result['impression']['label'] == None or (result['impression']['label'] != None and result['impression']['label'].find(Label.SPLIT_NOT_FOUND) == -1) + class Client(ClientBase): # pylint: disable=too-many-instance-attributes """Entry point for the split sdk.""" - def __init__(self, factory, recorder, labels_enabled=True): + def __init__(self, factory, recorder, labels_enabled=True, 
fallback_treatment_calculator=None): """ Construct a Client instance. @@ -222,7 +239,7 @@ def __init__(self, factory, recorder, labels_enabled=True): :rtype: Client """ - ClientBase.__init__(self, factory, recorder, labels_enabled) + ClientBase.__init__(self, factory, recorder, labels_enabled, fallback_treatment_calculator) self._context_factory = EvaluationDataFactory(factory._get_storage('splits'), factory._get_storage('segments'), factory._get_storage('rule_based_segments')) def destroy(self): @@ -254,10 +271,11 @@ def get_treatment(self, key, feature_flag_name, attributes=None, evaluation_opti try: treatment, _ = self._get_treatment(MethodExceptionsAndLatencies.TREATMENT, key, feature_flag_name, attributes, evaluation_options) return treatment - + except: _LOGGER.error('get_treatment failed') - return CONTROL + treatment, _ = self._get_fallback_treatment_with_config(feature_flag_name) + return treatment def get_treatment_with_config(self, key, feature_flag_name, attributes=None, evaluation_options=None): """ @@ -282,8 +300,8 @@ def get_treatment_with_config(self, key, feature_flag_name, attributes=None, eva except Exception: _LOGGER.error('get_treatment_with_config failed') - return CONTROL, None - + return self._get_fallback_treatment_with_config(feature_flag_name) + def _get_treatment(self, method, key, feature, attributes=None, evaluation_options=None): """ Validate key, feature flag name and object, and get the treatment and config with an optional dictionary of attributes. 
@@ -302,7 +320,7 @@ def _get_treatment(self, method, key, feature, attributes=None, evaluation_optio :rtype: dict """ if not self._client_is_usable(): # not destroyed & not waiting for a fork - return CONTROL, None + return self._get_fallback_treatment_with_config(feature) start = get_current_epoch_time_ms() if not self.ready: @@ -312,9 +330,10 @@ def _get_treatment(self, method, key, feature, attributes=None, evaluation_optio try: key, bucketing, feature, attributes, evaluation_options = self._validate_treatment_input(key, feature, attributes, method, evaluation_options) except _InvalidInputError: - return CONTROL, None + return self._get_fallback_treatment_with_config(feature) - result = self._NON_READY_EVAL_RESULT + result = self._get_fallback_eval_results(self._NON_READY_EVAL_RESULT, feature) + if self.ready: try: ctx = self._context_factory.context_for(key, [feature]) @@ -324,15 +343,15 @@ def _get_treatment(self, method, key, feature, attributes=None, evaluation_optio _LOGGER.error('Error getting treatment for feature flag') _LOGGER.debug('Error: ', exc_info=True) self._telemetry_evaluation_producer.record_exception(method) - result = self._FAILED_EVAL_RESULT + result = self._get_fallback_eval_results(self._FAILED_EVAL_RESULT, feature) properties = self._get_properties(evaluation_options) - if result['impression']['label'] != Label.SPLIT_NOT_FOUND: + if self._check_impression_label(result): impression_decorated = self._build_impression(key, bucketing, feature, result, properties) self._record_stats([(impression_decorated, attributes)], start, method) return result['treatment'], result['configurations'] - + def get_treatments(self, key, feature_flag_names, attributes=None, evaluation_options=None): """ Evaluate multiple feature flags and return a dictionary with all the feature flag/treatments. 
@@ -356,7 +375,7 @@ def get_treatments(self, key, feature_flag_names, attributes=None, evaluation_op return {feature_flag: result[0] for (feature_flag, result) in with_config.items()} except Exception: - return {feature: CONTROL for feature in feature_flag_names} + return {feature: self._get_fallback_treatment_with_config(feature)[0] for feature in feature_flag_names} def get_treatments_with_config(self, key, feature_flag_names, attributes=None, evaluation_options=None): """ @@ -380,7 +399,7 @@ def get_treatments_with_config(self, key, feature_flag_names, attributes=None, e return self._get_treatments(key, feature_flag_names, MethodExceptionsAndLatencies.TREATMENTS_WITH_CONFIG, attributes, evaluation_options) except Exception: - return {feature: (CONTROL, None) for feature in feature_flag_names} + return {feature: (self._get_fallback_treatment_with_config(feature)) for feature in feature_flag_names} def get_treatments_by_flag_set(self, key, flag_set, attributes=None, evaluation_options=None): """ @@ -604,7 +623,7 @@ def _get_treatments(self, key, features, method, attributes=None, evaluation_opt """ start = get_current_epoch_time_ms() if not self._client_is_usable(): - return input_validator.generate_control_treatments(features) + return input_validator.generate_control_treatments(features, self._fallback_treatment_calculator) if not self.ready: _LOGGER.error("Client is not ready - no calls possible") @@ -613,9 +632,9 @@ def _get_treatments(self, key, features, method, attributes=None, evaluation_opt try: key, bucketing, features, attributes, evaluation_options = self._validate_treatments_input(key, features, attributes, method, evaluation_options) except _InvalidInputError: - return input_validator.generate_control_treatments(features) + return input_validator.generate_control_treatments(features, self._fallback_treatment_calculator) - results = {n: self._NON_READY_EVAL_RESULT for n in features} + results = {n: 
self._get_fallback_eval_results(self._NON_READY_EVAL_RESULT, n) for n in features} if self.ready: try: ctx = self._context_factory.context_for(key, features) @@ -625,12 +644,12 @@ def _get_treatments(self, key, features, method, attributes=None, evaluation_opt _LOGGER.error('Error getting treatment for feature flag') _LOGGER.debug('Error: ', exc_info=True) self._telemetry_evaluation_producer.record_exception(method) - results = {n: self._FAILED_EVAL_RESULT for n in features} + results = {n: self._get_fallback_eval_results(self._FAILED_EVAL_RESULT, n) for n in features} properties = self._get_properties(evaluation_options) imp_decorated_attrs = [ (i, attributes) for i in self._build_impressions(key, bucketing, results, properties) - if i.Impression.label != Label.SPLIT_NOT_FOUND + if i.Impression.label == None or (i.Impression.label != None and i.Impression.label.find(Label.SPLIT_NOT_FOUND)) == -1 ] self._record_stats(imp_decorated_attrs, start, method) @@ -706,7 +725,7 @@ def track(self, key, traffic_type, event_type, value=None, properties=None): class ClientAsync(ClientBase): # pylint: disable=too-many-instance-attributes """Entry point for the split sdk.""" - def __init__(self, factory, recorder, labels_enabled=True): + def __init__(self, factory, recorder, labels_enabled=True, fallback_treatment_calculator=None): """ Construct a Client instance. 
@@ -721,7 +740,7 @@ def __init__(self, factory, recorder, labels_enabled=True): :rtype: Client """ - ClientBase.__init__(self, factory, recorder, labels_enabled) + ClientBase.__init__(self, factory, recorder, labels_enabled, fallback_treatment_calculator) self._context_factory = AsyncEvaluationDataFactory(factory._get_storage('splits'), factory._get_storage('segments'), factory._get_storage('rule_based_segments')) async def destroy(self): @@ -756,7 +775,8 @@ async def get_treatment(self, key, feature_flag_name, attributes=None, evaluatio except: _LOGGER.error('get_treatment failed') - return CONTROL + treatment, _ = self._get_fallback_treatment_with_config(feature_flag_name) + return treatment async def get_treatment_with_config(self, key, feature_flag_name, attributes=None, evaluation_options=None): """ @@ -781,7 +801,7 @@ async def get_treatment_with_config(self, key, feature_flag_name, attributes=Non except Exception: _LOGGER.error('get_treatment_with_config failed') - return CONTROL, None + return self._get_fallback_treatment_with_config(feature_flag_name) async def _get_treatment(self, method, key, feature, attributes=None, evaluation_options=None): """ @@ -801,7 +821,7 @@ async def _get_treatment(self, method, key, feature, attributes=None, evaluation :rtype: dict """ if not self._client_is_usable(): # not destroyed & not waiting for a fork - return CONTROL, None + return self._get_fallback_treatment_with_config(feature) start = get_current_epoch_time_ms() if not self.ready: @@ -811,9 +831,9 @@ async def _get_treatment(self, method, key, feature, attributes=None, evaluation try: key, bucketing, feature, attributes, evaluation_options = self._validate_treatment_input(key, feature, attributes, method, evaluation_options) except _InvalidInputError: - return CONTROL, None + return self._get_fallback_treatment_with_config(feature) - result = self._NON_READY_EVAL_RESULT + result = self._get_fallback_eval_results(self._NON_READY_EVAL_RESULT, feature) if self.ready: 
try: ctx = await self._context_factory.context_for(key, [feature]) @@ -823,10 +843,10 @@ async def _get_treatment(self, method, key, feature, attributes=None, evaluation _LOGGER.error('Error getting treatment for feature flag') _LOGGER.debug('Error: ', exc_info=True) await self._telemetry_evaluation_producer.record_exception(method) - result = self._FAILED_EVAL_RESULT + result = self._get_fallback_eval_results(self._FAILED_EVAL_RESULT, feature) properties = self._get_properties(evaluation_options) - if result['impression']['label'] != Label.SPLIT_NOT_FOUND: + if self._check_impression_label(result): impression_decorated = self._build_impression(key, bucketing, feature, result, properties) await self._record_stats([(impression_decorated, attributes)], start, method) return result['treatment'], result['configurations'] @@ -854,7 +874,7 @@ async def get_treatments(self, key, feature_flag_names, attributes=None, evaluat return {feature_flag: result[0] for (feature_flag, result) in with_config.items()} except Exception: - return {feature: CONTROL for feature in feature_flag_names} + return {feature: self._get_fallback_treatment_with_config(feature)[0] for feature in feature_flag_names} async def get_treatments_with_config(self, key, feature_flag_names, attributes=None, evaluation_options=None): """ @@ -878,8 +898,7 @@ async def get_treatments_with_config(self, key, feature_flag_names, attributes=N return await self._get_treatments(key, feature_flag_names, MethodExceptionsAndLatencies.TREATMENTS_WITH_CONFIG, attributes, evaluation_options) except Exception: - _LOGGER.error("AA", exc_info=True) - return {feature: (CONTROL, None) for feature in feature_flag_names} + return {feature: (self._get_fallback_treatment_with_config(feature)) for feature in feature_flag_names} async def get_treatments_by_flag_set(self, key, flag_set, attributes=None, evaluation_options=None): """ @@ -1017,7 +1036,7 @@ async def _get_treatments(self, key, features, method, attributes=None, evaluati 
""" start = get_current_epoch_time_ms() if not self._client_is_usable(): - return input_validator.generate_control_treatments(features) + return input_validator.generate_control_treatments(features, self._fallback_treatment_calculator) if not self.ready: _LOGGER.error("Client is not ready - no calls possible") @@ -1026,9 +1045,9 @@ async def _get_treatments(self, key, features, method, attributes=None, evaluati try: key, bucketing, features, attributes, evaluation_options = self._validate_treatments_input(key, features, attributes, method, evaluation_options) except _InvalidInputError: - return input_validator.generate_control_treatments(features) + return input_validator.generate_control_treatments(features, self._fallback_treatment_calculator) - results = {n: self._NON_READY_EVAL_RESULT for n in features} + results = {n: self._get_fallback_eval_results(self._NON_READY_EVAL_RESULT, n) for n in features} if self.ready: try: ctx = await self._context_factory.context_for(key, features) @@ -1038,12 +1057,12 @@ async def _get_treatments(self, key, features, method, attributes=None, evaluati _LOGGER.error('Error getting treatment for feature flag') _LOGGER.debug('Error: ', exc_info=True) await self._telemetry_evaluation_producer.record_exception(method) - results = {n: self._FAILED_EVAL_RESULT for n in features} + results = {n: self._get_fallback_eval_results(self._FAILED_EVAL_RESULT, n) for n in features} properties = self._get_properties(evaluation_options) imp_decorated_attrs = [ (i, attributes) for i in self._build_impressions(key, bucketing, results, properties) - if i.Impression.label != Label.SPLIT_NOT_FOUND + if i.Impression.label == None or (i.Impression.label != None and i.Impression.label.find(Label.SPLIT_NOT_FOUND)) == -1 ] await self._record_stats(imp_decorated_attrs, start, method) diff --git a/splitio/client/config.py b/splitio/client/config.py index 78d08b45..25b1bc31 100644 --- a/splitio/client/config.py +++ b/splitio/client/config.py @@ -4,7 +4,8 @@ 
from enum import Enum from splitio.engine.impressions import ImpressionsMode -from splitio.client.input_validator import validate_flag_sets +from splitio.client.input_validator import validate_flag_sets, validate_fallback_treatment, validate_regex_name +from splitio.models.fallback_config import FallbackTreatmentsConfiguration _LOGGER = logging.getLogger(__name__) DEFAULT_DATA_SAMPLING = 1 @@ -69,7 +70,8 @@ class AuthenticateScheme(Enum): 'flagSetsFilter': None, 'httpAuthenticateScheme': AuthenticateScheme.NONE, 'kerberosPrincipalUser': None, - 'kerberosPrincipalPassword': None + 'kerberosPrincipalPassword': None, + 'fallbackTreatments': FallbackTreatmentsConfiguration(None) } def _parse_operation_mode(sdk_key, config): @@ -168,4 +170,38 @@ def sanitize(sdk_key, config): ' Defaulting to `none` mode.') processed["httpAuthenticateScheme"] = authenticate_scheme + processed = _sanitize_fallback_config(config, processed) + + if config.get("redisErrors") is not None: + _LOGGER.warning('Parameter `redisErrors` is deprecated as it is no longer supported in redis lib.' 
\ + ' Will ignore this value.') + + processed["redisErrors"] = None return processed + +def _sanitize_fallback_config(config, processed): + if config.get('fallbackTreatments') is None: + return processed + + if not isinstance(config['fallbackTreatments'], FallbackTreatmentsConfiguration): + _LOGGER.warning('Config: fallbackTreatments parameter should be of `FallbackTreatmentsConfiguration` class.') + processed['fallbackTreatments'] = None + return processed + + sanitized_global_fallback_treatment = config['fallbackTreatments'].global_fallback_treatment + if config['fallbackTreatments'].global_fallback_treatment is not None and not validate_fallback_treatment(config['fallbackTreatments'].global_fallback_treatment): + _LOGGER.warning('Config: global fallback treatment parameter is discarded.') + sanitized_global_fallback_treatment = None + + sanitized_flag_fallback_treatments = {} + if config['fallbackTreatments'].by_flag_fallback_treatment is not None: + for feature_name in config['fallbackTreatments'].by_flag_fallback_treatment.keys(): + if not validate_regex_name(feature_name) or not validate_fallback_treatment(config['fallbackTreatments'].by_flag_fallback_treatment[feature_name]): + _LOGGER.warning('Config: fallback treatment parameter for feature flag %s is discarded.', feature_name) + continue + + sanitized_flag_fallback_treatments[feature_name] = config['fallbackTreatments'].by_flag_fallback_treatment[feature_name] + + processed['fallbackTreatments'] = FallbackTreatmentsConfiguration(sanitized_global_fallback_treatment, sanitized_flag_fallback_treatments) + + return processed \ No newline at end of file diff --git a/splitio/client/factory.py b/splitio/client/factory.py index f6070243..6c7ce990 100644 --- a/splitio/client/factory.py +++ b/splitio/client/factory.py @@ -18,7 +18,7 @@ TelemetryStorageProducerAsync, TelemetryStorageConsumerAsync from splitio.engine.impressions.manager import Counter as ImpressionsCounter from 
splitio.engine.impressions.unique_keys_tracker import UniqueKeysTracker, UniqueKeysTrackerAsync - +from splitio.models.fallback_config import FallbackTreatmentCalculator # Storage from splitio.storage.inmemmory import InMemorySplitStorage, InMemorySegmentStorage, \ InMemoryImpressionStorage, InMemoryEventStorage, InMemoryTelemetryStorage, LocalhostTelemetryStorage, \ @@ -170,7 +170,8 @@ def __init__( # pylint: disable=too-many-arguments telemetry_producer=None, telemetry_init_producer=None, telemetry_submitter=None, - preforked_initialization=False + preforked_initialization=False, + fallback_treatment_calculator=None ): """ Class constructor. @@ -201,6 +202,7 @@ def __init__( # pylint: disable=too-many-arguments self._ready_time = get_current_epoch_time_ms() _LOGGER.debug("Running in threading mode") self._sdk_internal_ready_flag = sdk_ready_flag + self._fallback_treatment_calculator = fallback_treatment_calculator self._start_status_updater() def _start_status_updater(self): @@ -242,7 +244,7 @@ def client(self): This client is only a set of references to structures hold by the factory. Creating one a fast operation and safe to be used anywhere. """ - return Client(self, self._recorder, self._labels_enabled) + return Client(self, self._recorder, self._labels_enabled, self._fallback_treatment_calculator) def manager(self): """ @@ -338,7 +340,8 @@ def __init__( # pylint: disable=too-many-arguments telemetry_init_producer=None, telemetry_submitter=None, manager_start_task=None, - api_client=None + api_client=None, + fallback_treatment_calculator=None ): """ Class constructor. 
@@ -372,6 +375,7 @@ def __init__( # pylint: disable=too-many-arguments self._sdk_ready_flag = asyncio.Event() self._ready_task = asyncio.get_running_loop().create_task(self._update_status_when_ready_async()) self._api_client = api_client + self._fallback_treatment_calculator = fallback_treatment_calculator async def _update_status_when_ready_async(self): """Wait until the sdk is ready and update the status for async mode.""" @@ -460,7 +464,7 @@ def client(self): This client is only a set of references to structures hold by the factory. Creating one a fast operation and safe to be used anywhere. """ - return ClientAsync(self, self._recorder, self._labels_enabled) + return ClientAsync(self, self._recorder, self._labels_enabled, self._fallback_treatment_calculator) def _wrap_impression_listener(listener, metadata): """ @@ -623,7 +627,8 @@ def _build_in_memory_factory(api_key, cfg, sdk_url=None, events_url=None, # pyl synchronizer._split_synchronizers._segment_sync.shutdown() return SplitFactory(api_key, storages, cfg['labelsEnabled'], - recorder, manager, None, telemetry_producer, telemetry_init_producer, telemetry_submitter, preforked_initialization=preforked_initialization) + recorder, manager, None, telemetry_producer, telemetry_init_producer, telemetry_submitter, preforked_initialization=preforked_initialization, + fallback_treatment_calculator=FallbackTreatmentCalculator(cfg['fallbackTreatments'])) initialization_thread = threading.Thread(target=manager.start, name="SDKInitializer", daemon=True) initialization_thread.start() @@ -631,7 +636,7 @@ def _build_in_memory_factory(api_key, cfg, sdk_url=None, events_url=None, # pyl return SplitFactory(api_key, storages, cfg['labelsEnabled'], recorder, manager, sdk_ready_flag, telemetry_producer, telemetry_init_producer, - telemetry_submitter) + telemetry_submitter, fallback_treatment_calculator = FallbackTreatmentCalculator(cfg['fallbackTreatments'])) async def _build_in_memory_factory_async(api_key, cfg, sdk_url=None, 
events_url=None, # pylint:disable=too-many-arguments,too-many-localsa auth_api_base_url=None, streaming_api_base_url=None, telemetry_api_base_url=None, @@ -750,7 +755,7 @@ async def _build_in_memory_factory_async(api_key, cfg, sdk_url=None, events_url= recorder, manager, telemetry_producer, telemetry_init_producer, telemetry_submitter, manager_start_task=manager_start_task, - api_client=http_client) + api_client=http_client, fallback_treatment_calculator=FallbackTreatmentCalculator(cfg['fallbackTreatments'])) def _build_redis_factory(api_key, cfg): """Build and return a split factory with redis-based storage.""" @@ -828,7 +833,8 @@ def _build_redis_factory(api_key, cfg): manager, sdk_ready_flag=None, telemetry_producer=telemetry_producer, - telemetry_init_producer=telemetry_init_producer + telemetry_init_producer=telemetry_init_producer, + fallback_treatment_calculator=FallbackTreatmentCalculator(cfg['fallbackTreatments']) ) redundant_factory_count, active_factory_count = _get_active_and_redundant_count() storages['telemetry'].record_active_and_redundant_factories(active_factory_count, redundant_factory_count) @@ -910,7 +916,8 @@ async def _build_redis_factory_async(api_key, cfg): manager, telemetry_producer=telemetry_producer, telemetry_init_producer=telemetry_init_producer, - telemetry_submitter=telemetry_submitter + telemetry_submitter=telemetry_submitter, + fallback_treatment_calculator=FallbackTreatmentCalculator(cfg['fallbackTreatments']) ) redundant_factory_count, active_factory_count = _get_active_and_redundant_count() await storages['telemetry'].record_active_and_redundant_factories(active_factory_count, redundant_factory_count) @@ -992,7 +999,8 @@ def _build_pluggable_factory(api_key, cfg): manager, sdk_ready_flag=None, telemetry_producer=telemetry_producer, - telemetry_init_producer=telemetry_init_producer + telemetry_init_producer=telemetry_init_producer, + fallback_treatment_calculator=FallbackTreatmentCalculator(cfg['fallbackTreatments']) ) 
redundant_factory_count, active_factory_count = _get_active_and_redundant_count() storages['telemetry'].record_active_and_redundant_factories(active_factory_count, redundant_factory_count) @@ -1072,7 +1080,8 @@ async def _build_pluggable_factory_async(api_key, cfg): manager, telemetry_producer=telemetry_producer, telemetry_init_producer=telemetry_init_producer, - telemetry_submitter=telemetry_submitter + telemetry_submitter=telemetry_submitter, + fallback_treatment_calculator=FallbackTreatmentCalculator(cfg['fallbackTreatments']) ) redundant_factory_count, active_factory_count = _get_active_and_redundant_count() await storages['telemetry'].record_active_and_redundant_factories(active_factory_count, redundant_factory_count) @@ -1150,6 +1159,7 @@ def _build_localhost_factory(cfg): telemetry_producer=telemetry_producer, telemetry_init_producer=telemetry_producer.get_telemetry_init_producer(), telemetry_submitter=LocalhostTelemetrySubmitter(), + fallback_treatment_calculator=FallbackTreatmentCalculator(cfg['fallbackTreatments']) ) async def _build_localhost_factory_async(cfg): @@ -1220,7 +1230,8 @@ async def _build_localhost_factory_async(cfg): telemetry_producer=telemetry_producer, telemetry_init_producer=telemetry_producer.get_telemetry_init_producer(), telemetry_submitter=LocalhostTelemetrySubmitterAsync(), - manager_start_task=manager_start_task + manager_start_task=manager_start_task, + fallback_treatment_calculator=FallbackTreatmentCalculator(cfg['fallbackTreatments']) ) def get_factory(api_key, **kwargs): diff --git a/splitio/client/input_validator.py b/splitio/client/input_validator.py index 4a2fb8bc..dfded942 100644 --- a/splitio/client/input_validator.py +++ b/splitio/client/input_validator.py @@ -8,6 +8,7 @@ from splitio.client.key import Key from splitio.client import client from splitio.engine.evaluator import CONTROL +from splitio.models.fallback_treatment import FallbackTreatment _LOGGER = logging.getLogger(__name__) @@ -15,7 +16,8 @@ EVENT_TYPE_PATTERN 
= r'^[a-zA-Z0-9][-_.:a-zA-Z0-9]{0,79}$' MAX_PROPERTIES_LENGTH_BYTES = 32768 _FLAG_SETS_REGEX = '^[a-z0-9][_a-z0-9]{0,49}$' - +_FALLBACK_TREATMENT_REGEX = '^[0-9]+[.a-zA-Z0-9_-]*$|^[a-zA-Z]+[a-zA-Z0-9_-]*$' +_FALLBACK_TREATMENT_SIZE = 100 def _check_not_null(value, name, operation): """ @@ -500,7 +502,7 @@ def validate_feature_flags_get_treatments( # pylint: disable=invalid-name valid_feature_flags.append(ff) return valid_feature_flags -def generate_control_treatments(feature_flags): +def generate_control_treatments(feature_flags, fallback_treatment_calculator): """ Generate valid feature flags to control. @@ -515,7 +517,11 @@ def generate_control_treatments(feature_flags): to_return = {} for feature_flag in feature_flags: if isinstance(feature_flag, str) and len(feature_flag.strip())> 0: - to_return[feature_flag] = (CONTROL, None) + fallback_treatment = fallback_treatment_calculator.resolve(feature_flag, "") + treatment = fallback_treatment.treatment + config = fallback_treatment.config + + to_return[feature_flag] = (treatment, config) return to_return @@ -712,3 +718,28 @@ def validate_flag_sets(flag_sets, method_name): sanitized_flag_sets.add(flag_set) return list(sanitized_flag_sets) + +def validate_fallback_treatment(fallback_treatment): + if not isinstance(fallback_treatment, FallbackTreatment): + _LOGGER.warning("Config: Fallback treatment instance should be FallbackTreatment, input is discarded") + return False + + if not isinstance(fallback_treatment.treatment, str): + _LOGGER.warning("Config: Fallback treatment value should be str type, input is discarded") + return False + + if not validate_regex_name(fallback_treatment.treatment): + _LOGGER.warning("Config: Fallback treatment should match regex %s", _FALLBACK_TREATMENT_REGEX) + return False + + if len(fallback_treatment.treatment) > _FALLBACK_TREATMENT_SIZE: + _LOGGER.warning("Config: Fallback treatment size should not exceed %s characters", _FALLBACK_TREATMENT_SIZE) + return False + + return True + +def 
validate_regex_name(name): + if re.match(_FALLBACK_TREATMENT_REGEX, name) == None: + return False + + return True \ No newline at end of file diff --git a/splitio/client/util.py b/splitio/client/util.py index e4892512..b5b693cb 100644 --- a/splitio/client/util.py +++ b/splitio/client/util.py @@ -50,4 +50,4 @@ def get_metadata(config): """ version = 'python-%s' % __version__ ip_address, hostname = _get_hostname_and_ip(config) - return SdkMetadata(version, hostname, ip_address) + return SdkMetadata(version, hostname, ip_address) \ No newline at end of file diff --git a/splitio/engine/evaluator.py b/splitio/engine/evaluator.py index 26875a68..b47db5c5 100644 --- a/splitio/engine/evaluator.py +++ b/splitio/engine/evaluator.py @@ -20,7 +20,7 @@ class Evaluator(object): # pylint: disable=too-few-public-methods """Split Evaluator class.""" - def __init__(self, splitter): + def __init__(self, splitter, fallback_treatment_calculator=None): """ Construct a Evaluator instance. @@ -28,6 +28,7 @@ def __init__(self, splitter): :type splitter: splitio.engine.splitters.Splitters """ self._splitter = splitter + self._fallback_treatment_calculator = fallback_treatment_calculator def eval_many_with_context(self, key, bucketing, features, attrs, ctx): """ @@ -51,6 +52,10 @@ def eval_with_context(self, key, bucketing, feature_name, attrs, ctx): if not feature: _LOGGER.warning('Unknown or invalid feature: %s', feature) label = Label.SPLIT_NOT_FOUND + fallback_treatment = self._fallback_treatment_calculator.resolve(feature_name, label) + label = fallback_treatment.label + _treatment = fallback_treatment.treatment + config = fallback_treatment.config else: _change_number = feature.change_number if feature.killed: @@ -59,17 +64,18 @@ def eval_with_context(self, key, bucketing, feature_name, attrs, ctx): else: label, _treatment = self._check_prerequisites(feature, bucketing, key, attrs, ctx, label, _treatment) label, _treatment = self._get_treatment(feature, bucketing, key, attrs, ctx, 
label, _treatment) + config = feature.get_configurations_for(_treatment) return { 'treatment': _treatment, - 'configurations': feature.get_configurations_for(_treatment) if feature else None, + 'configurations': config, 'impression': { 'label': label, 'change_number': _change_number }, 'impressions_disabled': feature.impressions_disabled if feature else None } - + def _get_treatment(self, feature, bucketing, key, attrs, ctx, label, _treatment): if _treatment == CONTROL: treatment, label = self._treatment_for_flag(feature, key, bucketing, attrs, ctx) diff --git a/splitio/models/fallback_config.py b/splitio/models/fallback_config.py new file mode 100644 index 00000000..aba7ad7b --- /dev/null +++ b/splitio/models/fallback_config.py @@ -0,0 +1,81 @@ +"""Fallback treatments configuration module.""" +from splitio.models.fallback_treatment import FallbackTreatment +from splitio.engine.evaluator import CONTROL + +class FallbackTreatmentsConfiguration(object): + """FallbackTreatmentsConfiguration object class.""" + + def __init__(self, global_fallback_treatment=None, by_flag_fallback_treatment=None): + """ + Class constructor. + + :param global_fallback_treatment: global FallbackTreatment. 
+ :type global_fallback_treatment: FallbackTreatment + + :param by_flag_fallback_treatment: Dict of flags and their fallback treatment + :type by_flag_fallback_treatment: {str: FallbackTreatment} + """ + self._global_fallback_treatment = global_fallback_treatment + self._by_flag_fallback_treatment = by_flag_fallback_treatment + + @property + def global_fallback_treatment(self): + """Return global fallback treatment.""" + return self._global_fallback_treatment + + @global_fallback_treatment.setter + def global_fallback_treatment(self, new_value): + """Set global fallback treatment.""" + self._global_fallback_treatment = new_value + + @property + def by_flag_fallback_treatment(self): + """Return by flag fallback treatment.""" + return self._by_flag_fallback_treatment + + @by_flag_fallback_treatment.setter + def by_flag_fallback_treatment(self, new_value): + """Set by flag fallback treatment.""" + self._by_flag_fallback_treatment = new_value + +class FallbackTreatmentCalculator(object): + """FallbackTreatmentCalculator object class.""" + + def __init__(self, fallback_treatment_configuration): + """ + Class constructor. 
+ + :param fallback_treatment_configuration: fallback treatment configuration + :type fallback_treatment_configuration: FallbackTreatmentsConfiguration + """ + self._label_prefix = "fallback - " + self._fallback_treatments_configuration = fallback_treatment_configuration + + @property + def fallback_treatments_configuration(self): + """Return fallback treatment configuration.""" + return self._fallback_treatments_configuration + + def resolve(self, flag_name, label): + if self._fallback_treatments_configuration != None: + if self._fallback_treatments_configuration.by_flag_fallback_treatment != None \ + and self._fallback_treatments_configuration.by_flag_fallback_treatment.get(flag_name) != None: + return self._copy_with_label(self._fallback_treatments_configuration.by_flag_fallback_treatment.get(flag_name), \ + self._resolve_label(label)) + + if self._fallback_treatments_configuration.global_fallback_treatment != None: + return self._copy_with_label(self._fallback_treatments_configuration.global_fallback_treatment, \ + self._resolve_label(label)) + + return FallbackTreatment(CONTROL, None, label) + + def _resolve_label(self, label): + if label == None: + return None + + return self._label_prefix + label + + def _copy_with_label(self, fallback_treatment, label): + return FallbackTreatment(fallback_treatment.treatment, fallback_treatment.config, label) + + \ No newline at end of file diff --git a/splitio/models/fallback_treatment.py b/splitio/models/fallback_treatment.py new file mode 100644 index 00000000..794cbb63 --- /dev/null +++ b/splitio/models/fallback_treatment.py @@ -0,0 +1,34 @@ +"""Segment module.""" +import json + +class FallbackTreatment(object): + """FallbackTreatment object class.""" + + def __init__(self, treatment, config=None, label=None): + """ + Class constructor. + + :param treatment: treatment. + :type treatment: str + + :param config: config. 
+ :type config: json + """ + self._treatment = treatment + self._config = config + self._label = label + + @property + def treatment(self): + """Return treatment.""" + return self._treatment + + @property + def config(self): + """Return config.""" + return self._config + + @property + def label(self): + """Return label prefix.""" + return self._label \ No newline at end of file diff --git a/splitio/push/splitsse.py b/splitio/push/splitsse.py index 63e24b40..788648d4 100644 --- a/splitio/push/splitsse.py +++ b/splitio/push/splitsse.py @@ -247,7 +247,7 @@ async def stop(self): try: await self._event_source_ended.wait() except asyncio.CancelledError as e: - _LOGGER.error("Exception waiting for event source ended") + _LOGGER.debug("Exception waiting for event source ended") _LOGGER.debug('stack trace: ', exc_info=True) pass diff --git a/splitio/push/sse.py b/splitio/push/sse.py index 84d73224..8cde7f98 100644 --- a/splitio/push/sse.py +++ b/splitio/push/sse.py @@ -205,7 +205,7 @@ async def shutdown(self): try: await self._done.wait() except asyncio.CancelledError: - _LOGGER.error("Exception waiting for SSE connection to end") + _LOGGER.debug("Exception waiting for SSE connection to end") _LOGGER.debug('stack trace: ', exc_info=True) pass diff --git a/splitio/storage/adapters/redis.py b/splitio/storage/adapters/redis.py index 78d88487..92aa2544 100644 --- a/splitio/storage/adapters/redis.py +++ b/splitio/storage/adapters/redis.py @@ -715,7 +715,6 @@ def _build_default_client(config): # pylint: disable=too-many-locals unix_socket_path = config.get('redisUnixSocketPath', None) encoding = config.get('redisEncoding', 'utf-8') encoding_errors = config.get('redisEncodingErrors', 'strict') - errors = config.get('redisErrors', None) decode_responses = config.get('redisDecodeResponses', True) retry_on_timeout = config.get('redisRetryOnTimeout', False) ssl = config.get('redisSsl', False) @@ -740,7 +739,6 @@ def _build_default_client(config): # pylint: disable=too-many-locals 
unix_socket_path=unix_socket_path, encoding=encoding, encoding_errors=encoding_errors, - errors=errors, decode_responses=decode_responses, retry_on_timeout=retry_on_timeout, ssl=ssl, diff --git a/splitio/version.py b/splitio/version.py index 9858bdcf..780d6251 100644 --- a/splitio/version.py +++ b/splitio/version.py @@ -1 +1 @@ -__version__ = '10.4.0' \ No newline at end of file +__version__ = '10.5.0' \ No newline at end of file diff --git a/tests/client/test_client.py b/tests/client/test_client.py index 9a6848eb..27ed399d 100644 --- a/tests/client/test_client.py +++ b/tests/client/test_client.py @@ -9,6 +9,8 @@ from splitio.client.client import Client, _LOGGER as _logger, CONTROL, ClientAsync, EvaluationOptions from splitio.client.factory import SplitFactory, Status as FactoryStatus, SplitFactoryAsync +from splitio.models.fallback_config import FallbackTreatmentsConfiguration, FallbackTreatmentCalculator +from splitio.models.fallback_treatment import FallbackTreatment from splitio.models.impressions import Impression, Label from splitio.models.events import Event, EventWrapper from splitio.storage import EventStorage, ImpressionStorage, SegmentStorage, SplitStorage, RuleBasedSegmentsStorage @@ -21,7 +23,7 @@ from splitio.engine.impressions.manager import Counter as ImpressionsCounter from splitio.engine.impressions.unique_keys_tracker import UniqueKeysTracker, UniqueKeysTrackerAsync from splitio.engine.telemetry import TelemetryStorageConsumer, TelemetryStorageProducer, TelemetryStorageProducerAsync -from splitio.engine.evaluator import Evaluator +from splitio.engine.evaluator import Evaluator, EvaluationContext from splitio.recorder.recorder import StandardRecorder, StandardRecorderAsync from splitio.engine.impressions.strategies import StrategyDebugMode, StrategyNoneMode, StrategyOptimizedMode from tests.integration import splits_json @@ -74,7 +76,7 @@ def synchronize_config(*_): factory.block_until_ready(5) 
split_storage.update([from_raw(splits_json['splitChange1_1']['ff']['d'][0])], [], -1) - client = Client(factory, recorder, True) + client = Client(factory, recorder, True, FallbackTreatmentCalculator(None)) client._evaluator = mocker.Mock(spec=Evaluator) client._evaluator.eval_with_context.return_value = { 'treatment': 'on', @@ -146,7 +148,7 @@ def synchronize_config(*_): mocker.patch('splitio.client.client.get_latency_bucket_index', new=lambda x: 5) split_storage.update([from_raw(splits_json['splitChange1_1']['ff']['d'][0])], [], -1) - client = Client(factory, recorder, True) + client = Client(factory, recorder, True, FallbackTreatmentCalculator(None)) client._evaluator = mocker.Mock(spec=Evaluator) client._evaluator.eval_with_context.return_value = { 'treatment': 'on', @@ -223,7 +225,7 @@ def synchronize_config(*_): mocker.patch('splitio.client.client.utctime_ms', new=lambda: 1000) mocker.patch('splitio.client.client.get_latency_bucket_index', new=lambda x: 5) - client = Client(factory, recorder, True) + client = Client(factory, recorder, True, FallbackTreatmentCalculator(None)) client._evaluator = mocker.Mock(spec=Evaluator) evaluation = { 'treatment': 'on', @@ -303,7 +305,7 @@ def synchronize_config(*_): mocker.patch('splitio.client.client.utctime_ms', new=lambda: 1000) mocker.patch('splitio.client.client.get_latency_bucket_index', new=lambda x: 5) - client = Client(factory, recorder, True) + client = Client(factory, recorder, True, FallbackTreatmentCalculator(None)) client._evaluator = mocker.Mock(spec=Evaluator) evaluation = { 'treatment': 'on', @@ -382,7 +384,7 @@ def synchronize_config(*_): mocker.patch('splitio.client.client.utctime_ms', new=lambda: 1000) mocker.patch('splitio.client.client.get_latency_bucket_index', new=lambda x: 5) - client = Client(factory, recorder, True) + client = Client(factory, recorder, True, FallbackTreatmentCalculator(None)) client._evaluator = mocker.Mock(spec=Evaluator) evaluation = { 'treatment': 'on', @@ -460,7 +462,7 @@ def 
synchronize_config(*_): mocker.patch('splitio.client.client.utctime_ms', new=lambda: 1000) mocker.patch('splitio.client.client.get_latency_bucket_index', new=lambda x: 5) - client = Client(factory, recorder, True) + client = Client(factory, recorder, True, FallbackTreatmentCalculator(None)) client._evaluator = mocker.Mock(spec=Evaluator) evaluation = { 'treatment': 'on', @@ -543,7 +545,7 @@ def synchronize_config(*_): mocker.patch('splitio.client.client.utctime_ms', new=lambda: 1000) mocker.patch('splitio.client.client.get_latency_bucket_index', new=lambda x: 5) - client = Client(factory, recorder, True) + client = Client(factory, recorder, True, FallbackTreatmentCalculator(None)) client._evaluator = mocker.Mock(spec=Evaluator) evaluation = { 'treatment': 'on', @@ -623,7 +625,7 @@ def synchronize_config(*_): mocker.patch('splitio.client.client.utctime_ms', new=lambda: 1000) mocker.patch('splitio.client.client.get_latency_bucket_index', new=lambda x: 5) - client = Client(factory, recorder, True) + client = Client(factory, recorder, True, FallbackTreatmentCalculator(None)) client._evaluator = mocker.Mock(spec=Evaluator) evaluation = { 'treatment': 'on', @@ -710,7 +712,7 @@ def synchronize_config(*_): from_raw(splits_json['splitChange1_1']['ff']['d'][1]), from_raw(splits_json['splitChange1_1']['ff']['d'][2]) ], [], -1) - client = Client(factory, recorder, True) + client = Client(factory, recorder, True, FallbackTreatmentCalculator(None)) assert client.get_treatment('some_key', 'SPLIT_1') == 'off' assert client.get_treatment('some_key', 'SPLIT_2') == 'on' assert client.get_treatment('some_key', 'SPLIT_3') == 'on' @@ -774,7 +776,7 @@ def synchronize_config(*_): from_raw(splits_json['splitChange1_1']['ff']['d'][1]), from_raw(splits_json['splitChange1_1']['ff']['d'][2]) ], [], -1) - client = Client(factory, recorder, True) + client = Client(factory, recorder, True, FallbackTreatmentCalculator(None)) assert client.get_treatment('some_key', 'SPLIT_1') == 'off' assert 
client.get_treatment('some_key', 'SPLIT_2') == 'on' assert client.get_treatment('some_key', 'SPLIT_3') == 'on' @@ -838,7 +840,7 @@ def synchronize_config(*_): from_raw(splits_json['splitChange1_1']['ff']['d'][1]), from_raw(splits_json['splitChange1_1']['ff']['d'][2]) ], [], -1) - client = Client(factory, recorder, True) + client = Client(factory, recorder, True, FallbackTreatmentCalculator(None)) assert client.get_treatment('some_key', 'SPLIT_1') == 'off' assert client.get_treatment('some_key', 'SPLIT_2') == 'on' assert client.get_treatment('some_key', 'SPLIT_3') == 'on' @@ -879,7 +881,7 @@ def synchronize_config(*_): pass factory._telemetry_submitter = TelemetrySubmitterMock() - client = Client(factory, recorder, True) + client = Client(factory, recorder, True, FallbackTreatmentCalculator(None)) client.destroy() assert client.destroyed is not None assert(mocker.called) @@ -921,7 +923,7 @@ def synchronize_config(*_): factory._apikey = 'test' mocker.patch('splitio.client.client.utctime_ms', new=lambda: 1000) - client = Client(factory, recorder, True) + client = Client(factory, recorder, True, FallbackTreatmentCalculator(None)) assert client.track('key', 'user', 'purchase', 12) is True assert mocker.call([ EventWrapper( @@ -970,7 +972,7 @@ def synchronize_config(*_): mocker.call('Client is not ready - no calls possible') ] - client = Client(factory, mocker.Mock()) + client = Client(factory, mocker.Mock(), mocker.Mock(), FallbackTreatmentCalculator(None)) _logger = mocker.Mock() mocker.patch('splitio.client.client._LOGGER', new=_logger) @@ -1042,7 +1044,7 @@ def synchronize_config(*_): pass factory._telemetry_submitter = TelemetrySubmitterMock() - client = Client(factory, mocker.Mock()) + client = Client(factory, mocker.Mock(), mocker.Mock(), FallbackTreatmentCalculator(None)) client.ready = False assert client.get_treatment('some_key', 'SPLIT_2') == CONTROL assert(telemetry_storage._tel_config._not_ready == 1) @@ -1094,7 +1096,7 @@ def stop(*_): ready_property = 
mocker.PropertyMock() ready_property.return_value = True type(factory).ready = ready_property - client = Client(factory, recorder, True) + client = Client(factory, recorder, True, FallbackTreatmentCalculator(None)) def _raise(*_): raise RuntimeError('something') client._evaluator.eval_many_with_context = _raise @@ -1190,7 +1192,7 @@ def stop(*_): pass factory._sync_manager.stop = stop - client = Client(factory, recorder, True) + client = Client(factory, recorder, True, FallbackTreatmentCalculator(None)) assert client.get_treatment('key', 'SPLIT_2') == 'on' assert(telemetry_storage._method_latencies._treatment[0] == 1) @@ -1256,7 +1258,7 @@ def synchronize_config(*_): pass factory._telemetry_submitter = TelemetrySubmitterMock() - client = Client(factory, recorder, True) + client = Client(factory, recorder, True, FallbackTreatmentCalculator(None)) try: client.track('key', 'tt', 'ev') except: @@ -1309,7 +1311,7 @@ def synchronize_config(*_): factory.block_until_ready(5) split_storage.update([from_raw(splits_json['splitChange1_1']['ff']['d'][0])], [], -1) - client = Client(factory, recorder, True) + client = Client(factory, recorder, True, FallbackTreatmentCalculator(None)) client._evaluator = mocker.Mock(spec=Evaluator) evaluation = { 'treatment': 'on', @@ -1375,6 +1377,287 @@ def synchronize_config(*_): assert client.get_treatments_with_config_by_flag_sets('some_key', ['set_1'], evaluation_options=EvaluationOptions({"prop": "value"})) == {'SPLIT_2': ('on', None)} assert impression_storage.pop_many(100) == [Impression('some_key', 'SPLIT_2', 'on', 'some_label', 123, None, 1000, None, '{"prop": "value"}')] + @mock.patch('splitio.engine.evaluator.Evaluator.eval_with_context', side_effect=RuntimeError()) + def test_fallback_treatment_eval_exception(self, mocker): + # using fallback when the evaluator has RuntimeError exception + split_storage = mocker.Mock(spec=SplitStorage) + segment_storage = mocker.Mock(spec=SegmentStorage) + rb_segment_storage = 
mocker.Mock(spec=RuleBasedSegmentsStorage) + impression_storage = mocker.Mock(spec=ImpressionStorage) + event_storage = mocker.Mock(spec=EventStorage) + destroyed_property = mocker.PropertyMock() + destroyed_property.return_value = False + + mocker.patch('splitio.client.client.utctime_ms', new=lambda: 1000) + mocker.patch('splitio.client.client.get_latency_bucket_index', new=lambda x: 5) + + telemetry_storage = InMemoryTelemetryStorage() + telemetry_producer = TelemetryStorageProducer(telemetry_storage) + impmanager = ImpressionManager(StrategyOptimizedMode(), StrategyNoneMode(), telemetry_producer.get_telemetry_runtime_producer()) + recorder = StandardRecorder(impmanager, event_storage, impression_storage, telemetry_producer.get_telemetry_evaluation_producer(), telemetry_producer.get_telemetry_runtime_producer()) + factory = SplitFactory(mocker.Mock(), + {'splits': split_storage, + 'segments': segment_storage, + 'rule_based_segments': rb_segment_storage, + 'impressions': impression_storage, + 'events': event_storage}, + mocker.Mock(), + recorder, + impmanager, + mocker.Mock(), + telemetry_producer, + telemetry_producer.get_telemetry_init_producer(), + mocker.Mock() + ) + + self.imps = None + def put(impressions): + self.imps = impressions + impression_storage.put = put + + class TelemetrySubmitterMock(): + def synchronize_config(*_): + pass + factory._telemetry_submitter = TelemetrySubmitterMock() + client = Client(factory, recorder, True, FallbackTreatmentCalculator(FallbackTreatmentsConfiguration(FallbackTreatment("on-global", '{"prop": "val"}')))) + + def get_feature_flag_names_by_flag_sets(*_): + return ["some", "some2"] + client._get_feature_flag_names_by_flag_sets = get_feature_flag_names_by_flag_sets + + treatment = client.get_treatment("key", "some") + assert(treatment == "on-global") + assert(self.imps[0].treatment == "on-global") + assert(self.imps[0].label == "fallback - exception") + + self.imps = None + treatment = client.get_treatments("key_m", 
["some", "some2"]) + assert(treatment == {"some": "on-global", "some2": "on-global"}) + assert(self.imps[0].treatment == "on-global") + assert(self.imps[0].label == "fallback - exception") + assert(self.imps[1].treatment == "on-global") + assert(self.imps[1].label == "fallback - exception") + + assert(client.get_treatment_with_config("key", "some") == ("on-global", '{"prop": "val"}')) + assert(client.get_treatments_with_config("key_m", ["some", "some2"]) == {"some": ("on-global", '{"prop": "val"}'), "some2": ("on-global", '{"prop": "val"}')}) + assert(client.get_treatments_by_flag_set("key_m", "set") == {"some": "on-global", "some2": "on-global"}) + assert(client.get_treatments_by_flag_set("key_m", ["set"]) == {"some": "on-global", "some2": "on-global"}) + assert(client.get_treatments_with_config_by_flag_set("key_m", "set") == {"some": ("on-global", '{"prop": "val"}'), "some2": ("on-global", '{"prop": "val"}')}) + assert(client.get_treatments_with_config_by_flag_sets("key_m", ["set"]) == {"some": ("on-global", '{"prop": "val"}'), "some2": ("on-global", '{"prop": "val"}')}) + + self.imps = None + client._fallback_treatment_calculator = FallbackTreatmentCalculator(FallbackTreatmentsConfiguration(FallbackTreatment("on-global", '{"prop": "val"}'), {'some': FallbackTreatment("on-local")})) + treatment = client.get_treatment("key2", "some") + assert(treatment == "on-local") + assert(self.imps[0].treatment == "on-local") + assert(self.imps[0].label == "fallback - exception") + + self.imps = None + treatment = client.get_treatments("key2_m", ["some", "some2"]) + assert(treatment == {"some": "on-local", "some2": "on-global"}) + assert_both = 0 + for imp in self.imps: + if imp.feature_name == "some": + assert_both += 1 + assert(imp.treatment == "on-local") + assert(imp.label == "fallback - exception") + else: + assert_both += 1 + assert(imp.treatment == "on-global") + assert(imp.label == "fallback - exception") + assert assert_both == 2 + + 
assert(client.get_treatment_with_config("key", "some") == ("on-local", None)) + assert(client.get_treatments_with_config("key_m", ["some", "some2"]) == {"some": ("on-local", None), "some2": ("on-global", '{"prop": "val"}')}) + assert(client.get_treatments_by_flag_set("key_m", "set") == {"some": "on-local", "some2": "on-global"}) + assert(client.get_treatments_by_flag_set("key_m", ["set"]) == {"some": "on-local", "some2": "on-global"}) + assert(client.get_treatments_with_config_by_flag_set("key_m", "set") == {"some": ("on-local", None), "some2": ("on-global", '{"prop": "val"}')}) + assert(client.get_treatments_with_config_by_flag_sets("key_m", ["set"]) == {"some": ("on-local", None), "some2": ("on-global", '{"prop": "val"}')}) + + self.imps = None + client._fallback_treatment_calculator = FallbackTreatmentCalculator(FallbackTreatmentsConfiguration(None, {'some': FallbackTreatment("on-local", '{"prop": "val"}')})) + treatment = client.get_treatment("key3", "some") + assert(treatment == "on-local") + assert(self.imps[0].treatment == "on-local") + assert(self.imps[0].label == "fallback - exception") + + self.imps = None + treatment = client.get_treatments("key3_m", ["some", "some2"]) + assert(treatment == {"some": "on-local", "some2": "control"}) + assert_both = 0 + for imp in self.imps: + if imp.feature_name == "some": + assert_both += 1 + assert(imp.treatment == "on-local") + assert(imp.label == "fallback - exception") + else: + assert_both += 1 + assert(imp.treatment == "control") + assert(imp.label == "exception") + assert assert_both == 2 + + assert(client.get_treatment_with_config("key", "some") == ("on-local", '{"prop": "val"}')) + assert(client.get_treatments_with_config("key_m", ["some", "some2"]) == {"some": ("on-local", '{"prop": "val"}'), "some2": ("control", None)}) + assert(client.get_treatments_by_flag_set("key_m", "set") == {"some": "on-local", "some2": "control"}) + assert(client.get_treatments_by_flag_set("key_m", ["set"]) == {"some": "on-local", 
"some2": "control"}) + assert(client.get_treatments_with_config_by_flag_set("key_m", "set") == {"some": ("on-local", '{"prop": "val"}'), "some2": ("control", None)}) + assert(client.get_treatments_with_config_by_flag_sets("key_m", ["set"]) == {"some": ("on-local", '{"prop": "val"}'), "some2": ("control", None)}) + + self.imps = None + client._fallback_treatment_calculator = FallbackTreatmentCalculator(FallbackTreatmentsConfiguration(None, {'some2': FallbackTreatment("on-local")})) + treatment = client.get_treatment("key4", "some") + assert(treatment == "control") + assert(self.imps[0].treatment == "control") + assert(self.imps[0].label == "exception") + + try: + factory.destroy() + except: + pass + + @mock.patch('splitio.engine.evaluator.Evaluator.eval_with_context', side_effect=Exception()) + def test_fallback_treatment_exception(self, mocker): + # using fallback when the evaluator has RuntimeError exception + split_storage = mocker.Mock(spec=SplitStorage) + segment_storage = mocker.Mock(spec=SegmentStorage) + rb_segment_storage = mocker.Mock(spec=RuleBasedSegmentsStorage) + impression_storage = mocker.Mock(spec=ImpressionStorage) + event_storage = mocker.Mock(spec=EventStorage) + destroyed_property = mocker.PropertyMock() + destroyed_property.return_value = False + + mocker.patch('splitio.client.client.utctime_ms', new=lambda: 1000) + mocker.patch('splitio.client.client.get_latency_bucket_index', new=lambda x: 5) + + telemetry_storage = InMemoryTelemetryStorage() + telemetry_producer = TelemetryStorageProducer(telemetry_storage) + impmanager = ImpressionManager(StrategyOptimizedMode(), StrategyNoneMode(), telemetry_producer.get_telemetry_runtime_producer()) + recorder = StandardRecorder(impmanager, event_storage, impression_storage, telemetry_producer.get_telemetry_evaluation_producer(), telemetry_producer.get_telemetry_runtime_producer()) + factory = SplitFactory(mocker.Mock(), + {'splits': split_storage, + 'segments': segment_storage, + 'rule_based_segments': 
rb_segment_storage, + 'impressions': impression_storage, + 'events': event_storage}, + mocker.Mock(), + recorder, + impmanager, + mocker.Mock(), + telemetry_producer, + telemetry_producer.get_telemetry_init_producer(), + mocker.Mock() + ) + + self.imps = None + def put(impressions): + self.imps = impressions + impression_storage.put = put + + class TelemetrySubmitterMock(): + def synchronize_config(*_): + pass + factory._telemetry_submitter = TelemetrySubmitterMock() + client = Client(factory, recorder, True, FallbackTreatmentCalculator(FallbackTreatmentsConfiguration(FallbackTreatment("on-global")))) + treatment = client.get_treatment("key", "some") + assert(treatment == "on-global") + assert(self.imps == None) + + self.imps = None + client._fallback_treatment_calculator = FallbackTreatmentCalculator(FallbackTreatmentsConfiguration(FallbackTreatment("on-global"), {'some': FallbackTreatment("on-local")})) + treatment = client.get_treatment("key2", "some") + assert(treatment == "on-local") + assert(self.imps == None) + + self.imps = None + client._fallback_treatment_calculator = FallbackTreatmentCalculator(FallbackTreatmentsConfiguration(None, {'some': FallbackTreatment("on-local")})) + treatment = client.get_treatment("key3", "some") + assert(treatment == "on-local") + assert(self.imps == None) + + self.imps = None + client._fallback_treatment_calculator = FallbackTreatmentCalculator(FallbackTreatmentsConfiguration(None, {'some2': FallbackTreatment("on-local")})) + treatment = client.get_treatment("key4", "some") + assert(treatment == "control") + assert(self.imps == None) + + try: + factory.destroy() + except: + pass + + @mock.patch('splitio.client.client.Client.ready', side_effect=None) + def test_fallback_treatment_not_ready_impressions(self, mocker): + # using fallback when the evaluator has RuntimeError exception + split_storage = mocker.Mock(spec=SplitStorage) + segment_storage = mocker.Mock(spec=SegmentStorage) + rb_segment_storage = 
mocker.Mock(spec=RuleBasedSegmentsStorage) + impression_storage = mocker.Mock(spec=ImpressionStorage) + event_storage = mocker.Mock(spec=EventStorage) + + mocker.patch('splitio.client.client.utctime_ms', new=lambda: 1000) + mocker.patch('splitio.client.client.get_latency_bucket_index', new=lambda x: 5) + + telemetry_storage = InMemoryTelemetryStorage() + telemetry_producer = TelemetryStorageProducer(telemetry_storage) + impmanager = ImpressionManager(StrategyOptimizedMode(), StrategyNoneMode(), telemetry_producer.get_telemetry_runtime_producer()) + recorder = StandardRecorder(impmanager, event_storage, impression_storage, telemetry_producer.get_telemetry_evaluation_producer(), telemetry_producer.get_telemetry_runtime_producer()) + factory = SplitFactory(mocker.Mock(), + {'splits': split_storage, + 'segments': segment_storage, + 'rule_based_segments': rb_segment_storage, + 'impressions': impression_storage, + 'events': event_storage}, + mocker.Mock(), + recorder, + impmanager, + mocker.Mock(), + telemetry_producer, + telemetry_producer.get_telemetry_init_producer(), + mocker.Mock() + ) + + self.imps = None + def put(impressions): + self.imps = impressions + impression_storage.put = put + + class TelemetrySubmitterMock(): + def synchronize_config(*_): + pass + factory._telemetry_submitter = TelemetrySubmitterMock() + client = Client(factory, recorder, True, FallbackTreatmentCalculator(FallbackTreatmentsConfiguration(FallbackTreatment("on-global")))) + client.ready = False + + treatment = client.get_treatment("key", "some") + assert(self.imps[0].treatment == "on-global") + assert(self.imps[0].label == "fallback - not ready") + + self.imps = None + client._fallback_treatment_calculator = FallbackTreatmentCalculator(FallbackTreatmentsConfiguration(FallbackTreatment("on-global"), {'some': FallbackTreatment("on-local")})) + treatment = client.get_treatment("key2", "some") + assert(treatment == "on-local") + assert(self.imps[0].treatment == "on-local") + 
assert(self.imps[0].label == "fallback - not ready") + + self.imps = None + client._fallback_treatment_calculator = FallbackTreatmentCalculator(FallbackTreatmentsConfiguration(None, {'some': FallbackTreatment("on-local")})) + treatment = client.get_treatment("key3", "some") + assert(treatment == "on-local") + assert(self.imps[0].treatment == "on-local") + assert(self.imps[0].label == "fallback - not ready") + + self.imps = None + client._fallback_treatment_calculator = FallbackTreatmentCalculator(FallbackTreatmentsConfiguration(None, {'some2': FallbackTreatment("on-local")})) + treatment = client.get_treatment("key4", "some") + assert(treatment == "control") + assert(self.imps[0].treatment == "control") + assert(self.imps[0].label == "not ready") + + try: + factory.destroy() + except: + pass + class ClientAsyncTests(object): # pylint: disable=too-few-public-methods """Split client async test cases.""" @@ -1417,7 +1700,7 @@ async def synchronize_config(*_): ) await factory.block_until_ready(1) - client = ClientAsync(factory, recorder, True) + client = ClientAsync(factory, recorder, True, FallbackTreatmentCalculator(None)) client._evaluator = mocker.Mock(spec=Evaluator) client._evaluator.eval_with_context.return_value = { 'treatment': 'on', @@ -1489,7 +1772,7 @@ async def synchronize_config(*_): mocker.patch('splitio.client.client.get_latency_bucket_index', new=lambda x: 5) await factory.block_until_ready(1) - client = ClientAsync(factory, recorder, True) + client = ClientAsync(factory, recorder, True, FallbackTreatmentCalculator(None)) client._evaluator = mocker.Mock(spec=Evaluator) client._evaluator.eval_with_context.return_value = { 'treatment': 'on', @@ -1566,7 +1849,7 @@ async def synchronize_config(*_): mocker.patch('splitio.client.client.get_latency_bucket_index', new=lambda x: 5) await factory.block_until_ready(1) - client = ClientAsync(factory, recorder, True) + client = ClientAsync(factory, recorder, True, FallbackTreatmentCalculator(None)) 
client._evaluator = mocker.Mock(spec=Evaluator) evaluation = { 'treatment': 'on', @@ -1646,7 +1929,7 @@ async def synchronize_config(*_): mocker.patch('splitio.client.client.get_latency_bucket_index', new=lambda x: 5) await factory.block_until_ready(1) - client = ClientAsync(factory, recorder, True) + client = ClientAsync(factory, recorder, True, FallbackTreatmentCalculator(None)) client._evaluator = mocker.Mock(spec=Evaluator) evaluation = { 'treatment': 'on', @@ -1726,7 +2009,7 @@ async def synchronize_config(*_): mocker.patch('splitio.client.client.get_latency_bucket_index', new=lambda x: 5) await factory.block_until_ready(1) - client = ClientAsync(factory, recorder, True) + client = ClientAsync(factory, recorder, True, FallbackTreatmentCalculator(None)) client._evaluator = mocker.Mock(spec=Evaluator) evaluation = { 'treatment': 'on', @@ -1805,7 +2088,7 @@ async def synchronize_config(*_): mocker.patch('splitio.client.client.get_latency_bucket_index', new=lambda x: 5) await factory.block_until_ready(1) - client = ClientAsync(factory, recorder, True) + client = ClientAsync(factory, recorder, True, FallbackTreatmentCalculator(None)) client._evaluator = mocker.Mock(spec=Evaluator) evaluation = { 'treatment': 'on', @@ -1889,7 +2172,7 @@ async def synchronize_config(*_): mocker.patch('splitio.client.client.get_latency_bucket_index', new=lambda x: 5) await factory.block_until_ready(1) - client = ClientAsync(factory, recorder, True) + client = ClientAsync(factory, recorder, True, FallbackTreatmentCalculator(None)) client._evaluator = mocker.Mock(spec=Evaluator) evaluation = { 'treatment': 'on', @@ -1973,7 +2256,7 @@ async def synchronize_config(*_): mocker.patch('splitio.client.client.get_latency_bucket_index', new=lambda x: 5) await factory.block_until_ready(1) - client = ClientAsync(factory, recorder, True) + client = ClientAsync(factory, recorder, True, FallbackTreatmentCalculator(None)) client._evaluator = mocker.Mock(spec=Evaluator) evaluation = { 'treatment': 
'on', @@ -2059,7 +2342,7 @@ async def test_impression_toggle_optimized(self, mocker): from_raw(splits_json['splitChange1_1']['ff']['d'][1]), from_raw(splits_json['splitChange1_1']['ff']['d'][2]) ], [], -1) - client = ClientAsync(factory, recorder, True) + client = ClientAsync(factory, recorder, True, FallbackTreatmentCalculator(None)) treatment = await client.get_treatment('some_key', 'SPLIT_1') assert treatment == 'off' treatment = await client.get_treatment('some_key', 'SPLIT_2') @@ -2122,7 +2405,7 @@ async def test_impression_toggle_debug(self, mocker): from_raw(splits_json['splitChange1_1']['ff']['d'][1]), from_raw(splits_json['splitChange1_1']['ff']['d'][2]) ], [], -1) - client = ClientAsync(factory, recorder, True) + client = ClientAsync(factory, recorder, True, FallbackTreatmentCalculator(None)) assert await client.get_treatment('some_key', 'SPLIT_1') == 'off' assert await client.get_treatment('some_key', 'SPLIT_2') == 'on' assert await client.get_treatment('some_key', 'SPLIT_3') == 'on' @@ -2182,7 +2465,7 @@ async def test_impression_toggle_none(self, mocker): from_raw(splits_json['splitChange1_1']['ff']['d'][1]), from_raw(splits_json['splitChange1_1']['ff']['d'][2]) ], [], -1) - client = ClientAsync(factory, recorder, True) + client = ClientAsync(factory, recorder, True, FallbackTreatmentCalculator(None)) assert await client.get_treatment('some_key', 'SPLIT_1') == 'off' assert await client.get_treatment('some_key', 'SPLIT_2') == 'on' assert await client.get_treatment('some_key', 'SPLIT_3') == 'on' @@ -2233,7 +2516,7 @@ async def synchronize_config(*_): mocker.patch('splitio.client.client.utctime_ms', new=lambda: 1000) await factory.block_until_ready(1) - client = ClientAsync(factory, recorder, True) + client = ClientAsync(factory, recorder, True, FallbackTreatmentCalculator(None)) assert await client.track('key', 'user', 'purchase', 12) is True assert self.events[0] == [EventWrapper( event=Event('key', 'user', 'purchase', 12, 1000, None), @@ -2277,7 
+2560,7 @@ async def synchronize_config(*_): type(factory).ready = ready_property await factory.block_until_ready(1) - client = ClientAsync(factory, recorder) + client = ClientAsync(factory, recorder, True, FallbackTreatmentCalculator(None)) assert await client.get_treatment('some_key', 'SPLIT_2') == CONTROL assert(telemetry_storage._tel_config._not_ready == 1) await client.track('key', 'tt', 'ev') @@ -2325,7 +2608,7 @@ async def synchronize_config(*_): ready_property.return_value = True type(factory).ready = ready_property - client = ClientAsync(factory, recorder, True) + client = ClientAsync(factory, recorder, True, FallbackTreatmentCalculator(None)) client._evaluator = mocker.Mock() def _raise(*_): raise RuntimeError('something') @@ -2403,7 +2686,7 @@ async def synchronize_config(*_): await factory.block_until_ready(1) except: pass - client = ClientAsync(factory, recorder, True) + client = ClientAsync(factory, recorder, True, FallbackTreatmentCalculator(None)) assert await client.get_treatment('key', 'SPLIT_2') == 'on' assert(telemetry_storage._method_latencies._treatment[0] == 1) @@ -2473,7 +2756,7 @@ async def exc(*_): recorder.record_track_stats = exc await factory.block_until_ready(1) - client = ClientAsync(factory, recorder, True) + client = ClientAsync(factory, recorder, True, FallbackTreatmentCalculator(None)) try: await client.track('key', 'tt', 'ev') except: @@ -2520,7 +2803,7 @@ async def synchronize_config(*_): ) await factory.block_until_ready(1) - client = ClientAsync(factory, recorder, True) + client = ClientAsync(factory, recorder, True, FallbackTreatmentCalculator(None)) client._evaluator = mocker.Mock(spec=Evaluator) evaluation = { 'treatment': 'on', @@ -2585,3 +2868,342 @@ async def synchronize_config(*_): assert await client.get_treatments_with_config_by_flag_sets('some_key', ['set_1'], evaluation_options=EvaluationOptions({"prop": "value"})) == {'SPLIT_2': ('on', None)} assert await impression_storage.pop_many(100) == [Impression('some_key', 
'SPLIT_2', 'on', 'some_label', 123, None, 1000, None, '{"prop": "value"}')] + try: + await factory.destroy() + except: + pass + + @pytest.mark.asyncio + async def test_fallback_treatment_eval_exception(self, mocker): + # using fallback when the evaluator has RuntimeError exception + split_storage = mocker.Mock(spec=SplitStorage) + segment_storage = mocker.Mock(spec=SegmentStorage) + rb_segment_storage = mocker.Mock(spec=RuleBasedSegmentsStorage) + impression_storage = mocker.Mock(spec=ImpressionStorage) + event_storage = mocker.Mock(spec=EventStorage) + destroyed_property = mocker.PropertyMock() + destroyed_property.return_value = False + + mocker.patch('splitio.client.client.utctime_ms', new=lambda: 1000) + mocker.patch('splitio.client.client.get_latency_bucket_index', new=lambda x: 5) + + telemetry_storage = await InMemoryTelemetryStorageAsync.create() + telemetry_producer = TelemetryStorageProducerAsync(telemetry_storage) + telemetry_evaluation_producer = telemetry_producer.get_telemetry_evaluation_producer() + impmanager = ImpressionManager(StrategyDebugMode(), StrategyNoneMode(), telemetry_producer.get_telemetry_runtime_producer()) + recorder = StandardRecorderAsync(impmanager, event_storage, impression_storage, telemetry_evaluation_producer, telemetry_producer.get_telemetry_runtime_producer()) + + class TelemetrySubmitterMock(): + async def synchronize_config(*_): + pass + + factory = SplitFactoryAsync(mocker.Mock(), + {'splits': split_storage, + 'segments': segment_storage, + 'rule_based_segments': rb_segment_storage, + 'impressions': impression_storage, + 'events': event_storage}, + mocker.Mock(), + recorder, + impmanager, + telemetry_producer, + telemetry_producer.get_telemetry_init_producer(), + TelemetrySubmitterMock(), + None + ) + + ready_property = mocker.PropertyMock() + ready_property.return_value = True + type(factory).ready = ready_property + + self.imps = None + async def put(impressions): + self.imps = impressions + impression_storage.put = put 
+ + client = ClientAsync(factory, recorder, True, FallbackTreatmentCalculator(FallbackTreatmentsConfiguration(FallbackTreatment("on-global", '{"prop": "val"}')))) + + def eval_with_context(*_): + raise RuntimeError() + client._evaluator.eval_with_context = eval_with_context + + async def get_feature_flag_names_by_flag_sets(*_): + return ["some", "some2"] + client._get_feature_flag_names_by_flag_sets = get_feature_flag_names_by_flag_sets + + async def fetch_many(*_): + return {"some": from_raw(splits_json['splitChange1_1']['ff']['d'][0])} + split_storage.fetch_many = fetch_many + + async def fetch_many_rbs(*_): + return {} + rb_segment_storage.fetch_many = fetch_many_rbs + + treatment = await client.get_treatment("key", "some") + assert(treatment == "on-global") + assert(self.imps[0].treatment == "on-global") + assert(self.imps[0].label == "fallback - exception") + + self.imps = None + treatment = await client.get_treatments("key_m", ["some", "some2"]) + assert(treatment == {"some": "on-global", "some2": "on-global"}) + assert(self.imps[0].treatment == "on-global") + assert(self.imps[0].label == "fallback - exception") + assert(self.imps[1].treatment == "on-global") + assert(self.imps[1].label == "fallback - exception") + + assert(await client.get_treatment_with_config("key", "some") == ("on-global", '{"prop": "val"}')) + assert(await client.get_treatments_with_config("key_m", ["some", "some2"]) == {"some": ("on-global", '{"prop": "val"}'), "some2": ("on-global", '{"prop": "val"}')}) + assert(await client.get_treatments_by_flag_set("key_m", "set") == {"some": "on-global", "some2": "on-global"}) + assert(await client.get_treatments_by_flag_set("key_m", ["set"]) == {"some": "on-global", "some2": "on-global"}) + assert(await client.get_treatments_with_config_by_flag_set("key_m", "set") == {"some": ("on-global", '{"prop": "val"}'), "some2": ("on-global", '{"prop": "val"}')}) + assert(await client.get_treatments_with_config_by_flag_sets("key_m", ["set"]) == {"some": 
("on-global", '{"prop": "val"}'), "some2": ("on-global", '{"prop": "val"}')}) + + self.imps = None + client._fallback_treatment_calculator = FallbackTreatmentCalculator(FallbackTreatmentsConfiguration(FallbackTreatment("on-global", '{"prop": "val"}'), {'some': FallbackTreatment("on-local")})) + treatment = await client.get_treatment("key2", "some") + assert(treatment == "on-local") + assert(self.imps[0].treatment == "on-local") + assert(self.imps[0].label == "fallback - exception") + + self.imps = None + treatment = await client.get_treatments("key2_m", ["some", "some2"]) + assert(treatment == {"some": "on-local", "some2": "on-global"}) + assert_both = 0 + for imp in self.imps: + if imp.feature_name == "some": + assert_both += 1 + assert(imp.treatment == "on-local") + assert(imp.label == "fallback - exception") + else: + assert_both += 1 + assert(imp.treatment == "on-global") + assert(imp.label == "fallback - exception") + assert assert_both == 2 + + assert(await client.get_treatment_with_config("key", "some") == ("on-local", None)) + assert(await client.get_treatments_with_config("key_m", ["some", "some2"]) == {"some": ("on-local", None), "some2": ("on-global", '{"prop": "val"}')}) + assert(await client.get_treatments_by_flag_set("key_m", "set") == {"some": "on-local", "some2": "on-global"}) + assert(await client.get_treatments_by_flag_set("key_m", ["set"]) == {"some": "on-local", "some2": "on-global"}) + assert(await client.get_treatments_with_config_by_flag_set("key_m", "set") == {"some": ("on-local", None), "some2": ("on-global", '{"prop": "val"}')}) + assert(await client.get_treatments_with_config_by_flag_sets("key_m", ["set"]) == {"some": ("on-local", None), "some2": ("on-global", '{"prop": "val"}')}) + + self.imps = None + client._fallback_treatment_calculator = FallbackTreatmentCalculator(FallbackTreatmentsConfiguration(None, {'some': FallbackTreatment("on-local", '{"prop": "val"}')})) + treatment = await client.get_treatment("key3", "some") + 
assert(treatment == "on-local") + assert(self.imps[0].treatment == "on-local") + assert(self.imps[0].label == "fallback - exception") + + self.imps = None + treatment = await client.get_treatments("key3_m", ["some", "some2"]) + assert(treatment == {"some": "on-local", "some2": "control"}) + assert_both = 0 + for imp in self.imps: + if imp.feature_name == "some": + assert_both += 1 + assert(imp.treatment == "on-local") + assert(imp.label == "fallback - exception") + else: + assert_both += 1 + assert(imp.treatment == "control") + assert(imp.label == "exception") + assert assert_both == 2 + + assert(await client.get_treatment_with_config("key", "some") == ("on-local", '{"prop": "val"}')) + assert(await client.get_treatments_with_config("key_m", ["some", "some2"]) == {"some": ("on-local", '{"prop": "val"}'), "some2": ("control", None)}) + assert(await client.get_treatments_by_flag_set("key_m", "set") == {"some": "on-local", "some2": "control"}) + assert(await client.get_treatments_by_flag_set("key_m", ["set"]) == {"some": "on-local", "some2": "control"}) + assert(await client.get_treatments_with_config_by_flag_set("key_m", "set") == {"some": ("on-local", '{"prop": "val"}'), "some2": ("control", None)}) + assert(await client.get_treatments_with_config_by_flag_sets("key_m", ["set"]) == {"some": ("on-local", '{"prop": "val"}'), "some2": ("control", None)}) + + self.imps = None + client._fallback_treatment_calculator = FallbackTreatmentCalculator(FallbackTreatmentsConfiguration(None, {'some2': FallbackTreatment("on-local")})) + treatment = await client.get_treatment("key4", "some") + assert(treatment == "control") + assert(self.imps[0].treatment == "control") + assert(self.imps[0].label == "exception") + + try: + await factory.destroy() + except: + pass + + @pytest.mark.asyncio + async def test_fallback_treatment_exception(self, mocker): + # using fallback when the evaluator raises a generic (non-RuntimeError) exception + split_storage = mocker.Mock(spec=SplitStorage) + segment_storage =
mocker.Mock(spec=SegmentStorage) + rb_segment_storage = mocker.Mock(spec=RuleBasedSegmentsStorage) + impression_storage = mocker.Mock(spec=ImpressionStorage) + event_storage = mocker.Mock(spec=EventStorage) + destroyed_property = mocker.PropertyMock() + destroyed_property.return_value = False + + mocker.patch('splitio.client.client.utctime_ms', new=lambda: 1000) + mocker.patch('splitio.client.client.get_latency_bucket_index', new=lambda x: 5) + + telemetry_storage = await InMemoryTelemetryStorageAsync.create() + telemetry_producer = TelemetryStorageProducerAsync(telemetry_storage) + impmanager = ImpressionManager(StrategyDebugMode(), StrategyNoneMode(), telemetry_producer.get_telemetry_runtime_producer()) + recorder = StandardRecorderAsync(impmanager, event_storage, impression_storage, telemetry_producer.get_telemetry_evaluation_producer(), telemetry_producer.get_telemetry_runtime_producer()) + + factory = SplitFactoryAsync(mocker.Mock(), + {'splits': split_storage, + 'segments': segment_storage, + 'rule_based_segments': rb_segment_storage, + 'impressions': impression_storage, + 'events': event_storage}, + mocker.Mock(), + recorder, + impmanager, + telemetry_producer, + telemetry_producer.get_telemetry_init_producer(), + mocker.Mock(), + None + ) + ready_property = mocker.PropertyMock() + ready_property.return_value = True + type(factory).ready = ready_property + + self.imps = None + async def put(impressions): + self.imps = impressions + impression_storage.put = put + + class TelemetrySubmitterMock(): + def synchronize_config(*_): + pass + factory._telemetry_submitter = TelemetrySubmitterMock() + + client = ClientAsync(factory, recorder, True, FallbackTreatmentCalculator(FallbackTreatmentsConfiguration(FallbackTreatment("on-global")))) + + def eval_with_context(*_): + raise Exception() + client._evaluator.eval_with_context = eval_with_context + + async def context_for(*_): + return EvaluationContext( + {}, + {}, + {} + ) + client._context_factory.context_for = 
context_for + + treatment = await client.get_treatment("key", "some") + assert(treatment == "on-global") + assert(self.imps[0].treatment == "on-global") + assert(self.imps[0].label == "fallback - exception") + + self.imps = None + client._fallback_treatment_calculator = FallbackTreatmentCalculator(FallbackTreatmentsConfiguration(FallbackTreatment("on-global"), {'some': FallbackTreatment("on-local")})) + treatment = await client.get_treatment("key2", "some") + assert(treatment == "on-local") + assert(self.imps[0].treatment == "on-local") + assert(self.imps[0].label == "fallback - exception") + + self.imps = None + client._fallback_treatment_calculator = FallbackTreatmentCalculator(FallbackTreatmentsConfiguration(None, {'some': FallbackTreatment("on-local")})) + treatment = await client.get_treatment("key3", "some") + assert(treatment == "on-local") + assert(self.imps[0].treatment == "on-local") + assert(self.imps[0].label == "fallback - exception") + + self.imps = None + client._fallback_treatment_calculator = FallbackTreatmentCalculator(FallbackTreatmentsConfiguration(None, {'some2': FallbackTreatment("on-local")})) + treatment = await client.get_treatment("key4", "some") + assert(treatment == "control") + assert(self.imps[0].treatment == "control") + assert(self.imps[0].label == "exception") + + try: + await factory.destroy() + except: + pass + + @pytest.mark.asyncio + async def test_fallback_treatment_not_ready_impressions(self, mocker): + # fallback treatments applied to impressions when the client is not ready + split_storage = mocker.Mock(spec=SplitStorage) + segment_storage = mocker.Mock(spec=SegmentStorage) + rb_segment_storage = mocker.Mock(spec=RuleBasedSegmentsStorage) + impression_storage = mocker.Mock(spec=ImpressionStorage) + event_storage = mocker.Mock(spec=EventStorage) + + mocker.patch('splitio.client.client.utctime_ms', new=lambda: 1000) + mocker.patch('splitio.client.client.get_latency_bucket_index', new=lambda x: 5) + + telemetry_storage = await
InMemoryTelemetryStorageAsync.create() + telemetry_producer = TelemetryStorageProducerAsync(telemetry_storage) + impmanager = ImpressionManager(StrategyOptimizedMode(), StrategyNoneMode(), telemetry_producer.get_telemetry_runtime_producer()) + recorder = StandardRecorderAsync(impmanager, event_storage, impression_storage, telemetry_producer.get_telemetry_evaluation_producer(), telemetry_producer.get_telemetry_runtime_producer()) + async def manager_start_task(): + pass + + factory = SplitFactoryAsync(mocker.Mock(), + {'splits': split_storage, + 'segments': segment_storage, + 'rule_based_segments': rb_segment_storage, + 'impressions': impression_storage, + 'events': event_storage}, + mocker.Mock(), + recorder, + impmanager, + telemetry_producer, + telemetry_producer.get_telemetry_init_producer(), + mocker.Mock(), + manager_start_task + ) + + self.imps = None + async def put(impressions): + self.imps = impressions + impression_storage.put = put + + class TelemetrySubmitterMock(): + def synchronize_config(*_): + pass + factory._telemetry_submitter = TelemetrySubmitterMock() + + client = ClientAsync(factory, recorder, True, FallbackTreatmentCalculator(FallbackTreatmentsConfiguration(FallbackTreatment("on-global")))) + ready_property = mocker.PropertyMock() + ready_property.return_value = False + type(factory).ready = ready_property + + async def context_for(*_): + return EvaluationContext( + {"some": {}}, + {}, + {} + ) + client._context_factory.context_for = context_for + + treatment = await client.get_treatment("key", "some") + assert(self.imps[0].treatment == "on-global") + assert(self.imps[0].label == "fallback - not ready") + + self.imps = None + client._fallback_treatment_calculator = FallbackTreatmentCalculator(FallbackTreatmentsConfiguration(FallbackTreatment("on-global"), {'some': FallbackTreatment("on-local")})) + treatment = await client.get_treatment("key2", "some") + assert(treatment == "on-local") + assert(self.imps[0].treatment == "on-local") + 
assert(self.imps[0].label == "fallback - not ready") + + self.imps = None + client._fallback_treatment_calculator = FallbackTreatmentCalculator(FallbackTreatmentsConfiguration(None, {'some': FallbackTreatment("on-local")})) + treatment = await client.get_treatment("key3", "some") + assert(treatment == "on-local") + assert(self.imps[0].treatment == "on-local") + assert(self.imps[0].label == "fallback - not ready") + + self.imps = None + client._fallback_treatment_calculator = FallbackTreatmentCalculator(FallbackTreatmentsConfiguration(None, {'some2': FallbackTreatment("on-local")})) + treatment = await client.get_treatment("key4", "some") + assert(treatment == "control") + assert(self.imps[0].treatment == "control") + assert(self.imps[0].label == "not ready") + + try: + await factory.destroy() + except: + pass \ No newline at end of file diff --git a/tests/client/test_config.py b/tests/client/test_config.py index 028736b3..e08a1d4b 100644 --- a/tests/client/test_config.py +++ b/tests/client/test_config.py @@ -3,7 +3,8 @@ import pytest from splitio.client import config from splitio.engine.impressions.impressions import ImpressionsMode - +from splitio.models.fallback_treatment import FallbackTreatment +from splitio.models.fallback_config import FallbackTreatmentsConfiguration class ConfigSanitizationTests(object): """Inmemory storage-based integration tests.""" @@ -62,8 +63,10 @@ def test_sanitize_imp_mode(self): assert mode == ImpressionsMode.DEBUG assert rate == 60 - def test_sanitize(self): + def test_sanitize(self, mocker): """Test sanitization.""" + _logger = mocker.Mock() + mocker.patch('splitio.client.config._LOGGER', new=_logger) configs = {} processed = config.sanitize('some', configs) assert processed['redisLocalCacheEnabled'] # check default is True @@ -87,3 +90,36 @@ def test_sanitize(self): processed = config.sanitize('some', {'httpAuthenticateScheme': 'NONE'}) assert processed['httpAuthenticateScheme'] is config.AuthenticateScheme.NONE + + 
_logger.reset_mock() + processed = config.sanitize('some', {'fallbackTreatments': 'NONE'}) + assert processed['fallbackTreatments'] == None + assert _logger.warning.mock_calls[1] == mocker.call("Config: fallbackTreatments parameter should be of `FallbackTreatmentsConfiguration` class.") + + _logger.reset_mock() + processed = config.sanitize('some', {'fallbackTreatments': FallbackTreatmentsConfiguration(123)}) + assert processed['fallbackTreatments'].global_fallback_treatment == None + assert _logger.warning.mock_calls[1] == mocker.call("Config: global fallbacktreatment parameter is discarded.") + + _logger.reset_mock() + processed = config.sanitize('some', {'fallbackTreatments': FallbackTreatmentsConfiguration(FallbackTreatment(123))}) + assert processed['fallbackTreatments'].global_fallback_treatment == None + assert _logger.warning.mock_calls[1] == mocker.call("Config: global fallbacktreatment parameter is discarded.") + + fb = FallbackTreatmentsConfiguration(FallbackTreatment('on')) + processed = config.sanitize('some', {'fallbackTreatments': fb}) + assert processed['fallbackTreatments'].global_fallback_treatment.treatment == fb.global_fallback_treatment.treatment + assert processed['fallbackTreatments'].global_fallback_treatment.label == None + + fb = FallbackTreatmentsConfiguration(FallbackTreatment('on'), {"flag": FallbackTreatment("off")}) + processed = config.sanitize('some', {'fallbackTreatments': fb}) + assert processed['fallbackTreatments'].global_fallback_treatment.treatment == fb.global_fallback_treatment.treatment + assert processed['fallbackTreatments'].by_flag_fallback_treatment["flag"] == fb.by_flag_fallback_treatment["flag"] + assert processed['fallbackTreatments'].by_flag_fallback_treatment["flag"].label == None + + _logger.reset_mock() + fb = FallbackTreatmentsConfiguration(None, {"flag#%": FallbackTreatment("off"), "flag2": FallbackTreatment("on")}) + processed = config.sanitize('some', {'fallbackTreatments': fb}) + assert 
len(processed['fallbackTreatments'].by_flag_fallback_treatment) == 1 + assert processed['fallbackTreatments'].by_flag_fallback_treatment.get("flag2") == fb.by_flag_fallback_treatment["flag2"] + assert _logger.warning.mock_calls[1] == mocker.call('Config: fallback treatment parameter for feature flag %s is discarded.', 'flag#%') \ No newline at end of file diff --git a/tests/client/test_factory.py b/tests/client/test_factory.py index fbe499d6..3a43e29f 100644 --- a/tests/client/test_factory.py +++ b/tests/client/test_factory.py @@ -13,6 +13,8 @@ from splitio.storage import redis, inmemmory, pluggable from splitio.tasks.util import asynctask from splitio.engine.impressions.impressions import Manager as ImpressionsManager +from splitio.models.fallback_config import FallbackTreatmentsConfiguration, FallbackTreatmentCalculator +from splitio.models.fallback_treatment import FallbackTreatment from splitio.sync.manager import Manager, ManagerAsync from splitio.sync.synchronizer import Synchronizer, SynchronizerAsync, SplitSynchronizers, SplitTasks from splitio.sync.split import SplitSynchronizer, SplitSynchronizerAsync @@ -25,32 +27,37 @@ class SplitFactoryTests(object): """Split factory test cases.""" - def test_flag_sets_counts(self): + def test_flag_sets_counts(self): factory = get_factory("none", config={ 'flagSetsFilter': ['set1', 'set2', 'set3'] }) assert factory._telemetry_init_producer._telemetry_storage._tel_config._flag_sets == 3 assert factory._telemetry_init_producer._telemetry_storage._tel_config._flag_sets_invalid == 0 - factory.destroy() - + event = threading.Event() + factory.destroy(event) + event.wait() + factory = get_factory("none", config={ 'flagSetsFilter': ['s#et1', 'set2', 'set3'] }) assert factory._telemetry_init_producer._telemetry_storage._tel_config._flag_sets == 3 assert factory._telemetry_init_producer._telemetry_storage._tel_config._flag_sets_invalid == 1 - factory.destroy() + event = threading.Event() + factory.destroy(event) + event.wait() 
factory = get_factory("none", config={ 'flagSetsFilter': ['s#et1', 22, 'set3'] }) assert factory._telemetry_init_producer._telemetry_storage._tel_config._flag_sets == 3 assert factory._telemetry_init_producer._telemetry_storage._tel_config._flag_sets_invalid == 2 - factory.destroy() + event = threading.Event() + factory.destroy(event) + event.wait() def test_inmemory_client_creation_streaming_false(self, mocker): """Test that a client with in-memory storage is created correctly.""" - # Setup synchronizer def _split_synchronizer(self, ready_flag, some, auth_api, streaming_enabled, sdk_matadata, telemetry_runtime_producer, sse_url=None, client_key=None): synchronizer = mocker.Mock(spec=Synchronizer) @@ -94,7 +101,7 @@ def test_redis_client_creation(self, mocker): """Test that a client with redis storage is created correctly.""" strict_redis_mock = mocker.Mock() mocker.patch('splitio.storage.adapters.redis.StrictRedis', new=strict_redis_mock) - + fallback_treatments_configuration = FallbackTreatmentsConfiguration(FallbackTreatment("on")) config = { 'labelsEnabled': False, 'impressionListener': 123, @@ -110,7 +117,6 @@ def test_redis_client_creation(self, mocker): 'redisConnectionPool': False, 'redisUnixSocketPath': '/some_path', 'redisEncodingErrors': 'non-strict', - 'redisErrors': True, 'redisDecodeResponses': True, 'redisRetryOnTimeout': True, 'redisSsl': True, @@ -119,7 +125,8 @@ def test_redis_client_creation(self, mocker): 'redisSslCertReqs': 'some_cert_req', 'redisSslCaCerts': 'some_ca_cert', 'redisMaxConnections': 999, - 'flagSetsFilter': ['set_1'] + 'flagSetsFilter': ['set_1'], + 'fallbackTreatments': fallback_treatments_configuration } factory = get_factory('some_api_key', config=config) class TelemetrySubmitterMock(): @@ -133,6 +140,7 @@ def synchronize_config(*_): assert isinstance(factory._get_storage('events'), redis.RedisEventsStorage) assert factory._get_storage('splits').flag_set_filter.flag_sets == set([]) + assert 
factory._fallback_treatment_calculator.fallback_treatments_configuration.global_fallback_treatment.treatment == fallback_treatments_configuration.global_fallback_treatment.treatment adapter = factory._get_storage('splits')._redis assert adapter == factory._get_storage('segments')._redis @@ -153,7 +161,6 @@ def synchronize_config(*_): unix_socket_path='/some_path', encoding='utf-8', encoding_errors='non-strict', - errors=True, decode_responses=True, retry_on_timeout=True, ssl=True, @@ -516,9 +523,15 @@ def synchronize_config(*_): event.wait() assert _INSTANTIATED_FACTORIES['some_other_api_key'] == 1 assert _INSTANTIATED_FACTORIES['some_api_key'] == 2 - factory2.destroy() - factory3.destroy() - factory4.destroy() + event = threading.Event() + factory2.destroy(event) + event.wait() + event = threading.Event() + factory3.destroy(event) + event.wait() + event = threading.Event() + factory4.destroy(event) + event.wait() def test_uwsgi_preforked(self, mocker): """Test preforked initializations.""" @@ -581,6 +594,8 @@ def synchronize_config(*_): assert clear_impressions._called == 1 assert clear_events._called == 1 factory.destroy() + time.sleep(0.1) + assert factory.destroyed def test_error_prefork(self, mocker): """Test not handling fork.""" @@ -643,6 +658,8 @@ def synchronize_config(*_): pass assert factory.ready factory.destroy() + time.sleep(0.1) + assert factory.destroyed def test_destroy_with_event_pluggable(self, mocker): config = { @@ -698,17 +715,21 @@ def synchronize_config(*_): assert factory._status == Status.WAITING_FORK factory.destroy() - + time.sleep(0.1) + assert factory.destroyed class SplitFactoryAsyncTests(object): """Split factory async test cases.""" @pytest.mark.asyncio async def test_flag_sets_counts(self): + fallback_treatments_configuration = FallbackTreatmentsConfiguration(FallbackTreatment("on")) factory = await get_factory_async("none", config={ 'flagSetsFilter': ['set1', 'set2', 'set3'], - 'streamEnabled': False + 'streamEnabled': False, + 
'fallbackTreatments': fallback_treatments_configuration }) + assert factory._fallback_treatment_calculator.fallback_treatments_configuration.global_fallback_treatment.treatment == fallback_treatments_configuration.global_fallback_treatment.treatment assert factory._telemetry_init_producer._telemetry_storage._tel_config._flag_sets == 3 assert factory._telemetry_init_producer._telemetry_storage._tel_config._flag_sets_invalid == 0 await factory.destroy() @@ -730,17 +751,22 @@ async def test_flag_sets_counts(self): @pytest.mark.asyncio async def test_inmemory_client_creation_streaming_false_async(self, mocker): """Test that a client with in-memory storage is created correctly for async.""" - # Setup synchronizer def _split_synchronizer(self, ready_flag, some, auth_api, streaming_enabled, sdk_matadata, telemetry_runtime_producer, sse_url=None, client_key=None): synchronizer = mocker.Mock(spec=SynchronizerAsync) async def sync_all(*_): return None synchronizer.sync_all = sync_all + + def start_periodic_fetching(): + pass + synchronizer.start_periodic_fetching = start_periodic_fetching + self._ready_flag = ready_flag self._synchronizer = synchronizer self._streaming_enabled = False self._telemetry_runtime_producer = telemetry_runtime_producer + mocker.patch('splitio.sync.manager.ManagerAsync.__init__', new=_split_synchronizer) async def synchronize_config(*_): @@ -748,29 +774,30 @@ async def synchronize_config(*_): mocker.patch('splitio.sync.telemetry.InMemoryTelemetrySubmitterAsync.synchronize_config', new=synchronize_config) # Start factory and make assertions - factory = await get_factory_async('some_api_key', config={'streamingEmabled': False}) - assert isinstance(factory, SplitFactoryAsync) - assert isinstance(factory._storages['splits'], inmemmory.InMemorySplitStorageAsync) - assert isinstance(factory._storages['segments'], inmemmory.InMemorySegmentStorageAsync) - assert isinstance(factory._storages['impressions'], inmemmory.InMemoryImpressionStorageAsync) - assert 
factory._storages['impressions']._impressions.maxsize == 10000 - assert isinstance(factory._storages['events'], inmemmory.InMemoryEventStorageAsync) - assert factory._storages['events']._events.maxsize == 10000 + factory2 = await get_factory_async('some_api_key', config={'streamingEmabled': False}) - assert isinstance(factory._sync_manager, ManagerAsync) + assert isinstance(factory2, SplitFactoryAsync) + assert isinstance(factory2._storages['splits'], inmemmory.InMemorySplitStorageAsync) + assert isinstance(factory2._storages['segments'], inmemmory.InMemorySegmentStorageAsync) + assert isinstance(factory2._storages['impressions'], inmemmory.InMemoryImpressionStorageAsync) + assert factory2._storages['impressions']._impressions.maxsize == 10000 + assert isinstance(factory2._storages['events'], inmemmory.InMemoryEventStorageAsync) + assert factory2._storages['events']._events.maxsize == 10000 - assert isinstance(factory._recorder, StandardRecorderAsync) - assert isinstance(factory._recorder._impressions_manager, ImpressionsManager) - assert isinstance(factory._recorder._event_sotrage, inmemmory.EventStorage) - assert isinstance(factory._recorder._impression_storage, inmemmory.ImpressionStorage) + assert isinstance(factory2._sync_manager, ManagerAsync) - assert factory._labels_enabled is True + assert isinstance(factory2._recorder, StandardRecorderAsync) + assert isinstance(factory2._recorder._impressions_manager, ImpressionsManager) + assert isinstance(factory2._recorder._event_sotrage, inmemmory.EventStorage) + assert isinstance(factory2._recorder._impression_storage, inmemmory.ImpressionStorage) + + assert factory2._labels_enabled is True try: - await factory.block_until_ready(1) + await factory2.block_until_ready(1) except: pass - assert factory.ready - await factory.destroy() + assert factory2._status == Status.READY + await factory2.destroy() @pytest.mark.asyncio async def test_destroy_async(self, mocker): @@ -874,7 +901,7 @@ async def start(*_): await 
factory.block_until_ready(1) except: pass - assert factory.ready + assert factory._status == Status.READY assert factory.destroyed is False await factory.destroy() @@ -915,7 +942,7 @@ async def test_pluggable_client_creation_async(self, mocker): await factory.block_until_ready(1) except: pass - assert factory.ready + assert factory._status == Status.READY await factory.destroy() @pytest.mark.asyncio @@ -944,3 +971,4 @@ async def _make_factory_with_apikey(apikey, *_, **__): await asyncio.sleep(0.5) assert factory.destroyed assert len(build_redis.mock_calls) == 2 + \ No newline at end of file diff --git a/tests/client/test_input_validator.py b/tests/client/test_input_validator.py index a5a1c91a..be2ec574 100644 --- a/tests/client/test_input_validator.py +++ b/tests/client/test_input_validator.py @@ -1,20 +1,21 @@ """Unit tests for the input_validator module.""" -import logging import pytest +import logging from splitio.client.factory import SplitFactory, get_factory, SplitFactoryAsync, get_factory_async from splitio.client.client import CONTROL, Client, _LOGGER as _logger, ClientAsync -from splitio.client.manager import SplitManager, SplitManagerAsync from splitio.client.key import Key from splitio.storage import SplitStorage, EventStorage, ImpressionStorage, SegmentStorage, RuleBasedSegmentsStorage from splitio.storage.inmemmory import InMemoryTelemetryStorage, InMemoryTelemetryStorageAsync, \ InMemorySplitStorage, InMemorySplitStorageAsync, InMemoryRuleBasedSegmentStorage, InMemoryRuleBasedSegmentStorageAsync from splitio.models.splits import Split +from splitio.models.fallback_config import FallbackTreatmentCalculator from splitio.client import input_validator +from splitio.client.manager import SplitManager, SplitManagerAsync from splitio.recorder.recorder import StandardRecorder, StandardRecorderAsync from splitio.engine.telemetry import TelemetryStorageProducer, TelemetryStorageProducerAsync from splitio.engine.impressions.impressions import Manager as 
ImpressionManager -from splitio.engine.evaluator import EvaluationDataFactory +from splitio.models.fallback_treatment import FallbackTreatment class ClientInputValidationTests(object): """Input validation test cases.""" @@ -56,7 +57,7 @@ def test_get_treatment(self, mocker): mocker.Mock() ) - client = Client(factory, mocker.Mock()) + client = Client(factory, mocker.Mock(), mocker.Mock(), FallbackTreatmentCalculator(None)) _logger = mocker.Mock() mocker.patch('splitio.client.input_validator._LOGGER', new=_logger) @@ -297,7 +298,7 @@ def _configs(treatment): mocker.Mock() ) - client = Client(factory, mocker.Mock()) + client = Client(factory, mocker.Mock(), mocker.Mock(), FallbackTreatmentCalculator(None)) _logger = mocker.Mock() mocker.patch('splitio.client.input_validator._LOGGER', new=_logger) @@ -573,7 +574,7 @@ def test_track(self, mocker): ) factory._sdk_key = 'some-test' - client = Client(factory, recorder) + client = Client(factory, recorder, mocker.Mock(), FallbackTreatmentCalculator(None)) client._event_storage = event_storage _logger = mocker.Mock() mocker.patch('splitio.client.input_validator._LOGGER', new=_logger) @@ -855,7 +856,7 @@ def test_get_treatments(self, mocker): ready_mock.return_value = True type(factory).ready = ready_mock - client = Client(factory, recorder) + client = Client(factory, recorder, mocker.Mock(), FallbackTreatmentCalculator(None)) _logger = mocker.Mock() mocker.patch('splitio.client.input_validator._LOGGER', new=_logger) @@ -1005,7 +1006,7 @@ def _configs(treatment): return '{"some": "property"}' if treatment == 'default_treatment' else None split_mock.get_configurations_for.side_effect = _configs - client = Client(factory, mocker.Mock()) + client = Client(factory, mocker.Mock(), mocker.Mock(), FallbackTreatmentCalculator(None)) _logger = mocker.Mock() mocker.patch('splitio.client.input_validator._LOGGER', new=_logger) @@ -1151,7 +1152,7 @@ def test_get_treatments_by_flag_set(self, mocker): ready_mock.return_value = True 
type(factory).ready = ready_mock - client = Client(factory, recorder) + client = Client(factory, recorder, mocker.Mock(), FallbackTreatmentCalculator(None)) _logger = mocker.Mock() mocker.patch('splitio.client.input_validator._LOGGER', new=_logger) @@ -1270,7 +1271,7 @@ def test_get_treatments_by_flag_sets(self, mocker): ready_mock.return_value = True type(factory).ready = ready_mock - client = Client(factory, recorder) + client = Client(factory, recorder, mocker.Mock(), FallbackTreatmentCalculator(None)) _logger = mocker.Mock() mocker.patch('splitio.client.input_validator._LOGGER', new=_logger) @@ -1400,7 +1401,7 @@ def _configs(treatment): ready_mock.return_value = True type(factory).ready = ready_mock - client = Client(factory, recorder) + client = Client(factory, recorder, mocker.Mock(), FallbackTreatmentCalculator(None)) _logger = mocker.Mock() mocker.patch('splitio.client.input_validator._LOGGER', new=_logger) @@ -1524,7 +1525,7 @@ def _configs(treatment): ready_mock.return_value = True type(factory).ready = ready_mock - client = Client(factory, recorder) + client = Client(factory, recorder, mocker.Mock(), FallbackTreatmentCalculator(None)) _logger = mocker.Mock() mocker.patch('splitio.client.input_validator._LOGGER', new=_logger) @@ -1627,7 +1628,36 @@ def test_flag_sets_validation(self): flag_sets = input_validator.validate_flag_sets([12, 33], 'method') assert flag_sets == [] + def test_fallback_treatments(self, mocker): + _logger = mocker.Mock() + mocker.patch('splitio.client.input_validator._LOGGER', new=_logger) + + assert input_validator.validate_fallback_treatment(FallbackTreatment("on", {"prop":"val"})) + assert input_validator.validate_fallback_treatment(FallbackTreatment("on")) + + _logger.reset_mock() + assert not input_validator.validate_fallback_treatment(FallbackTreatment("on" * 100)) + assert _logger.warning.mock_calls == [ + mocker.call("Config: Fallback treatment size should not exceed %s characters", 100) + ] + + assert 
input_validator.validate_fallback_treatment(FallbackTreatment("on", {"prop" * 500:"val" * 500})) + _logger.reset_mock() + assert not input_validator.validate_fallback_treatment(FallbackTreatment("on/c")) + assert _logger.warning.mock_calls == [ + mocker.call("Config: Fallback treatment should match regex %s", "^[0-9]+[.a-zA-Z0-9_-]*$|^[a-zA-Z]+[a-zA-Z0-9_-]*$") + ] + + _logger.reset_mock() + assert not input_validator.validate_fallback_treatment(FallbackTreatment("on$as")) + assert _logger.warning.mock_calls == [ + mocker.call("Config: Fallback treatment should match regex %s", "^[0-9]+[.a-zA-Z0-9_-]*$|^[a-zA-Z]+[a-zA-Z0-9_-]*$") + ] + + assert input_validator.validate_fallback_treatment(FallbackTreatment("on_c")) + assert input_validator.validate_fallback_treatment(FallbackTreatment("on_45-c")) + class ClientInputValidationAsyncTests(object): """Input validation test cases.""" @@ -1675,13 +1705,14 @@ async def get_change_number(*_): impmanager, telemetry_producer, telemetry_producer.get_telemetry_init_producer(), - mocker.Mock() + mocker.Mock(), + None ) ready_mock = mocker.PropertyMock() ready_mock.return_value = True type(factory).ready = ready_mock - client = ClientAsync(factory, mocker.Mock()) + client = ClientAsync(factory, mocker.Mock(), mocker.Mock(), FallbackTreatmentCalculator(None)) async def record_treatment_stats(*_): pass @@ -1937,13 +1968,14 @@ async def get_change_number(*_): impmanager, telemetry_producer, telemetry_producer.get_telemetry_init_producer(), - mocker.Mock() + mocker.Mock(), + None ) ready_mock = mocker.PropertyMock() ready_mock.return_value = True type(factory).ready = ready_mock - client = ClientAsync(factory, mocker.Mock()) + client = ClientAsync(factory, mocker.Mock(), mocker.Mock(), FallbackTreatmentCalculator(None)) async def record_treatment_stats(*_): pass client._recorder.record_treatment_stats = record_treatment_stats @@ -2181,11 +2213,12 @@ async def put(*_): impmanager, telemetry_producer, 
telemetry_producer.get_telemetry_init_producer(), - mocker.Mock() + mocker.Mock(), + None ) factory._sdk_key = 'some-test' - client = ClientAsync(factory, recorder) + client = ClientAsync(factory, recorder, mocker.Mock(), FallbackTreatmentCalculator(None)) client._event_storage = event_storage _logger = mocker.Mock() mocker.patch('splitio.client.input_validator._LOGGER', new=_logger) @@ -2471,13 +2504,14 @@ async def fetch_many_rbs(*_): impmanager, telemetry_producer, telemetry_producer.get_telemetry_init_producer(), - mocker.Mock() + mocker.Mock(), + None ) ready_mock = mocker.PropertyMock() ready_mock.return_value = True type(factory).ready = ready_mock - client = ClientAsync(factory, recorder) + client = ClientAsync(factory, recorder, mocker.Mock(), FallbackTreatmentCalculator(None)) async def record_treatment_stats(*_): pass client._recorder.record_treatment_stats = record_treatment_stats @@ -2635,7 +2669,8 @@ async def fetch_many_rbs(*_): impmanager, telemetry_producer, telemetry_producer.get_telemetry_init_producer(), - mocker.Mock() + mocker.Mock(), + None ) split_mock.name = 'some_feature' @@ -2643,7 +2678,7 @@ def _configs(treatment): return '{"some": "property"}' if treatment == 'default_treatment' else None split_mock.get_configurations_for.side_effect = _configs - client = ClientAsync(factory, mocker.Mock()) + client = ClientAsync(factory, mocker.Mock(), mocker.Mock(), FallbackTreatmentCalculator(None)) async def record_treatment_stats(*_): pass client._recorder.record_treatment_stats = record_treatment_stats @@ -2802,13 +2837,14 @@ async def fetch_many_rbs(*_): mocker.Mock(), telemetry_producer, telemetry_producer.get_telemetry_init_producer(), - mocker.Mock() + mocker.Mock(), + None ) ready_mock = mocker.PropertyMock() ready_mock.return_value = True type(factory).ready = ready_mock - client = ClientAsync(factory, recorder) + client = ClientAsync(factory, recorder, mocker.Mock(), FallbackTreatmentCalculator(None)) async def record_treatment_stats(*_): 
pass client._recorder.record_treatment_stats = record_treatment_stats @@ -2948,13 +2984,14 @@ async def get_feature_flags_by_sets(*_): mocker.Mock(), telemetry_producer, telemetry_producer.get_telemetry_init_producer(), - mocker.Mock() + mocker.Mock(), + None ) ready_mock = mocker.PropertyMock() ready_mock.return_value = True type(factory).ready = ready_mock - client = ClientAsync(factory, recorder) + client = ClientAsync(factory, recorder, mocker.Mock(), FallbackTreatmentCalculator(None)) async def record_treatment_stats(*_): pass client._recorder.record_treatment_stats = record_treatment_stats @@ -3103,13 +3140,14 @@ async def get_feature_flags_by_sets(*_): mocker.Mock(), telemetry_producer, telemetry_producer.get_telemetry_init_producer(), - mocker.Mock() + mocker.Mock(), + None ) ready_mock = mocker.PropertyMock() ready_mock.return_value = True type(factory).ready = ready_mock - client = ClientAsync(factory, recorder) + client = ClientAsync(factory, recorder, mocker.Mock(), FallbackTreatmentCalculator(None)) async def record_treatment_stats(*_): pass client._recorder.record_treatment_stats = record_treatment_stats @@ -3252,13 +3290,14 @@ async def get_feature_flags_by_sets(*_): mocker.Mock(), telemetry_producer, telemetry_producer.get_telemetry_init_producer(), - mocker.Mock() + mocker.Mock(), + None ) ready_mock = mocker.PropertyMock() ready_mock.return_value = True type(factory).ready = ready_mock - client = ClientAsync(factory, recorder) + client = ClientAsync(factory, recorder, mocker.Mock(), FallbackTreatmentCalculator(None)) async def record_treatment_stats(*_): pass client._recorder.record_treatment_stats = record_treatment_stats diff --git a/tests/client/test_utils.py b/tests/client/test_utils.py index 64edb076..98d9d8f6 100644 --- a/tests/client/test_utils.py +++ b/tests/client/test_utils.py @@ -14,7 +14,6 @@ class ClientUtilsTests(object): def test_get_metadata(self, mocker): """Test the get_metadata function.""" meta = util.get_metadata({'machineIp': 
'some_ip', 'machineName': 'some_machine_name'}) - # assert _get_hostname_and_ip.mock_calls == [] assert meta.instance_ip == 'some_ip' assert meta.instance_name == 'some_machine_name' assert meta.sdk_version == 'python-' + __version__ diff --git a/tests/engine/test_evaluator.py b/tests/engine/test_evaluator.py index 3ec7e136..07f79a80 100644 --- a/tests/engine/test_evaluator.py +++ b/tests/engine/test_evaluator.py @@ -11,6 +11,8 @@ from splitio.models.impressions import Label from splitio.models.grammar import condition from splitio.models import rule_based_segments +from splitio.models.fallback_treatment import FallbackTreatment +from splitio.models.fallback_config import FallbackTreatmentsConfiguration, FallbackTreatmentCalculator from splitio.engine import evaluator, splitters from splitio.engine.evaluator import EvaluationContext from splitio.storage.inmemmory import InMemorySplitStorage, InMemorySegmentStorage, InMemoryRuleBasedSegmentStorage, \ @@ -372,6 +374,49 @@ def test_prerequisites(self): ctx = evaluation_facctory.context_for('mauro@split.io', ['prereq_chain']) assert e.eval_with_context('mauro@split.io', 'mauro@split.io', 'prereq_chain', {'email': 'mauro@split.io'}, ctx)['treatment'] == "on_default" + def test_evaluate_treatment_with_fallback(self, mocker): + """Test that a evaluation return fallback treatment.""" + splitter_mock = mocker.Mock(spec=splitters.Splitter) + logger_mock = mocker.Mock(spec=logging.Logger) + evaluator._LOGGER = logger_mock + mocked_split = mocker.Mock(spec=Split) + ctx = EvaluationContext(flags={'some': mocked_split}, segment_memberships=set(), rbs_segments={}) + + # should use global fallback + e = evaluator.Evaluator(splitter_mock, FallbackTreatmentCalculator(FallbackTreatmentsConfiguration(FallbackTreatment("off-global", '{"prop": "val"}')))) + result = e.eval_with_context('some_key', 'some_bucketing_key', 'some2', {}, ctx) + assert result['treatment'] == 'off-global' + assert result['configurations'] == '{"prop": "val"}' + 
assert result['impression']['label'] == "fallback - " + Label.SPLIT_NOT_FOUND + + # should use by flag fallback + e = evaluator.Evaluator(splitter_mock, FallbackTreatmentCalculator(FallbackTreatmentsConfiguration(None, {"some2": FallbackTreatment("off-some2", '{"prop2": "val2"}')}))) + result = e.eval_with_context('some_key', 'some_bucketing_key', 'some2', {}, ctx) + assert result['treatment'] == 'off-some2' + assert result['configurations'] == '{"prop2": "val2"}' + assert result['impression']['label'] == "fallback - " + Label.SPLIT_NOT_FOUND + + # should not use any fallback + e = evaluator.Evaluator(splitter_mock, FallbackTreatmentCalculator(FallbackTreatmentsConfiguration(None, {"some2": FallbackTreatment("off-some2", '{"prop2": "val2"}')}))) + result = e.eval_with_context('some_key', 'some_bucketing_key', 'some3', {}, ctx) + assert result['treatment'] == 'control' + assert result['configurations'] == None + assert result['impression']['label'] == Label.SPLIT_NOT_FOUND + + # should use by flag fallback + e = evaluator.Evaluator(splitter_mock, FallbackTreatmentCalculator(FallbackTreatmentsConfiguration(FallbackTreatment("off-global", '{"prop": "val"}'), {"some2": FallbackTreatment("off-some2", '{"prop2": "val2"}')}))) + result = e.eval_with_context('some_key', 'some_bucketing_key', 'some2', {}, ctx) + assert result['treatment'] == 'off-some2' + assert result['configurations'] == '{"prop2": "val2"}' + assert result['impression']['label'] == "fallback - " + Label.SPLIT_NOT_FOUND + + # should global flag fallback + e = evaluator.Evaluator(splitter_mock, FallbackTreatmentCalculator(FallbackTreatmentsConfiguration(FallbackTreatment("off-global", '{"prop": "val"}'), {"some2": FallbackTreatment("off-some2", '{"prop2": "val2"}')}))) + result = e.eval_with_context('some_key', 'some_bucketing_key', 'some3', {}, ctx) + assert result['treatment'] == 'off-global' + assert result['configurations'] == '{"prop": "val"}' + assert result['impression']['label'] == "fallback - " + 
Label.SPLIT_NOT_FOUND + @pytest.mark.asyncio async def test_evaluate_treatment_with_rbs_in_condition_async(self): e = evaluator.Evaluator(splitters.Splitter()) diff --git a/tests/integration/test_client_e2e.py b/tests/integration/test_client_e2e.py index f8625f6a..9e7c614e 100644 --- a/tests/integration/test_client_e2e.py +++ b/tests/integration/test_client_e2e.py @@ -29,6 +29,8 @@ PluggableRuleBasedSegmentsStorage, PluggableRuleBasedSegmentsStorageAsync from splitio.storage.adapters.redis import build, RedisAdapter, RedisAdapterAsync, build_async from splitio.models import splits, segments, rule_based_segments +from splitio.models.fallback_config import FallbackTreatmentsConfiguration, FallbackTreatmentCalculator +from splitio.models.fallback_treatment import FallbackTreatment from splitio.engine.impressions.impressions import Manager as ImpressionsManager, ImpressionsMode from splitio.engine.impressions import set_classes, set_classes_async from splitio.engine.impressions.strategies import StrategyDebugMode, StrategyOptimizedMode, StrategyNoneMode @@ -196,6 +198,11 @@ def _get_treatment(factory, skip_rbs=False): if not isinstance(factory._recorder._impressions_manager._strategy, StrategyNoneMode): _validate_last_impressions(client, ('prereq_feature', 'user1234', 'off_default')) + # test fallback treatment + assert client.get_treatment('user4321', 'fallback_feature') == 'on-local' + if not isinstance(factory._recorder._impressions_manager._strategy, StrategyNoneMode): + _validate_last_impressions(client) # No impressions should be present + def _get_treatment_with_config(factory): """Test client.get_treatment_with_config().""" try: @@ -229,6 +236,11 @@ def _get_treatment_with_config(factory): if not isinstance(factory._recorder._impressions_manager._strategy, StrategyNoneMode): _validate_last_impressions(client, ('all_feature', 'invalidKey', 'on')) + # test fallback treatment + assert client.get_treatment_with_config('user4321', 'fallback_feature') == ('on-local', 
'{"prop": "val"}') + if not isinstance(factory._recorder._impressions_manager._strategy, StrategyNoneMode): + _validate_last_impressions(client) # No impressions should be present + def _get_treatments(factory): """Test client.get_treatments().""" try: @@ -267,6 +279,11 @@ def _get_treatments(factory): if not isinstance(factory._recorder._impressions_manager._strategy, StrategyNoneMode): _validate_last_impressions(client, ('all_feature', 'invalidKey', 'on')) + # test fallback treatment + assert client.get_treatments('user4321', ['fallback_feature']) == {'fallback_feature': 'on-local'} + if not isinstance(factory._recorder._impressions_manager._strategy, StrategyNoneMode): + _validate_last_impressions(client) # No impressions should be present + def _get_treatments_with_config(factory): """Test client.get_treatments_with_config().""" try: @@ -306,6 +323,11 @@ def _get_treatments_with_config(factory): if not isinstance(factory._recorder._impressions_manager._strategy, StrategyNoneMode): _validate_last_impressions(client, ('all_feature', 'invalidKey', 'on')) + # test fallback treatment + assert client.get_treatments_with_config('user4321', ['fallback_feature']) == {'fallback_feature': ('on-local', '{"prop": "val"}')} + if not isinstance(factory._recorder._impressions_manager._strategy, StrategyNoneMode): + _validate_last_impressions(client) # No impressions should be present + def _get_treatments_by_flag_set(factory): """Test client.get_treatments_by_flag_set().""" try: @@ -539,6 +561,7 @@ def setup_method(self): None, telemetry_producer=telemetry_producer, telemetry_init_producer=telemetry_producer.get_telemetry_init_producer(), + fallback_treatment_calculator=FallbackTreatmentCalculator(FallbackTreatmentsConfiguration(None, {'fallback_feature': FallbackTreatment("on-local", '{"prop": "val"}')})) ) # pylint:disable=attribute-defined-outside-init except: pass @@ -697,6 +720,7 @@ def setup_method(self): None, telemetry_producer=telemetry_producer, 
telemetry_init_producer=telemetry_producer.get_telemetry_init_producer(), + fallback_treatment_calculator=FallbackTreatmentCalculator(FallbackTreatmentsConfiguration(None, {'fallback_feature': FallbackTreatment("on-local", '{"prop": "val"}')})) ) # pylint:disable=attribute-defined-outside-init def test_get_treatment(self): @@ -819,7 +843,11 @@ def setup_method(self): 'sdk_api_base_url': 'http://localhost:%d/api' % self.split_backend.port(), 'events_api_base_url': 'http://localhost:%d/api' % self.split_backend.port(), 'auth_api_base_url': 'http://localhost:%d/api' % self.split_backend.port(), - 'config': {'connectTimeout': 10000, 'streamingEnabled': False, 'impressionsMode': 'debug'} + 'config': {'connectTimeout': 10000, + 'streamingEnabled': False, + 'impressionsMode': 'debug', + 'fallbackTreatments': FallbackTreatmentsConfiguration(None, {'fallback_feature': FallbackTreatment("on-local", '{"prop": "val"}')}) + } } self.factory = get_factory('some_apikey', **kwargs) @@ -989,6 +1017,7 @@ def setup_method(self): recorder, telemetry_producer=telemetry_producer, telemetry_init_producer=telemetry_producer.get_telemetry_init_producer(), + fallback_treatment_calculator=FallbackTreatmentCalculator(FallbackTreatmentsConfiguration(None, {'fallback_feature': FallbackTreatment("on-local", '{"prop": "val"}')})) ) # pylint:disable=attribute-defined-outside-init def test_get_treatment(self): @@ -1177,6 +1206,7 @@ def setup_method(self): recorder, telemetry_producer=telemetry_producer, telemetry_init_producer=telemetry_producer.get_telemetry_init_producer(), + fallback_treatment_calculator=FallbackTreatmentCalculator(FallbackTreatmentsConfiguration(None, {'fallback_feature': FallbackTreatment("on-local", '{"prop": "val"}')})) ) # pylint:disable=attribute-defined-outside-init class LocalhostIntegrationTests(object): # pylint: disable=too-few-public-methods @@ -1400,6 +1430,7 @@ def setup_method(self): sdk_ready_flag=None, telemetry_producer=telemetry_producer, 
telemetry_init_producer=telemetry_producer.get_telemetry_init_producer(), + fallback_treatment_calculator=FallbackTreatmentCalculator(FallbackTreatmentsConfiguration(None, {'fallback_feature': FallbackTreatment("on-local", '{"prop": "val"}')})) ) # pylint:disable=attribute-defined-outside-init # Adding data to storage @@ -1595,6 +1626,7 @@ def setup_method(self): sdk_ready_flag=None, telemetry_producer=telemetry_producer, telemetry_init_producer=telemetry_producer.get_telemetry_init_producer(), + fallback_treatment_calculator=FallbackTreatmentCalculator(FallbackTreatmentsConfiguration(None, {'fallback_feature': FallbackTreatment("on-local", '{"prop": "val"}')})) ) # pylint:disable=attribute-defined-outside-init # Adding data to storage @@ -1789,6 +1821,7 @@ def setup_method(self): sdk_ready_flag=None, telemetry_producer=telemetry_producer, telemetry_init_producer=telemetry_producer.get_telemetry_init_producer(), + fallback_treatment_calculator=FallbackTreatmentCalculator(FallbackTreatmentsConfiguration(None, {'fallback_feature': FallbackTreatment("on-local", '{"prop": "val"}')})) ) # pylint:disable=attribute-defined-outside-init # Adding data to storage @@ -1942,6 +1975,7 @@ def test_optimized(self): None, telemetry_producer=telemetry_producer, telemetry_init_producer=telemetry_producer.get_telemetry_init_producer(), + fallback_treatment_calculator=FallbackTreatmentCalculator(FallbackTreatmentsConfiguration(FallbackTreatment("on-global"), {'fallback_feature': FallbackTreatment("on-local", '{"prop": "val"}')})) ) # pylint:disable=attribute-defined-outside-init except: pass @@ -1964,6 +1998,8 @@ def test_optimized(self): assert len(imps_count) == 1 assert imps_count[0].feature == 'SPLIT_3' assert imps_count[0].count == 1 + assert client.get_treatment('user1', 'incorrect_feature') == 'on-global' + assert client.get_treatment('user1', 'fallback_feature') == 'on-local' def test_debug(self): split_storage = InMemorySplitStorage() @@ -1997,6 +2033,7 @@ def 
test_debug(self): None, telemetry_producer=telemetry_producer, telemetry_init_producer=telemetry_producer.get_telemetry_init_producer(), + fallback_treatment_calculator=FallbackTreatmentCalculator(FallbackTreatmentsConfiguration(FallbackTreatment("on-global"), {'fallback_feature': FallbackTreatment("on-local", '{"prop": "val"}')})) ) # pylint:disable=attribute-defined-outside-init except: pass @@ -2019,6 +2056,8 @@ def test_debug(self): assert len(imps_count) == 1 assert imps_count[0].feature == 'SPLIT_3' assert imps_count[0].count == 1 + assert client.get_treatment('user1', 'incorrect_feature') == 'on-global' + assert client.get_treatment('user1', 'fallback_feature') == 'on-local' def test_none(self): split_storage = InMemorySplitStorage() @@ -2052,6 +2091,7 @@ def test_none(self): None, telemetry_producer=telemetry_producer, telemetry_init_producer=telemetry_producer.get_telemetry_init_producer(), + fallback_treatment_calculator=FallbackTreatmentCalculator(FallbackTreatmentsConfiguration(FallbackTreatment("on-global"), {'fallback_feature': FallbackTreatment("on-local", '{"prop": "val"}')})) ) # pylint:disable=attribute-defined-outside-init except: pass @@ -2076,6 +2116,8 @@ def test_none(self): assert imps_count[1].count == 1 assert imps_count[2].feature == 'SPLIT_3' assert imps_count[2].count == 1 + assert client.get_treatment('user1', 'incorrect_feature') == 'on-global' + assert client.get_treatment('user1', 'fallback_feature') == 'on-local' class RedisImpressionsToggleIntegrationTests(object): """Run impression toggle tests for Redis.""" @@ -2113,6 +2155,7 @@ def test_optimized(self): recorder, telemetry_producer=telemetry_producer, telemetry_init_producer=telemetry_producer.get_telemetry_init_producer(), + fallback_treatment_calculator=FallbackTreatmentCalculator(FallbackTreatmentsConfiguration(FallbackTreatment("on-global"), {'fallback_feature': FallbackTreatment("on-local", '{"prop": "val"}')})) ) # pylint:disable=attribute-defined-outside-init try: @@ 
-2141,6 +2184,8 @@ def test_optimized(self): assert len(imps_count) == 1 assert imps_count[0].feature == 'SPLIT_3' assert imps_count[0].count == 1 + assert client.get_treatment('user1', 'incorrect_feature') == 'on-global' + assert client.get_treatment('user1', 'fallback_feature') == 'on-local' self.clear_cache() client.destroy() @@ -2177,6 +2222,7 @@ def test_debug(self): recorder, telemetry_producer=telemetry_producer, telemetry_init_producer=telemetry_producer.get_telemetry_init_producer(), + fallback_treatment_calculator=FallbackTreatmentCalculator(FallbackTreatmentsConfiguration(FallbackTreatment("on-global"), {'fallback_feature': FallbackTreatment("on-local", '{"prop": "val"}')})) ) # pylint:disable=attribute-defined-outside-init try: @@ -2205,6 +2251,8 @@ def test_debug(self): assert len(imps_count) == 1 assert imps_count[0].feature == 'SPLIT_3' assert imps_count[0].count == 1 + assert client.get_treatment('user1', 'incorrect_feature') == 'on-global' + assert client.get_treatment('user1', 'fallback_feature') == 'on-local' self.clear_cache() client.destroy() @@ -2241,6 +2289,7 @@ def test_none(self): recorder, telemetry_producer=telemetry_producer, telemetry_init_producer=telemetry_producer.get_telemetry_init_producer(), + fallback_treatment_calculator=FallbackTreatmentCalculator(FallbackTreatmentsConfiguration(FallbackTreatment("on-global"), {'fallback_feature': FallbackTreatment("on-local", '{"prop":"val"}')})) ) # pylint:disable=attribute-defined-outside-init try: @@ -2271,6 +2320,8 @@ def test_none(self): assert imps_count[1].count == 1 assert imps_count[2].feature == 'SPLIT_3' assert imps_count[2].count == 1 + assert client.get_treatment('user1', 'incorrect_feature') == 'on-global' + assert client.get_treatment('user1', 'fallback_feature') == 'on-local' self.clear_cache() client.destroy() @@ -2342,6 +2393,7 @@ async def _setup_method(self): None, telemetry_producer=telemetry_producer, 
telemetry_init_producer=telemetry_producer.get_telemetry_init_producer(), + fallback_treatment_calculator=FallbackTreatmentCalculator(FallbackTreatmentsConfiguration(None, {'fallback_feature': FallbackTreatment("on-local", '{"prop": "val"}')})) ) # pylint:disable=attribute-defined-outside-init except: pass @@ -2513,6 +2565,7 @@ async def _setup_method(self): None, telemetry_producer=telemetry_producer, telemetry_init_producer=telemetry_producer.get_telemetry_init_producer(), + fallback_treatment_calculator=FallbackTreatmentCalculator(FallbackTreatmentsConfiguration(None, {'fallback_feature': FallbackTreatment("on-local", '{"prop": "val"}')})) ) # pylint:disable=attribute-defined-outside-init except: pass @@ -2653,7 +2706,11 @@ async def _setup_method(self): 'sdk_api_base_url': 'http://localhost:%d/api' % self.split_backend.port(), 'events_api_base_url': 'http://localhost:%d/api' % self.split_backend.port(), 'auth_api_base_url': 'http://localhost:%d/api' % self.split_backend.port(), - 'config': {'connectTimeout': 10000, 'streamingEnabled': False, 'impressionsMode': 'debug'} + 'config': {'connectTimeout': 10000, + 'streamingEnabled': False, + 'impressionsMode': 'debug', + 'fallbackTreatments': FallbackTreatmentsConfiguration(None, {'fallback_feature': FallbackTreatment("on-local", '{"prop": "val"}')}) + } } self.factory = await get_factory_async('some_apikey', **kwargs) @@ -2861,7 +2918,8 @@ async def _setup_method(self): recorder, telemetry_producer=telemetry_producer, telemetry_init_producer=telemetry_producer.get_telemetry_init_producer(), - telemetry_submitter=telemetry_submitter + telemetry_submitter=telemetry_submitter, + fallback_treatment_calculator=FallbackTreatmentCalculator(FallbackTreatmentsConfiguration(None, {'fallback_feature': FallbackTreatment("on-local", '{"prop": "val"}')})) ) # pylint:disable=attribute-defined-outside-init ready_property = mocker.PropertyMock() ready_property.return_value = True @@ -3083,7 +3141,8 @@ async def _setup_method(self): 
recorder, telemetry_producer=telemetry_producer, telemetry_init_producer=telemetry_producer.get_telemetry_init_producer(), - telemetry_submitter=telemetry_submitter + telemetry_submitter=telemetry_submitter, + fallback_treatment_calculator=FallbackTreatmentCalculator(FallbackTreatmentsConfiguration(None, {'fallback_feature': FallbackTreatment("on-local", '{"prop": "val"}')})) ) # pylint:disable=attribute-defined-outside-init ready_property = mocker.PropertyMock() ready_property.return_value = True @@ -3317,7 +3376,8 @@ async def _setup_method(self): RedisManagerAsync(PluggableSynchronizerAsync()), telemetry_producer=telemetry_producer, telemetry_init_producer=telemetry_producer.get_telemetry_init_producer(), - telemetry_submitter=telemetry_submitter + telemetry_submitter=telemetry_submitter, + fallback_treatment_calculator=FallbackTreatmentCalculator(FallbackTreatmentsConfiguration(None, {'fallback_feature': FallbackTreatment("on-local", '{"prop": "val"}')})) ) # pylint:disable=attribute-defined-outside-init ready_property = mocker.PropertyMock() ready_property.return_value = True @@ -3546,7 +3606,8 @@ async def _setup_method(self): RedisManagerAsync(PluggableSynchronizerAsync()), telemetry_producer=telemetry_producer, telemetry_init_producer=telemetry_producer.get_telemetry_init_producer(), - telemetry_submitter=telemetry_submitter + telemetry_submitter=telemetry_submitter, + fallback_treatment_calculator=FallbackTreatmentCalculator(FallbackTreatmentsConfiguration(None, {'fallback_feature': FallbackTreatment("on-local", '{"prop": "val"}')})) ) # pylint:disable=attribute-defined-outside-init ready_property = mocker.PropertyMock() @@ -3781,6 +3842,7 @@ async def _setup_method(self): manager, telemetry_producer=telemetry_producer, telemetry_init_producer=telemetry_producer.get_telemetry_init_producer(), + fallback_treatment_calculator=FallbackTreatmentCalculator(FallbackTreatmentsConfiguration(None, {'fallback_feature': FallbackTreatment("on-local", '{"prop": 
"val"}')})) ) # pylint:disable=attribute-defined-outside-init # Adding data to storage @@ -3994,6 +4056,7 @@ async def test_optimized(self): None, telemetry_producer=telemetry_producer, telemetry_init_producer=telemetry_producer.get_telemetry_init_producer(), + fallback_treatment_calculator=FallbackTreatmentCalculator(None) ) # pylint:disable=attribute-defined-outside-init except: pass @@ -4054,6 +4117,7 @@ async def test_debug(self): None, telemetry_producer=telemetry_producer, telemetry_init_producer=telemetry_producer.get_telemetry_init_producer(), + fallback_treatment_calculator=FallbackTreatmentCalculator(None) ) # pylint:disable=attribute-defined-outside-init except: pass @@ -4114,6 +4178,7 @@ async def test_none(self): None, telemetry_producer=telemetry_producer, telemetry_init_producer=telemetry_producer.get_telemetry_init_producer(), + fallback_treatment_calculator=FallbackTreatmentCalculator(None) ) # pylint:disable=attribute-defined-outside-init except: pass @@ -4181,6 +4246,7 @@ async def test_optimized(self): recorder, telemetry_producer=telemetry_producer, telemetry_init_producer=telemetry_producer.get_telemetry_init_producer(), + fallback_treatment_calculator=FallbackTreatmentCalculator(None) ) # pylint:disable=attribute-defined-outside-init ready_property = mocker.PropertyMock() ready_property.return_value = True @@ -4250,6 +4316,7 @@ async def test_debug(self): recorder, telemetry_producer=telemetry_producer, telemetry_init_producer=telemetry_producer.get_telemetry_init_producer(), + fallback_treatment_calculator=FallbackTreatmentCalculator(None) ) # pylint:disable=attribute-defined-outside-init ready_property = mocker.PropertyMock() ready_property.return_value = True @@ -4319,6 +4386,7 @@ async def test_none(self): recorder, telemetry_producer=telemetry_producer, telemetry_init_producer=telemetry_producer.get_telemetry_init_producer(), + fallback_treatment_calculator=FallbackTreatmentCalculator(None) ) # 
pylint:disable=attribute-defined-outside-init ready_property = mocker.PropertyMock() ready_property.return_value = True @@ -4481,6 +4549,11 @@ async def _get_treatment_async(factory, skip_rbs=False): if not isinstance(factory._recorder._impressions_manager._strategy, StrategyNoneMode): await _validate_last_impressions_async(client, ('regex_test', 'abc4', 'on')) + # test fallback treatment + assert await client.get_treatment('user4321', 'fallback_feature') == 'on-local' + if not isinstance(factory._recorder._impressions_manager._strategy, StrategyNoneMode): + await _validate_last_impressions_async(client) # No impressions should be present + if skip_rbs: return @@ -4537,6 +4610,11 @@ async def _get_treatment_with_config_async(factory): if not isinstance(factory._recorder._impressions_manager._strategy, StrategyNoneMode): await _validate_last_impressions_async(client, ('all_feature', 'invalidKey', 'on')) + # test fallback treatment + assert await client.get_treatment_with_config('user4321', 'fallback_feature') == ('on-local', '{"prop": "val"}') + if not isinstance(factory._recorder._impressions_manager._strategy, StrategyNoneMode): + await _validate_last_impressions_async(client) # No impressions should be present + async def _get_treatments_async(factory): """Test client.get_treatments().""" try: @@ -4575,6 +4653,11 @@ async def _get_treatments_async(factory): if not isinstance(factory._recorder._impressions_manager._strategy, StrategyNoneMode): await _validate_last_impressions_async(client, ('all_feature', 'invalidKey', 'on')) + # test fallback treatment + assert await client.get_treatments('user4321', ['fallback_feature']) == {'fallback_feature': 'on-local'} + if not isinstance(factory._recorder._impressions_manager._strategy, StrategyNoneMode): + await _validate_last_impressions_async(client) # No impressions should be present + async def _get_treatments_with_config_async(factory): """Test client.get_treatments_with_config().""" try: @@ -4614,6 +4697,11 @@ async 
def _get_treatments_with_config_async(factory): if not isinstance(factory._recorder._impressions_manager._strategy, StrategyNoneMode): await _validate_last_impressions_async(client, ('all_feature', 'invalidKey', 'on')) + # test fallback treatment + assert await client.get_treatments_with_config('user4321', ['fallback_feature']) == {'fallback_feature': ('on-local', '{"prop": "val"}')} + if not isinstance(factory._recorder._impressions_manager._strategy, StrategyNoneMode): + await _validate_last_impressions_async(client) # No impressions should be present + async def _get_treatments_by_flag_set_async(factory): """Test client.get_treatments_by_flag_set().""" try: diff --git a/tests/models/test_fallback.py b/tests/models/test_fallback.py new file mode 100644 index 00000000..4dfdf79e --- /dev/null +++ b/tests/models/test_fallback.py @@ -0,0 +1,56 @@ +from splitio.models.fallback_treatment import FallbackTreatment +from splitio.models.fallback_config import FallbackTreatmentsConfiguration, FallbackTreatmentCalculator + +class FallbackTreatmentModelTests(object): + """Fallback treatment model tests.""" + + def test_working(self): + fallback_treatment = FallbackTreatment("on", '{"prop": "val"}') + assert fallback_treatment.config == '{"prop": "val"}' + assert fallback_treatment.treatment == 'on' + + fallback_treatment = FallbackTreatment("off") + assert fallback_treatment.config == None + assert fallback_treatment.treatment == 'off' + +class FallbackTreatmentsConfigModelTests(object): + """Fallback treatment configuration model tests.""" + + def test_working(self): + global_fb = FallbackTreatment("on") + flag_fb = FallbackTreatment("off") + fallback_config = FallbackTreatmentsConfiguration(global_fb, {"flag1": flag_fb}) + assert fallback_config.global_fallback_treatment == global_fb + assert fallback_config.by_flag_fallback_treatment == {"flag1": flag_fb} + + fallback_config.global_fallback_treatment = None + assert fallback_config.global_fallback_treatment == None + + 
fallback_config.by_flag_fallback_treatment["flag2"] = flag_fb + assert fallback_config.by_flag_fallback_treatment == {"flag1": flag_fb, "flag2": flag_fb} + + +class FallbackTreatmentCalculatorTests(object): + """Fallback treatment calculator model tests.""" + + def test_working(self): + fallback_config = FallbackTreatmentsConfiguration(FallbackTreatment("on", "{}"), None) + fallback_calculator = FallbackTreatmentCalculator(fallback_config) + assert fallback_calculator.fallback_treatments_configuration == fallback_config + assert fallback_calculator._label_prefix == "fallback - " + + fallback_treatment = fallback_calculator.resolve("feature", "not ready") + assert fallback_treatment.treatment == "on" + assert fallback_treatment.label == "fallback - not ready" + assert fallback_treatment.config == "{}" + + fallback_calculator._fallback_treatments_configuration = FallbackTreatmentsConfiguration(FallbackTreatment("on", "{}"), {'feature': FallbackTreatment("off", '{"prop": "val"}')}) + fallback_treatment = fallback_calculator.resolve("feature", "not ready") + assert fallback_treatment.treatment == "off" + assert fallback_treatment.label == "fallback - not ready" + assert fallback_treatment.config == '{"prop": "val"}' + + fallback_treatment = fallback_calculator.resolve("feature2", "not ready") + assert fallback_treatment.treatment == "on" + assert fallback_treatment.label == "fallback - not ready" + assert fallback_treatment.config == "{}" diff --git a/tests/push/test_manager.py b/tests/push/test_manager.py index c85301d8..3525baf3 100644 --- a/tests/push/test_manager.py +++ b/tests/push/test_manager.py @@ -259,7 +259,6 @@ class PushManagerAsyncTests(object): async def test_connection_success(self, mocker): """Test the initial status is ok and reset() works as expected.""" api_mock = mocker.Mock() - async def authenticate(): return Token(True, 'abc', {}, 2000000, 1000000) api_mock.authenticate.side_effect = authenticate @@ -274,8 +273,8 @@ async def coro(): t = 0 try:
while t < 3: - yield SSEEvent('1', EventType.MESSAGE, '', '{}') await asyncio.sleep(1) + yield SSEEvent('1', EventType.MESSAGE, '', '{}') t += 1 except Exception: pass @@ -295,7 +294,7 @@ async def stop(): manager._sse_client = sse_mock async def deferred_shutdown(): - await asyncio.sleep(1) + await asyncio.sleep(2) await manager.stop(True) manager.start() @@ -309,7 +308,10 @@ async def deferred_shutdown(): assert self.token.exp == 2000000 assert self.token.iat == 1000000 - await shutdown_task + try: + await shutdown_task + except: + pass assert not manager._running assert(telemetry_storage._streaming_events._streaming_events[0]._type == StreamingEventTypes.TOKEN_REFRESH.value) assert(telemetry_storage._streaming_events._streaming_events[1]._type == StreamingEventTypes.CONNECTION_ESTABLISHED.value) diff --git a/tests/storage/adapters/test_redis_adapter.py b/tests/storage/adapters/test_redis_adapter.py index a6bc72dc..9888c853 100644 --- a/tests/storage/adapters/test_redis_adapter.py +++ b/tests/storage/adapters/test_redis_adapter.py @@ -99,7 +99,6 @@ def test_adapter_building(self, mocker): 'redisUnixSocketPath': '/tmp/socket', 'redisEncoding': 'utf-8', 'redisEncodingErrors': 'strict', - 'redisErrors': 'abc', 'redisDecodeResponses': True, 'redisRetryOnTimeout': True, 'redisSsl': True, @@ -126,7 +125,6 @@ def test_adapter_building(self, mocker): unix_socket_path='/tmp/socket', encoding='utf-8', encoding_errors='strict', - errors='abc', decode_responses=True, retry_on_timeout=True, ssl=True, @@ -151,7 +149,6 @@ def test_adapter_building(self, mocker): 'redisUnixSocketPath': '/tmp/socket', 'redisEncoding': 'utf-8', 'redisEncodingErrors': 'strict', - 'redisErrors': 'abc', 'redisDecodeResponses': True, 'redisRetryOnTimeout': True, 'redisSsl': False, @@ -529,7 +526,6 @@ def master_for(se, 'redisUnixSocketPath': '/tmp/socket', 'redisEncoding': 'utf-8', 'redisEncodingErrors': 'strict', - 'redisErrors': 'abc', 'redisDecodeResponses': True, 'redisRetryOnTimeout': True, 
'redisSsl': False,