diff --git a/.circleci/config.yml b/.circleci/config.yml index 4a921187..3a1ec744 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -7,14 +7,11 @@ workflows: test: jobs: - test-linux: - name: Python 3.7 - docker-image: cimg/python:3.7 + name: Python 3.8 + docker-image: cimg/python:3.8 test-build-docs: true skip-sse-contract-tests: true skip-contract-tests: true - - test-linux: - name: Python 3.8 - docker-image: cimg/python:3.8 - test-linux: name: Python 3.9 docker-image: cimg/python:3.9 @@ -24,6 +21,9 @@ workflows: - test-linux: name: Python 3.11 docker-image: cimg/python:3.11 + - test-linux: + name: Python 3.12 + docker-image: cimg/python:3.12 - test-windows: name: Windows Python 3 py3: true @@ -60,6 +60,7 @@ jobs: - checkout - run: name: install requirements command: | pip install --upgrade pip + pip install setuptools pip install -r test-requirements.txt; pip install -r test-filesource-optional-requirements.txt; pip install -r consul-requirements.txt; @@ -136,15 +137,7 @@ jobs: - checkout - run: name: install Python 3 - command: | - choco install pyenv-win --force - refreshenv - pyenv install 3.11.0b3 - pyenv global 3.11.0b3 - [System.Environment]::SetEnvironmentVariable('PYENV',$env:USERPROFILE + "\.pyenv\pyenv-win\","User") - [System.Environment]::SetEnvironmentVariable('PYENV_ROOT',$env:USERPROFILE + "\.pyenv\pyenv-win\","User") - [System.Environment]::SetEnvironmentVariable('PYENV_HOME',$env:USERPROFILE + "\.pyenv\pyenv-win\","User") - [System.Environment]::SetEnvironmentVariable('path', $env:USERPROFILE + "\.pyenv\pyenv-win\bin;" + $env:USERPROFILE + "\.pyenv\pyenv-win\shims;" + [System.Environment]::GetEnvironmentVariable('path', "User"),"User") + command: choco install python --no-progress - run: python --version - run: name: set up DynamoDB @@ -182,6 +175,7 @@ jobs: name: install requirements command: | python --version + pip install setuptools pip install -r test-requirements.txt pip install -r consul-requirements.txt python setup.py install diff --git a/.readthedocs.yml b/.readthedocs.yml index 56781a23..c815431f 100644 --- a/.readthedocs.yml +++ b/.readthedocs.yml @@ -1,7 +1,7 @@ version: 2 python: - version: 3.7 + version: 3.8 install: - requirements: docs/requirements.txt - requirements: requirements.txt diff --git a/README.md b/README.md index c0dd2e00..ab1f56ae 100644 --- a/README.md +++ b/README.md @@ -13,7 +13,7 @@ ## Supported Python versions -This version of the LaunchDarkly SDK is compatible with Python 3.7 through 3.11. It is tested with the most recent patch releases of those versions. Python versions 2.7 to 3.6 are no longer supported. +This version of the LaunchDarkly SDK is compatible with Python 3.8 through 3.12. It is tested with the most recent patch releases of those versions. Python versions 2.7 through 3.7 are no longer supported.
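The `pip install setuptools` steps added to both CI jobs above are needed because virtual environments created on Python 3.12 no longer bundle setuptools (distutils was removed per PEP 632), so the legacy `python setup.py install` step fails without it. A minimal illustration of the failure mode; this guard is hypothetical and not part of the change:

```python
# Hypothetical pre-flight check: in a fresh Python 3.12 virtualenv,
# setuptools is absent, and `python setup.py install` would fail with
# ModuleNotFoundError: No module named 'setuptools'.
import importlib.util
import sys

if importlib.util.find_spec("setuptools") is None:
    sys.exit("setuptools is missing; run `pip install setuptools` first")
```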
## Getting started diff --git a/contract-tests/client_entity.py b/contract-tests/client_entity.py index 702a6a90..79caeadc 100644 --- a/contract-tests/client_entity.py +++ b/contract-tests/client_entity.py @@ -2,6 +2,7 @@ import logging import os import sys +import requests from typing import Optional from big_segment_store_fixture import BigSegmentStoreFixture @@ -10,6 +11,7 @@ # Import ldclient from parent directory sys.path.insert(1, os.path.join(sys.path[0], '..')) +from ldclient import Context, MigratorBuilder, ExecutionOrder, MigratorFn, Operation, Stage from ldclient import * @@ -39,7 +41,7 @@ def __init__(self, tag, config): opts["events_max_pending"] = events["capacity"] opts["diagnostic_opt_out"] = not events.get("enableDiagnostics", False) opts["all_attributes_private"] = events.get("allAttributesPrivate", False) - opts["private_attribute_names"] = events.get("globalPrivateAttributes", {}) + opts["private_attributes"] = events.get("globalPrivateAttributes", {}) _set_optional_time_prop(events, "flushIntervalMs", opts, "flush_interval") else: opts["send_events"] = False @@ -55,7 +57,7 @@ def __init__(self, tag, config): _set_optional_time_prop(big_params, "statusPollIntervalMs", big_config, "status_poll_interval") _set_optional_time_prop(big_params, "staleAfterMs", big_config, "stale_after") opts["big_segments"] = BigSegmentsConfig(**big_config) - + start_wait = config.get("startWaitTimeMs") or 5000 config = Config(**opts) @@ -68,12 +70,12 @@ def evaluate(self, params: dict) -> dict: response = {} if params.get("detail", False): - detail = self.client.variation_detail(params["flagKey"], params["context"], params["defaultValue"]) + detail = self.client.variation_detail(params["flagKey"], Context.from_dict(params["context"]), params["defaultValue"]) response["value"] = detail.value response["variationIndex"] = detail.variation_index response["reason"] = detail.reason else: - response["value"] = self.client.variation(params["flagKey"], params["context"], params["defaultValue"]) + response["value"] = self.client.variation(params["flagKey"], Context.from_dict(params["context"]), params["defaultValue"]) return response @@ -83,22 +85,22 @@ def evaluate_all(self, params: dict): opts["with_reasons"] = params.get("withReasons", False) opts["details_only_for_tracked_flags"] = params.get("detailsOnlyForTrackedFlags", False) - state = self.client.all_flags_state(params["context"], **opts) + state = self.client.all_flags_state(Context.from_dict(params["context"]), **opts) return {"state": state.to_json_dict()} def track(self, params: dict): - self.client.track(params["eventKey"], params["context"], params["data"], params.get("metricValue", None)) + self.client.track(params["eventKey"], Context.from_dict(params["context"]), params["data"], params.get("metricValue", None)) def identify(self, params: dict): - self.client.identify(params["context"]) + self.client.identify(Context.from_dict(params["context"])) def flush(self): self.client.flush() def secure_mode_hash(self, params: dict) -> dict: - return {"result": self.client.secure_mode_hash(params["context"])} - + return {"result": self.client.secure_mode_hash(Context.from_dict(params["context"]))} + def context_build(self, params: dict) -> dict: if params.get("multi"): b = Context.multi_builder() @@ -106,7 +108,7 @@ def context_build(self, params: dict) -> dict: b.add(self._context_build_single(c)) return self._context_response(b.build()) return self._context_response(self._context_build_single(params["single"])) - + def 
_context_build_single(self, params: dict) -> Context: b = Context.builder(params["key"]) if "kind" in params: @@ -122,7 +124,7 @@ def _context_build_single(self, params: dict) -> Context: for attr in params.get("private"): b.private(attr) return b.build() - + def context_convert(self, params: dict) -> dict: input = params["input"] try: @@ -130,12 +132,12 @@ def context_convert(self, params: dict) -> dict: return self._context_response(Context.from_dict(props)) except Exception as e: return {"error": str(e)} - + def _context_response(self, c: Context) -> dict: if c.valid: return {"output": c.to_json_string()} return {"error": c.error} - + def get_big_segment_store_status(self) -> dict: status = self.client.big_segment_store_status_provider.status return { @@ -143,10 +145,55 @@ def get_big_segment_store_status(self) -> dict: "stale": status.stale } + def migration_variation(self, params: dict) -> dict: + stage, _ = self.client.migration_variation(params["key"], Context.from_dict(params["context"]), Stage.from_str(params["defaultStage"])) + + return {'result': stage.value} + + def migration_operation(self, params: dict) -> dict: + builder = MigratorBuilder(self.client) + + if params["readExecutionOrder"] == "concurrent": + params["readExecutionOrder"] = "parallel" + + builder.read_execution_order(ExecutionOrder.from_str(params["readExecutionOrder"])) + builder.track_latency(params["trackLatency"]) + builder.track_errors(params["trackErrors"]) + + def callback(endpoint) -> MigratorFn: + def fn(payload) -> Result: + response = requests.post(endpoint, data=payload) + + if response.status_code == 200: + return Result.success(response.text) + + return Result.fail(f"Request failed with status code {response.status_code}") + + return fn + + if params["trackConsistency"]: + builder.read(callback(params["oldEndpoint"]), callback(params["newEndpoint"]), lambda lhs, rhs: lhs == rhs) + else: + builder.read(callback(params["oldEndpoint"]), callback(params["newEndpoint"])) + + builder.write(callback(params["oldEndpoint"]), callback(params["newEndpoint"])) + migrator = builder.build() + + if isinstance(migrator, str): + return {"result": migrator} + + if params["operation"] == Operation.READ.value: + result = migrator.read(params["key"], Context.from_dict(params["context"]), Stage.from_str(params["defaultStage"]), params["payload"]) + return {"result": result.value if result.is_success() else result.error} + + result = migrator.write(params["key"], Context.from_dict(params["context"]), Stage.from_str(params["defaultStage"]), params["payload"]) + return {"result": result.authoritative.value if result.authoritative.is_success() else result.authoritative.error} + def close(self): self.client.close() self.log.info('Test ended') + + def _set_optional_time_prop(params_in: dict, name_in: str, params_out: dict, name_out: str): if params_in.get(name_in) is not None: params_out[name_out] = params_in[name_in] / 1000.0 diff --git a/contract-tests/requirements.txt b/contract-tests/requirements.txt index 46a07968..8f0114ca 100644 --- a/contract-tests/requirements.txt +++ b/contract-tests/requirements.txt @@ -1,2 +1,3 @@ Flask==2.3.2 +requests>=2.31.0 urllib3>=1.22.0,<3 diff --git a/contract-tests/service.py b/contract-tests/service.py index 16a078ad..46c19aba 100644 --- a/contract-tests/service.py +++ b/contract-tests/service.py @@ -68,6 +68,8 @@ def status(): 'context-type', 'secure-mode-hash', 'tags', + 'migrations', + 'event-sampling' ] } return (json.dumps(body), 200, {'Content-type': 'application/json'}) @@ -130,6 
+132,10 @@ def post_client_command(id): response = client.context_convert(sub_params) elif command == "getBigSegmentStoreStatus": response = client.get_big_segment_store_status() + elif command == "migrationVariation": + response = client.migration_variation(sub_params) + elif command == "migrationOperation": + response = client.migration_operation(sub_params) else: return ('', 400) diff --git a/contract-tests/setup.cfg b/contract-tests/setup.cfg new file mode 100644 index 00000000..c1781905 --- /dev/null +++ b/contract-tests/setup.cfg @@ -0,0 +1,2 @@ +[pycodestyle] +ignore = E501 diff --git a/docs/api-main.rst b/docs/api-main.rst index 0947fa6a..90df0ec0 100644 --- a/docs/api-main.rst +++ b/docs/api-main.rst @@ -26,3 +26,10 @@ ldclient.evaluation module .. automodule:: ldclient.evaluation :members: :special-members: __init__ + +ldclient.migrations module +-------------------------- + +.. automodule:: ldclient.migrations + :members: + :special-members: __init__ diff --git a/ldclient/__init__.py b/ldclient/__init__.py index 30615b0e..faffd68d 100644 --- a/ldclient/__init__.py +++ b/ldclient/__init__.py @@ -3,10 +3,11 @@ """ from ldclient.impl.rwlock import ReadWriteLock as _ReadWriteLock -from ldclient.impl.util import log +from ldclient.impl.util import log, Result from ldclient.version import VERSION from .client import * from .context import * +from .migrations import * __version__ = VERSION @@ -104,9 +105,11 @@ def _reset_client(): 'ContextBuilder', 'ContextMultiBuilder', 'LDClient', + 'Result', 'client', 'context', 'evaluation', 'integrations', - 'interfaces' + 'interfaces', + 'migrations' ] diff --git a/ldclient/client.py b/ldclient/client.py index f9b8a65a..d7a24941 100644 --- a/ldclient/client.py +++ b/ldclient/client.py @@ -2,7 +2,7 @@ This submodule contains the client class that provides most of the SDK functionality. """ -from typing import Optional, Any, Dict, Mapping, Union +from typing import Optional, Any, Dict, Mapping, Union, Tuple from .impl import AnyNum @@ -24,11 +24,13 @@ from ldclient.impl.events.diagnostics import create_diagnostic_id, _DiagnosticAccumulator from ldclient.impl.events.event_processor import DefaultEventProcessor from ldclient.impl.events.types import EventFactory +from ldclient.impl.model.feature_flag import FeatureFlag from ldclient.impl.stubs import NullEventProcessor, NullUpdateProcessor from ldclient.impl.util import check_uwsgi, log from ldclient.interfaces import BigSegmentStoreStatusProvider, FeatureRequester, FeatureStore from ldclient.versioned_data_kind import FEATURES, SEGMENTS, VersionedDataKind from ldclient.feature_store import FeatureStore +from ldclient.migrations import Stage, OpTracker from threading import Lock @@ -190,7 +192,27 @@ def __exit__(self, type, value, traceback): def _send_event(self, event): self._event_processor.send_event(event) - def track(self, event_name: str, context: Union[dict, Context], data: Optional[Any]=None, + def track_migration_op(self, tracker: OpTracker): + """ + Tracks the results of a migration operation. This event includes + measurements which can be used to enhance the observability of a + migration within the LaunchDarkly UI. + + Customers making use of the :class:`ldclient.MigratorBuilder` should + not need to call this method manually. + + Customers not using the builder should provide this method with the + tracker returned from calling :func:`migration_variation`. 
+ """ + event = tracker.build() + + if isinstance(event, str): + log.error("error generting migration op event %s; no event will be emitted", event) + return + + self._send_event(event) + + def track(self, event_name: str, context: Context, data: Optional[Any]=None, metric_value: Optional[AnyNum]=None): """Tracks that an application-defined event occurred. @@ -201,28 +223,19 @@ def track(self, event_name: str, context: Union[dict, Context], data: Optional[A Note that event delivery is asynchronous, so the event may not actually be sent until later; see :func:`flush()`. - If you pass a dictionary of user attributes instead of a :class:`ldclient.Context`, - the SDK will convert the user to a Context. There is some overhead to this conversion, - so it is more efficient to pass a Context. - - DEPRECATED: This method will no longer accept a dictionary for the context parameter starting in 9.0.0 - :param event_name: the name of the event - :param context: the evaluation context or user associated with the event + :param context: the evaluation context associated with the event :param data: optional additional data associated with the event :param metric_value: a numeric value used by the LaunchDarkly experimentation feature in numeric custom metrics; can be omitted if this event is used by only non-numeric metrics """ - if not isinstance(context, Context): - warnings.warn("track will require a Context instance in 9.0.0", DeprecationWarning) - context = Context.from_dict(context) if not context.valid: log.warning("Invalid context for track (%s)" % context.error) else: self._send_event(self._event_factory_default.new_custom_event(event_name, context, data, metric_value)) - def identify(self, context: Union[Context, dict]): + def identify(self, context: Context): """Reports details about an evaluation context. This method simply creates an analytics event containing the context properties, to @@ -233,23 +246,11 @@ def identify(self, context: Union[Context, dict]): need to use :func:`identify()` if you want to identify the context without evaluating a flag. - If you pass a dictionary of user attributes instead of a :class:`ldclient.Context`, - the SDK will convert the user to a Context. There is some overhead to this conversion, - so it is more efficient to pass a Context. - - DEPRECATED: This method will no longer accept a dictionary for the context parameter starting in 9.0.0 - :param context: the context to register """ - if not isinstance(context, Context): - warnings.warn("identify will require a Context instance in 9.0.0", DeprecationWarning) - context = Context.from_dict(context) + if not context.valid: log.warning("Invalid context for identify (%s)" % context.error) - elif context.key == '' and not context.multiple: - # This could be a valid context for evaluations (if it was using the old user schema) - # but an identify event with an empty key is no good. - log.warning("Empty user key for identify") else: self._send_event(self._event_factory_default.new_identify_event(context)) @@ -280,50 +281,66 @@ def flush(self): return return self._event_processor.flush() - def variation(self, key: str, context: Union[Context, dict], default: Any) -> Any: + def variation(self, key: str, context: Context, default: Any) -> Any: """Calculates the value of a feature flag for a given context. - If you pass a dictionary of user attributes instead of a :class:`ldclient.Context`, - the SDK will convert the user to a Context. 
There is some overhead to this conversion, - so it is more efficient to pass a Context. - - DEPRECATED: This method will no longer accept a dictionary for the context parameter starting in 9.0.0 - :param key: the unique key for the feature flag - :param context: the evaluation context or user + :param context: the evaluation context :param default: the default value of the flag, to be used if the value is not available from LaunchDarkly :return: the variation for the given context, or the ``default`` value if the flag cannot be evaluated """ - return self._evaluate_internal(key, context, default, self._event_factory_default).value + detail, _ = self._evaluate_internal(key, context, default, self._event_factory_default) + return detail.value - def variation_detail(self, key: str, context: Union[Context, dict], default: Any) -> EvaluationDetail: + def variation_detail(self, key: str, context: Context, default: Any) -> EvaluationDetail: """Calculates the value of a feature flag for a given context, and returns an object that describes the way the value was determined. The ``reason`` property in the result will also be included in analytics events, if you are capturing detailed event data for this flag. - If you pass a dictionary of user attributes instead of a :class:`ldclient.Context`, - the SDK will convert the user to a Context. There is some overhead to this conversion, - so it is more efficient to pass a Context. - - DEPRECATED: This method will no longer accept a dictionary for the context parameter starting in 9.0.0 - :param key: the unique key for the feature flag - :param context: the evaluation context or user + :param context: the evaluation context :param default: the default value of the flag, to be used if the value is not available from LaunchDarkly :return: an :class:`ldclient.evaluation.EvaluationDetail` object that includes the feature flag value and evaluation reason """ - return self._evaluate_internal(key, context, default, self._event_factory_with_reasons) + detail, _ = self._evaluate_internal(key, context, default, self._event_factory_with_reasons) + return detail - def _evaluate_internal(self, key: str, context: Union[Context, dict], default: Any, event_factory): + def migration_variation(self, key: str, context: Context, default_stage: Stage) -> Tuple[Stage, OpTracker]: + """ + This method returns the migration stage of the migration feature flag + for the given evaluation context. + + This method returns the default stage if there is an error or the flag + does not exist. If the default stage is not a valid stage, then a + default stage of :class:`ldclient.migrations.Stage.OFF` will be used + instead. 
+ """ + if not isinstance(default_stage, Stage) or default_stage not in Stage: + log.error(f"default stage {default_stage} is not a valid stage; using 'off' instead") + default_stage = Stage.OFF + + detail, flag = self._evaluate_internal(key, context, default_stage.value, self._event_factory_default) + + if isinstance(detail.value, str): + stage = Stage.from_str(detail.value) + if stage is not None: + tracker = OpTracker(key, flag, context, detail, default_stage) + return stage, tracker + + detail = EvaluationDetail(default_stage.value, None, error_reason('WRONG_TYPE')) + tracker = OpTracker(key, flag, context, detail, default_stage) + return default_stage, tracker + + def _evaluate_internal(self, key: str, context: Context, default: Any, event_factory) -> Tuple[EvaluationDetail, Optional[FeatureFlag]]: default = self._config.get_default(key, default) if self._config.offline: - return EvaluationDetail(default, None, error_reason('CLIENT_NOT_READY')) + return EvaluationDetail(default, None, error_reason('CLIENT_NOT_READY')), None if not self.is_initialized(): if self._store.initialized: @@ -333,14 +350,11 @@ def _evaluate_internal(self, key: str, context: Union[Context, dict], default: A + str(default) + " for feature key: " + key) reason = error_reason('CLIENT_NOT_READY') self._send_event(event_factory.new_unknown_flag_event(key, context, default, reason)) - return EvaluationDetail(default, None, reason) + return EvaluationDetail(default, None, reason), None - if not isinstance(context, Context): - warnings.warn("variation methods will require a Context instance in 9.0.0", DeprecationWarning) - context = Context.from_dict(context) if not context.valid: log.warning("Context was invalid for flag evaluation (%s); returning default value" % context.error) - return EvaluationDetail(default, None, error_reason('USER_NOT_SPECIFIED')) + return EvaluationDetail(default, None, error_reason('USER_NOT_SPECIFIED')), None try: flag = _get_store_item(self._store, FEATURES, key) @@ -349,11 +363,11 @@ def _evaluate_internal(self, key: str, context: Union[Context, dict], default: A log.debug(traceback.format_exc()) reason = error_reason('EXCEPTION') self._send_event(event_factory.new_unknown_flag_event(key, context, default, reason)) - return EvaluationDetail(default, None, reason) + return EvaluationDetail(default, None, reason), None if not flag: reason = error_reason('FLAG_NOT_FOUND') self._send_event(event_factory.new_unknown_flag_event(key, context, default, reason)) - return EvaluationDetail(default, None, reason) + return EvaluationDetail(default, None, reason), None else: try: result = self._evaluator.evaluate(flag, context, event_factory) @@ -363,25 +377,23 @@ def _evaluate_internal(self, key: str, context: Union[Context, dict], default: A if detail.is_default_value(): detail = EvaluationDetail(default, None, detail.reason) self._send_event(event_factory.new_eval_event(flag, context, detail, default)) - return detail + return detail, flag except Exception as e: log.error("Unexpected error while evaluating feature flag \"%s\": %s" % (key, repr(e))) log.debug(traceback.format_exc()) reason = error_reason('EXCEPTION') self._send_event(event_factory.new_default_event(flag, context, default, reason)) - return EvaluationDetail(default, None, reason) + return EvaluationDetail(default, None, reason), flag - def all_flags_state(self, context: Union[Context, dict], **kwargs) -> FeatureFlagsState: - """Returns an object that encapsulates the state of all feature flags for a given user, + def 
all_flags_state(self, context: Context, **kwargs) -> FeatureFlagsState: + """Returns an object that encapsulates the state of all feature flags for a given context, including the flag values and also metadata that can be used on the front end. See the JavaScript SDK Reference Guide on `Bootstrapping `_. This method does not send analytics events back to LaunchDarkly. - DEPRECATED: This method will no longer accept a dictionary for the context parameter starting in 9.0.0 - - :param user: the end user requesting the feature flags + :param context: the evaluation context requesting the feature flags :param kwargs: optional parameters affecting how the state is computed - see below :Keyword Arguments: @@ -396,7 +408,7 @@ def all_flags_state(self, context: Union[Context, dict], **kwargs) -> FeatureFla turned on :return: a FeatureFlagsState object (will never be None; its ``valid`` property will be False - if the client is offline, has not been initialized, or the user is None or has no key) + if the client is offline, has not been initialized, or the context is invalid) """ if self._config.offline: log.warning("all_flags_state() called, but client is in offline mode. Returning empty state") @@ -409,9 +421,6 @@ def all_flags_state(self, context: Union[Context, dict], **kwargs) -> FeatureFla log.warning("all_flags_state() called before client has finished initializing! Feature store unavailable - returning empty state") return FeatureFlagsState(False) - if not isinstance(context, Context): - warnings.warn("all_flags_state will require a Context instance in 9.0.0", DeprecationWarning) - context = Context.from_dict(context) if not context.valid: log.warning("Context was invalid for all_flags_state (%s); returning default value" % context.error) return FeatureFlagsState(False) @@ -455,20 +464,15 @@ def all_flags_state(self, context: Union[Context, dict], **kwargs) -> FeatureFla return state - def secure_mode_hash(self, context: Union[Context, dict]) -> str: + def secure_mode_hash(self, context: Context) -> str: """Creates a hash string that can be used by the JavaScript SDK to identify a context. For more information, see the documentation on `Secure mode `_. 
- DEPRECATED: This method will no longer accept a dictionary for the context parameter starting in 9.0.0 - - :param context: the evaluation context or user + :param context: the evaluation context :return: the hash string """ - if not isinstance(context, Context): - warnings.warn("secure_mode_hash will require a Context instance in 9.0.0", DeprecationWarning) - context = Context.from_dict(context) if not context.valid: log.warning("Context was invalid for secure_mode_hash (%s); returning empty hash" % context.error) return "" diff --git a/ldclient/config.py b/ldclient/config.py index 4ac66f7d..a84a8419 100644 --- a/ldclient/config.py +++ b/ldclient/config.py @@ -36,8 +36,6 @@ def __init__(self, store: Optional[BigSegmentStore] = None, context_cache_size: int=1000, context_cache_time: float=5, - user_cache_size: Optional[int]=None, - user_cache_time: Optional[float]=None, status_poll_interval: float=5, stale_after: float=120): """ @@ -47,16 +45,14 @@ def __init__(self, by the SDK at any given time :param context_cache_time: the maximum length of time (in seconds) that the Big Segment state for a context will be cached by the SDK - :param user_cache_size: deprecated alias for `context_cache_size` - :param user_cache_time: deprecated alias for `context_cache_time` :param status_poll_interval: the interval (in seconds) at which the SDK will poll the Big Segment store to make sure it is available and to determine how long ago it was updated :param stale_after: the maximum length of time between updates of the Big Segments data before the data is considered out of date """ self.__store = store - self.__context_cache_size = context_cache_size if user_cache_size is None else user_cache_size - self.__context_cache_time = context_cache_time if user_cache_time is None else user_cache_time + self.__context_cache_size = context_cache_size + self.__context_cache_time = context_cache_time self.__status_poll_interval = status_poll_interval self.__stale_after = stale_after pass @@ -73,16 +69,6 @@ def context_cache_size(self) -> int: def context_cache_time(self) -> float: return self.__context_cache_time - @property - def user_cache_size(self) -> int: - """Deprecated alias for :attr:`context_cache_size`.""" - return self.context_cache_size - - @property - def user_cache_time(self) -> float: - """Deprecated alias for :attr:`context_cache_time`.""" - return self.context_cache_time - @property def status_poll_interval(self) -> float: return self.__status_poll_interval @@ -176,13 +162,10 @@ def __init__(self, feature_requester_class=None, event_processor_class: Callable[['Config'], EventProcessor]=None, private_attributes: Set[str]=set(), - private_attribute_names: Set[str]=set(), all_attributes_private: bool=False, offline: bool=False, context_keys_capacity: int=1000, context_keys_flush_interval: float=300, - user_keys_capacity: Optional[int] = None, - user_keys_flush_interval: Optional[float] = None, diagnostic_opt_out: bool=False, diagnostic_recording_interval: int=900, wrapper_name: Optional[str]=None, @@ -226,8 +209,6 @@ def __init__(self, with this configuration active will have these attributes removed. Each item can be either the name of an attribute ("email"), or a slash-delimited path ("/address/street") to mark a property within a JSON object value as private. - :param array private_attribute_names: Deprecated alias for ``private_attributes`` ("names" is no longer - strictly accurate because these could also be attribute reference paths). 
:param all_attributes_private: If true, all user attributes (other than the key) will be private, not just the attributes specified in ``private_attributes``. :param feature_store: A FeatureStore implementation @@ -235,8 +216,6 @@ def __init__(self, one time, so that duplicate context details will not be sent in analytics events. :param context_keys_flush_interval: The interval in seconds at which the event processor will reset its set of known context keys. - :param user_keys_capacity: Deprecated alias for ``context_keys_capacity``. - :param user_keys_flush_interval: Deprecated alias for ``context_keys_flush_interval``. :param feature_requester_class: A factory for a FeatureRequester implementation taking the sdk key and config :param event_processor_class: A factory for an EventProcessor implementation taking the config :param update_processor_class: A factory for an UpdateProcessor implementation taking the sdk key, @@ -278,11 +257,11 @@ def __init__(self, if offline is True: send_events = False self.__send_events = True if send_events is None else send_events - self.__private_attributes = private_attributes or private_attribute_names + self.__private_attributes = private_attributes self.__all_attributes_private = all_attributes_private self.__offline = offline - self.__context_keys_capacity = context_keys_capacity if user_keys_capacity is None else user_keys_capacity - self.__context_keys_flush_interval = context_keys_flush_interval if user_keys_flush_interval is None else user_keys_flush_interval + self.__context_keys_capacity = context_keys_capacity + self.__context_keys_flush_interval = context_keys_flush_interval self.__diagnostic_opt_out = diagnostic_opt_out self.__diagnostic_recording_interval = max(diagnostic_recording_interval, 60) self.__wrapper_name = wrapper_name @@ -408,10 +387,6 @@ def flush_interval(self) -> float: def private_attributes(self) -> List[str]: return list(self.__private_attributes) - @property - def private_attribute_names(self) -> List[str]: - return self.private_attributes - @property def all_attributes_private(self) -> bool: return self.__all_attributes_private @@ -428,16 +403,6 @@ def context_keys_capacity(self) -> int: def context_keys_flush_interval(self) -> float: return self.__context_keys_flush_interval - @property - def user_keys_capacity(self) -> int: - """Deprecated name for :attr:`context_keys_capacity`.""" - return self.context_keys_capacity - - @property - def user_keys_flush_interval(self) -> float: - """Deprecated name for :attr:`context_keys_flush_interval`.""" - return self.context_keys_flush_interval - @property def diagnostic_opt_out(self) -> bool: return self.__diagnostic_opt_out diff --git a/ldclient/context.py b/ldclient/context.py index 70817a1d..5add964d 100644 --- a/ldclient/context.py +++ b/ldclient/context.py @@ -17,7 +17,7 @@ def _escape_key_for_fully_qualified_key(key: str) -> str: # When building a fully-qualified key, ':' and '%' are percent-escaped; we do not use a full # URL-encoding function because implementations of this are inconsistent across platforms. return key.replace('%', '%25').replace(':', '%3A') - + def _validate_kind(kind: str) -> Optional[str]: if kind == '': return 'context kind must not be empty' @@ -183,23 +183,14 @@ def create_multi(cls, *contexts: Context) -> Context: def from_dict(cls, props: dict) -> Context: """ Creates a Context from properties in a dictionary, corresponding to the JSON - representation of a context or a user. 
- - If the dictionary has a "kind" property, then it is interpreted as a context using - the LaunchDarkly JSON schema for contexts. If it does not have a "kind" property, it - is interpreted as a context with "user" kind using the somewhat different LaunchDarkly - JSON schema for users in older LaunchDarkly SDKs. - - DEPRECATED: The legacy user format is deprecated and will be removed in 9.0.0 + representation of a context. - :param props: the context/user properties + :param props: the context properties :return: a context """ if props is None: return Context.__create_with_error('Cannot use None as a context') - if 'kind' not in props: - return Context.__from_dict_old_user(props) - kind = props['kind'] + kind = props.get('kind') if not isinstance(kind, str): return Context.__create_with_schema_type_error('kind') if kind == 'multi': @@ -217,7 +208,7 @@ def from_dict(cls, props: dict) -> Context: def builder(cls, key: str) -> ContextBuilder: """ Creates a builder for building a Context. - + You may use :class:`ldclient.ContextBuilder` methods to set additional attributes and/or change the context kind before calling :func:`ldclient.ContextBuilder.build()`. If you do not change any values, the defaults for the Context are that its ``kind`` is :const:`DEFAULT_KIND`, @@ -235,12 +226,12 @@ def builder(cls, key: str) -> ContextBuilder: """ return ContextBuilder(key) - + @classmethod def builder_from_context(cls, context: Context) -> ContextBuilder: """ Creates a builder whose properties are the same as an existing single-kind Context. - + You may then change the builder's state in any way and call :func:`ldclient.ContextBuilder.build()` to create a new independent Context. @@ -248,7 +239,7 @@ def builder_from_context(cls, context: Context) -> ContextBuilder: :return: a new builder """ return ContextBuilder(context.key, context) - + @classmethod def multi_builder(cls) -> ContextMultiBuilder: """ @@ -266,12 +257,12 @@ def multi_builder(cls) -> ContextMultiBuilder: :see: :func:`create_multi()` """ return ContextMultiBuilder() - + @property def valid(self) -> bool: """ True for a valid Context, or False for an invalid one. - + A valid context is one that can be used in SDK operations. An invalid context is one that is missing necessary attributes or has invalid attributes, indicating an incorrect usage of the SDK API. The only ways for a context to be invalid are: @@ -295,22 +286,22 @@ def valid(self) -> bool: :attr:`valid` or :attr:`error`. """ return self.__error is None - + @property def error(self) -> Optional[str]: """ Returns None for a valid Context, or an error message for an invalid one. - + If this is None, then :attr:`valid` is True. If it is not None, then :attr:`valid` is False. """ return self.__error - + @property def multiple(self) -> bool: """ True if this is a multi-context. - + If this value is True, then :attr:`kind` is guaranteed to be :const:`MULTI_KIND`, and you can inspect the individual context for each kind with :func:`get_individual_context()`. @@ -320,12 +311,12 @@ def multiple(self) -> bool: :see: :func:`create_multi()` """ return self.__multi is not None - + @property def kind(self) -> str: """ Returns the context's ``kind`` attribute. - + Every valid context has a non-empty kind. For multi-contexts, this value is :const:`MULTI_KIND` and the kinds within the context can be inspected with :func:`get_individual_context()`. @@ -339,7 +330,7 @@ def kind(self) -> str: def key(self) -> str: """ Returns the context's ``key`` attribute. 
- + For a single context, this value is set by :func:`create`, or :func:`ldclient.ContextBuilder.key()`. For a multi-context, there is no single value and :attr:`key` returns an empty string. Use @@ -350,12 +341,12 @@ def key(self) -> str: :see: :func:`create()` """ return self.__key - + @property def name(self) -> Optional[str]: """ Returns the context's ``name`` attribute. - + For a single context, this value is set by :func:`ldclient.ContextBuilder.name()`. It is None if no value was set. @@ -366,7 +357,7 @@ def name(self) -> Optional[str]: :see: :func:`ldclient.ContextBuilder.name()` """ return self.__name - + @property def anonymous(self) -> bool: """ @@ -398,18 +389,18 @@ def get(self, attribute: str) -> Any: by :func:`ldclient.ContextBuilder.set()`. It can also be one of the built-in ones like "kind", "key", or "name"; in such cases, it is equivalent to :attr:`kind`, :attr:`key`, or :attr:`name`. - + For a multi-context, the only supported attribute name is "kind". Use :func:`get_individual_context()` to get the context for a particular kind and then get its attributes. - + If the value is found, the return value is the attribute value. If there is no such attribute, the return value is None. An attribute that actually exists cannot have a value of None. Context has a ``__getitem__`` magic method equivalent to ``get``, so ``context['attr']`` behaves the same as ``context.get('attr')``. - + :param attribute: the desired attribute name :return: the attribute value, or None if there is no such attribute @@ -426,7 +417,7 @@ def get(self, attribute: str) -> Any: if self.__attributes is None: return None return self.__attributes.get(attribute) - + @property def individual_context_count(self) -> int: """ @@ -444,7 +435,7 @@ def individual_context_count(self) -> int: if self.__multi is None: return 1 return len(self.__multi) - + def get_individual_context(self, kind: Union[int, str]) -> Optional[Context]: """ Returns the single-kind Context corresponding to one of the kinds in this context. @@ -461,7 +452,7 @@ def get_individual_context(self, kind: Union[int, str]) -> Optional[Context]: of :attr:`individual_context_count`), and the return value on success is one of the individual Contexts within. Or, if ``kind`` is a string, it must match the context kind of one of the individual contexts. - + If there is no context corresponding to ``kind``, the method returns None. :param kind: the index or string value of a context kind @@ -482,8 +473,8 @@ def get_individual_context(self, kind: Union[int, str]) -> Optional[Context]: return self if kind == 0 else None if kind < 0 or kind >= len(self.__multi): return None - return self.__multi[kind] - + return self.__multi[kind] + @property def custom_attributes(self) -> Iterable[str]: """ @@ -492,19 +483,19 @@ def custom_attributes(self) -> Iterable[str]: For a single-kind context, this includes all the names that were passed to :func:`ldclient.ContextBuilder.set()` as long as the values were not None (since a value of None in LaunchDarkly is equivalent to the attribute not being set). - + For a multi-context, there are no such names. 
:return: an iterable """ return () if self.__attributes is None else self.__attributes - + @property def _attributes(self) -> Optional[dict[str, Any]]: # for internal use by ContextBuilder - we don't want to expose the original dict # since that would break immutability return self.__attributes - + @property def private_attributes(self) -> Iterable[str]: """ @@ -537,7 +528,7 @@ def to_dict(self) -> dict[str, Any]: """ Returns a dictionary of properties corresponding to the JSON representation of the context (as an associative array), in the standard format used by LaunchDarkly SDKs. - + Use this method if you are passing context data to the front end for use with the LaunchDarkly JavaScript SDK. @@ -551,18 +542,18 @@ def to_dict(self) -> dict[str, Any]: ret[c.kind] = c.__to_dict_single(False) return ret return self.__to_dict_single(True) - + def to_json_string(self) -> str: """ Returns the JSON representation of the context as a string, in the standard format used by LaunchDarkly SDKs. - + This is equivalent to calling :func:`to_dict()` and then ``json.dumps()``. :return: the JSON representation as a string """ return json.dumps(self.to_dict(), separators=(',', ':')) - + def __to_dict_single(self, with_kind: bool) -> dict[str, Any]: ret = {"key": self.__key} # type: Dict[str, Any] if with_kind: @@ -601,44 +592,6 @@ def __from_dict_single(self, props: dict, kind: Optional[str]) -> Context: if not b.try_set(k, v): return Context.__create_with_schema_type_error(k) return b.build() - - @classmethod - def __from_dict_old_user(self, props: dict) -> Context: - warnings.warn("legacy user format will be removed in 9.0.0", DeprecationWarning) - b = ContextBuilder('').kind('user') - has_key = False - for k, v in props.items(): - if k == 'custom': - if v is None: - continue - if not isinstance(v, dict): - return Context.__create_with_schema_type_error(k) - for k1, v1 in v.items(): - b.set(k1, v1) - elif k == 'privateAttributeNames': - if v is None: - continue - if not isinstance(v, list): - return Context.__create_with_schema_type_error(k) - for pa in v: - if not isinstance(pa, str): - return Context.__create_with_schema_type_error(k) - b.private(pa) - elif k in _USER_STRING_ATTRS: - if v is None: - continue - if not isinstance(v, str): - return Context.__create_with_schema_type_error(k) - b.set(k, v) - else: - if k == 'anonymous' and v is None: - v = False # anonymous: null was allowed in the old user model - if not b.try_set(k, v): - return Context.__create_with_schema_type_error(k) - if k == 'key': - has_key = True - b._allow_empty_key(has_key) - return b.build() def __getitem__(self, attribute) -> Any: return self.get(attribute) if isinstance(attribute, str) else None @@ -658,7 +611,7 @@ def __repr__(self) -> str: if not self.valid: return "[invalid context: %s]" % self.__error return self.to_json_string() - + def __eq__(self, other) -> bool: """ Compares contexts for deep equality of their attributes. @@ -744,13 +697,13 @@ def __init__(self, key: str, copy_from: Optional[Context] = None): self.__copy_on_write_attrs = self.__attributes is not None self.__copy_on_write_private = self.__private is not None self.__allow_empty_key = False - + def build(self) -> Context: """ Creates a Context from the current builder properties. The Context is immutable and will not be affected by any subsequent actions on the builder. - + It is possible to specify invalid attributes for a ContextBuilder, such as an empty key. 
Instead of throwing an exception, the ContextBuilder always returns an Context and you can check :attr:`ldclient.Context.valid` or :attr:`ldclient.Context.error` to see if it has @@ -764,11 +717,11 @@ def build(self) -> Context: self.__copy_on_write_private = (self.__private is not None) return Context(self.__kind, self.__key, self.__name, self.__anonymous, self.__attributes, self.__private, None, self.__allow_empty_key) - + def key(self, key: str) -> ContextBuilder: """ Sets the context's key attribute. - + Every context has a key, which is always a string. It cannot be an empty string, but there are no other restrictions on its value. @@ -789,17 +742,17 @@ def kind(self, kind: str) -> ContextBuilder: The meaning of the context kind is completely up to the application. Validation rules are as follows: - + * It may only contain letters, numbers, and the characters ``.``, ``_``, and ``-``. * It cannot equal the literal string "kind". * For a single context, it cannot equal "multi". - + :param kind: the context kind :return: the builder """ self.__kind = kind return self - + def name(self, name: Optional[str]) -> ContextBuilder: """ Sets the context's name attribute. @@ -809,13 +762,13 @@ def name(self, name: Optional[str]) -> ContextBuilder: * Unlike most other attributes, it is always a string if it is specified. * The LaunchDarkly dashboard treats this attribute as the preferred display name for contexts. - + :param name: the context name (None to unset the attribute) :return: the builder """ self.__name = name return self - + def anonymous(self, anonymous: bool) -> ContextBuilder: """ Sets whether the context is only intended for flag evaluations and should not be @@ -841,7 +794,7 @@ def anonymous(self, anonymous: bool) -> ContextBuilder: """ self.__anonymous = anonymous return self - + def set(self, attribute: str, value: Any) -> ContextBuilder: """ Sets the value of any attribute for the context. @@ -850,7 +803,7 @@ def set(self, attribute: str, value: Any) -> ContextBuilder: as :func:`private()`. If ``attributeName`` is ``"private"``, you will be setting an attribute with that name which you can use in evaluations or to record data for your own purposes, but it will be unrelated to :func:`private()`. - + The allowable types for context attributes are equivalent to JSON types: boolean, number, string, array (list), or object (dictionary). For all attribute names that do not have special meaning to LaunchDarkly, you may use any of those types. Values of different JSON @@ -863,7 +816,7 @@ def set(self, attribute: str, value: Any) -> ContextBuilder: * ``"kind"``, ``"key"``: Must be a string. See :func:`kind()` and :func:`key()`. * ``"name"``: Must be a string or None. See :func:`name()`. * ``"anonymous"``: Must be a boolean. See :func:`anonymous()`. - + The attribute name ``"_meta"`` is not allowed, because it has special meaning in the JSON schema for contexts; any attempt to set an attribute with this name has no effect. 
@@ -882,7 +835,7 @@ def set(self, attribute: str, value: Any) -> ContextBuilder: """ self.try_set(attribute, value) return self - + def try_set(self, attribute: str, value: Any) -> bool: """ Same as :func:`set()`, but returns a boolean indicating whether the attribute was @@ -891,7 +844,7 @@ def try_set(self, attribute: str, value: Any) -> bool: :param attribute: the attribute name to set :param value: the value to set :return: True if successful; False if the name was invalid or the value was not an - allowed type for that attribute + allowed type for that attribute """ if attribute == '' or attribute == '_meta': return False @@ -925,7 +878,7 @@ def try_set(self, attribute: str, value: Any) -> bool: else: self.__attributes[attribute] = value return True - + def private(self, *attributes: str) -> ContextBuilder: """ Designates any number of Context attributes, or properties within them, as private: that is, @@ -959,7 +912,7 @@ class ContextMultiBuilder: Use this builder if you need to construct a :class:`ldclient.Context` that contains multiple contexts, each for a different context kind. To define a regular context for a single kind, use :func:`ldclient.Context.create()` or :func:`ldclient.Context.builder()`. - + Obtain an instance of ContextMultiBuilder by calling :func:`ldclient.Context.multi_builder()`; then, call :func:`add()` to specify the individual context for each kind. The method returns a reference to the same builder, so calls can be chained: @@ -973,13 +926,13 @@ class ContextMultiBuilder: def __init__(self): self.__contexts = [] # type: list[Context] self.__copy_on_write = False - + def build(self) -> Context: """ Creates a Context from the current builder properties. The Context is immutable and will not be affected by any subsequent actions on the builder. - + It is possible for a ContextMultiBuilder to represent an invalid state. Instead of throwing an exception, the ContextMultiBuilder always returns a Context, and you can check :attr:`ldclient.Context.valid` or :attr:`ldclient.Context.error` to see if it has an @@ -997,7 +950,7 @@ def build(self) -> Context: self.__copy_on_write = True # Context constructor will handle validation return Context(None, '', None, False, None, None, self.__contexts) - + def add(self, context: Context) -> ContextMultiBuilder: """ Adds an individual Context for a specific kind to the builder. 
@@ -1018,7 +971,7 @@ def add(self, context: Context) -> ContextMultiBuilder: c1plus2 = Context.multi_builder.add(c1).add(c2).build() multi2 = Context.multi_builder().add(c1plus2).add(c3).build() - + :param context: the context to add :return: the builder """ diff --git a/ldclient/impl/events/event_processor.py b/ldclient/impl/events/event_processor.py index b246bcd6..ac8bf3cc 100644 --- a/ldclient/impl/events/event_processor.py +++ b/ldclient/impl/events/event_processor.py @@ -7,23 +7,28 @@ from email.utils import parsedate import json from threading import Event, Lock, Thread -from typing import Any, List, Optional +from typing import Any, List, Optional, Dict import time import uuid import queue import urllib3 from ldclient.config import Config +from datetime import timedelta +from random import Random from ldclient.context import Context from ldclient.impl.events.diagnostics import create_diagnostic_init from ldclient.impl.events.event_context_formatter import EventContextFormatter from ldclient.impl.events.event_summarizer import EventSummarizer, EventSummary from ldclient.impl.events.types import EventInput, EventInputCustom, EventInputEvaluation, EventInputIdentify +from ldclient.migrations.tracker import MigrationOpEvent +from ldclient.impl.util import timedelta_millis from ldclient.impl.fixed_thread_pool import FixedThreadPool from ldclient.impl.http import _http_factory from ldclient.impl.lru_cache import SimpleLRUCache from ldclient.impl.repeating_task import RepeatingTask from ldclient.impl.util import check_if_error_is_recoverable_and_log, current_time_millis, is_http_error_recoverable, log, _headers +from ldclient.impl.sampler import Sampler from ldclient.interfaces import EventProcessor __MAX_FLUSH_THREADS__ = 5 @@ -90,6 +95,72 @@ def make_output_event(self, e: Any): if e.metric_value is not None: out['metricValue'] = e.metric_value return out + elif isinstance(e, MigrationOpEvent): + out = { + 'kind': 'migration_op', + 'creationDate': e.timestamp, + 'operation': e.operation.value, + 'contextKeys': self._context_keys(e.context), + 'evaluation': { + 'key': e.key, + 'value': e.detail.value + } + } + + if e.flag is not None: + out["evaluation"]["version"] = e.flag.version + if e.default_stage: + out["evaluation"]["default"] = e.default_stage.value + if e.detail.variation_index is not None: + out["evaluation"]["variation"] = e.detail.variation_index + if e.detail.reason is not None: + out["evaluation"]["reason"] = e.detail.reason + + if e.sampling_ratio is not None and e.sampling_ratio != 1: + out["samplingRatio"] = e.sampling_ratio + + measurements: List[Dict] = [] + + if len(e.invoked) > 0: + measurements.append( + { + "key": "invoked", + "values": {origin.value: True for origin in e.invoked} + } + ) + + if e.consistent is not None: + measurement = { + "key": "consistent", + "value": e.consistent + } + + if e.consistent_ratio is not None and e.consistent_ratio != 1: + measurement["samplingRatio"] = e.consistent_ratio + + measurements.append(measurement) + + if len(e.latencies) > 0: + measurements.append( + { + "key": "latency_ms", + "values": {o.value: timedelta_millis(d) for o, d in e.latencies.items()} + } + ) + + if len(e.errors) > 0: + measurements.append( + { + "key": "error", + "values": {origin.value: True for origin in e.errors} + } + ) + + if len(measurements): + out["measurements"] = measurements + + return out + return None """ @@ -265,6 +336,7 @@ def __init__(self, inbox, config, http_client, diagnostic_accumulator=None): self._last_known_past_time = 0 
self._deduplicated_contexts = 0 self._diagnostic_accumulator = None if config.diagnostic_opt_out else diagnostic_accumulator + self._sampler = Sampler(Random()) self._flush_workers = FixedThreadPool(__MAX_FLUSH_THREADS__, "ldclient.flush") self._diagnostic_flush_workers = None if self._diagnostic_accumulator is None else FixedThreadPool(1, "ldclient.diag_flush") @@ -314,10 +386,12 @@ def _process_event(self, event: EventInput): can_add_index = True full_event = None # type: Any debug_event = None # type: Optional[DebugEvent] + sampling_ratio = 1 if event.sampling_ratio is None else event.sampling_ratio if isinstance(event, EventInputEvaluation): context = event.context - self._outbox.add_to_summary(event) + if not event.exclude_from_summaries: + self._outbox.add_to_summary(event) if event.track_events: full_event = event if self._should_debug_event(event): @@ -329,6 +403,8 @@ def _process_event(self, event: EventInput): elif isinstance(event, EventInputCustom): context = event.context full_event = event + elif isinstance(event, MigrationOpEvent): + full_event = event # For each context we haven't seen before, we add an index event - unless this is already # an identify event. @@ -340,10 +416,10 @@ def _process_event(self, event: EventInput): else: self._outbox.add_event(IndexEvent(event.timestamp, context)) - if full_event: + if full_event and self._sampler.sample(sampling_ratio): self._outbox.add_event(full_event) - - if debug_event: + + if debug_event and self._sampler.sample(sampling_ratio): self._outbox.add_event(debug_event) def _should_debug_event(self, event: EventInputEvaluation): diff --git a/ldclient/impl/events/types.py b/ldclient/impl/events/types.py index 0526a7db..e120e81e 100644 --- a/ldclient/impl/events/types.py +++ b/ldclient/impl/events/types.py @@ -13,12 +13,14 @@ # these at high volume, we want them to be efficient so we use attributes and slots rather # than dictionaries. 
+ class EventInput: - __slots__ = ['timestamp', 'context'] + __slots__ = ['timestamp', 'context', 'sampling_ratio'] - def __init__(self, timestamp: int, context: Context): + def __init__(self, timestamp: int, context: Context, sampling_ratio: Optional[int] = None): self.timestamp = timestamp self.context = context + self.sampling_ratio = sampling_ratio def __repr__(self) -> str: # used only in test debugging return "%s(%s)" % (self.__class__.__name__, json.dumps(self.to_debugging_dict())) @@ -29,13 +31,14 @@ def __eq__(self, other) -> bool: # used only in tests def to_debugging_dict(self) -> dict: pass + class EventInputEvaluation(EventInput): - __slots__ = ['key', 'flag', 'variation', 'value', 'reason', 'default_value', 'prereq_of', 'track_events'] + __slots__ = ['key', 'flag', 'variation', 'value', 'reason', 'default_value', 'prereq_of', 'track_events', 'sampling_ratio', 'exclude_from_summaries'] def __init__(self, timestamp: int, context: Context, key: str, flag: Optional[FeatureFlag], variation: Optional[int], value: Any, reason: Optional[dict], default_value: Any, prereq_of: Optional[FeatureFlag] = None, track_events: bool = False): - super().__init__(timestamp, context) + super().__init__(timestamp, context, 1 if flag is None else flag.sampling_ratio) self.key = key self.flag = flag self.variation = variation @@ -44,7 +47,8 @@ def __init__(self, timestamp: int, context: Context, key: str, flag: Optional[Fe self.default_value = default_value self.prereq_of = prereq_of self.track_events = track_events - + self.exclude_from_summaries = False if flag is None else flag.exclude_from_summaries + def to_debugging_dict(self) -> dict: return { "timestamp": self.timestamp, @@ -56,16 +60,21 @@ def to_debugging_dict(self) -> dict: "reason": self.reason, "default_value": self.default_value, "prereq_of": {"key": self.prereq_of.key} if self.prereq_of else None, - "track_events": self.track_events + "track_events": self.track_events, + "exclude_from_summaries": self.exclude_from_summaries, + "sampling_ratio": self.sampling_ratio, } + class EventInputIdentify(EventInput): def to_debugging_dict(self) -> dict: return { "timestamp": self.timestamp, - "context": self.context.to_dict() + "context": self.context.to_dict(), + "sampling_ratio": self.sampling_ratio, } + class EventInputCustom(EventInput): __slots__ = ['key', 'data', 'metric_value'] @@ -79,11 +88,13 @@ def to_debugging_dict(self) -> dict: return { "timestamp": self.timestamp, "context": self.context.to_dict(), + "sampling_ratio": self.sampling_ratio, "key": self.key, "data": self.data, "metric_value": self.metric_value } + # Event constructors are centralized here to avoid mistakes and repetitive logic. # The LDClient owns two instances of EventFactory: one that always embeds evaluation reasons # in the events (for when variation_detail is called) and one that doesn't. 
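The `sampling_ratio` threaded through `EventInput` above feeds the `Sampler` introduced later in this diff (`ldclient/impl/sampler.py`): a ratio of 1 always emits the event, 0 or a non-integer never does, and a ratio of n keeps roughly one event in n. A minimal sketch of that behavior; `Sampler` lives under `ldclient.impl` and is internal, so this is illustrative only, not public API:

```python
from random import Random

from ldclient.impl.sampler import Sampler  # internal module added in this diff

sampler = Sampler(Random(42))  # seeded for a repeatable demonstration

# With ratio=10 we expect roughly one event in ten to be kept.
kept = sum(1 for _ in range(10_000) if sampler.sample(10))
print(f"kept {kept} of 10000 events")  # roughly 1000

assert sampler.sample(1) is True      # ratio 1: always sample
assert sampler.sample(0) is False     # ratio <= 0: never sample
assert sampler.sample(True) is False  # bools are rejected even though bool subclasses int
```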
diff --git a/ldclient/impl/model/feature_flag.py b/ldclient/impl/model/feature_flag.py index a53c7689..5a3c15ee 100644 --- a/ldclient/impl/model/feature_flag.py +++ b/ldclient/impl/model/feature_flag.py @@ -1,4 +1,4 @@ -from typing import Any, List, Optional, Set +from typing import Any, List, Optional, Set, Union, Dict from ldclient.impl.model.clause import Clause from ldclient.impl.model.entity import * @@ -28,11 +28,11 @@ def __init__(self, data: dict): self._context_kind = opt_str(data, 'contextKind') self._variation = req_int(data, 'variation') self._values = set(req_str_list(data, 'values')) - + @property def context_kind(self) -> Optional[str]: return self._context_kind - + @property def variation(self) -> int: return self._variation @@ -50,11 +50,11 @@ def __init__(self, data: dict): self._variation_or_rollout = VariationOrRollout(data) self._clauses = list(Clause(item) for item in req_dict_list(data, 'clauses')) self._track_events = opt_bool(data, 'trackEvents') - + @property def id(self) -> Optional[str]: return self._id - + @property def clauses(self) -> List[Clause]: return self._clauses @@ -68,6 +68,17 @@ def variation_or_rollout(self) -> VariationOrRollout: return self._variation_or_rollout +class MigrationSettings: + __slots__ = ['_check_ratio'] + + def __init__(self, data: Dict): + self._check_ratio = opt_int(data, 'checkRatio') + + @property + def check_ratio(self) -> Optional[int]: + return self._check_ratio + + class FeatureFlag(ModelEntity): __slots__ = ['_data', '_key', '_version', '_deleted', '_variations', '_on', '_off_variation', '_fallthrough', '_prerequisites', '_targets', '_context_targets', '_rules', @@ -97,10 +108,17 @@ def __init__(self, data: dict): self._track_events_fallthrough = opt_bool(data, 'trackEventsFallthrough') self._debug_events_until_date = opt_number(data, 'debugEventsUntilDate') + self._migrations = None + if 'migration' in data: + self._migrations = MigrationSettings(opt_dict(data, 'migration') or {}) + + self._exclude_from_summaries = opt_bool(data, 'excludeFromSummaries') or False + self._sampling_ratio = opt_int(data, 'samplingRatio') + @property def key(self) -> str: return self._key - + @property def version(self) -> int: return self._version @@ -108,7 +126,7 @@ def version(self) -> int: @property def deleted(self) -> bool: return self._deleted - + @property def variations(self) -> List[Any]: return self._variations @@ -156,3 +174,15 @@ def track_events_fallthrough(self) -> bool: @property def debug_events_until_date(self) -> Optional[Union[int, float]]: return self._debug_events_until_date + + @property + def migrations(self) -> Optional[MigrationSettings]: + return self._migrations + + @property + def exclude_from_summaries(self) -> bool: + return self._exclude_from_summaries + + @property + def sampling_ratio(self) -> Optional[int]: + return self._sampling_ratio diff --git a/ldclient/impl/model/variation_or_rollout.py b/ldclient/impl/model/variation_or_rollout.py index 476d1df3..d19ba10d 100644 --- a/ldclient/impl/model/variation_or_rollout.py +++ b/ldclient/impl/model/variation_or_rollout.py @@ -1,4 +1,4 @@ -from typing import Any, List, Optional +from typing import List, Optional from ldclient.impl.model.attribute_ref import AttributeRef, opt_attr_ref_with_opt_context_kind from ldclient.impl.model.entity import * @@ -19,7 +19,7 @@ def variation(self) -> int: @property def weight(self) -> int: return self._weight - + @property def untracked(self) -> int: return self._untracked @@ -64,11 +64,11 @@ def __init__(self, data): 
         self._variation = opt_int(data, 'variation')
         rollout = opt_dict(data, 'rollout')
         self._rollout = None if rollout is None else Rollout(rollout)
-    
+
     @property
     def variation(self) -> Optional[int]:
         return self._variation
-    
+
     @property
     def rollout(self) -> Optional[Rollout]:
         return self._rollout
diff --git a/ldclient/impl/sampler.py b/ldclient/impl/sampler.py
new file mode 100644
index 00000000..24f268d2
--- /dev/null
+++ b/ldclient/impl/sampler.py
@@ -0,0 +1,18 @@
+from random import Random
+
+
+class Sampler:
+    def __init__(self, generator: Random):
+        self.__generator = generator
+
+    def sample(self, ratio: int):
+        # Booleans are considered ints in python, so we have to check for them
+        # as well here.
+        if isinstance(ratio, bool) or not isinstance(ratio, int):
+            return False
+        if ratio <= 0:
+            return False
+        if ratio == 1:
+            return True
+
+        return self.__generator.random() < (1 / ratio)
diff --git a/ldclient/impl/util.py b/ldclient/impl/util.py
index 9c7210c1..9ba55a45 100644
--- a/ldclient/impl/util.py
+++ b/ldclient/impl/util.py
@@ -3,15 +3,20 @@
 import sys
 import time
 
-from typing import Any
+from typing import Any, Optional
 from ldclient.impl.http import _base_headers
 from urllib.parse import urlparse, urlunparse
+from datetime import timedelta
 
 
 def current_time_millis() -> int:
     return int(time.time() * 1000)
 
 
+def timedelta_millis(delta: timedelta) -> float:
+    return delta / timedelta(milliseconds=1)
+
+
 log = logging.getLogger('ldclient.util')  # historical logger name
 
 import queue
@@ -147,3 +152,79 @@ def redact_password(url: str) -> str:
         parts = parts._replace(netloc=updated)
 
     return urlunparse(parts)
+
+
+class Result:
+    """
+    A Result is used to reflect the outcome of any operation.
+
+    Results can either be considered a success or a failure.
+
+    In the event of success, the Result will contain an optional, nullable
+    value to pass any success value back to the calling function.
+
+    If the operation fails, the Result will contain an error describing the
+    failure.
+    """
+
+    def __init__(self, value: Optional[Any], error: Optional[str], exception: Optional[Exception]):
+        """
+        This constructor should be considered private. Consumers of this class
+        should use one of the two factory methods provided. Direct
+        instantiation should follow the below expectations:
+
+        - Successful operations contain a value, but *MUST NOT* contain an
+          error or an exception value.
+        - Failed operations *MUST* contain an error string, and may optionally
+          include an exception.
+
+        :param value: A result value when the operation was a success
+        :param error: An error describing the cause of the failure
+        :param exception: An optional exception if the failure resulted from an
+            exception being raised
+        """
+        self.__value = value
+        self.__error = error
+        self.__exception = exception
+
+    @staticmethod
+    def success(value: Any) -> 'Result':
+        """
+        Construct a successful result containing the provided value.
+
+        :param value: A result value when the operation was a success
+        :return: The successful result instance
+        """
+        return Result(value, None, None)
+
+    @staticmethod
+    def fail(error: str, exception: Optional[Exception] = None) -> 'Result':
+        """
+        Construct a failed result containing an error description and optional
+        exception.
+
+        :param error: An error describing the cause of the failure
+        :param exception: An optional exception if the failure resulted from an
+            exception being raised
+        :return: The failed result instance
+        """
+        return Result(None, error, exception)
+
+    def is_success(self) -> bool:
+        """
+        Determine whether this result represents success or failure by checking
+        for the presence of an error.
+        """
+        return self.__error is None
+
+    @property
+    def value(self) -> Optional[Any]:
+        return self.__value
+
+    @property
+    def error(self) -> Optional[str]:
+        return self.__error
+
+    @property
+    def exception(self) -> Optional[Exception]:
+        return self.__exception
diff --git a/ldclient/integrations/test_data.py b/ldclient/integrations/test_data.py
index ce9527eb..3e1e9b92 100644
--- a/ldclient/integrations/test_data.py
+++ b/ldclient/integrations/test_data.py
@@ -24,7 +24,7 @@ class TestData():
     ::
 
         td = TestData.data_source()
-        td.update(td.flag('flag-key-1').variation_for_all_users(True))
+        td.update(td.flag('flag-key-1').variation_for_all(True))
 
         client = LDClient(config=Config('SDK_KEY', update_processor_class = td))
 
@@ -278,14 +278,6 @@ def variations(self, *variations) -> 'FlagBuilder':
 
         return self
 
-    def variation_for_all_users(self, variation: Union[bool, int]) -> 'FlagBuilder':
-        """Deprecated name for variation_for_all().
-
-        .. deprecated:: 8.0.0
-          Use :meth:`ldclient.integrations.test_data.FlagBuilder.variation_for_all()`.
-        """
-        return self.variation_for_all(variation)
-
     def variation_for_all(self, variation: Union[bool, int]) -> 'FlagBuilder':
         """Sets the flag to always return the specified variation for all contexts.
 
@@ -300,18 +292,10 @@ def variation_for_all(self, variation: Union[bool, int]) -> 'FlagBuilder':
         :return: the flag builder
         """
         if isinstance(variation, bool):
-            return self.boolean_flag().variation_for_all_users(_variation_for_boolean(variation))
+            return self.boolean_flag().variation_for_all(_variation_for_boolean(variation))
         else:
             return self.clear_rules().clear_targets().on(True).fallthrough_variation(variation)
 
-    def value_for_all_users(self, value: Any) -> 'FlagBuilder':
-        """Deprecated name for value_for_all().
-
-        .. deprecated:: 8.0.0
-          Use :meth:`ldclient.integrations.test_data.FlagBuilder.value_for_all()`.
-        """
-        return self.value_for_all(value)
-
     def value_for_all(self, value: Any) -> 'FlagBuilder':
         """
         Sets the flag to always return the specified variation value for all users.
@@ -324,7 +308,7 @@ def value_for_all(self, value: Any) -> 'FlagBuilder': :param value the desired value to be returned for all users :return the flag builder """ - return self.variations(value).variation_for_all_users(0) + return self.variations(value).variation_for_all(0) def variation_for_user(self, user_key: str, variation: Union[bool, int]) -> 'FlagBuilder': """Sets the flag to return the specified variation for a specific user key when targeting @@ -360,7 +344,7 @@ def variation_for_key(self, context_kind: str, context_key: str, variation: Unio if isinstance(variation, bool): # `variation` is True/False value return self.boolean_flag().variation_for_key(context_kind, context_key, _variation_for_boolean(variation)) - + # `variation` specifies the index of the variation to set targets = self._targets.get(context_kind) if targets is None: diff --git a/ldclient/migrations/__init__.py b/ldclient/migrations/__init__.py new file mode 100644 index 00000000..a969f98c --- /dev/null +++ b/ldclient/migrations/__init__.py @@ -0,0 +1,19 @@ +from .migrator import * +from .tracker import * +from .types import * + +__all__ = [ + 'Migrator', + 'MigratorBuilder', + 'MigratorCompareFn', + 'MigratorFn', + + 'OpTracker', + + 'ExecutionOrder', + 'MigrationConfig', + 'Operation', + 'OperationResult', + 'Origin', + 'Stage', +] diff --git a/ldclient/migrations/migrator.py b/ldclient/migrations/migrator.py new file mode 100644 index 00000000..0c2ea41f --- /dev/null +++ b/ldclient/migrations/migrator.py @@ -0,0 +1,336 @@ +from __future__ import annotations +import concurrent.futures +from datetime import datetime +from abc import ABCMeta, abstractmethod +from random import Random +from typing import Optional, Union, Any, Tuple, TYPE_CHECKING +from ldclient.migrations.types import ExecutionOrder, OperationResult, WriteResult, Stage, MigrationConfig, MigratorFn, MigratorCompareFn, Operation, Origin +from ldclient.migrations.tracker import OpTracker +from ldclient.impl.util import Result +from ldclient.impl.sampler import Sampler + +if TYPE_CHECKING: + from ldclient import LDClient, Context + + +class Migrator: + """ + A migrator is the interface through which migration support is executed. A + migrator is configured through the :class:`MigratorBuilder`. + """ + __metaclass__ = ABCMeta + + @abstractmethod + def read(self, key: str, context: Context, default_stage: Stage, payload: Optional[Any] = None) -> OperationResult: + """ + Uses the provided flag key and context to execute a migration-backed read operation. + + :param key: The migration flag key to use when determining the current stage + :param context: The context to use when evaluating the flag + :param default_stage: A default stage to fallback to if one cannot be determined + :param payload: An optional payload to be passed through to the appropriate read method + """ + + @abstractmethod + def write(self, key: str, context: Context, default_stage: Stage, payload: Optional[Any] = None) -> WriteResult: + """ + Uses the provided flag key and context to execute a migration-backed write operation. 
+ + :param key: The migration flag key to use when determining the current stage + :param context: The context to use when evaluating the flag + :param default_stage: A default stage to fallback to if one cannot be determined + :param payload: An optional payload to be passed through to the appropriate write method + """ + + +class MigratorImpl(Migrator): + """ + An implementation of :class:`ldclient.migrations.Migrator` interface, + capable of supporting feature-flag backed technology migrations. + """ + + def __init__( + self, + sampler: Sampler, + client: LDClient, + read_execution_order: ExecutionOrder, + read_config: MigrationConfig, + write_config: MigrationConfig, + measure_latency: bool, + measure_errors: bool + ): + self.__sampler = sampler + self.__client = client + self.__read_execution_order = read_execution_order + self.__read_config = read_config + self.__write_config = write_config + self.__measure_latency = measure_latency + self.__measure_errors = measure_errors + + def read(self, key: str, context: Context, default_stage: Stage, payload: Optional[Any] = None) -> OperationResult: + stage, tracker = self.__client.migration_variation(key, context, default_stage) + tracker.operation(Operation.READ) + + old = Executor(Origin.OLD, self.__read_config.old, tracker, self.__measure_latency, self.__measure_errors, payload) + new = Executor(Origin.NEW, self.__read_config.new, tracker, self.__measure_latency, self.__measure_errors, payload) + + if stage == Stage.OFF: + result = old.run() + elif stage == Stage.DUALWRITE: + result = old.run() + elif stage == Stage.SHADOW: + result = self.__read_both(old, new, tracker) + elif stage == Stage.LIVE: + result = self.__read_both(new, old, tracker) + elif stage == Stage.RAMPDOWN: + result = new.run() + else: + result = new.run() + + self.__client.track_migration_op(tracker) + + return result + + def write(self, key: str, context: Context, default_stage: Stage, payload: Optional[Any] = None) -> WriteResult: + stage, tracker = self.__client.migration_variation(key, context, default_stage) + tracker.operation(Operation.WRITE) + + old = Executor(Origin.OLD, self.__write_config.old, tracker, self.__measure_latency, self.__measure_errors, payload) + new = Executor(Origin.NEW, self.__write_config.new, tracker, self.__measure_latency, self.__measure_errors, payload) + + if stage == Stage.OFF: + result = old.run() + write_result = WriteResult(result) + elif stage == Stage.DUALWRITE: + authoritative_result, nonauthoritative_result = self.__write_both(old, new, tracker) + write_result = WriteResult(authoritative_result, nonauthoritative_result) + elif stage == Stage.SHADOW: + authoritative_result, nonauthoritative_result = self.__write_both(old, new, tracker) + write_result = WriteResult(authoritative_result, nonauthoritative_result) + elif stage == Stage.LIVE: + authoritative_result, nonauthoritative_result = self.__write_both(new, old, tracker) + write_result = WriteResult(authoritative_result, nonauthoritative_result) + elif stage == Stage.RAMPDOWN: + authoritative_result, nonauthoritative_result = self.__write_both(new, old, tracker) + write_result = WriteResult(authoritative_result, nonauthoritative_result) + else: + result = new.run() + write_result = WriteResult(result) + + self.__client.track_migration_op(tracker) + + return write_result + + def __read_both(self, authoritative: Executor, nonauthoritative: Executor, tracker: OpTracker) -> OperationResult: + if self.__read_execution_order == ExecutionOrder.PARALLEL: + futures = [] + with 
concurrent.futures.ThreadPoolExecutor(max_workers=2) as executor: + futures.append(executor.submit(lambda: (True, authoritative.run()))) + futures.append(executor.submit(lambda: (False, nonauthoritative.run()))) + + for future in concurrent.futures.as_completed(futures): + is_authoritative, result = future.result() + if is_authoritative: + authoritative_result = result + else: + nonauthoritative_result = result + + elif self.__read_execution_order == ExecutionOrder.RANDOM and self.__sampler.sample(2): + nonauthoritative_result = nonauthoritative.run() + authoritative_result = authoritative.run() + else: + authoritative_result = authoritative.run() + nonauthoritative_result = nonauthoritative.run() + + if self.__read_config.comparison is None: + return authoritative_result + + compare = self.__read_config.comparison + if authoritative_result.is_success() and nonauthoritative_result.is_success(): + tracker.consistent(lambda: compare(authoritative_result.value, nonauthoritative_result.value)) + + return authoritative_result + + def __write_both(self, authoritative: Executor, nonauthoritative: Executor, tracker: OpTracker) -> Tuple[OperationResult, Optional[OperationResult]]: + authoritative_result = authoritative.run() + tracker.invoked(authoritative.origin) + + if not authoritative_result.is_success(): + return authoritative_result, None + + nonauthoritative_result = nonauthoritative.run() + tracker.invoked(nonauthoritative.origin) + + return authoritative_result, nonauthoritative_result + + +class MigratorBuilder: + """ + The migration builder is used to configure and construct an instance of a + :class:`Migrator`. This migrator can be used to perform LaunchDarkly + assisted technology migrations through the use of migration-based feature + flags. + """ + + def __init__(self, client: LDClient): + # Single _ to prevent mangling; useful for testing + self._client = client + + # Default settings as required by the spec + self.__read_execution_order = ExecutionOrder.PARALLEL + self.__measure_latency = True + self.__measure_errors = True + + self.__read_config: Optional[MigrationConfig] = None + self.__write_config: Optional[MigrationConfig] = None + + def read_execution_order(self, order: ExecutionOrder) -> 'MigratorBuilder': + """ + The read execution order influences the parallelism and execution order + for read operations involving multiple origins. + """ + if order not in ExecutionOrder: + return self + + self.__read_execution_order = order + return self + + def track_latency(self, enabled: bool) -> 'MigratorBuilder': + """ + Enable or disable latency tracking for migration operations. This + latency information can be sent upstream to LaunchDarkly to enhance + migration visibility. + """ + self.__measure_latency = enabled + return self + + def track_errors(self, enabled: bool) -> 'MigratorBuilder': + """ + Enable or disable error tracking for migration operations. This error + information can be sent upstream to LaunchDarkly to enhance migration + visibility. + """ + self.__measure_errors = enabled + return self + + def read(self, old: MigratorFn, new: MigratorFn, comparison: Optional[MigratorCompareFn] = None) -> 'MigratorBuilder': + """ + Read can be used to configure the migration-read behavior of the + resulting :class:`Migrator` instance. + + Users are required to provide two different read methods -- one to read + from the old migration origin, and one to read from the new origin. + Additionally, customers can opt-in to consistency tracking by providing + a comparison function. 
+ + Depending on the migration stage, one or both of these read methods may + be called. + + The read methods should accept a single nullable parameter. This + parameter is a payload passed through the :func:`Migrator.read` method. + This method should return a :class:`ldclient.Result` instance. + + The consistency method should accept 2 parameters of any type. These + parameters are the results of executing the read operation against the + old and new origins. If both operations were successful, the + consistency method will be invoked. This method should return true if + the two parameters are equal, or false otherwise. + + :param old: The function to execute when reading from the old origin + :param new: The function to execute when reading from the new origin + :param comparison: An optional function to use for comparing the results from two origins + """ + self.__read_config = MigrationConfig(old, new, comparison) + return self + + def write(self, old: MigratorFn, new: MigratorFn) -> 'MigratorBuilder': + """ + Write can be used to configure the migration-write behavior of the + resulting :class:`Migrator` instance. + + Users are required to provide two different write methods -- one to + write to the old migration origin, and one to write to the new origin. + + Depending on the migration stage, one or both of these write methods + may be called. + + The write methods should accept a single nullable parameter. This + parameter is a payload passed through the :func:`Migrator.write` + method. This method should return a :class:`ldclient.Result` instance. + + :param old: The function to execute when writing to the old origin + :param new: The function to execute when writing to the new origin + """ + self.__write_config = MigrationConfig(old, new) + return self + + def build(self) -> Union[Migrator, str]: + """ + Build constructs a :class:`Migrator` instance to support + migration-based reads and writes. A string describing any failure + conditions will be returned if the build fails. + """ + if self.__read_config is None: + return "read configuration not provided" + + if self.__write_config is None: + return "write configuration not provided" + + return MigratorImpl( + Sampler(Random()), + self._client, + self.__read_execution_order, + self.__read_config, + self.__write_config, + self.__measure_latency, + self.__measure_errors, + ) + + +class Executor: + """ + Utility class for executing migration operations while also tracking our + built-in migration measurements. + """ + + def __init__( + self, + origin: Origin, + fn: MigratorFn, + tracker: OpTracker, + measure_latency: bool, + measure_errors: bool, + payload: Any + ): + self.__origin = origin + self.__fn = fn + self.__tracker = tracker + self.__measure_latency = measure_latency + self.__measure_errors = measure_errors + self.__payload = payload + + @property + def origin(self) -> Origin: + return self.__origin + + def run(self) -> OperationResult: + """ + Execute the configured operation and track any available measurements. 
+ """ + start = datetime.now() + + try: + result = self.__fn(self.__payload) + except Exception as e: + result = Result.fail(f"'{self.__origin.value} operation raised an exception", e) + + # Record required tracker measurements + if self.__measure_latency: + self.__tracker.latency(self.__origin, datetime.now() - start) + + if self.__measure_errors and not result.is_success(): + self.__tracker.error(self.__origin) + + self.__tracker.invoked(self.__origin) + + return OperationResult(self.__origin, result) diff --git a/ldclient/migrations/tracker.py b/ldclient/migrations/tracker.py new file mode 100644 index 00000000..1234f808 --- /dev/null +++ b/ldclient/migrations/tracker.py @@ -0,0 +1,234 @@ +from typing import Callable, Optional, Union, Set, Dict +import time +from datetime import timedelta +from random import Random +from ldclient.impl.sampler import Sampler +from ldclient.evaluation import EvaluationDetail +from ldclient.context import Context +from ldclient.impl.model import FeatureFlag +from threading import Lock +from ldclient.impl.events.types import EventInput +from ldclient.migrations.types import Stage, Operation, Origin +from ldclient.impl.util import log + + +class MigrationOpEvent(EventInput): + """ + A migration op event represents the results of a migration-assisted read or + write operation. + + The event includes optional measurements reporting on consistency checks, + error reporting, and operation latency values. + + This event should not be constructed directly; rather, it should be built + through :class:`ldclient.migrations.OpTracker()`. + """ + __slots__ = ['key', 'flag', 'operation', 'default_stage', 'detail', 'invoked', 'consistent', 'consistent_ratio', 'errors', 'latencies'] + + def __init__(self, timestamp: int, context: Context, key: str, flag: Optional[FeatureFlag], operation: Operation, default_stage: Stage, detail: EvaluationDetail, invoked: Set[Origin], consistent: Optional[bool], consistent_ratio: Optional[int], errors: Set[Origin], latencies: Dict[Origin, timedelta]): + sampling_ratio = None if flag is None else flag.sampling_ratio + super().__init__(timestamp, context, sampling_ratio) + + self.key = key + self.flag = flag + self.operation = operation + self.default_stage = default_stage + self.detail = detail + self.invoked = invoked + self.consistent = consistent + self.consistent_ratio = consistent_ratio + self.errors = errors + self.latencies = latencies + + def to_debugging_dict(self) -> dict: + return { + "timestamp": self.timestamp, + "context": self.context.to_dict(), + "flag": None if self.flag is None else {"key": self.flag.key}, + "operation": self.operation.value, + "default_stage": self.default_stage.value, + "detail": self.detail, + "invoked": self.invoked, + "consistent": self.consistent, + "consistent_ratio": self.consistent_ratio, + "errors": self.errors, + "latencies": self.latencies, + } + + +class OpTracker: + """ + An OpTracker is responsible for managing the collection of measurements + that which a user might wish to record throughout a migration-assisted + operation. + + Example measurements include latency, errors, and consistency. + + The OpTracker is not expected to be instantiated directly. Consumers should + instead call :func:`ldclient.client.LDClient.migration_variation()` and use + the returned tracker instance. 
+ """ + + def __init__( + self, + key: str, + flag: Optional[FeatureFlag], + context: Context, + detail: EvaluationDetail, + default_stage: Stage + ): + self.__key = key + self.__flag = flag + self.__context = context + self.__detail = detail + self.__default_stage = default_stage + + self.__mutex = Lock() + + self.__operation: Optional[Operation] = None + self.__invoked: Set[Origin] = set() + self.__consistent: Optional[bool] = None + + self.__consistent_ratio: int = 1 + if flag is not None and flag.migrations is not None and flag.migrations.check_ratio is not None: + self.__consistent_ratio = flag.migrations.check_ratio + + self.__errors: Set[Origin] = set() + self.__latencies: Dict[Origin, timedelta] = {} + + self.__sampler = Sampler(Random()) + + def operation(self, op: Operation) -> 'OpTracker': + """ + Sets the migration related operation associated with these tracking + measurements. + + :param op: The read or write operation symbol. + """ + if not isinstance(op, Operation): + return self + + with self.__mutex: + self.__operation = op + return self + + def invoked(self, origin: Origin) -> 'OpTracker': + """ + Allows recording which origins were called during a migration. + + :param origin: Designation for the old or new origin. + """ + if not isinstance(origin, Origin): + return self + + with self.__mutex: + self.__invoked.add(origin) + return self + + def consistent(self, is_consistent: Callable[[], bool]) -> 'OpTracker': + """ + Allows recording the results of a consistency check. + + This method accepts a callable which should take no parameters and + return a single boolean to represent the consistency check results for + a read operation. + + A callable is provided in case sampling rules do not require + consistency checking to run. In this case, we can avoid the overhead of + a function by not using the callable. + + :param is_consistent: closure to return result of comparison check + """ + with self.__mutex: + try: + if self.__sampler.sample(self.__consistent_ratio): + self.__consistent = is_consistent() + except Exception as e: + log.error("exception raised during consistency check %s; failed to record measurement", repr(e)) + + return self + + def error(self, origin: Origin) -> 'OpTracker': + """ + Allows recording whether an error occurred during the operation. + + :param origin: Designation for the old or new origin. + """ + if not isinstance(origin, Origin): + return + + with self.__mutex: + self.__errors.add(origin) + return self + + def latency(self, origin: Origin, duration: timedelta) -> 'OpTracker': + """ + Allows tracking the recorded latency for an individual operation. + + :param origin: Designation for the old or new origin. + :param duration: Duration measurement. + """ + if not isinstance(origin, Origin): + return + + with self.__mutex: + self.__latencies[origin] = duration + return self + + def build(self) -> Union[MigrationOpEvent, str]: + """ + Creates an instance of :class:`MigrationOpEvent()`. + This event data can be provided to + :func:`ldclient.client.LDClient.track_migration_op()` to relay this + metric information upstream to LaunchDarkly services. + + :return: A :class:`MigrationOpEvent()` or a string + describing the type of failure. 
+ """ + with self.__mutex: + if self.__operation is None: + return "operation not provided" + if len(self.__key) == 0: + return "migration operation cannot contain an empty key" + if len(self.__invoked) == 0: + return "no origins were invoked" + if not self.__context.valid: + return "provided context was invalid" + + error = self.__check_invoked_consistency() + if error: + return error + + # TODO: Inject this time function or something + timestamp = int(time.time() * 1_000) + + return MigrationOpEvent( + timestamp, + self.__context, + self.__key, + self.__flag, + self.__operation, + self.__default_stage, + self.__detail, + self.__invoked.copy(), + self.__consistent, + None if self.__consistent is None else self.__consistent_ratio, + self.__errors.copy(), + self.__latencies.copy()) + + def __check_invoked_consistency(self) -> Optional[str]: + for origin in Origin: + if origin in self.__invoked: + continue + + if origin in self.__latencies: + return f"provided latency for origin '{origin.value}' without recording invocation" + if origin in self.__errors: + return f"provided error for origin '{origin.value}' without recording invocation" + + # A consistency measurement only makes sense if TWO origins were + # executed. Otherwise, there is nothing to compare against. + if self.__consistent is not None and len(self.__invoked) != 2: + return "provided consistency without recording both invocations" + + return None diff --git a/ldclient/migrations/types.py b/ldclient/migrations/types.py new file mode 100644 index 00000000..a2a6450a --- /dev/null +++ b/ldclient/migrations/types.py @@ -0,0 +1,243 @@ +from typing import Callable, Optional, Any +from enum import Enum +from ldclient.impl.util import Result + +MigratorFn = Callable[[Optional[Any]], Result] +""" +When a migration wishes to execute a read or write operation, it must delegate +that call to a consumer defined function. This function must accept an optional +payload value, and return a :class:`ldclient.Result`. +""" + +MigratorCompareFn = Callable[[Any, Any], bool] +""" +If a migration read operation is executing which results in both origins being +read from, a customer defined comparison function may be used to determine if +the two results are equal. + +This function should accept two parameters which represent the successful +result values of both the old and new origin reads. If the two values are +equal, this function should return true and false otherwise. +""" + + +class ExecutionOrder(Enum): + """ + Depending on the migration stage, reads may operate against both old and + new origins. In this situation, the execution order can be defined to + specify how these individual reads are coordinated. + """ + + SERIAL = "serial" + """ + SERIAL execution order ensures that the authoritative read completes before + the non-authoritative read is executed. + """ + + RANDOM = "random" + """ + Like SERIAL, RANDOM ensures that one read is completed before the + subsequent read is executed. However, the order in which they are executed + is randomly decided. + """ + + PARALLEL = "parallel" + """ + PARALLEL executes both reads in separate threads. This helps reduce total + run time at the cost of the thread overhead. + """ + + @staticmethod + def from_str(order: str) -> Optional['ExecutionOrder']: + """ + This method will create a Stage enum corresponding to the given string. + If the string doesn't map to a stage, None will returned. 
+ """ + try: + return next(e for e in ExecutionOrder if e.value == order) + except StopIteration: + return None + + +class Operation(Enum): + """ + The operation enum is used to record the type of migration operation that + occurred. + """ + + READ = "read" + """ + READ represents a read-only operation on an origin of data. + + A read operation carries the implication that it can be executed in + parallel against multiple origins. + """ + + WRITE = "write" + """ + WRITE represents a write operation on an origin of data. + + A write operation implies that execution cannot be done in parallel against + multiple origins. + """ + + +class Origin(Enum): + """ + The origin enum is used to denote which source of data should be affected + by a particular operation. + """ + + OLD = "old" + """ + The OLD origin is the source of data we are migrating from. When the + migration is complete, this source of data will be unused. + """ + + NEW = "new" + """ + The NEW origin is the source of data we are migrating to. When the + migration is complete, this source of data will be the source of truth. + """ + + +class Stage(Enum): + """ + Stage denotes one of six possible stages a technology migration could be a + part of, progressing through the following order. + + :class:`Stage.OFF` -> :class:`Stage.DUALWRITE` -> :class:`Stage.SHADOW` -> + :class:`Stage.LIVE` -> :class:`Stage.RAMPDOWN` -> :class:`Stage.COMPLETE` + """ + + OFF = "off" + """ + The migration hasn't started. :class:`Origin.OLD` is authoritative for + reads and writes + """ + + DUALWRITE = "dualwrite" + """ + Write to both :class:`Origin.OLD` and :class:`Origin.NEW`, + :class:`Origin.OLD` is authoritative for reads + """ + + SHADOW = "shadow" + """ + Both :class:`Origin.NEW` and :class:`Origin.OLD` versions run with + a preference for :class:`Origin.OLD` + """ + + LIVE = "live" + """ + Both :class:`Origin.NEW` and :class:`Origin.OLD` versions run with a + preference for :class:`Origin.NEW` + """ + + RAMPDOWN = "rampdown" + """ + Only read from :class:`Origin.NEW`, write to :class:`Origin.OLD` and + :class:`Origin.NEW` + """ + + COMPLETE = "complete" + """ + The migration is finished. :class:`Origin.NEW` is authoritative for reads + and writes + """ + + @staticmethod + def from_str(stage: str) -> Optional['Stage']: + """ + This method will create a Stage enum corresponding to the given string. + If the string doesn't map to a stage, OFF will be used. + """ + try: + return next(s for s in Stage if s.value == stage) + except StopIteration: + return None + + +class OperationResult(Result): + """ + The OperationResult wraps a :class:`ldclient.Result` pair an origin with a result. + """ + + def __init__(self, origin: Origin, result: Result): + super().__init__(result.value, result.error, result.exception) + self.__origin = origin + + @property + def origin(self) -> Origin: + return self.__origin + + +class WriteResult: + """ + A write result contains the operation results against both the + authoritative and non-authoritative origins. + + Authoritative writes are always executed first. In the event of a failure, + the non-authoritative write will not be executed, resulting in a None value + in the final WriteResult. 
+ """ + + def __init__(self, authoritative: OperationResult, nonauthoritative: Optional[OperationResult] = None): + self.__authoritative = authoritative + self.__nonauthoritative = nonauthoritative + + @property + def authoritative(self) -> OperationResult: + return self.__authoritative + + @property + def nonauthoritative(self) -> Optional[OperationResult]: + return self.__nonauthoritative + + +class MigrationConfig: + """ + A migration config stores references to callable methods which execute + customer defined read or write operations on old or new origins of + information. For read operations, an optional comparison function also be + defined. + """ + + def __init__(self, old: MigratorFn, new: MigratorFn, comparison: Optional[MigratorCompareFn] = None): + self.__old = old + self.__new = new + self.__comparison = comparison + + @property + def old(self) -> MigratorFn: + """ + Callable which receives a nullable payload parameter and returns an + :class:`ldclient.Result`. + + This function call should affect the old migration origin when called. + + @return [#call] + """ + return self.__old + + @property + def new(self) -> MigratorFn: + """ + # Callable which receives a nullable payload parameter and returns an + # :class:`ldclient.Result`. + # + # This function call should affect the new migration origin when + # called. + """ + return self.__new + + @property + def comparison(self) -> Optional[MigratorCompareFn]: + """ + Optional callable which receives two objects of any kind and returns a + boolean representing equality. + + The result of this comparison can be sent upstream to LaunchDarkly to + enhance migration observability. + """ + return self.__comparison diff --git a/setup.cfg b/setup.cfg new file mode 100644 index 00000000..c1781905 --- /dev/null +++ b/setup.cfg @@ -0,0 +1,2 @@ +[pycodestyle] +ignore = E501 diff --git a/setup.py b/setup.py index c96ef429..3b51feca 100644 --- a/setup.py +++ b/setup.py @@ -64,11 +64,11 @@ def run(self): 'License :: OSI Approved :: Apache Software License', 'Operating System :: OS Independent', 'Programming Language :: Python :: 3', - 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', 'Programming Language :: Python :: 3.9', 'Programming Language :: Python :: 3.10', 'Programming Language :: Python :: 3.11', + 'Programming Language :: Python :: 3.12', 'Topic :: Software Development', 'Topic :: Software Development :: Libraries', ], diff --git a/test-filesource-optional-requirements.txt b/test-filesource-optional-requirements.txt index 38bdc65b..e81baf49 100644 --- a/test-filesource-optional-requirements.txt +++ b/test-filesource-optional-requirements.txt @@ -1,2 +1,2 @@ -pyyaml>=3.0,<5.2 -watchdog>=0.9,<1.0,!=0.10.5 +pyyaml>=5.3 +watchdog>=3.0.0 diff --git a/testing/builders.py b/testing/builders.py index 202627bb..2c3fe7e1 100644 --- a/testing/builders.py +++ b/testing/builders.py @@ -8,15 +8,15 @@ class BaseBuilder: def __init__(self, data): self.data = data - + def _set(self, key: str, value: Any): self.data[key] = value return self - + def _append(self, key: str, item: dict): self.data[key].append(item) return self - + def _append_all(self, key: str, items: List[Any]): self.data[key].extend(items) return self @@ -40,7 +40,7 @@ def __init__(self, key): 'rules': [], 'salt': '' }) - + def build(self): return FeatureFlag(self.data.copy()) @@ -49,13 +49,13 @@ def key(self, key: str) -> FlagBuilder: def version(self, version: int) -> FlagBuilder: return self._set('version', version) - + def on(self, on: bool) -> 
FlagBuilder: return self._set('on', on) def variations(self, *variations: Any) -> FlagBuilder: return self._set('variations', list(variations)) - + def off_variation(self, value: Optional[int]) -> FlagBuilder: return self._set('offVariation', value) @@ -70,43 +70,60 @@ def prerequisite(self, key: str, variation: int) -> FlagBuilder: def target(self, variation: int, *keys: str) -> FlagBuilder: return self._append('targets', {'variation': variation, 'values': list(keys)}) - + def context_target(self, context_kind: str, variation: int, *keys: str) -> FlagBuilder: return self._append('contextTargets', {'contextKind': context_kind, 'variation': variation, 'values': list(keys)}) - + def rules(self, *rules: dict) -> FlagBuilder: return self._append_all('rules', list(rules)) - + def salt(self, value: str) -> FlagBuilder: return self._set('salt', value) - + def track_events(self, value: bool) -> FlagBuilder: return self._set('trackEvents', value) - + def track_events_fallthrough(self, value: bool) -> FlagBuilder: return self._set('trackEventsFallthrough', value) - + def debug_events_until_date(self, value: Optional[int]) -> FlagBuilder: return self._set('debugEventsUntilDate', value) + def exclude_from_summaries(self, value: bool) -> FlagBuilder: + return self._set('excludeFromSummaries', value) + + def sampling_ratio(self, value: int) -> FlagBuilder: + return self._set('samplingRatio', value) + + def migrations(self, value: MigrationSettings) -> FlagBuilder: + return self._set('migration', value) + + +class MigrationSettingsBuilder(BaseBuilder): + def __init__(self): + super().__init__({}) + + def check_ratio(self, value: int) -> MigrationSettingsBuilder: + return self._set('checkRatio', value) + class FlagRuleBuilder(BaseBuilder): def __init__(self): super().__init__({'clauses': []}) - + def clauses(self, *clauses: dict) -> FlagRuleBuilder: return self._append_all('clauses', list(clauses)) def id(self, value: str) -> FlagRuleBuilder: return self._set('id', value) - + def rollout(self, rollout: Optional[dict]) -> FlagRuleBuilder: return self._set('rollout', rollout) def track_events(self, value: bool) -> FlagRuleBuilder: return self._set('trackEvents', value) - + def variation(self, variation: int) -> FlagRuleBuilder: return self._set('variation', variation) @@ -124,7 +141,7 @@ def __init__(self, key): 'unbounded': False, 'salt': '' }) - + def build(self): return Segment(self.data.copy()) @@ -151,13 +168,13 @@ def salt(self, salt: str) -> SegmentBuilder: def rules(self, *rules: dict) -> SegmentBuilder: return self._append_all('rules', list(rules)) - + def unbounded(self, value: bool) -> SegmentBuilder: return self._set('unbounded', value) - + def unbounded_context_kind(self, value: Optional[str]) -> SegmentBuilder: return self._set('unboundedContextKind', value) - + def generation(self, value: Optional[int]) -> SegmentBuilder: return self._set('generation', value) @@ -168,7 +185,7 @@ def __init__(self): def bucket_by(self, value: Optional[str]) -> SegmentRuleBuilder: return self._set('bucketBy', value) - + def clauses(self, *clauses: dict) -> SegmentRuleBuilder: return self._append_all('clauses', list(clauses)) diff --git a/testing/http_util.py b/testing/http_util.py index de89aa30..2b40a0dd 100644 --- a/testing/http_util.py +++ b/testing/http_util.py @@ -1,6 +1,7 @@ import json import socket import ssl +from ssl import SSLContext, PROTOCOL_TLSv1_2 from threading import Thread import time import queue @@ -46,10 +47,10 @@ def __init__(self, port, secure): self.uri = '%s://localhost:%d' % ('https' 
if secure else 'http', port) self.server = HTTPServer(('localhost', port), MockServerRequestHandler) if secure: - self.server.socket = ssl.wrap_socket( + context = SSLContext(PROTOCOL_TLSv1_2) + context.load_cert_chain('./testing/selfsigned.pem', './testing/selfsigned.key') + self.server.socket = context.wrap_socket( self.server.socket, - certfile='./testing/selfsigned.pem', # this is a pre-generated self-signed cert that is valid for 100 years - keyfile='./testing/selfsigned.key', server_side=True ) self.server.server_wrapper = self @@ -76,7 +77,7 @@ def require_request(self): def wait_until_request_received(self): req = self.requests.get() self.requests.put(req) - + def should_have_requests(self, count): if self.requests.qsize() != count: rs = [] diff --git a/testing/impl/evaluator_util.py b/testing/impl/evaluator_util.py index deb7fe25..6715bab9 100644 --- a/testing/impl/evaluator_util.py +++ b/testing/impl/evaluator_util.py @@ -18,14 +18,14 @@ def __init__(self): self.__segments = {} self.__big_segments = {} self.__big_segments_status = BigSegmentsStatus.HEALTHY - + def build(self) -> Evaluator: return Evaluator( self._get_flag, self._get_segment, self._get_big_segments_membership ) - + def with_flag(self, flag: FeatureFlag) -> 'EvaluatorBuilder': self.__flags[flag.key] = flag return self @@ -47,25 +47,25 @@ def with_big_segment_for_key(self, key: str, segment: Segment, included: bool) - self.__big_segments[key] = {} self.__big_segments[key][_make_big_segment_ref(segment)] = included return self - + def with_no_big_segments_for_key(self, key: str) -> 'EvaluatorBuilder': self.__big_segments[key] = {} return self - + def with_big_segments_status(self, status: str) -> 'EvaluatorBuilder': self.__big_segments_status = status return self - + def _get_flag(self, key: str) -> Optional[FeatureFlag]: if key not in self.__flags: raise Exception("test made unexpected request for flag '%s'" % key) return self.__flags[key] - + def _get_segment(self, key: str) -> Optional[Segment]: if key not in self.__segments: raise Exception("test made unexpected request for segment '%s'" % key) return self.__segments[key] - + def _get_big_segments_membership(self, key: str) -> Tuple[Optional[dict], str]: if key not in self.__big_segments: raise Exception("test made unexpected request for big segments for context key '%s'" % key) @@ -82,8 +82,3 @@ def assert_eval_result(result, expected_detail, expected_events): def assert_match(evaluator: Evaluator, flag: FeatureFlag, context: Context, expect_value: Any): result = evaluator.evaluate(flag, context, event_factory) assert result.detail.value == expect_value - - -def make_clause_matching_user(user: Union[Context, dict]) -> dict: - key = user.key if isinstance(user, Context) else user['key'] - return { 'attribute': 'key', 'op': 'in', 'values': [ key ] } diff --git a/testing/impl/events/test_event_processor.py b/testing/impl/events/test_event_processor.py index 15e95bba..62e3b6da 100644 --- a/testing/impl/events/test_event_processor.py +++ b/testing/impl/events/test_event_processor.py @@ -1,13 +1,20 @@ +import pytest import json from threading import Thread +from typing import Set, Dict +from datetime import timedelta import time import uuid from ldclient.config import Config from ldclient.context import Context +from ldclient.evaluation import EvaluationDetail from ldclient.impl.events.diagnostics import create_diagnostic_id, _DiagnosticAccumulator from ldclient.impl.events.event_processor import DefaultEventProcessor +from ldclient.migrations.types import Operation, 
Origin, Stage +from ldclient.migrations.tracker import MigrationOpEvent from ldclient.impl.events.types import EventInput, EventInputCustom, EventInputEvaluation, EventInputIdentify +from ldclient.impl.util import timedelta_millis from testing.builders import * from testing.proxy_test_util import do_proxy_tests @@ -23,6 +30,8 @@ '_meta': {'redactedAttributes': ['name']} } flag = FlagBuilder('flagkey').version(2).build() +flag_with_0_sampling_ratio = FlagBuilder('flagkey').version(3).sampling_ratio(0).build() +flag_excluded_from_summaries = FlagBuilder('flagkey').version(4).exclude_from_summaries(True).build() timestamp = 10000 ep = None @@ -54,7 +63,164 @@ def __init__(self, **kwargs): kwargs['sdk_key'] = 'SDK_KEY' config = Config(**kwargs) diagnostic_accumulator = _DiagnosticAccumulator(create_diagnostic_id(config)) - DefaultEventProcessor.__init__(self, config, mock_http, diagnostic_accumulator = diagnostic_accumulator) + DefaultEventProcessor.__init__(self, config, mock_http, diagnostic_accumulator=diagnostic_accumulator) + + +@pytest.mark.parametrize( + "operation,default_stage", + [ + pytest.param(Operation.READ, Stage.OFF, id="read off"), + pytest.param(Operation.READ, Stage.DUALWRITE, id="read dualwrite"), + pytest.param(Operation.READ, Stage.SHADOW, id="read shadow"), + pytest.param(Operation.READ, Stage.LIVE, id="read live"), + pytest.param(Operation.READ, Stage.RAMPDOWN, id="read rampdown"), + pytest.param(Operation.READ, Stage.COMPLETE, id="read complete"), + + pytest.param(Operation.WRITE, Stage.OFF, id="write off"), + pytest.param(Operation.WRITE, Stage.DUALWRITE, id="write dualwrite"), + pytest.param(Operation.WRITE, Stage.SHADOW, id="write shadow"), + pytest.param(Operation.WRITE, Stage.LIVE, id="write live"), + pytest.param(Operation.WRITE, Stage.RAMPDOWN, id="write rampdown"), + pytest.param(Operation.WRITE, Stage.COMPLETE, id="write complete"), + ], +) +def test_migration_op_event_is_queued_without_flag(operation: Operation, default_stage: Stage): + with DefaultTestProcessor() as ep: + e = MigrationOpEvent(timestamp, context, "key", None, operation, default_stage, EvaluationDetail('off', 0, {'kind': 'FALLTHROUGH'}), {Origin.OLD}, None, None, set(), {}) + ep.send_event(e) + + output = flush_and_get_events(ep) + assert len(output) == 1 + check_migration_op_event(output[0], e) + + +@pytest.mark.parametrize( + "operation,default_stage,invoked", + [ + pytest.param(Operation.READ, Stage.OFF, {Origin.OLD}, id="read off"), + pytest.param(Operation.READ, Stage.DUALWRITE, {Origin.OLD}, id="read dualwrite"), + pytest.param(Operation.READ, Stage.SHADOW, {Origin.OLD, Origin.NEW}, id="read shadow"), + pytest.param(Operation.READ, Stage.LIVE, {Origin.OLD, Origin.NEW}, id="read live"), + pytest.param(Operation.READ, Stage.RAMPDOWN, {Origin.NEW}, id="read rampdown"), + pytest.param(Operation.READ, Stage.COMPLETE, {Origin.NEW}, id="read complete"), + + pytest.param(Operation.WRITE, Stage.OFF, {Origin.OLD}, id="write off"), + pytest.param(Operation.WRITE, Stage.DUALWRITE, {Origin.OLD, Origin.NEW}, id="write dualwrite"), + pytest.param(Operation.WRITE, Stage.SHADOW, {Origin.OLD, Origin.NEW}, id="write shadow"), + pytest.param(Operation.WRITE, Stage.LIVE, {Origin.OLD, Origin.NEW}, id="write live"), + pytest.param(Operation.WRITE, Stage.RAMPDOWN, {Origin.OLD, Origin.NEW}, id="write rampdown"), + pytest.param(Operation.WRITE, Stage.COMPLETE, {Origin.OLD, Origin.NEW}, id="write complete"), + ], +) +def test_migration_op_event_is_queued_with_invoked(operation: Operation, default_stage: Stage, 
invoked: Set[Origin]): + with DefaultTestProcessor() as ep: + e = MigrationOpEvent(timestamp, context, flag.key, flag, operation, default_stage, EvaluationDetail('off', 0, {'kind': 'FALLTHROUGH'}), invoked, None, None, set(), {}) + ep.send_event(e) + + output = flush_and_get_events(ep) + assert len(output) == 1 + check_migration_op_event(output[0], e) + + +@pytest.mark.parametrize( + "operation,default_stage,errors", + [ + pytest.param(Operation.READ, Stage.OFF, {Origin.OLD}, id="read off"), + pytest.param(Operation.READ, Stage.DUALWRITE, {Origin.OLD}, id="read dualwrite"), + pytest.param(Operation.READ, Stage.SHADOW, {Origin.OLD, Origin.NEW}, id="read shadow"), + pytest.param(Operation.READ, Stage.LIVE, {Origin.OLD, Origin.NEW}, id="read live"), + pytest.param(Operation.READ, Stage.RAMPDOWN, {Origin.NEW}, id="read rampdown"), + pytest.param(Operation.READ, Stage.COMPLETE, {Origin.NEW}, id="read complete"), + + pytest.param(Operation.WRITE, Stage.OFF, {Origin.OLD}, id="write off"), + pytest.param(Operation.WRITE, Stage.DUALWRITE, {Origin.OLD}, id="write dualwrite"), + pytest.param(Operation.WRITE, Stage.SHADOW, {Origin.OLD}, id="write shadow"), + pytest.param(Operation.WRITE, Stage.LIVE, {Origin.NEW}, id="write live"), + pytest.param(Operation.WRITE, Stage.RAMPDOWN, {Origin.NEW}, id="write rampdown"), + pytest.param(Operation.WRITE, Stage.COMPLETE, {Origin.NEW}, id="write complete"), + ], +) +def test_migration_op_event_is_queued_with_errors(operation: Operation, default_stage: Stage, errors: Set[Origin]): + with DefaultTestProcessor() as ep: + e = MigrationOpEvent(timestamp, context, flag.key, flag, operation, default_stage, EvaluationDetail('off', 0, {'kind': 'FALLTHROUGH'}), {Origin.OLD, Origin.NEW}, None, None, errors, {}) + ep.send_event(e) + + output = flush_and_get_events(ep) + assert len(output) == 1 + check_migration_op_event(output[0], e) + + +@pytest.mark.parametrize( + "operation,default_stage,latencies", + [ + pytest.param(Operation.READ, Stage.OFF, {Origin.OLD: 100}, id="read off"), + pytest.param(Operation.READ, Stage.DUALWRITE, {Origin.OLD: 100}, id="read dualwrite"), + pytest.param(Operation.READ, Stage.SHADOW, {Origin.OLD: 100, Origin.NEW: 100}, id="read shadow"), + pytest.param(Operation.READ, Stage.LIVE, {Origin.OLD: 100, Origin.NEW: 100}, id="read live"), + pytest.param(Operation.READ, Stage.RAMPDOWN, {Origin.NEW: 100}, id="read rampdown"), + pytest.param(Operation.READ, Stage.COMPLETE, {Origin.NEW: 100}, id="read complete"), + + pytest.param(Operation.WRITE, Stage.OFF, {Origin.OLD: 100}, id="write off"), + pytest.param(Operation.WRITE, Stage.DUALWRITE, {Origin.OLD: 100, Origin.NEW: 100}, id="write dualwrite"), + pytest.param(Operation.WRITE, Stage.SHADOW, {Origin.OLD: 100, Origin.NEW: 100}, id="write shadow"), + pytest.param(Operation.WRITE, Stage.LIVE, {Origin.OLD: 100, Origin.NEW: 100}, id="write live"), + pytest.param(Operation.WRITE, Stage.RAMPDOWN, {Origin.OLD: 100, Origin.NEW: 100}, id="write rampdown"), + pytest.param(Operation.WRITE, Stage.COMPLETE, {Origin.NEW: 100}, id="write complete"), + ], +) +def test_migration_op_event_is_queued_with_latencies(operation: Operation, default_stage: Stage, latencies: Dict[Origin, float]): + with DefaultTestProcessor() as ep: + delta_latencies = {origin: timedelta(milliseconds=ms) for origin, ms in latencies.items()} + e = MigrationOpEvent(timestamp, context, flag.key, flag, operation, default_stage, EvaluationDetail('off', 0, {'kind': 'FALLTHROUGH'}), {Origin.OLD, Origin.NEW}, None, None, set(), delta_latencies) + 
ep.send_event(e) + + output = flush_and_get_events(ep) + assert len(output) == 1 + check_migration_op_event(output[0], e) + + +def test_migration_op_event_is_disabled_with_sampling_ratio(): + with DefaultTestProcessor() as ep: + e = MigrationOpEvent(timestamp, context, flag_with_0_sampling_ratio.key, flag_with_0_sampling_ratio, Operation.READ, Stage.OFF, EvaluationDetail('off', 0, {'kind': 'FALLTHROUGH'}), {Origin.OLD}, None, None, set(), {}) + ep.send_event(e) + + # NOTE: Have to send an identify event; otherwise, we will timeout waiting on no events. + identify_event = EventInputIdentify(timestamp, context) + ep.send_event(identify_event) + + output = flush_and_get_events(ep) + assert len(output) == 1 # Got the identify but not the migration op + check_identify_event(output[0], identify_event) + + +@pytest.mark.parametrize( + "operation,default_stage", + [ + pytest.param(Operation.READ, Stage.OFF, id="read off"), + pytest.param(Operation.READ, Stage.DUALWRITE, id="read dualwrite"), + pytest.param(Operation.READ, Stage.SHADOW, id="read shadow"), + pytest.param(Operation.READ, Stage.LIVE, id="read live"), + pytest.param(Operation.READ, Stage.RAMPDOWN, id="read rampdown"), + pytest.param(Operation.READ, Stage.COMPLETE, id="read complete"), + + pytest.param(Operation.WRITE, Stage.OFF, id="write off"), + pytest.param(Operation.WRITE, Stage.DUALWRITE, id="write dualwrite"), + pytest.param(Operation.WRITE, Stage.SHADOW, id="write shadow"), + pytest.param(Operation.WRITE, Stage.LIVE, id="write live"), + pytest.param(Operation.WRITE, Stage.RAMPDOWN, id="write rampdown"), + pytest.param(Operation.WRITE, Stage.COMPLETE, id="write complete"), + ], +) +def test_migration_op_event_is_queued_with_consistency(operation: Operation, default_stage: Stage): + for value in [True, False, None]: + with DefaultTestProcessor() as ep: + e = MigrationOpEvent(timestamp, context, flag.key, flag, operation, default_stage, EvaluationDetail('off', 0, {'kind': 'FALLTHROUGH'}), {Origin.OLD, Origin.NEW}, value, None, set(), {}) + ep.send_event(e) + + output = flush_and_get_events(ep) + assert len(output) == 1 + check_migration_op_event(output[0], e) + def test_identify_event_is_queued(): with DefaultTestProcessor() as ep: @@ -85,6 +251,28 @@ def test_individual_feature_event_is_queued_with_index_event(): check_feature_event(output[1], e) check_summary_event(output[2]) + +def test_individual_feature_event_is_ignored_for_0_sampling_ratio(): + with DefaultTestProcessor() as ep: + e = EventInputEvaluation(timestamp, context, flag_with_0_sampling_ratio.key, flag_with_0_sampling_ratio, 1, 'value', None, 'default', None, True) + ep.send_event(e) + + output = flush_and_get_events(ep) + assert len(output) == 2 + check_index_event(output[0], e) + check_summary_event(output[1]) + + +def test_exclude_can_keep_feature_event_from_summary(): + with DefaultTestProcessor() as ep: + e = EventInputEvaluation(timestamp, context, flag_excluded_from_summaries.key, flag_excluded_from_summaries, 1, 'value', None, 'default', None, True) + ep.send_event(e) + + output = flush_and_get_events(ep) + assert len(output) == 2 + check_index_event(output[0], e) + check_feature_event(output[1], e) + def test_context_is_filtered_in_index_event(): with DefaultTestProcessor(all_attributes_private = True) as ep: e = EventInputEvaluation(timestamp, context, flag.key, flag, 1, 'value', None, 'default', None, True) @@ -153,6 +341,20 @@ def test_event_can_be_both_tracked_and_debugged(): check_debug_event(output[2], e) check_summary_event(output[3]) + +def 
test_debug_event_can_be_disabled_with_sampling_ratio(): + with DefaultTestProcessor() as ep: + future_time = now() + 100000 + debugged_flag = FlagBuilder(flag.key).version(flag.version).debug_events_until_date(future_time).sampling_ratio(0).build() + e = EventInputEvaluation(timestamp, context, debugged_flag.key, debugged_flag, 1, 'value', None, 'default', None, True) + ep.send_event(e) + + output = flush_and_get_events(ep) + assert len(output) == 2 + check_index_event(output[0], e) + check_summary_event(output[1]) + + def test_debug_mode_does_not_expire_if_both_client_time_and_server_time_are_before_expiration_time(): with DefaultTestProcessor() as ep: # Pick a server time that slightly different from client time @@ -491,6 +693,56 @@ def check_feature_event(data, source: EventInputEvaluation): assert data['contextKeys'] == make_context_keys(source.context) assert data.get('prereq_of') == None if source.prereq_of is None else source.prereq_of.key + +def check_migration_op_event(data, source: MigrationOpEvent): + assert data['kind'] == 'migration_op' + assert data['creationDate'] == source.timestamp + assert data['contextKeys'] == make_context_keys(source.context) + assert data['evaluation']['key'] == source.key + assert data['evaluation']['value'] == source.detail.value + + if source.flag is not None: + assert data['evaluation']['version'] == source.flag.version + + if source.default_stage is not None: + assert data['evaluation']['default'] == source.default_stage.value + + if source.detail.variation_index is not None: + assert data['evaluation']['variation'] == source.detail.variation_index + + if source.detail.reason is not None: + assert data['evaluation']['reason'] == source.detail.reason + + if source.flag is not None and source.flag.sampling_ratio is not None and source.flag.sampling_ratio != 1: + assert data['samplingRatio'] == source.flag.sampling_ratio + + index = 0 + if len(source.invoked): + assert data['measurements'][index]['key'] == 'invoked' + assert data['measurements'][index]['values'] == {origin.value: True for origin in source.invoked} + index += 1 + + if source.consistent is not None: + assert data['measurements'][index]['key'] == 'consistent' + assert data['measurements'][index]['value'] == source.consistent + + if source.flag is not None and source.flag.migrations is not None: + check_ratio = source.flag.migrations.check_ratio + if check_ratio is not None and check_ratio != 1: + assert data['measurements'][index]['samplingRatio'] == check_ratio + + index += 1 + + if len(source.latencies): + assert data['measurements'][index]['key'] == 'latency_ms' + assert data['measurements'][index]['values'] == {o.value: timedelta_millis(d) for o, d in source.latencies.items()} + index += 1 + + if len(source.errors): + assert data['measurements'][index]['key'] == 'error' + assert data['measurements'][index]['values'] == {origin.value: True for origin in source.errors} + + def check_debug_event(data, source: EventInputEvaluation, context_json: Optional[dict] = None): assert data['kind'] == 'debug' assert data['creationDate'] == source.timestamp diff --git a/testing/impl/test_sampler.py b/testing/impl/test_sampler.py new file mode 100644 index 00000000..f3deb05d --- /dev/null +++ b/testing/impl/test_sampler.py @@ -0,0 +1,30 @@ +from random import Random +from ldclient.impl.sampler import Sampler + + +def test_is_false_for_noninteger_values(): + sampler = Sampler(Random()) + for value in ["not an int", True, 3.0]: + assert sampler.sample(value) is False + + +def 
+def test_is_false_for_nonpositive_integers():
+    sampler = Sampler(Random())
+    for value in range(-10, 1):
+        assert sampler.sample(value) is False
+
+
+def test_one_is_true():
+    sampler = Sampler(Random())
+    assert sampler.sample(1)
+
+
+def test_can_control_sampling_ratio():
+    sampler = Sampler(Random(0))
+
+    count = 0
+    for _ in range(0, 1_000):
+        if sampler.sample(10):
+            count += 1
+
+    assert count == 114
diff --git a/testing/integrations/test_test_data_source.py b/testing/integrations/test_test_data_source.py
index 9afba04c..a5d86526 100644
--- a/testing/integrations/test_test_data_source.py
+++ b/testing/integrations/test_test_data_source.py
@@ -1,7 +1,7 @@
 import pytest
 from typing import Callable
 
-from ldclient.client import LDClient
+from ldclient.client import LDClient, Context
 from ldclient.config import Config
 from ldclient.feature_store import InMemoryFeatureStore
 from ldclient.versioned_data_kind import FEATURES
@@ -33,7 +33,7 @@ def verify_flag_builder(desc: str, expected_props: dict, builder_actions: Callab
         'variations': [True, False],
         'offVariation': 1,
         'fallthrough': {'variation': 0}
-    } 
+    }
     all_expected_props.update(expected_props)
 
     td = TestData.data_source()
@@ -337,7 +337,7 @@ def test_can_handle_multiple_clients():
 
     assert store2.get(FEATURES, 'flag') == FEATURES.decode(built_flag)
 
-    flag_builder_v2 = td.flag('flag').variation_for_all_users(False)
+    flag_builder_v2 = td.flag('flag').variation_for_all(False)
     td.update(flag_builder_v2)
     built_flag_v2 = flag_builder_v2._build(2)
 
@@ -365,7 +365,7 @@ def test_flag_evaluation_with_client():
                .then_return(True))
 
     # user1 should satisfy the rule (matching firstname, not matching country)
-    user1 = { 'key': 'user1', 'firstName': 'Mike', 'country': 'us' }
+    user1 = Context.from_dict({ 'kind': 'user', 'key': 'user1', 'firstName': 'Mike', 'country': 'us' })
     eval1 = client.variation_detail('test-flag', user1, default='default')
 
     assert eval1.value == True
@@ -373,7 +373,7 @@ def test_flag_evaluation_with_client():
     assert eval1.reason['kind'] == 'RULE_MATCH'
 
     # user2 should NOT satisfy the rule (not matching firstname despite not matching country)
-    user2 = { 'key': 'user2', 'firstName': 'Joe', 'country': 'us' }
+    user2 = Context.from_dict({ 'kind': 'user', 'key': 'user2', 'firstName': 'Joe', 'country': 'us' })
    eval2 = client.variation_detail('test-flag', user2, default='default')
 
     assert eval2.value == False
@@ -395,7 +395,7 @@ def test_flag_can_evaluate_all_flags():
                .and_not_match('country', 'gb')
                .then_return(True))
 
-    user1 = { 'key': 'user1', 'firstName': 'Mike', 'country': 'us' }
+    user1 = Context.from_dict({ 'kind': 'user', 'key': 'user1', 'firstName': 'Mike', 'country': 'us' })
     flags_state = client.all_flags_state(user1, with_reasons=True)
 
     assert flags_state.valid
diff --git a/testing/migrations/__init__.py b/testing/migrations/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/testing/migrations/test_migrator.py b/testing/migrations/test_migrator.py
new file mode 100644
index 00000000..b1822023
--- /dev/null
+++ b/testing/migrations/test_migrator.py
@@ -0,0 +1,509 @@
+import pytest
+from datetime import datetime, timedelta
+from ldclient.feature_store import InMemoryFeatureStore
+from ldclient.migrations import MigratorBuilder
+from ldclient import Result
+from ldclient.migrations.types import Stage, Origin, MigratorFn, ExecutionOrder
+from ldclient.migrations.migrator import Migrator
+from ldclient.migrations.tracker import MigrationOpEvent
+from ldclient.versioned_data_kind import FEATURES
+from ldclient.impl.events.types import EventInputEvaluation
+from ldclient.impl.util import timedelta_millis
+from testing.builders import FlagBuilder
+from testing.test_ldclient import make_client, user
+from typing import List
+from time import sleep
+
+
+def success(payload) -> Result:
+    return Result.success(True)
+
+
+def raises_exception(msg) -> MigratorFn:
+    """Quick helper to generate a migration fn that is going to raise an exception"""
+    def inner(payload):
+        raise Exception(msg)
+
+    return inner
+
+
+@pytest.fixture
+def data_store():
+    flags = {}
+    for stage in Stage:
+        feature = FlagBuilder(stage.value).on(True).variations(stage.value).fallthrough_variation(0).build()
+        flags[stage.value] = feature
+
+    store = InMemoryFeatureStore()
+    store.init({FEATURES: flags})
+
+    return store
+
+
+@pytest.fixture
+def builder(data_store) -> MigratorBuilder:
+    client = make_client(data_store)
+    builder = MigratorBuilder(client)
+    builder.track_latency(False)
+    builder.track_errors(False)
+
+    builder.read(success, success, None)
+    builder.write(success, success)
+
+    return builder
+
+
+class TestPassingPayloadThrough:
+    @pytest.mark.parametrize(
+        "stage,count",
+        [
+            pytest.param(Stage.OFF, 1, id="off"),
+            pytest.param(Stage.DUALWRITE, 1, id="dualwrite"),
+            pytest.param(Stage.SHADOW, 2, id="shadow"),
+            pytest.param(Stage.LIVE, 2, id="live"),
+            pytest.param(Stage.RAMPDOWN, 1, id="rampdown"),
+            pytest.param(Stage.COMPLETE, 1, id="complete"),
+        ],
+    )
+    def test_passes_through_read(self, builder: MigratorBuilder, stage: Stage, count: int):
+        payloads = []
+
+        def capture_payloads(payload):
+            payloads.append(payload)
+            return Result.success(None)
+
+        builder.read(capture_payloads, capture_payloads)
+        migrator = builder.build()
+        assert isinstance(migrator, Migrator)
+
+        result = migrator.read(stage.value, user, Stage.LIVE, "payload")
+
+        assert result.is_success()
+        assert len(payloads) == count
+        assert all("payload" == p for p in payloads)
+
+    @pytest.mark.parametrize(
+        "stage,count",
+        [
+            pytest.param(Stage.OFF, 1, id="off"),
+            pytest.param(Stage.DUALWRITE, 2, id="dualwrite"),
+            pytest.param(Stage.SHADOW, 2, id="shadow"),
+            pytest.param(Stage.LIVE, 2, id="live"),
+            pytest.param(Stage.RAMPDOWN, 2, id="rampdown"),
+            pytest.param(Stage.COMPLETE, 1, id="complete"),
+        ],
+    )
+    def test_passes_through_write(self, builder: MigratorBuilder, stage: Stage, count: int):
+        payloads = []
+
+        def capture_payloads(payload):
+            payloads.append(payload)
+            return Result.success(None)
+
+        builder.write(capture_payloads, capture_payloads)
+        migrator = builder.build()
+        assert isinstance(migrator, Migrator)
+
+        result = migrator.write(stage.value, user, Stage.LIVE, "payload")
+
+        assert result.authoritative.is_success()
+        if result.nonauthoritative is not None:
+            assert result.nonauthoritative.is_success()
+
+        assert len(payloads) == count
+        assert all("payload" == p for p in payloads)
+
+
+class TestTrackingInvoked:
+    @pytest.mark.parametrize(
+        "stage,origins",
+        [
+            pytest.param(Stage.OFF, [Origin.OLD], id="off"),
+            pytest.param(Stage.DUALWRITE, [Origin.OLD], id="dualwrite"),
+            pytest.param(Stage.SHADOW, [Origin.OLD, Origin.NEW], id="shadow"),
+            pytest.param(Stage.LIVE, [Origin.OLD, Origin.NEW], id="live"),
+            pytest.param(Stage.RAMPDOWN, [Origin.NEW], id="rampdown"),
+            pytest.param(Stage.COMPLETE, [Origin.NEW], id="complete"),
+        ],
+    )
+    def test_reads(self, builder: MigratorBuilder, stage: Stage, origins: List[Origin]):
+        migrator = builder.build()
+        assert isinstance(migrator, Migrator)
+
+        result = migrator.read(stage.value, user, Stage.LIVE)
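+        # The client is expected to enqueue the flag evaluation event first, with the migration op event right after it.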
+
+        assert result.is_success()
+        events = builder._client._event_processor._events  # type: ignore
+        assert isinstance(events[0], EventInputEvaluation)
+
+        event = events[1]
+        assert isinstance(event, MigrationOpEvent)
+        assert len(origins) == len(event.invoked)
+        assert all(o in event.invoked for o in origins)
+
+    @pytest.mark.parametrize(
+        "stage,origins",
+        [
+            pytest.param(Stage.OFF, [Origin.OLD], id="off"),
+            pytest.param(Stage.DUALWRITE, [Origin.OLD, Origin.NEW], id="dualwrite"),
+            pytest.param(Stage.SHADOW, [Origin.OLD, Origin.NEW], id="shadow"),
+            pytest.param(Stage.LIVE, [Origin.OLD, Origin.NEW], id="live"),
+            pytest.param(Stage.RAMPDOWN, [Origin.OLD, Origin.NEW], id="rampdown"),
+            pytest.param(Stage.COMPLETE, [Origin.NEW], id="complete"),
+        ],
+    )
+    def test_writes(self, builder: MigratorBuilder, stage: Stage, origins: List[Origin]):
+        migrator = builder.build()
+        assert isinstance(migrator, Migrator)
+
+        result = migrator.write(stage.value, user, Stage.LIVE)
+
+        assert result.authoritative.is_success()
+        events = builder._client._event_processor._events  # type: ignore
+        assert isinstance(events[0], EventInputEvaluation)
+
+        event = events[1]
+        assert isinstance(event, MigrationOpEvent)
+        assert len(origins) == len(event.invoked)
+        assert all(o in event.invoked for o in origins)
+
+
+class TestTrackingLatency:
+    @pytest.mark.parametrize(
+        "stage,origins",
+        [
+            pytest.param(Stage.OFF, [Origin.OLD], id="off"),
+            pytest.param(Stage.DUALWRITE, [Origin.OLD], id="dualwrite"),
+            pytest.param(Stage.SHADOW, [Origin.OLD, Origin.NEW], id="shadow"),
+            pytest.param(Stage.LIVE, [Origin.OLD, Origin.NEW], id="live"),
+            pytest.param(Stage.RAMPDOWN, [Origin.NEW], id="rampdown"),
+            pytest.param(Stage.COMPLETE, [Origin.NEW], id="complete"),
+        ],
+    )
+    def test_reads(self, builder: MigratorBuilder, stage: Stage, origins: List[Origin]):
+        def delay(payload):
+            sleep(0.1)
+            return Result.success("success")
+
+        builder.track_latency(True)
+        builder.read(delay, delay)
+        migrator = builder.build()
+        assert isinstance(migrator, Migrator)
+
+        result = migrator.read(stage.value, user, Stage.LIVE)
+
+        assert result.is_success()
+        events = builder._client._event_processor._events  # type: ignore
+        assert isinstance(events[0], EventInputEvaluation)
+
+        event = events[1]
+        assert isinstance(event, MigrationOpEvent)
+        assert len(origins) == len(event.latencies)
+        for o in origins:
+            assert o in event.latencies
+            assert event.latencies[o] >= timedelta(milliseconds=100)
+
+    @pytest.mark.parametrize(
+        "stage,origins",
+        [
+            pytest.param(Stage.OFF, [Origin.OLD], id="off"),
+            pytest.param(Stage.DUALWRITE, [Origin.OLD, Origin.NEW], id="dualwrite"),
+            pytest.param(Stage.SHADOW, [Origin.OLD, Origin.NEW], id="shadow"),
+            pytest.param(Stage.LIVE, [Origin.OLD, Origin.NEW], id="live"),
+            pytest.param(Stage.RAMPDOWN, [Origin.OLD, Origin.NEW], id="rampdown"),
+            pytest.param(Stage.COMPLETE, [Origin.NEW], id="complete"),
+        ],
+    )
+    def test_writes(self, builder: MigratorBuilder, stage: Stage, origins: List[Origin]):
+        def delay(payload):
+            sleep(0.1)
+            return Result.success("success")
+
+        builder.track_latency(True)
+        builder.write(delay, delay)
+        migrator = builder.build()
+        assert isinstance(migrator, Migrator)
+
+        result = migrator.write(stage.value, user, Stage.LIVE)
+
+        assert result.authoritative.is_success()
+        events = builder._client._event_processor._events  # type: ignore
+        assert isinstance(events[0], EventInputEvaluation)
+
+        event = events[1]
+        assert isinstance(event, MigrationOpEvent)
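+        # Each origin that ran should have recorded at least the injected 100 ms delay.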
+        assert len(origins) == len(event.latencies)
+        for o in origins:
+            assert o in event.latencies
+            assert event.latencies[o] >= timedelta(milliseconds=100)
+
+
+class TestTrackingErrors:
+    @pytest.mark.parametrize(
+        "stage,origins",
+        [
+            pytest.param(Stage.OFF, [Origin.OLD], id="off"),
+            pytest.param(Stage.DUALWRITE, [Origin.OLD], id="dualwrite"),
+            pytest.param(Stage.SHADOW, [Origin.OLD, Origin.NEW], id="shadow"),
+            pytest.param(Stage.LIVE, [Origin.OLD, Origin.NEW], id="live"),
+            pytest.param(Stage.RAMPDOWN, [Origin.NEW], id="rampdown"),
+            pytest.param(Stage.COMPLETE, [Origin.NEW], id="complete"),
+        ],
+    )
+    def test_reads(self, builder: MigratorBuilder, stage: Stage, origins: List[Origin]):
+        builder.track_errors(True)
+        builder.read(lambda _: Result.fail("fail"), lambda _: Result.fail("fail"))
+        migrator = builder.build()
+        assert isinstance(migrator, Migrator)
+
+        result = migrator.read(stage.value, user, Stage.LIVE)
+
+        assert not result.is_success()
+        events = builder._client._event_processor._events  # type: ignore
+        assert isinstance(events[0], EventInputEvaluation)
+
+        event = events[1]
+        assert isinstance(event, MigrationOpEvent)
+        assert len(origins) == len(event.errors)
+        assert all(o in event.errors for o in origins)
+
+    @pytest.mark.parametrize(
+        "stage,origin",
+        [
+            pytest.param(Stage.OFF, Origin.OLD, id="off"),
+            pytest.param(Stage.DUALWRITE, Origin.OLD, id="dualwrite"),
+            pytest.param(Stage.SHADOW, Origin.OLD, id="shadow"),
+            pytest.param(Stage.LIVE, Origin.NEW, id="live"),
+            pytest.param(Stage.RAMPDOWN, Origin.NEW, id="rampdown"),
+            pytest.param(Stage.COMPLETE, Origin.NEW, id="complete"),
+        ],
+    )
+    def test_authoritative_writes(self, builder: MigratorBuilder, stage: Stage, origin: Origin):
+        builder.track_errors(True)
+        builder.write(lambda _: Result.fail("fail"), lambda _: Result.fail("fail"))
+        migrator = builder.build()
+        assert isinstance(migrator, Migrator)
+
+        result = migrator.write(stage.value, user, Stage.LIVE)
+
+        assert not result.authoritative.is_success()
+        assert result.nonauthoritative is None
+        events = builder._client._event_processor._events  # type: ignore
+        assert isinstance(events[0], EventInputEvaluation)
+
+        event = events[1]
+        assert isinstance(event, MigrationOpEvent)
+        assert 1 == len(event.errors)
+        assert origin in event.errors
+
+    @pytest.mark.parametrize(
+        "stage,fail_old,fail_new,origin",
+        [
+            # Skip OFF and COMPLETE since they don't have non-authoritative writes
+            pytest.param(Stage.DUALWRITE, False, True, Origin.NEW, id="dualwrite"),
+            pytest.param(Stage.SHADOW, False, True, Origin.NEW, id="shadow"),
+            pytest.param(Stage.LIVE, True, False, Origin.OLD, id="live"),
+            pytest.param(Stage.RAMPDOWN, True, False, Origin.OLD, id="rampdown"),
+        ],
+    )
+    def test_nonauthoritative_writes(self, builder: MigratorBuilder, stage: Stage, fail_old: bool, fail_new: bool, origin: Origin):
+        def success(_):
+            return Result.success(None)
+
+        def fail(_):
+            return Result.fail("fail")
+
+        builder.track_errors(True)
+        builder.write(fail if fail_old else success, fail if fail_new else success)
+        migrator = builder.build()
+        assert isinstance(migrator, Migrator)
+
+        result = migrator.write(stage.value, user, Stage.LIVE)
+
+        assert result.authoritative.is_success()
+        assert result.nonauthoritative is not None
+        assert not result.nonauthoritative.is_success()
+        events = builder._client._event_processor._events  # type: ignore
+        assert isinstance(events[0], EventInputEvaluation)
+
+        event = events[1]
+        assert isinstance(event, MigrationOpEvent)
+        assert 1 == len(event.errors)
+        assert origin in event.errors
+
+
+class TestTrackingConsistency:
+    @pytest.mark.parametrize(
+        "stage",
+        [
+            pytest.param(Stage.OFF, id="off"),
+            pytest.param(Stage.DUALWRITE, id="dualwrite"),
+            # SHADOW and LIVE are tested separately since they actually trigger consistency checks.
+            pytest.param(Stage.RAMPDOWN, id="rampdown"),
+            pytest.param(Stage.COMPLETE, id="complete"),
+        ],
+    )
+    def test_consistency_is_not_run_in_most_stages(self, builder: MigratorBuilder, stage: Stage):
+        builder.read(lambda _: Result.success("value"), lambda _: Result.success("value"), lambda lhs, rhs: lhs == rhs)
+        migrator = builder.build()
+        assert isinstance(migrator, Migrator)
+
+        result = migrator.read(stage.value, user, Stage.LIVE)
+        assert result.is_success()
+        events = builder._client._event_processor._events  # type: ignore
+        assert isinstance(events[0], EventInputEvaluation)
+
+        event = events[1]
+        assert isinstance(event, MigrationOpEvent)
+        assert event.consistent is None
+
+    @pytest.mark.parametrize(
+        "stage,old,new,expected",
+        [
+            # SHADOW and LIVE are the only two stages that run both origins for read.
+            pytest.param(Stage.SHADOW, "value", "value", True, id="shadow matches"),
+            pytest.param(Stage.LIVE, "value", "value", True, id="live matches"),
+
+            pytest.param(Stage.SHADOW, "old", "new", False, id="shadow does not match"),
+            pytest.param(Stage.LIVE, "old", "new", False, id="live does not match"),
+        ],
+    )
+    def test_consistency_is_tracked_correctly(self, builder: MigratorBuilder, stage: Stage, old: str, new: str, expected: bool):
+        builder.read(lambda _: Result.success(old), lambda _: Result.success(new), lambda lhs, rhs: lhs == rhs)
+        migrator = builder.build()
+        assert isinstance(migrator, Migrator)
+
+        result = migrator.read(stage.value, user, Stage.LIVE)
+        assert result.is_success()
+        events = builder._client._event_processor._events  # type: ignore
+        assert isinstance(events[0], EventInputEvaluation)
+
+        event = events[1]
+        assert isinstance(event, MigrationOpEvent)
+        assert event.consistent is expected
+
+    @pytest.mark.parametrize(
+        "stage,old,new,expected",
+        [
+            # SHADOW and LIVE are the only two stages that run both origins for read.
+            pytest.param(Stage.SHADOW, "value", "value", True, id="shadow matches"),
+            pytest.param(Stage.LIVE, "value", "value", True, id="live matches"),
+
+            pytest.param(Stage.SHADOW, "old", "new", False, id="shadow does not match"),
+            pytest.param(Stage.LIVE, "old", "new", False, id="live does not match"),
+        ],
+    )
+    def test_consistency_handles_exceptions(self, builder: MigratorBuilder, stage: Stage, old: str, new: str, expected: bool):
+        def raise_exception(lhs, rhs):
+            raise Exception("error")
+
+        builder.read(lambda _: Result.success(old), lambda _: Result.success(new), raise_exception)
+        migrator = builder.build()
+        assert isinstance(migrator, Migrator)
+
+        result = migrator.read(stage.value, user, Stage.LIVE)
+        assert result.is_success()
+        events = builder._client._event_processor._events  # type: ignore
+        assert isinstance(events[0], EventInputEvaluation)
+
+        event = events[1]
+        assert isinstance(event, MigrationOpEvent)
+        assert event.consistent is None
+
+
+class TestHandlesExceptionsInMigratorFn:
+    @pytest.mark.parametrize(
+        "stage,expected_msg",
+        [
+            pytest.param(Stage.OFF, "old read", id="off"),
+            pytest.param(Stage.DUALWRITE, "old read", id="dualwrite"),
+            pytest.param(Stage.SHADOW, "old read", id="shadow"),
+            pytest.param(Stage.LIVE, "new read", id="live"),
+            pytest.param(Stage.RAMPDOWN, "new read", id="rampdown"),
+            pytest.param(Stage.COMPLETE, "new read", id="complete"),
+        ],
+    )
+    def test_reads(self, builder: MigratorBuilder, stage: Stage, expected_msg: str):
+        builder.read(raises_exception("old read"), raises_exception("new read"))
+        migrator = builder.build()
+        assert isinstance(migrator, Migrator)
+
+        result = migrator.read(stage.value, user, Stage.LIVE)
+
+        assert result.is_success() is False
+        assert str(result.exception) == expected_msg
+
+    @pytest.mark.parametrize(
+        "stage,expected_msg",
+        [
+            pytest.param(Stage.OFF, "old write", id="off"),
+            pytest.param(Stage.DUALWRITE, "old write", id="dualwrite"),
+            pytest.param(Stage.SHADOW, "old write", id="shadow"),
+            pytest.param(Stage.LIVE, "new write", id="live"),
+            pytest.param(Stage.RAMPDOWN, "new write", id="rampdown"),
+            pytest.param(Stage.COMPLETE, "new write", id="complete"),
+        ],
+    )
+    def test_exception_in_authoritative_write(self, builder: MigratorBuilder, stage: Stage, expected_msg: str):
+        builder.write(raises_exception("old write"), raises_exception("new write"))
+        migrator = builder.build()
+        assert isinstance(migrator, Migrator)
+
+        result = migrator.write(stage.value, user, Stage.LIVE)
+
+        assert result.authoritative.is_success() is False
+        assert str(result.authoritative.exception) == expected_msg
+        assert result.nonauthoritative is None
+
+    @pytest.mark.parametrize(
+        "stage,expected_msg,old_fn,new_fn",
+        [
+            # Skip OFF and COMPLETE since they don't have non-authoritative writes
+            pytest.param(Stage.DUALWRITE, "new write", success, raises_exception("new write"), id="dualwrite"),
+            pytest.param(Stage.SHADOW, "new write", success, raises_exception("new write"), id="shadow"),
+            pytest.param(Stage.LIVE, "old write", raises_exception("old write"), success, id="live"),
+            pytest.param(Stage.RAMPDOWN, "old write", raises_exception("old write"), success, id="rampdown"),
+        ],
+    )
+    def test_exception_in_nonauthoritative_write(self, builder: MigratorBuilder, stage: Stage, expected_msg: str, old_fn: MigratorFn, new_fn: MigratorFn):
+        builder.write(old_fn, new_fn)
+        migrator = builder.build()
+        assert isinstance(migrator, Migrator)
+
+        result = migrator.write(stage.value, user, Stage.LIVE)
+
+        assert result.authoritative.is_success()
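+        # Only the nonauthoritative handler was configured to fail, so the authoritative result stays successful.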
+        assert result.nonauthoritative is not None
+        assert not result.nonauthoritative.is_success()
+        assert str(result.nonauthoritative.exception) == expected_msg
+
+
+class TestSupportsExecutionOrder:
+    @pytest.mark.parametrize(
+        "order,min_time",
+        [
+            pytest.param(ExecutionOrder.PARALLEL, 300, id="parallel"),
+            pytest.param(ExecutionOrder.SERIAL, 600, id="serial"),
+            pytest.param(ExecutionOrder.RANDOM, 600, id="random"),
+        ],
+    )
+    def test_execution_order_timing(self, builder: MigratorBuilder, order: ExecutionOrder, min_time: int):
+        def delay(payload):
+            sleep(0.3)
+            return Result.success("success")
+
+        builder.read_execution_order(order)
+        builder.read(delay, delay)
+        migrator = builder.build()
+        assert isinstance(migrator, Migrator)
+
+        start = datetime.now()
+        result = migrator.read('live', user, Stage.LIVE)
+        delta = datetime.now() - start
+        ms = timedelta_millis(delta)
+
+        assert result.is_success()
+        assert ms >= min_time
diff --git a/testing/migrations/test_migrator_builder.py b/testing/migrations/test_migrator_builder.py
new file mode 100644
index 00000000..6a1db061
--- /dev/null
+++ b/testing/migrations/test_migrator_builder.py
@@ -0,0 +1,73 @@
+import pytest
+from ldclient.client import LDClient, Config
+from ldclient import Result
+from ldclient.migrations import MigratorBuilder, Migrator, ExecutionOrder
+
+
+def test_can_build_successfully():
+    client = LDClient(config=Config(sdk_key='SDK_KEY'))
+    builder = MigratorBuilder(client)
+    builder.read(
+        lambda payload: Result.success("old origin"),
+        lambda payload: Result.success("new origin"),
+        None,
+    )
+    builder.write(
+        lambda payload: Result.success("old origin"),
+        lambda payload: Result.success("new origin"),
+    )
+    migrator = builder.build()
+
+    assert isinstance(migrator, Migrator)
+
+
+@pytest.mark.parametrize(
+    "order",
+    [
+        pytest.param(ExecutionOrder.SERIAL, id="serial"),
+        pytest.param(ExecutionOrder.RANDOM, id="random"),
+        pytest.param(ExecutionOrder.PARALLEL, id="parallel"),
+    ],
+)
+def test_can_modify_execution_order(order):
+    client = LDClient(config=Config(sdk_key='SDK_KEY'))
+    builder = MigratorBuilder(client)
+    builder.read(
+        lambda payload: Result.success("old origin"),
+        lambda payload: Result.success("new origin"),
+        None,
+    )
+    builder.write(
+        lambda payload: Result.success("old origin"),
+        lambda payload: Result.success("new origin"),
+    )
+    builder.read_execution_order(order)
+    migrator = builder.build()
+
+    assert isinstance(migrator, Migrator)
+
+
+def test_build_fails_without_read():
+    client = LDClient(config=Config(sdk_key='SDK_KEY'))
+    builder = MigratorBuilder(client)
+    builder.write(
+        lambda payload: Result.success("old origin"),
+        lambda payload: Result.success("new origin"),
+    )
+    migrator = builder.build()
+
+    assert isinstance(migrator, str)
+    assert migrator == "read configuration not provided"
+
+
+def test_build_fails_without_write():
+    client = LDClient(config=Config(sdk_key='SDK_KEY'))
+    builder = MigratorBuilder(client)
+    builder.read(
+        lambda payload: Result.success("old origin"),
+        lambda payload: Result.success("new origin"),
+    )
+    migrator = builder.build()
+
+    assert isinstance(migrator, str)
+    assert migrator == "write configuration not provided"
diff --git a/testing/migrations/test_op_tracker.py b/testing/migrations/test_op_tracker.py
new file mode 100644
index 00000000..7e40ee5f
--- /dev/null
+++ b/testing/migrations/test_op_tracker.py
@@ -0,0 +1,298 @@
+import pytest
+from datetime import timedelta
+from ldclient import Context
+from ldclient.migrations import OpTracker, Stage, Operation, Origin, MigrationOpEvent
+from ldclient.evaluation import EvaluationDetail
+from testing.builders import build_off_flag_with_value, MigrationSettingsBuilder
+from testing.test_ldclient import user
+
+
+@pytest.fixture
+def bare_tracker() -> OpTracker:
+    flag = build_off_flag_with_value("flag", True).build()
+    detail = EvaluationDetail('value', 0, {'kind': 'OFF'})
+    tracker = OpTracker("flag", flag, user, detail, Stage.LIVE)
+
+    return tracker
+
+
+@pytest.fixture
+def tracker(bare_tracker) -> OpTracker:
+    bare_tracker.operation(Operation.READ)
+    bare_tracker.invoked(Origin.OLD)
+    bare_tracker.invoked(Origin.NEW)
+
+    return bare_tracker
+
+
+class TestBuilding:
+    def test_can_build_successfully(self, tracker: OpTracker):
+        event = tracker.build()
+        assert isinstance(event, MigrationOpEvent)
+
+    def test_can_build_successfully_without_a_flag(self):
+        detail = EvaluationDetail('value', 0, {'kind': 'OFF'})
+        tracker = OpTracker("flag", None, user, detail, Stage.LIVE)
+        tracker.operation(Operation.READ)
+        tracker.invoked(Origin.OLD)
+
+        event = tracker.build()
+        assert isinstance(event, MigrationOpEvent)
+
+    def test_fails_without_operation(self, bare_tracker: OpTracker):
+        event = bare_tracker.build()
+
+        assert isinstance(event, str)
+        assert event == "operation not provided"
+
+    def test_fails_with_empty_key(self):
+        detail = EvaluationDetail('value', 0, {'kind': 'OFF'})
+        flag = build_off_flag_with_value("flag", True).build()
+        tracker = OpTracker("", flag, user, detail, Stage.LIVE)
+        tracker.operation(Operation.WRITE)
+        event = tracker.build()
+
+        assert isinstance(event, str)
+        assert event == "migration operation cannot contain an empty key"
+
+    def test_fails_with_invalid_operation(self, bare_tracker: OpTracker):
+        bare_tracker.operation("invalid operation")  # type: ignore[arg-type]
+        event = bare_tracker.build()
+
+        assert isinstance(event, str)
+        assert event == "operation not provided"
+
+    def test_fails_without_invocations(self, bare_tracker: OpTracker):
+        bare_tracker.operation(Operation.WRITE)
+        event = bare_tracker.build()
+
+        assert isinstance(event, str)
+        assert event == "no origins were invoked"
+
+    def test_with_invalid_context(self):
+        flag = build_off_flag_with_value("flag", True).build()
+        detail = EvaluationDetail('value', 0, {'kind': 'OFF'})
+        invalid_context = Context.from_dict({"kind": "multi", "key": "user-key"})
+        tracker = OpTracker("flag", flag, invalid_context, detail, Stage.LIVE)
+        tracker.operation(Operation.WRITE)
+        tracker.invoked(Origin.OLD)
+        event = tracker.build()
+
+        assert isinstance(event, str)
+        assert event == "provided context was invalid"
+
+    @pytest.mark.parametrize(
+        "invoked,recorded",
+        [
+            pytest.param(Origin.OLD, Origin.NEW, id="invoked old measured new"),
+            pytest.param(Origin.NEW, Origin.OLD, id="invoked new measured old"),
+        ],
+    )
+    def test_latency_invoked_mismatch(
+            self, bare_tracker: OpTracker, invoked: Origin, recorded: Origin):
+        bare_tracker.operation(Operation.WRITE)
+        bare_tracker.invoked(invoked)
+        bare_tracker.latency(recorded, timedelta(milliseconds=20))
+        event = bare_tracker.build()
+
+        assert isinstance(event, str)
+        assert event == f"provided latency for origin '{recorded.value}' without recording invocation"
+
+    @pytest.mark.parametrize(
+        "invoked,recorded",
+        [
+            pytest.param(Origin.OLD, Origin.NEW, id="invoked old measured new"),
+            pytest.param(Origin.NEW, Origin.OLD, id="invoked new measured old"),
+        ],
+    )
+    def test_error_invoked_mismatch(
+            self, bare_tracker: OpTracker, invoked: Origin, recorded: Origin):
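+        # Recording an error for an origin that was never invoked should invalidate the whole event.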
+        bare_tracker.operation(Operation.WRITE)
+        bare_tracker.invoked(invoked)
+        bare_tracker.error(recorded)
+        event = bare_tracker.build()
+
+        assert isinstance(event, str)
+        assert event == f"provided error for origin '{recorded.value}' without recording invocation"
+
+    @pytest.mark.parametrize(
+        "origin",
+        [
+            pytest.param(Origin.OLD, id="old"),
+            pytest.param(Origin.NEW, id="new"),
+        ],
+    )
+    def test_consistency_invoked_mismatch(self, bare_tracker: OpTracker, origin: Origin):
+        bare_tracker.operation(Operation.WRITE)
+        bare_tracker.invoked(origin)
+        bare_tracker.consistent(lambda: True)
+        event = bare_tracker.build()
+
+        assert isinstance(event, str)
+        assert event == "provided consistency without recording both invocations"
+
+
+class TestTrackInvocations:
+    @pytest.mark.parametrize(
+        "origin",
+        [
+            pytest.param(Origin.OLD, id="old"),
+            pytest.param(Origin.NEW, id="new"),
+        ],
+    )
+    def test_individually(self, bare_tracker: OpTracker, origin: Origin):
+        bare_tracker.operation(Operation.WRITE)
+        bare_tracker.invoked(origin)
+
+        event = bare_tracker.build()
+        assert isinstance(event, MigrationOpEvent)
+
+        assert len(event.invoked) == 1
+        assert origin in event.invoked
+
+    def test_tracks_both(self, bare_tracker: OpTracker):
+        bare_tracker.operation(Operation.WRITE)
+        bare_tracker.invoked(Origin.OLD)
+        bare_tracker.invoked(Origin.NEW)
+
+        event = bare_tracker.build()
+        assert isinstance(event, MigrationOpEvent)
+
+        assert len(event.invoked) == 2
+        assert Origin.OLD in event.invoked
+        assert Origin.NEW in event.invoked
+
+    def test_ignores_invalid_origins(self, tracker: OpTracker):
+        tracker.invoked("this is clearly wrong")  # type: ignore[arg-type]
+        tracker.invoked(False)  # type: ignore[arg-type]
+
+        event = tracker.build()
+        assert isinstance(event, MigrationOpEvent)
+
+        assert len(event.invoked) == 2
+        assert Origin.OLD in event.invoked
+        assert Origin.NEW in event.invoked
+
+
+class TestTrackConsistency:
+    @pytest.mark.parametrize("consistent", [True, False])
+    def test_without_check_ratio(
+            self, tracker: OpTracker, consistent: bool):
+        tracker.consistent(lambda: consistent)
+        event = tracker.build()
+        assert isinstance(event, MigrationOpEvent)
+
+        assert event.consistent is consistent
+        assert event.consistent_ratio == 1
+
+    @pytest.mark.parametrize("consistent", [True, False])
+    def test_with_check_ratio_of_1(self, consistent):
+        flag = build_off_flag_with_value("flag", 'off').migrations(MigrationSettingsBuilder().check_ratio(1).build()).build()
+        detail = EvaluationDetail('value', 0, {'kind': 'OFF'})
+        tracker = OpTracker("flag", flag, user, detail, Stage.LIVE)
+        tracker.consistent(lambda: consistent)
+        tracker.operation(Operation.READ)
+        tracker.invoked(Origin.OLD)
+        tracker.invoked(Origin.NEW)
+
+        event = tracker.build()
+        assert isinstance(event, MigrationOpEvent)
+
+        assert event.consistent is consistent
+        assert event.consistent_ratio == 1
+
+    @pytest.mark.parametrize("consistent", [True, False])
+    def test_can_disable_with_check_ratio_of_0(self, consistent: bool):
+        flag = build_off_flag_with_value("flag", 'off').migrations(MigrationSettingsBuilder().check_ratio(0).build()).build()
+        detail = EvaluationDetail('value', 0, {'kind': 'OFF'})
+        tracker = OpTracker("flag", flag, user, detail, Stage.LIVE)
+        tracker.consistent(lambda: consistent)
+        tracker.operation(Operation.READ)
+        tracker.invoked(Origin.OLD)
+        tracker.invoked(Origin.NEW)
+
+        event = tracker.build()
+        assert isinstance(event, MigrationOpEvent)
+
+        assert event.consistent is None
+        assert event.consistent_ratio is None
+
+
+class TestTrackErrors:
+    @pytest.mark.parametrize(
+        "origin",
+        [
+            pytest.param(Origin.OLD, id="old"),
+            pytest.param(Origin.NEW, id="new"),
+        ],
+    )
+    def test_individually(self, tracker: OpTracker, origin: Origin):
+        tracker.operation(Operation.WRITE)
+        tracker.error(origin)
+
+        event = tracker.build()
+        assert isinstance(event, MigrationOpEvent)
+
+        assert len(event.errors) == 1
+        assert origin in event.errors
+
+    def test_tracks_both(self, tracker: OpTracker):
+        tracker.operation(Operation.WRITE)
+        tracker.error(Origin.OLD)
+        tracker.error(Origin.NEW)
+
+        event = tracker.build()
+        assert isinstance(event, MigrationOpEvent)
+
+        assert len(event.errors) == 2
+        assert Origin.OLD in event.errors
+        assert Origin.NEW in event.errors
+
+    def test_ignores_invalid_origins(self, tracker: OpTracker):
+        tracker.error("this is clearly wrong")  # type: ignore[arg-type]
+        tracker.error(False)  # type: ignore[arg-type]
+
+        event = tracker.build()
+        assert isinstance(event, MigrationOpEvent)
+
+        assert len(event.errors) == 0
+
+
+class TestTrackLatencies:
+    @pytest.mark.parametrize(
+        "origin",
+        [
+            pytest.param(Origin.OLD, id="old"),
+            pytest.param(Origin.NEW, id="new"),
+        ],
+    )
+    def test_individually(self, tracker: OpTracker, origin: Origin):
+        tracker.operation(Operation.WRITE)
+        tracker.latency(origin, timedelta(milliseconds=10))
+
+        event = tracker.build()
+        assert isinstance(event, MigrationOpEvent)
+
+        assert len(event.latencies) == 1
+        assert event.latencies[origin] == timedelta(milliseconds=10)
+
+    def test_tracks_both(self, tracker: OpTracker):
+        tracker.operation(Operation.WRITE)
+        tracker.latency(Origin.OLD, timedelta(milliseconds=10))
+        tracker.latency(Origin.NEW, timedelta(milliseconds=5))
+
+        event = tracker.build()
+        assert isinstance(event, MigrationOpEvent)
+
+        assert len(event.latencies) == 2
+        assert event.latencies[Origin.OLD] == timedelta(milliseconds=10)
+        assert event.latencies[Origin.NEW] == timedelta(milliseconds=5)
+
+    def test_ignores_invalid_origins(self, tracker: OpTracker):
+        tracker.latency("this is clearly wrong", timedelta(milliseconds=10))  # type: ignore[arg-type]
+        tracker.latency(False, timedelta(milliseconds=5))  # type: ignore[arg-type]
+
+        event = tracker.build()
+        assert isinstance(event, MigrationOpEvent)
+
+        assert len(event.latencies) == 0
diff --git a/testing/test_context.py b/testing/test_context.py
index 065ce41a..37baa4c7 100644
--- a/testing/test_context.py
+++ b/testing/test_context.py
@@ -204,18 +204,6 @@ def test_json_decoding(self):
         assert_context_invalid(Context.from_dict({'kind': 'multi'}))
         assert_context_invalid(Context.from_dict({'kind': 'multi', 'kind1': 'x'}))
 
-    def test_json_decoding_old_user(self):
-        assert Context.from_dict({'key': 'key1'}) == Context.create('key1', 'user')
-        assert Context.from_dict({'key': 'key1', 'name': 'b'}) == Context.builder('key1').name('b').build()
-        assert Context.from_dict({'key': 'key1', 'custom': {'b': True}}) == \
-            Context.builder('key1').set('b', True).build()
-
-        assert_context_valid(Context.from_dict({'key': ''}))
-        assert_context_invalid(Context.from_dict({}))
-        assert_context_invalid(Context.from_dict({'key': None}))
-        assert_context_invalid(Context.from_dict({'key': 3}))
-        assert_context_invalid(Context.from_dict({'key': 'a', 'name': 3}))
-
 
 class TestContextMulti:
     def test_create_multi(self):
diff --git a/testing/test_file_data_source.py b/testing/test_file_data_source.py
index 17efe8a0..0ff3b0d6 100644
--- a/testing/test_file_data_source.py
+++ b/testing/test_file_data_source.py
@@ -5,7 +5,7 @@
 import threading
 import time
 
-from ldclient.client import LDClient
+from ldclient.client import LDClient, Context
 from ldclient.config import Config
 from ldclient.feature_store import InMemoryFeatureStore
 from ldclient.integrations import Files
@@ -227,7 +227,7 @@ def test_evaluates_full_flag_with_client_as_expected():
     try:
         factory = Files.new_data_source(paths = path)
         client = LDClient(config=Config('SDK_KEY', update_processor_class = factory, send_events = False))
-        value = client.variation('flag1', { 'key': 'user' }, '')
+        value = client.variation('flag1', Context.from_dict({'key': 'user', 'kind': 'user'}), '')
         assert value == 'on'
     finally:
         os.remove(path)
@@ -239,7 +239,7 @@ def test_evaluates_simplified_flag_with_client_as_expected():
     try:
         factory = Files.new_data_source(paths = path)
         client = LDClient(config=Config('SDK_KEY', update_processor_class = factory, send_events = False))
-        value = client.variation('flag2', { 'key': 'user' }, '')
+        value = client.variation('flag2', Context.from_dict({'key': 'user', 'kind': 'user'}), '')
         assert value == 'value2'
     finally:
         os.remove(path)
diff --git a/testing/test_ldclient.py b/testing/test_ldclient.py
index 506773b1..9846e805 100644
--- a/testing/test_ldclient.py
+++ b/testing/test_ldclient.py
@@ -15,17 +15,17 @@
 
 context = Context.builder('xyz').set('bizzle', 'def').build()
 
-user = {
+user = Context.from_dict({
     u'key': u'xyz',
-    u'custom': {
-        u'bizzle': u'def'
-    }
-}
+    u'kind': u'user',
+    u'bizzle': u'def'
+})
 
-anonymous_user = {
+anonymous_user = Context.from_dict({
     u'key': u'abc',
+    u'kind': u'user',
     u'anonymous': True
-}
+})
 
 def make_client(store = InMemoryFeatureStore()):
     return LDClient(config=Config(sdk_key = 'SDK_KEY',
@@ -128,7 +128,7 @@ def test_no_defaults():
 
 def test_secure_mode_hash():
     context_to_hash = Context.create('Message')
-    equivalent_user_to_hash = {'key': 'Message'}
+    equivalent_user_to_hash = Context.from_dict({'key': 'Message', 'kind': 'user'})
     expected_hash = "aa747c502a898200f9e4fa21bac68136f886a0e27aec70ba06daf2e2a5cb5597"
     with make_offline_client() as client:
         assert client.secure_mode_hash(context_to_hash) == expected_hash
diff --git a/testing/test_ldclient_end_to_end.py b/testing/test_ldclient_end_to_end.py
index 3f550d0f..96d49181 100644
--- a/testing/test_ldclient_end_to_end.py
+++ b/testing/test_ldclient_end_to_end.py
@@ -1,4 +1,4 @@
-from ldclient.client import LDClient
+from ldclient.client import LDClient, Context
 from ldclient.config import Config, HTTPConfig
 from testing.http_util import BasicResponse, SequentialHandler, start_secure_server, start_server
 from testing.stub_util import make_put_event, poll_content, stream_content
@@ -8,7 +8,7 @@
 import sys
 
 sdk_key = 'sdk-key'
-user = { 'key': 'userkey' }
+user = Context.from_dict({ 'key': 'userkey', 'kind': 'user' })
 always_true_flag = { 'key': 'flagkey', 'version': 1, 'on': False, 'offVariation': 1, 'variations': [ False, True ] }
 
 def test_client_starts_in_streaming_mode():
@@ -102,12 +102,6 @@ def test_client_sends_diagnostics():
         data = json.loads(r.body)
         assert data['kind'] == 'diagnostic-init'
 
-# The TLS tests are skipped in Python 3.7 because the embedded HTTPS server does not work correctly, causing
-# a TLS handshake failure on the client side. It's unclear whether this is a problem with the self-signed
-# certificate we are using or with some other server settings, but it does not appear to be a client-side
-# problem since we know that the SDK is able to connect to secure LD endpoints.
-
-@pytest.mark.skipif(sys.version_info.major == 3 and sys.version_info.minor == 7, reason = "test is skipped in Python 3.7")
 def test_cannot_connect_with_selfsigned_cert_by_default():
     with start_secure_server() as server:
         server.for_path('/sdk/latest-all', poll_content())
@@ -120,7 +114,6 @@ def test_cannot_connect_with_selfsigned_cert_by_default():
         with LDClient(config = config, start_wait = 1.5) as client:
             assert not client.is_initialized()
 
-@pytest.mark.skipif(sys.version_info.major == 3 and sys.version_info.minor == 7, reason = "test is skipped in Python 3.7")
 def test_can_connect_with_selfsigned_cert_if_ssl_verify_is_false():
     with start_secure_server() as server:
         server.for_path('/sdk/latest-all', poll_content())
@@ -134,7 +127,6 @@ def test_can_connect_with_selfsigned_cert_if_ssl_verify_is_false():
         with LDClient(config = config) as client:
             assert client.is_initialized()
 
-@pytest.mark.skipif(sys.version_info.major == 3 and sys.version_info.minor == 7, reason = "test is skipped in Python 3.7")
 def test_can_connect_with_selfsigned_cert_if_disable_ssl_verification_is_true():
     with start_secure_server() as server:
         server.for_path('/sdk/latest-all', poll_content())
@@ -148,7 +140,6 @@ def test_can_connect_with_selfsigned_cert_if_disable_ssl_verification_is_true():
         with LDClient(config = config) as client:
             assert client.is_initialized()
 
-@pytest.mark.skipif(sys.version_info.major == 3 and sys.version_info.minor == 7, reason = "test is skipped in Python 3.7")
 def test_can_connect_with_selfsigned_cert_by_setting_ca_certs():
     with start_secure_server() as server:
         server.for_path('/sdk/latest-all', poll_content())
diff --git a/testing/test_ldclient_evaluation.py b/testing/test_ldclient_evaluation.py
index ff3753fb..3e66e855 100644
--- a/testing/test_ldclient_evaluation.py
+++ b/testing/test_ldclient_evaluation.py
@@ -67,14 +67,6 @@ def test_variation_for_existing_feature():
     client = make_client(store)
     assert 'value' == client.variation('feature.key', user, default='default')
 
-def test_variation_passes_user_to_evaluator():
-    u = {'key': 'userkey'}
-    feature = FlagBuilder('feature.key').on(True).variations('wrong', 'right').target(1, 'userkey').build()
-    store = InMemoryFeatureStore()
-    store.init({FEATURES: {'feature.key': feature}})
-    client = make_client(store)
-    assert 'right' == client.variation('feature.key', u, default='default')
-
 def test_variation_passes_context_to_evaluator():
     c = Context.create('userkey')
     feature = FlagBuilder('feature.key').on(True).variations('wrong', 'right').target(1, 'userkey').build()
@@ -88,19 +80,12 @@ def test_variation_for_unknown_feature():
     client = make_client(store)
     assert 'default' == client.variation('feature.key', user, default='default')
 
-def test_variation_when_user_is_none():
-    feature = build_off_flag_with_value('feature.key', 'value').build()
-    store = InMemoryFeatureStore()
-    store.init({FEATURES: {'feature.key': feature}})
-    client = make_client(store)
-    assert 'default' == client.variation('feature.key', None, default='default')
-
 def test_variation_when_user_has_no_key():
     feature = build_off_flag_with_value('feature.key', 'value').build()
     store = InMemoryFeatureStore()
     store.init({FEATURES: {'feature.key': feature}})
     client = make_client(store)
-    assert 'default' == client.variation('feature.key', { }, default='default')
+    assert 'default' == client.variation('feature.key', Context.from_dict({}), default='default')
 
 def test_variation_for_invalid_context():
     c = Context.create('')
@@ -131,21 +116,13 @@ def test_variation_detail_for_unknown_feature():
     expected = EvaluationDetail('default', None, {'kind': 'ERROR', 'errorKind': 'FLAG_NOT_FOUND'})
     assert expected == client.variation_detail('feature.key', user, default='default')
 
-def test_variation_detail_when_user_is_none():
-    feature = build_off_flag_with_value('feature.key', 'value').build()
-    store = InMemoryFeatureStore()
-    store.init({FEATURES: {'feature.key': feature}})
-    client = make_client(store)
-    expected = EvaluationDetail('default', None, {'kind': 'ERROR', 'errorKind': 'USER_NOT_SPECIFIED'})
-    assert expected == client.variation_detail('feature.key', None, default='default')
-
 def test_variation_detail_when_user_has_no_key():
     feature = build_off_flag_with_value('feature.key', 'value').build()
     store = InMemoryFeatureStore()
     store.init({FEATURES: {'feature.key': feature}})
     client = make_client(store)
     expected = EvaluationDetail('default', None, {'kind': 'ERROR', 'errorKind': 'USER_NOT_SPECIFIED'})
-    assert expected == client.variation_detail('feature.key', { }, default='default')
+    assert expected == client.variation_detail('feature.key', Context.from_dict({}), default='default')
 
 def test_variation_detail_for_flag_that_evaluates_to_none():
     empty_flag = FlagBuilder('feature.key').on(False).build()
@@ -160,7 +137,7 @@ def test_variation_detail_for_flag_that_evaluates_to_none():
 
 def test_variation_when_feature_store_throws_error(caplog):
     store = ErroringFeatureStore()
     client = make_client(store)
-    assert client.variation('feature.key', { "key": "user" }, default='default') == 'default'
+    assert client.variation('feature.key', Context.from_dict({ "key": "user", "kind": "user" }), default='default') == 'default'
     errlog = get_log_lines(caplog, 'ERROR')
     assert errlog == [ 'Unexpected error while retrieving feature flag "feature.key": NotImplementedError()' ]
 
@@ -168,7 +145,7 @@ def test_variation_detail_when_feature_store_throws_error(caplog):
     store = ErroringFeatureStore()
     client = make_client(store)
     expected = EvaluationDetail('default', None, {'kind': 'ERROR', 'errorKind': 'EXCEPTION'})
-    actual = client.variation_detail('feature.key', { "key": "user" }, default='default')
+    actual = client.variation_detail('feature.key', Context.from_dict({ "key": "user", "kind": "user" }), default='default')
     assert expected == actual
     assert actual.is_default_value() == True
     errlog = get_log_lines(caplog, 'ERROR')
@@ -201,24 +178,17 @@ def test_all_flags_returns_values():
     result = client.all_flags_state(user).to_values_map()
     assert result == { 'key1': 'value1', 'key2': 'value2' }
 
-def test_all_flags_returns_none_if_user_is_none():
-    store = InMemoryFeatureStore()
-    store.init({ FEATURES: { 'key1': flag1, 'key2': flag2 } })
-    client = make_client(store)
-    result = client.all_flags_state(None)
-    assert not result.valid
-
 def test_all_flags_returns_none_if_user_has_no_key():
     store = InMemoryFeatureStore()
     store.init({ FEATURES: { 'key1': flag1, 'key2': flag2 } })
     client = make_client(store)
-    result = client.all_flags_state({ })
+    result = client.all_flags_state(Context.from_dict({}))
     assert not result.valid
 
 def test_all_flags_returns_none_if_feature_store_throws_error(caplog):
     store = ErroringFeatureStore()
     client = make_client(store)
-    assert not client.all_flags_state({ "key": "user" }).valid
+    assert not client.all_flags_state(Context.from_dict({ "key": "user", "kind": "user" })).valid
"key": "user", "kind": "user" })).valid errlog = get_log_lines(caplog, 'ERROR') assert errlog == [ 'Unable to read flags for all_flag_state: NotImplementedError()' ] @@ -379,24 +349,17 @@ def test_all_flags_state_can_omit_details_for_untracked_flags(): '$valid': True } -def test_all_flags_state_returns_empty_state_if_user_is_none(): - store = InMemoryFeatureStore() - store.init({ FEATURES: { 'key1': flag1, 'key2': flag2 } }) - client = make_client(store) - state = client.all_flags_state(None) - assert state.valid == False - def test_all_flags_state_returns_empty_state_if_user_has_no_key(): store = InMemoryFeatureStore() store.init({ FEATURES: { 'key1': flag1, 'key2': flag2 } }) client = make_client(store) - state = client.all_flags_state({ }) + state = client.all_flags_state(Context.from_dict({})) assert state.valid == False def test_all_flags_returns_empty_state_if_feature_store_throws_error(caplog): store = ErroringFeatureStore() client = make_client(store) - state = client.all_flags_state({ "key": "user" }) + state = client.all_flags_state(Context.from_dict({ "key": "user", "kind": "user" })) assert state.valid == False errlog = get_log_lines(caplog, 'ERROR') assert errlog == [ 'Unable to read flags for all_flag_state: NotImplementedError()' ] diff --git a/testing/test_ldclient_events.py b/testing/test_ldclient_events.py index aa5ff1b6..48568737 100644 --- a/testing/test_ldclient_events.py +++ b/testing/test_ldclient_events.py @@ -1,9 +1,12 @@ from ldclient.client import LDClient, Config, Context +from ldclient.evaluation import EvaluationDetail from ldclient.impl.events.event_processor import DefaultEventProcessor from ldclient.feature_store import InMemoryFeatureStore from ldclient.impl.events.types import EventInputCustom, EventInputEvaluation, EventInputIdentify +from ldclient.migrations.tracker import MigrationOpEvent from ldclient.impl.stubs import NullEventProcessor from ldclient.versioned_data_kind import FEATURES +from ldclient.migrations import OpTracker, Stage, Operation, Origin from testing.builders import * from testing.stub_util import MockUpdateProcessor @@ -55,15 +58,9 @@ def test_identify_with_user_dict(): assert e.context == context -def test_identify_no_user(): - with make_client() as client: - client.identify(None) - assert count_events(client) == 0 - - def test_identify_no_user_key(): with make_client() as client: - client.identify({ 'name': 'nokey' }) + client.identify(Context.from_dict({ 'kind': 'user', 'name': 'nokey' })) assert count_events(client) == 0 @@ -73,6 +70,39 @@ def test_identify_invalid_context(): assert count_events(client) == 0 +def test_migration_op(): + detail = EvaluationDetail('value', 0, {'kind': 'OFF'}) + flag = FlagBuilder('key').version(100).on(True).variations('value').build() + tracker = OpTracker('key', flag, context, detail, Stage.OFF) + tracker.operation(Operation.READ) + tracker.invoked(Origin.OLD) + + with make_client() as client: + client.track_migration_op(tracker) + + e = get_first_event(client) + assert isinstance(e, MigrationOpEvent) + assert e.flag == flag + assert e.context == context + assert e.operation == Operation.READ + assert e.detail == detail + assert e.invoked == set([Origin.OLD]) + + +def test_does_not_send_bad_event(): + detail = EvaluationDetail('value', 0, {'kind': 'OFF'}) + tracker = OpTracker('key', None, context, detail, Stage.OFF) + + with make_client() as client: + client.track_migration_op(tracker) + client.identify(context) # Emit this to ensure events are working + + + # This is only identify if the op 
+        e = get_first_event(client)
+        assert isinstance(e, EventInputIdentify)
+
+
 def test_track():
     with make_client() as client:
         client.track('my_event', context)
@@ -117,12 +147,6 @@ def test_track_with_metric_value():
         assert e.metric_value == 1.5
 
 
-def test_track_no_context():
-    with make_client() as client:
-        client.track('my_event', None)
-        assert count_events(client) == 0
-
-
 def test_track_invalid_context():
     with make_client() as client:
         client.track('my_event', Context.create(''))
@@ -271,15 +295,6 @@ def test_event_for_unknown_feature():
            e.track_events is False)
 
 
-def test_no_event_for_existing_feature_with_no_context():
-    feature = build_off_flag_with_value('feature.key', 'value').track_events(True).build()
-    store = InMemoryFeatureStore()
-    store.init({FEATURES: {feature.key: feature.to_json_dict()}})
-    with make_client(store) as client:
-        assert 'default' == client.variation(feature.key, None, default='default')
-        assert count_events(client) == 0
-
-
 def test_no_event_for_existing_feature_with_invalid_context():
     feature = build_off_flag_with_value('feature.key', 'value').track_events(True).build()
     store = InMemoryFeatureStore()
diff --git a/testing/test_ldclient_migration_variation.py b/testing/test_ldclient_migration_variation.py
new file mode 100644
index 00000000..81d1c811
--- /dev/null
+++ b/testing/test_ldclient_migration_variation.py
@@ -0,0 +1,69 @@
+import pytest
+from ldclient.feature_store import InMemoryFeatureStore
+from ldclient.versioned_data_kind import FEATURES
+from ldclient.migrations import Stage, Operation, Origin
+
+from testing.builders import FlagBuilder
+from testing.test_ldclient import make_client, user
+
+
+def test_uses_default_if_flag_not_found():
+    store = InMemoryFeatureStore()
+    client = make_client(store)
+
+    stage, tracker = client.migration_variation('key', user, Stage.LIVE)
+
+    assert stage == Stage.LIVE
+    assert tracker is not None
+
+
+def test_off_if_default_is_bad():
+    store = InMemoryFeatureStore()
+    client = make_client(store)
+
+    stage, tracker = client.migration_variation('key', user, 'invalid default stage')
+
+    assert stage == Stage.OFF
+    assert tracker is not None
+
+
+def test_uses_default_if_flag_returns_invalid_stage():
+    feature = FlagBuilder('key').on(True).variations('i am not', 'a valid', 'migration flag').fallthrough_variation(1).build()
+    store = InMemoryFeatureStore()
+    store.init({FEATURES: {'key': feature}})
+    client = make_client(store)
+
+    stage, tracker = client.migration_variation('key', user, Stage.LIVE)
+    tracker.operation(Operation.READ)
+    tracker.invoked(Origin.OLD)
+
+    assert stage == Stage.LIVE
+    assert tracker is not None
+
+    event = tracker.build()
+    assert event.detail.value == Stage.LIVE.value
+    assert event.detail.variation_index is None
+    assert event.detail.reason["errorKind"] == "WRONG_TYPE"
+
+
+@pytest.mark.parametrize(
+    "expected,default",
+    [
+        pytest.param(Stage.OFF, Stage.DUALWRITE, id="off"),
+        pytest.param(Stage.DUALWRITE, Stage.SHADOW, id="dualwrite"),
+        pytest.param(Stage.SHADOW, Stage.LIVE, id="shadow"),
+        pytest.param(Stage.LIVE, Stage.RAMPDOWN, id="live"),
+        pytest.param(Stage.RAMPDOWN, Stage.COMPLETE, id="rampdown"),
+        pytest.param(Stage.COMPLETE, Stage.OFF, id="complete"),
+    ],
+)
+def test_can_determine_correct_stage(expected: Stage, default: Stage):
+    feature = FlagBuilder('key').on(True).variations(expected.value).fallthrough_variation(0).build()
+    store = InMemoryFeatureStore()
+    store.init({FEATURES: {'key': feature}})
+    client = make_client(store)
+
+    stage, tracker = client.migration_variation('key', user, default)
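+    # The flag's only variation is the expected stage's value, so the fallthrough should return it rather than the default.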
+
+    assert stage == expected
+    assert tracker is not None