diff --git a/.circleci/config.yml b/.circleci/config.yml index 92699a3c..714c5ee1 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -17,13 +17,16 @@ test-template: &test-template command: | sudo pip install --upgrade pip setuptools; sudo pip install -r test-requirements.txt; + if [[ "$CIRCLE_JOB" != "test-3.3" ]] && [[ "$CIRCLE_JOB" != "test-3.4" ]]; then + sudo pip install -r consul-requirements.txt; + fi; sudo python setup.py install; pip freeze - run: name: run tests command: | mkdir test-reports; - if [[ $CIRCLE_JOB == test-2.7 ]]; then + if [[ "$CIRCLE_JOB" == "test-2.7" ]]; then pytest -s --cov=ldclient --junitxml=test-reports/junit.xml testing; sh -c '[ -n "${CODECLIMATE_REPO_TOKEN+1}" ] && codeclimate-test-reporter || echo "No CODECLIMATE_REPO_TOKEN value is set; not publishing coverage report"'; else @@ -41,33 +44,39 @@ jobs: - image: circleci/python:2.7-jessie - image: redis - image: amazon/dynamodb-local + - image: consul test-3.3: <<: *test-template docker: - image: circleci/python:3.3-jessie - image: redis - image: amazon/dynamodb-local + # python-consul doesn't support Python 3.3 test-3.4: <<: *test-template docker: - image: circleci/python:3.4-jessie - image: redis - image: amazon/dynamodb-local + # python-consul doesn't support Python 3.4 test-3.5: <<: *test-template docker: - image: circleci/python:3.5-jessie - image: redis - image: amazon/dynamodb-local + - image: consul test-3.6: <<: *test-template docker: - image: circleci/python:3.6-jessie - image: redis - image: amazon/dynamodb-local + - image: consul test-3.7: <<: *test-template docker: - image: circleci/python:3.7-stretch - image: redis - image: amazon/dynamodb-local + - image: consul diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 88668de9..af5083c2 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -8,7 +8,7 @@ Development information (for developing this module itself) 1. One-time setup: - mkvirtualenv python-client + mkvirtualenv python-client 1. When working on the project be sure to activate the python-client virtualenv using the technique of your choosing. @@ -17,13 +17,17 @@ Development information (for developing this module itself) pip install -r requirements.txt pip install -r test-requirements.txt -1. Run tests: You'll need redis running locally on its default port of 6379. +1. When running unit tests, in order for `test_feature_store.py` to run, you'll need all of the supported databases (Redis, Consul, DynamoDB) running locally on their default ports. + 1. If you want integration tests to run, set the ```LD_SDK_KEY``` environment variable to a valid production SDK Key. + 1. ```$ py.test testing``` -Developing with different python versions +1. All code must be compatible with all supported Python versions as described in the README. Most portability issues are addressed by using the `six` package. We are avoiding the use of `__future__` imports, since they can easily be omitted by mistake, causing code in one file to behave differently from another; instead, whenever possible, use an explicit approach that makes it clear what the desired behavior is in all Python versions (e.g., if you want to do floor division, use `//`; if you want to divide as floats, explicitly cast to floats).
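+
+   For example, here is a quick sketch of the preferred explicit style (the names are illustrative):
+
+   ```python
+   from six import iteritems, string_types
+
+   def summarize(done, total, paths, flags):
+       ratio = float(done) / total   # explicit float division: same result on 2.x and 3.x
+       half = total // 2             # explicit floor division: same result on 2.x and 3.x
+       if isinstance(paths, string_types):   # matches str/unicode on 2.x, str on 3.x
+           paths = [paths]
+       return ratio, half, paths, list(iteritems(flags))   # dict iteration on both versions
+   ```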
+ +Developing with different Python versions ----------------------------------------- -Example for switching to python 3: +Example for switching to Python 3: ```virtualenv -p `which python3` ~/.virtualenvs/python-client``` \ No newline at end of file diff --git a/README.md b/README.md index d25ee307..61e67050 100644 --- a/README.md +++ b/README.md @@ -81,7 +81,9 @@ The SDK is tested with the most recent patch releases of Python 2.7, 3.3, 3.4, 3 Database integrations --------------------- -Feature flag data can be kept in a persistent store using Redis or DynamoDB. These adapters are implemented in the `DynamoDB` and `Redis` classes in `ldclient.integrations`; to use them, call the `new_feature_store` method in the appropriate class, and put the returned object in the `feature_store` property of your client configuration. See [`ldclient.integrations`](https://github.com/launchdarkly/python-client-private/blob/master/ldclient/integrations.py) and the [SDK reference guide](https://docs.launchdarkly.com/v2.0/docs/using-a-persistent-feature-store) for more information. +Feature flag data can be kept in a persistent store using Consul, DynamoDB, or Redis. These adapters are implemented in the `Consul`, `DynamoDB` and `Redis` classes in `ldclient.integrations`; to use them, call the `new_feature_store` method in the appropriate class, and put the returned object in the `feature_store` property of your client configuration. See [`ldclient.integrations`](https://github.com/launchdarkly/python-client-private/blob/master/ldclient/integrations.py) and the [SDK reference guide](https://docs.launchdarkly.com/v2.0/docs/using-a-persistent-feature-store) for more information. + +Note that Consul is not supported in Python 3.3 or 3.4. Using flag data from a file --------------------------- diff --git a/consul-requirements.txt b/consul-requirements.txt new file mode 100644 index 00000000..637f7fe1 --- /dev/null +++ b/consul-requirements.txt @@ -0,0 +1 @@ +python-consul>=1.0.1 diff --git a/demo/demo.py b/demo/demo.py index 987a05d4..8ac745f4 100644 --- a/demo/demo.py +++ b/demo/demo.py @@ -1,5 +1,3 @@ -from __future__ import print_function - import logging import sys diff --git a/ldclient/client.py b/ldclient/client.py index 30c37e53..ff96475b 100644 --- a/ldclient/client.py +++ b/ldclient/client.py @@ -1,12 +1,8 @@ -from __future__ import division, with_statement, absolute_import - import hashlib import hmac import threading import traceback -from builtins import object - from ldclient.config import Config as Config from ldclient.event_processor import NullEventProcessor from ldclient.feature_requester import FeatureRequesterImpl diff --git a/ldclient/event_processor.py b/ldclient/event_processor.py index 3b89420f..9a0cae83 100644 --- a/ldclient/event_processor.py +++ b/ldclient/event_processor.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - from collections import namedtuple from email.utils import parsedate import errno diff --git a/ldclient/feature_requester.py b/ldclient/feature_requester.py index 786c1708..046c594f 100644 --- a/ldclient/feature_requester.py +++ b/ldclient/feature_requester.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - from collections import namedtuple import json import urllib3 diff --git a/ldclient/file_data_source.py b/ldclient/file_data_source.py index ebff765b..61088d50 100644 --- a/ldclient/file_data_source.py +++ b/ldclient/file_data_source.py @@ -1,29 +1,4 @@ -import json -import os -import six -import traceback - -have_yaml = False -try: - import 
yaml - have_yaml = True -except ImportError: - pass - -have_watchdog = False -try: - import watchdog - import watchdog.events - import watchdog.observers - have_watchdog = True -except ImportError: - pass - -from ldclient.interfaces import UpdateProcessor -from ldclient.repeating_timer import RepeatingTimer -from ldclient.util import log -from ldclient.versioned_data_kind import FEATURES, SEGMENTS - +from ldclient.impl.integrations.files.file_data_source import _FileDataSource class FileDataSource(UpdateProcessor): @classmethod @@ -32,80 +7,9 @@ def factory(cls, **kwargs): used in a test environment, to operate using a predetermined feature flag state without an actual LaunchDarkly connection. - To use this component, call `FileDataSource.factory`, and store its return value in the - `update_processor_class` property of your LaunchDarkly client configuration. In the options - to `factory`, set `paths` to the file path(s) of your data file(s): - :: - - factory = FileDataSource.factory(paths = [ myFilePath ]) - config = Config(update_processor_class = factory) - - This will cause the client not to connect to LaunchDarkly to get feature flags. The - client may still make network connections to send analytics events, unless you have disabled - this with Config.send_events or Config.offline. - - Flag data files can be either JSON or YAML (in order to use YAML, you must install the 'pyyaml' - package). They contain an object with three possible properties: - - * "flags": Feature flag definitions. - * "flagValues": Simplified feature flags that contain only a value. - * "segments": User segment definitions. - - The format of the data in "flags" and "segments" is defined by the LaunchDarkly application - and is subject to change. Rather than trying to construct these objects yourself, it is simpler - to request existing flags directly from the LaunchDarkly server in JSON format, and use this - output as the starting point for your file. In Linux you would do this: - :: - - curl -H "Authorization: {your sdk key}" https://app.launchdarkly.com/sdk/latest-all - - The output will look something like this (but with many more properties): - :: - - { - "flags": { - "flag-key-1": { - "key": "flag-key-1", - "on": true, - "variations": [ "a", "b" ] - } - }, - "segments": { - "segment-key-1": { - "key": "segment-key-1", - "includes": [ "user-key-1" ] - } - } - } - - Data in this format allows the SDK to exactly duplicate all the kinds of flag behavior supported - by LaunchDarkly. However, in many cases you will not need this complexity, but will just want to - set specific flag keys to specific values. For that, you can use a much simpler format: - :: - - { - "flagValues": { - "my-string-flag-key": "value-1", - "my-boolean-flag-key": true, - "my-integer-flag-key": 3 - } - } - - Or, in YAML: - :: - - flagValues: - my-string-flag-key: "value-1" - my-boolean-flag-key: true - my-integer-flag-key: 1 - - It is also possible to specify both "flags" and "flagValues", if you want some flags - to have simple values and others to have complex behavior. However, it is an error to use the - same flag key or segment key more than once, either in a single file or across multiple files. - - If the data source encounters any error in any file-- malformed content, a missing file, or a - duplicate key-- it will not load flags from any of the files. - + This module and this implementation class are deprecated and may be changed or removed in the future. + Please use :func:`ldclient.integrations.Files.new_data_source()`. 
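+
+        For example, code that formerly did this::
+
+            factory = FileDataSource.factory(paths=[ myFilePath ])
+
+        should now do this::
+
+            factory = Files.new_data_source(paths=[ myFilePath ])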
+ :param kwargs: See below @@ -123,150 +27,9 @@ def factory(cls, **kwargs): used only if auto_update is true, and if the native file-watching mechanism from 'watchdog' is not being used. The default value is 1 second. """ - return lambda config, store, ready : FileDataSource(store, kwargs, ready) - - def __init__(self, store, options, ready): - self._store = store - self._ready = ready - self._inited = False - self._paths = options.get('paths', []) - if isinstance(self._paths, six.string_types): - self._paths = [ self._paths ] - self._auto_update = options.get('auto_update', False) - self._auto_updater = None - self._poll_interval = options.get('poll_interval', 1) - self._force_polling = options.get('force_polling', False) # used only in tests - - def start(self): - self._load_all() - - if self._auto_update: - self._auto_updater = self._start_auto_updater() - - # We will signal readiness immediately regardless of whether the file load succeeded or failed - - # the difference can be detected by checking initialized() - self._ready.set() - - def stop(self): - if self._auto_updater: - self._auto_updater.stop() - - def initialized(self): - return self._inited - - def _load_all(self): - all_data = { FEATURES: {}, SEGMENTS: {} } - for path in self._paths: - try: - self._load_file(path, all_data) - except Exception as e: - log.error('Unable to load flag data from "%s": %s' % (path, repr(e))) - traceback.print_exc() - return - self._store.init(all_data) - self._inited = True - - def _load_file(self, path, all_data): - content = None - with open(path, 'r') as f: - content = f.read() - parsed = self._parse_content(content) - for key, flag in six.iteritems(parsed.get('flags', {})): - self._add_item(all_data, FEATURES, flag) - for key, value in six.iteritems(parsed.get('flagValues', {})): - self._add_item(all_data, FEATURES, self._make_flag_with_value(key, value)) - for key, segment in six.iteritems(parsed.get('segments', {})): - self._add_item(all_data, SEGMENTS, segment) - - def _parse_content(self, content): - if have_yaml: - return yaml.load(content) # pyyaml correctly parses JSON too - return json.loads(content) - - def _add_item(self, all_data, kind, item): - items = all_data[kind] - key = item.get('key') - if items.get(key) is None: - items[key] = item - else: - raise Exception('In %s, key "%s" was used more than once' % (kind.namespace, key)) - - def _make_flag_with_value(self, key, value): - return { - 'key': key, - 'on': True, - 'fallthrough': { - 'variation': 0 - }, - 'variations': [ value ] - } - - def _start_auto_updater(self): - resolved_paths = [] - for path in self._paths: - try: - resolved_paths.append(os.path.realpath(path)) - except: - log.warn('Cannot watch for changes to data file "%s" because it is an invalid path' % path) - if have_watchdog and not self._force_polling: - return FileDataSource.WatchdogAutoUpdater(resolved_paths, self._load_all) - else: - return FileDataSource.PollingAutoUpdater(resolved_paths, self._load_all, self._poll_interval) - - # Watch for changes to data files using the watchdog package. This uses native OS filesystem notifications - # if available for the current platform. 
- class WatchdogAutoUpdater(object): - def __init__(self, resolved_paths, reloader): - watched_files = set(resolved_paths) - - class LDWatchdogHandler(watchdog.events.FileSystemEventHandler): - def on_any_event(self, event): - if event.src_path in watched_files: - reloader() - - dir_paths = set() - for path in resolved_paths: - dir_paths.add(os.path.dirname(path)) - - self._observer = watchdog.observers.Observer() - handler = LDWatchdogHandler() - for path in dir_paths: - self._observer.schedule(handler, path) - self._observer.start() - - def stop(self): - self._observer.stop() - self._observer.join() - - # Watch for changes to data files by polling their modification times. This is used if auto-update is - # on but the watchdog package is not installed. - class PollingAutoUpdater(object): - def __init__(self, resolved_paths, reloader, interval): - self._paths = resolved_paths - self._reloader = reloader - self._file_times = self._check_file_times() - self._timer = RepeatingTimer(interval, self._poll) - self._timer.start() - - def stop(self): - self._timer.stop() - - def _poll(self): - new_times = self._check_file_times() - changed = False - for file_path, file_time in six.iteritems(self._file_times): - if new_times.get(file_path) is not None and new_times.get(file_path) != file_time: - changed = True - break - self._file_times = new_times - if changed: - self._reloader() - def _check_file_times(self): - ret = {} - for path in self._paths: - try: - ret[path] = os.path.getmtime(path) - except: - ret[path] = None - return ret + return lambda config, store, ready : _FileDataSource(store, ready, + paths=kwargs.get("paths"), + auto_update=kwargs.get("auto_update", False), + poll_interval=kwargs.get("poll_interval", 1), + force_polling=kwargs.get("force_polling", False)) diff --git a/ldclient/impl/__init__.py b/ldclient/impl/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ldclient/impl/integrations/__init__.py b/ldclient/impl/integrations/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ldclient/impl/integrations/consul/__init__.py b/ldclient/impl/integrations/consul/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ldclient/impl/integrations/consul/consul_feature_store.py b/ldclient/impl/integrations/consul/consul_feature_store.py new file mode 100644 index 00000000..6fc8652e --- /dev/null +++ b/ldclient/impl/integrations/consul/consul_feature_store.py @@ -0,0 +1,125 @@ +import json + +have_consul = False +try: + import consul + have_consul = True +except ImportError: + pass + +from ldclient import log +from ldclient.feature_store import CacheConfig +from ldclient.feature_store_helpers import CachingStoreWrapper +from ldclient.interfaces import FeatureStore, FeatureStoreCore + +# +# Internal implementation of the Consul feature store. +# +# Implementation notes: +# +# * Feature flags, segments, and any other kind of entity the LaunchDarkly client may wish +# to store, are stored as individual items with the key "{prefix}/features/{flag-key}", +# "{prefix}/segments/{segment-key}", etc. +# +# * The special key "{prefix}/$inited" indicates that the store contains a complete data set. +# +# * Since Consul has limited support for transactions (they can't contain more than 64 +# operations), the init method-- which replaces the entire data store-- is not guaranteed to +# be atomic, so there can be a race condition if another process is adding new data via +# Upsert. 
To minimize this, we don't delete all the data at the start; instead, we update +# the items we've received, and then delete all other items. That could potentially result in +# deleting new data from another process, but that would be the case anyway if the Init +# happened to execute later than the Upsert; we are relying on the fact that normally the +# process that did the Init will also receive the new data shortly and do its own Upsert. +# + +class _ConsulFeatureStoreCore(FeatureStoreCore): + def __init__(self, host, port, prefix, consul_opts): + if not have_consul: + raise NotImplementedError("Cannot use Consul feature store because the python-consul package is not installed") + opts = consul_opts or {} + if host is not None: + opts['host'] = host + if port is not None: + opts['port'] = port + self._prefix = ("launchdarkly" if prefix is None else prefix) + "/" + self._client = consul.Consul(**opts) + + def init_internal(self, all_data): + # Start by reading the existing keys; we will later delete any of these that weren't in all_data. + index, keys = self._client.kv.get(self._prefix, recurse=True, keys=True) + unused_old_keys = set(keys or []) + + num_items = 0 + inited_key = self._inited_key() + unused_old_keys.discard(inited_key) + + # Insert or update every provided item. Note that this Consul client doesn't support batch + # operations (the "txn" method), so we'll write them one at a time. + for kind, items in all_data.items(): + for key, item in items.items(): + encoded_item = json.dumps(item) + db_key = self._item_key(kind, item['key']) + self._client.kv.put(db_key, encoded_item) + unused_old_keys.discard(db_key) + num_items = num_items + 1 + + # Now delete any previously existing items whose keys were not in the current data + for key in unused_old_keys: + self._client.kv.delete(key) + + # Now set the special key that we check in initialized_internal() + self._client.kv.put(inited_key, "") + + log.info('Initialized Consul store with %d items', num_items) + + def get_internal(self, kind, key): + index, resp = self._client.kv.get(self._item_key(kind, key)) + return None if resp is None else json.loads(resp['Value'].decode('utf-8')) + + def get_all_internal(self, kind): + items_out = {} + index, results = self._client.kv.get(self._kind_key(kind), recurse=True) + for result in results: + item = json.loads(result['Value'].decode('utf-8')) + items_out[item['key']] = item + return items_out + + def upsert_internal(self, kind, new_item): + key = self._item_key(kind, new_item['key']) + encoded_item = json.dumps(new_item) + + # We will potentially keep retrying indefinitely until someone's write succeeds + while True: + index, old_value = self._client.kv.get(key) + if old_value is None: + mod_index = 0 + else: + old_item = json.loads(old_value['Value'].decode('utf-8')) + # Check whether the item is stale. If so, don't do the update (and return the existing item to + # CachingStoreWrapper so it can be cached) + if old_item['version'] >= new_item['version']: + return old_item + mod_index = old_value['ModifyIndex'] + + # Otherwise, try to write. We will do a compare-and-set operation, so the write will only succeed if + # the key's ModifyIndex is still equal to the previous value. If the previous ModifyIndex was zero, + # it means the key did not previously exist and the write will only succeed if it still doesn't exist. 
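+            # (python-consul's kv.put returns a boolean: True if the write was applied, False if the
+            # compare-and-set check failed; on failure we fall through, log, and retry the loop.)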
+ success = self._client.kv.put(key, encoded_item, cas=mod_index) + if success: + return new_item + + log.debug('Concurrent modification detected, retrying') + + def initialized_internal(self): + index, resp = self._client.kv.get(self._inited_key()) + return (resp is not None) + + def _kind_key(self, kind): + return self._prefix + kind.namespace + + def _item_key(self, kind, key): + return self._kind_key(kind) + '/' + key + + def _inited_key(self): + return self._prefix + ('$inited') diff --git a/ldclient/impl/integrations/dynamodb/__init__.py b/ldclient/impl/integrations/dynamodb/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ldclient/dynamodb_feature_store.py b/ldclient/impl/integrations/dynamodb/dynamodb_feature_store.py similarity index 100% rename from ldclient/dynamodb_feature_store.py rename to ldclient/impl/integrations/dynamodb/dynamodb_feature_store.py diff --git a/ldclient/impl/integrations/files/__init__.py b/ldclient/impl/integrations/files/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ldclient/impl/integrations/files/file_data_source.py b/ldclient/impl/integrations/files/file_data_source.py new file mode 100644 index 00000000..9ba6e561 --- /dev/null +++ b/ldclient/impl/integrations/files/file_data_source.py @@ -0,0 +1,172 @@ +import json +import os +import six +import traceback + +have_yaml = False +try: + import yaml + have_yaml = True +except ImportError: + pass + +have_watchdog = False +try: + import watchdog + import watchdog.events + import watchdog.observers + have_watchdog = True +except ImportError: + pass + +from ldclient.interfaces import UpdateProcessor +from ldclient.repeating_timer import RepeatingTimer +from ldclient.util import log +from ldclient.versioned_data_kind import FEATURES, SEGMENTS + +class _FileDataSource(UpdateProcessor): + def __init__(self, store, ready, paths, auto_update, poll_interval, force_polling): + self._store = store + self._ready = ready + self._inited = False + self._paths = paths + if isinstance(self._paths, six.string_types): + self._paths = [ self._paths ] + self._auto_update = auto_update + self._auto_updater = None + self._poll_interval = poll_interval + self._force_polling = force_polling + + def start(self): + self._load_all() + + if self._auto_update: + self._auto_updater = self._start_auto_updater() + + # We will signal readiness immediately regardless of whether the file load succeeded or failed - + # the difference can be detected by checking initialized() + self._ready.set() + + def stop(self): + if self._auto_updater: + self._auto_updater.stop() + + def initialized(self): + return self._inited + + def _load_all(self): + all_data = { FEATURES: {}, SEGMENTS: {} } + for path in self._paths: + try: + self._load_file(path, all_data) + except Exception as e: + log.error('Unable to load flag data from "%s": %s' % (path, repr(e))) + traceback.print_exc() + return + self._store.init(all_data) + self._inited = True + + def _load_file(self, path, all_data): + content = None + with open(path, 'r') as f: + content = f.read() + parsed = self._parse_content(content) + for key, flag in six.iteritems(parsed.get('flags', {})): + self._add_item(all_data, FEATURES, flag) + for key, value in six.iteritems(parsed.get('flagValues', {})): + self._add_item(all_data, FEATURES, self._make_flag_with_value(key, value)) + for key, segment in six.iteritems(parsed.get('segments', {})): + self._add_item(all_data, SEGMENTS, segment) + + def _parse_content(self, content): + if have_yaml: + return 
yaml.load(content) # pyyaml correctly parses JSON too + return json.loads(content) + + def _add_item(self, all_data, kind, item): + items = all_data[kind] + key = item.get('key') + if items.get(key) is None: + items[key] = item + else: + raise Exception('In %s, key "%s" was used more than once' % (kind.namespace, key)) + + def _make_flag_with_value(self, key, value): + return { + 'key': key, + 'on': True, + 'fallthrough': { + 'variation': 0 + }, + 'variations': [ value ] + } + + def _start_auto_updater(self): + resolved_paths = [] + for path in self._paths: + try: + resolved_paths.append(os.path.realpath(path)) + except: + log.warn('Cannot watch for changes to data file "%s" because it is an invalid path' % path) + if have_watchdog and not self._force_polling: + return _FileDataSource.WatchdogAutoUpdater(resolved_paths, self._load_all) + else: + return _FileDataSource.PollingAutoUpdater(resolved_paths, self._load_all, self._poll_interval) + + # Watch for changes to data files using the watchdog package. This uses native OS filesystem notifications + # if available for the current platform. + class WatchdogAutoUpdater(object): + def __init__(self, resolved_paths, reloader): + watched_files = set(resolved_paths) + + class LDWatchdogHandler(watchdog.events.FileSystemEventHandler): + def on_any_event(self, event): + if event.src_path in watched_files: + reloader() + + dir_paths = set() + for path in resolved_paths: + dir_paths.add(os.path.dirname(path)) + + self._observer = watchdog.observers.Observer() + handler = LDWatchdogHandler() + for path in dir_paths: + self._observer.schedule(handler, path) + self._observer.start() + + def stop(self): + self._observer.stop() + self._observer.join() + + # Watch for changes to data files by polling their modification times. This is used if auto-update is + # on but the watchdog package is not installed. 
+ class PollingAutoUpdater(object): + def __init__(self, resolved_paths, reloader, interval): + self._paths = resolved_paths + self._reloader = reloader + self._file_times = self._check_file_times() + self._timer = RepeatingTimer(interval, self._poll) + self._timer.start() + + def stop(self): + self._timer.stop() + + def _poll(self): + new_times = self._check_file_times() + changed = False + for file_path, file_time in six.iteritems(self._file_times): + if new_times.get(file_path) is not None and new_times.get(file_path) != file_time: + changed = True + break + self._file_times = new_times + if changed: + self._reloader() + + def _check_file_times(self): + ret = {} + for path in self._paths: + try: + ret[path] = os.path.getmtime(path) + except: + ret[path] = None + return ret diff --git a/ldclient/impl/integrations/redis/__init__.py b/ldclient/impl/integrations/redis/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ldclient/impl/integrations/redis/redis_feature_store.py b/ldclient/impl/integrations/redis/redis_feature_store.py new file mode 100644 index 00000000..f0be83a4 --- /dev/null +++ b/ldclient/impl/integrations/redis/redis_feature_store.py @@ -0,0 +1,101 @@ +import json + +have_redis = False +try: + import redis + have_redis = True +except ImportError: + pass + +from ldclient import log +from ldclient.interfaces import FeatureStoreCore +from ldclient.versioned_data_kind import FEATURES + + +class _RedisFeatureStoreCore(FeatureStoreCore): + def __init__(self, url, prefix, max_connections): + if not have_redis: + raise NotImplementedError("Cannot use Redis feature store because redis package is not installed") + self._prefix = prefix or 'launchdarkly' + self._pool = redis.ConnectionPool.from_url(url=url, max_connections=max_connections) + self.test_update_hook = None # exposed for testing + log.info("Started RedisFeatureStore connected to URL: " + url + " using prefix: " + self._prefix) + + def _items_key(self, kind): + return "{0}:{1}".format(self._prefix, kind.namespace) + + def init_internal(self, all_data): + pipe = redis.Redis(connection_pool=self._pool).pipeline() + + all_count = 0 + + for kind, items in all_data.items(): + base_key = self._items_key(kind) + pipe.delete(base_key) + for key, item in items.items(): + item_json = json.dumps(item) + pipe.hset(base_key, key, item_json) + all_count = all_count + len(items) + pipe.execute() + log.info("Initialized RedisFeatureStore with %d items", all_count) + + def get_all_internal(self, kind): + r = redis.Redis(connection_pool=self._pool) + all_items = r.hgetall(self._items_key(kind)) + + if all_items is None or all_items == "": + all_items = {} + + results = {} + for key, item_json in all_items.items(): + key = key.decode('utf-8') # necessary in Python 3 + results[key] = json.loads(item_json.decode('utf-8')) + return results + + def get_internal(self, kind, key): + r = redis.Redis(connection_pool=self._pool) + item_json = r.hget(self._items_key(kind), key) + + if item_json is None or item_json == "": + log.debug("RedisFeatureStore: key %s not found in '%s'. 
Returning None.", key, kind.namespace) + return None + + return json.loads(item_json.decode('utf-8')) + + def upsert_internal(self, kind, item): + r = redis.Redis(connection_pool=self._pool) + base_key = self._items_key(kind) + key = item['key'] + item_json = json.dumps(item) + + while True: + pipeline = r.pipeline() + pipeline.watch(base_key) + old = self.get_internal(kind, key) + if self.test_update_hook is not None: + self.test_update_hook(base_key, key) + if old and old['version'] >= item['version']: + log.debug('RedisFeatureStore: Attempted to %s key: %s version %d with a version that is the same or older: %d in "%s"', + 'delete' if item.get('deleted') else 'update', + key, old['version'], item['version'], kind.namespace) + pipeline.unwatch() + return old + else: + pipeline.multi() + pipeline.hset(base_key, key, item_json) + try: + pipeline.execute() + # Unlike Redis implementations for other platforms, in redis-py a failed WATCH + # produces an exception rather than a null result from execute(). + except redis.exceptions.WatchError: + log.debug("RedisFeatureStore: concurrent modification detected, retrying") + continue + return item + + def initialized_internal(self): + r = redis.Redis(connection_pool=self._pool) + return r.exists(self._items_key(FEATURES)) + + def _before_update_transaction(self, base_key, key): + # exposed for testing + pass diff --git a/ldclient/integrations.py b/ldclient/integrations.py index 63c01202..fcc89abc 100644 --- a/ldclient/integrations.py +++ b/ldclient/integrations.py @@ -1,7 +1,46 @@ from ldclient.feature_store import CacheConfig from ldclient.feature_store_helpers import CachingStoreWrapper -from ldclient.dynamodb_feature_store import _DynamoDBFeatureStoreCore -from ldclient.redis_feature_store import _RedisFeatureStoreCore +from ldclient.impl.integrations.consul.consul_feature_store import _ConsulFeatureStoreCore +from ldclient.impl.integrations.dynamodb.dynamodb_feature_store import _DynamoDBFeatureStoreCore +from ldclient.impl.integrations.files.file_data_source import _FileDataSource +from ldclient.impl.integrations.redis.redis_feature_store import _RedisFeatureStoreCore + + +class Consul(object): + """Provides factory methods for integrations between the LaunchDarkly SDK and Consul. + """ + + """The key prefix that is used if you do not specify one.""" + DEFAULT_PREFIX = "launchdarkly" + + @staticmethod + def new_feature_store(host=None, + port=None, + prefix=None, + consul_opts=None, + caching=CacheConfig.default()): + """Creates a Consul-backed implementation of `:class:ldclient.feature_store.FeatureStore`. + For more details about how and why you can use a persistent feature store, see the + SDK reference guide: https://docs.launchdarkly.com/v2.0/docs/using-a-persistent-feature-store + + To use this method, you must first install the `python-consul` package. Then, put the object + returned by this method into the `feature_store` property of your client configuration + (:class:ldclient.config.Config). + + Note that `python-consul` is not available for Python 3.3 or 3.4, so this feature cannot be + used in those Python versions. 
+ + :param string host: Hostname of the Consul server (uses "localhost" if omitted) + :param int port: Port of the Consul server (uses 8500 if omitted) + :param string prefix: A namespace prefix to be prepended to all Consul keys + :param dict consul_opts: Optional parameters for configuring the Consul client, if you need + to set any of them besides host and port, as defined in the python-consul API; see + https://python-consul.readthedocs.io/en/latest/#consul + :param CacheConfig caching: Specifies whether local caching should be enabled and if so, + sets the cache properties; defaults to `CacheConfig.default()` + """ + core = _ConsulFeatureStoreCore(host, port, prefix, consul_opts) + return CachingStoreWrapper(core, caching) class DynamoDB(object): @@ -14,6 +53,8 @@ def new_feature_store(table_name, dynamodb_opts={}, caching=CacheConfig.default()): """Creates a DynamoDB-backed implementation of `:class:ldclient.feature_store.FeatureStore`. + For more details about how and why you can use a persistent feature store, see the + SDK reference guide: https://docs.launchdarkly.com/v2.0/docs/using-a-persistent-feature-store To use this method, you must first install the `boto3` package containing the AWS SDK gems. Then, put the object returned by this method into the `feature_store` property of your @@ -52,6 +93,8 @@ def new_feature_store(url='redis://localhost:6379/0', max_connections=16, caching=CacheConfig.default()): """Creates a Redis-backed implementation of `:class:ldclient.feature_store.FeatureStore`. + For more details about how and why you can use a persistent feature store, see the + SDK reference guide: https://docs.launchdarkly.com/v2.0/docs/using-a-persistent-feature-store To use this method, you must first install the `redis` package. Then, put the object returned by this method into the `feature_store` property of your client configuration @@ -69,3 +112,107 @@ def new_feature_store(url='redis://localhost:6379/0', wrapper = CachingStoreWrapper(core, caching) wrapper.core = core # exposed for testing return wrapper + + +class Files(object): + """Provides factory methods for integrations with filesystem data. + """ + + @staticmethod + def new_data_source(paths, auto_update=False, poll_interval=1, force_polling=False): + """Provides a way to use local files as a source of feature flag state. This would typically be + used in a test environment, to operate using a predetermined feature flag state without an + actual LaunchDarkly connection. + + To use this component, call `new_data_source`, specifying the file path(s) of your data file(s) + in the `paths` parameter; then put the value returned by this method into the `update_processor_class` + property of your LaunchDarkly client configuration (:class:ldclient.config.Config). + :: + + data_source = Files.new_data_source(paths=[ myFilePath ]) + config = Config(update_processor_class=data_source) + + This will cause the client not to connect to LaunchDarkly to get feature flags. The + client may still make network connections to send analytics events, unless you have disabled + this with Config.send_events or Config.offline. + + Flag data files can be either JSON or YAML (in order to use YAML, you must install the 'pyyaml' + package). They contain an object with three possible properties: + + * "flags": Feature flag definitions. + * "flagValues": Simplified feature flags that contain only a value. + * "segments": User segment definitions. 
+ + The format of the data in "flags" and "segments" is defined by the LaunchDarkly application + and is subject to change. Rather than trying to construct these objects yourself, it is simpler + to request existing flags directly from the LaunchDarkly server in JSON format, and use this + output as the starting point for your file. In Linux you would do this: + :: + + curl -H "Authorization: {your sdk key}" https://app.launchdarkly.com/sdk/latest-all + + The output will look something like this (but with many more properties): + :: + + { + "flags": { + "flag-key-1": { + "key": "flag-key-1", + "on": true, + "variations": [ "a", "b" ] + } + }, + "segments": { + "segment-key-1": { + "key": "segment-key-1", + "includes": [ "user-key-1" ] + } + } + } + + Data in this format allows the SDK to exactly duplicate all the kinds of flag behavior supported + by LaunchDarkly. However, in many cases you will not need this complexity, but will just want to + set specific flag keys to specific values. For that, you can use a much simpler format: + :: + + { + "flagValues": { + "my-string-flag-key": "value-1", + "my-boolean-flag-key": true, + "my-integer-flag-key": 3 + } + } + + Or, in YAML: + :: + + flagValues: + my-string-flag-key: "value-1" + my-boolean-flag-key: true + my-integer-flag-key: 1 + + It is also possible to specify both "flags" and "flagValues", if you want some flags + to have simple values and others to have complex behavior. However, it is an error to use the + same flag key or segment key more than once, either in a single file or across multiple files. + + If the data source encounters any error in any file-- malformed content, a missing file, or a + duplicate key-- it will not load flags from any of the files. + + :param array paths: The paths of the source files for loading flag data. These may be absolute paths + or relative to the current working directory. Files will be parsed as JSON unless the 'pyyaml' + package is installed, in which case YAML is also allowed. + :param bool auto_update: (default: false) True if the data source should watch for changes to the source file(s) + and reload flags whenever there is a change. The default implementation of this feature is based on + polling the filesystem, which may not perform well; if you install the 'watchdog' package (not + included by default, to avoid adding unwanted dependencies to the SDK), its native file watching + mechanism will be used instead. Note that auto-updating will only work if all of the files you + specified have valid directory paths at startup time. + :param float poll_interval: (default: 1) The minimum interval, in seconds, between checks for file + modifications-- used only if `auto_update` is true, and if the native file-watching mechanism from + `watchdog` is not being used. + :param bool force_polling: (default: false) True if the data source should implement auto-update via + polling the filesystem even if a native mechanism is available. This is mainly for SDK testing. 
+ + :return: an object (actually a lambda) to be stored in the `update_processor_class` configuration property + """ + return lambda config, store, ready : _FileDataSource(store, ready, paths, auto_update, poll_interval, force_polling) diff --git a/ldclient/redis_feature_store.py b/ldclient/redis_feature_store.py index 27139567..ff93c402 100644 --- a/ldclient/redis_feature_store.py +++ b/ldclient/redis_feature_store.py @@ -1,17 +1,8 @@ -import json +from ldclient.impl.integrations.redis.redis_feature_store import _RedisFeatureStoreCore -have_redis = False -try: - import redis - have_redis = True -except ImportError: - pass - -from ldclient import log from ldclient.feature_store import CacheConfig from ldclient.feature_store_helpers import CachingStoreWrapper -from ldclient.interfaces import FeatureStore, FeatureStoreCore -from ldclient.versioned_data_kind import FEATURES +from ldclient.interfaces import FeatureStore # Note that this class is now just a facade around CachingStoreWrapper, which is in turn delegating @@ -22,8 +13,8 @@ class RedisFeatureStore(FeatureStore): """A Redis-backed implementation of :class:`ldclient.feature_store.FeatureStore`. - This implementation class is deprecated and may be changed or removed in the future. Please use - :func:`ldclient.integrations.Redis.new_feature_store()`. + This module and this implementation class are deprecated and may be changed or removed in the future. + Please use :func:`ldclient.integrations.Redis.new_feature_store()`. """ def __init__(self, url='redis://localhost:6379/0', @@ -31,8 +22,6 @@ def __init__(self, max_connections=16, expiration=15, capacity=1000): - if not have_redis: - raise NotImplementedError("Cannot use Redis feature store because redis package is not installed") self.core = _RedisFeatureStoreCore(url, prefix, max_connections) # exposed for testing self._wrapper = CachingStoreWrapper(self.core, CacheConfig(expiration=expiration, capacity=capacity)) @@ -54,91 +43,3 @@ def delete(self, kind, key, version): @property def initialized(self): return self._wrapper.initialized - - -class _RedisFeatureStoreCore(FeatureStoreCore): - def __init__(self, url, prefix, max_connections): - - self._prefix = prefix - self._pool = redis.ConnectionPool.from_url(url=url, max_connections=max_connections) - self.test_update_hook = None # exposed for testing - log.info("Started RedisFeatureStore connected to URL: " + url + " using prefix: " + prefix) - - def _items_key(self, kind): - return "{0}:{1}".format(self._prefix, kind.namespace) - - def init_internal(self, all_data): - pipe = redis.Redis(connection_pool=self._pool).pipeline() - - all_count = 0 - - for kind, items in all_data.items(): - base_key = self._items_key(kind) - pipe.delete(base_key) - for key, item in items.items(): - item_json = json.dumps(item) - pipe.hset(base_key, key, item_json) - all_count = all_count + len(items) - pipe.execute() - log.info("Initialized RedisFeatureStore with %d items", all_count) - - def get_all_internal(self, kind): - r = redis.Redis(connection_pool=self._pool) - all_items = r.hgetall(self._items_key(kind)) - - if all_items is None or all_items is "": - all_items = {} - - results = {} - for key, item_json in all_items.items(): - key = key.decode('utf-8') # necessary in Python 3 - results[key] = json.loads(item_json.decode('utf-8')) - return results - - def get_internal(self, kind, key): - r = redis.Redis(connection_pool=self._pool) - item_json = r.hget(self._items_key(kind), key) - - if item_json is None or item_json is "": - 
log.debug("RedisFeatureStore: key %s not found in '%s'. Returning None.", key, kind.namespace) - return None - - return json.loads(item_json.decode('utf-8')) - - def upsert_internal(self, kind, item): - r = redis.Redis(connection_pool=self._pool) - base_key = self._items_key(kind) - key = item['key'] - item_json = json.dumps(item) - - while True: - pipeline = r.pipeline() - pipeline.watch(base_key) - old = self.get_internal(kind, key) - if self.test_update_hook is not None: - self.test_update_hook(base_key, key) - if old and old['version'] >= item['version']: - log.debug('RedisFeatureStore: Attempted to %s key: %s version %d with a version that is the same or older: %d in "%s"', - 'delete' if item.get('deleted') else 'update', - key, old['version'], item['version'], kind.namespace) - pipeline.unwatch() - return old - else: - pipeline.multi() - pipeline.hset(base_key, key, item_json) - try: - pipeline.execute() - # Unlike Redis implementations for other platforms, in redis-py a failed WATCH - # produces an exception rather than a null result from execute(). - except redis.exceptions.WatchError: - log.debug("RedisFeatureStore: concurrent modification detected, retrying") - continue - return item - - def initialized_internal(self): - r = redis.Redis(connection_pool=self._pool) - return r.exists(self._items_key(FEATURES)) - - def _before_update_transaction(self, base_key, key): - # exposed for testing - pass diff --git a/ldclient/sse_client.py b/ldclient/sse_client.py index c97eb2d4..5b41413b 100644 --- a/ldclient/sse_client.py +++ b/ldclient/sse_client.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - import re import time import warnings @@ -111,6 +109,10 @@ def __next__(self): return msg + # The following two lines make our iterator class compatible with both Python 2.x and 3.x, + # even though they expect different magic method names. We could accomplish the same thing + # by importing builtins.object and deriving from that, but this way it's easier to see + # what we're doing. if six.PY2: next = __next__ diff --git a/ldclient/streaming.py b/ldclient/streaming.py index bac83433..20599eb1 100644 --- a/ldclient/streaming.py +++ b/ldclient/streaming.py @@ -1,4 +1,3 @@ -from __future__ import absolute_import from collections import namedtuple import json diff --git a/ldclient/util.py b/ldclient/util.py index 618a7d9e..fbb2f11d 100644 --- a/ldclient/util.py +++ b/ldclient/util.py @@ -1,5 +1,3 @@ -from __future__ import division, with_statement, absolute_import - import certifi import logging import sys diff --git a/requirements.txt b/requirements.txt index 8787ac53..f86f3039 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,7 +1,6 @@ backoff>=1.4.3 certifi>=2018.4.16 expiringdict>=1.1.4 -future>=0.16.0 six>=1.10.0 pyRFC3339>=1.0 jsonpickle==0.9.3 diff --git a/setup.py b/setup.py index bf59d9a3..8a075cf8 100644 --- a/setup.py +++ b/setup.py @@ -19,12 +19,16 @@ def parse_requirements(filename): install_reqs = parse_requirements('requirements.txt') test_reqs = parse_requirements('test-requirements.txt') redis_reqs = parse_requirements('redis-requirements.txt') +consul_reqs = parse_requirements('consul-requirements.txt') +dynamodb_reqs = parse_requirements('dynamodb-requirements.txt') # reqs is a list of requirement # e.g. 
['django==1.5.1', 'mezzanine==1.4.6'] reqs = [ir for ir in install_reqs] testreqs = [ir for ir in test_reqs] redisreqs = [ir for ir in redis_reqs] +consulreqs = [ir for ir in consul_reqs] +dynamodbreqs = [ir for ir in dynamodb_reqs] class PyTest(Command): @@ -63,11 +67,14 @@ def run(self): 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', + 'Programming Language :: Python :: 3.7', 'Topic :: Software Development', 'Topic :: Software Development :: Libraries', ], extras_require={ - "redis": redisreqs + "redis": redisreqs, + "consul": consulreqs, + "dynamodb": dynamodbreqs }, tests_require=testreqs, cmdclass={'test': PyTest}, diff --git a/testing/test_feature_store.py b/testing/test_feature_store.py index 8ab8c422..ce0150cf 100644 --- a/testing/test_feature_store.py +++ b/testing/test_feature_store.py @@ -4,9 +4,17 @@ import redis import time -from ldclient.dynamodb_feature_store import _DynamoDBFeatureStoreCore, _DynamoDBHelpers +# Consul is only supported in some Python versions +have_consul = False +try: + import consul + have_consul = True +except ImportError: + pass + from ldclient.feature_store import CacheConfig, InMemoryFeatureStore -from ldclient.integrations import DynamoDB, Redis +from ldclient.impl.integrations.dynamodb.dynamodb_feature_store import _DynamoDBFeatureStoreCore, _DynamoDBHelpers +from ldclient.integrations import Consul, DynamoDB, Redis from ldclient.redis_feature_store import RedisFeatureStore from ldclient.versioned_data_kind import FEATURES @@ -15,6 +23,10 @@ class InMemoryTester(object): def init_store(self): return InMemoryFeatureStore() + @property + def supports_prefix(self): + return False + class RedisTester(object): redis_host = 'localhost' @@ -23,19 +35,46 @@ class RedisTester(object): def __init__(self, cache_config): self._cache_config = cache_config - def init_store(self): + def init_store(self, prefix=None): self._clear_data() - return Redis.new_feature_store(caching=self._cache_config) + return Redis.new_feature_store(caching=self._cache_config, prefix=prefix) + + @property + def supports_prefix(self): + return True def _clear_data(self): r = redis.StrictRedis(host=self.redis_host, port=self.redis_port, db=0) - r.delete("launchdarkly:features") + r.flushdb() class RedisWithDeprecatedConstructorTester(RedisTester): - def init_store(self): + def init_store(self, prefix=None): self._clear_data() - return RedisFeatureStore(expiration=(30 if self._cache_config.enabled else 0)) + return RedisFeatureStore(expiration=(30 if self._cache_config.enabled else 0), prefix=prefix) + + @property + def supports_prefix(self): + return True + + +class ConsulTester(object): + def __init__(self, cache_config): + self._cache_config = cache_config + + def init_store(self, prefix=None): + self._clear_data(prefix or "launchdarkly") + return Consul.new_feature_store(prefix=prefix, caching=self._cache_config) + + @property + def supports_prefix(self): + return True + + def _clear_data(self, prefix): + client = consul.Consul() + index, keys = client.kv.get(prefix + "/", recurse=True, keys=True) + for key in (keys or []): + client.kv.delete(key) class DynamoDBTester(object): @@ -51,10 +90,15 @@ class DynamoDBTester(object): def __init__(self, cache_config): self._cache_config = cache_config - def init_store(self): + def init_store(self, prefix=None): self._create_table() self._clear_data() - return DynamoDB.new_feature_store(self.table_name, dynamodb_opts=self.options) + return 
DynamoDB.new_feature_store(self.table_name, prefix=prefix, dynamodb_opts=self.options, + caching=self._cache_config) + + @property + def supports_prefix(self): + return True def _create_table(self): if self.table_created: @@ -131,6 +175,14 @@ class TestFeatureStore: DynamoDBTester(CacheConfig.disabled()) ] + if have_consul: + params.append(ConsulTester(CacheConfig.default())) + params.append(ConsulTester(CacheConfig.disabled())) + + @pytest.fixture(params=params) + def tester(self, request): + return request.param + @pytest.fixture(params=params) def store(self, request): return request.param.init_store() @@ -230,6 +282,39 @@ def test_upsert_older_version_after_delete(self, store): store.upsert(FEATURES, old_ver) assert store.get(FEATURES, 'foo', lambda x: x) is None + def test_stores_with_different_prefixes_are_independent(self, tester): + # This verifies that init(), get(), all(), and upsert() are all correctly using the specified key prefix. + # The delete() method isn't tested separately because it's implemented as a variant of upsert(). + if not tester.supports_prefix: + return + + flag_a1 = { 'key': 'flagA1', 'version': 1 } + flag_a2 = { 'key': 'flagA2', 'version': 1 } + flag_b1 = { 'key': 'flagB1', 'version': 1 } + flag_b2 = { 'key': 'flagB2', 'version': 1 } + store_a = tester.init_store('a') + store_b = tester.init_store('b') + + store_a.init({ FEATURES: { 'flagA1': flag_a1 } }) + store_a.upsert(FEATURES, flag_a2) + + store_b.init({ FEATURES: { 'flagB1': flag_b1 } }) + store_b.upsert(FEATURES, flag_b2) + + item = store_a.get(FEATURES, 'flagA1', lambda x: x) + assert item == flag_a1 + item = store_a.get(FEATURES, 'flagB1', lambda x: x) + assert item is None + items = store_a.all(FEATURES, lambda x: x) + assert items == { 'flagA1': flag_a1, 'flagA2': flag_a2 } + + item = store_b.get(FEATURES, 'flagB1', lambda x: x) + assert item == flag_b1 + item = store_b.get(FEATURES, 'flagA1', lambda x: x) + assert item is None + items = store_b.all(FEATURES, lambda x: x) + assert items == { 'flagB1': flag_b1, 'flagB2': flag_b2 } + class TestRedisFeatureStoreExtraTests: def test_upsert_race_condition_against_external_client_with_higher_version(self): diff --git a/testing/test_file_data_source.py b/testing/test_file_data_source.py index 68d1e5b7..2e232ec8 100644 --- a/testing/test_file_data_source.py +++ b/testing/test_file_data_source.py @@ -9,7 +9,7 @@ from ldclient.client import LDClient from ldclient.config import Config from ldclient.feature_store import InMemoryFeatureStore -from ldclient.file_data_source import FileDataSource +from ldclient.integrations import Files from ldclient.versioned_data_kind import FEATURES, SEGMENTS @@ -94,7 +94,7 @@ def teardown_function(): def make_data_source(**kwargs): global data_source - data_source = FileDataSource.factory(**kwargs)(Config(), store, ready) + data_source = Files.new_data_source(**kwargs)(Config(), store, ready) return data_source def make_temp_file(content): @@ -217,7 +217,7 @@ def test_reloads_modified_file_in_polling_mode(): def test_evaluates_full_flag_with_client_as_expected(): path = make_temp_file(all_properties_json) try: - factory = FileDataSource.factory(paths = path) + factory = Files.new_data_source(paths = path) client = LDClient(config=Config(update_processor_class = factory, send_events = False)) value = client.variation('flag1', { 'key': 'user' }, '') assert value == 'on' @@ -229,7 +229,7 @@ def test_evaluates_full_flag_with_client_as_expected(): def test_evaluates_simplified_flag_with_client_as_expected(): path = 
make_temp_file(all_properties_json) try: - factory = FileDataSource.factory(paths = path) + factory = Files.new_data_source(paths = path) client = LDClient(config=Config(update_processor_class = factory, send_events = False)) value = client.variation('flag2', { 'key': 'user' }, '') assert value == 'value2' diff --git a/testing/test_ldclient.py b/testing/test_ldclient.py index a31d2324..0e6c33a2 100644 --- a/testing/test_ldclient.py +++ b/testing/test_ldclient.py @@ -1,4 +1,3 @@ -from builtins import object from ldclient.client import LDClient, Config from ldclient.event_processor import NullEventProcessor from ldclient.feature_store import InMemoryFeatureStore diff --git a/testing/test_user_filter.py b/testing/test_user_filter.py index 15550541..e1711ffb 100644 --- a/testing/test_user_filter.py +++ b/testing/test_user_filter.py @@ -1,4 +1,3 @@ -from builtins import object import json from ldclient.client import Config from ldclient.user_filter import UserFilter
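Taken together, these changes let an application keep its flag data in Consul. A minimal end-to-end sketch (this assumes a local Consul agent on the default port; the prefix, SDK key, flag key, and user are placeholders, and the client bootstrap follows the SDK's usual `set_config` pattern):

```python
import ldclient
from ldclient.config import Config
from ldclient.feature_store import CacheConfig
from ldclient.integrations import Consul

# Build a Consul-backed store; all keys are namespaced under "my-app/".
store = Consul.new_feature_store(host='localhost', port=8500, prefix='my-app',
                                 caching=CacheConfig.default())

# Hand the store to the client via the feature_store configuration property.
ldclient.set_config(Config(sdk_key='YOUR_SDK_KEY', feature_store=store))
client = ldclient.get()

show_feature = client.variation('my-flag-key', {'key': 'user@example.com'}, False)
```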