Merge pull request #488 from splitio/development
9.6.0 release
chillaq committed Nov 3, 2023
2 parents 703b5cd + a6e4041 commit f96bd15
Showing 52 changed files with 3,253 additions and 1,645 deletions.
10 changes: 10 additions & 0 deletions CHANGES.txt
@@ -1,3 +1,13 @@
9.6.0 (Nov 3, 2023)
- Added support for Flag Sets on the SDK, which enables grouping feature flags and interacting with the group rather than with each flag individually (more details in our documentation):
- Added new variations of the get treatment methods to support evaluating flags in a given flag set or sets (a usage sketch follows this file's diff).
- get_treatments_by_flag_set and get_treatments_by_flag_sets
- get_treatments_with_config_by_flag_set and get_treatments_with_config_by_flag_sets
- Added a new optional Split Filter configuration option. This allows the SDK and Split services to synchronize only the flags in the specified flag sets, preventing unused or unwanted flags from being synced on the SDK instance and bringing all the benefits of a reduced payload.
- Note: Only applicable when the SDK is in charge of the rollout data synchronization. When not applicable, the SDK will log a warning on init.
- Updated the SDK manager methods to expose flag sets on flag views.
- Stopped raising an exception when posting Telemetry config data fails; the SDK now only logs the error.

9.5.1 (Sep 5, 2023)
- Excluded tests when building the package
- Fixed exception when fetching telemetry stats if no SSE Feature flags update events are stored
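As context for the release notes above, here is a minimal usage sketch of the new flag-set methods combined with the flagSetsFilter configuration option. The SDK key, flag set names, and traffic key are placeholders; get_factory, block_until_ready, and client() are the SDK's existing entry points, while the flag-set methods and config key come from this release.

from splitio import get_factory
from splitio.exceptions import TimeoutException

# Placeholder SDK key and flag set names, purely for illustration.
factory = get_factory('YOUR_SDK_KEY', config={'flagSetsFilter': ['backend', 'checkout']})
try:
    # Wait for readiness so evaluations do not trigger the new "SDK is not ready" warning.
    factory.block_until_ready(5)
except TimeoutException:
    pass

client = factory.client()

# Evaluate every flag that belongs to a single flag set: {flag_name: treatment}.
treatments = client.get_treatments_by_flag_set('user_123', 'backend')

# The *_with_config variants return {flag_name: (treatment, config_json)} instead.
treatments_with_config = client.get_treatments_with_config_by_flag_sets(
    'user_123', ['backend', 'checkout'])

factory.destroy()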
15 changes: 14 additions & 1 deletion splitio/api/commons.py
@@ -57,7 +57,7 @@ def record_telemetry(status_code, elapsed, metric_name, telemetry_runtime_produc
class FetchOptions(object):
"""Fetch Options object."""

def __init__(self, cache_control_headers=False, change_number=None):
def __init__(self, cache_control_headers=False, change_number=None, sets=None):
"""
Class constructor.
@@ -66,9 +66,13 @@ def __init__(self, cache_control_headers=False, change_number=None):
:param change_number: ChangeNumber to use for bypassing CDN in request.
:type change_number: int
:param sets: list of flag sets
:type sets: list
"""
self._cache_control_headers = cache_control_headers
self._change_number = change_number
self._sets = sets

@property
def cache_control_headers(self):
@@ -80,12 +84,19 @@ def change_number(self):
"""Return change number."""
return self._change_number

@property
def sets(self):
"""Return sets."""
return self._sets

def __eq__(self, other):
"""Match between other options."""
if self._cache_control_headers != other._cache_control_headers:
return False
if self._change_number != other._change_number:
return False
if self._sets != other._sets:
return False
return True


@@ -113,4 +124,6 @@ def build_fetch(change_number, fetch_options, metadata):
extra_headers[_CACHE_CONTROL] = _CACHE_CONTROL_NO_CACHE
if fetch_options.change_number is not None:
query['till'] = fetch_options.change_number
if fetch_options.sets is not None:
query['sets'] = fetch_options.sets
return query, extra_headers
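A quick sketch of the new sets support in FetchOptions, using only the names introduced in this diff (the set names are placeholders, and the exact value the synchronizer passes for sets is up to the caller):

from splitio.api.commons import FetchOptions

# The new optional `sets` argument is stored and exposed through a read-only property.
opts = FetchOptions(cache_control_headers=True, change_number=None, sets=['set_a', 'set_b'])
print(opts.sets)                                              # ['set_a', 'set_b']

# __eq__ now also compares sets, so two options only match when their flag set filters do.
print(opts == FetchOptions(True, None, ['set_a', 'set_b']))   # True
print(opts == FetchOptions(True, None, ['set_c']))            # False

# When sets is not None, build_fetch() above adds it to the request as query['sets'].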
2 changes: 2 additions & 0 deletions splitio/api/splits.py
@@ -59,6 +59,8 @@ def fetch_splits(self, change_number, fetch_options):
if 200 <= response.status_code < 300:
return json.loads(response.body)
else:
if response.status_code == 414:
_LOGGER.error('Error fetching feature flags; the number of flag sets provided is too large, causing a URI length error.')
raise APIException(response.body, response.status_code)
except HttpClientException as exc:
_LOGGER.error('Error fetching feature flags because an exception was raised by the HTTPClient')
1 change: 0 additions & 1 deletion splitio/api/telemetry.py
@@ -76,7 +76,6 @@ def record_init(self, configs):
'Error posting init config because an exception was raised by the HTTPClient'
)
_LOGGER.debug('Error: ', exc_info=True)
raise APIException('Init config data not flushed properly.') from exc

def record_stats(self, stats):
"""
138 changes: 133 additions & 5 deletions splitio/client/client.py
@@ -6,7 +6,7 @@
from splitio.models.impressions import Impression, Label
from splitio.models.events import Event, EventWrapper
from splitio.models.telemetry import get_latency_bucket_index, MethodExceptionsAndLatencies
from splitio.client import input_validator
from splitio.client import input_validator, config
from splitio.util.time import get_current_epoch_time_ms, utctime_ms

_LOGGER = logging.getLogger(__name__)
@@ -59,8 +59,9 @@ def destroyed(self):
"""Return whether the factory holding this client has been destroyed."""
return self._factory.destroyed

def _evaluate_if_ready(self, matching_key, bucketing_key, feature, attributes=None):
def _evaluate_if_ready(self, matching_key, bucketing_key, feature, method, attributes=None):
if not self.ready:
_LOGGER.warning("%s: The SDK is not ready, results may be incorrect for feature flag %s. Make sure to wait for SDK readiness before using this method", method, feature)
self._telemetry_init_producer.record_not_ready_usage()
return {
'treatment': CONTROL,
@@ -102,7 +103,7 @@ def _make_evaluation(self, key, feature_flag, attributes, method_name, metric_na
or not input_validator.validate_attributes(attributes, method_name):
return CONTROL, None

result = self._evaluate_if_ready(matching_key, bucketing_key, feature_flag, attributes)
result = self._evaluate_if_ready(matching_key, bucketing_key, feature_flag, method_name, attributes)

impression = self._build_impression(
matching_key,
@@ -167,7 +168,7 @@ def _make_evaluations(self, key, feature_flags, attributes, method_name, metric_

try:
evaluations = self._evaluate_features_if_ready(matching_key, bucketing_key,
list(feature_flags), attributes)
list(feature_flags), method_name, attributes)

for feature_flag in feature_flags:
try:
@@ -212,8 +213,9 @@ def _make_evaluations(self, key, feature_flags, attributes, method_name, metric_
_LOGGER.debug('Error: ', exc_info=True)
return input_validator.generate_control_treatments(list(feature_flags), method_name)

def _evaluate_features_if_ready(self, matching_key, bucketing_key, feature_flags, attributes=None):
def _evaluate_features_if_ready(self, matching_key, bucketing_key, feature_flags, method, attributes=None):
if not self.ready:
_LOGGER.warning("%s: The SDK is not ready, results may be incorrect for feature flags %s. Make sure to wait for SDK readiness before using this method", method, ', '.join([feature for feature in feature_flags]))
self._telemetry_init_producer.record_not_ready_usage()
return {
feature_flag: {
@@ -309,6 +311,132 @@ def get_treatments(self, key, feature_flags, attributes=None):
MethodExceptionsAndLatencies.TREATMENTS)
return {feature_flag: result[0] for (feature_flag, result) in with_config.items()}

def get_treatments_by_flag_set(self, key, flag_set, attributes=None):
"""
Get treatments for feature flags that contain given flag set.
This method never raises an exception. If there's a problem, the appropriate log message
will be generated and the method will return the CONTROL treatment.
:param key: The key for which to get the treatment
:type key: str
:param flag_set: flag set
:type flag_set: str
:param attributes: An optional dictionary of attributes
:type attributes: dict
:return: Dictionary with the result of all the feature flags provided
:rtype: dict
"""
return self._get_treatments_by_flag_sets( key, [flag_set], MethodExceptionsAndLatencies.TREATMENTS_BY_FLAG_SET, attributes)

def get_treatments_by_flag_sets(self, key, flag_sets, attributes=None):
"""
Get treatments for feature flags that contain given flag sets.
This method never raises an exception. If there's a problem, the appropriate log message
will be generated and the method will return the CONTROL treatment.
:param key: The key for which to get the treatment
:type key: str
:param flag_sets: list of flag sets
:type flag_sets: list
:param attributes: An optional dictionary of attributes
:type attributes: dict
:return: Dictionary with the result of all the feature flags provided
:rtype: dict
"""
return self._get_treatments_by_flag_sets( key, flag_sets, MethodExceptionsAndLatencies.TREATMENTS_BY_FLAG_SETS, attributes)

def get_treatments_with_config_by_flag_set(self, key, flag_set, attributes=None):
"""
Get treatments for feature flags that contain given flag set.
This method never raises an exception. If there's a problem, the appropriate log message
will be generated and the method will return the CONTROL treatment.
:param key: The key for which to get the treatment
:type key: str
:param flag_set: flag set
:type flag_set: str
:param attributes: An optional dictionary of attributes
:type attributes: dict
:return: Dictionary with the result of all the feature flags provided
:rtype: dict
"""
return self._get_treatments_by_flag_sets( key, [flag_set], MethodExceptionsAndLatencies.TREATMENTS_WITH_CONFIG_BY_FLAG_SET, attributes)

def get_treatments_with_config_by_flag_sets(self, key, flag_sets, attributes=None):
"""
Get treatments for feature flags that contain given flag sets.
This method never raises an exception. If there's a problem, the appropriate log message
will be generated and the method will return the CONTROL treatment.
:param key: The key for which to get the treatment
:type key: str
:param flag_sets: list of flag sets
:type flag_sets: list
:param attributes: An optional dictionary of attributes
:type attributes: dict
:return: Dictionary with the result of all the feature flags provided
:rtype: dict
"""
return self._get_treatments_by_flag_sets( key, flag_sets, MethodExceptionsAndLatencies.TREATMENTS_WITH_CONFIG_BY_FLAG_SETS, attributes)

def _get_treatments_by_flag_sets(self, key, flag_sets, method, attributes=None):
"""
Get treatments for feature flags that contain given flag sets.
This method never raises an exception. If there's a problem, the appropriate log message
will be generated and the method will return the CONTROL treatment.
:param key: The key for which to get the treatment
:type key: str
:param flag_sets: list of flag sets
:type flag_sets: list
:param method: Treatment by flag set method flavor
:type method: splitio.models.telemetry.MethodExceptionsAndLatencies
:param attributes: An optional dictionary of attributes
:type attributes: dict
:return: Dictionary with the result of all the feature flags provided
:rtype: dict
"""
feature_flags_names = self._get_feature_flag_names_by_flag_sets(flag_sets, method.value)
if feature_flags_names == []:
_LOGGER.warning("%s: No valid Flag set or no feature flags found for evaluating treatments" % (method.value))
return {}

if 'config' in method.value:
return self._make_evaluations(key, feature_flags_names, attributes, method.value,
method)

with_config = self._make_evaluations(key, feature_flags_names, attributes, method.value,
method)
return {feature_flag: result[0] for (feature_flag, result) in with_config.items()}


def _get_feature_flag_names_by_flag_sets(self, flag_sets, method_name):
"""
Sanitize given flag sets and return list of feature flag names associated with them
:param flag_sets: list of flag sets
:type flag_sets: list
:return: list of feature flag names
:rtype: list
"""
sanitized_flag_sets = input_validator.validate_flag_sets(flag_sets, method_name)
feature_flags_by_set = self._split_storage.get_feature_flags_by_sets(sanitized_flag_sets)
if feature_flags_by_set is None:
_LOGGER.warning("Fetching feature flags for flag set %s encountered an error, skipping this flag set." % (flag_sets))
return []
return feature_flags_by_set

def _build_impression( # pylint: disable=too-many-arguments
self,
matching_key,
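To make the return contract of the new client methods above concrete, here is a small self-contained illustration of the reshaping done at the end of _get_treatments_by_flag_sets (the flag names, treatments, and configs are made up):

# The *_with_config flavors return what _make_evaluations produces: {flag: (treatment, config)}.
with_config = {
    'new_checkout_flow': ('on', '{"color": "blue"}'),
    'backend_cache': ('off', None),
}

# The plain flavors keep only the treatment, mirroring the comprehension in the code above.
treatments_only = {feature_flag: result[0] for feature_flag, result in with_config.items()}
print(treatments_only)   # {'new_checkout_flow': 'on', 'backend_cache': 'off'}

# If no valid flag set is given, or no feature flags belong to the given sets,
# the methods log a warning and return an empty dict.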
13 changes: 9 additions & 4 deletions splitio/client/config.py
@@ -3,12 +3,12 @@
import logging

from splitio.engine.impressions import ImpressionsMode
from splitio.client.input_validator import validate_flag_sets


_LOGGER = logging.getLogger(__name__)
DEFAULT_DATA_SAMPLING = 1


DEFAULT_CONFIG = {
'operationMode': 'standalone',
'connectionTimeout': 1500,
@@ -58,10 +58,10 @@
'dataSampling': DEFAULT_DATA_SAMPLING,
'storageWrapper': None,
'storagePrefix': None,
'storageType': None
'storageType': None,
'flagSetsFilter': None
}


def _parse_operation_mode(sdk_key, config):
"""
Process incoming config to determine operation mode and storage type
@@ -118,7 +118,6 @@ def _sanitize_impressions_mode(storage_type, mode, refresh_rate=None):

return mode, refresh_rate


def sanitize(sdk_key, config):
"""
Look for inconsistencies or ill-formed configs and tune it accordingly.
@@ -143,4 +142,10 @@ def sanitize(sdk_key, config):
_LOGGER.warning('metricRefreshRate parameter minimum value is 60 seconds, defaulting to 3600 seconds.')
processed['metricsRefreshRate'] = 3600

if config['operationMode'] == 'consumer' and config.get('flagSetsFilter') is not None:
processed['flagSetsFilter'] = None
_LOGGER.warning('config: FlagSets filter is not applicable for Consumer modes where the SDK does not keep rollout data in sync. FlagSets filter was discarded.')
else:
processed['flagSetsFilter'] = sorted(validate_flag_sets(processed['flagSetsFilter'], 'SDK Config')) if processed['flagSetsFilter'] is not None else None

return processed
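For reference, a minimal sketch of what the new flagSetsFilter line in sanitize() does, reusing validate_flag_sets exactly as the code above does (the set names are placeholders, and the expected output assumes they pass validation):

from splitio.client.input_validator import validate_flag_sets

# Duplicates and invalid names are discarded by the validator; sanitize() then sorts the result.
flag_sets_filter = ['set_b', 'set_a', 'set_a']
processed_sets = sorted(validate_flag_sets(flag_sets_filter, 'SDK Config'))
print(processed_sets)   # expected: ['set_a', 'set_b']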
