Commit

chore: Formatting code (#222)
aliabbasrizvi committed Nov 14, 2019
1 parent 955712b commit d3ffe33
Showing 56 changed files with 16,854 additions and 17,912 deletions.
5 changes: 5 additions & 0 deletions .flake8
@@ -0,0 +1,5 @@
[flake8]
# E722 - do not use bare 'except'
ignore = E722
exclude = optimizely/lib/pymmh3.py,*virtualenv*
max-line-length = 120
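For context, E722 is flake8's check for bare 'except' clauses; a minimal sketch of the pattern this configuration stops reporting (the function below is hypothetical, not taken from the SDK):

def parse_count(value):
    try:
        return int(value)
    except:  # flake8 would normally flag this line as E722; ignored by this config
        return 0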
136 changes: 66 additions & 70 deletions optimizely/bucketer.py
@@ -12,10 +12,11 @@
# limitations under the License.

import math

try:
    import mmh3
except ImportError:
    from .lib import pymmh3 as mmh3


MAX_TRAFFIC_VALUE = 10000
@@ -27,15 +28,15 @@


class Bucketer(object):
""" Optimizely bucketing algorithm that evenly distributes visitors. """
""" Optimizely bucketing algorithm that evenly distributes visitors. """

def __init__(self):
""" Bucketer init method to set bucketing seed and logger instance. """
def __init__(self):
""" Bucketer init method to set bucketing seed and logger instance. """

self.bucket_seed = HASH_SEED
self.bucket_seed = HASH_SEED

def _generate_unsigned_hash_code_32_bit(self, bucketing_id):
""" Helper method to retrieve hash code.
def _generate_unsigned_hash_code_32_bit(self, bucketing_id):
""" Helper method to retrieve hash code.
Args:
bucketing_id: ID for bucketing.
@@ -44,11 +45,11 @@ def _generate_unsigned_hash_code_32_bit(self, bucketing_id):
Hash code which is a 32 bit unsigned integer.
"""

        # Adjusting MurmurHash code to be unsigned
        return mmh3.hash(bucketing_id, self.bucket_seed) & UNSIGNED_MAX_32_BIT_VALUE

    def _generate_bucket_value(self, bucketing_id):
        """ Helper function to generate bucket value in half-closed interval [0, MAX_TRAFFIC_VALUE).
Args:
bucketing_id: ID for bucketing.
@@ -57,11 +58,11 @@ def _generate_bucket_value(self, bucketing_id):
Bucket value corresponding to the provided bucketing ID.
"""

        ratio = float(self._generate_unsigned_hash_code_32_bit(bucketing_id)) / MAX_HASH_VALUE
        return math.floor(ratio * MAX_TRAFFIC_VALUE)
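To make the arithmetic above concrete, here is a hedged sketch of the full hash-to-bucket mapping, assuming the mmh3 package is installed; the bucketing ID is an arbitrary example value, not an SDK fixture:

import math

import mmh3  # the SDK itself falls back to optimizely.lib.pymmh3 if this import fails

MAX_TRAFFIC_VALUE = 10000
UNSIGNED_MAX_32_BIT_VALUE = 0xFFFFFFFF
MAX_HASH_VALUE = math.pow(2, 32)
HASH_SEED = 1

# mmh3.hash returns a signed 32-bit int; masking yields its unsigned value.
unsigned_hash = mmh3.hash('example-user-id', HASH_SEED) & UNSIGNED_MAX_32_BIT_VALUE
# Dividing by 2**32 and scaling floors the hash into [0, 10000).
bucket_value = math.floor(float(unsigned_hash) / MAX_HASH_VALUE * MAX_TRAFFIC_VALUE)
assert 0 <= bucket_value < MAX_TRAFFIC_VALUE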

    def find_bucket(self, project_config, bucketing_id, parent_id, traffic_allocations):
        """ Determine entity based on bucket value and traffic allocations.
Args:
project_config: Instance of ProjectConfig.
@@ -73,22 +74,21 @@ def find_bucket(self, project_config, bucketing_id, parent_id, traffic_allocations):
Entity ID which may represent experiment or variation.
"""

        bucketing_key = BUCKETING_ID_TEMPLATE.format(bucketing_id=bucketing_id, parent_id=parent_id)
        bucketing_number = self._generate_bucket_value(bucketing_key)
        project_config.logger.debug(
            'Assigned bucket %s to user with bucketing ID "%s".' % (bucketing_number, bucketing_id)
        )

        for traffic_allocation in traffic_allocations:
            current_end_of_range = traffic_allocation.get('endOfRange')
            if bucketing_number < current_end_of_range:
                return traffic_allocation.get('entityId')

        return None
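A hedged end-to-end sketch of find_bucket, assuming the SDK at this commit is importable; the stub config only supplies the logger the method needs, and the allocation entity IDs are invented:

import logging

from optimizely.bucketer import Bucketer

class StubConfig(object):
    logger = logging.getLogger('bucketer-demo')

# 'endOfRange' values are cumulative exclusive upper bounds:
# buckets 0-4999 map to variation_a, 5000-9999 to variation_b.
allocations = [
    {'entityId': 'variation_a', 'endOfRange': 5000},
    {'entityId': 'variation_b', 'endOfRange': 10000},
]

# The same (bucketing_id, parent_id) pair always hashes to the same bucket,
# so assignment is deterministic across calls and processes.
entity_id = Bucketer().find_bucket(StubConfig(), 'user-42', 'exp-1', allocations)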

    def bucket(self, project_config, experiment, user_id, bucketing_id):
        """ For a given experiment and bucketing ID determines variation to be shown to user.
Args:
project_config: Instance of ProjectConfig.
@@ -100,45 +100,41 @@ def bucket(self, project_config, experiment, user_id, bucketing_id):
Variation in which user with ID user_id will be put in. None if no variation.
"""

        if not experiment:
            return None

        # Determine if experiment is in a mutually exclusive group
        if experiment.groupPolicy in GROUP_POLICIES:
            group = project_config.get_group(experiment.groupId)

            if not group:
                return None

            user_experiment_id = self.find_bucket(
                project_config, bucketing_id, experiment.groupId, group.trafficAllocation,
            )
            if not user_experiment_id:
                project_config.logger.info('User "%s" is in no experiment.' % user_id)
                return None

            if user_experiment_id != experiment.id:
                project_config.logger.info(
                    'User "%s" is not in experiment "%s" of group %s.' % (user_id, experiment.key, experiment.groupId)
                )
                return None

            project_config.logger.info(
                'User "%s" is in experiment %s of group %s.' % (user_id, experiment.key, experiment.groupId)
            )

        # Bucket user if not in white-list and in group (if any)
        variation_id = self.find_bucket(project_config, bucketing_id, experiment.id, experiment.trafficAllocation)
        if variation_id:
            variation = project_config.get_variation_from_id(experiment.key, variation_id)
            project_config.logger.info(
                'User "%s" is in variation "%s" of experiment %s.' % (user_id, variation.key, experiment.key)
            )
            return variation

        project_config.logger.info('User "%s" is in no variation.' % user_id)
        return None
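The group branch above enforces mutual exclusion in two stages: the user is first bucketed among the group's experiments, and only if that lands on this experiment is the user then bucketed into a variation. A hedged illustration with invented allocation data:

# Stage 1: group-level allocation decides which experiment (if any) the user joins.
group_traffic_allocation = [
    {'entityId': 'exp_1', 'endOfRange': 5000},   # 50% of the group's traffic
    {'entityId': 'exp_2', 'endOfRange': 10000},  # the other 50%
]
# Stage 2: within the chosen experiment, allocation decides the variation.
experiment_traffic_allocation = [
    {'entityId': 'var_a', 'endOfRange': 10000},  # 100% of exp_1's share
]
# A user whose group-level bucket falls in exp_2's range gets None back from
# bucket() when evaluated against exp_1, so the two experiments never overlap.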
99 changes: 51 additions & 48 deletions optimizely/config_manager.py
@@ -33,10 +33,7 @@
class BaseConfigManager(ABC):
""" Base class for Optimizely's config manager. """

def __init__(self,
logger=None,
error_handler=None,
notification_center=None):
def __init__(self, logger=None, error_handler=None, notification_center=None):
""" Initialize config manager.
Args:
@@ -74,12 +71,9 @@ def get_config(self):
class StaticConfigManager(BaseConfigManager):
""" Config manager that returns ProjectConfig based on provided datafile. """

def __init__(self,
datafile=None,
logger=None,
error_handler=None,
notification_center=None,
skip_json_validation=False):
def __init__(
self, datafile=None, logger=None, error_handler=None, notification_center=None, skip_json_validation=False,
):
""" Initialize config manager. Datafile has to be provided to use.
Args:
@@ -91,9 +85,9 @@ def __init__(self,
validation upon object invocation. By default
JSON schema validation will be performed.
"""
        super(StaticConfigManager, self).__init__(
            logger=logger, error_handler=error_handler, notification_center=notification_center,
        )
        self._config = None
        self.validate_schema = not skip_json_validation
        self._set_config(datafile)
@@ -153,17 +147,19 @@ def get_config(self):
class PollingConfigManager(StaticConfigManager):
""" Config manager that polls for the datafile and updated ProjectConfig based on an update interval. """

def __init__(self,
sdk_key=None,
datafile=None,
update_interval=None,
blocking_timeout=None,
url=None,
url_template=None,
logger=None,
error_handler=None,
notification_center=None,
skip_json_validation=False):
def __init__(
self,
sdk_key=None,
datafile=None,
update_interval=None,
blocking_timeout=None,
url=None,
url_template=None,
logger=None,
error_handler=None,
notification_center=None,
skip_json_validation=False,
):
""" Initialize config manager. One of sdk_key or url has to be set to be able to use.
Args:
@@ -185,13 +181,16 @@ def __init__(self,
"""
        self._config_ready_event = threading.Event()
        super(PollingConfigManager, self).__init__(
            datafile=datafile,
            logger=logger,
            error_handler=error_handler,
            notification_center=notification_center,
            skip_json_validation=skip_json_validation,
        )
        self.datafile_url = self.get_datafile_url(
            sdk_key, url, url_template or enums.ConfigManager.DATAFILE_URL_TEMPLATE
        )
        self.set_update_interval(update_interval)
        self.set_blocking_timeout(blocking_timeout)
        self.last_modified = None
@@ -227,7 +226,8 @@ def get_datafile_url(sdk_key, url, url_template):
                return url_template.format(sdk_key=sdk_key)
            except (AttributeError, KeyError):
                raise optimizely_exceptions.InvalidInputException(
                    'Invalid url_template {} provided.'.format(url_template)
                )

        return url
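For reference, the template substitution above behaves as sketched below; the SDK key is a placeholder, and the template string mirrors (but is assumed from) enums.ConfigManager.DATAFILE_URL_TEMPLATE:

DATAFILE_URL_TEMPLATE = 'https://cdn.optimizely.com/datafiles/{sdk_key}.json'

url = DATAFILE_URL_TEMPLATE.format(sdk_key='SoMeSdKkEy')  # placeholder key
# url == 'https://cdn.optimizely.com/datafiles/SoMeSdKkEy.json'

# A template missing the named field raises KeyError inside str.format,
# which get_datafile_url converts into InvalidInputException.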

@@ -238,8 +238,8 @@ def _set_config(self, datafile):
datafile: JSON string representing the Optimizely project.
"""
        if datafile or self._config_ready_event.is_set():
            super(PollingConfigManager, self)._set_config(datafile=datafile)
            self._config_ready_event.set()

    def get_config(self):
        """ Returns instance of ProjectConfig. Returns immediately if project config is ready otherwise
@@ -269,9 +269,10 @@ def set_update_interval(self, update_interval):

        # If polling interval is less than or equal to 0 then set it to default update interval.
        if update_interval <= 0:
            self.logger.debug(
                'update_interval value {} too small. Defaulting to {}'.format(
                    update_interval, enums.ConfigManager.DEFAULT_UPDATE_INTERVAL
                )
            )
            update_interval = enums.ConfigManager.DEFAULT_UPDATE_INTERVAL

@@ -294,9 +295,10 @@ def set_blocking_timeout(self, blocking_timeout):

        # If blocking timeout is less than 0 then set it to default blocking timeout.
        if blocking_timeout < 0:
            self.logger.debug(
                'blocking timeout value {} too small. Defaulting to {}'.format(
                    blocking_timeout, enums.ConfigManager.DEFAULT_BLOCKING_TIMEOUT
                )
            )
            blocking_timeout = enums.ConfigManager.DEFAULT_BLOCKING_TIMEOUT

@@ -337,9 +339,9 @@ def fetch_datafile(self):
        if self.last_modified:
            request_headers[enums.HTTPHeaders.IF_MODIFIED_SINCE] = self.last_modified

        response = requests.get(
            self.datafile_url, headers=request_headers, timeout=enums.ConfigManager.REQUEST_TIMEOUT,
        )
        self._handle_response(response)

    @property
@@ -350,12 +352,13 @@ def is_running(self):
    def _run(self):
        """ Triggered as part of the thread which fetches the datafile and sleeps until next update interval. """
        try:
            while self.is_running:
                self.fetch_datafile()
                time.sleep(self.update_interval)
        except (OSError, OverflowError) as err:
            self.logger.error(
                'Error in time.sleep. ' 'Provided update_interval value may be too big. Error: {}'.format(str(err))
            )
            raise

    def start(self):
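A hedged usage sketch of the polling manager as a whole, assuming the SDK at this commit is installed; 'SoMeSdKkEy' is a placeholder key and the interval values are arbitrary:

from optimizely.config_manager import PollingConfigManager

config_manager = PollingConfigManager(
    sdk_key='SoMeSdKkEy',   # resolved into the datafile URL via the template above
    update_interval=300,    # seconds between fetches; values <= 0 fall back to the default
    blocking_timeout=10,    # get_config() waits up to this long for the first datafile
)
# get_config() blocks until the first datafile has been fetched (or the
# blocking timeout elapses), then returns the current ProjectConfig.
project_config = config_manager.get_config()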
