Merged
33 changes: 33 additions & 0 deletions Samples/BasicTelemetry.sample.json
@@ -0,0 +1,33 @@
{
  "feature_management": {
    "feature_flags": [
      {
        "id": "TelemetryVariant",
        "description": "",
        "enabled": "true",
        "conditions": {
          "client_filters": []
        },
        "variants": [
          {
            "name": "True_Override",
            "configuration_value": "default",
            "status_override": "Disabled"
          }
        ],
        "allocation": {
          "default_when_enabled": "True_Override"
        },
        "telemetry": {
          "enabled": "true",
          "metadata": {
            "ETag": "cmwBRcIAq1jUyKL3Kj8bvf9jtxBrFg-R-ayExStMC90",
            "FeatureFlagReference": "https://fake-config-store/kv/.appconfig.featureflag/TelemetryVariant",
            "FeatureFlagId": "7vpkRJe452WVvlKXfA5XF3ASllwKsYZfC7D4w05rIoo",
            "AllocationId": "MExY1waco2tqen4EcJKK"
          }
        }
      }
    ]
  }
}
33 changes: 33 additions & 0 deletions Samples/BasicTelemetry.tests.json
@@ -0,0 +1,33 @@
[
  {
    "FeatureFlagName": "TelemetryVariant",
    "Inputs": {"User": "Aiden"},
    "IsEnabled": {
      "Result": "false"
    },
    "Variant": {
      "Result": {
        "Name": "True_Override",
        "ConfigurationValue": "default"
      }
    },
    "Telemetry": {
      "EventName": "FeatureEvaluation",
      "EventProperties": {
        "FeatureName": "TelemetryVariant",
        "Enabled": "False",
        "Version": "1.0.0",
        "Variant": "True_Override",
        "VariantAssignmentReason": "DefaultWhenEnabled",
        "VariantAssignmentPercentage": "100",
        "DefaultWhenEnabled": "True_Override",
        "AllocationId": "MExY1waco2tqen4EcJKK",
        "ETag": "cmwBRcIAq1jUyKL3Kj8bvf9jtxBrFg-R-ayExStMC90",
        "FeatureFlagReference": "https://fake-config-store/kv/.appconfig.featureflag/TelemetryVariant",
        "FeatureFlagId": "7vpkRJe452WVvlKXfA5XF3ASllwKsYZfC7D4w05rIoo",
        "TargetingId": "Aiden"
      }
    },
    "Description": "A basic feature flag with telemetry."
  }
]
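
Taken together, the two files state the expected contract: evaluating TelemetryVariant for user "Aiden" reports enabled = false (the allocated variant carries "status_override": "Disabled") while still assigning the True_Override variant, and the published telemetry event carries the listed properties. A rough sketch of that contract in Python, using only the API surface visible in this PR (not part of the PR itself; the relative path is illustrative):

    import json
    from featuremanagement import FeatureManager, TargetingContext

    # Load the sample file the same way the validation harness below does.
    with open("Samples/BasicTelemetry.sample.json", "r", encoding="utf-8") as f:
        feature_manager = FeatureManager(json.load(f))

    ctx = TargetingContext(user_id="Aiden")

    # The flag is enabled, but True_Override's status_override of "Disabled"
    # flips the is_enabled result to False...
    assert feature_manager.is_enabled("TelemetryVariant", ctx) is False

    # ...while get_variant still reports the allocated variant and its value.
    variant = feature_manager.get_variant("TelemetryVariant", ctx)
    assert variant.name == "True_Override"
    assert variant.configuration == "default"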
4 changes: 2 additions & 2 deletions libraryValidations/Python/requirements.txt
@@ -1,3 +1,3 @@
 pytest
-featuremanagement==2.0.0b2
-azure-appconfiguration-provider==2.0.0b2
+featuremanagement["AzureMonitor"]==2.0.0b3
+azure-appconfiguration-provider==2.0.0b3
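
Aside from the beta-version bump, the new ["AzureMonitor"] extra on featuremanagement pulls in the package's optional Azure Monitor integration; it is what makes the `from featuremanagement.azuremonitor import publish_telemetry` import in the updated test module below resolvable.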
106 changes: 74 additions & 32 deletions libraryValidations/Python/test_json_validations.py
@@ -4,18 +4,22 @@
 # license information.
 # --------------------------------------------------------------------------

-import logging
 import json
 import unittest
 from pytest import raises
 from featuremanagement import FeatureManager, TargetingContext
+from featuremanagement.azuremonitor import publish_telemetry
+from unittest.mock import patch, call

 FILE_PATH = "../../Samples/"
 SAMPLE_JSON_KEY = ".sample.json"
 TESTS_JSON_KEY = ".tests.json"
 FRIENDLY_NAME_KEY = "FriendlyName"
 IS_ENABLED_KEY = "IsEnabled"
 GET_VARIANT_KEY = "Variant"
+GET_TELEMETRY_KEY = "Telemetry"
+EVENT_NAME_KEY = "EventName"
+EVENT_PROPERTIES_KEY = "EventProperties"
 RESULT_KEY = "Result"
 VARIANT_NAME_KEY = "Name"
 CONFIGURATION_VALUE_KEY = "ConfigurationValue"
@@ -26,9 +30,6 @@
 EXCEPTION_KEY = "Exception"
 DESCRIPTION_KEY = "Description"

-logging.basicConfig(level=logging.INFO)
-logger = logging.getLogger(__name__)
-

 def convert_boolean_value(enabled):
     if enabled is None:
@@ -43,63 +44,82 @@ def convert_boolean_value(enabled):


 class TestFromFile(unittest.TestCase):
-    # method: is_enabled

     def test_no_filters(self):
         test_key = "NoFilters"
         self.run_tests(test_key)

-    # method: is_enabled
     def test_time_window_filter(self):
         test_key = "TimeWindowFilter"
         self.run_tests(test_key)

-    # method: is_enabled
     def test_targeting_filter(self):
         test_key = "TargetingFilter"
         self.run_tests(test_key)

-    # method: is_enabled
     def test_targeting_filter_modified(self):
         test_key = "TargetingFilter.modified"
         self.run_tests(test_key)

-    # method: is_enabled
     def test_requirement_type(self):
         test_key = "RequirementType"
         self.run_tests(test_key)

-    # method: is_enabled
     def test_basic_variant(self):
         test_key = "BasicVariant"
         self.run_tests(test_key)

-    # method: is_enabled
     def test_variant_assignment(self):
         test_key = "VariantAssignment"
         self.run_tests(test_key)

+    @patch("featuremanagement.azuremonitor._send_telemetry.azure_monitor_track_event")
[Inline review thread on the @patch line]

Member: This doesn't need to be a patch, right? Since we pass in the callback.

Contributor (author): If you look at the method telemetry_callback, you see we call the standard publish_telemetry method, so we still need this. The idea is that we are testing the publish_telemetry method; the custom callback is for validating publish_telemetry.

Member: Okay, that works. It feels more like a Python-specific test than one of the shared tests, but I don't think there's harm in checking it here too.

Contributor (author): I'm not sure what you mean by this? This is testing the values we calculate in publish_telemetry. Doesn't .NET also calculate a few values then too?
+    def test_basic_telemetry(self, track_event_mock):
+        test_key = "BasicTelemetry"
+        self._ran_callback = False
+        self._mock_track_event = track_event_mock
+        self.run_tests(test_key, telemetry_callback=self.telemetry_callback)
+        assert self._ran_callback
+
     @staticmethod
-    def load_from_file(file):
+    def load_from_file(file, telemetry_callback=None):
         with open(FILE_PATH + file, "r", encoding="utf-8") as feature_flags_file:
             feature_flags = json.load(feature_flags_file)

-        feature_manager = FeatureManager(feature_flags)
+        feature_manager = FeatureManager(
+            feature_flags, on_feature_evaluated=telemetry_callback
+        )
         assert feature_manager is not None

         return feature_manager

-    # method: is_enabled
-    def run_tests(self, test_key):
-        feature_manager = self.load_from_file(test_key + SAMPLE_JSON_KEY)
-
-        with open(FILE_PATH + test_key + TESTS_JSON_KEY, "r", encoding="utf-8") as feature_flag_test_file:
+    def telemetry_callback(self, evaluation_event):
+        publish_telemetry(evaluation_event)
+        expected_telemetry = self._feature_flag_test.get(GET_TELEMETRY_KEY, {})
+        self._mock_track_event.assert_called_once()
+        self.assertEqual(self._mock_track_event.call_args[0][0], expected_telemetry.get(EVENT_NAME_KEY, None))
+        (event_properties) = self._mock_track_event.call_args[0][1]
+        self.assertEqual(sorted(event_properties), sorted(expected_telemetry.get(EVENT_PROPERTIES_KEY, {})))
+        self._ran_callback = True
+        self._mock_track_event.reset_mock()
+
+    def run_tests(self, test_key, telemetry_callback=None):
+        feature_manager = self.load_from_file(
+            test_key + SAMPLE_JSON_KEY, telemetry_callback=telemetry_callback
+        )
+
+        with open(
+            FILE_PATH + test_key + TESTS_JSON_KEY, "r", encoding="utf-8"
+        ) as feature_flag_test_file:
             feature_flag_tests = json.load(feature_flag_test_file)

         for feature_flag_test in feature_flag_tests:
+            self._feature_flag_test = feature_flag_test
             is_enabled = feature_flag_test[IS_ENABLED_KEY]
             get_variant = feature_flag_test.get(GET_VARIANT_KEY, None)
-            expected_is_enabled_result = convert_boolean_value(is_enabled.get(RESULT_KEY))
+            expected_is_enabled_result = convert_boolean_value(
+                is_enabled.get(RESULT_KEY)
+            )
             feature_flag_id = test_key + "." + feature_flag_test[FEATURE_FLAG_NAME_KEY]

             failed_description = f"Test {feature_flag_id} failed. Description: {feature_flag_test[DESCRIPTION_KEY]}"
@@ -109,25 +129,47 @@ def run_tests(self, test_key):
                 groups = feature_flag_test[INPUTS_KEY].get(GROUPS_KEY, [])
                 assert (
                     feature_manager.is_enabled(
-                        feature_flag_test[FEATURE_FLAG_NAME_KEY], TargetingContext(user_id=user, groups=groups)
+                        feature_flag_test[FEATURE_FLAG_NAME_KEY],
+                        TargetingContext(user_id=user, groups=groups),
                     )
                     == expected_is_enabled_result
                 ), failed_description
             else:
                 with raises(ValueError) as ex_info:
-                    feature_manager.is_enabled(feature_flag_test[FEATURE_FLAG_NAME_KEY])
+                    feature_manager.is_enabled(
+                        feature_flag_test[FEATURE_FLAG_NAME_KEY],
+                        TargetingContext(user_id=user, groups=groups),
+                    )
                 expected_message = is_enabled.get(EXCEPTION_KEY)
                 assert str(ex_info.value) == expected_message, failed_description

-            if get_variant is not None and RESULT_KEY in get_variant:
+            if get_variant:
                 user = feature_flag_test[INPUTS_KEY].get(USER_KEY, None)
                 groups = feature_flag_test[INPUTS_KEY].get(GROUPS_KEY, [])
-                variant = feature_manager.get_variant(feature_flag_test[FEATURE_FLAG_NAME_KEY], TargetingContext(user_id=user, groups=groups))
-
-                if get_variant[RESULT_KEY] == None:
-                    assert variant == None
-                else:
-                    if VARIANT_NAME_KEY in get_variant[RESULT_KEY]:
-                        assert variant.name == get_variant[RESULT_KEY][VARIANT_NAME_KEY], failed_description
-
-                    assert variant.configuration == get_variant[RESULT_KEY][CONFIGURATION_VALUE_KEY], failed_description
+                if RESULT_KEY not in get_variant:
+                    with raises(ValueError) as ex_info:
+                        feature_manager.get_variant(
+                            feature_flag_test[FEATURE_FLAG_NAME_KEY],
+                            TargetingContext(user_id=user, groups=groups),
+                        )
+                    expected_message = get_variant.get(EXCEPTION_KEY)
+                    assert str(ex_info.value) == expected_message, failed_description
+                    continue
+
+                variant = feature_manager.get_variant(
+                    feature_flag_test[FEATURE_FLAG_NAME_KEY],
+                    TargetingContext(user_id=user, groups=groups),
+                )
+                if not get_variant[RESULT_KEY]:
+                    assert not variant
+                    continue
+                if VARIANT_NAME_KEY in get_variant[RESULT_KEY]:
+                    assert (
+                        variant.name == get_variant[RESULT_KEY][VARIANT_NAME_KEY]
+                    ), failed_description
+
+                assert (
+                    variant.configuration
+                    == get_variant[RESULT_KEY][CONFIGURATION_VALUE_KEY]
+                ), failed_description
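
To restate the review discussion above in code form, this is the call chain the new test exercises (a sketch of the flow, not library source):

    # feature_manager.is_enabled(...) / feature_manager.get_variant(...)
    #   -> on_feature_evaluated callback (telemetry_callback above)
    #        -> publish_telemetry(evaluation_event)          # real code under test
    #             -> azure_monitor_track_event(name, props)  # patched mock; the
    #                                                        # assertions run here
    #
    # The callback is the injection point, but the patch is still required
    # because publish_telemetry ultimately emits through azure_monitor_track_event.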