Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
10 changes: 5 additions & 5 deletions flagsmith/flagsmith.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,7 @@
map_context_and_identity_data_to_context,
map_environment_document_to_context,
map_environment_document_to_environment_updated_at,
map_segment_results_to_identity_segments,
)
from flagsmith.models import DefaultFlag, Flags, Segment
from flagsmith.offline_handlers import OfflineHandler
Expand All @@ -22,6 +23,7 @@
from flagsmith.types import (
ApplicationMetadata,
JsonType,
SDKEvaluationContext,
StreamEvent,
TraitMapping,
)
Expand Down Expand Up @@ -106,7 +108,7 @@ def __init__(
self.default_flag_handler = default_flag_handler
self.enable_realtime_updates = enable_realtime_updates
self._analytics_processor: typing.Optional[AnalyticsProcessor] = None
self._evaluation_context: typing.Optional[engine.EvaluationContext] = None
self._evaluation_context: typing.Optional[SDKEvaluationContext] = None
self._environment_updated_at: typing.Optional[datetime] = None

# argument validation
Expand Down Expand Up @@ -283,10 +285,8 @@ def get_identity_segments(
evaluation_result = engine.get_evaluation_result(
context=context,
)
return [
Segment(id=int(segment_result["key"]), name=segment_result["name"])
for segment_result in evaluation_result["segments"]
]

return map_segment_results_to_identity_segments(evaluation_result["segments"])

def update_environment(self) -> None:
try:
Expand Down
45 changes: 37 additions & 8 deletions flagsmith/mappers.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,14 +6,20 @@

import sseclient
from flag_engine.context.types import (
EvaluationContext,
FeatureContext,
SegmentContext,
SegmentRule,
)
from flag_engine.result.types import SegmentResult
from flag_engine.segments.types import ContextValue

from flagsmith.types import StreamEvent, TraitConfig
from flagsmith.models import Segment
from flagsmith.types import (
SDKEvaluationContext,
SegmentMetadata,
StreamEvent,
TraitConfig,
)

OverrideKey = typing.Tuple[
str,
Expand All @@ -24,6 +30,24 @@
OverridesKey = typing.Tuple[OverrideKey, ...]


def map_segment_results_to_identity_segments(
    segment_results: list[SegmentResult[SegmentMetadata]],
) -> list[Segment]:
    """Map engine segment results to SDK `Segment` models.

    Only segment results carrying metadata with ``source == "api"`` and a
    non-``None`` ``flagsmith_id`` are included; synthetic segments (e.g.
    identity-override segments) are filtered out.
    """
    return [
        Segment(id=meta["flagsmith_id"], name=result["name"])
        for result in segment_results
        # Walrus keeps a single `.get("metadata")` lookup per result;
        # a missing or empty metadata dict is falsy and filtered out.
        if (meta := result.get("metadata"))
        and meta.get("source") == "api"
        and meta.get("flagsmith_id") is not None
    ]


def map_sse_event_to_stream_event(event: sseclient.Event) -> StreamEvent:
event_data = json.loads(event.data)
return {
Expand All @@ -45,7 +69,7 @@ def map_environment_document_to_environment_updated_at(


def map_context_and_identity_data_to_context(
context: EvaluationContext,
context: SDKEvaluationContext,
identifier: str,
traits: typing.Optional[
typing.Mapping[
Expand All @@ -56,7 +80,7 @@ def map_context_and_identity_data_to_context(
],
]
],
) -> EvaluationContext:
) -> SDKEvaluationContext:
return {
**context,
"identity": {
Expand All @@ -76,7 +100,7 @@ def map_context_and_identity_data_to_context(

def map_environment_document_to_context(
environment_document: dict[str, typing.Any],
) -> EvaluationContext:
) -> SDKEvaluationContext:
return {
"environment": {
"key": environment_document["api_key"],
Expand All @@ -90,7 +114,7 @@ def map_environment_document_to_context(
},
"segments": {
**{
(segment_key := str(segment["id"])): {
(segment_key := str(segment_id := segment["id"])): {
"key": segment_key,
"name": segment["name"],
"rules": _map_environment_document_rules_to_context_rules(
Expand All @@ -101,6 +125,10 @@ def map_environment_document_to_context(
segment.get("feature_states") or []
)
),
"metadata": SegmentMetadata(
flagsmith_id=segment_id,
source="api",
),
}
for segment in environment_document["project"]["segments"]
},
Expand All @@ -113,7 +141,7 @@ def map_environment_document_to_context(

def _map_identity_overrides_to_segments(
identity_overrides: list[dict[str, typing.Any]],
) -> dict[str, SegmentContext]:
) -> dict[str, SegmentContext[SegmentMetadata]]:
features_to_identifiers: typing.Dict[
OverridesKey,
typing.List[str],
Expand All @@ -137,7 +165,7 @@ def _map_identity_overrides_to_segments(
)
)
features_to_identifiers[overrides_key].append(identity_override["identifier"])
segment_contexts: typing.Dict[str, SegmentContext] = {}
segment_contexts: typing.Dict[str, SegmentContext[SegmentMetadata]] = {}
for overrides_key, identifiers in features_to_identifiers.items():
# Create a segment context for each unique set of overrides
# Generate a unique key to avoid collisions
Expand Down Expand Up @@ -168,6 +196,7 @@ def _map_identity_overrides_to_segments(
}
for feature_key, feature_name, feature_enabled, feature_value in overrides_key
],
metadata=SegmentMetadata(source="identity_overrides"),
)
return segment_contexts

Expand Down
9 changes: 5 additions & 4 deletions flagsmith/models.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,10 +3,11 @@
import typing
from dataclasses import dataclass, field

from flag_engine.result.types import EvaluationResult, FlagResult
from flag_engine.result.types import FlagResult

from flagsmith.analytics import AnalyticsProcessor
from flagsmith.exceptions import FlagsmithFeatureDoesNotExistError
from flagsmith.types import SDKEvaluationResult


@dataclass
Expand Down Expand Up @@ -57,19 +58,19 @@ class Flags:
@classmethod
def from_evaluation_result(
cls,
evaluation_result: EvaluationResult,
evaluation_result: SDKEvaluationResult,
analytics_processor: typing.Optional[AnalyticsProcessor],
default_flag_handler: typing.Optional[typing.Callable[[str], DefaultFlag]],
) -> Flags:
return cls(
flags={
flag["name"]: Flag(
flag_name: Flag(
enabled=flag["enabled"],
value=flag["value"],
feature_name=flag["name"],
feature_id=int(flag["feature_key"]),
)
for flag in evaluation_result["flags"]
for flag_name, flag in evaluation_result["flags"].items()
},
default_flag_handler=default_flag_handler,
_analytics_processor=analytics_processor,
Expand Down
13 changes: 6 additions & 7 deletions flagsmith/offline_handlers.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,13 +2,12 @@
from pathlib import Path
from typing import Protocol

from flag_engine.context.types import EvaluationContext

from flagsmith.mappers import map_environment_document_to_context
from flagsmith.types import SDKEvaluationContext


class OfflineHandler(Protocol):
def get_evaluation_context(self) -> EvaluationContext: ...
def get_evaluation_context(self) -> SDKEvaluationContext: ...


class EvaluationContextLocalFileHandler:
Expand All @@ -21,11 +20,11 @@ class EvaluationContextLocalFileHandler:
"""

def __init__(self, file_path: str) -> None:
self.evaluation_context: EvaluationContext = json.loads(
self.evaluation_context: SDKEvaluationContext = json.loads(
Path(file_path).read_text(),
)

def get_evaluation_context(self) -> EvaluationContext:
def get_evaluation_context(self) -> SDKEvaluationContext:
return self.evaluation_context


Expand All @@ -39,15 +38,15 @@ class EnvironmentDocumentLocalFileHandler:
"""

def __init__(self, file_path: str) -> None:
self.evaluation_context: EvaluationContext = (
self.evaluation_context: SDKEvaluationContext = (
map_environment_document_to_context(
json.loads(
Path(file_path).read_text(),
),
)
)

def get_evaluation_context(self) -> EvaluationContext:
def get_evaluation_context(self) -> SDKEvaluationContext:
return self.evaluation_context


Expand Down
13 changes: 13 additions & 0 deletions flagsmith/types.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,9 @@
import typing
from datetime import datetime

from flag_engine.context.types import EvaluationContext
from flag_engine.engine import ContextValue
from flag_engine.result.types import EvaluationResult
from typing_extensions import NotRequired, TypeAlias

_JsonScalarType: TypeAlias = typing.Union[
Expand Down Expand Up @@ -33,3 +35,14 @@ class TraitConfig(typing.TypedDict):
class ApplicationMetadata(typing.TypedDict):
name: NotRequired[str]
version: NotRequired[str]


class SegmentMetadata(typing.TypedDict):
    """SDK-specific metadata attached to segment contexts and results.

    Used to distinguish real Flagsmith API segments from synthetic
    segments (e.g. those generated from identity overrides).
    """

    flagsmith_id: NotRequired[int]
    """The ID of the segment used in Flagsmith API."""
    source: NotRequired[typing.Literal["api", "identity_overrides"]]
    """The source of the segment, e.g. 'api', 'identity_overrides'."""


# Aliases specialising the engine's generic context/result types with the
# SDK's segment metadata, so the rest of the SDK uses one concrete shape.
SDKEvaluationContext = EvaluationContext[SegmentMetadata]
SDKEvaluationResult = EvaluationResult[SegmentMetadata]
16 changes: 10 additions & 6 deletions poetry.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

2 changes: 1 addition & 1 deletion pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@ packages = [{ include = "flagsmith" }]
python = ">=3.9,<4"
requests = "^2.32.3"
requests-futures = "^1.0.1"
flagsmith-flag-engine = "^7.0.0"
flagsmith-flag-engine = { git = "https://github.com/Flagsmith/flagsmith-engine.git", branch = "feat/generic-metadata" }
sseclient-py = "^1.8.0"

[tool.poetry.group.dev]
Expand Down
4 changes: 2 additions & 2 deletions tests/conftest.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,13 +7,13 @@

import pytest
import responses
from flag_engine.engine import EvaluationContext
from pyfakefs.fake_filesystem import FakeFilesystem
from pytest_mock import MockerFixture

from flagsmith import Flagsmith
from flagsmith.analytics import AnalyticsProcessor
from flagsmith.mappers import map_environment_document_to_context
from flagsmith.types import SDKEvaluationContext

DATA_DIR = os.path.join(os.path.dirname(__file__), "data")

Expand Down Expand Up @@ -74,7 +74,7 @@ def local_eval_flagsmith(


@pytest.fixture()
def evaluation_context(environment_json: str) -> EvaluationContext:
def evaluation_context(environment_json: str) -> SDKEvaluationContext:
return map_environment_document_to_context(json.loads(environment_json))


Expand Down
Loading