
Commit 8eaa386

feat!: Use new EvaluationResult, updated test data (#261)
1 parent b4a47d7 commit 8eaa386

File tree: 9 files changed, +86 -207 lines


.gitmodules

Lines changed: 1 addition & 1 deletion
@@ -1,4 +1,4 @@
 [submodule "tests/engine_tests/engine-test-data"]
 	path = tests/engine_tests/engine-test-data
 	url = https://github.com/flagsmith/engine-test-data.git
-	branch = feat/context-values
+	tag = v2.0.0

flag_engine/result/types.py

Lines changed: 7 additions & 12 deletions
@@ -1,22 +1,18 @@
 # generated by datamodel-codegen:
-# filename: result.json
-# timestamp: 2025-08-11T11:47:46+00:00
+# filename: https://raw.githubusercontent.com/Flagsmith/flagsmith/refs/heads/chore/improve-evaluation-result/sdk/evaluation-result.json  # noqa: E501
+# timestamp: 2025-10-01T11:31:34+00:00
 
 from __future__ import annotations
 
-from typing import Any, List, Optional, TypedDict
-
-from typing_extensions import NotRequired
-
-from flag_engine.context.types import EvaluationContext
+from typing import Any, Dict, List, TypedDict
 
 
 class FlagResult(TypedDict):
-    name: str
     feature_key: str
+    name: str
     enabled: bool
-    value: NotRequired[Optional[Any]]
-    reason: NotRequired[str]
+    value: Any
+    reason: str
 
 
 class SegmentResult(TypedDict):
@@ -25,6 +21,5 @@ class SegmentResult(TypedDict):
 
 
 class EvaluationResult(TypedDict):
-    context: EvaluationContext
-    flags: List[FlagResult]
+    flags: Dict[str, FlagResult]
     segments: List[SegmentResult]
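
Under the new types, a result value no longer echoes the context and keys its flags by feature name. A minimal sketch of a conforming value (all concrete values, including the "DEFAULT" reason string, are illustrative assumptions, not taken from this commit):

from flag_engine.result.types import EvaluationResult

# Illustrative only: field values and the reason string are made up.
example_result: EvaluationResult = {
    "flags": {
        "dark_mode": {  # keyed by feature name, no longer a list entry
            "feature_key": "42",
            "name": "dark_mode",
            "enabled": True,
            "value": "on",
            "reason": "DEFAULT",
        },
    },
    "segments": [],
}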

flag_engine/segments/constants.py

Lines changed: 1 addition & 1 deletion
@@ -21,5 +21,5 @@
 IS_NOT_SET: ConditionOperator = "IS_NOT_SET"
 IN: ConditionOperator = "IN"
 
-# Lowest possible priority for segment overrides
+# Weakest possible priority for segment overrides
 DEFAULT_PRIORITY = float("inf")
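
A small sketch of why float("inf") reads as the weakest priority, assuming lower numeric values win when segment overrides compete (an assumption; the comparison itself is outside this diff):

DEFAULT_PRIORITY = float("inf")

# Assumption: lower value wins. Any explicit, finite priority then sorts
# ahead of the implicit default, so an override that never set a priority
# cannot beat one that did.
overrides = [("implicit", DEFAULT_PRIORITY), ("explicit", 1.0)]
winner = min(overrides, key=lambda item: item[1])
print(winner)  # ('explicit', 1.0)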

flag_engine/segments/evaluator.py

Lines changed: 12 additions & 16 deletions
@@ -40,7 +40,7 @@ def get_evaluation_result(context: EvaluationContext) -> EvaluationResult:
     :return: EvaluationResult containing the context, flags, and segments
     """
     segments: list[SegmentResult] = []
-    flags: list[FlagResult] = []
+    flags: dict[str, FlagResult] = {}
 
     segment_feature_contexts: dict[SupportsStr, FeatureContextWithSegmentName] = {}
 
@@ -82,29 +82,25 @@ def get_evaluation_result(context: EvaluationContext) -> EvaluationResult:
         else None
     )
     for feature_context in (context.get("features") or {}).values():
+        feature_name = feature_context["name"]
         if feature_context_with_segment_name := segment_feature_contexts.get(
             feature_context["feature_key"],
         ):
             feature_context = feature_context_with_segment_name["feature_context"]
-            flags.append(
-                {
-                    "enabled": feature_context["enabled"],
-                    "feature_key": feature_context["feature_key"],
-                    "name": feature_context["name"],
-                    "reason": f"TARGETING_MATCH; segment={feature_context_with_segment_name['segment_name']}",
-                    "value": feature_context.get("value"),
-                }
-            )
+            flags[feature_name] = {
+                "enabled": feature_context["enabled"],
+                "feature_key": feature_context["feature_key"],
+                "name": feature_context["name"],
+                "reason": f"TARGETING_MATCH; segment={feature_context_with_segment_name['segment_name']}",
+                "value": feature_context.get("value"),
+            }
             continue
-        flags.append(
-            get_flag_result_from_feature_context(
-                feature_context=feature_context,
-                key=identity_key,
-            )
+        flags[feature_name] = get_flag_result_from_feature_context(
+            feature_context=feature_context,
+            key=identity_key,
         )
 
     return {
-        "context": context,
         "flags": flags,
         "segments": segments,
     }
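
With flags keyed by feature name, consumers index the result directly instead of scanning a list. A usage sketch, assuming the context shape implied by this diff (environment, features keyed by name, optional identity and segments); every concrete value below is made up:

from flag_engine.context.types import EvaluationContext
from flag_engine.engine import get_evaluation_result

# Hand-written context; all values are illustrative, not repository data.
context: EvaluationContext = {
    "environment": {"key": "example-env-key", "name": "Example Environment"},
    "features": {
        "dark_mode": {
            "key": "1",
            "feature_key": "1",
            "name": "dark_mode",
            "enabled": True,
            "value": "on",
        },
    },
    "segments": {},
    "identity": {
        "identifier": "example-user",
        "key": "example-user",
        "traits": {},
    },
}

result = get_evaluation_result(context)

# Previously: next(f for f in result["flags"] if f["name"] == "dark_mode")
dark_mode = result["flags"]["dark_mode"]
print(dark_mode["enabled"], dark_mode["value"])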

requirements-dev.in

Lines changed: 1 addition & 0 deletions
@@ -11,3 +11,4 @@ types-setuptools
 mypy
 absolufy-imports
 datamodel-code-generator
+pyjson5

requirements-dev.txt

Lines changed: 2 additions & 0 deletions
@@ -93,6 +93,8 @@ pygments==2.19.2
     # via
     #   pytest
     #   rich
+pyjson5==2.0.0
+    # via -r requirements-dev.in
 pyproject-hooks==1.2.0
     # via
     #   build
Submodule engine-test-data updated 147 files

tests/engine_tests/test_engine.py

Lines changed: 37 additions & 148 deletions
@@ -1,176 +1,65 @@
-import json
 import typing
-from dataclasses import dataclass
-from operator import itemgetter
+from itertools import chain
 from pathlib import Path
 
+import pyjson5
 import pytest
-from pytest_mock import MockerFixture
+from _pytest.mark import ParameterSet
 
-from flag_engine.context.types import EvaluationContext, FeatureContext, SegmentRule
+from flag_engine.context.types import EvaluationContext
 from flag_engine.engine import get_evaluation_result
+from flag_engine.result.types import EvaluationResult
 
-MODULE_PATH = Path(__file__).parent.resolve()
+TEST_CASES_PATH = Path(__file__).parent / "engine-test-data/test_cases"
 
 EnvironmentDocument = dict[str, typing.Any]
-APIResponse = dict[str, typing.Any]
-
-
-@dataclass
-class EngineTestCase:
-    context: EvaluationContext
-    response: APIResponse
 
 
 def _extract_test_cases(
-    file_path: Path,
-) -> typing.Iterable[tuple[EvaluationContext, APIResponse]]:
-    """
-    Extract the test cases from the json data file which should be in the following
-    format.
-
-    {
-        "environment": {...}, // the environment document as found in DynamoDB
-        "identities_and_responses": [
-            {
-                "identity": {...}, // the identity as found in DynamoDB,
-                "response": {...}, // the response that was obtained from the current API
-            }
-        ]
-    }
-
-    :param file_path: the path to the json data file
-    :return: a list of tuples containing the environment, identity and api response
-    """
-    test_data = json.loads(file_path.read_text())
-
-    environment_document = test_data["environment"]
-
-    def _extract_segment_rules(rules: list[dict[str, typing.Any]]) -> list[SegmentRule]:
-        return [
-            {
-                "type": rule["type"],
-                "conditions": [
-                    {
-                        "property": condition.get("property_"),
-                        "operator": condition["operator"],
-                        "value": condition["value"],
-                    }
-                    for condition in rule.get("conditions", [])
-                ],
-                "rules": _extract_segment_rules(rule.get("rules", [])),
-            }
-            for rule in rules
-        ]
-
-    def _extract_feature_contexts(
-        feature_states: list[dict[str, typing.Any]],
-    ) -> typing.Iterable[FeatureContext]:
-        for feature_state in feature_states:
-            feature_context = FeatureContext(
-                key=str(feature_state["django_id"]),
-                feature_key=str(feature_state["feature"]["id"]),
-                name=feature_state["feature"]["name"],
-                enabled=feature_state["enabled"],
-                value=feature_state["feature_state_value"],
-            )
-            if multivariate_feature_state_values := feature_state.get(
-                "multivariate_feature_state_values"
-            ):
-                feature_context["variants"] = [
-                    {
-                        "value": multivariate_feature_state_value[
-                            "multivariate_feature_option"
-                        ]["value"],
-                        "weight": multivariate_feature_state_value[
-                            "percentage_allocation"
-                        ],
-                    }
-                    for multivariate_feature_state_value in sorted(
-                        multivariate_feature_state_values,
-                        key=itemgetter("id"),
-                    )
-                ]
-            if (
-                priority := (feature_state.get("feature_segment") or {}).get("priority")
-                is not None
-            ):
-                feature_context["priority"] = priority
-
-            yield feature_context
-
-    for case in test_data["identities_and_responses"]:
-        identity_data = case["identity"]
-        response = case["response"]
-
-        context: EvaluationContext = {
-            "environment": {
-                "key": environment_document["api_key"],
-                "name": "Test Environment",
-            },
-            "features": {
-                feature["name"]: feature
-                for feature in _extract_feature_contexts(
-                    environment_document["feature_states"]
-                )
-            },
-            "segments": {
-                str(segment["id"]): {
-                    "key": str(segment["id"]),
-                    "name": segment["name"],
-                    "rules": _extract_segment_rules(segment["rules"]),
-                    "overrides": [
-                        *_extract_feature_contexts(segment.get("feature_states", []))
-                    ],
-                }
-                for segment in environment_document["project"]["segments"]
-            },
-            "identity": {
-                "identifier": identity_data["identifier"],
-                "key": identity_data.get("django_id") or identity_data["composite_key"],
-                "traits": {
-                    trait["trait_key"]: trait["trait_value"]
-                    for trait in identity_data["identity_traits"]
-                },
-            },
-        }
-
-        yield context, response
-
-
-TEST_CASES = list(
-    _extract_test_cases(
-        MODULE_PATH / "engine-test-data/data/environment_n9fbf9h3v4fFgH3U3ngWhb.json"
-    )
-)
+    test_cases_dir_path: Path,
+) -> typing.Iterable[ParameterSet]:
+    for file_path in chain(
+        test_cases_dir_path.glob("*.json"),
+        test_cases_dir_path.glob("*.jsonc"),
+    ):
+        test_data = pyjson5.loads(file_path.read_text())
+        yield pytest.param(
+            test_data["context"],
+            test_data["result"],
+            id=file_path.stem,
+        )
+
+
+def _extract_benchmark_contexts(
+    test_cases_dir_path: Path,
+) -> typing.Iterable[EvaluationContext]:
+    for file_path in [
+        "test_0cfd0d72-4de4-4ed7-9cfb-d80dc3dacead__default.json",
+        "test_1bde8445-ca19-4bda-a9d5-3543a800fc0f__context_values.json",
+    ]:
+        yield pyjson5.loads((test_cases_dir_path / file_path).read_text())["context"]
+
+
+TEST_CASES = list(_extract_test_cases(TEST_CASES_PATH))
+BENCHMARK_CONTEXTS = list(_extract_benchmark_contexts(TEST_CASES_PATH))
 
 
 @pytest.mark.parametrize(
-    "context, response",
+    "context, expected_result",
     TEST_CASES,
 )
 def test_engine(
     context: EvaluationContext,
-    response: APIResponse,
-    mocker: MockerFixture,
+    expected_result: EvaluationResult,
 ) -> None:
     # When
-    engine_response = get_evaluation_result(context)
+    result = get_evaluation_result(context)
 
     # Then
-    assert {flag["feature_key"]: flag for flag in engine_response["flags"]} == {
-        (feature_key := str(flag["feature"]["id"])): {
-            "name": flag["feature"]["name"],
-            "feature_key": feature_key,
-            "enabled": flag["enabled"],
-            "value": flag["feature_state_value"],
-            "reason": mocker.ANY,
-        }
-        for flag in response["flags"]
-    }
+    assert result == expected_result
 
 
 @pytest.mark.benchmark
 def test_engine_benchmark() -> None:
-    for context, _ in TEST_CASES:
+    for context in BENCHMARK_CONTEXTS:
         get_evaluation_result(context)
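
Each file under engine-test-data/test_cases is expected to hold one object with "context" and "result" keys, and .jsonc files may carry comments, hence the new pyjson5 dependency. A sketch of parsing such a document (the inline payload is a made-up example, not a file from the submodule):

import pyjson5

# Made-up test-case document in the shape the loader above expects.
raw = """
{
    // input passed to get_evaluation_result()
    "context": {
        "environment": {"key": "example-env-key", "name": "Example Environment"}
    },
    // expected EvaluationResult
    "result": {"flags": {}, "segments": []}
}
"""

test_data = pyjson5.loads(raw)
assert set(test_data) == {"context", "result"}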
