1 | | -import json |
2 | 1 | import typing |
3 | | -from dataclasses import dataclass |
4 | | -from operator import itemgetter |
| 2 | +from itertools import chain |
5 | 3 | from pathlib import Path |
6 | 4 |
| 5 | +import pyjson5 |
7 | 6 | import pytest |
8 | | -from pytest_mock import MockerFixture |
| 7 | +from _pytest.mark import ParameterSet |
9 | 8 |
10 | | -from flag_engine.context.types import EvaluationContext, FeatureContext, SegmentRule |
| 9 | +from flag_engine.context.types import EvaluationContext |
11 | 10 | from flag_engine.engine import get_evaluation_result |
| 11 | +from flag_engine.result.types import EvaluationResult |
12 | 12 |
13 | | -MODULE_PATH = Path(__file__).parent.resolve() |
| 13 | +TEST_CASES_PATH = Path(__file__).parent / "engine-test-data/test_cases" |
14 | 14 |
15 | 15 | EnvironmentDocument = dict[str, typing.Any] |
16 | | -APIResponse = dict[str, typing.Any] |
17 | | - |
18 | | - |
19 | | -@dataclass |
20 | | -class EngineTestCase: |
21 | | - context: EvaluationContext |
22 | | - response: APIResponse |
23 | 16 |
24 | 17 |
25 | 18 | def _extract_test_cases( |
26 | | - file_path: Path, |
27 | | -) -> typing.Iterable[tuple[EvaluationContext, APIResponse]]: |
28 | | - """ |
29 | | - Extract the test cases from the json data file which should be in the following |
30 | | - format. |
31 | | -
32 | | - { |
33 | | - "environment": {...}, // the environment document as found in DynamoDB |
34 | | - "identities_and_responses": [ |
35 | | - { |
36 | | - "identity": {...}, // the identity as found in DynamoDB, |
37 | | - "response": {...}, // the response that was obtained from the current API |
38 | | - } |
39 | | - ] |
40 | | - } |
41 | | -
42 | | - :param file_path: the path to the json data file |
43 | | - :return: a list of tuples containing the environment, identity and api response |
44 | | - """ |
45 | | - test_data = json.loads(file_path.read_text()) |
46 | | - |
47 | | - environment_document = test_data["environment"] |
48 | | - |
49 | | - def _extract_segment_rules(rules: list[dict[str, typing.Any]]) -> list[SegmentRule]: |
50 | | - return [ |
51 | | - { |
52 | | - "type": rule["type"], |
53 | | - "conditions": [ |
54 | | - { |
55 | | - "property": condition.get("property_"), |
56 | | - "operator": condition["operator"], |
57 | | - "value": condition["value"], |
58 | | - } |
59 | | - for condition in rule.get("conditions", []) |
60 | | - ], |
61 | | - "rules": _extract_segment_rules(rule.get("rules", [])), |
62 | | - } |
63 | | - for rule in rules |
64 | | - ] |
65 | | - |
66 | | - def _extract_feature_contexts( |
67 | | - feature_states: list[dict[str, typing.Any]], |
68 | | - ) -> typing.Iterable[FeatureContext]: |
69 | | - for feature_state in feature_states: |
70 | | - feature_context = FeatureContext( |
71 | | - key=str(feature_state["django_id"]), |
72 | | - feature_key=str(feature_state["feature"]["id"]), |
73 | | - name=feature_state["feature"]["name"], |
74 | | - enabled=feature_state["enabled"], |
75 | | - value=feature_state["feature_state_value"], |
76 | | - ) |
77 | | - if multivariate_feature_state_values := feature_state.get( |
78 | | - "multivariate_feature_state_values" |
79 | | - ): |
80 | | - feature_context["variants"] = [ |
81 | | - { |
82 | | - "value": multivariate_feature_state_value[ |
83 | | - "multivariate_feature_option" |
84 | | - ]["value"], |
85 | | - "weight": multivariate_feature_state_value[ |
86 | | - "percentage_allocation" |
87 | | - ], |
88 | | - } |
89 | | - for multivariate_feature_state_value in sorted( |
90 | | - multivariate_feature_state_values, |
91 | | - key=itemgetter("id"), |
92 | | - ) |
93 | | - ] |
94 | | - if ( |
95 | | - priority := (feature_state.get("feature_segment") or {}).get("priority") |
96 | | - is not None |
97 | | - ): |
98 | | - feature_context["priority"] = priority |
99 | | - |
100 | | - yield feature_context |
101 | | - |
102 | | - for case in test_data["identities_and_responses"]: |
103 | | - identity_data = case["identity"] |
104 | | - response = case["response"] |
105 | | - |
106 | | - context: EvaluationContext = { |
107 | | - "environment": { |
108 | | - "key": environment_document["api_key"], |
109 | | - "name": "Test Environment", |
110 | | - }, |
111 | | - "features": { |
112 | | - feature["name"]: feature |
113 | | - for feature in _extract_feature_contexts( |
114 | | - environment_document["feature_states"] |
115 | | - ) |
116 | | - }, |
117 | | - "segments": { |
118 | | - str(segment["id"]): { |
119 | | - "key": str(segment["id"]), |
120 | | - "name": segment["name"], |
121 | | - "rules": _extract_segment_rules(segment["rules"]), |
122 | | - "overrides": [ |
123 | | - *_extract_feature_contexts(segment.get("feature_states", [])) |
124 | | - ], |
125 | | - } |
126 | | - for segment in environment_document["project"]["segments"] |
127 | | - }, |
128 | | - "identity": { |
129 | | - "identifier": identity_data["identifier"], |
130 | | - "key": identity_data.get("django_id") or identity_data["composite_key"], |
131 | | - "traits": { |
132 | | - trait["trait_key"]: trait["trait_value"] |
133 | | - for trait in identity_data["identity_traits"] |
134 | | - }, |
135 | | - }, |
136 | | - } |
137 | | - |
138 | | - yield context, response |
139 | | - |
140 | | - |
141 | | -TEST_CASES = list( |
142 | | - _extract_test_cases( |
143 | | - MODULE_PATH / "engine-test-data/data/environment_n9fbf9h3v4fFgH3U3ngWhb.json" |
144 | | - ) |
145 | | -) |
| 19 | + test_cases_dir_path: Path, |
| 20 | +) -> typing.Iterable[ParameterSet]: |
| 21 | + for file_path in chain( |
| 22 | + test_cases_dir_path.glob("*.json"), |
| 23 | + test_cases_dir_path.glob("*.jsonc"), |
| 24 | + ): |
| 25 | + test_data = pyjson5.loads(file_path.read_text()) |
| 26 | + yield pytest.param( |
| 27 | + test_data["context"], |
| 28 | + test_data["result"], |
| 29 | + id=file_path.stem, |
| 30 | + ) |
| 31 | + |
| 32 | + |
| 33 | +def _extract_benchmark_contexts( |
| 34 | + test_cases_dir_path: Path, |
| 35 | +) -> typing.Iterable[EvaluationContext]: |
| 36 | + for file_path in [ |
| 37 | + "test_0cfd0d72-4de4-4ed7-9cfb-d80dc3dacead__default.json", |
| 38 | + "test_1bde8445-ca19-4bda-a9d5-3543a800fc0f__context_values.json", |
| 39 | + ]: |
| 40 | + yield pyjson5.loads((test_cases_dir_path / file_path).read_text())["context"] |
| 41 | + |
| 42 | + |
| 43 | +TEST_CASES = list(_extract_test_cases(TEST_CASES_PATH)) |
| 44 | +BENCHMARK_CONTEXTS = list(_extract_benchmark_contexts(TEST_CASES_PATH)) |
146 | 45 |
147 | 46 |
148 | 47 | @pytest.mark.parametrize( |
149 | | - "context, response", |
| 48 | + "context, expected_result", |
150 | 49 | TEST_CASES, |
151 | 50 | ) |
152 | 51 | def test_engine( |
153 | 52 | context: EvaluationContext, |
154 | | - response: APIResponse, |
155 | | - mocker: MockerFixture, |
| 53 | + expected_result: EvaluationResult, |
156 | 54 | ) -> None: |
157 | 55 | # When |
158 | | - engine_response = get_evaluation_result(context) |
| 56 | + result = get_evaluation_result(context) |
159 | 57 |
160 | 58 | # Then |
161 | | - assert {flag["feature_key"]: flag for flag in engine_response["flags"]} == { |
162 | | - (feature_key := str(flag["feature"]["id"])): { |
163 | | - "name": flag["feature"]["name"], |
164 | | - "feature_key": feature_key, |
165 | | - "enabled": flag["enabled"], |
166 | | - "value": flag["feature_state_value"], |
167 | | - "reason": mocker.ANY, |
168 | | - } |
169 | | - for flag in response["flags"] |
170 | | - } |
| 59 | + assert result == expected_result |
171 | 60 |
172 | 61 |
173 | 62 | @pytest.mark.benchmark |
174 | 63 | def test_engine_benchmark() -> None: |
175 | | - for context, _ in TEST_CASES: |
| 64 | + for context in BENCHMARK_CONTEXTS: |
176 | 65 | get_evaluation_result(context) |
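
For context, the new loader treats every `*.json` / `*.jsonc` file under `engine-test-data/test_cases` as one parametrised case keyed by its file stem, each with top-level `"context"` and `"result"` objects. The sketch below illustrates that contract; the payload values are hypothetical, and only the top-level keys plus the `pyjson5` / `pytest.param` plumbing come from the change above.

```python
# Minimal sketch of the test-case file contract assumed by _extract_test_cases.
# The payload below is hypothetical; only the top-level "context" and "result"
# keys and the pyjson5 + pytest.param plumbing mirror the change above.
import pyjson5
import pytest

EXAMPLE_JSONC = """
{
    // hypothetical evaluation context for a single identity
    "context": {
        "environment": {"key": "some-environment-key", "name": "Test Environment"},
        "identity": {"identifier": "user-1", "key": "user-1", "traits": {}},
        "features": {},
        "segments": {}
    },
    // hypothetical expected engine output; the real shape follows EvaluationResult
    "result": {}
}
"""

# pyjson5 parses JSON5/JSONC, so fixtures may carry comments like the ones above.
test_data = pyjson5.loads(EXAMPLE_JSONC)

# Each file becomes one parametrised case; the id would normally be the file stem.
case = pytest.param(test_data["context"], test_data["result"], id="example_case")
```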