-import json
 import typing
 from pathlib import Path
 
+import pyjson5
 import pytest
+from _pytest.mark import ParameterSet
 
 from flag_engine.context.types import EvaluationContext
 from flag_engine.engine import get_evaluation_result
 from flag_engine.result.types import EvaluationResult
 
-MODULE_PATH = Path(__file__).parent.resolve()
+TEST_CASES_PATH = Path(__file__).parent / "engine-test-data/test_cases"
 
 EnvironmentDocument = dict[str, typing.Any]
 
 
 def _extract_test_cases(
-    file_path: Path,
-) -> typing.Iterable[tuple[EvaluationContext, EvaluationResult]]:
-    test_data = json.loads(file_path.read_text())
+    test_cases_dir_path: Path,
+) -> typing.Iterable[ParameterSet]:
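+    # One test case per JSON file; each file holds a "context" and the expected "result".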
+    for file_path in test_cases_dir_path.glob("*.json"):
+        test_data = pyjson5.loads(file_path.read_text())
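+        # Use the file stem as the test ID so failures point to the source file.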
+        yield pytest.param(
+            test_data["context"],
+            test_data["result"],
+            id=file_path.stem,
+        )
 
-    for case in test_data["test_cases"]:
-        context: EvaluationContext = case["context"]
-        result: EvaluationResult = case["result"]
-        yield context, result
 
+def _extract_benchmark_contexts(
+    test_cases_dir_path: Path,
+) -> typing.Iterable[EvaluationContext]:
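+    # Benchmark only a fixed subset of the test-case files.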
+    for file_path in [
+        "test_0cfd0d72-4de4-4ed7-9cfb-d80dc3dacead__default.json",
+        "test_1bde8445-ca19-4bda-a9d5-3543a800fc0f__context_values.json",
+    ]:
+        yield pyjson5.loads((test_cases_dir_path / file_path).read_text())["context"]
 
-TEST_CASES = list(
-    _extract_test_cases(
-        MODULE_PATH / "engine-test-data/data/environment_n9fbf9h3v4fFgH3U3ngWhb.json"
-    )
-)
+
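+# Collected at import time for parametrization and benchmarking below.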
+TEST_CASES = list(_extract_test_cases(TEST_CASES_PATH))
+BENCHMARK_CONTEXTS = list(_extract_benchmark_contexts(TEST_CASES_PATH))
 
 
 @pytest.mark.parametrize(
@@ -48,5 +61,6 @@ def test_engine(
 
 @pytest.mark.benchmark
 def test_engine_benchmark() -> None:
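+    # Contexts are parsed once at import time, so only evaluation is measured here.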
-    for context, _ in TEST_CASES:
+    for context in BENCHMARK_CONTEXTS:
         get_evaluation_result(context)