import pytest
from absbox.local.interface import mkTag

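# NOTE: os, json, logging, importlib, requests, pprint (pp), json.JSONDecodeError and
# deepdiff.DeepDiff are referenced below; they are assumed to be imported in the earlier
# lines of this file, which are not shown in this hunk.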
this_file = os.path.dirname(__file__)
china_folder = os.path.join("absbox", "tests", "benchmark", "china")
us_folder = os.path.join("absbox", "tests", "benchmark", "us")
test_folder = os.path.join("absbox", "tests")
config_file = os.path.join(test_folder, "config.json")

with open(config_file, 'r') as cfh:
    config = json.load(cfh)


def read_test_cases():
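    """Read test_case.txt and return a mapping of each case line to its deal object.

    Each non-comment line is expected to look like "country,test_file,deal_var_name";
    the deal script is imported dynamically and the named deal variable is pulled from it.
    """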
    r = {}
    with open(os.path.join(test_folder, "test_case.txt"), 'r') as f:
        rs = f.readlines()
    file_paths = [ln.rstrip() for ln in rs if not ln.startswith("#")]
    for file_path in file_paths:
        country, test_num, deal_var_name = file_path.split(",")
        deal_path = os.path.join(test_folder, "benchmark", country, test_num)
        spec = importlib.util.spec_from_file_location("runner", deal_path)
        module = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(module)
        deal = getattr(module, deal_var_name)
        r[file_path] = deal
    return r


def translate(d, folder, o):
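    """Build the deal JSON for deal `d` and either create the benchmark file `o`
    under benchmark/<folder>/out (when it is missing or nearly empty) or compare
    the generated JSON against the existing benchmark."""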
    print(f"Translating>> {d} >> {o}")
    benchfile = os.path.join(this_file, "benchmark", folder, "out", o)
    if not os.path.exists(benchfile) or os.stat(benchfile).st_size < 10:
        print(">>>>> creating/rebuild <<<<<<")
        with open(benchfile, 'w', encoding='utf8') as newbench:
            deal_json = None
            try:
                print("Writing new bench out case ->")
                deal_json = d.json  # building the deal JSON may raise
            except Exception as e:
                print(f"Error in building deal json:{e}")
            assert deal_json is not None, f"None: Failed to generate Deal JSON file:{deal_json}"
            assert deal_json != "", f"Empty: Failed to generate Deal JSON file:{deal_json}"
            json.dump(deal_json, newbench, indent=2)
            logging.info(f"Create new case for {o}")
    else:
        print(f">>>>> comparing <<<<<< \n Comparing with benchmark file:{benchfile}")
        with open(benchfile, 'r') as ofile:
            try:
                benchmark_out = json.load(ofile)
                if d.json != benchmark_out:
                    print(f"Failed with benchmark file:{benchfile}")
                    diff_result = DeepDiff(d.json, benchmark_out)
                    pp.pprint(diff_result, indent=2)
                    assert d.json == benchmark_out, f"testing fail on {o}"
                else:
                    return True
            except JSONDecodeError as e:
                print(f"Error parsing json format:{benchfile} -> {e}")


@pytest.mark.dependency(name="test_translate")
def test_translate():
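    """Translate every registered test case into deal JSON and check it against the benchmark output."""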
    cases = read_test_cases()

    for case, v in cases.items():
        output_folder, test_py, deal_obj = case.split(",")
        translate(v, output_folder, test_py.replace(".py", ".json"))

def run_deal(input_folder, pair):
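    """Post each (deal, scenario, non-performance assumption, expected output) tuple in `pair`
    to the test server's runDeal endpoint and compare the response against the local
    benchmark response stored under <input_folder>/resp."""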

    input_req_folder = os.path.join(input_folder, "out")
    input_scen_folder = os.path.join(input_folder, "scenario")
    input_resp_folder = os.path.join(input_folder, "resp")

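    # The server under test comes from config.json, unless overridden by the
    # TEST_RUN_SERVER environment variable.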
    print("Select server to run")
    test_server = config["test_server"]
    if 'TEST_RUN_SERVER' in os.environ and os.environ['TEST_RUN_SERVER'] != "":
        print("Using Custom Server")
        logging.info(f"Using Env Server {os.environ['TEST_RUN_SERVER']}")
        test_server = os.environ['TEST_RUN_SERVER']
    else:
        print(f"Using Config Server => {test_server}")
        logging.info(f"Using Server from Config {test_server}")
    #test_server = "https://absbox.org/api/dev" # config["test_server"] #https://deal-bench.xyz/api/run_deal2"

    for dinput, sinput, nonPinput, eoutput in pair:
        print(f"Comparing:{dinput},{sinput},{eoutput}")
        with open(os.path.join(input_req_folder, dinput), 'r') as dq:  # deal request
            with open(os.path.join(input_scen_folder, sinput), 'r') as sq:  # scenario request
                print(f"With deal request=> {dinput}, scenario => {sinput}")
                nonPerfInput = {}
                if nonPinput:
                    with open(os.path.join(input_scen_folder, nonPinput), 'r') as npq:
                        nonPerfInput = json.load(npq)
                    print(nonPerfInput)
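                # Wrap the deal, the scenario assumption and the non-performance assumptions
                # into a "SingleRunReq" tagged request, the shape the runDeal endpoint expects.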
                req = mkTag(("SingleRunReq", [[], json.load(dq), json.load(sq), nonPerfInput]))
                print("build req done")
                hdrs = {'Content-type': 'application/json', 'Accept': '*/*'}
                tresp = None
                try:
                    tresp = requests.post(f"{test_server}/runDeal"
                                          , data=json.dumps(req, ensure_ascii=False).encode('utf-8')
                                          , headers=hdrs
                                          , verify=False)
                    if tresp.status_code != 200:
                        print(f"Failed to finish req:{dinput} with code {tresp.status_code}")
                        print(f"response text {tresp.text}")
                    else:
                        print("response received")
                except requests.exceptions.ConnectionError as e:
                    print(f"Failed to get resp from {dinput} ,url: {test_server}")
                    raise
                try:
                    s_result = json.loads(tresp.text)
                except JSONDecodeError as e:
                    logging.error(f"Error parsing {tresp.text}")
                    raise RuntimeError(f"Failed to parse response for {dinput} with error {e}")
                local_bench_file = os.path.join(input_resp_folder, eoutput)
                if not os.path.exists(local_bench_file):
                    print(f"writing to new resp for {local_bench_file}")
                    with open(local_bench_file, 'w') as wof:  # write output file
                        json.dump(s_result, wof, indent=2)
                    continue
                with open(local_bench_file, 'r') as eout:  # expected output
                    print(f"reading benchmark resp for {local_bench_file}")
                    local_result = json.load(eout)
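                # Both payloads are Either-style: "Right" wraps a successful run,
                # "Left" carries the engine's error message.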
                assert "Right" in local_result, f"{dinput}:Left error : {local_result['Left']}"
                assert "Right" in s_result, f"{dinput}:Left error : {s_result['Left']}"

                print(local_result.keys())
                print(s_result.keys())

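                # Based on the comparisons below, result['Right'][1] carries the pool cashflow
                # and result['Right'][0]['contents'] the deal components (waterfall, bonds, fees, accounts).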
                if local_result['Right'][1] != s_result['Right'][1]:
                    print(f"Pool Flow Is Not matching => {dinput}")
                    print("Pool diff", DeepDiff(local_result['Right'][1], s_result['Right'][1]))
                    min_length = min(len(local_result['Right'][1]), len(s_result['Right'][1]))
                    for i in range(min_length):
                        if local_result['Right'][1][i] != s_result['Right'][1][i]:
                            print(f"diff at=> {i}")
                            print(f"bench:{local_result['Right'][1][i]}")
                            print(f"test:{s_result['Right'][1][i]}")
                            break

                local_result_content = local_result['Right'][0]['contents']
                s_result_content = s_result['Right'][0]['contents']
                if local_result_content['waterfall'] != s_result_content['waterfall']:
                    local_keys = local_result_content['waterfall'].keys()
                    server_keys = s_result_content['waterfall'].keys()
                    assert local_keys == server_keys, f"Keys are not matched, local keys:{local_keys},server keys:{server_keys}"
                    commonKeys = local_result_content['waterfall'].keys()
                    # for (idx,(local_w,test_w)) in enumerate(zip(local_result_content['waterfall'],s_result_content['waterfall'])):
                    for actionName in commonKeys:
                        assert local_result_content['waterfall'][actionName] == s_result_content['waterfall'][actionName], f"diff in waterfall action {actionName},local={local_result_content['waterfall'][actionName]},test={s_result_content['waterfall'][actionName]}"

                    #assert False,f"diff in waterfall: {diff(local_result_content['waterfall'],s_result_content['waterfall'])}"
                if local_result_content['bonds'] != s_result_content['bonds']:
                    print("Bonds are not matching")
                    for bn, bv in local_result_content['bonds'].items():
                        assert 'bonds' in s_result_content, f"No bonds in server resp, with key -> {s_result_content.keys()}"
                        if s_result_content['bonds'][bn] != bv:
                            print(f"Bond {bn} is not matching")
                            print(DeepDiff(s_result_content['bonds'][bn], bv))

                #for i in ['status','dates','pool','fees','bonds','accounts']:
                #    assert local_result[0][i]==s_result[0][i], f"Deal {i} is not matching"
                bench_keys = local_result_content.keys()
                result_keys = s_result_content.keys()
                assert set(bench_keys) == set(result_keys), f"keys are not matching: bench {bench_keys},result {result_keys}"
                for i in ['status', 'dates', 'pool', 'fees', 'bonds', 'accounts']:
                    assert local_result_content[i] == s_result_content[i], f"Deal {i} is not matching"
                assert s_result == local_result, f"Server Test Failed {dinput} {sinput} {eoutput}"
                print("Compare Done")


@pytest.mark.dependency(depends=["test_translate"])
def test_resp():
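    """Run the China and US benchmark deals against the test server and compare responses.

    Each entry in `pair` is (deal request file, scenario file, non-performance
    assumption file or None, expected response file).
    """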
    pair = [("test01.json","mortgage_empty.json",None,"test01.out.json")
            ,("test02.json","mortgage_empty.json",None,"test02.out.json")
            ,("test03.json","mortgage_empty.json","rates.json","test03.out.json")
            ,("test04.json","mortgage_empty.json","rates.json","test04.out.json")
            ,("test05.json","mortgage_empty.json","rates.json","test05.out.json")
            ,("test06.json","mortgage_empty.json","rates.json","test06.out.json")
            ,("test07.json","mortgage_empty.json",None,"test07.out.json")
            ,("test08.json","mortgage_empty.json",None,"test08.out.json")
            ,("test09.json","mortgage_empty.json",None,"test09.out.json")
            ,("test10.json","mortgage_empty.json",None,"test10.out.json")
            ,("test11.json","mortgage_empty.json","rates.json","test11.out.json")
            ,("test12.json","mortgage_empty.json","rates.json","test12.out.json")
            ,("test13.json","mortgage_empty.json",None,"test13.out.json")
            ,("test14.json","mortgage_empty.json","rates.json","test14.out.json")
            ,("test15.json","mortgage_empty.json",None,"test15.out.json")
            ,("test16.json","mortgage_empty.json",None,"test16.out.json")
            ,("test17.json","mortgage_empty.json",None,"test17.out.json")
            ,("test18.json","mortgage_empty.json",None,"test18.out.json")
            ,("test19.json","mortgage_empty.json",None,"test19.out.json")
            ,("test20.json","loan_empty.json",None,"test20.out.json")
            ,("test21.json","mortgage_empty.json",None,"test21.out.json")
            #,("test23.json","empty.json","test23.out.json")
            ,("test24.json","mortgage_empty.json","bmwRevolving.json","test24.out.json")
            ,("test25.json","mortgage_empty.json",None,"test25.out.json")
            ]
    print(">>>> Running China Bench Files")
    run_deal(china_folder, pair)

    print(">>>> Running US Bench Files")
    pair = [("test01.json","empty.json",None,"test01.out.json")
            ,("test02.json","empty.json",None,"test02.out.json")
            ,("test03.json","empty.json",None,"test03.out.json")
            ,("test04.json","empty.json",None,"test04.out.json")
            ,("test05.json","empty.json",None,"test05.out.json")
            ,("test06.json","empty.json",None,"test06.out.json")
            ,("test07.json","empty.json","bmwRevolving.json","test07.out.json")
            ]
    run_deal(us_folder, pair)