 from causal_testing.specification.causal_specification import CausalSpecification
 from causal_testing.specification.scenario import Scenario
 from causal_testing.specification.variable import Input, Meta, Output
-from causal_testing.testing.base_test_case import BaseTestCase
 from causal_testing.testing.causal_test_case import CausalTestCase
 from causal_testing.testing.causal_test_engine import CausalTestEngine
 from causal_testing.testing.estimators import Estimator
@@ -86,7 +85,6 @@ def _create_abstract_test_case(self, test, mutates, effects):
         treatment_var.distribution = getattr(scipy.stats, dist)(**params)
         self._append_to_file(treatment_var.name + f" {dist}({params})", logging.INFO)

-        print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
         abstract_test = AbstractCausalTestCase(
             scenario=self.scenario,
             intervention_constraints=[mutates[v](k) for k, v in test["mutations"].items()],
@@ -139,22 +137,23 @@ def run_json_tests(self, effects: dict, estimators: dict, f_flag: bool = False,
                 ]
                 failures = self._execute_tests(concrete_tests, test, f_flag)
                 msg = (
-                    f"Executing test: {test['name']}\n"
-                    + f" {concrete_tests[0]}\n"
-                    + f" {failures}/{len(concrete_tests)} failed for {test['name']}"
+                    f"Executing test: {test['name']}\n"
+                    + f" {concrete_tests[0]}\n"
+                    + f" {failures}/{len(concrete_tests)} failed for {test['name']}"
                 )
             else:
                 abstract_test = self._create_abstract_test_case(test, mutates, effects)
                 concrete_tests, dummy = abstract_test.generate_concrete_tests(5, 0.05)
                 failures = self._execute_tests(concrete_tests, test, f_flag)

                 msg = (
-                    f"Executing test: {test['name']}\n"
-                    + " abstract_test\n"
-                    + f" {abstract_test}\n"
-                    + f" {abstract_test.treatment_variable.name},{abstract_test.treatment_variable.distribution}\n"
-                    + f" Number of concrete tests for test case: {str(len(concrete_tests))}\n"
-                    + f" {failures}/{len(concrete_tests)} failed for {test['name']}"
+                    f"Executing test: {test['name']}\n"
+                    + " abstract_test\n"
+                    + f" {abstract_test}\n"
+                    + f" {abstract_test.treatment_variable.name},"
+                    + f" {abstract_test.treatment_variable.distribution}\n"
+                    + f" Number of concrete tests for test case: {str(len(concrete_tests))}\n"
+                    + f" {failures}/{len(concrete_tests)} failed for {test['name']}"
                 )
                 self._append_to_file(msg, logging.INFO)
         else:
@@ -179,11 +178,11 @@ def run_json_tests(self, effects: dict, estimators: dict, f_flag: bool = False,
                 result = "passed"

             msg = (
-                f"Executing concrete test: {test['name']}\n"
-                + f"treatment variable: {test['treatment_variable']}\n"
-                + f"outcome_variable = {outcome_variable}\n"
-                + f"control value = {test['control_value']}, treatment value = {test['treatment_value']}\n"
-                + f"result - {result}"
+                f"Executing concrete test: {test['name']}\n"
+                + f"treatment variable: {test['treatment_variable']}\n"
+                + f"outcome_variable = {outcome_variable}\n"
+                + f"control value = {test['control_value']}, treatment value = {test['treatment_value']}\n"
+                + f"result - {result}"
             )
             self._append_to_file(msg, logging.INFO)

@@ -258,7 +257,7 @@ def _execute_test_case(self, causal_test_case: CausalTestCase, test: Iterable[Ma
         return failed

     def _setup_test(
-        self, causal_test_case: CausalTestCase, test: Mapping, conditions: list[str] = None
+        self, causal_test_case: CausalTestCase, test: Mapping, conditions: list[str] = None
     ) -> tuple[CausalTestEngine, Estimator]:
         """Create the necessary inputs for a single test case
         :param causal_test_case: The concrete test case to be executed
@@ -343,7 +342,7 @@ def get_args(test_args=None) -> argparse.Namespace:
     parser.add_argument(
         "-w",
         help="Specify to overwrite any existing output files. This can lead to the loss of existing outputs if not "
-        "careful",
+        "careful",
         action="store_true",
     )
     parser.add_argument(