
Commit 1203490

pylint
1 parent c2d00be commit 1203490

3 files changed: +25, -26 lines changed

causal_testing/generation/enum_gen.py

Lines changed: 4 additions & 3 deletions
@@ -12,6 +12,7 @@ class EnumGen(rv_discrete):
     abstract test cases."""
 
     def __init__(self, datatype: Enum):
+        super().__init__()
         self.datatype = dict(enumerate(datatype, 1))
         self.inverse_dt = {v: k for k, v in self.datatype.items()}
 
@@ -28,16 +29,16 @@ def ppf(self, q):
         """
         return np.vectorize(self.datatype.get)(np.ceil(len(self.datatype) * q))
 
-    def cdf(self, q):
+    def cdf(self, k):
         """
         Cumulative distribution function of the given RV.
         Parameters
         ----------
-        q : array_like
+        k : array_like
             quantiles
         Returns
         -------
         cdf : ndarray
             Cumulative distribution function evaluated at `x`
         """
-        return np.vectorize(self.inverse_dt.get)(q) / len(self.datatype)
+        return np.vectorize(self.inverse_dt.get)(k) / len(self.datatype)
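For context, the two methods touched here map between uniform quantiles and enum members: ppf turns a quantile q in (0, 1] into the member with 1-based index ceil(n * q), and cdf is its inverse, returning a member's 1-based index divided by the number of members. The q -> k rename in cdf matches the parameter name documented for scipy's rv_discrete.cdf, which is presumably what pylint flagged. Below is a minimal usage sketch, not part of the commit; it assumes the package is importable and uses a made-up Color enum.

# Usage sketch (illustrative only): the Color enum and the quantile values are
# hypothetical; EnumGen is the class shown in the diff above.
from enum import Enum

import numpy as np

from causal_testing.generation.enum_gen import EnumGen


class Color(Enum):
    RED = "red"
    GREEN = "green"
    BLUE = "blue"


gen = EnumGen(Color)

# ppf: uniform quantiles -> members; ceil(3 * 0.2) = 1 -> RED, ceil(3 * 0.9) = 3 -> BLUE
print(gen.ppf(np.array([0.2, 0.9])))

# cdf: member -> 1-based index / number of members, so GREEN -> 2 / 3
print(gen.cdf(Color.GREEN))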

causal_testing/json_front/json_class.py

Lines changed: 17 additions & 18 deletions
@@ -20,7 +20,6 @@
 from causal_testing.specification.causal_specification import CausalSpecification
 from causal_testing.specification.scenario import Scenario
 from causal_testing.specification.variable import Input, Meta, Output
-from causal_testing.testing.base_test_case import BaseTestCase
 from causal_testing.testing.causal_test_case import CausalTestCase
 from causal_testing.testing.causal_test_engine import CausalTestEngine
 from causal_testing.testing.estimators import Estimator
@@ -86,7 +85,6 @@ def _create_abstract_test_case(self, test, mutates, effects):
             treatment_var.distribution = getattr(scipy.stats, dist)(**params)
             self._append_to_file(treatment_var.name + f" {dist}({params})", logging.INFO)
 
-        print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
         abstract_test = AbstractCausalTestCase(
             scenario=self.scenario,
             intervention_constraints=[mutates[v](k) for k, v in test["mutations"].items()],
@@ -139,22 +137,23 @@ def run_json_tests(self, effects: dict, estimators: dict, f_flag: bool = False,
                     ]
                     failures = self._execute_tests(concrete_tests, test, f_flag)
                     msg = (
-                            f"Executing test: {test['name']} \n"
-                            + f" {concrete_tests[0]} \n"
-                            + f" {failures}/{len(concrete_tests)} failed for {test['name']}"
+                        f"Executing test: {test['name']} \n"
+                        + f" {concrete_tests[0]} \n"
+                        + f" {failures}/{len(concrete_tests)} failed for {test['name']}"
                     )
                 else:
                     abstract_test = self._create_abstract_test_case(test, mutates, effects)
                     concrete_tests, dummy = abstract_test.generate_concrete_tests(5, 0.05)
                     failures = self._execute_tests(concrete_tests, test, f_flag)
 
                     msg = (
-                            f"Executing test: {test['name']} \n"
-                            + " abstract_test \n"
-                            + f" {abstract_test} \n"
-                            + f" {abstract_test.treatment_variable.name},{abstract_test.treatment_variable.distribution} \n"
-                            + f" Number of concrete tests for test case: {str(len(concrete_tests))} \n"
-                            + f" {failures}/{len(concrete_tests)} failed for {test['name']}"
+                        f"Executing test: {test['name']} \n"
+                        + " abstract_test \n"
+                        + f" {abstract_test} \n"
+                        + f" {abstract_test.treatment_variable.name},"
+                        + f" {abstract_test.treatment_variable.distribution} \n"
+                        + f" Number of concrete tests for test case: {str(len(concrete_tests))} \n"
+                        + f" {failures}/{len(concrete_tests)} failed for {test['name']}"
                     )
                     self._append_to_file(msg, logging.INFO)
             else:
@@ -179,11 +178,11 @@ def run_json_tests(self, effects: dict, estimators: dict, f_flag: bool = False,
                 result = "passed"
 
             msg = (
-                    f"Executing concrete test: {test['name']} \n"
-                    + f"treatment variable: {test['treatment_variable']} \n"
-                    + f"outcome_variable = {outcome_variable} \n"
-                    + f"control value = {test['control_value']}, treatment value = {test['treatment_value']} \n"
-                    + f"result - {result}"
+                f"Executing concrete test: {test['name']} \n"
+                + f"treatment variable: {test['treatment_variable']} \n"
+                + f"outcome_variable = {outcome_variable} \n"
+                + f"control value = {test['control_value']}, treatment value = {test['treatment_value']} \n"
+                + f"result - {result}"
             )
             self._append_to_file(msg, logging.INFO)
 
@@ -258,7 +257,7 @@ def _execute_test_case(self, causal_test_case: CausalTestCase, test: Iterable[Ma
         return failed
 
     def _setup_test(
-            self, causal_test_case: CausalTestCase, test: Mapping, conditions: list[str] = None
+        self, causal_test_case: CausalTestCase, test: Mapping, conditions: list[str] = None
     ) -> tuple[CausalTestEngine, Estimator]:
         """Create the necessary inputs for a single test case
         :param causal_test_case: The concrete test case to be executed
@@ -343,7 +342,7 @@ def get_args(test_args=None) -> argparse.Namespace:
     parser.add_argument(
         "-w",
         help="Specify to overwrite any existing output files. This can lead to the loss of existing outputs if not "
-             "careful",
+        "careful",
         action="store_true",
     )
     parser.add_argument(
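A small note on the wrapped message in run_json_tests above: splitting the name/distribution piece into two adjacent f-strings keeps the line length down, but it also inserts a space after the comma in the logged text. A minimal sketch of that behaviour, using placeholder values rather than the real variables:

# Placeholder values standing in for treatment_variable.name / .distribution.
name, distribution = "X", "uniform"

old_piece = f" {name},{distribution} \n"         # before the wrap: " X,uniform \n"
new_piece = f" {name}," + f" {distribution} \n"  # after the wrap:  " X, uniform \n"

assert old_piece != new_piece  # the wrap adds one space after the comma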

tests/json_front_tests/test_json_class.py

Lines changed: 4 additions & 5 deletions
@@ -2,17 +2,15 @@
 from pathlib import Path
 from statistics import StatisticsError
 import scipy
-import csv
-import json
+
 
 from causal_testing.testing.estimators import LinearRegressionEstimator
 from causal_testing.testing.causal_test_outcome import NoEffect, Positive
-from tests.test_helpers import create_temp_dir_if_non_existent, remove_temp_dir_if_existent
+from tests.test_helpers import remove_temp_dir_if_existent
 from causal_testing.json_front.json_class import JsonUtility, CausalVariables
 from causal_testing.specification.variable import Input, Output, Meta
 from causal_testing.specification.scenario import Scenario
 from causal_testing.specification.causal_specification import CausalSpecification
-from causal_testing.generation.abstract_causal_test_case import AbstractCausalTestCase
 
 
 class TestJsonClass(unittest.TestCase):
@@ -151,7 +149,8 @@ def test_run_json_tests_from_json(self):
 
         self.json_class.run_json_tests(effects=effects, estimators=estimators, f_flag=False, mutates=mutates)
 
-        # Test that the final log message prints that failed tests are printed, which is expected behaviour for this scenario
+        # Test that the final log message prints that failed tests are printed, which is expected behaviour for this
+        # scenario
         with open("temp_out.txt", "r") as reader:
             temp_out = reader.readlines()
         self.assertIn("failed", temp_out[-1])
