Skip to content

Commit 73f360c

Browse files
committed
Upped the code coverage
1 parent bd32e1a commit 73f360c

File tree

4 files changed

+81
-33
lines changed

4 files changed

+81
-33
lines changed

causal_testing/testing/estimators.py

Lines changed: 10 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -48,7 +48,7 @@ def __init__(
4848
adjustment_set: set,
4949
outcome: str,
5050
df: pd.DataFrame = None,
51-
effect_modifiers: dict[Variable:Any] = None,
51+
effect_modifiers: dict[str, Any] = None,
5252
):
5353
self.treatment = treatment
5454
self.treatment_value = treatment_value
@@ -59,9 +59,9 @@ def __init__(
5959
if effect_modifiers is None:
6060
self.effect_modifiers = {}
6161
elif isinstance(effect_modifiers, (list, set)):
62-
self.effect_modifiers = {k.name for k in effect_modifiers}
62+
self.effect_modifiers = {k for k in effect_modifiers}
6363
elif isinstance(effect_modifiers, dict):
64-
self.effect_modifiers = {k.name: v for k, v in effect_modifiers.items()}
64+
self.effect_modifiers = {k: v for k, v in effect_modifiers.items()}
6565
else:
6666
raise ValueError(f"Unsupported type for effect_modifiers {effect_modifiers}. Expected iterable")
6767
self.modelling_assumptions = []
@@ -106,19 +106,17 @@ def __init__(
106106
adjustment_set: set,
107107
outcome: str,
108108
df: pd.DataFrame = None,
109-
effect_modifiers: dict[Variable:Any] = None,
109+
effect_modifiers: dict[str, Any] = None,
110110
formula: str = None,
111111
):
112112
super().__init__(treatment, treatment_value, control_value, adjustment_set, outcome, df, effect_modifiers)
113113

114114
self.model = None
115-
if effect_modifiers is None:
116-
effect_modifiers = []
117115

118116
if formula is not None:
119117
self.formula = formula
120118
else:
121-
terms = [treatment] + sorted(list(adjustment_set)) + sorted(list(effect_modifiers))
119+
terms = [treatment] + sorted(list(adjustment_set)) + sorted(list(self.effect_modifiers))
122120
self.formula = f"{outcome} ~ {'+'.join(((terms)))}"
123121

124122
for term in self.effect_modifiers:
@@ -170,7 +168,6 @@ def estimate(self, data: pd.DataFrame, adjustment_config=None) -> RegressionResu
170168
:param data: A pandas dataframe containing execution data from the system-under-test.
171169
172170
"""
173-
print(data)
174171
if adjustment_config is None:
175172
adjustment_config = {}
176173

@@ -211,6 +208,11 @@ def estimate_control_treatment(self, bootstrap_size=100) -> tuple[pd.Series, pd.
211208
"a small dataset."
212209
)
213210
return (y.iloc[1], None), (y.iloc[0], None)
211+
except np.linalg.LinAlgError:
212+
logger.warning(
213+
"Singular matrix detected. Confidence intervals not available. Try with a larger data set"
214+
)
215+
return (y.iloc[1], None), (y.iloc[0], None)
214216

215217
# Delta method confidence intervals from
216218
# https://stackoverflow.com/questions/47414842/confidence-interval-of-probability-prediction-from-logistic-regression-statsmode
@@ -272,8 +274,6 @@ def estimate_risk_ratio(self, bootstrap_size=100) -> float:
272274

273275
bootstraps = sorted(list(treatment_bootstraps / control_bootstraps))
274276
bound = ceil((bootstrap_size * 0.05) / 2)
275-
print("bootstraps", bootstraps)
276-
print("bound", bound)
277277
ci_low = bootstraps[bound]
278278
ci_high = bootstraps[bootstrap_size - bound]
279279

tests/testing_tests/test_causal_test_engine.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -240,7 +240,7 @@ def test_execute_observational_causal_forest_estimator_cates(self):
240240
self.minimal_adjustment_set,
241241
"C",
242242
self.causal_test_engine.scenario_execution_data_df,
243-
effect_modifiers={Input("M", int): None},
243+
effect_modifiers={"M": None},
244244
)
245245
causal_test_result = self.causal_test_engine.execute_test(
246246
estimation_model, self.causal_test_case, estimate_type="cate"

tests/testing_tests/test_causal_test_outcome.py

Lines changed: 45 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11
import unittest
2-
from causal_testing.testing.causal_test_outcome import ExactValue, SomeEffect
2+
from causal_testing.testing.causal_test_outcome import ExactValue, SomeEffect, Positive, Negative
33
from causal_testing.testing.causal_test_result import CausalTestResult, TestValue
44
from causal_testing.testing.estimators import LinearRegressionEstimator
55

@@ -63,6 +63,50 @@ def test_empty_adjustment_set(self):
6363
),
6464
)
6565

66+
def test_Positive_pass(self):
67+
test_value = TestValue(type="ate", value=5.05)
68+
ctr = CausalTestResult(
69+
estimator=self.estimator,
70+
test_value=test_value,
71+
confidence_intervals=None,
72+
effect_modifier_configuration=None,
73+
)
74+
ev = Positive()
75+
self.assertTrue(ev.apply(ctr))
76+
77+
def test_Positive_fail(self):
78+
test_value = TestValue(type="ate", value=0)
79+
ctr = CausalTestResult(
80+
estimator=self.estimator,
81+
test_value=test_value,
82+
confidence_intervals=None,
83+
effect_modifier_configuration=None,
84+
)
85+
ev = Positive()
86+
self.assertFalse(ev.apply(ctr))
87+
88+
def test_Negative_pass(self):
89+
test_value = TestValue(type="ate", value=-5.05)
90+
ctr = CausalTestResult(
91+
estimator=self.estimator,
92+
test_value=test_value,
93+
confidence_intervals=None,
94+
effect_modifier_configuration=None,
95+
)
96+
ev = Negative()
97+
self.assertTrue(ev.apply(ctr))
98+
99+
def test_Negative_fail(self):
100+
test_value = TestValue(type="ate", value=0)
101+
ctr = CausalTestResult(
102+
estimator=self.estimator,
103+
test_value=test_value,
104+
confidence_intervals=None,
105+
effect_modifier_configuration=None,
106+
)
107+
ev = Negative()
108+
self.assertFalse(ev.apply(ctr))
109+
66110
def test_exactValue_pass(self):
67111
test_value = TestValue(type="ate", value=5.05)
68112
ctr = CausalTestResult(

tests/testing_tests/test_estimators.py

Lines changed: 25 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -76,40 +76,44 @@ class TestLogisticRegressionEstimator(unittest.TestCase):
7676

7777
@classmethod
7878
def setUpClass(cls) -> None:
79-
cls.scarf_df = pd.DataFrame(
80-
[
81-
{"length_in": 55, "completed": 1},
82-
{"length_in": 55, "completed": 1},
83-
{"length_in": 55, "completed": 1},
84-
{"length_in": 60, "completed": 1},
85-
{"length_in": 60, "completed": 0},
86-
{"length_in": 70, "completed": 1},
87-
{"length_in": 70, "completed": 0},
88-
{"length_in": 82, "completed": 1},
89-
{"length_in": 82, "completed": 0},
90-
{"length_in": 82, "completed": 0},
91-
{"length_in": 82, "completed": 0},
92-
]
93-
)
79+
cls.scarf_df = pd.DataFrame([
80+
{ 'length_in': 55, 'large_gauge': 1, 'color': 'orange', 'completed': 1 },
81+
{ 'length_in': 55, 'large_gauge': 0, 'color': 'orange', 'completed': 1 },
82+
{ 'length_in': 55, 'large_gauge': 0, 'color': 'brown', 'completed': 1 },
83+
{ 'length_in': 60, 'large_gauge': 0, 'color': 'brown', 'completed': 1 },
84+
{ 'length_in': 60, 'large_gauge': 0, 'color': 'grey', 'completed': 0 },
85+
{ 'length_in': 70, 'large_gauge': 0, 'color': 'grey', 'completed': 1 },
86+
{ 'length_in': 70, 'large_gauge': 0, 'color': 'orange', 'completed': 0 },
87+
{ 'length_in': 82, 'large_gauge': 1, 'color': 'grey', 'completed': 1 },
88+
{ 'length_in': 82, 'large_gauge': 0, 'color': 'brown', 'completed': 0 },
89+
{ 'length_in': 82, 'large_gauge': 0, 'color': 'orange', 'completed': 0 },
90+
{ 'length_in': 82, 'large_gauge': 1, 'color': 'brown', 'completed': 0 },
91+
])
9492

9593
def test_ate(self):
96-
df = self.scarf_df
94+
df = self.scarf_df.copy()
9795
logistic_regression_estimator = LogisticRegressionEstimator("length_in", 65, 55, set(), "completed", df)
9896
ate, _ = logistic_regression_estimator.estimate_ate()
9997
self.assertEqual(round(ate, 4), -0.1987)
10098

10199
def test_risk_ratio(self):
102-
df = self.scarf_df
100+
df = self.scarf_df.copy()
103101
logistic_regression_estimator = LogisticRegressionEstimator("length_in", 65, 55, set(), "completed", df)
104102
rr, _ = logistic_regression_estimator.estimate_risk_ratio()
105103
self.assertEqual(round(rr, 4), 0.7664)
106104

107105
def test_odds_ratio(self):
108-
df = self.scarf_df
106+
df = self.scarf_df.copy()
109107
logistic_regression_estimator = LogisticRegressionEstimator("length_in", 65, 55, set(), "completed", df)
110108
odds = logistic_regression_estimator.estimate_unit_odds_ratio()
111109
self.assertEqual(round(odds, 4), 0.8948)
112110

111+
def test_ate_effect_modifiers(self):
112+
df = self.scarf_df.copy()
113+
logistic_regression_estimator = LogisticRegressionEstimator("length_in", 65, 55, set(), "completed", df, effect_modifiers={"large_gauge": 0})
114+
ate, _ = logistic_regression_estimator.estimate_ate()
115+
self.assertEqual(round(ate, 4), -0.3388)
116+
113117

114118
class TestInstrumentalVariableEstimator(unittest.TestCase):
115119
"""
@@ -368,7 +372,7 @@ def test_program_15_ate(self):
368372
"smokeyrs",
369373
}
370374
causal_forest = CausalForestEstimator(
371-
"qsmk", 1, 0, covariates, "wt82_71", df, {Input("smokeintensity", int): 40}
375+
"qsmk", 1, 0, covariates, "wt82_71", df, {"smokeintensity": 40}
372376
)
373377
ate, _ = causal_forest.estimate_ate()
374378
self.assertGreater(round(ate, 1), 2.5)
@@ -395,7 +399,7 @@ def test_program_15_cate(self):
395399
"smokeyrs",
396400
}
397401
causal_forest = CausalForestEstimator(
398-
"qsmk", 1, 0, covariates, "wt82_71", smoking_intensity_5_and_40_df, {Input("smokeintensity", int): 40}
402+
"qsmk", 1, 0, covariates, "wt82_71", smoking_intensity_5_and_40_df, {"smokeintensity": 40}
399403
)
400404
cates_df, _ = causal_forest.estimate_cates()
401405
self.assertGreater(cates_df["cate"].mean(), 0)
@@ -415,7 +419,7 @@ def test_X1_effect(self):
415419
"""When we fix the value of X2 to 0, the effect of X1 on Y should become ~2 (because X2 terms are cancelled)."""
416420
x2 = Input("X2", float)
417421
lr_model = LinearRegressionEstimator(
418-
"X1", 1, 0, {"X2"}, "Y", effect_modifiers={x2: 0}, formula="Y ~ X1 + X2 + (X1 * X2)", df=self.df
422+
"X1", 1, 0, {"X2"}, "Y", effect_modifiers={x2.name: 0}, formula="Y ~ X1 + X2 + (X1 * X2)", df=self.df
419423
)
420424
test_results = lr_model.estimate_ate()
421425
ate = test_results[0]

0 commit comments

Comments
 (0)