# did.py
import warnings
import numpy as np
from sklearn.utils import check_X_y
from sklearn.utils.multiclass import type_of_target
from doubleml.data.did_data import DoubleMLDIDData
from doubleml.double_ml import DoubleML
from doubleml.double_ml_score_mixins import LinearScoreMixin
from doubleml.utils._checks import _check_finite_predictions, _check_is_propensity, _check_score
from doubleml.utils._estimation import _dml_cv_predict, _dml_tune, _get_cond_smpls
# TODO: Remove DoubleMLDIDData with version 0.12.0
class DoubleMLDID(LinearScoreMixin, DoubleML):
"""Double machine learning for difference-in-differences models with panel data (two time periods).
Parameters
----------
obj_dml_data : :class:`DoubleMLDIDData` object
The :class:`DoubleMLDIDData` object providing the data and specifying the variables for the causal model.
ml_g : estimator implementing ``fit()`` and ``predict()``
A machine learner implementing ``fit()`` and ``predict()`` methods (e.g.
:py:class:`sklearn.ensemble.RandomForestRegressor`) for the nuisance function :math:`g_0(d,X) = E[Y_1-Y_0|D=d, X]`.
For a binary outcome variable :math:`Y` (with values 0 and 1), a classifier implementing ``fit()`` and
``predict_proba()`` can also be specified. If :py:func:`sklearn.base.is_classifier` returns ``True``,
``predict_proba()`` is used otherwise ``predict()``.
ml_m : classifier implementing ``fit()`` and ``predict_proba()``
A machine learner implementing ``fit()`` and ``predict_proba()`` methods (e.g.
:py:class:`sklearn.ensemble.RandomForestClassifier`) for the nuisance function :math:`m_0(X) = P[D=1|X]`.
Only relevant for ``score='observational'``.
n_folds : int
Number of folds.
Default is ``5``.
n_rep : int
Number of repetitions for the sample splitting.
Default is ``1``.
score : str
A str (``'observational'`` or ``'experimental'``) specifying the score function.
The ``'experimental'`` score refers to an A/B setting, where the treatment is independent
of the pretreatment covariates.
Default is ``'observational'``.
in_sample_normalization : bool
Indicates whether to use a slightly different normalization from Sant'Anna and Zhao (2020).
Default is ``True``.
clipping_threshold : float
The threshold used for clipping the estimated propensity scores.
Default is ``1e-2``.
draw_sample_splitting : bool
Indicates whether the sample splitting should be drawn during initialization of the object.
Default is ``True``.
Examples
--------
>>> import numpy as np
>>> import doubleml as dml
>>> from doubleml.did.datasets import make_did_SZ2020
>>> from sklearn.ensemble import RandomForestRegressor, RandomForestClassifier
>>> np.random.seed(42)
>>> ml_g = RandomForestRegressor(n_estimators=100, max_depth=5, min_samples_leaf=5)
>>> ml_m = RandomForestClassifier(n_estimators=100, max_depth=5, min_samples_leaf=5)
>>> data = make_did_SZ2020(n_obs=500, return_type='DataFrame')
>>> obj_dml_data = dml.DoubleMLDIDData(data, 'y', 'd')
>>> dml_did_obj = dml.DoubleMLDID(obj_dml_data, ml_g, ml_m)
>>> dml_did_obj.fit().summary
coef std err t P>|t| 2.5 % 97.5 %
d -2.840718 1.760386 -1.613691 0.106595 -6.291011 0.609575
"""
def __init__(
self,
obj_dml_data,
ml_g,
ml_m=None,
n_folds=5,
n_rep=1,
score="observational",
in_sample_normalization=True,
clipping_threshold=1e-2,
draw_sample_splitting=True,
):
warnings.warn(
"DoubleMLDID is deprecated and will be removed with version 0.12.0. Please use DoubleMLDIDBinary instead.",
DeprecationWarning,
stacklevel=2,
)
super().__init__(obj_dml_data, n_folds, n_rep, score, draw_sample_splitting)
self._check_data(self._dml_data)
valid_scores = ["observational", "experimental"]
_check_score(self.score, valid_scores, allow_callable=False)
self._in_sample_normalization = in_sample_normalization
if not isinstance(self.in_sample_normalization, bool):
raise TypeError(
"in_sample_normalization indicator has to be boolean. "
+ f"Object of type {str(type(self.in_sample_normalization))} passed."
)
# set stratification for resampling
self._strata = self._dml_data.d
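# stratifying on the treatment indicator keeps the shares of treated and control units comparable across folds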
if draw_sample_splitting:
self.draw_sample_splitting()
# check learners
ml_g_is_classifier = self._check_learner(ml_g, "ml_g", regressor=True, classifier=True)
if self.score == "observational":
_ = self._check_learner(ml_m, "ml_m", regressor=False, classifier=True)
self._learner = {"ml_g": ml_g, "ml_m": ml_m}
else:
assert self.score == "experimental"
if ml_m is not None:
warnings.warn(
(
'A learner ml_m has been provided for score = "experimental" but will be ignored. '
"A learner ml_m is not required for estimation."
)
)
self._learner = {"ml_g": ml_g}
if ml_g_is_classifier:
if obj_dml_data.binary_outcome:
self._predict_method = {"ml_g": "predict_proba"}
else:
raise ValueError(
f"The ml_g learner {str(ml_g)} was identified as classifier "
"but the outcome variable is not binary with values 0 and 1."
)
else:
self._predict_method = {"ml_g": "predict"}
if "ml_m" in self._learner:
self._predict_method["ml_m"] = "predict_proba"
self._initialize_ml_nuisance_params()
self._clipping_threshold = clipping_threshold
self._sensitivity_implemented = True
self._external_predictions_implemented = True
@property
def in_sample_normalization(self):
"""
Indicates whether the in-sample normalization of weights is used.
"""
return self._in_sample_normalization
@property
def clipping_threshold(self):
"""
Specifies the clipping threshold used for the estimated propensity scores.
"""
return self._clipping_threshold
def _initialize_ml_nuisance_params(self):
if self.score == "observational":
valid_learner = ["ml_g0", "ml_g1", "ml_m"]
else:
assert self.score == "experimental"
valid_learner = ["ml_g0", "ml_g1"]
self._params = {learner: {key: [None] * self.n_rep for key in self._dml_data.d_cols} for learner in valid_learner}
def _check_data(self, obj_dml_data):
if not isinstance(obj_dml_data, DoubleMLDIDData):
raise TypeError(
"For repeated outcomes the data must be of DoubleMLDIDData type. "
f"{str(obj_dml_data)} of type {str(type(obj_dml_data))} was passed."
)
if obj_dml_data.z_cols is not None:
raise ValueError(
"Incompatible data. " + " and ".join(obj_dml_data.z_cols) + " have been set as instrumental variable(s). "
"At the moment there are no DiD models with instruments implemented."
)
one_treat = obj_dml_data.n_treat == 1
binary_treat = type_of_target(obj_dml_data.d) == "binary"
zero_one_treat = np.all((np.power(obj_dml_data.d, 2) - obj_dml_data.d) == 0)
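# d**2 - d == 0 holds element-wise exactly when every entry of d is either 0 or 1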
if not (one_treat & binary_treat & zero_one_treat):
raise ValueError(
"Incompatible data. "
"To fit a DID model with DML "
"exactly one binary variable with values 0 and 1 "
"needs to be specified as treatment variable."
)
return
def _nuisance_est(self, smpls, n_jobs_cv, external_predictions, return_models=False):
x, y = check_X_y(self._dml_data.x, self._dml_data.y, ensure_all_finite=False)
x, d = check_X_y(x, self._dml_data.d, ensure_all_finite=False)
# nuisance g
# get train indices for d == 0
smpls_d0, smpls_d1 = _get_cond_smpls(smpls, d)
# nuisance g for d==0
if external_predictions["ml_g0"] is not None:
ml_g0_targets = np.full_like(y, np.nan, dtype="float64")
ml_g0_targets[d == 0] = y[d == 0]
g_hat0 = {"preds": external_predictions["ml_g0"], "targets": ml_g0_targets, "models": None}
else:
g_hat0 = _dml_cv_predict(
self._learner["ml_g"],
x,
y,
smpls=smpls_d0,
n_jobs=n_jobs_cv,
est_params=self._get_params("ml_g0"),
method=self._predict_method["ml_g"],
return_models=return_models,
)
_check_finite_predictions(g_hat0["preds"], self._learner["ml_g"], "ml_g", smpls)
# adjust target values to consider only compatible subsamples
g_hat0["targets"] = g_hat0["targets"].astype(float)
g_hat0["targets"][d == 1] = np.nan
# nuisance g for d==1
if external_predictions["ml_g1"] is not None:
ml_g1_targets = np.full_like(y, np.nan, dtype="float64")
ml_g1_targets[d == 1] = y[d == 1]
g_hat1 = {"preds": external_predictions["ml_g1"], "targets": ml_g1_targets, "models": None}
else:
g_hat1 = _dml_cv_predict(
self._learner["ml_g"],
x,
y,
smpls=smpls_d1,
n_jobs=n_jobs_cv,
est_params=self._get_params("ml_g1"),
method=self._predict_method["ml_g"],
return_models=return_models,
)
_check_finite_predictions(g_hat1["preds"], self._learner["ml_g"], "ml_g", smpls)
# adjust target values to consider only compatible subsamples
g_hat1["targets"] = g_hat1["targets"].astype(float)
g_hat1["targets"][d == 0] = np.nan
# only relevant for observational setting
m_hat = {"preds": None, "targets": None, "models": None}
if self.score == "observational":
# nuisance m
if external_predictions["ml_m"] is not None:
m_hat = {"preds": external_predictions["ml_m"], "targets": d, "models": None}
else:
m_hat = _dml_cv_predict(
self._learner["ml_m"],
x,
d,
smpls=smpls,
n_jobs=n_jobs_cv,
est_params=self._get_params("ml_m"),
method=self._predict_method["ml_m"],
return_models=return_models,
)
_check_finite_predictions(m_hat["preds"], self._learner["ml_m"], "ml_m", smpls)
_check_is_propensity(m_hat["preds"], self._learner["ml_m"], "ml_m", smpls, eps=1e-12)
m_hat["preds"] = np.clip(m_hat["preds"], self.clipping_threshold, 1 - self.clipping_threshold)
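# clipping keeps the propensity estimates within [clipping_threshold, 1 - clipping_threshold],
# so that the inverse-propensity terms in the score below stay bounded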
# nuisance estimates of the uncond. treatment prob.
p_hat = np.full_like(d, d.mean(), dtype="float64")
psi_a, psi_b = self._score_elements(y, d, g_hat0["preds"], g_hat1["preds"], m_hat["preds"], p_hat)
psi_elements = {"psi_a": psi_a, "psi_b": psi_b}
preds = {
"predictions": {"ml_g0": g_hat0["preds"], "ml_g1": g_hat1["preds"], "ml_m": m_hat["preds"]},
"targets": {"ml_g0": g_hat0["targets"], "ml_g1": g_hat1["targets"], "ml_m": m_hat["targets"]},
"models": {"ml_g0": g_hat0["models"], "ml_g1": g_hat1["models"], "ml_m": m_hat["models"]},
}
return psi_elements, preds
def _score_elements(self, y, d, g_hat0, g_hat1, m_hat, p_hat):
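# The linear score has the form psi = psi_a * theta + psi_b (see LinearScoreMixin); psi_a collects the
# weight on the parameter and psi_b the doubly robust correction terms. The observational score
# corresponds to a doubly robust DiD score for the ATT (cf. Sant'Anna and Zhao, 2020).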
# calc residuals
resid_d0 = y - g_hat0
if self.score == "observational":
if self.in_sample_normalization:
weight_psi_a = np.divide(d, np.mean(d))
propensity_weight = np.multiply(1.0 - d, np.divide(m_hat, 1.0 - m_hat))
weight_resid_d0 = np.divide(d, np.mean(d)) - np.divide(propensity_weight, np.mean(propensity_weight))
else:
weight_psi_a = np.divide(d, p_hat)
weight_resid_d0 = np.divide(d - m_hat, np.multiply(p_hat, 1.0 - m_hat))
psi_b_1 = np.zeros_like(y)
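# note: in the observational score only g_hat0 enters psi_b (through resid_d0);
# g_hat1 is still estimated and used, e.g., for the sensitivity elements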
else:
assert self.score == "experimental"
if self.in_sample_normalization:
weight_psi_a = np.ones_like(y)
weight_g0 = np.divide(d, np.mean(d)) - 1.0
weight_g1 = 1.0 - np.divide(d, np.mean(d))
weight_resid_d0 = np.divide(d, np.mean(d)) - np.divide(1.0 - d, np.mean(1.0 - d))
else:
weight_psi_a = np.ones_like(y)
weight_g0 = np.divide(d, p_hat) - 1.0
weight_g1 = 1.0 - np.divide(d, p_hat)
weight_resid_d0 = np.divide(d - p_hat, np.multiply(p_hat, 1.0 - p_hat))
psi_b_1 = np.multiply(weight_g0, g_hat0) + np.multiply(weight_g1, g_hat1)
# set score elements
psi_a = -1.0 * weight_psi_a
psi_b = psi_b_1 + np.multiply(weight_resid_d0, resid_d0)
return psi_a, psi_b
def _sensitivity_element_est(self, preds):
y = self._dml_data.y
d = self._dml_data.d
m_hat = preds["predictions"]["ml_m"]
g_hat0 = preds["predictions"]["ml_g0"]
g_hat1 = preds["predictions"]["ml_g1"]
g_hat = np.multiply(d, g_hat1) + np.multiply(1.0 - d, g_hat0)
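# combine the conditional outcome fits according to the observed treatment status;
# sigma2 below is the mean squared residual of this outcome regression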
sigma2_score_element = np.square(y - g_hat)
sigma2 = np.mean(sigma2_score_element)
psi_sigma2 = sigma2_score_element - sigma2
# calc m(W,alpha) and Riesz representer
p_hat = np.mean(d)
if self.score == "observational":
propensity_weight_d0 = np.divide(m_hat, 1.0 - m_hat)
if self.in_sample_normalization:
weight_d0 = np.multiply(1.0 - d, propensity_weight_d0)
mean_weight_d0 = np.mean(weight_d0)
m_alpha = np.multiply(
np.divide(d, p_hat), np.divide(1.0, p_hat) + np.divide(propensity_weight_d0, mean_weight_d0)
)
rr = np.divide(d, p_hat) - np.divide(weight_d0, mean_weight_d0)
else:
m_alpha = np.multiply(np.divide(d, np.square(p_hat)), (1.0 + propensity_weight_d0))
rr = np.divide(d, p_hat) - np.multiply(np.divide(1.0 - d, p_hat), propensity_weight_d0)
else:
assert self.score == "experimental"
# identical with and without in-sample normalization
m_alpha = np.divide(1.0, p_hat) + np.divide(1.0, 1.0 - p_hat)
rr = np.divide(d, p_hat) - np.divide(1.0 - d, 1.0 - p_hat)
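# nu2 is estimated via the doubly robust representation E[2 * m(W, alpha) - rr(W)^2],
# which targets the second moment of the Riesz representer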
nu2_score_element = np.multiply(2.0, m_alpha) - np.square(rr)
nu2 = np.mean(nu2_score_element)
psi_nu2 = nu2_score_element - nu2
element_dict = {
"sigma2": sigma2,
"nu2": nu2,
"psi_sigma2": psi_sigma2,
"psi_nu2": psi_nu2,
"riesz_rep": rr,
}
return element_dict
def _nuisance_tuning(
self,
smpls,
param_grids,
scoring_methods,
n_folds_tune,
n_jobs_cv,
search_mode,
n_iter_randomized_search,
):
x, y = check_X_y(self._dml_data.x, self._dml_data.y, ensure_all_finite=False)
x, d = check_X_y(x, self._dml_data.d, ensure_all_finite=False)
# get train indices for d == 0 and d == 1
smpls_d0, smpls_d1 = _get_cond_smpls(smpls, d)
if scoring_methods is None:
scoring_methods = {"ml_g": None, "ml_m": None}
train_inds = [train_index for (train_index, _) in smpls]
train_inds_d0 = [train_index for (train_index, _) in smpls_d0]
train_inds_d1 = [train_index for (train_index, _) in smpls_d1]
g0_tune_res = _dml_tune(
y,
x,
train_inds_d0,
self._learner["ml_g"],
param_grids["ml_g"],
scoring_methods["ml_g"],
n_folds_tune,
n_jobs_cv,
search_mode,
n_iter_randomized_search,
learner_name="ml_g",
)
g1_tune_res = _dml_tune(
y,
x,
train_inds_d1,
self._learner["ml_g"],
param_grids["ml_g"],
scoring_methods["ml_g"],
n_folds_tune,
n_jobs_cv,
search_mode,
n_iter_randomized_search,
learner_name="ml_g",
)
g0_best_params = [xx.best_params_ for xx in g0_tune_res]
g1_best_params = [xx.best_params_ for xx in g1_tune_res]
if self.score == "observational":
m_tune_res = _dml_tune(
d,
x,
train_inds,
self._learner["ml_m"],
param_grids["ml_m"],
scoring_methods["ml_m"],
n_folds_tune,
n_jobs_cv,
search_mode,
n_iter_randomized_search,
learner_name="ml_m",
)
m_best_params = [xx.best_params_ for xx in m_tune_res]
params = {"ml_g0": g0_best_params, "ml_g1": g1_best_params, "ml_m": m_best_params}
tune_res = {"g0_tune": g0_tune_res, "g1_tune": g1_tune_res, "m_tune": m_tune_res}
else:
params = {"ml_g0": g0_best_params, "ml_g1": g1_best_params}
tune_res = {"g0_tune": g0_tune_res, "g1_tune": g1_tune_res}
res = {"params": params, "tune_res": tune_res}
return res
def _nuisance_tuning_optuna(
self,
param_grids,
scoring_methods,
n_folds_tune,
n_jobs_cv,
optuna_settings,
):
"""
Optuna-based hyperparameter tuning for DID nuisance models.
Performs tuning once on the whole dataset using cross-validation,
returning the same optimal parameters for all folds.
"""
from ..utils._tune_optuna import _dml_tune_optuna
x, y = check_X_y(self._dml_data.x, self._dml_data.y, ensure_all_finite=False)
x, d = check_X_y(x, self._dml_data.d, ensure_all_finite=False)
if scoring_methods is None:
if self.score == "observational":
scoring_methods = {"ml_g0": None, "ml_g1": None, "ml_m": None}
else:
scoring_methods = {"ml_g0": None, "ml_g1": None}
# Separate data by treatment status for conditional mean tuning
mask_d0 = d == 0
mask_d1 = d == 1
x_d0 = x[mask_d0, :]
y_d0 = y[mask_d0]
train_inds_d0 = [np.arange(x_d0.shape[0])]
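# a single "fold" spanning the whole d == 0 subsample; the tuning itself presumably
# cross-validates internally in _dml_tune_optuna via n_folds_tune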
g0_tune_res = _dml_tune_optuna(
y_d0,
x_d0,
train_inds_d0,
self._learner["ml_g"],
param_grids["ml_g0"],
scoring_methods["ml_g0"],
n_folds_tune,
n_jobs_cv,
optuna_settings,
learner_name="ml_g0",
)
x_d1 = x[mask_d1, :]
y_d1 = y[mask_d1]
train_inds_d1 = [np.arange(x_d1.shape[0])]
g1_tune_res = _dml_tune_optuna(
y_d1,
x_d1,
train_inds_d1,
self._learner["ml_g"],
param_grids["ml_g1"],
scoring_methods["ml_g1"],
n_folds_tune,
n_jobs_cv,
optuna_settings,
learner_name="ml_g1",
)
# Tune propensity score on full dataset for observational score
full_train_inds = [np.arange(x.shape[0])]
m_tune_res = None
if self.score == "observational":
m_tune_res = _dml_tune_optuna(
d,
x,
full_train_inds,
self._learner["ml_m"],
param_grids["ml_m"],
scoring_methods["ml_m"],
n_folds_tune,
n_jobs_cv,
optuna_settings,
learner_name="ml_m",
)
g0_best_params = g0_tune_res.best_params_
g1_best_params = g1_tune_res.best_params_
if self.score == "observational":
m_best_params = m_tune_res.best_params_
params = {"ml_g0": g0_best_params, "ml_g1": g1_best_params, "ml_m": m_best_params}
tune_res = {"g0_tune": g0_tune_res, "g1_tune": g1_tune_res, "m_tune": m_tune_res}
else:
params = {"ml_g0": g0_best_params, "ml_g1": g1_best_params}
tune_res = {"g0_tune": g0_tune_res, "g1_tune": g1_tune_res}
return {"params": params, "tune_res": tune_res}
def sensitivity_benchmark(self, benchmarking_set, fit_args=None):
"""
Computes a benchmark for a given set of features.
Returns a DataFrame containing the corresponding values for cf_y, cf_d, rho and the change in estimates.
Parameters
----------
benchmarking_set : list
List of features to be used for benchmarking.
fit_args : dict, optional
Additional arguments for the fit method.
Default is None.
Returns
-------
benchmark_results : pandas.DataFrame
Benchmark results.
"""
if self.score == "experimental":
warnings.warn(
"Sensitivity benchmarking for experimental score may not be meaningful. "
"Consider using score='observational' for conditional treatment assignment.",
UserWarning,
)
return super().sensitivity_benchmark(benchmarking_set, fit_args)