@@ -162,7 +162,7 @@ To install from source, see [For Developers](#for-developers) section below.
162162 <summary>Orthogonal Random Forests (click to expand)</summary>
163163
164164 ``` Python
165- from econml.ortho_forest import DMLOrthoForest, DROrthoForest
165+ from econml.orf import DMLOrthoForest, DROrthoForest
166166 from econml.sklearn_extensions.linear_model import WeightedLasso, WeightedLassoCV
167167 # Use defaults
168168 est = DMLOrthoForest()
@@ -233,7 +233,7 @@ To install from source, see [For Developers](#for-developers) section below.
233233* Linear final stage
234234
235235``` Python
236- from econml.drlearner import LinearDRLearner
236+ from econml.dr import LinearDRLearner
237237from sklearn.ensemble import GradientBoostingRegressor, GradientBoostingClassifier
238238
239239est = LinearDRLearner(model_propensity = GradientBoostingClassifier(),
@@ -246,7 +246,7 @@ lb, ub = est.effect_interval(X_test, alpha=0.05)
246246* Sparse linear final stage
247247
248248``` Python
249- from econml.drlearner import SparseLinearDRLearner
249+ from econml.dr import SparseLinearDRLearner
250250from sklearn.ensemble import GradientBoostingRegressor, GradientBoostingClassifier
251251
252252est = SparseLinearDRLearner(model_propensity = GradientBoostingClassifier(),
@@ -259,7 +259,7 @@ lb, ub = est.effect_interval(X_test, alpha=0.05)
259259* Nonparametric final stage
260260
261261``` Python
262- from econml.drlearner import ForestDRLearner
262+ from econml.dr import ForestDRLearner
263263from sklearn.ensemble import GradientBoostingRegressor, GradientBoostingClassifier
264264
265265est = ForestDRLearner(model_propensity = GradientBoostingClassifier(),
@@ -276,7 +276,7 @@ lb, ub = est.effect_interval(X_test, alpha=0.05)
276276* Intent to Treat Doubly Robust Learner (discrete instrument, discrete treatment)
277277
278278``` Python
279- from econml.ortho_iv import LinearIntentToTreatDRIV
279+ from econml.iv.dr import LinearIntentToTreatDRIV
280280from sklearn.ensemble import GradientBoostingRegressor, GradientBoostingClassifier
281281from sklearn.linear_model import LinearRegression
282282
@@ -295,7 +295,7 @@ lb, ub = est.effect_interval(X_test, alpha=0.05) # OLS confidence intervals
295295
296296``` Python
297297import keras
298- from econml.deepiv import DeepIVEstimator
298+ from econml.iv.nnet import DeepIV
299299
300300 treatment_model = keras.Sequential([keras.layers.Dense(128, activation='relu', input_shape=(2,)),
301301 keras.layers.Dropout(0.17),
@@ -310,11 +310,11 @@ response_model = keras.Sequential([keras.layers.Dense(128, activation='relu', in
310310 keras.layers.Dense(32, activation='relu'),
311311 keras.layers.Dropout(0.17),
312312 keras.layers.Dense(1)])
313- est = DeepIVEstimator (n_components = 10 , # Number of gaussians in the mixture density networks)
314- m = lambda z , x : treatment_model(keras.layers.concatenate([z, x])), # Treatment model
315- h = lambda t , x : response_model(keras.layers.concatenate([t, x])), # Response model
316- n_samples = 1 # Number of samples used to estimate the response
317- )
313+ est = DeepIV(n_components=10, # Number of gaussians in the mixture density networks
314+              m=lambda z, x: treatment_model(keras.layers.concatenate([z, x])), # Treatment model
315+              h=lambda t, x: response_model(keras.layers.concatenate([t, x])), # Response model
316+              n_samples=1 # Number of samples used to estimate the response
317+              )
318318est.fit(Y, T, X = X, Z = Z) # Z -> instrumental variables
319319treatment_effects = est.effect(X_test)
320320```
0 commit comments