@@ -1494,7 +1494,9 @@ class InversePropensityWeighting(ExperimentalDesign, PropensityDataValidator):
1494
1494
A string denoting the outcome variable in data to be reweighted
1495
1495
:param weighting_scheme:
1496
1496
A string denoting which weighting scheme to use among: 'raw', 'robust',
1497
- 'doubly robust'
1497
+ 'doubly robust' or 'overlap'. See Aronow and Miller "Foundations
1498
+ of Agnostic Statistics" for discussion and computation of these
1499
+ weighting schemes.
1498
1500
:param model:
1499
1501
A PyMC model
1500
1502
@@ -1548,6 +1550,9 @@ def __init__(
1548
1550
self .model .fit (X = self .X , t = self .t , coords = COORDS )
1549
1551
1550
1552
def make_robust_adjustments (self , ps ):
1553
+ """ This estimator is discussed in Aronow
1554
+ and Miller's book as being related to the
1555
+ Horvitz-Thompson method """
1551
1556
X = pd .DataFrame (self .X , columns = self .labels )
1552
1557
X ["ps" ] = ps
1553
1558
X [self .outcome_variable ] = self .y
@@ -1565,6 +1570,9 @@ def make_robust_adjustments(self, ps):
1565
1570
return weighted_outcome0 , weighted_outcome1 , n_ntrt , n_trt
1566
1571
1567
1572
def make_raw_adjustments (self , ps ):
1573
+ """ This estimator is discussed in Aronow and
1574
+ Miller as the simplest base form of
1575
+ inverse propensity weighting schemes"""
1568
1576
X = pd .DataFrame (self .X , columns = self .labels )
1569
1577
X ["ps" ] = ps
1570
1578
X [self .outcome_variable ] = self .y
@@ -1581,6 +1589,10 @@ def make_raw_adjustments(self, ps):
1581
1589
return weighted_outcome0 , weighted_outcome1 , n_ntrt , n_trt
1582
1590
1583
1591
def make_overlap_adjustments (self , ps ):
1592
+ """This weighting scheme was adapted from
1593
+ Lucy D’Agostino McGowan's blog on
1594
+ Propensity Score Weights referenced in
1595
+ the primary CausalPy explainer notebook"""
1584
1596
X = pd .DataFrame (self .X , columns = self .labels )
1585
1597
X ["ps" ] = ps
1586
1598
X [self .outcome_variable ] = self .y
@@ -1597,6 +1609,12 @@ def make_overlap_adjustments(self, ps):
1597
1609
return weighted_outcome0 , weighted_outcome1 , n_ntrt , n_trt
1598
1610
1599
1611
def make_doubly_robust_adjustment (self , ps ):
1612
+ """ The doubly robust weighting scheme is also
1613
+ discussed in Aronow and Miller, but a bit more generally
1614
+ than our implementation here. Here we have specified
1615
+ the outcome model to be a simple OLS model.
1616
+ In this way the compromise between the outcome model and
1617
+ the propensity model is always done with OLS."""
1600
1618
X = pd .DataFrame (self .X , columns = self .labels )
1601
1619
X ["ps" ] = ps
1602
1620
t = self .t .flatten ()
@@ -1722,8 +1740,9 @@ def make_hists(idata, i, axs, method=method):
1722
1740
0.9 , linestyle = "--" , label = "Hi Extreme Propensity Scores" , color = "black"
1723
1741
)
1724
1742
axs [0 ].set_title (
1725
- "Draws from the Posterior \n Propensity Scores Distribution" , fontsize = 20
1743
+ "Weighted and Unweighted Draws from the Posterior \n Propensity Scores Distribution" , fontsize = 20
1726
1744
)
1745
+ axs [0 ].set_ylabel ("Counts of Observations" )
1727
1746
axs [0 ].set_xlabel ("Propensity Scores" )
1728
1747
custom_lines = [
1729
1748
Line2D ([0 ], [0 ], color = "skyblue" , lw = 2 ),
0 commit comments