@@ -172,6 +172,18 @@ def d2logp(self, vars=None):
         """Compiled log probability density hessian function"""
         return self.model.fn(hessian(self.logpt, vars))
 
+    @property
+    def logp_nojac(self):
+        return self.model.fn(self.logp_nojact)
+
+    def dlogp_nojac(self, vars=None):
+        """Compiled log density gradient function, without jacobian terms."""
+        return self.model.fn(gradient(self.logp_nojact, vars))
+
+    def d2logp_nojac(self, vars=None):
+        """Compiled log density hessian function, without jacobian terms."""
+        return self.model.fn(hessian(self.logp_nojact, vars))
+
     @property
     def fastlogp(self):
         """Compiled log probability density function"""
@@ -185,13 +197,36 @@ def fastd2logp(self, vars=None):
         """Compiled log probability density hessian function"""
         return self.model.fastfn(hessian(self.logpt, vars))
 
+    @property
+    def fastlogp_nojac(self):
+        return self.model.fastfn(self.logp_nojact)
+
+    def fastdlogp_nojac(self, vars=None):
+        """Compiled log density gradient function, without jacobian terms."""
+        return self.model.fastfn(gradient(self.logp_nojact, vars))
+
+    def fastd2logp_nojac(self, vars=None):
+        """Compiled log density hessian function, without jacobian terms."""
+        return self.model.fastfn(hessian(self.logp_nojact, vars))
+
     @property
     def logpt(self):
         """Theano scalar of log-probability of the model"""
         if getattr(self, 'total_size', None) is not None:
-            logp = tt.sum(self.logp_elemwiset) * self.scaling
+            logp = self.logp_sum_unscaledt * self.scaling
+        else:
+            logp = self.logp_sum_unscaledt
+        if self.name is not None:
+            logp.name = '__logp_%s' % self.name
+        return logp
+
+    @property
+    def logp_nojact(self):
+        """Theano scalar of log-probability, excluding jacobian terms."""
+        if getattr(self, 'total_size', None) is not None:
+            logp = tt.sum(self.logp_nojac_unscaledt) * self.scaling
         else:
-            logp = tt.sum(self.logp_elemwiset)
+            logp = tt.sum(self.logp_nojac_unscaledt)
         if self.name is not None:
             logp.name = '__logp_%s' % self.name
         return logp
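To see what `logpt` versus `logp_nojact` means in practice: for a transformed variable, `logpt` carries the log-jacobian of the transform while `logp_nojact` omits it. A small illustrative sketch (hypothetical usage of the new compiled helpers):

```python
import pymc3 as pm

with pm.Model() as model:
    # HalfNormal is log-transformed internally, so its logp
    # includes a jacobian term for the change of variables.
    sigma = pm.HalfNormal('sigma', sd=1.0)

point = model.test_point
print(model.logp(point))        # density including the jacobian
print(model.logp_nojac(point))  # same density, jacobian omitted
```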
@@ -626,9 +661,26 @@ def logp_dlogp_function(self, grad_vars=None, **kwargs):
     def logpt(self):
         """Theano scalar of log-probability of the model"""
         with self:
-            factors = [var.logpt for var in self.basic_RVs] + self.potentials
-            logp = tt.add(*map(tt.sum, factors))
-            logp.name = '__logp'
+            factors = [var.logpt for var in self.basic_RVs]
+            logp_factors = tt.sum(factors)
+            logp_potentials = tt.sum([tt.sum(pot) for pot in self.potentials])
+            logp = logp_factors + logp_potentials
+            if self.name:
+                logp.name = '__logp_%s' % self.name
+            else:
+                logp.name = '__logp'
+            return logp
+
+    @property
+    def logp_nojact(self):
+        """Theano scalar of log-probability of the model, excluding jacobian terms."""
+        with self:
+            factors = [var.logp_nojact for var in self.basic_RVs] + self.potentials
+            logp = tt.sum([tt.sum(factor) for factor in factors])
+            if self.name:
+                logp.name = '__logp_nojac_%s' % self.name
+            else:
+                logp.name = '__logp_nojac'
             return logp
 
     @property
@@ -637,7 +689,7 @@ def varlogpt(self):
         (excluding deterministic)."""
         with self:
             factors = [var.logpt for var in self.vars]
-            return tt.add(*map(tt.sum, factors))
+            return tt.sum(factors)
 
     @property
     def vars(self):
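With the rewrite above, `Model.logpt` reduces the RV factors and the potentials separately, applying `tt.sum` to each potential before adding it in. A short sketch of the resulting behavior (hypothetical example):

```python
import theano.tensor as tt
import pymc3 as pm

with pm.Model() as model:
    x = pm.Normal('x', mu=0., sd=1., shape=3)
    # An array-valued potential; Model.logpt sums it elementwise.
    pm.Potential('ridge', -0.5 * tt.sqr(x))

print(model.logp(model.test_point))
```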
@@ -1069,6 +1121,10 @@ def __init__(self, type=None, owner=None, index=None, name=None,
             self.tag.test_value = np.ones(
                 distribution.shape, distribution.dtype) * distribution.default()
             self.logp_elemwiset = distribution.logp(self)
+            # The logp might need scaling in minibatches.
+            # This is done in `Factor`.
+            self.logp_sum_unscaledt = distribution.logp_sum(self)
+            self.logp_nojac_unscaledt = distribution.logp_nojac(self)
             self.total_size = total_size
             self.model = model
             self.scaling = _get_scaling(total_size, self.shape, self.ndim)
@@ -1172,6 +1228,10 @@ def __init__(self, type=None, owner=None, index=None, name=None, data=None,
 
             self.missing_values = data.missing_values
             self.logp_elemwiset = distribution.logp(data)
+            # The logp might need scaling in minibatches.
+            # This is done in `Factor`.
+            self.logp_sum_unscaledt = distribution.logp_sum(data)
+            self.logp_nojac_unscaledt = distribution.logp_nojac(data)
             self.total_size = total_size
             self.model = model
             self.distribution = distribution
@@ -1223,6 +1283,10 @@ def __init__(self, name, data, distribution, total_size=None, model=None):
         self.missing_values = [datum.missing_values for datum in self.data.values()
                                if datum.missing_values is not None]
         self.logp_elemwiset = distribution.logp(**self.data)
+        # The logp might need scaling in minibatches.
+        # This is done in `Factor`.
+        self.logp_sum_unscaledt = distribution.logp_sum(**self.data)
+        self.logp_nojac_unscaledt = distribution.logp_nojac(**self.data)
         self.total_size = total_size
         self.model = model
         self.distribution = distribution
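The `logp_sum_unscaledt` attribute set in these constructors is the plain sum of the elementwise log density; `Factor.logpt` then multiplies it by the scaling factor derived from `total_size`. A sketch of the intended effect, with hypothetical numbers:

```python
import numpy as np
import pymc3 as pm

batch = np.random.randn(10)

with pm.Model() as model:
    # total_size=1000 declares the full dataset size, so the
    # 10-point minibatch logp is scaled by 1000 / 10 = 100.
    y = pm.Normal('y', mu=0., sd=1., observed=batch, total_size=1000)

# y.logpt is y.logp_sum_unscaledt times the scaling factor.
print(y.logpt.eval())
print(y.logp_sum_unscaledt.eval() * 100.)
```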