@@ -206,8 +206,8 @@ function DynamicPPL.initialstep(
     end
     theta = vi[:]

-    # Cache current log density.
-    log_density_old = getloglikelihood(vi)
+    # Cache current log density. We will reuse this if the transition is rejected.
+    logp_old = DynamicPPL.getlogp(vi)

     # Find good eps if not provided one
     if iszero(spl.alg.ϵ)
@@ -232,15 +232,21 @@ function DynamicPPL.initialstep(
         )
     end

-    # Update `vi` based on acceptance
+    # Update VarInfo based on acceptance
     if t.stat.is_accept
         vi = DynamicPPL.unflatten(vi, t.z.θ)
-        # TODO(mhauru) Is setloglikelihood!! the right thing here?
-        vi = setloglikelihood!!(vi, t.stat.log_density)
+        # Re-evaluate to calculate log probability density.
+        # TODO(penelopeysm): This seems a little bit wasteful. The need for
+        # this stems from the fact that the HMC sampler doesn't keep track of
+        # the prior and likelihood separately, but rather a single log-joint,
+        # which we have no way to decompose back into prior and likelihood.
+        # I don't immediately see how to solve this without re-evaluating
+        # the model.
+        _, vi = DynamicPPL.evaluate!!(model, vi)
     else
+        # Reset VarInfo back to its original state.
         vi = DynamicPPL.unflatten(vi, theta)
-        # TODO(mhauru) Is setloglikelihood!! the right thing here?
-        vi = setloglikelihood!!(vi, log_density_old)
+        vi = DynamicPPL.setlogp!!(vi, logp_old)
     end

     transition = Transition(model, vi, t)
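
For context, a minimal self-contained sketch of the caching pattern this diff settles on (not part of the PR; the `demo` model is invented for illustration, and it assumes the `getlogp`/`setlogp!!`/`evaluate!!` API that the added lines use): `getlogp` reads the log density stored in the `VarInfo`, `setlogp!!` restores a cached value cheaply on rejection, and `evaluate!!` re-runs the model on acceptance, since the sampler only carries the combined log-joint and cannot hand back a prior/likelihood split.

```julia
using DynamicPPL, Distributions

# Toy model, purely for illustration.
@model function demo()
    x ~ Normal(0, 1)    # prior
    1.0 ~ Normal(x, 1)  # likelihood of a single observation
end

model = demo()
vi = DynamicPPL.VarInfo(model)     # constructing the VarInfo evaluates the model once
logp_old = DynamicPPL.getlogp(vi)  # cache the current log density

# Rejected transition: restore the cached value; no model evaluation needed.
vi = DynamicPPL.setlogp!!(vi, logp_old)

# Accepted transition: re-evaluate the model to recompute the log density
# at the new parameter values. evaluate!! returns (return value, varinfo).
_, vi = DynamicPPL.evaluate!!(model, vi)
```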