@@ -236,12 +236,12 @@ function DynamicPPL.initialstep(
236
236
if t.stat.is_accept
237
237
vi = DynamicPPL.unflatten(vi, t.z.θ)
238
238
# Re-evaluate to calculate log probability density.
239
- # TODO (penelopeysm): This seems a little bit wasteful. The need for
240
- # this stems from the fact that the HMC sampler doesn't keep track of
241
- # prior and likelihood separately but rather a single log-joint, for
242
- # which we have no way to decompose this back into prior and
243
- # likelihood. I don't immediately see how to solve this without
244
- # re-evaluating the model.
239
+ # TODO (penelopeysm): This seems a little bit wasteful. Unfortunately,
240
+ # even though `t.stat.log_density` contains some kind of logp, this
241
+ # doesn't track prior and likelihood separately but rather a single
242
+ # log-joint (and in linked space), so we have no way to decompose
243
+ # this back into prior and likelihood. I don't immediately see how to
244
+ # solve this without re-evaluating the model.
245
245
_, vi = DynamicPPL.evaluate!!(model, vi)
246
246
else
247
247
# Reset VarInfo back to its original state.
@@ -291,8 +291,9 @@ function AbstractMCMC.step(
291
291
vi = state.vi
292
292
if t.stat.is_accept
293
293
vi = DynamicPPL.unflatten(vi, t.z.θ)
294
- # TODO (mhauru) Is setloglikelihood! the right thing here?
295
- vi = setloglikelihood!!(vi, t.stat.log_density)
294
+ # Re-evaluate to calculate log probability density.
295
+ # TODO (penelopeysm): This seems a little bit wasteful. See note above.
296
+ _, vi = DynamicPPL.evaluate!!(model, vi)
296
297
end
297
298
298
299
# Compute next transition and state.
0 commit comments