Commit bfa1a1a

refactor: change comments for clarity
Parent: 6b70a5f


pymc_extras/inference/laplace.py

Lines changed: 3 additions & 2 deletions
@@ -106,7 +106,7 @@ def get_conditional_gaussian_approximation(
     jac = pytensor.gradient.grad(f_x, x)
     hess = pytensor.gradient.jacobian(jac.flatten(), x)
 
-    # Component of log(p(x | y, params)) which depends on x
+    # log(p(x | y, params)) only including terms that depend on x for the minimization step (logdet(Q) ignored as it is a constant wrt x)
     log_x_posterior = f_x - 0.5 * (x - mu).T @ Q @ (x - mu)
 
     # Maximize log(p(x | y, params)) wrt x
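For intuition, the objective in this hunk is the x-dependent part of log p(x | y, params): the log-likelihood f_x plus the Gaussian prior term -0.5 (x - mu)^T Q (x - mu), with the constant 0.5 logdet(Q) dropped. A minimal NumPy/SciPy sketch of the same mode-finding step, where f, mu, and Q are toy stand-ins rather than the library's internals:

import numpy as np
from scipy.optimize import minimize

# Toy stand-ins for the symbolic quantities in the diff: a log-likelihood
# f and a Gaussian prior N(mu, Q^-1) on the latent field x.
mu = np.zeros(2)
Q = np.array([[2.0, 0.3], [0.3, 1.0]])  # prior precision matrix

def f(x):
    # Placeholder for f_x = logp(y | x, params); any smooth log-likelihood works.
    return -np.sum((x - 1.0) ** 4)

def neg_log_x_posterior(x):
    # Negative of log_x_posterior from the diff; 0.5*logdet(Q) is dropped
    # because it is constant with respect to x.
    return -(f(x) - 0.5 * (x - mu) @ Q @ (x - mu))

x0 = minimize(neg_log_x_posterior, x0=mu).x  # mode of p(x | y, params)
print(x0)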
@@ -129,7 +129,8 @@ def get_conditional_gaussian_approximation(
         -0.5 * x.T @ (-hess + Q) @ x + x.T @ (Q @ mu + jac - hess @ x0) + 0.5 * logdetQ
     )
 
-    # TODO Currently x being passed in as an initial guess for x0 AND then also going to the true value of x
+    # Currently x is passed both as the query point for f(x, args) = logp(x | y, params) AND as an initial guess for x0. This may cause issues if the query point is
+    # far from the mode x0 or in a neighbourhood which results in poor convergence.
     return pytensor.function(args, [x0, conditional_gaussian_approx])
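The caveat in the rewritten comment is easy to see in one dimension: conditional_gaussian_approx is a second-order expansion of f around the mode x0, so it tracks the true log posterior near x0 but can degrade badly at query points far from it. A hedged sketch with a toy f and hand-computed derivatives standing in for the pytensor jac and hess:

# 1-D toy model: quartic stand-in for f_x = logp(y | x, params),
# Gaussian prior N(mu, 1/Q).
mu, Q = 0.0, 1.0

def f(x):
    return -0.25 * (x - 1.0) ** 4

def log_post(x):
    # x-dependent part of log p(x | y, params), as in log_x_posterior.
    return f(x) - 0.5 * Q * (x - mu) ** 2

x0 = 0.5  # expansion point (the optimizer's result in the library)
jac = -(x0 - 1.0) ** 3         # f'(x0), computed by hand for the toy f
hess = -3.0 * (x0 - 1.0) ** 2  # f''(x0)

def laplace_approx(x):
    # Same quadratic form as conditional_gaussian_approx (0.5*logdetQ omitted,
    # since it only shifts the curve by a constant).
    return -0.5 * (Q - hess) * x**2 + x * (Q * mu + jac - hess * x0)

for dx in (0.1, 1.0, 3.0):
    x = x0 + dx
    exact = log_post(x) - log_post(x0)
    approx = laplace_approx(x) - laplace_approx(x0)
    print(f"dx={dx}: exact={exact:.4f}, laplace={approx:.4f}")

Near the expansion point (dx=0.1) the two values agree to about one percent; at dx=3 the quadratic form overstates the log posterior by several nats, which is the failure mode the comment warns about.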
