@@ -4,7 +4,6 @@ using Optimisers, ADTypes
using Mooncake
using Bijectors
using Bijectors: partition, combine, PartitionMask
- using SimpleUnPack: @unpack
using NormalizingFlows
@@ -62,7 +61,7 @@ function _leapfrog(
end
function Bijectors.transform(lf::LeapFrog{T}, z::AbstractVector{T}) where {T<:Real}
- @unpack dim, logϵ, L, ∇logp = lf
+ (; dim, logϵ, L, ∇logp) = lf
@assert length(z) == 2dim "dimension of input must be even, z = [x, ρ]"
ϵ = _get_stepsize(lf)
function Bijectors.transform(ilf::Inverse{<:LeapFrog{T}}, z::AbstractVector{T}) where {T<:Real}
lf = ilf.orig
- @unpack dim, logϵ, L, ∇logp = lf
+ (; dim, logϵ, L, ∇logp) = lf
@assert length(z) == 2dim "dimension of input must be even, z = [x, ρ]"
ϵ = _get_stepsize(lf)
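For context on the change above: since Julia 1.7, property destructuring `(; a, b) = s` unpacks struct fields by name, so it can replace SimpleUnPack's `@unpack` without the extra dependency. A minimal sketch with a hypothetical struct (not from the example code):

struct Settings                 # hypothetical struct, only for illustration
    dim::Int
    logϵ::Float64
end

s = Settings(2, -1.0)
(; dim, logϵ) = s               # native destructuring: dim == 2, logϵ == -1.0
# previously: @unpack dim, logϵ = s   (requires SimpleUnPack)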
@@ -123,6 +122,9 @@ function logp_joint(z::AbstractVector{T}) where {T<:Real}
logp_ρ = sum(logpdf(Normal(), ρ))
return logp_x + logp_ρ
end
+
+ # The score function is the gradient of the logpdf.
+ # Among the synthetic targets, the score function is only implemented for the Banana target.
∇logp = Base.Fix1(score, target)
# #####################################
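For reference, `Base.Fix1(f, x)` returns a callable that fixes the first argument of `f`, so `∇logp(θ)` above evaluates `score(target, θ)`, i.e. the gradient of the target's log density at `θ`. A minimal sketch with a hypothetical `score` method for a standard-normal target (not the package's implementation):

score(target, x) = -x                  # ∇ log N(x; 0, I) = -x, hypothetical standard-normal target
target = nothing                       # placeholder target object
∇logp = Base.Fix1(score, target)       # ∇logp(x) == score(target, x)
∇logp([1.0, -2.0])                     # returns [-1.0, 2.0]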