 end
 end
 end
+
+@testset "Turing#2151: ReverseDiff compilation & eltype(vi, spl)" begin
+    # Failing model
+    t = 1:0.05:8
+    σ = 0.3
+    y = @. rand(sin(t) + Normal(0, σ))
+    @model function state_space(y, TT, ::Type{T}=Float64) where {T}
+        # Priors
+        α ~ Normal(y[1], 0.001)
+        τ ~ Exponential(1)
+        η ~ filldist(Normal(0, 1), TT - 1)
+        σ ~ Exponential(1)
+        # Create latent variable
+        x = Vector{T}(undef, TT)
+        x[1] = α
+        for t in 2:TT
+            x[t] = x[t-1] + η[t-1] * τ
+        end
+        # Measurement model
+        y ~ MvNormal(x, σ^2 * I)
+        return x
+    end
+    model = state_space(y, length(t))
+
+    # Dummy sampling algorithm for testing. The test case can only be
+    # replicated with a custom sampler; it does not work with SampleFromPrior().
+    struct MyAlg end
+    DynamicPPL.getspace(::DynamicPPL.Sampler{MyAlg}) = ()
+    DynamicPPL.assume(rng, ::DynamicPPL.Sampler{MyAlg}, dist, vn, vi) =
+        DynamicPPL.assume(dist, vn, vi)
+
+    # Compiling the ReverseDiff tape used to fail here
+    spl = Sampler(MyAlg())
+    vi = VarInfo(model)
+    ldf = DynamicPPL.LogDensityFunction(vi, model, SamplingContext(spl))
+    @test LogDensityProblemsAD.ADgradient(AutoReverseDiff(; compile=true), ldf) isa Any
+end
 end
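
The added testset relies on names that are imported elsewhere in the test suite (@model, filldist, I, AutoReverseDiff, ADgradient, and so on). As a minimal sketch, assuming the snippet were run as a standalone script rather than inside the package's runtests, the preamble would look roughly like the following; the exact package set and re-exports depend on the DynamicPPL version in use, so treat this as an assumption rather than the test file's actual header:

    using Test, LinearAlgebra                # @testset/@test and the identity I used in MvNormal(x, σ^2 * I)
    using Distributions, DistributionsAD     # Normal, Exponential, MvNormal; filldist comes from DistributionsAD
    using DynamicPPL                         # @model, Sampler, VarInfo, SamplingContext, LogDensityFunction
    using ADTypes: AutoReverseDiff           # AD backend selector with compile=true for a compiled tape
    using LogDensityProblemsAD, ReverseDiff  # ADgradient and the ReverseDiff backend it wraps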