Skip to content

Commit 1c93ec3

Browse files
committed
Fix deprecations (#318)
Fixes deprecations of `MvNormal` constructors in Distributions 0.25.11. Tests pass locally.
1 parent 80240bf commit 1c93ec3

13 files changed: +53 additions, −54 deletions

src/compiler.jl

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -133,8 +133,8 @@ This is used mainly to unwrap `NamedDist` distributions and adjust the indices o
133133
variables.
134134
135135
# Example
136-
```jldoctest; setup=:(using Distributions)
137-
julia> _, _, vns = DynamicPPL.unwrap_right_left_vns(MvNormal([1.0, 1.0], [1.0 0.0; 0.0 1.0]), randn(2, 2), @varname(x)); string(vns[end])
136+
```jldoctest; setup=:(using Distributions, LinearAlgebra)
137+
julia> _, _, vns = DynamicPPL.unwrap_right_left_vns(MvNormal(ones(2), I), randn(2, 2), @varname(x)); string(vns[end])
138138
"x[:,2]"
139139
140140
julia> _, _, vns = DynamicPPL.unwrap_right_left_vns(Normal(), randn(1, 2), @varname(x[:])); string(vns[end])

src/loglikelihoods.jl

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -145,7 +145,7 @@ y .~ Normal(μ, σ)
145145
```
146146
3. using `MvNormal`:
147147
```julia
148-
y ~ MvNormal(fill(μ, n), Diagonal(fill(σ, n)))
148+
y ~ MvNormal(fill(μ, n), σ^2 * I)
149149
```
150150
151151
In (1) and (2), `y` will be treated as a collection of `n` i.i.d. 1-dimensional variables,

src/varinfo.jl

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -34,7 +34,7 @@ Let `md` be an instance of `Metadata`:
3434
To make `md::Metadata` type stable, all the `md.vns` must have the same symbol
3535
and distribution type. However, one can have a Julia variable, say `x`, that is a
3636
matrix or a hierarchical array sampled in partitions, e.g.
37-
`x[1][:] ~ MvNormal(zeros(2), 1.0); x[2][:] ~ MvNormal(ones(2), 1.0)`, and is managed by
37+
`x[1][:] ~ MvNormal(zeros(2), I); x[2][:] ~ MvNormal(ones(2), I)`, and is managed by
3838
a single `md::Metadata` so long as all the distributions on the RHS of `~` are of the
3939
same type. Type unstable `Metadata` will still work but will have inferior performance.
4040
When sampling, the first iteration uses a type unstable `Metadata` for all the

test/Project.toml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -22,7 +22,7 @@ Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f"
2222
AbstractMCMC = "2.1, 3.0"
2323
AbstractPPL = "0.2"
2424
Bijectors = "0.9.5"
25-
Distributions = "< 0.25.11"
25+
Distributions = "0.25"
2626
DistributionsAD = "0.6.3"
2727
Documenter = "0.26.1, 0.27"
2828
ForwardDiff = "0.10.12"

test/compiler.jl

Lines changed: 8 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -206,7 +206,7 @@ end
206206
# test DPPL#61
207207
@model function testmodel_missing5(z)
208208
m ~ Normal()
209-
z[1:end] ~ MvNormal(fill(m, length(z)), 1.0)
209+
z[1:end] ~ MvNormal(fill(m, length(z)), I)
210210
return m
211211
end
212212
model = testmodel_missing5(rand(10))
@@ -379,7 +379,7 @@ end
379379

380380
# AR1 model. Dynamic prefixing.
381381
@model function AR1(num_steps, α, μ, σ, ::Type{TV}=Vector{Float64}) where {TV}
382-
η ~ MvNormal(num_steps, 1.0)
382+
η ~ MvNormal(zeros(num_steps), I)
383383
δ = sqrt(1 - α^2)
384384

385385
x = TV(undef, num_steps)
@@ -400,7 +400,7 @@ end
400400
num_obs = length(y)
401401
@inbounds for i in 1:num_obs
402402
x = @submodel $(Symbol("ar1_$i")) AR1(num_steps, α, μ, σ)
403-
y[i] ~ MvNormal(x, 0.1)
403+
y[i] ~ MvNormal(x, 0.01 * I)
404404
end
405405
end
406406

@@ -419,7 +419,7 @@ end
419419
x = Normal()
420420
@test DynamicPPL.check_tilde_rhs(x) === x
421421

422-
x = [Laplace(), Normal(), MvNormal(3, 1.0)]
422+
x = [Laplace(), Normal(), MvNormal(zeros(3), I)]
423423
@test DynamicPPL.check_tilde_rhs(x) === x
424424
end
425425
@testset "isliteral" begin
@@ -436,14 +436,14 @@ end
436436
# Verify that we indeed can parse this.
437437
@test @model(function array_literal_model()
438438
# `assume` and literal `observe`
439-
m ~ MvNormal(2, 1.0)
440-
return [10.0, 10.0] ~ MvNormal(m, 0.5 * ones(2))
439+
m ~ MvNormal(zeros(2), I)
440+
return [10.0, 10.0] ~ MvNormal(m, 0.25 * I)
441441
end) isa Function
442442

443443
@model function array_literal_model2()
444444
# `assume` and literal `observe`
445-
m ~ MvNormal(2, 1.0)
446-
return [10.0, 10.0] ~ MvNormal(m, 0.5 * ones(2))
445+
m ~ MvNormal(zeros(2), I)
446+
return [10.0, 10.0] ~ MvNormal(m, 0.25 * I)
447447
end
448448

449449
@test array_literal_model2()() == [10.0, 10.0]

test/context_implementations.jl

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -2,10 +2,9 @@
22
# https://github.com/TuringLang/DynamicPPL.jl/issues/129
33
@testset "#129" begin
44
@model function test(x)
5-
μ ~ MvNormal(fill(0, 2), 2.0)
5+
μ ~ MvNormal(zeros(2), 4 * I)
66
z = Vector{Int}(undef, length(x))
7-
# `z .~ Categorical.(ps)` cannot be parsed by Julia 1.0
8-
(.~)(z, Categorical.(fill([0.5, 0.5], length(x))))
7+
z .~ Categorical.(fill([0.5, 0.5], length(x)))
98
for i in 1:length(x)
109
x[i] ~ Normal(μ[z[i]], 0.1)
1110
end

test/loglikelihoods.jl

Lines changed: 15 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -1,28 +1,28 @@
11
# A collection of models for which the mean-of-means for the posterior should
22
# be same.
3-
@model function gdemo1(x=10 * ones(2), ::Type{TV}=Vector{Float64}) where {TV}
3+
@model function gdemo1(x=[10.0, 10.0], ::Type{TV}=Vector{Float64}) where {TV}
44
# `dot_assume` and `observe`
55
m = TV(undef, length(x))
66
m .~ Normal()
7-
return x ~ MvNormal(m, 0.5)
7+
return x ~ MvNormal(m, 0.25 * I)
88
end
99

10-
@model function gdemo2(x=10 * ones(2), ::Type{TV}=Vector{Float64}) where {TV}
10+
@model function gdemo2(x=[10.0, 10.0], ::Type{TV}=Vector{Float64}) where {TV}
1111
# `assume` with indexing and `observe`
1212
m = TV(undef, length(x))
1313
for i in eachindex(m)
1414
m[i] ~ Normal()
1515
end
16-
return x ~ MvNormal(m, 0.5)
16+
return x ~ MvNormal(m, 0.25 * I)
1717
end
1818

19-
@model function gdemo3(x=10 * ones(2))
19+
@model function gdemo3(x=[10.0, 10.0])
2020
# Multivariate `assume` and `observe`
21-
m ~ MvNormal(length(x), 1.0)
22-
return x ~ MvNormal(m, 0.5)
21+
m ~ MvNormal(zero(x), I)
22+
return x ~ MvNormal(m, 0.25 * I)
2323
end
2424

25-
@model function gdemo4(x=10 * ones(2), ::Type{TV}=Vector{Float64}) where {TV}
25+
@model function gdemo4(x=[10.0, 10.0], ::Type{TV}=Vector{Float64}) where {TV}
2626
# `dot_assume` and `observe` with indexing
2727
m = TV(undef, length(x))
2828
m .~ Normal()
@@ -33,16 +33,16 @@ end
3333

3434
# Using vector of `length` 1 here so the posterior of `m` is the same
3535
# as the others.
36-
@model function gdemo5(x=10 * ones(1))
36+
@model function gdemo5(x=[10.0])
3737
# `assume` and `dot_observe`
3838
m ~ Normal()
3939
return x .~ Normal(m, 0.5)
4040
end
4141

4242
@model function gdemo6(::Type{TV}=Vector{Float64}) where {TV}
4343
# `assume` and literal `observe`
44-
m ~ MvNormal(2, 1.0)
45-
return [10.0, 10.0] ~ MvNormal(m, 0.5)
44+
m ~ MvNormal(zeros(2), I)
45+
return [10.0, 10.0] ~ MvNormal(m, 0.25 * I)
4646
end
4747

4848
@model function gdemo7(::Type{TV}=Vector{Float64}) where {TV}
@@ -76,23 +76,23 @@ end
7676
end
7777

7878
@model function _likelihood_dot_observe(m, x)
79-
return x ~ MvNormal(m, 0.5)
79+
return x ~ MvNormal(m, 0.25 * I)
8080
end
8181

82-
@model function gdemo10(x=10 * ones(2), ::Type{TV}=Vector{Float64}) where {TV}
82+
@model function gdemo10(x=[10.0, 10.0], ::Type{TV}=Vector{Float64}) where {TV}
8383
m = TV(undef, length(x))
8484
m .~ Normal()
8585

8686
# Submodel likelihood
8787
@submodel _likelihood_dot_observe(m, x)
8888
end
8989

90-
@model function gdemo11(x=10 * ones(2, 1), ::Type{TV}=Vector{Float64}) where {TV}
90+
@model function gdemo11(x=fill(10.0, 2, 1), ::Type{TV}=Vector{Float64}) where {TV}
9191
m = TV(undef, length(x))
9292
m .~ Normal()
9393

9494
# Dotted observe for `Matrix`.
95-
return x .~ MvNormal(m, 0.5)
95+
return x .~ MvNormal(m, 0.25 * I)
9696
end
9797

9898
const gdemo_models = (

test/prob_macro.jl

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -30,15 +30,15 @@
3030
@testset "vector" begin
3131
n = 5
3232
@model function demo(x, n=n)
33-
m ~ MvNormal(n, 1.0)
34-
return x ~ MvNormal(m, 1.0)
33+
m ~ MvNormal(zeros(n), I)
34+
return x ~ MvNormal(m, I)
3535
end
3636
mval = rand(n)
3737
xval = rand(n)
3838
iters = 1000
3939

40-
logprior = logpdf(MvNormal(n, 1.0), mval)
41-
loglike = logpdf(MvNormal(mval, 1.0), xval)
40+
logprior = logpdf(MvNormal(zeros(n), I), mval)
41+
loglike = logpdf(MvNormal(mval, I), xval)
4242
logjoint = logprior + loglike
4343

4444
model = demo(xval)

test/turing/compiler.jl

Lines changed: 8 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -175,12 +175,12 @@
175175

176176
res = sample(vdemo1b(x), alg, 250)
177177

178-
D = 2
179178
@model function vdemo2(x)
180-
μ ~ MvNormal(zeros(D), ones(D))
181-
@. x ~ $(MvNormal(μ, ones(D)))
179+
μ ~ MvNormal(zeros(size(x, 1)), I)
180+
@. x ~ $(MvNormal(μ, I))
182181
end
183182

183+
D = 2
184184
alg = HMC(0.01, 5)
185185
res = sample(vdemo2(randn(D, 100)), alg, 250)
186186

@@ -206,7 +206,7 @@
206206

207207
t_vec = @elapsed res = sample(vdemo4(), alg, 1000)
208208

209-
@model vdemo5() = x ~ MvNormal(zeros(N), 2 * ones(N))
209+
@model vdemo5() = x ~ MvNormal(zeros(N), 4 * I)
210210

211211
t_mv = @elapsed res = sample(vdemo5(), alg, 1000)
212212

@@ -243,12 +243,12 @@
243243
x = randn(100)
244244
res = sample(vdemo1(x), alg, 250)
245245

246-
D = 2
247246
@model function vdemo2(x)
248-
μ ~ MvNormal(zeros(D), ones(D))
249-
return x .~ MvNormal(μ, ones(D))
247+
μ ~ MvNormal(zeros(size(x, 1)), I)
248+
return x .~ MvNormal(μ, I)
250249
end
251250

251+
D = 2
252252
alg = HMC(0.01, 5)
253253
res = sample(vdemo2(randn(D, 100)), alg, 250)
254254

@@ -274,7 +274,7 @@
274274

275275
t_vec = @elapsed res = sample(vdemo4(), alg, 1000)
276276

277-
@model vdemo5() = x ~ MvNormal(zeros(N), 2 * ones(N))
277+
@model vdemo5() = x ~ MvNormal(zeros(N), 4 * I)
278278

279279
t_mv = @elapsed res = sample(vdemo5(), alg, 1000)
280280

test/turing/model.jl

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -14,7 +14,7 @@
1414
end
1515

1616
@model function demo2(xs)
17-
m ~ MvNormal(2, 1.0)
17+
m ~ MvNormal(zeros(2), I)
1818

1919
for i in eachindex(xs)
2020
xs[i] ~ Normal(m[1], 1.0)
@@ -50,7 +50,7 @@
5050
@model function demo3(xs, ::Type{TV}=Vector{Float64}) where {TV}
5151
m = Vector{TV}(undef, 2)
5252
for i in 1:length(m)
53-
m[i] ~ MvNormal(2, 1.0)
53+
m[i] ~ MvNormal(zeros(2), I)
5454
end
5555

5656
for i in eachindex(xs)
@@ -63,7 +63,7 @@
6363
@model function demo4(xs, ::Type{TV}=Vector{Vector{Float64}}) where {TV}
6464
m = TV(undef, 2)
6565
for i in 1:length(m)
66-
m[i] ~ MvNormal(2, 1.0)
66+
m[i] ~ MvNormal(zeros(2), I)
6767
end
6868

6969
for i in eachindex(xs)

0 commit comments

Comments (0)