Merged

Changes from all commits (24 commits)
d8c64db
chore: change error message for update!
bvdmitri Jul 14, 2022
71cdafe
chore: better err msgs
bvdmitri Jul 14, 2022
44f46ae
add type check in the `inference` function
bvdmitri Jul 14, 2022
f36cc9b
fix: add clamplog
bvdmitri Jul 14, 2022
5ffd8f9
remove wrong type check in the inference function
bvdmitri Jul 14, 2022
156876a
add clamplog mean for Dirichlet
bvdmitri Jul 15, 2022
5d160b9
Merge pull request #167 from biaslab/dev-issue-166
bvdmitri Jul 25, 2022
a2701b4
refactor: use `clamplog` in contingency entropy calculation
bvdmitri Jul 25, 2022
6fcef9a
test: add edge case test
bvdmitri Jul 25, 2022
c1abe5d
fix: automatically renormalize Contingency matrix
bvdmitri Jul 25, 2022
9cf27bf
style: make format
bvdmitri Jul 25, 2022
493bb01
fix: fix method overwriting
bvdmitri Jul 27, 2022
cbd4e45
style: make format
bvdmitri Jul 27, 2022
cd28cc4
test: fix Contingency tests
bvdmitri Jul 27, 2022
60362ea
Merge pull request #173 from biaslab/dev-contingency-docs
bvdmitri Jul 28, 2022
3a1015f
Merge branch 'master' into dev-issue-165
bvdmitri Jul 29, 2022
df41679
fix preoptimised factorisation constraint rules for nodes with 2 inputs
bvdmitri Jul 29, 2022
c83cde5
Update test_wishart_inverse.jl
bvdmitri Jul 29, 2022
8f5c90f
add missing rand methods for mv gaussians
bvdmitri Jul 29, 2022
70f13d2
style: make format
bvdmitri Jul 29, 2022
08ae7a1
Merge pull request #175 from biaslab/dev-fix-resolve-factorisation
bvdmitri Jul 29, 2022
3d2e880
Merge pull request #169 from biaslab/dev-issue-165
bvdmitri Jul 29, 2022
439287c
Merge pull request #176 from biaslab/dev-issue-174
bvdmitri Jul 29, 2022
0e1efd8
update: bump version to 2.3.1
bvdmitri Jul 29, 2022
2 changes: 1 addition & 1 deletion Project.toml
@@ -1,7 +1,7 @@
name = "ReactiveMP"
uuid = "a194aa59-28ba-4574-a09c-4a745416d6e3"
authors = ["Dmitry Bagaev <[email protected]>", "Albert Podusenko <[email protected]>", "Bart van Erp <[email protected]>", "Ismail Senoz <[email protected]>"]
version = "2.3.0"
version = "2.3.1"

[deps]
DataStructures = "864edb3b-99cc-5e75-8d2d-829cb0a9cfe8"
2 changes: 1 addition & 1 deletion src/constraints/spec/factorisation_spec.jl
@@ -201,7 +201,7 @@ resolve_factorisation(::UnspecifiedConstraints, any, model, fform, variables) =
resolve_factorisation(::UnspecifiedConstraints, ::Deterministic, model, fform, variables) = FullFactorisation()

# Preoptimised dispatch rules for unspecified constraints and a stochastic node with 2 inputs
resolve_factorisation(::UnspecifiedConstraints, ::Stochastic, model, fform, ::Tuple{V1, V2}) where {V1 <: RandomVariable, V2 <: RandomVariable} = ((1, 2))
resolve_factorisation(::UnspecifiedConstraints, ::Stochastic, model, fform, ::Tuple{V1, V2}) where {V1 <: RandomVariable, V2 <: RandomVariable} = ((1, 2),)
resolve_factorisation(::UnspecifiedConstraints, ::Stochastic, model, fform, ::Tuple{V1, V2}) where {V1 <: Union{<:ConstVariable, <:DataVariable}, V2 <: RandomVariable} = ((1,), (2,))
resolve_factorisation(::UnspecifiedConstraints, ::Stochastic, model, fform, ::Tuple{V1, V2}) where {V1 <: RandomVariable, V2 <: Union{<:ConstVariable, <:DataVariable}} = ((1,), (2,))

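The fix above rests on a Julia tuple subtlety: redundant outer parentheses do not create nesting, so the old right-hand side `((1, 2))` was a plain 2-tuple rather than a factorisation with a single cluster. A quick REPL sketch (illustration only, not part of the diff):

    julia> ((1, 2)) === (1, 2)   # outer parentheses are redundant: still a plain 2-tuple
    true

    julia> ((1, 2),)             # trailing comma: a 1-tuple holding the cluster (1, 2)
    ((1, 2),)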
2 changes: 1 addition & 1 deletion src/distributions/categorical.jl
@@ -1,4 +1,4 @@
export Bernoulli
export Categorical

import Distributions: Categorical, probs

28 changes: 27 additions & 1 deletion src/distributions/contingency.jl
@@ -2,15 +2,41 @@ export Contingency

using LinearAlgebra

"""
Contingency(P, renormalize = Val(true))

The contingency distribution is a multivariate generalization of the categorical distribution. As a bivariate distribution, the
contingency distribution defines the joint probability over two unit vectors `v1` and `v2`. The parameter `P` encodes a contingency matrix that specifies the probability of co-occurrence.

v1 ∈ {0, 1}^d1 where Σ_j v1_j = 1
v2 ∈ {0, 1}^d2 where Σ_k v2_k = 1

P ∈ [0, 1]^{d1 × d2}, where Σ_jk P_jk = 1

f(v1, v2) = Contingency(v1, v2 | P) = Π_jk P_jk^{v1_j * v2_k}

A `Contingency` distribution over more than two variables requires higher-order tensors as parameters; these are not implemented in ReactiveMP.

# Arguments:
- `P`, required, contingency matrix
- `renormalize`, optional, either `Val(true)` or `Val(false)`, specifies whether the matrix `P` is automatically renormalized. Renormalization does not modify the original `P`; it allocates a new matrix for the renormalized version. If set to `Val(false)`, the contingency matrix `P` **must** be normalized by hand, otherwise the results of related calculations may be wrong

"""
struct Contingency{T, P <: AbstractMatrix{T}} <: ContinuousMatrixDistribution
p::P

Contingency{T, P}(A::AbstractMatrix) where {T, P <: AbstractMatrix{T}} = new(A)
end

Contingency(P::AbstractMatrix) = Contingency(P, Val(true))
Contingency(P::M, renormalize::Val{true}) where {T, M <: AbstractMatrix{T}} = Contingency{T, M}(P ./ sum(P))
Contingency(P::M, renormalize::Val{false}) where {T, M <: AbstractMatrix{T}} = Contingency{T, M}(P)

contingency_matrix(distribution::Contingency) = distribution.p

vague(::Type{<:Contingency}, dims::Int) = Contingency(ones(dims, dims) ./ abs2(dims))

function entropy(distribution::Contingency)
P = contingency_matrix(distribution)
-sum(P .* log.(clamp.(P, tiny, Inf)))
return -mapreduce((p) -> p * clamplog(p), +, P)
end
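A minimal usage sketch of the new constructor (illustration only; `contingency_matrix` is qualified with the module name, as the tests below do):

    using ReactiveMP, Distributions

    P = [1.0 3.0; 2.0 4.0]                # unnormalized contingency matrix, sums to 10
    d = Contingency(P)                    # renormalizes by default; `P` itself is untouched
    ReactiveMP.contingency_matrix(d)      # == P ./ 10
    entropy(d)                            # finite even when `P` contains zeros, thanks to `clamplog`
    Contingency(P ./ sum(P), Val(false))  # skip renormalization if `P` is already normalized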
3 changes: 2 additions & 1 deletion src/distributions/dirichlet.jl
@@ -16,7 +16,8 @@ end
probvec(dist::Dirichlet) = params(dist)[1]

# probvec is not normalised
mean(::typeof(log), dist::Dirichlet) = digamma.(probvec(dist)) .- digamma(sum(probvec(dist)))
mean(::typeof(log), dist::Dirichlet) = digamma.(probvec(dist)) .- digamma(sum(probvec(dist)))
mean(::typeof(clamplog), dist::Dirichlet) = digamma.((clamp(p, tiny, typemax(p)) for p in probvec(dist))) .- digamma(sum(probvec(dist)))

# Variate forms promotion

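Where the new `clamplog` mean matters: a Dirichlet with a near-zero concentration parameter sends `digamma` towards `-Inf`, while the clamped variant bounds each parameter below by `tiny` first. A sketch (illustration only, assuming `clamplog` is accessed via an explicit import):

    using ReactiveMP, Distributions
    import ReactiveMP: clamplog

    d = Dirichlet([1.0, 2.0, 1e-100])  # one concentration parameter is effectively zero
    mean(log, d)                       # third entry is an astronomically large negative number
    mean(clamplog, d)                  # parameters are clamped to at least `tiny`, so the entry stays bounded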
14 changes: 12 additions & 2 deletions src/distributions/normal.jl
@@ -204,14 +204,19 @@ function Random.rand(rng::AbstractRNG, dist::UnivariateNormalDistributionsFamily
return μ + σ * randn(rng, float(T))
end

function Random.rand(rng::AbstractRNG, dist::UnivariateNormalDistributionsFamily{T}, size::Int64) where {T}
container = Vector{T}(undef, size)
return rand!(rng, dist, container)
end

function Random.rand!(
rng::AbstractRNG,
dist::UnivariateNormalDistributionsFamily,
container::AbstractArray{T}
) where {T <: Real}
randn!(rng, container)
μ, σ = mean_std(dist)
@turbo for i in 1:length(container)
@turbo for i in eachindex(container)
container[i] = μ + σ * container[i]
end
container
@@ -224,6 +229,11 @@ function Random.rand(rng::AbstractRNG, dist::MultivariateNormalDistributionsFami
return μ + L * randn(rng, length(μ))
end

function Random.rand(rng::AbstractRNG, dist::MultivariateNormalDistributionsFamily{T}, size::Int64) where {T}
container = Matrix{T}(undef, ndims(dist), size)
return rand!(rng, dist, container)
end

function Random.rand!(
rng::AbstractRNG,
dist::MultivariateNormalDistributionsFamily,
@@ -232,7 +242,7 @@ function Random.rand!(
preallocated = similar(container)
randn!(rng, reshape(preallocated, length(preallocated)))
μ, L = mean_std(dist)
@views for i in 1:size(preallocated)[2]
@views for i in axes(preallocated, 2)
copyto!(container[:, i], μ)
mul!(container[:, i], L, preallocated[:, i], 1, 1)
end
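A usage sketch of the new `rand(rng, dist, n)` methods (illustration only; `NormalMeanVariance` and `MvNormalMeanCovariance` are existing members of these distribution families):

    using ReactiveMP, Random, LinearAlgebra

    rng = MersenneTwister(1234)
    rand(rng, NormalMeanVariance(0.0, 1.0), 5)                          # Vector of 5 draws
    rand(rng, MvNormalMeanCovariance(zeros(2), Matrix(1.0I, 2, 2)), 5)  # 2×5 matrix, one draw per column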
2 changes: 2 additions & 0 deletions src/distributions/pointmass.jl
@@ -62,6 +62,7 @@ probvec(distribution::PointMass{V}) where {T <: Real, V <: AbstractVector{T}} =

mean(::typeof(inv), distribution::PointMass{V}) where {T <: Real, V <: AbstractVector{T}} = error("mean of inverse of `::PointMass{ <: AbstractVector }` is not defined")
mean(::typeof(log), distribution::PointMass{V}) where {T <: Real, V <: AbstractVector{T}} = log.(mean(distribution))
mean(::typeof(clamplog), distribution::PointMass{V}) where {T <: Real, V <: AbstractVector{T}} = clamplog.(mean(distribution))
mean(::typeof(mirrorlog), distribution::PointMass{V}) where {T <: Real, V <: AbstractVector{T}} = error("mean of mirrorlog of `::PointMass{ <: AbstractVector }` is not defined")
mean(::typeof(loggamma), distribution::PointMass{V}) where {T <: Real, V <: AbstractVector{T}} = loggamma.(mean(distribution))
mean(::typeof(logdet), distribution::PointMass{V}) where {T <: Real, V <: AbstractVector{T}} = error("mean of logdet of `::PointMass{ <: AbstractVector }` is not defined")
@@ -91,6 +92,7 @@ probvec(distribution::PointMass{M}) where {T <: Real, M <: AbstractMatrix{T}} =

mean(::typeof(inv), distribution::PointMass{M}) where {T <: Real, M <: AbstractMatrix{T}} = cholinv(mean(distribution))
mean(::typeof(log), distribution::PointMass{M}) where {T <: Real, M <: AbstractMatrix{T}} = log.(mean(distribution))
mean(::typeof(clamplog), distribution::PointMass{M}) where {T <: Real, M <: AbstractMatrix{T}} = clamplog.(mean(distribution))
mean(::typeof(mirrorlog), distribution::PointMass{M}) where {T <: Real, M <: AbstractMatrix{T}} = error("mean of mirrorlog of `::PointMass{ <: AbstractMatrix }` is not defined")
mean(::typeof(loggamma), distribution::PointMass{M}) where {T <: Real, M <: AbstractMatrix{T}} = loggamma.(mean(distribution))
mean(::typeof(logdet), distribution::PointMass{M}) where {T <: Real, M <: AbstractMatrix{T}} = logdet(mean(distribution))
11 changes: 8 additions & 3 deletions src/helpers.jl
@@ -264,6 +264,11 @@ end

## Other helpers

"""
Same as `log` but clamps the input argument `x` to be in the range `tiny <= x <= typemax(x)` such that `log(0)` does not explode.
"""
clamplog(x) = log(clamp(x, tiny, typemax(x)))

# We override this function for some specific types
function is_typeof_equal(left, right)
_isequal = typeof(left) === typeof(right)
@@ -311,9 +316,9 @@ Float64
"""
function deep_eltype end

deep_eltype(::Type{T}) where {T <: Number} = T
deep_eltype(::Type{T}) where {T} = deep_eltype(eltype(T))
deep_eltype(::T) where {T} = deep_eltype(T)
deep_eltype(::Type{T}) where {T} = T
deep_eltype(::Type{T}) where {T <: AbstractArray} = deep_eltype(eltype(T))
deep_eltype(any) = deep_eltype(typeof(any))

##

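The new helper and the tightened `deep_eltype` in isolation (a sketch; both are imported explicitly on the assumption that they are internal helpers):

    import ReactiveMP: clamplog, deep_eltype

    log(0.0)       # -Inf, which poisons entropy and average-energy sums
    clamplog(0.0)  # log(tiny): a large negative yet finite value
    clamplog(0.5)  # log(0.5): inputs at or above `tiny` pass through unchanged

    deep_eltype([[1.0, 2.0], [3.0]])  # Float64: recursion now happens only through AbstractArray eltypes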
5 changes: 4 additions & 1 deletion src/inference.jl
@@ -392,7 +392,10 @@ function inference(;
error("Data is empty. Make sure you used `data` keyword argument with correct value.")
else
foreach(filter(pair -> isdata(last(pair)), pairs(vardict))) do pair
haskey(data, first(pair)) || error("Data entry $(first(pair)) is missing in `data` dictionary.")
varname = first(pair)
haskey(data, varname) || error(
"Data entry `$(varname)` is missing in `data` argument. Double check `data = ($(varname) = ???, )`"
)
end
end

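For context, the check guards calls like the following (a schematic sketch following the usual ReactiveMP model/inference pattern; the coin model itself is hypothetical):

    @model function coin_model(n)
        θ ~ Beta(1.0, 1.0)
        y = datavar(Float64, n)
        for i in 1:n
            y[i] ~ Bernoulli(θ)
        end
        return y, θ
    end

    result = inference(model = Model(coin_model, 10), data = (y = float.(rand(10) .> 0.5),))
    # Misspelling the key, e.g. `data = (z = dataset,)`, now fails early with
    # "Data entry `y` is missing in `data` argument. Double check `data = (y = ???, )`"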
2 changes: 1 addition & 1 deletion src/nodes/categorical.jl
@@ -1,4 +1,4 @@

@node Categorical Stochastic [out, p]

@average_energy Categorical (q_out::Categorical, q_p::Any) = -sum(probvec(q_out) .* mean(log, q_p))
@average_energy Categorical (q_out::Categorical, q_p::Any) = -sum(probvec(q_out) .* mean(clamplog, q_p))
6 changes: 3 additions & 3 deletions src/nodes/transition.jl
@@ -17,13 +17,13 @@ end
end

@average_energy Transition (q_out_in::Contingency, q_a::PointMass) = begin
# `map(d -> log(clamp(d, tiny, huge)), mean(q_a))` is an equivalent of `mean(log, q_a)` with an extra `clamp(el, tiny, huge)` operation
# `map(clamplog, mean(q_a))` is equivalent to `mean(log, q_a)` with an extra `clamp(el, tiny, Inf)` operation
# The reason is that we don't want to take the log of zeros in the matrix `q_a` (if there are any)
# The trick here is that if the RHS matrix has zero entries, then the corresponding entries of the `contingency_matrix`
# should also be zeros (see the corresponding @marginalrule), so in the end `log(tiny) * 0` does not influence the result.
return -ReactiveMP.mul_trace(ReactiveMP.contingency_matrix(q_out_in)', map(d -> log(clamp(d, tiny, huge)), mean(q_a)))
return -ReactiveMP.mul_trace(ReactiveMP.contingency_matrix(q_out_in)', mean(clamplog, q_a))
end

@average_energy Transition (q_out::Any, q_in::Any, q_a::PointMass) = begin
return -probvec(q_out)' * mean(log, q_a) * probvec(q_in)
return -probvec(q_out)' * mean(clamplog, q_a) * probvec(q_in)
end
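The effect described in the comment, in isolation (a sketch; `clamplog` imported explicitly as above):

    using ReactiveMP
    import ReactiveMP: clamplog

    A = [0.9 0.0; 0.1 1.0]        # transition matrix with structural zeros
    mean(log, PointMass(A))       # contains -Inf at the zero entries
    mean(clamplog, PointMass(A))  # zeros are clamped to `tiny`: large negative, but finite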
12 changes: 8 additions & 4 deletions src/rules/transition/marginals.jl
@@ -1,17 +1,21 @@

@marginalrule Transition(:out_in) (m_out::Categorical, m_in::Categorical, q_a::MatrixDirichlet) = begin
B = Diagonal(probvec(m_out)) * exp.(mean(log, q_a)) * Diagonal(probvec(m_in))
return Contingency(B ./ sum(B))
D = map(e -> clamp(exp(e), tiny, huge), mean(log, q_a))
B = Diagonal(probvec(m_out)) * D * Diagonal(probvec(m_in))
P = map!(Base.Fix2(/, sum(B)), B, B) # inplace version of B ./ sum(B)
return Contingency(P, Val(false)) # Matrix `P` has been normalized by hand
end

@marginalrule Transition(:out_in) (m_out::Categorical, m_in::Categorical, q_a::PointMass) = begin
B = Diagonal(probvec(m_out)) * mean(q_a) * Diagonal(probvec(m_in))
return Contingency(B ./ sum(B))
P = map!(Base.Fix2(/, sum(B)), B, B) # inplace version of B ./ sum(B)
return Contingency(P, Val(false)) # Matrix `P` has been normalized by hand
end

@marginalrule Transition(:out_in_a) (m_out::Categorical, m_in::Categorical, m_a::PointMass) = begin
B = Diagonal(probvec(m_out)) * mean(m_a) * Diagonal(probvec(m_in))
return (out_in = Contingency(B ./ sum(B)), a = m_a)
P = map!(Base.Fix2(/, sum(B)), B, B) # inplace version of B ./ sum(B)
return (out_in = Contingency(P, Val(false)), a = m_a) # Matrix `P` has been normalized by hand
end

@marginalrule Transition(:out_in_a) (m_out::PointMass, m_in::Categorical, m_a::PointMass, meta::Any) = begin
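The in-place normalization trick used in these rules, in isolation (illustrative REPL sketch; `Base.Fix2(/, s)` is the partially applied function `x -> x / s`, and `sum(B)` is evaluated before `map!` starts mutating):

    julia> B = [1.0 3.0; 2.0 4.0];

    julia> P = map!(Base.Fix2(/, sum(B)), B, B);  # writes B ./ sum(B) back into B, no fresh allocation

    julia> P === B
    true

    julia> sum(P)
    1.0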
4 changes: 4 additions & 0 deletions src/variable.jl
@@ -31,6 +31,10 @@ linear_index(::VariableIndividual) = nothing
linear_index(v::VariableVector) = v.index
linear_index(v::VariableArray) = LinearIndices(v.size)[v.index]

string_index(::VariableIndividual) = ""
string_index(v::VariableVector) = string("[", v.index, "]")
string_index(v::VariableArray) = string("[", join(v.index.I, ", "), "]")

indexed_name(::VariableIndividual, name::Symbol) = string(name)
indexed_name(seq::VariableVector, name::Symbol) = string(name, "_", seq.index)
indexed_name(array::VariableArray, name::Symbol) = string(name, "_", join(array.index.I, "_"))
25 changes: 23 additions & 2 deletions src/variables/data.jl
@@ -121,15 +121,36 @@ update!(datavar::DataVariable, data::Number) = update!(eltype(datavar), d
update!(datavar::DataVariable, data::AbstractArray) = update!(eltype(datavar), datavar, data)

update!(::Type{D}, datavar, data::D) where {D} = next!(messageout(datavar, 1), Message(data, false, false))
update!(::Type{D1}, datavar, data::D2) where {D1, D2} = error("'$(name(datavar)) = datavar($D1, ...)' accepts data of type $D1, but $D2 has been supplied. Check 'update!($(name(datavar)), data::$D2)' and explicitly convert data to type $D1.")
update!(::Type{D1}, datavar, data::D2) where {D1, D2} = __update_wrong_type_error(D1, D2, collection_type(datavar), datavar)

__datavar_drop_pointmass(::Type{D}) where {D} = D
__datavar_drop_pointmass(::Type{PointMass{D}}) where {D} = D

__update_wrong_type_error(::Type{D1}, ::Type{D2}, ctype::VariableIndividual, datavar) where {D1, D2} = error(
"""
`$(name(datavar)) = datavar($(__datavar_drop_pointmass(D1)))` accepts data only of type `$(__datavar_drop_pointmass(D1))`, but the value of type `$D2` has been used.
Double check `update!($(name(datavar)), data)` call and explicitly convert data to the type `$(__datavar_drop_pointmass(D1))`, e.g. `convert($(__datavar_drop_pointmass(D1)), data)`.
"""
)

__update_wrong_type_error(::Type{D1}, ::Type{D2}, ctype::Union{VariableVector, VariableArray}, datavar) where {D1, D2} =
error(
"""
`$(name(datavar)) = datavar($(__datavar_drop_pointmass(D1)), ...)` accepts data only of type `$(__datavar_drop_pointmass(D1))`, but the value of type `$D2` has been used.
Double check `update!($(name(datavar))$(string_index(ctype)), d)` call and explicitly convert data to the type `$(__datavar_drop_pointmass(D1))`, e.g. `update!($(name(datavar))$(string_index(ctype)), convert($(__datavar_drop_pointmass(D1)), d))`.
If you use the broadcasted version of the `update!` function, e.g. `update!($(name(datavar)), data)`, you may broadcast the `convert` function over the whole dataset as well, e.g. `update!($(name(datavar)), convert.($(__datavar_drop_pointmass(D1)), dataset))`
"""
)

update!(::Type{PointMass{D}}, datavar, data::D) where {D} =
next!(messageout(datavar, 1), Message(PointMass(data), false, false))

resend!(datavar::DataVariable) = update!(datavar, Rocket.getrecent(messageout(datavar, 1)))

function update!(datavars::AbstractArray{<:DataVariable}, data::AbstractArray)
@assert size(datavars) === size(data) "Invalid update! call: size of datavar array and data should match"
@assert size(datavars) === size(data) """
Invalid `update!` call: size of datavar array and data must match: `$(name(first(datavars)))` has size $(size(datavars)) and data has size $(size(data)).
"""
foreach(zip(datavars, data)) do (var, d)
update!(var, d)
end
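Usage implied by the new messages (a schematic sketch; `datavar` calls normally live inside an `@model` definition):

    y = datavar(Float64, 3)             # schematic: declared inside a model
    update!(y, [1.0, 2.0, 3.0])         # element-wise update over the array of data variables
    update!(y, [1.0, 2.0])              # errors: sizes (3,) and (2,) do not match
    update!(y[1], 1)                    # errors: `Int64` supplied where `Float64` is expected
    update!(y[1], convert(Float64, 1))  # explicit conversion, as the new message suggests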
7 changes: 5 additions & 2 deletions test/constraints/spec/test_factorisation_spec.jl
@@ -8,7 +8,7 @@ import ReactiveMP: FunctionalIndex
import ReactiveMP: CombinedRange, SplittedRange, is_splitted
import ReactiveMP: __as_unit_range, __factorisation_specification_resolve_index
import ReactiveMP: resolve_factorisation
import ReactiveMP: DefaultConstraints
import ReactiveMP: DefaultConstraints, UnspecifiedConstraints
import ReactiveMP: setanonymous!, activate!

using GraphPPL # for `@constraints` macro
@@ -579,14 +579,17 @@ using GraphPPL # for `@constraints` macro
# empty
end

for cs in (empty, DefaultConstraints)
# `DefaultConstraints` is equal to `UnspecifiedConstraints()` for now, but this might change in the future, so we test both
for cs in (empty, UnspecifiedConstraints(), DefaultConstraints)
let model = FactorGraphModel()
d = datavar(model, :d, Float64)
c = constvar(model, :c, 1.0)
x = randomvar(model, :x)
y = randomvar(model, :y)
z = randomvar(model, :z)

@test ReactiveMP.resolve_factorisation(cs, model, fform, (x, y)) === ((1, 2),)
@test ReactiveMP.resolve_factorisation(cs, model, fform, (y, x)) === ((1, 2),)
@test ReactiveMP.resolve_factorisation(cs, model, fform, (d, d)) === ((1,), (2,))
@test ReactiveMP.resolve_factorisation(cs, model, fform, (c, c)) === ((1,), (2,))
@test ReactiveMP.resolve_factorisation(cs, model, fform, (d, x)) === ((1,), (2,))
19 changes: 14 additions & 5 deletions test/distributions/test_contingency.jl
@@ -16,8 +16,12 @@ using Random
end

@testset "contingency_matrix" begin
@test ReactiveMP.contingency_matrix(Contingency(ones(3, 3))) == ones(3, 3)
@test ReactiveMP.contingency_matrix(Contingency(ones(4, 4))) == ones(4, 4)
@test ReactiveMP.contingency_matrix(Contingency(ones(3, 3))) == ones(3, 3) ./ 9
@test ReactiveMP.contingency_matrix(Contingency(ones(3, 3), Val(true))) == ones(3, 3) ./ 9
@test ReactiveMP.contingency_matrix(Contingency(ones(3, 3), Val(false))) == ones(3, 3) # Not a valid probability matrix; this only checks that `Val(false)` skips renormalization
@test ReactiveMP.contingency_matrix(Contingency(ones(4, 4))) == ones(4, 4) ./ 16
@test ReactiveMP.contingency_matrix(Contingency(ones(4, 4), Val(true))) == ones(4, 4) ./ 16
@test ReactiveMP.contingency_matrix(Contingency(ones(4, 4), Val(false))) == ones(4, 4)
end

@testset "vague" begin
@@ -35,9 +39,14 @@ using Random
end

@testset "entropy" begin
@test entropy(Contingency([0.1 0.9; 0.9 0.1])) ≈ 0.6501659467828964
@test entropy(Contingency([0.2 0.8; 0.8 0.2])) ≈ 1.0008048470763757
@test entropy(Contingency([0.45 0.75; 0.55 0.25])) ≈ 1.2504739583323967
@test entropy(Contingency([0.7 0.1; 0.1 0.1])) ≈ 0.9404479886553263
@test entropy(Contingency(10.0 * [0.7 0.1; 0.1 0.1])) ≈ 0.9404479886553263
@test entropy(Contingency([0.07 0.41; 0.31 0.21])) ≈ 1.242506182893139
@test entropy(Contingency(10.0 * [0.07 0.41; 0.31 0.21])) ≈ 1.242506182893139
@test entropy(Contingency([0.09 0.00; 0.00 0.91])) ≈ 0.30253782309749805
@test entropy(Contingency(10.0 * [0.09 0.00; 0.00 0.91])) ≈ 0.30253782309749805
@test !isnan(entropy(Contingency([0.0 1.0; 1.0 0.0])))
@test !isinf(entropy(Contingency([0.0 1.0; 1.0 0.0])))
end
end
