diff --git a/docs/src/extends.md b/docs/src/extends.md
index 0c87b8bfbe..25eae4ecd9 100644
--- a/docs/src/extends.md
+++ b/docs/src/extends.md
@@ -4,11 +4,8 @@ Whereas this package already provides a large collection of common distributions
 Generally, you don't have to implement every API method listed in the documentation. This package provides a series of generic functions that turn a small number of internal methods into user-end API methods. What you need to do is to implement this small set of internal methods for your distributions.
 
-By default, `Discrete` sampleables have the support of type `Int` while `Continuous` sampleables have the support of type `Float64`. If this assumption does not hold for your new distribution or sampler, or its `ValueSupport` is neither `Discrete` nor `Continuous`, you should implement the `eltype` method in addition to the other methods listed below.
-
 **Note:** The methods that need to be implemented are different for distributions of different variate forms.
 
-
 ## Create a Sampler
 
 Unlike full-fledged distributions, a sampler, in general, only provides limited functionalities, mainly to support sampling.
@@ -18,60 +15,48 @@ Unlike full-fledged distributions, a sampler, in general, only provides limited
 To implement a univariate sampler, one can define a subtype (say `Spl`) of `Sampleable{Univariate,S}` (where `S` can be `Discrete` or `Continuous`), and provide a `rand` method, as
 
 ```julia
-function rand(rng::AbstractRNG, s::Spl)
+function Base.rand(rng::AbstractRNG, s::Spl)
     # ... generate a single sample from s
 end
 ```
 
-The package already implements a vectorized version of `rand!` and `rand` that repeatedly calls the scalar version to generate multiple samples; as wells as a one arg version that uses the default random number generator.
-
-### Multivariate Sampler
+The package already implements a vectorized version `rand(rng::AbstractRNG, s::Spl, dims::Int...)` and an in-place version `rand!(rng::AbstractRNG, s::Spl, x::AbstractArray{<:Real})` that repeatedly call the scalar version to generate multiple samples.
+Additionally, the package implements versions of these functions without the `rng::AbstractRNG` argument that use the default random number generator.
 
-To implement a multivariate sampler, one can define a subtype of `Sampleable{Multivariate,S}`, and provide both `length` and `_rand!` methods, as
+If there is a more efficient method to generate multiple samples, one should provide the following method
 
 ```julia
-Base.length(s::Spl) = ... # return the length of each sample
-
-function _rand!(rng::AbstractRNG, s::Spl, x::AbstractVector{T}) where T<:Real
-    # ... generate a single vector sample to x
+function Random.rand!(rng::AbstractRNG, s::Spl, x::AbstractArray{<:Real})
+    # ... generate multiple samples from s in x
 end
 ```
 
-This function can assume that the dimension of `x` is correct, and doesn't need to perform dimension checking.
+### Multivariate Sampler
 
-The package implements both `rand` and `rand!` as follows (which you don't need to implement in general):
+To implement a multivariate sampler, one can define a subtype of `Sampleable{Multivariate,S}`, and provide `length`, `rand`, and `rand!` methods, as
 
 ```julia
-function _rand!(rng::AbstractRNG, s::Sampleable{Multivariate}, A::DenseMatrix)
-    for i = 1:size(A,2)
-        _rand!(rng, s, view(A,:,i))
-    end
-    return A
-end
+Base.length(s::Spl) = ... 
# return the length of each sample
 
-function rand!(rng::AbstractRNG, s::Sampleable{Multivariate}, A::AbstractVector)
-    length(A) == length(s) ||
-        throw(DimensionMismatch("Output size inconsistent with sample length."))
-    _rand!(rng, s, A)
+function Base.rand(rng::AbstractRNG, s::Spl)
+    # ... generate a single vector sample from s
 end
 
-function rand!(rng::AbstractRNG, s::Sampleable{Multivariate}, A::DenseMatrix)
-    size(A,1) == length(s) ||
-        throw(DimensionMismatch("Output size inconsistent with sample length."))
-    _rand!(rng, s, A)
+@inline function Random.rand!(rng::AbstractRNG, s::Spl, x::AbstractVector{<:Real})
+    # `@inline` + `@boundscheck` allows users to skip bound checks by calling `@inbounds rand!(...)`
+    # Ref https://docs.julialang.org/en/v1/devdocs/boundscheck/#Eliding-bounds-checks
+    @boundscheck # ... check size (and possibly indices) of `x`
+    # ... generate a single vector sample from s in x
 end
-
-rand(rng::AbstractRNG, s::Sampleable{Multivariate,S}) where {S<:ValueSupport} =
-    _rand!(rng, s, Vector{eltype(S)}(length(s)))
-
-rand(rng::AbstractRNG, s::Sampleable{Multivariate,S}, n::Int) where {S<:ValueSupport} =
-    _rand!(rng, s, Matrix{eltype(S)}(length(s), n))
 ```
 
 If there is a more efficient method to generate multiple vector samples in a batch, one should provide the following method
 
 ```julia
-function _rand!(rng::AbstractRNG, s::Spl, A::DenseMatrix{T}) where T<:Real
+@inline function Random.rand!(rng::AbstractRNG, s::Spl, A::AbstractMatrix{<:Real})
+    # `@inline` + `@boundscheck` allows users to skip bound checks by calling `@inbounds rand!(...)`
+    # Ref https://docs.julialang.org/en/v1/devdocs/boundscheck/#Eliding-bounds-checks
+    @boundscheck # ... check size (and possibly indices) of `A`
     # ... generate multiple vector samples in batch
 end
 ```
@@ -80,17 +65,22 @@ Remember that each *column* of A is a sample.
 
 ### Matrix-variate Sampler
 
-To implement a multivariate sampler, one can define a subtype of `Sampleable{Multivariate,S}`, and provide both `size` and `_rand!` methods, as
+To implement a matrix-variate sampler, one can define a subtype of `Sampleable{Matrixvariate,S}`, and provide `size`, `rand`, and `rand!` methods, as
 
 ```julia
 Base.size(s::Spl) = ... # the size of each matrix sample
 
-function _rand!(rng::AbstractRNG, s::Spl, x::DenseMatrix{T}) where T<:Real
-    # ... generate a single matrix sample to x
+function Base.rand(rng::AbstractRNG, s::Spl)
+    # ... generate a single matrix sample from s
 end
-```
 
-Note that you can assume `x` has correct dimensions in `_rand!` and don't have to perform dimension checking, the generic `rand` and `rand!` will do dimension checking and array allocation for you.
+@inline function Random.rand!(rng::AbstractRNG, s::Spl, x::AbstractMatrix{<:Real})
+    # `@inline` + `@boundscheck` allows users to skip bound checks by calling `@inbounds rand!(...)`
+    # Ref https://docs.julialang.org/en/v1/devdocs/boundscheck/#Eliding-bounds-checks
+    @boundscheck # ... check size (and possibly indices) of `x`
+    # ... generate a single matrix sample from s in x
+end
+```
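+
+For concreteness, here is a minimal sketch of this interface for a hypothetical sampler (not part of the package) that draws matrices with independent standard normal entries, assuming `using Distributions` and `using Random`:
+
+```julia
+struct StdNormalMatrix <: Sampleable{Matrixvariate,Continuous}
+    m::Int
+    n::Int
+end
+
+Base.size(s::StdNormalMatrix) = (s.m, s.n)
+
+Base.rand(rng::AbstractRNG, s::StdNormalMatrix) = randn(rng, s.m, s.n)
+
+@inline function Random.rand!(rng::AbstractRNG, s::StdNormalMatrix, x::AbstractMatrix{<:Real})
+    @boundscheck size(x) == size(s) || throw(DimensionMismatch("inconsistent array dimensions"))
+    # fill x with standard normal draws (assumes a float element type)
+    Random.randn!(rng, x)
+    return x
+end
+```
+
+With these definitions, `rand(StdNormalMatrix(2, 3))`, `rand(rng, s, dims...)`, and the in-place `rand!(rng, s, X)` should all work through the package's generic fallbacks.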
 
 ## Create a Distribution
 
@@ -106,7 +96,7 @@ A univariate distribution type should be defined as a subtype of `DiscreteUnivar
 
 The following methods need to be implemented for each univariate distribution type:
 
-- [`rand(::AbstractRNG, d::UnivariateDistribution)`](@ref)
+- [`Base.rand(::AbstractRNG, d::UnivariateDistribution)`](@ref)
 - [`sampler(d::Distribution)`](@ref)
 - [`logpdf(d::UnivariateDistribution, x::Real)`](@ref)
 - [`cdf(d::UnivariateDistribution, x::Real)`](@ref)
@@ -138,8 +128,8 @@ The following methods need to be implemented for each multivariate distribution
 
 - [`length(d::MultivariateDistribution)`](@ref)
 - [`sampler(d::Distribution)`](@ref)
-- [`eltype(d::Distribution)`](@ref)
-- [`Distributions._rand!(::AbstractRNG, d::MultivariateDistribution, x::AbstractArray)`](@ref)
+- [`Base.rand(::AbstractRNG, d::MultivariateDistribution)`](@ref)
+- [`Random.rand!(::AbstractRNG, d::MultivariateDistribution, x::AbstractVector{<:Real})`](@ref)
 - [`Distributions._logpdf(d::MultivariateDistribution, x::AbstractArray)`](@ref)
 
 Note that if there exist faster methods for batch evaluation, one should override `_logpdf!` and `_pdf!`.
@@ -161,6 +151,7 @@ A matrix-variate distribution type should be defined as a subtype of `DiscreteMa
 The following methods need to be implemented for each matrix-variate distribution type:
 
 - [`size(d::MatrixDistribution)`](@ref)
-- [`Distributions._rand!(rng::AbstractRNG, d::MatrixDistribution, A::AbstractMatrix)`](@ref)
+- [`Base.rand(rng::AbstractRNG, d::MatrixDistribution)`](@ref)
+- [`Random.rand!(rng::AbstractRNG, d::MatrixDistribution, A::AbstractMatrix{<:Real})`](@ref)
 - [`sampler(d::MatrixDistribution)`](@ref)
 - [`Distributions._logpdf(d::MatrixDistribution, x::AbstractArray)`](@ref)
diff --git a/docs/src/matrix.md b/docs/src/matrix.md
index 6d352f19a7..6c80d82c27 100644
--- a/docs/src/matrix.md
+++ b/docs/src/matrix.md
@@ -25,7 +25,8 @@ var(::MatrixDistribution)
 cov(::MatrixDistribution)
 pdf(d::MatrixDistribution, x::AbstractMatrix{<:Real})
 logpdf(d::MatrixDistribution, x::AbstractMatrix{<:Real})
-Distributions._rand!(::AbstractRNG, ::MatrixDistribution, A::AbstractMatrix)
+Base.rand(::AbstractRNG, ::MatrixDistribution)
+Random.rand!(::AbstractRNG, ::MatrixDistribution, A::AbstractMatrix{<:Real})
 ```
 
 ## Distributions
diff --git a/docs/src/multivariate.md b/docs/src/multivariate.md
index 35a0c947b3..f3132d1994 100644
--- a/docs/src/multivariate.md
+++ b/docs/src/multivariate.md
@@ -18,7 +18,6 @@ The methods listed below are implemented for each multivariate distribution, whi
 ```@docs
 length(::MultivariateDistribution)
 size(::MultivariateDistribution)
-eltype(::Type{MultivariateDistribution})
 mean(::MultivariateDistribution)
 var(::MultivariateDistribution)
 std(::MultivariateDistribution)
@@ -42,8 +41,8 @@ loglikelihood(::MultivariateDistribution, ::AbstractVector{<:Real})
 ### Sampling
 
 ```@docs
-rand(rng::AbstractRNG, ::MultivariateDistribution)
-rand!(rng::AbstractRNG, d::MultivariateDistribution, x::AbstractArray)
+Base.rand(rng::AbstractRNG, ::MultivariateDistribution)
+Random.rand!(rng::AbstractRNG, d::MultivariateDistribution, x::AbstractVector{<:Real})
 ```
 
 **Note:** In addition to these common methods, each multivariate distribution has its special methods, as introduced below.
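To illustrate the revised interface documented above, a minimal multivariate distribution could look as follows. This is only a sketch: the `IsoExponential` type is hypothetical (a product of `d` independent standard exponential marginals, so the joint log-density is `-sum(x)` on the nonnegative orthant), and it assumes `using Distributions` and `using Random`.

```julia
struct IsoExponential <: ContinuousMultivariateDistribution
    d::Int
end

Base.length(d::IsoExponential) = d.d

# a single sample: d independent Exp(1) draws
Base.rand(rng::AbstractRNG, d::IsoExponential) = randexp(rng, d.d)

# in-place sampling; the size check can be elided with `@inbounds rand!(...)`
@inline function Random.rand!(rng::AbstractRNG, d::IsoExponential, x::AbstractVector{<:Real})
    @boundscheck length(x) == length(d) || throw(DimensionMismatch("inconsistent array dimensions"))
    Random.randexp!(rng, x)
    return x
end

# joint log-density: -sum(x) on the support, -Inf outside
function Distributions._logpdf(d::IsoExponential, x::AbstractVector{<:Real})
    s = float(sum(x))
    return all(≥(0), x) ? -s : oftype(s, -Inf)
end
```

With `length`, `rand`, `rand!`, and `_logpdf` defined (and the package's default `sampler` fallback), generic methods such as `logpdf`, `loglikelihood`, and multi-sample `rand(rng, d, n)` should follow from the generic fallbacks.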
diff --git a/docs/src/types.md b/docs/src/types.md index f0ed23836a..eb77d9c472 100644 --- a/docs/src/types.md +++ b/docs/src/types.md @@ -57,7 +57,6 @@ The basic functionalities that a sampleable object provides are to *retrieve inf length(::Sampleable) size(::Sampleable) nsamples(::Type{Sampleable}, ::Any) -eltype(::Type{Sampleable}) rand(::AbstractRNG, ::Sampleable) rand!(::AbstractRNG, ::Sampleable, ::AbstractArray) ``` diff --git a/src/censored.jl b/src/censored.jl index 5b8cfac5ee..4c14514451 100644 --- a/src/censored.jl +++ b/src/censored.jl @@ -112,8 +112,6 @@ function partype(d::Censored{<:UnivariateDistribution,<:ValueSupport,T}) where { return promote_type(partype(d.uncensored), T) end -Base.eltype(::Type{<:Censored{D,S,T}}) where {D,S,T} = promote_type(T, eltype(D)) - #### Range and Support isupperbounded(d::LeftCensored) = isupperbounded(d.uncensored) diff --git a/src/cholesky/lkjcholesky.jl b/src/cholesky/lkjcholesky.jl index 3455bf8bb1..9503e8890e 100644 --- a/src/cholesky/lkjcholesky.jl +++ b/src/cholesky/lkjcholesky.jl @@ -7,7 +7,7 @@ matrices (positive-definite matrices with ones on the diagonal). Variates or samples of the distribution are `LinearAlgebra.Cholesky` objects, as might be returned by `F = LinearAlgebra.cholesky(R)`, so that `Matrix(F) ≈ R` is a variate or -sample of [`LKJ`](@ref). +sample of [`LKJ`](@ref). Sampling `LKJCholesky` is faster than sampling `LKJ`, and often having the correlation matrix in factorized form makes subsequent computations cheaper as well. @@ -48,8 +48,8 @@ function LKJCholesky(d::Int, η::Real, _uplo::Union{Char,Symbol} = 'L'; check_ar ) logc0 = lkj_logc0(d, η) uplo = _char_uplo(_uplo) - T = Base.promote_eltype(η, logc0) - return LKJCholesky(d, T(η), uplo, T(logc0)) + _η, _logc0 = promote(η, logc0) + return LKJCholesky(d, _η, uplo, _logc0) end # adapted from LinearAlgebra.char_uplo @@ -82,8 +82,6 @@ end # Properties # ----------------------------------------------------------------------------- -Base.eltype(::Type{LKJCholesky{T}}) where {T} = T - function Base.size(d::LKJCholesky) p = d.d return (p, p) @@ -134,7 +132,7 @@ function logkernel(d::LKJCholesky, R::LinearAlgebra.Cholesky) p == 1 && return c * log(first(factors)) # assuming D = diag(factors) with length(D) = p, # logp = sum(i -> (c - i) * log(D[i]), 2:p) - logp = sum(Iterators.drop(enumerate(diagind(factors)), 1)) do (i, di) + logp = sum(Iterators.drop(enumerate(diagind(factors)), 1)) do (i, di) return (c - i) * log(factors[di]) end return logp @@ -159,95 +157,114 @@ end # ----------------------------------------------------------------------------- function Base.rand(rng::AbstractRNG, d::LKJCholesky) - factors = Matrix{eltype(d)}(undef, size(d)) - R = LinearAlgebra.Cholesky(factors, d.uplo, 0) - return _lkj_cholesky_onion_sampler!(rng, d, R) -end -function Base.rand(rng::AbstractRNG, d::LKJCholesky, dims::Dims) p = d.d + η = d.η uplo = d.uplo - T = eltype(d) - TM = Matrix{T} - Rs = Array{LinearAlgebra.Cholesky{T,TM}}(undef, dims) - for i in eachindex(Rs) - factors = TM(undef, p, p) - Rs[i] = R = LinearAlgebra.Cholesky(factors, uplo, 0) - _lkj_cholesky_onion_sampler!(rng, d, R) + factors = Matrix{float(partype(d))}(undef, p, p) + _lkj_cholesky_onion_tri!(rng, factors, p, η, uplo) + return LinearAlgebra.Cholesky(factors, uplo, 0) +end +function rand(rng::AbstractRNG, d::LKJCholesky, dims::Dims) + let rng=rng, d=d + map(_ -> rand(rng, d), CartesianIndices(dims)) end - return Rs end -Random.rand!(d::LKJCholesky, R::LinearAlgebra.Cholesky) = Random.rand!(default_rng(), d, R) 
-function Random.rand!(rng::AbstractRNG, d::LKJCholesky, R::LinearAlgebra.Cholesky) - return _lkj_cholesky_onion_sampler!(rng, d, R) +Base.@propagate_inbounds function rand!(d::LKJCholesky, R::LinearAlgebra.Cholesky{<:Real}) + return rand!(default_rng(), d, R) +end +@inline function rand!(rng::AbstractRNG, d::LKJCholesky, R::LinearAlgebra.Cholesky{<:Real}) + @boundscheck size(R.factors) == size(d) + _lkj_cholesky_onion_tri!(rng, R.factors, d.d, d.η, R.uplo) + return R end -function Random.rand!( +function rand!( rng::AbstractRNG, d::LKJCholesky, Rs::AbstractArray{<:LinearAlgebra.Cholesky{T,TM}}, allocate::Bool, -) where {T,TM} +) where {T<:Real,TM} p = d.d - uplo = d.uplo + η = d.η if allocate + uplo = d.uplo for i in eachindex(Rs) - Rs[i] = _lkj_cholesky_onion_sampler!( - rng, - d, - LinearAlgebra.Cholesky(TM(undef, p, p), uplo, 0), + Rs[i] = LinearAlgebra.Cholesky( + _lkj_cholesky_onion_tri!( + rng, + TM(undef, p, p), + p, + η, + uplo, + ), + uplo, + 0, ) end else - for i in eachindex(Rs) - _lkj_cholesky_onion_sampler!(rng, d, Rs[i]) + for R in Rs + _lkj_cholesky_onion_tri!(rng, R.factors, p, η, R.uplo) end end return Rs end -function Random.rand!( +function rand!( rng::AbstractRNG, d::LKJCholesky, Rs::AbstractArray{<:LinearAlgebra.Cholesky{<:Real}}, ) allocate = any(!isassigned(Rs, i) for i in eachindex(Rs)) || any(R -> size(R, 1) != d.d, Rs) - return Random.rand!(rng, d, Rs, allocate) + return rand!(rng, d, Rs, allocate) end # # onion method # -function _lkj_cholesky_onion_sampler!( - rng::AbstractRNG, - d::LKJCholesky, - R::LinearAlgebra.Cholesky, -) - if R.uplo === 'U' - _lkj_cholesky_onion_tri!(rng, R.factors, d.d, d.η, Val(:U)) - else - _lkj_cholesky_onion_tri!(rng, R.factors, d.d, d.η, Val(:L)) - end - return R -end - function _lkj_cholesky_onion_tri!( rng::AbstractRNG, - A::AbstractMatrix, + A::AbstractMatrix{<:Real}, d::Int, η::Real, - ::Val{uplo}, -) where {uplo} + uplo::Char, +) + # Currently requires one-based indexing + # TODO: Generalize to more general indices + Base.require_one_based_indexing(A) + @assert size(A) == (d, d) + + # Special case: Distribution collapses to a Dirac distribution at the identity matrix + # We handle this separately, to increase performance and since `rand(Beta(Inf, Inf))` is not well defined. + if η == Inf + if uplo === 'L' + for j in 1:d + A[j, j] = 1 + for i in (j+1):d + A[i, j] = 0 + end + end + else + for j in 1:d + for i in 1:(j - 1) + A[i, j] = 0 + end + A[j, j] = 1 + end + end + return A + end + # Section 3.2 in LKJ (2009 JMA) # reformulated to incrementally construct Cholesky factor as mentioned in Section 5 # equivalent steps in algorithm in reference are marked. - @assert size(A) == (d, d) + A[1, 1] = 1 d > 1 || return A β = η + (d - 2)//2 # 1. Initialization w0 = 2 * rand(rng, Beta(β, β)) - 1 - @inbounds if uplo === :L + @inbounds if uplo === 'L' A[2, 1] = w0 else A[1, 2] = w0 @@ -261,7 +278,7 @@ function _lkj_cholesky_onion_tri!( y = rand(rng, Beta(k//2, β)) # (c)-(e) # w is directionally uniform vector of length √y - @inbounds w = @views uplo === :L ? A[k + 1, 1:k] : A[1:k, k + 1] + @inbounds w = @views uplo === 'L' ? 
A[k + 1, 1:k] : A[1:k, k + 1]
     Random.randn!(rng, w)
     rmul!(w, sqrt(y) / norm(w)) # normalize so new row/column has unit norm
diff --git a/src/common.jl b/src/common.jl
index 899cca41d2..4a6d7f6936 100644
--- a/src/common.jl
+++ b/src/common.jl
@@ -100,14 +100,34 @@ Base.size(s::Sampleable{Univariate}) = ()
 Base.size(s::Sampleable{Multivariate}) = (length(s),)
 
 """
-    eltype(::Type{Sampleable})
-
-The default element type of a sample. This is the type of elements of the samples generated
-by the `rand` method. However, one can provide an array of different element types to
-store the samples using `rand!`.
-"""
-Base.eltype(::Type{<:Sampleable{F,Discrete}}) where {F} = Int
-Base.eltype(::Type{<:Sampleable{F,Continuous}}) where {F} = Float64
+    Base.eltype(::Type{S}) where {S<:Distributions.Sampleable}
+
+The default element type of a sample from a sampler of type `S`.
+
+This is the type of elements of the samples generated by the `rand` method.
+However, one can provide an array of different element types to store the samples using `rand!`.
+
+!!! warning
+    This method is deprecated and will be removed in an upcoming breaking release.
+"""
+function Base.eltype(::Type{S}) where {VF<:VariateForm,VS<:Union{Discrete,Continuous},S<:Sampleable{VF,VS}}
+    Base.depwarn("`eltype(::Type{<:Distributions.Sampleable})` is deprecated and will be removed", :eltype)
+    T = Base.promote_op(rand, S)
+    if T === Union{}
+        # `T` can be `Union{}` if
+        # - `rand(::S)` is not defined and/or
+        # - `rand(::S)` calls `eltype(::S)` or `eltype(::Type{S})` but they are not specialized and fall back to the generic definition here
+        #   (the latter case happens e.g. for non-univariate samplers that only implement `_rand!` and rely on the generic fallback for `rand`)
+        # In all these cases we return 1) `Int` for `Discrete` samplers and 2) `Float64` for `Continuous` samplers
+        if VS === Discrete
+            return Int
+        elseif VS === Continuous
+            return Float64
+        end
+    else
+        return eltype(T)
+    end
+end
 
 """
     nsamples(s::Sampleable)
diff --git a/src/genericrand.jl b/src/genericrand.jl
index 6b3b213f16..9f737a3be4 100644
--- a/src/genericrand.jl
+++ b/src/genericrand.jl
@@ -26,38 +26,28 @@ rand(rng::AbstractRNG, s::Sampleable, dim1::Int, moredims::Int...) =
 
 # default fallback (redefined for univariate distributions)
 function rand(rng::AbstractRNG, s::Sampleable{<:ArrayLikeVariate})
+    Base.depwarn("Please implement `rand(rng::AbstractRNG, s::$(typeof(s)))`. 
The default fallback will be removed", :rand)
     return @inbounds rand!(rng, s, Array{eltype(s)}(undef, size(s)))
 end
 
 # multiple samples
-function rand(rng::AbstractRNG, s::Sampleable{Univariate}, dims::Dims)
-    out = Array{eltype(s)}(undef, dims)
-    return @inbounds rand!(rng, sampler(s), out)
+# we use function barriers since for some distributions `sampler(s)` is not type-stable:
+# https://github.com/JuliaStats/Distributions.jl/pull/1281
+function rand(rng::AbstractRNG, s::Sampleable{<:ArrayLikeVariate}, dims::Dims)
+    return _rand(rng, sampler(s), dims)
 end
-function rand(
-    rng::AbstractRNG, s::Sampleable{<:ArrayLikeVariate}, dims::Dims,
-)
-    sz = size(s)
-    ax = map(Base.OneTo, dims)
-    out = [Array{eltype(s)}(undef, sz) for _ in Iterators.product(ax...)]
-    return @inbounds rand!(rng, sampler(s), out, false)
+function _rand(rng::AbstractRNG, s::Sampleable{<:ArrayLikeVariate}, dims::Dims)
+    r = rand(rng, s)
+    out = Array{typeof(r)}(undef, dims)
+    out[1] = r
+    rand!(rng, s, @view(out[2:end]))
+    return out
 end
 
-# these are workarounds for sampleables that incorrectly base `eltype` on the parameters
+# this is a workaround for sampleables that incorrectly base `eltype` on the parameters
 function rand(rng::AbstractRNG, s::Sampleable{<:ArrayLikeVariate,Continuous})
-    return @inbounds rand!(rng, sampler(s), Array{float(eltype(s))}(undef, size(s)))
-end
-function rand(rng::AbstractRNG, s::Sampleable{Univariate,Continuous}, dims::Dims)
-    out = Array{float(eltype(s))}(undef, dims)
-    return @inbounds rand!(rng, sampler(s), out)
-end
-function rand(
-    rng::AbstractRNG, s::Sampleable{<:ArrayLikeVariate,Continuous}, dims::Dims,
-)
-    sz = size(s)
-    ax = map(Base.OneTo, dims)
-    out = [Array{float(eltype(s))}(undef, sz) for _ in Iterators.product(ax...)]
-    return @inbounds rand!(rng, sampler(s), out, false)
+    Base.depwarn("Please implement `rand(rng::AbstractRNG, s::$(typeof(s)))`. The default fallback will be removed", :rand)
+    return @inbounds rand!(rng, s, Array{float(eltype(s))}(undef, size(s)))
 end
 
 """
@@ -75,9 +65,6 @@ form as specified above. The rules are summarized as below:
 """
 function rand! end
 Base.@propagate_inbounds rand!(s::Sampleable, X::AbstractArray) = rand!(default_rng(), s, X)
-Base.@propagate_inbounds function rand!(rng::AbstractRNG, s::Sampleable, X::AbstractArray)
-    return _rand!(rng, s, X)
-end
 
 # default definitions for arraylike variates
 @inline function rand!(
@@ -85,6 +72,7 @@ end
     s::Sampleable{ArrayLikeVariate{N}},
     x::AbstractArray{<:Real,N},
 ) where {N}
+    Base.depwarn("Please implement `Random.rand!(rng::Random.AbstractRNG, s::$(typeof(s)), x::AbstractArray{<:Real,$N})`, the default fallback will be removed.", :rand!)
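+    # NOTE: This generic fallback is deprecated (see the warning above); new sampler
+    # and distribution types should define `Random.rand!` (and `Base.rand`) directly.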
    @boundscheck begin
         size(x) == size(s) ||
             throw(DimensionMismatch("inconsistent array dimensions"))
     end
@@ -105,7 +93,8 @@
         throw(DimensionMismatch("inconsistent array dimensions"))
     end
     # the function barrier fixes performance issues if `sampler(s)` is type unstable
-    return _rand!(rng, sampler(s), x)
+    _rand!(rng, sampler(s), x)
+    return x
 end
 
 function _rand!(
diff --git a/src/matrix/inversewishart.jl b/src/matrix/inversewishart.jl
index 4c605caf25..03ff74bcf1 100644
--- a/src/matrix/inversewishart.jl
+++ b/src/matrix/inversewishart.jl
@@ -127,24 +127,14 @@ end
 # Sampling
 # -----------------------------------------------------------------------------
 
-_rand!(rng::AbstractRNG, d::InverseWishart, A::AbstractMatrix) =
-    (A .= inv(cholesky!(_rand!(rng, Wishart(d.df, inv(d.Ψ)), A))))
-
-# -----------------------------------------------------------------------------
-# Test utils
-# -----------------------------------------------------------------------------
-
-function _univariate(d::InverseWishart)
-    check_univariate(d)
-    ν, Ψ = params(d)
-    α = ν / 2
-    β = Matrix(Ψ)[1] / 2
-    return InverseGamma(α, β)
+function rand(rng::AbstractRNG, d::InverseWishart)
+    A = Matrix{float(partype(d))}(undef, size(d))
+    @inbounds rand!(rng, d, A)
+    return A
 end
 
-function _rand_params(::Type{InverseWishart}, elty, n::Int, p::Int)
-    n == p || throw(ArgumentError("dims must be equal for InverseWishart"))
-    ν = elty( n + 3 + abs(10randn()) )
-    Ψ = (X = 2rand(elty, n, n) .- 1 ; X * X')
-    return ν, Ψ
+@inline function rand!(rng::AbstractRNG, d::InverseWishart, A::AbstractMatrix{<:Real})
+    @boundscheck size(d) == size(A) || throw(DimensionMismatch("inconsistent array dimensions"))
+    @inbounds rand!(rng, Wishart(d.df, inv(d.Ψ)), A)
+    A .= inv(cholesky!(Symmetric(A)))
 end
diff --git a/src/matrix/lkj.jl b/src/matrix/lkj.jl
index e56b7888e0..233711f151 100644
--- a/src/matrix/lkj.jl
+++ b/src/matrix/lkj.jl
@@ -39,8 +39,8 @@ function LKJ(d::Integer, η::Real; check_args::Bool=true)
         (η, η > 0, "shape parameter must be positive."),
     )
     logc0 = lkj_logc0(d, η)
-    T = Base.promote_eltype(η, logc0)
-    LKJ{T, typeof(d)}(d, T(η), T(logc0))
+    _η, _logc0 = promote(η, logc0)
+    return LKJ(d, _η, _logc0)
 end
 
 # -----------------------------------------------------------------------------
@@ -100,13 +100,13 @@ params(d::LKJ) = (d.d, d.η)
 # -----------------------------------------------------------------------------
 
 function lkj_logc0(d::Integer, η::Real)
-    T = float(Base.promote_typeof(d, η))
+    η = float(first(promote(η, d)))
     d > 1 || return zero(η)
     if isone(η)
         if iseven(d)
-            logc0 = -lkj_onion_loginvconst_uniform_even(d, T)
+            logc0 = -lkj_onion_loginvconst_uniform_even(d, η)
         else
-            logc0 = -lkj_onion_loginvconst_uniform_odd(d, T)
+            logc0 = -lkj_onion_loginvconst_uniform_odd(d, η)
         end
     else
         logc0 = -lkj_onion_loginvconst(d, η)
@@ -120,14 +120,39 @@ logkernel(d::LKJ, R::AbstractMatrix) = (d.η - 1) * logdet(R)
 # Sampling
 # -----------------------------------------------------------------------------
 
-function _rand!(rng::AbstractRNG, d::LKJ, R::AbstractMatrix)
-    R .= _lkj_onion_sampler(d.d, d.η, rng)
+function rand(rng::AbstractRNG, d::LKJ)
+    R = Matrix{float(partype(d))}(undef, size(d))
+    @inbounds rand!(rng, d, R)
+    return R
 end
 
-function _lkj_onion_sampler(d::Integer, η::Real, rng::AbstractRNG = Random.default_rng())
+@inline function rand!(rng::AbstractRNG, d::LKJ, R::AbstractMatrix{<:Real})
+    @boundscheck size(R) == size(d) || throw(DimensionMismatch("inconsistent array dimensions"))
+
+    # Currently requires one-based indexing
+    # TODO: Generalize to more general indices
+    Base.require_one_based_indexing(R)
+
+    # Special case: Distribution collapses to a 
Dirac distribution at the identity matrix + # We handle this separately, to increase performance and since `rand(Beta(Inf, Inf))` is not well defined. + η = d.η + d = d.d + if η == Inf + for j in 1:d + for i in 1:(j - 1) + R[i, j] = 0 + end + R[j, j] = 1 + for i in (j + 1):d + R[i, j] = 0 + end + end + return R + end + # Section 3.2 in LKJ (2009 JMA) # 1. Initialization - R = ones(typeof(η), d, d) + fill!(R, oneunit(float(η))) d > 1 || return R β = η + 0.5d - 1 u = rand(rng, Beta(β, β)) @@ -159,25 +184,9 @@ end # ----------------------------------------------------------------------------- function _marginal(lkj::LKJ) - d = lkj.d - η = lkj.η - α = η + 0.5d - 1 - AffineDistribution(-1, 2, Beta(α, α)) -end - -# ----------------------------------------------------------------------------- -# Test utils -# ----------------------------------------------------------------------------- - -function _univariate(d::LKJ) - check_univariate(d) - return DiscreteNonParametric([one(d.η)], [one(d.η)]) -end - -function _rand_params(::Type{LKJ}, elty, n::Int, p::Int) - n == p || throw(ArgumentError("dims must be equal for LKJ")) - η = abs(3randn(elty)) - return n, η + η, d = promote(lkj.η, lkj.d) + α = η + d/2 - 1 + return 2 * Beta(α, α) - 1 end # ----------------------------------------------------------------------------- @@ -187,78 +196,49 @@ end # for 1 / c₀, even if they say that it is not. # ----------------------------------------------------------------------------- +# Equation (17) in LKJ (2009 JMA) function lkj_onion_loginvconst(d::Integer, η::Real) - # Equation (17) in LKJ (2009 JMA) - T = float(Base.promote_typeof(d, η)) - h = T(1//2) - α = η + h * d - 1 - loginvconst = (2*η + d - 3)*T(logtwo) + (T(logπ) / 4) * (d * (d - 1) - 2) + logbeta(α, α) - (d - 2) * loggamma(η + h * (d - 1)) + η = float(first(promote(η, d))) + α = η + oftype(η, d) / 2 - 1 + loginvconst = (2 * η + d - 3) * oftype(η, logtwo) + (d * (d - 1) - 2) * (oftype(η, logπ) / 4) + logbeta(α, α) + z = loggamma(η + oftype(η, d - 1) / 2) for k in 2:(d - 1) - loginvconst += loggamma(η + h * (d - 1 - k)) + loginvconst += loggamma(η + oftype(η, d - 1 - k) / 2) - z end return loginvconst end -function lkj_onion_loginvconst_uniform_odd(d::Integer, ::Type{T}) where {T <: Real} - # Theorem 5 in LKJ (2009 JMA) - h = T(1//2) - loginvconst = (d - 1) * ((d + 1) * (T(logπ) / 4) - (d - 1) * (T(logtwo) / 4) - loggamma(h * (d + 1))) +# Theorem 5 in LKJ (2009 JMA) +function lkj_onion_loginvconst_uniform_odd(d::Integer, η::Real) + @assert isodd(d) && isone(η) + η = float(first(promote(η, d))) + loginvconst = (d - 1) * ((d + 1) * (oftype(η, logπ) / 4) - (d - 1) * (oftype(η, logtwo) / 4) - loggamma(oftype(η, (d + 1) ÷ 2))) for k in 2:2:(d - 1) - loginvconst += loggamma(T(k)) + loginvconst += loggamma(oftype(η, k)) end return loginvconst end -function lkj_onion_loginvconst_uniform_even(d::Integer, ::Type{T}) where {T <: Real} - # Theorem 5 in LKJ (2009 JMA) - h = T(1//2) - loginvconst = d * ((d - 2) * (T(logπ) / 4) + (3 * d - 4) * (T(logtwo) / 4) + loggamma(h * d)) - (d - 1) * loggamma(T(d)) +# Theorem 5 in LKJ (2009 JMA) +function lkj_onion_loginvconst_uniform_even(d::Integer, η::Real) + @assert iseven(d) && isone(η) + η = float(first(promote(η, d))) + loginvconst = d * ((d - 2) * (oftype(η, logπ) / 4) + (3 * d - 4) * (oftype(η, logtwo) / 4) + loggamma(oftype(η, d ÷ 2))) - (d - 1) * loggamma(oftype(η, d)) for k in 2:2:(d - 2) - loginvconst += loggamma(k) + loginvconst += loggamma(oftype(η, k)) end return loginvconst end function 
lkj_vine_loginvconst(d::Integer, η::Real) # Equation (16) in LKJ (2009 JMA) - expsum = zero(η) - betasum = zero(η) - for k in 1:d - 1 - α = η + 0.5(d - k - 1) - expsum += (2η - 2 + d - k) * (d - k) + η = float(first(promote(η, d))) + expsum = betasum = zero(η) + for k in 1:(d - 1) + α = η + oftype(η, d - k - 1) / 2 + expsum += (2 * η - 2 + d - k) * (d - k) betasum += (d - k) * logbeta(α, α) end loginvconst = expsum * logtwo + betasum return loginvconst end - -function lkj_vine_loginvconst_uniform(d::Integer) - # Equation after (16) in LKJ (2009 JMA) - expsum = 0.0 - betasum = 0.0 - for k in 1:d - 1 - α = (k + 1) / 2 - expsum += k ^ 2 - betasum += k * logbeta(α, α) - end - loginvconst = expsum * logtwo + betasum - return loginvconst -end - -function lkj_loginvconst_alt(d::Integer, η::Real) - # Third line in first proof of Section 3.3 in LKJ (2009 JMA) - loginvconst = zero(η) - for k in 1:d - 1 - loginvconst += 0.5k*logπ + loggamma(η + 0.5(d - 1 - k)) - loggamma(η + 0.5(d - 1)) - end - return loginvconst -end - -function corr_logvolume(n::Integer) - # https://doi.org/10.4169/amer.math.monthly.123.9.909 - logvol = 0.0 - for k in 1:n - 1 - logvol += 0.5k*logπ + k*loggamma((k+1)/2) - k*loggamma((k+2)/2) - end - return logvol -end diff --git a/src/matrix/matrixbeta.jl b/src/matrix/matrixbeta.jl index 588c818909..9b93a57d69 100644 --- a/src/matrix/matrixbeta.jl +++ b/src/matrix/matrixbeta.jl @@ -119,7 +119,14 @@ end # Mitra (1970 Sankhyā) -function _rand!(rng::AbstractRNG, d::MatrixBeta, A::AbstractMatrix) +function rand(rng::AbstractRNG, d::MatrixBeta) + A = Matrix{float(partype(d))}(undef, size(d)) + @inbounds rand!(rng, d, A) + return A +end + +@inline function rand!(rng::AbstractRNG, d::MatrixBeta, A::AbstractMatrix{<:Real}) + @boundscheck size(A) == size(d) S1 = PDMat( rand(rng, d.W1) ) S2 = PDMat( rand(rng, d.W2) ) S = S1 + S2 @@ -127,19 +134,3 @@ function _rand!(rng::AbstractRNG, d::MatrixBeta, A::AbstractMatrix) A .= X_A_Xt(S1, invL) end -# ----------------------------------------------------------------------------- -# Test utils -# ----------------------------------------------------------------------------- - -function _univariate(d::MatrixBeta) - check_univariate(d) - p, n1, n2 = params(d) - return Beta(n1 / 2, n2 / 2) -end - -function _rand_params(::Type{MatrixBeta}, elty, n::Int, p::Int) - n == p || throw(ArgumentError("dims must be equal for MatrixBeta")) - n1 = elty( n + 1 + abs(10randn()) ) - n2 = elty( n + 1 + abs(10randn()) ) - return n, n1, n2 -end diff --git a/src/matrix/matrixfdist.jl b/src/matrix/matrixfdist.jl index 5a7e18efb7..2348a8e2ad 100644 --- a/src/matrix/matrixfdist.jl +++ b/src/matrix/matrixfdist.jl @@ -136,9 +136,18 @@ end # Sampling # ----------------------------------------------------------------------------- -function _rand!(rng::AbstractRNG, d::MatrixFDist, A::AbstractMatrix) - Ψ = rand(rng, d.W) - A .= rand(rng, InverseWishart(d.n2, Ψ) ) +function rand(rng::AbstractRNG, d::MatrixFDist) + A = Matrix{float(partype(d))}(undef, size(d)) + @inbounds rand!(rng, d, A) + return A +end + +@inline function rand!(rng::AbstractRNG, d::MatrixFDist, A::AbstractMatrix{<:Real}) + @boundscheck size(A) == size(d) + X = rand(rng, d.W) + Ψ = PDMat(Symmetric(X)) + @inbounds rand!(rng, InverseWishart(d.n2, Ψ), A) + return A end # ----------------------------------------------------------------------------- @@ -146,23 +155,3 @@ end # ----------------------------------------------------------------------------- inv(d::MatrixFDist) = ( (n1, n2, B) = params(d); MatrixFDist(n2, n1, 
inv(B)) ) - -# ----------------------------------------------------------------------------- -# Test utils -# ----------------------------------------------------------------------------- - -function _univariate(d::MatrixFDist) - check_univariate(d) - n1, n2, B = params(d) - μ = zero(partype(d)) - σ = (n1 / n2) * Matrix(B)[1] - return AffineDistribution(μ, σ, FDist(n1, n2)) -end - -function _rand_params(::Type{MatrixFDist}, elty, n::Int, p::Int) - n == p || throw(ArgumentError("dims must be equal for MatrixFDist")) - n1 = elty( n + 1 + abs(10randn()) ) - n2 = elty( n + 3 + abs(10randn()) ) - B = (X = 2rand(elty, n, n) .- 1; X * X') - return n1, n2, B -end diff --git a/src/matrix/matrixnormal.jl b/src/matrix/matrixnormal.jl index 9d480bd69d..f2d091836e 100644 --- a/src/matrix/matrixnormal.jl +++ b/src/matrix/matrixnormal.jl @@ -104,10 +104,12 @@ params(d::MatrixNormal) = (d.M, d.U, d.V) # Evaluation # ----------------------------------------------------------------------------- -function matrixnormal_logc0(U::AbstractPDMat, V::AbstractPDMat) +function matrixnormal_logc0(U::AbstractPDMat{T}, V::AbstractPDMat{T}) where {T<:Real} n = size(U, 1) p = size(V, 1) - -(n * p / 2) * (logtwo + logπ) - (n / 2) * logdet(V) - (p / 2) * logdet(U) + term1 = n * logdet(V) + p * logdet(U) + term2 = n * p * (oftype(term1, logtwo) + oftype(term1, logπ)) + return - (term1 + term2) / 2 end function logkernel(d::MatrixNormal, X::AbstractMatrix) @@ -121,35 +123,17 @@ end # https://en.wikipedia.org/wiki/Matrix_normal_distribution#Drawing_values_from_the_distribution -function _rand!(rng::AbstractRNG, d::MatrixNormal, Y::AbstractMatrix) +function rand(rng::AbstractRNG, d::MatrixNormal) + Y = Matrix{float(partype(d))}(undef, size(d)) + @inbounds rand!(rng, d, Y) + return Y +end + +@inline function rand!(rng::AbstractRNG, d::MatrixNormal, Y::AbstractMatrix{<:Real}) + @boundscheck size(Y) == size(d) n, p = size(d) X = randn(rng, n, p) A = cholesky(d.U).L B = cholesky(d.V).U Y .= d.M .+ A * X * B end - -# ----------------------------------------------------------------------------- -# Test utils -# ----------------------------------------------------------------------------- - -function _univariate(d::MatrixNormal) - check_univariate(d) - M, U, V = params(d) - μ = M[1] - σ = sqrt( Matrix(U)[1] * Matrix(V)[1] ) - return Normal(μ, σ) -end - -function _multivariate(d::MatrixNormal) - n, p = size(d) - all([n, p] .> 1) && throw(ArgumentError("Row or col dim of `MatrixNormal` must be 1 to coerce to `MvNormal`")) - return vec(d) -end - -function _rand_params(::Type{MatrixNormal}, elty, n::Int, p::Int) - M = randn(elty, n, p) - U = (X = 2 .* rand(elty, n, n) .- 1; X * X') - V = (Y = 2 .* rand(elty, p, p) .- 1; Y * Y') - return M, U, V -end diff --git a/src/matrix/matrixtdist.jl b/src/matrix/matrixtdist.jl index e37119603c..4f503e68ce 100644 --- a/src/matrix/matrixtdist.jl +++ b/src/matrix/matrixtdist.jl @@ -120,22 +120,19 @@ var(d::MatrixTDist) = d.ν <= 2 ? 
throw(ArgumentError("var only defined for df > params(d::MatrixTDist) = (d.ν, d.M, d.Σ, d.Ω) -@inline partype(d::MatrixTDist{T}) where {T <: Real} = T +partype(::MatrixTDist{T}) where {T <: Real} = T # ----------------------------------------------------------------------------- # Evaluation # ----------------------------------------------------------------------------- -function matrixtdist_logc0(Σ::AbstractPDMat, Ω::AbstractPDMat, ν::Real) +function matrixtdist_logc0(Σ::AbstractPDMat{T}, Ω::AbstractPDMat{T}, ν::T) where {T<:Real} # returns the natural log of the normalizing constant for the pdf n = size(Σ, 1) p = size(Ω, 1) - term1 = logmvgamma(p, (ν + n + p - 1) / 2) - term2 = - (n * p / 2) * logπ - term3 = - logmvgamma(p, (ν + p - 1) / 2) - term4 = (-n / 2) * logdet(Ω) - term5 = (-p / 2) * logdet(Σ) - term1 + term2 + term3 + term4 + term5 + term1 = logmvgamma(p, (ν + n + p - 1) / 2) - logmvgamma(p, (ν + p - 1) / 2) + term2 = (n * p * oftype(term1, logπ) + n * logdet(Ω) + p * logdet(Σ)) / 2 + return term1 - term2 end function logkernel(d::MatrixTDist, X::AbstractMatrix) @@ -150,10 +147,19 @@ end # Theorem 4.2.1 in Gupta and Nagar (1999) -function _rand!(rng::AbstractRNG, d::MatrixTDist, A::AbstractMatrix) +function rand(rng::AbstractRNG, d::MatrixTDist) + n, _ = size(d) + S = Symmetric(rand(rng, InverseWishart(d.ν + n - 1, d.Σ)), :L) + A = rand(rng, MatrixNormal(d.M, S, d.Ω)) + return A +end + +@inline function rand!(rng::AbstractRNG, d::MatrixTDist, A::AbstractMatrix{<:Real}) n, p = size(d) - S = rand(rng, InverseWishart(d.ν + n - 1, d.Σ) ) - A .= rand(rng, MatrixNormal(d.M, S, d.Ω) ) + @boundscheck size(A) == (n, p) + S = Symmetric(rand(rng, InverseWishart(d.ν + n - 1, d.Σ)), :L) + @inbounds rand!(rng, MatrixNormal(d.M, S, d.Ω), A) + return A end # ----------------------------------------------------------------------------- @@ -169,25 +175,3 @@ function MvTDist(MT::MatrixTDist) ν, M, Σ, Ω = params(MT) MvTDist(ν, vec(M), (1 / ν) * kron(Σ, Ω)) end - -# ----------------------------------------------------------------------------- -# Test utils -# ----------------------------------------------------------------------------- - -function _univariate(d::MatrixTDist) - check_univariate(d) - ν, M, Σ, Ω = params(d) - μ = M[1] - σ = sqrt( Matrix(Σ)[1] * Matrix(Ω)[1] / ν ) - return AffineDistribution(μ, σ, TDist(ν)) -end - -_multivariate(d::MatrixTDist) = MvTDist(d) - -function _rand_params(::Type{MatrixTDist}, elty, n::Int, p::Int) - ν = elty( n + p + 1 + abs(10randn()) ) - M = randn(elty, n, p) - Σ = (X = 2rand(elty, n, n) .- 1; X * X') - Ω = (Y = 2rand(elty, p, p) .- 1; Y * Y') - return ν, M, Σ, Ω -end diff --git a/src/matrix/wishart.jl b/src/matrix/wishart.jl index e8da450060..d91b4f49cc 100644 --- a/src/matrix/wishart.jl +++ b/src/matrix/wishart.jl @@ -193,7 +193,14 @@ end # Sampling # ----------------------------------------------------------------------------- -function _rand!(rng::AbstractRNG, d::Wishart, A::AbstractMatrix) +function rand(rng::AbstractRNG, d::Wishart) + A = Matrix{float(partype(d))}(undef, size(d)) + @inbounds rand!(rng, d, A) + return A +end + +@inline function rand!(rng::AbstractRNG, d::Wishart, A::AbstractMatrix{<:Real}) + @boundscheck size(d) == size(A) if d.singular axes2 = axes(A, 2) r = rank(d) @@ -204,6 +211,7 @@ function _rand!(rng::AbstractRNG, d::Wishart, A::AbstractMatrix) end unwhiten!(d.S, A) A .= A * A' + return A end function _wishart_genA!(rng::AbstractRNG, A::AbstractMatrix, df::Real) @@ -223,29 +231,8 @@ function _wishart_genA!(rng::AbstractRNG, 
A::AbstractMatrix, df::Real)
         elseif i > j
             randn(rng, T)
         else
-            rand(rng, Chi(df - i + 1))
+            rand(rng, Chi{T}(T(df - i + 1)))
         end
     end
     return A
 end
-
-# -----------------------------------------------------------------------------
-# Test utils
-# -----------------------------------------------------------------------------
-
-function _univariate(d::Wishart)
-    check_univariate(d)
-    df, S = params(d)
-    α = df / 2
-    β = 2 * first(S)
-    return Gamma(α, β)
-end
-
-function _rand_params(::Type{Wishart}, elty, n::Int, p::Int)
-    n == p || throw(ArgumentError("dims must be equal for Wishart"))
-    ν = elty(n - 1 + abs(10 * randn()))
-    X = rand(elty, n, n)
-    X .= 2 .* X .- 1
-    S = X * X'
-    return ν, S
-end
diff --git a/src/mixtures/mixturemodel.jl b/src/mixtures/mixturemodel.jl
index 7f36642900..3a9cb3213c 100644
--- a/src/mixtures/mixturemodel.jl
+++ b/src/mixtures/mixturemodel.jl
@@ -472,16 +472,20 @@ end
 
 Base.length(s::MixtureSampler) = length(first(s.csamplers))
 
-rand(rng::AbstractRNG, s::MixtureSampler{Univariate}) =
+# mixture sampler: draw a component, then sample from it
+rand(rng::AbstractRNG, s::MixtureSampler) =
     rand(rng, s.csamplers[rand(rng, s.psampler)])
-rand(rng::AbstractRNG, d::MixtureModel{Univariate}) =
-    rand(rng, component(d, rand(rng, d.prior)))
+Base.@propagate_inbounds function rand!(rng::AbstractRNG, s::MixtureSampler{ArrayLikeVariate{N}}, x::AbstractArray{<:Real,N}) where {N}
+    rand!(rng, s.csamplers[rand(rng, s.psampler)], x)
+    return x
+end
+
 
-# multivariate mixture sampler for a vector
-_rand!(rng::AbstractRNG, s::MixtureSampler{Multivariate}, x::AbstractVector{<:Real}) =
-    @inbounds rand!(rng, s.csamplers[rand(rng, s.psampler)], x)
 # if only a single sample is requested, no alias table is created
-_rand!(rng::AbstractRNG, d::MixtureModel{Multivariate}, x::AbstractVector{<:Real}) =
-    @inbounds rand!(rng, component(d, rand(rng, d.prior)), x)
+rand(rng::AbstractRNG, d::MixtureModel) = rand(rng, component(d, rand(rng, d.prior)))
+Base.@propagate_inbounds function rand!(rng::AbstractRNG, d::MixtureModel{ArrayLikeVariate{N}}, x::AbstractArray{<:Real,N}) where {N}
+    rand!(rng, component(d, rand(rng, d.prior)), x)
+    return x
+end
 
 sampler(d::MixtureModel) = MixtureSampler(d)
diff --git a/src/mixtures/unigmm.jl b/src/mixtures/unigmm.jl
index f1d808ec18..c648a0e121 100644
--- a/src/mixtures/unigmm.jl
+++ b/src/mixtures/unigmm.jl
@@ -25,10 +25,12 @@ probs(d::UnivariateGMM) = probs(d.prior)
 
 mean(d::UnivariateGMM) = dot(d.means, probs(d))
 
-rand(d::UnivariateGMM) = (k = rand(d.prior); d.means[k] + randn() * d.stds[k])
-
-rand(rng::AbstractRNG, d::UnivariateGMM) =
-    (k = rand(rng, d.prior); d.means[k] + randn(rng) * d.stds[k])
+function rand(rng::AbstractRNG, d::UnivariateGMM)
+    k = rand(rng, d.prior)
+    μ = d.means[k]
+    σ = d.stds[k]
+    return muladd(randn(rng, float(Base.promote_typeof(μ, σ))), σ, μ)
+end
 
 params(d::UnivariateGMM) = (d.means, d.stds, d.prior)
 
@@ -38,6 +40,22 @@ struct UnivariateGMMSampler{VT1<:AbstractVector{<:Real},VT2<:AbstractVector{<:Re
     psampler::AliasTable
 end
 
-rand(rng::AbstractRNG, s::UnivariateGMMSampler) =
-    (k = rand(rng, s.psampler); s.means[k] + randn(rng) * s.stds[k])
+function rand(rng::AbstractRNG, s::UnivariateGMMSampler)
+    k = rand(rng, s.psampler)
+    μ = s.means[k]
+    σ = s.stds[k]
+    return muladd(randn(rng, float(Base.promote_typeof(μ, σ))), σ, μ)
+end
+function rand!(rng::AbstractRNG, s::UnivariateGMMSampler, x::AbstractArray{<:Real})
+    psampler = s.psampler
+    means = s.means
+    stds = s.stds
+    randn!(rng, x)
+    for i in eachindex(x)
+        k = rand(rng, psampler)
+        x[i] = muladd(x[i], stds[k], 
means[k]) + end + return x +end + sampler(d::UnivariateGMM) = UnivariateGMMSampler(d.means, d.stds, sampler(d.prior)) diff --git a/src/multivariate/dirichlet.jl b/src/multivariate/dirichlet.jl index b24980ec98..b46ab9dabd 100644 --- a/src/multivariate/dirichlet.jl +++ b/src/multivariate/dirichlet.jl @@ -50,8 +50,6 @@ end length(d::DirichletCanon) = length(d.alpha) -Base.eltype(::Type{<:Dirichlet{T}}) where {T} = T - #### Conversions convert(::Type{Dirichlet{T}}, cf::DirichletCanon) where {T<:Real} = Dirichlet(convert(AbstractVector{T}, cf.alpha)) @@ -154,18 +152,25 @@ end # sampling -function _rand!(rng::AbstractRNG, - d::Union{Dirichlet,DirichletCanon}, - x::AbstractVector{<:Real}) +function rand(rng::AbstractRNG, d::Union{Dirichlet,DirichletCanon}) + x = map(αi -> rand(rng, Gamma(αi)), d.alpha) + return lmul!(inv(sum(x)), x) +end +function rand(rng::AbstractRNG, d::Dirichlet{<:Real,<:FillArrays.AbstractFill{<:Real}}) + x = rand(rng, Gamma(FillArrays.getindex_value(d.alpha)), length(d)) + return lmul!(inv(sum(x)), x) +end + +@inline function rand!(rng::AbstractRNG, d::Union{Dirichlet,DirichletCanon}, x::AbstractVector{<:Real}) + @boundscheck length(d) == length(x) for (i, αi) in zip(eachindex(x), d.alpha) @inbounds x[i] = rand(rng, Gamma(αi)) end lmul!(inv(sum(x)), x) # this returns x end -function _rand!(rng::AbstractRNG, - d::Dirichlet{T,<:FillArrays.AbstractFill{T}}, - x::AbstractVector{<:Real}) where {T<:Real} +@inline function rand!(rng::AbstractRNG, d::Dirichlet{T,<:FillArrays.AbstractFill{T}}, x::AbstractVector{<:Real}) where {T<:Real} + @boundscheck length(d) == length(x) rand!(rng, Gamma(FillArrays.getindex_value(d.alpha)), x) lmul!(inv(sum(x)), x) # this returns x end diff --git a/src/multivariate/dirichletmultinomial.jl b/src/multivariate/dirichletmultinomial.jl index eb15990cb2..c2d60feccd 100644 --- a/src/multivariate/dirichletmultinomial.jl +++ b/src/multivariate/dirichletmultinomial.jl @@ -97,8 +97,12 @@ end # Sampling -_rand!(rng::AbstractRNG, d::DirichletMultinomial, x::AbstractVector{<:Real}) = +rand(rng::AbstractRNG, d::DirichletMultinomial) = + multinom_rand(rng, ntrials(d), rand(rng, Dirichlet(d.α))) +@inline function rand!(rng::AbstractRNG, d::DirichletMultinomial, x::AbstractVector{<:Real}) + @boundscheck length(d) == length(x) multinom_rand!(rng, ntrials(d), rand(rng, Dirichlet(d.α)), x) +end # Fit Model # Using https://www.ncbi.nlm.nih.gov/pmc/articles/PMC2945396/pdf/nihms205488.pdf diff --git a/src/multivariate/jointorderstatistics.jl b/src/multivariate/jointorderstatistics.jl index 1fbed0d1b6..7d97f68555 100644 --- a/src/multivariate/jointorderstatistics.jl +++ b/src/multivariate/jointorderstatistics.jl @@ -88,8 +88,6 @@ maximum(d::JointOrderStatistics) = Fill(maximum(d.dist), length(d)) params(d::JointOrderStatistics) = tuple(params(d.dist)..., d.n, d.ranks) partype(d::JointOrderStatistics) = partype(d.dist) -Base.eltype(::Type{<:JointOrderStatistics{D}}) where {D} = Base.eltype(D) -Base.eltype(d::JointOrderStatistics) = eltype(d.dist) function logpdf(d::JointOrderStatistics, x::AbstractVector{<:Real}) n = d.n @@ -125,7 +123,29 @@ function _marginalize_range(dist, i, j, xᵢ, xⱼ, T) return k * T(logdiffcdf(dist, xⱼ, xᵢ)) - loggamma(T(k + 1)) end -function _rand!(rng::AbstractRNG, d::JointOrderStatistics, x::AbstractVector{<:Real}) +function rand(rng::AbstractRNG, d::JointOrderStatistics) + n = d.n + if n == length(d.ranks) # ranks == 1:n + # direct method, slower than inversion method for large `n` and distributions with + # fast quantile function or that use 
inversion sampling + x = rand(rng, d.dist, n) + sort!(x) + else + # use exponential generation method with inversion, where for gaps in the ranks, we + # use the fact that the sum Y of k IID variables xₘ ~ Exp(1) is Y ~ Gamma(k, 1). + # Lurie, D., and H. O. Hartley. "Machine-generation of order statistics for Monte + # Carlo computations." The American Statistician 26.1 (1972): 26-27. + # this is slow if length(d.ranks) is close to n and quantile for d.dist is expensive, + # but this branch is probably taken when length(d.ranks) is small or much smaller than n. + xi = rand(rng, d.dist) # this is only used to obtain the type of samples from `d.dist` + x = Vector{typeof(xi)}(undef, length(d.ranks)) + rand!(rng, d, x) + end + return x +end + +@inline function rand!(rng::AbstractRNG, d::JointOrderStatistics, x::AbstractVector{<:Real}) + @boundscheck length(x) == length(d) n = d.n if n == length(d.ranks) # ranks == 1:n # direct method, slower than inversion method for large `n` and distributions with diff --git a/src/multivariate/multinomial.jl b/src/multivariate/multinomial.jl index c4db44b85b..dc9126ee7a 100644 --- a/src/multivariate/multinomial.jl +++ b/src/multivariate/multinomial.jl @@ -165,7 +165,8 @@ end # Sampling # if only a single sample is requested, no alias table is created -_rand!(rng::AbstractRNG, d::Multinomial, x::AbstractVector{<:Real}) = +rand(rng::AbstractRNG, d::Multinomial) = multinom_rand(rng, ntrials(d), probs(d)) +rand!(rng::AbstractRNG, d::Multinomial, x::AbstractVector{<:Real}) = multinom_rand!(rng, ntrials(d), probs(d), x) sampler(d::Multinomial) = MultinomialSampler(ntrials(d), probs(d)) diff --git a/src/multivariate/mvlogitnormal.jl b/src/multivariate/mvlogitnormal.jl index 0d60ddf654..33595800d2 100644 --- a/src/multivariate/mvlogitnormal.jl +++ b/src/multivariate/mvlogitnormal.jl @@ -52,14 +52,12 @@ canonform(d::MvLogitNormal{<:MvNormal}) = MvLogitNormal(canonform(d.normal)) # Properties length(d::MvLogitNormal) = length(d.normal) + 1 -Base.eltype(::Type{<:MvLogitNormal{D}}) where {D} = eltype(D) -Base.eltype(d::MvLogitNormal) = eltype(d.normal) params(d::MvLogitNormal) = params(d.normal) @inline partype(d::MvLogitNormal) = partype(d.normal) location(d::MvLogitNormal) = mean(d.normal) -minimum(d::MvLogitNormal) = fill(zero(eltype(d)), length(d)) -maximum(d::MvLogitNormal) = fill(oneunit(eltype(d)), length(d)) +minimum(d::MvLogitNormal) = Fill(zero(float(partype(d))), length(d)) +maximum(d::MvLogitNormal) = Fill(oneunit(float(partype(d))), length(d)) function insupport(d::MvLogitNormal, x::AbstractVector{<:Real}) return length(d) == length(x) && all(≥(0), x) && sum(x) ≈ 1 @@ -88,12 +86,29 @@ kldivergence(p::MvLogitNormal, q::MvLogitNormal) = kldivergence(p.normal, q.norm # Sampling -function _rand!(rng::AbstractRNG, d::MvLogitNormal, x::AbstractVecOrMat{<:Real}) - y = @views _drop1(x) - rand!(rng, d.normal, y) - _softmax1!(x, y) +function rand(rng::AbstractRNG, d::MvLogitNormal) + x = rand(rng, d.normal) + push!(x, zero(eltype(x))) + StatsFuns.softmax!(x) return x end +function rand(rng::AbstractRNG, d::MvLogitNormal, n::Int) + r = rand(rng, d.normal, n) + x = vcat(r, zeros(eltype(r), 1, n)) + StatsFuns.softmax!(x; dims=1) + return x +end + +for N in (1, 2) + @eval begin + Base.@propagate_inbounds function rand!(rng::AbstractRNG, d::MvLogitNormal, x::AbstractArray{<:Real, $N}) + y = @views _drop1(x) + rand!(rng, d.normal, y) + _softmax1!(x, y) + return x + end + end +end # Fitting diff --git a/src/multivariate/mvlognormal.jl b/src/multivariate/mvlognormal.jl index 
1eecd38c2f..b95e5018ce 100644 --- a/src/multivariate/mvlognormal.jl +++ b/src/multivariate/mvlognormal.jl @@ -176,8 +176,6 @@ MvLogNormal(μ::AbstractVector,s::Real) = MvLogNormal(MvNormal(μ,s)) MvLogNormal(σ::AbstractVector) = MvLogNormal(MvNormal(σ)) MvLogNormal(d::Int,s::Real) = MvLogNormal(MvNormal(d,s)) -Base.eltype(::Type{<:MvLogNormal{T}}) where {T} = T - ### Conversion function convert(::Type{MvLogNormal{T}}, d::MvLogNormal) where T<:Real MvLogNormal(convert(MvNormal{T}, d.normal)) @@ -232,11 +230,26 @@ var(d::MvLogNormal) = diag(cov(d)) entropy(d::MvLogNormal) = length(d)*(1+log2π)/2 + logdetcov(d.normal)/2 + sum(mean(d.normal)) #See https://en.wikipedia.org/wiki/Log-normal_distribution -function _rand!(rng::AbstractRNG, d::MvLogNormal, x::AbstractVecOrMat{<:Real}) - _rand!(rng, d.normal, x) +function rand(rng::AbstractRNG, d::MvLogNormal) + x = rand(rng, d.normal) map!(exp, x, x) return x end +function rand(rng::AbstractRNG, d::MvLogNormal, n::Int) + xs = rand(rng, d.normal, n) + map!(exp, xs, xs) + return xs +end + +for N in (1, 2) + @eval begin + Base.@propagate_inbounds function rand!(rng::AbstractRNG, d::MvLogNormal, x::AbstractArray{<:Real,$N}) + rand!(rng, d.normal, x) + map!(exp, x, x) + return x + end + end +end _logpdf(d::MvLogNormal, x::AbstractVecOrMat{T}) where {T<:Real} = insupport(d, x) ? (_logpdf(d.normal, log.(x)) - sum(log.(x))) : -Inf _pdf(d::MvLogNormal, x::AbstractVecOrMat{T}) where {T<:Real} = insupport(d,x) ? _pdf(d.normal, log.(x))/prod(x) : 0.0 diff --git a/src/multivariate/mvnormal.jl b/src/multivariate/mvnormal.jl index 5806c00f31..7e649f6bff 100644 --- a/src/multivariate/mvnormal.jl +++ b/src/multivariate/mvnormal.jl @@ -8,6 +8,7 @@ # Each subtype should provide the following methods: # # - length(d): vector dimension +# - partype(d): Element type of the parameters # - mean(d): the mean vector (in full form) # - cov(d): the covariance matrix (in full form) # - invcov(d): inverse of covariance @@ -15,7 +16,8 @@ # - sqmahal(d, x): Squared Mahalanobis distance to center # - sqmahal!(r, d, x): Squared Mahalanobis distances # - gradlogpdf(d, x): Gradient of logpdf w.r.t. x -# - _rand!(d, x): Sample random vector(s) +# - rand(rng, d): Sample random vector +# - rand!(rng, d, x): Sample random vector(s) in x # # Other generic functions will be implemented on top # of these core functions. @@ -80,8 +82,8 @@ abstract type AbstractMvNormal <: ContinuousMultivariateDistribution end insupport(d::AbstractMvNormal, x::AbstractVector) = length(d) == length(x) && all(isfinite, x) -minimum(d::AbstractMvNormal) = fill(eltype(d)(-Inf), length(d)) -maximum(d::AbstractMvNormal) = fill(eltype(d)(Inf), length(d)) +minimum(d::AbstractMvNormal) = Fill(float(partype(d))(-Inf), length(d)) +maximum(d::AbstractMvNormal) = Fill(float(partype(d))(Inf), length(d)) mode(d::AbstractMvNormal) = mean(d) modes(d::AbstractMvNormal) = [mean(d)] @@ -134,7 +136,11 @@ When x is a vector, it returns a scalar value. 
When x is a matrix, it returns a """ sqmahal(d::AbstractMvNormal, x::AbstractArray) -sqmahal(d::AbstractMvNormal, x::AbstractMatrix) = sqmahal!(Vector{promote_type(partype(d), eltype(x))}(undef, size(x, 2)), d, x) +function sqmahal(d::AbstractMvNormal, x::AbstractMatrix{<:Real}) + r = Vector{float(promote_type(partype(d), eltype(x)))}(undef, size(x, 2)) + sqmahal!(r, d, x) + return r +end _logpdf(d::AbstractMvNormal, x::AbstractVector) = mvnormal_c0(d) - sqmahal(d, x)/2 @@ -219,8 +225,6 @@ Base.@deprecate MvNormal(μ::AbstractVector{<:Real}, σ::Real) MvNormal(μ, σ^2 Base.@deprecate MvNormal(σ::AbstractVector{<:Real}) MvNormal(LinearAlgebra.Diagonal(map(abs2, σ))) Base.@deprecate MvNormal(d::Int, σ::Real) MvNormal(LinearAlgebra.Diagonal(FillArrays.Fill(σ^2, d))) -Base.eltype(::Type{<:MvNormal{T}}) where {T} = T - ### Conversion function convert(::Type{MvNormal{T}}, d::MvNormal) where T<:Real MvNormal(convert(AbstractArray{T}, d.μ), convert(AbstractArray{T}, d.Σ)) @@ -268,16 +272,27 @@ gradlogpdf(d::MvNormal, x::AbstractVector{<:Real}) = -(d.Σ \ (x .- d.μ)) # Sampling (for GenericMvNormal) -function _rand!(rng::AbstractRNG, d::MvNormal, x::VecOrMat) +function rand(rng::AbstractRNG, d::MvNormal) + x = unwhiten!(d.Σ, randn(rng, float(partype(d)), length(d))) + x .+= d.μ + return x +end +function rand(rng::AbstractRNG, d::MvNormal, n::Int) + x = unwhiten!(d.Σ, randn(rng, float(partype(d)), length(d), n)) + x .+= d.μ + return x +end + +Base.@propagate_inbounds function rand!(rng::AbstractRNG, d::MvNormal, x::VecOrMat{<:Real}) unwhiten!(d.Σ, randn!(rng, x)) x .+= d.μ return x end # Workaround: randn! only works for Array, but not generally for AbstractArray -function _rand!(rng::AbstractRNG, d::MvNormal, x::AbstractVector) +Base.@propagate_inbounds function rand!(rng::AbstractRNG, d::MvNormal, x::AbstractVector{<:Real}) for i in eachindex(x) - @inbounds x[i] = randn(rng, eltype(x)) + x[i] = randn(rng, eltype(x)) end unwhiten!(d.Σ, x) x .+= d.μ diff --git a/src/multivariate/mvnormalcanon.jl b/src/multivariate/mvnormalcanon.jl index 79b43e9ba9..bd5b82d313 100644 --- a/src/multivariate/mvnormalcanon.jl +++ b/src/multivariate/mvnormalcanon.jl @@ -154,7 +154,6 @@ length(d::MvNormalCanon) = length(d.μ) mean(d::MvNormalCanon) = convert(Vector{eltype(d.μ)}, d.μ) params(d::MvNormalCanon) = (d.μ, d.h, d.J) @inline partype(d::MvNormalCanon{T}) where {T<:Real} = T -Base.eltype(::Type{<:MvNormalCanon{T}}) where {T} = T var(d::MvNormalCanon) = diag(inv(d.J)) cov(d::MvNormalCanon) = inv(d.J) @@ -170,12 +169,25 @@ sqmahal!(r::AbstractVector, d::MvNormalCanon, x::AbstractMatrix) = quad!(r, d.J, # Sampling (for GenericMvNormal) -function _rand!(rng::AbstractRNG, d::MvNormalCanon, x::AbstractVector) +function rand(rng::AbstractRNG, d::MvNormalCanon) + x = invunwhiten!(d.J, randn(rng, float(partype(d)), length(d))) + x .+= d.μ + return x +end +function rand(rng::AbstractRNG, d::MvNormalCanon, n::Int) + x = invunwhiten!(d.J, randn(rng, float(partype(d)), length(d), n)) + x .+= d.μ + return x +end + +@inline function rand!(rng::AbstractRNG, d::MvNormalCanon, x::AbstractVector{<:Real}) + @boundscheck length(x) == length(d) invunwhiten!(d.J, randn!(rng, x)) x .+= d.μ return x end -function _rand!(rng::AbstractRNG, d::MvNormalCanon, x::AbstractMatrix) +@inline function rand!(rng::AbstractRNG, d::MvNormalCanon, x::AbstractMatrix{<:Real}) + @boundscheck size(x, 1) == length(d) invunwhiten!(d.J, randn!(rng, x)) x .+= d.μ return x diff --git a/src/multivariate/mvtdist.jl b/src/multivariate/mvtdist.jl index 
9076c364b5..01fec668c2 100644 --- a/src/multivariate/mvtdist.jl +++ b/src/multivariate/mvtdist.jl @@ -101,7 +101,6 @@ logdet_cov(d::GenericMvTDist) = d.df>2 ? logdet((d.df/(d.df-2))*d.Σ) : NaN params(d::GenericMvTDist) = (d.df, d.μ, d.Σ) @inline partype(d::GenericMvTDist{T}) where {T} = T -Base.eltype(::Type{<:GenericMvTDist{T}}) where {T} = T # For entropy calculations see "Multivariate t Distributions and their Applications", S. Kotz & S. Nadarajah function entropy(d::GenericMvTDist) @@ -125,12 +124,14 @@ sqmahal(d::AbstractMvTDist, x::AbstractMatrix{T}) where {T<:Real} = sqmahal!(Vec function mvtdist_consts(d::AbstractMvTDist) - H = convert(eltype(d), 0.5) - logpi = convert(eltype(d), log(pi)) - hdf = H * d.df - hdim = H * d.dim + logdet_Σ = logdet(d.Σ) + onehalf = oftype(logdet_Σ, 1//2) + logpi = oftype(logdet_Σ, logπ) + df = oftype(logdet_Σ, d.df) + hdf = onehalf * df + hdim = onehalf * d.dim shdfhdim = hdf + hdim - v = loggamma(shdfhdim) - loggamma(hdf) - hdim*log(d.df) - hdim*logpi - H*logdet(d.Σ) + v = loggamma(shdfhdim) - loggamma(hdf) - hdim * (log(df) + logpi) - onehalf * logdet_Σ return (shdfhdim, v) end @@ -155,7 +156,23 @@ function gradlogpdf(d::GenericMvTDist, x::AbstractVector{<:Real}) end # Sampling (for GenericMvTDist) -function _rand!(rng::AbstractRNG, d::GenericMvTDist, x::AbstractVector{<:Real}) +function rand(rng::AbstractRNG, d::GenericMvTDist) + chisqd = Chisq{partype(d)}(d.df) + y = sqrt(rand(rng, chisqd) / d.df) + x = unwhiten!(d.Σ, randn(rng, typeof(y), length(d))) + x .= x ./ y .+ d.μ + x +end +function rand(rng::AbstractRNG, d::GenericMvTDist, n::Int) + chisqd = Chisq{partype(d)}(d.df) + y = rand(rng, chisqd, n) + x = unwhiten!(d.Σ, randn(rng, eltype(y), length(d), n)) + x .= x ./ sqrt.(y' ./ d.df) .+ d.μ + x +end + +@inline function rand!(rng::AbstractRNG, d::GenericMvTDist, x::AbstractVector{<:Real}) + @boundscheck length(x) == length(d) chisqd = Chisq{partype(d)}(d.df) y = sqrt(rand(rng, chisqd) / d.df) unwhiten!(d.Σ, randn!(rng, x)) @@ -163,10 +180,11 @@ function _rand!(rng::AbstractRNG, d::GenericMvTDist, x::AbstractVector{<:Real}) x end -function _rand!(rng::AbstractRNG, d::GenericMvTDist, x::AbstractMatrix{T}) where T<:Real +@inline function rand!(rng::AbstractRNG, d::GenericMvTDist, x::AbstractMatrix{<:Real}) + @boundscheck size(x, 1) == length(d) cols = size(x,2) chisqd = Chisq{partype(d)}(d.df) - y = Matrix{T}(undef, 1, cols) + y = similar(x, (1, cols)) unwhiten!(d.Σ, randn!(rng, x)) rand!(rng, chisqd, y) x .= x ./ sqrt.(y ./ d.df) .+ d.μ diff --git a/src/multivariate/product.jl b/src/multivariate/product.jl index ada1c4e5f4..6cf1960f78 100644 --- a/src/multivariate/product.jl +++ b/src/multivariate/product.jl @@ -35,13 +35,24 @@ function Product(v::V) where {S<:ValueSupport,T<:UnivariateDistribution{S},V<:Ab end length(d::Product) = length(d.v) -function Base.eltype(::Type{<:Product{S,T}}) where {S<:ValueSupport, - T<:UnivariateDistribution{S}} - return eltype(T) -end -_rand!(rng::AbstractRNG, d::Product, x::AbstractVector{<:Real}) = +rand(rng::AbstractRNG, d::Product) = map(Base.Fix1(rand, rng), d.v) +@inline function rand!(rng::AbstractRNG, d::Product, x::AbstractVector{<:Real}) + @boundscheck length(x) == length(d) map!(Base.Fix1(rand, rng), x, d.v) + return x +end + +# Specializations for FillArrays (for which `map(Base.Fix1(rand, rng), d.v)` is incorrect) +function rand(rng::AbstractRNG, d::Product{S,T,<:FillArrays.AbstractFill{T,1}}) where {S<:ValueSupport,T<:UnivariateDistribution{S}} + return rand(rng, first(d.v), size(d)) +end +@inline 
function rand!(rng::AbstractRNG, d::Product{S,T,<:FillArrays.AbstractFill{T,1}}, x::AbstractVector{<:Real}) where {S<:ValueSupport,T<:UnivariateDistribution{S}} + @boundscheck length(x) == length(d) + rand!(rng, first(d.v), x) + return x +end + function _logpdf(d::Product, x::AbstractVector{<:Real}) dists = d.v if isempty(dists) diff --git a/src/multivariate/vonmisesfisher.jl b/src/multivariate/vonmisesfisher.jl index e4fe981fe6..3c02e72532 100644 --- a/src/multivariate/vonmisesfisher.jl +++ b/src/multivariate/vonmisesfisher.jl @@ -30,7 +30,7 @@ end VonMisesFisher(μ::Vector{T}, κ::T) where {T<:Real} = VonMisesFisher{T}(μ, κ) function VonMisesFisher(μ::Vector{T}, κ::Real) where {T<:Real} - R = promote_type(T, eltype(κ)) + R = promote_type(T, typeof(κ)) return VonMisesFisher(convert(AbstractArray{R}, μ), convert(R, κ)) end @@ -77,11 +77,10 @@ _logpdf(d::VonMisesFisher, x::AbstractVector{T}) where {T<:Real} = d.logCκ + d. sampler(d::VonMisesFisher) = VonMisesFisherSampler(d.μ, d.κ) -_rand!(rng::AbstractRNG, d::VonMisesFisher, x::AbstractVector) = - _rand!(rng, sampler(d), x) -_rand!(rng::AbstractRNG, d::VonMisesFisher, x::AbstractMatrix) = - _rand!(rng, sampler(d), x) - +rand(rng::AbstractRNG, d::VonMisesFisher) = rand(rng, sampler(d)) +Base.@propagate_inbounds function rand!(rng::AbstractRNG, d::VonMisesFisher, x::AbstractVector) + return rand!(rng, sampler(d), x) +end ### Estimation diff --git a/src/multivariates.jl b/src/multivariates.jl index 4d98ebe110..a1838270ce 100644 --- a/src/multivariates.jl +++ b/src/multivariates.jl @@ -18,10 +18,20 @@ size(d::MultivariateDistribution) # multiple multivariate, must allocate matrix # TODO: inconsistency with other `ArrayLikeVariate`s and `rand(s, (n,))` - maybe remove? -rand(rng::AbstractRNG, s::Sampleable{Multivariate}, n::Int) = - @inbounds rand!(rng, sampler(s), Matrix{eltype(s)}(undef, length(s), n)) -rand(rng::AbstractRNG, s::Sampleable{Multivariate,Continuous}, n::Int) = - @inbounds rand!(rng, sampler(s), Matrix{float(eltype(s))}(undef, length(s), n)) +function rand(rng::AbstractRNG, s::Sampleable{Multivariate}, n::Int) + return _rand(rng, sampler(s), n) +end +function _rand(rng, s::Sampleable{Multivariate}, n::Int) + r = rand(rng, s) + out = Matrix{eltype(r)}(undef, length(r), n) + if n > 0 + copyto!(out, r) + if n > 1 + rand!(rng, s, @view(out[:, 2:n])) + end + end + return out +end ## domain diff --git a/src/namedtuple/productnamedtuple.jl b/src/namedtuple/productnamedtuple.jl index 95f2fa1d4d..6a0b731c76 100644 --- a/src/namedtuple/productnamedtuple.jl +++ b/src/namedtuple/productnamedtuple.jl @@ -38,7 +38,7 @@ julia> var(d) # var of marginals (x = 1.0, y = [0.031746031746031744, 0.031746031746031744]) ``` """ -struct ProductNamedTupleDistribution{Tnames,Tdists,S<:ValueSupport,eltypes} <: +struct ProductNamedTupleDistribution{Tnames,Tdists,S<:ValueSupport} <: Distribution{NamedTupleVariate{Tnames},S} dists::NamedTuple{Tnames,Tdists} end @@ -46,23 +46,9 @@ function ProductNamedTupleDistribution( dists::NamedTuple{K,V} ) where {K,V<:Tuple{Distribution,Vararg{Distribution}}} vs = _product_valuesupport(values(dists)) - eltypes = _product_namedtuple_eltype(values(dists)) - return ProductNamedTupleDistribution{K,V,vs,eltypes}(dists) + return ProductNamedTupleDistribution{K,V,vs}(dists) end -_gentype(d::UnivariateDistribution) = eltype(d) -_gentype(d::Distribution{<:ArrayLikeVariate{S}}) where {S} = Array{eltype(d),S} -function _gentype(d::Distribution{CholeskyVariate}) - T = eltype(d) - return LinearAlgebra.Cholesky{T,Matrix{T}} -end -function 
_gentype(d::ProductNamedTupleDistribution{K}) where {K} - return NamedTuple{K,Tuple{map(_gentype, values(d.dists))...}} -end -_gentype(::Distribution) = Any - -_product_namedtuple_eltype(dists) = typejoin(map(_gentype, dists)...) - function Base.show(io::IO, d::ProductNamedTupleDistribution) return show_multline(io, d, collect(pairs(d.dists))) end @@ -88,8 +74,6 @@ end # Properties -Base.eltype(::Type{<:ProductNamedTupleDistribution{<:Any,<:Any,<:Any,T}}) where {T} = T - Base.minimum(d::ProductNamedTupleDistribution) = map(minimum, d.dists) Base.maximum(d::ProductNamedTupleDistribution) = map(maximum, d.dists) @@ -166,9 +150,9 @@ end function Base.rand( rng::AbstractRNG, d::ProductNamedTupleDistribution{K}, dims::Dims ) where {K} - return convert(AbstractArray{<:NamedTuple{K}}, _rand(rng, sampler(d), dims)) + return rand(rng, sampler(d), dims) end -function _rand!(rng::AbstractRNG, d::ProductNamedTupleDistribution, xs::AbstractArray) - return _rand!(rng, sampler(d), xs) +Base.@propagate_inbounds function Random.rand!(rng::AbstractRNG, d::ProductNamedTupleDistribution, xs::AbstractArray) + return rand!(rng, sampler(d), xs) end diff --git a/src/product.jl b/src/product.jl index 7129a097be..4c31d712a2 100644 --- a/src/product.jl +++ b/src/product.jl @@ -7,7 +7,7 @@ independent `M`-dimensional distributions by stacking them. Users should use [`product_distribution`](@ref) to construct a product distribution of independent distributions instead of constructing a `ProductDistribution` directly. """ -struct ProductDistribution{N,M,D,S<:ValueSupport,T} <: Distribution{ArrayLikeVariate{N},S} +struct ProductDistribution{N,M,D,S<:ValueSupport} <: Distribution{ArrayLikeVariate{N},S} dists::D size::Dims{N} @@ -15,7 +15,7 @@ struct ProductDistribution{N,M,D,S<:ValueSupport,T} <: Distribution{ArrayLikeVar if isempty(dists) throw(ArgumentError("a product distribution must consist of at least one distribution")) end - return new{N,M,D,_product_valuesupport(dists),_product_eltype(dists)}( + return new{N,M,D,_product_valuesupport(dists)}( dists, _product_size(dists), ) @@ -32,21 +32,19 @@ end # default definitions (type stable e.g. 
for arrays with concrete `eltype`) _product_valuesupport(dists) = mapreduce(value_support ∘ typeof, promote_type, dists) -_product_eltype(dists) = mapreduce(eltype, promote_type, dists) # type-stable and faster implementations for tuples function _product_valuesupport(dists::NTuple{<:Any,Distribution}) - return __product_promote_type(value_support, typeof(dists)) -end -function _product_eltype(dists::NTuple{<:Any,Distribution}) - return __product_promote_type(eltype, typeof(dists)) + return __promote_type(value_support, typeof(dists)) end -__product_promote_type(f::F, ::Type{Tuple{D}}) where {F,D<:Distribution} = f(D) -function __product_promote_type(f::F, ::Type{T}) where {F,T} +_common_eltype(xs) = mapreduce(eltype, promote_type, xs) +_common_eltype(xs::NTuple{N,<:Real}) where {N} = __promote_type(eltype, typeof(xs)) +__promote_type(f::F, ::Type{Tuple{D}}) where {F,D} = f(D) +function __promote_type(f::F, ::Type{T}) where {F,T<:Tuple} return promote_type( f(Base.tuple_type_head(T)), - __product_promote_type(f, Base.tuple_type_tail(T)), + __promote_type(f, Base.tuple_type_tail(T)), ) end @@ -63,17 +61,13 @@ function _product_size(dists::Tuple{Distribution{<:ArrayLikeVariate{M}},Vararg{D end ## aliases -const VectorOfUnivariateDistribution{D,S<:ValueSupport,T} = ProductDistribution{1,0,D,S,T} -const MatrixOfUnivariateDistribution{D,S<:ValueSupport,T} = ProductDistribution{2,0,D,S,T} -const ArrayOfUnivariateDistribution{N,D,S<:ValueSupport,T} = ProductDistribution{N,0,D,S,T} +const VectorOfUnivariateDistribution{D,S<:ValueSupport} = ProductDistribution{1,0,D,S} +const MatrixOfUnivariateDistribution{D,S<:ValueSupport} = ProductDistribution{2,0,D,S} +const ArrayOfUnivariateDistribution{N,D,S<:ValueSupport} = ProductDistribution{N,0,D,S} -const FillArrayOfUnivariateDistribution{N,D<:Fill{<:Any,N},S<:ValueSupport,T} = ProductDistribution{N,0,D,S,T} +const FillArrayOfUnivariateDistribution{N,D<:FillArrays.AbstractFill{<:Any,N},S<:ValueSupport} = ProductDistribution{N,0,D,S} ## General definitions -function Base.eltype(::Type{<:ProductDistribution{<:Any,<:Any,<:Any,<:ValueSupport,T}}) where {T} - return T -end - size(d::ProductDistribution) = d.size mean(d::ProductDistribution) = reshape(mapreduce(vec ∘ mean, vcat, d.dists), size(d)) @@ -110,15 +104,23 @@ length(d::VectorOfUnivariateDistribution) = length(d.dists) ## For matrix distributions cov(d::ProductDistribution{2}, ::Val{false}) = reshape(cov(d), size(d)..., size(d)...) 
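
The tuple methods of `_common_eltype`/`__promote_type` above fold `promote_type` over the *type* of the tuple rather than its values, so the result is a compile-time constant. Below is a standalone rendition of the recursion under a hypothetical name, assuming only the same `Base.tuple_type_head`/`Base.tuple_type_tail` helpers that appear in the hunk:

```julia
_tuple_promote(f::F, ::Type{Tuple{D}}) where {F,D} = f(D)
function _tuple_promote(f::F, ::Type{T}) where {F,T<:Tuple}
    return promote_type(
        f(Base.tuple_type_head(T)),
        _tuple_promote(f, Base.tuple_type_tail(T)),
    )
end

_tuple_promote(eltype, Tuple{Int,Float32})  # Float32, computed from the type alone
```

The `f::F` type parameter forces specialization on the function argument, which is what keeps the recursion inferable.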
-# `_rand!` for arrays of univariate distributions -function _rand!( +# Arrays of univariate distributions +function rand(rng::AbstractRNG, d::ArrayOfUnivariateDistribution) + x = map(Base.Fix1(rand, rng), d.dists) + if x isa AbstractArray{<:Real} + return x + else + # For instance, if x is a tuple + return collect(_common_eltype(x), x) + end +end +@inline function rand!( rng::AbstractRNG, d::ArrayOfUnivariateDistribution{N}, x::AbstractArray{<:Real,N}, ) where {N} - @inbounds for (i, di) in zip(eachindex(x), d.dists) - x[i] = rand(rng, di) - end + @boundscheck size(x) == size(d) + map!(Base.Fix1(rand, rng), x, d.dists) return x end @@ -135,13 +137,21 @@ function __logpdf(d::ArrayOfUnivariateDistribution, x::AbstractArray{<:Real,N}) return sum(Broadcast.instantiate(broadcasted)) end -# more efficient implementation of `_rand!` for `Fill` array of univariate distributions -function _rand!( +# more efficient sampling for `Fill` array of univariate distributions +function rand( + rng::AbstractRNG, + d::FillArrayOfUnivariateDistribution, +) + return rand(rng, first(d.dists), size(d)) +end +@inline function rand!( rng::AbstractRNG, d::FillArrayOfUnivariateDistribution{N}, x::AbstractArray{<:Real,N}, ) where {N} - return @inbounds rand!(rng, sampler(first(d.dists)), x) + @boundscheck size(x) == size(d) + rand!(rng, first(d.dists), x) + return x end # more efficient implementation of `_logpdf` for `Fill` array of univariate distributions @@ -158,12 +168,17 @@ function __logpdf( return @inbounds loglikelihood(first(d.dists), x) end -# `_rand! for arrays of distributions -function _rand!( +# sampling for arrays of distributions +function rand(rng::AbstractRNG, d::ProductDistribution) + x = mapreduce(vec ∘ Base.Fix1(rand, rng), hcat, d.dists) + return reshape(x, size(d)) +end +@inline function rand!( rng::AbstractRNG, d::ProductDistribution{N,M}, A::AbstractArray{<:Real,N}, ) where {N,M} + @boundscheck size(A) == size(d) @inbounds for (di, Ai) in zip(d.dists, eachvariate(A, ArrayLikeVariate{M})) rand!(rng, di, Ai) end @@ -186,32 +201,45 @@ function __logpdf( return sum(Broadcast.instantiate(broadcasted)) end -# more efficient implementation of `_rand!` for `Fill` arrays of distributions -function _rand!( +# more efficient sampling for `Fill` arrays of distributions +function rand(rng::AbstractRNG, d::ProductDistribution{<:Any,<:Any,<:FillArrays.AbstractFill}) + return _product_rand(rng, sampler(first(d.dists)), size(d)) +end +function _product_rand(rng::AbstractRNG, spl::Sampleable{ArrayLikeVariate{N}}, dims::Dims) where N + xi = rand(rng, spl) + x = Array{eltype(xi)}(undef, dims) + copyto!(x, xi) + vx = reshape(x, ntuple(i -> i <= N ? size(xi, i) : Colon(), N + 1)) + @inbounds rand!(rng, spl, @view(vx[ntuple(i -> i <= N ? 
Colon() : 2:lastindex(vx, N + 1), N + 1)...])) + return x +end + +@inline function rand!( rng::AbstractRNG, - d::ProductDistribution{N,M,<:Fill}, + d::ProductDistribution{N,M,<:FillArrays.AbstractFill}, A::AbstractArray{<:Real,N}, ) where {N,M} + @boundscheck size(A) == size(d) @inbounds rand!(rng, sampler(first(d.dists)), A) return A end -# more efficient implementation of `_logpdf` for `Fill` arrays of distributions +# more efficient implementation of `_logpdf` for `AbstractFill` arrays of distributions # we have to fix a method ambiguity function _logpdf( - d::ProductDistribution{N,M,<:Fill}, + d::ProductDistribution{N,M,<:FillArrays.AbstractFill}, x::AbstractArray{<:Real,N}, ) where {N,M} return __logpdf(d, x) end function _logpdf( - d::ProductDistribution{2,M,<:Fill}, + d::ProductDistribution{2,M,<:FillArrays.AbstractFill}, x::AbstractMatrix{<:Real}, ) where {M} return __logpdf(d, x) end function __logpdf( - d::ProductDistribution{N,M,<:Fill}, + d::ProductDistribution{N,M,<:FillArrays.AbstractFill}, x::AbstractArray{<:Real,N}, ) where {N,M} return @inbounds loglikelihood(first(d.dists), x) diff --git a/src/reshaped.jl b/src/reshaped.jl index 23f35fb46a..364e01e445 100644 --- a/src/reshaped.jl +++ b/src/reshaped.jl @@ -24,7 +24,6 @@ function _reshape_check_dims(dist::Distribution{<:ArrayLikeVariate}, dims::Dims) end Base.size(d::ReshapedDistribution) = d.dims -Base.eltype(::Type{ReshapedDistribution{<:Any,<:ValueSupport,D}}) where {D} = eltype(D) partype(d::ReshapedDistribution) = partype(d.dist) params(d::ReshapedDistribution) = (d.dist, d.dims) @@ -86,11 +85,15 @@ end end # sampling -function _rand!( +function rand(rng::AbstractRNG, d::ReshapedDistribution) + return reshape(rand(rng, d.dist), size(d)) +end +@inline function rand!( rng::AbstractRNG, d::ReshapedDistribution{N}, x::AbstractArray{<:Real,N} ) where {N} + @boundscheck size(x) == size(d.dist) dist = d.dist @inbounds rand!(rng, dist, reshape(x, size(dist))) return x diff --git a/src/samplers/multinomial.jl b/src/samplers/multinomial.jl index 7ce8b89730..bd339e726b 100644 --- a/src/samplers/multinomial.jl +++ b/src/samplers/multinomial.jl @@ -1,3 +1,6 @@ +function multinom_rand(rng::AbstractRNG, n::Int, p::AbstractVector{<:Real}) + return multinom_rand!(rng, n, p, Vector{Int}(undef, length(p))) +end function multinom_rand!(rng::AbstractRNG, n::Int, p::AbstractVector{<:Real}, x::AbstractVector{<:Real}) k = length(p) @@ -49,8 +52,13 @@ function MultinomialSampler(n::Int, prob::Vector{<:Real}) return MultinomialSampler(n, prob, AliasTable(prob)) end -function _rand!(rng::AbstractRNG, s::MultinomialSampler, +function rand(rng::AbstractRNG, s::MultinomialSampler) + x = Vector{Int}(undef, length(s.prob)) + return rand!(rng, s, x) +end +@inline function rand!(rng::AbstractRNG, s::MultinomialSampler, x::AbstractVector{<:Real}) + @boundscheck length(s) == length(x) n = s.n k = length(s) if n^2 > k diff --git a/src/samplers/productnamedtuple.jl b/src/samplers/productnamedtuple.jl index f46c1c2aad..e878995202 100644 --- a/src/samplers/productnamedtuple.jl +++ b/src/samplers/productnamedtuple.jl @@ -7,13 +7,15 @@ function Base.rand(rng::AbstractRNG, spl::ProductNamedTupleSampler{K}) where {K} return NamedTuple{K}(map(Base.Fix1(rand, rng), spl.samplers)) end -function _rand(rng::AbstractRNG, spl::ProductNamedTupleSampler, dims::Dims) - return map(CartesianIndices(dims)) do _ - return rand(rng, spl) - end +function Base.rand(rng::AbstractRNG, s::ProductNamedTupleSampler, dims::Dims) + r = rand(rng, s) + out = Array{typeof(r)}(undef, dims) 
+ out[1] = r + rand!(rng, s, @view(out[2:end])) + return out end -function _rand!( +function Random.rand!( rng::AbstractRNG, spl::ProductNamedTupleSampler, xs::AbstractArray{<:NamedTuple{K}} ) where {K} for i in eachindex(xs) diff --git a/src/samplers/vonmisesfisher.jl b/src/samplers/vonmisesfisher.jl index fd3eb2df08..a77b6628df 100644 --- a/src/samplers/vonmisesfisher.jl +++ b/src/samplers/vonmisesfisher.jl @@ -26,8 +26,15 @@ Base.length(s::VonMisesFisherSampler) = length(s.v) return x end - -function _rand!(rng::AbstractRNG, spl::VonMisesFisherSampler, x::AbstractVector) +# Currently, the VonMisesFisherSampler is written for `Float64` +# TODO: Generalize to other number types +function rand(rng::AbstractRNG, spl::VonMisesFisherSampler) + x = Vector{Float64}(undef, length(spl)) + @inbounds rand!(rng, spl, x) + return x +end +@inline function rand!(rng::AbstractRNG, spl::VonMisesFisherSampler, x::AbstractVector{<:Real}) + @boundscheck length(spl) == length(x) w = _vmf_genw(rng, spl) p = spl.p x[1] = w diff --git a/src/truncate.jl b/src/truncate.jl index 48d62b015b..18c8716f92 100644 --- a/src/truncate.jl +++ b/src/truncate.jl @@ -126,9 +126,6 @@ end params(d::Truncated) = tuple(params(d.untruncated)..., d.lower, d.upper) partype(d::Truncated{<:UnivariateDistribution,<:ValueSupport,T}) where {T<:Real} = promote_type(partype(d.untruncated), T) -Base.eltype(::Type{<:Truncated{D}}) where {D<:UnivariateDistribution} = eltype(D) -Base.eltype(d::Truncated) = eltype(d.untruncated) - ### range and support islowerbounded(d::RightTruncated) = islowerbounded(d.untruncated) diff --git a/src/univariate/continuous/beta.jl b/src/univariate/continuous/beta.jl index fd2420d815..d1a84613ed 100644 --- a/src/univariate/continuous/beta.jl +++ b/src/univariate/continuous/beta.jl @@ -138,15 +138,16 @@ struct BetaSampler{T<:Real, S1 <: Sampleable{Univariate,Continuous}, s2::S2 end -function sampler(d::Beta{T}) where T - (α, β) = params(d) - if (α ≤ 1.0) && (β ≤ 1.0) +function sampler(d::Beta) + α, β = params(d) + if α ≤ 1 && β ≤ 1 return BetaSampler(false, inv(α), inv(β), - sampler(Uniform()), sampler(Uniform())) + sampler(Uniform(zero(α), oneunit(α))), + sampler(Uniform(zero(β), oneunit(β)))) else return BetaSampler(true, inv(α), inv(β), - sampler(Gamma(α, one(T))), - sampler(Gamma(β, one(T)))) + sampler(Gamma(α, oneunit(α))), + sampler(Gamma(β, oneunit(β)))) end end @@ -160,11 +161,11 @@ function rand(rng::AbstractRNG, s::BetaSampler) iα = s.iα iβ = s.iβ while true - u = rand(rng) # the Uniform sampler just calls rand() - v = rand(rng) + u = rand(rng, s.s1) # the Uniform sampler just calls rand() + v = rand(rng, s.s2) x = u^iα y = v^iβ - if x + y ≤ one(x) + if x + y ≤ 1 if (x + y > 0) return x / (x + y) else @@ -180,16 +181,20 @@ function rand(rng::AbstractRNG, s::BetaSampler) end end -function rand(rng::AbstractRNG, d::Beta{T}) where T - (α, β) = params(d) - if (α ≤ 1.0) && (β ≤ 1.0) +function rand(rng::AbstractRNG, d::Beta) + α, β = params(d) + if α ≤ 1 && β ≤ 1 + iα = inv(α) + iβ = inv(β) + Tu = typeof(float(iα)) + Tv = typeof(float(iβ)) while true - u = rand(rng) - v = rand(rng) - x = u^inv(α) - y = v^inv(β) - if x + y ≤ one(x) - if (x + y > 0) + u = rand(rng, Tu) + v = rand(rng, Tv) + x = u^iα + y = v^iβ + if x + y ≤ 1 + if x + y > 0 return x / (x + y) else logX = log(u) / α @@ -202,8 +207,8 @@ function rand(rng::AbstractRNG, d::Beta{T}) where T end end else - g1 = rand(rng, Gamma(α, one(T))) - g2 = rand(rng, Gamma(β, one(T))) + g1 = rand(rng, Gamma(α, oneunit(α))) + g2 = rand(rng, Gamma(β, oneunit(β))) 
        return g1 / (g1 + g2)
    end
end
diff --git a/src/univariate/continuous/chisq.jl b/src/univariate/continuous/chisq.jl
index 8604742ce5..1b125282b8 100644
--- a/src/univariate/continuous/chisq.jl
+++ b/src/univariate/continuous/chisq.jl
@@ -97,13 +97,16 @@ gradlogpdf(d::Chisq{T}, x::Real) where {T<:Real} = x > 0 ? (d.ν/2 - 1) / x - 1
#### Sampling

function rand(rng::AbstractRNG, d::Chisq)
-    α = dof(d) / 2
-    θ = oftype(α, 2)
+    α, θ = promote(dof(d) / 2, 2)
    return rand(rng, Gamma{typeof(α)}(α, θ))
end

+function rand!(rng::AbstractRNG, d::Chisq, A::AbstractArray{<:Real})
+    α, θ = promote(dof(d) / 2, 2)
+    return rand!(rng, Gamma{typeof(α)}(α, θ), A)
+end
+
function sampler(d::Chisq)
-    α = dof(d) / 2
-    θ = oftype(α, 2)
+    α, θ = promote(dof(d) / 2, 2)
    return sampler(Gamma{typeof(α)}(α, θ))
end
diff --git a/src/univariate/continuous/gumbel.jl b/src/univariate/continuous/gumbel.jl
index 54090967fc..b014510dc2 100644
--- a/src/univariate/continuous/gumbel.jl
+++ b/src/univariate/continuous/gumbel.jl
@@ -41,8 +41,6 @@ Gumbel(μ::Real=0.0) = Gumbel(μ, one(μ); check_args=false)

const DoubleExponential = Gumbel

-Base.eltype(::Type{Gumbel{T}}) where {T} = T
-
#### Conversions

convert(::Type{Gumbel{T}}, μ::S, θ::S) where {T <: Real, S <: Real} = Gumbel(T(μ), T(θ))
@@ -56,10 +54,10 @@ scale(d::Gumbel) = d.θ
params(d::Gumbel) = (d.μ, d.θ)
partype(::Gumbel{T}) where {T} = T

-function Base.rand(rng::Random.AbstractRNG, d::Gumbel)
-    return d.μ - d.θ * log(randexp(rng, float(eltype(d))))
+function Base.rand(rng::Random.AbstractRNG, d::Gumbel{T}) where {T}
+    return d.μ - d.θ * log(randexp(rng, float(T)))
end
-function _rand!(rng::Random.AbstractRNG, d::Gumbel, x::AbstractArray{<:Real})
+function rand!(rng::Random.AbstractRNG, d::Gumbel, x::AbstractArray{<:Real})
    randexp!(rng, x)
    x .= d.μ .- d.θ .* log.(x)
    return x
diff --git a/src/univariate/continuous/johnsonsu.jl b/src/univariate/continuous/johnsonsu.jl
index 65597adc2d..69506f5fea 100644
--- a/src/univariate/continuous/johnsonsu.jl
+++ b/src/univariate/continuous/johnsonsu.jl
@@ -94,7 +94,7 @@ invlogccdf(d::JohnsonSU, lq::Real) = xval(d, norminvlogccdf(lq))

#### Sampling

-rand(rng::AbstractRNG, d::JohnsonSU) = xval(d, randn(rng))
+rand(rng::AbstractRNG, d::JohnsonSU) = xval(d, randn(rng, float(partype(d))))

## Fitting
diff --git a/src/univariate/continuous/normal.jl b/src/univariate/continuous/normal.jl
index ef2b77bb58..bc67ea2105 100644
--- a/src/univariate/continuous/normal.jl
+++ b/src/univariate/continuous/normal.jl
@@ -60,8 +60,6 @@ params(d::Normal) = (d.μ, d.σ)
location(d::Normal) = d.μ
scale(d::Normal) = d.σ

-Base.eltype(::Type{Normal{T}}) where {T} = T
-
#### Statistics

mean(d::Normal) = d.μ
diff --git a/src/univariate/continuous/semicircle.jl b/src/univariate/continuous/semicircle.jl
index 760164ea02..d9bf6bb0da 100644
--- a/src/univariate/continuous/semicircle.jl
+++ b/src/univariate/continuous/semicircle.jl
@@ -78,8 +78,9 @@ function rand(rng::AbstractRNG, d::Semicircle)
    # sample polar coordinates r,θ
    # of point uniformly distributed on radius d.r half disk
    # project onto x axis
-    θ = rand(rng) # multiple of π
-    r = d.r * sqrt(rand(rng))
+    rmax = float(d.r)
+    θ = rand(rng, typeof(rmax)) # multiple of π
+    r = rmax * sqrt(rand(rng, typeof(rmax)))
    return cospi(θ) * r
end
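
A recurring change in the univariate hunks above (`Chisq`, `Gumbel`, `JohnsonSU`, `Semicircle`) is drawing the base variate with an explicit float type derived from the parameters, instead of an untyped `rand(rng)`/`randn(rng)` that always yields `Float64`. A minimal sketch of the idea with an invented `MyUniform` type, not code from this PR:

```julia
using Random

struct MyUniform{T<:Real}
    a::T
    b::T
end

# Draw the base uniform in the parameters' float type, so Float32
# parameters produce Float32 samples instead of promoting to Float64.
function Random.rand(rng::AbstractRNG, d::MyUniform{T}) where {T}
    return d.a + (d.b - d.a) * rand(rng, float(T))
end

rand(MersenneTwister(1), MyUniform(0.0f0, 1.0f0)) isa Float32  # true
```

diff --git a/src/univariate/continuous/uniform.jl b/src/univariate/continuous/uniform.jl
index d4beca7d79..80cebeae5e 100644
--- a/src/univariate/continuous/uniform.jl
+++ b/src/univariate/continuous/uniform.jl
@@ -151,9 +151,9 @@ Base.:*(c::Real, d::Uniform) = Uniform(minmax(c * d.a, c * 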
d.b)...) #### Sampling -rand(rng::AbstractRNG, d::Uniform) = d.a + (d.b - d.a) * rand(rng) +rand(rng::AbstractRNG, d::Uniform{T}) where {T} = d.a + (d.b - d.a) * rand(rng, float(T)) -_rand!(rng::AbstractRNG, d::Uniform, A::AbstractArray{<:Real}) = +rand!(rng::AbstractRNG, d::Uniform, A::AbstractArray{<:Real}) = A .= Base.Fix1(quantile, d).(rand!(rng, A)) diff --git a/src/univariate/discrete/bernoulli.jl b/src/univariate/discrete/bernoulli.jl index c1dee968ce..fbfdab97d2 100644 --- a/src/univariate/discrete/bernoulli.jl +++ b/src/univariate/discrete/bernoulli.jl @@ -40,8 +40,6 @@ Bernoulli() = Bernoulli{Float64}(0.5) @distr_support Bernoulli false true -Base.eltype(::Type{<:Bernoulli}) = Bool - #### Conversions convert(::Type{Bernoulli{T}}, p::Real) where {T<:Real} = Bernoulli(T(p)) Base.convert(::Type{Bernoulli{T}}, d::Bernoulli) where {T<:Real} = Bernoulli{T}(T(d.p)) diff --git a/src/univariate/discrete/bernoullilogit.jl b/src/univariate/discrete/bernoullilogit.jl index f82059fed9..ca5233ea42 100644 --- a/src/univariate/discrete/bernoullilogit.jl +++ b/src/univariate/discrete/bernoullilogit.jl @@ -24,8 +24,6 @@ BernoulliLogit() = BernoulliLogit(0.0) @distr_support BernoulliLogit false true -Base.eltype(::Type{<:BernoulliLogit}) = Bool - #### Conversions Base.convert(::Type{BernoulliLogit{T}}, d::BernoulliLogit) where {T<:Real} = BernoulliLogit{T}(T(d.logitp)) Base.convert(::Type{BernoulliLogit{T}}, d::BernoulliLogit{T}) where {T<:Real} = d diff --git a/src/univariate/discrete/dirac.jl b/src/univariate/discrete/dirac.jl index 94d082b0fa..01b49bfb1b 100644 --- a/src/univariate/discrete/dirac.jl +++ b/src/univariate/discrete/dirac.jl @@ -22,8 +22,6 @@ struct Dirac{T} <: DiscreteUnivariateDistribution value::T end -Base.eltype(::Type{Dirac{T}}) where {T} = T - insupport(d::Dirac, x::Real) = x == d.value minimum(d::Dirac) = d.value maximum(d::Dirac) = d.value diff --git a/src/univariate/discrete/discretenonparametric.jl b/src/univariate/discrete/discretenonparametric.jl index 8e1eefab6e..1c3e047187 100644 --- a/src/univariate/discrete/discretenonparametric.jl +++ b/src/univariate/discrete/discretenonparametric.jl @@ -39,8 +39,6 @@ DiscreteNonParametric(vs::AbstractVector{T}, ps::AbstractVector{P}; check_args:: T<:Real,P<:Real} = DiscreteNonParametric{T,P,typeof(vs),typeof(ps)}(vs, ps; check_args=check_args) -Base.eltype(::Type{<:DiscreteNonParametric{T}}) where T = T - # Conversion convert(::Type{DiscreteNonParametric{T,P,Ts,Ps}}, d::DiscreteNonParametric) where {T,P,Ts,Ps} = DiscreteNonParametric{T,P,Ts,Ps}(convert(Ts, support(d)), convert(Ps, probs(d)), check_args=false) diff --git a/src/univariate/locationscale.jl b/src/univariate/locationscale.jl index ded6c2b2ac..3f12c1ad5f 100644 --- a/src/univariate/locationscale.jl +++ b/src/univariate/locationscale.jl @@ -51,8 +51,6 @@ end function AffineDistribution(μ::T, σ::T, ρ::UnivariateDistribution; check_args::Bool=true) where {T<:Real} @check_args AffineDistribution (σ, !iszero(σ)) - # μ and σ act on both random numbers and parameter-like quantities like mean - # hence do not promote: but take care in eltype and partype return AffineDistribution{T}(μ, σ, ρ) end @@ -72,8 +70,6 @@ end const ContinuousAffineDistribution{T<:Real,D<:ContinuousUnivariateDistribution} = AffineDistribution{T,Continuous,D} const DiscreteAffineDistribution{T<:Real,D<:DiscreteUnivariateDistribution} = AffineDistribution{T,Discrete,D} -Base.eltype(::Type{<:AffineDistribution{T,S,D}}) where {T,S,D} = promote_type(eltype(D), T) - minimum(d::AffineDistribution) = d.σ > 0 ? 
d.μ + d.σ * minimum(d.ρ) : d.μ + d.σ * maximum(d.ρ) maximum(d::AffineDistribution) = diff --git a/src/univariate/orderstatistic.jl b/src/univariate/orderstatistic.jl index 1a7055ef91..4a9012fb5c 100644 --- a/src/univariate/orderstatistic.jl +++ b/src/univariate/orderstatistic.jl @@ -58,8 +58,6 @@ insupport(d::OrderStatistic, x::Real) = insupport(d.dist, x) params(d::OrderStatistic) = tuple(params(d.dist)..., d.n, d.rank) partype(d::OrderStatistic) = partype(d.dist) -Base.eltype(::Type{<:OrderStatistic{D}}) where {D} = Base.eltype(D) -Base.eltype(d::OrderStatistic) = eltype(d.dist) # distribution of the ith order statistic from an IID uniform distribution, with CDF Uᵢₙ(x) function _uniform_orderstatistic(d::OrderStatistic) @@ -102,7 +100,6 @@ end function rand(rng::AbstractRNG, d::OrderStatistic) # inverse transform sampling. Since quantile function is Qₓ(Uᵢₙ⁻¹(p)), we draw a random # variable from Uᵢₙ and pass it through the quantile function of `d.dist` - T = eltype(d.dist) b = _uniform_orderstatistic(d) - return T(quantile(d.dist, rand(rng, b))) + return quantile(d.dist, float(partype(d.dist))(rand(rng, b))) end diff --git a/src/univariates.jl b/src/univariates.jl index b60e5a2949..68daebb0cf 100644 --- a/src/univariates.jl +++ b/src/univariates.jl @@ -154,7 +154,9 @@ end Generate a scalar sample from `d`. The general fallback is `quantile(d, rand())`. """ -rand(rng::AbstractRNG, d::UnivariateDistribution) = quantile(d, rand(rng)) +function rand(rng::AbstractRNG, d::UnivariateDistribution) + return quantile(d, rand(rng, float(partype(d)))) +end ## statistics diff --git a/test/censored.jl b/test/censored.jl index 27f30cadfc..d371370782 100644 --- a/test/censored.jl +++ b/test/censored.jl @@ -100,7 +100,7 @@ end d = Censored(Normal(0.0, 1.0), -1, 2) @test d isa Censored - @test eltype(d) === Float64 + @test @test_deprecated(eltype(d)) === Float64 @test params(d) === (params(Normal(0.0, 1.0))..., -1, 2) @test partype(d) === Float64 @test @inferred extrema(d) == (-1, 2) @@ -115,7 +115,7 @@ end d = Censored(Cauchy(0, 1), nothing, 2) @test d isa Censored - @test eltype(d) === Base.promote_type(eltype(Cauchy(0, 1)), Int) + @test @test_deprecated(eltype(d)) === Base.promote_type(@test_deprecated(eltype(Cauchy(0, 1))), Int) @test params(d) === (params(Cauchy(0, 1))..., nothing, 2) @test partype(d) === Float64 @test extrema(d) == (-Inf, 2.0) @@ -129,7 +129,7 @@ end d = Censored(Gamma(1, 2), 2, nothing) @test d isa Censored - @test eltype(d) === Base.promote_type(eltype(Gamma(1, 2)), Int) + @test @test_deprecated(eltype(d)) === Base.promote_type(@test_deprecated(eltype(Gamma(1, 2))), Int) @test params(d) === (params(Gamma(1, 2))..., 2, nothing) @test partype(d) === Float64 @test extrema(d) == (2.0, Inf) diff --git a/test/cholesky/lkjcholesky.jl b/test/cholesky/lkjcholesky.jl index 82e22f606c..9a8e1f88a3 100644 --- a/test/cholesky/lkjcholesky.jl +++ b/test/cholesky/lkjcholesky.jl @@ -17,9 +17,10 @@ using FiniteDifferences dmat = LKJ(p, d.η) marginal = Distributions._marginal(dmat) ndraws = length(xs) - zs = Array{eltype(d)}(undef, p, p, ndraws) - for k in 1:ndraws - zs[:, :, k] = Matrix(xs[k]) + zs = if VERSION >= v"1.9" + stack(Matrix, xs) + else + mapreduce(Matrix, (x , y) -> cat(x, y; dims=3), xs) end @testset "LKJCholesky marginal moments" begin @@ -194,7 +195,7 @@ using FiniteDifferences test_draw(d, rand(rng, d)) test_draws(d, rand(rng, d, 10^4); nkstests=nkstests) end - @test_broken rand(rng, LKJCholesky(5, Inf)) ≈ I + @test Matrix(rand(rng, LKJCholesky(5, Inf))) ≈ I end @testset "rand!" 
begin diff --git a/test/eachvariate.jl b/test/eachvariate.jl index 60cd1f68d3..6fcc8b8d88 100644 --- a/test/eachvariate.jl +++ b/test/eachvariate.jl @@ -14,8 +14,9 @@ end # MWE in #1817 struct FooEachvariate <: Sampleable{Multivariate, Continuous} end Base.length(::FooEachvariate) = 3 -Base.eltype(::FooEachvariate) = Float64 -function Distributions._rand!(rng::AbstractRNG, ::FooEachvariate, x::AbstractVector{<:Real}) +Base.rand(rng::AbstractRNG, d::FooEachvariate) = rand(rng, length(d)) +@inline function Random.rand!(rng::AbstractRNG, d::FooEachvariate, x::AbstractVector{<:Real}) + @boundscheck length(x) == length(d) return rand!(rng, x) end diff --git a/test/functionals.jl b/test/functionals.jl index 01357355a5..ce49ec0ff5 100644 --- a/test/functionals.jl +++ b/test/functionals.jl @@ -1,3 +1,6 @@ +using LinearAlgebra +using Random + # Struct to test AbstractMvNormal methods struct CholeskyMvNormal{M,T} <: Distributions.AbstractMvNormal m::M @@ -5,7 +8,7 @@ struct CholeskyMvNormal{M,T} <: Distributions.AbstractMvNormal end # Constructor for diagonal covariance matrices used in the tests below -function CholeskyMvNormal(m::Vector, Σ::Diagonal) +function CholeskyMvNormal(m::AbstractVector{<:Real}, Σ::Diagonal{<:Real}) L = Diagonal(map(sqrt, Σ.diag)) return CholeskyMvNormal{typeof(m),typeof(L)}(m, L) end @@ -14,11 +17,27 @@ Distributions.length(p::CholeskyMvNormal) = length(p.m) Distributions.mean(p::CholeskyMvNormal) = p.m Distributions.cov(p::CholeskyMvNormal) = p.L * p.L' Distributions.logdetcov(p::CholeskyMvNormal) = 2 * logdet(p.L) -function Distributions.sqmahal(p::CholeskyMvNormal, x::AbstractVector) +function Distributions.sqmahal(p::CholeskyMvNormal, x::AbstractVector{<:Real}) return sum(abs2, p.L \ (mean(p) - x)) end -function Distributions._rand!(rng::AbstractRNG, p::CholeskyMvNormal, x::Vector) - return x .= p.m .+ p.L * randn!(rng, x) +function Base.rand(rng::AbstractRNG, p::CholeskyMvNormal) + T = float(Base.promote_eltype(p.m, p.L)) + x = Vector{T}(undef, length(p.m)) + @inbounds rand!(rng, p, x) + return x +end +@inline function Random.rand!(rng::AbstractRNG, p::CholeskyMvNormal, x::AbstractVector{<:Real}) + @boundscheck length(x) == length(p) + randn!(rng, x) + # Older Julia versions that do not support `lmul!(::Diagonal, ::AbstractVector)` + # Ref https://github.com/JuliaLang/julia/pull/36012 + if p.L isa Diagonal && VERSION < v"1.6.0-DEV.88" + x .*= p.L.diag + else + lmul!(p.L, x) + end + x .+= p.m + return x end @testset "Expectations" begin @@ -99,7 +118,7 @@ end p = Geometric(0.3) q = Geometric(0.4) test_kl(p, q) - + x1 = nextfloat(0.0) x2 = prevfloat(1.0) p1 = Geometric(x1) diff --git a/test/matrixreshaped.jl b/test/matrixreshaped.jl index 5d7165bf73..71240599ed 100644 --- a/test/matrixreshaped.jl +++ b/test/matrixreshaped.jl @@ -73,7 +73,7 @@ function test_matrixreshaped(rng, d1, sizes) end @testset "MatrixReshaped eltype" begin for d in d1s - @test eltype(d) === eltype(d1) + @test @test_deprecated(eltype(d)) === @test_deprecated(eltype(d1)) end end @testset "MatrixReshaped logpdf" begin diff --git a/test/matrixvariates.jl b/test/matrixvariates.jl index 9235cdaf5d..dad3a25494 100644 --- a/test/matrixvariates.jl +++ b/test/matrixvariates.jl @@ -4,8 +4,82 @@ using LinearAlgebra using PDMats using Statistics using Test +using StatsFuns import JSON -import Distributions: _univariate, _multivariate, _rand_params +import SpecialFunctions + +# Test utility: Construct matrix distributions with random parameters + +function _rand_dist(::Type{InverseWishart{T}}, n::Int, p::Int; 
rng::Union{AbstractRNG,Nothing} = nothing)::InverseWishart{T} where {T<:Real} + n == p || throw(ArgumentError("dims must be equal for InverseWishart")) + rng = rng === nothing ? () : (rng,) + ν = n + 3 + abs(10 * randn(rng..., T)) + X = rand(rng..., T, n, n) + X .= 2 .* X .- 1 + Ψ = X * X' + return InverseWishart(ν, Ψ)::InverseWishart{T} +end +function _rand_dist(::Type{LKJ{T}}, n::Int, p::Int; rng::Union{AbstractRNG,Nothing} = nothing)::LKJ{T} where {T<:Real} + n == p || throw(ArgumentError("dims must be equal for LKJ")) + rng = rng === nothing ? () : (rng,) + η = abs(3 * randn(rng..., T)) + return LKJ(n, η) +end +function _rand_dist(::Type{MatrixBeta{T}}, n::Int, p::Int; rng::Union{AbstractRNG,Nothing} = nothing)::MatrixBeta{T} where {T<:Real} + n == p || throw(ArgumentError("dims must be equal for MatrixBeta")) + rng = rng === nothing ? () : (rng,) + n1 = n + 1 + abs(10 * randn(rng..., T)) + n2 = n + 1 + abs(10 * randn(rng..., T)) + return MatrixBeta(n, n1, n2) +end +function _rand_dist(::Type{MatrixFDist{T}}, n::Int, p::Int; rng::Union{AbstractRNG,Nothing} = nothing)::MatrixFDist{T} where {T<:Real} + n == p || throw(ArgumentError("dims must be equal for MatrixFDist")) + rng = rng === nothing ? () : (rng,) + n1 = n + 1 + abs(10 * randn(rng..., T)) + n2 = n + 3 + abs(10 * randn(rng..., T)) + X = rand(rng..., T, n, n) + X .= 2 .* X .- 1 + B = X * X' + return MatrixFDist(n1, n2, B) +end +function _rand_dist(::Type{MatrixNormal{T}}, n::Int, p::Int; rng::Union{AbstractRNG,Nothing} = nothing)::MatrixNormal{T} where {T<:Real} + rng = rng === nothing ? () : (rng,) + M = randn(rng..., T, n, p) + X = rand(rng..., T, n, n) + Y = rand(rng..., T, p, p) + + X .= 2 .* X .- 1 + U = X * X' + + Y .= 2 .* Y .- 1 + V = Y * Y' + + return MatrixNormal(M, U, V) +end +function _rand_dist(::Type{MatrixTDist{T}}, n::Int, p::Int; rng::Union{AbstractRNG,Nothing} = nothing)::MatrixTDist{T} where {T<:Real} + rng = rng === nothing ? () : (rng,) + ν = n + p + 1 + abs(10 * randn(rng..., T)) + M = randn(rng..., T, n, p) + X = rand(rng..., T, n, n) + Y = rand(rng..., T, p, p) + + X .= 2 .* X .- 1 + Σ = X * X' + + Y .= 2 .* Y .- 1 + Ω = Y * Y' + + return MatrixTDist(ν, M, Σ, Ω) +end +function _rand_dist(::Type{Wishart{T}}, n::Int, p::Int; rng::Union{AbstractRNG,Nothing} = nothing)::Wishart{T} where {T<:Real} + n == p || throw(ArgumentError("dims must be equal for Wishart")) + rng = rng === nothing ? () : (rng,) + ν = n - 1 + abs(10 * randn(rng..., T)) + X = rand(rng..., T, n, n) + X .= 2 .* X .- 1 + S = X * X' + return Wishart(ν, S) +end @testset "matrixvariates" begin #= @@ -26,7 +100,9 @@ import Distributions: _univariate, _multivariate, _rand_params # Check that a random draw from d has the right properties # -------------------------------------------------- -function test_draw(d::MatrixDistribution, X::AbstractMatrix) +function test_draw(d::MatrixDistribution; rng::Union{AbstractRNG,Nothing}=nothing) + X = rng === nothing ? 
rand(d) : rand(rng, d) + @test X isa Matrix{float(partype(d))} @test size(d) == size(X) @test size(d, 1) == size(X, 1) @test size(d, 2) == size(X, 2) @@ -46,8 +122,6 @@ function test_draw(d::MatrixDistribution, X::AbstractMatrix) nothing end -test_draw(d::MatrixDistribution) = test_draw(d, rand(d)) - # -------------------------------------------------- # Check that sample quantities are close to population quantities # -------------------------------------------------- @@ -55,7 +129,7 @@ test_draw(d::MatrixDistribution) = test_draw(d, rand(d)) function test_draws(d::MatrixDistribution, draws::AbstractArray{<:AbstractMatrix}) @test mean(draws) ≈ mean(d) rtol = 0.01 draws_matrix = mapreduce(vec, hcat, draws) - @test cov(draws_matrix; dims=2) ≈ cov(d) rtol = 0.1 + @test cov(draws_matrix; dims=2) ≈ cov(d) rtol = 0.5 nothing end @@ -65,44 +139,38 @@ function test_draws(d::LKJ, draws::AbstractArray{<:AbstractMatrix}) nothing end -function test_draws(d::MatrixDistribution, M::Integer) - rng = MersenneTwister(123) - @testset "Testing matrix-variates with $key" for (key, func) in - Dict("rand(...)" => [rand, rand], - "rand(rng, ...)" => [dist -> rand(rng, dist), (dist, n) -> rand(rng, dist, n)]) - test_draws(d, func[2](d, M)) +function test_draws(d::MatrixDistribution, M::Integer; rng::Union{AbstractRNG,Nothing} = nothing) + rng = rng === nothing ? () : (rng,) + @testset "Testing matrix-variates with rand(...)" begin + test_draws(d, rand(rng..., d, M)) end - @testset "Testing matrix-variates with $key" for (key, func) in - Dict("rand!(..., false)" => [(dist, X) -> rand!(dist, X, false), rand!], - "rand!(rng, ..., false)" => [(dist, X) -> rand!(rng, dist, X, false), (dist, X) -> rand!(rng, dist, X)]) - m = [Matrix{partype(d)}(undef, size(d)) for _ in Base.OneTo(M)] - x = func[1](d, m) + @testset "Testing matrix-variates with rand!(..., false)" begin + m = [Matrix{float(partype(d))}(undef, size(d)) for _ in 1:M] + x = rand!(rng..., d, m, false) @test x ≡ m - @test isapprox(mean(x) , mean(d) , atol = 0.1) - m3 = Array{partype(d), 3}(undef, size(d)..., M) - x = func[2](d, m3) + @test mean(x) ≈ mean(d) rtol = 0.1 + m3 = Array{float(partype(d))}(undef, size(d)..., M) + x = rand!(rng..., d, m3) @test x ≡ m3 - @test isapprox(mean(x) , mean(mean(d)) , atol = 0.1) + @test dropdims(mean(x; dims=3); dims=3) ≈ mean(d) rtol = 0.1 end - @testset "Testing matrix-variates with $key" for (key, func) in - Dict("rand!(..., true)" => (dist, X) -> rand!(dist, X, true), - "rand!(rng, true)" => (dist, X) -> rand!(rng, dist, X, true)) - m = Vector{Matrix{partype(d)}}(undef, M) - x = func(d, m) + @testset "Testing matrix-variates with rand!(..., true)" begin + m = Vector{Matrix{float(partype(d))}}(undef, M) + x = rand!(rng..., d, m, true) @test x ≡ m - @test isapprox(mean(x), mean(d) , atol = 0.1) + @test mean(x) ≈ mean(d) rtol = 0.1 end repeats = 10 - m = Vector{Matrix{partype(d)}}(undef, repeats) - rand!(d, m) + m = Vector{Matrix{float(partype(d))}}(undef, repeats) + rand!(rng..., d, m) @test isassigned(m, 1) m1=m[1] - rand!(d, m) + rand!(rng..., d, m) @test m1 ≡ m[1] - rand!(d, m, true) + rand!(rng..., d, m, true) @test m1 ≢ m[1] m1 = m[1] - rand!(d, m, false) + rand!(rng..., d, m, false) @test m1 ≡ m[1] nothing end @@ -138,7 +206,7 @@ function test_cov(d::MatrixDistribution) @test reshape(cov(d), size(d)..., size(d)...) 
≈ cov(d, Val(false)) end -test_cov(d::LKJ) = nothing +test_cov(::LKJ) = nothing # -------------------------------------------------- # Check dim @@ -152,27 +220,28 @@ function test_dim(d::MatrixDistribution) @test n == size(mean(d), 2) end -test_dim(d::Union{MatrixNormal, MatrixTDist}) = nothing +test_dim(::Union{MatrixNormal, MatrixTDist}) = nothing # -------------------------------------------------- # RUN EVERYTHING # -------------------------------------------------- -function test_distr(d::MatrixDistribution, M::Integer) - test_draw(d) - test_draws(d, M) +function test_distr(d::MatrixDistribution, M::Integer; rng::Union{AbstractRNG,Nothing} = nothing) + test_draw(d; rng=rng) + test_draws(d, M; rng=rng) test_cov(d) test_convert(d) test_dim(d) nothing end -function test_distr(dist::Type{<:MatrixDistribution}, +function test_distr(::Type{D}, n::Integer, p::Integer, - M::Integer) - d = dist(_rand_params(dist, Float64, n, p)...) - test_distr(d, M) + M::Integer; + rng::Union{AbstractRNG,Nothing} = nothing) where {D<:MatrixDistribution} + d = _rand_dist(D, n, p; rng=rng) + test_distr(d, M; rng=rng) nothing end @@ -180,31 +249,66 @@ end # 2. test matrix-variate against the univariate it collapses to in the 1 x 1 case # ============================================================================= -function test_against_univariate(D::MatrixDistribution, d::UnivariateDistribution) - X = rand(D) - x = X[1] - @test logpdf(D, X) ≈ logpdf(d, x) - @test pdf(D, X) ≈ pdf(d, x) - @test mean(D)[1] ≈ mean(d) - @test var(D)[1] ≈ var(d) - nothing +function _univariate(d::InverseWishart) + ν, Ψ = params(d) + α = ν / 2 + β = Matrix(Ψ)[1] / 2 + return InverseGamma(α, β) +end +function _univariate(d::MatrixBeta) + p, n1, n2 = params(d) + return Beta(n1 / 2, n2 / 2) +end +function _univariate(d::MatrixFDist) + n1, n2, B = params(d) + μ = zero(partype(d)) + σ = (n1 / n2) * Matrix(B)[1] + return μ + σ * FDist(n1, n2) +end +function _univariate(d::MatrixNormal) + M, U, V = params(d) + μ = M[1] + σ = sqrt( Matrix(U)[1] * Matrix(V)[1] ) + return Normal(μ, σ) +end +function _univariate(d::MatrixTDist) + ν, M, Σ, Ω = params(d) + μ = M[1] + σ = sqrt( Matrix(Σ)[1] * Matrix(Ω)[1] / ν ) + return μ + σ * TDist(ν) +end +function _univariate(d::Wishart) + df, S = params(d) + α = df / 2 + β = 2 * first(S) + return Gamma(α, β) end -function test_draws_against_univariate_cdf(D::MatrixDistribution, d::UnivariateDistribution) - α = 0.025 - M = 100000 - matvardraws = [rand(D)[1] for m in 1:M] - @test pvalue_kolmogorovsmirnoff(matvardraws, d) >= α +function test_against_univariate(::Type{D}; rng::Union{AbstractRNG,Nothing} = nothing) where {D<:MatrixDistribution} + dist = _rand_dist(D, 1, 1; rng=rng) + univariate_dist = _univariate(dist) + rng = rng === nothing ? () : (rng,) + @testset "Univariate Case: (log)pdf, mean, and var" begin + X = rand(rng..., dist) + x = X[1] + @test logpdf(dist, X) ≈ logpdf(univariate_dist, x) + @test pdf(dist, X) ≈ pdf(univariate_dist, x) + @test mean(dist)[1] ≈ mean(univariate_dist) + @test var(dist)[1] ≈ var(univariate_dist) + end + @testset "Univariate Case: Hypothesis test" begin + α = 0.025 + M = 10_000 + matvardraws = [first(rand(rng..., dist)) for _ in 1:M] + @test pvalue_kolmogorovsmirnoff(matvardraws, univariate_dist) > α + end nothing end -test_draws_against_univariate_cdf(D::LKJ, d::UnivariateDistribution) = nothing - -function test_against_univariate(dist::Type{<:MatrixDistribution}) - D = dist(_rand_params(dist, Float64, 1, 1)...) 
- d = _univariate(D) - test_against_univariate(D, d) - test_draws_against_univariate_cdf(D, d) +function test_against_univariate(::Type{D}; rng::Union{AbstractRNG,Nothing} = nothing) where {T<:Real,D<:LKJ{T}} + dist = _rand_dist(D, 1, 1; rng=rng) + X = rng === nothing ? rand(dist) : rand(rng, dist) + @test isone(first(X)) nothing end @@ -212,89 +316,86 @@ end # 3. test matrix-variate against the multivariate it collapses to in the row/column case # ============================================================================= -function test_against_multivariate(D::MatrixDistribution, d::MultivariateDistribution) - X = rand(D) - x = vec(X) - @test logpdf(D, X) ≈ logpdf(d, x) - @test pdf(D, X) ≈ pdf(d, x) - @test vec(mean(D)) ≈ mean(d) - @test cov(D) ≈ cov(d) - nothing +function _multivariate(d::MatrixNormal) + n, p = size(d) + if n > 1 && p > 1 + throw(ArgumentError("Row or col dim of `MatrixNormal` must be 1 to coerce to `MvNormal`")) + end + return vec(d) end +_multivariate(d::MatrixTDist) = MvTDist(d) -function test_against_multivariate(dist::Union{Type{MatrixNormal}, Type{MatrixTDist}}, n::Integer, p::Integer) - D = dist(_rand_params(dist, Float64, n, 1)...) - d = _multivariate(D) - test_against_multivariate(D, d) - D = dist(_rand_params(dist, Float64, 1, p)...) - d = _multivariate(D) - test_against_multivariate(D, d) +function test_against_multivariate(::Type{D}, n::Integer, p::Integer; rng::Union{AbstractRNG,Nothing} = nothing) where {T<:Real,D<:Union{MatrixNormal{T}, MatrixTDist{T}}} + dist = _rand_dist(D, n, 1; rng=rng) + multivariate_dist = _multivariate(dist) + X = rng === nothing ? rand(dist) : rand(rng, dist) + x = vec(X) + @test logpdf(dist, X) ≈ logpdf(multivariate_dist, x) + @test pdf(dist, X) ≈ pdf(multivariate_dist, x) + @test vec(mean(dist)) ≈ mean(multivariate_dist) + @test cov(dist) ≈ cov(multivariate_dist) nothing end -test_against_multivariate(dist::Type{<:MatrixDistribution}, n::Integer, p::Integer) = nothing +test_against_multivariate(::Type{<:MatrixDistribution}, n::Integer, p::Integer; rng::Union{AbstractRNG,Nothing} = nothing) = nothing # ============================================================================= # 4. test density evaluation against archived output from Stan # ============================================================================= -function jsonparams2dist(dist::Type{Wishart}, dict) +function jsonparams2dist(::Type{<:Wishart}, dict) ν = dict["params"][1][1] S = zeros(Float64, dict["dims"]...) S[:] = Vector{Float64}( dict["params"][2] ) return Wishart(ν, S) end -function jsonparams2dist(dist::Type{InverseWishart}, dict) +function jsonparams2dist(::Type{<:InverseWishart}, dict) ν = dict["params"][1][1] S = zeros(Float64, dict["dims"]...) S[:] = Vector{Float64}( dict["params"][2] ) return InverseWishart(ν, S) end -function jsonparams2dist(dist::Type{LKJ}, dict) +function jsonparams2dist(::Type{<:LKJ}, dict) d = dict["params"][1][1] η = dict["params"][2][1] return LKJ(d, η) end -function unpack_matvar_json_dict(dist::Type{<:MatrixDistribution}, dict) - d = jsonparams2dist(dist, dict) +function unpack_matvar_json_dict(::Type{D}, dict) where {D<:Union{Wishart,InverseWishart,LKJ}} + d = jsonparams2dist(D, dict) X = zeros(Float64, dict["dims"]...) 
X[:] = Vector{Float64}(dict["X"]) lpdf = dict["lpdf"][1] return d, X, lpdf end -function test_against_stan(dist::Type{<:MatrixDistribution}) - filename = joinpath(@__DIR__, "ref", "matrixvariates", "jsonfiles", "$(dist)_stan_output.json") +function test_against_stan(::Type{D}) where {D<:Union{Wishart,InverseWishart,LKJ}} + filename = joinpath(@__DIR__, "ref", "matrixvariates", "jsonfiles", "$(nameof(D))_stan_output.json") stan_output = JSON.parsefile(filename) - K = length(stan_output) - for k in 1:K - d, X, lpdf = unpack_matvar_json_dict(dist, stan_output[k]) - @test isapprox(logpdf(d, X), lpdf, atol = 1e-10) - @test isapprox(logpdf(d, [X, X]), [lpdf, lpdf], atol=1e-8) - @test isapprox(pdf(d, X), exp(lpdf), atol = 1e-6) - @test isapprox(pdf(d, [X, X]), exp.([lpdf, lpdf]), atol=1e-6) + for stan_output_i in stan_output + d, X, lpdf = unpack_matvar_json_dict(D, stan_output_i) + @test logpdf(d, X) ≈ lpdf atol = 1e-10 + @test logpdf(d, [X, X]) ≈ [lpdf, lpdf] atol=1e-8 + @test pdf(d, X) ≈ exp(lpdf) atol = 1e-6 + @test pdf(d, [X, X]) ≈ exp.([lpdf, lpdf]) atol=1e-6 end nothing end -function test_against_stan(dist::Union{Type{MatrixNormal}, Type{MatrixTDist}, Type{MatrixBeta}, Type{MatrixFDist}}) - nothing -end +test_against_stan(::Type{<:MatrixDistribution}) = nothing # ============================================================================= # 5. special, distribution-specific tests # ============================================================================= -test_special(dist::Type{<:MatrixDistribution}) = nothing +test_special(::Type{<:MatrixDistribution}; rng::Union{AbstractRNG,Nothing} = nothing) = nothing -function test_special(dist::Type{MatrixNormal}) - D = MatrixNormal(_rand_params(MatrixNormal, Float64, 2, 2)...) +function test_special(::Type{DT}; rng::Union{AbstractRNG,Nothing} = nothing) where {T<:Real,DT<:MatrixNormal{T}} + D = _rand_dist(DT, 2, 2; rng=rng) @testset "X ~ MN(M, U, V) ⟺ vec(X) ~ N(vec(M), V ⊗ U)" begin - d = vec(D) - test_against_multivariate(D, d) + test_against_multivariate(DT, 2, 2; rng=rng) end @testset "MatrixNormal mode" begin @test mode(D) == D.M @@ -302,13 +403,14 @@ function test_special(dist::Type{MatrixNormal}) @testset "PDMat mixing and matching" begin n = 3 p = 4 - M = randn(n, p) - u = rand() + rng = rng === nothing ? () : (rng,) + M = randn(rng..., n, p) + u = rand(rng...) U_scale = ScalMat(n, u) U_dense = Matrix(U_scale) U_pd = PDMat(U_dense) U_pdiag = PDiagMat(u*ones(n)) - v = rand(p) + v = rand(rng..., p) V_pdiag = PDiagMat(v) V_dense = Matrix(V_pdiag) V_pd = PDMat(V_dense) @@ -325,87 +427,96 @@ function test_special(dist::Type{MatrixNormal}) nothing end -function test_special(dist::Type{Wishart}) +function test_special(::Type{D}; rng::Union{AbstractRNG,Nothing} = nothing) where {T<:Real,D<:Wishart{T}} n = 3 M = 5000 α = 0.05 - ν, Σ = _rand_params(Wishart, Float64, n, n) - d = Wishart(ν, Σ) - H = rand(d, M) + d = _rand_dist(D, n, n; rng=rng) + ν, Σ = params(d) + H = rng === nothing ? rand(d, M) : rand(rng, d, M) @testset "deprecations" begin for warn in (true, false) @test @test_deprecated(Wishart(n - 1, Σ, warn)) == Wishart(n - 1, Σ) end end @testset "meanlogdet" begin - @test isapprox(Distributions.meanlogdet(d), mean(logdet.(H)), atol = 0.1) + # Errors for Float32?! + if T === Float64 + @test Distributions.meanlogdet(d) ≈ mean(logdet, H) rtol = 0.1 + end end @testset "H ~ W(ν, Σ), a ~ p(a) independent ⟹ a'Ha / a'Σa ~ χ²(ν)" begin - q = MvTDist(10, randn(n), rand(d)) + q = rng === nothing ? 
MvTDist(10, randn(T, n), rand(d)) : MvTDist(10, randn(rng, T, n), rand(rng, d)) ρ = Chisq(ν) - A = rand(q, M) + A = rng === nothing ? rand(q, M) : rand(rng, q, M) z = [A[:, m]'*H[m]*A[:, m] / (A[:, m]'*Σ*A[:, m]) for m in 1:M] @test pvalue_kolmogorovsmirnoff(z, ρ) >= α end @testset "H ~ W(ν, I) ⟹ H[i, i] ~ χ²(ν)" begin κ = n + 1 ρ = Chisq(κ) - g = Wishart(κ, ScalMat(n, 1)) - mymats = zeros(n, n, M) + g = Wishart(T(κ), ScalMat(n, T(1))) + mymats = Array{T}(undef, n, n, M) for m in 1:M - mymats[:, :, m] = rand(g) + mymats[:, :, m] = rng === nothing ? rand(g) : rand(rng, g) + end + # Compute p-values of the KS tests for each diagonal entry + pvalues = map(1:n) do i + pvalue_kolmogorovsmirnoff(@view(mymats[i, i, :]), ρ) end - for i in 1:n - @test pvalue_kolmogorovsmirnoff(mymats[i, i, :], ρ) >= α / n + # Perform test with Bonferroni-Holm correction + sort!(pvalues) + for (i, p) in enumerate(pvalues) + @test p > α / (n + 1 - i) end end @testset "Check Singular Branch" begin # Check that no warnings are shown: #1410 - rank1 = @test_logs Wishart(n - 2, Σ) - rank2 = @test_logs Wishart(n - 1, Σ) - test_draw(rank1) - test_draw(rank2) - test_draws(rank1, rand(rank1, 10^6)) - test_draws(rank2, rand(rank2, 10^6)) - test_cov(rank1) - test_cov(rank2) + for ν in (n - 2, n - 1) + dist = @test_logs Wishart(ν, Σ) + test_draw(dist; rng=rng) + test_draws(dist, rng === nothing ? rand(dist, 10^6) : rand(rng, dist, 10^6)) + test_cov(dist) + end X = H[1] @test Distributions.singular_wishart_logkernel(d, X) ≈ Distributions.nonsingular_wishart_logkernel(d, X) @test Distributions.singular_wishart_logc0(n, ν, d.S, rank(d)) ≈ Distributions.nonsingular_wishart_logc0(n, ν, d.S) - @test logpdf(d, X) ≈ Distributions.singular_wishart_logkernel(d, X) + Distributions.singular_wishart_logc0(n, ν, d.S, rank(d)) + if T === Float64 + @test logpdf(d, X) ≈ Distributions.singular_wishart_logkernel(d, X) + Distributions.singular_wishart_logc0(n, ν, d.S, rank(d)) + end end nothing end -function test_special(dist::Type{InverseWishart}) +function test_special(::Type{D}; rng::Union{AbstractRNG,Nothing} = nothing) where {T<:Real,D<:InverseWishart{T}} @testset "InverseWishart constructor" begin # Tests https://github.com/JuliaStats/Distributions.jl/issues/1948 - @test typeof(InverseWishart(5, ScalMat(5, 1))) == InverseWishart{Float64, ScalMat{Float64}} - @test typeof(InverseWishart(5, PDiagMat(ones(Int, 5)))) == InverseWishart{Float64, PDiagMat{Float64, Vector{Float64}}} + @test InverseWishart(5, ScalMat(5, T(1))) isa InverseWishart{T, ScalMat{T}} + @test InverseWishart(5, PDiagMat(ones(T, 5))) isa InverseWishart{T, PDiagMat{T, Vector{T}}} end end - -function test_special(dist::Type{MatrixTDist}) + +function test_special(::Type{D}; rng::Union{AbstractRNG,Nothing} = nothing) where {T<:Real,D<:MatrixTDist{T}} @testset "MT(v, M, vΣ, Ω) → MN(M, Σ, Ω) as v → ∞" begin n, p = (6, 3) - M, Σ, Ω = _rand_params(MatrixNormal, Float64, n, p) + MN = _rand_dist(MatrixNormal{T}, n, p; rng=rng) + A = rng === nothing ? rand(MN) : rand(rng, MN) + M, Σ, Ω = params(MN) MT = MatrixTDist(1000, M, 1000Σ, Ω) - MN = MatrixNormal(M, Σ, Ω) - A = rand(MN) - @test isapprox(logpdf(MT, A), logpdf(MN, A), atol = 0.1) + @test logpdf(MT, A) ≈ logpdf(MN, A) atol = 0.1 end @testset "PDMat mixing and matching" begin n = 3 p = 4 ν = max(n, p) + 1 - M = randn(n, p) - u = rand() + M = rng === nothing ? randn(T, n, p) : randn(rng, T, n, p) + u = rng === nothing ? 
rand(T) : rand(rng, T) U_scale = ScalMat(n, u) U_dense = Matrix(U_scale) U_pd = PDMat(U_dense) U_pdiag = PDiagMat(u*ones(n)) - v = rand(p) + v = rng === nothing ? rand(T, p) : rand(rng, T, p) V_pdiag = PDiagMat(v) V_dense = Matrix(V_pdiag) V_pd = PDMat(V_dense) @@ -422,72 +533,111 @@ function test_special(dist::Type{MatrixTDist}) nothing end -function test_special(dist::Type{LKJ}) +# Equation after (16) in LKJ (2009 JMA) +function lkj_vine_loginvconst_uniform(d::Integer, η::Real) + @assert isone(η) + expsum = betasum = zero(float(first(promote(d, η)))) + for k in 1:(d - 1) + α = oftype(betasum, k + 1) / 2 + expsum += k^2 + betasum += k * SpecialFunctions.logbeta(α, α) + end + loginvconst = expsum * logtwo + betasum + return loginvconst +end +# Third line in first proof of Section 3.3 in LKJ (2009 JMA) +function lkj_loginvconst_alt(d::Integer, η::Real) + loginvconst = float(zero(η)) + halflogπ = oftype(loginvconst, logπ) / 2 + z = SpecialFunctions.loggamma(η + oftype(loginvconst, d - 1) / 2) + for k in 1:(d - 1) + loginvconst += k * halflogπ + SpecialFunctions.loggamma(η + oftype(loginvconst, d - 1 - k) / 2) - z + end + return loginvconst +end +# https://doi.org/10.4169/amer.math.monthly.123.9.909 +function corr_logvolume(n::Integer, η::Real) + logvol = zero(float(first(promote(n, η)))) + halflogπ = oftype(logvol, logπ) / 2 + for k in 1:(n - 1) + logvol += k * (halflogπ + SpecialFunctions.loggamma(oftype(logvol, k+1)/2) - SpecialFunctions.loggamma(oftype(logvol, k+2)/2)) + end + return logvol +end + +function test_special(::Type{D}; rng::Union{AbstractRNG,Nothing} = nothing) where {T<:Real,D<:LKJ{T}} @testset "LKJ mode" begin - @test mode(LKJ(5, 1.5)) == mean(LKJ(5, 1.5)) - @test_throws DomainError mode( LKJ(5, 0.5) ) + @test mode(LKJ(5, T(3//2))) == mean(LKJ(5, T(3//2))) + @test_throws DomainError mode(LKJ(5, T(1//2))) end @testset "LKJ marginals" begin d = 4 - η = abs(3randn()) - G = LKJ(d, η) + G = _rand_dist(D, d, d; rng=rng) M = 10000 - α = 0.05 - L = sum(1:(d - 1)) + α = 0.025 + L = (d * (d - 1)) >> 1 ρ = Distributions._marginal(G) - mymats = zeros(d, d, M) + mymats = Array{T}(undef, d, d, M) for m in 1:M - mymats[:, :, m] = rand(G) + mymats[:, :, m] = rng === nothing ? 
rand(G) : rand(rng, G) end - for i in 1:d - for j in 1:i-1 - @test pvalue_kolmogorovsmirnoff(mymats[i, j, :], ρ) >= α / L + # Compute p-values of the KS tests for each lower-triangular entry + pvalues = Vector{Float64}(undef, L) + k = 0 + for j in 1:d + for i in (j+1):d + pvalues[k += 1] = pvalue_kolmogorovsmirnoff(@view(mymats[i, j, :]), ρ) end end + # Perform test with Bonferroni-Holm correction + sort!(pvalues) + for (i, p) in enumerate(pvalues) + @test p > α / (L + 1 - i) + end end @testset "LKJ integrating constant" begin # ============= # odd non-uniform # ============= d = 5 - η = 2.3 + η = T(23//10) lkj = LKJ(d, η) @test Distributions.lkj_vine_loginvconst(d, η) ≈ Distributions.lkj_onion_loginvconst(d, η) - @test Distributions.lkj_onion_loginvconst(d, η) ≈ Distributions.lkj_loginvconst_alt(d, η) + @test Distributions.lkj_onion_loginvconst(d, η) ≈ lkj_loginvconst_alt(d, η) @test lkj.logc0 == -Distributions.lkj_onion_loginvconst(d, η) # ============= # odd uniform # ============= d = 5 - η = 1.0 + η = T(1) lkj = LKJ(d, η) @test Distributions.lkj_vine_loginvconst(d, η) ≈ Distributions.lkj_onion_loginvconst(d, η) - @test Distributions.lkj_onion_loginvconst(d, η) ≈ Distributions.lkj_onion_loginvconst_uniform_odd(d, Float64) - @test Distributions.lkj_vine_loginvconst(d, η) ≈ Distributions.lkj_vine_loginvconst_uniform(d) - @test Distributions.lkj_onion_loginvconst(d, η) ≈ Distributions.lkj_loginvconst_alt(d, η) - @test Distributions.lkj_onion_loginvconst(d, η) ≈ Distributions.corr_logvolume(d) - @test lkj.logc0 == -Distributions.lkj_onion_loginvconst_uniform_odd(d, Float64) + @test Distributions.lkj_onion_loginvconst(d, η) ≈ Distributions.lkj_onion_loginvconst_uniform_odd(d, η) + @test Distributions.lkj_vine_loginvconst(d, η) ≈ lkj_vine_loginvconst_uniform(d, η) + @test Distributions.lkj_onion_loginvconst(d, η) ≈ lkj_loginvconst_alt(d, η) + @test Distributions.lkj_onion_loginvconst(d, η) ≈ corr_logvolume(d, η) + @test lkj.logc0 == -Distributions.lkj_onion_loginvconst_uniform_odd(d, η) # ============= # even non-uniform # ============= d = 6 - η = 2.3 + η = T(23//10) lkj = LKJ(d, η) @test Distributions.lkj_vine_loginvconst(d, η) ≈ Distributions.lkj_onion_loginvconst(d, η) - @test Distributions.lkj_onion_loginvconst(d, η) ≈ Distributions.lkj_loginvconst_alt(d, η) + @test Distributions.lkj_onion_loginvconst(d, η) ≈ lkj_loginvconst_alt(d, η) @test lkj.logc0 == -Distributions.lkj_onion_loginvconst(d, η) # ============= # even uniform # ============= d = 6 - η = 1.0 + η = T(1) lkj = LKJ(d, η) @test Distributions.lkj_vine_loginvconst(d, η) ≈ Distributions.lkj_onion_loginvconst(d, η) - @test Distributions.lkj_onion_loginvconst(d, η) ≈ Distributions.lkj_onion_loginvconst_uniform_even(d, Float64) - @test Distributions.lkj_vine_loginvconst(d, η) ≈ Distributions.lkj_vine_loginvconst_uniform(d) - @test Distributions.lkj_onion_loginvconst(d, η) ≈ Distributions.lkj_loginvconst_alt(d, η) - @test Distributions.lkj_onion_loginvconst(d, η) ≈ Distributions.corr_logvolume(d) - @test lkj.logc0 == -Distributions.lkj_onion_loginvconst_uniform_even(d, Float64) + @test Distributions.lkj_onion_loginvconst(d, η) ≈ Distributions.lkj_onion_loginvconst_uniform_even(d, η) + @test Distributions.lkj_vine_loginvconst(d, η) ≈ lkj_vine_loginvconst_uniform(d, η) + @test Distributions.lkj_onion_loginvconst(d, η) ≈ lkj_loginvconst_alt(d, η) + @test Distributions.lkj_onion_loginvconst(d, η) ≈ corr_logvolume(d, η) + @test lkj.logc0 == -Distributions.lkj_onion_loginvconst_uniform_even(d, η) end @testset "check integrating constant 
    @testset "check integrating constant as a volume" begin
        # d = 2: Lebesgue measure of the set of correlation matrices is 2.
@@ -512,15 +662,16 @@ end
 #  =============================================================================
 #  6. main method
 #  =============================================================================

-function test_matrixvariate(dist::Type{<:MatrixDistribution},
+function test_matrixvariate(::Type{D},
                             n::Integer,
                             p::Integer,
-                            M::Integer)
-    test_distr(dist, n, p, M)
-    test_against_univariate(dist)
-    test_against_multivariate(dist, n, p)
-    test_against_stan(dist)
-    test_special(dist)
+                            M::Integer;
+                            rng::Union{AbstractRNG,Nothing} = nothing) where {D<:MatrixDistribution}
+    test_distr(D, n, p, M; rng=rng)
+    test_against_univariate(D; rng=rng)
+    test_against_multivariate(D, n, p; rng=rng)
+    test_against_stan(D)
+    test_special(D; rng=rng)
     nothing
 end
@@ -535,12 +686,12 @@ matrixvariates = [(MatrixNormal, 2, 4, 10^5),
                   (MatrixBeta, 3, 3, 10^5),
                   (MatrixFDist, 3, 3, 10^5),
                   (LKJ, 3, 3, 10^5)]
-
-for distribution in matrixvariates
-    dist, n, p, M = distribution
-    println("    testing $(dist)")
-    @testset "$(dist)" begin
-        test_matrixvariate(dist, n, p, M)
+for (D, n, p, M) in matrixvariates
+    println("    testing $(D)")
+    @testset "$(D) ($(rng === nothing ? "without" : "with") RNG)" for rng in (nothing, Random.default_rng())
+        @testset for T in (Float32, Float64)
+            test_matrixvariate(D{T}, n, p, M; rng=rng)
+        end
     end
 end
 end
diff --git a/test/mixture.jl b/test/mixture.jl
index d2b2742ebc..967477e3d0 100644
--- a/test/mixture.jl
+++ b/test/mixture.jl
@@ -1,4 +1,4 @@
-using Distributions, Random
+using Distributions, LinearAlgebra, Random
 using Test
 using LinearAlgebra
 using ForwardDiff: Dual
@@ -8,29 +8,22 @@ using ForwardDiff: Dual
 function test_mixture(g::UnivariateMixture, n::Int, ns::Int,
                       rng::Union{AbstractRNG, Missing} = missing)
-    if g isa UnivariateGMM
-        T = eltype(g.means)
-    else
-        T = eltype(typeof(g))
-    end
-    X = zeros(T, n)
-    for i = 1:n
-        X[i] = rand(g)
-    end
+    X = rng === missing ? [rand(g) for _ in 1:n] : [rand(rng, g) for _ in 1:n]
+    @test eltype(X) === @test_deprecated(eltype(g))

     K = ncomponents(g)
     pr = @inferred(probs(g))
     @assert length(pr) == K

     # mean
-    mu = 0.0
+    mu = zero(float(partype(g)))
     for k = 1:K
         mu += pr[k] * mean(component(g, k))
     end
     @test @inferred(mean(g)) ≈ mu

     # evaluation of cdf
-    cf = zeros(T, n)
+    cf = zeros(eltype(X), n)
     for k = 1:K
         c_k = component(g, k)
         for i = 1:n
@@ -44,8 +37,8 @@ function test_mixture(g::UnivariateMixture, n::Int, ns::Int,
     @test Base.Fix1(cdf, g).(X) ≈ cf

     # evaluation
-    P0 = zeros(T, n, K)
-    LP0 = zeros(T, n, K)
+    P0 = zeros(eltype(X), n, K)
+    LP0 = zeros(eltype(X), n, K)
     for k = 1:K
         c_k = component(g, k)
         for i = 1:n
@@ -78,15 +71,14 @@ function test_mixture(g::UnivariateMixture, n::Int, ns::Int,
     @test @inferred(median(g)) ≈ quantile(g, 1//2)

     # sampling
-    # sampling does not work with `Float32` since `AliasTable` does not support `Float32`
-    # Ref: https://github.com/JuliaStats/StatsBase.jl/issues/158
-    if T <: AbstractFloat && eltype(probs(g)) === Float64
+    # sampling does not work with `Dual`s since `AliasTable` does not support them
+    if eltype(X) <: AbstractFloat
         if ismissing(rng)
             Xs = rand(g, ns)
         else
             Xs = rand(rng, g, ns)
         end
-        @test isa(Xs, Vector{T})
+        @test Xs isa Vector{float(partype(g))}
         @test length(Xs) == ns
         @test isapprox(mean(Xs), mean(g), atol=0.01)
     end
diff --git a/test/multivariate/dirichlet.jl b/test/multivariate/dirichlet.jl
index 998dac8533..97b9d2a399 100644
--- a/test/multivariate/dirichlet.jl
+++ b/test/multivariate/dirichlet.jl
@@ -1,6 +1,7 @@
 # Tests for Dirichlet distribution

-using Distributions
+using Distributions
+using SpecialFunctions
 using Test, Random, LinearAlgebra
 using ChainRulesCore
 using ChainRulesTestUtils
@@ -18,7 +19,7 @@ rng = MersenneTwister(123)
        d = Dirichlet(3, T(2))

        @test length(d) == 3
-        @test eltype(d) === T
+        @test @test_deprecated(eltype(d)) === float(T)
        @test d.alpha == [2, 2, 2]
        @test d.alpha0 == 6
@@ -54,7 +55,7 @@ rng = MersenneTwister(123)
        v = [2, 1, 3]
        d = Dirichlet(T.(v))

-        @test eltype(d) === T
+        @test @test_deprecated(eltype(d)) === float(T)
        @test Dirichlet([2, 1, 3]).alpha == d.alpha
        @test length(d) == length(v)
@@ -93,7 +94,7 @@ rng = MersenneTwister(123)
        v = [2, 1, 3]
        d = Dirichlet(Float32.(v))

-        @test eltype(d) === Float32
+        @test @test_deprecated(eltype(d)) === Float32
        x = func[1](d)
        @test isa(x, Vector{Float32})
diff --git a/test/multivariate/jointorderstatistics.jl b/test/multivariate/jointorderstatistics.jl
index d5d65a752e..14b0a6bba3 100644
--- a/test/multivariate/jointorderstatistics.jl
+++ b/test/multivariate/jointorderstatistics.jl
@@ -52,7 +52,7 @@ using Distributions, LinearAlgebra, Random, SpecialFunctions, Statistics, Test
            @test length(d) == length(r)
            @test params(d) == (params(dist)..., d.n, d.ranks)
            @test partype(d) === partype(dist)
-            @test eltype(d) === eltype(dist)
+            @test @test_deprecated(eltype(d)) === @test_deprecated(eltype(dist))

            length(r) == n && @test JointOrderStatistics(dist, n) == d
        end
@@ -94,7 +94,7 @@ using Distributions, LinearAlgebra, Random, SpecialFunctions, Statistics, Test
                        logpdf(dist, xi) + logpdf(dist, xj)
                    )
                )
-                @test logpdf(d, x) ≈ lp
+                @test logpdf(d, x) ≈ lp rtol=10 * sqrt(eps(T))
                @test pdf(d, x) ≈ exp(lp)
            elseif collect(r) == 1:n
                @test logpdf(d, x) ≈ sum(Base.Fix1(logpdf, d.dist), x) + loggamma(T(n + 1))
diff --git a/test/multivariate/mvlogitnormal.jl b/test/multivariate/mvlogitnormal.jl
index 109ab4f32c..5fdfd1ce01 100644
--- a/test/multivariate/mvlogitnormal.jl
+++ b/test/multivariate/mvlogitnormal.jl
@@ -15,8 +15,8 @@ function test_mvlogitnormal(d::MvLogitNormal; nsamples::Int=10^6)
    @test length(d) == length(dnorm) + 1
    @test params(d) == params(dnorm)
    @test partype(d) == partype(dnorm)
-    @test eltype(d) == eltype(dnorm)
-    @test eltype(typeof(d)) == eltype(typeof(dnorm))
+    @test @test_deprecated(eltype(d)) === @test_deprecated(eltype(dnorm))
+    @test @test_deprecated(eltype(typeof(d))) === @test_deprecated(eltype(typeof(dnorm)))
    @test location(d) == mean(dnorm)
    @test minimum(d) == fill(0, length(d))
    @test maximum(d) == fill(1, length(d))
diff --git a/test/multivariate/product.jl b/test/multivariate/product.jl
index ada4302c12..f7d0a34c90 100644
--- a/test/multivariate/product.jl
+++ b/test/multivariate/product.jl
@@ -20,7 +20,7 @@ using Distributions: Product
    @test d_product isa Product
    # Check that methods for `Product` are consistent.
    @test length(d_product) == length(ds)
-    @test eltype(d_product) === eltype(ds[1])
+    @test @test_deprecated(eltype(d_product)) === @test_deprecated(eltype(ds[1]))
    @test @inferred(logpdf(d_product, x)) ≈ sum(logpdf.(ds, x))
    @test mean(d_product) == mean.(ds)
    @test std(d_product) == std.(ds)
@@ -44,7 +44,7 @@ end
    @test d_product isa Product
    # Check that methods for `Product` are consistent.
    @test length(d_product) == length(ds)
-    @test eltype(d_product) === eltype(ds[1])
+    @test @test_deprecated(eltype(d_product)) === @test_deprecated(eltype(ds[1]))
    @test @inferred(logpdf(d_product, x)) ≈ sum(logpdf.(ds, x))
    @test mean(d_product) == mean.(ds)
    @test std(d_product) == std.(ds)
@@ -75,7 +75,7 @@ end
    @test d_product isa Product
    # Check that methods for `Product` are consistent.
    @test length(d_product) == length(ds)
-    @test eltype(d_product) === eltype(ds[1])
+    @test @test_deprecated(eltype(d_product)) === @test_deprecated(eltype(ds[1]))
    @test @inferred(logpdf(d_product, x)) ≈ sum(logpdf.(ds, x))
    @test mean(d_product) == mean.(ds)
    @test std(d_product) == std.(ds)
diff --git a/test/namedtuple/productnamedtuple.jl b/test/namedtuple/productnamedtuple.jl
index 24ca59afad..60ad5e73a4 100644
--- a/test/namedtuple/productnamedtuple.jl
+++ b/test/namedtuple/productnamedtuple.jl
@@ -68,7 +68,7 @@ using Test
            (x=product_distribution((x=Normal(), y=Gamma())),),
        ]
            d = ProductNamedTupleDistribution(nt)
-            @test eltype(d) === eltype(rand(d))
+            @test @test_deprecated(eltype(d)) === eltype(rand(d))
        end
    end
@@ -168,7 +168,7 @@ using Test
        d = ProductNamedTupleDistribution(nt)
        rng = MersenneTwister(973)
        x1 = @inferred rand(rng, d)
-        @test eltype(x1) === eltype(d)
+        @test eltype(x1) === @test_deprecated(eltype(d))
        rng = MersenneTwister(973)
        x2 = (
            x=rand(rng, nt.x), y=rand(rng, nt.y), z=rand(rng, nt.z), w=rand(rng, nt.w)
diff --git a/test/product.jl b/test/product.jl
index 54f8f9aefb..e3c4f20844 100644
--- a/test/product.jl
+++ b/test/product.jl
@@ -23,7 +23,7 @@ using LinearAlgebra
    # Check that methods for `ProductDistribution` are consistent.
    for (ds, d_product) in ((ds1, d_product1), (ds2, d_product2))
        @test length(d_product) == length(ds)
-        @test eltype(d_product) === eltype(ds[1])
+        @test @test_deprecated(eltype(d_product)) === @test_deprecated(eltype(ds[1]))
        @test mean(d_product) == mean.(ds)
        @test std(d_product) == std.(ds)
        @test var(d_product) == var.(ds)
@@ -49,22 +49,22 @@ end
    # d_product1 = @inferred(product_distribution(ds1)) # when `Product` is removed
    d_product1 = @inferred(Distributions.ProductDistribution(ds1))
-    @test d_product1 isa Distributions.VectorOfUnivariateDistribution{<:Vector,Continuous,Float64}
+    @test d_product1 isa Distributions.VectorOfUnivariateDistribution{<:Vector,Continuous}

    d_product2 = @inferred(product_distribution(ntuple(i -> Uniform(0.0, ubound[i]), 11)...))
-    @test d_product2 isa Distributions.VectorOfUnivariateDistribution{<:Tuple,Continuous,Float64}
+    @test d_product2 isa Distributions.VectorOfUnivariateDistribution{<:Tuple,Continuous}

    ds3 = Fill(Uniform(0.0, first(ubound)), N)
    # Replace with
    # d_product3 = @inferred(product_distribution(ds3)) # when `Product` is removed
    d_product3 = @inferred(Distributions.ProductDistribution(ds3))
-    @test d_product3 isa Distributions.VectorOfUnivariateDistribution{<:Fill,Continuous,Float64}
+    @test d_product3 isa Distributions.VectorOfUnivariateDistribution{<:Fill,Continuous}

    # Check that methods for `VectorOfUnivariateDistribution` are consistent.
    for (ds, d_product) in ((ds1, d_product1), (ds1, d_product2), (ds3, d_product3))
        @test length(d_product) == length(ds)
-        @test eltype(d_product) === eltype(ds[1])
+        @test @test_deprecated(eltype(d_product)) === @test_deprecated(eltype(ds[1]))
        @test @inferred(mean(d_product)) == mean.(ds)
        @test @inferred(std(d_product)) == std.(ds)
        @test @inferred(var(d_product)) == var.(ds)
@@ -100,22 +100,22 @@ end
    # d_product1 = @inferred(product_distribution(ds1)) # when `Product` is removed
    d_product1 = @inferred(Distributions.ProductDistribution(ds1))
-    @test d_product1 isa Distributions.VectorOfUnivariateDistribution{<:Vector{<:DiscreteNonParametric},Discrete,eltype(a)}
+    @test d_product1 isa Distributions.VectorOfUnivariateDistribution{<:Vector{<:DiscreteNonParametric},Discrete}

    d_product2 = @inferred(product_distribution(ntuple(_ -> DiscreteNonParametric(a, [0.5, 0.5]), 11)...))
-    @test d_product2 isa Distributions.VectorOfUnivariateDistribution{<:NTuple{N,<:DiscreteNonParametric},Discrete,eltype(a)}
+    @test d_product2 isa Distributions.VectorOfUnivariateDistribution{<:NTuple{N,<:DiscreteNonParametric},Discrete}

    ds3 = Fill(DiscreteNonParametric(a, [0.5, 0.5]), N)
    # Replace with
    # d_product3 = @inferred(product_distribution(ds3)) # when `Product` is removed
    d_product3 = @inferred(Distributions.ProductDistribution(ds3))
-    @test d_product3 isa Distributions.VectorOfUnivariateDistribution{<:Fill{<:DiscreteNonParametric,1},Discrete,eltype(a)}
+    @test d_product3 isa Distributions.VectorOfUnivariateDistribution{<:Fill{<:DiscreteNonParametric,1},Discrete}

    # Check that methods for `VectorOfUnivariateDistribution` are consistent.
    for (ds, d_product) in ((ds1, d_product1), (ds1, d_product3), (ds3, d_product2))
        @test length(d_product) == length(ds)
-        @test eltype(d_product) === eltype(ds[1])
+        @test @test_deprecated(eltype(d_product)) === @test_deprecated(eltype(ds[1]))
        @test @inferred(mean(d_product)) == mean.(ds)
        @test @inferred(std(d_product)) == std.(ds)
        @test @inferred(var(d_product)) == var.(ds)
@@ -146,12 +146,12 @@ end
    ds = (Bernoulli(0.3), Uniform(0.0, 0.7), Categorical([0.4, 0.2, 0.4]))
    d_product = @inferred(product_distribution(ds...))
-    @test d_product isa Distributions.VectorOfUnivariateDistribution{<:Tuple,Continuous,Float64}
+    @test d_product isa Distributions.VectorOfUnivariateDistribution{<:Tuple,Continuous}

    ds_vec = vcat(ds...)

    @test length(d_product) == 3
-    @test eltype(d_product) === Float64
+    @test @test_deprecated(eltype(d_product)) === Float64
    @test @inferred(mean(d_product)) == mean.(ds_vec)
    @test @inferred(std(d_product)) == std.(ds_vec)
    @test @inferred(var(d_product)) == var.(ds_vec)
@@ -182,16 +182,16 @@ end
    ds1 = Uniform.(0.0, ubound)
    d_product1 = @inferred(product_distribution(ds1))
-    @test d_product1 isa Distributions.MatrixOfUnivariateDistribution{<:Matrix{<:Uniform},Continuous,Float64}
+    @test d_product1 isa Distributions.MatrixOfUnivariateDistribution{<:Matrix{<:Uniform},Continuous}

    ds2 = Fill(Uniform(0.0, first(ubound)), M, N)
    d_product2 = @inferred(product_distribution(ds2))
-    @test d_product2 isa Distributions.MatrixOfUnivariateDistribution{<:Fill{<:Uniform,2},Continuous,Float64}
+    @test d_product2 isa Distributions.MatrixOfUnivariateDistribution{<:Fill{<:Uniform,2},Continuous}

    # Check that methods for `MatrixOfUnivariateDistribution` are consistent.
    for (ds, d_product) in ((ds1, d_product1), (ds2, d_product2))
        @test size(d_product) == size(ds)
-        @test eltype(d_product) === eltype(ds[1])
+        @test @test_deprecated(eltype(d_product)) === @test_deprecated(eltype(ds[1]))
        @test @inferred(mean(d_product)) == mean.(ds)
        @test @inferred(var(d_product)) == var.(ds)
        @test @inferred(cov(d_product)) == Diagonal(vec(var.(ds)))
@@ -220,16 +220,16 @@ end
    ds1 = Dirichlet.(alphas)
    d_product1 = @inferred(product_distribution(ds1))
-    @test d_product1 isa Distributions.ProductDistribution{length(N) + 1,1,<:Array{<:Dirichlet{Float64},length(N)},Continuous,Float64}
+    @test d_product1 isa Distributions.ProductDistribution{length(N) + 1,1,<:Array{<:Dirichlet{Float64},length(N)},Continuous}

    ds2 = Fill(Dirichlet(first(alphas)), N...)
    d_product2 = @inferred(product_distribution(ds2))
-    @test d_product2 isa Distributions.ProductDistribution{length(N) + 1,1,<:Fill{<:Dirichlet{Float64},length(N)},Continuous,Float64}
+    @test d_product2 isa Distributions.ProductDistribution{length(N) + 1,1,<:Fill{<:Dirichlet{Float64},length(N)},Continuous}

    # Check that methods for `VectorOfMultivariateDistribution` are consistent.
    for (ds, d_product) in ((ds1, d_product1), (ds2, d_product2))
        @test size(d_product) == (length(ds[1]), size(ds)...)
-        @test eltype(d_product) === eltype(ds[1])
+        @test @test_deprecated(eltype(d_product)) === @test_deprecated(eltype(ds[1]))
        @test @inferred(mean(d_product)) == reshape(mapreduce(mean, (x, y) -> cat(x, y; dims=ndims(ds) + 1), ds), size(d_product))
        @test @inferred(var(d_product)) == reshape(mapreduce(var, (x, y) -> cat(x, y; dims=ndims(ds) + 1), ds), size(d_product))
        @test @inferred(cov(d_product)) == Diagonal(mapreduce(var, vcat, ds))
diff --git a/test/reshaped.jl b/test/reshaped.jl
index 54bf9be8d8..30dcd408b3 100644
--- a/test/reshaped.jl
+++ b/test/reshaped.jl
@@ -78,7 +78,7 @@

    # eltype
    for d in d1s
-        @test eltype(d) === eltype(d1)
+        @test @test_deprecated(eltype(d)) === @test_deprecated(eltype(d1))
    end

    # logpdf
diff --git a/test/runtests.jl b/test/runtests.jl
index f63a3d5e8e..e711435f33 100644
--- a/test/runtests.jl
+++ b/test/runtests.jl
@@ -73,6 +73,7 @@ const tests = [
    "univariate/discrete/poisson",
    "univariate/discrete/soliton",
    "univariate/continuous/skewnormal",
+    "univariate/continuous/beta",
    "univariate/continuous/chi",
    "univariate/continuous/chisq",
    "univariate/continuous/erlang",
@@ -130,8 +131,6 @@ const tests = [
    # "samplers/vonmisesfisher",
    # "show",
    # "truncated/loguniform",
-    # "univariate/continuous/beta",
-    # "univariate/continuous/beta",
    # "univariate/continuous/betaprime",
    # "univariate/continuous/biweight",
    # "univariate/continuous/cosine",
diff --git a/test/statsapi.jl b/test/statsapi.jl
index 37745c5223..4e15f7b9e1 100644
--- a/test/statsapi.jl
+++ b/test/statsapi.jl
@@ -1,8 +1,11 @@
 using Distributions
 using StatsAPI: pvalue
+using Random

 using Test

+Random.seed!(7362)
+
 @testset "pvalue" begin
    # For two discrete and two continuous distribution
    for dist in (Binomial(10, 0.3), Poisson(0.3), Normal(1.4, 2.1), Gamma(1.9, 0.8))
diff --git a/test/testutils.jl b/test/testutils.jl
index d1e215745a..339b5291af 100644
--- a/test/testutils.jl
+++ b/test/testutils.jl
@@ -163,7 +163,12 @@ function test_samples(s::Sampleable{Univariate, Discrete},      # the sampleable
        samples3 = [rand(rng3, s) for _ in 1:n]
        samples4 = [rand(rng4, s) for _ in 1:n]
    end
-    @test length(samples) == n
+    T = typeof(rand(s))
+    @test samples isa Vector{T}
+    @test samples2 isa Vector{T}
+    @test samples3 isa Vector{T}
+    @test samples4 isa Vector{T}
+    @test length(samples) == length(samples2) == length(samples3) == length(samples4) == n
    @test samples2 == samples
    @test samples3 == samples4
@@ -287,7 +292,12 @@ function test_samples(s::Sampleable{Univariate, Continuous},    # the sampleable
        samples3 = [rand(rng3, s) for _ in 1:n]
        samples4 = [rand(rng4, s) for _ in 1:n]
    end
-    @test length(samples) == n
+    T = typeof(rand(s))
+    @test samples isa Vector{T}
+    @test samples2 isa Vector{T}
+    @test samples3 isa Vector{T}
+    @test samples4 isa Vector{T}
+    @test length(samples) == length(samples2) == length(samples3) == length(samples4) == n
    @test samples2 == samples
    @test samples3 == samples4
@@ -348,9 +358,8 @@ end

 function get_evalsamples(d::DiscreteUnivariateDistribution, q::Float64)
    # samples for testing evaluation functions (even spacing)
-    T = eltype(typeof(d))
-    lv = (islowerbounded(d) ? minimum(d) : floor(T,quantile(d, q/2)))::T
-    hv = (isupperbounded(d) ? maximum(d) : ceil(T,cquantile(d, q/2)))::T
+    lv = islowerbounded(d) ? minimum(d) : quantile(d, q/2)
+    hv = isupperbounded(d) ? maximum(d) : cquantile(d, q/2)
    @assert lv <= hv
    return lv:hv
 end
diff --git a/test/types.jl b/test/types.jl
index 4eefd5d96d..05e2689c09 100644
--- a/test/types.jl
+++ b/test/types.jl
@@ -34,8 +34,8 @@ using ForwardDiff: Dual
            Distributions.mvtdist(one(T), Matrix{T}(I, 2, 2)),
        )
        for dist in dists
-            @test eltype(typeof(dist)) === T
-            @test eltype(rand(dist)) === eltype(dist)
+            @test @test_deprecated(eltype(typeof(dist))) === T
+            @test eltype(rand(dist)) === @test_deprecated(eltype(dist))
        end
    end
 end
diff --git a/test/univariate/continuous.jl b/test/univariate/continuous.jl
index 3a1da94b92..a52e75b09f 100644
--- a/test/univariate/continuous.jl
+++ b/test/univariate/continuous.jl
@@ -32,11 +32,11 @@ using ForwardDiff
    n64 = Normal(1., 0.1)
    nbig = Normal(big(pi), big(ℯ))

-    @test eltype(typeof(n32)) === Float32
+    @test @test_deprecated(eltype(typeof(n32))) === Float32
    @test eltype(rand(n32)) === Float32
    @test eltype(rand(n32, 4)) === Float32

-    @test eltype(typeof(n64)) === Float64
+    @test @test_deprecated(eltype(typeof(n64))) === Float64
    @test eltype(rand(n64)) === Float64
    @test eltype(rand(n64, 4)) === Float64
 end
diff --git a/test/univariate/continuous/beta.jl b/test/univariate/continuous/beta.jl
new file mode 100644
index 0000000000..1010682a5b
--- /dev/null
+++ b/test/univariate/continuous/beta.jl
@@ -0,0 +1,19 @@
+using Distributions
+using Test
+
+@testset "beta.jl" begin
+    # issue #1907
+    @testset "rand consistency" begin
+        for T in (Float32, Float64)
+            @test @inferred(rand(Beta(T(1), T(1)))) isa T
+            @test @inferred(rand(Beta(T(4//5), T(4//5)))) isa T
+            @test @inferred(rand(Beta(T(1), T(2)))) isa T
+            @test @inferred(rand(Beta(T(2), T(1)))) isa T
+
+            @test @inferred(eltype(rand(Beta(T(1), T(1)), 2))) === T
+            @test @inferred(eltype(rand(Beta(T(4//5), T(4//5)), 2))) === T
+            @test @inferred(eltype(rand(Beta(T(1), T(2)), 2))) === T
+            @test @inferred(eltype(rand(Beta(T(2), T(1)), 2))) === T
+        end
+    end
+end
diff --git a/test/univariate/continuous/chisq.jl b/test/univariate/continuous/chisq.jl
index 534b7aaf09..3e6d3ef2f2 100644
--- a/test/univariate/continuous/chisq.jl
+++ b/test/univariate/continuous/chisq.jl
@@ -3,7 +3,9 @@
    test_cgf(Chisq(1), (0.49, -1, -100, -1.0f6))
    test_cgf(Chisq(3), (0.49, -1, -100, -1.0f6))

-    for T in (Float32, Float64)
-        @test @inferred(rand(Chisq(T(1)))) isa T
+    for T in (Int, Float32, Float64)
+        S = float(T)
+        @test @inferred(rand(Chisq(T(1)))) isa S
+        @test @inferred(eltype(rand(Chisq(T(1)), 2))) === S
    end
 end
diff --git a/test/univariate/continuous/gumbel.jl b/test/univariate/continuous/gumbel.jl
index 470680ee2c..b474c5077b 100644
--- a/test/univariate/continuous/gumbel.jl
+++ b/test/univariate/continuous/gumbel.jl
@@ -1,8 +1,8 @@
 @testset "Gumbel" begin
    @testset "eltype" begin
-        @test eltype(Gumbel()) === Float64
-        @test eltype(Gumbel(1f0)) === Float32
-        @test eltype(Gumbel{Int}(0, 1)) === Int
+        @test @test_deprecated(eltype(Gumbel())) === Float64
+        @test @test_deprecated(eltype(Gumbel(1f0))) === Float32
+        @test @test_deprecated(eltype(Gumbel{Int}(0, 1))) === Float64
    end

    @testset "rand" begin
diff --git a/test/univariate/continuous/johnsonsu.jl b/test/univariate/continuous/johnsonsu.jl
index 716f1b1df8..009f28f472 100644
--- a/test/univariate/continuous/johnsonsu.jl
+++ b/test/univariate/continuous/johnsonsu.jl
@@ -6,8 +6,9 @@
        @test shape(d1) == 0.0
        @test scale(d1) == 10.0
        @test partype(d1) === Float64
-        @test eltype(d1) === Float64
-        @test rand(d1) isa Float64
+        @test @test_deprecated(eltype(d1)) === Float64
+        @test @inferred(rand(d1)) isa Float64
+        @test @inferred(eltype(rand(d1, 2))) === Float64

        @test median(d1) == quantile(d1, 0.5)
        x = Base.Fix1(quantile, d1).([0.25, 0.45, 0.60, 0.80, 0.90])
@@ -24,8 +25,9 @@
        @test shape(d1) == 10.0f0
        @test scale(d1) == 10.0f0
        @test partype(d1) === Float32
-        @test eltype(d1) === Float64
-        @test rand(d1) isa Float64
+        @test @test_deprecated(eltype(d1)) === Float32
+        @test @inferred(rand(d1)) isa Float32
+        @test @inferred(eltype(rand(d1, 2))) === Float32

        d1 = JohnsonSU(1.0, 1, 0, 1)
        @test Base.convert(JohnsonSU{Float64}, d1) === d1
diff --git a/test/univariate/continuous/logistic.jl b/test/univariate/continuous/logistic.jl
index 3eb0b8f2d0..1cd0772fde 100644
--- a/test/univariate/continuous/logistic.jl
+++ b/test/univariate/continuous/logistic.jl
@@ -1,2 +1,15 @@
-test_cgf(Logistic(0, 1), (-0.99,0.99, 1f-2, -1f-2))
-test_cgf(Logistic(100,10), (-0.099,0.099, 1f-2, -1f-2))
+using Distributions
+using Test
+
+@testset "Logistic" begin
+    test_cgf(Logistic(0, 1), (-0.99,0.99, 1f-2, -1f-2))
+    test_cgf(Logistic(100,10), (-0.099,0.099, 1f-2, -1f-2))
+
+    # issue 1082
+    @testset "rand consistency" begin
+        for T in (Float32, Float64, BigFloat)
+            @test @inferred(rand(Logistic(T(0), T(1)))) isa T
+            @test @inferred(rand(Logistic(T(0), T(1)), 5)) isa Vector{T}
+        end
+    end
+end
diff --git a/test/univariate/continuous/loguniform.jl b/test/univariate/continuous/loguniform.jl
index 3ba88f0216..c5e596de25 100644
--- a/test/univariate/continuous/loguniform.jl
+++ b/test/univariate/continuous/loguniform.jl
@@ -21,9 +21,9 @@ import Random
    end

    d = LogUniform(1,10)
-    @test eltype(d) === Float64
+    @test @test_deprecated(eltype(d)) === Float64
    @test 1 <= rand(rng, d) <= 10
-    @test rand(rng, d) isa eltype(d)
+    @test rand(rng, d) isa @test_deprecated(eltype(d))
    @test @inferred(quantile(d, 0)) ≈ 1
    @test quantile(d, 0.5) ≈ sqrt(10) # geomean
    @test quantile(d, 1) ≈ 10
diff --git a/test/univariate/continuous/rician.jl b/test/univariate/continuous/rician.jl
index a75397f891..e175500400 100644
--- a/test/univariate/continuous/rician.jl
+++ b/test/univariate/continuous/rician.jl
@@ -6,7 +6,7 @@
    @test shape(d1) == 0.0
    @test scale(d1) == 200.0
    @test partype(d1) === Float64
-    @test eltype(d1) === Float64
+    @test @test_deprecated(eltype(d1)) === Float64
    @test rand(d1) isa Float64

    d2 = Rayleigh(10.0)
@@ -35,7 +35,7 @@
    @test shape(d1) == 0.5f0
    @test scale(d1) == 300.0f0
    @test partype(d1) === Float32
-    @test eltype(d1) === Float64
+    @test @test_deprecated(eltype(d1)) === Float64
    @test rand(d1) isa Float64

    d1 = Rician()
diff --git a/test/univariate/continuous/semicircle.jl b/test/univariate/continuous/semicircle.jl
index 7079aa9490..8f587f671c 100644
--- a/test/univariate/continuous/semicircle.jl
+++ b/test/univariate/continuous/semicircle.jl
@@ -66,3 +66,11 @@ rng = StableRNG(123)
    pvalue = pvalue_kolmogorovsmirnoff(sample, semi)
    @test pvalue > 1e-2
 end
+
+@testset "rand consistency" begin
+    for T in (Int, Float32, Float64)
+        S = float(T)
+        @test @inferred(rand(Semicircle(T(1)))) isa S
+        @test @inferred(eltype(rand(Semicircle(T(1)), 2))) === S
+    end
+end
diff --git a/test/univariate/continuous/tdist.jl b/test/univariate/continuous/tdist.jl
index 857a2274ae..1c89710f9e 100644
--- a/test/univariate/continuous/tdist.jl
+++ b/test/univariate/continuous/tdist.jl
@@ -11,5 +11,6 @@ using Test

    for T in (Float32, Float64)
        @test @inferred(rand(TDist(T(1)))) isa T
+        @test @inferred(rand(TDist(T(1)), 5)) isa Vector{T}
    end
 end
diff --git a/test/univariate/continuous/uniform.jl b/test/univariate/continuous/uniform.jl
index e3a5d729ed..0d928d0870 100644
--- a/test/univariate/continuous/uniform.jl
+++ b/test/univariate/continuous/uniform.jl
@@ -114,4 +114,12 @@ using Test
            end
        end
    end
+
+    # issues 1252 and 1783
+    @testset "rand consistency" begin
+        for T in (Float32, Float64, BigFloat)
+            @test @inferred(rand(Uniform(T(0), T(1)))) isa T
+            @test @inferred(rand(Uniform(T(0), T(1)), 5)) isa Vector{T}
+        end
+    end
 end
diff --git a/test/univariate/discrete/bernoullilogit.jl b/test/univariate/discrete/bernoullilogit.jl
index 55560692a0..9dacf69671 100644
--- a/test/univariate/discrete/bernoullilogit.jl
+++ b/test/univariate/discrete/bernoullilogit.jl
@@ -10,7 +10,7 @@ using Test, Random
        @test d isa BernoulliLogit{typeof(logitp)}
        @test convert(typeof(d), d) === d
        @test convert(BernoulliLogit{Float16}, d) === BernoulliLogit(Float16(logitp))
-        @test eltype(typeof(d)) === Bool
+        @test @test_deprecated(eltype(typeof(d))) === Bool
        @test params(d) == (logitp,)
        @test partype(d) === typeof(logitp)
    end
diff --git a/test/univariate/locationscale.jl b/test/univariate/locationscale.jl
index 805589e8a1..b5a468c0a4 100644
--- a/test/univariate/locationscale.jl
+++ b/test/univariate/locationscale.jl
@@ -4,7 +4,7 @@ function test_location_scale(
    )
    d = Distributions.AffineDistribution(μ, σ, ρ)
    @test params(d) == (μ,σ,ρ)
-    @test eltype(d) === eltype(dref)
+    @test @test_deprecated(eltype(d)) === @test_deprecated(eltype(dref))

    # Different ways to construct the AffineDistribution object
    if dref isa DiscreteDistribution
@@ -110,11 +110,10 @@ function test_location_scale(
    @test invlogccdf(dtest, log(0.5)) ≈ invlogccdf(dref, log(0.5))
    @test invlogccdf(dtest, log(0.8)) ≈ invlogccdf(dref, log(0.8))

-    r = Array{float(eltype(dtest))}(undef, 200000)
-    if ismissing(rng)
-        rand!(dtest, r)
+    r = if ismissing(rng)
+        rand(dtest, 200_000)
    else
-        rand!(rng, dtest, r)
+        rand(rng, dtest, 200_000)
    end
    @test mean(r) ≈ mean(dref) atol=0.02
    @test std(r) ≈ std(dref) atol=0.02