# Moments of the truncated log-normal can be computed directly from the moment generating
# function of the truncated normal:
# Let Y ~ LogNormal(μ, σ) truncated to (a, b). Then log(Y) ~ Normal(μ, σ) truncated
# to (log(a), log(b)), and E[Y^n] = E[(e^log(Y))^n] = E[e^(n log(Y))] = mgf(log(Y), n).
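#
# As a concrete illustration (not part of the implementation): if Y ~ LogNormal(0, 1) is
# truncated to (1, e), then E[Y^2] = mgf(truncated(Normal(0, 1), 0, 1), 2), since
# log(1) = 0 and log(e) = 1.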

# Given `truncated(LogNormal(μ, σ), a, b)`, return `truncated(Normal(μ, σ), log(a), log(b))`
function _truncnorm(d::Truncated{<:LogNormal})
    μ, σ = params(d.untruncated)
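    # Map only the finite truncation bounds through log; a bound of `nothing` means the
    # distribution is untruncated on that side, so it is passed through unchanged.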
    a = d.lower === nothing ? nothing : log(minimum(d))
    b = d.upper === nothing ? nothing : log(maximum(d))
    return truncated(Normal(μ, σ), a, b)
end

mean(d::Truncated{<:LogNormal}) = mgf(_truncnorm(d), 1)

function var(d::Truncated{<:LogNormal})
    tn = _truncnorm(d)
    m1 = mgf(tn, 1)
    m2 = sqrt(mgf(tn, 2))
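    # Var[Y] = E[Y^2] - E[Y]^2; with m2 = √E[Y^2] this is the difference of squares below.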
    return (m2 - m1) * (m2 + m1)
end

function skewness(d::Truncated{<:LogNormal})
    tn = _truncnorm(d)
    m1 = mgf(tn, 1)
    m2 = sqrt(mgf(tn, 2))
    m3 = mgf(tn, 3)
    v = (m2 - m1) * (m2 + m1)
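    # Third central moment E[(Y - m1)^3] = E[Y^3] - 3*m1*Var[Y] - m1^3, standardized by Var[Y]^(3/2)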
    return (m3 - 3 * m1 * v - m1^3) / (v * sqrt(v))
end

function kurtosis(d::Truncated{<:LogNormal})
    tn = _truncnorm(d)
    m1 = mgf(tn, 1)
    m2 = mgf(tn, 2)
    m3 = mgf(tn, 3)
    m4 = mgf(tn, 4)
    sm2 = sqrt(m2)
    v = (sm2 - m1) * (sm2 + m1)
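    # Fourth central moment from raw moments: E[(Y - m1)^4] = m4 - 4*m3*m1 + 6*m2*m1^2 - 3*m1^4,
    # evaluated with `evalpoly`; dividing by Var[Y]^2 and subtracting 3 gives the excess kurtosis.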
    return evalpoly(m1, (m4, -4m3, 6m2, 0, -3)) / v^2 - 3
end

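# Quantiles commute with the strictly increasing map exp, so the median of Y is exp of the
# median of log(Y).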
median(d::Truncated{<:LogNormal}) = exp(median(_truncnorm(d)))
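
# Rough numerical sanity check of the moments above (illustrative only; assumes the
# exported Distributions.jl API and loose Monte Carlo tolerances, not part of the
# implementation):
#
#     d = truncated(LogNormal(0.3, 0.4), 0.5, 2.0)
#     x = rand(d, 10^7)
#     isapprox(mean(d), mean(x); rtol=1e-3)  # should hold up to sampling error
#     isapprox(var(d), var(x); rtol=1e-2)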

# TODO: The entropy can be written "directly" as well, according to Mathematica, but
# the expression for it fills me with regret. There are some recognizable components,
# so a sufficiently motivated person could try to manually simplify it into something
# comprehensible. For reference, you can obtain the entropy with Mathematica like so:
#
#     d = TruncatedDistribution[{a, b}, LogNormalDistribution[m, s]];
#     Expectation[-LogLikelihood[d, {x}], Distributed[x, d],
#         Assumptions -> Element[x | m | s | a | b, Reals] && s > 0 && 0 < a < x < b]