Skip to content

fix accuracy of logcosh(::Union{Float16, Float32, Float64}) #101

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Open
wants to merge 11 commits into
base: master
Choose a base branch
from
54 changes: 54 additions & 0 deletions src/basicfuns.jl
Original file line number Diff line number Diff line change
Expand Up @@ -131,9 +131,63 @@ The implementation ensures `logcosh(-x) = logcosh(x)`.
"""
function logcosh(x::Real)
    y = abs(x)
    # Small |x| in machine precision goes through the dedicated polynomial
    # kernel for full accuracy; the generic identity below loses bits there
    # through cancellation. The `isa` test is resolved at compile time.
    if (x isa Union{Float32, Float64}) && (y < oftype(x, 0.7373046875))
        return logcosh_ker(x)
    end
    # logcosh(x) = |x| + log1p(exp(-2|x|)) - log(2), stable for large |x|.
    return y + log1pexp(-2y) - IrrationalConstants.logtwo
end

"""
logcosh_ker(x::Union{Float32, Float64})

The kernel of `logcosh`.

The polynomial coefficients were found using Sollya:

```sollya
prec = 500!;
points = 50001!;
accurate = log(cosh(x));
domain = [-0.125, 0.7373046875];
constrained_part = (x^2) / 2;
free_monomials_32 = [|4, 6, 8, 10, 12|];
free_monomials_64 = [|4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24|];
polynomial_32 = fpminimax(accurate, free_monomials_32, [|single...|], domain, constrained_part);
polynomial_64 = fpminimax(accurate, free_monomials_64, [|double...|], domain, constrained_part);
polynomial_32;
polynomial_64;
```
"""
# Minimax polynomial coefficients (in x²) for log(cosh(x)) on the kernel
# domain, produced with Sollya's `fpminimax` as described in the docstring
# above. Selected by element type via dispatch.
logcosh_ker_coefficients(::Type{Float32}) = (
    5f-1,
    -0.083333164f0,
    0.022217678f0,
    -0.0067060017f0,
    0.0020296266f0,
    -0.00044135848f0,
)
logcosh_ker_coefficients(::Type{Float64}) = (
    5e-1,
    -0.08333333333332801,
    0.02222222222164912,
    -0.0067460317245250445,
    0.0021869484500251714,
    -0.0007385985435311435,
    0.0002565500026777061,
    -9.084985367586575e-5,
    3.2348259905568986e-5,
    -1.1058814347469105e-5,
    3.16293199955507e-6,
    -5.312230207322749e-7,
)

function logcosh_ker(x::Union{Float32, Float64})
    # Even function: evaluate the polynomial in x² and scale by x²,
    # so logcosh_ker(-x) == logcosh_ker(x) holds exactly.
    x² = x * x
    return x² * evalpoly(x², logcosh_ker_coefficients(typeof(x)))
end

"""
$(SIGNATURES)

Expand Down
6 changes: 6 additions & 0 deletions test/basicfuns.jl
Original file line number Diff line number Diff line change
Expand Up @@ -107,6 +107,12 @@ end
@test @inferred(logcosh(x)) === x
@test @inferred(logabssinh(x)) === x
end

@testset "accuracy of `logcosh`" begin
    # Sample both float types over a range straddling the kernel/generic
    # branch boundary and require the worst-case error to stay below 3 ULPs.
    for T in (Float32, Float64)
        points = range(T(-3), T(3); length = 1000)
        @test ulp_error_maximum(logcosh, points) < 3
    end
end
end

@testset "log1psq" begin
Expand Down
51 changes: 51 additions & 0 deletions test/common/ULPError.jl
Original file line number Diff line number Diff line change
@@ -0,0 +1,51 @@
module ULPError

export ulp_error, ulp_error_maximum

"""
    ulp_error(accurate::AbstractFloat, approximate::AbstractFloat)

Distance between `approximate` and the reference value `accurate`, measured
in units in the last place (ULPs) of `approximate`, as a nonnegative
`Float32`. ULP errors are never needed to many digits, so single precision
suffices for the result.
"""
function ulp_error(accurate::AbstractFloat, approximate::AbstractFloat)
    # Floating-point edge cases: values that "agree" (both NaN, or infinities
    # of the same sign) count as zero error, disagreements as infinite error.
    if !(isfinite(accurate) && isfinite(approximate))
        accurate_nan = isnan(accurate)
        approximate_nan = isnan(approximate)
        if accurate_nan || approximate_nan
            return accurate_nan == approximate_nan ? 0f0 : Inf32
        end
        if isinf(approximate)
            agree = isinf(accurate) && signbit(accurate) == signbit(approximate)
            return agree ? 0f0 : Inf32
        end
        # Remaining case: `accurate` infinite, `approximate` finite. Fall
        # through — the subtraction below produces an infinite ULP count.
    end
    # Widen half/single precision so the subtraction is exact enough; leave
    # wider types alone to keep the cost down.
    acc = accurate isa Union{Float16, Float32} ? widen(accurate) : accurate
    return abs(Float32((approximate - acc) / eps(approximate))::Float32)
end

"""
    ulp_error(accurate, approximate, x::AbstractFloat)

ULP error of `approximate(x)`, using `accurate(x)` as the reference.
"""
function ulp_error(accurate, approximate, x::AbstractFloat)
    return ulp_error(accurate(x), approximate(x))
end

"""
    ulp_error(func, x::AbstractFloat)

ULP error of `func(x)`, taking `func` evaluated in `BigFloat` as the
reference implementation.
"""
function ulp_error(func::Func, x::AbstractFloat) where {Func}
    return ulp_error(func ∘ BigFloat, func, x)
end

"""
    ulp_error_maximum(func, iterator)

Largest ULP error of `func` over the evaluation points in `iterator`.
"""
function ulp_error_maximum(func::Func, iterator) where {Func}
    return maximum(Base.Fix1(ulp_error, func), iterator)
end

end
3 changes: 3 additions & 0 deletions test/runtests.jl
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,9 @@ using OffsetArrays
using Random
using Test

include("common/ULPError.jl")
using .ULPError

Random.seed!(1234)

include("basicfuns.jl")
Expand Down