diff --git a/src/ADNLPProblems/bard.jl b/src/ADNLPProblems/bard.jl
index 3fa0ee76..cf4d8d30 100644
--- a/src/ADNLPProblems/bard.jl
+++ b/src/ADNLPProblems/bard.jl
@@ -8,7 +8,8 @@ end
 function bard(::Val{:nlp}; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where {T}
   y = Rational{Int}[0.14 0.18 0.22 0.25 0.29 0.32 0.35 0.39 0.37 0.58 0.73 0.16 1.34 2.10 4.39]
   function f(x)
-    return 1 // 2 * sum(y[i] - (x[1] + i / ((16 - i) * x[2] + min(i, 16 - i) * x[3])) for i = 1:15)
+    return 1 // 2 *
+           sum((y[i] - (x[1] + i / ((16 - i) * x[2] + min(i, 16 - i) * x[3])))^2 for i = 1:15)
   end
   x0 = ones(T, 3)
   return ADNLPModels.ADNLPModel(f, x0, name = "bard"; kwargs...)
diff --git a/src/ADNLPProblems/hs6.jl b/src/ADNLPProblems/hs6.jl
index 96b9e95c..0625fae2 100644
--- a/src/ADNLPProblems/hs6.jl
+++ b/src/ADNLPProblems/hs6.jl
@@ -23,7 +23,7 @@ end
 
 function hs6(::Val{:nls}; n::Int = default_nvar, type::Type{T} = Float64, kwargs...) where {T}
   function F!(r, x)
-    r[1] = 1 // 2 * (x[1] - 1)^2
+    r[1] = (x[1] - 1)
     return r
   end
   function c!(cx, x)
diff --git a/src/ADNLPProblems/penalty1.jl b/src/ADNLPProblems/penalty1.jl
index 207880b3..1b47fe0c 100644
--- a/src/ADNLPProblems/penalty1.jl
+++ b/src/ADNLPProblems/penalty1.jl
@@ -22,6 +22,6 @@ function penalty1(::Val{:nls}; n::Int = default_nvar, type::Type{T} = Float64, k
     r[n + 1] = sum(x[j]^2 for j = 1:n) - 1 // 4
     return r
   end
-  x0 = ones(T, n)
+  x0 = T[j for j = 1:n]
   return ADNLPModels.ADNLSModel!(F!, x0, n + 1, name = "penalty1-nls"; kwargs...)
 end
diff --git a/src/ADNLPProblems/watson.jl b/src/ADNLPProblems/watson.jl
index 57b4a485..56e1f275 100644
--- a/src/ADNLPProblems/watson.jl
+++ b/src/ADNLPProblems/watson.jl
@@ -20,10 +20,11 @@ function watson(::Val{:nlp}; n::Int = default_nvar, type::Type{T} = Float64, kwa
       sum((j - 1) * x[j] * x[1]^(j - 2) for j = 2:n) -
       sum(x[j] * x[1]^(j - 1) for j = 1:n)^2 - 1
     )^2 +
-    1 // 2 * (
+    1 // 2 *
+    (
       sum((j - 1) * x[j] * (x[2] - x[1]^2 - 1)^(j - 2) for j = 2:n) -
       sum(x[j] * (x[2] - x[1]^2 - 1)^(j - 1) for j = 1:n)^2 - 1
-    )
+    )^2
   end
   x0 = zeros(T, n)
   return ADNLPModels.ADNLPModel(f, x0, name = "watson"; kwargs...)
diff --git a/src/PureJuMP/bard.jl b/src/PureJuMP/bard.jl
index dd54e0cf..cfe0e4ec 100644
--- a/src/PureJuMP/bard.jl
+++ b/src/PureJuMP/bard.jl
@@ -23,7 +23,7 @@ function bard(args...; n::Int = default_nvar, m::Int = 2n, kwargs...)
   @objective(
     nlp,
     Min,
-    0.5 * sum(y[i] - (x[1] + i / ((16 - i) * x[2] + min(i, 16 - i) * x[3])) for i = 1:15)
+    0.5 * sum((y[i] - (x[1] + i / ((16 - i) * x[2] + min(i, 16 - i) * x[3])))^2 for i = 1:15)
   )
 
   return nlp
diff --git a/src/PureJuMP/hs6.jl b/src/PureJuMP/hs6.jl
index 23014e4c..c9b18111 100644
--- a/src/PureJuMP/hs6.jl
+++ b/src/PureJuMP/hs6.jl
@@ -19,7 +19,7 @@ function hs6(args...; kwargs...)
   x0 = [-1.2, 1]
   @variable(nlp, x[i = 1:2], start = x0[i])
 
-  @objective(nlp, Min, 0.5 * (1 - x[1])^2)
+  @objective(nlp, Min, 0.5 * (x[1] - 1)^2)
 
   @constraint(nlp, 10 * (x[2] - x[1]^2) == 0)
 
diff --git a/src/PureJuMP/watson.jl b/src/PureJuMP/watson.jl
index f1f6028c..6dc9def6 100644
--- a/src/PureJuMP/watson.jl
+++ b/src/PureJuMP/watson.jl
@@ -37,10 +37,11 @@ function watson(args...; n::Int = default_nvar, kwargs...)
      (
        sum((j - 1) * x[j] * x[1]^(j - 2) for j = 2:n) -
        sum(x[j] * x[1]^(j - 1) for j = 1:n)^2 - 1
      )^2 +
-      0.5 * (
+      0.5 *
+      (
        sum((j - 1) * x[j] * (x[2] - x[1]^2 - 1)^(j - 2) for j = 2:n) -
        sum(x[j] * (x[2] - x[1]^2 - 1)^(j - 1) for j = 1:n)^2 - 1
-      )
+      )^2
   )
 
   return nlp
diff --git a/test/runtests.jl b/test/runtests.jl
index 74538ca5..22ce11bf 100644
--- a/test/runtests.jl
+++ b/test/runtests.jl
@@ -14,17 +14,22 @@ addprocs(np - 1)
   [n for n in names(mod) if isdefined(mod, n)]
 end
 
-const list_problems =
+@everywhere const list_problems =
   setdiff(union(defined_names(ADNLPProblems), defined_names(PureJuMP)), [:PureJuMP, :ADNLPProblems])
 
+@testset "Test that all problems have a meta" begin
+  @test sort(list_problems) == sort(Symbol.(OptimizationProblems.meta[!, :name]))
+end
+
 # The problems included should be carefully argumented and issues
 # to create them added.
 # TODO: tests are limited for JuMP-only problems
-const list_problems_not_ADNLPProblems =
+@everywhere const list_problems_not_ADNLPProblems =
   Symbol[:catmix, :gasoil, :glider, :methanol, :minsurf, :pinene, :rocket, :steering, :torsion]
-const list_problems_ADNLPProblems = setdiff(list_problems, list_problems_not_ADNLPProblems)
-const list_problems_not_PureJuMP = Symbol[]
-const list_problems_PureJuMP = setdiff(list_problems, list_problems_not_PureJuMP)
+@everywhere const list_problems_ADNLPProblems =
+  setdiff(list_problems, list_problems_not_ADNLPProblems)
+@everywhere const list_problems_not_PureJuMP = Symbol[]
+@everywhere const list_problems_PureJuMP = setdiff(list_problems, list_problems_not_PureJuMP)
 
 include("test-defined-problems.jl")
 include("test-utils.jl")
@@ -45,7 +50,7 @@ end
     error("Problem $(prob) is not defined in $mod on pid $(myid()).")
   end
   ctor = getfield(mod, prob)
-  return MathOptNLPModel(ctor(; kwargs...))
+  return MathOptNLPModel(ctor(; kwargs...); name = "$prob")
 end
 
 @everywhere function make_ad_nlp(prob::Symbol; kwargs...)
@@ -57,6 +62,8 @@ end
   return ctor(matrix_free = true; kwargs...)
 end
 
+include("test-in-place-residual.jl")
+
 @everywhere function test_one_problem(prob::Symbol)
   pb = string(prob)
 
@@ -75,41 +82,33 @@ end
 
   nlp_ad = timed_info("Instantiating $(pb)", make_nlp, prob)
 
-  @test nlp_ad.meta.name == pb
+  @testset "Sanity check (name, obj)" begin
+    @test nlp_ad.meta.name == pb
+    @test !isnothing(obj(nlp_ad, nlp_ad.meta.x0))
+  end
 
-  if pb in meta[(meta.contype .== :quadratic) .| (meta.contype .== :general), :name]
+  if (typeof(nlp_ad) <: ADNLPModels.AbstractADNLPModel) &&
+     (pb in meta[(meta.contype .== :quadratic) .| (meta.contype .== :general), :name])
     @testset "Test In-place Nonlinear Constraints for AD-$prob" begin
       test_in_place_constraints(prob, nlp_ad)
     end
   end
 
-  @testset "Test multi-precision ADNLPProblems for $prob" begin
-    test_multi_precision(prob, nlp_ad)
-  end
-
-  if pb in meta[meta.objtype .== :least_squares, :name]
-    @testset "Test Nonlinear Least Squares for $prob" begin
-      test_in_place_residual(prob)
+  if typeof(nlp_ad) <: ADNLPModels.AbstractADNLPModel
+    @testset "Test multi-precision ADNLPProblems for $prob" begin
+      test_multi_precision(prob, nlp_ad)
     end
   end
 
-  model = begin
-    mod = PureJuMP
-    if isdefined(mod, prob)
-      getfield(mod, prob)(n = ndef)
-    else
-      nothing
-    end
-  end
-  if !isnothing(model)
+  if prob in intersect(list_problems_PureJuMP, list_problems_ADNLPProblems)
     @testset "Test problems compatibility for $prob" begin
-      nlp_jump = MathOptNLPModel(model)
+      nlp_jump = make_jump_nlp(prob; n = ndef)
       test_compatibility(prob, nlp_jump, nlp_ad, ndef)
     end
   end
 end
 
-pmap(test_one_problem, list_problems_ADNLPProblems)
+pmap(test_one_problem, list_problems)
 
 include("test-scalable.jl")
 
diff --git a/test/test-in-place-residual.jl b/test/test-in-place-residual.jl
new file mode 100644
index 00000000..7e32e7c2
--- /dev/null
+++ b/test/test-in-place-residual.jl
@@ -0,0 +1,41 @@
+@everywhere function test_in_place_residual(prob::Symbol)
+  nlp = make_ad_nlp(prob; use_nls = false)
+  @test typeof(nlp) <: ADNLPModels.ADNLPModel
+  nls = make_ad_nlp(prob; use_nls = true)
+  @test typeof(nls) <: ADNLPModels.ADNLSModel
+  return test_in_place_residual(prob, nlp, nls)
+end
+
+@everywhere function test_in_place_residual(
+  prob::Symbol,
+  nlp::AbstractNLPModel,
+  nls::AbstractNLSModel,
+)
+  @testset "Test in-place residual $prob" begin
+    x = nls.meta.x0
+    Fx = similar(x, nls.nls_meta.nequ)
+    pb = String(prob)
+    if VERSION ≥ v"1.7" && !occursin("palmer", pb) && (pb != "watson") # palmer residual allocate
+      @allocated residual!(nls, x, Fx)
+      @test (@allocated residual!(nls, x, Fx)) == 0
+    end
+    m = OptimizationProblems.eval(Meta.parse("get_$(prob)_nls_nequ"))()
+    @test nls.nls_meta.nequ == m
+  end
+
+  @testset "Compare NLS with NLP $prob: x0 and obj are the same." begin
+    x0 = nlp.meta.x0
+    @test x0 == nls.meta.x0
+    nlp_fx = obj(nlp, x0)
+    nls_fx = obj(nls, x0)
+    are_almost_same = (nlp_fx ≈ nls_fx) | (nlp_fx ≈ 2 * nls_fx)
+    if !(are_almost_same)
+      @info "$prob : NLS $(nls_fx) ≈ NLP $(nlp_fx)"
+    end
+    @test are_almost_same
+  end
+end
+
+nls_name_list =
+  intersect(Symbol.(meta[meta.objtype .== :least_squares, :name]), list_problems_ADNLPProblems)
+pmap(test_in_place_residual, nls_name_list)
diff --git a/test/test-utils.jl b/test/test-utils.jl
index 5ae09609..b5ff1829 100644
--- a/test/test-utils.jl
+++ b/test/test-utils.jl
@@ -46,24 +46,6 @@ end
   @test ncon == m
 end
 
-@everywhere function test_in_place_residual(prob::Symbol)
-  nls = OptimizationProblems.ADNLPProblems.eval(prob)(use_nls = true)
-  @test typeof(nls) <: ADNLPModels.ADNLSModel
-  return test_in_place_residual(prob, nls)
-end
-
-@everywhere function test_in_place_residual(prob::Symbol, nls::AbstractNLSModel)
-  x = nls.meta.x0
-  Fx = similar(x, nls.nls_meta.nequ)
-  pb = String(prob)
-  if VERSION ≥ v"1.7" && !occursin("palmer", pb) && (pb != "watson") # palmer residual allocate
-    @allocated residual!(nls, x, Fx)
-    @test (@allocated residual!(nls, x, Fx)) == 0
-  end
-  m = OptimizationProblems.eval(Meta.parse("get_$(prob)_nls_nequ"))()
-  @test nls.nls_meta.nequ == m
-end
-
 @everywhere function test_compatibility(prob::Symbol, ndef::Integer = ndef)
   prob_fn = eval(Meta.parse("PureJuMP.$(prob)"))
   model = prob_fn(n = ndef)
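
A minimal standalone sketch (not part of the patch itself) of the NLP/NLS consistency that the new test/test-in-place-residual.jl checks, using hs6, which this patch touches in both forms. It assumes the layout shown in the diff: an OptimizationProblems.ADNLPProblems submodule whose constructors accept use_nls = true (as the removed test-utils.jl code did), ADNLPModels.jl for the model types, and NLPModels.jl for obj, where the objective of an ADNLSModel with residual F is 1/2 * ‖F(x)‖².

using OptimizationProblems, ADNLPModels, NLPModels

# :nlp form of hs6: scalar objective 0.5 * (x[1] - 1)^2 after this patch.
nlp = OptimizationProblems.ADNLPProblems.hs6()
# :nls form of hs6: residual r[1] = x[1] - 1 after this patch (the 1 // 2 * (...)^2 was removed).
nls = OptimizationProblems.ADNLPProblems.hs6(use_nls = true)

x0 = nlp.meta.x0
@assert x0 == nls.meta.x0        # both formulations should share the same starting point
# obj(nls, x) evaluates 0.5 * ‖F(x)‖², so the two objectives should agree here,
# matching the nlp_fx ≈ nls_fx branch of the new test.
@assert obj(nlp, x0) ≈ obj(nls, x0)

For problems whose scalar objective omits the 1/2 factor, the patch's comparison also accepts nlp_fx ≈ 2 * nls_fx, which is why test-in-place-residual.jl checks both alternatives.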