diff --git a/Project.toml b/Project.toml
index d756acf..d590270 100644
--- a/Project.toml
+++ b/Project.toml
@@ -5,11 +5,13 @@ version = "0.10.4"
 
 [deps]
 Ipopt = "b6b21f68-93f8-5de0-b562-5493be1d77c9"
 NLPModels = "a4795742-8479-5a88-8948-cc11e1c8c1a6"
+NLPModelsModifiers = "e01155f1-5c6f-4375-a9d8-616dd036575f"
 SolverCore = "ff4d7338-4cf1-434d-91df-b86cb86fb843"
 
 [compat]
 Ipopt = "1"
-NLPModels = "0.14, 0.15, 0.16, 0.17, 0.18, 0.19, 0.20, 0.21"
+NLPModels = "0.19, 0.20, 0.21"
+NLPModelsModifiers = "0.7"
 SolverCore = "0.3"
 julia = "^1.6"
diff --git a/src/NLPModelsIpopt.jl b/src/NLPModelsIpopt.jl
index d79b096..b185c9b 100644
--- a/src/NLPModelsIpopt.jl
+++ b/src/NLPModelsIpopt.jl
@@ -3,6 +3,7 @@ module NLPModelsIpopt
 export ipopt, IpoptSolver, reset!, solve!
 
 using NLPModels, Ipopt, SolverCore
+using NLPModelsModifiers: FeasibilityFormNLS
 
 const ipopt_statuses = Dict(
   0 => :first_order,
@@ -181,6 +182,47 @@ function ipopt(nlp::AbstractNLPModel; kwargs...)
   return solve!(solver, nlp, stats; kwargs...)
 end
 
+"""
+    ipopt(nls::AbstractNLSModel; kwargs...)
+
+Solve the least-squares problem `nls` with `IPOPT` by reformulating it as a `FeasibilityFormNLS`, i.e., by moving the nonlinear residuals into the constraints.
+
+# Arguments
+- `nls::AbstractNLSModel`: The least-squares problem to solve.
+
+For advanced usage, first define an `IpoptSolver` to preallocate the memory used in the algorithm, and then call `solve!`:
+
+    solver = IpoptSolver(nls)
+    solve!(solver, nls; kwargs...)
+
+# Examples
+```julia
+using NLPModelsIpopt, ADNLPModels
+nls = ADNLSModel(x -> [x[1] - 1, x[2] - 2], [0.0, 0.0], 2)
+stats = ipopt(nls, print_level = 0)
+```
+"""
+function ipopt(ff_nls::FeasibilityFormNLS; kwargs...)
+  solver = IpoptSolver(ff_nls)
+  stats = GenericExecutionStats(ff_nls)
+  stats = solve!(solver, ff_nls, stats; kwargs...)
+  return stats
+end
+
+function ipopt(nls::AbstractNLSModel; kwargs...)
+  ff_nls = FeasibilityFormNLS(nls)
+  stats = ipopt(ff_nls; kwargs...)
+
+  # FeasibilityFormNLS appends one auxiliary variable per residual and lists
+  # the residual equations before the original constraints, so keep only the
+  # components associated with the original variables and constraints.
+  stats.solution = length(stats.solution) >= nls.meta.nvar ? stats.solution[1:nls.meta.nvar] : stats.solution
+  stats.multipliers_L = length(stats.multipliers_L) >= nls.meta.nvar ? stats.multipliers_L[1:nls.meta.nvar] : stats.multipliers_L
+  stats.multipliers_U = length(stats.multipliers_U) >= nls.meta.nvar ? stats.multipliers_U[1:nls.meta.nvar] : stats.multipliers_U
+  stats.multipliers = length(stats.multipliers) >= nls.meta.ncon ? stats.multipliers[end-nls.meta.ncon+1:end] : stats.multipliers
+  return stats
+end
+
 function SolverCore.solve!(
   solver::IpoptSolver,
   nlp::AbstractNLPModel,
diff --git a/test/runtests.jl b/test/runtests.jl
index 943bf7b..d8272bd 100644
--- a/test/runtests.jl
+++ b/test/runtests.jl
@@ -1,4 +1,5 @@
 using ADNLPModels, NLPModelsIpopt, NLPModels, Ipopt, SolverCore, Test
+using NLPModelsModifiers: FeasibilityFormNLS
 
 @testset "Restart NLPModelsIpopt" begin
   nlp = ADNLPModel(x -> (x[1] - 1)^2 + 100 * (x[2] - x[1]^2)^2, [-1.2; 1.0])
@@ -107,3 +108,12 @@ end
   @test stats.primal_feas ≈ 0.0
   @test stats.dual_feas ≈ 0.0 atol = 1.49e-8
 end
+
+@testset "ipopt with AbstractNLSModel" begin
+  nls = ADNLSModel(x -> [x[1] - 1, x[2] - 2], [0.0, 0.0], 2)
+  stats = ipopt(nls, print_level = 0)
+  @test isapprox(stats.solution, [1.0, 2.0], rtol = 1e-6)
+  @test stats.status == :first_order
+  @test stats.iter >= 0
+  @test isapprox(stats.dual_feas, 0.0; atol = 1e-8)
+end
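
Note on the reformulation (not part of the diff): a minimal sketch of what `FeasibilityFormNLS` does to the model dimensions, and why the `AbstractNLSModel` wrapper trims the returned `stats`. It assumes ADNLPModels and NLPModelsModifiers are installed and reuses the two-residual example from the docstring and tests.

```julia
using ADNLPModels, NLPModelsIpopt
using NLPModelsModifiers: FeasibilityFormNLS

# min ½‖F(x)‖² with F(x) = [x₁ - 1, x₂ - 2]: 2 variables, 2 residuals, 0 constraints
nls = ADNLSModel(x -> [x[1] - 1, x[2] - 2], [0.0, 0.0], 2)

# The reformulation min ½‖r‖² s.t. F(x) - r = 0 adds one variable and one
# equality constraint per residual.
ff = FeasibilityFormNLS(nls)
@show ff.meta.nvar  # 4 = nls.meta.nvar + nls.nls_meta.nequ
@show ff.meta.ncon  # 2 = nls.meta.ncon + nls.nls_meta.nequ

# ipopt(nls) solves the reformulated model, then discards the auxiliary
# components, so the stats match the dimensions of the original problem.
stats = ipopt(nls, print_level = 0)
@show length(stats.solution)  # 2, not 4
```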