Skip to content

Commit b5ed99d

Browse files
nathanemactmigotdpo
authored
R2 solver (#124)
* add R2 solver Co-authored-by: tmigot <[email protected]> Co-authored-by: Dominique <[email protected]>
1 parent bea631a commit b5ed99d

File tree

6 files changed

+182
-4
lines changed

6 files changed

+182
-4
lines changed

.gitignore

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,2 +1,5 @@
11
docs/build
22
docs/Manifest.toml
3+
*/.DS_Store
4+
.DS_Store
5+
Manifest.toml

docs/src/solvers.md

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -5,10 +5,11 @@
55
- [`lbfgs`](@ref)
66
- [`tron`](@ref)
77
- [`trunk`](@ref)
8+
- [`R2`](@ref)
89

910
| Problem type | Solvers |
1011
| --------------------- | -------- |
11-
| Unconstrained NLP | [`lbfgs`](@ref), [`tron`](@ref), [`trunk`](@ref) |
12+
| Unconstrained NLP | [`lbfgs`](@ref), [`tron`](@ref), [`trunk`](@ref), [`R2`](@ref)|
1213
| Unconstrained NLS | [`trunk`](@ref), [`tron`](@ref) |
1314
| Bound-constrained NLP | [`tron`](@ref) |
1415
| Bound-constrained NLS | [`tron`](@ref) |
@@ -19,4 +20,5 @@
1920
lbfgs
2021
tron
2122
trunk
23+
R2
2224
```

src/JSOSolvers.jl

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -21,6 +21,7 @@ abstract type AbstractOptSolver{T, V} end
2121
# Unconstrained solvers
2222
include("lbfgs.jl")
2323
include("trunk.jl")
24+
include("R2.jl")
2425

2526
# Unconstrained solvers for NLS
2627
include("trunkls.jl")

src/R2.jl

Lines changed: 171 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,171 @@
1+
export R2, R2Solver
2+
3+
"""
4+
R2(nlp; kwargs...)
5+
solver = R2Solver(nlp;)
6+
solve!(solver, nlp; kwargs...)
7+
8+
A first-order quadratic regularization method for unconstrained optimization
9+
10+
# Arguments
11+
- `nlp::AbstractNLPModel{T, V}` is the model to solve, see `NLPModels.jl`.
12+
13+
# Keyword arguments
14+
- x0::V = nlp.meta.x0`: the initial guess
15+
- atol = eps(T)^(1 / 3): absolute tolerance
16+
- rtol = eps(T)^(1 / 3): relative tolerance: algorithm stop when ||∇f(x)|| ≤ ϵ_abs + ϵ_rel*||∇f(x0)||
17+
- η1 = eps(T)^(1/4), η2 = T(0.95): step acceptance parameters
18+
- γ1 = T(1/2), γ2 = 1/γ1: regularization update parameters
19+
- σmin = eps(T): step parameter for R2 algorithm
20+
- max_eval::Int: maximum number of evaluation of the objective function
21+
- max_time::Float64 = 3600.0: maximum time limit in seconds
22+
- verbose::Int = 0: if > 0, display iteration details every `verbose` iteration.
23+
24+
# Output
25+
The value returned is a `GenericExecutionStats`, see `SolverCore.jl`.
26+
27+
# Examples
28+
```jldoctest
29+
using JSOSolvers, ADNLPModels
30+
nlp = ADNLPModel(x -> sum(x.^2), ones(3))
31+
stats = R2(nlp)
32+
# output
33+
"Execution stats: first-order stationary"
34+
```
35+
```jldoctest
36+
using JSOSolvers, ADNLPModels
37+
nlp = ADNLPModel(x -> sum(x.^2), ones(3))
38+
solver = R2Solver(nlp);
39+
stats = solve!(solver, nlp)
40+
# output
41+
"Execution stats: first-order stationary"
42+
```
43+
"""
44+
mutable struct R2Solver{V}
45+
x::V
46+
gx::V
47+
cx::V
48+
end
49+
50+
# Allocate the R2 workspace: iterate, gradient and candidate vectors,
# each with the same shape and element type as `nlp.meta.x0`.
function R2Solver(nlp::AbstractNLPModel{T, V}) where {T, V}
  x0 = nlp.meta.x0
  return R2Solver{V}(similar(x0), similar(x0), similar(x0))
end
56+
57+
# Convenience entry point: allocate a fresh workspace and run the solver.
# Reuses the docstring attached to `R2Solver`.
@doc (@doc R2Solver) function R2(nlp::AbstractNLPModel{T, V}; kwargs...) where {T, V}
  return solve!(R2Solver(nlp), nlp; kwargs...)
end
64+
65+
"""
    solve!(solver::R2Solver, nlp::AbstractNLPModel; kwargs...)

Run the R2 quadratic regularization method on `nlp`, reusing the
preallocated workspace in `solver`. See [`R2`](@ref) for the list of
keyword arguments. Returns a `GenericExecutionStats`.
"""
function solve!(
  solver::R2Solver{V},
  nlp::AbstractNLPModel{T, V};
  x0::V = nlp.meta.x0,
  atol = eps(T)^(1 / 2),
  rtol = eps(T)^(1 / 2),
  η1 = eps(T)^(1 / 4),
  η2 = T(0.95),
  γ1 = T(1 / 2),
  γ2 = 1 / γ1,
  σmin = zero(T),
  max_time::Float64 = 3600.0,
  max_eval::Int = -1,
  verbose::Int = 0,
) where {T, V}
  unconstrained(nlp) || error("R2 should only be called on unconstrained problems.")
  start_time = time()
  elapsed_time = 0.0

  x = solver.x .= x0
  ∇fk = solver.gx
  ck = solver.cx

  iter = 0
  fk = obj(nlp, x)

  grad!(nlp, x, ∇fk)
  norm_∇fk = norm(∇fk)
  # Initial regularization parameter: a power of two of the order of ‖∇f(x0)‖.
  # σk = norm(hess(nlp, x))
  σk = 2^round(log2(norm_∇fk + 1))

  # Stopping criterion: ‖∇f(x)‖ ≤ atol + rtol * ‖∇f(x0)‖.
  ϵ = atol + rtol * norm_∇fk
  optimal = norm_∇fk ≤ ϵ
  if optimal
    @info("Optimal point found at initial point")
    @info @sprintf "%5s %9s %7s %7s " "iter" "f" "‖∇f‖" "σ"
    @info @sprintf "%5d %9.2e %7.1e %7.1e" iter fk norm_∇fk σk
  end
  # The evaluation budget is only enforced when max_eval ≥ 0.
  tired = neval_obj(nlp) > max_eval ≥ 0 || elapsed_time > max_time
  if verbose > 0 && mod(iter, verbose) == 0
    @info @sprintf "%5s %9s %7s %7s " "iter" "f" "‖∇f‖" "σ"
    infoline = @sprintf "%5d %9.2e %7.1e %7.1e" iter fk norm_∇fk σk
  end

  status = :unknown

  while !(optimal || tired)
    # Candidate: gradient step with step size 1 / σk.
    ck .= x .- (∇fk ./ σk)
    # Decrease predicted by the regularized model.
    ΔTk = norm_∇fk^2 / σk
    fck = obj(nlp, ck)
    if fck == -Inf
      status = :unbounded
      break
    end

    # Ratio of actual to predicted reduction.
    ρk = (fk - fck) / ΔTk

    # Update the regularization parameter: shrink on very successful steps,
    # grow on unsuccessful ones.
    if ρk >= η2
      σk = max(σmin, γ1 * σk)
    elseif ρk < η1
      σk = σk * γ2
    end

    # Accept the candidate when the reduction ratio is large enough.
    if ρk >= η1
      x .= ck
      fk = fck
      grad!(nlp, x, ∇fk)
      norm_∇fk = norm(∇fk)
    end

    iter += 1
    elapsed_time = time() - start_time
    optimal = norm_∇fk ≤ ϵ
    tired = neval_obj(nlp) > max_eval ≥ 0 || elapsed_time > max_time

    if verbose > 0 && mod(iter, verbose) == 0
      @info infoline
      infoline = @sprintf "%5d %9.2e %7.1e %7.1e" iter fk norm_∇fk σk
    end
  end

  # Determine the final status. Only overwrite :unknown: the previous version
  # reassigned `status` unconditionally here, so a :unbounded status set
  # before `break` was clobbered with `nothing`.
  if status == :unknown
    if optimal
      status = :first_order
    elseif elapsed_time > max_time
      status = :max_time
    elseif neval_obj(nlp) > max_eval ≥ 0
      status = :max_eval
    end
  end

  return GenericExecutionStats(
    status,
    nlp,
    solution = x,
    objective = fk,
    dual_feas = norm_∇fk,
    elapsed_time = elapsed_time,
    iter = iter,
  )
end

test/consistency.jl

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -6,9 +6,9 @@ function consistency()
66
qnls = LBFGSModel(unls)
77

88
@testset "Consistency" begin
9-
args = Pair{Symbol, Number}[:atol => 1e-6, :rtol => 1e-6, :max_eval => 1000, :max_time => 60.0]
9+
args = Pair{Symbol, Number}[:atol => 1e-6, :rtol => 1e-6, :max_eval => 20000, :max_time => 60.0]
1010

11-
@testset "NLP with $mtd" for mtd in [trunk, lbfgs, tron]
11+
@testset "NLP with $mtd" for mtd in [trunk, lbfgs, tron, R2]
1212
with_logger(NullLogger()) do
1313
stats = mtd(unlp; args...)
1414
@test stats isa GenericExecutionStats
@@ -25,7 +25,7 @@ function consistency()
2525
end
2626
end
2727

28-
@testset "Quasi-Newton NLP with $mtd" for mtd in [trunk, lbfgs, tron]
28+
@testset "Quasi-Newton NLP with $mtd" for mtd in [trunk, lbfgs, tron, R2]
2929
with_logger(NullLogger()) do
3030
stats = mtd(qnlp; args...)
3131
@test stats isa GenericExecutionStats

test/test_solvers.jl

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -7,6 +7,7 @@ function tests()
77
("trunk+cg", (nlp; kwargs...) -> trunk(nlp, subsolver_type = CgSolver; kwargs...)),
88
("lbfgs", lbfgs),
99
("tron", tron),
10+
("R2", R2)
1011
]
1112
unconstrained_nlp(solver)
1213
multiprecision_nlp(solver, :unc)

0 commit comments

Comments
 (0)