Skip to content

Commit 8f5c249

Browse files
Merge branch 'JuliaSmoothOptimizers:master' into documentation
2 parents 8ee36b7 + 224f111 commit 8f5c249

File tree

16 files changed

+1040
-624
lines changed

16 files changed

+1040
-624
lines changed

Project.toml

Lines changed: 5 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -19,11 +19,13 @@ ShiftedProximalOperators = "d4fd37fa-580c-4e43-9b30-361c21aae263"
1919
SolverCore = "ff4d7338-4cf1-434d-91df-b86cb86fb843"
2020

2121
[compat]
22+
ADNLPModels = "0.8.13"
2223
Arpack = "0.5"
2324
LinearOperators = "2.10.0"
2425
ManualNLPModels = "0.2.0"
2526
NLPModels = "0.19, 0.20, 0.21"
2627
NLPModelsModifiers = "0.7"
28+
OptimizationProblems = "0.9.2"
2729
Percival = "0.7.2"
2830
ProximalOperators = "0.15"
2931
RegularizedProblems = "0.1.1"
@@ -32,10 +34,12 @@ SolverCore = "0.3.0"
3234
julia = "^1.6.0"
3335

3436
[extras]
37+
ADNLPModels = "54578032-b7ea-4c30-94aa-7cbd1cce6c9a"
38+
OptimizationProblems = "5049e819-d29b-5fba-b941-0eee7e64c1c6"
3539
Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
3640
RegularizedProblems = "ea076b23-609f-44d2-bb12-a4ae45328278"
3741
Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
3842
TestSetExtensions = "98d24dd4-01ad-11ea-1b02-c9a08f80db04"
3943

4044
[targets]
41-
test = ["Random", "RegularizedProblems", "Test", "TestSetExtensions"]
45+
test = ["ADNLPModels", "OptimizationProblems", "Random", "RegularizedProblems", "Test", "TestSetExtensions"]

src/AL_alg.jl

Lines changed: 26 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -138,14 +138,14 @@ Notably, you can access, and modify, the following:
138138
- `stats.solver_specific[:smooth_obj]`: current value of the smooth part of the objective function;
139139
- `stats.solver_specific[:nonsmooth_obj]`: current value of the nonsmooth part of the objective function.
140140
"""
141-
mutable struct ALSolver{T, V, M, ST} <: AbstractOptimizationSolver
141+
mutable struct ALSolver{T, V, M, Pb, ST} <: AbstractOptimizationSolver
142142
x::V
143143
cx::V
144144
y::V
145145
has_bnds::Bool
146-
sub_model::AugLagModel{M, T, V}
146+
sub_problem::Pb
147147
sub_solver::ST
148-
sub_stats::GenericExecutionStats{T, V, V, Any}
148+
sub_stats::GenericExecutionStats{T, V, V, T}
149149
end
150150

151151
function ALSolver(reg_nlp::AbstractRegularizedNLPModel{T, V}; kwargs...) where {T, V}
@@ -155,12 +155,21 @@ function ALSolver(reg_nlp::AbstractRegularizedNLPModel{T, V}; kwargs...) where {
155155
cx = V(undef, ncon)
156156
y = V(undef, ncon)
157157
has_bnds = has_bounds(nlp)
158-
sub_model = AugLagModel(nlp, V(undef, ncon), T(0), x, T(0), V(undef, ncon))
158+
sub_model = AugLagModel(nlp, V(undef, ncon), T(0), x, T(0), cx)
159+
sub_problem = RegularizedNLPModel(sub_model, reg_nlp.h, reg_nlp.selected)
159160
sub_solver = R2Solver(reg_nlp; kwargs...)
160-
sub_stats = GenericExecutionStats(sub_model)
161+
sub_stats = RegularizedExecutionStats(sub_problem)
161162
M = typeof(nlp)
162163
ST = typeof(sub_solver)
163-
return ALSolver{T, V, M, ST}(x, cx, y, has_bnds, sub_model, sub_solver, sub_stats)
164+
return ALSolver{T, V, M, typeof(sub_problem), ST}(
165+
x,
166+
cx,
167+
y,
168+
has_bnds,
169+
sub_problem,
170+
sub_solver,
171+
sub_stats,
172+
)
164173
end
165174

166175
@doc (@doc ALSolver) function AL(::Val{:equ}, reg_nlp::AbstractRegularizedNLPModel; kwargs...)
@@ -182,7 +191,7 @@ function SolverCore.solve!(
182191
model::AbstractRegularizedNLPModel;
183192
kwargs...,
184193
)
185-
stats = GenericExecutionStats(model.model)
194+
stats = RegularizedExecutionStats(model)
186195
solve!(solver, model, stats; kwargs...)
187196
end
188197

@@ -209,6 +218,7 @@ function SolverCore.solve!(
209218
factor_decrease_subtol::T = T(1 // 4),
210219
dual_safeguard = project_y!,
211220
) where {T, V}
221+
reset!(stats)
212222

213223
# Retrieve workspace
214224
nlp = reg_nlp.model
@@ -254,8 +264,8 @@ function SolverCore.solve!(
254264
set_solver_specific!(stats, :nonsmooth_obj, hx)
255265

256266
mu = init_penalty
257-
solver.sub_model.y .= solver.y
258-
update_μ!(solver.sub_model, mu)
267+
solver.sub_problem.model.y .= solver.y
268+
update_μ!(solver.sub_problem.model, mu)
259269

260270
cviol = norm(solver.cx, Inf)
261271
cviol_old = Inf
@@ -279,15 +289,13 @@ function SolverCore.solve!(
279289
iter += 1
280290

281291
# dual safeguard
282-
dual_safeguard(solver.sub_model)
292+
dual_safeguard(solver.sub_problem.model)
283293

284-
# AL subproblem
285-
sub_reg_nlp = RegularizedNLPModel(solver.sub_model, h, selected)
286294
subtol = max(subtol, atol)
287295
reset!(subout)
288296
solve!(
289297
solver.sub_solver,
290-
sub_reg_nlp,
298+
solver.sub_problem,
291299
subout,
292300
x = solver.x,
293301
atol = subtol,
@@ -298,8 +306,8 @@ function SolverCore.solve!(
298306
verbose = subsolver_verbose,
299307
)
300308
solver.x .= subout.solution
301-
solver.cx .= solver.sub_model.cx
302-
subiters += subout.iter
309+
solver.cx .= solver.sub_problem.model.cx
310+
subiters = subout.iter
303311

304312
# objective
305313
fx = obj(nlp, solver.x)
@@ -310,8 +318,8 @@ function SolverCore.solve!(
310318
set_solver_specific!(stats, :nonsmooth_obj, hx)
311319

312320
# dual estimate
313-
update_y!(solver.sub_model)
314-
solver.y .= solver.sub_model.y
321+
update_y!(solver.sub_problem.model)
322+
solver.y .= solver.sub_problem.model.y
315323
set_constraint_multipliers!(stats, solver.y)
316324

317325
# stationarity measure
@@ -362,7 +370,7 @@ function SolverCore.solve!(
362370
if cviol > max(ctol, factor_primal_linear_improvement * cviol_old)
363371
mu *= factor_penalty_up
364372
end
365-
update_μ!(solver.sub_model, mu)
373+
update_μ!(solver.sub_problem.model, mu)
366374
cviol_old = cviol
367375
subtol *= factor_decrease_subtol
368376
rem_eval = max_eval < 0 ? max_eval : max_eval - neval_obj(nlp)

src/LMModel.jl

Lines changed: 59 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,59 @@
export LMModel

@doc raw"""
    LMModel(J, F, σ, xk)

Given the unconstrained optimization problem:
```math
\min \tfrac{1}{2} \| F(x) \|^2,
```
this model represents the smooth LM subproblem:
```math
\min_s \ \tfrac{1}{2} \| F(xk) + J s \|^2 + \tfrac{1}{2} σ \|s\|^2
```
where `J` is the Jacobian of `F` at `xk`, given either as an `AbstractMatrix` or as a
matrix-free `AbstractLinearOperator` (so that `mul!(out, J, s)` computes `J * s` and
`mul!(out, J', r)` computes `J' * r`).

`F` holds the residual value `F(xk)` and `σ > 0` is a regularization parameter.
An internal buffer `v`, of the same size as `F`, is allocated by the constructor
for intermediary computations.
"""
mutable struct LMModel{
  T <: Real,
  V <: AbstractVector{T},
  Jac <: Union{AbstractMatrix, AbstractLinearOperator},
} <: AbstractNLPModel{T, V}
  J::Jac         # Jacobian of F at xk (matrix or linear operator)
  F::V           # residual F(xk)
  v::V           # work buffer, same size as F
  xk::V          # point at which the model is built
  σ::T           # regularization parameter (σ > 0)
  meta::NLPModelMeta{T, V}
  counters::Counters
end

# Construct an LMModel from the Jacobian `J`, residual `F`, regularization `σ`
# and current iterate `xk`; the work buffer `v` is allocated here.
function LMModel(J::Jac, F::V, σ::T, xk::V) where {T, V, Jac}
  meta = NLPModelMeta(
    length(xk),
    x0 = xk, # lvar and uvar could be added here if bound constraints are ever needed.
  )
  v = similar(F)
  return LMModel(J, F, v, xk, σ, meta, Counters())
end
# Objective of the LM subproblem: ½‖F + J x‖² + ½ σ ‖x‖².
function NLPModels.obj(nlp::LMModel, x::AbstractVector{T}) where {T}
  @lencheck nlp.meta.nvar x
  increment!(nlp, :neval_obj)
  r = nlp.v                # reuse the preallocated work buffer
  mul!(r, nlp.J, x)        # r = J * x
  r .+= nlp.F              # r = F + J * x
  return (dot(r, r) + nlp.σ * dot(x, x)) / 2
end
# In-place gradient of the LM subproblem: g = J' * (F + J x) + σ x.
function NLPModels.grad!(nlp::LMModel, x::AbstractVector{T}, g::AbstractVector{T}) where {T}
  @lencheck nlp.meta.nvar x
  @lencheck nlp.meta.nvar g
  increment!(nlp, :neval_grad)
  mul!(nlp.v, nlp.J, x)    # v = J * x
  nlp.v .+= nlp.F          # v = F + J * x
  mul!(g, nlp.J', nlp.v)   # g = J' * (F + J * x)
  # Fused broadcast; the original `@. g += nlp.σ .* x` mixed `@.` with an explicit
  # dot, which is redundant. Behavior is identical.
  g .+= nlp.σ .* x
  return g
end

0 commit comments

Comments
 (0)