11export AL
22
"""
    AL(nlp, h; kwargs...)

Wrap the smooth model `nlp` and the regularizer `h` into a `RegularizedNLPModel`
and solve it with the augmented Lagrangian method.

The keyword argument `selected` (default: `1:nlp.meta.nvar`, i.e. all variables)
indicates which variables the regularizer applies to; it is consumed here and is
not forwarded to the underlying solver methods.
"""
function AL(nlp::AbstractNLPModel, h; kwargs...)
  kwargs_dict = Dict(kwargs...)
  # Extract `selected` so it becomes part of the regularized model instead of a
  # solver keyword.
  selected = pop!(kwargs_dict, :selected, 1:(nlp.meta.nvar))
  reg_nlp = RegularizedNLPModel(nlp, h, selected)
  # Forward only the remaining keywords: `kwargs` itself may still contain
  # `:selected`, which the downstream `AL` methods do not accept.
  return AL(reg_nlp; kwargs_dict...)
end
9+
"""
    AL(reg_nlp; kwargs...)

Dispatch to the augmented Lagrangian variant matching the constraints of
`reg_nlp.model`: problems without general constraints go straight to a
subsolver, equality-constrained problems are handled directly, and problems
with inequalities are reformulated with slack variables.
"""
function AL(reg_nlp::AbstractRegularizedNLPModel; kwargs...)
  model = reg_nlp.model
  if unconstrained(model) || bound_constrained(model)
    return AL(Val(:unc), reg_nlp; kwargs...)
  end
  if equality_constrained(model)
    return AL(Val(:equ), reg_nlp; kwargs...)
  end
  # remaining case: the model has general inequality constraints
  return AL(Val(:ineq), reg_nlp; kwargs...)
end
1219
"""
    AL(::Val{:unc}, reg_nlp; subsolver = has_bounds(reg_nlp.model) ? TR : R2, kwargs...)

Handle the degenerate case where `reg_nlp.model` has no general explicit
constraints: emit a warning and delegate directly to `subsolver`.
Errors if the model does have general constraints; call `AL(reg_nlp; ...)`
instead in that case.
"""
function AL(
  ::Val{:unc},
  reg_nlp::AbstractRegularizedNLPModel;
  subsolver = has_bounds(reg_nlp.model) ? TR : R2,
  kwargs...,
)
  model = reg_nlp.model
  # Guard against misuse: this method is reserved for problems without general
  # explicit constraints.
  unconstrained(model) || bound_constrained(model) || error(
    "AL(::Val{:unc}, ...) should only be called for unconstrained or bound-constrained problems. Use AL(...)",
  )
  @warn "Problem does not have general explicit constraints; calling solver $(string(subsolver))"
  return subsolver(reg_nlp; kwargs...)
end
2934
3035# a uniform solver interface is missing
# TR(nlp, h; kwargs...) = TR(nlp, h, NormLinf(1.0); kwargs...)
3237
"""
    AL(::Val{:ineq}, reg_nlp; x0 = reg_nlp.model.meta.x0, kwargs...)

Reformulate a problem with inequality constraints into an equality-constrained
problem by appending slack variables, then solve it via `AL(Val(:equ), ...)`.
The slack components are stripped from the returned solution.
Errors if the model has no inequalities; call `AL(reg_nlp; ...)` instead.
"""
function AL(
  ::Val{:ineq},
  reg_nlp::AbstractRegularizedNLPModel;
  x0::V = reg_nlp.model.meta.x0,
  kwargs...,
) where {V}
  model = reg_nlp.model
  if model.meta.ncon == 0 || equality_constrained(model)
    error("AL(::Val{:ineq}, ...) should only be called for problems with inequalities. Use AL(...)")
  end
  # Append slack variables so every general constraint becomes an equality.
  slack_model = model isa AbstractNLSModel ? SlackNLSModel(model) : SlackModel(model)
  reg_slack = RegularizedNLPModel(slack_model, reg_nlp.h, reg_nlp.selected)
  # Pad the initial guess with zeros for the slack variables when needed.
  nvar = model.meta.nvar
  if length(x0) == slack_model.meta.nvar
    xs = x0
  else
    xs = fill!(V(undef, slack_model.meta.nvar), zero(eltype(V)))
    xs[1:nvar] .= x0
  end
  output = AL(Val(:equ), reg_slack; x0 = xs, kwargs...)
  # Report the solution in the original variables only (drop slacks).
  output.solution = output.solution[1:nvar]
  return output
end
5560
5661"""
    AL(reg_nlp; kwargs...)
5863
An augmented Lagrangian method for constrained regularized optimization, namely problems in the form

    minimize    f(x) + h(x)
    subject to  lvar ≤ x ≤ uvar,
                lcon ≤ c(x) ≤ ucon
6269
6370where f: ℝⁿ → ℝ, c: ℝⁿ → ℝᵐ and their derivatives are Lipschitz continuous and h: ℝⁿ → ℝ is
6471lower semi-continuous, proper and prox-bounded.
6572
At each iteration, an iterate x is computed as an approximate solution of the subproblem

    minimize    L(x;y,μ) + h(x)
    subject to  lvar ≤ x ≤ uvar
6977
7078where y is an estimate of the Lagrange multiplier vector for the constraints lcon ≤ c(x) ≤ ucon,
7179μ is the penalty parameter and L(⋅;y,μ) is the augmented Lagrangian function defined by
@@ -74,44 +82,64 @@ where y is an estimate of the Lagrange multiplier vector for the constraints lco
7482
7583### Arguments
7684
* `reg_nlp::AbstractRegularizedNLPModel`: a regularized optimization problem, see `RegularizedProblems.jl`,
  consisting of `model`, representing a smooth optimization problem (see `NLPModels.jl`), and a regularizer `h`
  such as those defined in `ProximalOperators.jl`.

The objective and gradient of `model` will be accessed.
The Hessian of `model` may be accessed or not, depending on the subsolver adopted.
8391If adopted, the Hessian is accessed as an abstract operator and need not be the exact Hessian.
8492
8593### Keyword arguments
8694
* `x0::AbstractVector`: a primal initial guess (default: `reg_nlp.model.meta.x0`)
* `y0::AbstractVector`: a dual initial guess (default: `reg_nlp.model.meta.y0`)
* `atol::T = √eps(T)`: absolute optimality tolerance;
* `ctol::T = atol`: absolute feasibility tolerance;
* `verbose::Int = 0`: if > 0, display iteration details every `verbose` iteration;
* `max_iter::Int = 10000`: maximum number of iterations;
* `max_time::Float64 = 30.0`: maximum time limit in seconds;
* `max_eval::Int = -1`: maximum number of evaluations of the objective function (a negative number means unlimited);
* `subsolver = has_bounds(reg_nlp.model) ? TR : R2`: the procedure used to compute a step (e.g. `PG`, `R2`, `TR` or `TRDH`);
* `subsolver_logger::AbstractLogger`: a logger to pass to the subproblem solver;
* `init_penalty::T = T(10)`: initial penalty parameter;
* `factor_penalty_up::T = T(2)`: multiplicative factor to increase the penalty parameter;
* `factor_primal_linear_improvement::T = T(3/4)`: fraction used to declare sufficient improvement of feasibility;
* `init_subtol::T = T(0.1)`: initial subproblem tolerance;
* `factor_decrease_subtol::T = T(1/4)`: multiplicative factor to decrease the subproblem tolerance;
* `dual_safeguard = (nlp::AugLagModel) -> nothing`: in-place function to modify, as needed, the dual estimate.
92111
93112### Return values
94113
* `stats::GenericExecutionStats`: solution and other info, see `SolverCore.jl`.
96115
97116"""
98117function AL (
99118 :: Val{:equ} ,
100- nlp:: AbstractNLPModel ,
101- h:: H ,
102- options:: ROSolverOptions{T} ;
103- x0:: V = nlp. meta. x0,
104- y0:: V = nlp. meta. y0,
105- subsolver = has_bounds (nlp) ? TR : R2,
119+ reg_nlp:: AbstractRegularizedNLPModel{T, V} ;
120+ x0:: V = reg_nlp. model. meta. x0,
121+ y0:: V = reg_nlp. model. meta. y0,
122+ atol:: T = √ eps (T),
123+ verbose:: Int = 0 ,
124+ max_iter:: Int = 10000 ,
125+ max_time:: Float64 = 30.0 ,
126+ max_eval:: Int = - 1 ,
127+ subsolver = has_bounds (reg_nlp. model) ? TR : R2,
106128 subsolver_logger:: Logging.AbstractLogger = Logging. NullLogger (),
107- init_penalty:: Real = T (10 ),
108- factor_penalty_up:: Real = T (2 ),
109- ctol:: Real = options . ϵa ,
110- init_subtol:: Real = T (0.1 ),
111- factor_primal_linear_improvement:: Real = T (3 // 4 ),
112- factor_decrease_subtol:: Real = T (1 // 4 ),
129+ init_penalty:: T = T (10 ),
130+ factor_penalty_up:: T = T (2 ),
131+ ctol:: T = atol ,
132+ init_subtol:: T = T (0.1 ),
133+ factor_primal_linear_improvement:: T = T (3 // 4 ),
134+ factor_decrease_subtol:: T = T (1 // 4 ),
113135 dual_safeguard = project_y!,
114- ) where {H, T <: Real , V}
136+ ) where {T, V}
137+
138+ # Retrieve workspace
139+ nlp = reg_nlp. model
140+ h = reg_nlp. h
141+ selected = reg_nlp. selected
142+
115143 if ! (nlp. meta. minimize)
116144 error (" AL only works for minimization problems" )
117145 end
@@ -131,10 +159,6 @@ function AL(
131159 @assert factor_penalty_up > 1
132160 @assert 0 < factor_primal_linear_improvement < 1
133161 @assert 0 < factor_decrease_subtol < 1
134- verbose = options. verbose
135- max_time = options. maxTime
136- max_iter = options. maxIter
137- max_eval = - 1
138162
139163 # initialization
140164 @assert length (x0) == nlp. meta. nvar
@@ -172,9 +196,9 @@ function AL(
172196 dual_safeguard (alf)
173197
174198 # AL subproblem
175- subtol = max (subtol, options . ϵa )
199+ subtol = max (subtol, atol )
176200 subout = with_logger (subsolver_logger) do
177- subsolver (alf, h, x = x, atol = subtol, rtol = zero (T))
201+ subsolver (alf, h, x = x, atol = subtol, rtol = zero (T), selected = selected )
178202 end
179203 x .= subout. solution
180204 subiters += subout. iter
@@ -199,10 +223,7 @@ function AL(
199223 set_primal_residual! (stats, cviol)
200224
201225 # termination checks
202- dual_ok =
203- subout. status_reliable &&
204- subout. status == :first_order &&
205- subtol <= options. ϵa
226+ dual_ok = subout. status_reliable && subout. status == :first_order && subtol <= atol
206227 primal_ok = cviol <= ctol
207228 optimal = dual_ok && primal_ok
208229
0 commit comments