-export AL
+export AL, ALSolver, solve!
+
+import SolverCore.solve!
 
 function AL(nlp::AbstractNLPModel, h; kwargs...)
   kwargs_dict = Dict(kwargs...)
@@ -32,13 +34,10 @@ function AL(
   return subsolver(reg_nlp; kwargs...)
 end
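# A minimal one-shot usage sketch for the entry point above (illustrative:
# it assumes ADNLPModels and ProximalOperators are loaded, and the problem
# data is made up):
#
#   using ADNLPModels, ProximalOperators
#   # min x₁² + x₂² + ‖x‖₁  s.t.  x₁ + x₂ = 1
#   nlp = ADNLPModel(x -> x[1]^2 + x[2]^2, [0.5, 0.5], x -> [x[1] + x[2]], [1.0], [1.0])
#   stats = AL(nlp, NormL1(1.0); atol = 1e-5, verbose = 1)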
 
-# a uniform solver interface is missing
-# TR(nlp, h; kwargs...) = TR(nlp, h, NormLinf(1.0); kwargs...)
-
 function AL(
   ::Val{:ineq},
   reg_nlp::AbstractRegularizedNLPModel;
-  x0::V = reg_nlp.model.meta.x0,
+  x::V = reg_nlp.model.meta.x0,
   kwargs...,
 ) where {V}
   nlp = reg_nlp.model
@@ -47,13 +46,13 @@ function AL(
   end
   snlp = nlp isa AbstractNLSModel ? SlackNLSModel(nlp) : SlackModel(nlp)
   reg_snlp = RegularizedNLPModel(snlp, reg_nlp.h, reg_nlp.selected)
-  if length(x0) != snlp.meta.nvar
-    x = fill!(V(undef, snlp.meta.nvar), zero(eltype(V)))
-    x[1:(nlp.meta.nvar)] .= x0
+  if length(x) != snlp.meta.nvar
+    xs = fill!(V(undef, snlp.meta.nvar), zero(eltype(V)))
+    xs[1:(nlp.meta.nvar)] .= x
   else
-    x = x0
+    xs = x
   end
-  output = AL(Val(:equ), reg_snlp; x0 = x, kwargs...)
+  output = AL(Val(:equ), reg_snlp; x = xs, kwargs...)
   output.solution = output.solution[1:(nlp.meta.nvar)]
   return output
 end
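# What the :ineq path above does, in formulas: the inequality-constrained problem
#
#   min f(x) + h(x)  s.t.  lcon ≤ c(x) ≤ ucon
#
# is rewritten by SlackModel/SlackNLSModel with slack variables s as the
# equality-constrained problem
#
#   min f(x) + h(x)  s.t.  c(x) - s = 0,  lcon ≤ s ≤ ucon,
#
# which is why the initial guess is zero-padded from nlp.meta.nvar to
# snlp.meta.nvar entries before calling AL(Val(:equ), ...), and why the
# solution is truncated back to the first nlp.meta.nvar entries on return.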
@@ -80,52 +79,110 @@ where y is an estimate of the Lagrange multiplier vector for the constraints lco
 
     L(x;y,μ) := f(x) - yᵀc(x) + ½ μ ‖c(x)‖².
 
-### Arguments
+For advanced usage, first define an `ALSolver` to preallocate the memory used in the algorithm, and then call `solve!`:
+
+    solver = ALSolver(reg_nlp)
+    solve!(solver, reg_nlp)
+
+    stats = GenericExecutionStats(reg_nlp.model)
+    solver = ALSolver(reg_nlp)
+    solve!(solver, reg_nlp, stats)
 
-* `reg_nlp::AbstractRegularizedNLPModel`: a regularized optimization problem, see `RegularizedProblems.jl`,
+# Arguments
+
+- `reg_nlp::AbstractRegularizedNLPModel`: a regularized optimization problem, see `RegularizedProblems.jl`,
   consisting of `model` representing a smooth optimization problem, see `NLPModels.jl`, and a regularizer `h` such
   as those defined in `ProximalOperators.jl`.
 
 The objective and gradient of `model` will be accessed.
 The Hessian of `model` may be accessed or not, depending on the subsolver adopted.
 If adopted, the Hessian is accessed as an abstract operator and need not be the exact Hessian.
 
-### Keyword arguments
+# Keyword arguments
 
-* `x0::AbstractVector`: a primal initial guess (default: `reg_nlp.model.meta.x0`)
-* `y0::AbstractVector`: a dual initial guess (default: `reg_nlp.model.meta.y0`)
+- `x::AbstractVector`: a primal initial guess (default: `reg_nlp.model.meta.x0`)
+- `y::AbstractVector`: a dual initial guess (default: `reg_nlp.model.meta.y0`)
 - `atol::T = √eps(T)`: absolute optimality tolerance;
 - `ctol::T = atol`: absolute feasibility tolerance;
 - `verbose::Int = 0`: if > 0, display iteration details every `verbose` iteration;
 - `max_iter::Int = 10000`: maximum number of iterations;
 - `max_time::Float64 = 30.0`: maximum time limit in seconds;
 - `max_eval::Int = -1`: maximum number of evaluations of the objective function (a negative number means unlimited);
-* `subsolver::AbstractOptimizationSolver = has_bounds(nlp) ? TR : R2`: the procedure used to compute a step (e.g. `PG`, `R2`, `TR` or `TRDH`);
-* `subsolver_logger::AbstractLogger`: a logger to pass to the subproblem solver;
+- `subsolver_verbose::Int = 0`: if > 0, display subsolver iteration details every `subsolver_verbose` iteration;
+- `subsolver_max_iter::Int = 100000`: maximum number of subsolver iterations per outer iteration;
+- `subsolver_max_eval::Int = -1`: maximum number of objective evaluations allowed to the subsolver (a negative number means unlimited);
 - `init_penalty::T = T(10)`: initial penalty parameter;
 - `factor_penalty_up::T = T(2)`: multiplicative factor to increase the penalty parameter;
 - `factor_primal_linear_improvement::T = T(3/4)`: fraction to declare sufficient improvement of feasibility;
 - `init_subtol::T = T(0.1)`: initial subproblem tolerance;
 - `factor_decrease_subtol::T = T(1/4)`: multiplicative factor to decrease the subproblem tolerance;
 - `dual_safeguard = (nlp::AugLagModel) -> nothing`: in-place function to modify, as needed, the dual estimate; a sketch of a custom safeguard follows this docstring.
 
-### Return values
-
-* `stats::GenericExecutionStats`: solution and other info, see `SolverCore.jl`.
+# Output
 
+- `stats::GenericExecutionStats`: solution and other info, see `SolverCore.jl`.
 """
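# A sketch of the custom `dual_safeguard` mentioned in the docstring above
# (`clip_duals!` is hypothetical, not part of this package; it mutates the
# `y` field of `AugLagModel`, as the solver itself does elsewhere in this file):
#
#   clip_duals!(al::AugLagModel) = (al.y .= clamp.(al.y, -1e8, 1e8); nothing)
#
#   solver = ALSolver(reg_nlp)
#   stats = GenericExecutionStats(reg_nlp.model)
#   solve!(solver, reg_nlp, stats; dual_safeguard = clip_duals!)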
-function AL(
-  ::Val{:equ},
-  reg_nlp::AbstractRegularizedNLPModel{T, V};
-  x0::V = reg_nlp.model.meta.x0,
-  y0::V = reg_nlp.model.meta.y0,
+mutable struct ALSolver{T, V, M, ST} <: AbstractOptimizationSolver
+  x::V
+  cx::V
+  y::V
+  has_bnds::Bool
+  sub_model::AugLagModel{M, T, V}
+  sub_solver::ST
+  sub_stats::GenericExecutionStats{T, V, V, Any}
+end
+
+function ALSolver(reg_nlp::AbstractRegularizedNLPModel{T, V}; kwargs...) where {T, V}
+  nlp = reg_nlp.model
+  nvar, ncon = nlp.meta.nvar, nlp.meta.ncon
+  x = V(undef, nvar)
+  cx = V(undef, ncon)
+  y = V(undef, ncon)
+  has_bnds = has_bounds(nlp)
+  sub_model = AugLagModel(nlp, V(undef, ncon), T(0), x, T(0), V(undef, ncon))
+  sub_solver = R2Solver(reg_nlp; kwargs...)
+  sub_stats = GenericExecutionStats(sub_model)
+  M = typeof(nlp)
+  ST = typeof(sub_solver)
+  return ALSolver{T, V, M, ST}(x, cx, y, has_bnds, sub_model, sub_solver, sub_stats)
+end
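# The point of the constructor above is that x, cx, y, the AugLagModel and
# the subsolver are allocated once, so repeated calls to solve! reuse that
# memory. A warm-start sketch (tolerances illustrative):
#
#   solver = ALSolver(reg_nlp)
#   stats = GenericExecutionStats(reg_nlp.model)
#   solve!(solver, reg_nlp, stats; atol = 1e-4)
#   solve!(solver, reg_nlp, stats; atol = 1e-6, x = stats.solution)  # warm start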
+
+@doc (@doc ALSolver) function AL(::Val{:equ}, reg_nlp::AbstractRegularizedNLPModel; kwargs...)
+  nlp = reg_nlp.model
+  if !(nlp.meta.minimize)
+    error("AL only works for minimization problems")
+  end
+  if nlp.meta.ncon == 0 || !equality_constrained(nlp)
+    error(
+      "AL(::Val{:equ}, ...) should only be called for equality-constrained problems with bounded variables. Use AL(...)",
+    )
+  end
+  solver = ALSolver(reg_nlp)
+  solve!(solver, reg_nlp; kwargs...)
+end
+
+function SolverCore.solve!(
+  solver::AbstractOptimizationSolver,
+  model::AbstractRegularizedNLPModel;
+  kwargs...,
+)
+  stats = GenericExecutionStats(model.model)
+  solve!(solver, model, stats; kwargs...)
+end
+
+function SolverCore.solve!(
+  solver::ALSolver{T, V},
+  reg_nlp::AbstractRegularizedNLPModel{T, V},
+  stats::GenericExecutionStats{T, V};
+  x::V = reg_nlp.model.meta.x0,
+  y::V = reg_nlp.model.meta.y0,
   atol::T = √eps(T),
   verbose::Int = 0,
   max_iter::Int = 10000,
   max_time::Float64 = 30.0,
   max_eval::Int = -1,
-  subsolver = has_bounds(reg_nlp.model) ? TR : R2,
-  subsolver_logger::Logging.AbstractLogger = Logging.NullLogger(),
+  subsolver_verbose::Int = 0,
+  subsolver_max_iter::Int = 100000,
+  subsolver_max_eval::Int = -1,
   init_penalty::T = T(10),
   factor_penalty_up::T = T(2),
   ctol::T = atol,
@@ -140,6 +197,7 @@ function AL(
   h = reg_nlp.h
   selected = reg_nlp.selected
 
+  # Sanity checks
   if !(nlp.meta.minimize)
     error("AL only works for minimization problems")
   end
@@ -148,68 +206,87 @@ function AL(
       "AL(::Val{:equ}, ...) should only be called for equality-constrained problems. Use AL(...)",
     )
   end
+  @assert length(solver.x) == nlp.meta.nvar
+  @assert length(solver.y) == nlp.meta.ncon
+  # TODO check solver.has_bnds with has_bounds(nlp) for solver.sub_solver
 
-  stats = GenericExecutionStats(nlp)
+  reset!(stats)
   set_iter!(stats, 0)
   start_time = time()
   set_time!(stats, 0.0)
 
-  # parameters
+  # check parameter values
   @assert init_penalty > 0
   @assert factor_penalty_up > 1
   @assert 0 < factor_primal_linear_improvement < 1
   @assert 0 < factor_decrease_subtol < 1
 
   # initialization
-  @assert length(x0) == nlp.meta.nvar
-  @assert length(y0) == nlp.meta.ncon
-  x = similar(nlp.meta.x0)
-  y = similar(nlp.meta.y0)
-  x .= x0
-  y .= y0
-  set_solution!(stats, x)
-  set_constraint_multipliers!(stats, y)
-
-  fx, cx = objcons(nlp, x)
+  solver.x .= max.(nlp.meta.lvar, min.(x, nlp.meta.uvar))
+  solver.y .= y
+  set_solution!(stats, solver.x)
+  set_constraint_multipliers!(stats, solver.y)
+  subout = solver.sub_stats
+
+  objx, _ = objcons!(nlp, solver.x, solver.cx)
+  objx += @views h(solver.x[selected])
+  set_objective!(stats, objx)
+
   mu = init_penalty
-  alf = AugLagModel(nlp, y, mu, x, fx, cx)
+  solver.sub_model.y .= solver.y
+  update_μ!(solver.sub_model, mu)
 
-  cviol = norm(alf.cx, Inf)
+  cviol = norm(solver.cx, Inf)
   cviol_old = Inf
   iter = 0
   subiters = 0
   done = false
   subtol = init_subtol
+  rem_eval = max_eval
 
   if verbose > 0
     @info log_header(
-      [:iter, :subiter, :fx, :prim_res, :μ, :normy, :dual_tol, :inner_status],
+      [:iter, :sub_it, :obj, :cviol, :μ, :normy, :sub_tol, :sub_status],
      [Int, Int, Float64, Float64, Float64, Float64, Float64, Symbol],
     )
-    @info log_row(Any[iter, subiters, fx, cviol, alf.μ, norm(y), subtol])
+    @info log_row(Any[iter, subiters, objx, cviol, mu, norm(solver.y), subtol])
   end
 
   while !done
     iter += 1
 
     # dual safeguard
-    dual_safeguard(alf)
+    dual_safeguard(solver.sub_model)
 
     # AL subproblem
+    sub_reg_nlp = RegularizedNLPModel(solver.sub_model, h, selected)
     subtol = max(subtol, atol)
-    subout = with_logger(subsolver_logger) do
-      subsolver(alf, h, x = x, atol = subtol, rtol = zero(T), selected = selected)
-    end
-    x .= subout.solution
+    reset!(subout)
+    solve!(
+      solver.sub_solver,
+      sub_reg_nlp,
+      subout,
+      x = solver.x,
+      atol = subtol,
+      rtol = zero(T),
+      max_time = max_time - stats.elapsed_time,
+      max_eval = subsolver_max_eval < 0 ? rem_eval : min(subsolver_max_eval, rem_eval),
+      max_iter = subsolver_max_iter,
+      verbose = subsolver_verbose,
+    )
+    solver.x .= subout.solution
+    solver.cx .= solver.sub_model.cx
     subiters += subout.iter
 
     # objective
-    fx = obj(nlp, x)
-    set_objective!(stats, fx)
+    objx = obj(nlp, solver.x)
+    objx += @views h(solver.x[selected])
+    set_objective!(stats, objx)
 
     # dual estimate
-    update_y!(alf)
-    set_constraint_multipliers!(stats, alf.y)
+    update_y!(solver.sub_model)
+    solver.y .= solver.sub_model.y
+    set_constraint_multipliers!(stats, solver.y)
 
     # stationarity measure
     # FIXME it seems that R2 returns no dual_feas in `dual_feas`
@@ -219,7 +296,7 @@ function AL(
     end
 
     # primal violation
-    cviol = norm(alf.cx, Inf)
+    cviol = norm(solver.cx, Inf)
     set_primal_residual!(stats, cviol)
 
     # termination checks
@@ -250,22 +327,22 @@ function AL(
     done = stats.status != :unknown
 
     if verbose > 0 && (mod(stats.iter, verbose) == 0 || done)
-      @info log_row(Any[iter, subiters, fx, cviol, alf.μ, norm(alf.y), subtol, subout.status])
+      @info log_row(Any[iter, subiters, objx, cviol, mu, norm(solver.y), subtol, subout.status])
     end
 
     if !done
       if cviol > max(ctol, factor_primal_linear_improvement * cviol_old)
-        # @info "decreasing mu"
         mu *= factor_penalty_up
       end
-      update_μ!(alf, mu)
+      update_μ!(solver.sub_model, mu)
       cviol_old = cviol
       subtol *= factor_decrease_subtol
+      rem_eval = max_eval < 0 ? max_eval : max_eval - neval_obj(nlp)
     end
   end
-  set_solution!(stats, x)
-  set_constraint_multipliers!(stats, alf.y)
-  return stats
+  set_solution!(stats, solver.x)
+  set_constraint_multipliers!(stats, solver.y)
+  stats
 end
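# Why update_y! is applied right after each subproblem solve (a sketch of the
# classical first-order multiplier update, consistent with the augmented
# Lagrangian defined in the docstring):
#
#   L(x; y, μ) := f(x) - yᵀc(x) + ½ μ ‖c(x)‖²
#   ∇ₓL(x; y, μ) = ∇f(x) - J(x)ᵀ(y - μ c(x))
#
# so an approximate stationary point of the subproblem is approximately
# stationary for the Lagrangian with the new estimate y⁺ = y - μ c(x).
# The penalty μ is then increased only when the feasibility measure ‖c(x)‖∞
# has not shrunk by at least factor_primal_linear_improvement.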
 
 """