diff --git a/Project.toml b/Project.toml
index e513061b..baf0d479 100644
--- a/Project.toml
+++ b/Project.toml
@@ -4,9 +4,11 @@ author = ["Robert Baraldi and Dominique Orban 0 is the trust-region radius.
-### Arguments
-
-* `nlp::AbstractDiagonalQNModel`: a smooth optimization problem
-* `h`: a regularizer such as those defined in ProximalOperators
-* `χ`: a norm used to define the trust region in the form of a regularizer
-* `options::ROSolverOptions`: a structure containing algorithmic parameters
-
-The objective and gradient of `nlp` will be accessed.
-
-In the second form, instead of `nlp`, the user may pass in
-
-* `f` a function such that `f(x)` returns the value of f at x
-* `∇f!` a function to evaluate the gradient in place, i.e., such that `∇f!(g, x)` store ∇f(x) in `g`
-* `x0::AbstractVector`: an initial guess.
-
-### Keyword arguments
-
-* `x0::AbstractVector`: an initial guess (default: `nlp.meta.x0`)
-* `selected::AbstractVector{<:Integer}`: (default `1:f.meta.nvar`)
-
-### Return values
-
-* `xk`: the final iterate
-* `Fobj_hist`: an array with the history of values of the smooth objective
-* `Hobj_hist`: an array with the history of values of the nonsmooth objective
-* `Complex_hist`: an array with the history of number of inner iterations.
+For advanced usage, first define a solver "TRDHSolver" to preallocate the memory used in the algorithm, and then call `solve!`:
+
+    solver = TRDHSolver(reg_nlp; D = nothing, χ = NormLinf(1))
+    solve!(solver, reg_nlp)
+
+    stats = RegularizedExecutionStats(reg_nlp)
+    solve!(solver, reg_nlp, stats)
+
+# Arguments
+* `reg_nlp::AbstractRegularizedNLPModel{T, V}`: the problem to solve, see `RegularizedProblems.jl`, `NLPModels.jl`.
+
+# Keyword arguments
+- `x::V = nlp.meta.x0`: the initial guess;
+- `atol::T = √eps(T)`: absolute tolerance;
+- `rtol::T = √eps(T)`: relative tolerance;
+- `neg_tol::T = eps(T)^(1 / 4)`: negative tolerance;
+- `max_eval::Int = -1`: maximum number of evaluations of the objective function (a negative number means unlimited);
+- `max_time::Float64 = 30.0`: maximum time limit in seconds;
+- `max_iter::Int = 10000`: maximum number of iterations;
+- `verbose::Int = 0`: if > 0, display iteration details every `verbose` iteration;
+- `Δk::T = T(1)`: initial value of the trust-region radius;
+- `η1::T = √√eps(T)`: successful iteration threshold;
+- `η2::T = T(0.9)`: very successful iteration threshold;
+- `γ::T = T(3)`: trust-region radius multiplier. Must satisfy `γ > 1`. The trust-region radius is updated as Δ := Δ*γ when the iteration is very successful and Δ := Δ/γ when the iteration is unsuccessful;
+- `reduce_TR::Bool = true`: see the explanation of the stopping criterion below;
+- `χ::F = NormLinf(1)`: norm used to define the trust-region;
+- `D::L = nothing`: diagonal quasi-Newton approximation used for the model φ. If nothing is provided and `reg_nlp.model` is not a diagonal quasi-Newton approximation, a spectral gradient approximation is used.
+
+The algorithm stops either when `√(ξₖ/νₖ) < atol + rtol*√(ξ₀/ν₀)` or when `ξₖ < 0` and `√(-ξₖ/νₖ) < neg_tol`, where ξₖ := f(xₖ) + h(xₖ) - φ(sₖ; xₖ) - ψ(sₖ; xₖ) and √(ξₖ/νₖ) is a stationarity measure.
+If `reduce_TR = true` (the default), ξₖ₁ := f(xₖ) + h(xₖ) - φ(sₖ₁; xₖ) - ψ(sₖ₁; xₖ) is used instead of ξₖ, where sₖ₁ is the Cauchy point.
+
+# Output
+The value returned is a `GenericExecutionStats`, see `SolverCore.jl`.
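A minimal end-to-end sketch of how this entry point is meant to be used (hedged: the package name `RegularizedOptimization`, the test problem `bpdn_model` from RegularizedProblems.jl and the regularizer `NormL0` from ProximalOperators.jl are assumptions chosen for illustration; any smooth `AbstractNLPModel` and any proximable `h` can be substituted):

    using RegularizedOptimization, RegularizedProblems, ProximalOperators

    model, _, _ = bpdn_model()               # smooth part f (assumed returned first)
    h = NormL0(0.1)                          # nonsmooth part h; λ = 0.1 is arbitrary
    reg_nlp = RegularizedNLPModel(model, h)  # the regularized problem f + h

    stats = TRDH(reg_nlp; verbose = 10, max_iter = 500)
    @show stats.status stats.solver_specific[:smooth_obj] stats.solver_specific[:nonsmooth_obj]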
+ +# Callback +$(callback_docstring) """ function TRDH( - nlp::AbstractDiagonalQNModel{R, S}, + nlp::AbstractDiagonalQNModel{T, V}, h, χ, - options::ROSolverOptions{R}; + options::ROSolverOptions{T}; kwargs..., -) where {R <: Real, S} +) where {T, V} kwargs_dict = Dict(kwargs...) + selected = pop!(kwargs_dict, :selected, 1:(nlp.meta.nvar)) x0 = pop!(kwargs_dict, :x0, nlp.meta.x0) - xk, k, outdict = TRDH( - x -> obj(nlp, x), - (g, x) -> grad!(nlp, x, g), - h, - hess_op(nlp, x0), - options, - x0; + reg_nlp = RegularizedNLPModel(nlp, h, selected) + stats = TRDH( + reg_nlp; + x = x0, + D = nlp.op, χ = χ, - l_bound = nlp.meta.lvar, - u_bound = nlp.meta.uvar, - kwargs..., + atol = options.ϵa, + rtol = options.ϵr, + neg_tol = options.neg_tol, + verbose = options.verbose, + max_iter = options.maxIter, + max_time = options.maxTime, + reduce_TR = options.reduce_TR, + Δk = options.Δk, + η1 = options.η1, + η2 = options.η2, + γ = options.γ, + kwargs_dict..., ) - sqrt_ξ_νInv = outdict[:sqrt_ξ_νInv] - stats = GenericExecutionStats(nlp) - set_status!(stats, outdict[:status]) - set_solution!(stats, xk) - set_objective!(stats, outdict[:fk] + outdict[:hk]) - set_residuals!(stats, zero(eltype(xk)), sqrt_ξ_νInv) - set_iter!(stats, k) - set_time!(stats, outdict[:elapsed_time]) - set_solver_specific!(stats, :radius, outdict[:radius]) - set_solver_specific!(stats, :Fhist, outdict[:Fhist]) - set_solver_specific!(stats, :Hhist, outdict[:Hhist]) - set_solver_specific!(stats, :NonSmooth, outdict[:NonSmooth]) - set_solver_specific!(stats, :SubsolverCounter, outdict[:Chist]) return stats end -# update l_bound_k and u_bound_k -function update_bounds!(l_bound_k, u_bound_k, is_subsolver, l_bound, u_bound, xk, Δ) - if is_subsolver - @. l_bound_k = max(xk - Δ, l_bound) - @. u_bound_k = min(xk + Δ, u_bound) - else - @. l_bound_k = max(-Δ, l_bound - xk) - @. u_bound_k = min(Δ, u_bound - xk) - end -end - function TRDH( f::F, ∇f!::G, @@ -105,266 +190,341 @@ function TRDH( selected::AbstractVector{<:Integer} = 1:length(x0), kwargs..., ) where {R <: Real, F, G, H, DQN <: AbstractDiagonalQuasiNewtonOperator, X} - start_time = time() - elapsed_time = 0.0 - ϵ = options.ϵa - ϵr = options.ϵr - Δk = options.Δk - neg_tol = options.neg_tol - verbose = options.verbose - maxIter = options.maxIter - maxTime = options.maxTime - η1 = options.η1 - η2 = options.η2 - γ = options.γ - α = options.α - β = options.β - reduce_TR = options.reduce_TR - - local l_bound, u_bound - has_bnds = false - kw_keys = keys(kwargs) - if :l_bound in kw_keys - l_bound = kwargs[:l_bound] - has_bnds = has_bnds || any(l_bound .!= R(-Inf)) - end - if :u_bound in kw_keys - u_bound = kwargs[:u_bound] - has_bnds = has_bnds || any(u_bound .!= R(Inf)) - end + nlp = NLPModel(x0, f, grad=∇f!) + reg_nlp = RegularizedNLPModel(nlp, h, selected) + stats = TRDH( + reg_nlp; + x = x0, + D = D, + χ = χ, + atol = options.ϵa, + rtol = options.ϵr, + neg_tol = options.neg_tol, + verbose = options.verbose, + max_iter = options.maxIter, + max_time = options.maxTime, + reduce_TR = options.reduce_TR, + Δk = options.Δk, + η1 = options.η1, + η2 = options.η2, + γ = options.γ, + kwargs..., + ) + return stats.solution, stats.iter, stats +end - if verbose == 0 - ptf = Inf - elseif verbose == 1 - ptf = round(maxIter / 10) - elseif verbose == 2 - ptf = round(maxIter / 100) - else - ptf = 1 +function TRDH(reg_nlp::AbstractRegularizedNLPModel{T, V}; kwargs...) where {T, V} + kwargs_dict = Dict(kwargs...) 
+ D = pop!(kwargs_dict, :D, nothing) + χ = pop!(kwargs_dict, :χ, NormLinf(one(T))) + solver = TRDHSolver(reg_nlp, D = D, χ = χ) + stats = RegularizedExecutionStats(reg_nlp) + solve!(solver, reg_nlp, stats; kwargs_dict...) + return stats +end + +function SolverCore.solve!( + solver::TRDHSolver{T, G, V}, + reg_nlp::AbstractRegularizedNLPModel{T, V}, + stats::GenericExecutionStats{T, V}; + callback = (args...) -> nothing, + x::V = reg_nlp.model.meta.x0, + atol::T = √eps(T), + rtol::T = √eps(T), + neg_tol::T = eps(T)^(1 / 4), + verbose::Int = 0, + max_iter::Int = 10000, + max_time::Float64 = 30.0, + max_eval::Int = -1, + reduce_TR::Bool = true, + Δk::T = T(1), + η1::T = √√eps(T), + η2::T = T(0.9), + γ::T = T(3), +) where {T, G, V} + reset!(stats) + + # Retrieve workspace + selected = reg_nlp.selected + h = reg_nlp.h + nlp = reg_nlp.model + + xk = solver.xk .= x + + # Make sure ψ has the correct shift + shift!(solver.ψ, xk) + + ∇fk = solver.∇fk + ∇fk⁻ = solver.∇fk⁻ + mν∇fk = solver.mν∇fk + D = solver.D + dk = solver.dk + ψ = solver.ψ + xkn = solver.xkn + s = solver.s + χ = solver.χ + has_bnds = solver.has_bnds + + if has_bnds + l_bound_m_x = solver.l_bound_m_x + u_bound_m_x = solver.u_bound_m_x + l_bound = solver.l_bound + u_bound = solver.u_bound end # initialize parameters - xk = copy(x0) - hk = h(xk[selected]) + improper = false + hk = @views h(xk[selected]) if hk == Inf verbose > 0 && @info "TRDH: finding initial guess where nonsmooth term is finite" - prox!(xk, h, x0, one(eltype(x0))) - hk = h(xk[selected]) + prox!(xk, h, xk, T(1)) + hk = @views h(xk[selected]) hk < Inf || error("prox computation must be erroneous") verbose > 0 && @debug "TRDH: found point where h has value" hk end - hk == -Inf && error("nonsmooth term is not proper") + improper = (hk == -Inf) + improper == true && @warn "TRDH: Improper term detected" + improper == true && return stats - xkn = similar(xk) - s = zero(xk) - l_bound_k = similar(xk) - u_bound_k = similar(xk) is_subsolver = h isa ShiftedProximableFunction # case TRDH is used as a subsolver + if is_subsolver - ψ = shifted(h, xk) - @assert !has_bnds - l_bound = copy(ψ.l) - u_bound = copy(ψ.u) - @. l_bound_k = max(xk - Δk, l_bound) - @. u_bound_k = min(xk + Δk, u_bound) - has_bnds = true - set_bounds!(ψ, l_bound_k, u_bound_k) - else - if has_bnds - @. l_bound_k = max(-Δk, l_bound - xk) - @. u_bound_k = min(Δk, u_bound - xk) - ψ = shifted(h, xk, l_bound_k, u_bound_k, selected) - else - ψ = shifted(h, xk, Δk, χ) - end + l_bound .= ψ.l + u_bound .= ψ.u end - Fobj_hist = zeros(maxIter) - Hobj_hist = zeros(maxIter) - Complex_hist = zeros(Int, maxIter) if verbose > 0 - #! format: off - if reduce_TR - @info @sprintf "%6s %8s %8s %7s %7s %8s %7s %7s %7s %7s %1s" "outer" "f(x)" "h(x)" "√(ξ1/ν)" "√ξ" "ρ" "Δ" "‖x‖" "‖s‖" "‖Dₖ‖" "TRDH" - else - @info @sprintf "%6s %8s %8s %7s %8s %7s %7s %7s %7s %1s" "outer" "f(x)" "h(x)" "√(ξ/ν)" "ρ" "Δ" "‖x‖" "‖s‖" "‖Dₖ‖" "TRDH" - end - #! 
format: off + @info log_header( + [:iter, :fx, :hx, :xi, :ρ, :Δ, :normx, :norms, :normD, :arrow], + [Int, T, T, T, T, T, T, T, T, Char], + hdr_override = Dict{Symbol, String}( # TODO: Add this as constant dict elsewhere + :fx => "f(x)", + :hx => "h(x)", + :xi => "√(ξ/ν)", + :normx => "‖x‖", + :norms => "‖s‖", + :normD => "‖D‖", + :arrow => "TRDH", + ), + colsep = 1, + ) end - local ξ1 - local ξ - k = 0 + local ξ1::T + local ρk::T = zero(T) - fk = f(xk) - ∇fk = similar(xk) - ∇f!(∇fk, xk) - ∇fk⁻ = copy(∇fk) - DNorm = norm(D.d, Inf) - ν = (α * Δk)/(DNorm + one(R)) - mν∇fk = -ν .* ∇fk - sqrt_ξ_νInv = one(R) + α = 1 / eps(T) + β = 1 / eps(T) - optimal = false - tired = maxIter > 0 && k ≥ maxIter || elapsed_time > maxTime + fk = obj(nlp, xk) + grad!(nlp, xk, ∇fk) + ∇fk⁻ .= ∇fk - while !(optimal || tired) - k = k + 1 - elapsed_time = time() - start_time - Fobj_hist[k] = fk - Hobj_hist[k] = hk + dk .= D.d + DNorm = norm(D.d, Inf) - # model for prox-gradient step to update Δk if ||s|| is too small and ξ1 - φ1(d) = ∇fk' * d - mk1(d) = φ1(d) + ψ(d) + ν = (α * Δk)/(DNorm + one(T)) + sqrt_ξ_νInv = one(T) - if reduce_TR - prox!(s, ψ, mν∇fk, ν) - Complex_hist[k] += 1 - ξ1 = hk - mk1(s) + max(1, abs(hk)) * 10 * eps() - sqrt_ξ_νInv = ξ1 ≥ 0 ? sqrt(ξ1 / ν) : sqrt(-ξ1 / ν) + @. mν∇fk = -ν * ∇fk - if ξ1 ≥ 0 && k == 1 - ϵ += ϵr * sqrt_ξ_νInv # make stopping test absolute and relative - end + set_iter!(stats, 0) + start_time = time() + set_time!(stats, 0.0) + set_objective!(stats, fk + hk) + set_solver_specific!(stats, :smooth_obj, fk) + set_solver_specific!(stats, :nonsmooth_obj, hk) + + # models + φ1 = let ∇fk = ∇fk + d -> dot(∇fk, d) + end + mk1 = let ψ = ψ + d -> φ1(d) + ψ(d)::T + end - if (ξ1 < 0 && sqrt_ξ_νInv ≤ neg_tol) || (ξ1 ≥ 0 && sqrt_ξ_νInv < ϵ) - # the current xk is approximately first-order stationary - optimal = true - continue + φ = let ∇fk = ∇fk, dk = dk + d -> begin + result = zero(T) + n = length(d) + @inbounds for i = 1:n + result += d[i]^2 * dk[i] / 2 + ∇fk[i] * d[i] end - - ξ1 > 0 || error("TR: first prox-gradient step should produce a decrease but ξ1 = $(ξ1)") - end - - Δ_effective = reduce_TR ? min(β * χ(s), Δk) : Δk - # update radius - if has_bnds - update_bounds!(l_bound_k, u_bound_k, is_subsolver, l_bound, u_bound, xk, Δ_effective) - set_bounds!(ψ, l_bound_k, u_bound_k) - else - set_radius!(ψ, Δ_effective) + result end + end + mk = let ψ = ψ + d -> φ(d) + ψ(d)::T + end - # model with diagonal hessian - φ(d) = ∇fk' * d + (d' * (D.d .* d)) / 2 - mk(d) = φ(d) + ψ(d) + if reduce_TR + prox!(s, ψ, mν∇fk, ν) + mks = mk1(s) - iprox!(s, ψ, ∇fk, D) + ξ1 = hk - mks + max(1, abs(hk)) * 10 * eps() + sqrt_ξ_νInv = ξ1 ≥ 0 ? sqrt(ξ1 / ν) : sqrt(-ξ1 / ν) + solved = (ξ1 < 0 && sqrt_ξ_νInv ≤ neg_tol) || (ξ1 ≥ 0 && sqrt_ξ_νInv ≤ atol) + (ξ1 < 0 && sqrt_ξ_νInv > neg_tol) && + error("TR: prox-gradient step should produce a decrease but ξ = $(ξ)") + atol += rtol * sqrt_ξ_νInv # make stopping test absolute and relative + end - sNorm = χ(s) - xkn .= xk .+ s - fkn = f(xkn) - hkn = h(xkn[selected]) - hkn == -Inf && error("nonsmooth term is not proper") + Δ_effective = reduce_TR ? 
min(β * χ(s), Δk) : Δk - Δobj = fk + hk - (fkn + hkn) + max(1, abs(fk + hk)) * 10 * eps() - ξ = hk - mk(s) + max(1, abs(hk)) * 10 * eps() + # update radius + if has_bnds + update_bounds!(l_bound_m_x, u_bound_m_x, is_subsolver, l_bound, u_bound, xk, Δ_effective) + set_bounds!(ψ, l_bound_m_x, u_bound_m_x) + else + set_radius!(ψ, Δ_effective) + end - if !reduce_TR + iprox!(s, ψ, ∇fk, dk) + ξ = hk - mk(s) + max(1, abs(hk)) * 10 * eps() + sNorm = χ(s) - sqrt_ξ_νInv = ξ ≥ 0 ? sqrt(ξ / ν) : sqrt(-ξ / ν) - if ξ ≥ 0 && k == 1 - ϵ += ϵr * sqrt_ξ_νInv # make stopping test absolute and relative - end + if !reduce_TR + sqrt_ξ_νInv = ξ ≥ 0 ? sqrt(ξ / ν) : sqrt(-ξ / ν) + solved = (ξ < 0 && sqrt_ξ_νInv ≤ neg_tol) || (ξ ≥ 0 && sqrt_ξ_νInv < atol) + (ξ < 0 && sqrt_ξ_νInv > neg_tol) && + error("TRDH: prox-gradient step should produce a decrease but ξ = $(ξ)") + atol += rtol * sqrt_ξ_νInv # make stopping test absolute and relative #TODO : this is redundant code with the other case of the test. + end - if (ξ < 0 && sqrt_ξ_νInv ≤ neg_tol) || (ξ ≥ 0 && sqrt_ξ_νInv < ϵ) - # the current xk is approximately first-order stationary - optimal = true - continue - end - end + set_status!( + stats, + get_status( + reg_nlp, + elapsed_time = stats.elapsed_time, + iter = stats.iter, + optimal = solved, + improper = improper, + max_eval = max_eval, + max_time = max_time, + max_iter = max_iter, + ), + ) - if (ξ ≤ 0 || isnan(ξ)) - error("TRDH: failed to compute a step: ξ = $ξ") - end + callback(nlp, solver, stats) - ρk = Δobj / ξ + done = stats.status != :unknown - TR_stat = (η2 ≤ ρk < Inf) ? "↗" : (ρk < η1 ? "↘" : "=") + while !done + xkn .= xk .+ s + fkn = obj(nlp, xkn) + hkn = @views h(xkn[selected]) - if (verbose > 0) && (k % ptf == 0) - #! format: off - if reduce_TR - @info @sprintf "%6d %8.1e %8.1e %7.1e %7.1e %8.1e %7.1e %7.1e %7.1e %7.1e %1s" k fk hk sqrt_ξ_νInv sqrt(ξ) ρk Δk χ(xk) sNorm norm(D.d) TR_stat - else - @info @sprintf "%6d %8.1e %8.1e %7.1e %8.1e %7.1e %7.1e %7.1e %7.1e %1s" k fk hk sqrt_ξ_νInv ρk Δk χ(xk) sNorm norm(D.d) TR_stat - end - #! format: on - end + Δobj = fk + hk - (fkn + hkn) + max(1, abs(fk + hk)) * 10 * eps() + ρk = Δobj / ξ - if η2 ≤ ρk < Inf - Δk = max(Δk, γ * sNorm) - !has_bnds && set_radius!(ψ, Δk) - end + verbose > 0 && + stats.iter % verbose == 0 && + @info log_row( + Any[ + stats.iter, + fk, + hk, + sqrt_ξ_νInv, + ρk, + Δk, + χ(xk), + sNorm, + norm(D.d), + (η2 ≤ ρk < Inf) ? "↗" : (ρk < η1 ? "↘" : "="), + ], + colsep = 1, + ) if η1 ≤ ρk < Inf xk .= xkn - has_bnds && update_bounds!(l_bound_k, u_bound_k, is_subsolver, l_bound, u_bound, xk, Δk) - has_bnds && set_bounds!(ψ, l_bound_k, u_bound_k) - #update functions + if has_bnds + update_bounds!(l_bound_m_x, u_bound_m_x, is_subsolver, l_bound, u_bound, xk, Δk) + has_bnds && set_bounds!(ψ, l_bound_m_x, u_bound_m_x) + end fk = fkn hk = hkn shift!(ψ, xk) - ∇f!(∇fk, xk) - push!(D, s, ∇fk - ∇fk⁻) # update QN operator + grad!(nlp, xk, ∇fk) + @. ∇fk⁻ = ∇fk - ∇fk⁻ + push!(D, s, ∇fk⁻) # update QN operator + dk .= D.d DNorm = norm(D.d, Inf) ∇fk⁻ .= ∇fk end + if η2 ≤ ρk < Inf + Δk = max(Δk, γ * sNorm) + !has_bnds && set_radius!(ψ, Δk) + end + if ρk < η1 || ρk == Inf Δk = Δk / 2 - has_bnds && update_bounds!(l_bound_k, u_bound_k, is_subsolver, l_bound, u_bound, xk, Δk) - has_bnds ? set_bounds!(ψ, l_bound_k, u_bound_k) : set_radius!(ψ, Δk) + if has_bnds + update_bounds!(l_bound_m_x, u_bound_m_x, is_subsolver, l_bound, u_bound, xk, Δ_effective) + set_bounds!(ψ, l_bound_m_x, u_bound_m_x) + else + set_radius!(ψ, Δk) + end end - ν = reduce_TR ? 
(α * Δk)/(DNorm + one(R)) : α / (DNorm + one(R)) + set_objective!(stats, fk + hk) + set_solver_specific!(stats, :smooth_obj, fk) + set_solver_specific!(stats, :nonsmooth_obj, hk) + set_iter!(stats, stats.iter + 1) + set_time!(stats, time() - start_time) + + ν = reduce_TR ? (α * Δk)/(DNorm + one(T)) : α / (DNorm + one(T)) mν∇fk .= -ν .* ∇fk - tired = k ≥ maxIter || elapsed_time > maxTime - end + if reduce_TR + prox!(s, ψ, mν∇fk, ν) + ξ1 = hk - mk1(s) + max(1, abs(hk)) * 10 * eps() + sqrt_ξ_νInv = ξ1 ≥ 0 ? sqrt(ξ1 / ν) : sqrt(-ξ1 / ν) + solved = (ξ1 < 0 && sqrt_ξ_νInv ≤ neg_tol) || (ξ1 ≥ 0 && sqrt_ξ_νInv < atol) + (ξ1 < 0 && sqrt_ξ_νInv > neg_tol) && + error("TRDH: prox-gradient step should produce a decrease but ξ = $(ξ)") + end - if verbose > 0 - if k == 1 - @info @sprintf "%6d %8s %8.1e %8.1e" k "" fk hk - elseif optimal - #! format: off - if reduce_TR - @info @sprintf "%6d %8.1e %8.1e %7.1e %7.1e %8s %7.1e %7.1e %7.1e %7.1e" k fk hk sqrt_ξ_νInv sqrt(ξ1) "" Δk χ(xk) χ(s) norm(D.d) - #! format: on - @info "TRDH: terminating with √(ξ1/ν) = $(sqrt_ξ_νInv)" - else - #! format: off - @info @sprintf "%6d %8.1e %8.1e %7.1e %8s %7.1e %7.1e %7.1e %7.1e" k fk hk sqrt_ξ_νInv "" Δk χ(xk) χ(s) norm(D.d) - #! format: on - @info "TRDH: terminating with √(ξ/ν) = $(sqrt_ξ_νInv)" - end + iprox!(s, ψ, ∇fk, dk) + + sNorm = χ(s) + + if !reduce_TR + ξ = hk - mk(s) + max(1, abs(hk)) * 10 * eps() + sqrt_ξ_νInv = ξ ≥ 0 ? sqrt(ξ / ν) : sqrt(-ξ / ν) + solved = (ξ < 0 && sqrt_ξ_νInv ≤ neg_tol) || (ξ ≥ 0 && sqrt_ξ_νInv < atol) + (ξ < 0 && sqrt_ξ_νInv > neg_tol) && + error("TRDH: prox-gradient step should produce a decrease but ξ = $(ξ)") end - end - !reduce_TR && (ξ1 = ξ) # for output dict + set_status!( + stats, + get_status( + reg_nlp, + elapsed_time = stats.elapsed_time, + iter = stats.iter, + optimal = solved, + improper = improper, + max_eval = max_eval, + max_time = max_time, + max_iter = max_iter, + ), + ) + + callback(nlp, solver, stats) + + done = stats.status != :unknown + end - status = if optimal - :first_order - elseif elapsed_time > maxTime - :max_time - elseif tired - :max_iter - else - :exception + if verbose > 0 && stats.status == :first_order + @info log_row( + Any[stats.iter, fk, hk, sqrt_ξ_νInv, ρk, Δk, χ(xk), sNorm, norm(D.d), ""], + colsep = 1, + ) + @info "TRDH: terminating with √(ξ/ν) = $(sqrt_ξ_νInv)" end - outdict = Dict( - :Fhist => Fobj_hist[1:k], - :Hhist => Hobj_hist[1:k], - :Chist => Complex_hist[1:k], - :NonSmooth => h, - :status => status, - :fk => fk, - :hk => hk, - :sqrt_ξ_νInv => sqrt_ξ_νInv, - :elapsed_time => elapsed_time, - :radius => Δk, - ) - return xk, k, outdict + set_solution!(stats, xk) + + return stats end diff --git a/src/TR_alg.jl b/src/TR_alg.jl index cf16c827..6818a731 100644 --- a/src/TR_alg.jl +++ b/src/TR_alg.jl @@ -1,13 +1,99 @@ -export TR +export TR, TRSolver, solve! + +import SolverCore.solve! 
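Both the TRDH `solve!` above and the TR `solve!` below invoke `callback(nlp, solver, stats)` once before the main loop and then after every iteration, and the loop exits as soon as `stats.status != :unknown`. A minimal sketch of a user callback that stops the solver early (`set_status!` and the `:user` status follow the SolverCore.jl convention; the threshold and the name `early_stop` are illustrative, and `reg_nlp` is the problem from the earlier sketch):

    early_stop(nlp, solver, stats) = stats.objective < 1e-3 && set_status!(stats, :user)

    stats = TRDH(reg_nlp; callback = early_stop)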
+
+mutable struct TRSolver{
+  T <: Real,
+  G <: ShiftedProximableFunction,
+  V <: AbstractVector{T},
+  N,
+  ST <: AbstractOptimizationSolver,
+  PB <: AbstractRegularizedNLPModel,
+} <: AbstractOptimizationSolver
+  xk::V
+  ∇fk::V
+  ∇fk⁻::V
+  mν∇fk::V
+  ψ::G
+  χ::N
+  xkn::V
+  s::V
+  has_bnds::Bool
+  l_bound::V
+  u_bound::V
+  l_bound_m_x::V
+  u_bound_m_x::V
+  subsolver::ST
+  subpb::PB
+  substats::GenericExecutionStats{T, V, V, T}
+end
+
+function TRSolver(
+  reg_nlp::AbstractRegularizedNLPModel{T, V};
+  χ::X = NormLinf(one(T)),
+  subsolver = R2Solver,
+) where {T, V, X}
+  x0 = reg_nlp.model.meta.x0
+  l_bound = reg_nlp.model.meta.lvar
+  u_bound = reg_nlp.model.meta.uvar
+
+  xk = similar(x0)
+  ∇fk = similar(x0)
+  ∇fk⁻ = similar(x0)
+  mν∇fk = similar(x0)
+  xkn = similar(x0)
+  s = similar(x0)
+  has_bnds = any(l_bound .!= T(-Inf)) || any(u_bound .!= T(Inf))
+  if has_bnds || subsolver == TRDHSolver
+    l_bound_m_x = similar(xk)
+    u_bound_m_x = similar(xk)
+    @. l_bound_m_x = l_bound - x0
+    @. u_bound_m_x = u_bound - x0
+  else
+    l_bound_m_x = similar(xk, 0)
+    u_bound_m_x = similar(xk, 0)
+  end
+
+  ψ =
+    has_bnds || subsolver == TRDHSolver ?
+    shifted(reg_nlp.h, xk, l_bound_m_x, u_bound_m_x, reg_nlp.selected) :
+    shifted(reg_nlp.h, xk, T(1), χ)
+
+  Bk = isa(reg_nlp.model, QuasiNewtonModel) ? hess_op(reg_nlp.model, xk) : hess_op!(reg_nlp.model, xk, similar(xk))
+  sub_nlp = R2NModel(Bk, ∇fk, zero(T), x0) #FIXME
+  subpb = RegularizedNLPModel(sub_nlp, ψ)
+  substats = RegularizedExecutionStats(subpb)
+  subsolver = subsolver(subpb)
+
+  return TRSolver(
+    xk,
+    ∇fk,
+    ∇fk⁻,
+    mν∇fk,
+    ψ,
+    χ,
+    xkn,
+    s,
+    has_bnds,
+    l_bound,
+    u_bound,
+    l_bound_m_x,
+    u_bound_m_x,
+    subsolver,
+    subpb,
+    substats,
+  )
+end
"""
+    TR(reg_nlp; kwargs...)
    TR(nlp, h, χ, options; kwargs...)

A trust-region method for the problem

    min f(x) + h(x)

-where f: ℝⁿ → ℝ has a Lipschitz-continuous Jacobian, and h: ℝⁿ → ℝ is
+where f: ℝⁿ → ℝ has a Lipschitz-continuous gradient, and h: ℝⁿ → ℝ is
lower semi-continuous and proper.

About each iterate xₖ, a step sₖ is computed as an approximate solution of

    min  φ(s; xₖ) + ψ(s; xₖ)  subject to  ‖s‖ ≤ Δₖ

where φ(s ; xₖ) = f(xₖ) + ∇f(xₖ)ᵀs + ½ sᵀ Bₖ s  is a quadratic approximation of f about xₖ,
ψ(s; xₖ) = h(xₖ + s), ‖⋅‖ is a user-defined norm and Δₖ > 0 is the trust-region radius.
-The subproblem is solved inexactly by way of a first-order method such as the proximal-gradient
-method or the quadratic regularization method.
-### Arguments
+For advanced usage, first define a solver "TRSolver" to preallocate the memory used in the algorithm, and then call `solve!`:
-* `nlp::AbstractNLPModel`: a smooth optimization problem
-* `h`: a regularizer such as those defined in ProximalOperators
-* `χ`: a norm used to define the trust region in the form of a regularizer
-* `options::ROSolverOptions`: a structure containing algorithmic parameters
+    solver = TRSolver(reg_nlp; χ = NormLinf(1), subsolver = R2Solver)
+    solve!(solver, reg_nlp)
-The objective, gradient and Hessian of `nlp` will be accessed.
-The Hessian is accessed as an abstract operator and need not be the exact Hessian.
+    stats = RegularizedExecutionStats(reg_nlp)
+    solve!(solver, reg_nlp, stats)
-### Keyword arguments
+# Arguments
+* `reg_nlp::AbstractRegularizedNLPModel{T, V}`: the problem to solve, see `RegularizedProblems.jl`, `NLPModels.jl`.
-* `x0::AbstractVector`: an initial guess (default: `nlp.meta.x0`)
-* `subsolver_logger::AbstractLogger`: a logger to pass to the subproblem solver (default: the null logger)
-* `subsolver`: the procedure used to compute a step (`PG`, `R2` or `TRDH`)
-* `subsolver_options::ROSolverOptions`: default options to pass to the subsolver (default: all defaut options)
-* `selected::AbstractVector{<:Integer}`: (default `1:f.meta.nvar`).
+# Keyword arguments
+- `x::V = nlp.meta.x0`: the initial guess;
+- `atol::T = √eps(T)`: absolute tolerance;
+- `rtol::T = √eps(T)`: relative tolerance;
+- `neg_tol::T = eps(T)^(1 / 4)`: negative tolerance (see stopping conditions below);
+- `max_eval::Int = -1`: maximum number of evaluations of the objective function (a negative number means unlimited);
+- `max_time::Float64 = 30.0`: maximum time limit in seconds;
+- `max_iter::Int = 10000`: maximum number of iterations;
+- `verbose::Int = 0`: if > 0, display iteration details every `verbose` iteration;
+- `Δk::T = T(1)`: initial value of the trust-region radius;
+- `η1::T = √√eps(T)`: successful iteration threshold;
+- `η2::T = T(0.9)`: very successful iteration threshold;
+- `γ::T = T(3)`: trust-region radius multiplier. Must satisfy `γ > 1`. The trust-region radius is updated as Δ := Δ*γ when the iteration is very successful and Δ := Δ/γ when the iteration is unsuccessful;
+- `χ::F = NormLinf(1)`: norm used to define the trust-region;
+- `subsolver::S = R2Solver`: subsolver used to solve the subproblem that appears at each iteration.
-### Return values
+The algorithm stops either when `√(ξₖ/νₖ) < atol + rtol*√(ξ₀/ν₀)` or when `ξₖ < 0` and `√(-ξₖ/νₖ) < neg_tol`, where ξₖ := f(xₖ) + h(xₖ) - φ(sₖ; xₖ) - ψ(sₖ; xₖ) and √(ξₖ/νₖ) is a stationarity measure.
-* `xk`: the final iterate
-* `Fobj_hist`: an array with the history of values of the smooth objective
-* `Hobj_hist`: an array with the history of values of the nonsmooth objective
-* `Complex_hist`: an array with the history of number of inner iterations.
+# Output
+The value returned is a `GenericExecutionStats`, see `SolverCore.jl`.
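A short usage sketch mirroring the TRDH one above (hedged: `LBFGSModel` is assumed to come from NLPModelsModifiers.jl, and `model` and `h` are the smooth and nonsmooth parts built in the earlier sketch; a quasi-Newton wrapper supplies the Hessian operator Bₖ that TR requires):

    using NLPModelsModifiers  # LBFGSModel: limited-memory quasi-Newton Hessian approximation

    lbfgs_reg = RegularizedNLPModel(LBFGSModel(model), h)

    stats_r2   = TR(lbfgs_reg)                          # default subsolver: R2Solver
    stats_trdh = TR(lbfgs_reg; subsolver = TRDHSolver)  # diagonal quasi-Newton subsolver
    @show stats_r2.status stats_trdh.status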
+ +# Callback +$(callback_docstring) """ function TR( f::AbstractNLPModel, @@ -50,160 +144,232 @@ function TR( χ::X, options::ROSolverOptions{R}; x0::AbstractVector{R} = f.meta.x0, - subsolver_logger::Logging.AbstractLogger = Logging.NullLogger(), - subsolver = R2, subsolver_options = ROSolverOptions(ϵa = options.ϵa), selected::AbstractVector{<:Integer} = 1:(f.meta.nvar), + kwargs..., ) where {H, X, R} - start_time = time() - elapsed_time = 0.0 - # initialize passed options - ϵ = options.ϵa - ϵ_subsolver = subsolver_options.ϵa - ϵr = options.ϵr - Δk = options.Δk - verbose = options.verbose - maxIter = options.maxIter - maxTime = options.maxTime - η1 = options.η1 - η2 = options.η2 - γ = options.γ - α = options.α - θ = options.θ - β = options.β - - # store initial values of the subsolver_options fields that will be modified - ν_subsolver = subsolver_options.ν - ϵa_subsolver = subsolver_options.ϵa - Δk_subsolver = subsolver_options.Δk - - local l_bound, u_bound - if has_bounds(f) || subsolver == TRDH - l_bound = f.meta.lvar - u_bound = f.meta.uvar - end + reg_nlp = RegularizedNLPModel(f, h, selected) + stats = TR( + reg_nlp; + x = x0, + atol = options.ϵa, + sub_atol = subsolver_options.ϵa, + rtol = options.ϵr, + neg_tol = options.neg_tol, + verbose = options.verbose, + max_iter = options.maxIter, + max_time = options.maxTime, + Δk = options.Δk, + η1 = options.η1, + η2 = options.η2, + γ = options.γ, + kwargs..., + ) + return stats +end - if verbose == 0 - ptf = Inf - elseif verbose == 1 - ptf = round(maxIter / 10) - elseif verbose == 2 - ptf = round(maxIter / 100) +function TR(reg_nlp::AbstractRegularizedNLPModel{T, V}; kwargs...) where {T, V} + kwargs_dict = Dict(kwargs...) + subsolver = pop!(kwargs_dict, :subsolver, R2Solver) + χ = pop!(kwargs_dict, :χ, NormLinf(one(T))) + solver = TRSolver(reg_nlp, subsolver = subsolver, χ = χ) + stats = RegularizedExecutionStats(reg_nlp) + solve!(solver, reg_nlp, stats; kwargs_dict...) + return stats +end + +function SolverCore.solve!( + solver::TRSolver{T, G, V}, + reg_nlp::AbstractRegularizedNLPModel{T, V}, + stats::GenericExecutionStats{T, V}; + callback = (args...) -> nothing, + x::V = reg_nlp.model.meta.x0, + atol::T = √eps(T), + sub_atol::T = √eps(T), + rtol::T = √eps(T), + neg_tol::T = eps(T)^(1 / 4), + verbose::Int = 0, + subsolver_logger::Logging.AbstractLogger = Logging.SimpleLogger(), + max_iter::Int = 10000, + max_time::Float64 = 30.0, + max_eval::Int = -1, + Δk::T = T(1), + η1::T = √√eps(T), + η2::T = T(0.9), + γ::T = T(3), +) where {T, G, V} + reset!(stats) + + # Retrieve workspace + selected = reg_nlp.selected + h = reg_nlp.h + nlp = reg_nlp.model + + xk = solver.xk .= x + + # Make sure ψ has the correct shift + shift!(solver.ψ, xk) + + ∇fk = solver.∇fk + ∇fk⁻ = solver.∇fk⁻ + mν∇fk = solver.mν∇fk + ψ = solver.ψ + xkn = solver.xkn + s = solver.s + χ = solver.χ + has_bnds = solver.has_bnds + + if has_bnds || isa(solver.subsolver, TRDHSolver) #TODO elsewhere ? 
+ l_bound_m_x, u_bound_m_x = solver.l_bound_m_x, solver.u_bound_m_x + l_bound, u_bound = solver.l_bound, solver.u_bound + update_bounds!(l_bound_m_x, u_bound_m_x, false, l_bound, u_bound, xk, Δk) + set_bounds!(ψ, l_bound_m_x, u_bound_m_x) + set_bounds!(solver.subsolver.ψ, l_bound_m_x, u_bound_m_x) else - ptf = 1 + set_radius!(ψ, Δk) + set_radius!(solver.subsolver.ψ, Δk) end # initialize parameters - xk = copy(x0) - hk = h(xk[selected]) + improper = false + hk = @views h(xk[selected]) if hk == Inf verbose > 0 && @info "TR: finding initial guess where nonsmooth term is finite" - prox!(xk, h, x0, one(eltype(x0))) - hk = h(xk[selected]) + prox!(xk, h, xk, T(1)) + hk = @views h(xk[selected]) hk < Inf || error("prox computation must be erroneous") verbose > 0 && @debug "TR: found point where h has value" hk end - hk == -Inf && error("nonsmooth term is not proper") - - xkn = similar(xk) - s = zero(xk) - ψ = - (has_bounds(f) || subsolver == TRDH) ? - shifted(h, xk, max.(-Δk, l_bound - xk), min.(Δk, u_bound - xk), selected) : - shifted(h, xk, Δk, χ) + improper = (hk == -Inf) + improper == true && @warn "TR: Improper term detected" - Fobj_hist = zeros(maxIter) - Hobj_hist = zeros(maxIter) - Complex_hist = zeros(Int, maxIter) if verbose > 0 - #! format: off - @info @sprintf "%6s %8s %8s %8s %7s %7s %8s %7s %7s %7s %7s %1s" "outer" "inner" "f(x)" "h(x)" "√(ξ1/ν)" "√ξ" "ρ" "Δ" "‖x‖" "‖s‖" "‖Bₖ‖" "TR" - #! format: on + @info log_header( + [:outer, :inner, :fx, :hx, :xi, :ρ, :Δ, :normx, :norms, :normB, :arrow], + [Int, Int, T, T, T, T, T, T, T, T, Char], + hdr_override = Dict{Symbol, String}( # TODO: Add this as constant dict elsewhere + :fx => "f(x)", + :hx => "h(x)", + :xi => "√(ξ1/ν)", + :normx => "‖x‖", + :norms => "‖s‖", + :normB => "‖B‖", + :arrow => "TR", + ), + colsep = 1, + ) end - local ξ1 - k = 0 + local ξ1::T + local ρk = zero(T) + + α = 1 / eps(T) + β = 1 / eps(T) - fk = obj(f, xk) - ∇fk = grad(f, xk) - ∇fk⁻ = copy(∇fk) + fk = obj(nlp, xk) + grad!(nlp, xk, ∇fk) + ∇fk⁻ .= ∇fk - quasiNewtTest = isa(f, QuasiNewtonModel) - Bk = hess_op(f, xk) + quasiNewtTest = isa(nlp, QuasiNewtonModel) + λmax = T(1) - λmax, found_λ = opnorm(Bk) + λmax, found_λ = opnorm(solver.subpb.model.B) found_λ || error("operator norm computation failed") - α⁻¹Δ⁻¹ = 1 / (α * Δk) - ν = 1 / (α⁻¹Δ⁻¹ + λmax * (α⁻¹Δ⁻¹ + 1)) - sqrt_ξ1_νInv = one(R) - - optimal = false - tired = k ≥ maxIter || elapsed_time > maxTime - - while !(optimal || tired) - k = k + 1 - elapsed_time = time() - start_time - Fobj_hist[k] = fk - Hobj_hist[k] = hk - - # model for first prox-gradient step and ξ1 - φ1(d) = ∇fk' * d - mk1(d) = φ1(d) + ψ(d) - - # model for subsequent prox-gradient steps and ξ - φ(d) = (d' * (Bk * d)) / 2 + ∇fk' * d - - ∇φ!(g, d) = begin - mul!(g, Bk, d) - g .+= ∇fk - g - end - mk(d) = φ(d) + ψ(d) + ν₁ = α * Δk / (1 + λmax * (α * Δk + 1)) + sqrt_ξ1_νInv = one(T) - # Take first proximal gradient step s1 and see if current xk is nearly stationary. - # s1 minimizes φ1(s) + ‖s‖² / 2 / ν + ψ(s) ⟺ s1 ∈ prox{νψ}(-ν∇φ1(0)). - prox!(s, ψ, -ν * ∇fk, ν) - ξ1 = hk - mk1(s) + max(1, abs(hk)) * 10 * eps() - ξ1 > 0 || error("TR: first prox-gradient step should produce a decrease but ξ1 = $(ξ1)") - sqrt_ξ1_νInv = sqrt(ξ1 / ν) + @. 
mν∇fk = -ν₁ * ∇fk - if ξ1 ≥ 0 && k == 1 - ϵ_increment = ϵr * sqrt_ξ1_νInv - ϵ += ϵ_increment # make stopping test absolute and relative - ϵ_subsolver += ϵ_increment - end + set_iter!(stats, 0) + start_time = time() + set_time!(stats, 0.0) + set_objective!(stats, fk + hk) + set_solver_specific!(stats, :smooth_obj, fk) + set_solver_specific!(stats, :nonsmooth_obj, hk) - if sqrt_ξ1_νInv < ϵ - # the current xk is approximately first-order stationary - optimal = true - continue - end + # models + φ1 = let ∇fk = ∇fk + d -> dot(∇fk, d) + end + mk1 = let ψ = ψ, φ1 = φ1 + d -> φ1(d) + ψ(d) + end + + mk = let ψ = ψ, solver = solver + d -> obj(solver.subpb.model, d) + ψ(d)::T + end - subsolver_options.ϵa = k == 1 ? 1.0e-5 : max(ϵ_subsolver, min(1e-2, sqrt_ξ1_νInv)) + prox!(s, ψ, mν∇fk, ν₁) + ξ1 = hk - mk1(s) + max(1, abs(hk)) * 10 * eps() + ξ1 > 0 || error("TR: first prox-gradient step should produce a decrease but ξ1 = $(ξ1)") + sqrt_ξ1_νInv = sqrt(ξ1 / ν₁) + + solved = (ξ1 < 0 && sqrt_ξ1_νInv ≤ neg_tol) || (ξ1 ≥ 0 && sqrt_ξ1_νInv ≤ atol) + (ξ1 < 0 && sqrt_ξ1_νInv > neg_tol) && + error("TR: prox-gradient step should produce a decrease but ξ1 = $(ξ1)") + atol += rtol * sqrt_ξ1_νInv # make stopping test absolute and relative + sub_atol += rtol * sqrt_ξ1_νInv + + set_status!( + stats, + get_status( + reg_nlp, + elapsed_time = stats.elapsed_time, + iter = stats.iter, + optimal = solved, + improper = improper, + max_eval = max_eval, + max_time = max_time, + max_iter = max_iter, + ), + ) + + callback(nlp, solver, stats) + + done = stats.status != :unknown + + while !done + sub_atol = stats.iter == 0 ? 1e-5 : max(sub_atol, min(1e-2, sqrt_ξ1_νInv)) ∆_effective = min(β * χ(s), Δk) - (has_bounds(f) || subsolver == TRDH) ? - set_bounds!(ψ, max.(-∆_effective, l_bound - xk), min.(∆_effective, u_bound - xk)) : - set_radius!(ψ, ∆_effective) - subsolver_options.Δk = ∆_effective / 10 - subsolver_options.ν = ν - subsolver_args = subsolver == TRDH ? (SpectralGradient(1 / ν, f.meta.nvar),) : () - s, iter, outdict = with_logger(subsolver_logger) do - subsolver(φ, ∇φ!, ψ, subsolver_args..., subsolver_options, s) + + if has_bnds || isa(solver.subsolver, TRDHSolver) #TODO elsewhere ? + update_bounds!(l_bound_m_x, u_bound_m_x, false, l_bound, u_bound, xk, Δk) + set_bounds!(ψ, l_bound_m_x, u_bound_m_x) + set_bounds!(solver.subsolver.ψ, l_bound_m_x, u_bound_m_x) + else + set_radius!(solver.subsolver.ψ, ∆_effective) + set_radius!(ψ, ∆_effective) + end + with_logger(subsolver_logger) do + if isa(solver.subsolver, TRDHSolver) #FIXME + solver.subsolver.D.d[1] = 1/ν₁ + solve!( + solver.subsolver, + solver.subpb, + solver.substats; + x = s, + atol = stats.iter == 0 ? 1e-5 : max(sub_atol, min(1e-2, sqrt_ξ1_νInv)), + Δk = ∆_effective / 10, + ) + else + solve!( + solver.subsolver, + solver.subpb, + solver.substats; + x = s, + atol = stats.iter == 0 ? 
1e-5 : max(sub_atol, min(1e-2, sqrt_ξ1_νInv)), + ν = ν₁, + ) + end end - # restore initial values of subsolver_options here so that it is not modified - # if there is an error - subsolver_options.ν = ν_subsolver - subsolver_options.ϵa = ϵa_subsolver - subsolver_options.Δk = Δk_subsolver - Complex_hist[k] = sum(outdict[:Chist]) + s .= solver.substats.solution - sNorm = χ(s) xkn .= xk .+ s - fkn = obj(f, xkn) - hkn = h(xkn[selected]) - hkn == -Inf && error("nonsmooth term is not proper") + fkn = obj(nlp, xkn) + hkn = @views h(xkn[selected]) + sNorm = χ(s) Δobj = fk + hk - (fkn + hkn) + max(1, abs(fk + hk)) * 10 * eps() ξ = hk - mk(s) + max(1, abs(hk)) * 10 * eps() @@ -214,81 +380,109 @@ function TR( ρk = Δobj / ξ - TR_stat = (η2 ≤ ρk < Inf) ? "↗" : (ρk < η1 ? "↘" : "=") - - if (verbose > 0) && (k % ptf == 0) - #! format: off - @info @sprintf "%6d %8d %8.1e %8.1e %7.1e %7.1e %8.1e %7.1e %7.1e %7.1e %7.1e %1s" k iter fk hk sqrt_ξ1_νInv sqrt(ξ) ρk ∆_effective χ(xk) sNorm λmax TR_stat - #! format: on - end + verbose > 0 && + stats.iter % verbose == 0 && + @info log_row( + Any[ + stats.iter, + solver.substats.iter, + fk, + hk, + sqrt_ξ1_νInv, + ρk, + Δk, + χ(xk), + sNorm, + λmax, + (η2 ≤ ρk < Inf) ? '↗' : (ρk < η1 ? '↘' : '='), + ], + colsep = 1, + ) if η2 ≤ ρk < Inf Δk = max(Δk, γ * sNorm) - !(has_bounds(f) || subsolver == TRDH) && set_radius!(ψ, Δk) + if !(has_bnds || isa(solver.subsolver, TRDHSolver)) + set_radius!(ψ, Δk) + set_radius!(solver.subsolver.ψ, Δk) + end end if η1 ≤ ρk < Inf xk .= xkn - (has_bounds(f) || subsolver == TRDH) && - set_bounds!(ψ, max.(-Δk, l_bound - xk), min.(Δk, u_bound - xk)) - - #update functions + if has_bnds || isa(solver.subsolver, TRDHSolver) + update_bounds!(l_bound_m_x, u_bound_m_x, false, l_bound, u_bound, xk, Δk) + set_bounds!(ψ, l_bound_m_x, u_bound_m_x) + set_bounds!(solver.subsolver.ψ, l_bound_m_x, u_bound_m_x) + end fk = fkn hk = hkn + shift!(ψ, xk) - ∇fk = grad(f, xk) - # grad!(f, xk, ∇fk) + grad!(nlp, xk, ∇fk) + if quasiNewtTest - push!(f, s, ∇fk - ∇fk⁻) + @. ∇fk⁻ = ∇fk - ∇fk⁻ + push!(nlp, s, ∇fk⁻) # update QN operator end - Bk = hess_op(f, xk) - λmax, found_λ = opnorm(Bk) + + λmax, found_λ = opnorm(solver.subpb.model.B) found_λ || error("operator norm computation failed") + ∇fk⁻ .= ∇fk end if ρk < η1 || ρk == Inf Δk = Δk / 2 - (has_bounds(f) || subsolver == TRDH) ? - set_bounds!(ψ, max.(-Δk, l_bound - xk), min.(Δk, u_bound - xk)) : set_radius!(ψ, Δk) + if has_bnds || isa(solver.subsolver, TRDHSolver) + update_bounds!(l_bound_m_x, u_bound_m_x, false, l_bound, u_bound, xk, Δk) + set_bounds!(ψ, l_bound_m_x, u_bound_m_x) + set_bounds!(solver.subsolver.ψ, l_bound_m_x, u_bound_m_x) + else + set_radius!(ψ, Δk) + set_radius!(solver.subsolver.ψ, Δk) + end end - α⁻¹Δ⁻¹ = 1 / (α * Δk) - ν = 1 / (α⁻¹Δ⁻¹ + λmax * (α⁻¹Δ⁻¹ + 1)) - tired = k ≥ maxIter || elapsed_time > maxTime - end - if verbose > 0 - if k == 1 - @info @sprintf "%6d %8s %8.1e %8.1e" k "" fk hk - elseif optimal - #! format: off - @info @sprintf "%6d %8d %8.1e %8.1e %7.1e %7.1e %8s %7.1e %7.1e %7.1e %7.1e" k 1 fk hk sqrt_ξ1_νInv sqrt(ξ1) "" Δk χ(xk) χ(s) λmax - #! 
format: on - @info "TR: terminating with √(ξ1/ν) = $(sqrt_ξ1_νInv)" - end - end + set_objective!(stats, fk + hk) + set_solver_specific!(stats, :smooth_obj, fk) + set_solver_specific!(stats, :nonsmooth_obj, hk) + set_iter!(stats, stats.iter + 1) + set_time!(stats, time() - start_time) - status = if optimal - :first_order - elseif elapsed_time > maxTime - :max_time - elseif tired - :max_iter - else - :exception - end + ν₁ = α * Δk / (1 + λmax * (α * Δk + 1)) + @. mν∇fk = -ν₁ * ∇fk - stats = GenericExecutionStats(f) - set_status!(stats, status) - set_solution!(stats, xk) - set_objective!(stats, fk + hk) - set_residuals!(stats, zero(eltype(xk)), sqrt_ξ1_νInv) - set_iter!(stats, k) - set_time!(stats, elapsed_time) - set_solver_specific!(stats, :radius, Δk) - set_solver_specific!(stats, :Fhist, Fobj_hist[1:k]) - set_solver_specific!(stats, :Hhist, Hobj_hist[1:k]) - set_solver_specific!(stats, :NonSmooth, h) - set_solver_specific!(stats, :SubsolverCounter, Complex_hist[1:k]) - return stats + prox!(s, ψ, mν∇fk, ν₁) + ξ1 = hk - mk1(s) + max(1, abs(hk)) * 10 * eps() + sqrt_ξ1_νInv = sqrt(ξ1 / ν₁) + + solved = (ξ1 < 0 && sqrt_ξ1_νInv ≤ neg_tol) || (ξ1 ≥ 0 && sqrt_ξ1_νInv ≤ atol) + (ξ1 < 0 && sqrt_ξ1_νInv > neg_tol) && + error("TR: prox-gradient step should produce a decrease but ξ1 = $(ξ1)") + + set_status!( + stats, + get_status( + reg_nlp, + elapsed_time = stats.elapsed_time, + iter = stats.iter, + optimal = solved, + improper = improper, + max_eval = max_eval, + max_time = max_time, + max_iter = max_iter, + ), + ) + + callback(nlp, solver, stats) + + done = stats.status != :unknown + end + if verbose > 0 && stats.status == :first_order + @info log_row( + Any[stats.iter, solver.substats.iter, fk, hk, sqrt_ξ1_νInv, ρk, Δk, χ(xk), χ(s), λmax, ""], + colsep = 1, + ) + @info "TR: terminating with √(ξ1/ν) = $(sqrt_ξ1_νInv)" + end end diff --git a/test/runtests.jl b/test/runtests.jl index 1e8b28d4..1eddcad1 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -25,14 +25,7 @@ for (mod, mod_name) ∈ ((x -> x, "exact"), (LSR1Model, "lsr1"), (LBFGSModel, "l out = solver(mod(bpdn), h, args..., options, x0 = x0) @test typeof(out.solution) == typeof(bpdn.meta.x0) @test length(out.solution) == bpdn.meta.nvar - @test typeof(out.solver_specific[:Fhist]) == typeof(out.solution) - @test typeof(out.solver_specific[:Hhist]) == typeof(out.solution) - @test typeof(out.solver_specific[:SubsolverCounter]) == Array{Int, 1} @test typeof(out.dual_feas) == eltype(out.solution) - @test length(out.solver_specific[:Fhist]) == length(out.solver_specific[:Hhist]) - @test length(out.solver_specific[:Fhist]) == length(out.solver_specific[:SubsolverCounter]) - @test obj(bpdn, out.solution) == out.solver_specific[:Fhist][end] - @test h(out.solution) == out.solver_specific[:Hhist][end] @test out.status == :first_order end end @@ -66,15 +59,7 @@ for (mod, mod_name) ∈ ((LSR1Model, "lsr1"), (LBFGSModel, "lbfgs")) TR_out = TR(mod(bpdn), h, NormL2(1.0), options, x0 = x0) @test typeof(TR_out.solution) == typeof(bpdn.meta.x0) @test length(TR_out.solution) == bpdn.meta.nvar - @test typeof(TR_out.solver_specific[:Fhist]) == typeof(TR_out.solution) - @test typeof(TR_out.solver_specific[:Hhist]) == typeof(TR_out.solution) - @test typeof(TR_out.solver_specific[:SubsolverCounter]) == Array{Int, 1} @test typeof(TR_out.dual_feas) == eltype(TR_out.solution) - @test length(TR_out.solver_specific[:Fhist]) == length(TR_out.solver_specific[:Hhist]) - @test length(TR_out.solver_specific[:Fhist]) == - length(TR_out.solver_specific[:SubsolverCounter]) - 
@test obj(bpdn, TR_out.solution) == TR_out.solver_specific[:Fhist][end] - @test h(TR_out.solution) == TR_out.solver_specific[:Hhist][end] @test TR_out.status == :first_order end end diff --git a/test/test_allocs.jl b/test/test_allocs.jl index accf0ccc..90d67499 100644 --- a/test/test_allocs.jl +++ b/test/test_allocs.jl @@ -42,18 +42,21 @@ end # Test non allocating solve! @testset "allocs" begin for (h, h_name) ∈ ((NormL0(λ), "l0"),) - for (solver, solver_name) ∈ ((:R2Solver, "R2"), (:R2DHSolver, "R2DH"), (:R2NSolver, "R2N")) + for (solver, solver_name) ∈ ((:R2Solver, "R2"), (:R2DHSolver, "R2DH"), (:R2NSolver, "R2N"), (:TRDHSolver, "TRDH"), (:TRSolver, "TR")) @testset "$(solver_name)" begin - solver_name == "R2N" && continue #FIXME + (solver_name == "R2N" || solver_name == "TR") && continue #FIXME reg_nlp = RegularizedNLPModel(LBFGSModel(bpdn), h) solver = eval(solver)(reg_nlp) stats = RegularizedExecutionStats(reg_nlp) - solver_name == "R2" && - @test @wrappedallocs(solve!(solver, reg_nlp, stats, ν = 1.0, atol = 1e-6, rtol = 1e-6)) == - 0 + solver_name == "R2" && @test @wrappedallocs( + solve!(solver, reg_nlp, stats, ν = 1.0, atol = 1e-6, rtol = 1e-6) + ) == 0 solver_name == "R2DH" && @test @wrappedallocs( solve!(solver, reg_nlp, stats, σk = 1.0, atol = 1e-6, rtol = 1e-6) ) == 0 + solver_name == "TRDH" && @test @wrappedallocs( + solve!(solver, reg_nlp, stats, atol = 1e-6, rtol = 1e-6) + ) == 0 @test stats.status == :first_order end end diff --git a/test/test_bounds.jl b/test/test_bounds.jl index 14407d60..1b601b9a 100644 --- a/test/test_bounds.jl +++ b/test/test_bounds.jl @@ -1,5 +1,5 @@ const subsolver_options = deepcopy(options) -TR_TRDH(args...; kwargs...) = TR(args...; subsolver = TRDH, kwargs...) +TR_TRDH(args...; kwargs...) = TR(args...; subsolver = TRDHSolver, kwargs...) for (mod, mod_name) ∈ ((x -> x, "exact"), (LSR1Model, "lsr1"), (LBFGSModel, "lbfgs")) for (h, h_name) ∈ ((NormL0(λ), "l0"), (NormL1(λ), "l1")) @@ -15,14 +15,7 @@ for (mod, mod_name) ∈ ((x -> x, "exact"), (LSR1Model, "lsr1"), (LBFGSModel, "l out = solver(mod(bpdn2), h, args..., options; x0 = x0) @test typeof(out.solution) == typeof(bpdn2.meta.x0) @test length(out.solution) == bpdn2.meta.nvar - @test typeof(out.solver_specific[:Fhist]) == typeof(out.solution) - @test typeof(out.solver_specific[:Hhist]) == typeof(out.solution) - @test typeof(out.solver_specific[:SubsolverCounter]) == Array{Int, 1} @test typeof(out.dual_feas) == eltype(out.solution) - @test length(out.solver_specific[:Fhist]) == length(out.solver_specific[:Hhist]) - @test length(out.solver_specific[:Fhist]) == length(out.solver_specific[:SubsolverCounter]) - @test obj(bpdn2, out.solution) == out.solver_specific[:Fhist][end] - @test h(out.solution) == out.solver_specific[:Hhist][end] @test out.status == :first_order end end
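The `:Fhist`, `:Hhist` and `:SubsolverCounter` entries checked by the deleted assertions above no longer exist; the new solvers instead store the current `:smooth_obj` and `:nonsmooth_obj` in `stats.solver_specific` at every iteration, right before the callback fires. A sketch of how a test (or a user) could still accumulate per-iteration history through the callback (the names `fhist`, `hhist` and `record_history` are illustrative; `bpdn` and `h` are the problem and regularizer already defined in these tests):

    fhist = Float64[]
    hhist = Float64[]
    function record_history(nlp, solver, stats)
      push!(fhist, stats.solver_specific[:smooth_obj])
      push!(hhist, stats.solver_specific[:nonsmooth_obj])
    end

    out = TR(RegularizedNLPModel(LBFGSModel(bpdn), h); callback = record_history)
    # fhist and hhist now hold one entry per callback invocation (initial point plus each iteration)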