4 changes: 2 additions & 2 deletions src/R2_alg.jl
@@ -132,7 +132,7 @@ For advanced usage, first define a solver "R2Solver" to preallocate the memory u
solver = R2Solver(reg_nlp)
solve!(solver, reg_nlp)

-stats = GenericExecutionStats(reg_nlp.model)
+stats = GenericExecutionStats(reg_nlp)
solver = R2Solver(reg_nlp)
solve!(solver, reg_nlp, stats)

@@ -292,7 +292,7 @@ function R2(reg_nlp::AbstractRegularizedNLPModel; kwargs...)
kwargs_dict = Dict(kwargs...)
max_iter = pop!(kwargs_dict, :max_iter, 10000)
solver = R2Solver(reg_nlp, max_iter = max_iter)
-stats = GenericExecutionStats(reg_nlp.model)
+stats = GenericExecutionStats(reg_nlp.model) # TODO: change this to `stats = GenericExecutionStats(reg_nlp)` when FHist etc. is ruled out.
cb = pop!(
kwargs_dict,
:callback,
21 changes: 21 additions & 0 deletions src/utils.jl
@@ -1,3 +1,7 @@
export GenericExecutionStats

import SolverCore.GenericExecutionStats

# use Arpack to obtain largest eigenvalue in magnitude with a minimum of robustness
function LinearAlgebra.opnorm(B; kwargs...)
_, s, _ = tsvd(B)
@@ -20,3 +24,20 @@ ShiftedProximalOperators.iprox!(

LinearAlgebra.diag(op::AbstractDiagonalQuasiNewtonOperator) = copy(op.d)
LinearAlgebra.diag(op::SpectralGradient{T}) where {T} = zeros(T, op.nrow) .* op.d[1]

"""
GenericExecutionStats(reg_nlp :: AbstractRegularizedNLPModel{T, V})

Construct a GenericExecutionStats object from an AbstractRegularizedNLPModel.
More precisely, construct a GenericExecutionStats on the underlying NLPModel of reg_nlp and add three solver_specific entries, namely :smooth_obj, :nonsmooth_obj and :xi.
This reduces the number of allocations when calling solve!(..., reg_nlp, stats) and should be used by default.
Warning: this constructor should *not* be used when other solver_specific entries whose type differs from the scalar type T are to be added.
For instance, storing the history of objective values as a solver_specific entry (of type Vector{T}) would then cause an error; use `GenericExecutionStats(reg_nlp.model)` instead in that case.
"""
function GenericExecutionStats(reg_nlp :: AbstractRegularizedNLPModel{T, V}) where {T, V}
stats = GenericExecutionStats(reg_nlp.model, solver_specific = Dict{Symbol, T}())
set_solver_specific!(stats, :smooth_obj, T(Inf))
set_solver_specific!(stats, :nonsmooth_obj, T(Inf))
set_solver_specific!(stats, :xi, T(Inf))
Member:
Wouldn't $\xi$ be in dual_feas?

Collaborator (Author):
I don't see how $\xi$ is related to dual feasibility of the unconstrained problem, but perhaps I am missing something?

Member:
By dual feasibility, we mean the gradient of the Lagrangian (for smooth problems). Here, the corresponding concept is $\sqrt{\xi / \nu}$.
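A sketch of the point being made in this thread (not part of the diff), assuming, as in R2's stopping test, that $\xi$ denotes the model decrease and $\nu = 1/\sigma_k$:
$$\xi = h(x_k) - \big(\nabla f(x_k)^T s_k + h(x_k + s_k)\big).$$
Taking $h \equiv 0$ gives the step $s_k = -\nu \nabla f(x_k)$ and $\xi = \nu \|\nabla f(x_k)\|^2$, so that
$$\sqrt{\xi/\nu} = \|\nabla f(x_k)\|,$$
i.e. the usual first-order (dual feasibility) measure of the smooth unconstrained case.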

return stats
end
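For context (not part of the diff), a minimal usage sketch of the new constructor, mirroring test/test_allocs.jl; package and model names below are assumptions, not prescribed by this PR.

# Hypothetical sketch; `bpdn` stands for any AbstractNLPModel supplying the smooth
# term f, and NormL1 (ProximalOperators.jl) is an illustrative regularizer.
using RegularizedOptimization, ProximalOperators

h = NormL1(1.0)                          # nonsmooth term h
reg_nlp = RegularizedNLPModel(bpdn, h)   # wrap smooth model and regularizer

solver = R2Solver(reg_nlp)
stats = GenericExecutionStats(reg_nlp)   # preallocates :smooth_obj, :nonsmooth_obj and :xi
solve!(solver, reg_nlp, stats)           # solves in place; stats can be reused across calls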
2 changes: 1 addition & 1 deletion test/test_allocs.jl
@@ -41,7 +41,7 @@ end
for solver ∈ (:R2Solver,)
reg_nlp = RegularizedNLPModel(bpdn, h)
solver = eval(solver)(reg_nlp)
-stats = GenericExecutionStats(bpdn, solver_specific = Dict{Symbol, Float64}())
+stats = GenericExecutionStats(reg_nlp)
@test @wrappedallocs(solve!(solver, reg_nlp, stats)) == 0
end
end