
Commit a0d866d

abelsiqueira authored and dpo committed
Modify QuadraticModels to NLPModels 0.12
1 parent 47aca9a · commit a0d866d

File tree

4 files changed: +219 −138 lines


src/qpmodel.jl

Lines changed: 137 additions & 98 deletions
@@ -1,143 +1,182 @@
 export jac_structure!, hess_structure!, jac_coord!, hess_coord!
 
 mutable struct QPData
-  c0 :: Float64                 # constant term in objective
-  c :: AbstractVector{Float64}  # linear term
-  H :: AbstractMatrix{Float64}  # quadratic term
-  opH :: AbstractLinearOperator # assumed with preallocation!
-  A :: AbstractMatrix{Float64}  # constraint matrix
+  c0 :: Float64        # constant term in objective
+  c :: Vector          # linear term
+  Hrows :: Vector{Int} # quadratic term
+  Hcols :: Vector{Int}
+  Hvals :: Vector
+  Arows :: Vector{Int} # constraints matrix
+  Acols :: Vector{Int}
+  Avals :: Vector
 end
 
 abstract type AbstractQuadraticModel <: AbstractNLPModel end
 
 mutable struct QuadraticModel <: AbstractQuadraticModel
-  meta :: NLPModelMeta
-  counters :: Counters
-  data :: QPData
-
-  function QuadraticModel(c :: AbstractVector{Float64}, H :: AbstractMatrix{Float64},
-                          opH :: AbstractLinearOperator,
-                          A :: AbstractMatrix{Float64},
-                          lcon :: AbstractVector{Float64}, ucon :: AbstractVector{Float64},
-                          lvar :: AbstractVector{Float64}, uvar :: AbstractVector{Float64};
-                          c0 :: Float64=0.0, kwargs...)
-    ncon, nvar = size(A)
-    nnzh = issparse(H) ? nnz(H) : (nvar * (nvar + 1) / 2)
-    nnzj = issparse(A) ? nnz(A) : (nvar * ncon)
-    new(NLPModelMeta(nvar,
-                     lvar=lvar, uvar=uvar,
-                     ncon=size(A,1), lcon=lcon, ucon=ucon,
-                     nnzj=nnzj,
-                     nnzh=nnzh,
-                     lin=1:ncon, nln=Int[], islp=(ncon == 0); kwargs...),
-        Counters(),
-        QPData(c0, c, H, opH, A))
+  meta :: NLPModelMeta
+  counters :: Counters
+  data :: QPData
+end
+
+function QuadraticModel(c :: AbstractVector,
+                        Hrows :: AbstractVector{<: Integer}, Hcols :: AbstractVector{<: Integer}, Hvals :: AbstractVector;
+                        Arows :: AbstractVector{<: Integer} = Int[], Acols :: AbstractVector{<: Integer} = Int[], Avals :: AbstractVector = Float64[],
+                        lcon :: AbstractVector = Float64[], ucon :: AbstractVector = Float64[],
+                        lvar :: AbstractVector = fill(-Inf, length(c)), uvar :: AbstractVector = fill(Inf, length(c)),
+                        c0 :: Float64=0.0, kwargs...)
+  nnzh = length(Hvals)
+  if !(nnzh == length(Hrows) == length(Hcols))
+    error("The length of Hrows, Hcols and Hvals must be the same")
+  end
+  nnzj = length(Avals)
+  if !(nnzj == length(Arows) == length(Acols))
+    error("The length of Arows, Acols and Avals must be the same")
+  end
+  ncon = length(lcon)
+  if ncon != length(ucon)
+    error("The length of lcon and ucon must be the same")
   end
+  nvar = length(c)
+  if !(nvar == length(lvar) == length(uvar))
+    error("The length of c, lvar and uvar must be the same")
+  end
+  QuadraticModel(NLPModelMeta(length(c), lvar=lvar, uvar=uvar,
+                              ncon=ncon, lcon=lcon, ucon=ucon,
+                              nnzj=nnzj, nnzh=nnzh,
+                              lin=1:ncon, nln=Int[], islp=(ncon == 0); kwargs...),
+                 Counters(),
+                 QPData(c0, c, Hrows, Hcols, Hvals, Arows, Acols, Avals))
 end
 
-function QuadraticModel(model :: AbstractNLPModel, x :: AbstractVector = model.meta.x0, args...; kwargs...)
-  nvar = model.meta.nvar
-  ncon = model.meta.ncon
-  g = grad(model, x)
-  H = hess(model, x, args...; kwargs...)
-  c = cons(model, x)
-  A = jac(model, x)
-  QuadraticModel(g, H, opHermitian(H), A,
-                 model.meta.lcon .- c, model.meta.ucon .- c,
-                 model.meta.lvar .- x, model.meta.uvar .- x)
+function QuadraticModel(c :: AbstractVector, H :: AbstractMatrix;
+                        A :: AbstractMatrix=zeros(0,length(c)), lcon :: AbstractVector=zeros(0), ucon :: AbstractVector=zeros(0),
+                        lvar :: AbstractVector = fill(-Inf, length(c)), uvar :: AbstractVector = fill(Inf, length(c)),
+                        c0 :: Float64=0.0, kwargs...)
+  ncon, nvar = size(A)
+  nnzh, Hrows, Hcols, Hvals = if issparse(H)
+    nnz(tril(H)), findnz(tril(H))...
+  else
+    I = ((i,j,H[i,j]) for i = 1:nvar, j = 1:nvar if i ≥ j)
+    div(nvar * (nvar + 1), 2), getindex.(I, 1), getindex.(I, 2), getindex.(I, 3)
+  end
+  nnzj, Arows, Acols, Avals = if issparse(A)
+    nnz(A), findnz(A)...
+  else
+    I = ((i,j,A[i,j]) for i = 1:ncon, j = 1:nvar)
+    nvar * ncon, getindex.(I, 1)[:], getindex.(I, 2)[:], getindex.(I, 3)[:]
+  end
+  QuadraticModel(NLPModelMeta(nvar, lvar=lvar, uvar=uvar,
+                              ncon=size(A,1), lcon=lcon, ucon=ucon,
+                              nnzj=nnzj, nnzh=nnzh,
+                              lin=1:ncon, nln=Int[], islp=(ncon == 0); kwargs...),
+                 Counters(),
+                 QPData(c0, c, Hrows, Hcols, Hvals, Arows, Acols, Avals))
 end
 
-linobj(qp::AbstractQuadraticModel, args...) = qp.data.c
+"""
+    QuadraticModel(nlp, x)
 
-function NLPModels.objgrad(qp :: AbstractQuadraticModel, x :: AbstractVector)
-  g = Vector{eltype(x)}(undef, length(x))
-  objgrad!(qp, x, g)
+Creates a quadratic Taylor model of `nlp` around `x`.
+"""
+function QuadraticModel(model :: AbstractNLPModel, x :: AbstractVector; kwargs...)
+  nvar = model.meta.nvar
+  ncon = model.meta.ncon
+  c0 = obj(model, x)
+  g = grad(model, x)
+  Hrows, Hcols = hess_structure(model)
+  Hvals = hess_coord(model, x)
+  if model.meta.ncon > 0
+    c = cons(model, x)
+    Arows, Acols = jac_structure(model)
+    Avals = jac_coord(model, x)
+    QuadraticModel(g, Hrows, Hcols, Hvals, c0=c0,
+                   Arows=Arows, Acols=Acols, Avals=Avals, lcon=model.meta.lcon .- c, ucon=model.meta.ucon .- c,
+                   lvar=model.meta.lvar .- x, uvar=model.meta.uvar .- x, x0=zeros(model.meta.nvar))
+  else
+    QuadraticModel(g, Hrows, Hcols, Hvals, c0=c0,
+                   lvar=model.meta.lvar .- x, uvar=model.meta.uvar .- x, x0=zeros(model.meta.nvar))
+  end
 end
 
+linobj(qp::AbstractQuadraticModel, args...) = qp.data.c
+
 function NLPModels.objgrad!(qp :: AbstractQuadraticModel, x :: AbstractVector, g :: AbstractVector)
-  v = qp.data.opH * x
-  @. g = qp.data.c + v
-  f = qp.data.c0 + dot(qp.data.c, x) + dot(v, x) / 2
-  qp.counters.neval_hprod += 1
-  (f, g)
+  NLPModels.increment!(qp, :neval_obj)
+  NLPModels.increment!(qp, :neval_grad)
+  coo_sym_prod!(qp.data.Hrows, qp.data.Hcols, qp.data.Hvals, x, g)
+  f = qp.data.c0 + dot(qp.data.c, x) + dot(g, x) / 2
+  g .+= qp.data.c
+  return f, g
 end
 
 function NLPModels.obj(qp :: AbstractQuadraticModel, x :: AbstractVector)
-  v = qp.data.opH * x
-  f = qp.data.c0 + dot(qp.data.c, x) + dot(v, x) / 2
-  qp.counters.neval_hprod += 1
-  f
+  NLPModels.increment!(qp, :neval_obj)
+  Hx = zeros(qp.meta.nvar)
+  coo_sym_prod!(qp.data.Hrows, qp.data.Hcols, qp.data.Hvals, x, Hx)
+  return qp.data.c0 + dot(qp.data.c, x) + dot(Hx, x) / 2
 end
 
 function NLPModels.grad!(qp :: AbstractQuadraticModel, x :: AbstractVector, g :: AbstractVector)
-  v = qp.data.opH * x
-  @. g = qp.data.c + v
-  qp.counters.neval_hprod += 1
-  g
+  NLPModels.increment!(qp, :neval_grad)
+  coo_sym_prod!(qp.data.Hrows, qp.data.Hcols, qp.data.Hvals, x, g)
+  g .+= qp.data.c
+  return g
 end
 
-hess(qp :: AbstractQuadraticModel, ::AbstractVector; kwargs...) = qp.data.H
-
-hess_op(qp :: AbstractQuadraticModel, ::AbstractVector; kwargs...) = qp.data.opH
+# TODO: Better hess_op
 
-"""
-Return the structure of the Lagrangian Hessian in sparse coordinate format in place.
-"""
-function NLPModels.hess_structure!(qp :: QuadraticModel, rows :: Vector{<: Integer}, cols :: Vector{<: Integer})
-  rows .= qp.data.H.rowval
-  cols .= findnz(qp.data.H)[2]
+function NLPModels.hess_structure!(qp :: QuadraticModel, rows :: AbstractVector{<: Integer}, cols :: AbstractVector{<: Integer})
+  rows .= qp.data.Hrows
+  cols .= qp.data.Hcols
+  return rows, cols
 end
 
-"""
-Evaluate the Lagrangian Hessian at `x` in sparse coordinate format. Only the lower triangle is returned.
-"""
-function NLPModels.hess_coord!(qp :: QuadraticModel, :: AbstractVector, rows :: AbstractVector{<: Integer},
-                               cols :: AbstractVector{<: Integer}, vals :: Vector{<: AbstractFloat}; kwargs...)
-  vals .= findnz(qp.data.H)[3]
+function NLPModels.hess_coord!(qp :: QuadraticModel, x :: AbstractVector, vals :: AbstractVector{<: AbstractFloat}; obj_weight :: Real=one(eltype(x)))
+  NLPModels.increment!(qp, :neval_hess)
+  vals .= obj_weight * qp.data.Hvals
+  return vals
 end
 
-jac(qp :: AbstractQuadraticModel, ::AbstractVector) = qp.data.A
-
-jac_op(qp :: AbstractQuadraticModel, ::AbstractVector) = LinearOperator(qp.data.A)
+NLPModels.hess_coord!(qp :: QuadraticModel, x :: AbstractVector, y :: AbstractVector, vals :: AbstractVector{<: AbstractFloat}; obj_weight :: Real=one(eltype(x))) = hess_coord!(qp, x, vals, obj_weight=obj_weight)
 
-jac_coord(qp :: AbstractQuadraticModel, ::AbstractVector) = findnz(qp.data.A)
-
-"""
-Return the structure of the constraints Jacobian in sparse coordinate format in place.
-"""
-function NLPModels.jac_structure!(qp :: QuadraticModel, rows :: Vector{<: Integer}, cols :: Vector{<: Integer})
-  rows .= qp.data.A.rowval
-  cols .= findnz(qp.data.A)[2]
+function NLPModels.jac_structure!(qp :: QuadraticModel, rows :: AbstractVector{<: Integer}, cols :: AbstractVector{<: Integer})
+  rows .= qp.data.Arows
+  cols .= qp.data.Acols
+  return rows, cols
 end
 
-"""
-Return the structure of the constraints Jacobian in sparse coordinate format in place.
-"""
-function NLPModels.jac_coord!(qp :: QuadraticModel, x :: AbstractVector, rows :: Vector{<: Integer},
-                              cols :: Vector{<: Integer}, vals :: Vector{<: AbstractFloat})
-  vals .= findnz(qp.data.A)[3]
+function NLPModels.jac_coord!(qp :: QuadraticModel, x :: AbstractVector, vals :: AbstractVector{<: AbstractFloat})
+  NLPModels.increment!(qp, :neval_jac)
+  vals .= qp.data.Avals
+  return vals
 end
 
 function NLPModels.cons!(qp :: AbstractQuadraticModel, x :: AbstractVector, c :: AbstractVector)
-  mul!(c, qp.data.A, x)
-  qp.counters.neval_jprod += 1
-  c
+  NLPModels.increment!(qp, :neval_cons)
+  coo_prod!(qp.data.Arows, qp.data.Acols, qp.data.Avals, x, c)
+  return c
 end
 
-function NLPModels.hprod!(qp :: AbstractQuadraticModel, x :: AbstractVector, v :: AbstractVector, Av :: AbstractVector)
-  qp.counters.neval_hprod += 1
-  Av .= qp.data.opH * v
-  return Av
+function NLPModels.hprod!(qp :: AbstractQuadraticModel, x :: AbstractVector, v :: AbstractVector, Hv :: AbstractVector; obj_weight :: Real = one(eltype(x)))
+  NLPModels.increment!(qp, :neval_hprod)
+  coo_sym_prod!(qp.data.Hrows, qp.data.Hcols, qp.data.Hvals, v, Hv)
+  if obj_weight != 1
+    Hv .*= obj_weight
+  end
+  return Hv
 end
 
+NLPModels.hprod!(qp :: AbstractQuadraticModel, x :: AbstractVector, y :: AbstractVector, v :: AbstractVector, Hv :: AbstractVector; obj_weight :: Real = one(eltype(x))) = hprod!(qp, x, v, Hv, obj_weight=obj_weight)
+
 function NLPModels.jprod!(qp :: AbstractQuadraticModel, x :: AbstractVector, v :: AbstractVector, Av :: AbstractVector)
-  qp.counters.neval_jprod += 1
-  mul!(Av, qp.data.A, v)
-  return Av
+  NLPModels.increment!(qp, :neval_jprod)
+  coo_prod!(qp.data.Arows, qp.data.Acols, qp.data.Avals, v, Av)
+  return Av
 end
 
 function NLPModels.jtprod!(qp :: AbstractQuadraticModel, x :: AbstractVector, v :: AbstractVector, Atv :: AbstractVector)
-  qp.counters.neval_jtprod += 1
-  mul!(Atv, qp.data.A', v)
-  return Atv
+  NLPModels.increment!(qp, :neval_jtprod)
+  coo_prod!(qp.data.Acols, qp.data.Arows, qp.data.Avals, v, Atv)
+  return Atv
 end

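For reference, a minimal usage sketch of the new coordinate-based constructor shown above (an illustration, not part of the commit). It reuses the small QP data from test/simpleqp.jl and the standard obj/grad/cons API from NLPModels:

# Illustration only: build min c0 + c'x + (1/2) x'Hx  s.t.  lcon ≤ Ax ≤ ucon, lvar ≤ x ≤ uvar,
# passing only the lower triangle of H in coordinate (COO) form.
using NLPModels, QuadraticModels

c = [1.5; -2.0]
# lower triangle of H = [8 1; 1 10]
Hrows, Hcols, Hvals = [1; 2; 2], [1; 1; 2], [8.0; 1.0; 10.0]
# A = [2 1; -1 2]
Arows, Acols, Avals = [1; 1; 2; 2], [1; 2; 1; 2], [2.0; 1.0; -1.0; 2.0]

qp = QuadraticModel(c, Hrows, Hcols, Hvals,
                    Arows=Arows, Acols=Acols, Avals=Avals,
                    lcon=[2.0; -Inf], ucon=[Inf; 6.0],
                    lvar=[0.0; 0.0], uvar=[20.0; Inf],
                    c0=1.0, x0=[-1.0; 1.0])

x = [1.0; 1.0]
obj(qp, x)   # c0 + c'x + (1/2) x'Hx
grad(qp, x)  # Hx + c
cons(qp, x)  # Ax
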
test/runtests.jl

Lines changed: 7 additions & 1 deletion
@@ -2,7 +2,13 @@
 using Printf, SparseArrays, Test
 
 # our packages
-using LinearOperators, NLPModels, QuadraticModels
+using LinearAlgebra, LinearOperators, NLPModels, QuadraticModels
 
+nlpmodels_path = joinpath(dirname(pathof(NLPModels)), "..", "test")
+nlpmodels_problems_path = joinpath(nlpmodels_path, "problems")
+
+# Definition of the quadratic problem in ADNLPModel
 include("simpleqp.jl")
+
+include(joinpath(nlpmodels_path, "consistency.jl"))
 include("test_consistency.jl")

test/simpleqp.jl

Lines changed: 14 additions & 1 deletion
@@ -2,13 +2,26 @@
 function simpleqp_autodiff()
 
   x0 = [-1.; 1.]
-  f(x) = 4*x[1]^2 + 5*x[2]^2 + x[1]*x[2] + 1.5*x[1] - 2*x[2]
+  f(x) = 4*x[1]^2 + 5*x[2]^2 + x[1]*x[2] + 1.5*x[1] - 2*x[2] + 1.0
   c(x) = [2*x[1] + x[2]; -x[1] + 2*x[2]]
   lcon = [2.0; -Inf]
   ucon = [Inf; 6.0]
   uvar = [20; Inf]
   lvar = [0.0; 0.0]
 
   return ADNLPModel(f, x0, c=c, lcon=lcon, ucon=ucon, lvar=lvar, uvar=uvar)
+end
+
+function simpleqp_QP()
+  c = [1.5; -2.0]
+  H = [8.0 0.0; 1.0 10.0]
+  A = [2.0 1.0; -1.0 2.0]
+  c0 = 1.0
+  lcon = [2.0; -Inf]
+  ucon = [Inf; 6.0]
+  uvar = [20; Inf]
+  lvar = [0.0; 0.0]
+  x0 = [-1.0; 1.0]
 
+  return QuadraticModel(c, H, A=A, lcon=lcon, ucon=ucon, lvar=lvar, uvar=uvar, c0=c0, x0=x0)
 end

0 commit comments
