
Commit 7fb897f

RenanODabelsiqueira authored and committed

add missing methods and fix tests

1 parent 9e9a8f0, commit 7fb897f

File tree

5 files changed: +97 -76 lines changed


Project.toml

Lines changed: 5 additions & 5 deletions
@@ -8,14 +8,14 @@ FastClosures = "9aa1b823-49e4-5ca5-8b0f-3971ec8bab6a"
 LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
 LinearOperators = "5c8ed15e-5a4c-59e4-a42b-c7e8811fb125"
 NLPModels = "a4795742-8479-5a88-8948-cc11e1c8c1a6"
-NLPModelsIpopt = "f4238b75-b362-5c4c-b852-0801c9a21d71"
+Printf = "de0858da-6303-5e67-8744-51eddeeeb8d7"
 Requires = "ae029012-a4dd-5104-9daa-d747884805df"
 SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf"
 Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
 
 [compat]
-FastClosures = "0.2.1"
-LinearOperators = "0.5.0"
-NLPModels = "0.6.1, < 0.9.0"
-Requires = "0.3.0"
+FastClosures = "0.2.1, 0.3.0"
+LinearOperators = "0.7.0, 1"
+NLPModels = "0.10.0, 1"
+Requires = "0.3, 0.4, 0.5"
 julia = "^1.0.0"
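
The dependency list swaps NLPModelsIpopt for the Printf stdlib, and the compat bounds move up to current NLPModels (0.10.x or 1.x) and LinearOperators (0.7.x or 1.x) releases. A quick environment sanity check, offered here as a sketch rather than part of the commit:

using Pkg
Pkg.status()   # should resolve NLPModels at 0.10.x or 1.x and LinearOperators at 0.7.x or 1.x

# the test dependencies now load without NLPModelsIpopt
using Printf, SparseArrays, LinearOperators, NLPModels, QuadraticModels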

src/qpmodel.jl

Lines changed: 34 additions & 50 deletions
@@ -1,4 +1,3 @@
-# override some NLPModels functions
 export jac_structure!, hess_structure!, jac_coord!, hess_coord!
 
 mutable struct QPData
@@ -36,46 +35,41 @@ mutable struct QuadraticModel <: AbstractQuadraticModel
   end
 end
 
-function QuadraticModel(model :: AbstractNLPModel)
+function QuadraticModel(model :: AbstractNLPModel, x :: AbstractVector = model.meta.x0, args...; kwargs...)
   nvar = model.meta.nvar
   ncon = model.meta.ncon
-  z = zeros(nvar)
-  c = grad(model, z)
-  H = hess(model, model.meta.x0)
-  A = jac(model, z)
-  QuadraticModel(c, H, opHermitian(H), A,
-                 model.meta.lcon, model.meta.ucon,
-                 model.meta.lvar, model.meta.uvar)
+  g = grad(model, x)
+  H = hess(model, x, args...; kwargs...)
+  c = cons(model, x)
+  A = jac(model, x)
+  QuadraticModel(g, H, opHermitian(H), A,
+                 model.meta.lcon .- c, model.meta.ucon .- c,
+                 model.meta.lvar .- x, model.meta.uvar .- x)
 end
 
 linobj(qp::AbstractQuadraticModel, args...) = qp.data.c
 
-function objgrad(qp :: AbstractQuadraticModel, x :: AbstractVector)
-  g = Vector{eltype(x)}(length(x))
+function NLPModels.objgrad(qp :: AbstractQuadraticModel, x :: AbstractVector)
+  g = Vector{eltype(x)}(undef, length(x))
   objgrad!(qp, x, g)
 end
 
-function objgrad!(qp :: AbstractQuadraticModel, x :: AbstractVector, g :: AbstractVector)
+function NLPModels.objgrad!(qp :: AbstractQuadraticModel, x :: AbstractVector, g :: AbstractVector)
   v = qp.data.opH * x
   @. g = qp.data.c + v
-  f = qp.data.c0 + dot(qp.data.c, x) + 0.5 * dot(v, x)
+  f = qp.data.c0 + dot(qp.data.c, x) + dot(v, x) / 2
   qp.counters.neval_hprod += 1
   (f, g)
 end
 
-function obj(qp :: AbstractQuadraticModel, x :: AbstractVector)
+function NLPModels.obj(qp :: AbstractQuadraticModel, x :: AbstractVector)
   v = qp.data.opH * x
-  f = qp.data.c0 + dot(qp.data.c, x) + 0.5 * dot(v, x)
+  f = qp.data.c0 + dot(qp.data.c, x) + dot(v, x) / 2
   qp.counters.neval_hprod += 1
   f
 end
 
-function grad(qp :: AbstractQuadraticModel, x :: AbstractVector)
-  g = Vector{eltype(x)}(undef, qp.meta.nvar)
-  grad!(qp, x, g)
-end
-
-function grad!(qp :: AbstractQuadraticModel, x :: AbstractVector, g :: AbstractVector)
+function NLPModels.grad!(qp :: AbstractQuadraticModel, x :: AbstractVector, g :: AbstractVector)
   v = qp.data.opH * x
   @. g = qp.data.c + v
   qp.counters.neval_hprod += 1
@@ -86,12 +80,10 @@ hess(qp :: AbstractQuadraticModel, ::AbstractVector; kwargs...) = qp.data.H
 
 hess_op(qp :: AbstractQuadraticModel, ::AbstractVector; kwargs...) = qp.data.opH
 
-hess_coord(qp :: AbstractQuadraticModel, ::AbstractVector; kwargs...) = findnz(qp.data.H)
-
 """
 Return the structure of the Lagrangian Hessian in sparse coordinate format in place.
 """
-function NLPModels.hess_structure!(qp :: QuadraticModel, rows :: Vector{<: Integer}, cols :: Vector{<: Integer}; kwargs...)
+function NLPModels.hess_structure!(qp :: QuadraticModel, rows :: Vector{<: Integer}, cols :: Vector{<: Integer})
   rows .= qp.data.H.rowval
   cols .= findnz(qp.data.H)[2]
 end
@@ -104,16 +96,16 @@ function NLPModels.hess_coord!(qp :: QuadraticModel, :: AbstractVector, rows ::
   vals .= findnz(qp.data.H)[3]
 end
 
-jac(qp :: AbstractQuadraticModel, ::AbstractVector; kwargs...) = qp.data.A
+jac(qp :: AbstractQuadraticModel, ::AbstractVector) = qp.data.A
 
-jac_op(qp :: AbstractQuadraticModel, ::AbstractVector; kwargs...) = LinearOperator(qp.data.A)
+jac_op(qp :: AbstractQuadraticModel, ::AbstractVector) = LinearOperator(qp.data.A)
 
-jac_coord(qp :: AbstractQuadraticModel, ::AbstractVector; kwargs...) = findnz(qp.data.A)
+jac_coord(qp :: AbstractQuadraticModel, ::AbstractVector) = findnz(qp.data.A)
 
 """
 Return the structure of the constraints Jacobian in sparse coordinate format in place.
 """
-function NLPModels.jac_structure!(qp :: QuadraticModel, rows :: Vector{<: Integer}, cols :: Vector{<: Integer}; kwargs...)
+function NLPModels.jac_structure!(qp :: QuadraticModel, rows :: Vector{<: Integer}, cols :: Vector{<: Integer})
   rows .= qp.data.A.rowval
   cols .= findnz(qp.data.A)[2]
 end
@@ -122,38 +114,30 @@ end
 Return the structure of the constraints Jacobian in sparse coordinate format in place.
 """
 function NLPModels.jac_coord!(qp :: QuadraticModel, x :: AbstractVector, rows :: Vector{<: Integer},
-                              cols :: Vector{<: Integer}, vals :: Vector{<: AbstractFloat}; kwargs...)
+                              cols :: Vector{<: Integer}, vals :: Vector{<: AbstractFloat})
   vals .= findnz(qp.data.A)[3]
 end
 
-function cons(qp::AbstractQuadraticModel, x :: AbstractVector)
-  c = Vector{eltype(x)}(undef, qp.meta.ncon)
-  cons!(qp, x, c)
-end
-
-function cons!(qp :: AbstractQuadraticModel, x :: AbstractVector, c :: AbstractVector)
+function NLPModels.cons!(qp :: AbstractQuadraticModel, x :: AbstractVector, c :: AbstractVector)
   mul!(c, qp.data.A, x)
   qp.counters.neval_jprod += 1
   c
 end
 
-function hprod(qp :: AbstractQuadraticModel, ::AbstractVector; kwargs...)
-  @closure v -> begin
-    qp.counters.neval_hprod += 1
-    qp.data.opH * v
-  end
+function NLPModels.hprod!(qp :: AbstractQuadraticModel, x :: AbstractVector, v :: AbstractVector, Av :: AbstractVector)
+  qp.counters.neval_hprod += 1
+  Av .= qp.data.opH * v
+  return Av
 end
 
-function jprod(qp :: AbstractQuadraticModel, ::AbstractVector; kwargs...)
-  @closure v -> begin
-    qp.counters.neval_jprod += 1
-    qp.data.A * v
-  end
+function NLPModels.jprod!(qp :: AbstractQuadraticModel, x :: AbstractVector, v :: AbstractVector, Av :: AbstractVector)
+  qp.counters.neval_jprod += 1
+  mul!(Av, qp.data.A, v)
+  return Av
 end
 
-function jtprod(qp :: AbstractQuadraticModel, ::AbstractVector; kwargs...)
-  @closure v -> begin
-    qp.counters.neval_jtprod += 1
-    qp.data.A' * v
-  end
+function NLPModels.jtprod!(qp :: AbstractQuadraticModel, x :: AbstractVector, v :: AbstractVector, Atv :: AbstractVector)
  qp.counters.neval_jtprod += 1
  mul!(Atv, qp.data.A', v)
  return Atv
 end
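
With this change, QuadraticModel(model, x) builds the quadratic approximation of an AbstractNLPModel around an arbitrary point x (defaulting to model.meta.x0): the gradient, Hessian and Jacobian are evaluated at x, and the constraint ranges and bounds are shifted by cons(model, x) and x, so the QP is effectively posed in the step away from x. The kernels obj, objgrad, grad!, cons!, hprod!, jprod! and jtprod! are now registered as NLPModels methods, so the generic out-of-place NLPModels wrappers work unchanged. A minimal usage sketch, assuming the ADNLPModel constructor style used in this repository's tests (the names nlp, qp and d are illustrative):

using NLPModels, QuadraticModels

f(x) = 4*x[1]^2 + 5*x[2]^2 + x[1]*x[2] + 1.5*x[1] - 2*x[2]
c(x) = [2*x[1] + x[2]; -x[1] + 2*x[2]]
nlp = ADNLPModel(f, [-1.0; 1.0], c=c, lcon=[2.0; -Inf], ucon=[Inf; 6.0],
                 lvar=[0.0; 0.0], uvar=[20.0; Inf])

qp = QuadraticModel(nlp, zeros(nlp.meta.nvar))   # expand around x = 0

d = [0.1; 0.2]     # a candidate step in the shifted variables
obj(qp, d)         # c0 + c'd + d'Hd/2, counted as neval_hprod
grad(qp, d)        # generic NLPModels wrapper calling the new grad!
cons(qp, d)        # A*d via the new cons!, counted as neval_jprod
hprod(qp, d, d)    # generic wrapper around the new in-place hprod!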

test/runtests.jl

Lines changed: 4 additions & 21 deletions
@@ -1,25 +1,8 @@
 # stdlib
-using SparseArrays, Test
+using Printf, SparseArrays, Test
 
 # our packages
-using LinearOperators, NLPModels, NLPModelsIpopt, QuadraticModels
+using LinearOperators, NLPModels, QuadraticModels
 
-@testset "QuadraticModelsTests" begin
-  c0 = 0.0
-  c = [1.5; -2.0]
-  Q = sparse([1; 2; 2], [1; 1; 2], [8.0; 2.0; 10.0])
-  A = sparse([1; 2; 1; 2], [1; 1; 2; 2], [2.0; -1.0; 1.0; 2.0])
-  lcon = [2.0; -Inf]
-  ucon = [Inf; 6.0]
-  uvar = [20; Inf]
-  lvar = [0.0; 0.0]
-  objective = 4.3718750e+00
-
-  qp = QuadraticModel(c, Q, opHermitian(Q), A, lcon, ucon, lvar, uvar, c0 = c0)
-  output = ipopt(qp, print_level = 0)
-
-  @test output.dual_feas < 1e-6
-  @test output.primal_feas < 1e-6
-  @test abs(output.objective - objective) < 1e-6
-
-end # testset
+include("simpleqp.jl")
+include("test_consistency.jl")
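
With runtests.jl reduced to loading the packages and including the two new files, the suite runs in the usual ways (a sketch; it assumes QuadraticModels is added or dev'ed in the active environment):

using Pkg
Pkg.test("QuadraticModels")

# or directly from the repository root:
#   julia --project -e 'include("test/runtests.jl")'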

test/simpleqp.jl

Lines changed: 14 additions & 0 deletions
@@ -0,0 +1,14 @@
+
+function simpleqp_autodiff()
+
+  x0 = [-1.; 1.]
+  f(x) = 4*x[1]^2 + 5*x[2]^2 + x[1]*x[2] + 1.5*x[1] - 2*x[2]
+  c(x) = [2*x[1] + x[2]; -x[1] + 2*x[2]]
+  lcon = [2.0; -Inf]
+  ucon = [Inf; 6.0]
+  uvar = [20; Inf]
+  lvar = [0.0; 0.0]
+
+  return ADNLPModel(f, x0, c=c, lcon=lcon, ucon=ucon, lvar=lvar, uvar=uvar)
+
+end
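
simpleqp_autodiff builds the test problem as an ADNLPModel so the consistency checks can compare it against its QuadraticModel counterpart. For contrast, the explicit-data constructor path, which the @testset removed from runtests.jl above exercised directly, looks like this (data copied from that removed test):

using SparseArrays, LinearOperators, QuadraticModels

c0 = 0.0
c = [1.5; -2.0]
Q = sparse([1; 2; 2], [1; 1; 2], [8.0; 2.0; 10.0])              # lower triangle of the Hessian
A = sparse([1; 2; 1; 2], [1; 1; 2; 2], [2.0; -1.0; 1.0; 2.0])   # constraint matrix
lcon = [2.0; -Inf]
ucon = [Inf; 6.0]
lvar = [0.0; 0.0]
uvar = [20.0; Inf]

qp = QuadraticModel(c, Q, opHermitian(Q), A, lcon, ucon, lvar, uvar, c0 = c0)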

test/test_consistency.jl

Lines changed: 40 additions & 0 deletions
@@ -0,0 +1,40 @@
+function check_quadratic_model(model, quadraticmodel)
+  @assert typeof(quadraticmodel) <: QuadraticModels.AbstractQuadraticModel
+  rtol = 1e-8
+  @assert quadraticmodel.meta.nvar == model.meta.nvar
+  @assert quadraticmodel.meta.ncon == model.meta.ncon
+
+  x = [-(-1.0)^i for i = 1:quadraticmodel.meta.nvar]
+
+  @assert isapprox(obj(model, x), obj(quadraticmodel, x), rtol=rtol)
+
+  f, g = objgrad(model, x)
+  f_quad, g_quad = objgrad(quadraticmodel, x)
+
+  @assert isapprox(f, f_quad, rtol=rtol)
+  @assert isapprox(g, g_quad, rtol=rtol)
+  @assert isapprox(cons(model, x), cons(quadraticmodel, x), rtol=rtol)
+  @assert isapprox(jac(model, x), jac(quadraticmodel, x), rtol=rtol)
+
+  v = [-(-1.0)^i for i = 1:quadraticmodel.meta.nvar]
+  u = [-(-1.0)^i for i = 1:quadraticmodel.meta.ncon]
+
+  @assert isapprox(jprod(model, x, v), jprod(quadraticmodel, x, v), rtol=rtol)
+  @assert isapprox(jtprod(model, x, u), jtprod(quadraticmodel, x, u), rtol=rtol)
+
+  H = hess_op(quadraticmodel, x)
+  @assert typeof(H) <: LinearOperators.AbstractLinearOperator
+  @assert size(H) == (model.meta.nvar, model.meta.nvar)
+  @assert isapprox(H * v, hprod(model, x, v), rtol=rtol)
+
+  reset!(quadraticmodel)
+end
+
+for problem in ["simpleqp"]
+  problem_f = eval(Symbol(problem * "_autodiff"))
+  nlp = problem_f()
+  @printf("Checking QuadraticModel formulation of %-8s\t", problem)
+  quadratic_model = QuadraticModel(nlp, zeros(nlp.meta.nvar))
+  check_quadratic_model(nlp, quadratic_model)
+  @printf("\n")
+end
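
The driver loop resolves each problem through the <name>_autodiff naming convention, so extending the consistency checks only takes a new fixture file and an entry in the list. A sketch with a hypothetical extra problem (myqp and its file are illustrative, not part of this commit):

# test/myqp.jl (hypothetical fixture)
myqp_autodiff() = ADNLPModel(x -> x[1]^2 + x[2]^2, [1.0; 1.0],
                             c = x -> [x[1] + x[2]], lcon = [1.0], ucon = [1.0])

# then, in the driver:
include("myqp.jl")
for problem in ["simpleqp", "myqp"]
  nlp = eval(Symbol(problem * "_autodiff"))()
  check_quadratic_model(nlp, QuadraticModel(nlp, zeros(nlp.meta.nvar)))
end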
