|
1 | 1 | export jac_structure!, hess_structure!, jac_coord!, hess_coord!
|
2 | 2 |
|
mutable struct QPData
  c0 :: Float64          # constant term in objective
  c :: Vector            # linear term of the objective
  Hrows :: Vector{Int}   # quadratic term in sparse coordinate (COO) format
  Hcols :: Vector{Int}   #   (only the lower triangle is stored — see hess_coord!/coo_sym_prod! usage)
  Hvals :: Vector
  Arows :: Vector{Int}   # constraints matrix in sparse coordinate (COO) format
  Acols :: Vector{Int}
  Avals :: Vector
end
|
10 | 13 |
|
# Parent type for quadratic models; concrete subtypes are expected to carry
# `meta`, `counters` and `data` fields (see `QuadraticModel`).
abstract type AbstractQuadraticModel <: AbstractNLPModel end
|
12 | 15 |
|
mutable struct QuadraticModel <: AbstractQuadraticModel
  meta :: NLPModelMeta   # dimensions, bounds and sparsity counts
  counters :: Counters   # evaluation counters (neval_obj, neval_grad, ...)
  data :: QPData         # objective and constraint data in COO format
end
| 21 | + |
"""
    QuadraticModel(c, Hrows, Hcols, Hvals;
                   Arows, Acols, Avals, lcon, ucon, lvar, uvar, c0, kwargs...)

Create a quadratic model from data in sparse coordinate (COO) format:

    min  c0 + cᵀx + ½ xᵀHx   s.t.  lcon ≤ Ax ≤ ucon,  lvar ≤ x ≤ uvar,

where `H` is given by `(Hrows, Hcols, Hvals)` (lower triangle only) and `A` by
`(Arows, Acols, Avals)`. By default the problem is unconstrained with free
variables. Additional `kwargs` are forwarded to `NLPModelMeta`.

Throws an error when any COO triple or bound pair has mismatched lengths.
"""
function QuadraticModel(c :: AbstractVector,
                        Hrows :: AbstractVector{<: Integer}, Hcols :: AbstractVector{<: Integer}, Hvals :: AbstractVector;
                        Arows :: AbstractVector{<: Integer} = Int[], Acols :: AbstractVector{<: Integer} = Int[], Avals :: AbstractVector = Float64[],
                        lcon :: AbstractVector = Float64[], ucon :: AbstractVector = Float64[],
                        lvar :: AbstractVector = fill(-Inf, length(c)),
                        uvar :: AbstractVector = fill(Inf, length(c)),  # bug fix: was `fill(Int, length(c))`, a vector of the *type* `Int`, not +Inf bounds
                        c0 :: Float64 = 0.0, kwargs...)
  nnzh = length(Hvals)
  if !(nnzh == length(Hrows) == length(Hcols))
    error("The length of Hrows, Hcols and Hvals must be the same")
  end
  nnzj = length(Avals)
  if !(nnzj == length(Arows) == length(Acols))
    error("The length of Arows, Acols and Avals must be the same")
  end
  ncon = length(lcon)
  if ncon != length(ucon)
    error("The length of lcon and ucon must be the same")
  end
  nvar = length(c)
  if !(nvar == length(lvar) == length(uvar))
    error("The length of c, lvar and uvar must be the same")
  end
  QuadraticModel(NLPModelMeta(nvar, lvar=lvar, uvar=uvar,
                              ncon=ncon, lcon=lcon, ucon=ucon,
                              nnzj=nnzj, nnzh=nnzh,
                              lin=1:ncon, nln=Int[], islp=(ncon == 0); kwargs...),
                 Counters(),
                 QPData(c0, c, Hrows, Hcols, Hvals, Arows, Acols, Avals))
end
|
37 | 51 |
|
38 |
| -function QuadraticModel(model :: AbstractNLPModel, x :: AbstractVector = model.meta.x0, args...; kwargs...) |
39 |
| - nvar = model.meta.nvar |
40 |
| - ncon = model.meta.ncon |
41 |
| - g = grad(model, x) |
42 |
| - H = hess(model, x, args...; kwargs...) |
43 |
| - c = cons(model, x) |
44 |
| - A = jac(model, x) |
45 |
| - QuadraticModel(g, H, opHermitian(H), A, |
46 |
| - model.meta.lcon .- c, model.meta.ucon .- c, |
47 |
| - model.meta.lvar .- x, model.meta.uvar .- x) |
"""
    QuadraticModel(c, H; A, lcon, ucon, lvar, uvar, c0, kwargs...)

Create a quadratic model from a (dense or sparse) Hessian `H` — only its lower
triangle is stored — and constraint matrix `A`, converting both to sparse
coordinate (COO) format.
"""
function QuadraticModel(c :: AbstractVector, H :: AbstractMatrix;
                        A :: AbstractMatrix = zeros(0, length(c)), lcon :: AbstractVector = zeros(0), ucon :: AbstractVector = zeros(0),
                        lvar :: AbstractVector = fill(-Inf, length(c)), uvar :: AbstractVector = fill(Inf, length(c)),
                        c0 :: Float64 = 0.0, kwargs...)
  ncon, nvar = size(A)
  nnzh, Hrows, Hcols, Hvals = if issparse(H)
    L = tril(H)  # hoisted: the original computed tril(H) twice
    nnz(L), findnz(L)...
  else
    # Dense case: enumerate the lower triangle (i ≥ j) explicitly.
    # Renamed from `I` to avoid shadowing `LinearAlgebra.I`.
    entries = ((i, j, H[i, j]) for i = 1:nvar, j = 1:nvar if i ≥ j)
    div(nvar * (nvar + 1), 2), getindex.(entries, 1), getindex.(entries, 2), getindex.(entries, 3)
  end
  nnzj, Arows, Acols, Avals = if issparse(A)
    nnz(A), findnz(A)...
  else
    entries = ((i, j, A[i, j]) for i = 1:ncon, j = 1:nvar)
    # `[:]` flattens the ncon×nvar broadcast results to vectors
    nvar * ncon, getindex.(entries, 1)[:], getindex.(entries, 2)[:], getindex.(entries, 3)[:]
  end
  QuadraticModel(NLPModelMeta(nvar, lvar=lvar, uvar=uvar,
                              ncon=ncon, lcon=lcon, ucon=ucon,
                              nnzj=nnzj, nnzh=nnzh,
                              lin=1:ncon, nln=Int[], islp=(ncon == 0); kwargs...),
                 Counters(),
                 QPData(c0, c, Hrows, Hcols, Hvals, Arows, Acols, Avals))
end
|
49 | 76 |
|
50 |
| -linobj(qp::AbstractQuadraticModel, args...) = qp.data.c |
| 77 | +""" |
| 78 | + QuadraticModel(nlp, x) |
51 | 79 |
|
52 |
| -function NLPModels.objgrad(qp :: AbstractQuadraticModel, x :: AbstractVector) |
53 |
| - g = Vector{eltype(x)}(undef, length(x)) |
54 |
| - objgrad!(qp, x, g) |
| 80 | +Creates a quadratic Taylor model of `nlp` around `x`. |
| 81 | +""" |
| 82 | +function QuadraticModel(model :: AbstractNLPModel, x :: AbstractVector; kwargs...) |
| 83 | + nvar = model.meta.nvar |
| 84 | + ncon = model.meta.ncon |
| 85 | + c0 = obj(model, x) |
| 86 | + g = grad(model, x) |
| 87 | + Hrows, Hcols = hess_structure(model) |
| 88 | + Hvals = hess_coord(model, x) |
| 89 | + if model.meta.ncon > 0 |
| 90 | + c = cons(model, x) |
| 91 | + Arows, Acols = jac_structure(model) |
| 92 | + Avals = jac_coord(model, x) |
| 93 | + QuadraticModel(g, Hrows, Hcols, Hvals, c0=c0, |
| 94 | + Arows=Arows, Acols=Acols, Avals=Avals, lcon=model.meta.lcon .- c, ucon=model.meta.ucon .- c, |
| 95 | + lvar=model.meta.lvar .- x, uvar=model.meta.uvar .- x, x0=zeros(model.meta.nvar)) |
| 96 | + else |
| 97 | + QuadraticModel(g, Hrows, Hcols, Hvals, c0=c0, |
| 98 | + lvar=model.meta.lvar .- x, uvar=model.meta.uvar .- x, x0=zeros(model.meta.nvar)) |
| 99 | + end |
55 | 100 | end
|
56 | 101 |
|
"""
    linobj(qp, args...)

Return the linear term `c` of the objective of `qp`.
"""
function linobj(qp :: AbstractQuadraticModel, args...)
  return qp.data.c
end
"""
    objgrad!(qp, x, g)

Evaluate the objective `c0 + cᵀx + ½ xᵀHx` and its gradient `c + Hx` at `x`,
storing the gradient in `g`. Returns `(f, g)`.
"""
function NLPModels.objgrad!(qp :: AbstractQuadraticModel, x :: AbstractVector, g :: AbstractVector)
  NLPModels.increment!(qp, :neval_obj)
  NLPModels.increment!(qp, :neval_grad)
  coo_sym_prod!(qp.data.Hrows, qp.data.Hcols, qp.data.Hvals, x, g)  # g ← Hx
  # f must be computed while g still holds Hx (before adding c)
  f = qp.data.c0 + dot(qp.data.c, x) + dot(g, x) / 2
  g .+= qp.data.c  # was `@. g .+= qp.data.c`: `@.` already dots `+=`, making the macro redundant
  return f, g
end
|
64 | 112 |
|
"""
    obj(qp, x)

Evaluate the quadratic objective `c0 + cᵀx + ½ xᵀHx` at `x`.
"""
function NLPModels.obj(qp :: AbstractQuadraticModel, x :: AbstractVector)
  NLPModels.increment!(qp, :neval_obj)
  # was `zeros(qp.meta.nvar)`: always Float64, which silently promoted for non-Float64 `x`
  Hx = zeros(eltype(x), qp.meta.nvar)
  coo_sym_prod!(qp.data.Hrows, qp.data.Hcols, qp.data.Hvals, x, Hx)
  return qp.data.c0 + dot(qp.data.c, x) + dot(Hx, x) / 2
end
|
71 | 119 |
|
"""
    grad!(qp, x, g)

Store the objective gradient `c + Hx` at `x` into `g` and return `g`.
"""
function NLPModels.grad!(qp :: AbstractQuadraticModel, x :: AbstractVector, g :: AbstractVector)
  NLPModels.increment!(qp, :neval_grad)
  # g ← Hx via the symmetric COO product, then add the linear term in place
  coo_sym_prod!(qp.data.Hrows, qp.data.Hcols, qp.data.Hvals, x, g)
  for i in eachindex(g)
    g[i] += qp.data.c[i]
  end
  return g
end
|
78 | 126 |
|
79 |
| -hess(qp :: AbstractQuadraticModel, ::AbstractVector; kwargs...) = qp.data.H |
80 |
| - |
81 |
| -hess_op(qp :: AbstractQuadraticModel, ::AbstractVector; kwargs...) = qp.data.opH |
| 127 | +# TODO: Better hess_op |
82 | 128 |
|
83 |
| -""" |
84 |
| -Return the structure of the Lagrangian Hessian in sparse coordinate format in place. |
85 |
| -""" |
86 |
| -function NLPModels.hess_structure!(qp :: QuadraticModel, rows :: Vector{<: Integer}, cols :: Vector{<: Integer}) |
87 |
| - rows .= qp.data.H.rowval |
88 |
| - cols .= findnz(qp.data.H)[2] |
"""
    hess_structure!(qp, rows, cols)

Fill `rows` and `cols` with the Hessian sparsity pattern (lower triangle, COO).
"""
function NLPModels.hess_structure!(qp :: QuadraticModel, rows :: AbstractVector{<: Integer}, cols :: AbstractVector{<: Integer})
  for k in eachindex(qp.data.Hrows)
    rows[k] = qp.data.Hrows[k]
    cols[k] = qp.data.Hcols[k]
  end
  return rows, cols
end
|
90 | 134 |
|
91 |
| -""" |
92 |
| -Evaluate the Lagrangian Hessian at `x` in sparse coordinate format. Only the lower triangle is returned. |
93 |
| -""" |
94 |
| -function NLPModels.hess_coord!(qp :: QuadraticModel, :: AbstractVector, rows :: AbstractVector{<: Integer}, |
95 |
| - cols :: AbstractVector{<: Integer}, vals :: Vector{<: AbstractFloat}; kwargs...) |
96 |
| - vals .= findnz(qp.data.H)[3] |
"""
    hess_coord!(qp, x, vals; obj_weight=1)

Fill `vals` with the Hessian values scaled by `obj_weight` (independent of `x`
since the model is quadratic). Returns `vals`.
"""
function NLPModels.hess_coord!(qp :: QuadraticModel, x :: AbstractVector, vals :: AbstractVector{<: AbstractFloat}; obj_weight :: Real=one(eltype(x)))
  NLPModels.increment!(qp, :neval_hess)
  # fused broadcast: `obj_weight * qp.data.Hvals` allocated a temporary vector
  vals .= obj_weight .* qp.data.Hvals
  return vals
end
|
98 | 140 |
|
99 |
| -jac(qp :: AbstractQuadraticModel, ::AbstractVector) = qp.data.A |
100 |
| - |
101 |
| -jac_op(qp :: AbstractQuadraticModel, ::AbstractVector) = LinearOperator(qp.data.A) |
# Lagrangian Hessian: the constraints are linear (meta sets `lin=1:ncon`),
# so the multipliers `y` do not contribute and are ignored.
function NLPModels.hess_coord!(qp :: QuadraticModel, x :: AbstractVector, y :: AbstractVector, vals :: AbstractVector{<: AbstractFloat}; obj_weight :: Real=one(eltype(x)))
  return hess_coord!(qp, x, vals, obj_weight=obj_weight)
end
102 | 142 |
|
103 |
| -jac_coord(qp :: AbstractQuadraticModel, ::AbstractVector) = findnz(qp.data.A) |
104 |
| - |
105 |
| -""" |
106 |
| -Return the structure of the constraints Jacobian in sparse coordinate format in place. |
107 |
| -""" |
108 |
| -function NLPModels.jac_structure!(qp :: QuadraticModel, rows :: Vector{<: Integer}, cols :: Vector{<: Integer}) |
109 |
| - rows .= qp.data.A.rowval |
110 |
| - cols .= findnz(qp.data.A)[2] |
"""
    jac_structure!(qp, rows, cols)

Fill `rows` and `cols` with the constraint Jacobian sparsity pattern (COO).
"""
function NLPModels.jac_structure!(qp :: QuadraticModel, rows :: AbstractVector{<: Integer}, cols :: AbstractVector{<: Integer})
  for k in eachindex(qp.data.Arows)
    rows[k] = qp.data.Arows[k]
    cols[k] = qp.data.Acols[k]
  end
  return rows, cols
end
|
112 | 148 |
|
113 |
| -""" |
114 |
| -Return the structure of the constraints Jacobian in sparse coordinate format in place. |
115 |
| -""" |
116 |
| -function NLPModels.jac_coord!(qp :: QuadraticModel, x :: AbstractVector, rows :: Vector{<: Integer}, |
117 |
| - cols :: Vector{<: Integer}, vals :: Vector{<: AbstractFloat}) |
118 |
| - vals .= findnz(qp.data.A)[3] |
"""
    jac_coord!(qp, x, vals)

Fill `vals` with the Jacobian values (constant: the constraints are linear).
"""
function NLPModels.jac_coord!(qp :: QuadraticModel, x :: AbstractVector, vals :: AbstractVector{<: AbstractFloat})
  NLPModels.increment!(qp, :neval_jac)
  copyto!(vals, qp.data.Avals)
  return vals
end
|
120 | 154 |
|
"""
    cons!(qp, x, c)

Store the constraint values `A * x` into `c` and return `c`.
"""
function NLPModels.cons!(qp :: AbstractQuadraticModel, x :: AbstractVector, c :: AbstractVector)
  NLPModels.increment!(qp, :neval_cons)
  coo_prod!(qp.data.Arows, qp.data.Acols, qp.data.Avals, x, c)  # c ← A x
  return c
end
|
126 | 160 |
|
127 |
| -function NLPModels.hprod!(qp :: AbstractQuadraticModel, x :: AbstractVector, v :: AbstractVector, Av :: AbstractVector) |
128 |
| - qp.counters.neval_hprod += 1 |
129 |
| - Av .= qp.data.opH * v |
130 |
| - return Av |
"""
    hprod!(qp, x, v, Hv; obj_weight=1)

Store `obj_weight * H * v` into `Hv` (independent of `x`) and return `Hv`.
"""
function NLPModels.hprod!(qp :: AbstractQuadraticModel, x :: AbstractVector, v :: AbstractVector, Hv :: AbstractVector; obj_weight :: Real = one(eltype(x)))
  NLPModels.increment!(qp, :neval_hprod)
  coo_sym_prod!(qp.data.Hrows, qp.data.Hcols, qp.data.Hvals, v, Hv)
  # skip the scaling pass entirely when the weight is 1
  isone(obj_weight) || (Hv .*= obj_weight)
  return Hv
end
|
132 | 169 |
|
# Lagrangian Hessian-vector product: linear constraints contribute nothing,
# so the multipliers `y` are ignored and we delegate to the objective version.
function NLPModels.hprod!(qp :: AbstractQuadraticModel, x :: AbstractVector, y :: AbstractVector, v :: AbstractVector, Hv :: AbstractVector; obj_weight :: Real = one(eltype(x)))
  return hprod!(qp, x, v, Hv, obj_weight=obj_weight)
end
| 171 | + |
"""
    jprod!(qp, x, v, Av)

Store the Jacobian-vector product `A * v` into `Av` and return `Av`.
"""
function NLPModels.jprod!(qp :: AbstractQuadraticModel, x :: AbstractVector, v :: AbstractVector, Av :: AbstractVector)
  NLPModels.increment!(qp, :neval_jprod)
  coo_prod!(qp.data.Arows, qp.data.Acols, qp.data.Avals, v, Av)  # Av ← A v
  return Av
end
|
138 | 177 |
|
"""
    jtprod!(qp, x, v, Atv)

Store the transposed Jacobian-vector product `Aᵀ * v` into `Atv` and return `Atv`.
"""
function NLPModels.jtprod!(qp :: AbstractQuadraticModel, x :: AbstractVector, v :: AbstractVector, Atv :: AbstractVector)
  NLPModels.increment!(qp, :neval_jtprod)
  # the transpose product is the plain COO product with rows and cols swapped
  coo_prod!(qp.data.Acols, qp.data.Arows, qp.data.Avals, v, Atv)
  return Atv
end
|
0 commit comments