Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
31 changes: 19 additions & 12 deletions docs/src/guidelines.md
Original file line number Diff line number Diff line change
Expand Up @@ -64,26 +64,33 @@ The following functions should be defined:
- `hess_coord!(nlp, x, hvals; obj_weight=1)` (sparse Hessian)
- `hess_dense!(nlp, x, Hx; obj_weight=1)` (dense Hessian)
- `hprod!(nlp, x, v, Hv; obj_weight=1)` (actually defaults to calling the constrained case)

- Constraints (constrained models need to worry about these and the ones above)
- `cons_lin!(nlp, x, c)`
- `cons_nln!(nlp, x, c)`
- `jac_lin_structure!(nlp, jrows, jcols)` (sparse Jacobian)
- `jac_nln_structure!(nlp, jrows, jcols)` (sparse Jacobian)
- `jac_lin_coord!(nlp, x, jvals)` (sparse Jacobian)
- `jac_nln_coord!(nlp, x, jvals)` (sparse Jacobian)
- `cons!(nlp, x, c)`
- `jac_structure!(nlp, jrows, jcols)` (sparse Jacobian)
- `jac_coord!(nlp, x, jvals)` (sparse Jacobian)
- `jac_dense!(nlp, x, Jx)` (dense Jacobian)
- `jprod_lin!(nlp, x, v, Jv)`
- `jprod_nln!(nlp, x, v, Jv)`
- `jtprod_lin!(nlp, x, v, Jtv)`
- `jtprod_nln!(nlp, x, v, Jtv)`
- `jprod!(nlp, x, v, Jv)`
- `jtprod!(nlp, x, v, Jtv)`
- `hess_coord!(nlp, x, y, hvals; obj_weight=1)` (sparse Hessian)
- `hess_dense!(nlp, x, y, Hx; obj_weight=1)` (dense Hessian)
- `hprod!(nlp, x, y, v, Hv; obj_weight=1)`

When the split API is enabled (`nlp.meta.split_api = true`), the treatment of linear and nonlinear constraints and their associated Jacobians is separated, and the following additional functions must be provided:

- `cons_lin!(nlp, x, c)`
- `cons_nln!(nlp, x, c)`
- `jac_lin_structure!(nlp, jrows, jcols)`
- `jac_nln_structure!(nlp, jrows, jcols)`
- `jac_lin_coord!(nlp, x, jvals)`
- `jac_nln_coord!(nlp, x, jvals)`
- `jprod_lin!(nlp, x, v, Jv)`
- `jprod_nln!(nlp, x, v, Jv)`
- `jtprod_lin!(nlp, x, v, Jtv)`
- `jtprod_nln!(nlp, x, v, Jtv)`

The linear constraints are specified at the initialization of the `NLPModelMeta` using the keyword argument `lin`.
The indices of linear and nonlinear constraints are respectively available in `nlp.meta.lin` and `nlp.meta.nln`.
If your model uses only linear (resp. nonlinear) constraints, then it suffices to implement the `*_lin` (resp. `*_nln`) functions.
Alternatively, one could implement only the functions without the `_lin`/`_nln` suffixes (e.g., only `cons!`), but this might cause errors with tools that differentiate between linear and nonlinear constraints.

If the Jacobian or the Hessian of the Lagrangian is dense, there is no need to implement the corresponding `*_structure!` and `*_coord!` methods.
Only the corresponding `*_dense!` methods need to be implemented.
Expand Down
12 changes: 6 additions & 6 deletions docs/src/reference.md
Original file line number Diff line number Diff line change
@@ -1,17 +1,17 @@
# Reference

## Contents

```@contents
Pages = ["reference.md"]
```

## Index

```@index
Pages = ["reference.md"]
```

```@autodocs
Modules = [NLPModels]
```
```
119 changes: 5 additions & 114 deletions src/nlp/api.jl
Original file line number Diff line number Diff line change
Expand Up @@ -57,26 +57,7 @@ end

Evaluate ``c(x)``, the constraints at `x` in place.
"""
function cons!(nlp::AbstractNLPModel, x::AbstractVector, cx::AbstractVector)
  @lencheck nlp.meta.nvar x
  @lencheck nlp.meta.ncon cx
  increment!(nlp, :neval_cons)
  nlin, nnln = nlp.meta.nlin, nlp.meta.nnln
  # Fill the linear components: write straight into `cx` when every
  # constraint is linear, otherwise only into the linear rows.
  if nlin > 0
    cons_lin!(nlp, x, nnln == 0 ? cx : view(cx, nlp.meta.lin))
  end
  # Fill the nonlinear components the same way.
  if nnln > 0
    cons_nln!(nlp, x, nlin == 0 ? cx : view(cx, nlp.meta.nln))
  end
  return cx
end
function cons! end

"""
c = cons_lin(nlp, x)
Expand Down Expand Up @@ -193,36 +174,7 @@ end
Return the structure of the constraints Jacobian in sparse coordinate format in place.
This function is only available when both `nlp.meta.jac_available` and `nlp.meta.sparse_jacobian` are set to `true`.
"""
# Default aggregate `jac_structure!` built from the split API: the first
# `lin_nnzj` slots of (rows, cols) hold the linear-Jacobian structure and
# the remaining `nln_nnzj` slots the nonlinear one.
function jac_structure!(
nlp::AbstractNLPModel,
rows::AbstractVector{T},
cols::AbstractVector{T},
) where {T}
@lencheck nlp.meta.nnzj rows cols
# Index range of the linear entries inside `rows`/`cols`.
lin_ind = 1:(nlp.meta.lin_nnzj)
if nlp.meta.nlin > 0
if nlp.meta.nnln == 0
# Only linear constraints: local row numbers are already global.
jac_lin_structure!(nlp, rows, cols)
else
jac_lin_structure!(nlp, view(rows, lin_ind), view(cols, lin_ind))
# `jac_lin_structure!` numbers rows locally (1:nlin). Shift each local
# row to its global constraint index by adding the number of nonlinear
# constraints listed before it; since `nlp.meta.lin` and `nlp.meta.nln`
# hold the linear/nonlinear constraint indices, this yields
# `nlp.meta.lin[rows[i]]`.
for i in lin_ind
rows[i] += count(x < nlp.meta.lin[rows[i]] for x in nlp.meta.nln)
end
end
end
if nlp.meta.nnln > 0
if nlp.meta.nlin == 0
jac_nln_structure!(nlp, rows, cols)
else
# Nonlinear entries occupy the slots after the linear ones.
nln_ind = (nlp.meta.lin_nnzj + 1):(nlp.meta.lin_nnzj + nlp.meta.nln_nnzj)
jac_nln_structure!(nlp, view(rows, nln_ind), view(cols, nln_ind))
# Same local-to-global row shift, this time for the nonlinear rows.
for i in nln_ind
rows[i] += count(x < nlp.meta.nln[rows[i]] for x in nlp.meta.lin)
end
end
end
return rows, cols
end
function jac_structure! end

"""
(rows,cols) = jac_lin_structure(nlp)
Expand Down Expand Up @@ -270,29 +222,7 @@ function jac_nln_structure! end
Evaluate ``J(x)``, the constraints Jacobian at `x` in sparse coordinate format, overwriting `vals`.
This function is only available when both `nlp.meta.jac_available` and `nlp.meta.sparse_jacobian` are set to `true`.
"""
function jac_coord!(nlp::AbstractNLPModel, x::AbstractVector, vals::AbstractVector)
  @lencheck nlp.meta.nvar x
  @lencheck nlp.meta.nnzj vals
  increment!(nlp, :neval_jac)
  nlin, nnln = nlp.meta.nlin, nlp.meta.nnln
  # Linear Jacobian entries occupy the first `lin_nnzj` positions of `vals`.
  if nlin > 0
    dest = nnln == 0 ? vals : view(vals, 1:(nlp.meta.lin_nnzj))
    jac_lin_coord!(nlp, x, dest)
  end
  # Nonlinear entries fill the remaining `nln_nnzj` positions.
  if nnln > 0
    dest =
      nlin == 0 ? vals :
      view(vals, (nlp.meta.lin_nnzj + 1):(nlp.meta.lin_nnzj + nlp.meta.nln_nnzj))
    jac_nln_coord!(nlp, x, dest)
  end
  return vals
end

function jac_coord! end
"""
vals = jac_coord(nlp, x)

Expand Down Expand Up @@ -410,26 +340,7 @@ end
Evaluate ``J(x)v``, the Jacobian-vector product at `x` in place.
This function is only available if `nlp.meta.jprod_available` is set to `true`.
"""
function jprod!(nlp::AbstractNLPModel, x::AbstractVector, v::AbstractVector, Jv::AbstractVector)
  @lencheck nlp.meta.nvar x v
  @lencheck nlp.meta.ncon Jv
  increment!(nlp, :neval_jprod)
  nlin, nnln = nlp.meta.nlin, nlp.meta.nnln
  # Linear rows of J(x)v; use `Jv` directly when it holds only those rows.
  if nlin > 0
    jprod_lin!(nlp, x, v, nnln == 0 ? Jv : view(Jv, nlp.meta.lin))
  end
  # Nonlinear rows of J(x)v.
  if nnln > 0
    jprod_nln!(nlp, x, v, nlin == 0 ? Jv : view(Jv, nlp.meta.nln))
  end
  return Jv
end
function jprod! end

"""
Jv = jprod!(nlp, rows, cols, vals, v, Jv)
Expand Down Expand Up @@ -554,27 +465,7 @@ Evaluate ``J(x)^Tv``, the transposed-Jacobian-vector product at `x` in place.
If the problem has linear and nonlinear constraints, this function allocates.
This function is only available if `nlp.meta.jtprod_available` is set to `true`.
"""
# Default aggregate `jtprod!` built from the split API. Both the linear and
# nonlinear transposed Jacobians map into the same `Jtv` (length nvar), so the
# mixed case writes the larger contribution in place and accumulates the other
# via the allocating non-mutating variant — hence the docstring's note that
# this function allocates when both kinds of constraints are present.
function jtprod!(nlp::AbstractNLPModel, x::AbstractVector, v::AbstractVector, Jtv::AbstractVector)
@lencheck nlp.meta.nvar x Jtv
@lencheck nlp.meta.ncon v
increment!(nlp, :neval_jtprod)
if nlp.meta.nnln == 0
# Only linear constraints (if any); NOTE(review): when ncon == 0 neither
# branch runs and Jtv is returned unmodified rather than zeroed — confirm
# callers never rely on Jtv == 0 in that case.
(nlp.meta.nlin > 0) && jtprod_lin!(nlp, x, v, Jtv)
elseif nlp.meta.nlin == 0
# Only nonlinear constraints; the inner guard is necessarily true here.
(nlp.meta.nnln > 0) && jtprod_nln!(nlp, x, v, Jtv)
elseif nlp.meta.nlin >= nlp.meta.nnln
# Mixed case, linear part is larger: compute it in place, then add the
# nonlinear contribution (allocates a temporary).
jtprod_lin!(nlp, x, view(v, nlp.meta.lin), Jtv)
if nlp.meta.nnln > 0
Jtv .+= jtprod_nln(nlp, x, view(v, nlp.meta.nln))
end
else
# Mixed case, nonlinear part is larger: symmetric to the branch above.
jtprod_nln!(nlp, x, view(v, nlp.meta.nln), Jtv)
if nlp.meta.nlin > 0
Jtv .+= jtprod_lin(nlp, x, view(v, nlp.meta.lin))
end
end
return Jtv
end
function jtprod! end

"""
Jtv = jtprod!(nlp, rows, cols, vals, v, Jtv)
Expand Down
82 changes: 73 additions & 9 deletions test/nlp/simple-model.jl
Original file line number Diff line number Diff line change
Expand Up @@ -94,22 +94,42 @@ function NLPModels.hprod!(
return Hv
end

function NLPModels.cons!(nlp::SimpleNLPModel, x::AbstractVector, cx::AbstractVector)
  @lencheck 2 x
  @lencheck 2 cx
  increment!(nlp, :neval_cons)
  # Row 1 is the linear constraint, row 2 the nonlinear one.
  cx .= (x[1] - 2 * x[2] + 1, -x[1]^2 / 4 - x[2]^2 + 1)
  return cx
end

function NLPModels.cons_nln!(nlp::SimpleNLPModel, x::AbstractVector, cx::AbstractVector)
  @lencheck 2 x
  @lencheck 1 cx
  increment!(nlp, :neval_cons_nln)
  # Single nonlinear constraint value, written in place.
  c_nln = -x[1]^2 / 4 - x[2]^2 + 1
  cx[1] = c_nln
  return cx
end

function NLPModels.cons_lin!(nlp::SimpleNLPModel, x::AbstractVector, cx::AbstractVector)
  @lencheck 2 x
  @lencheck 1 cx
  increment!(nlp, :neval_cons_lin)
  # Single linear constraint value, written in place.
  c_lin = x[1] - 2 * x[2] + 1
  cx[1] = c_lin
  return cx
end

function NLPModels.jac_structure!(
  nlp::SimpleNLPModel,
  rows::AbstractVector{Int},
  cols::AbstractVector{Int},
)
  @lencheck 4 rows cols
  # Dense 2×2 Jacobian stored row by row: (1,1), (1,2), (2,1), (2,2).
  rows .= (1, 1, 2, 2)
  cols .= (1, 2, 1, 2)
  return rows, cols
end

function NLPModels.jac_nln_structure!(
nlp::SimpleNLPModel,
rows::AbstractVector{Int},
Expand All @@ -132,20 +152,48 @@ function NLPModels.jac_lin_structure!(
return rows, cols
end

function NLPModels.jac_coord!(nlp::SimpleNLPModel, x::AbstractVector, vals::AbstractVector)
  @lencheck 2 x
  @lencheck 4 vals
  increment!(nlp, :neval_jac)
  # Values follow the (rows, cols) order declared by `jac_structure!`:
  # row 1 holds the linear constraint, row 2 the nonlinear one.
  vals .= (1, -2, -x[1] / 2, -2 * x[2])
  return vals
end

function NLPModels.jac_nln_coord!(nlp::SimpleNLPModel, x::AbstractVector, vals::AbstractVector)
@lencheck 2 x vals
@lencheck 2 x
@lencheck 2 vals
increment!(nlp, :neval_jac_nln)
vals .= [-x[1] / 2, -2 * x[2]]
vals[1] = -x[1] / 2
vals[2] = -2 * x[2]
return vals
end

function NLPModels.jac_lin_coord!(nlp::SimpleNLPModel, x::AbstractVector, vals::AbstractVector)
@lencheck 2 x vals
increment!(nlp, :neval_jac_lin)
vals .= [1, -2]
vals[1] = 1
vals[2] = -2
return vals
end

function NLPModels.jprod!(
  nlp::SimpleNLPModel,
  x::AbstractVector,
  v::AbstractVector,
  Jv::AbstractVector,
)
  @lencheck 2 x v
  @lencheck 2 Jv
  increment!(nlp, :neval_jprod)
  v1, v2 = v[1], v[2]
  # J(x) = [1 -2; -x₁/2 -2x₂], applied to v in place.
  Jv[1] = v1 - 2 * v2
  Jv[2] = -x[1] * v1 / 2 - 2 * x[2] * v2
  return Jv
end

function NLPModels.jprod_nln!(
nlp::SimpleNLPModel,
x::AbstractVector,
Expand All @@ -155,7 +203,7 @@ function NLPModels.jprod_nln!(
@lencheck 2 x v
@lencheck 1 Jv
increment!(nlp, :neval_jprod_nln)
Jv .= [-x[1] * v[1] / 2 - 2 * x[2] * v[2]]
Jv[1] = -x[1] * v[1] / 2 - 2 * x[2] * v[2]
return Jv
end

Expand All @@ -168,10 +216,24 @@ function NLPModels.jprod_lin!(
@lencheck 2 x v
@lencheck 1 Jv
increment!(nlp, :neval_jprod_lin)
Jv .= [v[1] - 2 * v[2]]
Jv[1] = v[1] - 2 * v[2]
return Jv
end

function NLPModels.jtprod!(
  nlp::SimpleNLPModel,
  x::AbstractVector,
  v::AbstractVector,
  Jtv::AbstractVector,
)
  @lencheck 2 x Jtv
  @lencheck 2 v
  increment!(nlp, :neval_jtprod)
  v1, v2 = v[1], v[2]
  # Jᵀ(x) = [1 -x₁/2; -2 -2x₂], applied to v in place.
  Jtv[1] = v1 - x[1] * v2 / 2
  Jtv[2] = -2 * v1 - 2 * x[2] * v2
  return Jtv
end

function NLPModels.jtprod_nln!(
nlp::SimpleNLPModel,
x::AbstractVector,
Expand All @@ -181,7 +243,8 @@ function NLPModels.jtprod_nln!(
@lencheck 2 x Jtv
@lencheck 1 v
increment!(nlp, :neval_jtprod_nln)
Jtv .= [-x[1] * v[1] / 2; -2 * x[2] * v[1]]
Jtv[1] = -x[1] * v[1] / 2
Jtv[2] = -2 * x[2] * v[1]
return Jtv
end

Expand All @@ -194,7 +257,8 @@ function NLPModels.jtprod_lin!(
@lencheck 2 x Jtv
@lencheck 1 v
increment!(nlp, :neval_jtprod_lin)
Jtv .= [v[1]; -2 * v[1]]
Jtv[1] = v[1]
Jtv[2] = -2 * v[1]
return Jtv
end

Expand Down
Loading
Loading