diff --git a/docs/src/guidelines.md b/docs/src/guidelines.md index 7ef1a7cb..474d73b4 100644 --- a/docs/src/guidelines.md +++ b/docs/src/guidelines.md @@ -68,11 +68,11 @@ The following functions should be defined: - `cons_nln!(nlp, x, c)` - `jac_lin_structure!(nlp, jrows, jcols)` - `jac_nln_structure!(nlp, jrows, jcols)` - - `jac_lin_coord!(nlp, x, jvals)` + - `jac_lin_coord!(nlp, jvals)` - `jac_nln_coord!(nlp, x, jvals)` - - `jprod_lin!(nlp, x, v, Jv)` + - `jprod_lin!(nlp, v, Jv)` - `jprod_nln!(nlp, x, v, Jv)` - - `jtprod_lin!(nlp, x, v, Jtv)` + - `jtprod_lin!(nlp, v, Jtv)` - `jtprod_nln!(nlp, x, v, Jtv)` - `hess_coord!(nlp, x, y, hvals; obj_weight=1)` - `hprod!(nlp, x, y, v, Hv; obj_weight=1)` diff --git a/src/nlp/api.jl b/src/nlp/api.jl index 46812e8e..59dea7ab 100644 --- a/src/nlp/api.jl +++ b/src/nlp/api.jl @@ -1,3 +1,5 @@ +using Base: @deprecate + export obj, grad, grad!, objgrad, objgrad!, objcons, objcons! export cons, cons!, cons_lin, cons_lin!, cons_nln, cons_nln! export jth_con, jth_congrad, jth_congrad!, jth_sparse_congrad @@ -266,10 +268,10 @@ function jac_coord!(nlp::AbstractNLPModel, x::AbstractVector, vals::AbstractVect increment!(nlp, :neval_jac) if nlp.meta.nlin > 0 if nlp.meta.nnln == 0 - jac_lin_coord!(nlp, x, vals) + jac_lin_coord!(nlp, vals) else lin_ind = 1:(nlp.meta.lin_nnzj) - jac_lin_coord!(nlp, x, view(vals, lin_ind)) + jac_lin_coord!(nlp, view(vals, lin_ind)) end end if nlp.meta.nnln > 0 @@ -307,36 +309,40 @@ function jac(nlp::AbstractNLPModel, x::AbstractVector) end """ - vals = jac_lin_coord!(nlp, x, vals) + vals = jac_lin_coord!(nlp, vals) -Evaluate ``J(x)``, the linear constraints Jacobian at `x` in sparse coordinate format, +Evaluate the linear constraints Jacobian in sparse coordinate format, overwriting `vals`. """ function jac_lin_coord! 
end +@deprecate jac_lin_coord!(nlp::AbstractNLPModel, x::AbstractVector, vals::AbstractVector) jac_lin_coord!(nlp, vals) + """ -    vals = jac_lin_coord(nlp, x) +    vals = jac_lin_coord(nlp) -Evaluate ``J(x)``, the linear constraints Jacobian at `x` in sparse coordinate format. +Evaluate the linear constraints Jacobian in sparse coordinate format. """ -function jac_lin_coord(nlp::AbstractNLPModel{T, S}, x::AbstractVector) where {T, S} -  @lencheck nlp.meta.nvar x +function jac_lin_coord(nlp::AbstractNLPModel{T, S}) where {T, S}   vals = S(undef, nlp.meta.lin_nnzj) -  return jac_lin_coord!(nlp, x, vals) +  return jac_lin_coord!(nlp, vals) end +@deprecate jac_lin_coord(nlp::AbstractNLPModel, x::AbstractVector) jac_lin_coord(nlp) + """ -    Jx = jac_lin(nlp, x) +    Jx = jac_lin(nlp) -Evaluate ``J(x)``, the linear constraints Jacobian at `x` as a sparse matrix. +Evaluate the linear constraints Jacobian as a sparse matrix. """ -function jac_lin(nlp::AbstractNLPModel, x::AbstractVector) -  @lencheck nlp.meta.nvar x +function jac_lin(nlp::AbstractNLPModel)   rows, cols = jac_lin_structure(nlp) -  vals = jac_lin_coord(nlp, x) +  vals = jac_lin_coord(nlp)   sparse(rows, cols, vals, nlp.meta.nlin, nlp.meta.nvar) end +@deprecate jac_lin(nlp::AbstractNLPModel, x::AbstractVector) jac_lin(nlp) + """     vals = jac_nln_coord!(nlp, x, vals) @@ -390,9 +396,9 @@ function jprod!(nlp::AbstractNLPModel, x::AbstractVector, v::AbstractVector, Jv:   increment!(nlp, :neval_jprod)   if nlp.meta.nlin > 0     if nlp.meta.nnln == 0 -      jprod_lin!(nlp, x, v, Jv) +      jprod_lin!(nlp, v, Jv)     else -      jprod_lin!(nlp, x, v, view(Jv, nlp.meta.lin)) +      jprod_lin!(nlp, v, view(Jv, nlp.meta.lin))     end   end   if nlp.meta.nnln > 0 @@ -427,23 +433,26 @@ end """ -    Jv = jprod_lin(nlp, x, v) +    Jv = jprod_lin(nlp, v) -Evaluate ``J(x)v``, the linear Jacobian-vector product at `x`. +Evaluate ``Jv``, the linear Jacobian-vector product. 
""" -function jprod_lin(nlp::AbstractNLPModel{T, S}, x::AbstractVector, v::AbstractVector) where {T, S} -  @lencheck nlp.meta.nvar x v +function jprod_lin(nlp::AbstractNLPModel{T, S}, v::AbstractVector) where {T, S} +  @lencheck nlp.meta.nvar v   Jv = S(undef, nlp.meta.nlin) -  return jprod_lin!(nlp, x, v, Jv) +  return jprod_lin!(nlp, v, Jv) end """ -    Jv = jprod_lin!(nlp, x, v, Jv) +    Jv = jprod_lin!(nlp, v, Jv) -Evaluate ``J(x)v``, the linear Jacobian-vector product at `x` in place. +Evaluate ``Jv``, the linear Jacobian-vector product in place. """ function jprod_lin! end +@deprecate jprod_lin(nlp::AbstractNLPModel, x::AbstractVector, v::AbstractVector) jprod_lin(nlp, v) +@deprecate jprod_lin!(nlp::AbstractNLPModel, x::AbstractVector, v::AbstractVector, Jv::AbstractVector) jprod_lin!(nlp, v, Jv) + """     Jv = jprod_lin!(nlp, rows, cols, vals, v, Jv) @@ -527,18 +536,18 @@ function jtprod!(nlp::AbstractNLPModel, x::AbstractVector, v::AbstractVector, Jt   @lencheck nlp.meta.ncon v   increment!(nlp, :neval_jtprod)   if nlp.meta.nnln == 0 -    (nlp.meta.nlin > 0) && jtprod_lin!(nlp, x, v, Jtv) +    (nlp.meta.nlin > 0) && jtprod_lin!(nlp, v, Jtv)   elseif nlp.meta.nlin == 0     (nlp.meta.nnln > 0) && jtprod_nln!(nlp, x, v, Jtv)   elseif nlp.meta.nlin >= nlp.meta.nnln -    jtprod_lin!(nlp, x, view(v, nlp.meta.lin), Jtv) +    jtprod_lin!(nlp, view(v, nlp.meta.lin), Jtv)     if nlp.meta.nnln > 0       Jtv .+= jtprod_nln(nlp, x, view(v, nlp.meta.nln))     end   else     jtprod_nln!(nlp, x, view(v, nlp.meta.nln), Jtv)     if nlp.meta.nlin > 0 -      Jtv .+= jtprod_lin(nlp, x, view(v, nlp.meta.lin)) +      Jtv .+= jtprod_lin(nlp, view(v, nlp.meta.lin))     end   end   return Jtv @@ -566,24 +575,26 @@ end """ -    Jtv = jtprod_lin(nlp, x, v) +    Jtv = jtprod_lin(nlp, v) -Evaluate ``J(x)^Tv``, the linear transposed-Jacobian-vector product at `x`. +Evaluate ``J^Tv``, the linear transposed-Jacobian-vector product. 
""" -function jtprod_lin(nlp::AbstractNLPModel{T, S}, x::AbstractVector, v::AbstractVector) where {T, S} -  @lencheck nlp.meta.nvar x +function jtprod_lin(nlp::AbstractNLPModel{T, S}, v::AbstractVector) where {T, S}   @lencheck nlp.meta.nlin v   Jtv = S(undef, nlp.meta.nvar) -  return jtprod_lin!(nlp, x, v, Jtv) +  return jtprod_lin!(nlp, v, Jtv) end """ -    Jtv = jtprod_lin!(nlp, x, v, Jtv) +    Jtv = jtprod_lin!(nlp, v, Jtv) -Evaluate ``J(x)^Tv``, the linear transposed-Jacobian-vector product at `x` in place. +Evaluate ``J^Tv``, the linear transposed-Jacobian-vector product in place. """ function jtprod_lin! end +@deprecate jtprod_lin(nlp::AbstractNLPModel, x::AbstractVector, v::AbstractVector) jtprod_lin(nlp, v) +@deprecate jtprod_lin!(nlp::AbstractNLPModel, x::AbstractVector, v::AbstractVector, Jtv::AbstractVector) jtprod_lin!(nlp, v, Jtv) + """     Jtv = jtprod_lin!(nlp, rows, cols, vals, v, Jtv) @@ -736,37 +747,37 @@ end """ -    J = jac_lin_op(nlp, x) +    J = jac_lin_op(nlp) -Return the linear Jacobian at `x` as a linear operator. +Return the linear Jacobian as a linear operator. The resulting object may be used as if it were a matrix, e.g., `J * v` or `J' * v`. """ -function jac_lin_op(nlp::AbstractNLPModel{T, S}, x::AbstractVector) where {T, S} -  @lencheck nlp.meta.nvar x +function jac_lin_op(nlp::AbstractNLPModel{T, S}) where {T, S}   Jv = S(undef, nlp.meta.nlin)   Jtv = S(undef, nlp.meta.nvar) -  return jac_lin_op!(nlp, x, Jv, Jtv) +  return jac_lin_op!(nlp, Jv, Jtv) end +@deprecate jac_lin_op(nlp::AbstractNLPModel, x::AbstractVector) jac_lin_op(nlp) + """ -    J = jac_lin_op!(nlp, x, Jv, Jtv) +    J = jac_lin_op!(nlp, Jv, Jtv) -Return the linear Jacobian at `x` as a linear operator. +Return the linear Jacobian as a linear operator. The resulting object may be used as if it were a matrix, e.g., `J * v` or `J' * v`. The values `Jv` and `Jtv` are used as preallocated storage for the operations. 
""" function jac_lin_op!( nlp::AbstractNLPModel{T, S}, - x::AbstractVector{T}, Jv::AbstractVector, Jtv::AbstractVector, ) where {T, S} - @lencheck nlp.meta.nvar x Jtv @lencheck nlp.meta.nlin Jv + @lencheck nlp.meta.nvar Jtv prod! = @closure (res, v, α, β) -> begin # res = α * J * v + β * res - jprod_lin!(nlp, x, v, Jv) + jprod_lin!(nlp, v, Jv) if β == 0 res .= α .* Jv else @@ -775,7 +786,7 @@ function jac_lin_op!( return res end ctprod! = @closure (res, v, α, β) -> begin - jtprod_lin!(nlp, x, v, Jtv) + jtprod_lin!(nlp, v, Jtv) if β == 0 res .= α .* Jtv else @@ -786,6 +797,8 @@ function jac_lin_op!( return LinearOperator{T}(nlp.meta.nlin, nlp.meta.nvar, false, false, prod!, ctprod!, ctprod!) end +@deprecate jac_lin_op!(nlp::AbstractNLPModel, x::AbstractVector, Jv::AbstractVector, Jtv::AbstractVector) jac_lin_op!(nlp, Jv, Jtv) + """ J = jac_lin_op!(nlp, rows, cols, vals, Jv, Jtv) diff --git a/test/nlp/api.jl b/test/nlp/api.jl index 730dc5ca..3d938e1d 100644 --- a/test/nlp/api.jl +++ b/test/nlp/api.jl @@ -30,13 +30,13 @@ @test cons_lin(nlp, x) == c(x)[1:1] @test jac(nlp, x) ≈ J(x) @test jac_nln(nlp, x) ≈ J(x)[2:2, :] - @test jac_lin(nlp, x) ≈ J(x)[1:1, :] + @test jac_lin(nlp) ≈ J(x)[1:1, :] @test jprod(nlp, x, v) ≈ J(x) * v @test jprod_nln(nlp, x, v) ≈ J(x)[2:2, :] * v - @test jprod_lin(nlp, x, v) ≈ J(x)[1:1, :] * v + @test jprod_lin(nlp, v) ≈ J(x)[1:1, :] * v @test jtprod(nlp, x, w) ≈ J(x)' * w @test jtprod_nln(nlp, x, w[2:2]) ≈ J(x)[2:2, :]' * w[2:2] - @test jtprod_lin(nlp, x, w[1:1]) ≈ J(x)[1:1, :]' * w[1:1] + @test jtprod_lin(nlp, w[1:1]) ≈ J(x)[1:1, :]' * w[1:1] @test hess(nlp, x, y) ≈ tril(H(x, y)) @test hprod(nlp, x, y, v) ≈ H(x, y) * v @@ -56,12 +56,12 @@ @test jprod!(nlp, jac_structure(nlp)..., jac_coord(nlp, x), v, Jv) ≈ J(x) * v @test jprod_nln!(nlp, jac_nln_structure(nlp)..., jac_nln_coord(nlp, x), v, Jv[2:2]) ≈ J(x)[2:2, :] * v - @test jprod_lin!(nlp, jac_lin_structure(nlp)..., jac_lin_coord(nlp, x), v, Jv[1:1]) ≈ + @test jprod_lin!(nlp, 
jac_lin_structure(nlp)..., jac_lin_coord(nlp), v, Jv[1:1]) ≈ J(x)[1:1, :] * v @test jtprod!(nlp, jac_structure(nlp)..., jac_coord(nlp, x), w, Jtw) ≈ J(x)' * w @test jtprod_nln!(nlp, jac_nln_structure(nlp)..., jac_nln_coord(nlp, x), w[2:2], Jtw) ≈ J(x)[2:2, :]' * w[2:2] - @test jtprod_lin!(nlp, jac_lin_structure(nlp)..., jac_lin_coord(nlp, x), w[1:1], Jtw) ≈ + @test jtprod_lin!(nlp, jac_lin_structure(nlp)..., jac_lin_coord(nlp), w[1:1], Jtw) ≈ J(x)[1:1, :]' * w[1:1] Jop = jac_op!(nlp, x, Jv, Jtw) @test Jop * v ≈ J(x) * v @@ -91,14 +91,14 @@ @test mul!(w[2:2], Jop, v, 1.0, -1.0) ≈ res res = J(x)[2:2, :]' * w[2:2] - v @test mul!(v, Jop', w[2:2], 1.0, -1.0) ≈ res - Jop = jac_lin_op!(nlp, x, Jv[1:1], Jtw) + Jop = jac_lin_op!(nlp, Jv[1:1], Jtw) @test Jop * v ≈ J(x)[1:1, :] * v @test Jop' * w[1:1] ≈ Jtw res = J(x)[1:1, :] * v - w[1:1] @test mul!(w[1:1], Jop, v, 1.0, -1.0) ≈ res res = J(x)[1:1, :]' * w[1:1] - v @test mul!(v, Jop', w[1:1], 1.0, -1.0) ≈ res - Jop = jac_lin_op!(nlp, jac_lin_structure(nlp)..., jac_lin_coord(nlp, x), Jv[1:1], Jtw) + Jop = jac_lin_op!(nlp, jac_lin_structure(nlp)..., jac_lin_coord(nlp), Jv[1:1], Jtw) @test Jop * v ≈ J(x)[1:1, :] * v @test Jop' * w[1:1] ≈ Jtw res = J(x)[1:1, :] * v - w[1:1] diff --git a/test/nlp/dummy-model.jl b/test/nlp/dummy-model.jl index aa0983dd..4dd2f9b5 100644 --- a/test/nlp/dummy-model.jl +++ b/test/nlp/dummy-model.jl @@ -19,8 +19,8 @@ end @test_throws(MethodError, jth_congrad(model, [0.0], 1)) @test_throws(MethodError, jth_sparse_congrad(model, [0.0], 1)) @test_throws(MethodError, jth_congrad!(model, [0.0], 1, [2.0])) - @test_throws(MethodError, jprod_lin!(model, [0.0], [1.0], [2.0])) - @test_throws(MethodError, jtprod_lin!(model, [0.0], [1.0], [2.0])) + @test_throws(MethodError, jprod_lin!(model, [0.0], [2.0])) + @test_throws(MethodError, jtprod_lin!(model, [0.0], [2.0])) @test_throws(MethodError, jprod_nln!(model, [0.0], [1.0], [2.0])) @test_throws(MethodError, jtprod_nln!(model, [0.0], [1.0], [2.0])) 
@test_throws(MethodError, jth_hess_coord!(model, [0.0], 1)) @@ -28,6 +28,6 @@ end @test_throws(MethodError, ghjvprod!(model, [0.0], [1.0], [2.0], [3.0])) @assert isa(hess_op(model, [0.0]), LinearOperator) @assert isa(jac_op(model, [0.0]), LinearOperator) - @assert isa(jac_lin_op(model, [0.0]), LinearOperator) + @assert isa(jac_lin_op(model), LinearOperator) @assert isa(jac_nln_op(model, [0.0]), LinearOperator) end diff --git a/test/nlp/simple-model.jl b/test/nlp/simple-model.jl index 7f61c626..3c780004 100644 --- a/test/nlp/simple-model.jl +++ b/test/nlp/simple-model.jl @@ -139,8 +139,8 @@ function NLPModels.jac_nln_coord!(nlp::SimpleNLPModel, x::AbstractVector, vals:: return vals end -function NLPModels.jac_lin_coord!(nlp::SimpleNLPModel, x::AbstractVector, vals::AbstractVector) - @lencheck 2 x vals +function NLPModels.jac_lin_coord!(nlp::SimpleNLPModel, vals::AbstractVector) + @lencheck 2 vals increment!(nlp, :neval_jac_lin) vals .= [1, -2] return vals @@ -159,16 +159,12 @@ function NLPModels.jprod_nln!( return Jv end -function NLPModels.jprod_lin!( - nlp::SimpleNLPModel, - x::AbstractVector, - v::AbstractVector, - Jv::AbstractVector, -) - @lencheck 2 x v + +function NLPModels.jprod_lin!(nlp::SimpleNLPModel, v::AbstractVector, Jv::AbstractVector) + @lencheck 2 v @lencheck 1 Jv increment!(nlp, :neval_jprod_lin) - Jv .= [v[1] - 2 * v[2]] + Jv[1] = v[1] - 2 * v[2] return Jv end @@ -185,13 +181,8 @@ function NLPModels.jtprod_nln!( return Jtv end -function NLPModels.jtprod_lin!( - nlp::SimpleNLPModel, - x::AbstractVector, - v::AbstractVector, - Jtv::AbstractVector, -) - @lencheck 2 x Jtv +function NLPModels.jtprod_lin!(nlp::SimpleNLPModel, v::AbstractVector, Jtv::AbstractVector) + @lencheck 2 Jtv @lencheck 1 v increment!(nlp, :neval_jtprod_lin) Jtv .= [v[1]; -2 * v[1]]