8 changes: 7 additions & 1 deletion .github/workflows/Breakage.yml
@@ -159,8 +159,14 @@ jobs:
             fi
           done >> summary.md
 
+      - name: Display summary in CI logs
+        run: |
+          echo "### Breakage Summary" >> $GITHUB_STEP_SUMMARY
+          cat breakage/summary.md >> $GITHUB_STEP_SUMMARY
+
       - name: PR comment with file
-        uses: actions/github-script@v6
+        if: github.event.pull_request.head.repo.fork == false
+        uses: actions/github-script@main
         with:
          github-token: ${{ secrets.GITHUB_TOKEN }}
          script: |
14 changes: 7 additions & 7 deletions .github/workflows/ci.yml
@@ -13,26 +13,26 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        version: ['1.6', '1']
-        os: [ubuntu-latest, macOS-latest, windows-latest]
+        version: ['lts', '1']
+        os: [ubuntu-latest, macos-latest, windows-latest]
         arch: [x64]
         allow_failure: [false]
         include:
-          - version: 'nightly'
+          - version: 'pre'
             os: ubuntu-latest
             arch: x64
             allow_failure: true
-          - version: 'nightly'
-            os: macOS-latest
+          - version: 'pre'
+            os: macos-latest
             arch: x64
             allow_failure: true
-          - version: 'nightly'
+          - version: 'pre'
             os: windows-latest
             arch: x64
             allow_failure: true
     steps:
       - uses: actions/checkout@v2
-      - uses: julia-actions/setup-julia@v1
+      - uses: julia-actions/setup-julia@v2
         with:
           version: ${{ matrix.version }}
          arch: ${{ matrix.arch }}
6 changes: 3 additions & 3 deletions Project.toml
@@ -9,11 +9,11 @@ NLPModelsModifiers = "e01155f1-5c6f-4375-a9d8-616dd036575f"
 SolverCore = "ff4d7338-4cf1-434d-91df-b86cb86fb843"
 
 [compat]
-Ipopt = "1"
-NLPModels = "0.19, 0.20, 0.21"
+Ipopt = "1.11"
+NLPModels = "0.21.6"
 NLPModelsModifiers = "0.7"
 SolverCore = "0.3"
-julia = "^1.6"
+julia = "1.10"
 
 [extras]
 ADNLPModels = "54578032-b7ea-4c30-94aa-7cbd1cce6c9a"
4 changes: 2 additions & 2 deletions docs/Project.toml
@@ -9,6 +9,6 @@ NLPModels = "a4795742-8479-5a88-8948-cc11e1c8c1a6"
 Plots = "91a5bcdd-55d7-5caf-9e0b-520d859cae80"
 
 [compat]
-ADNLPModels = "0.7"
+ADNLPModels = "0.8"
 Documenter = "1.0"
-NLPModels = "0.20"
+NLPModels = "0.21.6"
6 changes: 4 additions & 2 deletions docs/src/tutorial.md
@@ -6,13 +6,15 @@ You can call Ipopt with the L-BFGS Hessian approximation by passing the followin
 stats_ipopt = ipopt(nlp,
                     hessian_approximation="limited-memory",
                     limited_memory_update_type="bfgs",
-                    limited_memory_max_history=10)
+                    limited_memory_max_history=6)
 ```
 
-This will use the L-BFGS method for Hessian approximation with a history size of 10.
+This will use the L-BFGS method for Hessian approximation with a history size of 6.
+Note that these options are used by default in `NLPModelsIpopt.jl` if `nlp.meta.hess_available` is `false`.
 
 Reference:
 - [Ipopt.jl Manual: Hessian approximation](https://coin-or.github.io/Ipopt/OPTIONS.html#OPT_Hessian_Approximation)
 
 # Tutorial
 
 NLPModelsIpopt is a thin IPOPT wrapper for NLPModels. In this tutorial we show examples of problems created with NLPModels and solved with Ipopt.
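Context for the tutorial change above: the Ipopt options in that snippet are ordinary keyword arguments to `ipopt`, forwarded to the underlying solver. A minimal, self-contained sketch of the explicit L-BFGS call, assuming `ADNLPModels` is installed and using the Rosenbrock function as a stand-in problem (both are illustrative choices, not part of this PR):

```julia
using ADNLPModels, NLPModelsIpopt

# Illustrative model: the Rosenbrock function built with automatic differentiation.
nlp = ADNLPModel(x -> (x[1] - 1)^2 + 100 * (x[2] - x[1]^2)^2, [-1.2; 1.0])

# Request the L-BFGS Hessian approximation explicitly, mirroring the tutorial snippet.
stats = ipopt(
  nlp,
  hessian_approximation = "limited-memory",
  limited_memory_update_type = "bfgs",
  limited_memory_max_history = 6,
)
println(stats.status)  # :first_order on success
```

With this PR, the same three options are applied automatically whenever `nlp.meta.hess_available` is `false`, so the explicit keywords are only needed to override the defaults (for example, a different `limited_memory_max_history`).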
8 changes: 8 additions & 0 deletions src/NLPModelsIpopt.jl
@@ -61,6 +61,7 @@ mutable struct IpoptSolver <: AbstractOptimizationSolver
 end
 
 function IpoptSolver(nlp::AbstractNLPModel)
+  @assert nlp.meta.grad_available && (nlp.meta.ncon == 0 || nlp.meta.jac_available)
   eval_f, eval_g, eval_grad_f, eval_jac_g, eval_h = set_callbacks(nlp)
 
   problem = CreateIpoptProblem(
@@ -248,6 +249,13 @@ function SolverCore.solve!(
   SolverCore.reset!(stats)
   kwargs = Dict(kwargs)
 
+  # Use L-BFGS if the sparse hessian of the Lagrangian is not available
+  if !nlp.meta.hess_available
+    AddIpoptStrOption(problem, "hessian_approximation", "limited-memory")
+    AddIpoptStrOption(problem, "limited_memory_update_type", "bfgs")
+    AddIpoptIntOption(problem, "limited_memory_max_history", 6)
+  end
+
   # see if user wants to warm start from an initial primal-dual guess
   if all(k ∈ keys(kwargs) for k ∈ [:x0, :y0, :zL0, :zU0])
     AddIpoptStrOption(problem, "warm_start_init_point", "yes")
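The second hunk above inserts the L-BFGS fallback right before the existing warm-start logic: when `x0`, `y0`, `zL0`, and `zU0` are all passed, `warm_start_init_point` is enabled. A hedged usage sketch of that warm-start path, assuming an `ADNLPModels` test problem and the standard `GenericExecutionStats` fields (`solution`, `multipliers`, `multipliers_L`, `multipliers_U`) as the source of the primal-dual guess; none of these example names come from this PR:

```julia
using ADNLPModels, NLPModelsIpopt

# Illustrative constrained model with bounds, so all warm-start vectors are nonempty.
nlp = ADNLPModel(
  x -> (x[1] - 1)^2 + 100 * (x[2] - x[1]^2)^2,  # objective
  [-1.2; 1.0],                                  # starting point
  zeros(2), 2 * ones(2),                        # variable bounds (zL0 / zU0)
  x -> [x[1] + x[2]],                           # one general constraint (y0)
  [-Inf], [1.5],
)

# Cold start.
stats1 = ipopt(nlp, print_level = 0)

# Warm start the primal-dual iterate from the first solve.
stats2 = ipopt(
  nlp,
  x0 = stats1.solution,
  y0 = stats1.multipliers,
  zL0 = stats1.multipliers_L,
  zU0 = stats1.multipliers_U,
  print_level = 0,
)
```

The L-BFGS fallback added in this PR does not change that path; it only sets the three limited-memory options when the model cannot provide a sparse Hessian of the Lagrangian.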