diff --git a/.github/workflows/Breakage.yml b/.github/workflows/Breakage.yml
index 3883812..00c5e6b 100644
--- a/.github/workflows/Breakage.yml
+++ b/.github/workflows/Breakage.yml
@@ -159,8 +159,14 @@ jobs:
           fi
         done >> summary.md

+      - name: Display summary in CI logs
+        run: |
+          echo "### Breakage Summary" >> $GITHUB_STEP_SUMMARY
+          cat breakage/summary.md >> $GITHUB_STEP_SUMMARY
+
       - name: PR comment with file
-        uses: actions/github-script@v6
+        if: github.event.pull_request.head.repo.fork == false
+        uses: actions/github-script@main
         with:
           github-token: ${{ secrets.GITHUB_TOKEN }}
           script: |
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 54b2a7f..12e7533 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -13,26 +13,26 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        version: ['1.6', '1']
-        os: [ubuntu-latest, macOS-latest, windows-latest]
+        version: ['lts', '1']
+        os: [ubuntu-latest, macos-latest, windows-latest]
         arch: [x64]
         allow_failure: [false]
         include:
-          - version: 'nightly'
+          - version: 'pre'
            os: ubuntu-latest
            arch: x64
            allow_failure: true
-          - version: 'nightly'
-            os: macOS-latest
+          - version: 'pre'
+            os: macos-latest
            arch: x64
            allow_failure: true
-          - version: 'nightly'
+          - version: 'pre'
            os: windows-latest
            arch: x64
            allow_failure: true
     steps:
      - uses: actions/checkout@v2
-      - uses: julia-actions/setup-julia@v1
+      - uses: julia-actions/setup-julia@v2
        with:
          version: ${{ matrix.version }}
          arch: ${{ matrix.arch }}
diff --git a/Project.toml b/Project.toml
index 0d53334..cc2024f 100644
--- a/Project.toml
+++ b/Project.toml
@@ -9,11 +9,11 @@ NLPModelsModifiers = "e01155f1-5c6f-4375-a9d8-616dd036575f"
 SolverCore = "ff4d7338-4cf1-434d-91df-b86cb86fb843"

 [compat]
-Ipopt = "1"
-NLPModels = "0.19, 0.20, 0.21"
+Ipopt = "1.11"
+NLPModels = "0.21.6"
 NLPModelsModifiers = "0.7"
 SolverCore = "0.3"
-julia = "^1.6"
+julia = "1.10"

 [extras]
 ADNLPModels = "54578032-b7ea-4c30-94aa-7cbd1cce6c9a"
diff --git a/docs/Project.toml b/docs/Project.toml
index 030a953..08f92bb 100644
--- a/docs/Project.toml
+++ b/docs/Project.toml
@@ -9,6 +9,6 @@ NLPModels = "a4795742-8479-5a88-8948-cc11e1c8c1a6"
 Plots = "91a5bcdd-55d7-5caf-9e0b-520d859cae80"

 [compat]
-ADNLPModels = "0.7"
+ADNLPModels = "0.8"
 Documenter = "1.0"
-NLPModels = "0.20"
+NLPModels = "0.21.6"
diff --git a/docs/src/tutorial.md b/docs/src/tutorial.md
index b10ec4c..22d6664 100644
--- a/docs/src/tutorial.md
+++ b/docs/src/tutorial.md
@@ -6,13 +6,15 @@ You can call Ipopt with the L-BFGS Hessian approximation by passing the followin
 stats_ipopt = ipopt(nlp,
   hessian_approximation="limited-memory",
   limited_memory_update_type="bfgs",
-  limited_memory_max_history=10)
+  limited_memory_max_history=6)
 ```
-This will use the L-BFGS method for Hessian approximation with a history size of 10.
+This will use the L-BFGS method for Hessian approximation with a history size of 6.
+Note that these options are used by default in `NLPModelsIpopt.jl` if `nlp.meta.hess_available` is `false`.

 Reference:
 - [Ipopt.jl Manual: Hessian approximation](https://coin-or.github.io/Ipopt/OPTIONS.html#OPT_Hessian_Approximation)
+

 # Tutorial

 NLPModelsIpopt is a thin IPOPT wrapper for NLPModels. In this tutorial we show examples of problems created with NLPModels and solved with Ipopt.
diff --git a/src/NLPModelsIpopt.jl b/src/NLPModelsIpopt.jl
index d7048a2..db36fd5 100644
--- a/src/NLPModelsIpopt.jl
+++ b/src/NLPModelsIpopt.jl
@@ -61,6 +61,7 @@ mutable struct IpoptSolver <: AbstractOptimizationSolver
 end

 function IpoptSolver(nlp::AbstractNLPModel)
+  @assert nlp.meta.grad_available && (nlp.meta.ncon == 0 || nlp.meta.jac_available)
   eval_f, eval_g, eval_grad_f, eval_jac_g, eval_h = set_callbacks(nlp)

   problem = CreateIpoptProblem(
@@ -248,6 +249,13 @@ function SolverCore.solve!(
   SolverCore.reset!(stats)
   kwargs = Dict(kwargs)

+  # Use L-BFGS if the sparse Hessian of the Lagrangian is not available
+  if !nlp.meta.hess_available
+    AddIpoptStrOption(problem, "hessian_approximation", "limited-memory")
+    AddIpoptStrOption(problem, "limited_memory_update_type", "bfgs")
+    AddIpoptIntOption(problem, "limited_memory_max_history", 6)
+  end
+
   # see if user wants to warm start from an initial primal-dual guess
   if all(k ∈ keys(kwargs) for k ∈ [:x0, :y0, :zL0, :zU0])
     AddIpoptStrOption(problem, "warm_start_init_point", "yes")
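
A minimal snippet for trying the new behavior end to end (a sketch, not part of the patch: it assumes `ADNLPModels` is installed and uses an illustrative Rosenbrock problem; it passes the L-BFGS options explicitly, which is exactly what `solve!` now injects automatically when `nlp.meta.hess_available` is `false`):

```julia
using ADNLPModels, NLPModelsIpopt

# Illustrative unconstrained problem (Rosenbrock); any AbstractNLPModel works.
nlp = ADNLPModel(x -> (x[1] - 1)^2 + 100 * (x[2] - x[1]^2)^2, [-1.2; 1.0])

# Same options that solve! now sets by default when no Hessian is available.
stats = ipopt(
  nlp,
  hessian_approximation = "limited-memory",
  limited_memory_update_type = "bfgs",
  limited_memory_max_history = 6,
)

println(stats.status)  # expected: :first_order for this problem
```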