25 changes: 25 additions & 0 deletions .github/workflows/Benchmark.yml
@@ -0,0 +1,25 @@
name: Run benchmarks

on:
  pull_request:
    types: [labeled, opened, synchronize, reopened]

# Only trigger the benchmark job when the `run benchmark` label is added to the PR
jobs:
  Benchmark:
    runs-on: ubuntu-latest
    if: contains(github.event.pull_request.labels.*.name, 'run benchmark')
    steps:
      - uses: actions/checkout@v2
      - uses: julia-actions/setup-julia@latest
        with:
          version: lts
      - uses: julia-actions/julia-buildpkg@latest
      - name: Install dependencies
        run: julia -e 'using Pkg; pkg"add PkgBenchmark BenchmarkCI"'
      - name: Run benchmarks
        run: julia -e 'using BenchmarkCI; BenchmarkCI.judge(; baseline = "origin/main", script = joinpath(pwd(), "benchmark", "benchmarks.jl"))'
      - name: Post results
        run: julia -e 'using BenchmarkCI; BenchmarkCI.postjudge()'
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
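The job above runs only when the `run benchmark` label is on the PR. To preview the same comparison locally before labeling, a minimal sketch (assuming PkgBenchmark and BenchmarkCI are installed in the current environment and the clone has an `origin/main` branch) could look like:

```julia
# Local dry run of the CI job above; displayjudgement prints the comparison
# to the terminal instead of posting a comment on the PR.
using BenchmarkCI
BenchmarkCI.judge(; baseline = "origin/main", script = joinpath(pwd(), "benchmark", "benchmarks.jl"))
BenchmarkCI.displayjudgement()
```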
12 changes: 12 additions & 0 deletions benchmark/Project.toml
@@ -0,0 +1,12 @@
[deps]
ADNLPModels = "54578032-b7ea-4c30-94aa-7cbd1cce6c9a"
BenchmarkTools = "6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf"
DataFrames = "a93c6f00-e57d-5684-b7b6-d8193f3e46c0"
Dates = "ade2ca70-3891-5945-98fb-dc099432e06a"
JLD2 = "033835bb-8acc-5ee8-8aae-3f567f8a3819"
JuMP = "4076af6c-e467-56ae-b986-b466b2749572"
Logging = "56ddb016-857b-54e1-b83d-db4d58db5568"
NLPModels = "a4795742-8479-5a88-8948-cc11e1c8c1a6"
NLPModelsJuMP = "792afdf1-32c1-5681-94e0-d7bf7a5df49e"
OptimizationProblems = "5049e819-d29b-5fba-b941-0eee7e64c1c6"
PkgBenchmark = "32113eaa-f34f-5b0d-bd6c-c81e245fc73d"
1 change: 1 addition & 0 deletions benchmark/README.md
@@ -0,0 +1 @@
# Benchmarks for OptimizationProblems
50 changes: 50 additions & 0 deletions benchmark/benchmarks.jl
@@ -0,0 +1,50 @@
# Load the packages used by the benchmarks
using ADNLPModels, NLPModels, OptimizationProblems
using BenchmarkTools
# Benchmark the JuMP versions of the problems as well
using JuMP, NLPModelsJuMP

const list_problems = Symbol.(OptimizationProblems.meta[!, :name])

# Should match the exclusion lists in runtests
const list_problems_not_ADNLPProblems =
  Symbol[:catmix, :gasoil, :glider, :methanol, :minsurf, :pinene, :rocket, :steering, :torsion]
const list_problems_ADNLPProblems = setdiff(list_problems, list_problems_not_ADNLPProblems)
const list_problems_not_PureJuMP = Symbol[]
const list_problems_PureJuMP = setdiff(list_problems, list_problems_not_PureJuMP)

# Run locally with `tune!(SUITE)` and then `run(SUITE)`
const SUITE = BenchmarkGroup()

const SAMPLES = 5
const EVALS = 1

SUITE["ADNLPProblems"] = BenchmarkGroup()
SUITE["ADNLPProblems"]["NLP"] = BenchmarkGroup()
SUITE["ADNLPProblems"]["NLP"]["constructor"] = BenchmarkGroup()
SUITE["ADNLPProblems"]["NLP"]["obj"] = BenchmarkGroup()
for pb in list_problems_ADNLPProblems
  problem_constructor = getproperty(OptimizationProblems.ADNLPProblems, Symbol(pb))
  SUITE["ADNLPProblems"]["NLP"]["constructor"][pb] =
    @benchmarkable $(problem_constructor)() samples = SAMPLES evals = EVALS
  SUITE["ADNLPProblems"]["NLP"]["obj"][pb] =
    @benchmarkable obj(nlp, nlp.meta.x0) samples = SAMPLES evals = EVALS setup = (nlp = $(problem_constructor)())
end

SUITE["ADNLPProblems"]["NLS"] = BenchmarkGroup()
SUITE["ADNLPProblems"]["NLS"]["constructor"] = BenchmarkGroup()
SUITE["ADNLPProblems"]["NLS"]["obj"] = BenchmarkGroup()
list_problems_nls_ADNLPProblems = intersect(
  Symbol.(OptimizationProblems.meta[OptimizationProblems.meta.objtype .== :least_squares, :name]),
  list_problems_ADNLPProblems,
)
for pb in list_problems_nls_ADNLPProblems
  problem_constructor = getproperty(OptimizationProblems.ADNLPProblems, Symbol(pb))
  SUITE["ADNLPProblems"]["NLS"]["constructor"][pb] =
    @benchmarkable $(problem_constructor)(use_nls = true) samples = SAMPLES evals = EVALS
  SUITE["ADNLPProblems"]["NLS"]["obj"][pb] =
    @benchmarkable obj(nlp, nlp.meta.x0) samples = SAMPLES evals = EVALS setup = (nlp = $(problem_constructor)(use_nls = true))
end

SUITE["PureJuMP"] = BenchmarkGroup()
SUITE["PureJuMP"]["constructor"] = BenchmarkGroup()
SUITE["PureJuMP"]["obj"] = BenchmarkGroup()
for pb in list_problems_PureJuMP
  problem_constructor = getproperty(OptimizationProblems.PureJuMP, Symbol(pb))
  SUITE["PureJuMP"]["constructor"][pb] =
    @benchmarkable $(problem_constructor)() samples = SAMPLES evals = EVALS
  SUITE["PureJuMP"]["obj"][pb] =
    @benchmarkable obj(nlp, nlp.meta.x0) samples = SAMPLES evals = EVALS setup = (nlp = MathOptNLPModel($(problem_constructor)()))
end
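As the comment above `const SUITE` notes, the suite can be exercised interactively. A sketch of timing a single group (assuming the `benchmark/` environment is active and the file is included from the repository root):

```julia
using BenchmarkTools
include(joinpath("benchmark", "benchmarks.jl"))
tune!(SUITE["ADNLPProblems"]["NLP"]["obj"])                          # calibrate this group only
results = run(SUITE["ADNLPProblems"]["NLP"]["obj"], verbose = true)
minimum(results)                                                     # best observed time per problem
```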
39 changes: 39 additions & 0 deletions benchmark/run_local.jl
@@ -0,0 +1,39 @@
using Pkg
Pkg.activate("benchmark")
Pkg.instantiate()
using Logging, JLD2, Dates

path = dirname(@__FILE__)
skip_tune = true  # set to false to tune the suite before running it

@info "INITIALIZE"
include("benchmarks.jl")

list_of_benchmark = keys(SUITE)
for benchmark_in_suite in list_of_benchmark
  @info "$(benchmark_in_suite)"
end

@info "TUNE"
if !skip_tune
  @time with_logger(ConsoleLogger(Error)) do
    tune!(SUITE)
    BenchmarkTools.save(joinpath(path, "params.json"), params(SUITE))
  end
else
  @info "Skip tuning"
  # https://juliaci.github.io/BenchmarkTools.jl/dev/manual/
  BenchmarkTools.DEFAULT_PARAMETERS.evals = 1
end

@info "RUN"
@time result = with_logger(ConsoleLogger(Error)) do
  if isfile(joinpath(path, "params.json"))
    loadparams!(SUITE, BenchmarkTools.load(joinpath(path, "params.json"))[1], :evals, :samples)
  end
  run(SUITE, verbose = true)
end

@info "SAVE BENCHMARK RESULT"
name = "$(today())_optimizationproblems_benchmark"
BenchmarkTools.save("$name.json", result)
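Each run of this script leaves a dated JSON file behind, so two runs can be compared with BenchmarkTools' `judge`. A sketch, with placeholder file names:

```julia
using BenchmarkTools
old = BenchmarkTools.load("2024-01-01_optimizationproblems_benchmark.json")[1]  # hypothetical earlier run
new = BenchmarkTools.load("2024-02-01_optimizationproblems_benchmark.json")[1]  # hypothetical later run
judge(minimum(new), minimum(old))  # flags improvements and regressions per benchmark
```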