Skip to content

Commit ccf94f8

Browse files
authored
Add a benchmark folder (#389)
* Add a benchmark folder
* up benchmark
* use more recent version of BenchmarkCI
* fix julia version for benchmark
* run the benchmark faster
* fix
1 parent 80dbed6 commit ccf94f8

File tree

5 files changed

+127
-0
lines changed

5 files changed

+127
-0
lines changed

.github/workflows/Benchmark.yml

Lines changed: 25 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,25 @@
name: Run benchmarks

on:
  pull_request:
    types: [labeled, opened, synchronize, reopened]

# Only trigger the benchmark job when you add `run benchmark` label to the PR
jobs:
  Benchmark:
    runs-on: ubuntu-latest
    if: contains(github.event.pull_request.labels.*.name, 'run benchmark')
    steps:
      # checkout@v2 runs on the deprecated Node 12 runtime and is unmaintained;
      # v4 is the supported release and is a drop-in replacement here.
      - uses: actions/checkout@v4
      - uses: julia-actions/setup-julia@latest
        with:
          version: lts
      - uses: julia-actions/julia-buildpkg@latest
      - name: Install dependencies
        run: julia -e 'using Pkg; pkg"add PkgBenchmark BenchmarkCI"'
      - name: Run benchmarks
        # Compare this PR against origin/main using benchmark/benchmarks.jl.
        run: julia -e 'using BenchmarkCI; BenchmarkCI.judge(;baseline = "origin/main", script = joinpath(pwd(), "benchmark", "benchmarks.jl"))'
      - name: Post results
        run: julia -e 'using BenchmarkCI; BenchmarkCI.postjudge()'
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

benchmark/Project.toml

Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,12 @@
[deps]
ADNLPModels = "54578032-b7ea-4c30-94aa-7cbd1cce6c9a"
BenchmarkTools = "6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf"
DataFrames = "a93c6f00-e57d-5684-b7b6-d8193f3e46c0"
Dates = "ade2ca70-3891-5945-98fb-dc099432e06a"
JLD2 = "033835bb-8acc-5ee8-8aae-3f567f8a3819"
JuMP = "4076af6c-e467-56ae-b986-b466b2749572"
Logging = "56ddb016-857b-54e1-b83d-db4d58db5568"
NLPModels = "a4795742-8479-5a88-8948-cc11e1c8c1a6"
NLPModelsJuMP = "792afdf1-32c1-5681-94e0-d7bf7a5df49e"
OptimizationProblems = "5049e819-d29b-5fba-b941-0eee7e64c1c6"
PkgBenchmark = "32113eaa-f34f-5b0d-bd6c-c81e245fc73d"

benchmark/README.md

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1 @@
# Benchmarks for OptimizationProblems

benchmark/benchmarks.jl

Lines changed: 50 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,50 @@
# Benchmark suite for OptimizationProblems.jl (PkgBenchmark-compatible).
#
# Builds `SUITE` with timings for:
#   * ADNLPProblems, NLP variant: model constructor and objective evaluation;
#   * ADNLPProblems, NLS variant (least-squares problems only): same two timings;
#   * PureJuMP models: constructor and objective evaluation via MathOptNLPModel.
using ADNLPModels, NLPModels, OptimizationProblems
using BenchmarkTools
# Most likely benchmark with JuMP as well
using JuMP, NLPModelsJuMP

# Every problem registered in the package's meta table.
const list_problems = Symbol.(OptimizationProblems.meta[!, :name])

# Should match the runtests
const list_problems_not_ADNLPProblems =
  Symbol[:catmix, :gasoil, :glider, :methanol, :minsurf, :pinene, :rocket, :steering, :torsion]
const list_problems_ADNLPProblems = setdiff(list_problems, list_problems_not_ADNLPProblems)
const list_problems_not_PureJuMP = Symbol[]
const list_problems_PureJuMP = setdiff(list_problems, list_problems_not_PureJuMP)

# Run locally with `tune!(SUITE)` and then `run(SUITE)`
const SUITE = BenchmarkGroup()

# Keep the suite fast: few samples, one evaluation per sample.
const SAMPLES = 5
const EVALS = 1

SUITE["ADNLPProblems"] = BenchmarkGroup()
SUITE["ADNLPProblems"]["NLP"] = BenchmarkGroup()
SUITE["ADNLPProblems"]["NLP"]["constructor"] = BenchmarkGroup()
SUITE["ADNLPProblems"]["NLP"]["obj"] = BenchmarkGroup()
for prob in list_problems_ADNLPProblems
  ctor = getproperty(OptimizationProblems.ADNLPProblems, prob)
  SUITE["ADNLPProblems"]["NLP"]["constructor"][prob] =
    @benchmarkable $(ctor)() samples = SAMPLES evals = EVALS
  SUITE["ADNLPProblems"]["NLP"]["obj"][prob] =
    @benchmarkable obj(nlp, nlp.meta.x0) samples = SAMPLES evals = EVALS setup =
      (nlp = $(ctor)())
end

SUITE["ADNLPProblems"]["NLS"] = BenchmarkGroup()
SUITE["ADNLPProblems"]["NLS"]["constructor"] = BenchmarkGroup()
SUITE["ADNLPProblems"]["NLS"]["obj"] = BenchmarkGroup()
# Least-squares problems that also have an ADNLPProblems version.
list_problems_nls_ADNLPProblems = intersect(
  Symbol.(OptimizationProblems.meta[OptimizationProblems.meta.objtype .== :least_squares, :name]),
  list_problems_ADNLPProblems,
)
for prob in list_problems_nls_ADNLPProblems
  ctor = getproperty(OptimizationProblems.ADNLPProblems, prob)
  SUITE["ADNLPProblems"]["NLS"]["constructor"][prob] =
    @benchmarkable $(ctor)(use_nls = true) samples = SAMPLES evals = EVALS
  SUITE["ADNLPProblems"]["NLS"]["obj"][prob] =
    @benchmarkable obj(nlp, nlp.meta.x0) samples = SAMPLES evals = EVALS setup =
      (nlp = $(ctor)(use_nls = true))
end

SUITE["PureJuMP"] = BenchmarkGroup()
SUITE["PureJuMP"]["constructor"] = BenchmarkGroup()
SUITE["PureJuMP"]["obj"] = BenchmarkGroup()
for prob in list_problems_PureJuMP
  ctor = getproperty(OptimizationProblems.PureJuMP, prob)
  SUITE["PureJuMP"]["constructor"][prob] =
    @benchmarkable $(ctor)() samples = SAMPLES evals = EVALS
  SUITE["PureJuMP"]["obj"][prob] =
    @benchmarkable obj(nlp, nlp.meta.x0) samples = SAMPLES evals = EVALS setup = (
      nlp = MathOptNLPModel($(ctor)())
    )
end

benchmark/run_local.jl

Lines changed: 39 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,39 @@
# Run the benchmark suite locally and save results to a timestamped JSON file.
using Pkg
Pkg.activate("benchmark")
Pkg.instantiate()
using Logging, JLD2, Dates

path = dirname(@__FILE__)
# Set to `false` to re-tune benchmark parameters (slower, but more accurate).
skip_tune = true

@info "INITIALIZE"
include("benchmarks.jl")  # defines `SUITE` (and loads BenchmarkTools)

list_of_benchmark = keys(SUITE)
for benchmark_in_suite in list_of_benchmark
  @info "$(benchmark_in_suite)"
end

@info "TUNE"
if !skip_tune
  @time with_logger(ConsoleLogger(Error)) do
    tune!(SUITE)
    # Fix: was `params(suite)` — `suite` is undefined; the suite object is `SUITE`.
    # NOTE(review): this writes params.json to the current directory, while the
    # RUN step below reads it from `path` — confirm the script is run from there.
    BenchmarkTools.save("params.json", params(SUITE))
  end
else
  @info "Skip tuning"
  # https://juliaci.github.io/BenchmarkTools.jl/dev/manual/
  BenchmarkTools.DEFAULT_PARAMETERS.evals = 1
end

@info "RUN"
@time result = with_logger(ConsoleLogger(Error)) do
  # Reuse previously tuned parameters when a params.json file is available.
  if "params.json" in (path == "" ? readdir() : readdir(path))
    # Fix: was `loadparams!(suite, ...)` — undefined variable `suite`.
    loadparams!(SUITE, BenchmarkTools.load("params.json")[1], :evals, :samples)
  end
  run(SUITE, verbose = true)
end

@info "SAVE BENCHMARK RESULT"
name = "$(today())_optimizationproblems_benchmark"
BenchmarkTools.save("$name.json", result)

0 commit comments

Comments
 (0)