diff --git a/.github/workflows/Benchmark.yml b/.github/workflows/Benchmark.yml
new file mode 100644
index 00000000..2f13d23d
--- /dev/null
+++ b/.github/workflows/Benchmark.yml
@@ -0,0 +1,25 @@
+name: Run benchmarks
+
+on:
+  pull_request:
+    types: [labeled, opened, synchronize, reopened]
+
+# Only run the benchmark job when the `run benchmark` label is added to the PR
+jobs:
+  Benchmark:
+    runs-on: ubuntu-latest
+    if: contains(github.event.pull_request.labels.*.name, 'run benchmark')
+    steps:
+      - uses: actions/checkout@v2
+      - uses: julia-actions/setup-julia@latest
+        with:
+          version: lts
+      - uses: julia-actions/julia-buildpkg@latest
+      - name: Install dependencies
+        run: julia -e 'using Pkg; pkg"add PkgBenchmark BenchmarkCI"'
+      - name: Run benchmarks
+        run: julia -e 'using BenchmarkCI; BenchmarkCI.judge(; baseline = "origin/main", script = joinpath(pwd(), "benchmark", "benchmarks.jl"))'
+      - name: Post results
+        run: julia -e 'using BenchmarkCI; BenchmarkCI.postjudge()'
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
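Note: the comparison this workflow runs in CI can also be reproduced locally before pushing. A minimal sketch, assuming PkgBenchmark and BenchmarkCI are installed in the active environment and the script is run from the repository root:

```julia
# Minimal sketch: reproduce the CI judgement locally (assumes PkgBenchmark
# and BenchmarkCI are available in the active environment).
using BenchmarkCI

# Same call as the "Run benchmarks" step above: benchmark the current
# state of the repository against origin/main.
BenchmarkCI.judge(;
  baseline = "origin/main",
  script = joinpath(pwd(), "benchmark", "benchmarks.jl"),
)

# Print the judgement to the terminal instead of posting it to a PR.
BenchmarkCI.displayjudgement()
```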
+SUITE["ADNLPProblems"]["NLS"] = BenchmarkGroup() +SUITE["ADNLPProblems"]["NLS"]["constructor"] = BenchmarkGroup() +SUITE["ADNLPProblems"]["NLS"]["obj"] = BenchmarkGroup() +list_problems_nls_ADNLPProblems = intersect(Symbol.(OptimizationProblems.meta[OptimizationProblems.meta.objtype .== :least_squares, :name]), list_problems_ADNLPProblems) +for pb in list_problems_nls_ADNLPProblems + problem_constructor = getproperty(OptimizationProblems.ADNLPProblems, Symbol(pb)) + SUITE["ADNLPProblems"]["NLS"]["constructor"][pb] = @benchmarkable $(problem_constructor)(use_nls = true) samples=SAMPLES evals=EVALS + SUITE["ADNLPProblems"]["NLS"]["obj"][pb] = @benchmarkable obj(nlp, nlp.meta.x0) samples=SAMPLES evals=EVALS setup = (nlp = $(problem_constructor)(use_nls = true)) +end +SUITE["PureJuMP"] = BenchmarkGroup() +SUITE["PureJuMP"]["constructor"] = BenchmarkGroup() +SUITE["PureJuMP"]["obj"] = BenchmarkGroup() +for pb in list_problems_PureJuMP + problem_constructor = getproperty(OptimizationProblems.PureJuMP, Symbol(pb)) + SUITE["PureJuMP"]["constructor"][pb] = @benchmarkable $(problem_constructor)() samples=SAMPLES evals=EVALS + SUITE["PureJuMP"]["obj"][pb] = @benchmarkable obj(nlp, nlp.meta.x0) samples=SAMPLES evals=EVALS setup = ( + nlp = MathOptNLPModel($(problem_constructor)()) + ) +end diff --git a/benchmark/run_local.jl b/benchmark/run_local.jl new file mode 100644 index 00000000..cc75b9ae --- /dev/null +++ b/benchmark/run_local.jl @@ -0,0 +1,39 @@ +using Pkg +Pkg.activate("benchmark") +Pkg.instantiate() +using Logging, JLD2, Dates + +path = dirname(@__FILE__) +skip_tune = true + +@info "INITIALIZE" +include("benchmarks.jl") + +list_of_benchmark = keys(SUITE) +for benchmark_in_suite in list_of_benchmark + @info "$(benchmark_in_suite)" +end + +@info "TUNE" +if !skip_tune + @time with_logger(ConsoleLogger(Error)) do + tune!(SUITE) + BenchmarkTools.save("params.json", params(suite)) + end +else + @info "Skip tuning" + # https://juliaci.github.io/BenchmarkTools.jl/dev/manual/ + BenchmarkTools.DEFAULT_PARAMETERS.evals = 1 +end + +@info "RUN" +@time result = with_logger(ConsoleLogger(Error)) do + if "params.json" in (path == "" ? readdir() : readdir(path)) + loadparams!(suite, BenchmarkTools.load("params.json")[1], :evals, :samples) + end + run(SUITE, verbose = true) +end + +@info "SAVE BENCHMARK RESULT" +name = "$(today())_optimizationproblems_benchmark" +BenchmarkTools.save("$name.json", result)