38 changes: 38 additions & 0 deletions .github/workflows/Benchmark.yml
@@ -0,0 +1,38 @@
name: Run benchmarks
on:
  pull_request:
    # Only trigger the benchmark job when the `benchmark` label is added to the PR
    # (see the `if:` condition on the job below)
    types: [labeled, opened, synchronize, reopened]
concurrency:
  # Skip intermediate builds: always.
  # Cancel intermediate builds: only if it is a pull request build.
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: ${{ startsWith(github.ref, 'refs/pull/') }}
jobs:
  Benchmark:
    runs-on: ubuntu-latest
    permissions:
      pull-requests: write # needed so BenchmarkCI can post the judgement as a PR comment
      actions: write # needed to allow the cache action to delete old caches it has created
      contents: read
    if: contains(github.event.pull_request.labels.*.name, 'benchmark')
    steps:
      - uses: actions/checkout@v4
      - uses: julia-actions/setup-julia@latest
      - name: Cache artifacts
        uses: actions/cache@v4
        env:
          cache-name: cache-artifacts
        with:
          path: ~/.julia/artifacts
          key: ${{ runner.os }}-test-${{ env.cache-name }}-${{ hashFiles('**/Project.toml') }}
          restore-keys: |
            ${{ runner.os }}-test-${{ env.cache-name }}-
            ${{ runner.os }}-test-
            ${{ runner.os }}-
      - name: Install dependencies
        run: julia -e 'using Pkg; pkg"add JSON PkgBenchmark BenchmarkCI@0.1"'
      - name: Run benchmarks
        run: julia benchmark/run_benchmarks.jl
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
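For reference, the job above only installs three packages and runs the script added below. A minimal sketch of reproducing that locally (assuming a working Julia installation and a full clone, since BenchmarkCI needs the git history to find the baseline ref):

```julia
# Same packages and version pin as the "Install dependencies" step above.
using Pkg
pkg"add JSON PkgBenchmark BenchmarkCI@0.1"
# Then, from the repository root (shell): julia benchmark/run_benchmarks.jl
```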
3 changes: 3 additions & 0 deletions .gitignore
@@ -6,3 +6,6 @@ LocalPreferences.toml
*.cov
docs/Manifest.toml
lcov.info
benchmark/Manifest.toml
/.benchmarkci
/benchmark/*.json
12 changes: 12 additions & 0 deletions benchmark/Project.toml
@@ -0,0 +1,12 @@
[deps]
BenchmarkTools = "6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf"
DAQP = "c47d62df-3981-49c8-9651-128b1cd08617"
FiniteDiff = "6a86dc24-6348-571c-b903-95158fe2bd41"
Ipopt = "b6b21f68-93f8-5de0-b562-5493be1d77c9"
JuMP = "4076af6c-e467-56ae-b986-b466b2749572"
LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
MadNLP = "2621e9c9-9eb4-46b1-8089-e8c72242dfb6"
ModelPredictiveControl = "61f9bdb8-6ae4-484a-811f-bbf86720c31c"
SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf"
SparseConnectivityTracer = "9f842d2f-2579-4b1d-911e-f412cf18a3f5"
SparseMatrixColorings = "0a514795-09f3-496d-8182-132a7b665d35"
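The `[deps]` table above pins the benchmark environment by UUID; JuMP plus the DAQP, Ipopt, and MadNLP solvers suggest the suite is meant to grow beyond the placeholder benchmarks below. A sketch of instantiating this environment locally, assuming the repository root as the working directory:

```julia
# Activate and install the benchmark environment defined by benchmark/Project.toml.
using Pkg
Pkg.activate("benchmark")
Pkg.instantiate()   # resolves and installs the [deps] listed above
```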
25 changes: 25 additions & 0 deletions benchmark/benchmarks.jl
@@ -0,0 +1,25 @@
using BenchmarkTools
using Random

const SUITE = BenchmarkGroup()

SUITE["utf8"] = BenchmarkGroup(["string", "unicode"])
teststr = String(join(rand(MersenneTwister(1), 'a':'d', 10^4)))
SUITE["utf8"]["replace"] = @benchmarkable replace($teststr, "a" => "b")
SUITE["utf8"]["join"] = @benchmarkable join($teststr, $teststr)
SUITE["utf8"]["plots"] = BenchmarkGroup()

SUITE["trigonometry"] = BenchmarkGroup(["math", "triangles"])
SUITE["trigonometry"]["circular"] = BenchmarkGroup()
for f in (sin, cos, tan)
    for x in (0.0, pi)
        SUITE["trigonometry"]["circular"][string(f), x] = @benchmarkable ($f)($x)
    end
end

SUITE["trigonometry"]["hyperbolic"] = BenchmarkGroup()
for f in (sinh, cosh, tanh)  # hyperbolic counterparts, matching the group name
    for x in (0.0, pi)
        SUITE["trigonometry"]["hyperbolic"][string(f), x] = @benchmarkable ($f)($x)
    end
end
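PkgBenchmark discovers the suite through the `SUITE` constant defined in `benchmark/benchmarks.jl`. A sketch of exercising it without the CI machinery, assuming the repository root as the working directory (`tune!` and `run` are standard BenchmarkTools API):

```julia
# Run the suite interactively from the repository root.
using Pkg
Pkg.activate("benchmark")            # environment from benchmark/Project.toml
using BenchmarkTools
include("benchmark/benchmarks.jl")   # defines SUITE
tune!(SUITE)                         # choose evaluation counts per benchmark
results = run(SUITE; verbose = true)
show(results["trigonometry"])        # drill into one nested group
```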
10 changes: 10 additions & 0 deletions benchmark/run_benchmarks.jl
@@ -0,0 +1,10 @@
# To run benchmarks locally, BenchmarkCI should be added to the root project. Then call:
# ```bash
# julia benchmark/run_benchmarks.jl
# ```

using BenchmarkCI
on_CI = haskey(ENV, "GITHUB_ACTIONS")

BenchmarkCI.judge(; baseline = "origin/main")
on_CI ? BenchmarkCI.postjudge() : BenchmarkCI.displayjudgement()
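`BenchmarkCI.judge` benchmarks the current checkout against the `origin/main` baseline and, on CI, `postjudge` posts the comparison as a PR comment (which is why the workflow grants `pull-requests: write` and passes `GITHUB_TOKEN`). A rough equivalent using PkgBenchmark directly, with a hypothetical package-directory argument:

```julia
# Sketch: compare the working tree against the main branch with PkgBenchmark.
using PkgBenchmark
judgement = judge(".", "main")       # target = current state, baseline = main
export_markdown(stdout, judgement)   # print the comparison table
```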