
Commit 71b3b43: Add CI
Parent: 4cf8253

4 files changed: +197 -6 lines


.buildkite/pipeline.yml

Lines changed: 128 additions & 4 deletions
@@ -19,7 +19,12 @@ steps:
       queue: "juliaecosystem"
       os: "macos"
       arch: "aarch64"
-    if: build.message !~ /\[skip tests\]/
+    if: |
+      build.message =~ /\[only tests\]/ ||
+      build.message =~ /\[only julia\]/ ||
+      build.message !~ /\[only/ && !build.pull_request.draft &&
+      build.message !~ /\[skip tests\]/ &&
+      build.message !~ /\[skip julia\]/
     timeout_in_minutes: 60
     matrix:
       setup:
@@ -46,7 +51,12 @@ steps:
       queue: "juliaecosystem"
       os: "macos"
       arch: "aarch64"
-    if: build.message !~ /\[skip tests\]/ && !build.pull_request.draft
+    if: |
+      build.message =~ /\[only tests\]/ ||
+      build.message =~ /\[only special\]/ ||
+      build.message !~ /\[only/ && !build.pull_request.draft &&
+      build.message !~ /\[skip tests\]/ &&
+      build.message !~ /\[skip special\]/
     timeout_in_minutes: 60
     matrix:
       setup:
@@ -75,7 +85,12 @@ steps:
       queue: "juliaecosystem"
       os: "macos"
       arch: "aarch64"
-    if: build.message !~ /\[skip tests\]/ && !build.pull_request.draft
+    if: |
+      build.message =~ /\[only tests\]/ ||
+      build.message =~ /\[only special\]/ ||
+      build.message !~ /\[only/ && !build.pull_request.draft &&
+      build.message !~ /\[skip tests\]/ &&
+      build.message !~ /\[skip special\]/
     timeout_in_minutes: 60
   - label: "Opaque pointers"
     plugins:
@@ -95,5 +110,114 @@ steps:
       queue: "juliaecosystem"
       os: "macos"
       arch: "aarch64"
-    if: build.message !~ /\[skip tests\]/ && !build.pull_request.draft
+    if: |
+      build.message =~ /\[only tests\]/ ||
+      build.message =~ /\[only special\]/ ||
+      build.message !~ /\[only/ && !build.pull_request.draft &&
+      build.message !~ /\[skip tests\]/ &&
+      build.message !~ /\[skip special\]/
     timeout_in_minutes: 60
+
+  # we want to benchmark every commit on the master branch, even if it failed CI
+  - wait: ~
+    # continue_on_failure: true
+
+  - group: ":racehorse: Benchmarks"
+    steps:
+      - label: "Benchmarks"
+        plugins:
+          - JuliaCI/julia#v1:
+              version: "1.10"
+        command: |
+          julia --project=perf -e '
+            using Pkg
+
+            println("--- :julia: Instantiating project")
+            Pkg.develop([PackageSpec(path=pwd())])
+            Pkg.instantiate()
+            push!(LOAD_PATH, @__DIR__)
+
+            println("+++ :julia: Benchmarking")
+            include("perf/runbenchmarks.jl")'
+        artifact_paths:
+          - "benchmarkresults.json"
+        agents:
+          queue: "juliaecosystem"
+          os: "macos"
+          arch: "aarch64"
+        if: |
+          build.message =~ /\[only benchmarks\]/ ||
+          build.message !~ /\[only/ && !build.pull_request.draft &&
+          build.message !~ /\[skip benchmarks\]/
+        timeout_in_minutes: 30
+      # - label: "Benchmarks (dry run)"
+      #   plugins:
+      #     - JuliaCI/julia#v1:
+      #         version: "1.10"
+      #   command: |
+      #     julia --project -e '
+      #       using Pkg
+
+      #       println("--- :julia: Instantiating project")
+      #       Pkg.resolve()
+      #       Pkg.instantiate()
+      #       Pkg.activate("perf")
+      #       Pkg.resolve()
+      #       Pkg.instantiate()
+      #       push!(LOAD_PATH, @__DIR__)
+
+      #       println("+++ :julia: Benchmarking")
+      #       include("perf/runbenchmarks.jl")'
+      #   artifact_paths:
+      #     - "results.json"
+      #   agents:
+      #     queue: "juliaecosystem"
+      #     os: "macos"
+      #     arch: "aarch64"
+      #   if: |
+      #     build.message =~ /\[only benchmarks\]/ ||
+      #     build.message !~ /\[only/ && !build.pull_request.draft &&
+      #     build.message !~ /\[skip benchmarks\]/
+      #   timeout_in_minutes: 30
+
+  # if we will submit results, use the benchmark queue so that we will
+  # be running on the same system each time
+  # - label: "Benchmarks on Julia {{matrix.julia}}"
+  #   plugins:
+  #     - JuliaCI/julia#v1:
+  #         version: "{{matrix.julia}}"
+  #   env:
+  #     BENCHMARKS: "true"
+  #     CODESPEED_PROJECT: "$BUILDKITE_PIPELINE_NAME"
+  #     CODESPEED_BRANCH: "$BUILDKITE_BRANCH"
+  #     CODESPEED_COMMIT: "$BUILDKITE_COMMIT"
+  #     CODESPEED_EXECUTABLE: "Julia {{matrix.julia}}"
+  #   command: |
+  #     julia --project -e '
+  #       using Pkg
+  #       ENV["CODESPEED_ENVIRONMENT"] = ENV["BUILDKITE_AGENT_NAME"]
+
+  #       println("--- :julia: Instantiating project")
+  #       Pkg.resolve()
+  #       Pkg.instantiate()
+  #       Pkg.activate("perf")
+  #       Pkg.resolve()
+  #       Pkg.instantiate()
+  #       push!(LOAD_PATH, @__DIR__)
+
+  #       println("+++ :julia: Benchmarking")
+  #       include("perf/runbenchmarks.jl")'
+  #   agents:
+  #     queue: "benchmark"
+  #     gpu: "rtx2070"
+  #     cuda: "*"
+  #   if: |
+  #     build.branch =~ /^master$$/ && build.message =~ /\[only benchmarks\]/ ||
+  #     build.branch =~ /^master$$/ && build.message !~ /\[only/ &&
+  #     build.message !~ /\[skip benchmarks\]/
+  #   matrix:
+  #     setup:
+  #       julia:
+  #         - "1.10"
+  #         - "1.11"
+  #   timeout_in_minutes: 30
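
The rewritten if: conditions above all follow one commit-message gating pattern: a job runs when the message opts in with a matching [only <group>] tag, or when no [only ...] tag is present, the build is not a draft pull request, and no matching [skip <group>] tag appears. As a rough Julia sketch of that logic (illustration only; the function name and arguments are hypothetical and not part of this commit):

# Hypothetical model of the Buildkite gating above; `tags` lists the group
# names a job belongs to, e.g. ["tests", "julia"] or ["benchmarks"].
function job_should_run(message::AbstractString, tags::Vector{String};
                        draft_pr::Bool=false)
    # An explicit opt-in tag always wins.
    any(occursin("[only $t]", message) for t in tags) && return true
    # Otherwise: no [only ...] tag at all, not a draft PR, and no opt-out tag.
    return !occursin("[only", message) && !draft_pr &&
           all(!occursin("[skip $t]", message) for t in tags)
end

# A few cases mirroring the conditions in the pipeline:
@assert job_should_run("[only julia] tweak codegen", ["tests", "julia"])
@assert !job_should_run("[skip benchmarks] docs", ["benchmarks"])
@assert !job_should_run("regular commit", ["tests"]; draft_pr=true)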

.github/workflows/Benchmark.yml

Lines changed: 63 additions & 0 deletions
@@ -0,0 +1,63 @@
+name: Benchmarks
+permissions:
+  contents: write # contents permission to update benchmark contents in gh-pages branch
+  statuses: read
+  deployments: write # deployments permission to deploy GitHub pages website
+  pull-requests: write
+
+on:
+  pull_request:
+    branches:
+      - main
+    paths:
+      - "src/**/*"
+      - "ext/**/*"
+      - "perf/**/*"
+      - ".buildkite/**/*"
+      - "Project.toml"
+      - ".github/workflows/Benchmark.yml"
+  push:
+    branches:
+      - main
+    paths:
+      - "src/**/*"
+      - "ext/**/*"
+      - "benchmarks/**/*"
+      - ".buildkite/**/*"
+      - "Project.toml"
+      - ".github/workflows/Benchmark.yml"
+
+jobs:
+  benchmark:
+    if: ${{ !contains(github.event.head_commit.message, '[skip benchmarks]') }}
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+      - name: Download Buildkite Artifacts
+        id: download
+        uses: EnricoMi/download-buildkite-artifact-action@v1
+        with:
+          buildkite_token: ${{ secrets.BUILDKITE_TOKEN }}
+          ignore_build_states: blocked,canceled,skipped,not_run,failed
+          ignore_job_states: timed_out,failed
+          output_path: artifacts
+
+      - name: Locate Benchmarks Artifact
+        id: locate
+        if: ${{ steps.download.outputs.download-state == 'success' }}
+        run: echo "path=$(find artifacts -type f -name benchmarkresults.json 2>/dev/null)" >> $GITHUB_OUTPUT
+
+      - name: Upload Benchmark Results
+        if: ${{ steps.locate.outputs.path != '' }}
+        uses: benchmark-action/github-action-benchmark@v1
+        with:
+          name: Metal Benchmarks
+          tool: "julia"
+          output-file-path: ${{ steps.locate.outputs.path }}
+          benchmark-data-dir-path: ""
+          github-token: ${{ secrets.GITHUB_TOKEN }}
+          comment-always: true
+          summary-always: true
+          alert-threshold: "150%"
+          fail-on-alert: false
+          auto-push: ${{ github.event_name != 'pull_request' }}
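
This workflow only relays data: it downloads benchmarkresults.json from the Buildkite build and hands it to github-action-benchmark with tool: "julia", which parses JSON in the BenchmarkTools.jl serialization format. A minimal local sketch of producing and inspecting such a file (the "copy" suite below is a made-up stand-in, not the project's real benchmark suite):

using BenchmarkTools

# Build and run a tiny placeholder suite (not perf/runbenchmarks.jl).
suite = BenchmarkGroup()
suite["copy"] = @benchmarkable copy($(rand(Float32, 1024)))
results = run(suite; verbose=false)

# Same call the updated perf/runbenchmarks.jl now uses to emit the artifact.
BenchmarkTools.save("benchmarkresults.json", results)

# BenchmarkTools.load returns a vector of the saved values; round-trip check.
loaded = BenchmarkTools.load("benchmarkresults.json")[1]
@show minimum(loaded["copy"]).time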

perf/Project.toml

Lines changed: 1 addition & 0 deletions
@@ -2,5 +2,6 @@
 BenchmarkTools = "6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf"
 HTTP = "cd3eb016-35fb-5094-929b-558a96fad6f3"
 JSON = "682c06a0-de6a-54ab-a142-c8b1cf79cde6"
+Metal = "dde4c033-4e86-420c-a63e-0dd931031962"
 StableRNGs = "860ef19b-820b-49d6-a774-d7a799459cd3"
 StaticArrays = "90137ffa-7385-5640-81b9-e52037218182"
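
With Metal now declared in perf/Project.toml, the pipeline's Pkg.develop([PackageSpec(path=pwd())]) makes that dependency resolve to the checked-out source rather than a registered release. A rough local equivalent of the CI command (a sketch only; the pipeline itself uses julia --project=perf plus a LOAD_PATH push rather than Pkg.activate):

using Pkg

# Run from the repository root: point the perf environment at the local checkout.
Pkg.activate("perf")
Pkg.develop(PackageSpec(path=pwd()))
Pkg.instantiate()

# Then execute the benchmark script, as the CI job does.
include(joinpath("perf", "runbenchmarks.jl"))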

perf/runbenchmarks.jl

Lines changed: 5 additions & 2 deletions
@@ -14,6 +14,9 @@ if real_run
     BenchmarkTools.DEFAULT_PARAMETERS.evals = 0
 end
 
+# print system information
+@info "System information:\n" * sprint(io->Metal.versioninfo(io))
+
 # convenience macro to create a benchmark that requires synchronizing the GPU
 macro async_benchmarkable(ex...)
     quote
@@ -66,7 +69,7 @@ println(results)
 ## comparison
 
 # write out the results
-BenchmarkTools.save(joinpath(@__DIR__, "results.json"), results)
+BenchmarkTools.save("benchmarkresults.json", results)
 
 # compare against previous results
 # TODO: store these results so that we can compare when benchmarking PRs
@@ -85,7 +88,7 @@ end
 
 ## submission
 
-using JSON, HTTP
+# using JSON, HTTP
 
 if real_run
     @info "Submitting to Codespeed..."
