
Commit fa10aa0

Merge pull request #105 from EnzymeAD/ap/comptime_nn
perf: test compile times for neural networks -- VGG16
2 parents 46b946a + de6264e commit fa10aa0

File tree

.github/workflows/benchmark_pr.yml
benchmark/benchmarks.jl

2 files changed: +82 / -64 lines


.github/workflows/benchmark_pr.yml

Lines changed: 66 additions & 64 deletions
@@ -7,70 +7,72 @@ permissions:
   pull-requests: write

 jobs:
-  generate_plots:
-    runs-on: ubuntu-latest
+  generate_plots:
+    runs-on: ubuntu-latest

-    steps:
-      - uses: actions/checkout@v4
-      - uses: julia-actions/setup-julia@v2
-        with:
-          version: "1"
-      - uses: julia-actions/cache@v2
-      - name: Extract Package Name from Project.toml
-        id: extract-package-name
-        run: |
-          PACKAGE_NAME=$(grep "^name" Project.toml | sed 's/^name = "\(.*\)"$/\1/')
-          echo "::set-output name=package_name::$PACKAGE_NAME"
-      - name: Build AirspeedVelocity
-        env:
-          JULIA_NUM_THREADS: 2
-        run: |
-          # Lightweight build step, as sometimes the runner runs out of memory:
-          julia -e 'ENV["JULIA_PKG_PRECOMPILE_AUTO"]=0; import Pkg; Pkg.add("AirspeedVelocity")'
-          julia -e 'ENV["JULIA_PKG_PRECOMPILE_AUTO"]=0; import Pkg; Pkg.build("AirspeedVelocity")'
-      - name: Add ~/.julia/bin to PATH
-        run: |
-          echo "$HOME/.julia/bin" >> $GITHUB_PATH
-      - name: Run benchmarks
-        run: |
-          echo $PATH
-          ls -l ~/.julia/bin
-          mkdir results
-          benchpkg ${{ steps.extract-package-name.outputs.package_name }} --rev="${{github.event.repository.default_branch}},${{github.event.pull_request.head.sha}}" --url=${{ github.event.repository.clone_url }} --bench-on="${{github.event.repository.default_branch}}" --output-dir=results/ --tune --add Enzyme
-      - name: Create plots from benchmarks
-        run: |
-          mkdir -p plots
-          benchpkgplot ${{ steps.extract-package-name.outputs.package_name }} --rev="${{github.event.repository.default_branch}},${{github.event.pull_request.head.sha}}" --npart=10 --format=png --input-dir=results/ --output-dir=plots/
-      - name: Upload plot as artifact
-        uses: actions/upload-artifact@v4
-        with:
-          name: plots
-          path: plots
-      - name: Create markdown table from benchmarks
-        run: |
-          benchpkgtable ${{ steps.extract-package-name.outputs.package_name }} --rev="${{github.event.repository.default_branch}},${{github.event.pull_request.head.sha}}" --input-dir=results/ --ratio > table.md
-          echo '### Benchmark Results' > body.md
-          echo '' >> body.md
-          echo '' >> body.md
-          cat table.md >> body.md
-          echo '' >> body.md
-          echo '' >> body.md
-          echo '### Benchmark Plots' >> body.md
-          echo 'A plot of the benchmark results have been uploaded as an artifact to the workflow run for this PR.' >> body.md
-          echo 'Go to "Actions"->"Benchmark a pull request"->[the most recent run]->"Artifacts" (at the bottom).' >> body.md
+    steps:
+      - uses: actions/checkout@v4
+      - uses: julia-actions/setup-julia@v2
+        with:
+          version: "1"
+      - uses: julia-actions/cache@v2
+      - name: Extract Package Name from Project.toml
+        id: extract-package-name
+        run: |
+          PACKAGE_NAME=$(grep "^name" Project.toml | sed 's/^name = "\(.*\)"$/\1/')
+          echo "::set-output name=package_name::$PACKAGE_NAME"
+      - name: Build AirspeedVelocity
+        env:
+          JULIA_NUM_THREADS: 2
+        run: |
+          # Lightweight build step, as sometimes the runner runs out of memory:
+          julia -e 'ENV["JULIA_PKG_PRECOMPILE_AUTO"]=0; import Pkg; Pkg.add("AirspeedVelocity")'
+          julia -e 'ENV["JULIA_PKG_PRECOMPILE_AUTO"]=0; import Pkg; Pkg.build("AirspeedVelocity")'
+      - name: Add ~/.julia/bin to PATH
+        run: |
+          echo "$HOME/.julia/bin" >> $GITHUB_PATH
+      - name: Run benchmarks
+        run: |
+          echo $PATH
+          ls -l ~/.julia/bin
+          mkdir results
+          benchpkg ${{ steps.extract-package-name.outputs.package_name }} --rev="${{github.event.repository.default_branch}},${{github.event.pull_request.head.sha}}" --url=${{ github.event.repository.clone_url }} --bench-on="${{github.event.repository.default_branch}}" --output-dir=results/ -s="benchmark/benchmarks.jl" --tune --add="Enzyme,Lux,Boltz,Random"
+        env:
+          JULIA_PKG_SERVER: ""
+      - name: Create plots from benchmarks
+        run: |
+          mkdir -p plots
+          benchpkgplot ${{ steps.extract-package-name.outputs.package_name }} --rev="${{github.event.repository.default_branch}},${{github.event.pull_request.head.sha}}" --npart=10 --format=png --input-dir=results/ --output-dir=plots/
+      - name: Upload plot as artifact
+        uses: actions/upload-artifact@v4
+        with:
+          name: plots
+          path: plots
+      - name: Create markdown table from benchmarks
+        run: |
+          benchpkgtable ${{ steps.extract-package-name.outputs.package_name }} --rev="${{github.event.repository.default_branch}},${{github.event.pull_request.head.sha}}" --input-dir=results/ --ratio > table.md
+          echo '### Benchmark Results' > body.md
+          echo '' >> body.md
+          echo '' >> body.md
+          cat table.md >> body.md
+          echo '' >> body.md
+          echo '' >> body.md
+          echo '### Benchmark Plots' >> body.md
+          echo 'A plot of the benchmark results have been uploaded as an artifact to the workflow run for this PR.' >> body.md
+          echo 'Go to "Actions"->"Benchmark a pull request"->[the most recent run]->"Artifacts" (at the bottom).' >> body.md

-      - name: Find Comment
-        uses: peter-evans/find-comment@v3
-        id: fcbenchmark
-        with:
-          issue-number: ${{ github.event.pull_request.number }}
-          comment-author: 'github-actions[bot]'
-          body-includes: Benchmark Results
+      - name: Find Comment
+        uses: peter-evans/find-comment@v3
+        id: fcbenchmark
+        with:
+          issue-number: ${{ github.event.pull_request.number }}
+          comment-author: "github-actions[bot]"
+          body-includes: Benchmark Results

-      - name: Comment on PR
-        uses: peter-evans/create-or-update-comment@v4
-        with:
-          comment-id: ${{ steps.fcbenchmark.outputs.comment-id }}
-          issue-number: ${{ github.event.pull_request.number }}
-          body-path: body.md
-          edit-mode: replace
+      - name: Comment on PR
+        uses: peter-evans/create-or-update-comment@v4
+        with:
+          comment-id: ${{ steps.fcbenchmark.outputs.comment-id }}
+          issue-number: ${{ github.event.pull_request.number }}
+          body-path: body.md
+          edit-mode: replace
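
To reproduce roughly what the "Run benchmarks" step measures without going through CI, the suite can be loaded and run locally with BenchmarkTools. The snippet below is a minimal sketch and not part of the commit; it assumes Reactant plus the packages passed via --add (Enzyme, Lux, Boltz, Random) are already installed in the active Julia environment.

# Local sketch (assumes Reactant, Enzyme, Lux, Boltz and Random are installed):
using BenchmarkTools

include("benchmark/benchmarks.jl")   # defines `const SUITE`, see the diff below

tune!(SUITE["comptime"])             # roughly the --tune step benchpkg performs
results = run(SUITE["comptime"]; verbose = true)
display(results)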

benchmark/benchmarks.jl

Lines changed: 16 additions & 0 deletions
@@ -9,10 +9,12 @@
 using BenchmarkTools
 using Reactant
 using Enzyme
+using Boltz, Lux, Random

 const SUITE = BenchmarkGroup()

 SUITE["comptime"] = BenchmarkGroup()
+
 SUITE["comptime"]["basics"] = BenchmarkGroup()
 SUITE["comptime"]["basics"]["2D sum"] = @benchmarkable Reactant.compile(sum, (a,)) setup = (
     a = Reactant.ConcreteRArray(ones(2, 10))
@@ -21,6 +23,20 @@ SUITE["comptime"]["basics"]["Basic cos"] = @benchmarkable Reactant.compile(cos,
     a = Reactant.ConcreteRArray(ones(2, 10))
 )

+SUITE["comptime"]["lux neural networks"] = BenchmarkGroup()
+
+for depth in [11, 13, 16, 19]
+    SUITE["comptime"]["lux neural networks"]["vgg$depth"] = @benchmarkable begin
+        Reactant.compile(vgg, (x, ps_concrete, st_concrete))
+    end setup = begin
+        vgg = Vision.VGG($depth; pretrained=false, batchnorm=false)
+        ps, st = Lux.setup(Random.default_rng(), vgg)
+        ps_concrete = ps |> Reactant.to_rarray
+        st_concrete = st |> Lux.testmode |> Reactant.to_rarray
+        x = rand(Float32, 224, 224, 3, 16) |> Reactant.to_rarray
+    end
+end
+
 function sumcos(x)
     return sum(cos.(x))
 end
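
For orientation, the new benchmarks only time Reactant.compile on the Boltz VGG models; a compiled forward pass would look roughly like the sketch below. This is illustrative and not part of the commit; in particular, calling the object returned by Reactant.compile with the same arguments is an assumption about Reactant's API rather than something the diff shows.

# Illustrative sketch (assumptions noted above): build and compile VGG16 once.
using Reactant, Boltz, Lux, Random

vgg = Vision.VGG(16; pretrained=false, batchnorm=false)
ps, st = Lux.setup(Random.default_rng(), vgg)
ps_concrete = ps |> Reactant.to_rarray
st_concrete = st |> Lux.testmode |> Reactant.to_rarray
x = rand(Float32, 224, 224, 3, 16) |> Reactant.to_rarray

compiled_vgg = Reactant.compile(vgg, (x, ps_concrete, st_concrete))   # the step the suite times
y, st_new = compiled_vgg(x, ps_concrete, st_concrete)                 # assumed calling convention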
