Skip to content

Commit a8541b5

Browse files
Julia script for benchmarking on top of current setup
1 parent 0460b64 commit a8541b5

13 files changed

+119
-121332
lines changed

benchmarks/Project.toml

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -11,6 +11,7 @@ Distributions = "31c24e10-a181-5473-b8eb-7969acd0382f"
1111
DrWatson = "634d3b9d-ee7a-5ddf-bec9-22491ea816e1"
1212
DynamicPPL = "366bfd00-2699-11ea-058f-f148b4cae6d8"
1313
InteractiveUtils = "b77e0a4c-d291-57a0-90e8-8db25a27a240"
14+
JSON = "682c06a0-de6a-54ab-a142-c8b1cf79cde6"
1415
LibGit2 = "76f85450-5226-5b5a-8eaa-529ad045b433"
1516
Markdown = "d6f4376e-aef5-505a-96c1-9c027394607a"
1617
Pkg = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f"

benchmarks/benchmarks.jl

Lines changed: 94 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,94 @@
1+
using BenchmarkTools
2+
using DynamicPPL
3+
using Distributions
4+
using DynamicPPLBenchmarks: time_model_def, make_suite
5+
using PrettyTables
6+
using Dates
7+
using LibGit2
8+
9+
# Directory under which all benchmark artifacts are written.
const RESULTS_DIR = "results"

# Identifier for this benchmark run. When the DynamicPPL checkout is a git
# repository this is "<branch>_<short-sha>"; otherwise a timestamped
# fallback name is used (e.g. when benchmarking a registered release).
const BENCHMARK_NAME = let
    repo = try
        # NOTE(review): looks one level above pkgdir(DynamicPPL) for the
        # repository root — confirm this matches the checkout layout.
        LibGit2.GitRepo(joinpath(pkgdir(DynamicPPL), ".."))
    catch
        # Not a git checkout (or libgit2 failed): fall back to a timestamp.
        nothing
    end
    if repo === nothing
        "benchmarks_$(Dates.format(now(), "yyyy-mm-dd_HH-MM-SS"))"
    else
        commit_sha = string(LibGit2.GitHash(LibGit2.peel(LibGit2.GitCommit, LibGit2.head(repo))))
        "$(LibGit2.headname(repo))_$(commit_sha[1:6])"
    end
end

# Ensure the per-run output directory exists before anything is written.
mkpath(joinpath(RESULTS_DIR, BENCHMARK_NAME))
21+
22+
# Minimal normal-normal model: latent mean `m` with a single observation `x`.
@model function demo1(x)
    m ~ Normal()
    x ~ Normal(m, 1)
    return (; m, x)
end
27+
28+
# Beta-Bernoulli model: success probability `p` with a flat Beta(1, 1)
# prior; each element of `y` is an independent Bernoulli observation.
@model function demo2(y)
    p ~ Beta(1, 1)
    # `eachindex(y)` replaces the original `N = length(y); for n in 1:N`:
    # it is the idiomatic form and is correct for any AbstractVector
    # indexing scheme, not just 1-based.
    for n in eachindex(y)
        y[n] ~ Bernoulli(p)
    end
    return (; p)
end
36+
37+
# Benchmark suite: each entry pairs a display name with a model constructor
# and the positional data arguments used to instantiate it.
models = [
    (name = "demo1", model = demo1, data = (1.0,)),
    (name = "demo2", model = demo2, data = (rand(0:1, 10),)),
]
41+
42+
# Run the benchmark suite for every model, persist the raw trials to JSON,
# and accumulate one summary row per (model, evaluation-type) pair.
#
# A typed container replaces the bare `[]` literal, which would create a
# `Vector{Any}`.
results = NamedTuple[]
for (model_name, model_def, data) in models
    println(">> Running benchmarks for model: $model_name")
    # Instantiate the model via the DynamicPPLBenchmarks helper
    # (presumably it also reports model-construction timing — see its docs).
    m = time_model_def(model_def, data...)
    println()

    suite = make_suite(m)
    # 10-second budget per benchmark group.
    bench_results = run(suite, seconds=10)

    # Persist the raw BenchmarkTools trials so separate runs can be
    # compared with `BenchmarkTools.judge` later.
    output_path = joinpath(RESULTS_DIR, BENCHMARK_NAME, "$(model_name)_benchmarks.json")
    BenchmarkTools.save(output_path, bench_results)

    for (eval_type, trial) in bench_results
        push!(results, (
            Model = model_name,
            Evaluation = eval_type,
            # Minimum time is BenchmarkTools' recommended noise-robust
            # estimate of runtime.
            Time = minimum(trial).time,
            Memory = trial.memory,
            Allocations = trial.allocs,
            Samples = length(trial.times),
        ))
    end
end
64+
65+
# Human-readable rendering of each summary row for the markdown table:
# pretty-printed time/memory, underscores in the evaluation label replaced
# with spaces, counts stringified.
formatted = [
    (
        Model = row.Model,
        Evaluation = replace(row.Evaluation, "_" => " "),
        Time = BenchmarkTools.prettytime(row.Time),
        Memory = BenchmarkTools.prettymemory(row.Memory),
        Allocations = string(row.Allocations),
        Samples = string(row.Samples),
    )
    for row in results
]
73+
74+
# Render the summary rows as a markdown table.
table_md = pretty_table(String, formatted,
    tf = tf_markdown,
    header = ["Model", "Evaluation Type", "Time", "Memory", "Allocs", "Samples"],
    alignment = [:l, :l, :r, :r, :r, :r],
)

# Assemble the full report: run header, environment info, results table.
md_output = """
## DynamicPPL Benchmark Results ($BENCHMARK_NAME)

### Execution Environment
- Julia version: $(VERSION)
- DynamicPPL version: $(pkgversion(DynamicPPL))
- Benchmark date: $(now())

$(table_md)
"""

# Echo the report to stdout and persist it alongside the raw JSON results.
println(md_output)
open(io -> write(io, md_output), joinpath(RESULTS_DIR, BENCHMARK_NAME, "REPORT.md"), "w")

println("Benchmark results saved to: $RESULTS_DIR/$BENCHMARK_NAME")
benchmarks/results/benchmarks_2025-02-02_04-21-46/REPORT.md

Lines changed: 22 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,22 @@
1+
## DynamicPPL Benchmark Results (benchmarks_2025-02-02_04-21-46)
2+
3+
### Execution Environment
4+
- Julia version: 1.10.5
5+
- DynamicPPL version: 0.32.2
6+
- Benchmark date: 2025-02-02T04:22:00.341
7+
8+
| Model | Evaluation Type | Time | Memory | Allocs | Samples |
9+
|-------|-------------------------------------------|------------|-----------|--------|---------|
10+
| demo1 | evaluation typed | 194.000 ns | 160 bytes | 3 | 10000 |
11+
| demo1 | evaluation untyped | 1.027 μs | 1.52 KiB | 32 | 10000 |
12+
| demo1 | evaluation simple varinfo dict | 694.000 ns | 704 bytes | 26 | 10000 |
13+
| demo1 | evaluation simple varinfo nt | 43.000 ns | 0 bytes | 0 | 10000 |
14+
| demo1 | evaluation simple varinfo dict from nt | 48.000 ns | 0 bytes | 0 | 10000 |
15+
| demo1 | evaluation simple varinfo componentarrays | 44.000 ns | 0 bytes | 0 | 10000 |
16+
| demo2 | evaluation typed | 273.000 ns | 160 bytes | 3 | 10000 |
17+
| demo2 | evaluation untyped | 2.528 μs | 3.47 KiB | 67 | 10000 |
18+
| demo2 | evaluation simple varinfo dict | 2.189 μs | 1.42 KiB | 60 | 10000 |
19+
| demo2 | evaluation simple varinfo nt | 136.000 ns | 0 bytes | 0 | 10000 |
20+
| demo2 | evaluation simple varinfo dict from nt | 119.000 ns | 0 bytes | 0 | 10000 |
21+
| demo2 | evaluation simple varinfo componentarrays | 136.000 ns | 0 bytes | 0 | 10000 |
22+

benchmarks/results/benchmarks_2025-02-02_04-21-46/demo1_benchmarks.json

Lines changed: 1 addition & 0 deletions
Large diffs are not rendered by default.

benchmarks/results/benchmarks_2025-02-02_04-21-46/demo2_benchmarks.json

Lines changed: 1 addition & 0 deletions
Large diffs are not rendered by default.

0 commit comments

Comments
 (0)