# Benchmark runner for DynamicPPL: times model definitions, runs a
# BenchmarkTools suite per model, and writes raw JSON results plus a
# Markdown report into a per-run results directory.

using BenchmarkTools
using DynamicPPL
using Distributions
using DynamicPPLBenchmarks: time_model_def, make_suite
using PrettyTables
using Dates
using LibGit2

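# Name each run after the current git branch and short commit hash so results
# are easy to attribute; fall back to a timestamp if the repository cannot be
# opened (e.g. when running outside a git checkout).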
const RESULTS_DIR = "results"
const BENCHMARK_NAME = let
    repo = try
        LibGit2.GitRepo(joinpath(pkgdir(DynamicPPL), ".."))
    catch
        nothing
    end
    isnothing(repo) ? "benchmarks_$(Dates.format(now(), "yyyy-mm-dd_HH-MM-SS"))" :
        "$(LibGit2.headname(repo))_$(string(LibGit2.GitHash(LibGit2.peel(LibGit2.GitCommit, LibGit2.head(repo))))[1:6])"
end

mkpath(joinpath(RESULTS_DIR, BENCHMARK_NAME))

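# Two small demo models to benchmark: a normal-normal model with a single
# latent mean, and a Beta-Bernoulli model looping over the observations.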
@model function demo1(x)
    m ~ Normal()
    x ~ Normal(m, 1)
    return (m = m, x = x)
end

@model function demo2(y)
    p ~ Beta(1, 1)
    N = length(y)
    for n in 1:N
        y[n] ~ Bernoulli(p)
    end
    return (; p)
end

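# Each entry pairs a model constructor with the positional data it is called
# with; `data` is splatted into the model in the loop below.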
models = [
    (name = "demo1", model = demo1, data = (1.0,)),
    (name = "demo2", model = demo2, data = (rand(0:1, 10),)),
]

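# For every model: time the model definition, build its benchmark suite, run
# it with a 10-second budget per benchmark, save the raw trials to JSON, and
# collect summary statistics for the report.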
results = []
for (model_name, model_def, data) in models
    println(">> Running benchmarks for model: $model_name")
    m = time_model_def(model_def, data...)
    println()
    suite = make_suite(m)
    bench_results = run(suite; seconds=10)

    # Keep the raw trial data around so later runs can be compared against it.
    output_path = joinpath(RESULTS_DIR, BENCHMARK_NAME, "$(model_name)_benchmarks.json")
    BenchmarkTools.save(output_path, bench_results)

    for (eval_type, trial) in bench_results
        push!(results, (
            Model = model_name,
            Evaluation = eval_type,
            Time = minimum(trial).time,
            Memory = trial.memory,
            Allocations = trial.allocs,
            Samples = length(trial.times),
        ))
    end
end

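# Convert the raw numbers into human-readable strings for the report table.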
formatted = map(results) do r
    (Model = r.Model,
     Evaluation = replace(r.Evaluation, "_" => " "),
     Time = BenchmarkTools.prettytime(r.Time),
     Memory = BenchmarkTools.prettymemory(r.Memory),
     Allocations = string(r.Allocations),
     Samples = string(r.Samples))
end

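# Assemble the Markdown report: environment metadata followed by a
# markdown-formatted table of the per-model results.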
md_output = """
## DynamicPPL Benchmark Results ($BENCHMARK_NAME)

### Execution Environment
- Julia version: $(VERSION)
- DynamicPPL version: $(pkgversion(DynamicPPL))
- Benchmark date: $(now())

$(pretty_table(String, formatted;
    tf = tf_markdown,
    header = ["Model", "Evaluation Type", "Time", "Memory", "Allocs", "Samples"],
    alignment = [:l, :l, :r, :r, :r, :r],
))
"""

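# Print the report and persist it next to the raw JSON results.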
println(md_output)
open(joinpath(RESULTS_DIR, BENCHMARK_NAME, "REPORT.md"), "w") do io
    write(io, md_output)
end

println("Benchmark results saved to: $RESULTS_DIR/$BENCHMARK_NAME")