using DynamicPPLBenchmarks: Models, make_suite
-using BenchmarkTools: median, run
-using PrettyTables: PrettyTables
+using BenchmarkTools: @benchmark, median, run
+using PrettyTables: PrettyTables, ft_printf
using Random: seed!

seed!(23)

+# Create DynamicPPL.Model instances to run benchmarks on.
smorgasbord_instance = Models.smorgasbord(randn(100), randn(100))
loop_univariate1k, multivariate1k = begin
    data_1k = randn(1_000)
@@ -33,6 +34,8 @@ chosen_combinations = [
    ("Smorgasbord", smorgasbord_instance, :untyped, :forwarddiff),
    ("Smorgasbord", smorgasbord_instance, :simple_dict, :forwarddiff),
    ("Smorgasbord", smorgasbord_instance, :typed, :reversediff),
+    # TODO(mhauru) Add Mooncake once TuringBenchmarking.jl supports it. Consider changing
+    # all the below :reversediffs to :mooncakes too.
    # ("Smorgasbord", smorgasbord_instance, :typed, :mooncake),
    ("Loop univariate 1k", loop_univariate1k, :typed, :reversediff),
    ("Multivariate 1k", multivariate1k, :typed, :reversediff),
@@ -43,30 +46,54 @@ chosen_combinations = [
    ("LDA", lda_instance, :typed, :reversediff),
]

+# Time running a model-like function that does not use DynamicPPL, as a reference point.
+# Eval timings will be relative to this.
+reference_time = begin
+    obs = randn()
+    median(@benchmark Models.simple_assume_observe_non_model(obs)).time
+end
+
results_table = Tuple{String,String,String,Float64,Float64}[]

for (model_name, model, varinfo_choice, adbackend) in chosen_combinations
    suite = make_suite(model, varinfo_choice, adbackend)
    results = run(suite)

    eval_time = median(results["evaluation"]["standard"]).time
+    relative_eval_time = eval_time / reference_time

    grad_group = results["gradient"]
    if isempty(grad_group)
-        ad_eval_time = NaN
+        relative_ad_eval_time = NaN
    else
        grad_backend_key = first(keys(grad_group))
        ad_eval_time = median(grad_group[grad_backend_key]["standard"]).time
+        relative_ad_eval_time = ad_eval_time / eval_time
    end

    push!(
        results_table,
-        (model_name, string(adbackend), string(varinfo_choice), eval_time, ad_eval_time),
+        (
+            model_name,
+            string(adbackend),
+            string(varinfo_choice),
+            relative_eval_time,
+            relative_ad_eval_time,
+        ),
    )
end

table_matrix = hcat(Iterators.map(collect, zip(results_table...))...)
header = [
-    "Model", "AD Backend", "VarInfo Type", "Evaluation Time (ns)", "AD Eval Time (ns)"
+    "Model",
+    "AD Backend",
+    "VarInfo Type",
+    "Evaluation Time / Reference Time",
+    "AD Time / Eval Time",
]
-PrettyTables.pretty_table(table_matrix; header=header, tf=PrettyTables.tf_markdown)
+PrettyTables.pretty_table(
+    table_matrix;
+    header=header,
+    tf=PrettyTables.tf_markdown,
+    formatters=ft_printf("%.1f", [4, 5]),
+)
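
A note on the timing pattern introduced above: BenchmarkTools' `@benchmark` returns a `Trial`, `median(trial)` gives a `TrialEstimate`, and its `time` field is in nanoseconds, so dividing two medians yields the unitless ratios the new table reports. A minimal sketch with stand-in functions (these are not part of the benchmark suite):

```julia
using BenchmarkTools: @benchmark, median

reference() = sum(abs2, randn(100))     # stand-in for Models.simple_assume_observe_non_model
evaluation() = sum(abs2, randn(1_000))  # stand-in for one model evaluation

reference_time = median(@benchmark reference()).time  # nanoseconds
eval_time = median(@benchmark evaluation()).time      # nanoseconds
relative_eval_time = eval_time / reference_time       # unitless, as reported in the table
```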
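Likewise for the table plumbing: `zip(results_table...)` transposes a `Vector` of tuples, `collect` turns each resulting tuple into a vector, and `hcat` stacks those as columns, giving one row per benchmark. `ft_printf("%.1f", [4, 5])` from PrettyTables then applies the printf format to columns 4 and 5 only. A self-contained sketch with made-up numbers:

```julia
using PrettyTables: pretty_table, ft_printf, tf_markdown

rows = [
    ("Smorgasbord", "forwarddiff", "typed", 1.2345, 6.789),
    ("LDA", "reversediff", "typed", 3.456, 2.101),
]
# One tuple per row: zip transposes, collect makes vectors, hcat stacks columns.
mat = hcat(Iterators.map(collect, zip(rows...))...)  # 2×5 Matrix
header = [
    "Model",
    "AD Backend",
    "VarInfo Type",
    "Evaluation Time / Reference Time",
    "AD Time / Eval Time",
]
pretty_table(mat; header=header, tf=tf_markdown, formatters=ft_printf("%.1f", [4, 5]))
```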