@@ -107,9 +107,20 @@ function _run(b::Benchmark, p::Parameters; verbose = false, pad = "", kwargs...)
     return sort!(trial), return_val
 end
 
+
+"""
+    run(b::Benchmark[, p::Parameters = b.params]; kwargs...)
+
+Run the benchmark defined by [`@benchmarkable`](@ref).
+"""
 Base.run(b::Benchmark, p::Parameters = b.params; progressid=nothing, nleaves=NaN, ndone=NaN, kwargs...) =
     run_result(b, p; kwargs...)[1]
 
+"""
+    run(group::BenchmarkGroup[, args...]; verbose::Bool = false, pad = "", kwargs...)
+
+Run the benchmark group, with benchmark parameters set to `group`'s by default.
+"""
 Base.run(group::BenchmarkGroup, args...; verbose::Bool = false, pad = "", kwargs...) =
     _withprogress("Benchmarking", group; kwargs...) do progressid, nleaves, ndone
         result = similar(group)
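
The two `run` methods documented above are the execution entry points for benchmarks defined with `@benchmarkable`. As a minimal usage sketch (the names `b`, `suite`, and `trial` are illustrative, not part of this diff):

```julia
using BenchmarkTools

# Define a benchmark without executing it, then run it explicitly.
b = @benchmarkable sum(x) setup=(x = rand(1000))
tune!(b)              # choose evals/samples before measuring
trial = run(b)        # run(b::Benchmark, ...) returns a BenchmarkTools.Trial

# The BenchmarkGroup method runs every benchmark in the group.
suite = BenchmarkGroup()
suite["sum"] = @benchmarkable sum($(rand(1000)))
results = run(suite; verbose = true)  # a BenchmarkGroup of Trials
```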
@@ -196,6 +207,15 @@ for i in 1:8 (EVALS[((i*1000)+1):((i+1)*1000)] .= 11 - i) end # linearl
 
 guessevals(t) = t <= length(EVALS) ? EVALS[t] : 1
 
+"""
+    tune!(group::BenchmarkGroup; verbose::Bool = false, pad = "", kwargs...)
+
+Tune a `BenchmarkGroup` instance. For most benchmarks, `tune!` needs to perform many
+evaluations to determine the proper parameters for any given benchmark - often more
+evaluations than are performed when running a trial. In fact, the majority of total
+benchmarking time is usually spent tuning parameters, rather than actually running
+trials.
+"""
 tune!(group::BenchmarkGroup; verbose::Bool = false, pad = "", kwargs...) =
     _withprogress("Tuning", group; kwargs...) do progressid, nleaves, ndone
         gcscrub() # run GC before running group, even if individual benchmarks don't manually GC
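
Since the docstring notes that tuning usually dominates total benchmarking time, it can pay to tune a suite once and cache the resulting parameters. A sketch of that pattern using the package's save/load helpers (the file name is arbitrary):

```julia
using BenchmarkTools

suite = BenchmarkGroup()
suite["trig"] = BenchmarkGroup()
suite["trig"]["sin"] = @benchmarkable sin(x) setup=(x = rand())

tune!(suite)  # visits every leaf benchmark and tunes it in place

# Cache the tuned parameters so later sessions can skip tune!.
BenchmarkTools.save("params.json", params(suite))
loadparams!(suite, BenchmarkTools.load("params.json")[1], :evals, :samples)
```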
@@ -218,6 +238,11 @@ tune!(group::BenchmarkGroup; verbose::Bool = false, pad = "", kwargs...) =
     return group
 end
 
+"""
+    tune!(b::Benchmark, p::Parameters = b.params; verbose::Bool = false, pad = "", kwargs...)
+
+Tune a `Benchmark` instance.
+"""
 function tune!(b::Benchmark, p::Parameters = b.params;
                progressid=nothing, nleaves=NaN, ndone=NaN, # ignored
                verbose::Bool = false, pad = "", kwargs...)
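
For a single `Benchmark`, `tune!` mutates `b.params` in place, so the chosen values can be inspected before running. A brief sketch:

```julia
using BenchmarkTools

b = @benchmarkable sort!(copy(v)) setup=(v = rand(100))
tune!(b)
@show b.params.evals    # evaluations per sample chosen by the tuner
@show b.params.samples  # sample count, 10000 by default
run(b)
```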
@@ -296,6 +321,64 @@ function quasiquote!(ex::Expr, vars::Vector{Expr})
     return ex
 end
 
+raw"""
+    @benchmark <expr to benchmark> [setup=<setup expr>]
+
+Run a benchmark on the given expression.
+
+# Example
+
+The simplest usage of this macro is to put it in front of what you want
+to benchmark.
+
+```julia-repl
+julia> @benchmark sin(1)
+BenchmarkTools.Trial:
+  memory estimate:  0 bytes
+  allocs estimate:  0
+  --------------
+  minimum time:     13.610 ns (0.00% GC)
+  median time:      13.622 ns (0.00% GC)
+  mean time:        13.638 ns (0.00% GC)
+  maximum time:     21.084 ns (0.00% GC)
+  --------------
+  samples:          10000
+  evals/sample:     998
+```
+
+You can interpolate values into `@benchmark` expressions:
+
+```julia-repl
+# rand(1000) is executed for each evaluation
+julia> @benchmark sum(rand(1000))
+BenchmarkTools.Trial:
+  memory estimate:  7.94 KiB
+  allocs estimate:  1
+  --------------
+  minimum time:     1.566 μs (0.00% GC)
+  median time:      2.135 μs (0.00% GC)
+  mean time:        3.071 μs (25.06% GC)
+  maximum time:     296.818 μs (95.91% GC)
+  --------------
+  samples:          10000
+  evals/sample:     10
+
+# rand(1000) is evaluated at definition time, and the resulting
+# value is interpolated into the benchmark expression
+julia> @benchmark sum($(rand(1000)))
+BenchmarkTools.Trial:
+  memory estimate:  0 bytes
+  allocs estimate:  0
+  --------------
+  minimum time:     101.627 ns (0.00% GC)
+  median time:      101.909 ns (0.00% GC)
+  mean time:        103.834 ns (0.00% GC)
+  maximum time:     276.033 ns (0.00% GC)
+  --------------
+  samples:          10000
+  evals/sample:     935
+```
+"""
 macro benchmark(args...)
     _, params = prunekwargs(args...)
     tmp = gensym()
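
One caveat worth keeping in mind alongside the docstring's interpolation examples: benchmarks that mutate their input are usually written with `setup` plus `evals=1`, so every sample sees fresh state. A sketch:

```julia
using BenchmarkTools

# Without evals=1, sort! would see already-sorted data on every
# evaluation after the first one within a sample.
@benchmark sort!(x) setup=(x = rand(1000)) evals=1
```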
@@ -337,6 +420,12 @@ function benchmarkable_parts(args)
     return core, setup, teardown, params
 end
 
+"""
+    @benchmarkable <expr to benchmark> [setup=<setup expr>]
+
+Create a `Benchmark` instance for the given expression. `@benchmarkable`
+has syntax similar to that of `@benchmark`. See also [`@benchmark`](@ref).
+"""
 macro benchmarkable(args...)
     core, setup, teardown, params = benchmarkable_parts(args)
     map!(esc, params, params)
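
`@benchmarkable` is the deferred counterpart of `@benchmark`: it builds the `Benchmark` object but executes nothing until `tune!` or `run` is called. A minimal sketch (`A`, `B`, and `trial` are illustrative names):

```julia
using BenchmarkTools

A, B = rand(32, 32), rand(32, 32)
b = @benchmarkable $A * $B   # nothing is executed yet
tune!(b)                     # runs the expression to pick parameters
trial = run(b)
minimum(trial)               # TrialEstimate for the fastest sample
```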