Commit f7af12d

add some doc strings (#188)

1 parent a8e48df

1 file changed: src/execution.jl (+89 -0 lines)
@@ -107,9 +107,20 @@ function _run(b::Benchmark, p::Parameters; verbose = false, pad = "", kwargs...)
     return sort!(trial), return_val
 end
 
+
+"""
+    run(b::Benchmark[, p::Parameters = b.params]; kwargs...)
+
+Run the benchmark defined by [`@benchmarkable`](@ref).
+"""
 Base.run(b::Benchmark, p::Parameters = b.params; progressid=nothing, nleaves=NaN, ndone=NaN, kwargs...) =
     run_result(b, p; kwargs...)[1]
 
+"""
+    run(group::BenchmarkGroup[, args...]; verbose::Bool = false, pad = "", kwargs...)
+
+Run the benchmark group, with benchmark parameters set to `group`'s by default.
+"""
 Base.run(group::BenchmarkGroup, args...; verbose::Bool = false, pad = "", kwargs...) =
     _withprogress("Benchmarking", group; kwargs...) do progressid, nleaves, ndone
         result = similar(group)
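
The two `run` methods documented in this hunk are the standard entry points for executing benchmarks. A minimal sketch of how they are called (the benchmark bodies and group keys here are illustrative, not from the commit):

```julia
using BenchmarkTools

# Run a single Benchmark created by @benchmarkable.
b = @benchmarkable sum(rand(1000))  # defines the benchmark without running it
trial = run(b)                      # uses b.params unless a Parameters object is passed

# Run a whole group; each leaf benchmark is run with its own parameters.
suite = BenchmarkGroup()
suite["sum"] = b
results = run(suite; verbose = true)
```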
@@ -196,6 +207,15 @@ for i in 1:8 (EVALS[((i*1000)+1):((i+1)*1000)] .= 11 - i) end # linearl
 
 guessevals(t) = t <= length(EVALS) ? EVALS[t] : 1
 
+"""
+    tune!(group::BenchmarkGroup; verbose::Bool = false, pad = "", kwargs...)
+
+Tune a `BenchmarkGroup` instance. For most benchmarks, `tune!` needs to perform many
+evaluations to determine the proper parameters for any given benchmark - often more
+evaluations than are performed when running a trial. In fact, the majority of total
+benchmarking time is usually spent tuning parameters, rather than actually running
+trials.
+"""
 tune!(group::BenchmarkGroup; verbose::Bool = false, pad = "", kwargs...) =
     _withprogress("Tuning", group; kwargs...) do progressid, nleaves, ndone
         gcscrub() # run GC before running group, even if individual benchmarks don't manually GC
@@ -218,6 +238,11 @@ tune!(group::BenchmarkGroup; verbose::Bool = false, pad = "", kwargs...) =
     return group
 end
 
+"""
+    tune!(b::Benchmark, p::Parameters = b.params; verbose::Bool = false, pad = "", kwargs...)
+
+Tune a `Benchmark` instance.
+"""
 function tune!(b::Benchmark, p::Parameters = b.params;
                progressid=nothing, nleaves=NaN, ndone=NaN, # ignored
                verbose::Bool = false, pad = "", kwargs...)
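
Both `tune!` methods documented above mutate their argument: tuning estimates how many evaluations per sample a benchmark needs and stores the result in its `Parameters`. A sketch of the usual call pattern (the benchmark body and group key are illustrative):

```julia
using BenchmarkTools

b = @benchmarkable sin(1)
tune!(b)         # estimates evals/sample and writes it into b.params
trial = run(b)   # run then reuses the tuned parameters

suite = BenchmarkGroup()
suite["sin"] = b
tune!(suite; verbose = true)  # tunes every leaf benchmark in the group
```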
@@ -296,6 +321,64 @@ function quasiquote!(ex::Expr, vars::Vector{Expr})
     return ex
 end
 
+raw"""
+    @benchmark <expr to benchmark> [setup=<setup expr>]
+
+Run benchmark on a given expression.
+
+# Example
+
+The simplest usage of this macro is to put it in front of what you want
+to benchmark.
+
+```julia-repl
+julia> @benchmark sin(1)
+BenchmarkTools.Trial:
+  memory estimate:  0 bytes
+  allocs estimate:  0
+  --------------
+  minimum time:     13.610 ns (0.00% GC)
+  median time:      13.622 ns (0.00% GC)
+  mean time:        13.638 ns (0.00% GC)
+  maximum time:     21.084 ns (0.00% GC)
+  --------------
+  samples:          10000
+  evals/sample:     998
+```
+
+You can interpolate values into `@benchmark` expressions:
+
+```julia
+# rand(1000) is executed for each evaluation
+julia> @benchmark sum(rand(1000))
+BenchmarkTools.Trial:
+  memory estimate:  7.94 KiB
+  allocs estimate:  1
+  --------------
+  minimum time:     1.566 μs (0.00% GC)
+  median time:      2.135 μs (0.00% GC)
+  mean time:        3.071 μs (25.06% GC)
+  maximum time:     296.818 μs (95.91% GC)
+  --------------
+  samples:          10000
+  evals/sample:     10
+
+# rand(1000) is evaluated at definition time, and the resulting
+# value is interpolated into the benchmark expression
+julia> @benchmark sum($(rand(1000)))
+BenchmarkTools.Trial:
+  memory estimate:  0 bytes
+  allocs estimate:  0
+  --------------
+  minimum time:     101.627 ns (0.00% GC)
+  median time:      101.909 ns (0.00% GC)
+  mean time:        103.834 ns (0.00% GC)
+  maximum time:     276.033 ns (0.00% GC)
+  --------------
+  samples:          10000
+  evals/sample:     935
+```
+"""
 macro benchmark(args...)
     _, params = prunekwargs(args...)
     tmp = gensym()
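
The docstring's signature mentions `setup`, but neither example uses it. For completeness, a sketch of the `setup` keyword (illustrative, not part of the commit); `setup` runs once per sample rather than per evaluation, so a mutated setup variable is only fresh for every evaluation when `evals=1`:

```julia
using BenchmarkTools

# Benchmark in-place sorting without measuring the cost of building the input.
# evals=1 ensures each evaluation sees a freshly generated, unsorted vector.
@benchmark sort!(x) setup=(x = rand(1000)) evals=1
```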
@@ -337,6 +420,12 @@ function benchmarkable_parts(args)
     return core, setup, teardown, params
 end
 
+"""
+    @benchmarkable <expr to benchmark> [setup=<setup expr>]
+
+Create a `Benchmark` instance for the given expression. `@benchmarkable`
+has similar syntax with `@benchmark`. See also [`@benchmark`](@ref).
+"""
 macro benchmarkable(args...)
     core, setup, teardown, params = benchmarkable_parts(args)
     map!(esc, params, params)
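
Taken together, the docstrings added in this commit describe one workflow: define with `@benchmarkable`, then `tune!`, then `run`. A sketch tying them together (values and names illustrative):

```julia
using BenchmarkTools

b = @benchmarkable sum($(rand(1000)))  # interpolated input, fixed at definition time
tune!(b)                               # pick evals/sample before measuring
t = run(b)                             # returns a BenchmarkTools.Trial
minimum(t)                             # a TrialEstimate summarizing the trial
```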
