Skip to content

Commit d0f4804

Browse files
authored
fix eval module scoping, escape issues, and deprecations for v0.7 (#77)
1 parent 935872a commit d0f4804

11 files changed

+130
-90
lines changed

REQUIRE

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,3 @@
11
julia 0.6
2-
Compat 0.26
2+
Compat 0.33
33
JSON

src/BenchmarkTools.jl

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -6,7 +6,7 @@ using Compat
66
using JSON
77
using Base.Iterators
88

9-
const BENCHMARKTOOLS_VERSION = v"0.0.6"
9+
const BENCHMARKTOOLS_VERSION = v"0.2.2"
1010

1111
##############
1212
# Parameters #

src/execution.jl

Lines changed: 95 additions & 66 deletions
Original file line numberDiff line numberDiff line change
@@ -25,32 +25,53 @@ function Base.show(io::IO, b::Benchmark)
2525
print(io, str)
2626
end
2727

28+
######################
29+
# compatibility hacks #
30+
######################
31+
32+
if VERSION >= v"0.7.0-DEV.1139"
33+
run_result(b::Benchmark, p::Parameters = b.params; kwargs...) = Base.invokelatest(_run, b, p; kwargs...)
34+
lineartrial(b::Benchmark, p::Parameters = b.params; kwargs...) = Base.invokelatest(_lineartrial, b, p; kwargs...)
35+
elseif VERSION < v"0.7.0-DEV.484"
36+
# `invokelatest` on v0.6 doesn't take keyword arguments, so we fall back
37+
# to the `current_module` approach if we're on a Julia version where that's
38+
# still possible.
39+
function run_result(b::Benchmark, p::Parameters = b.params; kwargs...)
40+
return eval(current_module(), :(BenchmarkTools._run($(b), $(p); $(kwargs...))))
41+
end
42+
function lineartrial(b::Benchmark, p::Parameters = b.params; kwargs...)
43+
return eval(@__MODULE__, :(BenchmarkTools._lineartrial($(b), $(p); $(kwargs...))))
44+
end
45+
else
46+
# There's a commit gap between `current_module` deprecation and `invokelatest` keyword
47+
# argument support. I could try to hack something together just for that gap, but it's
48+
# probably not worth it.
49+
error("Congratulations, you've found the obscure range of Julia v0.7-dev commits where BenchmarkTools is really, really broken. Sorry about that :(")
50+
end
51+
2852
#############
2953
# execution #
3054
#############
31-
# Note that trials executed via `run` and `lineartrial` are always executed at the top-level
32-
# scope of the module returned by `@__MODULE__`. This is to avoid any weird quirks
33-
# when calling the experiment from within other contexts.
3455

35-
sample(b::Benchmark, args...) = error("no execution method defined on type $(typeof(b))")
36-
_run(b::Benchmark, args...; kwargs...) = error("no execution method defined on type $(typeof(b))")
56+
# Note that trials executed via `run` and `lineartrial` are always executed at top-level
57+
# scope, in order to allow transfer of locally-scoped variables into benchmark scope.
3758

38-
# return (Trial, result) tuple, where result is the result of the benchmarked expression
39-
function run_result(b::Benchmark, p::Parameters = b.params; kwargs...)
40-
return eval(@__MODULE__, :(BenchmarkTools._run($(b), $(p); $(kwargs...))))
41-
end
59+
# these stubs get overloaded by @benchmarkable
60+
function sample end
61+
function _run end
4262

43-
Base.run(b::Benchmark, p::Parameters = b.params; kwargs...) =
44-
run_result(b, p; kwargs...)[1]
63+
Base.run(b::Benchmark, p::Parameters = b.params; kwargs...) = run_result(b, p; kwargs...)[1]
4564

4665
function Base.run(group::BenchmarkGroup, args...; verbose::Bool = false, pad = "", kwargs...)
4766
result = similar(group)
4867
gcscrub() # run GC before running group, even if individual benchmarks don't manually GC
4968
i = 1
5069
for id in keys(group)
51-
verbose && (println(pad, "($(i)/$(length(group))) benchmarking ", repr(id), "..."); tic())
52-
result[id] = run(group[id], args...; verbose = verbose, pad = pad*" ", kwargs...)
53-
verbose && (println(pad, "done (took ", toq(), " seconds)"); i += 1)
70+
verbose && println(pad, "($(i)/$(length(group))) benchmarking ", repr(id), "...")
71+
took_seconds = @elapsed begin
72+
result[id] = run(group[id], args...; verbose = verbose, pad = pad*" ", kwargs...)
73+
end
74+
verbose && (println(pad, "done (took ", took_seconds, " seconds)"); i += 1)
5475
end
5576
return result
5677
end
@@ -71,13 +92,13 @@ function _lineartrial(b::Benchmark, p::Parameters = b.params; maxevals = RESOLUT
7192
return estimates[1:completed]
7293
end
7394

74-
function lineartrial(b::Benchmark, p::Parameters = b.params; kwargs...)
75-
return eval(@__MODULE__, :(BenchmarkTools._lineartrial($(b), $(p); $(kwargs...))))
95+
function warmup(item; verbose::Bool = true)
96+
return run(item;
97+
verbose = verbose, samples = 1,
98+
evals = 1, gctrial = false,
99+
gcsample = false)
76100
end
77101

78-
warmup(item; verbose::Bool = true) = run(item; verbose = verbose, samples = 1, evals = 1,
79-
gctrial = false, gcsample = false)
80-
81102
####################
82103
# parameter tuning #
83104
####################
@@ -118,9 +139,9 @@ function tune!(group::BenchmarkGroup; verbose::Bool = false, pad = "", kwargs...
118139
gcscrub() # run GC before running group, even if individual benchmarks don't manually GC
119140
i = 1
120141
for id in keys(group)
121-
verbose && (println(pad, "($(i)/$(length(group))) tuning ", repr(id), "..."); tic())
122-
tune!(group[id]; verbose = verbose, pad = pad*" ", kwargs...)
123-
verbose && (println(pad, "done (took ", toq(), " seconds)"); i += 1)
142+
verbose && println(pad, "($(i)/$(length(group))) tuning ", repr(id), "...")
143+
took_seconds = @elapsed tune!(group[id]; verbose = verbose, pad = pad*" ", kwargs...)
144+
verbose && (println(pad, "done (took ", took_seconds, " seconds)"); i += 1)
124145
end
125146
return group
126147
end
@@ -203,12 +224,13 @@ end
203224

204225
macro benchmark(args...)
205226
_, params = prunekwargs(args...)
206-
quote
207-
tmp = @benchmarkable $(map(esc,args)...)
208-
warmup(tmp)
209-
$(hasevals(params) ? :() : :(tune!(tmp)))
210-
run(tmp)
211-
end
227+
tmp = gensym()
228+
return esc(quote
229+
$tmp = $BenchmarkTools.@benchmarkable $(args...)
230+
$BenchmarkTools.warmup($tmp)
231+
$(hasevals(params) ? :() : :($BenchmarkTools.tune!($tmp)))
232+
$BenchmarkTools.run($tmp)
233+
end)
212234
end
213235

214236
function benchmarkable_parts(args)
@@ -248,33 +270,33 @@ macro benchmarkable(args...)
248270
core_vars = isa(core, Expr) ? collectvars(core) : []
249271
out_vars = filter(var -> var in setup_vars, core_vars)
250272

273+
eval_module = VERSION >= v"0.7.0-DEV.484" ? __module__ : current_module()
274+
251275
# generate the benchmark definition
252276
return esc(quote
253-
BenchmarkTools.generate_benchmark_definition(@__MODULE__,
254-
$(Expr(:quote, out_vars)),
255-
$(Expr(:quote, setup_vars)),
256-
$(Expr(:quote, core)),
257-
$(Expr(:quote, setup)),
258-
$(Expr(:quote, teardown)),
259-
$Parameters($(params...)))
277+
$BenchmarkTools.generate_benchmark_definition($(eval_module),
278+
$(Expr(:quote, out_vars)),
279+
$(Expr(:quote, setup_vars)),
280+
$(Expr(:quote, core)),
281+
$(Expr(:quote, setup)),
282+
$(Expr(:quote, teardown)),
283+
$Parameters($(params...)))
260284
end)
261285
end
262286

263287
# `eval` an expression that forcibly defines the specified benchmark at
264-
# top-level of `eval_module`, hopefully ensuring that the "location" of the benchmark's
265-
# definition will not be a factor for the sake of performance testing.
288+
# top-level in order to allow transfer of locally-scoped variables into
289+
# benchmark scope.
266290
#
267291
# The double-underscore-prefixed variable names are not particularly hygienic - it's
268292
# possible for them to conflict with names used in the setup or teardown expressions.
269293
# A more robust solution would be preferable.
270-
function generate_benchmark_definition(eval_module, out_vars, setup_vars,
271-
core, setup, teardown, params)
294+
function generate_benchmark_definition(eval_module, out_vars, setup_vars, core, setup, teardown, params)
272295
id = Expr(:quote, gensym("benchmark"))
273296
corefunc = gensym("core")
274297
samplefunc = gensym("sample")
275298
signature = Expr(:call, corefunc, setup_vars...)
276299
if length(out_vars) == 0
277-
#returns = :(return $(Expr(:tuple, setup_vars...)))
278300
invocation = signature
279301
core_body = core
280302
elseif length(out_vars) == 1
@@ -286,9 +308,9 @@ function generate_benchmark_definition(eval_module, out_vars, setup_vars,
286308
invocation = :($(Expr(:tuple, out_vars...)) = $(signature))
287309
core_body = :($(core); $(returns))
288310
end
289-
eval(eval_module, quote
311+
return eval(eval_module, quote
290312
@noinline $(signature) = begin $(core_body) end
291-
@noinline function $(samplefunc)(__params::BenchmarkTools.Parameters)
313+
@noinline function $(samplefunc)(__params::$BenchmarkTools.Parameters)
292314
$(setup)
293315
__evals = __params.evals
294316
__gc_start = Base.gc_num()
@@ -308,31 +330,31 @@ function generate_benchmark_definition(eval_module, out_vars, setup_vars,
308330
__evals))
309331
return __time, __gctime, __memory, __allocs, __return_val
310332
end
311-
function BenchmarkTools.sample(b::BenchmarkTools.Benchmark{$(id)},
312-
p::BenchmarkTools.Parameters = b.params)
333+
function $BenchmarkTools.sample(b::$BenchmarkTools.Benchmark{$(id)},
334+
p::$BenchmarkTools.Parameters = b.params)
313335
return $(samplefunc)(p)
314336
end
315-
function BenchmarkTools._run(b::BenchmarkTools.Benchmark{$(id)},
316-
p::BenchmarkTools.Parameters;
317-
verbose = false, pad = "", kwargs...)
318-
params = BenchmarkTools.Parameters(p; kwargs...)
337+
function $BenchmarkTools._run(b::$BenchmarkTools.Benchmark{$(id)},
338+
p::$BenchmarkTools.Parameters;
339+
verbose = false, pad = "", kwargs...)
340+
params = $BenchmarkTools.Parameters(p; kwargs...)
319341
@assert params.seconds > 0.0 "time limit must be greater than 0.0"
320-
params.gctrial && BenchmarkTools.gcscrub()
342+
params.gctrial && $BenchmarkTools.gcscrub()
321343
start_time = time()
322-
trial = BenchmarkTools.Trial(params)
323-
params.gcsample && BenchmarkTools.gcscrub()
344+
trial = $BenchmarkTools.Trial(params)
345+
params.gcsample && $BenchmarkTools.gcscrub()
324346
s = $(samplefunc)(params)
325347
push!(trial, s[1:end-1]...)
326348
return_val = s[end]
327349
iters = 2
328350
while (time() - start_time) < params.seconds && iters ≤ params.samples
329-
params.gcsample && BenchmarkTools.gcscrub()
351+
params.gcsample && $BenchmarkTools.gcscrub()
330352
push!(trial, $(samplefunc)(params)[1:end-1]...)
331353
iters += 1
332354
end
333355
return sort!(trial), return_val
334356
end
335-
BenchmarkTools.Benchmark{$(id)}($(params))
357+
$BenchmarkTools.Benchmark{$(id)}($(params))
336358
end)
337359
end
338360

@@ -356,7 +378,9 @@ is the *minimum* elapsed time measured during the benchmark.
356378
"""
357379
macro belapsed(args...)
358380
b = Expr(:macrocall, Symbol("@benchmark"), map(esc, args)...)
359-
:(time(minimum($b))/1e9)
381+
return esc(quote
382+
$BenchmarkTools.time($BenchmarkTools.minimum($BenchmarkTools.@benchmark $(args...)))/1e9
383+
end)
360384
end
361385

362386
"""
@@ -374,16 +398,21 @@ is the *minimum* elapsed time measured during the benchmark.
374398
"""
375399
macro btime(args...)
376400
_, params = prunekwargs(args...)
377-
quote
378-
tmp = @benchmarkable $(map(esc,args)...)
379-
warmup(tmp)
380-
$(hasevals(params) ? :() : :(tune!(tmp)))
381-
b, val = run_result(tmp)
382-
bmin = minimum(b)
383-
a = allocs(bmin)
384-
println(" ", prettytime(time(bmin)),
385-
" ($a allocation", a == 1 ? "" : "s", ": ",
386-
prettymemory(memory(bmin)), ")")
387-
val
388-
end
401+
bench, trial, result = gensym(), gensym(), gensym()
402+
trialmin, trialallocs = gensym(), gensym()
403+
tune_phase = hasevals(params) ? :() : :($BenchmarkTools.tune!($bench))
404+
return esc(quote
405+
$bench = $BenchmarkTools.@benchmarkable $(args...)
406+
$BenchmarkTools.warmup($bench)
407+
$tune_phase
408+
$trial, $result = $BenchmarkTools.run_result($bench)
409+
$trialmin = $BenchmarkTools.minimum($trial)
410+
$trialallocs = $BenchmarkTools.allocs($trialmin)
411+
println(" ",
412+
$BenchmarkTools.prettytime($BenchmarkTools.time($trialmin)),
413+
" (", $trialallocs , " allocation",
414+
$trialallocs == 1 ? "" : "s", ": ",
415+
$BenchmarkTools.prettymemory($BenchmarkTools.memory($trialmin)), ")")
416+
$result
417+
end)
389418
end

src/groups.jl

Lines changed: 6 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -56,7 +56,12 @@ end
5656
mapvals!(f, group::BenchmarkGroup) = mapvals!(f, similar(group), group)
5757
mapvals(f, groups::BenchmarkGroup...) = mapvals!(f, similar(first(groups)), groups...)
5858

59-
filtervals!(f, group::BenchmarkGroup) = (filter!((k, v) -> f(v), group.data); return group)
59+
if VERSION >= v"0.7.0-DEV.1393"
60+
filtervals!(f, group::BenchmarkGroup) = (filter!(kv -> f(kv[2]), group.data); return group)
61+
else
62+
filtervals!(f, group::BenchmarkGroup) = (filter!((k, v) -> f(v), group.data); return group)
63+
end
64+
6065
filtervals(f, group::BenchmarkGroup) = filtervals!(f, copy(group))
6166

6267
Base.filter!(f, group::BenchmarkGroup) = (filter!(f, group.data); return group)

src/serialization.jl

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,7 @@
1+
if VERSION >= v"0.7.0-DEV.2437"
2+
using Base.Meta.parse
3+
end
4+
15
const VERSIONS = Dict("Julia" => string(VERSION),
26
"BenchmarkTools" => string(BENCHMARKTOOLS_VERSION))
37

test/ExecutionTests.jl

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,9 +1,8 @@
11
module ExecutionTests
22

3-
using Base.Test
43
using BenchmarkTools
54
using Compat
6-
import Compat.String
5+
using Compat.Test
76

87
seteq(a, b) = length(a) == length(b) == length(intersect(a, b))
98

test/GroupsTests.jl

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,8 +1,9 @@
11
# module GroupsTests
22

3-
using Base.Test
43
using BenchmarkTools
54
using BenchmarkTools: TrialEstimate, Parameters
5+
using Compat
6+
using Compat.Test
67

78
seteq(a, b) = length(a) == length(b) == length(intersect(a, b))
89

test/ParametersTests.jl

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,7 @@
11
module ParametersTests
22

3-
using Base.Test
3+
using Compat
4+
using Compat.Test
45
using BenchmarkTools
56
using BenchmarkTools: Parameters
67

test/SerializationTests.jl

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,8 +1,8 @@
11
module SerializationTests
22

3+
using BenchmarkTools
34
using Compat
45
using Compat.Test
5-
using BenchmarkTools
66

77
eq(x::T, y::T) where {T<:Union{BenchmarkTools.SUPPORTED_TYPES...}} =
88
all(i->eq(getfield(x, i), getfield(y, i)), 1:fieldcount(T))

test/TrialsTests.jl

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,8 @@
11
module TrialsTests
22

3-
using Base.Test
43
using BenchmarkTools
4+
using Compat
5+
using Compat.Test
56

67
#########
78
# Trial #

0 commit comments

Comments
 (0)