
Commit 89bc9df

Merge pull request #83890 from slavapestov/async-benchmark
Add support for async benchmarks
2 parents a309fc0 + d786430

6 files changed: +56 −32 lines changed

benchmark/cmake/modules/AddSwiftBenchmarkSuite.cmake

Lines changed: 8 additions & 0 deletions
```diff
@@ -363,6 +363,10 @@ function (swift_benchmark_compile_archopts)
     list(APPEND common_options "-g")
   endif()
 
+  if("${optflag}" STREQUAL "Onone")
+    list(APPEND common_options "-DDEBUG")
+  endif()
+
   if (is_darwin)
     list(APPEND common_options
       "-I" "${srcdir}/utils/ObjectiveCTests"
@@ -400,6 +404,10 @@ function (swift_benchmark_compile_archopts)
       "-target" "${target}"
       "-${driver_opt}")
 
+  if(${optflag} STREQUAL "Onone")
+    list(APPEND common_options_driver "-DDEBUG")
+  endif()
+
   if(SWIFT_BENCHMARK_GENERATE_DEBUG_INFO)
     list(APPEND common_options_driver "-g")
   endif()
```
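
Defining `DEBUG` for `-Onone` configurations matters because the benchmark sources use `#if DEBUG` conditional compilation; later in this commit the driver relies on it to skip `.long` benchmarks in unoptimized runs. A minimal sketch of the mechanism (not taken from the commit itself):

```swift
// With -DDEBUG on the compile line (as the -Onone build now passes),
// the first branch is compiled in; optimized builds take the second.
#if DEBUG
let configuration = "unoptimized (-Onone); long benchmarks will be skipped"
#else
let configuration = "optimized"
#endif
print(configuration)
```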
Lines changed: 3 additions & 9 deletions
```diff
@@ -1,20 +1,14 @@
 import TestsUtils
-import Dispatch
 
 public let benchmarks = [
   BenchmarkInfo(
     name: "Monoids",
     runFunction: run_Monoids,
-    tags: [.algorithm])
+    tags: [.algorithm, .miniapplication, .long])
 ]
 
-func run_Monoids(_ n: Int) {
-  let semaphore = DispatchSemaphore(value: 0)
+func run_Monoids(_ n: Int) async {
   for _ in 0 ... n {
-    Task {
-      await run(output: false)
-      semaphore.signal()
-    }
-    semaphore.wait()
+    await run(output: false)
   }
 }
```
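
For context, the deleted code bridged the async `run(output:)` call back into a synchronous run function by launching a `Task` and blocking on a semaphore; with async benchmark support the driver can simply `await`. A standalone sketch of the removed idiom (helper name hypothetical):

```swift
import Dispatch

// The pre-async workaround: spawn a Task, then block the calling
// thread until the async work signals completion.
func runBlocking(_ work: @escaping @Sendable () async -> Void) {
  let semaphore = DispatchSemaphore(value: 0)
  Task {
    await work()
    semaphore.signal()
  }
  semaphore.wait() // ties up a thread per call; a direct `await` avoids this
}
```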

benchmark/scripts/build_script_helper.py

Lines changed: 8 additions & 1 deletion
```diff
@@ -25,8 +25,15 @@ def perform_build(args, swiftbuild_path, config, binary_name, opt_flag):
         "-Xswiftc",
         "-align-module-to-page-size",
         "-Xswiftc",
-        opt_flag,
+        opt_flag
     ]
+
+    if config == "debug":
+        swiftbuild_args += [
+            "-Xswiftc",
+            "-DDEBUG"
+        ]
+
     if args.verbose:
         swiftbuild_args.append("--verbose")
     subprocess.call(swiftbuild_args)
```

benchmark/utils/DriverUtils.swift

Lines changed: 30 additions & 18 deletions
```diff
@@ -23,6 +23,12 @@ import LibProc
 
 import TestsUtils
 
+/// Sorry.
+private func ??<T>(_ x: T?, _ y: @autoclosure () async -> T) async -> T {
+  if let x { return x }
+  return await y()
+}
+
 struct MeasurementMetadata {
   // Note: maxRSS and pages subtract the RSS measured
   // after the benchmark driver setup has finished.
```
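
The private `??` overload is needed because the standard library's `??` takes a synchronous `@autoclosure` for its right-hand side, so an `async` fallback such as `calibrateMeasurements()` cannot appear there. This overload accepts an async autoclosure and awaits it only when the optional is `nil`. A self-contained illustration of how it behaves (the demo names are hypothetical):

```swift
private func ??<T>(_ x: T?, _ y: @autoclosure () async -> T) async -> T {
  if let x { return x }
  return await y()
}

func expensiveDefault() async -> Int { 42 }

func pick(_ configured: Int?) async -> Int {
  // expensiveDefault() runs only when `configured` is nil,
  // mirroring `await c.numIters ?? (await calibrateMeasurements())` below.
  await configured ?? (await expensiveDefault())
}
```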
```diff
@@ -198,10 +204,16 @@ struct TestConfig {
     action = c.action ?? .run
     allowNondeterministicHashing = c.allowNondeterministicHashing ?? false
     jsonOutput = c.jsonOutput ?? false
+
+    var skipTags: Set<BenchmarkCategory>
+    skipTags = c.skipTags ?? [.unstable, .skip]
+#if DEBUG
+    skipTags.insert(.long)
+#endif
     tests = TestConfig.filterTests(registeredBenchmarks,
                                    tests: c.tests ?? [],
                                    tags: c.tags ?? [],
-                                   skipTags: c.skipTags ?? [.unstable, .skip])
+                                   skipTags: skipTags)
 
     if tests.count > 0 {
       testNameLength = tests.map{$0.info.name.count}.sorted().reversed().first!
```
```diff
@@ -481,13 +493,13 @@
   }
 
   /// Measure the `fn` and return the average sample time per iteration (μs).
-  func measure(_ name: String, fn: (Int) -> Void, numIters: Int) -> Double {
+  func measure(_ name: String, fn: (Int) async -> Void, numIters: Int) async -> Double {
 #if SWIFT_RUNTIME_ENABLE_LEAK_CHECKER
     name.withCString { p in startTrackingObjects(p) }
 #endif
 
     startMeasurement()
-    fn(numIters)
+    await fn(numIters)
     stopMeasurement()
 
 #if SWIFT_RUNTIME_ENABLE_LEAK_CHECKER
```
```diff
@@ -502,7 +514,7 @@
   }
 
   /// Run the benchmark and return the measured results.
-  func run(_ test: BenchmarkInfo) -> BenchResults? {
+  func run(_ test: BenchmarkInfo) async -> BenchResults? {
     // Before we do anything, check that we actually have a function to
     // run. If we don't it is because the benchmark is not supported on
     // the platform and we should skip it.
```
```diff
@@ -528,8 +540,8 @@
     }
 
     // Determine number of iterations for testFn to run for desired time.
-    func iterationsPerSampleTime() -> (numIters: Int, oneIter: Double) {
-      let oneIter = measure(test.name, fn: testFn, numIters: 1)
+    func iterationsPerSampleTime() async -> (numIters: Int, oneIter: Double) {
+      let oneIter = await measure(test.name, fn: testFn, numIters: 1)
       if oneIter > 0 {
         let timePerSample = c.sampleTime * 1_000_000.0 // microseconds (μs)
         return (max(Int(timePerSample / oneIter), 1), oneIter)
@@ -540,28 +552,28 @@
 
     // Determine the scale of measurements. Re-use the calibration result if
     // it is just one measurement.
-    func calibrateMeasurements() -> Int {
-      let (numIters, oneIter) = iterationsPerSampleTime()
+    func calibrateMeasurements() async -> Int {
+      let (numIters, oneIter) = await iterationsPerSampleTime()
       if numIters == 1 { addSample(oneIter) }
       else { resetMeasurements() } // for accurate yielding reports
       return numIters
     }
 
     let numIters = min( // Cap to prevent overflow on 32-bit systems when scaled
       Int.max / 10_000, // by the inner loop multiplier inside the `testFn`.
-      c.numIters ?? calibrateMeasurements())
+      await c.numIters ?? (await calibrateMeasurements()))
 
-    let numSamples = c.numSamples ??
+    let numSamples = await c.numSamples ??
       // Compute the number of samples to measure for `sample-time`,
       // clamped in (`min-samples`, 200) range, if the `num-iters` are fixed.
-      max(c.minSamples ?? 1, min(200, c.numIters == nil ? 1 :
-        calibrateMeasurements()))
+      (max(await c.minSamples ?? 1, min(200, c.numIters == nil ? 1 :
+        await calibrateMeasurements())))
 
     samples.reserveCapacity(numSamples)
     logVerbose("    Collecting \(numSamples) samples.")
     logVerbose("    Measuring with scale \(numIters).")
     for _ in samples.count..<numSamples {
-      addSample(measure(test.name, fn: testFn, numIters: numIters))
+      addSample(await measure(test.name, fn: testFn, numIters: numIters))
     }
 
     test.tearDownFunction?()
```
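
The calibration arithmetic itself is untouched by the async conversion: one iteration is timed, then the per-sample iteration count is scaled so each sample runs for roughly the configured `sample-time`. A minimal standalone restatement of that formula (function name hypothetical):

```swift
// Iterations per sample: how many iterations of the measured cost
// fit into the desired sample time, with a floor of 1.
func itersPerSample(oneIterMicros: Double, sampleTimeSeconds: Double) -> Int {
  guard oneIterMicros > 0 else { return 1 }
  let timePerSample = sampleTimeSeconds * 1_000_000.0 // microseconds (μs)
  return max(Int(timePerSample / oneIterMicros), 1)
}

// Example: a 1 s sample time and a 250 μs iteration give 4000 iterations.
let n = itersPerSample(oneIterMicros: 250, sampleTimeSeconds: 1.0)
```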
```diff
@@ -681,16 +693,16 @@
   }
 
   /// Run each benchmark and emit the results in JSON
-  func runBenchmarks() {
+  func runBenchmarks() async {
     var testCount = 0
     if !c.jsonOutput {
       printTextHeading()
     }
     for (index, info) in c.tests {
       if c.jsonOutput {
-        printJSON(index: index, info: info, results: run(info))
+        printJSON(index: index, info: info, results: await run(info))
       } else {
-        printText(index: index, info: info, results: run(info))
+        printText(index: index, info: info, results: await run(info))
       }
       testCount += 1
     }
```
```diff
@@ -712,7 +724,7 @@ extension Hasher
   }
 }
 
-public func main() {
+public func main() async {
   let config = TestConfig(registeredBenchmarks)
   switch (config.action) {
   case .listTests:
@@ -742,7 +754,7 @@ public func main()
       the option '--allow-nondeterministic-hashing to the benchmarking executable.
       """)
   }
-  TestRunner(config).runBenchmarks()
+  await TestRunner(config).runBenchmarks()
   if let x = config.afterRunSleep {
     sleep(x)
   }
```

benchmark/utils/TestsUtils.swift

Lines changed: 6 additions & 3 deletions
```diff
@@ -69,6 +69,9 @@ public enum BenchmarkCategory : String {
   // significant optimization.
   case cpubench
 
+  // Benchmarks to skip on -Onone runs.
+  case long
+
   // Explicit skip marker
   case skip
 }
@@ -113,10 +116,10 @@ public struct BenchmarkInfo {
   public var name: String
 
   /// Shadow static variable for runFunction.
-  private var _runFunction: (Int) -> ()
+  private var _runFunction: (Int) async -> ()
 
   /// A function that invokes the specific benchmark routine.
-  public var runFunction: ((Int) -> ())? {
+  public var runFunction: ((Int) async -> ())? {
     if !shouldRun {
       return nil
     }
@@ -171,7 +174,7 @@ public struct BenchmarkInfo {
   /// to be interrupted by a context switch.
   public var legacyFactor: Int?
 
-  public init(name: String, runFunction: @escaping (Int) -> (), tags: [BenchmarkCategory],
+  public init(name: String, runFunction: @escaping (Int) async -> (), tags: [BenchmarkCategory],
               setUpFunction: (() -> ())? = nil,
               tearDownFunction: (() -> ())? = nil,
               unsupportedPlatforms: BenchmarkPlatformSet = [],
```
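
Widening `runFunction` to `(Int) async -> ()` is source-compatible for existing benchmarks because Swift implicitly converts a synchronous function to an async function type; only benchmarks that actually suspend need to be declared `async`. A small sketch (types and names hypothetical):

```swift
struct Entry {
  let run: (Int) async -> Void
}

func runSyncBenchmark(_ n: Int) { /* existing synchronous body */ }
func runAsyncBenchmark(_ n: Int) async { /* may await */ }

// Both satisfy the async function type; the sync one converts implicitly.
let entries = [Entry(run: runSyncBenchmark), Entry(run: runAsyncBenchmark)]
```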

benchmark/utils/main.swift

Lines changed: 1 addition & 1 deletion
```diff
@@ -421,4 +421,4 @@ register(Walsh.benchmarks)
 register(WordCount.benchmarks)
 register(XorLoop.benchmarks)
 
-main()
+await main()
```
