@@ -23,6 +23,12 @@ import LibProc
23
23
24
24
import TestsUtils
25
25
26
/// Async-compatible overload of the nil-coalescing operator.
///
/// The standard library's `??` takes a synchronous `@autoclosure` default,
/// so it cannot be used when the fallback value has to be produced by an
/// `async` call (here: the async calibration helpers in `TestRunner.run`).
/// This private overload accepts an async autoclosure instead.
///
/// - Parameters:
///   - x: The optional value to unwrap.
///   - y: Autoclosure producing the fallback; evaluated — and awaited —
///     only when `x` is `nil`, preserving the lazy semantics of `??`.
/// - Returns: `x` if non-`nil`, otherwise the awaited result of `y()`.
private func ?? <T>(_ x: T?, _ y: @autoclosure () async -> T) async -> T {
  if let x { return x }
  return await y()
}
31
+
26
32
struct MeasurementMetadata {
27
33
// Note: maxRSS and pages subtract the RSS measured
28
34
// after the benchmark driver setup has finished.
@@ -487,13 +493,13 @@ final class TestRunner {
487
493
}
488
494
489
495
/// Measure the `fn` and return the average sample time per iteration (μs).
490
- func measure( _ name: String , fn: ( Int ) -> Void , numIters: Int ) -> Double {
496
+ func measure( _ name: String , fn: ( Int ) async -> Void , numIters: Int ) async -> Double {
491
497
#if SWIFT_RUNTIME_ENABLE_LEAK_CHECKER
492
498
name. withCString { p in startTrackingObjects ( p) }
493
499
#endif
494
500
495
501
startMeasurement ( )
496
- fn ( numIters)
502
+ await fn ( numIters)
497
503
stopMeasurement ( )
498
504
499
505
#if SWIFT_RUNTIME_ENABLE_LEAK_CHECKER
@@ -508,7 +514,7 @@ final class TestRunner {
508
514
}
509
515
510
516
/// Run the benchmark and return the measured results.
511
- func run( _ test: BenchmarkInfo ) -> BenchResults ? {
517
+ func run( _ test: BenchmarkInfo ) async -> BenchResults ? {
512
518
// Before we do anything, check that we actually have a function to
513
519
// run. If we don't it is because the benchmark is not supported on
514
520
// the platform and we should skip it.
@@ -534,8 +540,8 @@ final class TestRunner {
534
540
}
535
541
536
542
// Determine number of iterations for testFn to run for desired time.
537
- func iterationsPerSampleTime( ) -> ( numIters: Int , oneIter: Double ) {
538
- let oneIter = measure ( test. name, fn: testFn, numIters: 1 )
543
+ func iterationsPerSampleTime( ) async -> ( numIters: Int , oneIter: Double ) {
544
+ let oneIter = await measure ( test. name, fn: testFn, numIters: 1 )
539
545
if oneIter > 0 {
540
546
let timePerSample = c. sampleTime * 1_000_000.0 // microseconds (μs)
541
547
return ( max ( Int ( timePerSample / oneIter) , 1 ) , oneIter)
@@ -546,28 +552,28 @@ final class TestRunner {
546
552
547
553
// Determine the scale of measurements. Re-use the calibration result if
548
554
// it is just one measurement.
549
- func calibrateMeasurements( ) -> Int {
550
- let ( numIters, oneIter) = iterationsPerSampleTime ( )
555
+ func calibrateMeasurements( ) async -> Int {
556
+ let ( numIters, oneIter) = await iterationsPerSampleTime ( )
551
557
if numIters == 1 { addSample ( oneIter) }
552
558
else { resetMeasurements ( ) } // for accurate yielding reports
553
559
return numIters
554
560
}
555
561
556
562
let numIters = min ( // Cap to prevent overflow on 32-bit systems when scaled
557
563
Int . max / 10_000 , // by the inner loop multiplier inside the `testFn`.
558
- c. numIters ?? calibrateMeasurements ( ) )
564
+ await c. numIters ?? ( await calibrateMeasurements ( ) ) )
559
565
560
- let numSamples = c. numSamples ??
566
+ let numSamples = await c. numSamples ??
561
567
// Compute the number of samples to measure for `sample-time`,
562
568
// clamped in (`min-samples`, 200) range, if the `num-iters` are fixed.
563
- max ( c. minSamples ?? 1 , min ( 200 , c. numIters == nil ? 1 :
564
- calibrateMeasurements ( ) ) )
569
+ ( max ( await c. minSamples ?? 1 , min ( 200 , c. numIters == nil ? 1 :
570
+ await calibrateMeasurements ( ) ) ) )
565
571
566
572
samples. reserveCapacity ( numSamples)
567
573
logVerbose ( " Collecting \( numSamples) samples. " )
568
574
logVerbose ( " Measuring with scale \( numIters) . " )
569
575
for _ in samples. count..< numSamples {
570
- addSample ( measure ( test. name, fn: testFn, numIters: numIters) )
576
+ addSample ( await measure ( test. name, fn: testFn, numIters: numIters) )
571
577
}
572
578
573
579
test. tearDownFunction ? ( )
@@ -687,16 +693,16 @@ final class TestRunner {
687
693
}
688
694
689
695
/// Run each benchmark and emit the results in JSON
690
- func runBenchmarks( ) {
696
+ func runBenchmarks( ) async {
691
697
var testCount = 0
692
698
if !c. jsonOutput {
693
699
printTextHeading ( )
694
700
}
695
701
for (index, info) in c. tests {
696
702
if c. jsonOutput {
697
- printJSON ( index: index, info: info, results: run ( info) )
703
+ printJSON ( index: index, info: info, results: await run ( info) )
698
704
} else {
699
- printText ( index: index, info: info, results: run ( info) )
705
+ printText ( index: index, info: info, results: await run ( info) )
700
706
}
701
707
testCount += 1
702
708
}
@@ -718,7 +724,7 @@ extension Hasher {
718
724
}
719
725
}
720
726
721
- public func main( ) {
727
+ public func main( ) async {
722
728
let config = TestConfig ( registeredBenchmarks)
723
729
switch ( config. action) {
724
730
case . listTests:
@@ -748,7 +754,7 @@ public func main() {
748
754
the option '--allow-nondeterministic-hashing to the benchmarking executable.
749
755
""" )
750
756
}
751
- TestRunner ( config) . runBenchmarks ( )
757
+ await TestRunner ( config) . runBenchmarks ( )
752
758
if let x = config. afterRunSleep {
753
759
sleep ( x)
754
760
}
0 commit comments