@@ -23,6 +23,12 @@ import LibProc

import TestsUtils

+/// Sorry.
+private func ?? <T>(_ x: T?, _ y: @autoclosure () async -> T) async -> T {
+  if let x { return x }
+  return await y()
+}
+
struct MeasurementMetadata {
  // Note: maxRSS and pages subtract the RSS measured
  // after the benchmark driver setup has finished.
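A note on the `??` overload introduced above: the standard library's `??` takes a synchronous `@autoclosure`, so an `await` cannot appear on its right-hand side. This private async overload restores nil-coalescing ergonomics for async fallbacks, which the later hunks rely on. A minimal self-contained sketch (the `cachedValue`/`recompute` helpers are hypothetical):

    private func ?? <T>(_ x: T?, _ y: @autoclosure () async -> T) async -> T {
      if let x { return x }
      return await y()
    }

    func cachedValue() -> Int? { nil }    // hypothetical
    func recompute() async -> Int { 42 }  // hypothetical

    func demo() async {
      // The inner `await` marks the suspension inside the autoclosure;
      // the outer `await` marks the call to the async `??` itself.
      let n = await cachedValue() ?? (await recompute())
      print(n) // prints 42
    }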
@@ -198,10 +204,16 @@ struct TestConfig {
    action = c.action ?? .run
    allowNondeterministicHashing = c.allowNondeterministicHashing ?? false
    jsonOutput = c.jsonOutput ?? false
+
+    var skipTags: Set<BenchmarkCategory>
+    skipTags = c.skipTags ?? [.unstable, .skip]
+#if DEBUG
+    skipTags.insert(.long)
+#endif
    tests = TestConfig.filterTests(registeredBenchmarks,
      tests: c.tests ?? [],
      tags: c.tags ?? [],
-      skipTags: c.skipTags ?? [.unstable, .skip])
+      skipTags: skipTags)

    if tests.count > 0 {
      testNameLength = tests.map { $0.info.name.count }.sorted().reversed().first!
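The new `skipTags` variable only changes the default: debug builds additionally skip `.long` benchmarks unless the user supplies an explicit skip set. A sketch of the assumed filtering semantics of `filterTests`, using a `String` stand-in for `BenchmarkCategory`:

    struct Bench { let name: String; let tags: Set<String> } // hypothetical stand-in
    let skipTags: Set<String> = ["unstable", "skip", "long"] // the DEBUG default above
    let benchmarks = [Bench(name: "Ackermann", tags: ["algorithm"]),
                      Bench(name: "SortLarge", tags: ["long"])]
    // A benchmark is kept only when none of its tags is in skipTags.
    let kept = benchmarks.filter { $0.tags.isDisjoint(with: skipTags) }
    // kept contains only "Ackermann"; "SortLarge" would be skipped.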
@@ -481,13 +493,13 @@ final class TestRunner {
  }

  /// Measure the `fn` and return the average sample time per iteration (μs).
-  func measure(_ name: String, fn: (Int) -> Void, numIters: Int) -> Double {
+  func measure(_ name: String, fn: (Int) async -> Void, numIters: Int) async -> Double {
#if SWIFT_RUNTIME_ENABLE_LEAK_CHECKER
    name.withCString { p in startTrackingObjects(p) }
#endif

    startMeasurement()
-    fn(numIters)
+    await fn(numIters)
    stopMeasurement()

#if SWIFT_RUNTIME_ENABLE_LEAK_CHECKER
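Widening `fn` to `(Int) async -> Void` does not force every benchmark to adopt concurrency: per SE-0296, a synchronous function value converts implicitly to the corresponding async function type, so existing synchronous run functions still satisfy the parameter. For example:

    func runSyncBenchmark(_ n: Int) { /* hot loop */ }        // hypothetical
    func runAsyncBenchmark(_ n: Int) async { /* awaits */ }   // hypothetical

    let a: (Int) async -> Void = runSyncBenchmark  // implicit sync-to-async conversion
    let b: (Int) async -> Void = runAsyncBenchmark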
@@ -502,7 +514,7 @@ final class TestRunner {
  }

  /// Run the benchmark and return the measured results.
-  func run(_ test: BenchmarkInfo) -> BenchResults? {
+  func run(_ test: BenchmarkInfo) async -> BenchResults? {
    // Before we do anything, check that we actually have a function to
    // run. If we don't it is because the benchmark is not supported on
    // the platform and we should skip it.
@@ -528,8 +540,8 @@ final class TestRunner {
    }

    // Determine number of iterations for testFn to run for desired time.
-    func iterationsPerSampleTime() -> (numIters: Int, oneIter: Double) {
-      let oneIter = measure(test.name, fn: testFn, numIters: 1)
+    func iterationsPerSampleTime() async -> (numIters: Int, oneIter: Double) {
+      let oneIter = await measure(test.name, fn: testFn, numIters: 1)
      if oneIter > 0 {
        let timePerSample = c.sampleTime * 1_000_000.0 // microseconds (μs)
        return (max(Int(timePerSample / oneIter), 1), oneIter)
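The calibration arithmetic is unchanged by the async plumbing: one iteration is timed, and the per-sample iteration count is the sample budget divided by that time, floored at 1. A worked example:

    let sampleTime = 1.0   // seconds, from --sample-time
    let oneIter = 250.0    // μs measured for a single iteration
    // max(1_000_000 / 250, 1) = 4_000 iterations per sample
    let numIters = max(Int(sampleTime * 1_000_000.0 / oneIter), 1)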
@@ -540,28 +552,28 @@ final class TestRunner {

    // Determine the scale of measurements. Re-use the calibration result if
    // it is just one measurement.
-    func calibrateMeasurements() -> Int {
-      let (numIters, oneIter) = iterationsPerSampleTime()
+    func calibrateMeasurements() async -> Int {
+      let (numIters, oneIter) = await iterationsPerSampleTime()
      if numIters == 1 { addSample(oneIter) }
      else { resetMeasurements() } // for accurate yielding reports
      return numIters
    }

    let numIters = min( // Cap to prevent overflow on 32-bit systems when scaled
      Int.max / 10_000, // by the inner loop multiplier inside the `testFn`.
-      c.numIters ?? calibrateMeasurements())
+      await c.numIters ?? (await calibrateMeasurements()))

-    let numSamples = c.numSamples ??
+    let numSamples = await c.numSamples ??
      // Compute the number of samples to measure for `sample-time`,
      // clamped in (`min-samples`, 200) range, if the `num-iters` are fixed.
-      max(c.minSamples ?? 1, min(200, c.numIters == nil ? 1 :
-        calibrateMeasurements()))
+      (max(await c.minSamples ?? 1, min(200, c.numIters == nil ? 1 :
+        await calibrateMeasurements())))

    samples.reserveCapacity(numSamples)
    logVerbose("Collecting \(numSamples) samples.")
    logVerbose("Measuring with scale \(numIters).")
    for _ in samples.count..<numSamples {
-      addSample(measure(test.name, fn: testFn, numIters: numIters))
+      addSample(await measure(test.name, fn: testFn, numIters: numIters))
    }

    test.tearDownFunction?()
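The extra parentheses and inner `await`s in this hunk follow from the async `??`: each suspension inside the autoclosure argument needs its own `await`, and the outer `await` covers the operator call itself. The sample-count policy is unchanged; a sketch of it with hypothetical names:

    // Explicit --num-samples wins; otherwise the calibrated count is clamped
    // to [min-samples, 200], collapsing to 1 while --num-iters is unset.
    func sampleCount(explicit: Int?, minSamples: Int?, fixedIters: Int?,
                     calibrated: () -> Int) -> Int {
      explicit ?? max(minSamples ?? 1,
                      min(200, fixedIters == nil ? 1 : calibrated()))
    }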
@@ -681,16 +693,16 @@ final class TestRunner {
  }

  /// Run each benchmark and emit the results in JSON
-  func runBenchmarks() {
+  func runBenchmarks() async {
    var testCount = 0
    if !c.jsonOutput {
      printTextHeading()
    }
    for (index, info) in c.tests {
      if c.jsonOutput {
-        printJSON(index: index, info: info, results: run(info))
+        printJSON(index: index, info: info, results: await run(info))
      } else {
-        printText(index: index, info: info, results: run(info))
+        printText(index: index, info: info, results: await run(info))
      }
      testCount += 1
    }
@@ -712,7 +724,7 @@ extension Hasher {
  }
}

-public func main() {
+public func main() async {
  let config = TestConfig(registeredBenchmarks)
  switch (config.action) {
  case .listTests:
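Since `main()` is now async, each benchmark executable's entry point has to await it. With Swift's top-level concurrency support the call site can be as simple as the following (an assumed call site; the actual main.swift of each executable is not shown in this diff):

    // main.swift of a benchmark executable (assumed):
    await main()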
@@ -742,7 +754,7 @@ public func main() {
    the option '--allow-nondeterministic-hashing' to the benchmarking executable.
    """)
  }
-  TestRunner(config).runBenchmarks()
+  await TestRunner(config).runBenchmarks()
  if let x = config.afterRunSleep {
    sleep(x)
  }