@@ -17,54 +17,54 @@ import Darwin
#endif

public enum BenchmarkCategories : CustomStringConvertible {
- // Validation "micro" benchmarks test a specific operation or critical path that
- // we know is important to measure.
- case validation
- // subsystems to validate and their subcategories.
- case api, Array, String, Dictionary, Codable, Set
- case sdk
- case runtime, refcount, metadata
- // Other general areas of compiled code validation.
- case abstraction, safetychecks, exceptions, bridging, concurrency
-
- // Algorithms are "micro" benchmarks that test some well-known algorithm in
- // isolation: sorting, searching, hashing, fibonacci, crypto, etc.
- case algorithm
-
- // Miniapplications are contrived to mimic some subset of application behavior
- // in a way that can be easily measured. They are larger than micro-benchmarks,
- // combining multiple APIs, data structures, or algorithms. This includes small
- // standardized benchmarks, pieces of real applications that have been extracted
- // into a benchmark, important functionality like JSON parsing, etc.
- case miniapplication
-
- // Regression benchmarks are a catch-all for less important "micro"
- // benchmarks. This could be a random piece of code that was attached to a bug
- // report. We want to make sure the optimizer as a whole continues to handle
- // this case, but don't know how applicable it is to general Swift performance
- // relative to the other micro-benchmarks. In particular, these aren't weighted
- // as highly as "validation" benchmarks and likely won't be the subject of
- // future investigation unless they significantly regress.
- case regression
-
- // Most benchmarks are assumed to be "stable" and will be regularly tracked at
- // each commit. A handful may be marked unstable if continually tracking them is
- // counterproductive.
- case unstable
-
- // CPU benchmarks represent intrinsic Swift performance. They are useful for
- // measuring a fully baked Swift implementation across different platforms and
- // hardware. The benchmark should also be reasonably applicable to real Swift
- // code--it should exercise a known performance-critical area. Typically these
- // will be drawn from the validation benchmarks once the language and standard
- // library implementation of the benchmark meets a reasonable efficiency
- // baseline. A benchmark should only be tagged "cpubench" after a full
- // performance investigation of the benchmark has been completed to determine
- // that it is a good representation of future Swift performance. Benchmarks
- // should not be tagged if they make use of an API that we plan on
- // reimplementing or call into code paths that have known opportunities for
- // significant optimization.
- case cpubench
+ // Validation "micro" benchmarks test a specific operation or critical path that
+ // we know is important to measure.
+ case validation
+ // subsystems to validate and their subcategories.
+ case api, Array, String, Dictionary, Codable, Set
+ case sdk
+ case runtime, refcount, metadata
+ // Other general areas of compiled code validation.
+ case abstraction, safetychecks, exceptions, bridging, concurrency
+
+ // Algorithms are "micro" benchmarks that test some well-known algorithm in
+ // isolation: sorting, searching, hashing, fibonacci, crypto, etc.
+ case algorithm
+
+ // Miniapplications are contrived to mimic some subset of application behavior
+ // in a way that can be easily measured. They are larger than micro-benchmarks,
+ // combining multiple APIs, data structures, or algorithms. This includes small
+ // standardized benchmarks, pieces of real applications that have been extracted
+ // into a benchmark, important functionality like JSON parsing, etc.
+ case miniapplication
+
+ // Regression benchmarks are a catch-all for less important "micro"
+ // benchmarks. This could be a random piece of code that was attached to a bug
+ // report. We want to make sure the optimizer as a whole continues to handle
+ // this case, but don't know how applicable it is to general Swift performance
+ // relative to the other micro-benchmarks. In particular, these aren't weighted
+ // as highly as "validation" benchmarks and likely won't be the subject of
+ // future investigation unless they significantly regress.
+ case regression
+
+ // Most benchmarks are assumed to be "stable" and will be regularly tracked at
+ // each commit. A handful may be marked unstable if continually tracking them is
+ // counterproductive.
+ case unstable
+
+ // CPU benchmarks represent intrinsic Swift performance. They are useful for
+ // measuring a fully baked Swift implementation across different platforms and
+ // hardware. The benchmark should also be reasonably applicable to real Swift
+ // code--it should exercise a known performance-critical area. Typically these
+ // will be drawn from the validation benchmarks once the language and standard
+ // library implementation of the benchmark meets a reasonable efficiency
+ // baseline. A benchmark should only be tagged "cpubench" after a full
+ // performance investigation of the benchmark has been completed to determine
+ // that it is a good representation of future Swift performance. Benchmarks
+ // should not be tagged if they make use of an API that we plan on
+ // reimplementing or call into code paths that have known opportunities for
+ // significant optimization.
+ case cpubench

  public var description : String {
    switch self {
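The hunk cuts off just as the CustomStringConvertible conformance begins, so the body of `description` is not shown. A minimal sketch of how such a switch plausibly continues, assuming each case simply returns its lower-case tag name; the abbreviated case list, the type name, and the returned strings here are assumptions, not taken from the commit:

// Sketch only: an abbreviated stand-in for BenchmarkCategories, assuming
// description maps each case to a lower-case tag string. The real enum in
// the commit has many more cases; the mapping shown is hypothetical.
public enum BenchmarkCategoriesSketch: CustomStringConvertible {
  case validation, algorithm, regression

  public var description: String {
    switch self {
    case .validation: return "validation"
    case .algorithm: return "algorithm"
    case .regression: return "regression"
    }
  }
}

// Hypothetical usage: joining tags the way a benchmark harness might print them.
let tags: [BenchmarkCategoriesSketch] = [.validation, .algorithm]
print(tags.map { $0.description }.joined(separator: ","))  // prints "validation,algorithm"

Conforming to CustomStringConvertible rather than hard-coding strings at call sites keeps the tag names in one place, so string interpolation and `print` pick up the category names automatically.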