@@ -63,12 +63,7 @@ def plain_text_comparison(data, metric, baseline_name=None, candidate_name=None)
     """
     Create a tabulated comparison of the baseline and the candidate for the given metric.
     """
-    # Compute additional info in new columns. In text mode, we can assume that we are
-    # comparing exactly two data sets (suffixed _0 and _1).
-    data['difference'] = data[f'{metric}_1'] - data[f'{metric}_0']
-    data['percent'] = 100 * (data['difference'] / data[f'{metric}_0'])
-
-    data = data.replace(numpy.nan, None).sort_values(by='benchmark') # avoid NaNs in tabulate output
+    data = data.replace(numpy.nan, None) # avoid NaNs in tabulate output
     headers = ['Benchmark', baseline_name, candidate_name, 'Difference', '% Difference']
     fmt = (None, '.2f', '.2f', '.2f', '.2f')
     table = data[['benchmark', f'{metric}_0', f'{metric}_1', 'difference', 'percent']].set_index('benchmark')
@@ -78,7 +73,7 @@ def create_chart(data, metric, subtitle=None, series_names=None):
     """
     Create a bar chart comparing the given metric across the provided series.
     """
-    data = data.sort_values(by='benchmark').rename(columns={f'{metric}_{i}': series_names[i] for i in range(len(series_names))})
+    data = data.rename(columns={f'{metric}_{i}': series_names[i] for i in range(len(series_names))})
     title = ' vs '.join(series_names)
     figure = plotly.express.bar(data, title=title, subtitle=subtitle, x='benchmark', y=series_names, barmode='group')
     figure.update_layout(xaxis_title='', yaxis_title='', legend_title='')
@@ -102,6 +97,15 @@ def main(argv):
     parser.add_argument('--filter', type=str, required=False,
                         help='An optional regular expression used to filter the benchmarks included in the comparison. '
                              'Only benchmarks whose names match the regular expression will be included.')
+    parser.add_argument('--sort', type=str, required=False, default='benchmark',
+                        choices=['benchmark', 'baseline', 'candidate', 'percent_diff'],
+                        help='Optional sorting criteria for displaying results. By default, results are displayed in '
+                             'alphabetical order of the benchmark. Supported sorting criteria are: '
+                             '`benchmark` (sort using the alphabetical name of the benchmark), '
+                             '`baseline` (sort using the absolute number of the baseline run), '
+                             '`candidate` (sort using the absolute number of the candidate run), '
+                             'and `percent_diff` (sort using the percent difference between the baseline and the candidate). '
+                             'Note that when more than two input files are compared, the only valid sorting order is `benchmark`.')
     parser.add_argument('--format', type=str, choices=['text', 'chart'], default='text',
                         help='Select the output format. `text` generates a plain-text comparison in tabular form, and `chart` '
                              'generates a self-contained HTML graph that can be opened in a browser. The default is `text`.')
@@ -116,6 +120,8 @@ def main(argv):
                              'This option cannot be used with the plain text output.')
     args = parser.parse_args(argv)
 
+    # Validate arguments (the values admissible for various arguments depend on other
+    # arguments, the number of inputs, etc.)
     if args.format == 'text':
         if len(args.files) != 2:
             parser.error('--format=text requires exactly two input files to compare')
@@ -124,6 +130,9 @@ def main(argv):
         if args.open:
             parser.error('Passing --open makes no sense with --format=text')
 
+    if len(args.files) != 2 and args.sort != 'benchmark':
+        parser.error('Using any sort order other than `benchmark` requires exactly two input files.')
+
     if args.series_names is None:
         args.series_names = ['Baseline']
         if len(args.files) == 2:
@@ -142,10 +151,25 @@ def main(argv):
     # Join the inputs into a single dataframe
     data = functools.reduce(lambda a, b: a.merge(b, how='outer', on='benchmark'), inputs)
 
+    # If we have exactly two data sets, compute additional info in new columns
+    if len(lnt_inputs) == 2:
+        data['difference'] = data[f'{args.metric}_1'] - data[f'{args.metric}_0']
+        data['percent'] = 100 * (data['difference'] / data[f'{args.metric}_0'])
+
     if args.filter is not None:
         keeplist = [b for b in data['benchmark'] if re.search(args.filter, b) is not None]
         data = data[data['benchmark'].isin(keeplist)]
 
+    # Sort the data by the appropriate criteria
+    if args.sort == 'benchmark':
+        data = data.sort_values(by='benchmark')
+    elif args.sort == 'baseline':
+        data = data.sort_values(by=f'{args.metric}_0')
+    elif args.sort == 'candidate':
+        data = data.sort_values(by=f'{args.metric}_1')
+    elif args.sort == 'percent_diff':
+        data = data.sort_values(by='percent')
+
     if args.format == 'chart':
         figure = create_chart(data, args.metric, subtitle=args.subtitle, series_names=args.series_names)
         do_open = args.output is None or args.open
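
For reference, a minimal sketch of what the new `--sort=percent_diff` path ends up doing, run outside the script. The DataFrame contents, the `exec_time` metric name, and the benchmark names below are illustrative assumptions; in the script itself the `_0`/`_1` suffixes come from the outer merge of the two input files.

import pandas

# Illustrative data only: two runs of the same (hypothetical) benchmarks,
# with columns suffixed _0 (baseline) and _1 (candidate) as after the merge.
data = pandas.DataFrame({
    'benchmark': ['bench_a', 'bench_b', 'bench_c'],
    'exec_time_0': [10.0, 5.0, 2.0],
    'exec_time_1': [12.0, 4.0, 2.0],
})

metric = 'exec_time'
data['difference'] = data[f'{metric}_1'] - data[f'{metric}_0']
data['percent'] = 100 * (data['difference'] / data[f'{metric}_0'])

# --sort=percent_diff orders rows by the derived 'percent' column, so the
# largest improvements and regressions end up at opposite ends of the table.
print(data.sort_values(by='percent'))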