@@ -997,30 +997,31 @@ def load_geomean_data(field, machine, limit, xaxis_date, revision_cache=None):
    ts = request.get_testsuite()
    values = session.query(sqlalchemy.sql.func.min(field.column),
                           ts.Order,
-                          sqlalchemy.sql.func.min(ts.Run.start_time)) \
+                          ts.Run.start_time,
+                          ts.Run.id) \
        .filter(ts.Run.order_id == ts.Order.id) \
        .filter(ts.Run.id == ts.Sample.run_id) \
        .filter(ts.Test.id == ts.Sample.test_id) \
        .filter(ts.Run.machine_id == machine.id) \
        .filter(field.column.isnot(None)) \
-       .group_by(ts.Order.llvm_project_revision, ts.Test, ts.Order.id)
+       .group_by(ts.Order.llvm_project_revision, ts.Test, ts.Order.id, ts.Run.id)

    if limit:
        values = values.limit(limit)

    data = multidict.multidict(
-       ((order, date), val)
-       for val, order, date in values).items()
+       ((order, date, run_id), val)
+       for val, order, date, run_id in values).items()

    # Calculate geomean of each revision.
    if xaxis_date:
-       data = [(date, [(calc_geomean(vals), order, date)])
-               for ((order, date), vals) in data]
+       data = [(date, [(calc_geomean(vals), order, date, run_id)])
+               for ((order, date, run_id), vals) in data]

        # Sort data points according to date.
        data.sort(key=lambda sample: sample[0])
    else:
-       data = [(order.llvm_project_revision, [(calc_geomean(vals), order, date)])
-               for ((order, date), vals) in data]
+       data = [(order.llvm_project_revision, [(calc_geomean(vals), order, date, run_id)])
+               for ((order, date, run_id), vals) in data]

        # Sort data points according to order (revision).
        data.sort(key=lambda sample: convert_revision(sample[0], cache=revision_cache))
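
For readers following the data flow in this hunk: the query rows are folded into a multidict keyed on (order, date, run_id), so all sample values sharing a key collect into one list, and calc_geomean then reduces each list to a single aggregated point that now carries its run ID. Below is a minimal standalone sketch of that grouping-and-reduction step, using dict-based stand-ins for LNT's multidict.multidict and calc_geomean helpers; the stand-in names and example rows are illustrative, not LNT's actual API.

    import math
    from collections import defaultdict

    # Illustrative stand-in for the multidict grouping: collect every
    # sample value that shares an (order, date, run_id) key into one list.
    def group_samples(rows):
        grouped = defaultdict(list)
        for val, order, date, run_id in rows:  # same tuple shape as the query above
            grouped[(order, date, run_id)].append(val)
        return grouped.items()

    # Illustrative geometric mean, standing in for LNT's calc_geomean helper.
    def geomean(vals):
        return math.exp(sum(math.log(v) for v in vals) / len(vals))

    rows = [(2.0, "r100", "2019-01-01", 7),
            (8.0, "r100", "2019-01-01", 7),
            (3.0, "r101", "2019-01-02", 9)]

    data = [(date, [(geomean(vals), order, date, run_id)])
            for ((order, date, run_id), vals) in group_samples(rows)]
    # The run_id now travels with every aggregated point, which is what
    # lets the trace_group() changes below attach a "runID" to each point.
    print(data)  # geomean(2.0, 8.0) == 4.0 for the r100/run 7 bucket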
@@ -1235,7 +1236,7 @@ def trace_group(test_name, field_name, machine):
        # And the date on which they were taken.
        dates = [data_array[2] for data_array in datapoints]
        # Run ID where this point was collected.
-       run_ids = [data_array[3] for data_array in datapoints if len(data_array) == 4]
+       run_ids = [data_array[3] for data_array in datapoints]

        values = [v * normalize_by for v in values]
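
Why the len(data_array) == 4 guard can be dropped: after the loader change above, every datapoint tuple is a 4-tuple (value, order, date, run_id), so run_ids can be built unconditionally and stays index-aligned with dates and orders, which is the invariant the metadata code in the next hunk relies on. A tiny illustrative check, with the tuple shape assumed from the loader above:

    # Each datapoint is (value, order, date, run_id) after the change above.
    datapoints = [(4.0, "r100", "2019-01-01", 7),
                  (3.0, "r101", "2019-01-02", 9)]

    dates = [d[2] for d in datapoints]
    run_ids = [d[3] for d in datapoints]  # no length guard needed any more
    assert len(run_ids) == len(dates)     # index-aligned: run_ids[i] matches dates[i]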
@@ -1260,20 +1261,17 @@ def trace_group(test_name, field_name, machine):
            # Generate point metadata.
            point_metadata = {"order": orders[agg_index].as_ordered_string(),
                              "orderID": orders[agg_index].id,
-                             "date": str(dates[agg_index])}
-           if run_ids:
-               point_metadata["runID"] = str(run_ids[agg_index])
-
+                             "date": str(dates[agg_index]),
+                             "runID": str(run_ids[agg_index])}
            meta.append(point_metadata)

            # Add the multisample points, if requested.
            if not hide_all_points and is_multisample:
                for i, v in enumerate(values):
                    multisample_metadata = {"order": orders[i].as_ordered_string(),
                                            "orderID": orders[i].id,
-                                           "date": str(dates[i])}
-                   if run_ids:
-                       multisample_metadata["runID"] = str(run_ids[i])
+                                           "date": str(dates[i]),
+                                           "runID": str(run_ids[i])}
                    multisample_points_data["x"].append(point_label)
                    multisample_points_data["y"].append(v)
                    multisample_points_data["meta"].append(multisample_metadata)
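
Net effect of this last hunk: "runID" becomes an unconditional key in both the aggregated-point and multisample metadata dicts, instead of being added only when the old guard had populated run_ids. A sketch of the resulting dict shape, with made-up literal values (in LNT, orders[i] is an Order ORM object and run_ids[i] comes from the query above):

    agg_index = 0
    orders_str = ["r100"]   # stand-in for orders[agg_index].as_ordered_string()
    order_ids = [42]        # stand-in for orders[agg_index].id
    dates = ["2019-01-01"]
    run_ids = [7]

    point_metadata = {"order": orders_str[agg_index],
                      "orderID": order_ids[agg_index],
                      "date": str(dates[agg_index]),
                      "runID": str(run_ids[agg_index])}
    # -> {'order': 'r100', 'orderID': 42, 'date': '2019-01-01', 'runID': '7'}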