diff --git a/tools/torchci/test_insights/daily_regression.py b/tools/torchci/test_insights/daily_regression.py
index cea6d8bea0..e440766786 100644
--- a/tools/torchci/test_insights/daily_regression.py
+++ b/tools/torchci/test_insights/daily_regression.py
@@ -11,7 +11,7 @@
 CONFIG = [
     {
-        "team": "dev-infra",
+        "team": "pytorch-dev-infra",
         "condition": lambda _: True,
         "link": FILE_REPORT_URL,
     },
@@ -74,9 +74,7 @@ def gen_regression_for_team(
         if (info["short_job_name"], info["file"]) in relevant_keys
     ]
 
-    def _sum_invoking_file_info(
-        data: list[dict[str, Any]], field: str
-    ) -> dict[str, Any]:
+    def _sum_invoking_file_info(data: list[dict[str, Any]]) -> dict[str, Any]:
         info = {
             "count": sum(item["count"] for item in data),
             "cost": sum(item["cost"] for item in data),
@@ -85,12 +83,8 @@ def _sum_invoking_file_info(
         }
         return info
 
-    agg_prev_file_info = _sum_invoking_file_info(
-        relevant_prev_invoking_file_info, "prev"
-    )
-    agg_curr_file_info = _sum_invoking_file_info(
-        relevant_curr_invoking_file_info, "curr"
-    )
+    agg_prev_file_info = _sum_invoking_file_info(relevant_prev_invoking_file_info)
+    agg_curr_file_info = _sum_invoking_file_info(relevant_curr_invoking_file_info)
 
     invoking_file_info_diff = {
         "count": {
diff --git a/tools/torchci/test_insights/file_report_generator.py b/tools/torchci/test_insights/file_report_generator.py
index 064b2c44b0..b2731e8251 100644
--- a/tools/torchci/test_insights/file_report_generator.py
+++ b/tools/torchci/test_insights/file_report_generator.py
@@ -209,6 +209,7 @@ def _get_frequency(self) -> List[Dict[str, Any]]:
         where
             j.created_at > now() - interval 8 day
             and j.created_at < now() - interval 1 day
+            and j.conclusion != 'cancelled'
         group by
             name
         """
diff --git a/torchci/pages/tests/fileReport.tsx b/torchci/pages/tests/fileReport.tsx
index 1a15f52d59..2982e6fc41 100644
--- a/torchci/pages/tests/fileReport.tsx
+++ b/torchci/pages/tests/fileReport.tsx
@@ -464,44 +464,33 @@ function Overview({
   );
 
   const groupedRows = _.map(groupByTarget, (rows, key) => {
-    // Sum within sha
-    const summedBySha = _.map(_.groupBy(rows, "sha"), (shaRows) => {
-      return _.reduce(
-        shaRows,
-        (acc, row) => {
-          acc.count += row.count || 0;
-          acc.time += row.time || 0;
-          acc.cost += row.cost || 0;
-          acc.skipped += row.skipped || 0;
-          acc.frequency += row.frequency || 0;
-          return acc;
-        },
-        { count: 0, time: 0, cost: 0, skipped: 0, frequency: 0 }
-      );
-    });
-    // the reduce across shas for average
-    return _.reduce(
-      summedBySha,
-      (acc, summed) => {
-        acc.count += summed.count;
-        acc.time += summed.time;
-        acc.cost += summed.cost;
-        acc.skipped += summed.skipped;
-        acc.frequency += summed.frequency;
+    // Sum
+    const summed = _.reduce(
+      rows,
+      (acc, row) => {
+        acc.count += row.count || 0;
+        acc.time += row.time || 0;
+        acc.cost += row.cost || 0;
+        acc.skipped += row.skipped || 0;
+        acc.frequency += row.frequency || 0;
         return acc;
       },
-      {
-        id: rows[0].id,
-        file: rows[0].file,
-        short_job_name: rows[0].short_job_name,
-        labels: key,
-        count: 0,
-        time: 0,
-        cost: 0,
-        skipped: 0,
-        frequency: 0,
-      }
+      { count: 0, time: 0, cost: 0, skipped: 0, frequency: 0 }
     );
+
+    // Average across sha data points
+    const numShas = _.uniq(rows.map((r) => r.sha)).length;
+    return {
+      id: rows[0].id,
+      file: rows[0].file,
+      short_job_name: rows[0].short_job_name,
+      labels: key,
+      count: summed.count / numShas,
+      time: summed.time / numShas,
+      cost: summed.cost / numShas,
+      skipped: summed.skipped / numShas,
+      frequency: summed.frequency / numShas,
+    };
   });
 
   return (
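
The rewrite above preserves the totals: summing per-sha subtotals and then totaling them equals totaling all rows directly, so the `_.groupBy(rows, "sha")` pass was unnecessary. The only thing grouping supplied was the divisor, which `_.uniq` on the shas now provides, and dividing by `numShas` makes the advertised average explicit. A small Python sketch of the equivalence (field names mirror the TSX, values are made up):

rows = [
    {"sha": "a", "count": 2, "cost": 1.0},
    {"sha": "a", "count": 1, "cost": 0.5},
    {"sha": "b", "count": 4, "cost": 2.5},
]

# One pass: total over all rows, then divide by the distinct-sha count.
num_shas = len({r["sha"] for r in rows})
avg = {key: sum(r[key] for r in rows) / num_shas for key in ("count", "cost")}
assert avg == {"count": 3.5, "cost": 2.0}
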
@@ -1069,14 +1058,29 @@ export default function Page() {
-          setFileFilter={setFileFilter}
-          setJobFilter={setJobFilter}
-          setLabelFilter={setLabelFilter}
+          setFileFilter={(input) => {
+            setFileFilter(input);
+            setFileRegex(false);
+          }}
+          setJobFilter={(input) => {
+            setJobFilter(input);
+            setJobRegex(false);
+          }}
+          setLabelFilter={(input) => {
+            setLabelFilter(input);
+            setLabelRegex(false);
+          }}
         />
-          setFileFilter={setFileFilter}
-          setJobFilter={setJobFilter}
+          setFileFilter={(input) => {
+            setFileFilter(input);
+            setFileRegex(false);
+          }}
+          setJobFilter={(input) => {
+            setJobFilter(input);
+            setJobRegex(false);
+          }}
         />
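
The last hunk wraps each filter setter so that typing a new filter value also turns that filter's regex toggle off, instead of leaving a stale regex mode active. A Python sketch of the same setter-wrapping pattern (state names here are illustrative, not the page's real React state):

def make_setter(set_filter, set_regex):
    # Wrap a filter setter so every update also leaves regex mode.
    def setter(value):
        set_filter(value)
        set_regex(False)
    return setter

state = {"file_filter": "", "file_regex": True}
set_file_filter = make_setter(
    lambda v: state.update(file_filter=v),
    lambda v: state.update(file_regex=v),
)
set_file_filter("test_nn")
assert state == {"file_filter": "test_nn", "file_regex": False}
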