Skip to content

Commit b427bd7

Browse files
authored
[Backend Tester] Add markdown summary in CI (#13535)
Add a nice markdown summary in the GitHub actions job. This will show up when clicking on the run on a PR. It's intended to give an easy way to see the results without needing to download the job artifact. See https://github.com/pytorch/executorch/actions/runs/17090546309 for example output.
1 parent 8a43370 commit b427bd7

File tree

4 files changed

+141
-10
lines changed

4 files changed

+141
-10
lines changed

.ci/scripts/test_backend_linux.sh

Lines changed: 7 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -10,6 +10,8 @@ SUITE=$1
1010
FLOW=$2
1111
ARTIFACT_DIR=$3
1212

13+
REPORT_FILE="$ARTIFACT_DIR/test-report-$FLOW-$SUITE.csv"
14+
1315
echo "Running backend test job for suite $SUITE, flow $FLOW."
1416
echo "Saving job artifacts to $ARTIFACT_DIR."
1517

@@ -48,4 +50,8 @@ fi
4850
# We need the runner to test the built library.
4951
PYTHON_EXECUTABLE=python CMAKE_ARGS="$EXTRA_BUILD_ARGS" .ci/scripts/setup-linux.sh --build-tool cmake --build-mode Release --editable true
5052

51-
python -m executorch.backends.test.suite.runner $SUITE --flow $FLOW --report "$ARTIFACT_DIR/test_results.csv"
53+
EXIT_CODE=0
54+
python -m executorch.backends.test.suite.runner $SUITE --flow $FLOW --report "$REPORT_FILE" || EXIT_CODE=$?
55+
56+
# Generate markdown summary.
57+
python -m executorch.backends.test.suite.generate_markdown_summary "$REPORT_FILE" > ${GITHUB_STEP_SUMMARY:-"step_summary.md"} --exit-code $EXIT_CODE

.ci/scripts/test_backend_macos.sh

Lines changed: 7 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -10,6 +10,8 @@ SUITE=$1
1010
FLOW=$2
1111
ARTIFACT_DIR=$3
1212

13+
REPORT_FILE="$ARTIFACT_DIR/test-report-$FLOW-$SUITE.csv"
14+
1315
echo "Running backend test job for suite $SUITE, flow $FLOW."
1416
echo "Saving job artifacts to $ARTIFACT_DIR."
1517

@@ -21,4 +23,8 @@ eval "$(conda shell.bash hook)"
2123
PYTHON_EXECUTABLE=python
2224
${CONDA_RUN} --no-capture-output .ci/scripts/setup-macos.sh --build-tool cmake --build-mode Release
2325

24-
${CONDA_RUN} --no-capture-output python -m executorch.backends.test.suite.runner $SUITE --flow $FLOW --report "$ARTIFACT_DIR/test_results.csv"
26+
EXIT_CODE=0
27+
${CONDA_RUN} --no-capture-output python -m executorch.backends.test.suite.runner $SUITE --flow $FLOW --report "$REPORT_FILE" || EXIT_CODE=$?
28+
29+
# Generate markdown summary.
30+
${CONDA_RUN} --no-capture-output python -m executorch.backends.test.suite.generate_markdown_summary "$REPORT_FILE" > ${GITHUB_STEP_SUMMARY:-"step_summary.md"} --exit-code $EXIT_CODE

.github/workflows/nightly.yml

Lines changed: 3 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -57,11 +57,8 @@ jobs:
5757
upload-artifact: test-report-${{ matrix.flow }}-${{ matrix.suite }}
5858
script: |
5959
set -eux
60-
# Intentionally suppressing exit code for now.
61-
# TODO (gjcomer) Remove this when jobs are stable.
62-
EXIT_CODE=0
63-
.ci/scripts/test_backend_linux.sh "${{ matrix.suite }}" "${{ matrix.flow }}" "${RUNNER_ARTIFACT_DIR}" || EXIT_CODE=$?
64-
echo "Test run complete with exit code $EXIT_CODE."
60+
61+
source .ci/scripts/test_backend_linux.sh "${{ matrix.suite }}" "${{ matrix.flow }}" "${RUNNER_ARTIFACT_DIR}"
6562
6663
backend-test-macos:
6764
uses: pytorch/test-infra/.github/workflows/macos_job.yml@main
@@ -86,6 +83,4 @@ jobs:
8683
# This is needed to get the prebuilt PyTorch wheel from S3
8784
${CONDA_RUN} --no-capture-output pip install awscli==1.37.21
8885
89-
EXIT_CODE=0
90-
.ci/scripts/test_backend_macos.sh "${{ matrix.suite }}" "${{ matrix.flow }}" "${RUNNER_ARTIFACT_DIR}" || EXIT_CODE=$?
91-
echo "Test run complete with exit code $EXIT_CODE."
86+
source .ci/scripts/test_backend_macos.sh "${{ matrix.suite }}" "${{ matrix.flow }}" "${RUNNER_ARTIFACT_DIR}"
Lines changed: 124 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,124 @@
1+
import argparse
2+
import csv
3+
import sys
4+
5+
#
6+
# A standalone script to generate a Markdown representation of a test report.
7+
# This is primarily intended to be used with GitHub actions to generate a nice
8+
# representation of the test results when looking at the action run.
9+
#
10+
# Usage: python executorch/backends/test/suite/generate_markdown_summary.py <path to test report CSV file>
11+
# Markdown is written to stdout.
12+
#
13+
14+
15+
def _escape_cell(text: str) -> str:
    """Escape characters that would break a GitHub Markdown table cell."""
    return text.replace("|", "\\|").replace("\n", "<br/>")


def generate_markdown(csv_path: str, exit_code: int = 0) -> None:  # noqa (C901)
    """Print a Markdown summary of a test-report CSV file to stdout.

    The summary contains pass/fail/skip counts, a breakdown of failure
    details, and a table of the failed tests. Intended to be redirected
    into $GITHUB_STEP_SUMMARY in CI.

    Args:
        csv_path: Path to the test-report CSV produced by the test runner.
        exit_code: Exit code of the test process; when non-zero a warning
            admonition is emitted, since the report may be incomplete.
    """
    # Print warning if exit code is non-zero.
    if exit_code != 0:
        print("> [!WARNING]")
        print(
            f"> Exit code {exit_code} was non-zero. Test process may have crashed. Check the job logs for more information.\n"
        )

    with open(csv_path, newline="", encoding="utf-8") as f:
        rows = list(csv.reader(f))

    # An empty file most likely means the runner crashed before writing the
    # header row. Emit a readable note instead of raising IndexError below.
    if not rows:
        print("Test report is empty.")
        return

    header = rows[0]
    data_rows = rows[1:]

    # Find the Result and Result Detail column indices (case-insensitive).
    result_column_index = None
    result_detail_column_index = None
    for i, col in enumerate(header):
        if col.lower() == "result":
            result_column_index = i
        elif col.lower() == "result detail":
            result_detail_column_index = i

    # Count results and collect failed tests.
    pass_count = 0
    fail_count = 0
    skip_count = 0
    failed_tests = []
    result_detail_counts = {}

    for row in data_rows:
        if result_column_index is not None and result_column_index < len(row):
            result_value = row[result_column_index].strip().lower()
            if result_value == "pass":
                pass_count += 1
            elif result_value == "fail":
                fail_count += 1
                # Keep a colorized copy of the row for the failure table.
                failed_row = row.copy()
                failed_row[result_column_index] = (
                    '<span style="color:red">Fail</span>'
                )
                failed_tests.append(failed_row)
            elif result_value == "skip":
                skip_count += 1

        # Count result details (excluding empty ones).
        if result_detail_column_index is not None and result_detail_column_index < len(
            row
        ):
            result_detail_value = row[result_detail_column_index].strip()
            if result_detail_value:
                result_detail_counts[result_detail_value] = (
                    result_detail_counts.get(result_detail_value, 0) + 1
                )

    # Generate Summary section.
    total_rows = len(data_rows)
    print("# Summary\n")
    print(f"- **Pass**: {pass_count}/{total_rows}")
    print(f"- **Fail**: {fail_count}/{total_rows}")
    print(f"- **Skip**: {skip_count}/{total_rows}")

    print("## Failure Breakdown:")
    total_rows_with_result_detail = sum(result_detail_counts.values())
    for detail, count in sorted(result_detail_counts.items()):
        print(f"- **{detail}**: {count}/{total_rows_with_result_detail}")

    # Generate Failed Tests section. Cells are escaped so a '|' or newline in
    # a test name / error detail does not break the Markdown table layout.
    print("# Failed Tests\n")
    if failed_tests:
        print("| " + " | ".join(_escape_cell(c) for c in header) + " |")
        print("|" + "|".join(["---"] * len(header)) + "|")
        for row in failed_tests:
            print("| " + " | ".join(_escape_cell(c) for c in row) + " |")
    else:
        print("No failed tests.\n")
106+
107+
def main():
    """CLI entry point: parse arguments and emit the Markdown summary.

    Exits with status 1 (after writing the error to stderr) if the report
    cannot be read or summarized.
    """
    arg_parser = argparse.ArgumentParser(
        description="Generate a Markdown representation of a test report."
    )
    arg_parser.add_argument("csv_path", help="Path to the test report CSV file.")
    arg_parser.add_argument(
        "--exit-code", type=int, default=0, help="Exit code from the test process."
    )
    opts = arg_parser.parse_args()

    try:
        generate_markdown(opts.csv_path, opts.exit_code)
    except Exception as e:
        # Surface the failure on stderr so stdout stays clean Markdown.
        print(f"Error: {e}", file=sys.stderr)
        sys.exit(1)


if __name__ == "__main__":
    main()

0 commit comments

Comments
 (0)