Skip to content

Commit a1695ac

Browse files
authored
Add support for updating README.md automatically (#12)
1 parent 1f1a036 commit a1695ac

File tree

11 files changed

+244
-50
lines changed

11 files changed

+244
-50
lines changed

.ci/benchmark.py

Lines changed: 55 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -1,15 +1,64 @@
1+
import argparse
12
import os
23
import sys
34
from src.benchmark.utils import read_metrics, to_markdown_table
45

5-
if __name__ == "__main__":
6-
# Generate statistics report
7-
statistics_path = sys.argv[1]
8-
metrics = read_metrics(statistics_path, metric="accuracy")
6+
7+
def parse_args():
    """Build the CLI parser and return the parsed arguments.

    Flags:
        --path                  (required) path to the benchmark report JSON.
        --write-gh-job-summary  append the rendered report to the GitHub job summary.
        --update-readme         splice the rendered report into README.md.
    """
    cli = argparse.ArgumentParser()
    cli.add_argument("--path", type=str, required=True, help="Report path.")
    cli.add_argument(
        "--write-gh-job-summary",
        action="store_true",
        help="Write to GitHub job summary.",
    )
    cli.add_argument(
        "--update-readme",
        action="store_true",
        help="Update statistics report in README.md.",
    )
    return cli.parse_args()
13+
14+
15+
def generate_report(path: str):
    """Read accuracy metrics from *path* and render them as a table.

    Delegates parsing to ``read_metrics`` and formatting to
    ``to_markdown_table`` (both from ``src.benchmark.utils``).
    """
    accuracy_metrics = read_metrics(path, metric="accuracy")
    return to_markdown_table(accuracy_metrics)
1019

11-
# Write to workflow job summary
20+
21+
def write_job_summary(report):
    """Append *report* to the GitHub Actions job summary.

    The destination file comes from the ``GITHUB_STEP_SUMMARY``
    environment variable, which GitHub Actions sets for each step;
    a missing variable raises ``KeyError``, same as before.
    """
    with open(os.environ["GITHUB_STEP_SUMMARY"], "a") as summary:
        summary.write("## Torchbenchmark statistics report\n")
        summary.write(report)
26+
27+
28+
def update_readme(report, readme_path=None):
    """Splice *report* between the torchbenchmark markers of a README.

    Everything between ``<!-- Torchbenchmark start -->`` and
    ``<!-- Torchbenchmark end -->`` is replaced by *report* (padded
    with blank lines); the rest of the file is left untouched.

    Args:
        report: Rendered statistics table to embed.
        readme_path: Target file. Defaults to the ``README.md`` at the
            project root (one directory above this script's directory),
            matching the original hard-coded behavior.

    Raises:
        ValueError: if either marker is missing from the file.
    """
    if readme_path is None:
        project_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
        readme_path = os.path.join(project_path, "README.md")
    with open(readme_path, "r") as f:
        readme_content = f.read()

    start_marker = "<!-- Torchbenchmark start -->"
    end_marker = "<!-- Torchbenchmark end -->"
    start_index = readme_content.find(start_marker)
    end_index = readme_content.find(end_marker)
    # Real exceptions instead of asserts: asserts vanish under `python -O`.
    if start_index == -1 or end_index == -1:
        raise ValueError(f"Torchbenchmark markers not found in {readme_path}")

    # Keep the start marker itself; replace only the span between markers.
    start_index += len(start_marker)
    new_readme_content = (
        readme_content[:start_index] + "\n\n" +
        report + "\n\n" +
        readme_content[end_index:]
    )
    with open(readme_path, "w") as f:
        f.write(new_readme_content)
50+
51+
52+
def main():
    """Entry point: build the report once, then fan it out to each sink."""
    args = parse_args()

    # Generate statistics report
    report = generate_report(args.path)

    # Write to workflow job summary
    if args.write_gh_job_summary:
        write_job_summary(report)

    # Update README.md
    if args.update_readme:
        update_readme(report)


if __name__ == "__main__":
    main()

.github/dependabot.yml

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,10 @@
1+
# Dependabot configuration: keep the repository's GitHub Actions
# dependencies up to date via automated pull requests.
version: 2
updates:
  - package-ecosystem: "github-actions"
    directory: "/"
    schedule:
      # Check for updates to GitHub Actions every week
      interval: "weekly"
    # Cap concurrent Dependabot PRs to keep review load manageable.
    open-pull-requests-limit: 2
    reviewers:
      - "shink"

.github/workflows/_ascend_npu_benchmark.yml

Lines changed: 21 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -100,6 +100,8 @@ jobs:
100100
run: |
101101
pip install -r benchmark/requirements.txt --constraint ascend_npu/requirements.txt "numpy==1.*"
102102
python benchmark/install.py --userbenchmark test_bench --continue_on_fail
103+
env:
104+
HF_ENDPOINT: https://hf-mirror.com
103105

104106
- name: Install project dependencies
105107
run: |
@@ -130,19 +132,26 @@ jobs:
130132
overwrite: true
131133

132134
- name: Write to workflow job summary
133-
id: report
134135
run: |
135-
set -x
136-
realpath benchmark/ascend_npu_benchmark.json
137-
ls benchmark
138-
cat benchmark/ascend_npu_benchmark.json
136+
python .ci/benchmark.py --write-gh-job-summary --path benchmark/ascend_npu_benchmark.json
139137
140-
output_path=$(realpath benchmark/ascend_npu_benchmark.json)
141-
python .ci/benchmark.py ${output_path}
142-
143-
# TODO(shink)
144138
- name: Update README.md
145-
if: ${{ github.event_name == 'push' }}
139+
if: ${{ github.event_name == 'push' || github.event_name == 'schedule' || github.event_name == 'workflow_dispatch' }}
140+
id: update-readme
146141
run: |
147-
echo "${{ github.event_name }}"
148-
echo "${{ github.event_name == 'push' }}"
142+
python .ci/benchmark.py --update-readme --path benchmark/ascend_npu_benchmark.json
143+
if git diff --quiet README.md; then
144+
echo "changed=false" >> $GITHUB_OUTPUT
145+
else
146+
echo "changed=true" >> $GITHUB_OUTPUT
147+
fi
148+
149+
- name: Create a pull request for changes to README.md
150+
if: ${{ steps.update-readme.outputs.changed == 'true' }}
151+
uses: peter-evans/create-pull-request@v7
152+
with:
153+
add-paths: README.md
154+
branch: ascend-npu/benchmark
155+
title: "[Ascend NPU] Update torchbenchmark report in README.md"
156+
commit-message: "Update README.md"
157+
reviewers: shink

.github/workflows/_ascend_npu_test.yml renamed to .github/workflows/_ascend_npu_ut.yml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
name: '_ascend_npu_test'
1+
name: '_ascend_npu_ut'
22

33
on:
44
workflow_call:

.github/workflows/ascend_npu_test.yml

Lines changed: 21 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -4,18 +4,34 @@ on:
44
push:
55
branches:
66
- 'main'
7-
7+
paths:
8+
- '.github/workflows/ascend_npu_test.yml'
9+
- '.github/workflows/_ascend_npu_build.yml'
10+
- '.github/workflows/_ascend_npu_ut.yml'
11+
- '.github/workflows/_ascend_npu_benchmark.yml'
12+
- '.github/actions/**'
13+
- '.ci/**'
14+
- 'ascend_npu/**'
15+
- 'src/**'
16+
- '!**/*.md'
817
pull_request:
918
branches:
1019
- 'main'
11-
20+
paths:
21+
- '.github/workflows/ascend_npu_test.yml'
22+
- '.github/workflows/_ascend_npu_build.yml'
23+
- '.github/workflows/_ascend_npu_ut.yml'
24+
- '.github/workflows/_ascend_npu_benchmark.yml'
25+
- '.github/actions/**'
26+
- '.ci/**'
27+
- 'ascend_npu/**'
28+
- 'src/**'
29+
- '!**/*.md'
1230
release:
1331
types:
1432
- 'published'
15-
1633
schedule:
1734
- cron: '0 12 * * *'
18-
1935
workflow_dispatch:
2036
inputs:
2137
runner:
@@ -87,7 +103,7 @@ jobs:
87103
needs:
88104
- prepare
89105
- build
90-
uses: ./.github/workflows/_ascend_npu_test.yml
106+
uses: ./.github/workflows/_ascend_npu_ut.yml
91107
with:
92108
runner: ${{ needs.prepare.outputs.runner }}
93109
image: ${{ needs.prepare.outputs.image }}

README.md

Lines changed: 8 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,11 @@ across various devices by running comprehensive GitHub workflows.
77

88
## Accelerator Integration Test Results
99

10-
<!-- Start -->
10+
<details>
11+
12+
<summary>Torchbenchmark statistics report</summary>
13+
14+
<!-- Torchbenchmark start -->
1115

1216
| | [torch_npu][1] |
1317
|---------------------------------|----------------|
@@ -121,7 +125,9 @@ across various devices by running comprehensive GitHub workflows.
121125

122126
[3]: https://github.com/cosdt/pytorch-integration-tests/actions/workflows/ascend_npu_test.yml
123127

124-
<!-- End -->
128+
<!-- Torchbenchmark end -->
129+
130+
</details>
125131

126132
## Overview
127133

ascend_npu/matadata.yml

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,10 @@
1+
# Ascend NPU metadata consumed by the benchmark tooling.
device: "npu"
backend_extension: "torch_npu"
link: https://github.com/Ascend/pytorch
torchbenchmark:
  # Torchbenchmark test modes to run for this device.
  test:
    - train
    - eval
  models:
    # Models excluded from the benchmark run.
    skip:
      - llava

ascend_npu/metadata.json

Lines changed: 0 additions & 11 deletions
This file was deleted.

src/benchmark/utils.py

Lines changed: 13 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -25,13 +25,13 @@ class TorchBenchModelMetric:
2525

2626

2727
def read_json(path: str) -> dict:
    """Deserialize and return the JSON document stored at *path*."""
    with open(path, "r") as source:
        return json.load(source)
3131

3232

3333
def save_file(path: str, data) -> None:
    """Write *data* verbatim to *path*, replacing any existing content."""
    with open(path, "w") as sink:
        sink.write(data)
3636

3737

@@ -54,7 +54,7 @@ def parse_to_dict(config_str: str):
5454

5555
def read_metrics(path: str, *, metric=None) -> List[TorchBenchModelMetric]:
5656
output = read_json(path)
57-
metrics_data = output.get('metrics', {})
57+
metrics_data = output.get("metrics", {})
5858

5959
metrics = []
6060
for metric_key, metric_value in metrics_data.items():
@@ -75,10 +75,11 @@ def read_metrics(path: str, *, metric=None) -> List[TorchBenchModelMetric]:
7575

7676

7777
def generate_table_rows(metrics: List[TorchBenchModelMetric]):
78-
models = list({metric.key.name for metric in metrics})
7978
devices = list({metric.key.device for metric in metrics})
79+
models = list({metric.key.name for metric in metrics})
80+
models = sorted(models, key=lambda x: x.lower())
8081

81-
def filter_result(metrics: List[TorchBenchModelMetric], *, model, device):
82+
def filter_metric(metrics: List[TorchBenchModelMetric], *, model, device):
8283
for metric in metrics:
8384
if metric.key.name == model and metric.key.device == device:
8485
return metric
@@ -87,10 +88,14 @@ def filter_result(metrics: List[TorchBenchModelMetric], *, model, device):
8788
for model in models:
8889
row = [model]
8990
for device in devices:
90-
metric = filter_result(metrics, model=model, device=device)
91+
metric = filter_metric(metrics, model=model, device=device)
9192
if metric is not None:
92-
is_pass = metric.value == "pass"
93-
cell = "✅" if is_pass else "❌"
93+
if metric.value == "pass":
94+
cell = "✅"
95+
elif metric.value == "skip":
96+
cell = "⚠️"
97+
else:
98+
cell = "❌"
9499
else:
95100
cell = ""
96101
row.append(cell)

0 commit comments

Comments
 (0)