Skip to content

Commit da448ce

Browse files
authored
Merge branch 'pytorch:main' into add-profiling-to-xnn-executor-runner-2
2 parents 1a9721f + a059981 commit da448ce

File tree

309 files changed

+7966
-3374
lines changed

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

309 files changed

+7966
-3374
lines changed
Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1 +1 @@
1-
2024-05-15
1+
2024-12-16
Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1 +1 @@
1-
2ea4b56ec872424e486c4fe2d55da061067a2ed3
1+
0a94bb432ed75cc2d950d81b2921363218a7e459

.ci/docker/conda-env-ci.txt

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,2 +1,4 @@
11
cmake=3.22.1
22
ninja=1.10.2
3+
libuv
4+
pkg-config
Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,11 @@
#!/bin/bash
# Copyright 2024 Arm Limited and/or its affiliates.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

# Install Arm dependencies for CI: set up the Arm example environment
# (including TOSA tools). The git identity is required because setup.sh
# performs commits while patching the downloaded toolchains.
#
# Fail fast on any error, treat unset variables as errors, and echo
# commands so CI logs show exactly what ran. (The original script had no
# error handling, so a failed step would be silently ignored.)
set -eux

git config --global user.email "[email protected]"
git config --global user.name "Github Executorch"
bash examples/arm/setup.sh --i-agree-to-the-contained-eula

.ci/scripts/setup-macos.sh

Lines changed: 5 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -131,5 +131,9 @@ if [[ -z "${GITHUB_RUNNER:-}" ]]; then
131131
fi
132132

133133
print_cmake_info
134-
install_executorch
134+
install_pytorch_and_domains
135+
# We build PyTorch from source here instead of using nightly. This allows CI to test against
136+
# the pinned commit from PyTorch
137+
install_executorch "use-pt-pinned-commit"
135138
build_executorch_runner "${BUILD_TOOL}"
139+
do_not_use_nightly_on_ci

.ci/scripts/utils.sh

Lines changed: 36 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -40,6 +40,42 @@ install_pip_dependencies() {
4040
popd || return
4141
}
4242

43+
install_domains() {
  # Install the domain libraries (torchaudio/torchvision) from source at
  # the commits pinned by PyTorch, caller-provided via TORCHAUDIO_VERSION
  # and TORCHVISION_VERSION.
  echo "Install torchvision and torchaudio"
  local spec
  for spec in "audio.git@${TORCHAUDIO_VERSION}" "vision.git@${TORCHVISION_VERSION}"; do
    pip install --no-use-pep517 --user "git+https://github.com/pytorch/${spec}"
  done
}
48+
49+
install_pytorch_and_domains() {
  # Build PyTorch from source at the commit pinned in .ci/docker, install
  # the wheel, then install the matching torchaudio/torchvision commits.

  # Look up the pinned PyTorch commit for this CI run.
  pushd .ci/docker || return
  TORCH_VERSION=$(< ci_commit_pins/pytorch.txt)
  popd || return

  git clone https://github.com/pytorch/pytorch.git

  # Check out the pinned commit together with its submodules.
  pushd pytorch || return
  git checkout "${TORCH_VERSION}"
  git submodule update --init --recursive

  # Build a wheel from source and install it.
  export USE_DISTRIBUTED=1
  python setup.py bdist_wheel
  pip install "$(echo dist/*.whl)"

  # PyTorch records the matching audio/vision commits in its own repo;
  # export them for install_domains.
  export TORCHAUDIO_VERSION="$(cat .github/ci_commit_pins/audio.txt)"
  export TORCHVISION_VERSION="$(cat .github/ci_commit_pins/vision.txt)"

  install_domains

  popd || return
  # Print sccache stats for debugging
  sccache --show-stats || true
}
78+
4379
install_flatc_from_source() {
4480
# NB: This function could be used to install flatbuffer from source
4581
pushd third-party/flatbuffers || return
@@ -59,17 +95,6 @@ install_flatc_from_source() {
5995
popd || return
6096
}
6197

62-
install_arm() {
63-
# NB: This function could be used to install Arm dependencies
64-
# Setup arm example environment (including TOSA tools)
65-
git config --global user.email "[email protected]"
66-
git config --global user.name "Github Executorch"
67-
bash examples/arm/setup.sh --i-agree-to-the-contained-eula
68-
69-
# Test tosa_reference flow
70-
source examples/arm/ethos-u-scratch/setup_path.sh
71-
}
72-
7398
build_executorch_runner_buck2() {
7499
# Build executorch runtime with retry as this step is flaky on macos CI
75100
retry buck2 build //examples/portable/executor_runner:executor_runner

.github/scripts/extract_benchmark_results.py

Lines changed: 76 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -5,6 +5,7 @@
55
# This source code is licensed under the BSD-style license found in the
66
# LICENSE file in the root directory of this source tree.
77

8+
import glob
89
import json
910
import logging
1011
import os
@@ -22,6 +23,7 @@
2223

2324
BENCHMARK_RESULTS_FILENAME = "benchmark_results.json"
2425
ARTIFACTS_FILENAME_REGEX = re.compile(r"(android|ios)-artifacts-(?P<job_id>\d+).json")
26+
BENCHMARK_CONFIG_REGEX = re.compile(r"The benchmark config is (?P<benchmark_config>.+)")
2527

2628
# iOS-related regexes and variables
2729
IOS_TEST_SPEC_REGEX = re.compile(
@@ -51,7 +53,7 @@ def __call__(
5153
parser.error(f"{values} is not a valid JSON file (*.json)")
5254

5355

54-
class ValidateOutputDir(Action):
56+
class ValidateDir(Action):
5557
def __call__(
5658
self,
5759
parser: ArgumentParser,
@@ -81,7 +83,7 @@ def parse_args() -> Any:
8183
"--output-dir",
8284
type=str,
8385
required=True,
84-
action=ValidateOutputDir,
86+
action=ValidateDir,
8587
help="the directory to keep the benchmark results",
8688
)
8789
parser.add_argument(
@@ -114,6 +116,13 @@ def parse_args() -> Any:
114116
required=True,
115117
help="which retry of the workflow this is",
116118
)
119+
parser.add_argument(
120+
"--benchmark-configs",
121+
type=str,
122+
required=True,
123+
action=ValidateDir,
124+
help="the directory to keep the benchmark configs",
125+
)
117126

118127
return parser.parse_args()
119128

@@ -300,9 +309,60 @@ def extract_job_id(artifacts_filename: str) -> int:
300309
return int(m.group("job_id"))
301310

302311

312+
def read_all_benchmark_configs(
    benchmark_configs_dir: str = ".",
) -> Dict[str, Dict[str, str]]:
    """
    Read all the benchmark configs that we can find under benchmark_configs_dir.

    Args:
        benchmark_configs_dir: directory containing the *.json benchmark
            config files. Defaults to the current directory, keeping the
            original zero-argument call signature working.

    Returns:
        A mapping from config filename (basename) to its parsed JSON
        content. Files that fail to parse are skipped with a warning.
    """
    benchmark_configs = {}

    # Bug fix: the original globbed f"{benchmark_configs}/*.json", i.e. the
    # string form of the (empty) accumulator dict, which can never match a
    # real directory. Glob over the configs directory instead.
    for file in glob.glob(os.path.join(benchmark_configs_dir, "*.json")):
        filename = os.path.basename(file)
        with open(file) as f:
            try:
                benchmark_configs[filename] = json.load(f)
            except json.JSONDecodeError as e:
                warning(f"Fail to load benchmark config {file}: {e}")

    return benchmark_configs
327+
328+
329+
def read_benchmark_config(
    artifact_s3_url: str, benchmark_configs_dir: str
) -> Dict[str, str]:
    """
    Get the correct benchmark config for this benchmark run.

    The test-spec output at artifact_s3_url is scanned line by line for
    BENCHMARK_CONFIG_REGEX, which names the benchmark config; the matching
    JSON file is then loaded from benchmark_configs_dir.

    Args:
        artifact_s3_url: URL of the uploaded test-spec output to scan.
        benchmark_configs_dir: local directory holding the *.json configs.

    Returns:
        The parsed benchmark config, or an empty dict if it cannot be
        found or read.
    """
    try:
        with request.urlopen(artifact_s3_url) as data:
            for line in data.read().decode("utf8").splitlines():
                m = BENCHMARK_CONFIG_REGEX.match(line)
                if not m:
                    continue

                benchmark_config = m.group("benchmark_config")
                filename = os.path.join(
                    benchmark_configs_dir, f"{benchmark_config}.json"
                )

                if not os.path.exists(filename):
                    # Include the path in the message; the original logged
                    # a constant, uninformative f-string here.
                    warning(f"There is no benchmark config {filename}")
                    continue

                with open(filename) as f:
                    try:
                        return json.load(f)
                    except json.JSONDecodeError as e:
                        warning(f"Fail to load benchmark config {filename}: {e}")
    except error.HTTPError:
        warning(f"Fail to read the test spec output at {artifact_s3_url}")

    return {}
360+
361+
303362
def transform(
304363
app_type: str,
305364
benchmark_results: List,
365+
benchmark_config: Dict[str, str],
306366
repo: str,
307367
head_branch: str,
308368
workflow_name: str,
@@ -352,29 +412,25 @@ def transform(
352412
for r in benchmark_results
353413
]
354414
elif schema_version == "v3":
355-
quantization = (
356-
r["benchmarkModel"]["quantization"]
357-
if r["benchmarkModel"]["quantization"]
358-
else "unknown"
359-
)
415+
v3_benchmark_results = []
360416
# From https://github.com/pytorch/pytorch/wiki/How-to-integrate-with-PyTorch-OSS-benchmark-database
361417
return [
362418
{
363419
"benchmark": {
364420
"name": "ExecuTorch",
365421
"mode": "inference",
366-
"dtype": quantization,
367422
"extra_info": {
368423
"app_type": app_type,
424+
# Just keep a copy of the benchmark config here
425+
"benchmark_config": json.dumps(benchmark_config),
369426
},
370427
},
371428
"model": {
372-
"name": r["benchmarkModel"]["name"],
429+
"name": benchmark_config.get("model", r["benchmarkModel"]["name"]),
373430
"type": "OSS model",
374-
"backend": r["benchmarkModel"].get("backend", ""),
375-
"extra_info": {
376-
"quantization": quantization,
377-
},
431+
"backend": benchmark_config.get(
432+
"config", r["benchmarkModel"].get("backend", "")
433+
),
378434
},
379435
"metric": {
380436
"name": r["metric"],
@@ -405,6 +461,7 @@ def main() -> None:
405461
"v2": [],
406462
"v3": [],
407463
}
464+
benchmark_config = {}
408465

409466
with open(args.artifacts) as f:
410467
for artifact in json.load(f):
@@ -420,6 +477,11 @@ def main() -> None:
420477
artifact_type = artifact["type"]
421478
artifact_s3_url = artifact["s3_url"]
422479

480+
if artifact_type == "TESTSPEC_OUTPUT":
481+
benchmark_config = read_benchmark_config(
482+
artifact_s3_url, args.benchmark_configs
483+
)
484+
423485
if app_type == "ANDROID_APP":
424486
benchmark_results = extract_android_benchmark_results(
425487
job_name, artifact_type, artifact_s3_url
@@ -435,6 +497,7 @@ def main() -> None:
435497
results = transform(
436498
app_type,
437499
benchmark_results,
500+
benchmark_config,
438501
args.repo,
439502
args.head_branch,
440503
args.workflow_name,

.github/workflows/android-perf.yml

Lines changed: 39 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -98,7 +98,10 @@ jobs:
9898
- uses: actions/checkout@v3
9999

100100
- name: Prepare the spec
101+
id: prepare
101102
shell: bash
103+
env:
104+
BENCHMARK_CONFIG: ${{ toJSON(matrix) }}
102105
working-directory: extension/benchmark/android/benchmark
103106
run: |
104107
set -eux
@@ -108,11 +111,20 @@ jobs:
108111
# We could write a script to properly use jinja here, but there is only one variable,
109112
# so let's just sed it
110113
sed -i -e 's,{{ model_path }},'"${MODEL_PATH}"',g' android-llm-device-farm-test-spec.yml.j2
111-
cp android-llm-device-farm-test-spec.yml.j2 android-llm-device-farm-test-spec.yml
112114
115+
BENCHMARK_CONFIG_ID=$(echo "${{ matrix.model }}_${{ matrix.config }}" | sed -e 's/[^A-Za-z0-9._-]/_/g')
116+
# The config for this benchmark runs, we save it in the test spec so that it can be fetched
117+
# later by the upload script
118+
sed -i -e 's,{{ benchmark_config_id }},'"${BENCHMARK_CONFIG_ID}"',g' android-llm-device-farm-test-spec.yml.j2
119+
120+
cp android-llm-device-farm-test-spec.yml.j2 android-llm-device-farm-test-spec.yml
113121
# Just print the test spec for debugging
114122
cat android-llm-device-farm-test-spec.yml
115123
124+
# Save the benchmark configs so that we can use it later in the dashboard
125+
echo "${BENCHMARK_CONFIG}" > "${BENCHMARK_CONFIG_ID}.json"
126+
echo "benchmark-config-id=${BENCHMARK_CONFIG_ID}" >> $GITHUB_OUTPUT
127+
116128
- name: Upload the spec
117129
uses: seemethere/upload-artifact-s3@v5
118130
with:
@@ -123,6 +135,16 @@ jobs:
123135
if-no-files-found: error
124136
path: extension/benchmark/android/benchmark/android-llm-device-farm-test-spec.yml
125137

138+
- name: Update the benchmark configs
139+
uses: seemethere/upload-artifact-s3@v5
140+
with:
141+
s3-bucket: gha-artifacts
142+
s3-prefix: |
143+
${{ github.repository }}/${{ github.run_id }}/artifacts/benchmark-configs/
144+
retention-days: 1
145+
if-no-files-found: error
146+
path: extension/benchmark/android/benchmark/${{ steps.prepare.outputs.benchmark-config-id }}.json
147+
126148
export-models:
127149
name: export-models
128150
uses: pytorch/test-infra/.github/workflows/linux_job.yml@main
@@ -397,6 +419,20 @@ jobs:
397419
398420
ls -lah artifacts
399421
422+
- name: Download the list of benchmark configs from S3
423+
env:
424+
BENCHMARK_CONFIGS_DIR: s3://gha-artifacts/${{ github.repository }}/${{ github.run_id }}/artifacts/benchmark-configs/
425+
shell: bash
426+
run: |
427+
set -eux
428+
429+
mkdir -p benchmark-configs
430+
pushd benchmark-configs
431+
${CONDA_RUN} aws s3 sync "${BENCHMARK_CONFIGS_DIR}" .
432+
popd
433+
434+
ls -lah benchmark-configs
435+
400436
- name: Extract the benchmark results JSON
401437
shell: bash
402438
run: |
@@ -414,7 +450,8 @@ jobs:
414450
--head-branch ${{ github.head_ref || github.ref_name }} \
415451
--workflow-name "${{ github.workflow }}" \
416452
--workflow-run-id ${{ github.run_id }} \
417-
--workflow-run-attempt ${{ github.run_attempt }}
453+
--workflow-run-attempt ${{ github.run_attempt }} \
454+
--benchmark-configs benchmark-configs
418455
done
419456
420457
for SCHEMA in v2 v3; do

0 commit comments

Comments
 (0)