
Commit 237750e

[CI][Benchmark] Decouple results from existing file structure, fetch results from git instead of local
1 parent 18e5291 commit 237750e

File tree

devops/actions/run-tests/benchmark/action.yml
devops/scripts/benchmarks/main.py
devops/scripts/benchmarks/options.py
devops/scripts/benchmarks/output_html.py

4 files changed, 81 insertions(+), 59 deletions(-)


devops/actions/run-tests/benchmark/action.yml

Lines changed: 59 additions & 53 deletions
@@ -27,16 +27,25 @@ runs:
       shell: bash
       env:
         TARGET_DEVICE: ${{ inputs.target_devices }}
+        RUNNER_NAME: ${{ runner.name }}
       run: |
         case "$RUNNER_TAG" in
-          '["Linux", "gen12"]' | '["Linux", "pvc"]') ;;
+          '["PVC_PERF"]' ) ;;
           *)
             echo "#"
-            echo "# WARNING: Only gen12/pvc on Linux is fully supported."
+            echo "# WARNING: Only specific tuned runners are fully supported."
             echo "# This workflow is not guaranteed to work with other runners."
             echo "#" ;;
         esac

+        # Ensure runner name has nothing injected
+        # TODO: in terms of security, is this overkill?
+        if [ -z "$(printf '%s' "$RUNNER_NAME" | grep -oE '^[a-zA-Z0-9_-]+$')" ]; then
+          echo "Bad runner name, please ensure runner name is [a-zA-Z0-9_-]."
+          exit 1
+        fi
+        echo "RUNNER_NAME=$RUNNER_NAME" >> $GITHUB_ENV
+
         # input.target_devices is not directly used, as this allows code injection
         case "$TARGET_DEVICE" in
           level_zero:*) ;;
@@ -46,11 +55,11 @@ runs:
             echo "# This workflow is not guaranteed to work with other backends."
             echo "#" ;;
         esac
+        echo "ONEAPI_DEVICE_SELECTOR=$TARGET_DEVICE" >> $GITHUB_ENV
+
     - name: Compute CPU core range to run benchmarks on
       shell: bash
       run: |
-        # Taken from ur-benchmark-reusable.yml:
-
        # Compute the core range for the first NUMA node; second node is used by
        # UMF. Skip the first 4 cores as the kernel is likely to schedule more
        # work on these.
@@ -67,65 +76,62 @@ runs:

         ZE_AFFINITY_MASK=0
         echo "ZE_AFFINITY_MASK=$ZE_AFFINITY_MASK" >> $GITHUB_ENV
+    - name: Checkout results repo
+      shell: bash
+      run: |
+        git clone -b unify-ci https://github.com/intel/llvm-ci-perf-results
     - name: Run compute-benchmarks
       shell: bash
       run: |
-        cat << EOF
-        #
-        # NOTE TO DEVELOPERS:
-        #
-
-        Check latter steps of the workflow: This job produces an artifact with:
-        - benchmark results from passing/failing tests
-        - log containing all failing (too slow) benchmarks
-        - log containing all erroring benchmarks
-
-        While this step in the workflow provides debugging output describing this
-        information, it might be easier to inspect the logs from the artifact
-        instead.
-
-        EOF
-        export ONEAPI_DEVICE_SELECTOR="${{ inputs.target_devices }}"
+        # TODO generate summary + display helpful message here
         export CMPLR_ROOT=./toolchain
         echo "-----"
         sycl-ls
         echo "-----"
         pip install --user --break-system-packages -r ./devops/scripts/benchmarks/requirements.txt
-        taskset -c "$CORES" ./devops/scripts/benchmarks/main.py "$(realpath ./llvm_test_workdir)" --sycl "$(realpath ./toolchain)" --save baseline --preset Minimal
+        echo "-----"
+        mkdir -p "./llvm-ci-perf-results/$RUNNER_NAME"
+        taskset -c "$CORES" ./devops/scripts/benchmarks/main.py \
+          "$(realpath ./llvm_test_workdir)" \
+          --sycl "$(realpath ./toolchain)" \
+          --save baseline \
+          --output-html remote \
+          --results-dir "./llvm-ci-perf-results/$RUNNER_NAME" \
+          --output-dir "./llvm-ci-perf-results/$RUNNER_NAME" \
+          --preset Minimal
         echo "-----"
         ls
-    # - name: Push compute-benchmarks results
-    #   if: always()
-    #   shell: bash
-    #   run: |
-    #     # TODO -- waiting on security clearance
-    #     # Load configuration values
-    #     $(python ./devops/scripts/benchmarking/load_config.py ./devops constants)
-    #
-    #     cd "./llvm-ci-perf-results"
-    #     git config user.name "SYCL Benchmarking Bot"
-    #     git config user.email "[email protected]"
-    #     git pull
-    #     git add .
-    #     # Make sure changes have been made
-    #     if git diff --quiet && git diff --cached --quiet; then
-    #       echo "No new results added, skipping push."
-    #     else
-    #       git commit -m "[GHA] Upload compute-benchmarks results from https://github.com/intel/llvm/actions/runs/${{ github.run_id }}"
-    #       git push "https://[email protected]/$SANITIZED_PERF_RES_GIT_REPO.git" "$SANITIZED_PERF_RES_GIT_BRANCH"
-    #     fi
-    - name: Find benchmark result artifact here
+    - name: Push compute-benchmarks results
       if: always()
       shell: bash
       run: |
-        cat << EOF
-        #
-        # Artifact link for benchmark results here:
-        #
-        EOF
-    - name: Archive compute-benchmark results
-      if: always()
-      uses: actions/upload-artifact@v4
-      with:
-        name: Compute-benchmark run ${{ github.run_id }} (${{ runner.name }})
-        path: ./artifact
+        # TODO redo configuration
+        # $(python ./devops/scripts/benchmarking/load_config.py ./devops constants)
+
+        cd "./llvm-ci-perf-results"
+        git config user.name "SYCL Benchmarking Bot"
+        git config user.email "[email protected]"
+        git pull
+        git add .
+        # Make sure changes have been made
+        if git diff --quiet && git diff --cached --quiet; then
+          echo "No new results added, skipping push."
+        else
+          git commit -m "[GHA] Upload compute-benchmarks results from https://github.com/intel/llvm/actions/runs/${{ github.run_id }}"
+          git push "https://[email protected]/intel/llvm-ci-perf-results.git" unify-ci
+        fi
+    # - name: Find benchmark result artifact here
+    #   if: always()
+    #   shell: bash
+    #   run: |
+    #     cat << EOF
+    #     #
+    #     # Artifact link for benchmark results here:
+    #     #
+    #     EOF
+    # - name: Archive compute-benchmark results
+    #   if: always()
+    #   uses: actions/upload-artifact@v4
+    #   with:
+    #     name: Compute-benchmark run ${{ github.run_id }} (${{ runner.name }})
+    #     path: ./artifact
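
The security-relevant detail in this file is the runner-name guard: $RUNNER_NAME is later used as a path component under ./llvm-ci-perf-results/, so the action only accepts names matching ^[a-zA-Z0-9_-]+$. A minimal Python sketch of the same allow-list idea (the is_safe_runner_name helper is illustrative, not code from this commit):

    import re
    import sys

    _SAFE_NAME = re.compile(r"^[a-zA-Z0-9_-]+$")

    def is_safe_runner_name(name: str) -> bool:
        # Accept only letters, digits, '_' and '-', so the name cannot smuggle
        # shell metacharacters or path separators into the results directory.
        return _SAFE_NAME.fullmatch(name) is not None

    if __name__ == "__main__":
        runner = sys.argv[1] if len(sys.argv) > 1 else ""
        if not is_safe_runner_name(runner):
            print("Bad runner name, please ensure runner name is [a-zA-Z0-9_-].")
            sys.exit(1)
        print(f"Results directory: ./llvm-ci-perf-results/{runner}")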

devops/scripts/benchmarks/main.py

Lines changed: 20 additions & 3 deletions
@@ -265,11 +265,15 @@ def main(directory, additional_env_vars, save_name, compare_names, filter):
             this_name, chart_data, failures, options.output_markdown
         )

-        with open("benchmark_results.md", "w") as file:
+        md_path = options.output_directory
+        if options.output_directory is None:
+            md_path = os.getcwd()
+
+        with open(os.path.join(md_path, "benchmark_results.md"), "w") as file:
             file.write(markdown_content)

         print(
-            f"Markdown with benchmark results has been written to {os.getcwd()}/benchmark_results.md"
+            f"Markdown with benchmark results has been written to {md_path}/benchmark_results.md"
         )

     saved_name = save_name if save_name is not None else this_name
@@ -283,7 +287,10 @@ def main(directory, additional_env_vars, save_name, compare_names, filter):
     compare_names.append(saved_name)

     if options.output_html:
-        generate_html(history.runs, compare_names)
+        html_path = options.output_directory
+        if options.output_directory is None:
+            html_path = os.path.join(os.path.dirname(__file__), "html")
+        generate_html(history.runs, compare_names, html_path)


 def validate_and_parse_env_args(env_args):
@@ -398,6 +405,12 @@ def validate_and_parse_env_args(env_args):
         const=options.output_html,
         choices=["local", "remote"],
     )
+    parser.add_argument(
+        "--output-dir",
+        type=str,
+        help="Location for output files, if --output-html or --output_markdown was specified.",
+        default=None
+    )
     parser.add_argument(
         "--dry-run",
         help="Do not run any actual benchmarks",
@@ -486,6 +499,10 @@ def validate_and_parse_env_args(env_args):
     if args.compute_runtime is not None:
         options.build_compute_runtime = True
         options.compute_runtime_tag = args.compute_runtime
+    if args.output_dir is not None:
+        if not os.path.isdir(args.output_dir):
+            parser.error("Specified --output-dir is not a valid path")
+        options.output_directory = os.path.abspath(args.output_dir)

     benchmark_filter = re.compile(args.filter) if args.filter else None

devops/scripts/benchmarks/options.py

Lines changed: 1 addition & 0 deletions
@@ -31,6 +31,7 @@ class Options:
     compare_max: int = 10  # average/median over how many results
     output_markdown: MarkdownSize = MarkdownSize.SHORT
     output_html: str = "local"
+    output_directory: str = None
     dry_run: bool = False
     stddev_threshold: float = 0.02
     iterations_stddev: int = 5
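
For context, the new field sits on the shared Options dataclass that the argument parsing fills in; a compressed sketch with the surrounding fields abridged (only output_directory is new in this commit):

    from dataclasses import dataclass

    @dataclass
    class Options:
        # abridged: the real class defines many more benchmarking options
        output_html: str = "local"
        output_directory: str = None  # new: target directory for markdown/HTML output

    options = Options()
    # main.py sets it roughly like this once --output-dir has been validated:
    # options.output_directory = os.path.abspath(args.output_dir)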

devops/scripts/benchmarks/output_html.py

Lines changed: 1 addition & 3 deletions
@@ -8,9 +8,7 @@
 from options import options


-def generate_html(benchmark_runs: list, compare_names: list[str]):
-    # create path to data.js in html folder
-    html_path = os.path.join(os.path.dirname(__file__), "html")
+def generate_html(benchmark_runs: list, compare_names: list[str], html_path: str):
     benchmark_runs.sort(key=lambda run: run.date, reverse=True)

     if options.output_html == "local":
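
With the signature change, the caller now decides where the HTML data goes; main.py passes either the --output-dir location or the old default next to the scripts. A hypothetical standalone call site under that assumption (the placeholder variables are illustrative, not code from this commit):

    import os
    from output_html import generate_html  # assumes the benchmarks scripts dir is importable

    benchmark_runs = []            # normally history.runs
    compare_names = ["baseline"]   # names selected for comparison

    # Old default, now supplied explicitly by the caller:
    html_path = os.path.join(os.path.dirname(__file__), "html")
    generate_html(benchmark_runs, compare_names, html_path)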
