112 changes: 59 additions & 53 deletions devops/actions/run-tests/benchmark/action.yml
@@ -27,16 +27,25 @@ runs:
shell: bash
env:
TARGET_DEVICE: ${{ inputs.target_devices }}
RUNNER_NAME: ${{ runner.name }}
run: |
case "$RUNNER_TAG" in
'["Linux", "gen12"]' | '["Linux", "pvc"]') ;;
'["PVC_PERF"]' ) ;;
*)
echo "#"
echo "# WARNING: Only gen12/pvc on Linux is fully supported."
echo "# WARNING: Only specific tuned runners are fully supported."
echo "# This workflow is not guaranteed to work with other runners."
echo "#" ;;
esac

# Ensure runner name has nothing injected
# TODO: in terms of security, is this overkill?
if [ -z "$(printf '%s' "$RUNNER_NAME" | grep -oE '^[a-zA-Z0-9_-]+$')" ]; then
echo "Bad runner name, please ensure runner name is [a-zA-Z0-9_-]."
exit 1
fi
echo "RUNNER_NAME=$RUNNER_NAME" >> $GITHUB_ENV

# input.target_devices is not directly used, as this allows code injection
case "$TARGET_DEVICE" in
level_zero:*) ;;
@@ -46,11 +55,11 @@ runs:
echo "# This workflow is not guaranteed to work with other backends."
echo "#" ;;
esac
echo "ONEAPI_DEVICE_SELECTOR=$TARGET_DEVICE" >> $GITHUB_ENV

- name: Compute CPU core range to run benchmarks on
shell: bash
run: |
# Taken from ur-benchmark-reusable.yml:

# Compute the core range for the first NUMA node; second node is used by
# UMF. Skip the first 4 cores as the kernel is likely to schedule more
# work on these.
@@ -67,65 +76,62 @@

ZE_AFFINITY_MASK=0
echo "ZE_AFFINITY_MASK=$ZE_AFFINITY_MASK" >> $GITHUB_ENV
- name: Checkout results repo
shell: bash
run: |
git clone -b unify-ci https://github.com/intel/llvm-ci-perf-results
- name: Run compute-benchmarks
shell: bash
run: |
cat << EOF
#
# NOTE TO DEVELOPERS:
#

Check latter steps of the workflow: This job produces an artifact with:
- benchmark results from passing/failing tests
- log containing all failing (too slow) benchmarks
- log containing all erroring benchmarks

While this step in the workflow provides debugging output describing this
information, it might be easier to inspect the logs from the artifact
instead.

EOF
export ONEAPI_DEVICE_SELECTOR="${{ inputs.target_devices }}"
# TODO generate summary + display helpful message here
export CMPLR_ROOT=./toolchain
echo "-----"
sycl-ls
echo "-----"
pip install --user --break-system-packages -r ./devops/scripts/benchmarks/requirements.txt
taskset -c "$CORES" ./devops/scripts/benchmarks/main.py "$(realpath ./llvm_test_workdir)" --sycl "$(realpath ./toolchain)" --save baseline --preset Minimal
echo "-----"
mkdir -p "./llvm-ci-perf-results/$RUNNER_NAME"
taskset -c "$CORES" ./devops/scripts/benchmarks/main.py \
"$(realpath ./llvm_test_workdir)" \
--sycl "$(realpath ./toolchain)" \
--save baseline \
--output-html remote \
--results-dir "./llvm-ci-perf-results/$RUNNER_NAME" \
--output-dir "./llvm-ci-perf-results/$RUNNER_NAME" \
--preset Minimal
echo "-----"
ls
# - name: Push compute-benchmarks results
# if: always()
# shell: bash
# run: |
# # TODO -- waiting on security clearance
# # Load configuration values
# $(python ./devops/scripts/benchmarking/load_config.py ./devops constants)
#
# cd "./llvm-ci-perf-results"
# git config user.name "SYCL Benchmarking Bot"
# git config user.email "[email protected]"
# git pull
# git add .
# # Make sure changes have been made
# if git diff --quiet && git diff --cached --quiet; then
# echo "No new results added, skipping push."
# else
# git commit -m "[GHA] Upload compute-benchmarks results from https://github.com/intel/llvm/actions/runs/${{ github.run_id }}"
# git push "https://[email protected]/$SANITIZED_PERF_RES_GIT_REPO.git" "$SANITIZED_PERF_RES_GIT_BRANCH"
# fi
- name: Find benchmark result artifact here
- name: Push compute-benchmarks results
if: always()
shell: bash
run: |
cat << EOF
#
# Artifact link for benchmark results here:
#
EOF
- name: Archive compute-benchmark results
if: always()
uses: actions/upload-artifact@v4
with:
name: Compute-benchmark run ${{ github.run_id }} (${{ runner.name }})
path: ./artifact
# TODO redo configuration
# $(python ./devops/scripts/benchmarking/load_config.py ./devops constants)

cd "./llvm-ci-perf-results"
git config user.name "SYCL Benchmarking Bot"
git config user.email "[email protected]"
git pull
git add .
# Make sure changes have been made
if git diff --quiet && git diff --cached --quiet; then
echo "No new results added, skipping push."
else
git commit -m "[GHA] Upload compute-benchmarks results from https://github.com/intel/llvm/actions/runs/${{ github.run_id }}"
git push "https://[email protected]/intel/llvm-ci-perf-results.git" unify-ci
Contributor:

this might fail if a different job pushed changes between this and when the repo was cloned, but we can fix that later.

Contributor Author:

Will do

Thanks for the quick review btw! Will be cherry-picking into unify-benchmark-ci

fi
# - name: Find benchmark result artifact here
# if: always()
# shell: bash
# run: |
# cat << EOF
# #
# # Artifact link for benchmark results here:
# #
# EOF
# - name: Archive compute-benchmark results
# if: always()
# uses: actions/upload-artifact@v4
# with:
# name: Compute-benchmark run ${{ github.run_id }} (${{ runner.name }})
# path: ./artifact
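As the review thread on the push step notes, the push can fail if another job updated the branch after this one cloned it. One possible follow-up (not part of this change) is a rebase-and-retry loop around the existing push command, sketched here with the same remote URL and branch:

# Hypothetical follow-up, not in this PR: rebase onto the remote branch and
# retry the push a few times before giving up.
attempts=0
until git push "https://[email protected]/intel/llvm-ci-perf-results.git" unify-ci; do
  attempts=$((attempts + 1))
  if [ "$attempts" -ge 3 ]; then
    echo "Giving up after $attempts push attempts."
    exit 1
  fi
  git pull --rebase origin unify-ci
done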
23 changes: 20 additions & 3 deletions devops/scripts/benchmarks/main.py
@@ -265,11 +265,15 @@ def main(directory, additional_env_vars, save_name, compare_names, filter):
this_name, chart_data, failures, options.output_markdown
)

with open("benchmark_results.md", "w") as file:
md_path = options.output_directory
if options.output_directory is None:
md_path = os.getcwd()

with open(os.path.join(md_path, "benchmark_results.md"), "w") as file:
file.write(markdown_content)

print(
f"Markdown with benchmark results has been written to {os.getcwd()}/benchmark_results.md"
f"Markdown with benchmark results has been written to {md_path}/benchmark_results.md"
)

saved_name = save_name if save_name is not None else this_name
@@ -283,7 +287,10 @@ def main(directory, additional_env_vars, save_name, compare_names, filter):
compare_names.append(saved_name)

if options.output_html:
generate_html(history.runs, compare_names)
html_path = options.output_directory
if options.output_directory is None:
html_path = os.path.join(os.path.dirname(__file__), "html")
generate_html(history.runs, compare_names, html_path)


def validate_and_parse_env_args(env_args):
@@ -398,6 +405,12 @@ def validate_and_parse_env_args(env_args):
const=options.output_html,
choices=["local", "remote"],
)
parser.add_argument(
"--output-dir",
type=str,
help="Location for output files, if --output-html or --output_markdown was specified.",
default=None
)
parser.add_argument(
"--dry-run",
help="Do not run any actual benchmarks",
@@ -486,6 +499,10 @@ def validate_and_parse_env_args(env_args):
if args.compute_runtime is not None:
options.build_compute_runtime = True
options.compute_runtime_tag = args.compute_runtime
if args.output_dir is not None:
if not os.path.isdir(args.output_dir):
parser.error("Specified --output-dir is not a valid path")
options.output_directory = os.path.abspath(args.output_dir)

benchmark_filter = re.compile(args.filter) if args.filter else None

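For reference, the --output-dir flag added above is the one passed by the action.yml step earlier in this diff; run by hand, an equivalent invocation (paths illustrative, mirroring that step) looks roughly like:

# Illustrative invocation mirroring the action step; paths are examples.
./devops/scripts/benchmarks/main.py "$(realpath ./llvm_test_workdir)" \
  --sycl "$(realpath ./toolchain)" \
  --save baseline \
  --output-html remote \
  --results-dir "./llvm-ci-perf-results/$RUNNER_NAME" \
  --output-dir "./llvm-ci-perf-results/$RUNNER_NAME" \
  --preset Minimal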
1 change: 1 addition & 0 deletions devops/scripts/benchmarks/options.py
@@ -31,6 +31,7 @@ class Options:
compare_max: int = 10 # average/median over how many results
output_markdown: MarkdownSize = MarkdownSize.SHORT
output_html: str = "local"
output_directory: str = None
dry_run: bool = False
stddev_threshold: float = 0.02
iterations_stddev: int = 5
4 changes: 1 addition & 3 deletions devops/scripts/benchmarks/output_html.py
@@ -8,9 +8,7 @@
from options import options


def generate_html(benchmark_runs: list, compare_names: list[str]):
# create path to data.js in html folder
html_path = os.path.join(os.path.dirname(__file__), "html")
def generate_html(benchmark_runs: list, compare_names: list[str], html_path: str):
benchmark_runs.sort(key=lambda run: run.date, reverse=True)

if options.output_html == "local":