name: 'Run Benchmarks'

# This action assumes the following prerequisites:
#
# - SYCL is placed in ./toolchain -- TODO change this
# - /devops has been checked out in ./devops.
# - env.GITHUB_TOKEN was properly set, because according to Github, that's
#   apparently the recommended way to pass a secret into a github action:
#   https://docs.github.com/en/actions/security-for-github-actions/security-guides/using-secrets-in-github-actions#accessing-your-secrets
#
# - env.RUNNER_TAG set to the runner tag used to run this workflow: Currently,
#   only specific runners are fully supported.

inputs:
  target_devices:
    # Device selector string (e.g. "level_zero:gpu"); exported as
    # ONEAPI_DEVICE_SELECTOR after validation below.
    description: 'Device selector (ONEAPI_DEVICE_SELECTOR syntax) to benchmark on'
    # NOTE(review): "type" is not a documented key for composite-action inputs
    # (only workflow_call inputs support it); GitHub ignores it here.
    type: string
    required: true
runs:
  using: "composite"
  steps:
    - name: Check specified runner type / target backend
      shell: bash
      # Pass untrusted values through env vars instead of interpolating them
      # into the script, so they cannot be used for shell injection.
      env:
        TARGET_DEVICE: ${{ inputs.target_devices }}
        RUNNER_NAME: ${{ runner.name }}
      run: |
        # RUNNER_TAG is supplied by the calling workflow's env (see the
        # prerequisites comment at the top of this file).
        case "$RUNNER_TAG" in
          '["PVC_PERF"]' ) ;;
          *)
            echo "#"
            echo "# WARNING: Only specific tuned runners are fully supported."
            echo "# This workflow is not guaranteed to work with other runners."
            echo "#" ;;
        esac

        # Ensure runner name has nothing injected
        # TODO: in terms of security, is this overkill?
        if [ -z "$(printf '%s' "$RUNNER_NAME" | grep -oE '^[a-zA-Z0-9_-]+$')" ]; then
          echo "Bad runner name, please ensure runner name is [a-zA-Z0-9_-]."
          exit 1
        fi
        echo "RUNNER_NAME=$RUNNER_NAME" >> "$GITHUB_ENV"

        # input.target_devices is not directly used, as this allows code injection
        case "$TARGET_DEVICE" in
          level_zero:*) ;;
          *)
            echo "#"
            echo "# WARNING: Only level_zero backend is fully supported."
            echo "# This workflow is not guaranteed to work with other backends."
            echo "#" ;;
        esac
        echo "ONEAPI_DEVICE_SELECTOR=$TARGET_DEVICE" >> "$GITHUB_ENV"
| 57 | + - name: Compute CPU core range to run benchmarks on |
| 58 | + shell: bash |
| 59 | + run: | |
| 60 | + # Compute the core range for the first NUMA node; second node is used by |
| 61 | + # UMF. Skip the first 4 cores as the kernel is likely to schedule more |
| 62 | + # work on these. |
| 63 | + CORES="$(lscpu | awk ' |
| 64 | + /NUMA node0 CPU|On-line CPU/ {line=$0} |
| 65 | + END { |
| 66 | + split(line, a, " ") |
| 67 | + split(a[4], b, ",") |
| 68 | + sub(/^0/, "4", b[1]) |
| 69 | + print b[1] |
| 70 | + }')" |
| 71 | + echo "CPU core range to use: $CORES" |
| 72 | + echo "CORES=$CORES" >> $GITHUB_ENV |
| 73 | +
|
| 74 | + ZE_AFFINITY_MASK=0 |
| 75 | + echo "ZE_AFFINITY_MASK=$ZE_AFFINITY_MASK" >> $GITHUB_ENV |
| 76 | + - name: Checkout results repo |
| 77 | + shell: bash |
| 78 | + run: | |
| 79 | + git clone -b unify-ci https://github.com/intel/llvm-ci-perf-results |
| 80 | + - name: Run compute-benchmarks |
| 81 | + shell: bash |
| 82 | + run: | |
| 83 | + # TODO generate summary + display helpful message here |
| 84 | + export CMPLR_ROOT=./toolchain |
| 85 | + echo "-----" |
| 86 | + sycl-ls |
| 87 | + echo "-----" |
| 88 | + pip install --user --break-system-packages -r ./devops/scripts/benchmarks/requirements.txt |
| 89 | + echo "-----" |
| 90 | + mkdir -p "./llvm-ci-perf-results/$RUNNER_NAME" |
| 91 | + taskset -c "$CORES" ./devops/scripts/benchmarks/main.py \ |
| 92 | + "$(realpath ./llvm_test_workdir)" \ |
| 93 | + --sycl "$(realpath ./toolchain)" \ |
| 94 | + --save baseline \ |
| 95 | + --output-html remote \ |
| 96 | + --results-dir "./llvm-ci-perf-results/$RUNNER_NAME" \ |
| 97 | + --output-dir "./llvm-ci-perf-results/$RUNNER_NAME" \ |
| 98 | + --preset Minimal |
| 99 | + echo "-----" |
| 100 | + ls |
| 101 | + - name: Push compute-benchmarks results |
| 102 | + if: always() |
| 103 | + shell: bash |
| 104 | + run: | |
| 105 | + # TODO redo configuration |
| 106 | + # $(python ./devops/scripts/benchmarking/load_config.py ./devops constants) |
| 107 | +
|
| 108 | + cd "./llvm-ci-perf-results" |
| 109 | + git config user.name "SYCL Benchmarking Bot" |
| 110 | + git config user.email "[email protected]" |
| 111 | + git pull |
| 112 | + git add . |
| 113 | + # Make sure changes have been made |
| 114 | + if git diff --quiet && git diff --cached --quiet; then |
| 115 | + echo "No new results added, skipping push." |
| 116 | + else |
| 117 | + git commit -m "[GHA] Upload compute-benchmarks results from https://github.com/intel/llvm/actions/runs/${{ github.run_id }}" |
| 118 | + git push "https://[email protected]/intel/llvm-ci-perf-results.git" unify-ci |
| 119 | + fi |
#    - name: Find benchmark result artifact here
#      if: always()
#      shell: bash
#      run: |
#        cat << EOF
#        #
#        # Artifact link for benchmark results here:
#        #
#        EOF
#    - name: Archive compute-benchmark results
#      if: always()
#      uses: actions/upload-artifact@v4
#      with:
#        name: Compute-benchmark run ${{ github.run_id }} (${{ runner.name }})
#        path: ./artifact