@@ -27,16 +27,25 @@ runs:
2727 shell : bash
2828 env :
2929 TARGET_DEVICE : ${{ inputs.target_devices }}
30+ RUNNER_NAME : ${{ runner.name }}
3031 run : |
3132 case "$RUNNER_TAG" in
32- '["Linux", "gen12" ]' | '["Linux", "pvc"]' ) ;;
33+ '["PVC_PERF" ]' ) ;;
3334 *)
3435 echo "#"
35- echo "# WARNING: Only gen12/pvc on Linux is fully supported."
36+ echo "# WARNING: Only specific tuned runners are fully supported."
3637 echo "# This workflow is not guaranteed to work with other runners."
3738 echo "#" ;;
3839 esac
3940
41+ # Ensure runner name has nothing injected
42+ # TODO: in terms of security, is this overkill?
43+ if [ -z "$(printf '%s' "$RUNNER_NAME" | grep -oE '^[a-zA-Z0-9_-]+$')" ]; then
44+ echo "Bad runner name, please ensure runner name is [a-zA-Z0-9_-]."
45+ exit 1
46+ fi
47+ echo "RUNNER_NAME=$RUNNER_NAME" >> $GITHUB_ENV
48+
4049 # input.target_devices is not directly used, as this allows code injection
4150 case "$TARGET_DEVICE" in
4251 level_zero:*) ;;
@@ -46,11 +55,11 @@ runs:
4655 echo "# This workflow is not guaranteed to work with other backends."
4756 echo "#" ;;
4857 esac
58+ echo "ONEAPI_DEVICE_SELECTOR=$TARGET_DEVICE" >> $GITHUB_ENV
59+
4960 - name : Compute CPU core range to run benchmarks on
5061 shell : bash
5162 run : |
52- # Taken from ur-benchmark-reusable.yml:
53-
5463 # Compute the core range for the first NUMA node; second node is used by
5564 # UMF. Skip the first 4 cores as the kernel is likely to schedule more
5665 # work on these.
@@ -67,65 +76,62 @@ runs:
6776
6877 ZE_AFFINITY_MASK=0
6978 echo "ZE_AFFINITY_MASK=$ZE_AFFINITY_MASK" >> $GITHUB_ENV
79+ - name : Checkout results repo
80+ shell : bash
81+ run : |
82+ git clone -b unify-ci https://github.com/intel/llvm-ci-perf-results
7083 - name : Run compute-benchmarks
7184 shell : bash
7285 run : |
73- cat << EOF
74- #
75- # NOTE TO DEVELOPERS:
76- #
77-
78- Check latter steps of the workflow: This job produces an artifact with:
79- - benchmark results from passing/failing tests
80- - log containing all failing (too slow) benchmarks
81- - log containing all erroring benchmarks
82-
83- While this step in the workflow provides debugging output describing this
84- information, it might be easier to inspect the logs from the artifact
85- instead.
86-
87- EOF
88- export ONEAPI_DEVICE_SELECTOR="${{ inputs.target_devices }}"
86+ # TODO generate summary + display helpful message here
8987 export CMPLR_ROOT=./toolchain
9088 echo "-----"
9189 sycl-ls
9290 echo "-----"
9391 pip install --user --break-system-packages -r ./devops/scripts/benchmarks/requirements.txt
94- taskset -c "$CORES" ./devops/scripts/benchmarks/main.py "$(realpath ./llvm_test_workdir)" --sycl "$(realpath ./toolchain)" --save baseline --preset Minimal
92+ echo "-----"
93+ mkdir -p "./llvm-ci-perf-results/$RUNNER_NAME"
94+ taskset -c "$CORES" ./devops/scripts/benchmarks/main.py \
95+ "$(realpath ./llvm_test_workdir)" \
96+ --sycl "$(realpath ./toolchain)" \
97+ --save baseline \
98+ --output-html remote \
99+	          --results-dir "./llvm-ci-perf-results/$RUNNER_NAME" \
100+ --output-dir ./llvm_test_workdir \
101+ --preset Minimal
95102 echo "-----"
96103 ls
97- # - name: Push compute-benchmarks results
98- # if: always()
99- # shell: bash
100- # run: |
101- # # TODO -- waiting on security clearance
102- # # Load configuration values
103- # $(python ./devops/scripts/benchmarking/load_config.py ./devops constants)
104- #
105- # cd "./llvm-ci-perf-results"
106- # git config user.name "SYCL Benchmarking Bot"
107-	      #    git config user.email "[email protected]"
108-	      #    git pull
109- # git add .
110- # # Make sure changes have been made
111- # if git diff --quiet && git diff --cached --quiet; then
112- # echo "No new results added, skipping push."
113- # else
114- # git commit -m "[GHA] Upload compute-benchmarks results from https://github.com/intel/llvm/actions/runs/${{ github.run_id }}"
115-	      #      git push "https://[email protected]/$SANITIZED_PERF_RES_GIT_REPO.git" "$SANITIZED_PERF_RES_GIT_BRANCH"
116-	      #    fi
117- - name : Find benchmark result artifact here
104+ - name : Push compute-benchmarks results
118105 if : always()
119106 shell : bash
120107 run : |
121- cat << EOF
122- #
123- # Artifact link for benchmark results here:
124- #
125- EOF
126- - name : Archive compute-benchmark results
127- if : always()
128- uses : actions/upload-artifact@v4
129- with :
130- name : Compute-benchmark run ${{ github.run_id }} (${{ runner.name }})
131- path : ./artifact
108+ # TODO redo configuration
109+ # $(python ./devops/scripts/benchmarking/load_config.py ./devops constants)
110+
111+ cd "./llvm-ci-perf-results"
112+ git config user.name "SYCL Benchmarking Bot"
113+	        git config user.email "[email protected]"
114+	        git pull
115+ git add .
116+ # Make sure changes have been made
117+ if git diff --quiet && git diff --cached --quiet; then
118+ echo "No new results added, skipping push."
119+ else
120+ git commit -m "[GHA] Upload compute-benchmarks results from https://github.com/intel/llvm/actions/runs/${{ github.run_id }}"
121+	          git push "https://[email protected]/intel/llvm-ci-perf-results.git" unify-ci
122+	        fi
123+ # - name: Find benchmark result artifact here
124+ # if: always()
125+ # shell: bash
126+ # run: |
127+ # cat << EOF
128+ # #
129+ # # Artifact link for benchmark results here:
130+ # #
131+ # EOF
132+ # - name: Archive compute-benchmark results
133+ # if: always()
134+ # uses: actions/upload-artifact@v4
135+ # with:
136+ # name: Compute-benchmark run ${{ github.run_id }} (${{ runner.name }})
137+ # path: ./artifact
0 commit comments