@@ -28,8 +28,18 @@
           deployment branch will be used.
         required: false
         default: ''
+      save_name:
+        type: string
+        description: |
+          Specify a custom name to use for the benchmark result. If uploading
+          results, this will be the name used to refer to results from the
+          current run.
+        required: false
+        default: ''
       upload_results:
         type: string  # true/false: workflow_dispatch does not support booleans
+        description: |
+          Upload results to https://intel.github.io/llvm/benchmarks/.
         required: true
       runner:
         type: string
@@ -67,8 +77,14 @@
           Leave both pr_no and commit_hash empty to use latest commit.
         required: false
         default: ''
+      save_name:
+        type: string
+        description: |
+          Name to use for the benchmark result.
+        required: false
+        default: ''
       upload_results:
-        description: 'Save and upload results'
+        description: 'Save and upload results (to https://intel.github.io/llvm/benchmarks)'
         type: choice
         options:
           - false
@@ -90,16 +106,61 @@
 permissions: read-all
 
 jobs:
+  sanitize_inputs:
+    name: Sanitize inputs
+    runs-on: ubuntu-latest
+    env:
+      COMMIT_HASH: ${{ inputs.commit_hash }}
+      PR_NO: ${{ inputs.pr_no }}
+      SAVE_NAME: ${{ inputs.save_name }}
+    outputs:
+      benchmark_save_name: ${{ steps.sanitize.outputs.benchmark_save_name }}
+      build_ref: ${{ steps.sanitize.outputs.build_ref }}
+    steps:
+      - id: sanitize
+        run: |
+          # Validate user inputs:
+          # usage: check_nonempty <var> <regex to check var against> <err message>
+          check_nonempty() {
+            [ -z "$1" ] && return
+            if [ -z "$(echo "$1" | grep -P "$2")" ]; then
+              echo "$3"
+              exit 1
+            fi
+          }
+          check_nonempty "$COMMIT_HASH" '^[0-9a-f]{7,}$' "Bad commit hash (or hash too short)."
+          check_nonempty "$PR_NO" '^[0-9]+$' "Bad PR number."
+          check_nonempty "$SAVE_NAME" '^[A-Za-z][A-Za-z0-9_-]+$' "Bad save name."
+
+          BENCHMARK_SAVE_NAME=""
+          BUILD_REF="${{ github.ref }}"
+          if [ -n "$SAVE_NAME" ]; then
+            BENCHMARK_SAVE_NAME="$(echo "$SAVE_NAME" | tr -cd 'A-Za-z0-9_-')"
+          fi
+          if [ -n "$COMMIT_HASH" ]; then
+            echo "Using commit hash $COMMIT_HASH for build..."
+            BUILD_REF="$COMMIT_HASH"
+            shortened_commit="$(echo "$COMMIT_HASH" | cut -c 1-7)"
+            [ -z "$BENCHMARK_SAVE_NAME" ] && BENCHMARK_SAVE_NAME="Commit_${shortened_commit}"
+          elif [ -n "$PR_NO" ]; then
+            echo "Using PR no. $PR_NO for build..."
+            BUILD_REF="refs/pull/$PR_NO/head"
+            [ -z "$BENCHMARK_SAVE_NAME" ] && BENCHMARK_SAVE_NAME="PR_${PR_NO}"
+          fi
+          [ -z "$BENCHMARK_SAVE_NAME" ] && BENCHMARK_SAVE_NAME="Baseline"
+
+          echo "benchmark_save_name=$BENCHMARK_SAVE_NAME" >> $GITHUB_OUTPUT
+          echo "build_ref=$BUILD_REF" >> $GITHUB_OUTPUT
+
+          echo "Final sanitized values:"
+          cat $GITHUB_OUTPUT
+
   build_sycl:
     name: Build SYCL
+    needs: [sanitize_inputs]
     uses: ./.github/workflows/sycl-linux-build.yml
     with:
-      build_ref: |
-        ${{
-          inputs.commit_hash != '' && inputs.commit_hash ||
-          inputs.pr_no != '' && format('refs/pull/{0}/head', inputs.pr_no) ||
-          github.ref
-        }}
+      build_ref: ${{ needs.sanitize_inputs.outputs.build_ref }}
       build_cache_root: "/__w/"
       build_cache_suffix: "prod_noassert"
       build_configure_extra_args: "--no-assertions"
@@ -112,14 +173,12 @@ jobs:
 
   run_benchmarks_build:
     name: Run Benchmarks on Build
-    needs: [build_sycl]
+    needs: [build_sycl, sanitize_inputs]
     strategy:
       matrix:
         include:
-          - ref: ${{ inputs.commit_hash != '' && inputs.commit_hash || inputs.pr_no != '' && format('refs/pull/{0}/head', inputs.pr_no) || github.ref }}
-            save_name: ${{ inputs.commit_hash != '' && format('Commit{0}', inputs.commit_hash) || inputs.pr_no != '' && format('PR{0}', inputs.pr_no) || 'Baseline' }}
-            # Set default values if not specified:
-            runner: ${{ inputs.runner || '["PVC_PERF"]' }}
+          # Set default values if not specified:
+          - runner: ${{ inputs.runner || '["PVC_PERF"]' }}
             backend: ${{ inputs.backend || 'level_zero:gpu' }}
     uses: ./.github/workflows/sycl-linux-run-tests.yml
     secrets: inherit
@@ -131,9 +190,9 @@ jobs:
       target_devices: ${{ matrix.backend }}
       tests_selector: benchmarks
       benchmark_upload_results: ${{ inputs.upload_results }}
-      benchmark_save_name: ${{ matrix.save_name }}
+      benchmark_save_name: ${{ needs.sanitize_inputs.outputs.benchmark_save_name }}
       benchmark_preset: ${{ inputs.preset }}
-      repo_ref: ${{ matrix.ref }}
+      repo_ref: ${{ needs.sanitize_inputs.outputs.build_ref }}
       toolchain_artifact: ${{ needs.build_sycl.outputs.toolchain_artifact }}
       toolchain_artifact_filename: ${{ needs.build_sycl.outputs.toolchain_artifact_filename }}
       toolchain_decompress_command: ${{ needs.build_sycl.outputs.toolchain_decompress_command }}
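
A minimal sketch of how the new inputs might be supplied when dispatching this workflow manually with the GitHub CLI. The workflow file name (sycl-benchmark.yml) and the target ref (sycl) are assumptions for illustration, not taken from this diff; substitute the actual workflow and branch:

    # Hypothetical example: benchmark PR 12345 under a custom save name,
    # without uploading to https://intel.github.io/llvm/benchmarks/.
    gh workflow run sycl-benchmark.yml --ref sycl \
      -f pr_no=12345 \
      -f save_name=MyFeature_run1 \
      -f upload_results=false

Leaving pr_no, commit_hash, and save_name empty makes the sanitize_inputs job fall back to github.ref and the "Baseline" save name.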