@@ -19,6 +19,10 @@ inputs:
1919  upload_results :
2020    type : string 
2121    required : True 
22+   build_hash :
23+     type : string 
24+     required : False 
25+     default : '' 
2226
2327runs :
2428  using : " composite" 
8185    run : | 
8286      git clone -b unify-ci https://github.com/intel/llvm-ci-perf-results 
8387   - name : Run compute-benchmarks 
88+     env :
89+       BUILD_HASH : ${{ inputs.build_hash }} 
8490    shell : bash 
8591    run : | 
8692      # TODO generate summary + display helpful message here 
@@ -91,16 +97,22 @@ runs:
9197      pip install --user --break-system-packages -r ./devops/scripts/benchmarks/requirements.txt 
9298      echo "-----" 
9399      mkdir -p "./llvm-ci-perf-results/$RUNNER_NAME" 
100+ 
101+       # TODO accommodate different GPUs and backends 
102+       SAVE_NAME="Baseline_PVC_L0" 
103+       if [ -n "$BUILD_HASH" ]; then 
104+           SAVE_NAME="Commit_PVC_$BUILD_HASH" 
105+       fi 
106+ 
94107      taskset -c "$CORES" ./devops/scripts/benchmarks/main.py \ 
95108        "$(realpath ./llvm_test_workdir)" \ 
96109        --sycl "$(realpath ./toolchain)" \ 
97-         --save baseline  \ 
110+         --save "$SAVE_NAME"  \ 
98111        --output-html remote \ 
99112        --results-dir "./llvm-ci-perf-results/$RUNNER_NAME" \ 
100113        --output-dir "./llvm-ci-perf-results/$RUNNER_NAME" \ 
101114        --preset Minimal 
102115      echo "-----" 
103-       ls 
104116   - name : Push compute-benchmarks results 
105117    if : inputs.upload_results == 'true' && always() 
106118    shell : bash 
@@ -120,18 +132,4 @@ runs:
120132        git commit -m "[GHA] Upload compute-benchmarks results from https://github.com/intel/llvm/actions/runs/${{ github.run_id }}" 
121133        git push "https://$GITHUB_TOKEN@github.com/intel/llvm-ci-perf-results.git" unify-ci 
122134      fi 
123- #   - name: Find benchmark result artifact here
124- #     if: always()
125- #     shell: bash
126- #     run: |
127- #       cat << EOF
128- #       #
129- #       # Artifact link for benchmark results here:
130- #       #
131- #       EOF
132- #   - name: Archive compute-benchmark results
133- #     if: always()
134- #     uses: actions/upload-artifact@v4
135- #     with:
136- #       name: Compute-benchmark run ${{ github.run_id }} (${{ runner.name }})
137- #       path: ./artifact
135+ 
0 commit comments