 # Executes benchmarks implemented in this repository using scripts
-# for results visualization from intel/llvm (unified-runtime dir).
+# for results visualization from intel/llvm.
 name: Benchmarks

 on:
@@ ... @@
         required: false
         type: string
         default: ''
-      upload_report:
+      runner:
         required: false
-        type: boolean
-        default: false
+        type: string
+        default: 'L0_PERF'

 permissions:
-  contents: read
+  contents: write
   pull-requests: write

 env:
@@ ... @@
     name: Benchmarks
     # run only on upstream; forks will not have the HW
     if: github.repository == 'oneapi-src/unified-memory-framework'
-    runs-on: L0_PERF
+    runs-on: ${{ inputs.runner }}

     steps:
-    # Workspace on self-hosted runners is not cleaned automatically.
-    # We have to delete the files created outside of using actions.
-    - name: Cleanup self-hosted workspace
-      if: always()
-      run: |
-        ls -la ./
-        rm -rf ./* || true
-
     - name: Add comment to PR
       uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7.0.1
       if: ${{ always() && inputs.pr_no != 0 }}
@@ -97,23 +89,32 @@ jobs:
     - name: Build UMF
       run: cmake --build ${{env.BUILD_DIR}} -j $(nproc)

-    # Get scripts for benchmark data visualization.
-    # Use specific tag, as the scripts or files' location may change.
-    - name: Checkout SYCL
+    - name: Checkout UMF results branch
+      uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+      with:
+        ref: benchmark-results
+        path: results-repo
+
+    # Get scripts for benchmark data visualization (from SYCL repo).
+    # Use specific ref, as the scripts or files' location may change.
+    - name: Checkout benchmark scripts
       uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
       with:
         repository: intel/llvm
-        # [BENCHMARK] fix default timeout parameter
-        # https://github.com/intel/llvm/pull/17412
-        ref: 357e9e0b253b7eba105d044e38452b3c09169f8a
-        path: sycl-repo
-        fetch-depth: 1
+        # Note: The same ref is used in docs build (for dashboard generation)!
+        #
+        # 20.03.2025
+        # branch: unify-benchmark-ci
+        ref: cae7049c78c697b3ac94f931716d9efb53addcd8
+        path: sc
+        sparse-checkout: |
+          devops/scripts/benchmarks

     - name: Install benchmarking scripts deps
       run: |
         python -m venv .venv
         source .venv/bin/activate
-        pip install -r ${{github.workspace}}/sycl-repo/unified-runtime/third_party/benchmark_requirements.txt
+        pip install -r ${{github.workspace}}/sc/devops/scripts/benchmarks/requirements.txt

     - name: Set core range and GPU mask
       run: |
@@ -135,22 +136,21 @@ jobs:

     - name: Run UMF benchmarks
       id: benchmarks
-      working-directory: ${{env.BUILD_DIR}}
       run: >
-        source ${{github.workspace}}/.venv/bin/activate &&
-        taskset -c ${{ env.CORES }} ${{ github.workspace }}/sycl-repo/unified-runtime/scripts/benchmarks/main.py
+        source .venv/bin/activate &&
+        taskset -c ${{ env.CORES }} ./sc/devops/scripts/benchmarks/main.py
         ~/bench_workdir_umf
         --umf ${{env.BUILD_DIR}}
-        --compare baseline
         --timeout 3000
-        ${{ inputs.upload_report && '--output-html' || '' }}
-        ${{ inputs.pr_no != 0 && '--output-markdown' || '' }}
+        --output-html remote
+        --results-dir ${{ github.workspace }}/results-repo
+        --output-markdown
         ${{ inputs.bench_script_params }}

     # In case it failed to add a comment, we can still print the results.
     - name: Print benchmark results
-      if: ${{ always() && inputs.pr_no != 0 }}
-      run: cat ${{env.BUILD_DIR}}/benchmark_results.md
+      if: ${{ always() }}
+      run: cat ${{ github.workspace }}/benchmark_results.md || true

     - name: Add comment to PR
       uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7.0.1
@@ -160,7 +160,7 @@ jobs:
           let markdown = ""
           try {
             const fs = require('fs');
-            markdown = fs.readFileSync('${{env.BUILD_DIR}}/benchmark_results.md', 'utf8');
+            markdown = fs.readFileSync('${{ github.workspace }}/benchmark_results.md', 'utf8');
           } catch(err) {
           }

@@ -177,15 +177,42 @@ jobs:
             repo: context.repo.repo,
             body: body
           })
-
-    - name: Upload HTML report
-      if: ${{ always() && inputs.upload_report }}
-      uses: actions/cache/save@1bd1e32a3bdc45362d1e726936510720a7c30a57 # v4.2.0
-      with:
-        path: umf-repo/build/benchmark_results.html
-        key: benchmark-results-${{ github.run_id }}

-    - name: Get information about platform
-      if: ${{ always() }}
-      working-directory: ${{env.UMF_DIR}}
-      run: .github/scripts/get_system_info.sh
+    - name: Commit data.json and results directory
+      working-directory: results-repo
+      run: |
+        git config --global user.name "GitHub Actions Bot"
+        git config --global user.email "[email protected]"
+
+        for attempt in {1..5}; do
+          echo "Attempt #$attempt to push changes"
+
+          rm -f data.json
+          cp ${{ github.workspace }}/sc/devops/scripts/benchmarks/html/data.json .
+
+          git add data.json results/
+          git commit -m "Add benchmark results and data.json"
+
+          results_file=$(git diff HEAD~1 --name-only -- results/ | head -n 1)
+
+          if git push origin benchmark-results; then
+            echo "Push succeeded"
+            break
+          fi
+
+          echo "Push failed, retrying..."
+
+          if [ -n "$results_file" ]; then
+            mv $results_file ${{ github.workspace }}/temp_$(basename $results_file)
+
+            git reset --hard origin/benchmark-results
+            git pull origin benchmark-results
+
+            new_file="results/$(basename "$results_file")"
+            mv ${{ github.workspace }}/temp_$(basename $results_file) $new_file
+          fi
+
+          echo "Regenerating data.json"
+          (cd ${{ github.workspace }} && ${{ github.workspace }}/sc/devops/scripts/benchmarks/main.py ~/bench_workdir_umf --dry-run --results-dir ${{ github.workspace }}/results-repo --output-html remote)
+
+        done
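
For reference, below is a minimal sketch of how a caller job could pass the new runner input when invoking this file as a reusable workflow. The caller layout, the path to this workflow file, and the pr_no value are assumptions; the remaining inputs keep their declared defaults.

# Hypothetical caller job; the path to this workflow file is assumed.
jobs:
  benchmarks:
    # Grant permissions matching what the called workflow declares.
    permissions:
      contents: write
      pull-requests: write
    uses: ./.github/workflows/benchmarks.yml
    with:
      pr_no: 0            # 0 skips the PR-comment steps guarded by inputs.pr_no != 0
      runner: 'L0_PERF'   # default; override to target a different self-hosted runner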