11# Executes benchmarks implemented in this repository using scripts
2- # for results visualization from intel/llvm (unified-runtime dir).
2+ # for results visualization from intel/llvm.
33name: Benchmarks
44
55on:
1414 required: false
1515 type: string
1616 default: ' '
17- upload_report:
18- required: false
19- type: boolean
20- default: false
2117
2218permissions:
23- contents: read
19+ contents: write
2420 pull-requests: write
2521
2622env:
3834 # Workspace on self-hosted runners is not cleaned automatically.
3935 # We have to delete the files created outside of using actions.
4036 - name: Cleanup self-hosted workspace
41- if: always()
37+ if: false
4238 run: |
4339 ls -la ./
4440 rm -rf ./* || true
@@ -97,23 +93,32 @@ jobs:
9793 - name: Build UMF
9894 run: cmake --build ${{env.BUILD_DIR}} -j $(nproc)
9995
100- # Get scripts for benchmark data visualization.
101- # Use specific tag, as the scripts or files' location may change.
102- - name: Checkout SYCL
96+ - name: Checkout UMF results branch
97+ uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
98+ with:
99+ ref: benchmark-results
100+ path: results-repo
101+
102+ # Get scripts for benchmark data visualization (from SYCL repo).
103+ # Use specific ref, as the scripts or files' location may change.
104+ - name: Checkout benchmark scripts
103105 uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
104106 with:
105107 repository: intel/llvm
106- # [BENCHMARK] fix default timeout parameter
107- # https://github.com/intel/llvm/pull/17412
108- ref: 357e9e0b253b7eba105d044e38452b3c09169f8a
109- path: sycl-repo
110- fetch-depth: 1
108+ # Note: The same ref is used in docs build (for dashboard generation)!
109+ #
110+ # 20.03.2025
111+ # branch: unify-benchmark-ci
112+ ref: cae7049c78c697b3ac94f931716d9efb53addcd8
113+ path: sc
114+ sparse-checkout: |
115+ devops/scripts/benchmarks
111116
112117 - name: Install benchmarking scripts deps
113118 run: |
114119 python -m venv .venv
115120 source .venv/bin/activate
116- pip install -r ${{github.workspace}}/sycl-repo/unified-runtime/third_party/benchmark_requirements.txt
121+ pip install -r ${{github.workspace}}/sc/devops/scripts/benchmarks/requirements.txt
117122
118123 - name: Set core range and GPU mask
119124 run: |
@@ -135,22 +140,21 @@ jobs:
135140
136141 - name: Run UMF benchmarks
137142 id: benchmarks
138- working-directory: ${{env.BUILD_DIR}}
139143 run: >
140- source ${{github.workspace}}/.venv/bin/activate &&
141- taskset -c ${{ env.CORES }} ${{ github.workspace }}/sycl-repo/unified-runtime/scripts/benchmarks/main.py
144+ source .venv/bin/activate &&
145+ taskset -c ${{ env.CORES }} ./sc/devops/scripts/benchmarks/main.py
142146 ~/bench_workdir_umf
143147 --umf ${{env.BUILD_DIR}}
144- --compare baseline
145148 --timeout 3000
146- ${{ inputs.upload_report && '--output-html' || '' }}
147- ${{ inputs.pr_no != 0 && '--output-markdown' || '' }}
149+ --output-html remote
150+ --results-dir ${{ github.workspace }}/results-repo
151+ --output-markdown
148152 ${{ inputs.bench_script_params }}
149153
150154 # In case it failed to add a comment, we can still print the results.
151155 - name: Print benchmark results
152156 if: ${{ always() && inputs.pr_no != 0 }}
153- run: cat ${{env.BUILD_DIR}}/benchmark_results.md
157+ run: cat ./sc/devops/scripts/benchmarks/benchmark_results.md || true
154158
155159 - name: Add comment to PR
156160 uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7.0.1
@@ -160,7 +164,7 @@ jobs:
160164 let markdown = ""
161165 try {
162166 const fs = require('fs');
163- markdown = fs.readFileSync('${{env.BUILD_DIR}}/benchmark_results.md', 'utf8');
167+ markdown = fs.readFileSync('./sc/devops/scripts/benchmarks/benchmark_results.md', 'utf8');
164168 } catch(err) {
165169 }
166170
@@ -177,15 +181,42 @@ jobs:
177181 repo: context.repo.repo,
178182 body: body
179183 })
180-
181- - name: Upload HTML report
182- if: ${{ always() && inputs.upload_report }}
183- uses: actions/cache/save@1bd1e32a3bdc45362d1e726936510720a7c30a57 # v4.2.0
184- with:
185- path: umf-repo/build/benchmark_results.html
186- key: benchmark-results-${{ github.run_id }}
187184
188- - name: Get information about platform
189- if: ${{ always() }}
190- working-directory: ${{env.UMF_DIR}}
191- run: .github/scripts/get_system_info.sh
185+ - name: Commit data.json and results directory
186+ working-directory: results-repo
187+ run: |
188+ git config --global user.name "GitHub Actions Bot"
189+ git config --global user.email "[email protected]"
190+
191+ for attempt in {1..5}; do
192+ echo "Attempt $attempt to push changes"
193+
194+ rm -f data.json
195+ cp ${{ github.workspace }}/sc/devops/scripts/benchmarks/html/data.json .
196+
197+ git add data.json results/
198+ git commit -m "Add benchmark results and data.json"
199+
200+ results_file=$(git diff HEAD~1 --name-only -- results/ | head -n 1)
201+
202+ if git push origin benchmark-results; then
203+ echo "Push succeeded"
204+ break
205+ fi
206+
207+ echo "Push failed, retrying..."
208+
209+ if [ -n "$results_file" ]; then
210+ mv $results_file ${{ github.workspace }}/temp_$(basename $results_file)
211+
212+ git reset --hard origin/benchmark-results
213+ git pull origin benchmark-results
214+
215+ new_file="results/$(basename "$results_file")"
216+ mv ${{ github.workspace }}/temp_$(basename $results_file) $new_file
217+ fi
218+
219+ echo "Regenerating data.json"
220+ (cd ${{ github.workspace }} && ${{ github.workspace }}/sc/devops/scripts/benchmarks/main.py ~/bench_workdir_umf --dry-run --results-dir ${{ github.workspace }}/results-repo --output-html remote)
221+
222+ done