4 files changed (+70 -2 lines)

File 1 of 4:
         type: string
         default: ''
         required: False
+      benchmark_preset:
+        type: string
+        default: 'Minimal'
+        required: False

   workflow_dispatch:
     inputs:
@@ -356,6 +360,7 @@ jobs:
       target_devices: ${{ inputs.target_devices }}
       upload_results: ${{ inputs.benchmark_upload_results }}
       save_name: ${{ inputs.benchmark_save_name }}
+      preset: ${{ inputs.benchmark_preset }}
     env:
       RUNNER_TAG: ${{ inputs.runner }}
       GITHUB_TOKEN: ${{ secrets.LLVM_SYCL_BENCHMARK_TOKEN }}
File 2 of 4:
     - cron: '0 1 * * *'  # 2 hrs earlier than sycl-nightly.yml
   workflow_call:
     inputs:
+      preset:
+        type: string
+        description: |
+          Benchmark preset to run; see /devops/scripts/benchmarks/presets.py
+        required: false
+        default: 'Minimal'  # Only compute-benchmarks
       pr_no:
         type: string
         description: |

   workflow_dispatch:
     inputs:
+      preset:
+        type: choice
+        description: |
+          Benchmark preset to run; see /devops/scripts/benchmarks/presets.py.
+          Hint: Minimal is compute-benchmarks only.
+        options:
+          - Full
+          - SYCL
+          - Minimal
+          - Normal
+          - Test
+        default: 'Minimal'  # Only compute-benchmarks
       pr_no:
         type: string
         description: |
@@ -125,6 +142,7 @@ jobs:
       tests_selector: benchmark_v2
       benchmark_upload_results: ${{ inputs.upload_results }}
       benchmark_save_name: ${{ matrix.save_name }}
+      benchmark_preset: ${{ inputs.preset }}
       repo_ref: ${{ matrix.ref }}
       devops_ref: ${{ github.ref }}
       sycl_toolchain_artifact: sycl_linux_default
@@ -154,4 +172,5 @@ jobs:
       tests_selector: benchmark_v2
       benchmark_save_name: Baseline
       benchmark_upload_results: ${{ inputs.upload_results }}
+      benchmark_preset: ${{ inputs.preset }}
       repo_ref: ${{ github.ref }}  # TODO figure out nightly commit hash
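With the plumbing above, a manual run can pick a preset from the workflow_dispatch menu. Dispatching from the command line could look like the sketch below; the workflow file name is a placeholder, since this diff does not show it:

    # Hypothetical workflow file name; 'preset' is the dispatch input defined above.
    gh workflow run sycl-benchmark-nightly.yml -f preset=Normal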
File 3 of 4: actions/run-tests/benchmark_v2
@@ -22,7 +22,9 @@ inputs:
   save_name:
     type: string
     required: True
-    default: ''
+  preset:
+    type: string
+    required: True

 runs:
   using: "composite"

       env:
         TARGET_DEVICE: ${{ inputs.target_devices }}
         RUNNER_NAME: ${{ runner.name }}
+        PRESET: ${{ inputs.preset }}
       run: |
         case "$RUNNER_TAG" in
           '["PVC_PERF"]' ) ;;

         esac
         echo "ONEAPI_DEVICE_SELECTOR=$TARGET_DEVICE" >> $GITHUB_ENV

+        # Make sure the specified preset is a known value and not malicious
+        python3 ./devops/scripts/benchmarks/presets.py query --quiet "$PRESET"
+        [ "$?" -ne 0 ] && exit 1  # Stop the workflow if the preset is invalid
+        echo "PRESET=$PRESET" >> $GITHUB_ENV
   - name: Compute CPU core range to run benchmarks on
     shell: bash
     run: |
@@ -117,7 +124,7 @@ runs:
           --output-html remote \
           --results-dir "./llvm-ci-perf-results/$RUNNER_NAME" \
           --output-dir "./llvm-ci-perf-results/$RUNNER_NAME" \
-          --preset Minimal \
+          --preset "$PRESET" \
           --timestamp-override "$SAVE_TIMESTAMP"
         echo "-----"
         python3 ./devops/scripts/benchmarks/compare.py to_hist \
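The validation step above exists because $PRESET is later interpolated into a shell command line; checking it against the known presets blocks injection of arbitrary strings. A minimal standalone sketch of the same guard, assuming the repository layout referenced in this diff:

    # Reject any preset name that presets.py does not recognize.
    PRESET='Normal'
    if ! python3 ./devops/scripts/benchmarks/presets.py query --quiet "$PRESET"; then
        echo "Unknown benchmark preset: $PRESET" >&2
        exit 1
    fi
    echo "PRESET=$PRESET" >> "$GITHUB_ENV"  # Export for later steps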
File 4 of 4: devops/scripts/benchmarks/presets.py
 # See LICENSE.TXT
 # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception

+import argparse
+
 presets: dict[str, list[str]] = {
     "Full": [
         "Compute Benchmarks",
@@ -36,3 +38,38 @@ def enabled_suites(preset: str) -> list[str]:
         return presets[preset]
     except KeyError:
         raise ValueError(f"Preset '{preset}' not found.")
+
+
+def main():
+    parser = argparse.ArgumentParser(description="Benchmark Preset Utilities")
+    subparsers = parser.add_subparsers(dest="command", required=True)
+
+    query_parser = subparsers.add_parser(
+        "query",
+        help="Query benchmarks run by a preset (as defined in presets.py)",
+    )
+    query_parser.add_argument(
+        "preset_to_query",
+        type=str,
+        help="preset name to query",
+    )
+    query_parser.add_argument(
+        "-q", "--quiet",
+        action="store_true",
+        help="Disable stdout messages: useful for checking whether a preset exists from a shell script.",
+    )
+
+    args = parser.parse_args()
+    if args.command == "query":
+        if args.preset_to_query in presets:
+            if not args.quiet:
+                print(f"Benchmark suites to be run in {args.preset_to_query}:")
+                for suite in presets[args.preset_to_query]:
+                    print(suite)
+            exit(0)
+        else:
+            if not args.quiet:
+                print(f"Error: No preset named '{args.preset_to_query}'.")
+            exit(1)
+
+
+if __name__ == "__main__":
+    main()
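A quick sanity check of the query subcommand added above; the output shown assumes the Minimal preset contains only the "Compute Benchmarks" suite, as the workflow comments suggest:

    $ python3 devops/scripts/benchmarks/presets.py query Minimal
    Benchmark suites to be run in Minimal:
    Compute Benchmarks

    $ python3 devops/scripts/benchmarks/presets.py query --quiet NoSuchPreset; echo $?
    1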