@@ -6,7 +6,7 @@
 
 usage () {
     >&2 echo "Usage: $0 <compute-benchmarks git repo> -t <runner type> [-B <compute-benchmarks build path>]
-  -t Specify runner type -- Required
+  -n Github runner name -- Required
   -B Path to clone and build compute-benchmarks on
   -p Path to compute-benchmarks (or directory to build compute-benchmarks in)
   -r Github repo to use for compute-benchmarks origin, in format <org>/<name>
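With this change the script identifies the runner by name (-n) rather than by type (-t), and, per the checks added further down, ONEAPI_DEVICE_SELECTOR must be exported before running. A hypothetical invocation under the new interface (script name, runner name, and values are illustrative, not taken from this patch):

    export ONEAPI_DEVICE_SELECTOR=level_zero:gpu
    ./benchmark.sh -n my_benchmark_runner -r intel/compute-benchmarks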
@@ -84,45 +84,51 @@ build_compute_bench() {
 # | tee -a $3 # Print to summary file
 # }
 
-###
-STATUS_SUCCESS=0
-STATUS_ERROR=1
-###
-
 # Check if the number of samples for a given test case is less than a threshold
 # set in benchmark-ci.conf
+#
+# Usage: samples_under_threshold <relative path of directory containing test case results>
 samples_under_threshold () {
-    mkdir -p $1
-    file_count="$(find $1 -maxdepth 1 -type f | wc -l)"
+    [ ! -d "$PERF_RES_PATH/$1" ] && return 1 # Directory doesn't exist
+    file_count="$(find "$PERF_RES_PATH/$1" -maxdepth 1 -type f | wc -l)"
     [ "$file_count" -lt "$AVERAGE_THRESHOLD" ]
 }
 
+# Check for a regression via compare.py
+#
+# Usage: check_regression <relative path of output csv>
 check_regression () {
-    if samples_under_threshold "$PERF_RES_PATH/$RUNNER/$1"; then
-        echo "Not enough samples to construct an average, performance check skipped!"
-        return $STATUS_SUCCESS
+    csv_relpath="$(dirname "$1")"
+    csv_name="$(basename "$1")"
+    if samples_under_threshold "$csv_relpath"; then
+        echo "Not enough samples to construct a good average, performance \
+check skipped!"
+        return 0 # Success status
     fi
-    BENCHMARKING_ROOT="$BENCHMARKING_ROOT" python "$BENCHMARKING_ROOT/compare.py" "$RUNNER" "$1" "$2"
+    BENCHMARKING_ROOT="$BENCHMARKING_ROOT" \
+        python "$BENCHMARKING_ROOT/compare.py" "$csv_relpath" "$csv_name"
     return $?
 }
 
 # Move the results of our benchmark into the git repo
+#
+# Usage: cache <relative path of output csv>
 cache () {
-    mv "$2" "$PERF_RES_PATH/$RUNNER/$1/"
+    mv "$OUTPUT_PATH/$1" "$PERF_RES_PATH/$1"
 }
 
-# Check for a regression, and cache if no regression found
+# Check for a regression + cache if no regression found
+#
+# Usage: check_and_cache <relative path of output csv>
 check_and_cache () {
-    echo "Checking $testcase..."
-    if check_regression $1 $2; then
+    echo "Checking $1..."
+    if check_regression "$1"; then
         if [ "$CACHE_RESULTS" -eq "1" ]; then
-            echo "Caching $testcase..."
-            cache $1 $2
+            echo "Caching $1..."
+            cache "$1"
         fi
     else
-        if [ "$CACHE_RESULTS" -eq "1" ]; then
-            echo "Not caching!"
-        fi
+        [ "$CACHE_RESULTS" -eq "1" ] && echo "Regression found -- Not caching!"
     fi
 }
 
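Taken together, the reworked helpers all accept a single argument: one CSV path relative to both $OUTPUT_PATH and $PERF_RES_PATH. A minimal sketch of the flow, assuming OUTPUT_PATH, PERF_RES_PATH, and AVERAGE_THRESHOLD were loaded from benchmark-ci.conf, with a hypothetical relative path:

    relpath="level_zero-gpu/my_runner/foo_benchmark/foo_benchmark-20250101_120000.csv"
    check_and_cache "$relpath"
    # check_regression splits the argument into
    #   csv_relpath=level_zero-gpu/my_runner/foo_benchmark  (history dir under $PERF_RES_PATH)
    #   csv_name=foo_benchmark-20250101_120000.csv
    # and hands both to compare.py once enough samples exist; on success with
    # CACHE_RESULTS=1, cache moves $OUTPUT_PATH/$relpath to $PERF_RES_PATH/$relpath.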
@@ -133,24 +139,39 @@ process_benchmarks() {
     echo "### Running and processing selected benchmarks ###"
     if [ -z "$TESTS_CONFIG" ]; then
         echo "Setting tests to run via cli is not currently supported."
-        exit $STATUS_ERROR
+        exit 1
     else
         rm "$BENCHMARK_ERROR_LOG" "$BENCHMARK_SLOW_LOG" 2> /dev/null
-        # Ignore lines in the test config starting with #'s
+        # Loop through each line of enabled_tests.conf, but ignore lines in
+        # the test config starting with #'s:
         grep "^[^#]" "$TESTS_CONFIG" | while read -r testcase; do
             echo "# Running $testcase..."
 
-            test_csv_output="$OUTPUT_PATH/$RUNNER/$testcase-$TIMESTAMP.csv"
-            mkdir -p "$OUTPUT_PATH/$RUNNER/"
-            $COMPUTE_BENCH_PATH/build/bin/$testcase --csv --iterations="$COMPUTE_BENCH_ITERATIONS" | tail +8 > "$test_csv_output"
-            # The tail +8 filters out initial debug prints not in csv format
+            # The benchmark results git repo and this script's output both
+            # share the following directory structure:
+            #
+            #   /<device selector>/<runner>/<test name>
+            #
+            # Figure out the relative path of our testcase result in both
+            # directories:
+            test_dir_relpath="$DEVICE_SELECTOR_DIRNAME/$RUNNER/$testcase"
+            mkdir -p "$OUTPUT_PATH/$test_dir_relpath" # Ensure directory exists
+            # TODO: generate the runner config txt if it does not exist
+            output_csv_relpath="$test_dir_relpath/$testcase-$TIMESTAMP.csv"
+
+            output_csv="$OUTPUT_PATH/$output_csv_relpath" # Real output path
+            $COMPUTE_BENCH_PATH/build/bin/$testcase --csv \
+                --iterations="$COMPUTE_BENCH_ITERATIONS" \
+                | tail +8 > "$output_csv"
+            # The tail +8 filters out header lines not in csv format
 
-            if [ "$?" -eq 0 ] && [ -s "$test_csv_output" ]; then
-                check_and_cache $testcase $test_csv_output
+            exit_status="$?"
+            if [ "$exit_status" -eq 0 ] && [ -s "$output_csv" ]; then
+                check_and_cache "$output_csv_relpath"
             else
-                # TODO consider capturing error for logging
-                echo "ERROR @ $test_case"
-                echo "-- $testcase: error $?" >> "$BENCHMARK_ERROR_LOG"
+                # TODO: consider capturing stderr for logging
+                echo "[ERROR] $testcase returned exit status $exit_status"
+                echo "-- $testcase: error $exit_status" >> "$BENCHMARK_ERROR_LOG"
             fi
         done
     fi
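Concretely, with ONEAPI_DEVICE_SELECTOR=level_zero:gpu, a runner named my_runner, and a test case foo_benchmark (all illustrative), one iteration of the loop writes:

    $OUTPUT_PATH/level_zero-gpu/my_runner/foo_benchmark/foo_benchmark-<timestamp>.csv

and check_and_cache then compares it against, and on success moves it to, the same relative path under $PERF_RES_PATH.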
@@ -163,13 +184,13 @@ process_results() {
         printf "\n### Tests performing over acceptable range of average: ###\n"
         cat "$BENCHMARK_SLOW_LOG"
         echo ""
-        fail=1
+        fail=2
     fi
     if [ -s "$BENCHMARK_ERROR_LOG" ]; then
         printf "\n### Tests that failed to run: ###\n"
         cat "$BENCHMARK_ERROR_LOG"
         echo ""
-        fail=2
+        fail=1
     fi
     exit $fail
 }
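After the swap, exit status 1 marks tests that failed to run and exit status 2 marks performance regressions; when both logs are non-empty the error branch runs last, so 1 wins. Assuming fail is initialized to 0 earlier in the function (not shown in this hunk), a caller could distinguish the cases roughly like this (script name hypothetical):

    ./benchmark.sh -n my_runner
    case $? in
        0) echo "benchmarks passed" ;;
        1) echo "some benchmarks failed to run" ;;
        2) echo "performance regression detected" ;;
    esac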
@@ -203,40 +224,27 @@ load_configs() {
 
     . $BENCHMARKING_ROOT/utils.sh
     load_all_configs "$BENCHMARK_CI_CONFIG"
-
-    # Debug
-    # echo "PERF_RES_GIT_REPO: $PERF_RES_GIT_REPO"
-    # echo "PERF_RES_BRANCH: $PERF_RES_BRANCH"
-    # echo "PERF_RES_PATH: $PERF_RES_PATH"
-    # echo "COMPUTE_BENCH_GIT_REPO: $COMPUTE_BENCH_GIT_REPO"
-    # echo "COMPUTE_BENCH_BRANCH: $COMPUTE_BENCH_BRANCH"
-    # echo "COMPUTE_BENCH_PATH: $COMPUTE_BENCH_PATH"
-    # echo "COMPUTE_BENCH_COMPILE_FLAGS: $COMPUTE_BENCH_COMPILE_FLAGS"
-    # echo "OUTPUT_PATH: $OUTPUT_PATH"
-    # echo "METRICS_VARIANCE: $METRICS_VARIANCE"
-    # echo "METRICS_RECORDED: $METRICS_RECORDED"
-    # echo "AVERAGE_THRESHOLD: $AVERAGE_THRESHOLD"
-    # echo "AVERAGE_CUTOFF_RANGE: $AVERAGE_CUTOFF_RANGE"
-    # echo "TIMESTAMP_FORMAT: $TIMESTAMP_FORMAT"
-    # echo "BENCHMARK_SLOW_LOG: $BENCHMARK_SLOW_LOG"
-    # echo "BENCHMARK_ERROR_LOG: $BENCHMARK_ERROR_LOG"
-    echo "Configured runner types: $RUNNER_TYPES"
 }
 
-load_configs
+#####
 
 COMPUTE_BENCH_COMPILE_FLAGS=""
 CACHE_RESULTS="0"
-TIMESTAMP="$(date +"$TIMESTAMP_FORMAT")"
 
-# CLI overrides to configuration options
-while getopts "p:b:r:f:t:cCs" opt; do
+load_configs
+
+# TIMESTAMP_FORMAT is defined in the loaded configs, so compute the timestamp
+# only after load_configs has run:
+TIMESTAMP="$(date +"$TIMESTAMP_FORMAT")"
+
+# CLI flags + overrides to configuration options:
+while getopts "p:b:r:f:n:cCs" opt; do
     case $opt in
         p) COMPUTE_BENCH_PATH=$OPTARG ;;
         r) COMPUTE_BENCH_GIT_REPO=$OPTARG ;;
         b) COMPUTE_BENCH_BRANCH=$OPTARG ;;
         f) COMPUTE_BENCH_COMPILE_FLAGS=$OPTARG ;;
-        t) RUNNER_TYPE=$OPTARG ;;
+        n) RUNNER=$OPTARG ;;
         # Cleanup status is saved in a var to ensure all arguments are processed before
         # performing cleanup
         c) _cleanup=1 ;;
@@ -246,28 +254,40 @@ while getopts "p:b:r:f:t:cCs" opt; do
     esac
 done
 
+# Check that all necessary variables exist:
 if [ -z "$CMPLR_ROOT" ]; then
     echo "Please set \$CMPLR_ROOT first; it is needed by compute-benchmarks to build."
     exit 1
+elif [ -z "$ONEAPI_DEVICE_SELECTOR" ]; then
+    echo "Please set \$ONEAPI_DEVICE_SELECTOR first to specify which device to use."
+    exit 1
+elif [ -z "$RUNNER" ]; then
+    echo "Please specify the runner name using -n first; it is needed for storing/comparing benchmark results."
+    exit 1
 fi
-if [ -z "$RUNNER_TYPE" ]; then
-    echo "Please specify runner type using -t first; it is needed for comparing benchmark results"
+
+# Make sure ONEAPI_DEVICE_SELECTOR doesn't try to enable multiple devices at
+# the same time, or select devices by specific device IDs:
+_dev_sel_backend_re="$(sed 's/,/|/g' <<< "$DEVICE_SELECTOR_ENABLED_BACKENDS")"
+_dev_sel_device_re="$(sed 's/,/|/g' <<< "$DEVICE_SELECTOR_ENABLED_DEVICES")"
+_dev_sel_re="s/($_dev_sel_backend_re):($_dev_sel_device_re)//"
+if [ -n "$(sed -E "$_dev_sel_re" <<< "$ONEAPI_DEVICE_SELECTOR")" ]; then
+    echo "Unsupported \$ONEAPI_DEVICE_SELECTOR value: please ensure only one \
+device is selected, and devices are not selected by indices."
+    echo "Enabled backends: $DEVICE_SELECTOR_ENABLED_BACKENDS"
+    echo "Enabled device types: $DEVICE_SELECTOR_ENABLED_DEVICES"
     exit 1
-else
-    # Identify runner being used
-    runner_regex="$(printf "$RUNNER_TYPES" | sed 's/,/|/g')"
-    RUNNER="$(printf "$RUNNER_TYPE" | grep -o -E "\b($runner_regex)\b")"
-    if [ -z "$RUNNER" ]; then
-        echo "Unknown runner type! Configured runners: $RUNNER_TYPES"
-        exit 1
-    fi
-    echo "Chosen runner: $RUNNER"
 fi
+# ONEAPI_DEVICE_SELECTOR values contain a colon, which we don't want in a
+# directory name: this value lets us use the selector as a directory name
+DEVICE_SELECTOR_DIRNAME="$(sed 's/:/-/' <<< "$ONEAPI_DEVICE_SELECTOR")"
 
+# Clean up and delete all cached files if specified:
 [ ! -z "$_cleanup" ] && cleanup
-
+# Clone and build only if they aren't already cached/deleted:
 [ ! -d "$PERF_RES_PATH" ] && clone_perf_res
 [ ! -d "$COMPUTE_BENCH_PATH" ] && clone_compute_bench
 [ ! -d "$COMPUTE_BENCH_PATH/build" ] && build_compute_bench
+# Run the benchmarks and process their results:
 process_benchmarks
 process_results
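For reference, the selector validation builds one extended regex from the config allow-lists and deletes exactly one <backend>:<device> pair; any leftover text rejects the value. With illustrative config values DEVICE_SELECTOR_ENABLED_BACKENDS=level_zero,opencl and DEVICE_SELECTOR_ENABLED_DEVICES=cpu,gpu:

    sed -E 's/(level_zero|opencl):(cpu|gpu)//' <<< "level_zero:gpu"            # -> ""             (accepted)
    sed -E 's/(level_zero|opencl):(cpu|gpu)//' <<< "level_zero:gpu;opencl:cpu" # -> ";opencl:cpu"  (rejected)
    sed -E 's/(level_zero|opencl):(cpu|gpu)//' <<< "level_zero:0"              # -> "level_zero:0" (rejected)
    sed 's/:/-/' <<< "level_zero:gpu"    # -> "level_zero-gpu", the directory-safe name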