 usage () {
     >&2 echo "Usage: $0 <compute-benchmarks git repo> -t <runner type> [-B <compute-benchmarks build path>]
   -n Github runner name -- Required
-  -B Path to clone and build compute-benchmarks on
-  -p Path to compute-benchmarks (or directory to build compute-benchmarks in)
-  -r Github repo to use for compute-benchmarks origin, in format <org>/<name>
-  -b Git branch to use within compute-benchmarks
-  -f Compile flags passed into building compute-benchmarks
   -c Clean up working directory
   -C Clean up working directory and exit
   -s Cache results
@@ -21,24 +16,22 @@ This script builds and runs benchmarks from compute-benchmarks."
 }
 
 clone_perf_res () {
-    echo "### Cloning llvm-ci-perf-res ($SANITIZED_PERF_RES_GIT_REPO:$SANITIZED_PERF_RES_GIT_BRANCH) ###"
-    mkdir -p "$(dirname "$SANITIZED_PERF_RES_PATH")"
-    git clone -b "$SANITIZED_PERF_RES_GIT_BRANCH" "https://github.com/$SANITIZED_PERF_RES_GIT_REPO" "$SANITIZED_PERF_RES_PATH"
+    echo "### Cloning llvm-ci-perf-results ($SANITIZED_PERF_RES_GIT_REPO:$SANITIZED_PERF_RES_GIT_BRANCH) ###"
+    git clone -b "$SANITIZED_PERF_RES_GIT_BRANCH" "https://github.com/$SANITIZED_PERF_RES_GIT_REPO" ./llvm-ci-perf-results
     [ "$?" -ne 0 ] && exit $?
 }
 
 clone_compute_bench () {
     echo "### Cloning compute-benchmarks ($SANITIZED_COMPUTE_BENCH_GIT_REPO:$SANITIZED_COMPUTE_BENCH_GIT_BRANCH) ###"
-    mkdir -p "$(dirname "$SANITIZED_COMPUTE_BENCH_PATH")"
     git clone -b "$SANITIZED_COMPUTE_BENCH_GIT_BRANCH" \
         --recurse-submodules "https://github.com/$SANITIZED_COMPUTE_BENCH_GIT_REPO" \
-        "$SANITIZED_COMPUTE_BENCH_PATH"
+        ./compute-benchmarks
     [ "$?" -ne 0 ] && exit "$?"
 }
 
 build_compute_bench () {
     echo "### Building compute-benchmarks ($SANITIZED_COMPUTE_BENCH_GIT_REPO:$SANITIZED_COMPUTE_BENCH_GIT_BRANCH) ###"
-    mkdir "$SANITIZED_COMPUTE_BENCH_PATH/build" && cd "$SANITIZED_COMPUTE_BENCH_PATH/build" &&
+    mkdir ./compute-benchmarks/build && cd ./compute-benchmarks/build &&
     # No reason to turn on ccache, if this docker image will be disassembled later on
     cmake .. -DBUILD_SYCL=ON -DBUILD_L0=OFF -DBUILD_OCL=OFF -DCCACHE_ALLOWED=FALSE
     # TODO enable mechanism for opting into L0 and OCL -- the concept is to
@@ -66,8 +59,8 @@ build_compute_bench() {
 #
 # Usage: <relative path of directory containing test case results>
 samples_under_threshold () {
-    [ ! -d "$SANITIZED_PERF_RES_PATH/$1" ] && return 1 # Directory doesn't exist
-    file_count="$(find "$SANITIZED_PERF_RES_PATH/$1" -maxdepth 1 -type f | wc -l)"
+    [ ! -d "./llvm-ci-perf-results/$1" ] && return 1 # Directory doesn't exist
+    file_count="$(find "./llvm-ci-perf-results/$1" -maxdepth 1 -type f | wc -l)"
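+    # The bracket test below doubles as the function's return value: it
+    # succeeds (0) when fewer samples exist than the configured minimum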
7164 [ " $file_count " -lt " $SANITIZED_AVERAGE_MIN_THRESHOLD " ]
7265}
7366
@@ -92,9 +85,9 @@ check_regression() {
 #
 # Usage: cache <relative path of output csv>
 cache () {
-    mkdir -p "$(dirname "$SANITIZED_ARTIFACT_PASSING_CACHE/$1")" "$(dirname "$SANITIZED_PERF_RES_PATH/$1")"
-    cp "$SANITIZED_ARTIFACT_OUTPUT_CACHE/$1" "$SANITIZED_ARTIFACT_PASSING_CACHE/$1"
-    mv "$SANITIZED_ARTIFACT_OUTPUT_CACHE/$1" "$SANITIZED_PERF_RES_PATH/$1"
+    mkdir -p "$(dirname "./artifact/passing_tests/$1")" "$(dirname "./llvm-ci-perf-results/$1")"
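+    # Keep a copy in passing_tests for the CI artifact, then move the
+    # original into the local llvm-ci-perf-results checkout for caching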
89+ cp " ./artifact/failed_tests/ $1 " " ./artifact/passing_tests /$1 "
90+ mv " ./artifact/failed_tests/ $1 " " ./llvm-ci-perf-results /$1 "
9891}
9992
10093# Check for a regression + cache if no regression found
@@ -114,15 +107,13 @@ check_and_cache() {
 
 # Run and process the results of each enabled benchmark in enabled_tests.conf
 process_benchmarks () {
-    mkdir -p "$SANITIZED_PERF_RES_PATH"
-
     echo "### Running and processing selected benchmarks ###"
     if [ -z "$TESTS_CONFIG" ]; then
         echo "Setting tests to run via cli is not currently supported."
         exit 1
     else
-        rm "$SANITIZED_BENCHMARK_LOG_ERROR" "$SANITIZED_BENCHMARK_LOG_SLOW" 2> /dev/null
-        mkdir -p "$(dirname "$SANITIZED_BENCHMARK_LOG_ERROR")" "$(dirname "$SANITIZED_BENCHMARK_LOG_SLOW")"
+        rm ./artifact/benchmarks_errored.log ./artifact/benchmarks_failed.log 2> /dev/null
+        mkdir -p ./artifact
         # Loop through each line of enabled_tests.conf, but ignore lines in the
         # test config starting with #'s:
         grep "^[^#]" "$TESTS_CONFIG" | while read -r testcase; do
@@ -145,11 +136,13 @@ process_benchmarks() {
             # Figure out the relative path of our testcase result:
             test_dir_relpath="$DEVICE_SELECTOR_DIRNAME/$RUNNER/$testcase"
             output_csv_relpath="$test_dir_relpath/$testcase-$TIMESTAMP.csv"
-            mkdir -p "$SANITIZED_ARTIFACT_OUTPUT_CACHE/$test_dir_relpath" # Ensure directory exists
-            # TODO generate runner config txt if not exist
+            mkdir -p "./artifact/failed_tests/$test_dir_relpath" # Ensure directory exists
 
-            output_csv="$SANITIZED_ARTIFACT_OUTPUT_CACHE/$output_csv_relpath"
-            $SANITIZED_COMPUTE_BENCH_PATH/build/bin/$testcase --csv \
+            # Tests are first placed in ./artifact/failed_tests, and are only
+            # moved to passing_tests or the performance results repo if the
+            # benchmark results are passing
+            output_csv="./artifact/failed_tests/$output_csv_relpath"
+            "./compute-benchmarks/build/bin/$testcase" --csv \
                 --iterations="$SANITIZED_COMPUTE_BENCH_ITERATIONS" \
                 | tail +8 > "$output_csv"
             # The tail +8 filters out header lines not in csv format
@@ -158,9 +151,8 @@ process_benchmarks() {
             if [ "$exit_status" -eq 0 ] && [ -s "$output_csv" ]; then
                 check_and_cache $output_csv_relpath
             else
-                # TODO consider capturing stderr for logging
                 echo "[ERROR] $testcase returned exit status $exit_status"
-                echo "-- $testcase: error $exit_status" >> "$SANITIZED_BENCHMARK_LOG_ERROR"
+                echo "-- $testcase: error $exit_status" >> ./artifact/benchmarks_errored.log
             fi
         done
     fi
@@ -169,15 +161,15 @@ process_benchmarks() {
 # Handle failures + produce a report on what failed
 process_results () {
     fail=0
-    if [ -s "$SANITIZED_BENCHMARK_LOG_SLOW" ]; then
+    if [ -s ./artifact/benchmarks_failed.log ]; then
         printf "\n### Tests performing over acceptable range of average: ###\n"
-        cat "$SANITIZED_BENCHMARK_LOG_SLOW"
+        cat ./artifact/benchmarks_failed.log
         echo ""
         fail=2
     fi
-    if [ -s "$SANITIZED_BENCHMARK_LOG_ERROR" ]; then
+    if [ -s ./artifact/benchmarks_errored.log ]; then
         printf "\n### Tests that failed to run: ###\n"
-        cat "$SANITIZED_BENCHMARK_LOG_ERROR"
+        cat ./artifact/benchmarks_errored.log
         echo ""
         fail=1
     fi
@@ -186,8 +178,8 @@ process_results() {
 
 cleanup () {
     echo "### Cleaning up compute-benchmark builds from prior runs ###"
-    rm -rf "$SANITIZED_COMPUTE_BENCH_PATH"
-    rm -rf "$SANITIZED_PERF_RES_PATH"
+    rm -rf ./compute-benchmarks
+    rm -rf ./llvm-ci-perf-results
     [ ! -z "$_exit_after_cleanup" ] && exit
 }
 
@@ -229,16 +221,19 @@ load_configs
 
 COMPUTE_BENCH_COMPILE_FLAGS=""
 CACHE_RESULTS="0"
-TIMESTAMP="$(date +"$SANITIZED_TIMESTAMP_FORMAT")"
+# Timestamp format is YYYYMMDD_HHMMSS
+TIMESTAMP="$(date +%Y%m%d_%H%M%S)"
 
 # CLI flags + overrides to configuration options:
-while getopts "p:b:r:f:n:cCs" opt; do
+while getopts "n:cCs" opt; do
     case "$opt" in
-        p) COMPUTE_BENCH_PATH="$OPTARG" ;;
-        r) COMPUTE_BENCH_GIT_REPO="$OPTARG" ;;
-        b) COMPUTE_BENCH_BRANCH="$OPTARG" ;;
-        f) COMPUTE_BENCH_COMPILE_FLAGS="$OPTARG" ;;
-        n) RUNNER="$OPTARG" ;;
+        n)
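+            # sed deletes every run of characters in the allowlist, so any
+            # output left over means $OPTARG contains an illegal character: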
+            if [ -n "$(printf "%s" "$OPTARG" | sed "s/[a-zA-Z0-9_-]*//g")" ]; then
+                echo "Illegal characters in runner name."
+                exit 1
+            fi
+            RUNNER="$OPTARG"
+            ;;
         # Cleanup status is saved in a var to ensure all arguments are processed before
         # performing cleanup
         c) _cleanup=1 ;;
@@ -279,9 +274,9 @@ DEVICE_SELECTOR_DIRNAME="$(echo "$ONEAPI_DEVICE_SELECTOR" | sed 's/:/-/')"
 # Clean up and delete all cached files if specified:
 [ ! -z "$_cleanup" ] && cleanup
 # Clone and build only if they aren't already cached/deleted:
-[ ! -d "$SANITIZED_PERF_RES_PATH" ] && clone_perf_res
-[ ! -d "$SANITIZED_COMPUTE_BENCH_PATH" ] && clone_compute_bench
-[ ! -d "$SANITIZED_COMPUTE_BENCH_PATH/build" ] && build_compute_bench
+[ ! -d ./llvm-ci-perf-results ] && clone_perf_res
+[ ! -d ./compute-benchmarks ] && clone_compute_bench
+[ ! -d ./compute-benchmarks/build ] && build_compute_bench
 # Process benchmarks:
 process_benchmarks
 process_results
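
A minimal sketch of how the updated script might be invoked, assuming it is
saved as benchmark.sh (the runner name below is a made-up example); after this
change, every clone, build, and result path resolves relative to the current
working directory:

    # Run the enabled benchmarks as runner "linux_gen12" and cache passing results:
    ./benchmark.sh -n linux_gen12 -s
    # Delete the cloned compute-benchmarks and llvm-ci-perf-results checkouts, then exit:
    ./benchmark.sh -n linux_gen12 -C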