@@ -26,72 +26,85 @@ export PL_RUN_STANDALONE_TESTS=1
2626defaults=" -m coverage run --source ${source} --append -m pytest --no-header -v -s --timeout 120 "
2727echo " Using defaults: ${defaults} "
2828
# Get the list of test parametrizations; each one must be invoked separately.
# NOTE: if there's a syntax error in the test files, pytest fails here with garbled output.
# Redirect stdout to the file FIRST, then dup stderr into it — the original
# '2>&1 > file' order left stderr on the terminal instead of in the file.
python3 -um pytest -q --collect-only --pythonwarnings ignore > "$COLLECTED_TESTS_FILE" 2>&1
# early terminate if collection failed (e.g. syntax error)
if [[ $? != 0 ]]; then
  cat "$COLLECTED_TESTS_FILE"
  exit 1
fi
# Drop the trailing pytest summary line so only collected test ids remain.
# NOTE(review): 'sed -i' without a suffix is GNU sed; BSD sed needs -i '' — confirm CI runs on Linux.
sed -i '$d' "$COLLECTED_TESTS_FILE"
# Build the test list: keep only "path::test_name[param]" ids from the collection output.
# mapfile avoids the unquoted-expansion word splitting of tests=( $(grep ...) ).
mapfile -t tests < <(grep -oP '\S+::test_\S+' "$COLLECTED_TESTS_FILE")
test_count=${#tests[@]}
# present the collected tests, one per line (the previous
# 'echo $(IFS=...; echo "${tests[@]}")' printed them all on a single line)
printf 'collected %d tests:\n-------------------\n' "$test_count"
printf '%s\n' "${tests[@]}"
printf '\n===================\n'
# Sanity-check the number of collected tests: a single test is suspicious
# (standalone runs normally cover many parametrizations), zero is fatal.
if [[ $test_count -eq 1 ]]; then
  printf "WARNING: only one test found!\n"
elif [[ $test_count -eq 0 ]]; then   # use [[ ]] consistently (was a bare [ ] with an unquoted var)
  printf "ERROR: no tests found!\n"
  exit 1
fi
# Clear all previously collected per-test reports, in case they exist.
# BUG FIX: the glob must match the names actually written by the runner below
# ('standalone_test_output-$i.txt'); it previously removed 'parallel_test_output-*.txt',
# so stale logs were never cleaned up.
rm -f standalone_test_output-*.txt
status=0     # overall script status: last non-zero test exit code wins
report=""    # final human-readable report, one row per test
pids=()      # PIDs of the tests currently running in this batch
test_ids=()  # indexes (into $tests) of the tests currently running
printf "Running $test_count tests in batches of $test_batch_size\n"
for i in "${!tests[@]}"; do
  test=${tests[$i]}
  printf "Running test $((i+1))/$test_count: $test\n"

  # Execute the test in the background, buffering its output into a per-test
  # log file: the batch runs concurrently, so sharing std{out,err} would
  # garble the output. Redirect stdout first, then dup stderr into it —
  # the original '2>&1 > file' order left stderr on the terminal.
  # shellcheck disable=SC2086 — $defaults is a deliberately word-split option string
  python3 ${defaults} "$test" > "standalone_test_output-$i.txt" 2>&1 &
  test_ids+=("$i")  # remember which test this slot belongs to
  pids+=("$!")      # remember the PID of the background run

  # once the batch is full (or this was the last test), reap the whole batch
  if (( ((i + 1) % test_batch_size == 0) || i == test_count - 1 )); then
    printf "Waiting for batch to finish: %s\n" "${pids[*]}"
    # wait for running tests
    for j in "${!test_ids[@]}"; do
      i=${test_ids[$j]}   # restore the global test id (shadowing the outer loop var is safe: bash for-in re-assigns it each iteration)
      pid=${pids[$j]}     # restore the particular PID
      test=${tests[$i]}   # restore the test name
      # BUG FIX: this printed "$tests" (first array element) instead of "$test"
      printf "Waiting for $test >> standalone_test_output-$i.txt (PID: $pid)\n"
      # BUG FIX: plain 'wait PID' returns the exit status of that specific job;
      # 'wait -n PID' only accepts PID args on bash >= 5.1 and waits for *any* job.
      wait "$pid"
      test_status=$?
      # add row to the final report
      report+="Ran\t$test\t>> exit:$test_status\n"
      if [[ $test_status != 0 ]]; then
        # show the output of the failed test, but keep reaping the batch
        cat "standalone_test_output-$i.txt"
        # remember the non-zero exit status for the final exit
        status=$test_status
      fi
    done
    test_ids=()  # reset for the next batch
    pids=()
  fi
done
# echo test report
printf '=%.s' {1..80}
# '%b' expands the \t/\n escapes stored in $report without using the test
# names as the printf FORMAT string (a '%' in a parametrization id would
# otherwise corrupt the output — the original was printf "\n$report")
printf '\n%b' "$report"
printf '=%.s' {1..80}
printf '\n'

# exit with the worst test result (0 only if every test passed)
exit "$status"