
Commit 2bc5af6

tests/run-tests.py: Factor out helper function to create test report.
This commit factors existing code in `run-tests.py` into a new helper function `create_test_report()`. That function prints a summary of the test run (eg number of tests passed, number failed, number skipped) and creates the corresponding `_results.json` file. This is done so `create_test_report()` can be reused by the other test runners.

The `test_count` counter is now gone, and instead the number of passed plus the number of failed tests is used as an equivalent count.

For consistency this commit makes a minor change to the printed output of `run-tests.py`: instead of printing a shorthand name for tests that failed or skipped, it now prints the full name, eg what was previously printed as `attrtuple2` is now printed as `basics/attrtuple2.py`. This makes the output a little longer (when there are failed/skipped tests) but helps to disambiguate the test name, eg which directory it's in.

Signed-off-by: Damien George <[email protected]>
1 parent: e4d556b
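The shape of the refactor is easiest to see from the ends of the diff below: `run_tests()` now returns the raw results, and `create_test_report()` turns them into the printed summary and the `_results.json` file. A minimal sketch of how another test runner could reuse the helper, assuming `create_test_report()` is in scope (the sample results and the surrounding wiring are hypothetical; only the function signature and the 3-tuple result shape come from this commit):

    import sys
    import types

    # Hypothetical reuse by another test runner: create_test_report() only
    # needs an argparse-style namespace (it serializes vars(args) and uses
    # args.result_dir for the JSON output location).
    args = types.SimpleNamespace(result_dir="results")

    # Each result is a (test, result, reason) 3-tuple, as introduced here.
    test_results = [
        ("basics/attrtuple2.py", "pass", ""),
        ("basics/int1.py", "fail", ""),
        ("micropython/viper_misc.py", "skip", "too large"),
    ]

    # Prints the summary, writes results/_results.json, and returns True
    # if everything succeeded.
    res = create_test_report(args, test_results)  # testcase_count is optional
    sys.exit(0 if res else 1)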

File tree

1 file changed (+28, -22 lines)


tests/run-tests.py

Lines changed: 28 additions & 22 deletions
@@ -616,7 +616,6 @@ def run_script_on_remote_target(self, args, test_file, is_special):
 
 
 def run_tests(pyb, tests, args, result_dir, num_threads=1):
-    test_count = ThreadSafeCounter()
     testcase_count = ThreadSafeCounter()
     test_results = ThreadSafeCounter([])
 
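The two counters that remain are instances of the script's ThreadSafeCounter helper, which is what lets run_one_test() record results safely when tests run on multiple threads (num_threads > 1). A rough sketch of the pattern, assuming a lock-guarded wrapper (an illustration of the idea, not the file's actual implementation):

    import threading

    class ThreadSafeCounter:
        # Wraps an int (for counting) or a list (for collecting results)
        # so concurrent workers can update it without racing.
        def __init__(self, start=0):
            self._value = start
            self._lock = threading.Lock()

        def increment(self):
            with self._lock:
                self._value += 1

        def append(self, arg):
            with self._lock:
                self._value.append(arg)

        @property
        def value(self):
            return self._value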
@@ -903,7 +902,7 @@ def run_one_test(test_file):
 
         if skip_it:
             print("skip ", test_file)
-            test_results.append((test_name, test_file, "skip", ""))
+            test_results.append((test_file, "skip", ""))
             return
 
         # Run the test on the MicroPython target.
@@ -918,11 +917,11 @@ def run_one_test(test_file):
                 # start-up code (eg boot.py) when preparing to run the next test.
                 pyb.read_until(1, b"raw REPL; CTRL-B to exit\r\n")
             print("skip ", test_file)
-            test_results.append((test_name, test_file, "skip", ""))
+            test_results.append((test_file, "skip", ""))
             return
         elif output_mupy == b"SKIP-TOO-LARGE\n":
             print("lrge ", test_file)
-            test_results.append((test_name, test_file, "skip", "too large"))
+            test_results.append((test_file, "skip", "too large"))
             return
 
         # Look at the output of the test to see if unittest was used.
@@ -1005,7 +1004,7 @@ def run_one_test(test_file):
         # Print test summary, update counters, and save .exp/.out files if needed.
         if test_passed:
             print("pass ", test_file, extra_info)
-            test_results.append((test_name, test_file, "pass", ""))
+            test_results.append((test_file, "pass", ""))
             rm_f(filename_expected)
             rm_f(filename_mupy)
         else:
@@ -1017,9 +1016,7 @@ def run_one_test(test_file):
             rm_f(filename_expected)  # in case left over from previous failed run
             with open(filename_mupy, "wb") as f:
                 f.write(output_mupy)
-            test_results.append((test_name, test_file, "fail", ""))
-
-        test_count.increment()
+            test_results.append((test_file, "fail", ""))
 
         # Print a note if this looks like it might have been a misfired unittest
         if not uses_unittest and not test_passed:
@@ -1046,19 +1043,27 @@ def run_one_test(test_file):
             print(line)
         sys.exit(1)
 
-    test_results = test_results.value
-    passed_tests = list(r for r in test_results if r[2] == "pass")
-    skipped_tests = list(r for r in test_results if r[2] == "skip" and r[3] != "too large")
+    # Return test results.
+    return test_results.value, testcase_count.value
+
+
+# Print a summary of the results and save them to a JSON file.
+# Returns True if everything succeeded, False otherwise.
+def create_test_report(args, test_results, testcase_count=None):
+    passed_tests = list(r for r in test_results if r[1] == "pass")
+    skipped_tests = list(r for r in test_results if r[1] == "skip" and r[2] != "too large")
     skipped_tests_too_large = list(
-        r for r in test_results if r[2] == "skip" and r[3] == "too large"
+        r for r in test_results if r[1] == "skip" and r[2] == "too large"
     )
-    failed_tests = list(r for r in test_results if r[2] == "fail")
+    failed_tests = list(r for r in test_results if r[1] == "fail")
+
+    num_tests_performed = len(passed_tests) + len(failed_tests)
+
+    testcase_count_info = ""
+    if testcase_count is not None:
+        testcase_count_info = " ({} individual testcases)".format(testcase_count)
+    print("{} tests performed{}".format(num_tests_performed, testcase_count_info))
 
-    print(
-        "{} tests performed ({} individual testcases)".format(
-            test_count.value, testcase_count.value
-        )
-    )
     print("{} tests passed".format(len(passed_tests)))
 
     if len(skipped_tests) > 0:
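Two things change together in the hunk above. First, the result tuples lose their leading shorthand test_name, shrinking from (test_name, test_file, result, reason) to (test_file, result, reason), which is why every index check shifts from r[2]/r[3] to r[1]/r[2]. Second, the removed test_count counter is recomputed as passed plus failed. A one-line equivalent of the new count, on hypothetical sample data:

    # Skips are excluded, so "pass" + "fail" matches the old test_count.
    results = [("basics/attrtuple2.py", "pass", ""), ("basics/int1.py", "fail", "")]
    num_tests_performed = sum(1 for r in results if r[1] in ("pass", "fail"))  # == 2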
@@ -1088,15 +1093,15 @@ def to_json(obj):
             return obj.pattern
         return obj
 
-    with open(os.path.join(result_dir, RESULTS_FILE), "w") as f:
+    with open(os.path.join(args.result_dir, RESULTS_FILE), "w") as f:
         json.dump(
             {
                 # The arguments passed on the command-line.
                 "args": vars(args),
                 # A list of all results of the form [(test, result, reason), ...].
-                "results": list(test[1:] for test in test_results),
+                "results": list(test for test in test_results),
                 # A list of failed tests. This is deprecated, use the "results" above instead.
-                "failed_tests": [test[1] for test in failed_tests],
+                "failed_tests": [test[0] for test in failed_tests],
             },
             f,
             default=to_json,
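With the test_name element gone, the JSON no longer needs the test[1:] slice: each entry under "results" is already a (test, result, reason) triple, and the deprecated "failed_tests" list now holds full file names such as basics/attrtuple2.py. A sketch of consuming the report (the results/ path is an example; per the commit message, RESULTS_FILE is the _results.json file):

    import json

    # Load the report written by create_test_report().
    with open("results/_results.json") as f:
        report = json.load(f)

    # JSON turns each result tuple into a list: [test, result, reason].
    for test, result, reason in report["results"]:
        if result == "fail":
            print("FAILED:", test)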
@@ -1350,7 +1355,8 @@ def main():
 
     try:
         os.makedirs(args.result_dir, exist_ok=True)
-        res = run_tests(pyb, tests, args, args.result_dir, args.jobs)
+        test_results, testcase_count = run_tests(pyb, tests, args, args.result_dir, args.jobs)
+        res = create_test_report(args, test_results, testcase_count)
     finally:
         if pyb:
             pyb.close()
