Commit ee42f83 (1 parent: b927eb7)

when we hit a timeout, consider the data we have and also the tags

1 file changed: +70 -11 lines


graalpython/com.oracle.graal.python.test/src/python_unittests.py

Lines changed: 70 additions & 11 deletions
@@ -50,7 +50,7 @@
 from multiprocessing import Pool, TimeoutError
 from pprint import pformat
 
-
+
 import argparse
 import sys
 from time import gmtime, strftime
@@ -67,10 +67,12 @@
 HTML_RESULTS_NAME = "{}.html".format(_BASE_NAME)
 LATEST_HTML_NAME = "latest.html"
 
+TIMEOUT_LINE = "\nTEST TIMED OUT WITH GRAAL PYTHON RUNNER"
+
 HR = "".join(['-' for _ in range(120)])
 
 PTRN_ERROR = re.compile(r'^(?P<error>[A-Z][a-z][a-zA-Z]+):(?P<message>.*)$')
-PTRN_UNITTEST = re.compile(r'^#### running: graalpython/lib-python/3/test/(?P<unittest>[\w.]+).*$', re.DOTALL)
+PTRN_UNITTEST = re.compile(r'^#### running: (?P<unittest_path>graalpython/lib-python/3/test/(?P<unittest>[\w.]+)).*$', re.DOTALL)
 PTRN_NUM_TESTS = re.compile(r'^Ran (?P<num_tests>\d+) test.*$')
 PTRN_FAILED = re.compile(
     r'^FAILED \((failures=(?P<failures>\d+))?(, )?(errors=(?P<errors>\d+))?(, )?(skipped=(?P<skipped>\d+))?\)$')
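
The widened PTRN_UNITTEST pattern keeps the old unittest group and additionally captures the whole relative path as unittest_path, which the timeout handling further down relies on. A minimal sketch of what the two groups yield, assuming a runner line of roughly this shape (the sample line is illustrative, not taken from the commit):

import re

PTRN_UNITTEST = re.compile(r'^#### running: (?P<unittest_path>graalpython/lib-python/3/test/(?P<unittest>[\w.]+)).*$', re.DOTALL)

line = "#### running: graalpython/lib-python/3/test/test_math.py with flags ..."  # illustrative
match = re.match(PTRN_UNITTEST, line)
if match:
    print(match.group('unittest'))       # test_math.py
    print(match.group('unittest_path'))  # graalpython/lib-python/3/test/test_math.py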
@@ -149,6 +151,8 @@ def _run_cmd(cmd, timeout=TIMEOUT, capture_on_failure=True):
     cmd_string = ' '.join(cmd)
     log("[EXEC] starting '{}' ...".format(cmd_string))
 
+    expired = False
+
     start_time = time.monotonic()
     # os.setsid is used to create a process group, to be able to call os.killpg upon timeout
     proc = subprocess.Popen(cmd, preexec_fn=os.setsid, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
@@ -161,12 +165,16 @@
         msg = "TimeoutExpired: {:.3f}s".format(delta)
         tail = get_tail(output.decode('utf-8', 'ignore'))
         log("[ERR] timeout '{}' after {:.3f}s, killing process group {}, last lines of output:\n{}\n{}", cmd_string, delta, proc.pid, tail, HR)
+        expired = True
     else:
         delta = time.monotonic() - start_time
         log("[EXEC] finished '{}' with exit code {} in {:.3f}s", cmd_string, proc.returncode, delta)
         msg = "Finished: {:.3f}s".format(delta)
-
-    return proc.returncode == 0, output.decode("utf-8", "ignore"), msg
+
+    output = output.decode("utf-8", "ignore")
+    if expired:
+        output += TIMEOUT_LINE
+    return proc.returncode == 0, output, msg
 
 
 def scp(results_file_path, destination_path, destination_name=None):
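
With the new expired flag, _run_cmd marks a timed-out run by appending TIMEOUT_LINE to the captured output instead of discarding it. Because the constant starts with "\n", the marker lands on its own line once the output is split, so the strip()-based comparison in process_output can find it. A minimal round-trip sketch, with an illustrative output string:

TIMEOUT_LINE = "\nTEST TIMED OUT WITH GRAAL PYTHON RUNNER"

output = "test_feature (__main__.MyTests) ... ok"  # illustrative captured test output
output += TIMEOUT_LINE                             # what _run_cmd does when the timeout expired

for line in output.split("\n"):
    if line.strip() == TIMEOUT_LINE.strip():
        print("timeout marker found; salvage the per-test results gathered so far")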
@@ -200,16 +208,16 @@ def run_unittests(unittests, timeout, with_cpython=False):
 
     start_time = time.monotonic()
     pool = Pool(processes=(os.cpu_count() // 4) or 1)  # to account for hyperthreading and some additional overhead
-
+
     out = []
     def callback(result):
         out.append(result)
         log("[PROGRESS] {} / {}: \t {:.1f}%", len(out), num_unittests, len(out) * 100 / num_unittests)
-
+
     # schedule all unittest runs
     for ut in unittests:
         pool.apply_async(_run_unittest, args=(ut, timeout, with_cpython), callback=callback)
-
+
     pool.close()
     pool.join()
     pool.terminate()
@@ -336,6 +344,7 @@ def process_output(output_lines):
     if isinstance(output_lines, str):
         output_lines = output_lines.split("\n")
 
+    current_unittest_path = None
     unittests = []
     # stats tracked per unittest
     unittest_tests = defaultdict(list)
@@ -346,6 +355,7 @@
     for line in output_lines:
         match = re.match(PTRN_UNITTEST, line)
         if match:
+            current_unittest_path = match.group('unittest_path')
             unittest = match.group('unittest')
             unittests.append(unittest)
             unittest_tests.clear()
@@ -415,6 +425,55 @@
             stats[unittests[-1]].num_skipped = int(skipped) if skipped else 0
             continue
 
+        if line.strip() == TIMEOUT_LINE.strip():
+            if current_unittest_path is None or len(unittests) == 0:
+                # we timed out here before even running something
+                continue
+            ran_tests = {}
+            fails = 0
+            errs = 0
+            ok = 0
+            skip = 0
+            for test, status in unittest_tests.items():
+                status = " ".join(status).lower()
+                ran_tests[test.strip()] = status
+                if "skipped" in status:
+                    skip += 1
+                elif "fail" in status:
+                    fails += 1
+                elif "ok" in status:
+                    ok += 1
+                else:
+                    errs += 1
+
+            tagfile = ".".join([os.path.splitext(unittests[-1])[0], "txt"])
+            prefix = os.path.splitext(current_unittest_path)[0].replace("/", ".")
+            import glob
+            candidates = glob.glob("**/" + tagfile, recursive=True)
+            for candidate in candidates:
+                with open(candidate) as f:
+                    for tagline in f.readlines():
+                        tagline = tagline.replace(prefix, "__main__")  # account for the different runner used when tagging vs. here
+                        tagline = tagline.replace("*", "").strip()
+                        tstcls, tst = tagline.rsplit(".", 1)
+                        test = "{} ({})".format(tst, tstcls)
+                        if test not in ran_tests:
+                            ran_tests[test] = "ok"
+                            # count the tagged test we didn't get to as an additional passing test
+                            ok += 1
+                        else:
+                            status = ran_tests[test]
+                            if "error" in status or "fail" in status:
+                                # interesting: it's tagged but failed here
+                                log("{} did not pass here but is tagged as passing", test)
+
+            stats[unittests[-1]].num_tests = ok + fails + errs + skip
+            stats[unittests[-1]].num_fails = fails
+            stats[unittests[-1]].num_errors = errs
+            stats[unittests[-1]].num_skipped = skip
+            unittest_tests.clear()
+            continue
+
     return unittests, error_messages, java_exceptions, stats
 

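The tag-file pass above rewrites every tag entry into the same "test (Class)" key format used for the tests that actually ran: the module prefix derived from unittest_path is swapped for __main__ (the tagged runs go through a different runner), the leading '*' is stripped, and the rest is split into class path and test name. A minimal sketch, assuming an illustrative path and tag entry (the real tag-file contents are not shown in this commit):

import os

current_unittest_path = "graalpython/lib-python/3/test/test_foo.py"      # illustrative
tagline = "*graalpython.lib-python.3.test.test_foo.FooTests.test_bar\n"  # illustrative tag entry

prefix = os.path.splitext(current_unittest_path)[0].replace("/", ".")
tagline = tagline.replace(prefix, "__main__")  # align the tagged runner's module name with this runner's
tagline = tagline.replace("*", "").strip()
tstcls, tst = tagline.rsplit(".", 1)
print("{} ({})".format(tst, tstcls))  # -> test_bar (__main__.FooTests)
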
@@ -811,7 +870,7 @@ def format_val(row, k):
         '<b>{}</b>'.format(r[Col.UNITTEST])
         for r in rows if r[Col.NUM_ERRORS] == -1
     ])
-
+
     usecase_scores = dict()
    for usecase_name, usecase_modules in USE_CASE_GROUPS.items():
         score_sum = 0
@@ -821,9 +880,9 @@
         if r[Col.NUM_PASSES] > 0 and r[Col.NUM_TESTS] > 0:
             score_sum += r[Col.NUM_PASSES] / r[Col.NUM_TESTS]
         usecase_scores[usecase_name] = score_sum / len(usecase_modules)
-
-
-    use_case_stats_info = ul("<b>Summary per Use Case</b>",
+
+
+    use_case_stats_info = ul("<b>Summary per Use Case</b>",
         [ grid((progress_bar(avg_score * 100, color="info"), 3), '<b>{}</b>'.format(usecase_name)) +
           grid(", ".join(USE_CASE_GROUPS[usecase_name])) for usecase_name, avg_score in usecase_scores.items()])
