Commit d0232d2

Add test framework logging function. NFC (#25164)
Followup to #25081
1 parent f5a2539 commit d0232d2
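
For reference, the helper this commit introduces is a one-line wrapper over print that routes messages to stderr. A minimal sketch of how test framework code calls it after this change (the report_progress function below is hypothetical, for illustration only):

    from common import errlog

    def report_progress(done, total):
      # Internal framework logging goes to stderr, keeping stdout free
      # for the output of the test programs themselves.
      errlog(f'[{done}/{total}] tests complete')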

3 files changed: +41 -31 lines changed

test/common.py
Lines changed: 12 additions & 4 deletions

@@ -155,6 +155,14 @@ def configure(data_dir):
 requires_network = unittest.skipIf(os.getenv('EMTEST_SKIP_NETWORK_TESTS'), 'This test requires network access')
 
 
+def errlog(*args):
+  """Shorthand for print with file=sys.stderr
+
+  Use this for all internal test framework logging..
+  """
+  print(*args, file=sys.stderr)
+
+
 def load_previous_test_run_results():
   try:
     return json.load(open(PREVIOUS_TEST_RUN_RESULTS_FILE))
@@ -1420,9 +1428,9 @@ def tearDown(self):
     left_over_files = set(temp_files_after_run) - set(self.temp_files_before_run)
     left_over_files = [f for f in left_over_files if not any(f.startswith(p) for p in ignorable_file_prefixes)]
     if len(left_over_files):
-      print('ERROR: After running test, there are ' + str(len(left_over_files)) + ' new temporary files/directories left behind:', file=sys.stderr)
+      errlog('ERROR: After running test, there are ' + str(len(left_over_files)) + ' new temporary files/directories left behind:')
       for f in left_over_files:
-        print('leaked file: ' + f, file=sys.stderr)
+        errlog('leaked file: ', f)
       self.fail('Test leaked ' + str(len(left_over_files)) + ' temporary files!')
 
   def get_setting(self, key, default=None):
@@ -1854,15 +1862,15 @@ def get_library(self, name, generated_libs, configure=['sh', './configure'], #
     cache_name = ''.join([(c if c in valid_chars else '_') for c in cache_name])
 
     if not force_rebuild and self.library_cache.get(cache_name):
-      print('<load %s from cache> ' % cache_name, file=sys.stderr)
+      errlog('<load %s from cache> ' % cache_name)
       generated_libs = []
       for basename, contents in self.library_cache[cache_name]:
        bc_file = os.path.join(build_dir, cache_name + '_' + basename)
        write_binary(bc_file, contents)
        generated_libs.append(bc_file)
       return generated_libs
 
-    print(f'<building and saving {cache_name} into cache>', file=sys.stderr)
+    errlog(f'<building and saving {cache_name} into cache>')
     if configure and configure_args:
       # Make to copy to avoid mutating default param
       configure = list(configure)
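
One behavioral nuance in the tearDown hunk above: the old code concatenated the filename ('leaked file: ' + f), while the new call passes two arguments (errlog('leaked file: ', f)), and print joins multiple arguments with a single space. A quick sketch of the difference, assuming the errlog defined in this commit:

    errlog('leaked file: ' + 'a.tmp')  # leaked file: a.tmp
    errlog('leaked file: ', 'a.tmp')   # leaked file:  a.tmp  (print adds a separator space)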

test/parallel_testsuite.py
Lines changed: 13 additions & 12 deletions

@@ -14,6 +14,7 @@
 from tools import utils
 
 import common
+from common import errlog
 
 from tools.shared import cap_max_workers_in_pool
 from tools.utils import WINDOWS
@@ -109,7 +110,7 @@ def run(self, result):
     tests = list(self if self.failing_and_slow_first else self.reversed_tests())
     contains_browser_test = any(test.is_browser_test() for test in tests)
     use_cores = cap_max_workers_in_pool(min(self.max_cores, len(tests), num_cores()), contains_browser_test)
-    print('Using %s parallel test processes' % use_cores, file=sys.stderr)
+    errlog(f'Using {use_cores} parallel test processes')
     with multiprocessing.Manager() as manager:
       # Give each worker a unique ID.
       worker_id_counter = manager.Value('i', 0)  # 'i' for integer, starting at 0
@@ -124,7 +125,7 @@ def run(self, result):
       # or the progress bar.
       failfast_event = progress_counter = lock = None
       if self.failfast:
-        print('The version of python being used is not compatible with --failfast')
+        errlog('The version of python being used is not compatible with --failfast')
         sys.exit(1)
       else:
         failfast_event = manager.Event() if self.failfast else None
@@ -137,7 +138,7 @@ def run(self, result):
       num_tear_downs = sum([pool.apply(tear_down, ()) for i in range(use_cores)])
       # Assert the assumed behavior above hasn't changed.
       if num_tear_downs != use_cores:
-        print(f'Expected {use_cores} teardowns, got {num_tear_downs}')
+        errlog(f'Expected {use_cores} teardowns, got {num_tear_downs}')
 
       # Filter out the None results which can occur in failfast mode.
       if self.failfast:
@@ -181,9 +182,9 @@ def reversed_tests(self):
     return sorted(self, key=str, reverse=True)
 
   def combine_results(self, result, buffered_results):
-    print('', file=sys.stderr)
-    print('DONE: combining results on main thread', file=sys.stderr)
-    print('', file=sys.stderr)
+    errlog('')
+    errlog('DONE: combining results on main thread')
+    errlog('')
     # Sort the results back into alphabetical order. Running the tests in
     # parallel causes mis-orderings, this makes the results more readable.
     results = sorted(buffered_results, key=lambda res: str(res.test))
@@ -295,32 +296,32 @@ def compute_progress(self):
     return val
 
   def addSuccess(self, test):
-    print(f'{self.compute_progress()}{test} ... ok ({self.calculateElapsed():.2f}s)', file=sys.stderr)
+    errlog(f'{self.compute_progress()}{test} ... ok ({self.calculateElapsed():.2f}s)')
     self.buffered_result = BufferedTestSuccess(test)
     self.test_result = 'success'
 
   def addExpectedFailure(self, test, err):
-    print(f'{self.compute_progress()}{test} ... expected failure ({self.calculateElapsed():.2f}s)', file=sys.stderr)
+    errlog(f'{self.compute_progress()}{test} ... expected failure ({self.calculateElapsed():.2f}s)')
     self.buffered_result = BufferedTestExpectedFailure(test, err)
     self.test_result = 'expected failure'
 
   def addUnexpectedSuccess(self, test):
-    print(f'{self.compute_progress()}{test} ... unexpected success ({self.calculateElapsed():.2f}s)', file=sys.stderr)
+    errlog(f'{self.compute_progress()}{test} ... unexpected success ({self.calculateElapsed():.2f}s)')
     self.buffered_result = BufferedTestUnexpectedSuccess(test)
     self.test_result = 'unexpected success'
 
   def addSkip(self, test, reason):
-    print(f"{self.compute_progress()}{test} ... skipped '{reason}'", file=sys.stderr)
+    errlog(f"{self.compute_progress()}{test} ... skipped '{reason}'")
     self.buffered_result = BufferedTestSkip(test, reason)
     self.test_result = 'skipped'
 
   def addFailure(self, test, err):
-    print(f'{self.compute_progress()}{test} ... FAIL', file=sys.stderr)
+    errlog(f'{self.compute_progress()}{test} ... FAIL')
     self.buffered_result = BufferedTestFailure(test, err)
     self.test_result = 'failed'
 
   def addError(self, test, err):
-    print(f'{self.compute_progress()}{test} ... ERROR', file=sys.stderr)
+    errlog(f'{self.compute_progress()}{test} ... ERROR')
     self.buffered_result = BufferedTestError(test, err)
     self.test_result = 'errored'
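
Note that errlog('') above and the bare errlog() in the runner.py summary below both emit just a blank line on stderr, since print with no positional arguments writes only the line terminator:

    errlog('')  # blank line on stderr
    errlog()    # same effect: print() with no args emits only the newline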

test/runner.py
Lines changed: 16 additions & 15 deletions

@@ -40,6 +40,7 @@
 import jsrun
 import parallel_testsuite
 import common
+from common import errlog
 from tools import shared, config, utils
 
 
@@ -108,11 +109,11 @@
 
 def check_js_engines():
   if not all(jsrun.check_engine(e) for e in config.JS_ENGINES):
-    print('Not all the JS engines in JS_ENGINES appears to work.')
+    errlog('Not all the JS engines in JS_ENGINES appears to work.')
     sys.exit(1)
 
   if common.EMTEST_ALL_ENGINES:
-    print('(using ALL js engines)')
+    errlog('(using ALL js engines)')
 
 
 def get_and_import_modules():
@@ -166,7 +167,7 @@ def tests_with_expanded_wildcards(args, all_tests):
     else:
      new_args += [arg]
   if not new_args and args:
-    print('No tests found to run in set: ' + str(args))
+    errlog('No tests found to run in set: ' + str(args))
     sys.exit(1)
   return new_args
 
@@ -189,7 +190,7 @@ def skip_requested_tests(args, modules):
     if arg.startswith('skip:'):
       which = arg.split('skip:')[1]
       os.environ['EMTEST_SKIP'] = os.environ['EMTEST_SKIP'] + ' ' + which
-      print('will skip "%s"' % which, file=sys.stderr)
+      errlog(f'will skip "{which}"')
       skip_test(which, modules)
       args[i] = None
   return [a for a in args if a is not None]
@@ -430,7 +431,7 @@ def run_tests(options, suites):
   total_core_time = 0
   run_start_time = time.perf_counter()
   for mod_name, suite in suites:
-    print('Running %s: (%s tests)' % (mod_name, suite.countTestCases()), file=sys.stderr)
+    errlog('Running %s: (%s tests)' % (mod_name, suite.countTestCases()))
     res = testRunner.run(suite)
     msg = ('%s: %s run, %s errors, %s failures, %s skipped' %
            (mod_name, res.testsRun, len(res.errors), len(res.failures), len(res.skipped)))
@@ -440,14 +441,14 @@ def run_tests(options, suites):
     total_core_time += res.core_time
   total_run_time = time.perf_counter() - run_start_time
   if total_core_time > 0:
-    print('Total core time: %.3fs. Wallclock time: %.3fs. Parallelization: %.2fx.' % (total_core_time, total_run_time, total_core_time / total_run_time), file=sys.stderr)
+    errlog('Total core time: %.3fs. Wallclock time: %.3fs. Parallelization: %.2fx.' % (total_core_time, total_run_time, total_core_time / total_run_time))
 
   if len(resultMessages) > 1:
-    print('====================')
-    print()
-    print('TEST SUMMARY')
+    errlog('====================')
+    errlog()
+    errlog('TEST SUMMARY')
     for msg in resultMessages:
-      print(' ' + msg)
+      errlog(' ' + msg)
 
   if options.bell:
     sys.stdout.write('\a')
@@ -518,12 +519,12 @@ def main():
   # Some options make sense being set in the environment, others not-so-much.
   # TODO(sbc): eventually just make these command-line only.
   if os.getenv('EMTEST_SAVE_DIR'):
-    print('ERROR: use --save-dir instead of EMTEST_SAVE_DIR=1, and --no-clean instead of EMTEST_SAVE_DIR=2')
+    errlog('ERROR: use --save-dir instead of EMTEST_SAVE_DIR=1, and --no-clean instead of EMTEST_SAVE_DIR=2')
     return 1
   if os.getenv('EMTEST_REBASELINE'):
-    print('Prefer --rebaseline over setting $EMTEST_REBASELINE')
+    errlog('Prefer --rebaseline over setting $EMTEST_REBASELINE')
   if os.getenv('EMTEST_VERBOSE'):
-    print('Prefer --verbose over setting $EMTEST_VERBOSE')
+    errlog('Prefer --verbose over setting $EMTEST_VERBOSE')
 
   # We set the environments variables here and then call configure,
   # to apply them. This means the python's multiprocessing child
@@ -577,7 +578,7 @@ def prepend_default(arg):
   tests = args_for_random_tests(tests, modules)
 
   if not tests:
-    print('ERROR: no tests to run')
+    errlog('ERROR: no tests to run')
     return 1
 
   if not options.start_at and options._continue:
@@ -586,7 +587,7 @@ def prepend_default(arg):
 
   suites, unmatched_tests = load_test_suites(tests, modules, options)
   if unmatched_tests:
-    print('ERROR: could not find the following tests: ' + ' '.join(unmatched_tests))
+    errlog('ERROR: could not find the following tests: ' + ' '.join(unmatched_tests))
     return 1
 
   num_failures = run_tests(options, suites)
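
Centralizing logging in errlog also gives the framework a single point to adjust later. A hypothetical extension, not part of this commit, that forces a flush so lines from parallel test workers are less likely to interleave mid-buffer:

    import sys

    def errlog(*args):
      # Hypothetical variant: flush immediately after each message.
      print(*args, file=sys.stderr, flush=True)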
