
Commit bb821d5

Merge pull request #113 from bridadan/remove_circular_dep_htrun
Remove circular dependency on htrun
2 parents f622535 + 658b429 commit bb821d5
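
As the diff below shows, the cycle is broken by no longer importing greentea's runner entry points back out of mbed_os_tools (the package that carries the htrun dependency): run_host_test now comes from the package-local mbed_test_api module, and run_test_thread is moved into mbed_greentea_cli.py itself. In sketch form (simplified from the diff, not a literal excerpt):

    # Before: helpers imported from mbed_os_tools, pulling the htrun dependency back in
    from mbed_os_tools.test.mbed_test_api import run_host_test
    from mbed_os_tools.test.mbed_greentea_cli import run_test_thread

    # After: package-local import, local definition
    from .mbed_test_api import run_host_test
    # run_test_thread is now defined directly in mbed_greentea_cli.py (see diff below)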

File tree: 5 files changed (+416, -419 lines)


packages/mbed-greentea/mbed_greentea/mbed_greentea_cli.py

Lines changed: 203 additions & 3 deletions
@@ -36,7 +36,6 @@
 from mbed_os_tools.test.mbed_test_api import (
     get_test_build_properties,
     get_test_spec,
-    run_host_test,
     log_mbed_devices_in_table,
     TEST_RESULTS,
     TEST_RESULT_OK,
@@ -67,6 +66,8 @@
 from mbed_os_tools.test.tests_spec import TestBinary
 from mbed_os_tools.test.mbed_target_info import get_platform_property
 
+from .mbed_test_api import run_host_test
+
 import mbed_os_tools.detect
 import mbed_os_tools.test.host_tests_plugins as host_tests_plugins
 from mbed_os_tools.test.mbed_greentea_cli import (
@@ -75,7 +76,6 @@
     LOCAL_HOST_TESTS_DIR,
     get_local_host_tests_dir,
     create_filtered_test_list,
-    run_test_thread,
 )
 
 LOCAL_HOST_TESTS_DIR = './test/host_tests'  # Used by mbedhtrun -e <dir>
@@ -368,6 +368,207 @@ def main():
 
     return(cli_ret)
 
+def run_test_thread(test_result_queue, test_queue, opts, mut, build, build_path, greentea_hooks):
+    test_exec_retcode = 0
+    test_platforms_match = 0
+    test_report = {}
+
+    disk = mut['mount_point']
+    port = mut['serial_port']
+    micro = mut['platform_name']
+    program_cycle_s = get_platform_property(micro, "program_cycle_s")
+    forced_reset_timeout = get_platform_property(micro, "forced_reset_timeout")
+    copy_method = get_platform_property(micro, "copy_method")
+    reset_method = get_platform_property(micro, "reset_method")
+
+    while not test_queue.empty():
+        try:
+            test = test_queue.get(False)
+        except Exception as e:
+            gt_logger.gt_log_err(str(e))
+            break
+
+        test_result = 'SKIPPED'
+
+        if opts.copy_method:
+            copy_method = opts.copy_method
+        elif not copy_method:
+            copy_method = 'shell'
+
+        if opts.reset_method:
+            reset_method = opts.reset_method
+
+        verbose = opts.verbose_test_result_only
+        enum_host_tests_path = get_local_host_tests_dir(opts.enum_host_tests)
+
+        test_platforms_match += 1
+        host_test_result = run_host_test(test['image_path'],
+                                         disk,
+                                         port,
+                                         build_path,
+                                         mut['target_id'],
+                                         micro=micro,
+                                         copy_method=copy_method,
+                                         reset=reset_method,
+                                         program_cycle_s=program_cycle_s,
+                                         forced_reset_timeout=forced_reset_timeout,
+                                         digest_source=opts.digest_source,
+                                         json_test_cfg=opts.json_test_configuration,
+                                         enum_host_tests_path=enum_host_tests_path,
+                                         global_resource_mgr=opts.global_resource_mgr,
+                                         fast_model_connection=opts.fast_model_connection,
+                                         num_sync_packtes=opts.num_sync_packtes,
+                                         tags=opts.tags,
+                                         retry_count=opts.retry_count,
+                                         polling_timeout=opts.polling_timeout,
+                                         verbose=verbose)
+
+        # Some error in htrun, abort test execution
+        if isinstance(host_test_result, int):
+            # int(host_test_result) > 0 - Call to mbedhtrun failed
+            # int(host_test_result) < 0 - Something went wrong while executing mbedhtrun
+            gt_logger.gt_log_err("run_test_thread.run_host_test() failed, aborting...")
+            break
+
+        # If execution was successful 'run_host_test' returns a tuple with results
+        single_test_result, single_test_output, single_testduration, single_timeout, result_test_cases, test_cases_summary, memory_metrics = host_test_result
+        test_result = single_test_result
+
+        build_path_abs = os.path.abspath(build_path)
+
+        if single_test_result != TEST_RESULT_OK:
+            test_exec_retcode += 1
+
+        if single_test_result in [TEST_RESULT_OK, TEST_RESULT_FAIL]:
+            if greentea_hooks:
+                # Test was successful
+                # We can execute test hook just after test is finished ('hook_test_end')
+                format = {
+                    "test_name": test['test_bin'],
+                    "test_bin_name": os.path.basename(test['image_path']),
+                    "image_path": test['image_path'],
+                    "build_path": build_path,
+                    "build_path_abs": build_path_abs,
+                    "build_name": build,
+                }
+                greentea_hooks.run_hook_ext('hook_test_end', format)
+
+        # Update report for optional reporting feature
+        test_suite_name = test['test_bin'].lower()
+        if build not in test_report:
+            test_report[build] = {}
+
+        if test_suite_name not in test_report[build]:
+            test_report[build][test_suite_name] = {}
+
+        if not test_cases_summary and not result_test_cases:
+            gt_logger.gt_log_warn("test case summary event not found")
+            gt_logger.gt_log_tab("no test case report present, assuming test suite to be a single test case!")
+
+            # We will map test suite result to test case to
+            # output valid test case in report
+
+            # Generate "artificial" test case name from test suite name
+            # E.g:
+            # mbed-drivers-test-dev_null -> dev_null
+            test_case_name = test_suite_name
+            test_str_idx = test_suite_name.find("-test-")
+            if test_str_idx != -1:
+                test_case_name = test_case_name[test_str_idx + 6:]
+
+            gt_logger.gt_log_tab("test suite: %s" % test_suite_name)
+            gt_logger.gt_log_tab("test case: %s" % test_case_name)
+
+            # Test case result: OK, FAIL or ERROR
+            tc_result_text = {
+                "OK": "OK",
+                "FAIL": "FAIL",
+            }.get(single_test_result, 'ERROR')
+
+            # Test case integer success code OK, FAIL and ERROR: (0, >0, <0)
+            tc_result = {
+                "OK": 0,
+                "FAIL": 1024,
+                "ERROR": -1024,
+            }.get(tc_result_text, '-2048')
+
+            # Test case passes and failures: (1 pass, 0 failures) or (0 passes, 1 failure)
+            tc_passed, tc_failed = {
+                0: (1, 0),
+            }.get(tc_result, (0, 1))
+
+            # Test case report build for whole binary
+            # Add test case made from test suite result to test case report
+            result_test_cases = {
+                test_case_name: {
+                    'duration': single_testduration,
+                    'time_start': 0.0,
+                    'time_end': 0.0,
+                    'utest_log': single_test_output.splitlines(),
+                    'result_text': tc_result_text,
+                    'passed': tc_passed,
+                    'failed': tc_failed,
+                    'result': tc_result,
+                }
+            }
+
+            # Test summary build for whole binary (as a test case)
+            test_cases_summary = (tc_passed, tc_failed, )
+
+        gt_logger.gt_log("test on hardware with target id: %s" % (mut['target_id']))
+        gt_logger.gt_log("test suite '%s' %s %s in %.2f sec" % (test['test_bin'],
+                                                                '.' * (80 - len(test['test_bin'])),
+                                                                test_result,
+                                                                single_testduration))
+
+        # Test report build for whole binary
+        test_report[build][test_suite_name]['single_test_result'] = single_test_result
+        test_report[build][test_suite_name]['single_test_output'] = single_test_output
+        test_report[build][test_suite_name]['elapsed_time'] = single_testduration
+        test_report[build][test_suite_name]['platform_name'] = micro
+        test_report[build][test_suite_name]['copy_method'] = copy_method
+        test_report[build][test_suite_name]['testcase_result'] = result_test_cases
+        test_report[build][test_suite_name]['memory_metrics'] = memory_metrics
+
+        test_report[build][test_suite_name]['build_path'] = build_path
+        test_report[build][test_suite_name]['build_path_abs'] = build_path_abs
+        test_report[build][test_suite_name]['image_path'] = test['image_path']
+        test_report[build][test_suite_name]['test_bin_name'] = os.path.basename(test['image_path'])
+
+        passes_cnt, failures_cnt = 0, 0
+        for tc_name in sorted(result_test_cases.keys()):
+            gt_logger.gt_log_tab("test case: '%s' %s %s in %.2f sec" % (tc_name,
+                                                                        '.' * (80 - len(tc_name)),
+                                                                        result_test_cases[tc_name].get('result_text', '_'),
+                                                                        result_test_cases[tc_name].get('duration', 0.0)))
+            if result_test_cases[tc_name].get('result_text', '_') == 'OK':
+                passes_cnt += 1
+            else:
+                failures_cnt += 1
+
+        if test_cases_summary:
+            passes, failures = test_cases_summary
+            gt_logger.gt_log("test case summary: %d pass%s, %d failur%s" % (passes,
+                                                                            '' if passes == 1 else 'es',
+                                                                            failures,
+                                                                            'e' if failures == 1 else 'es'))
+            if passes != passes_cnt or failures != failures_cnt:
+                gt_logger.gt_log_err("utest test case summary mismatch: utest reported passes and failures miscount!")
+                gt_logger.gt_log_tab("reported by utest: passes = %d, failures %d)" % (passes, failures))
+                gt_logger.gt_log_tab("test case result count: passes = %d, failures %d)" % (passes_cnt, failures_cnt))
+
+        if single_test_result != 'OK' and not verbose and opts.report_fails:
+            # In some cases we want to print console to see why test failed
+            # even if we are not in verbose mode
+            gt_logger.gt_log_tab("test failed, reporting console output (specified with --report-fails option)")
+            print
+            print(single_test_output)
+
+    #greentea_release_target_id(mut['target_id'], gt_instance_uuid)
+    test_result_queue.put({'test_platforms_match': test_platforms_match,
+                           'test_exec_retcode': test_exec_retcode,
+                           'test_report': test_report})
+    return
 
 def main_cli(opts, args, gt_instance_uuid=None):
     """! This is main CLI function with all command line parameters
@@ -447,7 +648,6 @@ def get_parallel_value(value):
                                          None,
                                          None,
                                          None,
-                                         hooks=greentea_hooks,
                                          digest_source=opts.digest_source,
                                          enum_host_tests_path=enum_host_tests_path,
                                          verbose=verbose)
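
For orientation, a minimal sketch of how a caller might drive the relocated run_test_thread: one worker thread per device under test, with queues carrying tests in and a summary dict out. The names tests, muts, opts, build, build_path and greentea_hooks are assumed inputs for illustration, not part of this commit:

    from threading import Thread
    from queue import Queue  # Python 3 stdlib; 'Queue' module on Python 2

    test_queue = Queue()
    test_result_queue = Queue()
    for test in tests:           # 'tests': list of dicts with 'test_bin' / 'image_path'
        test_queue.put(test)

    workers = [Thread(target=run_test_thread,
                      args=(test_result_queue, test_queue, opts, mut,
                            build, build_path, greentea_hooks))
               for mut in muts]  # 'muts': devices under test
    for w in workers:
        w.daemon = True
        w.start()
    for w in workers:
        w.join()

    # Each worker pushes exactly one summary dict before returning
    while not test_result_queue.empty():
        summary = test_result_queue.get()
        print(summary['test_exec_retcode'], summary['test_platforms_match'])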
