From 0915aa36958c20db1ec90b6bb0f8c4b6a33eaaed Mon Sep 17 00:00:00 2001 From: Anas Nashif Date: Thu, 7 Nov 2024 17:15:38 -0500 Subject: [PATCH 01/14] twister: count filtered testcases as such, not as skipped When a suite is filtered, its testcases are also filtered and not skipped. Signed-off-by: Anas Nashif --- scripts/pylib/twister/twisterlib/runner.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/scripts/pylib/twister/twisterlib/runner.py b/scripts/pylib/twister/twisterlib/runner.py index 201a869a82a9f..55bf2daa2350f 100644 --- a/scripts/pylib/twister/twisterlib/runner.py +++ b/scripts/pylib/twister/twisterlib/runner.py @@ -877,8 +877,8 @@ def process(self, pipeline, done, message, lock, results): logger.debug("filtering %s" % self.instance.name) self.instance.status = TwisterStatus.FILTER self.instance.reason = "runtime filter" - results.skipped_runtime_increment() - self.instance.add_missing_case_status(TwisterStatus.SKIP) + results.filtered_cases_increment() + self.instance.add_missing_case_status(TwisterStatus.FILTER) next_op = 'report' else: next_op = 'cmake' @@ -911,7 +911,7 @@ def process(self, pipeline, done, message, lock, results): self.instance.status = TwisterStatus.FILTER self.instance.reason = "runtime filter" results.skipped_runtime_increment() - self.instance.add_missing_case_status(TwisterStatus.SKIP) + self.instance.add_missing_case_status(TwisterStatus.FILTER) next_op = 'report' else: next_op = 'build' From 397f9a4c4bab4bb78adcc1f8286d83de9d5e6374 Mon Sep 17 00:00:00 2001 From: Anas Nashif Date: Thu, 7 Nov 2024 17:18:27 -0500 Subject: [PATCH 02/14] twister: use consistent language for test configurations In the summary, use "configurations" instead of instances. 
Signed-off-by: Anas Nashif --- scripts/pylib/twister/twisterlib/runner.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/pylib/twister/twisterlib/runner.py b/scripts/pylib/twister/twisterlib/runner.py index 55bf2daa2350f..e9529604b3ff9 100644 --- a/scripts/pylib/twister/twisterlib/runner.py +++ b/scripts/pylib/twister/twisterlib/runner.py @@ -1696,7 +1696,7 @@ def update_counting_before_pipeline(self): self.results.error_increment() def show_brief(self): - logger.info("%d test scenarios (%d test instances) selected, " + logger.info("%d test scenarios (%d configurations) selected, " "%d configurations filtered (%d by static filter, %d at runtime)." % (len(self.suites), len(self.instances), self.results.skipped_configs, From 2679d1776cd15bb2d42eb744fb1436ef4cd4266f Mon Sep 17 00:00:00 2001 From: Anas Nashif Date: Thu, 7 Nov 2024 17:28:24 -0500 Subject: [PATCH 03/14] twister: do not report filtered cases in summary It is confusing to report filtered testcases as testcases that were selected but not executed. If they are filtered, then they should not be considered as selected. Signed-off-by: Anas Nashif --- scripts/pylib/twister/twisterlib/reports.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/scripts/pylib/twister/twisterlib/reports.py b/scripts/pylib/twister/twisterlib/reports.py index a1dbe68b5a790..de0ae1f9b346a 100644 --- a/scripts/pylib/twister/twisterlib/reports.py +++ b/scripts/pylib/twister/twisterlib/reports.py @@ -615,12 +615,11 @@ def summary(self, results, ignore_unrecognized_sections, duration): f'{", " + str(results.none_cases) + " without a status" if results.none_cases else ""}' f' on {len(self.filtered_platforms)} out of total {total_platforms} platforms ({platform_rate:02.2f}%).' 
) - if results.skipped_cases or results.filtered_cases or results.notrun_cases: + if results.skipped_cases or results.notrun_cases: logger.info( - f'{results.skipped_cases + results.filtered_cases} selected test cases not executed:' \ + f'{results.skipped_cases} selected test cases not executed:' \ f'{" " + str(results.skipped_cases) + " skipped" if results.skipped_cases else ""}' \ - f'{(", " if results.skipped_cases else " ") + str(results.filtered_cases) + " filtered" if results.filtered_cases else ""}' \ - f'{(", " if results.skipped_cases or results.filtered_cases else " ") + str(results.notrun_cases) + " not run (built only)" if results.notrun_cases else ""}' \ + f'{(", " if results.skipped_cases else " ") + str(results.notrun_cases) + " not run (built only)" if results.notrun_cases else ""}' \ f'.' ) From 42d067e42b23c1e0ea9e18cbf8565ab723e50c9c Mon Sep 17 00:00:00 2001 From: Anas Nashif Date: Thu, 7 Nov 2024 18:46:24 -0500 Subject: [PATCH 04/14] twister: stats: not run tests are also part not executed summary Add notrun tests to the count of non-executed tests. Signed-off-by: Anas Nashif --- scripts/pylib/twister/twisterlib/reports.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/pylib/twister/twisterlib/reports.py b/scripts/pylib/twister/twisterlib/reports.py index de0ae1f9b346a..670d8d2e8ea8d 100644 --- a/scripts/pylib/twister/twisterlib/reports.py +++ b/scripts/pylib/twister/twisterlib/reports.py @@ -617,7 +617,7 @@ def summary(self, results, ignore_unrecognized_sections, duration): ) if results.skipped_cases or results.notrun_cases: logger.info( - f'{results.skipped_cases} selected test cases not executed:' \ + f'{results.skipped_cases + results.notrun_cases} selected test cases not executed:' \ f'{" " + str(results.skipped_cases) + " skipped" if results.skipped_cases else ""}' \ f'{(", " if results.skipped_cases else " ") + str(results.notrun_cases) + " not run (built only)" if results.notrun_cases else ""}' \ f'.' 
From 1768a8b7f072299291327c8e7a14dc1b60fbf1d1 Mon Sep 17 00:00:00 2001 From: Anas Nashif Date: Wed, 30 Oct 2024 08:46:12 -0400 Subject: [PATCH 05/14] twister: remove verbose debug message about adding platforms This is very verbose and very long, sometimes getting in the way when trying to debug a problem. Signed-off-by: Anas Nashif --- scripts/pylib/twister/twisterlib/testplan.py | 1 - 1 file changed, 1 deletion(-) diff --git a/scripts/pylib/twister/twisterlib/testplan.py b/scripts/pylib/twister/twisterlib/testplan.py index 4a9f4cb86e5f8..da8eaa0a74908 100755 --- a/scripts/pylib/twister/twisterlib/testplan.py +++ b/scripts/pylib/twister/twisterlib/testplan.py @@ -439,7 +439,6 @@ def init_and_add_platforms(data, board, target, qualifier, aliases): raise Exception(f"Duplicate platform identifier {platform.name} found") if not platform.twister: return - logger.debug(f"Adding platform {platform.name} with aliases {platform.aliases}") self.platforms.append(platform) for board in known_boards.values(): From 2b9f4cd70842f6d6c297b0cb275119df919a26c9 Mon Sep 17 00:00:00 2001 From: Anas Nashif Date: Fri, 8 Nov 2024 07:25:44 -0500 Subject: [PATCH 06/14] twister: status inconsistencies are now warnings Do not report status issues as errors, very confusing and developers end up looking at the wrong thing, instead, treat those as warnings and count them and report them at the end. 
Signed-off-by: Anas Nashif --- scripts/pylib/twister/twisterlib/reports.py | 2 +- scripts/pylib/twister/twisterlib/runner.py | 24 ++++++++++++++++++--- 2 files changed, 22 insertions(+), 4 deletions(-) diff --git a/scripts/pylib/twister/twisterlib/reports.py b/scripts/pylib/twister/twisterlib/reports.py index 670d8d2e8ea8d..a0ea7defe79c7 100644 --- a/scripts/pylib/twister/twisterlib/reports.py +++ b/scripts/pylib/twister/twisterlib/reports.py @@ -596,7 +596,7 @@ def summary(self, results, ignore_unrecognized_sections, duration): f" {f'{TwisterStatus.get_color(TwisterStatus.NOTRUN)}{results.notrun}{Fore.RESET}' if results.notrun else f'{results.notrun}'} built (not run)," f" {f'{TwisterStatus.get_color(TwisterStatus.FAIL)}{results.failed}{Fore.RESET}' if results.failed else f'{results.failed}'} failed," f" {f'{TwisterStatus.get_color(TwisterStatus.ERROR)}{results.error}{Fore.RESET}' if results.error else f'{results.error}'} errored," - f" with {f'{Fore.YELLOW}{self.plan.warnings}{Fore.RESET}' if self.plan.warnings else 'no'} warnings" + f" with {f'{Fore.YELLOW}{self.plan.warnings + results.warnings}{Fore.RESET}' if (self.plan.warnings + results.warnings) else 'no'} warnings" f" in {duration:.2f} seconds." 
) diff --git a/scripts/pylib/twister/twisterlib/runner.py b/scripts/pylib/twister/twisterlib/runner.py index e9529604b3ff9..8f8ecc7e9c2a8 100644 --- a/scripts/pylib/twister/twisterlib/runner.py +++ b/scripts/pylib/twister/twisterlib/runner.py @@ -129,6 +129,7 @@ def __init__(self, total=0): self._none_cases = Value('i', 0) self._started_cases = Value('i', 0) + self._warnings = Value('i', 0) self.lock = Lock() @@ -186,6 +187,20 @@ def summary(self): print(f" └─ {'Test cases only started: ':<25}{self.started_cases:>{executed_cases_n_length}}") print("--------------------------------------------------") + @property + def warnings(self): + with self._warnings.get_lock(): + return self._warnings.value + + @warnings.setter + def warnings(self, value): + with self._warnings.get_lock(): + self._warnings.value = value + + def warnings_increment(self, value=1): + with self._warnings.get_lock(): + self._warnings.value += value + @property def cases(self): with self._cases.get_lock(): @@ -1324,15 +1339,18 @@ def _add_instance_testcases_to_status_counts(instance, results, decrement=False) # but having those statuses in this part of processing is an error. 
case TwisterStatus.NONE: results.none_cases_increment(increment_value) - logger.error(f'A None status detected in instance {instance.name},' + logger.warning(f'A None status detected in instance {instance.name},' f' test case {tc.name}.') + results.warnings_increment(1) case TwisterStatus.STARTED: results.started_cases_increment(increment_value) - logger.error(f'A started status detected in instance {instance.name},' + logger.warning(f'A started status detected in instance {instance.name},' f' test case {tc.name}.') + results.warnings_increment(1) case _: - logger.error(f'An unknown status "{tc.status}" detected in instance {instance.name},' + logger.warning(f'An unknown status "{tc.status}" detected in instance {instance.name},' f' test case {tc.name}.') + results.warnings_increment(1) def report_out(self, results): From aa3853cb46de75c3812d454b3487cf2b59b7cd91 Mon Sep 17 00:00:00 2001 From: Anas Nashif Date: Fri, 8 Nov 2024 10:19:26 -0500 Subject: [PATCH 07/14] twister: stats: 'Executed test cases' -> 'Completed test cases' Built tests are not executed, change column title. 
Signed-off-by: Anas Nashif --- scripts/pylib/twister/twisterlib/runner.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/pylib/twister/twisterlib/runner.py b/scripts/pylib/twister/twisterlib/runner.py index 8f8ecc7e9c2a8..4eabdcd7efaff 100644 --- a/scripts/pylib/twister/twisterlib/runner.py +++ b/scripts/pylib/twister/twisterlib/runner.py @@ -173,7 +173,7 @@ def summary(self): print(f"{'Total test cases: ':<18}{self.cases}") print(f"├─ {'Filtered test cases: ':<21}{self.filtered_cases:>{total_cases_n_length}}") print(f"├─ {'Skipped test cases: ':<21}{self.skipped_cases:>{total_cases_n_length}}") - print(f"└─ {'Executed test cases: ':<21}{executed_cases:>{total_cases_n_length}}") + print(f"└─ {'Selected test cases: ':<21}{executed_cases:>{total_cases_n_length}}") print(f" ├─ {'Passed test cases: ':<25}{self.passed_cases:>{executed_cases_n_length}}") print(f" ├─ {'Built only test cases: ':<25}{self.notrun_cases:>{executed_cases_n_length}}") print(f" ├─ {'Blocked test cases: ':<25}{self.blocked_cases:>{executed_cases_n_length}}") From 4dcbef349ab26709d80d3323060909ea939d268a Mon Sep 17 00:00:00 2001 From: Anas Nashif Date: Sat, 9 Nov 2024 05:51:40 -0500 Subject: [PATCH 08/14] twister: tests: adapt twister testsuite Adapt tests for new changes in twister. 
Signed-off-by: Anas Nashif --- scripts/tests/twister/test_runner.py | 10 +++++----- scripts/tests/twister_blackbox/test_platform.py | 6 +++--- scripts/tests/twister_blackbox/test_runner.py | 4 ++-- 3 files changed, 10 insertions(+), 10 deletions(-) diff --git a/scripts/tests/twister/test_runner.py b/scripts/tests/twister/test_runner.py index b6cd151d78ad0..172c8dcc10058 100644 --- a/scripts/tests/twister/test_runner.py +++ b/scripts/tests/twister/test_runner.py @@ -224,7 +224,7 @@ def test_executioncounter(capfd): 'Total test cases: 25\n' '├─ Filtered test cases: 0\n' '├─ Skipped test cases: 6\n' - '└─ Executed test cases: 19\n' + '└─ Selected test cases: 19\n' ' ├─ Passed test cases: 0\n' ' ├─ Built only test cases: 0\n' ' ├─ Blocked test cases: 0\n' @@ -914,8 +914,8 @@ def mock_getsize(filename, *args, **kwargs): {'op': 'report', 'test': mock.ANY}, TwisterStatus.FILTER, 'runtime filter', - 1, - (TwisterStatus.SKIP,) + 0, + (TwisterStatus.FILTER,) ), ( {'op': 'filter'}, @@ -1025,7 +1025,7 @@ def mock_getsize(filename, *args, **kwargs): TwisterStatus.FILTER, 'runtime filter', 1, - (TwisterStatus.SKIP,) + (TwisterStatus.FILTER,) # this is a tuple ), ( {'op': 'cmake'}, @@ -2627,7 +2627,7 @@ def test_twisterrunner_show_brief(caplog): tr.show_brief() - log = '2 test scenarios (5 test instances) selected,' \ + log = '2 test scenarios (5 configurations) selected,' \ ' 4 configurations filtered (3 by static filter, 1 at runtime).' 
assert log in caplog.text diff --git a/scripts/tests/twister_blackbox/test_platform.py b/scripts/tests/twister_blackbox/test_platform.py index 75a6cda909309..43ff3346cf8af 100644 --- a/scripts/tests/twister_blackbox/test_platform.py +++ b/scripts/tests/twister_blackbox/test_platform.py @@ -37,7 +37,7 @@ class TestPlatform: 'failed_configurations': 0, 'errored_configurations': 0, 'executed_test_cases': 8, - 'skipped_test_cases': 5, + 'skipped_test_cases': 2, 'platform_count': 3, 'executed_on_platform': 4, 'only_built': 2 @@ -58,7 +58,7 @@ class TestPlatform: 'failed_configurations': 0, 'errored_configurations': 0, 'executed_test_cases': 0, - 'skipped_test_cases': 3, + 'skipped_test_cases': 0, 'platform_count': 3, 'executed_on_platform': 0, 'only_built': 0 @@ -264,7 +264,7 @@ def test_emulation_only(self, capfd, out_path, test_path, test_platforms, expect self.loader.exec_module(self.twister_module) select_regex = r'^INFO - (?P[0-9]+) test scenarios' \ - r' \((?P[0-9]+) test instances\) selected,' \ + r' \((?P[0-9]+) configurations\) selected,' \ r' (?P[0-9]+) configurations filtered' \ r' \((?P[0-9]+) by static filter,' \ r' (?P[0-9]+) at runtime\)\.$' diff --git a/scripts/tests/twister_blackbox/test_runner.py b/scripts/tests/twister_blackbox/test_runner.py index 0ace7fb05157b..1ab50522c5ebf 100644 --- a/scripts/tests/twister_blackbox/test_runner.py +++ b/scripts/tests/twister_blackbox/test_runner.py @@ -260,7 +260,7 @@ def test_runtest_only(self, capfd, out_path, test_path, test_platforms, expected select_regex = r'^INFO - (?P[0-9]+) test scenarios' \ - r' \((?P[0-9]+) test instances\) selected,' \ + r' \((?P[0-9]+) configurations\) selected,' \ r' (?P[0-9]+) configurations filtered' \ r' \((?P[0-9]+) by static filter,' \ r' (?P[0-9]+) at runtime\)\.$' @@ -627,7 +627,7 @@ def test_only_failed(self, capfd, out_path, test_path, test_platforms, expected) self.loader.exec_module(self.twister_module) select_regex = r'^INFO - (?P[0-9]+) test scenarios' \ - r' 
\((?P[0-9]+) test instances\) selected,' \ + r' \((?P[0-9]+) configurations\) selected,' \ r' (?P[0-9]+) configurations filtered' \ r' \((?P[0-9]+) by static filter,' \ r' (?P[0-9]+) at runtime\)\.$' From e8d4b4b2eddc23e53621ccd64293d7bf1c772341 Mon Sep 17 00:00:00 2001 From: Anas Nashif Date: Sat, 9 Nov 2024 18:46:50 +0000 Subject: [PATCH 09/14] twister: stats: remove double counting of statuses We have been double counting some statuses, remove to get the stats right. Signed-off-by: Anas Nashif --- scripts/pylib/twister/twisterlib/runner.py | 8 -------- 1 file changed, 8 deletions(-) diff --git a/scripts/pylib/twister/twisterlib/runner.py b/scripts/pylib/twister/twisterlib/runner.py index 4eabdcd7efaff..755d011d05aa6 100644 --- a/scripts/pylib/twister/twisterlib/runner.py +++ b/scripts/pylib/twister/twisterlib/runner.py @@ -892,7 +892,6 @@ def process(self, pipeline, done, message, lock, results): logger.debug("filtering %s" % self.instance.name) self.instance.status = TwisterStatus.FILTER self.instance.reason = "runtime filter" - results.filtered_cases_increment() self.instance.add_missing_case_status(TwisterStatus.FILTER) next_op = 'report' else: @@ -1387,15 +1386,8 @@ def report_out(self, results): results.skipped_configs_increment() elif instance.status == TwisterStatus.PASS: results.passed_increment() - for case in instance.testcases: - # test cases skipped at the test case level - if case.status == TwisterStatus.SKIP: - results.skipped_cases_increment() elif instance.status == TwisterStatus.NOTRUN: results.notrun_increment() - for case in instance.testcases: - if case.status == TwisterStatus.SKIP: - results.skipped_cases_increment() else: logger.debug(f"Unknown status = {instance.status}") status = Fore.YELLOW + "UNKNOWN" + Fore.RESET From 8434acb42a06d93f278b4632ad204c67fe658d82 Mon Sep 17 00:00:00 2001 From: Anas Nashif Date: Sun, 10 Nov 2024 11:30:23 +0000 Subject: [PATCH 10/14] twister: stats: skipped testcases are part of selected group Count skipped 
cases as part of selected group in stats. Signed-off-by: Anas Nashif --- scripts/pylib/twister/twisterlib/runner.py | 22 +++++++++++----------- scripts/tests/twister/test_runner.py | 4 ++-- 2 files changed, 13 insertions(+), 13 deletions(-) diff --git a/scripts/pylib/twister/twisterlib/runner.py b/scripts/pylib/twister/twisterlib/runner.py index 755d011d05aa6..ce2d9b7560292 100644 --- a/scripts/pylib/twister/twisterlib/runner.py +++ b/scripts/pylib/twister/twisterlib/runner.py @@ -144,7 +144,7 @@ def _find_number_length(n): return length def summary(self): - executed_cases = self.cases - self.skipped_cases - self.filtered_cases + selected_cases = self.cases - self.filtered_cases completed_configs = self.done - self.skipped_filter # Find alignment length for aesthetic printing @@ -153,7 +153,7 @@ def summary(self): completed_suites_n_length = self._find_number_length(completed_configs) skipped_suites_n_length = self._find_number_length(self.skipped_configs) total_cases_n_length = self._find_number_length(self.cases) - executed_cases_n_length = self._find_number_length(executed_cases) + selected_cases_n_length = self._find_number_length(selected_cases) print("--------------------------------------------------") print(f"{'Total test suites: ':<23}{self.total:>{suites_n_length}}") # actually test instances @@ -172,19 +172,19 @@ def summary(self): print("---------------------- ----------------------") print(f"{'Total test cases: ':<18}{self.cases}") print(f"├─ {'Filtered test cases: ':<21}{self.filtered_cases:>{total_cases_n_length}}") - print(f"├─ {'Skipped test cases: ':<21}{self.skipped_cases:>{total_cases_n_length}}") - print(f"└─ {'Selected test cases: ':<21}{executed_cases:>{total_cases_n_length}}") - print(f" ├─ {'Passed test cases: ':<25}{self.passed_cases:>{executed_cases_n_length}}") - print(f" ├─ {'Built only test cases: ':<25}{self.notrun_cases:>{executed_cases_n_length}}") - print(f" ├─ {'Blocked test cases: 
':<25}{self.blocked_cases:>{executed_cases_n_length}}") - print(f" ├─ {'Failed test cases: ':<25}{self.failed_cases:>{executed_cases_n_length}}") - print(f" {'├' if self.none_cases or self.started_cases else '└'}─ {'Errors in test cases: ':<25}{self.error_cases:>{executed_cases_n_length}}") + print(f"└─ {'Selected test cases: ':<21}{selected_cases:>{total_cases_n_length}}") + print(f" ├─ {'Passed test cases: ':<25}{self.passed_cases:>{selected_cases_n_length}}") + print(f" ├─ {'Skipped test cases: ':<25}{self.skipped_cases:>{total_cases_n_length}}") + print(f" ├─ {'Built only test cases: ':<25}{self.notrun_cases:>{selected_cases_n_length}}") + print(f" ├─ {'Blocked test cases: ':<25}{self.blocked_cases:>{selected_cases_n_length}}") + print(f" ├─ {'Failed test cases: ':<25}{self.failed_cases:>{selected_cases_n_length}}") + print(f" {'├' if self.none_cases or self.started_cases else '└'}─ {'Errors in test cases: ':<25}{self.error_cases:>{selected_cases_n_length}}") if self.none_cases or self.started_cases: print(f" ├──── The following test case statuses should not appear in a proper execution ───") if self.none_cases: - print(f" {'├' if self.started_cases else '└'}─ {'Statusless test cases: ':<25}{self.none_cases:>{executed_cases_n_length}}") + print(f" {'├' if self.started_cases else '└'}─ {'Statusless test cases: ':<25}{self.none_cases:>{selected_cases_n_length}}") if self.started_cases: - print(f" └─ {'Test cases only started: ':<25}{self.started_cases:>{executed_cases_n_length}}") + print(f" └─ {'Test cases only started: ':<25}{self.started_cases:>{selected_cases_n_length}}") print("--------------------------------------------------") @property diff --git a/scripts/tests/twister/test_runner.py b/scripts/tests/twister/test_runner.py index 172c8dcc10058..532fca5e37aaf 100644 --- a/scripts/tests/twister/test_runner.py +++ b/scripts/tests/twister/test_runner.py @@ -223,9 +223,9 @@ def test_executioncounter(capfd): '---------------------- ----------------------\n' 
'Total test cases: 25\n' '├─ Filtered test cases: 0\n' - '├─ Skipped test cases: 6\n' - '└─ Selected test cases: 19\n' + '└─ Selected test cases: 25\n' ' ├─ Passed test cases: 0\n' + ' ├─ Skipped test cases: 6\n' ' ├─ Built only test cases: 0\n' ' ├─ Blocked test cases: 0\n' ' ├─ Failed test cases: 0\n' From 52c724d0ded4a3eaaa4b7995b20156995447e2c9 Mon Sep 17 00:00:00 2001 From: Anas Nashif Date: Mon, 11 Nov 2024 15:55:18 +0000 Subject: [PATCH 11/14] twister: stats: skipped_configs -> filtered_configs Rename variables leading to confusion between skipped and filtered suites. Signed-off-by: Anas Nashif --- scripts/pylib/twister/twisterlib/reports.py | 8 ++-- scripts/pylib/twister/twisterlib/runner.py | 50 ++++++++++----------- scripts/tests/twister/test_runner.py | 24 +++++----- 3 files changed, 41 insertions(+), 41 deletions(-) diff --git a/scripts/pylib/twister/twisterlib/reports.py b/scripts/pylib/twister/twisterlib/reports.py index a0ea7defe79c7..c0f06fc514288 100644 --- a/scripts/pylib/twister/twisterlib/reports.py +++ b/scripts/pylib/twister/twisterlib/reports.py @@ -584,14 +584,14 @@ def summary(self, results, ignore_unrecognized_sections, duration): if float(handler_time) > 0: run += 1 - if results.total and results.total != results.skipped_configs: - pass_rate = (float(results.passed) / float(results.total - results.skipped_configs)) + if results.total and results.total != results.filtered_configs: + pass_rate = (float(results.passed) / float(results.total - results.filtered_configs)) else: pass_rate = 0 logger.info( f"{TwisterStatus.get_color(TwisterStatus.FAIL) if failed else TwisterStatus.get_color(TwisterStatus.PASS)}{results.passed}" - f" of {results.total - results.skipped_configs}{Fore.RESET}" + f" of {results.total - results.filtered_configs}{Fore.RESET}" f" executed test configurations passed ({pass_rate:.2%})," f" {f'{TwisterStatus.get_color(TwisterStatus.NOTRUN)}{results.notrun}{Fore.RESET}' if results.notrun else f'{results.notrun}'} built (not 
run)," f" {f'{TwisterStatus.get_color(TwisterStatus.FAIL)}{results.failed}{Fore.RESET}' if results.failed else f'{results.failed}'} failed," @@ -623,7 +623,7 @@ def summary(self, results, ignore_unrecognized_sections, duration): f'.' ) - built_only = results.total - run - results.skipped_configs + built_only = results.total - run - results.filtered_configs logger.info(f"{Fore.GREEN}{run}{Fore.RESET} test configurations executed on platforms, \ {TwisterStatus.get_color(TwisterStatus.NOTRUN)}{built_only}{Fore.RESET} test configurations were only built.") diff --git a/scripts/pylib/twister/twisterlib/runner.py b/scripts/pylib/twister/twisterlib/runner.py index ce2d9b7560292..7b920c49dbb6a 100644 --- a/scripts/pylib/twister/twisterlib/runner.py +++ b/scripts/pylib/twister/twisterlib/runner.py @@ -67,11 +67,11 @@ def __init__(self, total=0): total = yaml test scenarios * applicable platforms done := instances that reached report_out stage of the pipeline - done = skipped_configs + passed + failed + error + done = filtered_configs + passed + failed + error completed = done - skipped_filter - skipped_configs = skipped_runtime + skipped_filter + filtered_configs = skipped_runtime + skipped_filter - pass rate = passed / (total - skipped_configs) + pass rate = passed / (total - filtered_configs) case pass rate = passed_cases / (cases - filtered_cases - skipped_cases) ''' # instances that go through the pipeline @@ -91,7 +91,7 @@ def __init__(self, total=0): # static filter + runtime filter + build skipped # updated by update_counting_before_pipeline() and report_out() - self._skipped_configs = Value('i', 0) + self._filtered_configs = Value('i', 0) # cmake filter + build skipped # updated by report_out() @@ -151,7 +151,7 @@ def summary(self): suites_n_length = self._find_number_length(self.total if self.total > self.done else self.done) processed_suites_n_length = self._find_number_length(self.done) completed_suites_n_length = self._find_number_length(completed_configs) - 
skipped_suites_n_length = self._find_number_length(self.skipped_configs) + skipped_suites_n_length = self._find_number_length(self.filtered_configs) total_cases_n_length = self._find_number_length(self.cases) selected_cases_n_length = self._find_number_length(selected_cases) @@ -159,14 +159,14 @@ def summary(self): print(f"{'Total test suites: ':<23}{self.total:>{suites_n_length}}") # actually test instances print(f"{'Processed test suites: ':<23}{self.done:>{suites_n_length}}") print(f"├─ {'Filtered test suites (static): ':<37}{self.skipped_filter:>{processed_suites_n_length}}") - print(f"└─ {'Completed test suites: ':<37}{completed_configs:>{processed_suites_n_length}}") - print(f" ├─ {'Filtered test suites (at runtime): ':<37}{self.skipped_runtime:>{completed_suites_n_length}}") + print(f"└─ {'Selected test suites: ':<37}{completed_configs:>{processed_suites_n_length}}") + print(f" ├─ {'Skipped test suites: ':<37}{self.skipped_runtime:>{completed_suites_n_length}}") print(f" ├─ {'Passed test suites: ':<37}{self.passed:>{completed_suites_n_length}}") print(f" ├─ {'Built only test suites: ':<37}{self.notrun:>{completed_suites_n_length}}") print(f" ├─ {'Failed test suites: ':<37}{self.failed:>{completed_suites_n_length}}") print(f" └─ {'Errors in test suites: ':<37}{self.error:>{completed_suites_n_length}}") print(f"") - print(f"{'Filtered test suites: ':<21}{self.skipped_configs}") + print(f"{'Filtered test suites: ':<21}{self.filtered_configs}") print(f"├─ {'Filtered test suites (static): ':<37}{self.skipped_filter:>{skipped_suites_n_length}}") print(f"└─ {'Filtered test suites (at runtime): ':<37}{self.skipped_runtime:>{skipped_suites_n_length}}") print("---------------------- ----------------------") @@ -412,18 +412,18 @@ def notrun_increment(self, value=1): self._notrun.value += value @property - def skipped_configs(self): - with self._skipped_configs.get_lock(): - return self._skipped_configs.value + def filtered_configs(self): + with 
self._filtered_configs.get_lock(): + return self._filtered_configs.value - @skipped_configs.setter - def skipped_configs(self, value): - with self._skipped_configs.get_lock(): - self._skipped_configs.value = value + @filtered_configs.setter + def filtered_configs(self, value): + with self._filtered_configs.get_lock(): + self._filtered_configs.value = value - def skipped_configs_increment(self, value=1): - with self._skipped_configs.get_lock(): - self._skipped_configs.value += value + def filtered_configs_increment(self, value=1): + with self._filtered_configs.get_lock(): + self._filtered_configs.value += value @property def skipped_filter(self): @@ -1381,9 +1381,9 @@ def report_out(self, results): if not self.options.verbose: self.log_info_file(self.options.inline_logs) elif instance.status == TwisterStatus.SKIP: - results.skipped_configs_increment() + results.filtered_configs_increment() elif instance.status == TwisterStatus.FILTER: - results.skipped_configs_increment() + results.filtered_configs_increment() elif instance.status == TwisterStatus.PASS: results.passed_increment() elif instance.status == TwisterStatus.NOTRUN: @@ -1439,8 +1439,8 @@ def report_out(self, results): TwisterStatus.get_color(TwisterStatus.NOTRUN), results.notrun, Fore.RESET, - TwisterStatus.get_color(TwisterStatus.SKIP) if results.skipped_configs > 0 else Fore.RESET, - results.skipped_configs, + TwisterStatus.get_color(TwisterStatus.SKIP) if results.filtered_configs > 0 else Fore.RESET, + results.filtered_configs, Fore.RESET, TwisterStatus.get_color(TwisterStatus.FAIL) if results.failed > 0 else Fore.RESET, results.failed, @@ -1699,7 +1699,7 @@ def update_counting_before_pipeline(self): for instance in self.instances.values(): if instance.status == TwisterStatus.FILTER and not instance.reason == 'runtime filter': self.results.skipped_filter_increment() - self.results.skipped_configs_increment() + self.results.filtered_configs_increment() 
self.results.filtered_cases_increment(len(instance.testsuite.testcases)) self.results.cases_increment(len(instance.testsuite.testcases)) elif instance.status == TwisterStatus.ERROR: @@ -1709,9 +1709,9 @@ def show_brief(self): logger.info("%d test scenarios (%d configurations) selected, " "%d configurations filtered (%d by static filter, %d at runtime)." % (len(self.suites), len(self.instances), - self.results.skipped_configs, + self.results.filtered_configs, self.results.skipped_filter, - self.results.skipped_configs - self.results.skipped_filter)) + self.results.filtered_configs - self.results.skipped_filter)) def add_tasks_to_queue(self, pipeline, build_only=False, test_only=False, retry_build_errors=False): for instance in self.instances.values(): diff --git a/scripts/tests/twister/test_runner.py b/scripts/tests/twister/test_runner.py index 532fca5e37aaf..e02f761c4956c 100644 --- a/scripts/tests/twister/test_runner.py +++ b/scripts/tests/twister/test_runner.py @@ -194,7 +194,7 @@ def test_executioncounter(capfd): ec.iteration = 2 ec.done = 9 ec.passed = 6 - ec.skipped_configs = 3 + ec.filtered_configs = 3 ec.skipped_runtime = 1 ec.skipped_filter = 2 ec.failed = 1 @@ -239,7 +239,7 @@ def test_executioncounter(capfd): assert ec.iteration == 2 assert ec.done == 9 assert ec.passed == 6 - assert ec.skipped_configs == 3 + assert ec.filtered_configs == 3 assert ec.skipped_runtime == 1 assert ec.skipped_filter == 2 assert ec.failed == 1 @@ -2043,7 +2043,7 @@ def test_projectbuilder_report_out( passed = 17, notrun = 0, failed = 2, - skipped_configs = 3, + filtered_configs = 3, skipped_runtime = 0, skipped_filter = 0, error = 1, @@ -2061,9 +2061,9 @@ def test_projectbuilder_report_out( def results_done_increment(value=1, decrement=False): results_mock.done += value * (-1 if decrement else 1) results_mock.done_increment = results_done_increment - def skipped_configs_increment(value=1, decrement=False): - results_mock.skipped_configs += value * (-1 if decrement else 1) - 
results_mock.skipped_configs_increment = skipped_configs_increment + def filtered_configs_increment(value=1, decrement=False): + results_mock.filtered_configs += value * (-1 if decrement else 1) + results_mock.filtered_configs_increment = filtered_configs_increment def skipped_filter_increment(value=1, decrement=False): results_mock.skipped_filter += value * (-1 if decrement else 1) results_mock.skipped_filter_increment = skipped_filter_increment @@ -2566,7 +2566,7 @@ def test_twisterrunner_update_counting_before_pipeline(): done = 0, passed = 0, failed = 0, - skipped_configs = 0, + filtered_configs = 0, skipped_runtime = 0, skipped_filter = 0, error = 0, @@ -2580,9 +2580,9 @@ def test_twisterrunner_update_counting_before_pipeline(): none_cases = 0, started_cases = 0 ) - def skipped_configs_increment(value=1, decrement=False): - tr.results.skipped_configs += value * (-1 if decrement else 1) - tr.results.skipped_configs_increment = skipped_configs_increment + def filtered_configs_increment(value=1, decrement=False): + tr.results.filtered_configs += value * (-1 if decrement else 1) + tr.results.filtered_configs_increment = filtered_configs_increment def skipped_filter_increment(value=1, decrement=False): tr.results.skipped_filter += value * (-1 if decrement else 1) tr.results.skipped_filter_increment = skipped_filter_increment @@ -2599,7 +2599,7 @@ def filtered_cases_increment(value=1, decrement=False): tr.update_counting_before_pipeline() assert tr.results.skipped_filter == 1 - assert tr.results.skipped_configs == 1 + assert tr.results.filtered_configs == 1 assert tr.results.filtered_cases == 4 assert tr.results.cases == 4 assert tr.results.error == 1 @@ -2619,7 +2619,7 @@ def test_twisterrunner_show_brief(caplog): tr = TwisterRunner(instances, suites, env=env_mock) tr.results = mock.Mock( skipped_filter = 3, - skipped_configs = 4, + filtered_configs = 4, skipped_cases = 0, cases = 0, error = 0 From 49be09c3a53d87119fe4f6a47d06b2605a269b8f Mon Sep 17 00:00:00 2001 
From: Anas Nashif Date: Mon, 11 Nov 2024 15:59:55 +0000 Subject: [PATCH 12/14] twister: stats: more renames skipped -> filtered Additional variable renames leading to confusion between skipped and filtered. Signed-off-by: Anas Nashif --- scripts/pylib/twister/twisterlib/runner.py | 78 +++++++++++----------- scripts/tests/twister/test_runner.py | 42 ++++++------ 2 files changed, 60 insertions(+), 60 deletions(-) diff --git a/scripts/pylib/twister/twisterlib/runner.py b/scripts/pylib/twister/twisterlib/runner.py index 7b920c49dbb6a..4f12226b82774 100644 --- a/scripts/pylib/twister/twisterlib/runner.py +++ b/scripts/pylib/twister/twisterlib/runner.py @@ -68,8 +68,8 @@ def __init__(self, total=0): total = yaml test scenarios * applicable platforms done := instances that reached report_out stage of the pipeline done = filtered_configs + passed + failed + error - completed = done - skipped_filter - filtered_configs = skipped_runtime + skipped_filter + completed = done - filtered_static + filtered_configs = filtered_runtime + filtered_static pass rate = passed / (total - filtered_configs) case pass rate = passed_cases / (cases - filtered_cases - skipped_cases) @@ -95,11 +95,11 @@ def __init__(self, total=0): # cmake filter + build skipped # updated by report_out() - self._skipped_runtime = Value('i', 0) + self._filtered_runtime = Value('i', 0) # static filtered at yaml parsing time # updated by update_counting_before_pipeline() - self._skipped_filter = Value('i', 0) + self._filtered_static = Value('i', 0) # updated by report_out() in pipeline self._error = Value('i', 0) @@ -145,7 +145,7 @@ def _find_number_length(n): def summary(self): selected_cases = self.cases - self.filtered_cases - completed_configs = self.done - self.skipped_filter + completed_configs = self.done - self.filtered_static # Find alignment length for aesthetic printing suites_n_length = self._find_number_length(self.total if self.total > self.done else self.done) @@ -158,17 +158,17 @@ def summary(self): 
print("--------------------------------------------------") print(f"{'Total test suites: ':<23}{self.total:>{suites_n_length}}") # actually test instances print(f"{'Processed test suites: ':<23}{self.done:>{suites_n_length}}") - print(f"├─ {'Filtered test suites (static): ':<37}{self.skipped_filter:>{processed_suites_n_length}}") + print(f"├─ {'Filtered test suites (static): ':<37}{self.filtered_static:>{processed_suites_n_length}}") print(f"└─ {'Selected test suites: ':<37}{completed_configs:>{processed_suites_n_length}}") - print(f" ├─ {'Skipped test suites: ':<37}{self.skipped_runtime:>{completed_suites_n_length}}") + print(f" ├─ {'Skipped test suites: ':<37}{self.filtered_runtime:>{completed_suites_n_length}}") print(f" ├─ {'Passed test suites: ':<37}{self.passed:>{completed_suites_n_length}}") print(f" ├─ {'Built only test suites: ':<37}{self.notrun:>{completed_suites_n_length}}") print(f" ├─ {'Failed test suites: ':<37}{self.failed:>{completed_suites_n_length}}") print(f" └─ {'Errors in test suites: ':<37}{self.error:>{completed_suites_n_length}}") print(f"") print(f"{'Filtered test suites: ':<21}{self.filtered_configs}") - print(f"├─ {'Filtered test suites (static): ':<37}{self.skipped_filter:>{skipped_suites_n_length}}") - print(f"└─ {'Filtered test suites (at runtime): ':<37}{self.skipped_runtime:>{skipped_suites_n_length}}") + print(f"├─ {'Filtered test suites (static): ':<37}{self.filtered_static:>{skipped_suites_n_length}}") + print(f"└─ {'Filtered test suites (at runtime): ':<37}{self.filtered_runtime:>{skipped_suites_n_length}}") print("---------------------- ----------------------") print(f"{'Total test cases: ':<18}{self.cases}") print(f"├─ {'Filtered test cases: ':<21}{self.filtered_cases:>{total_cases_n_length}}") @@ -426,32 +426,32 @@ def filtered_configs_increment(self, value=1): self._filtered_configs.value += value @property - def skipped_filter(self): - with self._skipped_filter.get_lock(): - return self._skipped_filter.value + def 
filtered_static(self): + with self._filtered_static.get_lock(): + return self._filtered_static.value - @skipped_filter.setter - def skipped_filter(self, value): - with self._skipped_filter.get_lock(): - self._skipped_filter.value = value + @filtered_static.setter + def filtered_static(self, value): + with self._filtered_static.get_lock(): + self._filtered_static.value = value - def skipped_filter_increment(self, value=1): - with self._skipped_filter.get_lock(): - self._skipped_filter.value += value + def filtered_static_increment(self, value=1): + with self._filtered_static.get_lock(): + self._filtered_static.value += value @property - def skipped_runtime(self): - with self._skipped_runtime.get_lock(): - return self._skipped_runtime.value + def filtered_runtime(self): + with self._filtered_runtime.get_lock(): + return self._filtered_runtime.value - @skipped_runtime.setter - def skipped_runtime(self, value): - with self._skipped_runtime.get_lock(): - self._skipped_runtime.value = value + @filtered_runtime.setter + def filtered_runtime(self, value): + with self._filtered_runtime.get_lock(): + self._filtered_runtime.value = value - def skipped_runtime_increment(self, value=1): - with self._skipped_runtime.get_lock(): - self._skipped_runtime.value += value + def filtered_runtime_increment(self, value=1): + with self._filtered_runtime.get_lock(): + self._filtered_runtime.value += value @property def failed(self): @@ -924,7 +924,7 @@ def process(self, pipeline, done, message, lock, results): logger.debug("filtering %s" % self.instance.name) self.instance.status = TwisterStatus.FILTER self.instance.reason = "runtime filter" - results.skipped_runtime_increment() + results.filtered_runtime_increment() self.instance.add_missing_case_status(TwisterStatus.FILTER) next_op = 'report' else: @@ -951,7 +951,7 @@ def process(self, pipeline, done, message, lock, results): # Count skipped cases during build, for example # due to ram/rom overflow. 
if self.instance.status == TwisterStatus.SKIP: - results.skipped_runtime_increment() + results.filtered_runtime_increment() self.instance.add_missing_case_status(TwisterStatus.SKIP, self.instance.reason) if ret.get('returncode', 1) > 0: @@ -1353,7 +1353,7 @@ def _add_instance_testcases_to_status_counts(instance, results, decrement=False) def report_out(self, results): - total_to_do = results.total - results.skipped_filter + total_to_do = results.total - results.filtered_static total_tests_width = len(str(total_to_do)) results.done_increment() instance = self.instance @@ -1413,7 +1413,7 @@ def report_out(self, results): and self.instance.handler.seed is not None ): more_info += "/seed: " + str(self.options.seed) logger.info("{:>{}}/{} {:<25} {:<50} {} ({})".format( - results.done - results.skipped_filter, total_tests_width, total_to_do , instance.platform.name, + results.done - results.filtered_static, total_tests_width, total_to_do , instance.platform.name, instance.testsuite.name, status, more_info)) if self.options.verbose > 1: @@ -1428,11 +1428,11 @@ def report_out(self, results): else: completed_perc = 0 if total_to_do > 0: - completed_perc = int((float(results.done - results.skipped_filter) / total_to_do) * 100) + completed_perc = int((float(results.done - results.filtered_static) / total_to_do) * 100) sys.stdout.write("INFO - Total complete: %s%4d/%4d%s %2d%% built (not run): %s%4d%s, filtered: %s%4d%s, failed: %s%4d%s, error: %s%4d%s\r" % ( TwisterStatus.get_color(TwisterStatus.PASS), - results.done - results.skipped_filter, + results.done - results.filtered_static, total_to_do, Fore.RESET, completed_perc, @@ -1663,7 +1663,7 @@ def run(self): self.results.error = 0 self.results.done -= self.results.error else: - self.results.done = self.results.skipped_filter + self.results.done = self.results.filtered_static self.execute(pipeline, done_queue) @@ -1698,7 +1698,7 @@ def update_counting_before_pipeline(self): ''' for instance in self.instances.values(): if 
instance.status == TwisterStatus.FILTER and not instance.reason == 'runtime filter': - self.results.skipped_filter_increment() + self.results.filtered_static_increment() self.results.filtered_configs_increment() self.results.filtered_cases_increment(len(instance.testsuite.testcases)) self.results.cases_increment(len(instance.testsuite.testcases)) @@ -1710,8 +1710,8 @@ def show_brief(self): "%d configurations filtered (%d by static filter, %d at runtime)." % (len(self.suites), len(self.instances), self.results.filtered_configs, - self.results.skipped_filter, - self.results.filtered_configs - self.results.skipped_filter)) + self.results.filtered_static, + self.results.filtered_configs - self.results.filtered_static)) def add_tasks_to_queue(self, pipeline, build_only=False, test_only=False, retry_build_errors=False): for instance in self.instances.values(): diff --git a/scripts/tests/twister/test_runner.py b/scripts/tests/twister/test_runner.py index e02f761c4956c..d1716e9831874 100644 --- a/scripts/tests/twister/test_runner.py +++ b/scripts/tests/twister/test_runner.py @@ -195,8 +195,8 @@ def test_executioncounter(capfd): ec.done = 9 ec.passed = 6 ec.filtered_configs = 3 - ec.skipped_runtime = 1 - ec.skipped_filter = 2 + ec.filtered_runtime = 1 + ec.filtered_static = 2 ec.failed = 1 ec.summary() @@ -240,8 +240,8 @@ def test_executioncounter(capfd): assert ec.done == 9 assert ec.passed == 6 assert ec.filtered_configs == 3 - assert ec.skipped_runtime == 1 - assert ec.skipped_filter == 2 + assert ec.filtered_runtime == 1 + assert ec.filtered_static == 2 assert ec.failed == 1 @@ -1547,7 +1547,7 @@ def mock_determine_testcases(res): __exit__=mock.Mock(return_value=None) ) results_mock = mock.Mock() - results_mock.skipped_runtime = 0 + results_mock.filtered_runtime = 0 pb.process(pipeline_mock, done_mock, message, lock_mock, results_mock) @@ -1558,7 +1558,7 @@ def mock_determine_testcases(res): assert pb.instance.status == expected_status assert pb.instance.reason == 
expected_reason - assert results_mock.skipped_runtime_increment.call_args_list == [mock.call()] * expected_skipped + assert results_mock.filtered_runtime_increment.call_args_list == [mock.call()] * expected_skipped if expected_missing: pb.instance.add_missing_case_status.assert_called_with(*expected_missing) @@ -2044,8 +2044,8 @@ def test_projectbuilder_report_out( notrun = 0, failed = 2, filtered_configs = 3, - skipped_runtime = 0, - skipped_filter = 0, + filtered_runtime = 0, + filtered_static = 0, error = 1, cases = 0, filtered_cases = 0, @@ -2064,12 +2064,12 @@ def results_done_increment(value=1, decrement=False): def filtered_configs_increment(value=1, decrement=False): results_mock.filtered_configs += value * (-1 if decrement else 1) results_mock.filtered_configs_increment = filtered_configs_increment - def skipped_filter_increment(value=1, decrement=False): - results_mock.skipped_filter += value * (-1 if decrement else 1) - results_mock.skipped_filter_increment = skipped_filter_increment - def skipped_runtime_increment(value=1, decrement=False): - results_mock.skipped_runtime += value * (-1 if decrement else 1) - results_mock.skipped_runtime_increment = skipped_runtime_increment + def filtered_static_increment(value=1, decrement=False): + results_mock.filtered_static += value * (-1 if decrement else 1) + results_mock.filtered_static_increment = filtered_static_increment + def filtered_runtime_increment(value=1, decrement=False): + results_mock.filtered_runtime += value * (-1 if decrement else 1) + results_mock.filtered_runtime_increment = filtered_runtime_increment def failed_increment(value=1, decrement=False): results_mock.failed += value * (-1 if decrement else 1) results_mock.failed_increment = failed_increment @@ -2567,8 +2567,8 @@ def test_twisterrunner_update_counting_before_pipeline(): passed = 0, failed = 0, filtered_configs = 0, - skipped_runtime = 0, - skipped_filter = 0, + filtered_runtime = 0, + filtered_static = 0, error = 0, cases = 0, 
filtered_cases = 0, @@ -2583,9 +2583,9 @@ def test_twisterrunner_update_counting_before_pipeline(): def filtered_configs_increment(value=1, decrement=False): tr.results.filtered_configs += value * (-1 if decrement else 1) tr.results.filtered_configs_increment = filtered_configs_increment - def skipped_filter_increment(value=1, decrement=False): - tr.results.skipped_filter += value * (-1 if decrement else 1) - tr.results.skipped_filter_increment = skipped_filter_increment + def filtered_static_increment(value=1, decrement=False): + tr.results.filtered_static += value * (-1 if decrement else 1) + tr.results.filtered_static_increment = filtered_static_increment def error_increment(value=1, decrement=False): tr.results.error += value * (-1 if decrement else 1) tr.results.error_increment = error_increment @@ -2598,7 +2598,7 @@ def filtered_cases_increment(value=1, decrement=False): tr.update_counting_before_pipeline() - assert tr.results.skipped_filter == 1 + assert tr.results.filtered_static == 1 assert tr.results.filtered_configs == 1 assert tr.results.filtered_cases == 4 assert tr.results.cases == 4 @@ -2618,7 +2618,7 @@ def test_twisterrunner_show_brief(caplog): tr = TwisterRunner(instances, suites, env=env_mock) tr.results = mock.Mock( - skipped_filter = 3, + filtered_static = 3, filtered_configs = 4, skipped_cases = 0, cases = 0, From cc7e42e708e24d6002b5904e6ec172f660d9a742 Mon Sep 17 00:00:00 2001 From: Anas Nashif Date: Mon, 11 Nov 2024 17:33:05 +0000 Subject: [PATCH 13/14] twister: stats: fix suite statistics suite stats were not correct, a mixup between skipped and filtered suites was leading to inconsistent numbers. This is now fixed. 
Signed-off-by: Anas Nashif --- scripts/pylib/twister/twisterlib/runner.py | 37 +++++++++++++++------- scripts/tests/twister/test_runner.py | 16 ++++------ 2 files changed, 32 insertions(+), 21 deletions(-) diff --git a/scripts/pylib/twister/twisterlib/runner.py b/scripts/pylib/twister/twisterlib/runner.py index 4f12226b82774..80bec680c3b91 100644 --- a/scripts/pylib/twister/twisterlib/runner.py +++ b/scripts/pylib/twister/twisterlib/runner.py @@ -104,6 +104,7 @@ def __init__(self, total=0): # updated by report_out() in pipeline self._error = Value('i', 0) self._failed = Value('i', 0) + self._skipped = Value('i', 0) # initialized to number of test instances self._total = Value('i', total) @@ -145,30 +146,27 @@ def _find_number_length(n): def summary(self): selected_cases = self.cases - self.filtered_cases - completed_configs = self.done - self.filtered_static + completed_configs = self.done - self.filtered_static - self.filtered_runtime # Find alignment length for aesthetic printing suites_n_length = self._find_number_length(self.total if self.total > self.done else self.done) - processed_suites_n_length = self._find_number_length(self.done) completed_suites_n_length = self._find_number_length(completed_configs) - skipped_suites_n_length = self._find_number_length(self.filtered_configs) + filtered_suites_n_length = self._find_number_length(self.filtered_configs) total_cases_n_length = self._find_number_length(self.cases) selected_cases_n_length = self._find_number_length(selected_cases) print("--------------------------------------------------") print(f"{'Total test suites: ':<23}{self.total:>{suites_n_length}}") # actually test instances print(f"{'Processed test suites: ':<23}{self.done:>{suites_n_length}}") - print(f"├─ {'Filtered test suites (static): ':<37}{self.filtered_static:>{processed_suites_n_length}}") - print(f"└─ {'Selected test suites: ':<37}{completed_configs:>{processed_suites_n_length}}") - print(f" ├─ {'Skipped test suites: 
':<37}{self.filtered_runtime:>{completed_suites_n_length}}") + print(f"└─{'Filtered test suites: ':<21}{self.filtered_configs}") + print(f" ├─ {'Filtered test suites (static): ':<37}{self.filtered_static:>{filtered_suites_n_length}}") + print(f" └─ {'Filtered test suites (at runtime): ':<37}{self.filtered_runtime:>{filtered_suites_n_length}}") + print(f"└─ {'Selected test suites: ':<37}{completed_configs:>{completed_suites_n_length}}") + print(f" ├─ {'Skipped test suites: ':<37}{self.skipped:>{completed_suites_n_length}}") print(f" ├─ {'Passed test suites: ':<37}{self.passed:>{completed_suites_n_length}}") print(f" ├─ {'Built only test suites: ':<37}{self.notrun:>{completed_suites_n_length}}") print(f" ├─ {'Failed test suites: ':<37}{self.failed:>{completed_suites_n_length}}") print(f" └─ {'Errors in test suites: ':<37}{self.error:>{completed_suites_n_length}}") - print(f"") - print(f"{'Filtered test suites: ':<21}{self.filtered_configs}") - print(f"├─ {'Filtered test suites (static): ':<37}{self.filtered_static:>{skipped_suites_n_length}}") - print(f"└─ {'Filtered test suites (at runtime): ':<37}{self.filtered_runtime:>{skipped_suites_n_length}}") print("---------------------- ----------------------") print(f"{'Total test cases: ':<18}{self.cases}") print(f"├─ {'Filtered test cases: ':<21}{self.filtered_cases:>{total_cases_n_length}}") @@ -341,6 +339,20 @@ def started_cases_increment(self, value=1): with self._started_cases.get_lock(): self._started_cases.value += value + @property + def skipped(self): + with self._skipped.get_lock(): + return self._skipped.value + + @skipped.setter + def skipped(self, value): + with self._skipped.get_lock(): + self._skipped.value = value + + def skipped_increment(self, value=1): + with self._skipped.get_lock(): + self._skipped.value += value + @property def error(self): with self._error.get_lock(): @@ -892,6 +904,7 @@ def process(self, pipeline, done, message, lock, results): logger.debug("filtering %s" % self.instance.name) 
self.instance.status = TwisterStatus.FILTER self.instance.reason = "runtime filter" + results.filtered_runtime_increment() self.instance.add_missing_case_status(TwisterStatus.FILTER) next_op = 'report' else: @@ -951,7 +964,7 @@ def process(self, pipeline, done, message, lock, results): # Count skipped cases during build, for example # due to ram/rom overflow. if self.instance.status == TwisterStatus.SKIP: - results.filtered_runtime_increment() + results.skipped_increment() self.instance.add_missing_case_status(TwisterStatus.SKIP, self.instance.reason) if ret.get('returncode', 1) > 0: @@ -1381,7 +1394,7 @@ def report_out(self, results): if not self.options.verbose: self.log_info_file(self.options.inline_logs) elif instance.status == TwisterStatus.SKIP: - results.filtered_configs_increment() + results.skipped_increment() elif instance.status == TwisterStatus.FILTER: results.filtered_configs_increment() elif instance.status == TwisterStatus.PASS: diff --git a/scripts/tests/twister/test_runner.py b/scripts/tests/twister/test_runner.py index d1716e9831874..c19ff08babc10 100644 --- a/scripts/tests/twister/test_runner.py +++ b/scripts/tests/twister/test_runner.py @@ -209,17 +209,15 @@ def test_executioncounter(capfd): '--------------------------------------------------\n' 'Total test suites: 12\n' 'Processed test suites: 9\n' - '├─ Filtered test suites (static): 2\n' - '└─ Completed test suites: 7\n' - ' ├─ Filtered test suites (at runtime): 1\n' + '└─Filtered test suites: 3\n' + ' ├─ Filtered test suites (static): 2\n' + ' └─ Filtered test suites (at runtime): 1\n' + '└─ Selected test suites: 6\n' + ' ├─ Skipped test suites: 0\n' ' ├─ Passed test suites: 6\n' ' ├─ Built only test suites: 0\n' ' ├─ Failed test suites: 1\n' ' └─ Errors in test suites: 2\n' - '\n' - 'Filtered test suites: 3\n' - '├─ Filtered test suites (static): 2\n' - '└─ Filtered test suites (at runtime): 1\n' '---------------------- ----------------------\n' 'Total test cases: 25\n' '├─ Filtered test 
cases: 0\n' @@ -914,7 +912,7 @@ def mock_getsize(filename, *args, **kwargs): {'op': 'report', 'test': mock.ANY}, TwisterStatus.FILTER, 'runtime filter', - 0, + 1, (TwisterStatus.FILTER,) ), ( @@ -1091,7 +1089,7 @@ def mock_getsize(filename, *args, **kwargs): {'op': 'gather_metrics', 'test': mock.ANY}, mock.ANY, mock.ANY, - 1, + 0, (TwisterStatus.SKIP, mock.ANY) ), ( From 6c3a3fa7813d8d44e1230d5e3d95b575207a8ccd Mon Sep 17 00:00:00 2001 From: Anas Nashif Date: Tue, 12 Nov 2024 06:12:22 -0500 Subject: [PATCH 14/14] twister: stats: use anytree to create summary Do not create the tree structure manually, use anytree instead. Signed-off-by: Anas Nashif --- scripts/pylib/twister/twisterlib/runner.py | 69 +++++++++++----------- scripts/tests/twister/test_runner.py | 43 +++++++------- 2 files changed, 54 insertions(+), 58 deletions(-) diff --git a/scripts/pylib/twister/twisterlib/runner.py b/scripts/pylib/twister/twisterlib/runner.py index 80bec680c3b91..128bc598ed9c4 100644 --- a/scripts/pylib/twister/twisterlib/runner.py +++ b/scripts/pylib/twister/twisterlib/runner.py @@ -57,6 +57,7 @@ logger = logging.getLogger('twister') logger.setLevel(logging.DEBUG) import expr_parser +from anytree import Node, RenderTree class ExecutionCounter(object): @@ -146,44 +147,42 @@ def _find_number_length(n): def summary(self): selected_cases = self.cases - self.filtered_cases - completed_configs = self.done - self.filtered_static - self.filtered_runtime - - # Find alignment length for aesthetic printing - suites_n_length = self._find_number_length(self.total if self.total > self.done else self.done) - completed_suites_n_length = self._find_number_length(completed_configs) - filtered_suites_n_length = self._find_number_length(self.filtered_configs) - total_cases_n_length = self._find_number_length(self.cases) - selected_cases_n_length = self._find_number_length(selected_cases) - - print("--------------------------------------------------") - print(f"{'Total test suites: 
':<23}{self.total:>{suites_n_length}}") # actually test instances - print(f"{'Processed test suites: ':<23}{self.done:>{suites_n_length}}") - print(f"└─{'Filtered test suites: ':<21}{self.filtered_configs}") - print(f" ├─ {'Filtered test suites (static): ':<37}{self.filtered_static:>{filtered_suites_n_length}}") - print(f" └─ {'Filtered test suites (at runtime): ':<37}{self.filtered_runtime:>{filtered_suites_n_length}}") - print(f"└─ {'Selected test suites: ':<37}{completed_configs:>{completed_suites_n_length}}") - print(f" ├─ {'Skipped test suites: ':<37}{self.skipped:>{completed_suites_n_length}}") - print(f" ├─ {'Passed test suites: ':<37}{self.passed:>{completed_suites_n_length}}") - print(f" ├─ {'Built only test suites: ':<37}{self.notrun:>{completed_suites_n_length}}") - print(f" ├─ {'Failed test suites: ':<37}{self.failed:>{completed_suites_n_length}}") - print(f" └─ {'Errors in test suites: ':<37}{self.error:>{completed_suites_n_length}}") - print("---------------------- ----------------------") - print(f"{'Total test cases: ':<18}{self.cases}") - print(f"├─ {'Filtered test cases: ':<21}{self.filtered_cases:>{total_cases_n_length}}") - print(f"└─ {'Selected test cases: ':<21}{selected_cases:>{total_cases_n_length}}") - print(f" ├─ {'Passed test cases: ':<25}{self.passed_cases:>{selected_cases_n_length}}") - print(f" ├─ {'Skipped test cases: ':<25}{self.skipped_cases:>{total_cases_n_length}}") - print(f" ├─ {'Built only test cases: ':<25}{self.notrun_cases:>{selected_cases_n_length}}") - print(f" ├─ {'Blocked test cases: ':<25}{self.blocked_cases:>{selected_cases_n_length}}") - print(f" ├─ {'Failed test cases: ':<25}{self.failed_cases:>{selected_cases_n_length}}") - print(f" {'├' if self.none_cases or self.started_cases else '└'}─ {'Errors in test cases: ':<25}{self.error_cases:>{selected_cases_n_length}}") + selected_configs = self.done - self.filtered_static - self.filtered_runtime + + + root = Node("Summary") + + Node(f"Total test suites: {self.total}", 
parent=root) + processed_suites = Node(f"Processed test suites: {self.done}", parent=root) + filtered_suites = Node(f"Filtered test suites: {self.filtered_configs}", parent=processed_suites) + Node(f"Filtered test suites (static): {self.filtered_static}", parent=filtered_suites) + Node(f"Filtered test suites (at runtime): {self.filtered_runtime}", parent=filtered_suites) + selected_suites = Node(f"Selected test suites: {selected_configs}", parent=processed_suites) + Node(f"Skipped test suites: {self.skipped}", parent=selected_suites) + Node(f"Passed test suites: {self.passed}", parent=selected_suites) + Node(f"Built only test suites: {self.notrun}", parent=selected_suites) + Node(f"Failed test suites: {self.failed}", parent=selected_suites) + Node(f"Errors in test suites: {self.error}", parent=selected_suites) + + total_cases = Node(f"Total test cases: {self.cases}", parent=root) + Node(f"Filtered test cases: {self.filtered_cases}", parent=total_cases) + selected_cases_node = Node(f"Selected test cases: {selected_cases}", parent=total_cases) + Node(f"Passed test cases: {self.passed_cases}", parent=selected_cases_node) + Node(f"Skipped test cases: {self.skipped_cases}", parent=selected_cases_node) + Node(f"Built only test cases: {self.notrun_cases}", parent=selected_cases_node) + Node(f"Blocked test cases: {self.blocked_cases}", parent=selected_cases_node) + Node(f"Failed test cases: {self.failed_cases}", parent=selected_cases_node) + error_cases_node = Node(f"Errors in test cases: {self.error_cases}", parent=selected_cases_node) + if self.none_cases or self.started_cases: - print(f" ├──── The following test case statuses should not appear in a proper execution ───") + Node("The following test case statuses should not appear in a proper execution", parent=error_cases_node) if self.none_cases: - print(f" {'├' if self.started_cases else '└'}─ {'Statusless test cases: ':<25}{self.none_cases:>{selected_cases_n_length}}") + Node(f"Statusless test cases: 
{self.none_cases}", parent=error_cases_node) if self.started_cases: - print(f" └─ {'Test cases only started: ':<25}{self.started_cases:>{selected_cases_n_length}}") - print("--------------------------------------------------") + Node(f"Test cases only started: {self.started_cases}", parent=error_cases_node) + + for pre, _, node in RenderTree(root): + print("%s%s" % (pre, node.name)) @property def warnings(self): diff --git a/scripts/tests/twister/test_runner.py b/scripts/tests/twister/test_runner.py index c19ff08babc10..3ab7de2fb935f 100644 --- a/scripts/tests/twister/test_runner.py +++ b/scripts/tests/twister/test_runner.py @@ -206,29 +206,26 @@ def test_executioncounter(capfd): sys.stderr.write(err) assert ( - '--------------------------------------------------\n' - 'Total test suites: 12\n' - 'Processed test suites: 9\n' - '└─Filtered test suites: 3\n' - ' ├─ Filtered test suites (static): 2\n' - ' └─ Filtered test suites (at runtime): 1\n' - '└─ Selected test suites: 6\n' - ' ├─ Skipped test suites: 0\n' - ' ├─ Passed test suites: 6\n' - ' ├─ Built only test suites: 0\n' - ' ├─ Failed test suites: 1\n' - ' └─ Errors in test suites: 2\n' - '---------------------- ----------------------\n' - 'Total test cases: 25\n' - '├─ Filtered test cases: 0\n' - '└─ Selected test cases: 25\n' - ' ├─ Passed test cases: 0\n' - ' ├─ Skipped test cases: 6\n' - ' ├─ Built only test cases: 0\n' - ' ├─ Blocked test cases: 0\n' - ' ├─ Failed test cases: 0\n' - ' └─ Errors in test cases: 0\n' - '--------------------------------------------------\n' +"├── Total test suites: 12\n" +"├── Processed test suites: 9\n" +"│ ├── Filtered test suites: 3\n" +"│ │ ├── Filtered test suites (static): 2\n" +"│ │ └── Filtered test suites (at runtime): 1\n" +"│ └── Selected test suites: 6\n" +"│ ├── Skipped test suites: 0\n" +"│ ├── Passed test suites: 6\n" +"│ ├── Built only test suites: 0\n" +"│ ├── Failed test suites: 1\n" +"│ └── Errors in test suites: 2\n" +"└── Total test cases: 25\n" +" ├── 
Filtered test cases: 0\n" +" └── Selected test cases: 25\n" +" ├── Passed test cases: 0\n" +" ├── Skipped test cases: 6\n" +" ├── Built only test cases: 0\n" +" ├── Blocked test cases: 0\n" +" ├── Failed test cases: 0\n" +" └── Errors in test cases: 0\n" ) in out assert ec.cases == 25