diff --git a/.github/workflows/db.yaml b/.github/workflows/db.yaml new file mode 100644 index 00000000000..ca4a1d6e81a --- /dev/null +++ b/.github/workflows/db.yaml @@ -0,0 +1,117 @@ +name: Create Test and Source DB + +on: + push: + branches: + - main + - v*-branch + - collab-* + pull_request: + branches: + - main + - v*-branch + - collab-* + schedule: + # Run at 17:00 UTC on every Saturday + - cron: '0 17 * * 6' + +permissions: + contents: read + +concurrency: + group: ${{ github.workflow }}-${{ github.event_name }}-${{ github.head_ref || github.ref }} + cancel-in-progress: true + +jobs: + test-source-db: + runs-on: ubuntu-24.04 + strategy: + fail-fast: false + matrix: + subset: [1,2,3,4,5,6,7,8,9,10 ,11,12,13,14,15,16,17,18,19,20] + timeout-minutes: 1440 + env: + TWISTER_COMMON: ' --test-config tests/test_config_ci.yaml --no-detailed-test-id --force-color --inline-logs -v -N -M --cmake-only -j 8' + COMMIT_RANGE: ${{ github.event.pull_request.base.sha }}..${{ github.event.pull_request.head.sha }} + BASE_REF: ${{ github.base_ref }} + LLVM_TOOLCHAIN_PATH: /usr/lib/llvm-16 + steps: + - name: Checkout + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + ref: ${{ github.event.pull_request.head.sha }} + path: zephyr + fetch-depth: 0 + persist-credentials: false + + - name: Set Up Python 3.12 + uses: actions/setup-python@42375524e23c412d93fb67b49958b491fce71c38 # v5.4.0 + with: + python-version: 3.12 + cache: pip + cache-dependency-path: scripts/requirements-actions.txt + + - name: install-packages + working-directory: zephyr + run: | + pip install -r scripts/requirements-actions.txt --require-hashes + sudo apt-get update -y + sudo apt-get install -y lcov gperf + + - name: Setup Zephyr project + uses: zephyrproject-rtos/action-zephyr-setup@f7b70269a8eb01f70c8e710891e4c94972a2f6b4 # v1.0.6 + with: + app-path: zephyr + toolchains: all + + - name: Run Tests with Twister + working-directory: zephyr + id: run_twister + run: | + export 
ZEPHYR_BASE=${PWD} + export ZEPHYR_TOOLCHAIN_VARIANT=zephyr + ./scripts/twister --subset ${{matrix.subset}}/${{ strategy.job-total }} ${TWISTER_COMMON} + python ./scripts/ci/db/gen_test_database.py --directory twister-out/ --output db_${{matrix.subset}}.json + + - name: Upload Database + if: always() + uses: actions/upload-artifact@4cec3d8aa04e39d1a68397de0c4cd6fb9dce8ec1 # v4.6.1 + with: + name: Test and Source DB (Subset ${{ matrix.subset }}) + if-no-files-found: ignore + path: | + zephyr/db_${{matrix.subset}}.json + + test-db-results: + name: "Publish Merged DB" + needs: + - test-source-db + runs-on: ubuntu-22.04 + permissions: + checks: write # to create the check run entry with Twister test results + # the build-and-test job might be skipped, we don't need to run this job then + if: success() || failure() + + steps: + - name: Install packages + run: | + sudo apt-get update -y + sudo apt-get install -y jq + + - name: Download Artifacts + uses: actions/download-artifact@cc203385981b70ca67e1cc392babf9cc229d5806 # v4.1.9 + with: + path: artifacts + + - name: Merge DB files + run: | + jq -s 'reduce .[] as $db ({}; reduce ($db | keys_unsorted[]) as $k (.; .[$k] += $db[$k]))' 
artifacts/*/*.json > db.json + + - name: Upload Merged DB + if: always() + uses: actions/upload-artifact@4cec3d8aa04e39d1a68397de0c4cd6fb9dce8ec1 # v4.6.1 + with: + name: Merged DB + if-no-files-found: ignore + path: | + db.json diff --git a/.github/workflows/footprint-tracking.yml b/.github/workflows/footprint-tracking.yml index 72c9f8eeb19..725af191b08 100644 --- a/.github/workflows/footprint-tracking.yml +++ b/.github/workflows/footprint-tracking.yml @@ -25,12 +25,8 @@ concurrency: jobs: footprint-tracking: - runs-on: - group: zephyr-runner-v2-linux-x64-4xlarge + runs-on: ubuntu-24.04 if: github.repository_owner == 'zephyrproject-rtos' - container: - image: ghcr.io/zephyrproject-rtos/ci-repo-cache:v0.27.4.20241026 - options: '--entrypoint /bin/bash' defaults: run: shell: bash @@ -39,32 +35,10 @@ jobs: env: ZEPHYR_TOOLCHAIN_VARIANT: zephyr steps: - - name: Apply container owner mismatch workaround - run: | - # FIXME: The owner UID of the GITHUB_WORKSPACE directory may not - # match the container user UID because of the way GitHub - # Actions runner is implemented. Remove this workaround when - # GitHub comes up with a fundamental fix for this problem. 
- git config --global --add safe.directory ${GITHUB_WORKSPACE} - - - name: Print cloud service information - run: | - echo "ZEPHYR_RUNNER_CLOUD_PROVIDER = ${ZEPHYR_RUNNER_CLOUD_PROVIDER}" - echo "ZEPHYR_RUNNER_CLOUD_NODE = ${ZEPHYR_RUNNER_CLOUD_NODE}" - echo "ZEPHYR_RUNNER_CLOUD_POD = ${ZEPHYR_RUNNER_CLOUD_POD}" - - - name: Update PATH for west - run: | - echo "$HOME/.local/bin" >> $GITHUB_PATH - - - name: Install packages - run: | - sudo apt-get update - sudo apt-get install -y python3-venv - - name: checkout uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 with: + path: zephyr ref: ${{ github.event.pull_request.head.sha }} fetch-depth: 0 @@ -76,19 +50,10 @@ jobs: cache-dependency-path: scripts/requirements-actions.txt - name: Install Python packages + working-directory: zephyr run: | pip install -r scripts/requirements-actions.txt --require-hashes - - name: Environment Setup - run: | - echo "ZEPHYR_SDK_INSTALL_DIR=/opt/toolchains/zephyr-sdk-$( cat SDK_VERSION )" >> $GITHUB_ENV - - - name: west setup - run: | - west init -l . 
|| true - west config --global update.narrow true - west update 2>&1 1> west.update.log || west update 2>&1 1> west.update2.log - - name: Configure AWS Credentials uses: aws-actions/configure-aws-credentials@ececac1a45f3b08a01d2dd070d28d111c5fe6722 # v4.1.0 with: @@ -96,7 +61,19 @@ jobs: aws-secret-access-key: ${{ secrets.AWS_TESTING_SECRET_ACCESS_KEY }} aws-region: us-east-1 + - name: Setup Zephyr project + uses: zephyrproject-rtos/action-zephyr-setup@f7b70269a8eb01f70c8e710891e4c94972a2f6b4 # v1.0.6 + with: + app-path: zephyr + toolchains: all + + - name: Install additional packages + run: | + sudo apt-get update -y + sudo apt-get install -y gperf + - name: Record Footprint + working-directory: zephyr env: BASE_REF: ${{ github.base_ref }} run: | @@ -104,12 +81,12 @@ jobs: ./scripts/footprint/track.py -p scripts/footprint/plan.txt - name: Upload footprint data + working-directory: zephyr run: | - python3 -m venv .venv - . .venv/bin/activate aws s3 sync --quiet footprint_data/ s3://testing.zephyrproject.org/footprint_data/ - name: Transform Footprint data to Twister JSON reports + working-directory: zephyr run: | shopt -s globstar export ZEPHYR_BASE=${PWD} @@ -119,6 +96,7 @@ jobs: ./footprint_data/**/ - name: Upload to ElasticSearch + working-directory: zephyr env: ELASTICSEARCH_KEY: ${{ secrets.ELASTICSEARCH_KEY }} ELASTICSEARCH_SERVER: "https://elasticsearch.zephyrproject.io:443" diff --git a/.github/workflows/manifest.yml b/.github/workflows/manifest.yml index d88ddec1950..65357fdf10f 100644 --- a/.github/workflows/manifest.yml +++ b/.github/workflows/manifest.yml @@ -16,7 +16,7 @@ jobs: uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 with: path: zephyrproject/zephyr - ref: ${{ github.event.pull_request.head.sha }} + #ref: ${{ github.event.pull_request.head.sha }} fetch-depth: 0 persist-credentials: false diff --git a/.github/workflows/twister-prep.yaml b/.github/workflows/twister-prep.yaml index 5799d776c43..c630ed7e063 100644 --- 
a/.github/workflows/twister-prep.yaml +++ b/.github/workflows/twister-prep.yaml @@ -20,7 +20,7 @@ jobs: prep_pr: if: github.repository_owner == 'zephyrproject-rtos' && github.event_name == 'pull_request' runs-on: - group: zephyr-runner-v2-linux-x64-4xlarge + group: test-runner-v2-linux-x64-4xlarge container: image: ghcr.io/zephyrproject-rtos/ci-repo-cache:v0.27.4.20241026 options: '--entrypoint /bin/bash' diff --git a/.github/workflows/twister.yaml b/.github/workflows/twister.yaml index b8db6779400..3cbfcc93ffd 100644 --- a/.github/workflows/twister.yaml +++ b/.github/workflows/twister.yaml @@ -28,7 +28,7 @@ jobs: twister-build: runs-on: - group: zephyr-runner-v2-linux-x64-4xlarge + group: test-runner-v2-linux-x64-4xlarge needs: twister-build-prep if: needs.twister-build-prep.outputs.size != 0 container: @@ -168,13 +168,7 @@ jobs: run: | export ZEPHYR_BASE=${PWD} export ZEPHYR_TOOLCHAIN_VARIANT=zephyr - ./scripts/twister --subset ${{matrix.subset}}/${{ strategy.job-total }} ${TWISTER_COMMON} ${WEEKLY_OPTIONS} - if [ "${{matrix.subset}}" = "1" ]; then - ./scripts/zephyr_module.py --twister-out module_tests.args - if [ -s module_tests.args ]; then - ./scripts/twister +module_tests.args --outdir module_tests ${TWISTER_COMMON} ${WEEKLY_OPTIONS} - fi - fi + ./scripts/twister --cmake-only --subset ${{matrix.subset}}/${{ strategy.job-total }} ${TWISTER_COMMON} ${WEEKLY_OPTIONS} - name: Print ccache stats if: always() diff --git a/scripts/ci/db/find_test_combo.py b/scripts/ci/db/find_test_combo.py new file mode 100644 index 00000000000..ab1a7db20d2 --- /dev/null +++ b/scripts/ci/db/find_test_combo.py @@ -0,0 +1,87 @@ +#!/usr/bin/env python3 + +import json +import os +import re +import sys +from pathlib import Path +from collections import defaultdict +import argparse + +if "ZEPHYR_BASE" not in os.environ: + exit("$ZEPHYR_BASE environment variable undefined.") + +# These are globally used variables. 
They are assigned in __main__ and are visible in further methods +# however, pylint complains that it doesn't recognize them when used (used-before-assignment). +zephyr_base = Path(os.environ['ZEPHYR_BASE']) + +sys.path.insert(0, os.path.join(zephyr_base / "scripts")) +from get_maintainer import Maintainers + +def load_database(database_path): + with open(database_path, 'r') as file: + return json.load(file) + +def find_areas(files): + maintf = zephyr_base / "MAINTAINERS.yml" + maintainer_file = Maintainers(maintf) + + num_files = 0 + all_areas = set() + + for changed_file in files: + num_files += 1 + print(f"file: {changed_file}") + areas = maintainer_file.path2areas(changed_file) + + if not areas: + continue + all_areas.update(areas) + tests = [] + for area in all_areas: + for suite in area.tests: + tests.append(f"{suite}.*") + return tests + +def find_best_coverage(database, changed_files, tests): + coverage = defaultdict(lambda: defaultdict(int)) + + for file in changed_files: + if file in database: + for entry in database[file]: + if not any(re.search(f"^{test}", entry["testsuite_id"]) for test in tests): + print("skip") + continue + testsuite_id = entry["testsuite_id"] + platform = entry["platform"] + coverage[testsuite_id][platform] += 1 + + best_coverage = [] + for testsuite_id, platforms in coverage.items(): + for platform, count in platforms.items(): + best_coverage.append((testsuite_id, platform, count)) + + best_coverage.sort(key=lambda x: x[2], reverse=True) + return best_coverage + +def main(database_path, changed_files): + tests = find_areas(changed_files) + database = load_database(database_path) + best_coverage = find_best_coverage(database, changed_files, tests) + + if best_coverage: + print("Best coverage testsuites and platforms:") + for testsuite_id, platform, count in best_coverage: + print(f"Testsuite: {testsuite_id}, Platform: {platform}, Coverage: {count}") + else: + print("No matching testsuites found for the provided files.") + + +if 
__name__ == "__main__": + parser = argparse.ArgumentParser(description="Find the best coverage testsuites and platforms for changed files.") + parser.add_argument("--database", help="Path to the testsuite database JSON file") + parser.add_argument("--changed-files", action="append", help="List of changed files") + + args = parser.parse_args() + + main(args.database, args.changed_files) diff --git a/scripts/ci/db/gen_test_database.py b/scripts/ci/db/gen_test_database.py new file mode 100644 index 00000000000..a2894ad01e9 --- /dev/null +++ b/scripts/ci/db/gen_test_database.py @@ -0,0 +1,40 @@ +#!/usr/bin/env python3 +import os +import json +import argparse + +def find_compile_commands_files(directory): + compile_commands_files = [] + for root, _, files in os.walk(directory): + for file in files: + if file.startswith("compile_commands_") and file.endswith(".json"): + compile_commands_files.append(os.path.join(root, file)) + return compile_commands_files + +def generate_testsuite_database(directory, output_file): + compile_commands_files = find_compile_commands_files(directory) + database = {} + + for file_path in compile_commands_files: + with open(file_path, 'r') as file: + data = json.load(file) + testsuite_id = data.get("testsuite_id") + platform = data.get("platform") + files = data.get("files", []) + + for file in files: + if file not in database: + database[file] = [] + database[file].append({"testsuite_id": testsuite_id, "platform": platform}) + + with open(output_file, 'w') as output: + json.dump(database, output, indent=4) + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description="Generate a testsuite database from compile_commands files.") + parser.add_argument("--directory", required=True, help="Directory to search for compile_commands files") + parser.add_argument("--output", required=True, help="Output file for the generated database") + args = parser.parse_args() + + generate_testsuite_database(args.directory, args.output) + 
print(f"Database generated and saved to {args.output}") diff --git a/scripts/ci/test_plan_v2.py b/scripts/ci/test_plan_v2.py new file mode 100755 index 00000000000..a69e40da6e4 --- /dev/null +++ b/scripts/ci/test_plan_v2.py @@ -0,0 +1,556 @@ +#!/usr/bin/env python3 +# SPDX-License-Identifier: Apache-2.0 +# Copyright (c) 2021 Intel Corporation + +# A script to generate twister options based on modified files. + +import re, os +import argparse +import yaml +import fnmatch +import subprocess +import json +import logging +import sys +import glob +from pathlib import Path +from git import Repo +from west.manifest import Manifest + +try: + # Use the C LibYAML parser if available, rather than the Python parser. + from yaml import CSafeLoader as SafeLoader +except ImportError: + from yaml import SafeLoader # type: ignore + +try: + from yaml import CSafeLoader as SafeLoader +except ImportError: + from yaml import SafeLoader + +if "ZEPHYR_BASE" not in os.environ: + exit("$ZEPHYR_BASE environment variable undefined.") + +# These are globaly used variables. They are assigned in __main__ and are visible in further methods +# however, pylint complains that it doesn't recognized them when used (used-before-assignment). +zephyr_base = Path(os.environ['ZEPHYR_BASE']) + +sys.path.insert(0, os.path.join(zephyr_base / "scripts")) +from get_maintainer import Maintainers +from pylib.twister.twisterlib.statuses import TwisterStatus + +logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.INFO) +logging.getLogger("pykwalify.core").setLevel(50) + +sys.path.append(os.path.join(zephyr_base, 'scripts')) +import list_boards + + +def _get_match_fn(globs, regexes): + # Constructs a single regex that tests for matches against the globs in + # 'globs' and the regexes in 'regexes'. Parts are joined with '|' (OR). + # Returns the search() method of the compiled regex. + # + # Returns None if there are neither globs nor regexes, which should be + # interpreted as no match. 
+ + if not (globs or regexes): + return None + + regex = "" + + if globs: + glob_regexes = [] + for glob in globs: + # Construct a regex equivalent to the glob + glob_regex = glob.replace(".", "\\.").replace("*", "[^/]*") \ + .replace("?", "[^/]") + + if not glob.endswith("/"): + # Require a full match for globs that don't end in / + glob_regex += "$" + + glob_regexes.append(glob_regex) + + # The glob regexes must anchor to the beginning of the path, since we + # return search(). (?:) is a non-capturing group. + regex += "^(?:{})".format("|".join(glob_regexes)) + + if regexes: + if regex: + regex += "|" + regex += "|".join(regexes) + + return re.compile(regex).search + +class Filters: + def __init__(self, repository_path, commits, ignore_path, testsuite_root, + platforms=[], detailed_test_id=True, quarantine_list=None, tc_roots_th=20): + + self.modified_files = [] + self.resolved_files = [] + + self.testsuite_root = testsuite_root + self.twister_options = [] + self.full_twister = False + self.all_tests = [] + self.tag_options = [] + self.platforms = platforms + self.detailed_test_id = detailed_test_id + self.ignore_path = ignore_path + self.quarantine_list = quarantine_list + self.tc_roots_th = tc_roots_th + self.commits = commits + self.repository_path = repository_path + self.git_repo = None + + def init(self): + commits = None + if self.commits: + commits = self.commits + self.git_repo = Repo(self.repository_path) + commit = self.git_repo.git.diff("--name-only", commits) + self.modified_files = commit.split("\n") + + if self.modified_files: + logging.info("Changed files:") + logging.info("+++++++++++++++++++++++++++") + for file in self.modified_files: + logging.info(file) + logging.info("+++++++++++++++++++++++++++") + + def process(self): + self.find_excludes() + if 'west.yml' in self.modified_files and args.commits is not None: + self.find_modules() + + self.find_tests() + if not self.platforms: + self.find_archs() + self.find_boards() + self.find_areas() + + 
self.post_filter() + + def finalize(self, output_file, ntests_per_builder): + # remove duplicates and filtered test cases + dup_free = [] + dup_free_set = set() + errors = 0 + + unfiltered_suites = list(filter(lambda t: t.get('status', None) is None, self.all_tests)) + logging.info(f'Total tests gathered: {len(unfiltered_suites)}') + for ts in unfiltered_suites: + n = ts.get("name") + a = ts.get("arch") + p = ts.get("platform") + if TwisterStatus(ts.get('status')) == TwisterStatus.ERROR: + logging.info(f"Error found: {n} on {p} ({ts.get('reason')})") + errors += 1 + if (n, a, p,) not in dup_free_set: + dup_free.append(ts) + dup_free_set.add((n, a, p,)) + + logging.info(f'Total tests to be run (after removing duplicates): {len(dup_free)}') + with open(".testplan", "w") as tp: + total_tests = len(dup_free) + if total_tests and total_tests < ntests_per_builder: + nodes = 1 + else: + nodes = round(total_tests / ntests_per_builder) + + tp.write(f"TWISTER_TESTS={total_tests}\n") + tp.write(f"TWISTER_NODES={nodes}\n") + tp.write(f"TWISTER_FULL={self.full_twister}\n") + logging.info(f'Total nodes to launch: {nodes}') + + # write plan + if dup_free: + data = {} + data['testsuites'] = dup_free + with open(output_file, 'w', newline='') as json_file: + json.dump(data, json_file, indent=4, separators=(',',':')) + + return errors + + def get_plan(self, options, integration=False, use_testsuite_root=True): + fname = "_test_plan_partial.json" + cmd = [f"{zephyr_base}/scripts/twister", "-c"] + options + ["--save-tests", fname ] + if not self.detailed_test_id: + cmd += ["--no-detailed-test-id"] + if self.testsuite_root and use_testsuite_root: + for root in self.testsuite_root: + cmd+=["-T", root] + if integration: + cmd.append("--integration") + if self.quarantine_list: + for q in self.quarantine_list: + cmd += ["--quarantine-list", q] + + logging.info(" ".join(cmd)) + _ = subprocess.call(cmd) + with open(fname, newline='') as jsonfile: + json_data = json.load(jsonfile) + suites = 
json_data.get("testsuites", []) + print(suites) + unfiltered_suites = list(filter(lambda t: TwisterStatus(t.get('status', None)) is TwisterStatus.NONE, suites)) + logging.info(f"Added {len(unfiltered_suites)} suites to plan.") + self.all_tests.extend(suites) + + # finally, remove generated test plan + if os.path.exists(fname): + os.remove(fname) + + def find_modules(self): + logging.info(f"------------------- modules --------------") + logging.info("Manifest file 'west.yml' changed") + old_manifest_content = self.git_repo.git.show(f"{self.commits[:-2]}:west.yml") + with open("west_old.yml", "w") as manifest: + manifest.write(old_manifest_content) + old_manifest = Manifest.from_file("west_old.yml") + new_manifest = Manifest.from_file("west.yml") + old_projs = set((p.name, p.revision) for p in old_manifest.projects) + new_projs = set((p.name, p.revision) for p in new_manifest.projects) + logging.debug(f'old_projs: {old_projs}') + logging.debug(f'new_projs: {new_projs}') + # Removed projects + rprojs = set(filter(lambda p: p[0] not in list(p[0] for p in new_projs), + old_projs - new_projs)) + # Updated projects + uprojs = set(filter(lambda p: p[0] in list(p[0] for p in old_projs), + new_projs - old_projs)) + # Added projects + aprojs = new_projs - old_projs - uprojs + + # All projs + projs = rprojs | uprojs | aprojs + projs_names = [name for name, rev in projs] + + logging.debug(f'rprojs: {rprojs}') + logging.debug(f'uprojs: {uprojs}') + logging.debug(f'aprojs: {aprojs}') + logging.debug(f'project: {projs_names}') + + _options = [] + if self.platforms: + for platform in self.platforms: + _options.extend(["-p", platform]) + + for prj in projs_names: + _options.extend(["-t", prj ]) + + self.get_plan(_options, integration=True) + self.resolved_files.append('west.yml') + + def find_archs(self): + logging.info(f"------------------- arches --------------") + # we match both arch//* and include/zephyr/arch/ and skip common. 
+ archs = set() + + remaining = self.get_remaining_files() + for f in remaining: + _match = re.match(r"^arch\/([^/]+)\/", f) + if not _match: + _match = re.match(r"^include\/zephyr\/arch\/([^/]+)\/", f) + if _match: + if _match.group(1) != 'common': + archs.add(_match.group(1)) + # Modified file is treated as resolved, since a matching scope was found + self.resolved_files.append(f) + else: + _global_change = True + + _options = [] + for arch in archs: + _options.extend(["-a", arch ]) + + if _options: + logging.info(f'Potential architecture filters...') + if self.platforms: + for platform in self.platforms: + _options.extend(["-p", platform]) + + self.get_plan(_options, True) + else: + self.get_plan(_options, True) + + def find_boards(self): + logging.info(f"------------------- boards --------------") + changed_boards = set() + matched_boards = {} + resolved_files = [] + + remaining = self.get_remaining_files() + for file in remaining: + if file.endswith(".rst") or file.endswith(".png") or file.endswith(".jpg"): + continue + if file.startswith("boards/"): + changed_boards.add(file) + resolved_files.append(file) + + roots = [zephyr_base] + if self.repository_path != zephyr_base: + roots.append(self.repository_path) + + # Look for boards in monitored repositories + lb_args = argparse.Namespace(**{'arch_roots': roots, 'board_roots': roots, 'board': None, 'soc_roots':roots, + 'board_dir': None}) + known_boards = list_boards.find_v2_boards(lb_args) + + for changed in changed_boards: + for board in known_boards: + c = (zephyr_base / changed).resolve() + if c.is_relative_to(board.dir.resolve()): + for file in glob.glob(os.path.join(board.dir, f"{board.name}*.yaml")): + with open(file, 'r', encoding='utf-8') as f: + b = yaml.load(f.read(), Loader=SafeLoader) + matched_boards[b['identifier']] = board + + if matched_boards: + logging.info(f"found boards: {','.join(matched_boards.keys())}") + # If modified file is caught by "find_boards" workflow (change in "boards" dir AND 
board recognized) + # it means a proper testing scope for this file was found and this file can be removed + # from further consideration + for _, board in matched_boards.items(): + self.resolved_files.extend(list(filter(lambda f: str(board.dir.relative_to(zephyr_base)) in f, resolved_files))) + + _options = [] + if len(matched_boards) > 20: + logging.warning(f"{len(matched_boards)} boards changed, this looks like a global change, " + "skipping test handling, revert to default.") + logging.info("trigger full twister") + self.full_twister = True + return + + for board in matched_boards: + _options.extend(["-p", board ]) + + if _options: + logging.info(f'Potential board filters...') + self.get_plan(_options) + + def find_tests(self): + logging.info(f"------------------- tests --------------") + tests = set() + remaining = self.get_remaining_files() + for f in remaining: + if f.endswith(".rst"): + continue + d = os.path.dirname(f) + scope_found = False + while not scope_found and d: + head, tail = os.path.split(d) + if os.path.exists(os.path.join(d, "testcase.yaml")) or \ + os.path.exists(os.path.join(d, "sample.yaml")): + tests.add(d) + # Modified file is treated as resolved, since a matching scope was found + self.resolved_files.append(f) + scope_found = True + elif tail == "common": + # Look for yamls in directories collocated with common + + yamls_found = [yaml for yaml in glob.iglob(head + '/**/testcase.yaml', recursive=True)] + yamls_found.extend([yaml for yaml in glob.iglob(head + '/**/sample.yaml', recursive=True)]) + if yamls_found: + for yaml in yamls_found: + tests.add(os.path.dirname(yaml)) + self.resolved_files.append(f) + scope_found = True + else: + d = os.path.dirname(d) + else: + d = os.path.dirname(d) + + _options = [] + for t in tests: + _options.extend(["-T", t ]) + + if len(tests) > self.tc_roots_th: + logging.warning(f"{len(tests)} tests changed, this looks like a global change, " + "skipping test handling, revert to default") + 
logging.info("trigger full twister") + self.full_twister = True + return + + if _options: + logging.info(f'Potential test filters...({len(tests)} changed...)') + if self.platforms: + for platform in self.platforms: + _options.extend(["-p", platform]) + self.get_plan(_options, integration=True, use_testsuite_root=False) + + def get_remaining_files(self): + remaining = set(self.modified_files).difference(set(self.resolved_files)) + logging.debug(f"Remaining files: {remaining}") + return remaining + + def find_areas(self): + logging.info(f"------------------- areas --------------") + maintf = zephyr_base / "MAINTAINERS.yml" + maintainer_file = Maintainers(maintf) + + num_files = 0 + all_areas = set() + remaining = self.get_remaining_files() + for changed_file in remaining: + num_files += 1 + logging.info(f"file: {changed_file}") + areas = maintainer_file.path2areas(changed_file) + + if not areas: + continue + self.resolved_files.append(changed_file) + all_areas.update(areas) + + config_path = "tests/test_config.yaml" + with open(config_path, encoding="utf-8") as f: + contents = f.read() + + try: + raw = yaml.load(contents, Loader=SafeLoader) + except yaml.YAMLError as e: + logging.error(f"error parsing configuration file: {e}") + + levels = raw.get('levels', []) + l = {} + l['name'] = 'custom' + l['description'] = 'custom' + adds = [] + for area in all_areas: + logging.info(f"area {area.name} changed..") + for suite in area.tests: + adds.append(f"{suite}.*") + + l['adds'] = adds + levels.append(l) + with open('custom_config.yaml', 'w', encoding="utf-8") as fp: + fp.write(yaml.dump(raw)) + + self.get_plan(["--test-config", "custom_config.yaml", "--level", "custom"], integration=True) + + def find_excludes(self): + logging.info(f"------------------- excludes --------------") + with open(self.ignore_path, "r") as twister_ignore: + ignores = twister_ignore.read().splitlines() + ignores = filter(lambda x: not x.startswith("#"), ignores) + + found = set() + + for pattern in 
ignores: + if pattern: + found.update(fnmatch.filter(self.modified_files, pattern)) + + self.resolved_files.extend(found) + + + logging.debug(f"files to be ignored: {found}") + files_not_resolved = list(filter(lambda x: x not in found, self.modified_files)) + logging.debug(f"not resolved files: {files_not_resolved}") + + def post_filter(self): + logging.info(f"------------------- post filters --------------") + _options = [] + if self.full_twister: + logging.info(f'Need to run full twister...') + if self.platforms: + for platform in self.platforms: + _options.extend(["-p", platform]) + + _options.extend(self.tag_options) + self.get_plan(_options) + else: + _options.extend(self.tag_options) + self.get_plan(_options, True) + elif self.tag_options: + for platform in self.platforms: + _options.extend(["-p", platform]) + + _options.extend(self.tag_options) + self.get_plan(_options, True) + else: + logging.info(f'No twister needed or partial twister run only...') + +def parse_args(): + parser = argparse.ArgumentParser( + description="Generate twister argument files based on modified file", + allow_abbrev=False) + parser.add_argument('-c', '--commits', default=None, + help="Commit range in the form: a..b") + + parser.add_argument('-m', '--modified-files', default=None, + help="File with information about changed/deleted/added files.") + + parser.add_argument('-o', '--output-file', default="testplan.json", + help="JSON file with the test plan to be passed to twister") + + parser.add_argument('-p', '--platform', action="append", + help="Limit this for a platform or a list of platforms.") + + parser.add_argument('-t', '--tests_per_builder', default=700, type=int, + help="Number of tests per builder") + + parser.add_argument('-n', '--default-matrix', default=10, type=int, + help="Number of tests per builder") + + parser.add_argument('--testcase-roots-threshold', default=20, type=int, + help="Threshold value for number of modified testcase roots, up to which an optimized scope 
is still applied." + "When exceeded, full scope will be triggered") + + parser.add_argument('--detailed-test-id', action='store_true', + help="Include paths to tests' locations in tests' names.") + + parser.add_argument("--no-detailed-test-id", dest='detailed_test_id', action="store_false", + help="Don't put paths into tests' names.") + + parser.add_argument('-r', '--repo-to-scan', default=zephyr_base, type=Path, + help="Repo to scan") + + parser.add_argument('--ignores-file', type=Path, + default=os.path.join(zephyr_base, 'scripts', 'ci', 'twister_ignore.txt'), + help="Path to a text file with patterns of files to be matched against changed files") + + parser.add_argument( + "-T", "--testsuite-root", action="append", default=[], + help="Base directory to recursively search for test cases. All " + "testcase.yaml files under here will be processed. May be " + "called multiple times. Defaults to the 'samples/' and " + "'tests/' directories at the base of the Zephyr tree.") + + parser.add_argument( + "--quarantine-list", action="append", metavar="FILENAME", + help="Load list of test scenarios under quarantine. The entries in " + "the file need to correspond to the test scenarios names as in " + "corresponding tests .yaml files. These scenarios " + "will be skipped with quarantine as the reason.") + + # Include paths in names by default. 
+ parser.set_defaults(detailed_test_id=True) + + return parser.parse_args() + + +def _main(): + args = parse_args() + if args.repo_to_scan: + repository_path = Path(args.repo_to_scan) + else: + repository_path = zephyr_base + + suite_filter = Filters( + repository_path, + args.commits, + args.ignores_file, + args.testsuite_root, + args.platform or [], + args.detailed_test_id, + args.quarantine_list, + args.testcase_roots_threshold) + + suite_filter.init() + suite_filter.process() + errors = suite_filter.finalize(args.output_file, args.tests_per_builder) + + sys.exit(errors) + +if __name__ == "__main__": + _main() diff --git a/scripts/pylib/twister/twisterlib/runner.py b/scripts/pylib/twister/twisterlib/runner.py index 801e68c8c95..39239843e5d 100644 --- a/scripts/pylib/twister/twisterlib/runner.py +++ b/scripts/pylib/twister/twisterlib/runner.py @@ -15,6 +15,8 @@ import subprocess import sys import time +import json +import hashlib import traceback from math import log10 from multiprocessing import Lock, Process, Value @@ -1013,6 +1015,33 @@ def process(self, pipeline, done, message, lock, results): elif op == "cmake": try: ret = self.cmake() + compile_commands_path = os.path.join(self.instance.build_dir, + 'compile_commands.json') + if os.path.exists(compile_commands_path): + with open(compile_commands_path, 'r') as f: + compile_commands = json.load(f) + + # Extract only the file names from the compile commands + file_names = [entry['file'] for entry in compile_commands] + + # Create a unique filename using a SHA hash based on the testsuite identifier and platform + unique_id = f"{self.instance.testsuite.id}_{self.instance.platform.name}" + sha_hash = hashlib.sha256(unique_id.encode()).hexdigest() + instance_file = os.path.join(self.options.outdir, f'compile_commands_{sha_hash}.json') + + # Add testsuite id and platform to the data + file_names_relative = list(set([ + os.path.relpath(file, start=ZEPHYR_BASE) for file in file_names + if not 
file.startswith(self.options.outdir) + ])) + data = { + "testsuite_id": self.instance.testsuite.id, + "platform": self.instance.platform.name, + "files": file_names_relative + } + + with open(instance_file, 'w') as f: + json.dump(data, f, indent=4) if self.instance.status in [TwisterStatus.FAIL, TwisterStatus.ERROR]: next_op = 'report' elif self.options.cmake_only: