diff --git a/.github/actions/veristat_baseline_compare/action.yml b/.github/actions/veristat_baseline_compare/action.yml new file mode 100644 index 0000000000000..f6dd81d19e4b3 --- /dev/null +++ b/.github/actions/veristat_baseline_compare/action.yml @@ -0,0 +1,49 @@ +name: 'run-veristat' +description: 'Run veristat benchmark' +inputs: + veristat_output: + description: 'Veristat output filepath' + required: true + baseline_name: + description: 'Veristat baseline cache name' + required: true +runs: + using: "composite" + steps: + - uses: actions/upload-artifact@v4 + with: + name: ${{ inputs.baseline_name }} + if-no-files-found: error + path: ${{ github.workspace }}/${{ inputs.veristat_output }} + + # For pull request: + # - get baseline log from cache + # - compare it to current run + - if: ${{ github.event_name == 'pull_request' }} + uses: actions/cache/restore@v4 + with: + key: ${{ github.base_ref }}-${{ inputs.baseline_name }}- + restore-keys: | + ${{ github.base_ref }}-${{ inputs.baseline_name }} + path: '${{ github.workspace }}/${{ inputs.baseline_name }}' + + - if: ${{ github.event_name == 'pull_request' }} + name: Show veristat comparison + shell: bash + run: ./.github/scripts/compare-veristat-results.sh + env: + BASELINE_PATH: ${{ github.workspace }}/${{ inputs.baseline_name }} + VERISTAT_OUTPUT: ${{ inputs.veristat_output }} + + # For push: just put baseline log to cache + - if: ${{ github.event_name == 'push' }} + shell: bash + run: | + mv "${{ github.workspace }}/${{ inputs.veristat_output }}" \ + "${{ github.workspace }}/${{ inputs.baseline_name }}" + + - if: ${{ github.event_name == 'push' }} + uses: actions/cache/save@v4 + with: + key: ${{ github.ref_name }}-${{ inputs.baseline_name }}-${{ github.run_id }} + path: '${{ github.workspace }}/${{ inputs.baseline_name }}' diff --git a/.github/scripts/collect-scx-bpf-progs.sh b/.github/scripts/collect-scx-bpf-progs.sh new file mode 100755 index 0000000000000..60a86c84e6387 --- /dev/null +++ b/.github/scripts/collect-scx-bpf-progs.sh @@ -0,0 +1,13 @@ +#!/bin/bash + +set -euo pipefail + +PROGS_DIR=$1 + +mkdir -p "${PROGS_DIR}" + +find "${SCX_BUILD_OUTPUT}" -type f -name "*.bpf.o" -printf '%P\0' | \ +while IFS= read -r -d '' prog; do + obj_name=$(echo "${prog}" | tr / _) + cp -v "${SCX_BUILD_OUTPUT}/$prog" "${PROGS_DIR}/${obj_name}" +done diff --git a/.github/scripts/compare-veristat-results.sh b/.github/scripts/compare-veristat-results.sh new file mode 100755 index 0000000000000..5bc761a9f8792 --- /dev/null +++ b/.github/scripts/compare-veristat-results.sh @@ -0,0 +1,42 @@ +#!/bin/bash + +if [[ ! -f "${BASELINE_PATH}" ]]; then + echo "# No ${BASELINE_PATH} available" >> "${GITHUB_STEP_SUMMARY}" + + echo "No ${BASELINE_PATH} available" + echo "Printing veristat results" + cat "${VERISTAT_OUTPUT}" + + exit 0 +fi + +veristat=$(realpath selftests/bpf/veristat) +cmp_out=$(mktemp veristate_compare_out_XXXXXX.csv) + +$veristat \ + --output-format csv \ + --emit file,prog,verdict,states \ + --compare "${BASELINE_PATH}" "${VERISTAT_OUTPUT}" > $cmp_out + +python3 ./.github/scripts/veristat_compare.py $cmp_out +exit_code=$? 
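+# A non-zero exit code from veristat_compare.py means new veristat failures were detected (the script's docstring documents this contract); capture it so we can still print diagnostics before exiting.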
+ +echo +# if comparison failed, print verifier log for failure mismatches +if [[ -n "$VERISTAT_DUMP_LOG_ON_FAILURE" && $exit_code -ne 0 ]]; then + cat $cmp_out | tail -n +1 | \ + while read -r line; do + verdict=$(echo $line | cut -d',' -f4) + verdict_diff=$(echo $line | cut -d',' -f5) + if [[ "$verdict" == "failure" && "$verdict_diff" == "MISMATCH" ]]; then + file=$(echo $line | cut -d',' -f1) + prog=$(echo $line | cut -d',' -f2) + echo "VERIFIER LOG FOR $file/$prog:" + echo "==================================================================" + $veristat -v $VERISTAT_OBJECTS_DIR/$file -f $prog 2>&1 + echo "==================================================================" + fi + done +fi + +exit $exit_code diff --git a/.github/scripts/download-gcc-bpf.sh b/.github/scripts/download-gcc-bpf.sh new file mode 100755 index 0000000000000..894584a01b2ec --- /dev/null +++ b/.github/scripts/download-gcc-bpf.sh @@ -0,0 +1,30 @@ +#!/bin/bash + +set -euo pipefail + +GCC_BPF_RELEASE_GH_REPO=$1 +INSTALL_DIR=$(realpath $2) + +cd /tmp + +tag=$(gh release list -L 1 -R ${GCC_BPF_RELEASE_GH_REPO} --json tagName -q .[].tagName) +if [[ -z "$tag" ]]; then + echo "Could not find latest GCC BPF release at ${GCC_BPF_RELEASE_GH_REPO}" + exit 1 +fi + +url="https://github.com/${GCC_BPF_RELEASE_GH_REPO}/releases/download/${tag}/${tag}.tar.zst" +echo "Downloading $url" +wget -q "$url" + +tarball=${tag}.tar.zst +dir=$(tar tf $tarball | head -1 || true) + +echo "Extracting $tarball ..." +tar -I zstd -xf $tarball && rm -f $tarball + +rm -rf $INSTALL_DIR +mv -v $dir $INSTALL_DIR + +cd - + diff --git a/.github/scripts/matrix.py b/.github/scripts/matrix.py new file mode 100644 index 0000000000000..2c9e8aff3feee --- /dev/null +++ b/.github/scripts/matrix.py @@ -0,0 +1,275 @@ +#!/usr/bin/env python3 + +import dataclasses +import json +import os + +from enum import Enum +from typing import Any, Dict, Final, List, Optional, Set, Union + +import requests +import requests.utils + +MANAGED_OWNER: Final[str] = "kernel-patches" +MANAGED_REPOS: Final[Set[str]] = { + f"{MANAGED_OWNER}/bpf", + f"{MANAGED_OWNER}/vmtest", +} + +DEFAULT_SELF_HOSTED_RUNNER_TAGS: Final[List[str]] = ["self-hosted", "docker-noble-main"] +DEFAULT_GITHUB_HOSTED_RUNNER: Final[str] = "ubuntu-24.04" +DEFAULT_GCC_VERSION: Final[int] = 15 +DEFAULT_LLVM_VERSION: Final[int] = 21 + +RUNNERS_BUSY_THRESHOLD: Final[float] = 0.8 + + +class Arch(str, Enum): + """ + CPU architecture supported by CI. 
+ """ + + AARCH64 = "aarch64" + S390X = "s390x" + X86_64 = "x86_64" + + +class Compiler(str, Enum): + GCC = "gcc" + LLVM = "llvm" + + +def query_runners_from_github() -> List[Dict[str, Any]]: + if "GITHUB_TOKEN" not in os.environ: + return [] + token = os.environ["GITHUB_TOKEN"] + headers = { + "Authorization": f"token {token}", + "Accept": "application/vnd.github.v3+json", + } + owner = os.environ["GITHUB_REPOSITORY_OWNER"] + url: Optional[str] = f"https://api.github.com/orgs/{owner}/actions/runners" + # GitHub returns 30 runners per page, fetch all + all_runners = [] + try: + while url is not None: + response = requests.get(url, headers=headers) + if response.status_code != 200: + print(f"Failed to query runners: {response.status_code}") + print(f"response: {response.text}") + return [] + data = response.json() + all_runners.extend(data.get("runners", [])) + # Check for next page URL in Link header + url = None + if "Link" in response.headers: + links = requests.utils.parse_header_links(response.headers["Link"]) + for link in links: + if link["rel"] == "next": + url = link["url"] + break + return all_runners + except Exception as e: + print(f"Warning: Failed to query runner status due to exception: {e}") + return [] + + +all_runners_cached: Optional[List[Dict[str, Any]]] = None + + +def all_runners() -> List[Dict[str, Any]]: + global all_runners_cached + if all_runners_cached is None: + print("Querying runners from GitHub...") + all_runners_cached = query_runners_from_github() + print(f"Github returned {len(all_runners_cached)} runners") + counts = count_by_status(all_runners_cached) + print( + f"Busy: {counts['busy']}, Idle: {counts['idle']}, Offline: {counts['offline']}" + ) + return all_runners_cached + + +def runner_labels(runner: Dict[str, Any]) -> List[str]: + return [label["name"] for label in runner["labels"]] + + +def is_self_hosted_runner(runner: Dict[str, Any]) -> bool: + labels = runner_labels(runner) + for label in DEFAULT_SELF_HOSTED_RUNNER_TAGS: + if label not in labels: + return False + return True + + +def self_hosted_runners() -> List[Dict[str, Any]]: + runners = all_runners() + return [r for r in runners if is_self_hosted_runner(r)] + + +def runners_by_arch(arch: Arch) -> List[Dict[str, Any]]: + runners = self_hosted_runners() + return [r for r in runners if arch.value in runner_labels(r)] + + +def count_by_status(runners: List[Dict[str, Any]]) -> Dict[str, int]: + result = {"busy": 0, "idle": 0, "offline": 0} + for runner in runners: + if runner["status"] == "online": + if runner["busy"]: + result["busy"] += 1 + else: + result["idle"] += 1 + else: + result["offline"] += 1 + return result + + +@dataclasses.dataclass +class BuildConfig: + arch: Arch + kernel_compiler: Compiler = Compiler.GCC + gcc_version: int = DEFAULT_GCC_VERSION + llvm_version: int = DEFAULT_LLVM_VERSION + kernel: str = "LATEST" + run_veristat: bool = False + parallel_tests: bool = False + build_release: bool = False + + @property + def runs_on(self) -> List[str]: + if is_managed_repo(): + return DEFAULT_SELF_HOSTED_RUNNER_TAGS + [self.arch.value] + else: + return [DEFAULT_GITHUB_HOSTED_RUNNER] + + @property + def build_runs_on(self) -> List[str]: + if not is_managed_repo(): + return [DEFAULT_GITHUB_HOSTED_RUNNER] + + # @Temporary: disable codebuild runners for cross-compilation jobs + match self.arch: + case Arch.S390X: + return DEFAULT_SELF_HOSTED_RUNNER_TAGS + [Arch.X86_64.value] + case Arch.AARCH64: + return DEFAULT_SELF_HOSTED_RUNNER_TAGS + [Arch.X86_64.value] + + # For managed repos, check the 
busyness of relevant self-hosted runners + # If they are too busy, use codebuild + runner_arch = self.arch + runners = runners_by_arch(runner_arch) + counts = count_by_status(runners) + online = counts["idle"] + counts["busy"] + busy = counts["busy"] + # if online <= 0, then something is wrong, don't use codebuild + if online > 0 and busy / online > RUNNERS_BUSY_THRESHOLD: + return ["codebuild"] + else: + return DEFAULT_SELF_HOSTED_RUNNER_TAGS + [runner_arch.value] + + @property + def tests(self) -> Dict[str, Any]: + tests_list = [ + "test_progs", + "test_progs_parallel", + "test_progs_no_alu32", + "test_progs_no_alu32_parallel", + "test_verifier", + ] + + if self.arch.value != "s390x": + tests_list.append("test_maps") + + if self.llvm_version >= 18: + tests_list.append("test_progs_cpuv4") + + if self.arch in [Arch.X86_64, Arch.AARCH64]: + tests_list.append("sched_ext") + + # Don't run GCC BPF runner, because too many tests are failing + # See: https://lore.kernel.org/bpf/87bjw6qpje.fsf@oracle.com/ + # if self.arch == Arch.X86_64: + # tests_list.append("test_progs-bpf_gcc") + + if not self.parallel_tests: + tests_list = [test for test in tests_list if not test.endswith("parallel")] + + return {"include": [generate_test_config(test) for test in tests_list]} + + def to_dict(self) -> Dict[str, Any]: + return { + "arch": self.arch.value, + "kernel_compiler": self.kernel_compiler.value, + "gcc_version": self.gcc_version, + "llvm_version": self.llvm_version, + "kernel": self.kernel, + "run_veristat": self.run_veristat, + "parallel_tests": self.parallel_tests, + "build_release": self.build_release, + "runs_on": self.runs_on, + "tests": self.tests, + "build_runs_on": self.build_runs_on, + } + + +def is_managed_repo() -> bool: + return ( + os.environ["GITHUB_REPOSITORY_OWNER"] == MANAGED_OWNER + and os.environ["GITHUB_REPOSITORY"] in MANAGED_REPOS + ) + + +def set_output(name, value): + """Write an output variable to the GitHub output file.""" + with open(os.getenv("GITHUB_OUTPUT"), "a", encoding="utf-8") as file: + file.write(f"{name}={value}\n") + + +def generate_test_config(test: str) -> Dict[str, Union[str, int]]: + """Create the configuration for the provided test.""" + is_parallel = test.endswith("_parallel") + config = { + "test": test, + "continue_on_error": is_parallel, + # While in experimental mode, parallel jobs may get stuck + # anywhere, including in user space where the kernel won't detect + # a problem and panic. We add a second layer of (smaller) timeouts + # here such that if we get stuck in a parallel run, we hit this + # timeout and fail without affecting the overall job success (as + # would be the case if we hit the job-wide timeout). For + # non-experimental jobs, 360 is the default which will be + # superseded by the overall workflow timeout (but we need to + # specify something).
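+        # For example, "test_progs_parallel" yields {"test": "test_progs_parallel", "continue_on_error": True, "timeout_minutes": 30}.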
+ "timeout_minutes": 30 if is_parallel else 360, + } + return config + + +if __name__ == "__main__": + matrix = [ + BuildConfig( + arch=Arch.X86_64, + run_veristat=True, + parallel_tests=True, + ), + BuildConfig( + arch=Arch.X86_64, + kernel_compiler=Compiler.LLVM, + build_release=True, + ), + BuildConfig( + arch=Arch.AARCH64, + ), + BuildConfig( + arch=Arch.S390X, + ), + ] + + # Outside of managed repositories only run on x86_64 + if not is_managed_repo(): + matrix = [config for config in matrix if config.arch == Arch.X86_64] + + json_matrix = json.dumps({"include": [config.to_dict() for config in matrix]}) + print(json.dumps(json.loads(json_matrix), indent=4)) + set_output("build_matrix", json_matrix) diff --git a/.github/scripts/tests/test_veristat_compare.py b/.github/scripts/tests/test_veristat_compare.py new file mode 100644 index 0000000000000..b65b69295235d --- /dev/null +++ b/.github/scripts/tests/test_veristat_compare.py @@ -0,0 +1,75 @@ +#!/usr/bin/env python3 + +import unittest +from typing import Iterable, List + +from ..veristat_compare import parse_table, VeristatFields + + +def gen_csv_table(records: Iterable[str]) -> List[str]: + return [ + ",".join(VeristatFields.headers()), + *records, + ] + + +class TestVeristatCompare(unittest.TestCase): + def test_parse_table_ignore_new_prog(self): + table = gen_csv_table( + [ + "prog_file.bpf.o,prog_name,N/A,success,N/A,N/A,1,N/A", + ] + ) + veristat_info = parse_table(table) + self.assertEqual(veristat_info.table, []) + self.assertFalse(veristat_info.changes) + self.assertFalse(veristat_info.new_failures) + + def test_parse_table_ignore_removed_prog(self): + table = gen_csv_table( + [ + "prog_file.bpf.o,prog_name,success,N/A,N/A,1,N/A,N/A", + ] + ) + veristat_info = parse_table(table) + self.assertEqual(veristat_info.table, []) + self.assertFalse(veristat_info.changes) + self.assertFalse(veristat_info.new_failures) + + def test_parse_table_new_failure(self): + table = gen_csv_table( + [ + "prog_file.bpf.o,prog_name,success,failure,MISMATCH,1,1,+0 (+0.00%)", + ] + ) + veristat_info = parse_table(table) + self.assertEqual( + veristat_info.table, + [["prog_file.bpf.o", "prog_name", "success -> failure (!!)", "+0.00 %"]], + ) + self.assertTrue(veristat_info.changes) + self.assertTrue(veristat_info.new_failures) + + def test_parse_table_new_changes(self): + table = gen_csv_table( + [ + "prog_file.bpf.o,prog_name,failure,success,MISMATCH,0,0,+0 (+0.00%)", + "prog_file.bpf.o,prog_name_increase,failure,failure,MATCH,1,2,+1 (+100.00%)", + "prog_file.bpf.o,prog_name_decrease,success,success,MATCH,1,1,-1 (-100.00%)", + ] + ) + veristat_info = parse_table(table) + self.assertEqual( + veristat_info.table, + [ + ["prog_file.bpf.o", "prog_name", "failure -> success", "+0.00 %"], + ["prog_file.bpf.o", "prog_name_increase", "failure", "+100.00 %"], + ["prog_file.bpf.o", "prog_name_decrease", "success", "-100.00 %"], + ], + ) + self.assertTrue(veristat_info.changes) + self.assertFalse(veristat_info.new_failures) + + +if __name__ == "__main__": + unittest.main() diff --git a/.github/scripts/tmpfsify-workspace.sh b/.github/scripts/tmpfsify-workspace.sh new file mode 100755 index 0000000000000..6fd62b4ad2a49 --- /dev/null +++ b/.github/scripts/tmpfsify-workspace.sh @@ -0,0 +1,21 @@ +#!/bin/bash + +set -x -euo pipefail + +TMPFS_SIZE=20 # GB +MEM_TOTAL=$(awk '/MemTotal/ {print int($2/1024)}' /proc/meminfo) + +# sanity check: total mem is at least double TMPFS_SIZE +if [ $MEM_TOTAL -lt $(($TMPFS_SIZE*1024*2)) ]; then + echo "tmpfsify-workspace.sh: will not 
allocate tmpfs, total memory is too low (${MEM_TOTAL}MB)" + exit 0 +fi + +dir="$(basename "$GITHUB_WORKSPACE")" +cd "$(dirname "$GITHUB_WORKSPACE")" +mv "${dir}" "${dir}.backup" +mkdir "${dir}" +sudo mount -t tmpfs -o size=${TMPFS_SIZE}G tmpfs "${dir}" +rsync -a "${dir}.backup/" "${dir}" +cd - + diff --git a/.github/scripts/veristat_compare.py b/.github/scripts/veristat_compare.py new file mode 100644 index 0000000000000..07271b8cbd3aa --- /dev/null +++ b/.github/scripts/veristat_compare.py @@ -0,0 +1,263 @@ +#!/usr/bin/env python3 + +# This script reads a CSV file produced by the following invocation: +# +# veristat --emit file,prog,verdict,states \ +# --output-format csv \ +# --compare ... +# +# And produces a markdown summary for the file. +# The summary is printed to standard output and appended to a file +# pointed to by GITHUB_STEP_SUMMARY variable. +# +# Script exits with return code 1 if there are new failures in the +# veristat results. +# +# For testing purposes invoke as follows: +# +# GITHUB_STEP_SUMMARY=/dev/null python3 veristat-compare.py test.csv +# +# File format (columns): +# 0. file_name +# 1. prog_name +# 2. verdict_base +# 3. verdict_comp +# 4. verdict_diff +# 5. total_states_base +# 6. total_states_comp +# 7. total_states_diff +# +# Records sample: +# file-a,a,success,failure,MISMATCH,12,12,+0 (+0.00%) +# file-b,b,success,success,MATCH,67,67,+0 (+0.00%) +# +# For better readability suffixes '_OLD' and '_NEW' +# are used instead of '_base' and '_comp' for variable +# names etc. + +import io +import os +import sys +import re +import csv +import logging +import argparse +import enum +from dataclasses import dataclass +from typing import Dict, Iterable, List, Final + + +TRESHOLD_PCT: Final[int] = 0 + +SUMMARY_HEADERS = ["File", "Program", "Verdict", "States Diff (%)"] + +# expected format: +0 (+0.00%) / -0 (-0.00%) +TOTAL_STATES_DIFF_REGEX = ( + r"(?P[+-]\d+) \((?P[+-]\d+\.\d+)\%\)" +) + + +TEXT_SUMMARY_TEMPLATE: Final[str] = ( + """ +# {title} + +{table} +""".strip() +) + +HTML_SUMMARY_TEMPLATE: Final[str] = ( + """ +# {title} + +
+<summary>Click to expand</summary> + +{table} +</details>
+""".strip() +) + +GITHUB_MARKUP_REPLACEMENTS: Final[Dict[str, str]] = { + "->": "→", + "(!!)": ":bangbang:", +} + +NEW_FAILURE_SUFFIX: Final[str] = "(!!)" + + +class VeristatFields(str, enum.Enum): + FILE_NAME = "file_name" + PROG_NAME = "prog_name" + VERDICT_OLD = "verdict_base" + VERDICT_NEW = "verdict_comp" + VERDICT_DIFF = "verdict_diff" + TOTAL_STATES_OLD = "total_states_base" + TOTAL_STATES_NEW = "total_states_comp" + TOTAL_STATES_DIFF = "total_states_diff" + + @classmethod + def headers(cls) -> List[str]: + return [ + cls.FILE_NAME, + cls.PROG_NAME, + cls.VERDICT_OLD, + cls.VERDICT_NEW, + cls.VERDICT_DIFF, + cls.TOTAL_STATES_OLD, + cls.TOTAL_STATES_NEW, + cls.TOTAL_STATES_DIFF, + ] + + +@dataclass +class VeristatInfo: + table: list + changes: bool + new_failures: bool + + def get_results_title(self) -> str: + if self.new_failures: + return "There are new veristat failures" + + if self.changes: + return "There are changes in verification performance" + + return "No changes in verification performance" + + def get_results_summary(self, markup: bool = False) -> str: + title = self.get_results_title() + if not self.table: + return f"# {title}\n" + + template = TEXT_SUMMARY_TEMPLATE + table = format_table(headers=SUMMARY_HEADERS, rows=self.table) + + if markup: + template = HTML_SUMMARY_TEMPLATE + table = github_markup_decorate(table) + + return template.format(title=title, table=table) + + +def get_state_diff(value: str) -> float: + if value == "N/A": + return 0.0 + + matches = re.match(TOTAL_STATES_DIFF_REGEX, value) + if not matches: + raise ValueError(f"Failed to parse total states diff field value '{value}'") + + if percentage_diff := matches.group("percentage_diff"): + return float(percentage_diff) + + raise ValueError(f"Invalid {VeristatFields.TOTAL_STATES_DIFF} field value: {value}") + + +def parse_table(csv_file: Iterable[str]) -> VeristatInfo: + reader = csv.DictReader(csv_file) + assert reader.fieldnames == VeristatFields.headers() + + new_failures = False + changes = False + table = [] + + for record in reader: + add = False + + verdict_old, verdict_new = ( + record[VeristatFields.VERDICT_OLD], + record[VeristatFields.VERDICT_NEW], + ) + + # Ignore results from completely new and removed programs + if "N/A" in [verdict_new, verdict_old]: + continue + + if record[VeristatFields.VERDICT_DIFF] == "MISMATCH": + changes = True + add = True + verdict = f"{verdict_old} -> {verdict_new}" + if verdict_new == "failure": + new_failures = True + verdict += f" {NEW_FAILURE_SUFFIX}" + else: + verdict = record[VeristatFields.VERDICT_NEW] + + diff = get_state_diff(record[VeristatFields.TOTAL_STATES_DIFF]) + if abs(diff) > TRESHOLD_PCT: + changes = True + add = True + + if not add: + continue + + table.append( + [ + record[VeristatFields.FILE_NAME], + record[VeristatFields.PROG_NAME], + verdict, + f"{diff:+.2f} %", + ] + ) + + return VeristatInfo(table=table, changes=changes, new_failures=new_failures) + + +def github_markup_decorate(input_str: str) -> str: + for text, markup in GITHUB_MARKUP_REPLACEMENTS.items(): + input_str = input_str.replace(text, markup) + return input_str + + +def format_table(headers: List[str], rows: List[List[str]]) -> str: + column_width = [ + max(len(row[column_idx]) for row in [headers] + rows) + for column_idx in range(len(headers)) + ] + + # Row template string in the following format: + # "{0:8}|{1:10}|{2:15}|{3:7}|{4:10}" + row_template = "|".join( + f"{{{idx}:{width}}}" for idx, width in enumerate(column_width) + ) + row_template_nl = f"|{row_template}|\n" + 
with io.StringIO() as out: + out.write(row_template_nl.format(*headers)) + + separator_row = ["-" * width for width in column_width] + out.write(row_template_nl.format(*separator_row)) + + for row in rows: + row_str = row_template_nl.format(*row) + out.write(row_str) + + return out.getvalue() + + +def main(compare_csv_filename: os.PathLike, output_filename: os.PathLike) -> int: + with open(compare_csv_filename, newline="", encoding="utf-8") as csv_file: + veristat_results = parse_table(csv_file) + + sys.stdout.write(veristat_results.get_results_summary()) + + with open(output_filename, encoding="utf-8", mode="a") as file: + file.write(veristat_results.get_results_summary(markup=True)) + + if veristat_results.new_failures: + return 1 + + return 0 + + +if __name__ == "__main__": + parser = argparse.ArgumentParser( + description="Print veristat comparison output as markdown step summary" + ) + parser.add_argument("filename") + args = parser.parse_args() + summary_filename = os.getenv("GITHUB_STEP_SUMMARY") + if not summary_filename: + logging.error("GITHUB_STEP_SUMMARY environment variable is not set") + sys.exit(1) + sys.exit(main(args.filename, summary_filename)) diff --git a/.github/workflows/ai-code-review.yml b/.github/workflows/ai-code-review.yml new file mode 100644 index 0000000000000..1e06d2d44abf6 --- /dev/null +++ b/.github/workflows/ai-code-review.yml @@ -0,0 +1,132 @@ +name: AI Code Review + +permissions: + contents: read + id-token: write + issues: write + pull-requests: write + +on: + pull_request: + types: [opened, review_requested] + +jobs: + get-commits: + # This condition indicates that we are running in the context of a PR owned by the kernel-patches org + if: ${{ github.repository == 'kernel-patches/bpf' && vars.AWS_REGION }} + runs-on: 'ubuntu-latest' + continue-on-error: true + outputs: + commits: ${{ steps.get-commits.outputs.commits }} + steps: + - name: Checkout Linux source tree + uses: actions/checkout@v5 + with: + fetch-depth: 32 + + # Get the list of commits and trigger a review job for each separate commit + # As a safeguard, check no more than the first 50 commits + - name: Get PR commits + id: get-commits + run: | + tmp=$(mktemp) + git rev-list ${{ github.event.pull_request.base.sha }}..${{ github.event.pull_request.head.sha }} | head -n 50 > pr_commits.txt + cat pr_commits.txt | jq -R -s -c 'split("\n")[:-1]' > $tmp + echo "commits=$(cat $tmp)" >> $GITHUB_OUTPUT + + + ai-review: + needs: get-commits + runs-on: 'ubuntu-latest' + strategy: + matrix: + commit: ${{ fromJson(needs.get-commits.outputs.commits) }} + fail-fast: false + env: + AWS_REGION: us-west-2 + steps: + - name: Checkout CI code + uses: actions/checkout@v5 + with: + sparse-checkout: | + .github + ci + + - name: Generate GitHub App token + id: app-token + uses: actions/create-github-app-token@v2 + with: + app-id: ${{ secrets.KP_REVIEW_BOT_APP_ID }} + private-key: ${{ secrets.KP_REVIEW_BOT_APP_PRIVATE_KEY }} + + - name: Configure AWS Credentials (OIDC) + uses: aws-actions/configure-aws-credentials@v4 + with: + role-to-assume: ${{ secrets.AWS_BEDROCK_ROLE }} + aws-region: us-west-2 + + - name: Set up .claude/settings.json + shell: bash + run: | + mkdir -p ~/.claude + cp ci/claude/settings.json ~/.claude/settings.json + + - name: Checkout Linux source tree + uses: actions/checkout@v5 + with: + fetch-depth: 32 + ref: ${{ matrix.commit }} + + - name: Get patch subject + id: get-patch-subject + shell: bash + run: | + subject=$(git log -1 --pretty=format:"%s" ${{ matrix.commit }}) + echo
"subject=$subject" >> $GITHUB_OUTPUT + + - name: Checkout prompts repo + uses: actions/checkout@v5 + with: + repository: 'masoncl/review-prompts' + path: 'review' + + - uses: anthropics/claude-code-action@v1 + with: + github_token: ${{ steps.app-token.outputs.token }} + use_bedrock: "true" + claude_args: '--max-turns 100' + prompt: | + Current directory is the root of a Linux Kernel git repository. + Using the prompt `review/review-core.md` and the prompt directory `review` + do a code review of the top commit in the Linux repository. + + # If Claude produced review-inline.txt then it found something + # Post a comment on PR and fail the job + - name: Check review-inline.txt + id: check_review + shell: bash + run: | + review_file=$(find ${{ github.workspace }} -name review-inline.txt) + if [ -s "$review_file" ]; then + cat $review_file || true + echo "review_file=$review_file" >> $GITHUB_OUTPUT + fi + + - name: Comment on PR + if: steps.check_review.outputs.review_file != '' + uses: actions/github-script@v8 + env: + REVIEW_FILE: ${{ steps.check_review.outputs.review_file }} + PATCH_SUBJECT: ${{ steps.get-patch-subject.outputs.subject }} + with: + github-token: ${{ steps.app-token.outputs.token }} + script: | + const commentScript = require('./ci/claude/post-pr-comment.js'); + await commentScript({github, context}); + + - name: Fail CI job if review file exists + if: steps.check_review.outputs.review_file != '' + run: | + echo "Review file found - failing the CI job" + exit 42 + diff --git a/.github/workflows/gcc-bpf.yml b/.github/workflows/gcc-bpf.yml new file mode 100644 index 0000000000000..5f05234399d33 --- /dev/null +++ b/.github/workflows/gcc-bpf.yml @@ -0,0 +1,103 @@ +name: Testing GCC BPF compiler + +on: + workflow_call: + inputs: + runs_on: + required: true + type: string + arch: + required: true + type: string + gcc_version: + required: true + type: string + llvm_version: + required: true + type: string + toolchain: + required: true + type: string + toolchain_full: + required: true + type: string + download_sources: + required: true + type: boolean + +jobs: + test: + name: GCC BPF + runs-on: >- + ${{ + contains(fromJSON(inputs.runs_on), 'codebuild') + && format('codebuild-bpf-ci-{0}-{1}', github.run_id, github.run_attempt) + || fromJSON(inputs.runs_on) + }} + env: + ARCH: ${{ inputs.arch }} + BPF_NEXT_BASE_BRANCH: 'master' + GCC_BPF_INSTALL_DIR: ${{ github.workspace }}/gcc-bpf + GCC_BPF_RELEASE_REPO: 'theihor/gcc-bpf' + KBUILD_OUTPUT: ${{ github.workspace }}/src/kbuild-output + REPO_ROOT: ${{ github.workspace }}/src + + steps: + + - uses: actions/checkout@v4 + with: + sparse-checkout: | + .github + ci + + - if: ${{ inputs.download_sources }} + name: Download bpf-next tree + uses: libbpf/ci/get-linux-source@v3 + with: + dest: ${{ env.REPO_ROOT }} + rev: ${{ env.BPF_NEXT_BASE_BRANCH }} + + - if: ${{ ! 
inputs.download_sources }} + name: Checkout ${{ github.repository }} to ./src + uses: actions/checkout@v4 + with: + path: 'src' + + - uses: libbpf/ci/patch-kernel@v3 + with: + patches-root: '${{ github.workspace }}/ci/diffs' + repo-root: ${{ env.REPO_ROOT }} + + - uses: actions/download-artifact@v4 + with: + name: vmlinux-${{ inputs.arch }}-${{ inputs.toolchain_full }} + path: ${{ env.REPO_ROOT }} + + - name: Untar artifacts + working-directory: ${{ env.REPO_ROOT }} + run: zstd -d -T0 vmlinux-${{ inputs.arch }}-${{ inputs.toolchain_full }}.tar.zst --stdout | tar -xf - + + - name: Setup build environment + uses: libbpf/ci/setup-build-env@v3 + with: + arch: ${{ inputs.arch }} + gcc-version: ${{ inputs.gcc_version }} + llvm-version: ${{ inputs.llvm_version }} + + - name: Download GCC BPF compiler + shell: bash + env: + GH_TOKEN: ${{ github.token }} + run: .github/scripts/download-gcc-bpf.sh ${{ env.GCC_BPF_RELEASE_REPO }} ${{ env.GCC_BPF_INSTALL_DIR }} + + - name: Build selftests/bpf/test_progs-bpf_gcc + uses: libbpf/ci/build-selftests@v3 + env: + BPF_GCC: ${{ env.GCC_BPF_INSTALL_DIR }} + MAX_MAKE_JOBS: 32 + SELFTESTS_BPF_TARGETS: 'test_progs-bpf_gcc' + with: + arch: ${{ inputs.arch }} + kernel-root: ${{ env.REPO_ROOT }} + llvm-version: ${{ inputs.llvm_version }} + toolchain: ${{ inputs.toolchain }} diff --git a/.github/workflows/kernel-build-test.yml b/.github/workflows/kernel-build-test.yml new file mode 100644 index 0000000000000..ceb47761e905b --- /dev/null +++ b/.github/workflows/kernel-build-test.yml @@ -0,0 +1,167 @@ +name: Reusable Build/Test/Veristat workflow + +on: + workflow_call: + inputs: + arch: + required: true + type: string + description: The architecture to build against, e.g x86_64, aarch64, s390x... + toolchain_full: + required: true + type: string + description: The toolchain and for llvm, its version, e.g gcc, llvm-15 + toolchain: + required: true + type: string + description: The toolchain, e.g gcc, llvm + runs_on: + required: true + type: string + description: The runners to run the test on. This is a json string representing an array of labels. + build_runs_on: + required: true + type: string + description: The runners to run the builds on. This is a json string representing an array of labels. + gcc_version: + required: true + type: string + description: GCC version to install + llvm_version: + required: true + type: string + description: LLVM version to install + kernel: + required: true + type: string + description: The kernel to run the test against. For KPD this is always LATEST, which runs against a newly built kernel. + tests: + required: true + type: string + description: A serialized json array with the tests to be running, it must follow the json-matrix format, https://www.jitsejan.com/use-github-actions-with-json-file-as-matrix + run_veristat: + required: true + type: boolean + description: Whether or not to run the veristat job. + run_tests: + required: true + type: boolean + description: Whether or not to run the test job. + download_sources: + required: true + type: boolean + description: Whether to download the linux sources into the working directory. + default: false + build_release: + required: true + type: boolean + description: Build selftests with -O2 optimization in addition to non-optimized build. 
+ default: false + secrets: + AWS_ROLE_ARN: + required: true + +jobs: + + # Build kernel and selftest + build: + uses: ./.github/workflows/kernel-build.yml + with: + arch: ${{ inputs.arch }} + toolchain_full: ${{ inputs.toolchain_full }} + toolchain: ${{ inputs.toolchain }} + runs_on: ${{ inputs.build_runs_on }} + gcc_version: ${{ inputs.gcc_version }} + llvm_version: ${{ inputs.llvm_version }} + kernel: ${{ inputs.kernel }} + download_sources: ${{ inputs.download_sources }} + + build-release: + if: ${{ inputs.build_release }} + uses: ./.github/workflows/kernel-build.yml + with: + arch: ${{ inputs.arch }} + toolchain_full: ${{ inputs.toolchain_full }} + toolchain: ${{ inputs.toolchain }} + runs_on: ${{ inputs.build_runs_on }} + gcc_version: ${{ inputs.gcc_version }} + llvm_version: ${{ inputs.llvm_version }} + kernel: ${{ inputs.kernel }} + download_sources: ${{ inputs.download_sources }} + release: true + + test: + if: ${{ inputs.run_tests }} + uses: ./.github/workflows/kernel-test.yml + # Setting name to test here to avoid lengthy autogenerated names due to matrix + # e.g build-and-test x86_64-gcc / test (test_progs_parallel, true, 30) / test_progs_parallel on x86_64 with gcc + name: "test" + needs: [build] + strategy: + fail-fast: false + matrix: ${{ fromJSON(inputs.tests) }} + with: + arch: ${{ inputs.arch }} + toolchain_full: ${{ inputs.toolchain_full }} + runs_on: ${{ inputs.runs_on }} + kernel: ${{ inputs.kernel }} + test: ${{ matrix.test }} + continue_on_error: ${{ toJSON(matrix.continue_on_error) }} + timeout_minutes: ${{ matrix.timeout_minutes }} + + veristat-kernel: + if: ${{ inputs.run_veristat }} + uses: ./.github/workflows/veristat-kernel.yml + needs: [build] + permissions: + id-token: write + contents: read + with: + arch: ${{ inputs.arch }} + toolchain_full: ${{ inputs.toolchain_full }} + runs_on: ${{ inputs.runs_on }} + + veristat-meta: + # Check for vars.AWS_REGION is necessary to skip this job in case of a PR from a fork. 
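+    # On a fork, vars.AWS_REGION is normally unset; an unset variable evaluates to an empty string, which is falsy in the expression below, so the job is skipped.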
+ if: ${{ inputs.run_veristat && github.repository_owner == 'kernel-patches' && vars.AWS_REGION }} + uses: ./.github/workflows/veristat-meta.yml + needs: [build] + permissions: + id-token: write + contents: read + with: + arch: ${{ inputs.arch }} + toolchain_full: ${{ inputs.toolchain_full }} + aws_region: ${{ vars.AWS_REGION }} + runs_on: ${{ inputs.runs_on }} + secrets: + AWS_ROLE_ARN: ${{ secrets.AWS_ROLE_ARN }} + + veristat-scx: + if: ${{ inputs.run_veristat }} + uses: ./.github/workflows/veristat-scx.yml + needs: [build] + permissions: + id-token: write + contents: read + with: + arch: ${{ inputs.arch }} + toolchain_full: ${{ inputs.toolchain_full }} + runs_on: ${{ inputs.runs_on }} + llvm_version: ${{ inputs.llvm_version }} + + gcc-bpf: + name: 'GCC BPF' + if: ${{ inputs.arch == 'x86_64' }} + uses: ./.github/workflows/gcc-bpf.yml + needs: [build] + with: + # GCC BPF does not need /dev/kvm, so use the "build" runners + runs_on: ${{ inputs.build_runs_on }} + arch: ${{ inputs.arch }} + gcc_version: ${{ inputs.gcc_version }} + llvm_version: ${{ inputs.llvm_version }} + toolchain: ${{ inputs.toolchain }} + toolchain_full: ${{ inputs.toolchain_full }} + download_sources: ${{ inputs.download_sources }} + diff --git a/.github/workflows/kernel-build.yml b/.github/workflows/kernel-build.yml new file mode 100644 index 0000000000000..296f5df43f696 --- /dev/null +++ b/.github/workflows/kernel-build.yml @@ -0,0 +1,231 @@ + +name: Reusable build workflow + +on: + workflow_call: + inputs: + arch: + required: true + type: string + description: The architecture to build against, e.g x86_64, aarch64, s390x... + toolchain_full: + required: true + type: string + description: The toolchain and for llvm, its version, e.g gcc, llvm-15 + toolchain: + required: true + type: string + description: The toolchain, e.g gcc, llvm + runs_on: + required: true + type: string + description: The runners to run the test on. This is a json string representing an array of labels. + gcc_version: + required: true + type: string + description: GCC version to install + llvm_version: + required: true + type: string + description: LLVM version to install + kernel: + required: true + type: string + description: The kernel to run the test against. For KPD this is always LATEST, which runs against a newly built kernel. + download_sources: + required: true + type: boolean + description: Whether to download the linux sources into the working directory. 
+ default: false + release: + required: false + type: boolean + description: Build selftest with -O2 optimization + default: false + +jobs: + build: + name: build kernel and selftests ${{ inputs.release && '-O2' || '' }} + # To run on CodeBuild, runs-on value must correspond to the AWS + # CodeBuild project associated with the kernel-patches webhook + # However matrix.py passes just a 'codebuild' string + runs-on: >- + ${{ + contains(fromJSON(inputs.runs_on), 'codebuild') + && format('codebuild-bpf-ci-{0}-{1}', github.run_id, github.run_attempt) + || fromJSON(inputs.runs_on) + }} + env: + ARTIFACTS_ARCHIVE: "vmlinux-${{ inputs.arch }}-${{ inputs.toolchain_full }}.tar.zst" + BPF_NEXT_BASE_BRANCH: 'master' + BPF_NEXT_FETCH_DEPTH: 64 # A bit of history is needed to facilitate incremental builds + CROSS_COMPILE: ${{ inputs.arch != 'x86_64' && 'true' || '' }} + BUILD_SCHED_EXT_SELFTESTS: ${{ inputs.arch == 'x86_64' || inputs.arch == 'aarch64' && 'true' || '' }} + KBUILD_OUTPUT: ${{ github.workspace }}/kbuild-output + KERNEL: ${{ inputs.kernel }} + KERNEL_ROOT: ${{ github.workspace }} + REFERENCE_REPO_PATH: /libbpfci/mirrors/linux + REPO_PATH: "" + REPO_ROOT: ${{ github.workspace }} + RUNNER_TYPE: ${{ contains(fromJSON(inputs.runs_on), 'codebuild') && 'codebuild' || 'default' }} + steps: + + # git version 2.43.0 (current Ubuntu 24 installation) + # does not support git clone --revision option + # so make sure latest git is installed + - name: Install latest git + shell: bash + run: | + sudo apt-get update + sudo apt-get install -y software-properties-common + sudo add-apt-repository -y ppa:git-core/ppa + sudo apt-get update + sudo apt-get install -y git + git --version + + - uses: actions/checkout@v4 + with: + sparse-checkout: | + .github + ci + + - if: ${{ env.RUNNER_TYPE == 'codebuild' }} + shell: bash + run: .github/scripts/tmpfsify-workspace.sh + + - if: ${{ ! inputs.download_sources }} + name: git clone ${{ github.repository }}@${{ github.sha }} + shell: bash + run: | + if [ -d "${{ env.REFERENCE_REPO_PATH }}" ]; then + git clone \ + --revision ${{ github.sha }} \ + --reference-if-able ${{ env.REFERENCE_REPO_PATH }} \ + https://github.com/${{ github.repository }}.git .kernel + else + git clone \ + --revision ${{ github.sha }} \ + --depth ${{ inputs.download_sources && 1 || env.BPF_NEXT_FETCH_DEPTH }} \ + https://github.com/${{ github.repository }}.git .kernel + fi + + - if: ${{ inputs.download_sources }} + name: Download bpf-next tree + env: + FETCH_DEPTH: ${{ env.BPF_NEXT_FETCH_DEPTH }} + uses: libbpf/ci/get-linux-source@v3 + with: + dest: '.kernel' + rev: ${{ env.BPF_NEXT_BASE_BRANCH }} + + - uses: libbpf/ci/prepare-incremental-build@v3 + with: + repo-root: '.kernel' + base-branch: >- + ${{ inputs.download_sources && env.BPF_NEXT_BASE_BRANCH + || github.event_name == 'pull_request' && github.base_ref + || github.ref_name + }} + arch: ${{ inputs.arch }} + toolchain_full: ${{ inputs.toolchain_full }} + kbuild-output: ${{ env.KBUILD_OUTPUT }} + + - name: Move linux source in place + shell: bash + run: | + cd .kernel + rm -rf .git .github ci + mv -t .. $(ls -A) + cd .. 
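+          # .kernel is empty at this point ('mv -t ..' above moved everything out), so rmdir succeeds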
rmdir .kernel + + - uses: libbpf/ci/patch-kernel@v3 + with: + patches-root: '${{ github.workspace }}/ci/diffs' + repo-root: ${{ env.REPO_ROOT }} + + - name: Setup build environment + uses: libbpf/ci/setup-build-env@v3 + with: + arch: ${{ inputs.arch }} + gcc-version: ${{ inputs.gcc_version }} + llvm-version: ${{ inputs.llvm_version }} + pahole: master + + # We have to set up qemu+binfmt in order to enable cross-compilation of selftests. + # During the selftests build, the freshly built bpftool is executed. + # On self-hosted bare-metal hosts binfmt is pre-configured. + - if: ${{ env.RUNNER_TYPE == 'codebuild' && env.CROSS_COMPILE }} + name: Set up docker + uses: docker/setup-docker-action@v4 + - if: ${{ env.RUNNER_TYPE == 'codebuild' && env.CROSS_COMPILE }} + name: Setup binfmt and qemu + uses: docker/setup-qemu-action@v3 + with: + image: tonistiigi/binfmt:qemu-v9.2.0 + + - name: Build kernel image + uses: libbpf/ci/build-linux@v3 + with: + arch: ${{ inputs.arch }} + toolchain: ${{ inputs.toolchain }} + kbuild-output: ${{ env.KBUILD_OUTPUT }} + max-make-jobs: 32 + llvm-version: ${{ inputs.llvm_version }} + + - name: Build selftests/bpf + uses: libbpf/ci/build-selftests@v3 + env: + MAX_MAKE_JOBS: 32 + RELEASE: ${{ inputs.release && '1' || '' }} + with: + arch: ${{ inputs.arch }} + kernel-root: ${{ env.KERNEL_ROOT }} + llvm-version: ${{ inputs.llvm_version }} + toolchain: ${{ inputs.toolchain }} + + - if: ${{ env.BUILD_SCHED_EXT_SELFTESTS }} + name: Build selftests/sched_ext + uses: libbpf/ci/build-scx-selftests@v3 + with: + kbuild-output: ${{ env.KBUILD_OUTPUT }} + repo-root: ${{ env.REPO_ROOT }} + arch: ${{ inputs.arch }} + toolchain: ${{ inputs.toolchain }} + llvm-version: ${{ inputs.llvm_version }} + max-make-jobs: 32 + + - if: ${{ github.event_name != 'push' }} + name: Build samples + uses: libbpf/ci/build-samples@v3 + with: + arch: ${{ inputs.arch }} + toolchain: ${{ inputs.toolchain }} + kbuild-output: ${{ env.KBUILD_OUTPUT }} + max-make-jobs: 32 + llvm-version: ${{ inputs.llvm_version }} + - name: Tar artifacts + id: tar-artifacts + uses: libbpf/ci/tar-artifacts@v3 + env: + ARCHIVE_BPF_SELFTESTS: 'true' + ARCHIVE_MAKE_HELPERS: 'true' + ARCHIVE_SCHED_EXT_SELFTESTS: ${{ env.BUILD_SCHED_EXT_SELFTESTS }} + with: + arch: ${{ inputs.arch }} + archive: ${{ env.ARTIFACTS_ARCHIVE }} + kbuild-output: ${{ env.KBUILD_OUTPUT }} + repo-root: ${{ env.REPO_ROOT }} + - if: ${{ github.event_name != 'push' }} + name: Remove KBUILD_OUTPUT content + shell: bash + run: | + # Remove $KBUILD_OUTPUT to prevent cache creation for pull requests. + # Only on pushed changes are build artifacts actually cached, because + # of github.com/actions/cache's cache isolation logic. + rm -rf "${KBUILD_OUTPUT}" + - uses: actions/upload-artifact@v4 + with: + name: vmlinux-${{ inputs.arch }}-${{ inputs.toolchain_full }}${{ inputs.release && '-release' || '' }} + if-no-files-found: error + path: ${{ env.ARTIFACTS_ARCHIVE }} diff --git a/.github/workflows/kernel-test.yml b/.github/workflows/kernel-test.yml new file mode 100644 index 0000000000000..2885f2759de4a --- /dev/null +++ b/.github/workflows/kernel-test.yml @@ -0,0 +1,96 @@ +name: Reusable test workflow + +on: + workflow_call: + inputs: + arch: + required: true + type: string + description: The architecture to build against, e.g x86_64, aarch64, s390x... + toolchain_full: + required: true + type: string + description: The toolchain and for llvm, its version, e.g gcc, llvm-15 + runs_on: + required: true + type: string + description: The runners to run the test on.
This is a json string representing an array of labels. + kernel: + required: true + type: string + description: The kernel to run the test against. For KPD this is always LATEST, which runs against a newly built kernel. + test: + required: true + type: string + description: The test to run in the vm, e.g test_progs, test_maps, test_progs_no_alu32... + continue_on_error: + required: true + type: string + description: Whether to continue on error. This is typically set to true for parallel tests which are currently known to fail, but we don't want to fail the whole CI because of that. + timeout_minutes: + required: true + type: number + description: In case a test runs for too long, after how many seconds shall we timeout and error. + +jobs: + test: + name: ${{ inputs.test }} on ${{ inputs.arch }} with ${{ inputs.toolchain_full }} + runs-on: ${{ fromJSON(inputs.runs_on) }} + timeout-minutes: 100 + env: + ARCH: ${{ inputs.arch }} + KERNEL: ${{ inputs.kernel }} + REPO_ROOT: ${{ github.workspace }} + REPO_PATH: "" + # https://github.com/actions/runner/issues/1483#issuecomment-1031671517 + # booleans are weird in GH. + CONTINUE_ON_ERROR: ${{ inputs.continue_on_error }} + DEPLOYMENT: ${{ github.repository == 'kernel-patches/bpf' && 'prod' || 'rc' }} + ALLOWLIST_FILE: /tmp/allowlist + DENYLIST_FILE: /tmp/denylist + steps: + - uses: actions/checkout@v4 + with: + sparse-checkout: | + .github + ci + + - uses: actions/download-artifact@v4 + with: + name: vmlinux-${{ inputs.arch }}-${{ inputs.toolchain_full }} + path: . + + - name: Untar artifacts + # zstd is installed by default in the runner images. + run: zstd -d -T0 vmlinux-${{ inputs.arch }}-${{ inputs.toolchain_full }}.tar.zst --stdout | tar -xf - + + - name: Run selftests + uses: libbpf/ci/run-vmtest@v3 + # https://github.com/actions/runner/issues/1483#issuecomment-1031671517 + # booleans are weird in GH. + continue-on-error: ${{ fromJSON(env.CONTINUE_ON_ERROR) }} + timeout-minutes: ${{ inputs.timeout_minutes }} + env: + ARCH: ${{ inputs.arch }} + DEPLOYMENT: ${{ env.DEPLOYMENT }} + KERNEL_TEST: ${{ inputs.test }} + SELFTESTS_BPF: ${{ github.workspace }}/selftests/bpf + VMTEST_CONFIGS: ${{ github.workspace }}/ci/vmtest/configs + TEST_PROGS_TRAFFIC_MONITOR: ${{ inputs.arch == 'x86_64' && 'true' || '' }} + TEST_PROGS_WATCHDOG_TIMEOUT: 600 + with: + arch: ${{ inputs.arch }} + vmlinuz: '${{ github.workspace }}/vmlinuz' + kernel-root: ${{ env.REPO_ROOT }} + max-cpu: 8 + kernel-test: ${{ inputs.test }} + # Here we must use kbuild-output local to the repo, because + # it was extracted from the artifacts. + kbuild-output: ${{ env.REPO_ROOT }}/kbuild-output + + - if: ${{ always() }} + uses: actions/upload-artifact@v4 + with: + name: tmon-logs-${{ inputs.arch }}-${{ inputs.toolchain_full }}-${{ inputs.test }} + if-no-files-found: ignore + path: /tmp/tmon_pcap/* diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml new file mode 100644 index 0000000000000..1c910fd297309 --- /dev/null +++ b/.github/workflows/lint.yml @@ -0,0 +1,65 @@ +name: "lint" + +on: + pull_request: + push: + branches: + - master + +jobs: + shellcheck: + # This workflow gets injected into other Linux repositories, but we don't + # want it to run there. 
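+    # github.repository is evaluated at run time, so injected copies of this file in other repositories (e.g. kernel-patches/bpf) skip all of these jobs.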
if: ${{ github.repository == 'kernel-patches/vmtest' }} + name: ShellCheck + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v4 + - name: Run ShellCheck + uses: ludeeus/action-shellcheck@master + env: + SHELLCHECK_OPTS: --severity=warning --exclude=SC1091 + + # Ensure some consistency in the formatting. + lint: + if: ${{ github.repository == 'kernel-patches/vmtest' }} + name: Lint + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v4 + - name: Run black + uses: psf/black@stable + with: + src: ./.github/scripts + + validate_matrix: + if: ${{ github.repository == 'kernel-patches/vmtest' }} + name: Validate matrix.py + runs-on: ubuntu-latest + env: + GITHUB_REPOSITORY_OWNER: ${{ matrix.owner }} + GITHUB_REPOSITORY: ${{ matrix.repository }} + GITHUB_OUTPUT: /dev/stdout + strategy: + matrix: + owner: ['kernel-patches', 'foo'] + repository: ['bpf', 'vmtest', 'bar'] + steps: + - name: Checkout repository + uses: actions/checkout@v4 + - name: run script + run: | + python3 .github/scripts/matrix.py + + unittests: + if: ${{ github.repository == 'kernel-patches/vmtest' }} + name: Unittests + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v4 + - name: Run unittests + run: python3 -m unittest scripts/tests/*.py + working-directory: .github diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml new file mode 100644 index 0000000000000..24773459a252d --- /dev/null +++ b/.github/workflows/test.yml @@ -0,0 +1,73 @@ +name: bpf-ci + +on: + pull_request: + push: + branches: + - bpf_base + - bpf-next_base + - bpf-net_base + - for-next_base + +concurrency: + group: ci-test-${{ github.ref_name }} + cancel-in-progress: true + +jobs: + set-matrix: + # FIXME: set-matrix is lightweight; run it on any self-hosted machine in the kernel-patches org + # so we do not wait for GH-hosted runners, which may potentially all be busy (because of the bpf-rc + # repo, for instance). + # This could be fixed long-term by making this action/workflow reusable and letting the caller + # specify what to run on.
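+    # For example, for repositories owned by kernel-patches the expression below resolves to the self-hosted 'x86_64' label; all other owners fall back to 'ubuntu-latest'.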
+ runs-on: ${{ github.repository_owner == 'kernel-patches' && 'x86_64' || 'ubuntu-latest' }} + permissions: read-all + outputs: + build-matrix: ${{ steps.set-matrix-impl.outputs.build_matrix }} + steps: + - uses: actions/checkout@v4 + with: + sparse-checkout: | + .github + ci + - name: Install script dependencies + shell: bash + run: | + sudo apt-get -y update + sudo apt-get -y install python3-requests + - id: set-matrix-impl + env: + GITHUB_TOKEN: ${{ secrets.GH_PAT_READ_RUNNERS }} + run: | + python3 .github/scripts/matrix.py + + build-and-test: + # Setting name to arch-compiler here to avoid lengthy autogenerated names due to matrix + # e.g build-and-test x86_64-gcc / test (test_progs_parallel, true, 30) / test_progs_parallel on x86_64 with gcc + name: ${{ matrix.arch }} ${{ matrix.kernel_compiler }}-${{ matrix.kernel_compiler == 'gcc' && matrix.gcc_version || matrix.llvm_version }} + uses: ./.github/workflows/kernel-build-test.yml + needs: [set-matrix] + permissions: + id-token: write + contents: read + strategy: + fail-fast: false + matrix: ${{ fromJSON(needs.set-matrix.outputs.build-matrix) }} + with: + arch: ${{ matrix.arch }} + toolchain: ${{ matrix.kernel_compiler }} + toolchain_full: ${{ matrix.kernel_compiler }}-${{ matrix.kernel_compiler == 'gcc' && matrix.gcc_version || matrix.llvm_version }} + runs_on: ${{ toJSON(matrix.runs_on) }} + build_runs_on: ${{ toJSON(matrix.build_runs_on) }} + gcc_version: ${{ matrix.gcc_version }} + llvm_version: ${{ matrix.llvm_version }} + kernel: ${{ matrix.kernel }} + tests: ${{ toJSON(matrix.tests) }} + run_veristat: ${{ matrix.run_veristat }} + # We only run tests on pull requests. + run_tests: ${{ github.event_name != 'push' }} + # Download sources + download_sources: ${{ github.repository == 'kernel-patches/vmtest' }} + build_release: ${{ matrix.build_release }} + secrets: + AWS_ROLE_ARN: ${{ secrets.AWS_ROLE_ARN }} diff --git a/.github/workflows/veristat-kernel.yml b/.github/workflows/veristat-kernel.yml new file mode 100644 index 0000000000000..8c9ba715bf277 --- /dev/null +++ b/.github/workflows/veristat-kernel.yml @@ -0,0 +1,66 @@ +name: veristat_kernel + +on: + workflow_call: + inputs: + arch: + required: true + type: string + description: The architecture to build against, e.g x86_64, aarch64, s390x... + toolchain_full: + required: true + type: string + description: Toolchain identifier, such as llvm-20 + runs_on: + required: true + type: string + description: The runners to run the test on. This is a json string representing an array of labels. + +jobs: + veristat: + name: veristat-kernel + runs-on: ${{ fromJSON(inputs.runs_on) }} + timeout-minutes: 100 + permissions: + id-token: write + contents: read + env: + KERNEL: LATEST + REPO_ROOT: ${{ github.workspace }} + REPO_PATH: "" + KBUILD_OUTPUT: kbuild-output/ + ARCH_AND_TOOL: ${{ inputs.arch }}-${{ inputs.toolchain_full }} + VERISTAT_DUMP_LOG_ON_FAILURE: 'true' + VERISTAT_TARGET: kernel + + steps: + + - uses: actions/checkout@v4 + with: + sparse-checkout: | + .github + ci + + - uses: actions/download-artifact@v4 + with: + name: vmlinux-${{ env.ARCH_AND_TOOL }} + path: . + + - name: Untar artifacts + run: zstd -d -T0 vmlinux-${{ env.ARCH_AND_TOOL }}.tar.zst --stdout | tar -xf - + + - name: Run veristat + uses: libbpf/ci/run-vmtest@v3 + with: + arch: x86_64 + vmlinuz: '${{ github.workspace }}/vmlinuz' + kernel-root: '.' 
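+          # kernel-root is '.' because the vmlinux archive was untarred into the workspace root in the step above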
+ max-cpu: 8 + kernel-test: 'run_veristat' + output-dir: '${{ github.workspace }}' + + - name: Compare and save veristat.kernel.csv + uses: ./.github/actions/veristat_baseline_compare + with: + veristat_output: veristat-kernel + baseline_name: ${{ env.ARCH_AND_TOOL }}-baseline-veristat-kernel diff --git a/.github/workflows/veristat-meta.yml b/.github/workflows/veristat-meta.yml new file mode 100644 index 0000000000000..675127d322491 --- /dev/null +++ b/.github/workflows/veristat-meta.yml @@ -0,0 +1,88 @@ +name: veristat_meta + +on: + workflow_call: + inputs: + arch: + required: true + type: string + description: The architecture to build against, e.g x86_64, aarch64, s390x... + toolchain_full: + required: true + type: string + description: Toolchain identifier, such as llvm-20 + runs_on: + required: true + type: string + description: The runners to run the test on. This is a json string representing an array of labels. + aws_region: + required: true + type: string + description: The AWS region where we pull bpf objects to run against veristat. + secrets: + AWS_ROLE_ARN: + required: true + description: The AWS role used by GH to pull BPF objects from AWS. + +jobs: + veristat: + name: veristat-meta + runs-on: ${{ fromJSON(inputs.runs_on) }} + timeout-minutes: 100 + permissions: + id-token: write + contents: read + env: + KERNEL: LATEST + REPO_ROOT: ${{ github.workspace }} + REPO_PATH: "" + KBUILD_OUTPUT: kbuild-output/ + ARCH_AND_TOOL: ${{ inputs.arch }}-${{ inputs.toolchain_full }} + VERISTAT_TARGET: meta + + steps: + + - uses: actions/checkout@v4 + with: + sparse-checkout: | + .github + ci + + - uses: actions/download-artifact@v4 + with: + name: vmlinux-${{ env.ARCH_AND_TOOL }} + path: . + + - name: Untar artifacts + run: zstd -d -T0 vmlinux-${{ env.ARCH_AND_TOOL }}.tar.zst --stdout | tar -xf - + + - name: Configure AWS Credentials + uses: aws-actions/configure-aws-credentials@v3 + with: + aws-region: ${{ inputs.aws_region }} + role-to-assume: ${{ secrets.AWS_ROLE_ARN }} + role-session-name: github-action-bpf-ci + + - name: Download BPF objects + run: | + mkdir ./bpf_objects + aws s3 sync s3://veristat-bpf-binaries ./bpf_objects + env: + AWS_ROLE_ARN: ${{ secrets.AWS_ROLE_ARN }} + + - name: Run veristat + uses: libbpf/ci/run-vmtest@v3 + with: + arch: x86_64 + vmlinuz: '${{ github.workspace }}/vmlinuz' + kernel-root: '.' + max-cpu: 8 + kernel-test: 'run_veristat' + output-dir: '${{ github.workspace }}' + + - name: Compare and save veristat.meta.csv + uses: ./.github/actions/veristat_baseline_compare + with: + veristat_output: veristat-meta + baseline_name: ${{ env.ARCH_AND_TOOL }}-baseline-veristat-meta + diff --git a/.github/workflows/veristat-scx.yml b/.github/workflows/veristat-scx.yml new file mode 100644 index 0000000000000..641c142e58274 --- /dev/null +++ b/.github/workflows/veristat-scx.yml @@ -0,0 +1,103 @@ +name: veristat_scx + +on: + workflow_call: + inputs: + arch: + required: true + type: string + description: The architecture to build against, e.g x86_64, aarch64, s390x... + toolchain_full: + required: true + type: string + description: Toolchain identifier, such as llvm-20 + runs_on: + required: true + type: string + description: The runners to run the test on. This is a json string representing an array of labels.
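+      # llvm_version feeds the LLVM_VERSION env var of the build-scheds job below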
llvm_version: + required: true + type: string + +jobs: + + build-scheds: + name: build sched-ext/scx + runs-on: ${{ fromJSON(inputs.runs_on) }} + env: + LLVM_VERSION: ${{ inputs.llvm_version }} + SCX_BUILD_OUTPUT: ${{ github.workspace }}/scx-build-output + SCX_PROGS: ${{ github.workspace }}/scx-progs + SCX_REVISION: 737b9b13ccd15e44efb8cdba507f89e595ad3af6 + steps: + - uses: actions/checkout@v4 + with: + sparse-checkout: | + .github + ci + - uses: libbpf/ci/build-scx-scheds@v3 + with: + output-dir: ${{ env.SCX_BUILD_OUTPUT }} + - name: Collect scx progs + run: ${{ github.workspace }}/.github/scripts/collect-scx-bpf-progs.sh ${{ env.SCX_PROGS }} + - name: Upload scx progs + uses: actions/upload-artifact@v4 + with: + name: scx-progs-${{ inputs.arch }}-${{ inputs.toolchain_full }} + if-no-files-found: error + path: ${{ env.SCX_PROGS }} + + veristat: + name: veristat-scx + runs-on: ${{ fromJSON(inputs.runs_on) }} + needs: [build-scheds] + permissions: + id-token: write + contents: read + env: + KERNEL: LATEST + REPO_ROOT: ${{ github.workspace }} + REPO_PATH: "" + KBUILD_OUTPUT: kbuild-output/ + ARCH_AND_TOOL: ${{ inputs.arch }}-${{ inputs.toolchain_full }} + VERISTAT_DUMP_LOG_ON_FAILURE: 'true' + VERISTAT_TARGET: scx + SCX_PROGS: ${{ github.workspace }}/scx-progs + + steps: + + - uses: actions/checkout@v4 + with: + sparse-checkout: | + .github + ci + + - name: Download kernel build artifacts + uses: actions/download-artifact@v4 + with: + name: vmlinux-${{ env.ARCH_AND_TOOL }} + path: . + + - name: Untar kernel build artifacts + run: zstd -d -T0 vmlinux-${{ env.ARCH_AND_TOOL }}.tar.zst --stdout | tar -xf - + + - name: Download scx progs + uses: actions/download-artifact@v4 + with: + name: scx-progs-${{ inputs.arch }}-${{ inputs.toolchain_full }} + path: ${{ env.SCX_PROGS }} + + - name: Run veristat + uses: libbpf/ci/run-vmtest@v3 + with: + arch: x86_64 + vmlinuz: '${{ github.workspace }}/vmlinuz' + kernel-root: '.' + kernel-test: 'run_veristat' + output-dir: '${{ github.workspace }}' + + - name: Compare and save veristat.scx.csv + uses: ./.github/actions/veristat_baseline_compare + with: + veristat_output: veristat-scx + baseline_name: ${{ env.ARCH_AND_TOOL }}-baseline-veristat-scx diff --git a/README.md b/README.md new file mode 100644 index 0000000000000..81a0c1a644b0f --- /dev/null +++ b/README.md @@ -0,0 +1,22 @@ +# BPF CI GitHub Actions workflows + +This repository contains GitHub Actions workflow definitions, scripts and configuration files used by those workflows. + +You can check the workflow runs on the [kernel-patches/bpf actions page](https://github.com/kernel-patches/bpf/actions/workflows/test.yml). + +**"BPF CI"** refers to a continuous integration testing system targeting the [BPF subsystem of the Linux Kernel](https://ebpf.io/what-is-ebpf/).
+ +BPF CI consists of a number of components: +- [kernel-patches/bpf](https://github.com/kernel-patches/bpf) - a copy of the Linux Kernel source repository tracking [upstream bpf trees](https://web.git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next.git/) +- [Kernel Patches Daemon](https://github.com/kernel-patches/kernel-patches-daemon) instance - a service connecting [Patchwork](https://patchwork.kernel.org/project/netdevbpf/list/) with the GitHub repository +- [kernel-patches/vmtest](https://github.com/kernel-patches/vmtest) (this repository) - GitHub Actions workflows +- [libbpf/ci](https://github.com/libbpf/ci) - custom reusable GitHub Actions +- [kernel-patches/runner](https://github.com/kernel-patches/runner) - self-hosted GitHub Actions runners + +Of course, BPF CI also has important dependencies, such as: +- [selftests/bpf](https://web.git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next.git/tree/tools/testing/selftests/bpf) - the main test suite of BPF CI +- [selftests/sched_ext](https://web.git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next.git/tree/tools/testing/selftests/sched_ext) - in-kernel sched_ext test suite +- [veristat](https://web.git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next.git/tree/tools/testing/selftests/bpf/veristat.c) - used to catch performance and BPF verification regressions on a suite of complex BPF programs +- [vmtest](https://github.com/danobi/vmtest) - a QEMU wrapper, used to execute tests in a VM +- [GCC BPF backend](https://gcc.gnu.org/wiki/BPFBackEnd) +- The above-mentioned [Patchwork](https://patchwork.kernel.org/) instance, maintained by the Linux Foundation diff --git a/ci/claude/README.md b/ci/claude/README.md new file mode 100644 index 0000000000000..669b942d0c15e --- /dev/null +++ b/ci/claude/README.md @@ -0,0 +1,67 @@ +# AI Code Reviews in BPF CI + +## TL;DR +- **Please make sure AI is actually wrong before dismissing the review** + - An email response explaining why AI is wrong would be very helpful +- BPF CI includes [a workflow](https://github.com/kernel-patches/vmtest/blob/master/.github/workflows/ai-code-review.yml) running AI code review +- The reviews are posted as comments on [kernel-patches/bpf PRs](https://github.com/kernel-patches/bpf/pulls) +- The review comments are forwarded to the patch recipients via email by [KPD](https://github.com/kernel-patches/kernel-patches-daemon) +- Prompts are here: https://github.com/masoncl/review-prompts + +If you received an AI review for your patch submission, please try to evaluate it in the same way you would if it were written by a person, and respond. +Your response is for humans, not for AI. + +## How does it work? + +BPF CI processes every patch series submitted to the [Linux Kernel BPF mailing list](https://lore.kernel.org/bpf/). +For each patch, the system executes various tests, such as [selftests/bpf](https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next.git/tree/tools/testing/selftests/bpf), and, more recently, automated code reviews performed by an LLM-based AI. + +BPF CI runs on [GitHub Actions](https://docs.github.com/en/actions) workflows orchestrated by [KPD](https://github.com/kernel-patches/kernel-patches-daemon). + +The AI review is implemented with the [Claude Code GitHub Action](https://github.com/anthropics/claude-code-action), which essentially installs the Claude Code command-line app and an MCP server with a number of common tools available to it.
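+For reference, the core of the review is a single workflow step invoking the action with a short prompt (copied from [ai-code-review.yml](https://github.com/kernel-patches/vmtest/blob/master/.github/workflows/ai-code-review.yml) in this repository):
+
+```yaml
+- uses: anthropics/claude-code-action@v1
+  with:
+    github_token: ${{ steps.app-token.outputs.token }}
+    use_bedrock: "true"
+    claude_args: '--max-turns 100'
+    prompt: |
+      Current directory is the root of a Linux Kernel git repository.
+      Using the prompt `review/review-core.md` and the prompt directory `review`
+      do a code review of the top commit in the Linux repository.
+```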
+
+To achieve the output that might have led you to this page, [a set of elaborate prompts](https://github.com/masoncl/review-prompts) was developed, specifically targeting the Linux Kernel source code.
+The workflow checks out the Linux and prompts repositories and initiates the review with a trivial [trigger prompt](https://github.com/kernel-patches/vmtest/blob/master/.github/workflows/ai-code-review.yml#L91-L94).
+
+### Are the reviews even accurate?
+
+We make every effort for AI reviews to be high-signal messages, although the nature of LLMs makes them prone to mistakes.
+
+At this point, this is still an experiment, but the results so far have been promising.
+For example, AI is pretty good at catching dumb mistakes (e.g. use-after-free) that humans can easily miss.
+At the same time, AI can miss context obvious to a human, such as relationships between newer and older changes.
+
+If you'd like to suggest an improvement to the prompts, open a PR to the [review-prompts](https://github.com/masoncl/review-prompts) repository.
+
+### Will my patch get nacked because of the AI review?
+
+Paraphrasing an IBM training manual:
+> "An LLM can never be held accountable, therefore an LLM must never make an Ack/Nack decision"
+
+The review prompts are designed such that the AI only searches for regressions it can provide evidence for.
+For the majority of patches, a review is not generated, so if you received one, it's worth evaluating.
+
+It's unlikely that your patch gets discarded *just* because AI found something, especially if you address it or explain why AI is wrong.
+
+But if you ignore an AI review, human reviewers will likely ask for a reason.
+
+### What if I don't like it?
+
+Bring it up with the maintainers on the mailing list and elaborate.
+
+It is expected that AI may be mistaken. However, it is also expected that the patch authors answer reasonable questions about the code changes they propose.
+
+If there is a technical issue (say, with email notifications, formatting, etc.), open an issue in [this repository](https://github.com/kernel-patches/vmtest/issues).
+
+### Who pays for the tokens?
+
+[Meta Platforms, Inc.](https://www.meta.com/)
+
+BPF CI in its current form has been developed and maintained by the Linux Kernel team at Meta. Most of the relevant hardware is also provided by Meta.
+
+### Who set this up?
+
+- [Chris Mason](https://github.com/masoncl) is the prompt engineer
+- [Ihor Solodrai](https://github.com/theihor) is the infra plumber
diff --git a/ci/claude/post-pr-comment.js b/ci/claude/post-pr-comment.js
new file mode 100644
index 0000000000000..218b7709c26c5
--- /dev/null
+++ b/ci/claude/post-pr-comment.js
@@ -0,0 +1,33 @@
+module.exports = async ({github, context}) => {
+  const fs = require('fs');
+
+  const jobSummaryUrl = `${process.env.GITHUB_SERVER_URL}/${process.env.GITHUB_REPOSITORY}/actions/runs/${process.env.GITHUB_RUN_ID}`;
+  const reviewContent = fs.readFileSync(process.env.REVIEW_FILE, 'utf8');
+  const subject = process.env.PATCH_SUBJECT || 'Could not determine patch subject';
+  const commentBody = `
+\`\`\`
+${reviewContent}
+\`\`\`
+
+---
+AI reviewed your patch. Please fix the bug or reply by email explaining why it's not a bug.
+See: https://github.com/kernel-patches/vmtest/blob/master/ci/claude/README.md + +In-Reply-To-Subject: \`${subject}\` +CI run summary: ${jobSummaryUrl} +`; + + await github.rest.issues.createComment({ + issue_number: context.issue.number, + owner: context.repo.owner, + repo: context.repo.repo, + body: commentBody + }); + + await github.rest.issues.addLabels({ + issue_number: context.issue.number, + owner: context.repo.owner, + repo: context.repo.repo, + labels: ["ai-review"], + }); +}; diff --git a/ci/claude/settings.json b/ci/claude/settings.json new file mode 100644 index 0000000000000..06717188a9631 --- /dev/null +++ b/ci/claude/settings.json @@ -0,0 +1,7 @@ +{ + "model": "us.anthropic.claude-sonnet-4-5-20250929-v1:0", + "permissions": { + "allow": ["Bash", "Edit", "MultiEdit", "Write"], + "defaultMode": "acceptEdits" + } +} diff --git a/ci/diffs/.keep b/ci/diffs/.keep new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/ci/diffs/0001-selftests-bpf-disable-detection-of-llvm-when-buildin.patch b/ci/diffs/0001-selftests-bpf-disable-detection-of-llvm-when-buildin.patch new file mode 100644 index 0000000000000..6497a6cc38c90 --- /dev/null +++ b/ci/diffs/0001-selftests-bpf-disable-detection-of-llvm-when-buildin.patch @@ -0,0 +1,41 @@ +From 42839864a62ee244ec280b09149b1cb439f681db Mon Sep 17 00:00:00 2001 +From: Manu Bretelle +Date: Fri, 27 Oct 2023 18:25:39 -0700 +Subject: [PATCH bpf-next] selftests/bpf: disable detection of llvm when + building bpftool + +The VMs in which we run the selftests do not have llvm installed. +We build selftests/bpftool in a host that have llvm. +bpftool currently will use llvm first and fallback to libbfd but there +is no way to disable detection from the command line. + +Removing it from the feature detection should force us to use libbfd. + +Signed-off-by: Manu Bretelle +--- + tools/bpf/bpftool/Makefile | 2 -- + 1 file changed, 2 deletions(-) + +diff --git a/tools/bpf/bpftool/Makefile b/tools/bpf/bpftool/Makefile +index e9154ace80ff..01314458e25e 100644 +--- a/tools/bpf/bpftool/Makefile ++++ b/tools/bpf/bpftool/Makefile +@@ -95,7 +95,6 @@ RM ?= rm -f + FEATURE_USER = .bpftool + + FEATURE_TESTS := clang-bpf-co-re +-FEATURE_TESTS += llvm + FEATURE_TESTS += libcap + FEATURE_TESTS += libbfd + FEATURE_TESTS += libbfd-liberty +@@ -104,7 +103,6 @@ FEATURE_TESTS += disassembler-four-args + FEATURE_TESTS += disassembler-init-styled + + FEATURE_DISPLAY := clang-bpf-co-re +-FEATURE_DISPLAY += llvm + FEATURE_DISPLAY += libcap + FEATURE_DISPLAY += libbfd + FEATURE_DISPLAY += libbfd-liberty +-- +2.39.3 + diff --git a/ci/diffs/0001-selftests-bpf-work-around-latest-Clang-smartness.patch b/ci/diffs/0001-selftests-bpf-work-around-latest-Clang-smartness.patch new file mode 100644 index 0000000000000..ec1e29a8ab974 --- /dev/null +++ b/ci/diffs/0001-selftests-bpf-work-around-latest-Clang-smartness.patch @@ -0,0 +1,31 @@ +From d31a7125891994681503770cff46a119692fb2b9 Mon Sep 17 00:00:00 2001 +From: Andrii Nakryiko +Date: Mon, 11 Dec 2023 17:09:38 -0800 +Subject: [PATCH 1/1] selftests/bpf: work around latest Clang smartness + +Work around the issue while we deal with it in the Clang itself. +See [0]. 
+ + [0] https://github.com/llvm/llvm-project/pull/73662#issuecomment-1849281758 + +Signed-off-by: Andrii Nakryiko +--- + tools/testing/selftests/bpf/progs/iters.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/tools/testing/selftests/bpf/progs/iters.c b/tools/testing/selftests/bpf/progs/iters.c +index 3aca3dc145b5..929ba6fa2105 100644 +--- a/tools/testing/selftests/bpf/progs/iters.c ++++ b/tools/testing/selftests/bpf/progs/iters.c +@@ -1420,7 +1420,7 @@ SEC("raw_tp") + __success + int iter_arr_with_actual_elem_count(const void *ctx) + { +- int i, n = loop_data.n, sum = 0; ++ unsigned i, n = loop_data.n, sum = 0; + + if (n > ARRAY_SIZE(loop_data.data)) + return 0; +-- +2.34.1 + diff --git a/ci/diffs/20251014-selftests-arg_parsing-Ensure-data-is-flushed-to-.patch b/ci/diffs/20251014-selftests-arg_parsing-Ensure-data-is-flushed-to-.patch new file mode 100644 index 0000000000000..efcdbeed208fc --- /dev/null +++ b/ci/diffs/20251014-selftests-arg_parsing-Ensure-data-is-flushed-to-.patch @@ -0,0 +1,33 @@ +From 423112d2e9b591999efa4ad74000f8f6f3f381ea Mon Sep 17 00:00:00 2001 +From: Xing Guo +Date: Tue, 14 Oct 2025 16:03:23 +0800 +Subject: [PATCH 20251015/20251015] selftests: arg_parsing: Ensure data is + flushed to disk before reading. + +Recently, I noticed a selftest failure in my local environment. The +test_parse_test_list_file writes some data to +/tmp/bpf_arg_parsing_test.XXXXXX and parse_test_list_file() will read +the data back. However, after writing data to that file, we forget to +call fsync() and it's causing testing failure in my laptop. This patch +helps fix it by adding the missing fsync() call. + +Signed-off-by: Xing Guo +--- + tools/testing/selftests/bpf/prog_tests/arg_parsing.c | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/tools/testing/selftests/bpf/prog_tests/arg_parsing.c b/tools/testing/selftests/bpf/prog_tests/arg_parsing.c +index fbf0d9c2f58b..d9fcbfb72aaa 100644 +--- a/tools/testing/selftests/bpf/prog_tests/arg_parsing.c ++++ b/tools/testing/selftests/bpf/prog_tests/arg_parsing.c +@@ -140,6 +140,7 @@ static void test_parse_test_list_file(void) + fprintf(fp, "testA/subtest2\n"); + fprintf(fp, "testC_no_eof_newline"); + fflush(fp); ++ fsync(fd); + + if (!ASSERT_OK(ferror(fp), "prepare tmp")) + goto out_fclose; +-- +2.51.0 + diff --git a/ci/diffs/20251014-selftests-bpf-make-arg_parsing.c-more-robust-to-.patch b/ci/diffs/20251014-selftests-bpf-make-arg_parsing.c-more-robust-to-.patch new file mode 100644 index 0000000000000..34104b3fc9ad8 --- /dev/null +++ b/ci/diffs/20251014-selftests-bpf-make-arg_parsing.c-more-robust-to-.patch @@ -0,0 +1,56 @@ +From 8a03969566c5447aa72469e8f09b8158e3dad8f9 Mon Sep 17 00:00:00 2001 +From: Andrii Nakryiko +Date: Tue, 14 Oct 2025 13:20:37 -0700 +Subject: [PATCH 20251014/20251015] selftests/bpf: make arg_parsing.c more + robust to crashes + +We started getting a crash in BPF CI, which seems to originate from +test_parse_test_list_file() test and is happening at this line: + + ASSERT_OK(strcmp("test_with_spaces", set.tests[0].name), "test 0 name"); + +One way we can crash there is if set.cnt zero, which is checked for with +ASSERT_EQ() above, but we proceed after this regardless of the outcome. +Instead of crashing, we should bail out with test failure early. + +Similarly, if parse_test_list_file() fails, we shouldn't be even looking +at set, so bail even earlier if ASSERT_OK() fails. 
+ +Fixes: 64276f01dce8 ("selftests/bpf: Test_progs can read test lists from file") +Signed-off-by: Andrii Nakryiko +--- + tools/testing/selftests/bpf/prog_tests/arg_parsing.c | 9 ++++++--- + 1 file changed, 6 insertions(+), 3 deletions(-) + +diff --git a/tools/testing/selftests/bpf/prog_tests/arg_parsing.c b/tools/testing/selftests/bpf/prog_tests/arg_parsing.c +index bb143de68875..fbf0d9c2f58b 100644 +--- a/tools/testing/selftests/bpf/prog_tests/arg_parsing.c ++++ b/tools/testing/selftests/bpf/prog_tests/arg_parsing.c +@@ -146,9 +146,12 @@ static void test_parse_test_list_file(void) + + init_test_filter_set(&set); + +- ASSERT_OK(parse_test_list_file(tmpfile, &set, true), "parse file"); ++ if (!ASSERT_OK(parse_test_list_file(tmpfile, &set, true), "parse file")) ++ goto out_fclose; ++ ++ if (!ASSERT_EQ(set.cnt, 4, "test count")) ++ goto out_free_set; + +- ASSERT_EQ(set.cnt, 4, "test count"); + ASSERT_OK(strcmp("test_with_spaces", set.tests[0].name), "test 0 name"); + ASSERT_EQ(set.tests[0].subtest_cnt, 0, "test 0 subtest count"); + ASSERT_OK(strcmp("testA", set.tests[1].name), "test 1 name"); +@@ -158,8 +161,8 @@ static void test_parse_test_list_file(void) + ASSERT_OK(strcmp("testB", set.tests[2].name), "test 2 name"); + ASSERT_OK(strcmp("testC_no_eof_newline", set.tests[3].name), "test 3 name"); + ++out_free_set: + free_test_filter_set(&set); +- + out_fclose: + fclose(fp); + out_remove: +-- +2.51.0 + diff --git a/ci/vmtest/configs/DENYLIST b/ci/vmtest/configs/DENYLIST new file mode 100644 index 0000000000000..20a090295a607 --- /dev/null +++ b/ci/vmtest/configs/DENYLIST @@ -0,0 +1,17 @@ +# TEMPORARY +btf_dump/btf_dump: syntax +kprobe_multi_bench_attach +core_reloc/enum64val +core_reloc/size___diff_sz +core_reloc/type_based___diff_sz +test_ima # All of CI is broken on it following 6.3-rc1 merge +lwt_reroute # crashes kernel after netnext merge from 2ab1efad60ad "net/sched: cls_api: complement tcf_tfilter_dump_policy" +tc_links_ingress # started failing after net-next merge from 2ab1efad60ad "net/sched: cls_api: complement tcf_tfilter_dump_policy" +xdp_bonding/xdp_bonding_features # started failing after net merge from 359e54a93ab4 "l2tp: pass correct message length to ip6_append_data" +tc_redirect/tc_redirect_dtime # uapi breakage after net-next commit 885c36e59f46 ("net: Re-use and set mono_delivery_time bit for userspace tstamp packets") +migrate_reuseport/IPv4 TCP_NEW_SYN_RECV reqsk_timer_handler # flaky, under investigation +migrate_reuseport/IPv6 TCP_NEW_SYN_RECV reqsk_timer_handler # flaky, under investigation +connect_force_port # unreliably fails +sockmap_ktls/sockmap_ktls disconnect_after_delete* # https://lore.kernel.org/bpf/20250415163332.1836826-1-ihor.solodrai@linux.dev/ +verif_scale_pyperf600 # llvm 20 generates code that fails verification +arena_spin_lock # llvm 20 generates code that fails verification diff --git a/ci/vmtest/configs/DENYLIST.aarch64 b/ci/vmtest/configs/DENYLIST.aarch64 new file mode 100644 index 0000000000000..bdce99f3855ec --- /dev/null +++ b/ci/vmtest/configs/DENYLIST.aarch64 @@ -0,0 +1,5 @@ +cgrp_local_storage # libbpf: prog 'update_cookie_tracing': failed to attach: ERROR: strerror_r(-524)=22 +core_reloc_btfgen # run_core_reloc_tests:FAIL:run_btfgen unexpected error: 32512 (errno 22) +usdt/multispec # usdt_300_bad_attach unexpected pointer: 0x558c63d8f0 +xdp_bonding # whole test suite is very unstable on aarch64 +res_spin_lock_success # flaky diff --git a/ci/vmtest/configs/DENYLIST.rc b/ci/vmtest/configs/DENYLIST.rc new file mode 100644 index 
0000000000000..8aa33e6b71443 --- /dev/null +++ b/ci/vmtest/configs/DENYLIST.rc @@ -0,0 +1,3 @@ +send_signal/send_signal_nmi # PMU events configure correctly but don't trigger NMI's for some reason (AMD nested virt) +send_signal/send_signal_nmi_thread # Same as above +token/obj_priv_implicit_token_envvar # Unknown root cause, but reliably fails diff --git a/ci/vmtest/configs/DENYLIST.s390x b/ci/vmtest/configs/DENYLIST.s390x new file mode 100644 index 0000000000000..9b90b615aea55 --- /dev/null +++ b/ci/vmtest/configs/DENYLIST.s390x @@ -0,0 +1,11 @@ +deny_namespace # not yet in bpf denylist +tc_redirect/tc_redirect_dtime # very flaky +lru_bug # not yet in bpf-next denylist +# Disabled temporarily for a crash. +# https://lore.kernel.org/bpf/c9923c1d-971d-4022-8dc8-1364e929d34c@gmail.com/ +dummy_st_ops/dummy_init_ptr_arg +fexit_bpf2bpf +tailcalls +trace_ext +xdp_bpf2bpf +xdp_metadata diff --git a/ci/vmtest/configs/DENYLIST.test_progs-bpf_gcc b/ci/vmtest/configs/DENYLIST.test_progs-bpf_gcc new file mode 100644 index 0000000000000..a3c745d1f5b52 --- /dev/null +++ b/ci/vmtest/configs/DENYLIST.test_progs-bpf_gcc @@ -0,0 +1,904 @@ +arena_htab +async_stack_depth +bad_struct_ops/invalid_prog_reuse +bpf_cookie +bpf_iter/bpf_hash_map +bpf_iter/ksym +bpf_iter/tcp4 +bpf_iter/tcp6 +bpf_iter/udp4 +bpf_iter/udp6 +bpf_iter/unix +bpf_iter_setsockopt +bpf_iter_setsockopt_unix +bpf_mod_race +bpf_nf/tc-bpf-ct +bpf_nf/xdp-ct +bpf_tcp_ca/cubic +btf_dump/btf_dump: bitfields +btf_dump/btf_dump: packing +btf_dump/btf_dump: padding +btf_dump/btf_dump: syntax +btf_map_in_map +cb_refs +cgroup_get_current_cgroup_id +cgroup_iter/cgroup_iter__self_only_css_task +cgroup_tcp_skb +cgrp_kfunc +cls_redirect/cls_redirect_dynptr +connect_force_port +core_autosize +core_read_macros +core_reloc/type_id +core_reloc/type_id___missing_targets +core_reloc_btfgen/type_id +core_reloc_btfgen/type_id___missing_targets +cpumask/test_acquire_wrong_cpumask +cpumask/test_alloc_double_release +cpumask/test_alloc_free_cpumask +cpumask/test_alloc_no_release +cpumask/test_and_or_xor +cpumask/test_copy_any_anyand +cpumask/test_cpumask_null +cpumask/test_cpumask_weight +cpumask/test_first_firstzero_cpu +cpumask/test_firstand_nocpu +cpumask/test_global_mask_array_l2_rcu +cpumask/test_global_mask_array_one_rcu +cpumask/test_global_mask_array_rcu +cpumask/test_global_mask_nested_deep_array_rcu +cpumask/test_global_mask_nested_deep_rcu +cpumask/test_global_mask_nested_rcu +cpumask/test_global_mask_no_null_check +cpumask/test_global_mask_out_of_rcu +cpumask/test_global_mask_rcu +cpumask/test_global_mask_rcu_no_null_check +cpumask/test_insert_leave +cpumask/test_insert_remove_no_release +cpumask/test_insert_remove_release +cpumask/test_intersects_subset +cpumask/test_invalid_nested_array +cpumask/test_mutate_cpumask +cpumask/test_set_clear_cpu +cpumask/test_setall_clear_cpu +cpumask/test_test_and_set_clear +crypto_basic/crypto_acquire +crypto_sanity +deny_namespace +dummy_st_ops/test_unsupported_field_sleepable +dynptr/add_dynptr_to_map1 +dynptr/add_dynptr_to_map2 +dynptr/clone_invalid1 +dynptr/clone_invalid2 +dynptr/clone_invalidate1 +dynptr/clone_invalidate2 +dynptr/clone_invalidate3 +dynptr/clone_invalidate4 +dynptr/clone_invalidate5 +dynptr/clone_invalidate6 +dynptr/clone_skb_packet_data +dynptr/clone_xdp_packet_data +dynptr/data_slice_missing_null_check1 +dynptr/data_slice_missing_null_check2 +dynptr/data_slice_out_of_bounds_map_value +dynptr/data_slice_out_of_bounds_ringbuf +dynptr/data_slice_out_of_bounds_skb +dynptr/data_slice_use_after_release1 
+dynptr/data_slice_use_after_release2 +dynptr/dynptr_adjust_invalid +dynptr/dynptr_from_mem_invalid_api +dynptr/dynptr_invalidate_slice_failure +dynptr/dynptr_invalidate_slice_or_null +dynptr/dynptr_invalidate_slice_reinit +dynptr/dynptr_is_null_invalid +dynptr/dynptr_is_rdonly_invalid +dynptr/dynptr_overwrite_ref +dynptr/dynptr_partial_slot_invalidate +dynptr/dynptr_pruning_overwrite +dynptr/dynptr_pruning_type_confusion +dynptr/dynptr_read_into_slot +dynptr/dynptr_size_invalid +dynptr/dynptr_slice_var_len1 +dynptr/dynptr_slice_var_len2 +dynptr/dynptr_var_off_overwrite +dynptr/global +dynptr/invalid_data_slices +dynptr/invalid_helper1 +dynptr/invalid_helper2 +dynptr/invalid_offset +dynptr/invalid_read1 +dynptr/invalid_read2 +dynptr/invalid_read3 +dynptr/invalid_read4 +dynptr/invalid_slice_rdwr_rdonly +dynptr/invalid_write1 +dynptr/invalid_write2 +dynptr/invalid_write3 +dynptr/invalid_write4 +dynptr/release_twice +dynptr/release_twice_callback +dynptr/ringbuf_invalid_api +dynptr/ringbuf_missing_release1 +dynptr/ringbuf_missing_release2 +dynptr/ringbuf_missing_release_callback +dynptr/ringbuf_release_uninit_dynptr +dynptr/skb_invalid_ctx +dynptr/skb_invalid_ctx_fentry +dynptr/skb_invalid_ctx_fexit +dynptr/skb_invalid_data_slice1 +dynptr/skb_invalid_data_slice2 +dynptr/skb_invalid_data_slice3 +dynptr/skb_invalid_data_slice4 +dynptr/skb_invalid_slice_write +dynptr/test_dynptr_reg_type +dynptr/test_dynptr_skb_no_buff +dynptr/test_dynptr_skb_small_buff +dynptr/test_dynptr_skb_tp_btf +dynptr/test_read_write +dynptr/uninit_write_into_slot +dynptr/use_after_invalid +dynptr/xdp_invalid_ctx +dynptr/xdp_invalid_data_slice1 +dynptr/xdp_invalid_data_slice2 +exceptions/check_assert_eq_int_max +exceptions/check_assert_eq_int_min +exceptions/check_assert_eq_llong_max +exceptions/check_assert_eq_llong_min +exceptions/check_assert_eq_zero +exceptions/check_assert_ge_neg +exceptions/check_assert_ge_pos +exceptions/check_assert_ge_zero +exceptions/check_assert_generic +exceptions/check_assert_gt_neg +exceptions/check_assert_gt_pos +exceptions/check_assert_gt_zero +exceptions/check_assert_le_neg +exceptions/check_assert_le_pos +exceptions/check_assert_le_zero +exceptions/check_assert_lt_neg +exceptions/check_assert_lt_pos +exceptions/check_assert_lt_zero +exceptions/check_assert_range_s64 +exceptions/check_assert_range_u64 +exceptions/check_assert_single_range_s64 +exceptions/check_assert_single_range_u64 +exceptions/check_assert_with_return +exceptions/exception_ext +exceptions/exception_ext_mod_cb_runtime +exceptions/non-throwing extension -> non-throwing subprog +exceptions/non-throwing extension -> throwing global subprog +exceptions/non-throwing fentry -> exception_cb +exceptions/non-throwing fexit -> exception_cb +exceptions/non-throwing fmod_ret -> non-throwing global subprog +exceptions/reject_async_callback_throw +exceptions/reject_exception_throw_cb +exceptions/reject_exception_throw_cb_diff +exceptions/reject_set_exception_cb_bad_ret2 +exceptions/reject_subprog_with_lock +exceptions/reject_subprog_with_rcu_read_lock +exceptions/reject_with_cb +exceptions/reject_with_cb_reference +exceptions/reject_with_lock +exceptions/reject_with_rbtree_add_throw +exceptions/reject_with_rcu_read_lock +exceptions/reject_with_reference +exceptions/reject_with_subprog_reference +exceptions/throwing extension (with custom cb) -> exception_cb +exceptions/throwing extension -> global func in exception_cb +exceptions/throwing extension -> non-throwing global subprog +exceptions/throwing extension -> throwing global 
subprog +exceptions/throwing fentry -> exception_cb +exceptions/throwing fexit -> exception_cb +failures_wq +fexit_bpf2bpf/fmod_ret_freplace +fexit_bpf2bpf/func_replace +fexit_bpf2bpf/func_replace_global_func +fexit_bpf2bpf/func_replace_multi +fexit_bpf2bpf/func_sockmap_update +fexit_bpf2bpf/target_yes_callees +global_func_dead_code +global_map_resize +inner_array_lookup +irq/irq_flag_overwrite +irq/irq_flag_overwrite_partial +irq/irq_global_subprog +irq/irq_ooo_refs_array +irq/irq_restore_4_subprog +irq/irq_restore_bad_arg +irq/irq_restore_invalid +irq/irq_restore_iter +irq/irq_restore_missing_1_subprog +irq/irq_restore_missing_2 +irq/irq_restore_missing_2_subprog +irq/irq_restore_missing_3 +irq/irq_restore_missing_3_minus_2 +irq/irq_restore_missing_3_minus_2_subprog +irq/irq_restore_missing_3_subprog +irq/irq_restore_ooo +irq/irq_restore_ooo_3 +irq/irq_restore_ooo_3_subprog +irq/irq_save_bad_arg +irq/irq_save_invalid +irq/irq_save_iter +irq/irq_sleepable_helper +irq/irq_sleepable_kfunc +iters/compromise_iter_w_direct_write_and_skip_destroy_fail +iters/compromise_iter_w_direct_write_fail +iters/compromise_iter_w_helper_write_fail +iters/create_and_forget_to_destroy_fail +iters/css_task +iters/delayed_precision_mark +iters/delayed_read_mark +iters/destroy_without_creating_fail +iters/double_create_fail +iters/double_destroy_fail +iters/iter_css_lock_and_unlock +iters/iter_css_task_for_each +iters/iter_css_without_lock +iters/iter_destroy_bad_arg +iters/iter_err_too_permissive1 +iters/iter_err_too_permissive2 +iters/iter_err_too_permissive3 +iters/iter_err_unsafe_asm_loop +iters/iter_err_unsafe_c_loop +iters/iter_nested_iters +iters/iter_new_bad_arg +iters/iter_next_bad_arg +iters/iter_next_ptr_mem_not_trusted +iters/iter_next_rcu_not_trusted +iters/iter_next_rcu_or_null +iters/iter_next_trusted_or_null +iters/iter_obfuscate_counter +iters/iter_subprog_iters +iters/iter_tasks_lock_and_unlock +iters/iter_tasks_without_lock +iters/leak_iter_from_subprog_fail +iters/loop_state_deps1 +iters/loop_state_deps2 +iters/missing_null_check_fail +iters/next_after_destroy_fail +iters/next_without_new_fail +iters/read_from_iter_slot_fail +iters/stacksafe_should_not_conflate_stack_spill_and_iter +iters/testmod_seq_getter_after_bad +iters/testmod_seq_getter_before_bad +iters/wrong_sized_read_fail +jeq_infer_not_null +jit_probe_mem +kfree_skb +kfunc_call/kfunc_call_ctx +kfunc_call/kfunc_call_test1 +kfunc_call/kfunc_call_test2 +kfunc_call/kfunc_call_test4 +kfunc_call/kfunc_call_test_get_mem +kfunc_call/kfunc_call_test_ref_btf_id +kfunc_call/kfunc_call_test_static_unused_arg +kfunc_call/kfunc_syscall_test +kfunc_call/kfunc_syscall_test_null +kfunc_dynptr_param/not_ptr_to_stack +kfunc_dynptr_param/not_valid_dynptr +kfunc_param_nullable/kfunc_dynptr_nullable_test3 +kprobe_multi_test/kprobe_session_return_2 +kptr_xchg_inline +l4lb_all/l4lb_noinline +l4lb_all/l4lb_noinline_dynptr +linked_list +local_kptr_stash/drop_rb_node_off +local_kptr_stash/local_kptr_stash_local_with_root +local_kptr_stash/local_kptr_stash_plain +local_kptr_stash/local_kptr_stash_simple +local_kptr_stash/local_kptr_stash_unstash +local_kptr_stash/refcount_acquire_without_unstash +local_kptr_stash/stash_rb_nodes +log_buf/obj_load_log_buf +log_fixup/bad_core_relo_subprog +log_fixup/bad_core_relo_trunc_full +lru_bug +map_btf +map_in_map/acc_map_in_array +map_in_map/acc_map_in_htab +map_in_map/sleepable_acc_map_in_array +map_in_map/sleepable_acc_map_in_htab +map_kptr/correct_btf_id_check_size +map_kptr/inherit_untrusted_on_walk 
+map_kptr/kptr_xchg_possibly_null +map_kptr/kptr_xchg_ref_state +map_kptr/mark_ref_as_untrusted_or_null +map_kptr/marked_as_untrusted_or_null +map_kptr/non_const_var_off +map_kptr/non_const_var_off_kptr_xchg +map_kptr/reject_bad_type_xchg +map_kptr/reject_kptr_xchg_on_unref +map_kptr/reject_member_of_ref_xchg +map_kptr/reject_untrusted_xchg +map_kptr/success-map +map_ptr +nested_trust/test_invalid_nested_user_cpus +nested_trust/test_invalid_skb_field +percpu_alloc/array +percpu_alloc/array_sleepable +percpu_alloc/cgrp_local_storage +percpu_alloc/test_array_map_1 +percpu_alloc/test_array_map_2 +percpu_alloc/test_array_map_3 +percpu_alloc/test_array_map_4 +percpu_alloc/test_array_map_5 +percpu_alloc/test_array_map_6 +percpu_alloc/test_array_map_7 +percpu_alloc/test_array_map_8 +perf_branches/perf_branches_no_hw +pkt_access +preempt_lock/preempt_global_subprog_test +preempt_lock/preempt_lock_missing_1 +preempt_lock/preempt_lock_missing_1_subprog +preempt_lock/preempt_lock_missing_2 +preempt_lock/preempt_lock_missing_2_minus_1_subprog +preempt_lock/preempt_lock_missing_2_subprog +preempt_lock/preempt_lock_missing_3 +preempt_lock/preempt_lock_missing_3_minus_2 +preempt_lock/preempt_sleepable_helper +preempt_lock/preempt_sleepable_kfunc +preempted_bpf_ma_op +prog_run_opts +prog_tests_framework +raw_tp_null +rbtree_fail +rbtree_success +recursion +refcounted_kptr +refcounted_kptr_fail +refcounted_kptr_wrong_owner +reference_tracking/sk_lookup_success +ringbuf_multi +setget_sockopt +sk_lookup +skc_to_unix_sock +sock_addr/recvmsg4: attach prog with wrong attach type +sock_addr/recvmsg4: recvfrom (dgram) +sock_addr/recvmsg6: attach prog with wrong attach type +sock_addr/recvmsg6: recvfrom (dgram) +sock_addr/sendmsg4: attach prog with wrong attach type +sock_addr/sendmsg4: kernel_sendmsg (dgram) +sock_addr/sendmsg4: kernel_sendmsg deny (dgram) +sock_addr/sendmsg4: sendmsg (dgram) +sock_addr/sendmsg4: sendmsg deny (dgram) +sock_addr/sendmsg4: sock_sendmsg (dgram) +sock_addr/sendmsg4: sock_sendmsg deny (dgram) +sock_addr/sendmsg6: attach prog with wrong attach type +sock_addr/sendmsg6: kernel_sendmsg (dgram) +sock_addr/sendmsg6: kernel_sendmsg [::] (BSD'ism) (dgram) +sock_addr/sendmsg6: kernel_sendmsg deny (dgram) +sock_addr/sendmsg6: sendmsg (dgram) +sock_addr/sendmsg6: sendmsg IPv4-mapped IPv6 (dgram) +sock_addr/sendmsg6: sendmsg [::] (BSD'ism) (dgram) +sock_addr/sendmsg6: sendmsg deny (dgram) +sock_addr/sendmsg6: sendmsg dst IP = [::] (BSD'ism) (dgram) +sock_addr/sendmsg6: sock_sendmsg (dgram) +sock_addr/sendmsg6: sock_sendmsg [::] (BSD'ism) (dgram) +sock_addr/sendmsg6: sock_sendmsg deny (dgram) +sock_destroy/trace_tcp_destroy_sock +sock_fields +sockmap_listen/sockhash IPv4 TCP test_reuseport_mixed_groups +sockmap_listen/sockhash IPv4 TCP test_reuseport_select_connected +sockmap_listen/sockhash IPv4 UDP test_reuseport_mixed_groups +sockmap_listen/sockhash IPv4 UDP test_reuseport_select_connected +sockmap_listen/sockhash IPv6 TCP test_reuseport_mixed_groups +sockmap_listen/sockhash IPv6 TCP test_reuseport_select_connected +sockmap_listen/sockhash IPv6 UDP test_reuseport_mixed_groups +sockmap_listen/sockhash IPv6 UDP test_reuseport_select_connected +sockmap_listen/sockmap IPv4 TCP test_reuseport_mixed_groups +sockmap_listen/sockmap IPv4 TCP test_reuseport_select_connected +sockmap_listen/sockmap IPv4 UDP test_reuseport_mixed_groups +sockmap_listen/sockmap IPv4 UDP test_reuseport_select_connected +sockmap_listen/sockmap IPv6 TCP test_reuseport_mixed_groups +sockmap_listen/sockmap IPv6 TCP 
test_reuseport_select_connected +sockmap_listen/sockmap IPv6 UDP test_reuseport_mixed_groups +sockmap_listen/sockmap IPv6 UDP test_reuseport_select_connected +spin_lock +struct_ops_module/unsupported_ops +syscall +tailcalls/classifier_0 +tailcalls/classifier_1 +tailcalls/reject_tail_call_preempt_lock +tailcalls/reject_tail_call_rcu_lock +tailcalls/reject_tail_call_ref +tailcalls/reject_tail_call_spin_lock +tailcalls/tailcall_6 +tailcalls/tailcall_bpf2bpf_2 +tailcalls/tailcall_bpf2bpf_3 +tailcalls/tailcall_bpf2bpf_fentry +tailcalls/tailcall_bpf2bpf_fentry_entry +tailcalls/tailcall_bpf2bpf_fentry_fexit +tailcalls/tailcall_bpf2bpf_fexit +tailcalls/tailcall_bpf2bpf_hierarchy_2 +tailcalls/tailcall_bpf2bpf_hierarchy_3 +task_kfunc +task_local_storage/uptr_across_pages +task_local_storage/uptr_basic +task_local_storage/uptr_kptr_xchg +task_local_storage/uptr_map_failure_e2big +task_local_storage/uptr_map_failure_kstruct +task_local_storage/uptr_map_failure_size0 +task_local_storage/uptr_no_null_check +task_local_storage/uptr_obj_new +task_local_storage/uptr_update_failure +tc_bpf/tc_bpf_non_root +tc_redirect/tc_redirect_dtime +tcp_custom_syncookie +tcp_hdr_options +test_bpf_ma +test_global_funcs/arg_tag_ctx_kprobe +test_global_funcs/arg_tag_ctx_perf +test_global_funcs/arg_tag_ctx_raw_tp +test_global_funcs/global_func1 +test_global_funcs/global_func10 +test_global_funcs/global_func11 +test_global_funcs/global_func12 +test_global_funcs/global_func13 +test_global_funcs/global_func14 +test_global_funcs/global_func15 +test_global_funcs/global_func15_tricky_pruning +test_global_funcs/global_func17 +test_global_funcs/global_func3 +test_global_funcs/global_func5 +test_global_funcs/global_func6 +test_global_funcs/global_func7 +test_lsm/lsm_basic +test_profiler +test_strncmp/strncmp_bad_not_null_term_target +timer +timer_mim +token +tp_btf_nullable/handle_tp_btf_nullable_bare1 +tunnel +uprobe_multi_test/uprobe_sesison_return_2 +user_ringbuf/user_ringbuf_callback_bad_access1 +user_ringbuf/user_ringbuf_callback_bad_access2 +user_ringbuf/user_ringbuf_callback_const_ptr_to_dynptr_reg_off +user_ringbuf/user_ringbuf_callback_discard_dynptr +user_ringbuf/user_ringbuf_callback_invalid_return +user_ringbuf/user_ringbuf_callback_null_context_read +user_ringbuf/user_ringbuf_callback_null_context_write +user_ringbuf/user_ringbuf_callback_reinit_dynptr_mem +user_ringbuf/user_ringbuf_callback_reinit_dynptr_ringbuf +user_ringbuf/user_ringbuf_callback_submit_dynptr +user_ringbuf/user_ringbuf_callback_write_forbidden +verif_scale_pyperf100 +verif_scale_pyperf180 +verif_scale_pyperf600 +verif_scale_pyperf600_nounroll +verif_scale_seg6_loop +verif_scale_strobemeta +verif_scale_strobemeta_nounroll1 +verif_scale_strobemeta_nounroll2 +verif_scale_strobemeta_subprogs +verif_scale_sysctl_loop1 +verif_scale_sysctl_loop2 +verif_scale_xdp_loop +verifier_and/invalid_and_of_negative_number +verifier_and/invalid_range_check +verifier_arena/iter_maps2 +verifier_arena/iter_maps3 +verifier_array_access/a_read_only_array_1_2 +verifier_array_access/a_read_only_array_2_2 +verifier_array_access/a_write_only_array_1_2 +verifier_array_access/a_write_only_array_2_2 +verifier_array_access/an_array_with_a_constant_2 +verifier_array_access/an_array_with_a_register_2 +verifier_array_access/an_array_with_a_variable_2 +verifier_array_access/array_with_no_floor_check +verifier_array_access/with_a_invalid_max_check_1 +verifier_array_access/with_a_invalid_max_check_2 +verifier_basic_stack/invalid_fp_arithmetic 
+verifier_basic_stack/misaligned_read_from_stack +verifier_basic_stack/stack_out_of_bounds +verifier_bitfield_write +verifier_bits_iter/destroy_uninit +verifier_bits_iter/next_uninit +verifier_bits_iter/no_destroy +verifier_bounds/bounds_map_value_variant_1 +verifier_bounds/bounds_map_value_variant_2 +verifier_bounds/of_boundary_crossing_range_1 +verifier_bounds/of_boundary_crossing_range_2 +verifier_bounds/on_sign_extended_mov_test1 +verifier_bounds/on_sign_extended_mov_test2 +verifier_bounds/reg32_any_reg32_xor_3 +verifier_bounds/reg_any_reg_xor_3 +verifier_bounds/shift_of_maybe_negative_number +verifier_bounds/shift_with_64_bit_input +verifier_bounds/shift_with_oversized_count_operand +verifier_bounds/size_signed_32bit_overflow_test1 +verifier_bounds/size_signed_32bit_overflow_test2 +verifier_bounds/size_signed_32bit_overflow_test3 +verifier_bounds/size_signed_32bit_overflow_test4 +verifier_bounds/var_off_insn_off_test1 +verifier_bounds/var_off_insn_off_test2 +verifier_bounds_deduction/deducing_bounds_from_const_1 +verifier_bounds_deduction/deducing_bounds_from_const_10 +verifier_bounds_deduction/deducing_bounds_from_const_3 +verifier_bounds_deduction/deducing_bounds_from_const_5 +verifier_bounds_deduction/deducing_bounds_from_const_6 +verifier_bounds_deduction/deducing_bounds_from_const_7 +verifier_bounds_deduction/deducing_bounds_from_const_8 +verifier_bounds_deduction/deducing_bounds_from_const_9 +verifier_bounds_mix_sign_unsign/checks_mixing_signed_and_unsigned +verifier_bounds_mix_sign_unsign/signed_and_unsigned_positive_bounds +verifier_bounds_mix_sign_unsign/signed_and_unsigned_variant_10 +verifier_bounds_mix_sign_unsign/signed_and_unsigned_variant_11 +verifier_bounds_mix_sign_unsign/signed_and_unsigned_variant_12 +verifier_bounds_mix_sign_unsign/signed_and_unsigned_variant_13 +verifier_bounds_mix_sign_unsign/signed_and_unsigned_variant_14 +verifier_bounds_mix_sign_unsign/signed_and_unsigned_variant_15 +verifier_bounds_mix_sign_unsign/signed_and_unsigned_variant_2 +verifier_bounds_mix_sign_unsign/signed_and_unsigned_variant_3 +verifier_bounds_mix_sign_unsign/signed_and_unsigned_variant_5 +verifier_bounds_mix_sign_unsign/signed_and_unsigned_variant_6 +verifier_bounds_mix_sign_unsign/signed_and_unsigned_variant_8 +verifier_btf_ctx_access/ctx_access_u32_pointer_reject_16 +verifier_btf_ctx_access/ctx_access_u32_pointer_reject_32 +verifier_btf_ctx_access/ctx_access_u32_pointer_reject_8 +verifier_cfg/conditional_loop +verifier_cfg/loop2_back_edge +verifier_cfg/loop_back_edge +verifier_cfg/out_of_range_jump +verifier_cfg/out_of_range_jump2 +verifier_cfg/uncond_loop_after_cond_jmp +verifier_cfg/uncond_loop_in_subprog_after_cond_jmp +verifier_cfg/unreachable +verifier_cfg/unreachable2 +verifier_cgroup_inv_retcode/with_invalid_return_code_test1 +verifier_cgroup_inv_retcode/with_invalid_return_code_test3 +verifier_cgroup_inv_retcode/with_invalid_return_code_test5 +verifier_cgroup_inv_retcode/with_invalid_return_code_test6 +verifier_cgroup_inv_retcode/with_invalid_return_code_test7 +verifier_cgroup_skb/data_meta_for_cgroup_skb +verifier_cgroup_skb/flow_keys_for_cgroup_skb +verifier_cgroup_skb/napi_id_for_cgroup_skb +verifier_cgroup_skb/tc_classid_for_cgroup_skb +verifier_cgroup_storage/cpu_cgroup_storage_access_1 +verifier_cgroup_storage/cpu_cgroup_storage_access_2 +verifier_cgroup_storage/cpu_cgroup_storage_access_3 +verifier_cgroup_storage/cpu_cgroup_storage_access_4 +verifier_cgroup_storage/cpu_cgroup_storage_access_5 +verifier_cgroup_storage/cpu_cgroup_storage_access_6 
+verifier_cgroup_storage/invalid_cgroup_storage_access_1 +verifier_cgroup_storage/invalid_cgroup_storage_access_2 +verifier_cgroup_storage/invalid_cgroup_storage_access_3 +verifier_cgroup_storage/invalid_cgroup_storage_access_4 +verifier_cgroup_storage/invalid_cgroup_storage_access_5 +verifier_cgroup_storage/invalid_cgroup_storage_access_6 +verifier_const/bprm +verifier_const/tcx1 +verifier_const/tcx4 +verifier_const/tcx7 +verifier_const_or/not_bypass_stack_boundary_checks_1 +verifier_const_or/not_bypass_stack_boundary_checks_2 +verifier_ctx/context_stores_via_bpf_atomic +verifier_ctx/ctx_pointer_to_helper_1 +verifier_ctx/ctx_pointer_to_helper_2 +verifier_ctx/ctx_pointer_to_helper_3 +verifier_ctx/make_ptr_to_ctx_unusable +verifier_ctx/null_check_4_ctx_const +verifier_ctx/null_check_8_null_bind +verifier_ctx/or_null_check_3_1 +verifier_ctx_sk_msg/of_size_in_sk_msg +verifier_ctx_sk_msg/past_end_of_sk_msg +verifier_ctx_sk_msg/read_offset_in_sk_msg +verifier_d_path/d_path_reject +verifier_direct_packet_access/access_test15_spill_with_xadd +verifier_direct_packet_access/direct_packet_access_test3 +verifier_direct_packet_access/id_in_regsafe_bad_access +verifier_direct_packet_access/packet_access_test10_write_invalid +verifier_direct_packet_access/pkt_end_reg_bad_access +verifier_direct_packet_access/pkt_end_reg_both_accesses +verifier_direct_packet_access/test16_arith_on_data_end +verifier_direct_packet_access/test23_x_pkt_ptr_4 +verifier_direct_packet_access/test26_marking_on_bad_access +verifier_direct_packet_access/test28_marking_on_bad_access +verifier_direct_stack_access_wraparound +verifier_global_ptr_args +verifier_global_subprogs +verifier_helper_access_var_len/bitwise_and_jmp_wrong_max +verifier_helper_access_var_len/jmp_signed_no_min_check +verifier_helper_access_var_len/map_adjusted_jmp_wrong_max +verifier_helper_access_var_len/memory_map_jmp_wrong_max +verifier_helper_access_var_len/memory_stack_jmp_bounds_offset +verifier_helper_access_var_len/memory_stack_jmp_wrong_max +verifier_helper_access_var_len/ptr_to_mem_or_null_2 +verifier_helper_access_var_len/ptr_to_mem_or_null_8 +verifier_helper_access_var_len/ptr_to_mem_or_null_9 +verifier_helper_access_var_len/stack_jmp_no_max_check +verifier_helper_packet_access/cls_helper_fail_range_1 +verifier_helper_packet_access/cls_helper_fail_range_2 +verifier_helper_packet_access/cls_helper_fail_range_3 +verifier_helper_packet_access/packet_ptr_with_bad_range_1 +verifier_helper_packet_access/packet_ptr_with_bad_range_2 +verifier_helper_packet_access/packet_test2_unchecked_packet_ptr +verifier_helper_packet_access/ptr_with_too_short_range_1 +verifier_helper_packet_access/ptr_with_too_short_range_2 +verifier_helper_packet_access/test11_cls_unsuitable_helper_1 +verifier_helper_packet_access/test12_cls_unsuitable_helper_2 +verifier_helper_packet_access/test15_cls_helper_fail_sub +verifier_helper_packet_access/test20_pkt_end_as_input +verifier_helper_packet_access/test7_cls_unchecked_packet_ptr +verifier_helper_packet_access/to_packet_test21_wrong_reg +verifier_helper_restricted +verifier_helper_value_access/access_to_map_empty_range +verifier_helper_value_access/access_to_map_negative_range +verifier_helper_value_access/access_to_map_possibly_empty_range +verifier_helper_value_access/access_to_map_wrong_size +verifier_helper_value_access/bounds_check_using_bad_access_1 +verifier_helper_value_access/bounds_check_using_bad_access_2 +verifier_helper_value_access/check_using_s_bad_access_1 +verifier_helper_value_access/check_using_s_bad_access_2 
+verifier_helper_value_access/const_imm_negative_range_adjustment_1 +verifier_helper_value_access/const_imm_negative_range_adjustment_2 +verifier_helper_value_access/const_reg_negative_range_adjustment_1 +verifier_helper_value_access/const_reg_negative_range_adjustment_2 +verifier_helper_value_access/imm_out_of_bound_1 +verifier_helper_value_access/imm_out_of_bound_2 +verifier_helper_value_access/imm_out_of_bound_range +verifier_helper_value_access/map_out_of_bound_range +verifier_helper_value_access/map_via_variable_empty_range +verifier_helper_value_access/reg_out_of_bound_1 +verifier_helper_value_access/reg_out_of_bound_2 +verifier_helper_value_access/reg_out_of_bound_range +verifier_helper_value_access/via_const_imm_empty_range +verifier_helper_value_access/via_const_reg_empty_range +verifier_helper_value_access/via_variable_no_max_check_1 +verifier_helper_value_access/via_variable_no_max_check_2 +verifier_helper_value_access/via_variable_wrong_max_check_1 +verifier_helper_value_access/via_variable_wrong_max_check_2 +verifier_int_ptr/arg_ptr_to_long_misaligned +verifier_int_ptr/to_long_size_sizeof_long +verifier_iterating_callbacks/bpf_loop_iter_limit_overflow +verifier_iterating_callbacks/check_add_const_3regs +verifier_iterating_callbacks/check_add_const_3regs_2if +verifier_iterating_callbacks/check_add_const_regsafe_off +verifier_iterating_callbacks/iter_limit_bug +verifier_iterating_callbacks/jgt_imm64_and_may_goto +verifier_iterating_callbacks/loop_detection +verifier_iterating_callbacks/may_goto_self +verifier_iterating_callbacks/unsafe_find_vma +verifier_iterating_callbacks/unsafe_for_each_map_elem +verifier_iterating_callbacks/unsafe_on_2nd_iter +verifier_iterating_callbacks/unsafe_on_zero_iter +verifier_iterating_callbacks/unsafe_ringbuf_drain +verifier_jeq_infer_not_null/unchanged_for_jeq_false_branch +verifier_jeq_infer_not_null/unchanged_for_jne_true_branch +verifier_kfunc_prog_types/cgrp_kfunc_raw_tp +verifier_kfunc_prog_types/cpumask_kfunc_raw_tp +verifier_kfunc_prog_types/task_kfunc_raw_tp +verifier_ld_ind/ind_check_calling_conv_r1 +verifier_ld_ind/ind_check_calling_conv_r2 +verifier_ld_ind/ind_check_calling_conv_r3 +verifier_ld_ind/ind_check_calling_conv_r4 +verifier_ld_ind/ind_check_calling_conv_r5 +verifier_leak_ptr/leak_pointer_into_ctx_1 +verifier_leak_ptr/leak_pointer_into_ctx_2 +verifier_linked_scalars +verifier_loops1/bounded_recursion +verifier_loops1/infinite_loop_in_two_jumps +verifier_loops1/infinite_loop_three_jump_trick +verifier_loops1/loop_after_a_conditional_jump +verifier_lsm/bool_retval_test3 +verifier_lsm/bool_retval_test4 +verifier_lsm/disabled_hook_test1 +verifier_lsm/disabled_hook_test2 +verifier_lsm/disabled_hook_test3 +verifier_lsm/errno_zero_retval_test4 +verifier_lsm/errno_zero_retval_test5 +verifier_lsm/errno_zero_retval_test6 +verifier_lwt/not_permitted_for_lwt_prog +verifier_lwt/packet_write_for_lwt_in +verifier_lwt/packet_write_for_lwt_out +verifier_lwt/tc_classid_for_lwt_in +verifier_lwt/tc_classid_for_lwt_out +verifier_lwt/tc_classid_for_lwt_xmit +verifier_map_in_map/invalid_inner_map_pointer +verifier_map_in_map/on_the_inner_map_pointer +verifier_map_ptr/bpf_map_ptr_write_rejected +verifier_map_ptr/read_non_existent_field_rejected +verifier_map_ptr/read_with_negative_offset_rejected +verifier_map_ptr_mixing +verifier_map_ret_val +verifier_meta_access/meta_access_test10 +verifier_meta_access/meta_access_test2 +verifier_meta_access/meta_access_test3 +verifier_meta_access/meta_access_test4 +verifier_meta_access/meta_access_test5 
+verifier_meta_access/meta_access_test6 +verifier_meta_access/meta_access_test9 +verifier_netfilter_ctx/with_invalid_ctx_access_test1 +verifier_netfilter_ctx/with_invalid_ctx_access_test2 +verifier_netfilter_ctx/with_invalid_ctx_access_test3 +verifier_netfilter_ctx/with_invalid_ctx_access_test4 +verifier_netfilter_ctx/with_invalid_ctx_access_test5 +verifier_netfilter_retcode/with_invalid_return_code_test1 +verifier_netfilter_retcode/with_invalid_return_code_test4 +verifier_or_jmp32_k +verifier_prevent_map_lookup +verifier_raw_stack/bytes_spilled_regs_corruption_2 +verifier_raw_stack/load_bytes_invalid_access_1 +verifier_raw_stack/load_bytes_invalid_access_2 +verifier_raw_stack/load_bytes_invalid_access_3 +verifier_raw_stack/load_bytes_invalid_access_4 +verifier_raw_stack/load_bytes_invalid_access_5 +verifier_raw_stack/load_bytes_invalid_access_6 +verifier_raw_stack/load_bytes_negative_len_2 +verifier_raw_stack/load_bytes_spilled_regs_corruption +verifier_raw_stack/skb_load_bytes_negative_len +verifier_raw_stack/skb_load_bytes_zero_len +verifier_raw_tp_writable +verifier_ref_tracking +verifier_reg_equal/subreg_equality_2 +verifier_regalloc/regalloc_and_spill_negative +verifier_regalloc/regalloc_negative +verifier_regalloc/regalloc_src_reg_negative +verifier_ringbuf/ringbuf_invalid_reservation_offset_1 +verifier_ringbuf/ringbuf_invalid_reservation_offset_2 +verifier_runtime_jit +verifier_scalar_ids/check_ids_in_regsafe +verifier_scalar_ids/check_ids_in_regsafe_2 +verifier_scalar_ids/linked_regs_broken_link_2 +verifier_search_pruning/for_u32_spills_u64_fill +verifier_search_pruning/liveness_pruning_and_write_screening +verifier_search_pruning/short_loop1 +verifier_search_pruning/should_be_verified_nop_operation +verifier_search_pruning/tracking_for_u32_spill_fill +verifier_search_pruning/varlen_map_value_access_pruning +verifier_sock/bpf_sk_fullsock_skb_sk +verifier_sock/bpf_sk_release_skb_sk +verifier_sock/bpf_tcp_sock_skb_sk +verifier_sock/dst_port_byte_load_invalid +verifier_sock/dst_port_half_load_invalid_1 +verifier_sock/dst_port_half_load_invalid_2 +verifier_sock/invalidate_pkt_pointers_by_tail_call +verifier_sock/invalidate_pkt_pointers_from_global_func +verifier_sock/map_lookup_elem_smap_key +verifier_sock/map_lookup_elem_sockhash_key +verifier_sock/map_lookup_elem_sockmap_key +verifier_sock/no_null_check_on_ret_1 +verifier_sock/no_null_check_on_ret_2 +verifier_sock/of_bpf_skc_to_helpers +verifier_sock/post_bind4_read_mark +verifier_sock/post_bind4_read_src_ip6 +verifier_sock/post_bind6_read_src_ip4 +verifier_sock/sk_1_1_value_1 +verifier_sock/sk_no_skb_sk_check_1 +verifier_sock/sk_no_skb_sk_check_2 +verifier_sock/sk_sk_type_fullsock_field_1 +verifier_sock/skb_sk_beyond_last_field_1 +verifier_sock/skb_sk_beyond_last_field_2 +verifier_sock/skb_sk_no_null_check +verifier_sock/sock_create_read_src_port +verifier_sock_addr/bind4_bad_return_code +verifier_sock_addr/bind6_bad_return_code +verifier_sock_addr/connect4_bad_return_code +verifier_sock_addr/connect6_bad_return_code +verifier_sock_addr/connect_unix_bad_return_code +verifier_sock_addr/getpeername4_bad_return_code +verifier_sock_addr/getpeername6_bad_return_code +verifier_sock_addr/getpeername_unix_bad_return_code +verifier_sock_addr/getsockname4_bad_return_code +verifier_sock_addr/getsockname6_bad_return_code +verifier_sock_addr/getsockname_unix_unix_bad_return_code +verifier_sock_addr/recvmsg4_bad_return_code +verifier_sock_addr/recvmsg6_bad_return_code +verifier_sock_addr/recvmsg_unix_bad_return_code 
+verifier_sock_addr/sendmsg4_bad_return_code +verifier_sock_addr/sendmsg6_bad_return_code +verifier_sock_addr/sendmsg_unix_bad_return_code +verifier_sockmap_mutate/test_flow_dissector_update +verifier_sockmap_mutate/test_raw_tp_delete +verifier_sockmap_mutate/test_raw_tp_update +verifier_sockmap_mutate/test_sockops_update +verifier_spill_fill/_6_offset_to_skb_data +verifier_spill_fill/addr_offset_to_skb_data +verifier_spill_fill/check_corrupted_spill_fill +verifier_spill_fill/fill_32bit_after_spill_64bit_clear_id +verifier_spill_fill/spill_16bit_of_32bit_fail +verifier_spill_fill/spill_32bit_of_64bit_fail +verifier_spill_fill/u64_offset_to_skb_data +verifier_spill_fill/with_invalid_reg_offset_0 +verifier_spin_lock/call_within_a_locked_region +verifier_spin_lock/lock_test2_direct_ld_st +verifier_spin_lock/lock_test3_direct_ld_st +verifier_spin_lock/lock_test4_direct_ld_st +verifier_spin_lock/lock_test7_unlock_without_lock +verifier_spin_lock/reg_id_for_map_value +verifier_spin_lock/spin_lock_test6_missing_unlock +verifier_spin_lock/spin_lock_test8_double_lock +verifier_spin_lock/spin_lock_test9_different_lock +verifier_spin_lock/test11_ld_abs_under_lock +verifier_stack_ptr/load_bad_alignment_on_off +verifier_stack_ptr/load_bad_alignment_on_reg +verifier_stack_ptr/load_out_of_bounds_high +verifier_stack_ptr/load_out_of_bounds_low +verifier_stack_ptr/to_stack_check_high_4 +verifier_stack_ptr/to_stack_check_high_5 +verifier_stack_ptr/to_stack_check_high_6 +verifier_stack_ptr/to_stack_check_high_7 +verifier_stack_ptr/to_stack_check_low_3 +verifier_stack_ptr/to_stack_check_low_4 +verifier_stack_ptr/to_stack_check_low_5 +verifier_stack_ptr/to_stack_check_low_6 +verifier_stack_ptr/to_stack_check_low_7 +verifier_subprog_precision/callback_precise_return_fail +verifier_tailcall_jit +verifier_uninit +verifier_unpriv +verifier_unpriv_perf +verifier_value/store_of_cleared_call_register +verifier_value_illegal_alu +verifier_value_or_null/map_access_from_else_condition +verifier_value_or_null/map_value_or_null_1 +verifier_value_or_null/map_value_or_null_2 +verifier_value_or_null/map_value_or_null_3 +verifier_value_or_null/multiple_map_lookup_elem_calls +verifier_value_or_null/null_check_ids_in_regsafe +verifier_value_ptr_arith/access_known_scalar_value_ptr_2 +verifier_value_ptr_arith/access_unknown_scalar_value_ptr +verifier_value_ptr_arith/access_value_ptr_known_scalar +verifier_value_ptr_arith/access_value_ptr_unknown_scalar +verifier_value_ptr_arith/access_value_ptr_value_ptr_1 +verifier_value_ptr_arith/access_value_ptr_value_ptr_2 +verifier_value_ptr_arith/lower_oob_arith_test_1 +verifier_value_ptr_arith/to_leak_tainted_dst_reg +verifier_value_ptr_arith/unknown_scalar_value_ptr_4 +verifier_value_ptr_arith/value_ptr_known_scalar_2_1 +verifier_value_ptr_arith/value_ptr_known_scalar_3 +verifier_var_off/access_max_out_of_bound +verifier_var_off/access_min_out_of_bound +verifier_var_off/stack_write_clobbers_spilled_regs +verifier_var_off/variable_offset_ctx_access +verifier_var_off/variable_offset_stack_access_unbounded +verifier_var_off/zero_sized_access_max_out_of_bound +verifier_vfs_reject +verifier_xadd/xadd_w_check_unaligned_map +verifier_xadd/xadd_w_check_unaligned_pkt +verifier_xadd/xadd_w_check_unaligned_stack +verifier_xdp_direct_packet_access/corner_case_1_bad_access_1 +verifier_xdp_direct_packet_access/corner_case_1_bad_access_10 +verifier_xdp_direct_packet_access/corner_case_1_bad_access_11 +verifier_xdp_direct_packet_access/corner_case_1_bad_access_12 
+verifier_xdp_direct_packet_access/corner_case_1_bad_access_13 +verifier_xdp_direct_packet_access/corner_case_1_bad_access_14 +verifier_xdp_direct_packet_access/corner_case_1_bad_access_15 +verifier_xdp_direct_packet_access/corner_case_1_bad_access_16 +verifier_xdp_direct_packet_access/corner_case_1_bad_access_2 +verifier_xdp_direct_packet_access/corner_case_1_bad_access_3 +verifier_xdp_direct_packet_access/corner_case_1_bad_access_4 +verifier_xdp_direct_packet_access/corner_case_1_bad_access_5 +verifier_xdp_direct_packet_access/corner_case_1_bad_access_6 +verifier_xdp_direct_packet_access/corner_case_1_bad_access_7 +verifier_xdp_direct_packet_access/corner_case_1_bad_access_8 +verifier_xdp_direct_packet_access/corner_case_1_bad_access_9 +verifier_xdp_direct_packet_access/end_mangling_bad_access_1 +verifier_xdp_direct_packet_access/end_mangling_bad_access_2 +verifier_xdp_direct_packet_access/pkt_data_bad_access_1_1 +verifier_xdp_direct_packet_access/pkt_data_bad_access_1_2 +verifier_xdp_direct_packet_access/pkt_data_bad_access_1_3 +verifier_xdp_direct_packet_access/pkt_data_bad_access_1_4 +verifier_xdp_direct_packet_access/pkt_data_bad_access_2_1 +verifier_xdp_direct_packet_access/pkt_data_bad_access_2_2 +verifier_xdp_direct_packet_access/pkt_data_bad_access_2_3 +verifier_xdp_direct_packet_access/pkt_data_bad_access_2_4 +verifier_xdp_direct_packet_access/pkt_data_bad_access_2_5 +verifier_xdp_direct_packet_access/pkt_data_bad_access_2_6 +verifier_xdp_direct_packet_access/pkt_data_bad_access_2_7 +verifier_xdp_direct_packet_access/pkt_data_bad_access_2_8 +verifier_xdp_direct_packet_access/pkt_end_bad_access_1_1 +verifier_xdp_direct_packet_access/pkt_end_bad_access_1_2 +verifier_xdp_direct_packet_access/pkt_end_bad_access_2_1 +verifier_xdp_direct_packet_access/pkt_end_bad_access_2_2 +verifier_xdp_direct_packet_access/pkt_end_bad_access_2_3 +verifier_xdp_direct_packet_access/pkt_end_bad_access_2_4 +verifier_xdp_direct_packet_access/pkt_meta_bad_access_1_1 +verifier_xdp_direct_packet_access/pkt_meta_bad_access_1_2 +verifier_xdp_direct_packet_access/pkt_meta_bad_access_2_1 +verifier_xdp_direct_packet_access/pkt_meta_bad_access_2_2 +verifier_xdp_direct_packet_access/pkt_meta_bad_access_2_3 +verifier_xdp_direct_packet_access/pkt_meta_bad_access_2_4 +verify_pkcs7_sig +xdp_synproxy diff --git a/ci/vmtest/configs/DENYLIST.test_progs_cpuv4 b/ci/vmtest/configs/DENYLIST.test_progs_cpuv4 new file mode 100644 index 0000000000000..0c02eae8f5cd1 --- /dev/null +++ b/ci/vmtest/configs/DENYLIST.test_progs_cpuv4 @@ -0,0 +1 @@ +verifier_arena/basic_alloc2 diff --git a/ci/vmtest/configs/DENYLIST.x86_64 b/ci/vmtest/configs/DENYLIST.x86_64 new file mode 100644 index 0000000000000..6fc3413daab9f --- /dev/null +++ b/ci/vmtest/configs/DENYLIST.x86_64 @@ -0,0 +1 @@ +netcnt # with kvm enabled, fail with packets unexpected packets: actual 10001 != expected 10000 diff --git a/ci/vmtest/configs/config b/ci/vmtest/configs/config new file mode 100644 index 0000000000000..7a2427b22553c --- /dev/null +++ b/ci/vmtest/configs/config @@ -0,0 +1,7 @@ +CONFIG_KASAN=y +CONFIG_KASAN_GENERIC=y +CONFIG_KASAN_VMALLOC=y +CONFIG_LIVEPATCH=y +CONFIG_SAMPLES=y +CONFIG_SAMPLE_LIVEPATCH=m +# CONFIG_UBSAN=y diff --git a/ci/vmtest/configs/run-vmtest.env b/ci/vmtest/configs/run-vmtest.env new file mode 100644 index 0000000000000..c60f1db6673c7 --- /dev/null +++ b/ci/vmtest/configs/run-vmtest.env @@ -0,0 +1,42 @@ +#!/bin/bash + +# This file is sourced by libbpf/ci/run-vmtest Github Action scripts. 
+#
+# The primary reason it exists is that assembling ALLOWLIST and
+# DENYLIST for a particular test run is not a trivial operation.
+#
+# Users of the libbpf/ci/run-vmtest action need to be able to specify a
+# list of allow/denylist **files**, which later has to be correctly
+# merged into a single allow/denylist passed to a test runner.
+#
+# Obviously, it's preferable for the scripts merging many lists into
+# one to be reusable, and not copy-pasted between repositories which
+# use libbpf/ci actions. And specifying the lists should be trivial.
+# This file is a solution to that.
+
+# $SELFTESTS_BPF and $VMTEST_CONFIGS are set in the workflow, before
+# the libbpf/ci/run-vmtest action is called.
+# See .github/workflows/kernel-test.yml
+
+ALLOWLIST_FILES=(
+    "${SELFTESTS_BPF}/ALLOWLIST"
+    "${SELFTESTS_BPF}/ALLOWLIST.${ARCH}"
+    "${VMTEST_CONFIGS}/ALLOWLIST"
+    "${VMTEST_CONFIGS}/ALLOWLIST.${ARCH}"
+    "${VMTEST_CONFIGS}/ALLOWLIST.${DEPLOYMENT}"
+    "${VMTEST_CONFIGS}/ALLOWLIST.${KERNEL_TEST}"
+)
+
+DENYLIST_FILES=(
+    "${SELFTESTS_BPF}/DENYLIST"
+    "${SELFTESTS_BPF}/DENYLIST.${ARCH}"
+    "${VMTEST_CONFIGS}/DENYLIST"
+    "${VMTEST_CONFIGS}/DENYLIST.${ARCH}"
+    "${VMTEST_CONFIGS}/DENYLIST.${DEPLOYMENT}"
+    "${VMTEST_CONFIGS}/DENYLIST.${KERNEL_TEST}"
+)
+
+# Export pipe-separated strings, because bash doesn't support array export
+export SELFTESTS_BPF_ALLOWLIST_FILES=$(IFS="|"; echo "${ALLOWLIST_FILES[*]}")
+export SELFTESTS_BPF_DENYLIST_FILES=$(IFS="|"; echo "${DENYLIST_FILES[*]}")
+
diff --git a/ci/vmtest/configs/run_veristat.kernel.cfg b/ci/vmtest/configs/run_veristat.kernel.cfg
new file mode 100644
index 0000000000000..807efc251073f
--- /dev/null
+++ b/ci/vmtest/configs/run_veristat.kernel.cfg
@@ -0,0 +1,4 @@
+VERISTAT_OBJECTS_DIR="${SELFTESTS_BPF}"
+VERISTAT_OBJECTS_GLOB="*.bpf.o"
+VERISTAT_CFG_FILE="${SELFTESTS_BPF}/veristat.cfg"
+VERISTAT_OUTPUT="veristat-kernel"
diff --git a/ci/vmtest/configs/run_veristat.meta.cfg b/ci/vmtest/configs/run_veristat.meta.cfg
new file mode 100644
index 0000000000000..14f08d241d206
--- /dev/null
+++ b/ci/vmtest/configs/run_veristat.meta.cfg
@@ -0,0 +1,4 @@
+VERISTAT_OBJECTS_DIR="${WORKING_DIR}/bpf_objects"
+VERISTAT_OBJECTS_GLOB="*.o"
+VERISTAT_OUTPUT="veristat-meta"
+VERISTAT_CFG_FILE="${VERISTAT_CONFIGS}/veristat_meta.cfg"
diff --git a/ci/vmtest/configs/run_veristat.scx.cfg b/ci/vmtest/configs/run_veristat.scx.cfg
new file mode 100644
index 0000000000000..740cf8e960b32
--- /dev/null
+++ b/ci/vmtest/configs/run_veristat.scx.cfg
@@ -0,0 +1,3 @@
+VERISTAT_OBJECTS_DIR="${SCX_PROGS}"
+VERISTAT_OBJECTS_GLOB="*.bpf.o"
+VERISTAT_OUTPUT="veristat-scx"
diff --git a/ci/vmtest/configs/veristat_meta.cfg b/ci/vmtest/configs/veristat_meta.cfg
new file mode 100644
index 0000000000000..5ee6db25736d9
--- /dev/null
+++ b/ci/vmtest/configs/veristat_meta.cfg
@@ -0,0 +1,46 @@
+# List of exceptions we know about that are not going to work with veristat.
+
+# libbpf-tools, maintained outside of fbcode
+!bcc-libbpf-tools-*
+
+# missing kernel function 'bictcp_cong_avoid'
+!ti-tcpevent-tcp_bpf_state_fentry-tcp_bpf_state_fentry.bpf.o/bictcp_cong_avoid
+# missing kernel function 'bictcp_state'
+!ti-tcpevent-tcp_bpf_tracer_fentry-tcp_bpf_tracer_fentry.bpf.o/bictcp_state
+# missing kernel function 'tcp_drop'
+!ti-tcpevent-tcp_bpf_tracer_fentry-tcp_bpf_tracer_fentry.bpf.o/tcp_drop
+
+# outdated (and abandoned?) BPF programs, can't work with modern libbpf
+!schedulers-tangram-agent-bpf-blacklist-bpf_device_cgroup-device_cgroup_filter.bpf.o
+!schedulers-tangram-agent-bpf-netstat-bpf_cgroup_egress-bpf_cgroup_egress.bpf.o
+!schedulers-tangram-agent-bpf-netstat-bpf_cgroup_ingress-bpf_cgroup_ingress.bpf.o
+
+# invalid usage of global functions, seems abandoned as well
+!neteng-urgd-urgd_bpf_prog-urgd_bpf_prog.o
+
+# missing kernel function '__send_signal'
+!cea-object-introspection-OIVT-signal_bpf-signal.bpf.o/__send_signal
+
+# Strobelight program not passing validation properly
+!strobelight-server-bpf_program-hhvm_stacks-hhvm_stacks.o/hhvm_stack
+
+# RDMA functionality is expected which we don't have in default kernel flavor
+!neteng-netedit-bpf-ftrace-be_audit-be_audit-be_audit.bpf.o
+
+# Strobelight programs with >1mln instructions
+!strobelight-server-bpf_program-strobelight_process_monitor_libbpf-strobelight_process_monitor_libbpf.o
+
+# infiniband only, doesn't work on other hardware
+!neteng-netnorad-common-cpp-bpf-qp_ah_list-qp_ah_list.bpf.o/ret_query_qp
+
+# Droplet with >1mln instructions
+!ti-droplet-bpf-vip_filter_v2_xdp-vip_filter_v2_xdp.bpf.o/vip_filter
+
+# sched_ext bpf_lib objects don't need to be verified separately
+!third-party-scx*bpf_lib.bpf.o
+
+# These cause segfault in veristat due to a bug in libbpf
+# Link: https://lore.kernel.org/bpf/20250718001009.610955-1-andrii@kernel.org/
+# We can include them back after a veristat release with fixed libbpf
+!third-party-scx-__scx_chaos_bpf_skel_genskel-bpf.bpf.o
+!third-party-scx-__scx_p2dq_bpf_skel_genskel-bpf.bpf.o
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index 64997fa470532..02981b0d9b03d 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -105,7 +105,6 @@ TEST_FILES = xsk_prereqs.sh $(wildcard progs/btf_dump_test_case_*.c)
 # Order correspond to 'make run_tests' order
 TEST_PROGS := test_kmod.sh \
 	test_lirc_mode2.sh \
-	test_tc_tunnel.sh \
 	test_tc_edt.sh \
 	test_xdping.sh \
 	test_bpftool_build.sh \
diff --git a/tools/testing/selftests/bpf/network_helpers.c b/tools/testing/selftests/bpf/network_helpers.c
index cdf7b66414442..0998d71fe57a3 100644
--- a/tools/testing/selftests/bpf/network_helpers.c
+++ b/tools/testing/selftests/bpf/network_helpers.c
@@ -766,6 +766,51 @@ int send_recv_data(int lfd, int fd, uint32_t total_bytes)
 	return err;
 }
 
+int tc_prog_attach(const char *dev, int ingress_fd, int egress_fd)
+{
+	int ifindex;
+
+	if (!ASSERT_TRUE(ingress_fd >= 0 || egress_fd >= 0,
+			 "at least one program fd is valid"))
+		return -1;
+
+	ifindex = if_nametoindex(dev);
+	if (!ASSERT_NEQ(ifindex, 0, "get ifindex"))
+		return -1;
+
+	DECLARE_LIBBPF_OPTS(bpf_tc_hook, hook, .ifindex = ifindex,
+			    .attach_point = BPF_TC_INGRESS | BPF_TC_EGRESS);
+	DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts1, .handle = 1,
+			    .priority = 1, .prog_fd = ingress_fd);
+	DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts2, .handle = 1,
+			    .priority = 1, .prog_fd = egress_fd);
+	int ret;
+
+	ret = bpf_tc_hook_create(&hook);
+	if (!ASSERT_OK(ret, "create tc hook"))
+		return ret;
+
+	if (ingress_fd >= 0) {
+		hook.attach_point = BPF_TC_INGRESS;
+		ret = bpf_tc_attach(&hook, &opts1);
+		if (!ASSERT_OK(ret, "bpf_tc_attach")) {
+			bpf_tc_hook_destroy(&hook);
+			return ret;
+		}
+	}
+
+	if (egress_fd >= 0) {
+		hook.attach_point = BPF_TC_EGRESS;
+		ret = bpf_tc_attach(&hook, &opts2);
+		if (!ASSERT_OK(ret, "bpf_tc_attach")) {
+			bpf_tc_hook_destroy(&hook);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
 #ifdef TRAFFIC_MONITOR
 #ifdef TRAFFIC_MONITOR
 struct tmonitor_ctx {
 	pcap_t *pcap;
diff --git a/tools/testing/selftests/bpf/network_helpers.h b/tools/testing/selftests/bpf/network_helpers.h
index ef208eefd571a..79a010c88e11c 100644
--- a/tools/testing/selftests/bpf/network_helpers.h
+++ b/tools/testing/selftests/bpf/network_helpers.h
@@ -255,6 +255,22 @@ struct tmonitor_ctx;
 
 typedef int (*tm_print_fn_t)(const char *format, va_list args);
 
+/**
+ * tc_prog_attach - attach BPF program(s) to an interface
+ *
+ * Takes file descriptors for at least one and at most two BPF
+ * programs, and attaches those programs to an interface's ingress hook,
+ * its egress hook, or both.
+ *
+ * @dev: string containing the interface name
+ * @ingress_fd: file descriptor of the program to attach to interface ingress
+ * @egress_fd: file descriptor of the program to attach to interface egress
+ *
+ * Returns 0 on success, -1 if no valid file descriptor has been found, if
+ * the interface name is invalid, or if an error occurred during attach.
+ */
+int tc_prog_attach(const char *dev, int ingress_fd, int egress_fd);
+
 #ifdef TRAFFIC_MONITOR
 struct tmonitor_ctx *traffic_monitor_start(const char *netns, const char *test_name,
 					   const char *subtest_name);
diff --git a/tools/testing/selftests/bpf/prog_tests/test_tc_tunnel.c b/tools/testing/selftests/bpf/prog_tests/test_tc_tunnel.c
new file mode 100644
index 0000000000000..9ecd1ac0584c0
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/test_tc_tunnel.c
@@ -0,0 +1,674 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+
+/*
+ * End-to-end eBPF tunnel test suite
+ * This file tests the BPF network tunnel implementations. For each tunnel
+ * type, the test validates that:
+ * - basic communication can first be established between the two veths
+ * - when BPF-based encapsulation is added on client egress, the client
+ *   now fails to communicate with the server
+ * - when kernel-based decapsulation is added on server ingress, the
+ *   client can connect again
+ * - when the kernel-based decapsulation is replaced with a BPF-based
+ *   one, the client can still connect
+ */
+
+#include
+#include
+#include
+#include
+#include
+
+#include "test_progs.h"
+#include "network_helpers.h"
+#include "test_tc_tunnel.skel.h"
+
+#define SERVER_NS "tc-tunnel-server-ns"
+#define CLIENT_NS "tc-tunnel-client-ns"
+#define MAC_ADDR_VETH1 "00:11:22:33:44:55"
+#define IP4_ADDR_VETH1 "192.168.1.1"
+#define IP6_ADDR_VETH1 "fd::1"
+#define MAC_ADDR_VETH2 "66:77:88:99:AA:BB"
+#define IP4_ADDR_VETH2 "192.168.1.2"
+#define IP6_ADDR_VETH2 "fd::2"
+
+#define TEST_NAME_MAX_LEN 64
+#define PROG_NAME_MAX_LEN 64
+#define TUNNEL_ARGS_MAX_LEN 128
+#define BUFFER_LEN 2000
+#define DEFAULT_TEST_DATA_SIZE 100
+#define GSO_TEST_DATA_SIZE BUFFER_LEN
+
+#define TIMEOUT_MS 1000
+#define TEST_PORT 8000
+#define UDP_PORT 5555
+#define MPLS_UDP_PORT 6635
+#define FOU_MPLS_PROTO 137
+#define VXLAN_ID 1
+#define VXLAN_PORT 8472
+#define MPLS_TABLE_ENTRIES_COUNT 65536
+
+static char tx_buffer[BUFFER_LEN], rx_buffer[BUFFER_LEN];
+
+struct subtest_cfg {
+	char *ebpf_tun_type;
+	char *iproute_tun_type;
+	char *mac_tun_type;
+	int ipproto;
+	void (*extra_decap_mod_args_cb)(struct subtest_cfg *cfg, char *dst);
+	bool tunnel_need_veth_mac;
+	bool configure_fou_rx_port;
+	char *tmode;
+	bool expect_kern_decap_failure;
+	bool configure_mpls;
+	bool test_gso;
+	char *tunnel_client_addr;
+	char *tunnel_server_addr;
+	char name[TEST_NAME_MAX_LEN];
+	char *server_addr;
+	int client_egress_prog_fd;
+	int server_ingress_prog_fd;
+	char extra_decap_mod_args[TUNNEL_ARGS_MAX_LEN];
+	int 
*server_fd;
+};
+
+struct connection {
+	int client_fd;
+	int server_fd;
+};
+
+static int build_subtest_name(struct subtest_cfg *cfg, char *dst, size_t size)
+{
+	int ret;
+
+	ret = snprintf(dst, size, "%s_%s", cfg->ebpf_tun_type,
+		       cfg->mac_tun_type);
+
+	return ret < 0 ? ret : 0;
+}
+
+static int set_subtest_progs(struct subtest_cfg *cfg, struct test_tc_tunnel *skel)
+{
+	char prog_name[PROG_NAME_MAX_LEN];
+	struct bpf_program *prog;
+	int ret;
+
+	ret = snprintf(prog_name, PROG_NAME_MAX_LEN, "__encap_");
+	if (ret < 0)
+		return ret;
+	ret = build_subtest_name(cfg, prog_name + ret, PROG_NAME_MAX_LEN - ret);
+	if (ret < 0)
+		return ret;
+	prog = bpf_object__find_program_by_name(skel->obj, prog_name);
+	if (!prog)
+		return -1;
+
+	cfg->client_egress_prog_fd = bpf_program__fd(prog);
+	cfg->server_ingress_prog_fd = bpf_program__fd(skel->progs.decap_f);
+	return 0;
+}
+
+static void set_subtest_addresses(struct subtest_cfg *cfg)
+{
+	if (cfg->ipproto == 6)
+		cfg->server_addr = IP6_ADDR_VETH2;
+	else
+		cfg->server_addr = IP4_ADDR_VETH2;
+
+	/* Some tunnel types need specific addressing; in that case it
+	 * has already been set in the configuration table. Otherwise,
+	 * deduce the relevant addressing from the ipproto.
+	 */
+	if (cfg->tunnel_client_addr && cfg->tunnel_server_addr)
+		return;
+
+	if (cfg->ipproto == 6) {
+		cfg->tunnel_client_addr = IP6_ADDR_VETH1;
+		cfg->tunnel_server_addr = IP6_ADDR_VETH2;
+	} else {
+		cfg->tunnel_client_addr = IP4_ADDR_VETH1;
+		cfg->tunnel_server_addr = IP4_ADDR_VETH2;
+	}
+}
+
+static int run_server(struct subtest_cfg *cfg)
+{
+	struct nstoken *nstoken = open_netns(SERVER_NS);
+	int family = cfg->ipproto == 6 ? AF_INET6 : AF_INET;
+
+	cfg->server_fd = start_reuseport_server(family, SOCK_STREAM,
+						cfg->server_addr, TEST_PORT,
+						TIMEOUT_MS, 1);
+	close_netns(nstoken);
+	if (!ASSERT_NEQ(cfg->server_fd, NULL, "start server"))
+		return -1;
+
+	return 0;
+}
+
+static void stop_server(struct subtest_cfg *cfg)
+{
+	close(*cfg->server_fd);
+	cfg->server_fd = NULL;
+}
+
+static int check_server_rx_data(struct subtest_cfg *cfg,
+				struct connection *conn, int len)
+{
+	int err;
+
+	memset(rx_buffer, 0, BUFFER_LEN);
+	err = recv(conn->server_fd, rx_buffer, len, 0);
+	if (!ASSERT_EQ(err, len, "check rx data len"))
+		return 1;
+	if (!ASSERT_MEMEQ(tx_buffer, rx_buffer, len, "check received data"))
+		return 1;
+	return 0;
+}
+
+static struct connection *connect_client_to_server(struct subtest_cfg *cfg)
+{
+	struct network_helper_opts opts = {.timeout_ms = 500};
+	int family = cfg->ipproto == 6 ? 
AF_INET6 : AF_INET; + struct connection *conn = NULL; + int client_fd, server_fd; + + conn = malloc(sizeof(struct connection)); + if (!conn) + return conn; + + client_fd = connect_to_addr_str(family, SOCK_STREAM, cfg->server_addr, + TEST_PORT, &opts); + + if (client_fd < 0) { + free(conn); + return NULL; + } + + server_fd = accept(*cfg->server_fd, NULL, NULL); + if (server_fd < 0) { + free(conn); + return NULL; + } + + conn->server_fd = server_fd; + conn->client_fd = client_fd; + + return conn; +} + +static void disconnect_client_from_server(struct subtest_cfg *cfg, + struct connection *conn) +{ + close(conn->server_fd); + close(conn->client_fd); + free(conn); +} + +static int send_and_test_data(struct subtest_cfg *cfg, bool must_succeed) +{ + struct connection *conn; + int err, res = -1; + + conn = connect_client_to_server(cfg); + if (!must_succeed && !ASSERT_EQ(conn, NULL, "connection that must fail")) + goto end; + else if (!must_succeed) + return 0; + + if (!ASSERT_NEQ(conn, NULL, "connection that must succeed")) + return 1; + + err = send(conn->client_fd, tx_buffer, DEFAULT_TEST_DATA_SIZE, 0); + if (!ASSERT_EQ(err, DEFAULT_TEST_DATA_SIZE, "send data from client")) + goto end; + if (check_server_rx_data(cfg, conn, DEFAULT_TEST_DATA_SIZE)) + goto end; + + if (!cfg->test_gso) { + res = 0; + goto end; + } + + err = send(conn->client_fd, tx_buffer, GSO_TEST_DATA_SIZE, 0); + if (!ASSERT_EQ(err, GSO_TEST_DATA_SIZE, "send (large) data from client")) + goto end; + if (check_server_rx_data(cfg, conn, DEFAULT_TEST_DATA_SIZE)) + goto end; + + res = 0; +end: + disconnect_client_from_server(cfg, conn); + return res; +} + +static void vxlan_decap_mod_args_cb(struct subtest_cfg *cfg, char *dst) +{ + snprintf(dst, TUNNEL_ARGS_MAX_LEN, "id %d dstport %d udp6zerocsumrx", + VXLAN_ID, VXLAN_PORT); +} + +static void udp_decap_mod_args_cb(struct subtest_cfg *cfg, char *dst) +{ + bool is_mpls = !strcmp(cfg->mac_tun_type, "mpls"); + + snprintf(dst, TUNNEL_ARGS_MAX_LEN, + "encap fou encap-sport auto encap-dport %d", + is_mpls ? MPLS_UDP_PORT : UDP_PORT); +} + +static int configure_fou_rx_port(struct subtest_cfg *cfg, bool add) +{ + bool is_mpls = strcmp(cfg->mac_tun_type, "mpls") == 0; + int fou_proto; + + if (is_mpls) + fou_proto = FOU_MPLS_PROTO; + else + fou_proto = cfg->ipproto == 6 ? 41 : 4; + + SYS(fail, "ip fou %s port %d ipproto %d%s", add ? "add" : "del", + is_mpls ? MPLS_UDP_PORT : UDP_PORT, fou_proto, + cfg->ipproto == 6 ? 
" -6" : ""); + + return 0; +fail: + return 1; +} + +static int add_fou_rx_port(struct subtest_cfg *cfg) +{ + return configure_fou_rx_port(cfg, true); +} + +static int del_fou_rx_port(struct subtest_cfg *cfg) +{ + return configure_fou_rx_port(cfg, false); +} + +static int update_tunnel_intf_addr(struct subtest_cfg *cfg) +{ + SYS(fail, "ip link set dev testtun0 address " MAC_ADDR_VETH2); + return 0; +fail: + return -1; +} + +static int configure_kernel_for_mpls(struct subtest_cfg *cfg) +{ + SYS(fail, "sysctl -qw net.mpls.platform_labels=%d", + MPLS_TABLE_ENTRIES_COUNT); + SYS(fail, "ip -f mpls route add 1000 dev lo"); + SYS(fail, "ip link set lo up"); + SYS(fail, "sysctl -qw net.mpls.conf.testtun0.input=1"); + SYS(fail, "sysctl -qw net.ipv4.conf.lo.rp_filter=0"); + return 0; +fail: + return -1; +} + +static int configure_encapsulation(struct subtest_cfg *cfg) +{ + int ret; + + ret = tc_prog_attach("veth1", -1, cfg->client_egress_prog_fd); + + return ret; +} + +static int configure_kernel_decapsulation(struct subtest_cfg *cfg) +{ + struct nstoken *nstoken = open_netns(SERVER_NS); + + if (cfg->configure_fou_rx_port && + !ASSERT_OK(add_fou_rx_port(cfg), "configure FOU RX port")) + goto fail; + SYS(fail, "ip link add name testtun0 type %s %s remote %s local %s %s", + cfg->iproute_tun_type, cfg->tmode ? cfg->tmode : "", + cfg->tunnel_client_addr, cfg->tunnel_server_addr, + cfg->extra_decap_mod_args); + if (cfg->tunnel_need_veth_mac && + !ASSERT_OK(update_tunnel_intf_addr(cfg), "update testtun0 mac")) + goto fail; + if (cfg->configure_mpls && + (!ASSERT_OK(configure_kernel_for_mpls(cfg), + "configure MPLS decap"))) + goto fail; + SYS(fail, "sysctl -qw net.ipv4.conf.all.rp_filter=0"); + SYS(fail, "sysctl -qw net.ipv4.conf.testtun0.rp_filter=0"); + SYS(fail, "ip link set dev testtun0 up"); + close_netns(nstoken); + return 0; +fail: + close_netns(nstoken); + return -1; +} + +static void remove_kernel_decapsulation(struct subtest_cfg *cfg) +{ + SYS_NOFAIL("ip link del testtun0"); + if (cfg->configure_mpls) + SYS_NOFAIL("ip -f mpls route del 1000 dev lo"); + if (cfg->configure_fou_rx_port) + del_fou_rx_port(cfg); +} + +static int configure_ebpf_decapsulation(struct subtest_cfg *cfg) +{ + struct nstoken *nstoken = open_netns(SERVER_NS); + + if (!cfg->expect_kern_decap_failure) + SYS(fail, "ip link del testtun0"); + + if (!ASSERT_OK(tc_prog_attach("veth2", cfg->server_ingress_prog_fd, -1), + "attach_program")) + goto fail; + close_netns(nstoken); + return 0; +fail: + close_netns(nstoken); + return -1; +} + +static void run_test(struct subtest_cfg *cfg) +{ + struct nstoken *nstoken = open_netns(CLIENT_NS); + + if (!ASSERT_OK(run_server(cfg), "run server")) + goto fail; + + /* Basic communication must work */ + if (!ASSERT_OK(send_and_test_data(cfg, true), "connect without any encap")) + goto fail; + + /* Attach encapsulation program to client */ + if (!ASSERT_OK(configure_encapsulation(cfg), "configure encapsulation")) + goto fail; + + /* If supported, insert kernel decap module, connection must succeed */ + if (!cfg->expect_kern_decap_failure) { + if (!ASSERT_OK(configure_kernel_decapsulation(cfg), + "configure kernel decapsulation")) + goto fail; + if (!ASSERT_OK(send_and_test_data(cfg, true), + "connect with encap prog and kern decap")) + goto fail; + } + + /* Replace kernel decapsulation with BPF decapsulation, test must pass */ + if (!ASSERT_OK(configure_ebpf_decapsulation(cfg), "configure ebpf decapsulation")) + goto fail; + ASSERT_OK(send_and_test_data(cfg, true), "connect with encap and decap 
progs"); + +fail: + stop_server(cfg); + close_netns(nstoken); +} + +static int setup(void) +{ + struct nstoken *nstoken = NULL; + int fd, err; + + fd = open("/dev/urandom", O_RDONLY); + if (!ASSERT_OK_FD(fd, "open urandom")) + goto fail; + err = read(fd, tx_buffer, BUFFER_LEN); + close(fd); + + if (!ASSERT_EQ(err, BUFFER_LEN, "read random bytes")) + goto fail; + + /* Configure the testing network */ + if (!ASSERT_OK(make_netns(CLIENT_NS), "create client ns") || + !ASSERT_OK(make_netns(SERVER_NS), "create server ns")) + goto fail; + + nstoken = open_netns(CLIENT_NS); + SYS(fail, "ip link add %s type veth peer name %s", + "veth1 mtu 1500 netns " CLIENT_NS " address " MAC_ADDR_VETH1, + "veth2 mtu 1500 netns " SERVER_NS " address " MAC_ADDR_VETH2); + SYS(fail, "ethtool -K veth1 tso off"); + SYS(fail, "ip link set veth1 up"); + close_netns(nstoken); + nstoken = open_netns(SERVER_NS); + SYS(fail, "ip link set veth2 up"); + close_netns(nstoken); + + return 0; +fail: + close_netns(nstoken); + return 1; +} + +static int subtest_setup(struct test_tc_tunnel *skel, struct subtest_cfg *cfg) +{ + struct nstoken *nstoken; + + set_subtest_addresses(cfg); + if (!ASSERT_OK(set_subtest_progs(cfg, skel), + "find subtest progs")) + return -1; + if (cfg->extra_decap_mod_args_cb) + cfg->extra_decap_mod_args_cb(cfg, cfg->extra_decap_mod_args); + + nstoken = open_netns(CLIENT_NS); + SYS(fail, "ip -4 addr add " IP4_ADDR_VETH1 "/24 dev veth1"); + SYS(fail, "ip -4 route flush table main"); + SYS(fail, "ip -4 route add " IP4_ADDR_VETH2 " mtu 1450 dev veth1"); + SYS(fail, "ip -6 addr add " IP6_ADDR_VETH1 "/64 dev veth1 nodad"); + SYS(fail, "ip -6 route flush table main"); + SYS(fail, "ip -6 route add " IP6_ADDR_VETH2 " mtu 1430 dev veth1"); + close_netns(nstoken); + + nstoken = open_netns(SERVER_NS); + SYS(fail, "ip -4 addr add " IP4_ADDR_VETH2 "/24 dev veth2"); + SYS(fail, "ip -6 addr add " IP6_ADDR_VETH2 "/64 dev veth2 nodad"); + close_netns(nstoken); + + return 0; +fail: + close_netns(nstoken); + return -1; +} + + +static void subtest_cleanup(struct subtest_cfg *cfg) +{ + struct nstoken *nstoken; + + nstoken = open_netns(CLIENT_NS); + SYS_NOFAIL("tc qdisc delete dev veth1 parent ffff:fff1"); + SYS_NOFAIL("ip a flush veth1"); + close_netns(nstoken); + nstoken = open_netns(SERVER_NS); + SYS_NOFAIL("tc qdisc delete dev veth2 parent ffff:fff1"); + SYS_NOFAIL("ip a flush veth2"); + if (!cfg->expect_kern_decap_failure) + remove_kernel_decapsulation(cfg); + close_netns(nstoken); +} + +static void cleanup(void) +{ + remove_netns(CLIENT_NS); + remove_netns(SERVER_NS); +} + +static struct subtest_cfg subtests_cfg[] = { + { + .ebpf_tun_type = "ipip", + .mac_tun_type = "none", + .iproute_tun_type = "ipip", + .ipproto = 4, + }, + { + .ebpf_tun_type = "ipip6", + .mac_tun_type = "none", + .iproute_tun_type = "ip6tnl", + .ipproto = 4, + .tunnel_client_addr = IP6_ADDR_VETH1, + .tunnel_server_addr = IP6_ADDR_VETH2, + }, + { + .ebpf_tun_type = "ip6tnl", + .iproute_tun_type = "ip6tnl", + .mac_tun_type = "none", + .ipproto = 6, + }, + { + .mac_tun_type = "none", + .ebpf_tun_type = "sit", + .iproute_tun_type = "sit", + .ipproto = 6, + .tunnel_client_addr = IP4_ADDR_VETH1, + .tunnel_server_addr = IP4_ADDR_VETH2, + }, + { + .ebpf_tun_type = "vxlan", + .mac_tun_type = "eth", + .iproute_tun_type = "vxlan", + .ipproto = 4, + .extra_decap_mod_args_cb = vxlan_decap_mod_args_cb, + .tunnel_need_veth_mac = true + }, + { + .ebpf_tun_type = "ip6vxlan", + .mac_tun_type = "eth", + .iproute_tun_type = "vxlan", + .ipproto = 6, + .extra_decap_mod_args_cb 
= vxlan_decap_mod_args_cb, + .tunnel_need_veth_mac = true + }, + { + .ebpf_tun_type = "gre", + .mac_tun_type = "none", + .iproute_tun_type = "gre", + .ipproto = 4, + .test_gso = true + }, + { + .ebpf_tun_type = "gre", + .mac_tun_type = "eth", + .iproute_tun_type = "gretap", + .ipproto = 4, + .tunnel_need_veth_mac = true, + .test_gso = true + }, + { + .ebpf_tun_type = "gre", + .mac_tun_type = "mpls", + .iproute_tun_type = "gre", + .ipproto = 4, + .configure_mpls = true, + .test_gso = true + }, + { + .ebpf_tun_type = "ip6gre", + .mac_tun_type = "none", + .iproute_tun_type = "ip6gre", + .ipproto = 6, + .test_gso = true, + }, + { + .ebpf_tun_type = "ip6gre", + .mac_tun_type = "eth", + .iproute_tun_type = "ip6gretap", + .ipproto = 6, + .tunnel_need_veth_mac = true, + .test_gso = true + }, + { + .ebpf_tun_type = "ip6gre", + .mac_tun_type = "mpls", + .iproute_tun_type = "ip6gre", + .ipproto = 6, + .configure_mpls = true, + .test_gso = true + }, + { + .ebpf_tun_type = "udp", + .mac_tun_type = "none", + .iproute_tun_type = "ipip", + .ipproto = 4, + .extra_decap_mod_args_cb = udp_decap_mod_args_cb, + .configure_fou_rx_port = true, + .test_gso = true + }, + { + .ebpf_tun_type = "udp", + .mac_tun_type = "eth", + .iproute_tun_type = "ipip", + .ipproto = 4, + .extra_decap_mod_args_cb = udp_decap_mod_args_cb, + .configure_fou_rx_port = true, + .expect_kern_decap_failure = true, + .test_gso = true + }, + { + .ebpf_tun_type = "udp", + .mac_tun_type = "mpls", + .iproute_tun_type = "ipip", + .ipproto = 4, + .extra_decap_mod_args_cb = udp_decap_mod_args_cb, + .configure_fou_rx_port = true, + .tmode = "mode any ttl 255", + .configure_mpls = true, + .test_gso = true + }, + { + .ebpf_tun_type = "ip6udp", + .mac_tun_type = "none", + .iproute_tun_type = "ip6tnl", + .ipproto = 6, + .extra_decap_mod_args_cb = udp_decap_mod_args_cb, + .configure_fou_rx_port = true, + .test_gso = true + }, + { + .ebpf_tun_type = "ip6udp", + .mac_tun_type = "eth", + .iproute_tun_type = "ip6tnl", + .ipproto = 6, + .extra_decap_mod_args_cb = udp_decap_mod_args_cb, + .configure_fou_rx_port = true, + .expect_kern_decap_failure = true, + .test_gso = true + }, + { + .ebpf_tun_type = "ip6udp", + .mac_tun_type = "mpls", + .iproute_tun_type = "ip6tnl", + .ipproto = 6, + .extra_decap_mod_args_cb = udp_decap_mod_args_cb, + .configure_fou_rx_port = true, + .tmode = "mode any ttl 255", + .expect_kern_decap_failure = true, + .test_gso = true + }, +}; + +void test_tc_tunnel(void) +{ + struct test_tc_tunnel *skel; + struct subtest_cfg *cfg; + int i, ret; + + skel = test_tc_tunnel__open_and_load(); + if (!ASSERT_OK_PTR(skel, "skel open and load")) + return; + + if (!ASSERT_OK(setup(), "global setup")) + return; + + for (i = 0; i < ARRAY_SIZE(subtests_cfg); i++) { + cfg = &subtests_cfg[i]; + ret = build_subtest_name(cfg, cfg->name, TEST_NAME_MAX_LEN); + if (ret < 0 || !test__start_subtest(cfg->name)) + continue; + subtest_setup(skel, cfg); + run_test(cfg); + subtest_cleanup(cfg); + } + cleanup(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/test_tunnel.c b/tools/testing/selftests/bpf/prog_tests/test_tunnel.c index bae0e9de277d2..eb93099312720 100644 --- a/tools/testing/selftests/bpf/prog_tests/test_tunnel.c +++ b/tools/testing/selftests/bpf/prog_tests/test_tunnel.c @@ -534,85 +534,6 @@ static void ping6_dev1(void) close_netns(nstoken); } -static int attach_tc_prog(int ifindex, int igr_fd, int egr_fd) -{ - DECLARE_LIBBPF_OPTS(bpf_tc_hook, hook, .ifindex = ifindex, - .attach_point = BPF_TC_INGRESS | BPF_TC_EGRESS); - 
DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts1, .handle = 1, - .priority = 1, .prog_fd = igr_fd); - DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts2, .handle = 1, - .priority = 1, .prog_fd = egr_fd); - int ret; - - ret = bpf_tc_hook_create(&hook); - if (!ASSERT_OK(ret, "create tc hook")) - return ret; - - if (igr_fd >= 0) { - hook.attach_point = BPF_TC_INGRESS; - ret = bpf_tc_attach(&hook, &opts1); - if (!ASSERT_OK(ret, "bpf_tc_attach")) { - bpf_tc_hook_destroy(&hook); - return ret; - } - } - - if (egr_fd >= 0) { - hook.attach_point = BPF_TC_EGRESS; - ret = bpf_tc_attach(&hook, &opts2); - if (!ASSERT_OK(ret, "bpf_tc_attach")) { - bpf_tc_hook_destroy(&hook); - return ret; - } - } - - return 0; -} - -static int generic_attach(const char *dev, int igr_fd, int egr_fd) -{ - int ifindex; - - if (!ASSERT_OK_FD(igr_fd, "check ingress fd")) - return -1; - if (!ASSERT_OK_FD(egr_fd, "check egress fd")) - return -1; - - ifindex = if_nametoindex(dev); - if (!ASSERT_NEQ(ifindex, 0, "get ifindex")) - return -1; - - return attach_tc_prog(ifindex, igr_fd, egr_fd); -} - -static int generic_attach_igr(const char *dev, int igr_fd) -{ - int ifindex; - - if (!ASSERT_OK_FD(igr_fd, "check ingress fd")) - return -1; - - ifindex = if_nametoindex(dev); - if (!ASSERT_NEQ(ifindex, 0, "get ifindex")) - return -1; - - return attach_tc_prog(ifindex, igr_fd, -1); -} - -static int generic_attach_egr(const char *dev, int egr_fd) -{ - int ifindex; - - if (!ASSERT_OK_FD(egr_fd, "check egress fd")) - return -1; - - ifindex = if_nametoindex(dev); - if (!ASSERT_NEQ(ifindex, 0, "get ifindex")) - return -1; - - return attach_tc_prog(ifindex, -1, egr_fd); -} - static void test_vxlan_tunnel(void) { struct test_tunnel_kern *skel = NULL; @@ -635,12 +556,12 @@ static void test_vxlan_tunnel(void) goto done; get_src_prog_fd = bpf_program__fd(skel->progs.vxlan_get_tunnel_src); set_src_prog_fd = bpf_program__fd(skel->progs.vxlan_set_tunnel_src); - if (generic_attach(VXLAN_TUNL_DEV1, get_src_prog_fd, set_src_prog_fd)) + if (tc_prog_attach(VXLAN_TUNL_DEV1, get_src_prog_fd, set_src_prog_fd)) goto done; /* load and attach bpf prog to veth dev tc hook point */ set_dst_prog_fd = bpf_program__fd(skel->progs.veth_set_outer_dst); - if (generic_attach_igr("veth1", set_dst_prog_fd)) + if (tc_prog_attach("veth1", set_dst_prog_fd, -1)) goto done; /* load and attach prog set_md to tunnel dev tc hook point at_ns0 */ @@ -648,7 +569,7 @@ static void test_vxlan_tunnel(void) if (!ASSERT_OK_PTR(nstoken, "setns src")) goto done; set_dst_prog_fd = bpf_program__fd(skel->progs.vxlan_set_tunnel_dst); - if (generic_attach_egr(VXLAN_TUNL_DEV0, set_dst_prog_fd)) + if (tc_prog_attach(VXLAN_TUNL_DEV0, -1, set_dst_prog_fd)) goto done; close_netns(nstoken); @@ -695,7 +616,7 @@ static void test_ip6vxlan_tunnel(void) goto done; get_src_prog_fd = bpf_program__fd(skel->progs.ip6vxlan_get_tunnel_src); set_src_prog_fd = bpf_program__fd(skel->progs.ip6vxlan_set_tunnel_src); - if (generic_attach(IP6VXLAN_TUNL_DEV1, get_src_prog_fd, set_src_prog_fd)) + if (tc_prog_attach(IP6VXLAN_TUNL_DEV1, get_src_prog_fd, set_src_prog_fd)) goto done; /* load and attach prog set_md to tunnel dev tc hook point at_ns0 */ @@ -703,7 +624,7 @@ static void test_ip6vxlan_tunnel(void) if (!ASSERT_OK_PTR(nstoken, "setns src")) goto done; set_dst_prog_fd = bpf_program__fd(skel->progs.ip6vxlan_set_tunnel_dst); - if (generic_attach_egr(IP6VXLAN_TUNL_DEV0, set_dst_prog_fd)) + if (tc_prog_attach(IP6VXLAN_TUNL_DEV0, -1, set_dst_prog_fd)) goto done; close_netns(nstoken); @@ -764,7 +685,7 @@ static void test_ipip_tunnel(enum 
ipip_encap encap) skel->progs.ipip_set_tunnel); } - if (generic_attach(IPIP_TUNL_DEV1, get_src_prog_fd, set_src_prog_fd)) + if (tc_prog_attach(IPIP_TUNL_DEV1, get_src_prog_fd, set_src_prog_fd)) goto done; ping_dev0(); @@ -797,7 +718,7 @@ static void test_xfrm_tunnel(void) /* attach tc prog to tunnel dev */ tc_prog_fd = bpf_program__fd(skel->progs.xfrm_get_state); - if (generic_attach_igr("veth1", tc_prog_fd)) + if (tc_prog_attach("veth1", tc_prog_fd, -1)) goto done; /* attach xdp prog to tunnel dev */ @@ -870,7 +791,7 @@ static void test_gre_tunnel(enum gre_test test) if (!ASSERT_OK(err, "add tunnel")) goto done; - if (generic_attach(GRE_TUNL_DEV1, get_fd, set_fd)) + if (tc_prog_attach(GRE_TUNL_DEV1, get_fd, set_fd)) goto done; ping_dev0(); @@ -911,7 +832,7 @@ static void test_ip6gre_tunnel(enum ip6gre_test test) set_fd = bpf_program__fd(skel->progs.ip6gretap_set_tunnel); get_fd = bpf_program__fd(skel->progs.ip6gretap_get_tunnel); - if (generic_attach(IP6GRE_TUNL_DEV1, get_fd, set_fd)) + if (tc_prog_attach(IP6GRE_TUNL_DEV1, get_fd, set_fd)) goto done; ping6_veth0(); @@ -954,7 +875,7 @@ static void test_erspan_tunnel(enum erspan_test test) set_fd = bpf_program__fd(skel->progs.erspan_set_tunnel); get_fd = bpf_program__fd(skel->progs.erspan_get_tunnel); - if (generic_attach(ERSPAN_TUNL_DEV1, get_fd, set_fd)) + if (tc_prog_attach(ERSPAN_TUNL_DEV1, get_fd, set_fd)) goto done; ping_dev0(); @@ -990,7 +911,7 @@ static void test_ip6erspan_tunnel(enum erspan_test test) set_fd = bpf_program__fd(skel->progs.ip4ip6erspan_set_tunnel); get_fd = bpf_program__fd(skel->progs.ip4ip6erspan_get_tunnel); - if (generic_attach(IP6ERSPAN_TUNL_DEV1, get_fd, set_fd)) + if (tc_prog_attach(IP6ERSPAN_TUNL_DEV1, get_fd, set_fd)) goto done; ping6_veth0(); @@ -1017,7 +938,7 @@ static void test_geneve_tunnel(void) set_fd = bpf_program__fd(skel->progs.geneve_set_tunnel); get_fd = bpf_program__fd(skel->progs.geneve_get_tunnel); - if (generic_attach(GENEVE_TUNL_DEV1, get_fd, set_fd)) + if (tc_prog_attach(GENEVE_TUNL_DEV1, get_fd, set_fd)) goto done; ping_dev0(); @@ -1044,7 +965,7 @@ static void test_ip6geneve_tunnel(void) set_fd = bpf_program__fd(skel->progs.ip6geneve_set_tunnel); get_fd = bpf_program__fd(skel->progs.ip6geneve_get_tunnel); - if (generic_attach(IP6GENEVE_TUNL_DEV1, get_fd, set_fd)) + if (tc_prog_attach(IP6GENEVE_TUNL_DEV1, get_fd, set_fd)) goto done; ping_dev0(); @@ -1083,7 +1004,7 @@ static void test_ip6tnl_tunnel(enum ip6tnl_test test) get_fd = bpf_program__fd(skel->progs.ip6ip6_get_tunnel); break; } - if (generic_attach(IP6TNL_TUNL_DEV1, get_fd, set_fd)) + if (tc_prog_attach(IP6TNL_TUNL_DEV1, get_fd, set_fd)) goto done; ping6_veth0(); diff --git a/tools/testing/selftests/bpf/progs/test_tc_tunnel.c b/tools/testing/selftests/bpf/progs/test_tc_tunnel.c index 404124a938927..7330c61b5730c 100644 --- a/tools/testing/selftests/bpf/progs/test_tc_tunnel.c +++ b/tools/testing/selftests/bpf/progs/test_tc_tunnel.c @@ -2,23 +2,11 @@ /* In-place tunneling */ -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include +#include -#include #include +#include +#include "bpf_tracing_net.h" #include "bpf_compiler.h" #pragma GCC diagnostic ignored "-Waddress-of-packed-member" @@ -27,6 +15,14 @@ static const int cfg_port = 8000; static const int cfg_udp_src = 20000; +#define ETH_P_MPLS_UC 0x8847 +#define ETH_P_TEB 0x6558 + +#define MPLS_LS_S_MASK 0x00000100 +#define BPF_F_ADJ_ROOM_ENCAP_L2(len) \ + (((__u64)len & BPF_ADJ_ROOM_ENCAP_L2_MASK) \ + << 
BPF_ADJ_ROOM_ENCAP_L2_SHIFT) + #define L2_PAD_SZ (sizeof(struct vxlanhdr) + ETH_HLEN) #define UDP_PORT 5555 @@ -36,10 +32,9 @@ static const int cfg_udp_src = 20000; #define EXTPROTO_VXLAN 0x1 -#define VXLAN_N_VID (1u << 24) -#define VXLAN_VNI_MASK bpf_htonl((VXLAN_N_VID - 1) << 8) -#define VXLAN_FLAGS 0x8 -#define VXLAN_VNI 1 +#define VXLAN_FLAGS bpf_htonl(1<<27) +#define VNI_ID 1 +#define VXLAN_VNI bpf_htonl(VNI_ID << 8) #ifndef NEXTHDR_DEST #define NEXTHDR_DEST 60 @@ -48,12 +43,6 @@ static const int cfg_udp_src = 20000; /* MPLS label 1000 with S bit (last label) set and ttl of 255. */ static const __u32 mpls_label = __bpf_constant_htonl(1000 << 12 | MPLS_LS_S_MASK | 0xff); - -struct vxlanhdr { - __be32 vx_flags; - __be32 vx_vni; -} __attribute__((packed)); - struct gre_hdr { __be16 flags; __be16 protocol; @@ -94,8 +83,8 @@ static __always_inline void set_ipv4_csum(struct iphdr *iph) static __always_inline int __encap_ipv4(struct __sk_buff *skb, __u8 encap_proto, __u16 l2_proto, __u16 ext_proto) { + struct iphdr iph_inner = {0}; __u16 udp_dst = UDP_PORT; - struct iphdr iph_inner; struct v4hdr h_outer; struct tcphdr tcph; int olen, l2_len; @@ -122,7 +111,6 @@ static __always_inline int __encap_ipv4(struct __sk_buff *skb, __u8 encap_proto, return TC_ACT_OK; /* Derive the IPv4 header fields from the IPv6 header */ - memset(&iph_inner, 0, sizeof(iph_inner)); iph_inner.version = 4; iph_inner.ihl = 5; iph_inner.tot_len = bpf_htons(sizeof(iph6_inner) + @@ -210,7 +198,7 @@ static __always_inline int __encap_ipv4(struct __sk_buff *skb, __u8 encap_proto, struct vxlanhdr *vxlan_hdr = (struct vxlanhdr *)l2_hdr; vxlan_hdr->vx_flags = VXLAN_FLAGS; - vxlan_hdr->vx_vni = bpf_htonl((VXLAN_VNI & VXLAN_VNI_MASK) << 8); + vxlan_hdr->vx_vni = VXLAN_VNI; l2_hdr += sizeof(struct vxlanhdr); } @@ -340,7 +328,7 @@ static __always_inline int __encap_ipv6(struct __sk_buff *skb, __u8 encap_proto, struct vxlanhdr *vxlan_hdr = (struct vxlanhdr *)l2_hdr; vxlan_hdr->vx_flags = VXLAN_FLAGS; - vxlan_hdr->vx_vni = bpf_htonl((VXLAN_VNI & VXLAN_VNI_MASK) << 8); + vxlan_hdr->vx_vni = VXLAN_VNI; l2_hdr += sizeof(struct vxlanhdr); } @@ -372,8 +360,8 @@ static __always_inline int __encap_ipv6(struct __sk_buff *skb, __u8 encap_proto, static int encap_ipv6_ipip6(struct __sk_buff *skb) { + struct v6hdr h_outer = {0}; struct iphdr iph_inner; - struct v6hdr h_outer; struct tcphdr tcph; struct ethhdr eth; __u64 flags; @@ -400,13 +388,12 @@ static int encap_ipv6_ipip6(struct __sk_buff *skb) return TC_ACT_SHOT; /* prepare new outer network header */ - memset(&h_outer.ip, 0, sizeof(h_outer.ip)); h_outer.ip.version = 6; h_outer.ip.hop_limit = iph_inner.ttl; - h_outer.ip.saddr.s6_addr[1] = 0xfd; - h_outer.ip.saddr.s6_addr[15] = 1; - h_outer.ip.daddr.s6_addr[1] = 0xfd; - h_outer.ip.daddr.s6_addr[15] = 2; + h_outer.ip.saddr.in6_u.u6_addr8[1] = 0xfd; + h_outer.ip.saddr.in6_u.u6_addr8[15] = 1; + h_outer.ip.daddr.in6_u.u6_addr8[1] = 0xfd; + h_outer.ip.daddr.in6_u.u6_addr8[15] = 2; h_outer.ip.payload_len = iph_inner.tot_len; h_outer.ip.nexthdr = IPPROTO_IPIP; @@ -431,7 +418,7 @@ static __always_inline int encap_ipv6(struct __sk_buff *skb, __u8 encap_proto, return __encap_ipv6(skb, encap_proto, l2_proto, 0); } -SEC("encap_ipip_none") +SEC("tc") int __encap_ipip_none(struct __sk_buff *skb) { if (skb->protocol == __bpf_constant_htons(ETH_P_IP)) @@ -440,7 +427,7 @@ int __encap_ipip_none(struct __sk_buff *skb) return TC_ACT_OK; } -SEC("encap_gre_none") +SEC("tc") int __encap_gre_none(struct __sk_buff *skb) { if (skb->protocol == 
__bpf_constant_htons(ETH_P_IP)) @@ -449,7 +436,7 @@ int __encap_gre_none(struct __sk_buff *skb) return TC_ACT_OK; } -SEC("encap_gre_mpls") +SEC("tc") int __encap_gre_mpls(struct __sk_buff *skb) { if (skb->protocol == __bpf_constant_htons(ETH_P_IP)) @@ -458,7 +445,7 @@ int __encap_gre_mpls(struct __sk_buff *skb) return TC_ACT_OK; } -SEC("encap_gre_eth") +SEC("tc") int __encap_gre_eth(struct __sk_buff *skb) { if (skb->protocol == __bpf_constant_htons(ETH_P_IP)) @@ -467,7 +454,7 @@ int __encap_gre_eth(struct __sk_buff *skb) return TC_ACT_OK; } -SEC("encap_udp_none") +SEC("tc") int __encap_udp_none(struct __sk_buff *skb) { if (skb->protocol == __bpf_constant_htons(ETH_P_IP)) @@ -476,7 +463,7 @@ int __encap_udp_none(struct __sk_buff *skb) return TC_ACT_OK; } -SEC("encap_udp_mpls") +SEC("tc") int __encap_udp_mpls(struct __sk_buff *skb) { if (skb->protocol == __bpf_constant_htons(ETH_P_IP)) @@ -485,7 +472,7 @@ int __encap_udp_mpls(struct __sk_buff *skb) return TC_ACT_OK; } -SEC("encap_udp_eth") +SEC("tc") int __encap_udp_eth(struct __sk_buff *skb) { if (skb->protocol == __bpf_constant_htons(ETH_P_IP)) @@ -494,7 +481,7 @@ int __encap_udp_eth(struct __sk_buff *skb) return TC_ACT_OK; } -SEC("encap_vxlan_eth") +SEC("tc") int __encap_vxlan_eth(struct __sk_buff *skb) { if (skb->protocol == __bpf_constant_htons(ETH_P_IP)) @@ -505,7 +492,7 @@ int __encap_vxlan_eth(struct __sk_buff *skb) return TC_ACT_OK; } -SEC("encap_sit_none") +SEC("tc") int __encap_sit_none(struct __sk_buff *skb) { if (skb->protocol == __bpf_constant_htons(ETH_P_IPV6)) @@ -514,7 +501,7 @@ int __encap_sit_none(struct __sk_buff *skb) return TC_ACT_OK; } -SEC("encap_ip6tnl_none") +SEC("tc") int __encap_ip6tnl_none(struct __sk_buff *skb) { if (skb->protocol == __bpf_constant_htons(ETH_P_IPV6)) @@ -523,7 +510,7 @@ int __encap_ip6tnl_none(struct __sk_buff *skb) return TC_ACT_OK; } -SEC("encap_ipip6_none") +SEC("tc") int __encap_ipip6_none(struct __sk_buff *skb) { if (skb->protocol == __bpf_constant_htons(ETH_P_IP)) @@ -532,7 +519,7 @@ int __encap_ipip6_none(struct __sk_buff *skb) return TC_ACT_OK; } -SEC("encap_ip6gre_none") +SEC("tc") int __encap_ip6gre_none(struct __sk_buff *skb) { if (skb->protocol == __bpf_constant_htons(ETH_P_IPV6)) @@ -541,7 +528,7 @@ int __encap_ip6gre_none(struct __sk_buff *skb) return TC_ACT_OK; } -SEC("encap_ip6gre_mpls") +SEC("tc") int __encap_ip6gre_mpls(struct __sk_buff *skb) { if (skb->protocol == __bpf_constant_htons(ETH_P_IPV6)) @@ -550,7 +537,7 @@ int __encap_ip6gre_mpls(struct __sk_buff *skb) return TC_ACT_OK; } -SEC("encap_ip6gre_eth") +SEC("tc") int __encap_ip6gre_eth(struct __sk_buff *skb) { if (skb->protocol == __bpf_constant_htons(ETH_P_IPV6)) @@ -559,7 +546,7 @@ int __encap_ip6gre_eth(struct __sk_buff *skb) return TC_ACT_OK; } -SEC("encap_ip6udp_none") +SEC("tc") int __encap_ip6udp_none(struct __sk_buff *skb) { if (skb->protocol == __bpf_constant_htons(ETH_P_IPV6)) @@ -568,7 +555,7 @@ int __encap_ip6udp_none(struct __sk_buff *skb) return TC_ACT_OK; } -SEC("encap_ip6udp_mpls") +SEC("tc") int __encap_ip6udp_mpls(struct __sk_buff *skb) { if (skb->protocol == __bpf_constant_htons(ETH_P_IPV6)) @@ -577,7 +564,7 @@ int __encap_ip6udp_mpls(struct __sk_buff *skb) return TC_ACT_OK; } -SEC("encap_ip6udp_eth") +SEC("tc") int __encap_ip6udp_eth(struct __sk_buff *skb) { if (skb->protocol == __bpf_constant_htons(ETH_P_IPV6)) @@ -586,7 +573,7 @@ int __encap_ip6udp_eth(struct __sk_buff *skb) return TC_ACT_OK; } -SEC("encap_ip6vxlan_eth") +SEC("tc") int __encap_ip6vxlan_eth(struct __sk_buff *skb) { if (skb->protocol 
== __bpf_constant_htons(ETH_P_IPV6)) @@ -693,7 +680,7 @@ static int decap_ipv6(struct __sk_buff *skb) iph_outer.nexthdr); } -SEC("decap") +SEC("tc") int decap_f(struct __sk_buff *skb) { switch (skb->protocol) { diff --git a/tools/testing/selftests/bpf/test_tc_tunnel.sh b/tools/testing/selftests/bpf/test_tc_tunnel.sh deleted file mode 100755 index cb55a908bb0d7..0000000000000 --- a/tools/testing/selftests/bpf/test_tc_tunnel.sh +++ /dev/null @@ -1,320 +0,0 @@ -#!/bin/bash -# SPDX-License-Identifier: GPL-2.0 -# -# In-place tunneling - -BPF_FILE="test_tc_tunnel.bpf.o" -# must match the port that the bpf program filters on -readonly port=8000 - -readonly ns_prefix="ns-$$-" -readonly ns1="${ns_prefix}1" -readonly ns2="${ns_prefix}2" - -readonly ns1_v4=192.168.1.1 -readonly ns2_v4=192.168.1.2 -readonly ns1_v6=fd::1 -readonly ns2_v6=fd::2 - -# Must match port used by bpf program -readonly udpport=5555 -# MPLSoverUDP -readonly mplsudpport=6635 -readonly mplsproto=137 - -readonly infile="$(mktemp)" -readonly outfile="$(mktemp)" - -setup() { - ip netns add "${ns1}" - ip netns add "${ns2}" - - ip link add dev veth1 mtu 1500 netns "${ns1}" type veth \ - peer name veth2 mtu 1500 netns "${ns2}" - - ip netns exec "${ns1}" ethtool -K veth1 tso off - - ip -netns "${ns1}" link set veth1 up - ip -netns "${ns2}" link set veth2 up - - ip -netns "${ns1}" -4 addr add "${ns1_v4}/24" dev veth1 - ip -netns "${ns2}" -4 addr add "${ns2_v4}/24" dev veth2 - ip -netns "${ns1}" -6 addr add "${ns1_v6}/64" dev veth1 nodad - ip -netns "${ns2}" -6 addr add "${ns2_v6}/64" dev veth2 nodad - - # clamp route to reserve room for tunnel headers - ip -netns "${ns1}" -4 route flush table main - ip -netns "${ns1}" -6 route flush table main - ip -netns "${ns1}" -4 route add "${ns2_v4}" mtu 1450 dev veth1 - ip -netns "${ns1}" -6 route add "${ns2_v6}" mtu 1430 dev veth1 - - sleep 1 - - dd if=/dev/urandom of="${infile}" bs="${datalen}" count=1 status=none -} - -cleanup() { - ip netns del "${ns2}" - ip netns del "${ns1}" - - if [[ -f "${outfile}" ]]; then - rm "${outfile}" - fi - if [[ -f "${infile}" ]]; then - rm "${infile}" - fi - - if [[ -n $server_pid ]]; then - kill $server_pid 2> /dev/null - fi -} - -server_listen() { - ip netns exec "${ns2}" nc "${netcat_opt}" -l "${port}" > "${outfile}" & - server_pid=$! -} - -client_connect() { - ip netns exec "${ns1}" timeout 2 nc "${netcat_opt}" -w 1 "${addr2}" "${port}" < "${infile}" - echo $? 
-} - -verify_data() { - wait "${server_pid}" - server_pid= - # sha1sum returns two fields [sha1] [filepath] - # convert to bash array and access first elem - insum=($(sha1sum ${infile})) - outsum=($(sha1sum ${outfile})) - if [[ "${insum[0]}" != "${outsum[0]}" ]]; then - echo "data mismatch" - exit 1 - fi -} - -wait_for_port() { - for i in $(seq 20); do - if ip netns exec "${ns2}" ss ${2:--4}OHntl | grep -q "$1"; then - return 0 - fi - sleep 0.1 - done - return 1 -} - -set -e - -# no arguments: automated test, run all -if [[ "$#" -eq "0" ]]; then - echo "ipip" - $0 ipv4 ipip none 100 - - echo "ipip6" - $0 ipv4 ipip6 none 100 - - echo "ip6ip6" - $0 ipv6 ip6tnl none 100 - - echo "sit" - $0 ipv6 sit none 100 - - echo "ip4 vxlan" - $0 ipv4 vxlan eth 2000 - - echo "ip6 vxlan" - $0 ipv6 ip6vxlan eth 2000 - - for mac in none mpls eth ; do - echo "ip gre $mac" - $0 ipv4 gre $mac 100 - - echo "ip6 gre $mac" - $0 ipv6 ip6gre $mac 100 - - echo "ip gre $mac gso" - $0 ipv4 gre $mac 2000 - - echo "ip6 gre $mac gso" - $0 ipv6 ip6gre $mac 2000 - - echo "ip udp $mac" - $0 ipv4 udp $mac 100 - - echo "ip6 udp $mac" - $0 ipv6 ip6udp $mac 100 - - echo "ip udp $mac gso" - $0 ipv4 udp $mac 2000 - - echo "ip6 udp $mac gso" - $0 ipv6 ip6udp $mac 2000 - done - - echo "OK. All tests passed" - exit 0 -fi - -if [[ "$#" -ne "4" ]]; then - echo "Usage: $0" - echo " or: $0 " - exit 1 -fi - -case "$1" in -"ipv4") - readonly addr1="${ns1_v4}" - readonly addr2="${ns2_v4}" - readonly ipproto=4 - readonly netcat_opt=-${ipproto} - readonly foumod=fou - readonly foutype=ipip - readonly fouproto=4 - readonly fouproto_mpls=${mplsproto} - readonly gretaptype=gretap - ;; -"ipv6") - readonly addr1="${ns1_v6}" - readonly addr2="${ns2_v6}" - readonly ipproto=6 - readonly netcat_opt=-${ipproto} - readonly foumod=fou6 - readonly foutype=ip6tnl - readonly fouproto="41 -6" - readonly fouproto_mpls="${mplsproto} -6" - readonly gretaptype=ip6gretap - ;; -*) - echo "unknown arg: $1" - exit 1 - ;; -esac - -readonly tuntype=$2 -readonly mac=$3 -readonly datalen=$4 - -echo "encap ${addr1} to ${addr2}, type ${tuntype}, mac ${mac} len ${datalen}" - -trap cleanup EXIT - -setup - -# basic communication works -echo "test basic connectivity" -server_listen -wait_for_port ${port} ${netcat_opt} -client_connect -verify_data - -# clientside, insert bpf program to encap all TCP to port ${port} -# client can no longer connect -ip netns exec "${ns1}" tc qdisc add dev veth1 clsact -ip netns exec "${ns1}" tc filter add dev veth1 egress \ - bpf direct-action object-file ${BPF_FILE} \ - section "encap_${tuntype}_${mac}" -echo "test bpf encap without decap (expect failure)" -server_listen -wait_for_port ${port} ${netcat_opt} -! client_connect - -if [[ "$tuntype" =~ "udp" ]]; then - # Set up fou tunnel. - ttype="${foutype}" - targs="encap fou encap-sport auto encap-dport $udpport" - # fou may be a module; allow this to fail. 
- modprobe "${foumod}" ||true - if [[ "$mac" == "mpls" ]]; then - dport=${mplsudpport} - dproto=${fouproto_mpls} - tmode="mode any ttl 255" - else - dport=${udpport} - dproto=${fouproto} - fi - ip netns exec "${ns2}" ip fou add port $dport ipproto ${dproto} - targs="encap fou encap-sport auto encap-dport $dport" -elif [[ "$tuntype" =~ "gre" && "$mac" == "eth" ]]; then - ttype=$gretaptype -elif [[ "$tuntype" =~ "vxlan" && "$mac" == "eth" ]]; then - ttype="vxlan" - targs="id 1 dstport 8472 udp6zerocsumrx" -elif [[ "$tuntype" == "ipip6" ]]; then - ttype="ip6tnl" - targs="" -else - ttype=$tuntype - targs="" -fi - -# tunnel address family differs from inner for SIT -if [[ "${tuntype}" == "sit" ]]; then - link_addr1="${ns1_v4}" - link_addr2="${ns2_v4}" -elif [[ "${tuntype}" == "ipip6" ]]; then - link_addr1="${ns1_v6}" - link_addr2="${ns2_v6}" -else - link_addr1="${addr1}" - link_addr2="${addr2}" -fi - -# serverside, insert decap module -# server is still running -# client can connect again -ip netns exec "${ns2}" ip link add name testtun0 type "${ttype}" \ - ${tmode} remote "${link_addr1}" local "${link_addr2}" $targs - -expect_tun_fail=0 - -if [[ "$tuntype" == "ip6udp" && "$mac" == "mpls" ]]; then - # No support for MPLS IPv6 fou tunnel; expect failure. - expect_tun_fail=1 -elif [[ "$tuntype" =~ "udp" && "$mac" == "eth" ]]; then - # No support for TEB fou tunnel; expect failure. - expect_tun_fail=1 -elif [[ "$tuntype" =~ (gre|vxlan) && "$mac" == "eth" ]]; then - # Share ethernet address between tunnel/veth2 so L2 decap works. - ethaddr=$(ip netns exec "${ns2}" ip link show veth2 | \ - awk '/ether/ { print $2 }') - ip netns exec "${ns2}" ip link set testtun0 address $ethaddr -elif [[ "$mac" == "mpls" ]]; then - modprobe mpls_iptunnel ||true - modprobe mpls_gso ||true - ip netns exec "${ns2}" sysctl -qw net.mpls.platform_labels=65536 - ip netns exec "${ns2}" ip -f mpls route add 1000 dev lo - ip netns exec "${ns2}" ip link set lo up - ip netns exec "${ns2}" sysctl -qw net.mpls.conf.testtun0.input=1 - ip netns exec "${ns2}" sysctl -qw net.ipv4.conf.lo.rp_filter=0 -fi - -# Because packets are decapped by the tunnel they arrive on testtun0 from -# the IP stack perspective. Ensure reverse path filtering is disabled -# otherwise we drop the TCP SYN as arriving on testtun0 instead of the -# expected veth2 (veth2 is where 192.168.1.2 is configured). -ip netns exec "${ns2}" sysctl -qw net.ipv4.conf.all.rp_filter=0 -# rp needs to be disabled for both all and testtun0 as the rp value is -# selected as the max of the "all" and device-specific values. -ip netns exec "${ns2}" sysctl -qw net.ipv4.conf.testtun0.rp_filter=0 -ip netns exec "${ns2}" ip link set dev testtun0 up -if [[ "$expect_tun_fail" == 1 ]]; then - # This tunnel mode is not supported, so we expect failure. - echo "test bpf encap with tunnel device decap (expect failure)" - ! client_connect -else - echo "test bpf encap with tunnel device decap" - client_connect - verify_data - server_listen - wait_for_port ${port} ${netcat_opt} -fi - -# serverside, use BPF for decap -ip netns exec "${ns2}" ip link del dev testtun0 -ip netns exec "${ns2}" tc qdisc add dev veth2 clsact -ip netns exec "${ns2}" tc filter add dev veth2 ingress \ - bpf direct-action object-file ${BPF_FILE} section decap -echo "test bpf encap with bpf decap" -client_connect -verify_data - -echo OK
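# Note: the coverage of the deleted script now lives in
# prog_tests/test_tc_tunnel.c above; assuming the usual BPF selftests
# workflow, the new version can be run with:
#
#   cd tools/testing/selftests/bpf && ./test_progs -t tc_tunnel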