diff --git a/Runner/suites/Multimedia/Camera/Camera_RDI_FrameCapture/README_Camera_RDI_FrameCapture.md b/Runner/suites/Multimedia/Camera/Camera_RDI_FrameCapture/README_Camera_RDI_FrameCapture.md
new file mode 100644
index 00000000..9dc011f1
--- /dev/null
+++ b/Runner/suites/Multimedia/Camera/Camera_RDI_FrameCapture/README_Camera_RDI_FrameCapture.md
@@ -0,0 +1,98 @@
+# Camera RDI Frame Capture Test
+
+This test validates functional camera RDI (Raw Dump Interface) pipelines by:
+
+- Dynamically detecting all camera pipelines using `media-ctl`
+- Parsing valid RDI pipelines with a Python helper script
+- Streaming frames from the detected working pipelines using `yavta`
+- Supporting manual override of the video format and frame count
+
+## 📁 Test Directory Structure
+
+```
+Camera_RDI_FrameCapture/
+├── run.sh
+├── README_Camera_RDI_FrameCapture.md
+```
+
+## 🧠 How It Works
+
+1. Detects the media device node dynamically
+2. Dumps the topology to a temporary file
+3. Parses pipeline details using `parse_media_topology.py`
+4. For each detected pipeline:
+   - Applies the correct `media-ctl` `-V` and `-l` configuration
+   - Sets V4L2 controls pre-/post-streaming via `yavta`
+   - Attempts frame capture using `yavta`
+5. Logs PASS/FAIL/SKIP per pipeline
+6. Generates a `.res` file with the final test result
+
+## ⚙️ Dependencies
+
+Make sure the following tools are available in the target filesystem:
+
+- `media-ctl`
+- `yavta`
+- `v4l2-ctl`
+- `python3`
+- Python camera pipeline parser (see `utils/camera/parse_media_topology.py`)
+- Kernel module: `qcom_camss`
+- Required DT nodes with `camss`, `isp`, or `camera` compatible strings
+
+## 🧪 Usage
+
+```sh
+./run.sh [--format <FMT[,FMT,...]>] [--frames <N>] [--help]
+```
+
+### Examples:
+
+- Auto-detect and capture 10 frames per working RDI pipeline:
+  ```sh
+  ./run.sh
+  ```
+
+- Force the UYVY format and capture 5 frames:
+  ```sh
+  ./run.sh --format UYVY --frames 5
+  ```
+
+- Try a comma-separated list of V4L2 formats per pipeline:
+  ```sh
+  ./run.sh --format SRGGB10P,YUYV,UYVY --frames 5
+  ```
+
+## 📦 Output
+
+- Captured frame files: `frame-#.bin` in the current directory
+- Result summary: `Camera_RDI_FrameCapture.res`
+- Detailed logs via the `functestlib.sh` helpers (`log_info`, `log_pass`, etc.)
+
+## ✅ Pass Criteria
+
+- At least one pipeline successfully captures frames
+- Logs include `"Captured frames"` for at least one working video node
+
+## ❌ Fail/Skip Criteria
+
+- If pipeline configuration fails or no frames are captured, that pipeline is marked FAIL
+- If no working pipelines are found or prerequisites are unmet, the test is SKIPPED
+
+## 🧼 Cleanup
+
+Temporary files created:
+- `/tmp/Camera_RDI_FrameCapture_topo.*`
+- `/tmp/Camera_RDI_FrameCapture_blocks.*`
+
+They are auto-removed at the end of the test.
+
+## 📝 Notes
+
+- The test is dynamic and supports multiple pipelines per board
+- The Python script only outputs **valid** working pipelines (validated via `v4l2-ctl`)
+- `run.sh` is CI-ready and skips flaky or unsupported configurations gracefully
+
+---
+
+© Qualcomm Technologies, Inc. – All rights reserved
diff --git a/Runner/suites/Multimedia/Camera/Camera_RDI_FrameCapture/run.sh b/Runner/suites/Multimedia/Camera/Camera_RDI_FrameCapture/run.sh
new file mode 100755
index 00000000..35d29913
--- /dev/null
+++ b/Runner/suites/Multimedia/Camera/Camera_RDI_FrameCapture/run.sh
@@ -0,0 +1,211 @@
+#!/bin/sh
+# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+# SPDX-License-Identifier: BSD-3-Clause-Clear
+
+SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
+INIT_ENV=""
+SEARCH="$SCRIPT_DIR"
+while [ "$SEARCH" != "/" ]; do
+    if [ -f "$SEARCH/init_env" ]; then
+        INIT_ENV="$SEARCH/init_env"
+        break
+    fi
+    SEARCH=$(dirname "$SEARCH")
+done
+
+if [ -z "$INIT_ENV" ]; then
+    echo "[ERROR] Could not find init_env (starting at $SCRIPT_DIR)" >&2
+    exit 1
+fi
+
+if [ -z "$__INIT_ENV_LOADED" ]; then
+    # shellcheck disable=SC1090
+    . "$INIT_ENV"
+fi
+# shellcheck disable=SC1090,SC1091
+. "$TOOLS/functestlib.sh"
+
+TESTNAME="Camera_RDI_FrameCapture"
+RES_FILE="./$TESTNAME.res"
+test_path=$(find_test_case_by_name "$TESTNAME")
+cd "$test_path" || exit 1
+
+print_usage() {
+    cat <<EOF
+Usage: $0 [--format <FMT[,FMT,...]>] [--frames <N>] [--help]
+
+Options:
+  --format <FMT[,FMT,...]>  Test one or more comma-separated formats (e.g., UYVY,NV12)
+  --frames <N>              Number of frames to capture per pipeline (default: 10)
+  --help                    Show this help message
+EOF
+}
+
+log_info "----------------------------------------------------------------------"
+log_info "------------------- Starting $TESTNAME Testcase ----------------------"
+log_info "=== Test Initialization ==="
+
+# --------- Argument Parsing ---------
+USER_FORMAT=""
+FRAMES=10
+while [ $# -gt 0 ]; do
+    case "$1" in
+        --format)
+            shift
+            USER_FORMAT="$1"
+            ;;
+        --frames)
+            shift
+            FRAMES="$1"
+            ;;
+        --help)
+            print_usage
+            exit 0
+            ;;
+        *)
+            log_error "Unknown argument: $1"
+            print_usage
+            exit 1
+            ;;
+    esac
+    shift
+done
+
+# --------- Prechecks ---------
+if ! dt_confirm_node_or_compatible "isp" "cam" "camss"; then
+    log_skip "$TESTNAME SKIP – No ISP/camera node/compatible found in DT"
+    echo "$TESTNAME SKIP" >"$RES_FILE"
+    exit 0
+fi
+
+DRIVER_MOD="qcom_camss"
+DMESG_MODULES='qcom_camss|camss|isp'
+DMESG_EXCLUDE='dummy regulator|supply [^ ]+ not found|using dummy regulator|Failed to create device link|reboot-mode.*-EEXIST|can.t register reboot mode'
+
+if ! is_module_loaded "$DRIVER_MOD"; then
+    log_skip "$TESTNAME SKIP – Driver module $DRIVER_MOD not loaded"
+    echo "$TESTNAME SKIP" >"$RES_FILE"
+    exit 0
+fi
+
+if scan_dmesg_errors "$SCRIPT_DIR" "$DMESG_MODULES" "$DMESG_EXCLUDE"; then
+    log_skip "$TESTNAME SKIP – $DRIVER_MOD probe errors detected in dmesg"
+    echo "$TESTNAME SKIP" >"$RES_FILE"
+    exit 0
+fi
+
+# --------- Dependency Checks ---------
+check_dependencies media-ctl yavta python3 v4l2-ctl || {
+    log_skip "$TESTNAME SKIP – Required tools missing"
+    echo "$TESTNAME SKIP" >"$RES_FILE"
+    exit 0
+}
+
+# --------- Media Node Detection ---------
+MEDIA_NODE=$(detect_media_node)
+if [ -z "$MEDIA_NODE" ]; then
+    log_skip "$TESTNAME SKIP – Media node not found"
+    echo "$TESTNAME SKIP" >"$RES_FILE"
+    exit 0
+fi
+log_info "Detected media node: $MEDIA_NODE"
+
+# --------- Pipeline Detection ---------
+TOPO_FILE=$(mktemp "/tmp/${TESTNAME}_topo.XXXXXX")
+TMP_PIPELINES_FILE=$(mktemp "/tmp/${TESTNAME}_blocks.XXXXXX")
+trap 'rm -f "$TOPO_FILE" "$TMP_PIPELINES_FILE"' EXIT
+
+media-ctl -p -d "$MEDIA_NODE" >"$TOPO_FILE" 2>/dev/null
+PYTHON_PIPELINES=$(run_camera_pipeline_parser "$TOPO_FILE")
+if [ -z "$PYTHON_PIPELINES" ]; then
+    log_skip "$TESTNAME SKIP – No valid pipelines found"
+    echo "$TESTNAME SKIP" >"$RES_FILE"
+    exit 0
+fi
+
+printf "%s\n" "$PYTHON_PIPELINES" > "$TMP_PIPELINES_FILE"
+
+log_info "User format override: ${USER_FORMAT:-}"
+log_info "Frame count per pipeline: $FRAMES"
+
+# --------- Pipeline Processing ---------
+PASS=0; FAIL=0; SKIP=0; COUNT=0
+block=""
+
+while IFS= read -r line || [ -n "$line" ]; do
+    if [ "$line" = "--" ]; then
+        COUNT=$((COUNT+1))
+        TMP="/tmp/cam_block.$$.$COUNT"
+        printf "%s\n" "$block" > "$TMP"
+
+        # Parses the block and sets SENSOR, VIDEO, YAVTA_DEV, FMT, etc.
+        parse_pipeline_block "$TMP"
+        rm -f "$TMP"
+
+        # -------- Multi-format support (POSIX style, no arrays) --------
+        FORMATS_LIST="$USER_FORMAT"
+        if [ -z "$FORMATS_LIST" ]; then
+            # No user override, use the detected format for this pipeline only
+            FORMATS_LIST="$YAVTA_FMT"
+        fi
+
+        OLD_IFS="$IFS"
+        IFS=','
+        for FMT_OVERRIDE in $FORMATS_LIST; do
+            FMT_OVERRIDE=$(printf '%s' "$FMT_OVERRIDE" | sed 's/^ *//;s/ *$//')
+            TARGET_FORMAT="$FMT_OVERRIDE"
+            [ -z "$TARGET_FORMAT" ] && TARGET_FORMAT="$YAVTA_FMT"
+
+            log_info "----- Pipeline $COUNT: ${SENSOR:-unknown} $VIDEO $TARGET_FORMAT -----"
+
+            if [ -z "$VIDEO" ] || [ "$VIDEO" = "None" ] || [ -z "$YAVTA_DEV" ]; then
+                log_skip "$SENSOR: Invalid pipeline – skipping"
+                SKIP=$((SKIP+1))
+                continue
+            fi
+
+            configure_pipeline_block "$MEDIA_NODE" "$TARGET_FORMAT"
+            execute_capture_block "$FRAMES" "$TARGET_FORMAT"
+            RET=$?
+
+            case "$RET" in
+                0)
+                    log_pass "$SENSOR $VIDEO $TARGET_FORMAT PASS"
+                    PASS=$((PASS+1))
+                    ;;
+                1)
+                    log_fail "$SENSOR $VIDEO $TARGET_FORMAT FAIL (capture failed)"
+                    FAIL=$((FAIL+1))
+                    ;;
+                2)
+                    log_skip "$SENSOR $VIDEO $TARGET_FORMAT SKIP (unsupported format)"
+                    SKIP=$((SKIP+1))
+                    ;;
+                3)
+                    log_skip "$SENSOR $VIDEO missing data – skipping"
+                    SKIP=$((SKIP+1))
+                    ;;
+            esac
+        done
+        IFS="$OLD_IFS"
+        block=""
+    else
+        if [ -z "$block" ]; then
+            block="$line"
+        else
+            block="$block
+$line"
+        fi
+    fi
+done < "$TMP_PIPELINES_FILE"
+
+log_info "Test Summary: Passed: $PASS, Failed: $FAIL, Skipped: $SKIP"
+if [ "$PASS" -gt 0 ]; then
+    echo "$TESTNAME PASS" >"$RES_FILE"
+elif [ "$FAIL" -gt 0 ]; then
+    echo "$TESTNAME FAIL" >"$RES_FILE"
+else
+    echo "$TESTNAME SKIP" >"$RES_FILE"
+fi
+
+exit 0
diff --git a/Runner/utils/camera/parse_media_topology.py b/Runner/utils/camera/parse_media_topology.py
new file mode 100755
index 00000000..bd30c5b9
--- /dev/null
+++ b/Runner/utils/camera/parse_media_topology.py
@@ -0,0 +1,245 @@
+#!/usr/bin/env python3
+# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+# SPDX-License-Identifier: BSD-3-Clause-Clear
+
+import sys
+import re
+import subprocess
+import argparse
+
+def fourcc_map(fmt):
+    mapping = {
+        "SRGGB10_1X10": "SRGGB10P",
+        "RGGB8_1X8": "RGGB8",
+        "BGGR10_1X10": "BGGR10P",
+        "SRGGB8_1X8": "SRGGB8",
+        "YUYV8_2X8": "YUYV",
+        "UYVY8_2X8": "UYVY",
+        # Add more as needed
+    }
+    return mapping.get(fmt, fmt)
+
+def parse_entities(lines):
+    entities = {}
+    entity_pat = re.compile(r"^- entity (\d+): ([^\(]+)")
+    device_pat = re.compile(r"device node name (.+)")
+    type_pat = re.compile(r"type V4L2 subdev subtype (\w+)")
+    pad_pat = re.compile(r"^\s*pad(\d+): (\w+)")
+    link_pat = re.compile(r'-> "([^"]+)":(\d+) \[([^\]]*)\]')
+    fmt_pat = re.compile(r"\[stream:(\d+) fmt:([^\s/]+)/(\d+)x(\d+)")
+    fmt2_pat = re.compile(r"\[fmt:([^\s/]+)/(\d+)x(\d+)")
+    sensor_pat = re.compile(r"type V4L2 subdev subtype Sensor flags")
+    cur_entity = None
+    cur_pad = None
+    for line in lines:
+        line = line.rstrip('\n')
+        m = entity_pat.match(line)
+        if m:
+            idx, name = int(m.group(1)), m.group(2).strip()
+            cur_entity = {
+                'id': idx, 'name': name, 'pads': {}, 'devnode': None,
+                'type': None, 'is_sensor': False
+            }
+            entities[idx] = cur_entity
+            cur_pad = None
+            continue
+        if cur_entity is None:
+            continue
+        m = device_pat.search(line)
+        if m:
+            cur_entity['devnode'] = m.group(1).strip()
+        m = type_pat.search(line)
+        if m:
+            cur_entity['type'] = m.group(1).strip()
+        if sensor_pat.search(line):
+            cur_entity['is_sensor'] = True
+        m = pad_pat.match(line)
+        if m:
+            pad_idx, pad_type = int(m.group(1)), m.group(2)
+            cur_entity['pads'][pad_idx] = {'type': pad_type, 'links': [], 'fmt': None, 'w': None, 'h': None}
+            cur_pad = pad_idx
+        m = link_pat.search(line)
+        if m and cur_pad is not None:
+            target, pad_idx, flags = m.group(1), int(m.group(2)), m.group(3)
+            cur_entity['pads'][cur_pad]['links'].append({'target': target, 'pad': pad_idx, 'flags': flags})
+        m = fmt_pat.search(line)
+        if m and cur_pad is not None:
+            _, fmt, w, h = m.groups()
+            cur_entity['pads'][cur_pad].update({'fmt': fmt, 'w': w, 'h': h})
+        m = fmt2_pat.search(line)
+        if m and cur_pad is not None:
+            fmt, w, h = m.groups()
+            cur_entity['pads'][cur_pad].update({'fmt': fmt, 'w': w, 'h': h})
+    return entities
+
+def collect_supported_fourccs(video_ent, entities, fallback_fmt=None):
+    fourccs = set()
+    for pad in video_ent.get('pads', {}).values():
+        if pad.get('fmt'):
+            fourccs.add(pad['fmt'])
+    if not fourccs:
+        video_name = video_ent['name']
+        for ent in entities.values():
+            for pad in ent.get('pads', {}).values():
+                for link in pad.get('links', []):
+                    if link['target'] == video_name and pad.get('fmt'):
+                        fourccs.add(pad['fmt'])
+    if not fourccs and fallback_fmt:
+        fourccs.add(fallback_fmt)
+    return ','.join(sorted(fourccs)) if fourccs else 'None'
+
+def collect_supported_modes(video_devnode):
+    modes = set()
+    if not video_devnode or not video_devnode.startswith('/dev/video'):
+        return []
+    try:
+        output = subprocess.check_output(
+            ["v4l2-ctl", "--device", video_devnode, "--list-formats-ext"],
+            encoding="utf-8", stderr=subprocess.DEVNULL
+        )
+        current_fmt = None
+        for line in output.splitlines():
+            line = line.strip()
+            if line.startswith('['):
+                m = re.match(r"\[\d+\]:\s+'(\w+)'", line)
+                if m:
+                    current_fmt = m.group(1)
+            elif "Size:" in line and current_fmt:
+                matches = re.findall(r"(\d+)x(\d+)", line)
+                for (w, h) in matches:
+                    modes.add(f"{current_fmt}/{w}x{h}")
+        return sorted(modes)
+    except Exception:
+        return []
+
+def emit_media_ctl_v(entity, fmt, w, h):
+    cmds = []
+    for pad_num in [0, 1]:
+        if pad_num in entity['pads']:
+            pad = entity['pads'][pad_num]
+            _fmt = fmt if fmt else pad.get('fmt', 'None')
+            _w = w if w else pad.get('w', 'None')
+            _h = h if h else pad.get('h', 'None')
+            cmds.append(f'"{entity["name"]}":{pad_num}[fmt:{_fmt}/{_w}x{_h} field:none]')
+    return cmds
+
+def build_pipeline_cmds(sensor_ent, entities):
+    results = []
+    src_pad = sensor_ent['pads'].get(0)
+    if not src_pad or not src_pad['links']:
+        return results
+    for lnk in src_pad['links']:
+        csiphy_ent = next((e for e in entities.values() if e['name'] == lnk['target']), None)
+        if not csiphy_ent: continue
+        csid_ent = next((e for l in csiphy_ent['pads'].get(1, {}).get('links', []) if
+                         (e := next((e for e in entities.values() if e['name'] == l['target']), None))), None)
+        if not csid_ent: continue
+        vfe_ent = next((e for l in csid_ent['pads'].get(1, {}).get('links', []) if
+                        (e := next((e for e in entities.values() if e['name'] == l['target']), None))), None)
+        if not vfe_ent: continue
+        vid_ent = next((e for l in vfe_ent['pads'].get(1, {}).get('links', []) if
+                        (e := next((e for e in entities.values() if e['name'] == l['target']), None))), None)
+        if not vid_ent or not vid_ent.get('devnode'): continue
+
+        video_node = vid_ent['devnode']
+        if not video_supports_format(video_node):
+            continue
+
+        fmt = src_pad.get('fmt', 'None')
+        short_fmt = fourcc_map(fmt)
+        w = src_pad.get('w')
+        h = src_pad.get('h')
+
+        results.append({
+            'SENSOR': sensor_ent['name'],
+            'ENTITY': sensor_ent['id'],
+            'CSIPHY': csiphy_ent['name'],
+            'CSID': csid_ent['name'],
+            'VFE': vfe_ent['name'],
+            'VIDEO': video_node,
+            'FMT': fmt,
+            'W': w,
+            'H': h,
+            'SUBDEV': sensor_ent.get('devnode'),
+            'SUPPORTED_FOURCCS': collect_supported_fourccs(vid_ent, entities, fmt),
+            'SUPPORTED_MODE': collect_supported_modes(video_node),
+            'MEDIA_CTL_V': (
+                emit_media_ctl_v(sensor_ent, fmt, w, h) +
+                emit_media_ctl_v(csiphy_ent, fmt, w, h) +
+                emit_media_ctl_v(csid_ent, fmt, w, h) +
+                emit_media_ctl_v(vfe_ent, fmt, w, h)
+            ),
+            'MEDIA_CTL_L': [
+                f'"{sensor_ent["name"]}":0->"{csiphy_ent["name"]}":0[1]',
+                f'"{csiphy_ent["name"]}":1->"{csid_ent["name"]}":0[1]',
+                f'"{csid_ent["name"]}":1->"{vfe_ent["name"]}":0[1]',
+                f'"{vfe_ent["name"]}":1->"{vid_ent["name"]}":0[1]',
+            ],
+            'YAVTA_DEV': video_node,
+            'YAVTA_FMT': short_fmt,
+            'YAVTA_W': w,
+            'YAVTA_H': h,
+            'YAVTA_CTRL_PRE': f"{sensor_ent.get('devnode')} 0x009f0903 0" if sensor_ent.get('devnode') else "",
+            'YAVTA_CTRL': f"{sensor_ent.get('devnode')} 0x009f0903 9" if sensor_ent.get('devnode') else "",
+            'YAVTA_CTRL_POST': f"{sensor_ent.get('devnode')} 0x009f0903 0" if sensor_ent.get('devnode') else ""
+        })
+    return results
+
+def video_supports_format(video, fmt=None, w=None, h=None):
+    if not video or video == "None":
+        return False
+    try:
+        out = subprocess.check_output(["v4l2-ctl", "--device", video, "--list-formats-ext"],
+                                      encoding="utf-8", stderr=subprocess.DEVNULL)
+        if fmt:
+            found_fmt = False
+            for line in out.splitlines():
+                if fmt in line:
+                    found_fmt = True
+                if found_fmt and w and h and f"{w}x{h}" in line:
+                    return True
+            if found_fmt and (not w or not h):
+                return True
+            return False
+        return True
+    except Exception:
+        return False
+
+def main():
+    parser = argparse.ArgumentParser()
+    parser.add_argument("topo", help="media-ctl -p output text file")
+    args = parser.parse_args()
+
+    with open(args.topo) as f:
+        lines = f.readlines()
+
+    entities = parse_entities(lines)
+    found = False
+    for eid, ent in entities.items():
+        if not ent.get('is_sensor'):
+            continue
+        pipelines = build_pipeline_cmds(ent, entities)
+        for r in pipelines:
+            found = True
+            for k in ['SENSOR','ENTITY','CSIPHY','CSID','VFE','VIDEO','FMT','W','H','SUBDEV','SUPPORTED_FOURCCS']:
+                print(f"{k}:{r[k]}")
+            for mode in r['SUPPORTED_MODE']:
+                print(f"SUPPORTED_MODE:{mode}")
+            for v in r['MEDIA_CTL_V']:
+                print(f"MEDIA_CTL_V:{v}")
+            for l in r['MEDIA_CTL_L']:
+                print(f"MEDIA_CTL_L:{l}")
+            if r['YAVTA_CTRL_PRE']: print(f"YAVTA_CTRL_PRE:{r['YAVTA_CTRL_PRE']}")
+            if r['YAVTA_CTRL']: print(f"YAVTA_CTRL:{r['YAVTA_CTRL']}")
+            if r['YAVTA_CTRL_POST']: print(f"YAVTA_CTRL_POST:{r['YAVTA_CTRL_POST']}")
+            print(f"YAVTA_DEV:{r['YAVTA_DEV']}")
+            print(f"YAVTA_FMT:{r['YAVTA_FMT']}")
+            print(f"YAVTA_W:{r['YAVTA_W']}")
+            print(f"YAVTA_H:{r['YAVTA_H']}")
+            print("--")
+    if not found:
+        print("SKIP: No valid camera pipelines found in topology.")
+        sys.exit(2)
+
+if __name__ == "__main__":
+    main()
diff --git a/Runner/utils/functestlib.sh b/Runner/utils/functestlib.sh
index e4a8c8d6..ab8ca48a 100755
--- a/Runner/utils/functestlib.sh
+++ b/Runner/utils/functestlib.sh
@@ -1674,60 +1674,45 @@ detect_ufs_partition_block() {
     return 1
 }
 
-# scan_dmesg_check