From 318070ebee3c14c80b4090aefd9ee39bf7bcee98 Mon Sep 17 00:00:00 2001
From: Lucas Saavedra Vaz <32426024+lucasssvaz@users.noreply.github.com>
Date: Wed, 1 Oct 2025 17:52:13 -0300
Subject: [PATCH 1/5] ci(tests): Move hardware tests to GitLab
---
.github/scripts/tests_matrix.sh | 1 +
.github/workflows/tests.yml | 19 +-
.github/workflows/tests_hw.yml | 122 -------
.../{tests_wokwi.yml => tests_hw_wokwi.yml} | 253 ++++++++++++--
.github/workflows/tests_results.yml | 130 +++++---
.gitlab-ci.yml | 24 +-
.gitlab/scripts/gen_hw_jobs.py | 311 ++++++++++++++++++
.gitlab/scripts/get_artifacts.sh | 74 +++++
.gitlab/scripts/get_results.sh | 60 ++++
.gitlab/workflows/common.yml | 3 +
.gitlab/workflows/hardware_tests_dynamic.yml | 79 +++++
.gitlab/workflows/hw_test_template.yml | 65 ++++
.gitlab/workflows/sample.yml | 16 -
tests/performance/coremark/test_coremark.py | 2 +-
tests/performance/fibonacci/test_fibonacci.py | 2 +-
.../linpack_double/test_linpack_double.py | 2 +-
.../linpack_float/test_linpack_float.py | 2 +-
tests/performance/psramspeed/ci.json | 14 +
.../performance/psramspeed/test_psramspeed.py | 2 +-
tests/performance/ramspeed/test_ramspeed.py | 2 +-
tests/performance/superpi/test_superpi.py | 2 +-
tests/requirements.txt | 8 +-
tests/validation/psram/ci.json | 14 +
tests/validation/wifi/ci.json | 2 +-
24 files changed, 956 insertions(+), 253 deletions(-)
delete mode 100644 .github/workflows/tests_hw.yml
rename .github/workflows/{tests_wokwi.yml => tests_hw_wokwi.yml} (55%)
create mode 100644 .gitlab/scripts/gen_hw_jobs.py
create mode 100644 .gitlab/scripts/get_artifacts.sh
create mode 100644 .gitlab/scripts/get_results.sh
create mode 100644 .gitlab/workflows/hardware_tests_dynamic.yml
create mode 100644 .gitlab/workflows/hw_test_template.yml
delete mode 100644 .gitlab/workflows/sample.yml
diff --git a/.github/scripts/tests_matrix.sh b/.github/scripts/tests_matrix.sh
index a8baf2ce275..01cc122753c 100644
--- a/.github/scripts/tests_matrix.sh
+++ b/.github/scripts/tests_matrix.sh
@@ -17,6 +17,7 @@ targets="'esp32','esp32s2','esp32s3','esp32c3','esp32c6','esp32h2','esp32p4'"
mkdir -p info
echo "[$wokwi_types]" > info/wokwi_types.txt
+echo "[$hw_types]" > info/hw_types.txt
echo "[$targets]" > info/targets.txt
{
diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
index 557de11b509..8c46ef07661 100644
--- a/.github/workflows/tests.yml
+++ b/.github/workflows/tests.yml
@@ -89,23 +89,6 @@ jobs:
type: ${{ matrix.type }}
chip: ${{ matrix.chip }}
- call-hardware-tests:
- name: Hardware
- uses: ./.github/workflows/tests_hw.yml
- needs: [gen-matrix, call-build-tests]
- if: |
- github.repository == 'espressif/arduino-esp32' &&
- (github.event_name != 'pull_request' ||
- contains(github.event.pull_request.labels.*.name, 'hil_test'))
- strategy:
- fail-fast: false
- matrix:
- type: ${{ fromJson(needs.gen-matrix.outputs.hw-types) }}
- chip: ${{ fromJson(needs.gen-matrix.outputs.targets) }}
- with:
- type: ${{ matrix.type }}
- chip: ${{ matrix.chip }}
-
# This job is disabled for now
call-qemu-tests:
name: QEMU
@@ -121,4 +104,4 @@ jobs:
type: ${{ matrix.type }}
chip: ${{ matrix.chip }}
- # Wokwi tests are run after this workflow as it needs access to secrets
+ # Hardware and Wokwi tests are run after this workflow as they need access to secrets
diff --git a/.github/workflows/tests_hw.yml b/.github/workflows/tests_hw.yml
deleted file mode 100644
index d3b2ef79301..00000000000
--- a/.github/workflows/tests_hw.yml
+++ /dev/null
@@ -1,122 +0,0 @@
-name: Hardware tests
-
-on:
- workflow_call:
- inputs:
- type:
- type: string
- description: "Type of tests to run"
- required: true
- chip:
- type: string
- description: "Chip to run tests for"
- required: true
-
-permissions:
- contents: read
-
-env:
- DEBIAN_FRONTEND: noninteractive
-
-defaults:
- run:
- shell: bash
-
-jobs:
- hardware-test:
- name: Hardware ${{ inputs.chip }} ${{ inputs.type }} tests
- runs-on: ["arduino", "${{ inputs.chip }}"]
- env:
- id: ${{ github.event.pull_request.number || github.ref }}-${{ github.event.pull_request.head.sha || github.sha }}-${{ inputs.chip }}-${{ inputs.type }}
- container:
- image: python:3.10.1-bullseye
- options: --privileged --device-cgroup-rule="c 188:* rmw" --device-cgroup-rule="c 166:* rmw"
- steps:
- - name: Clean workspace
- run: |
- rm -rf ./*
- rm -rf ~/.arduino/tests
-
- - name: Check if already passed
- id: cache-results
- if: github.event.pull_request.number != null
- uses: actions/cache/restore@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3
- with:
- key: test-${{ env.id }}-results-hw
- path: |
- tests/**/*.xml
- tests/**/result_*.json
-
- - name: Evaluate if tests should be run
- id: check-tests
- run: |
- cache_exists=${{ steps.cache-results.outputs.cache-hit == 'true' }}
- enabled=true
-
- if [[ $cache_exists == 'true' ]]; then
- echo "Already ran, skipping"
- enabled=false
- fi
-
- echo "enabled=$enabled" >> $GITHUB_OUTPUT
-
- - name: Checkout user repository
- if: ${{ steps.check-tests.outputs.enabled == 'true' }}
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- with:
- # Workaround for missing files in checkout
- sparse-checkout: |
- *
-
- # setup-python currently only works on ubuntu images
- # - uses: actions/setup-python@42375524e23c412d93fb67b49958b491fce71c38 # v5.0.4
- # if: ${{ steps.check-tests.outputs.enabled == 'true' }}
- # with:
- # cache-dependency-path: tests/requirements.txt
- # cache: 'pip'
- # python-version: '3.10.1'
-
- - name: Install dependencies
- if: ${{ steps.check-tests.outputs.enabled == 'true' }}
- run: |
- pip install -U pip
- pip install -r tests/requirements.txt --extra-index-url https://dl.espressif.com/pypi
- apt update
- apt install -y jq
-
- - name: Get binaries
- if: ${{ steps.check-tests.outputs.enabled == 'true' }}
- uses: actions/download-artifact@95815c38cf2ff2164869cbab79da8d1f422bc89e # v4.2.1
- with:
- name: test-bin-${{ inputs.chip }}-${{ inputs.type }}
- path: |
- ~/.arduino/tests/${{ inputs.chip }}
-
- - name: List binaries
- if: ${{ steps.check-tests.outputs.enabled == 'true' }}
- run: |
- ls -laR ~/.arduino/tests
-
- - name: Run Tests
- if: ${{ steps.check-tests.outputs.enabled == 'true' }}
- run: |
- bash .github/scripts/tests_run.sh -c -type ${{ inputs.type }} -t ${{ inputs.chip }} -i 0 -m 1 -e
-
- - name: Upload ${{ inputs.chip }} ${{ inputs.type }} hardware results as cache
- uses: actions/cache/save@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3
- if: steps.check-tests.outputs.enabled == 'true' && github.event.pull_request.number != null
- with:
- key: test-${{ env.id }}-results-hw
- path: |
- tests/**/*.xml
- tests/**/result_*.json
-
- - name: Upload ${{ inputs.chip }} ${{ inputs.type }} hardware results as artifacts
- uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
- if: always()
- with:
- name: test-results-hw-${{ inputs.chip }}-${{ inputs.type }}
- overwrite: true
- path: |
- tests/**/*.xml
- tests/**/result_*.json
diff --git a/.github/workflows/tests_wokwi.yml b/.github/workflows/tests_hw_wokwi.yml
similarity index 55%
rename from .github/workflows/tests_wokwi.yml
rename to .github/workflows/tests_hw_wokwi.yml
index 3038a7ce59c..d9937adcc87 100644
--- a/.github/workflows/tests_wokwi.yml
+++ b/.github/workflows/tests_hw_wokwi.yml
@@ -1,4 +1,4 @@
-name: Wokwi tests
+name: Hardware and Wokwi tests
on:
workflow_run:
@@ -10,6 +10,10 @@ on:
permissions:
contents: read
+env:
+ #TESTS_BRANCH: "master" # Branch that will be checked out to run the tests
+ TESTS_BRANCH: "ci/hw_gitlab"
+
jobs:
get-artifacts:
name: Get required artifacts
@@ -22,7 +26,10 @@ jobs:
ref: ${{ steps.set-ref.outputs.ref }}
base: ${{ steps.set-ref.outputs.base }}
targets: ${{ steps.set-ref.outputs.targets }}
- types: ${{ steps.set-ref.outputs.types }}
+ wokwi_types: ${{ steps.set-ref.outputs.wokwi_types }}
+ hw_types: ${{ steps.set-ref.outputs.hw_types }}
+ hw_tests_enabled: ${{ steps.set-ref.outputs.hw_tests_enabled }}
+ push_time: ${{ steps.set-ref.outputs.push_time }}
steps:
- name: Report pending
uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7.0.1
@@ -60,7 +67,7 @@ jobs:
name: matrix_info
path: artifacts/matrix_info
- - name: Try to read PR number
+ - name: Get info
id: set-ref
run: |
pr_num=$(jq -r '.pull_request.number' artifacts/event_file/event.json | tr -cd "[:digit:]")
@@ -83,13 +90,34 @@ jobs:
base=${{ github.ref }}
fi
- types=$(cat artifacts/matrix_info/wokwi_types.txt | tr -cd "[:alpha:],[]'")
+ hw_tests_enabled="true"
+ if [[ -n "$pr_num" ]]; then
+ # This is a PR, check for hil_test label
+ has_hil_label=$(jq -r '.pull_request.labels[]?.name' artifacts/event_file/event.json 2>/dev/null | grep -q "hil_test" && echo "true" || echo "false")
+ echo "Has hil_test label: $has_hil_label"
+
+ if [[ "$has_hil_label" != "true" ]]; then
+ echo "PR does not have hil_test label, hardware tests will be disabled"
+ hw_tests_enabled="false"
+ fi
+ fi
+
+ push_time=$(jq -r '.repository.pushed_at' artifacts/event_file/event.json | tr -cd "[:alnum:]:-")
+ if [ -z "$push_time" ] || [ "$push_time" == "null" ]; then
+ push_time=""
+ fi
+
+ wokwi_types=$(cat artifacts/matrix_info/wokwi_types.txt | tr -cd "[:alpha:],[]'")
+ hw_types=$(cat artifacts/matrix_info/hw_types.txt | tr -cd "[:alpha:],[]'")
targets=$(cat artifacts/matrix_info/targets.txt | tr -cd "[:alnum:],[]'")
echo "base = $base"
echo "targets = $targets"
- echo "types = $types"
+ echo "wokwi_types = $wokwi_types"
+ echo "hw_types = $hw_types"
echo "pr_num = $pr_num"
+ echo "hw_tests_enabled = $hw_tests_enabled"
+ echo "push_time = $push_time"
printf "$ref" >> artifacts/ref.txt
printf "Ref = "
@@ -124,28 +152,11 @@ jobs:
echo "pr_num=$pr_num" >> $GITHUB_OUTPUT
echo "base=$base" >> $GITHUB_OUTPUT
echo "targets=$targets" >> $GITHUB_OUTPUT
- echo "types=$types" >> $GITHUB_OUTPUT
+ echo "wokwi_types=$wokwi_types" >> $GITHUB_OUTPUT
+ echo "hw_types=$hw_types" >> $GITHUB_OUTPUT
echo "ref=$ref" >> $GITHUB_OUTPUT
-
- - name: Download and extract parent hardware results
- uses: actions/download-artifact@95815c38cf2ff2164869cbab79da8d1f422bc89e # v4.2.1
- continue-on-error: true
- with:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- run-id: ${{ github.event.workflow_run.id }}
- pattern: test-results-hw-*
- merge-multiple: true
- path: artifacts/results/hw
-
- - name: Download and extract parent GitLab results
- uses: actions/download-artifact@95815c38cf2ff2164869cbab79da8d1f422bc89e # v4.2.1
- continue-on-error: true
- with:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- run-id: ${{ github.event.workflow_run.id }}
- pattern: test-results-gitlab
- merge-multiple: true
- path: artifacts/results/gitlab
+ echo "hw_tests_enabled=$hw_tests_enabled" >> $GITHUB_OUTPUT
+ echo "push_time=$push_time" >> $GITHUB_OUTPUT
- name: Download and extract parent QEMU results
uses: actions/download-artifact@95815c38cf2ff2164869cbab79da8d1f422bc89e # v4.2.1
@@ -185,6 +196,194 @@ jobs:
})).data;
core.info(`${name} is ${state}`);
+ hardware-test:
+ name: Internal Hardware Tests
+ if: |
+ (github.event.workflow_run.conclusion == 'success' ||
+ github.event.workflow_run.conclusion == 'failure' ||
+ github.event.workflow_run.conclusion == 'timed_out') &&
+ needs.get-artifacts.outputs.hw_tests_enabled == 'true'
+ runs-on: ubuntu-latest
+ needs: get-artifacts
+ env:
+ id: ${{ needs.get-artifacts.outputs.ref }}-${{ github.event.workflow_run.head_sha || github.sha }}
+ permissions:
+ actions: read
+ statuses: write
+ steps:
+ - name: Report pending
+ uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7.0.1
+ with:
+ script: |
+ const owner = '${{ github.repository_owner }}';
+ const repo = '${{ github.repository }}'.split('/')[1];
+ const sha = '${{ github.event.workflow_run.head_sha }}';
+ core.debug(`owner: ${owner}`);
+ core.debug(`repo: ${repo}`);
+ core.debug(`sha: ${sha}`);
+ const { context: name, state } = (await github.rest.repos.createCommitStatus({
+ context: 'Runtime Tests / Internal Hardware Tests (${{ github.event.workflow_run.event }} -> workflow_run)',
+ owner: owner,
+ repo: repo,
+ sha: sha,
+ state: 'pending',
+ target_url: 'https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}'
+ })).data;
+ core.info(`${name} is ${state}`);
+
+ - name: Check if already passed
+ id: get-cache-results
+ if: needs.get-artifacts.outputs.pr_num
+ uses: actions/cache/restore@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3
+ with:
+ key: test-${{ env.id }}-results-hw
+ path: |
+ tests/**/*.xml
+ tests/**/result_*.json
+
+ - name: Evaluate if tests should be run
+ id: check-tests
+ run: |
+ cache_exists=${{ steps.get-cache-results.outputs.cache-hit == 'true' }}
+ enabled=true
+
+ # Check cache first
+ if [[ $cache_exists == 'true' ]]; then
+ echo "Already ran, skipping GitLab pipeline trigger"
+ enabled=false
+ else
+ echo "Cache miss, hardware tests will run"
+ fi
+
+ echo "enabled=$enabled" >> $GITHUB_OUTPUT
+
+ - name: Wait for GitLab sync
+ if: ${{ steps.check-tests.outputs.enabled == 'true' }}
+ run: |
+ # A webhook to sync the repository is sent to GitLab when a commit is pushed to GitHub
+ # We wait for 10 minutes after the push to GitHub to be safe
+
+ echo "Ensuring GitLab sync has completed before triggering pipeline..."
+
+ # Use push time determined in get-artifacts job
+ push_time="${{ needs.get-artifacts.outputs.push_time }}"
+
+ if [ -n "$push_time" ]; then
+ echo "Push time: $push_time"
+
+ # Convert push time to epoch
+ push_epoch=$(date -d "$push_time" +%s 2>/dev/null || echo "")
+
+ if [ -n "$push_epoch" ]; then
+ current_epoch=$(date +%s)
+ elapsed_minutes=$(( (current_epoch - push_epoch) / 60 ))
+
+ echo "Elapsed time since push: ${elapsed_minutes} minutes"
+
+ if [ $elapsed_minutes -lt 10 ]; then
+ wait_time=$(( (10 - elapsed_minutes) * 60 ))
+ echo "Waiting ${wait_time} seconds for GitLab sync to complete..."
+ sleep $wait_time
+ else
+ echo "GitLab sync should be complete (${elapsed_minutes} minutes elapsed)"
+ fi
+ else
+ echo "Could not parse push timestamp, waiting 60 seconds as fallback..."
+ sleep 60
+ fi
+ else
+ echo "Could not determine push time, waiting 60 seconds as fallback..."
+ sleep 60
+ fi
+
+ echo "Proceeding with GitLab pipeline trigger..."
+
+ - name: Trigger GitLab Pipeline and Download Artifacts
+ if: ${{ steps.check-tests.outputs.enabled == 'true' }}
+ uses: digital-blueprint/gitlab-pipeline-trigger-action@v1.3.0
+ id: gitlab-trigger
+ with:
+ host: ${{ secrets.GITLAB_URL }}
+ id: ${{ secrets.GITLAB_PROJECT_ID }}
+ ref: ${{ env.TESTS_BRANCH }}
+ trigger_token: ${{ secrets.GITLAB_TRIGGER_TOKEN }}
+ access_token: ${{ secrets.GITLAB_ACCESS_TOKEN }}
+ download_artifacts: 'true'
+ download_artifacts_on_failure: 'true'
+ download_path: './gitlab-artifacts'
+ variables: '{"TEST_TYPES":"${{ needs.get-artifacts.outputs.hw_types }}","TEST_CHIPS":"${{ needs.get-artifacts.outputs.targets }}","PIPELINE_ID":"${{ env.id }}","BINARIES_RUN_ID":"${{ github.event.workflow_run.id }}","GITHUB_REPOSITORY":"${{ github.repository }}"}'
+
+ - name: Process Downloaded Artifacts
+ if: ${{ always() && steps.check-tests.outputs.enabled == 'true' }}
+ run: |
+ echo "GitLab Pipeline Status: ${{ steps.gitlab-trigger.outputs.status }}"
+ echo "Artifacts Downloaded: ${{ steps.gitlab-trigger.outputs.artifacts_downloaded }}"
+
+ # Create tests directory structure expected by GitHub caching
+ mkdir -p tests
+
+ # Process downloaded GitLab artifacts
+ if [ "${{ steps.gitlab-trigger.outputs.artifacts_downloaded }}" = "true" ]; then
+ echo "Processing downloaded GitLab artifacts..."
+
+ # Find and copy test result files while preserving directory structure
+ # The GitLab artifacts have the structure: gitlab-artifacts/job_*/artifacts/tests/...
+ # We want to preserve the tests/... part of the structure
+
+ for job_dir in ./gitlab-artifacts/job_*; do
+ if [ -d "$job_dir/artifacts/tests" ]; then
+ # Merge results into tests/ without failing on non-empty directories
+ echo "Merging $job_dir/artifacts/tests/ into tests/"
+ cp -a "$job_dir/artifacts/tests/." tests/
+ fi
+ done
+
+ echo "Test results found:"
+ ls -laR tests/ || echo "No test results found"
+ else
+ echo "No artifacts were downloaded from GitLab"
+ fi
+
+ - name: Upload hardware results as cache
+ uses: actions/cache/save@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3
+ if: steps.check-tests.outputs.enabled == 'true' && needs.get-artifacts.outputs.pr_num
+ with:
+ key: test-${{ env.id }}-results-hw
+ path: |
+ tests/**/*.xml
+ tests/**/result_*.json
+
+ - name: Upload hardware results as artifacts
+ uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
+ if: always()
+ with:
+ name: test-results-hw
+ overwrite: true
+ path: |
+ tests/**/*.xml
+ tests/**/result_*.json
+
+ - name: Report conclusion
+ uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7.0.1
+ if: always()
+ with:
+ script: |
+ const owner = '${{ github.repository_owner }}';
+ const repo = '${{ github.repository }}'.split('/')[1];
+ const sha = '${{ github.event.workflow_run.head_sha }}';
+ core.debug(`owner: ${owner}`);
+ core.debug(`repo: ${repo}`);
+ core.debug(`sha: ${sha}`);
+ const { context: name, state } = (await github.rest.repos.createCommitStatus({
+ context: 'Runtime Tests / Internal Hardware Tests (${{ github.event.workflow_run.event }} -> workflow_run)',
+ owner: owner,
+ repo: repo,
+ sha: sha,
+ state: '${{ job.status }}',
+ target_url: 'https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}'
+ })).data;
+ core.info(`${name} is ${state}`);
+
wokwi-test:
name: Wokwi ${{ matrix.chip }} ${{ matrix.type }} tests
if: |
@@ -201,7 +400,7 @@ jobs:
strategy:
fail-fast: false
matrix:
- type: ${{ fromJson(needs.get-artifacts.outputs.types) }}
+ type: ${{ fromJson(needs.get-artifacts.outputs.wokwi_types) }}
chip: ${{ fromJson(needs.get-artifacts.outputs.targets) }}
steps:
- name: Report pending
diff --git a/.github/workflows/tests_results.yml b/.github/workflows/tests_results.yml
index ab4bb31fca5..0a2e5250277 100644
--- a/.github/workflows/tests_results.yml
+++ b/.github/workflows/tests_results.yml
@@ -2,7 +2,7 @@ name: Publish and clean test results
on:
workflow_run:
- workflows: ["Wokwi tests"]
+ workflows: ["Hardware and Wokwi tests"]
types:
- completed
@@ -11,24 +11,17 @@ permissions:
contents: read
jobs:
- unit-test-results:
- name: Unit Test Results
- if: |
- github.event.workflow_run.conclusion == 'success' ||
- github.event.workflow_run.conclusion == 'failure' ||
- github.event.workflow_run.conclusion == 'timed_out'
+ get-artifacts:
+ name: Get artifacts
runs-on: ubuntu-latest
- permissions:
- actions: write
- statuses: write
- checks: write
- pull-requests: write
- contents: write
+ outputs:
+ original_event: ${{ steps.get-info.outputs.original_event }}
+ original_action: ${{ steps.get-info.outputs.original_action }}
+ original_sha: ${{ steps.get-info.outputs.original_sha }}
+ original_ref: ${{ steps.get-info.outputs.original_ref }}
+ original_conclusion: ${{ steps.get-info.outputs.original_conclusion }}
+ original_run_id: ${{ steps.get-info.outputs.original_run_id }}
steps:
- - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- with:
- ref: gh-pages
-
- name: Download and Extract Artifacts
uses: dawidd6/action-download-artifact@07ab29fd4a977ae4d2b275087cf67563dfdf0295 # v9
with:
@@ -36,7 +29,11 @@ jobs:
path: ./artifacts
- name: Get original info
+ id: get-info
run: |
+ echo "Artifacts:"
+ ls -laR ./artifacts
+
original_event=$(cat ./artifacts/parent-artifacts/event.txt)
original_action=$(cat ./artifacts/parent-artifacts/action.txt)
original_sha=$(cat ./artifacts/parent-artifacts/sha.txt)
@@ -64,12 +61,12 @@ jobs:
# Run ID: Allow numeric characters
original_run_id=$(echo "$original_run_id" | tr -cd '[:digit:]')
- echo "original_event=$original_event" >> $GITHUB_ENV
- echo "original_action=$original_action" >> $GITHUB_ENV
- echo "original_sha=$original_sha" >> $GITHUB_ENV
- echo "original_ref=$original_ref" >> $GITHUB_ENV
- echo "original_conclusion=$original_conclusion" >> $GITHUB_ENV
- echo "original_run_id=$original_run_id" >> $GITHUB_ENV
+ echo "original_event=$original_event" >> $GITHUB_OUTPUT
+ echo "original_action=$original_action" >> $GITHUB_OUTPUT
+ echo "original_sha=$original_sha" >> $GITHUB_OUTPUT
+ echo "original_ref=$original_ref" >> $GITHUB_OUTPUT
+ echo "original_conclusion=$original_conclusion" >> $GITHUB_OUTPUT
+ echo "original_run_id=$original_run_id" >> $GITHUB_OUTPUT
echo "original_event = $original_event"
echo "original_action = $original_action"
@@ -80,32 +77,63 @@ jobs:
- name: Print links to other runs
run: |
- echo "Build, Hardware and QEMU tests: https://github.com/${{ github.repository }}/actions/runs/${{ env.original_run_id }}"
- echo "Wokwi tests: https://github.com/${{ github.repository }}/actions/runs/${{ github.event.workflow_run.id }}"
+ echo "Build and QEMU tests: https://github.com/${{ github.repository }}/actions/runs/${{ steps.get-info.outputs.original_run_id }}"
+ echo "Hardware and Wokwi tests: https://github.com/${{ github.repository }}/actions/runs/${{ github.event.workflow_run.id }}"
+
+ unit-test-results:
+ name: Unit Test Results
+ needs: get-artifacts
+ if: |
+ github.event.workflow_run.conclusion == 'success' ||
+ github.event.workflow_run.conclusion == 'failure' ||
+ github.event.workflow_run.conclusion == 'timed_out'
+ runs-on: ubuntu-latest
+ permissions:
+ actions: write
+ statuses: write
+ checks: write
+ pull-requests: write
+ contents: write
+ steps:
+ - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+ with:
+ ref: gh-pages
+
+ - name: Download and Extract Artifacts
+ uses: dawidd6/action-download-artifact@07ab29fd4a977ae4d2b275087cf67563dfdf0295 # v9
+ with:
+ run_id: ${{ github.event.workflow_run.id }}
+ path: ./artifacts
- name: Publish Unit Test Results
uses: EnricoMi/publish-unit-test-result-action@170bf24d20d201b842d7a52403b73ed297e6645b # v2.18.0
with:
- commit: ${{ env.original_sha }}
+ commit: ${{ needs.get-artifacts.outputs.original_sha }}
event_file: ./artifacts/parent-artifacts/event_file/event.json
- event_name: ${{ env.original_event }}
+ event_name: ${{ needs.get-artifacts.outputs.original_event }}
files: ./artifacts/**/*.xml
action_fail: true
+ action_fail_on_inconclusive: true
compare_to_earlier_commit: false
json_file: ./unity_results.json
json_suite_details: true
- name: Upload JSON
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
- if: ${{ always() }}
+ if: always()
with:
name: unity_results
overwrite: true
- path: |
- ./unity_results.json
+ path: ./unity_results.json
- name: Fail if tests failed
- if: ${{ env.original_conclusion == 'failure' || env.original_conclusion == 'timed_out' || github.event.workflow_run.conclusion == 'failure' || github.event.workflow_run.conclusion == 'timed_out' }}
+ if: |
+ needs.get-artifacts.outputs.original_conclusion == 'failure' ||
+ needs.get-artifacts.outputs.original_conclusion == 'cancelled' ||
+ needs.get-artifacts.outputs.original_conclusion == 'timed_out' ||
+ github.event.workflow_run.conclusion == 'failure' ||
+ github.event.workflow_run.conclusion == 'cancelled' ||
+ github.event.workflow_run.conclusion == 'timed_out'
run: exit 1
- name: Clean up caches
@@ -113,10 +141,10 @@ jobs:
uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7.0.1
with:
script: |
- const ref = process.env.original_ref;
+ const ref = '${{ needs.get-artifacts.outputs.original_ref }}';
const key_prefix = 'test-' + ref + '-';
- if (process.env.original_event == 'pull_request' && process.env.original_action != 'closed') {
+ if ('${{ needs.get-artifacts.outputs.original_event }}' == 'pull_request' && '${{ needs.get-artifacts.outputs.original_action }}' != 'closed') {
console.log('Skipping cache cleanup for open PR');
return;
}
@@ -146,12 +174,12 @@ jobs:
script: |
const owner = '${{ github.repository_owner }}';
const repo = '${{ github.repository }}'.split('/')[1];
- const sha = process.env.original_sha;
+ const sha = '${{ needs.get-artifacts.outputs.original_sha }}';
core.debug(`owner: ${owner}`);
core.debug(`repo: ${repo}`);
core.debug(`sha: ${sha}`);
const { context: name, state } = (await github.rest.repos.createCommitStatus({
- context: `Runtime Tests / Report results (${process.env.original_event} -> workflow_run -> workflow_run)`,
+ context: `Runtime Tests / Report results (${{ needs.get-artifacts.outputs.original_event }} -> workflow_run -> workflow_run)`,
owner: owner,
repo: repo,
sha: sha,
@@ -162,12 +190,24 @@ jobs:
core.info(`${name} is ${state}`);
- name: Generate report
- if: ${{ !cancelled() && (env.original_event == 'schedule' || env.original_event == 'workflow_dispatch') }} # codespell:ignore cancelled
+ if: |
+ (!cancelled() &&
+ needs.get-artifacts.outputs.original_conclusion != 'cancelled' &&
+ github.event.workflow_run.conclusion != 'cancelled') &&
+ (needs.get-artifacts.outputs.original_event == 'schedule' ||
+ needs.get-artifacts.outputs.original_event == 'workflow_dispatch')
env:
REPORT_FILE: ./runtime-test-results/RUNTIME_TEST_RESULTS.md
WOKWI_RUN_ID: ${{ github.event.workflow_run.id }}
- BUILD_RUN_ID: ${{ env.original_run_id }}
- IS_FAILING: ${{ env.original_conclusion == 'failure' || env.original_conclusion == 'timed_out' || github.event.workflow_run.conclusion == 'failure' || github.event.workflow_run.conclusion == 'timed_out' || job.status == 'failure' }}
+ BUILD_RUN_ID: ${{ needs.get-artifacts.outputs.original_run_id }}
+      IS_FAILING: |-
+        ${{ needs.get-artifacts.outputs.original_conclusion == 'failure' ||
+        needs.get-artifacts.outputs.original_conclusion == 'cancelled' ||
+        needs.get-artifacts.outputs.original_conclusion == 'timed_out' ||
+        github.event.workflow_run.conclusion == 'failure' ||
+        github.event.workflow_run.conclusion == 'cancelled' ||
+        github.event.workflow_run.conclusion == 'timed_out' ||
+        job.status == 'failure' }}
run: |
rm -rf artifacts $REPORT_FILE
mv -f ./unity_results.json ./runtime-test-results/unity_results.json
@@ -176,7 +216,12 @@ jobs:
mv -f ./test_results.json ./runtime-test-results/test_results.json
- name: Generate badge
- if: ${{ !cancelled() && (env.original_event == 'schedule' || env.original_event == 'workflow_dispatch') }} # codespell:ignore cancelled
+ if: |
+ (!cancelled() &&
+ needs.get-artifacts.outputs.original_conclusion != 'cancelled' &&
+ github.event.workflow_run.conclusion != 'cancelled') &&
+ (needs.get-artifacts.outputs.original_event == 'schedule' ||
+ needs.get-artifacts.outputs.original_event == 'workflow_dispatch')
uses: jaywcjlove/generated-badges@0e078ae4d4bab3777ea4f137de496ab44688f5ad # v1.0.13
with:
label: Runtime Tests
@@ -186,7 +231,12 @@ jobs:
style: flat
- name: Push badge
- if: ${{ !cancelled() && (env.original_event == 'schedule' || env.original_event == 'workflow_dispatch') }} # codespell:ignore cancelled
+ if: |
+ (!cancelled() &&
+ needs.get-artifacts.outputs.original_conclusion != 'cancelled' &&
+ github.event.workflow_run.conclusion != 'cancelled') &&
+ (needs.get-artifacts.outputs.original_event == 'schedule' ||
+ needs.get-artifacts.outputs.original_event == 'workflow_dispatch')
run: |
git config user.name "github-actions[bot]"
git config user.email "41898282+github-actions[bot]@users.noreply.github.com"
diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 89a45022bc2..3d0ecd0cb34 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -1,25 +1,13 @@
workflow:
rules:
- # Disable those non-protected push triggered pipelines
- - if: '$CI_COMMIT_REF_NAME != "master" && $CI_COMMIT_BRANCH !~ /^release\/v/ && $CI_COMMIT_TAG !~ /^\d+\.\d+(\.\d+)?($|-)/ && $CI_PIPELINE_SOURCE == "push"'
- when: never
- # when running merged result pipelines, CI_COMMIT_SHA represents the temp commit it created.
- # Please use PIPELINE_COMMIT_SHA at all places that require a commit sha of the original commit.
- - if: $CI_OPEN_MERGE_REQUESTS != null
- variables:
- PIPELINE_COMMIT_SHA: $CI_MERGE_REQUEST_SOURCE_BRANCH_SHA
- IS_MR_PIPELINE: 1
- - if: $CI_OPEN_MERGE_REQUESTS == null
- variables:
- PIPELINE_COMMIT_SHA: $CI_COMMIT_SHA
- IS_MR_PIPELINE: 0
- - if: '$CI_PIPELINE_SOURCE == "schedule"'
- variables:
- IS_SCHEDULED_RUN: "true"
- - when: always
+ # Allow only when triggered manually (web), via API, or by a trigger token
+ - if: "$CI_PIPELINE_SOURCE =~ /^(trigger|api|web)$/"
+ when: always
+ # Deny all other sources
+ - when: never
# Place the default settings in `.gitlab/workflows/common.yml` instead
include:
- ".gitlab/workflows/common.yml"
- - ".gitlab/workflows/sample.yml"
+ - ".gitlab/workflows/hardware_tests_dynamic.yml"
diff --git a/.gitlab/scripts/gen_hw_jobs.py b/.gitlab/scripts/gen_hw_jobs.py
new file mode 100644
index 00000000000..67d44c76e94
--- /dev/null
+++ b/.gitlab/scripts/gen_hw_jobs.py
@@ -0,0 +1,311 @@
+#!/usr/bin/env python3
+
+import argparse
+import json
+import yaml
+import os
+import sys
+from pathlib import Path
+import copy
+
# Resolve repository root from this script location: .gitlab/scripts -> repo root
SCRIPT_DIR = Path(__file__).resolve().parent
REPO_ROOT = SCRIPT_DIR.parent.parent
TESTS_ROOT = REPO_ROOT / "tests"

# Ensure we run from repo root so relative paths work consistently.
# Non-fatal on failure: everything below also works via the absolute
# REPO_ROOT/TESTS_ROOT paths, but silently ignoring the error hid problems.
try:
    os.chdir(REPO_ROOT)
except Exception as e:
    sys.stderr.write(f"[WARN] Failed to chdir to repo root '{REPO_ROOT}': {e}\n")
+
+
class PrettyDumper(yaml.SafeDumper):
    """SafeDumper that always indents block sequences beneath their key."""

    def increase_indent(self, flow=False, indentless=False):
        # Ignore the caller's `indentless` request so nested lists are
        # visually indented in the generated pipeline YAML.
        return super().increase_indent(flow, False)
+
+
def str_representer(dumper, data):
    """YAML representer that emits multi-line strings in literal block ('|') style."""
    block_style = "|" if "\n" in data else None
    return dumper.represent_scalar("tag:yaml.org,2002:str", data, style=block_style)
+
+
def read_json(p: Path) -> dict:
    """Parse *p* as JSON, returning {} on any failure.

    Failures (missing file, bad JSON, wrong encoding) are reported on stderr
    instead of being silently swallowed, so a malformed ci.json is visible
    in the CI log rather than just dropping tests from the plan.
    """
    try:
        with p.open("r", encoding="utf-8") as f:
            return json.load(f)
    except Exception as e:
        sys.stderr.write(f"[WARN] Failed to parse JSON file '{p}': {e}\n")
        return {}
+
+
def find_tests() -> list[Path]:
    """Return every ci.json file found under the tests root (empty if absent)."""
    if not TESTS_ROOT.exists():
        return []
    return [ci for ci in TESTS_ROOT.rglob("ci.json") if ci.is_file()]
+
+
def find_sketch_test_dirs(types_filter: list[str]) -> list[tuple[str, Path]]:
    """
    Return (test_type, test_dir) pairs where test_dir contains a sketch named
    after the directory itself (i.e. <dir>/<dir>.ino).
    If types_filter is non-empty, only those type directories are scanned.
    """
    results: list[tuple[str, Path]] = []
    if not TESTS_ROOT.exists():
        return results
    for type_dir in TESTS_ROOT.iterdir():
        if not type_dir.is_dir():
            continue
        test_type = type_dir.name
        if types_filter and test_type not in types_filter:
            continue
        for candidate in type_dir.iterdir():
            # A valid test dir holds an .ino sketch named like the directory.
            if candidate.is_dir() and (candidate / f"{candidate.name}.ino").exists():
                results.append((test_type, candidate))
    return results
+
+
def load_tags_for_test(ci_json: dict, chip: str) -> set[str]:
    """Collect runner tags for a test: the global "tags" list plus soc_tags[chip].

    Non-list values and non-string / blank entries are ignored; entries are
    stripped of surrounding whitespace.

    BUGFIX: the original iterated `for key in "tags"`, which walks the
    characters 't','a','g','s' and therefore never read the "tags" key —
    global tags (e.g. wifi's ["wifi"]) were silently dropped.
    """
    tags: set[str] = set()

    def _collect(value) -> None:
        # Accept only a list of non-empty strings; anything else is ignored.
        if isinstance(value, list):
            for entry in value:
                if isinstance(entry, str) and entry.strip():
                    tags.add(entry.strip())

    # Global tags
    _collect(ci_json.get("tags"))
    # Per-SoC tags
    soc_tags = ci_json.get("soc_tags")
    if isinstance(soc_tags, dict):
        _collect(soc_tags.get(chip))
    return tags
+
+
+def test_enabled_for_target(ci_json: dict, chip: str) -> bool:
+ targets = ci_json.get("targets")
+ if isinstance(targets, dict):
+ v = targets.get(chip)
+ if v is False:
+ return False
+ return True
+
+
def platform_allowed(ci_json: dict, platform: str = "hardware") -> bool:
    """A platform is allowed unless ci.json explicitly sets platforms[platform] to false."""
    platforms = ci_json.get("platforms")
    return not (isinstance(platforms, dict) and platforms.get(platform) is False)
+
+
def sketch_name_from_ci(ci_path: Path) -> str:
    """Return the sketch name: the name of the directory holding ci.json."""
    return ci_path.parent.name
+
+
def sdkconfig_path_for(chip: str, sketch: str, ci_json: dict) -> Path:
    """
    Locate the sdkconfig produced by the build for (chip, sketch).

    Mirrors tests_run.sh: a single FQBN entry builds into build.tmp, while
    multiple FQBN entries build into numbered dirs, of which build0.tmp is
    the first.
    """
    fqbn = ci_json.get("fqbn", {}) if isinstance(ci_json, dict) else {}
    entries = fqbn.get(chip) if isinstance(fqbn, dict) else None
    entry_count = len(entries) if isinstance(entries, list) else 0
    build_dir = "build.tmp" if entry_count <= 1 else "build0.tmp"
    return Path.home() / f".arduino/tests/{chip}/{sketch}/{build_dir}/sdkconfig"
+
+
def sdk_meets_requirements(sdkconfig: Path, ci_json: dict) -> bool:
    """Mirror check_requirements() from sketch_utils.sh.

    "requires" entries are ANDed and "requires_any" entries are ORed; each
    entry must be a prefix of some line in the sdkconfig. Returns False when
    the sdkconfig is missing (build skipped or failed) or unreadable.
    """
    if not sdkconfig.exists():
        # Build might have been skipped or failed; allow parent to skip scheduling
        return False
    try:
        requires = ci_json.get("requires") or []
        requires_any = ci_json.get("requires_any") or []
        # Split once instead of re-splitting the whole file per requirement.
        lines = sdkconfig.read_text(encoding="utf-8", errors="ignore").splitlines()
        # AND requirements
        for req in requires:
            if isinstance(req, str) and not any(line.startswith(req) for line in lines):
                return False
        # OR requirements
        if requires_any:
            ok = any(
                any(line.startswith(req) for line in lines)
                for req in requires_any
                if isinstance(req, str)
            )
            if not ok:
                return False
        return True
    except Exception as e:
        # Report instead of silently failing so CI logs show why a test was skipped.
        sys.stderr.write(f"[WARN] Failed to evaluate requirements against '{sdkconfig}': {e}\n")
        return False
+
+
def parse_list_arg(s: str) -> list[str]:
    """
    Parse a CLI list argument into a list of stripped strings.

    Accepts a JSON array (double- or single-quoted) or, failing that, a plain
    comma-separated string. Empty input yields an empty list.
    """
    if not s:
        return []
    txt = s.strip()
    if txt.startswith("[") and txt.endswith("]"):
        # Try strict JSON first, then a single-quote-normalized variant.
        for candidate in (txt, txt.replace("'", '"')):
            try:
                return [str(item).strip() for item in json.loads(candidate)]
            except Exception:
                continue
    # Fallback: comma-separated
    return [piece.strip() for piece in txt.split(",") if piece.strip()]
+
+
def main() -> int:
    """Generate a GitLab child-pipeline YAML with one job per (SoC, tags, type) group.

    Tests are discovered under tests/, filtered by target/platform/sdkconfig
    requirements, grouped by the runner-tag set they need, and each group is
    emitted as a clone of the hw-test-template job. Returns 0 on success.
    """
    ap = argparse.ArgumentParser()
    ap.add_argument("--chips", required=True, help="Comma-separated or JSON array list of SoCs")
    ap.add_argument(
        "--types",
        required=False,
        default="validation",
        help="Comma-separated or JSON array of test type directories under tests/",
    )
    ap.add_argument("--out", required=True, help="Output YAML path for child pipeline")
    ap.add_argument(
        "--dry-run", action="store_true", help="Print planned groups/jobs and skip sdkconfig requirement checks"
    )
    args = ap.parse_args()

    chips = parse_list_arg(args.chips)
    types = parse_list_arg(args.types)

    print(f"Inputs: chips={chips or '[]'}, types={types or '[]'}")
    print(f"Repo root: {REPO_ROOT}")
    print(f"Tests root: {TESTS_ROOT}")

    # Aggregate mapping: (chip, frozenset(tags or generic), test_type) -> list of test paths
    group_map: dict[tuple[str, frozenset[str], str], list[str]] = {}
    all_ci = find_tests()
    print(f"Discovered {len(all_ci)} ci.json files under tests/")

    matched_count = 0
    for test_type, test_path in find_sketch_test_dirs(types):
        ci_path = test_path / "ci.json"
        ci = read_json(ci_path) if ci_path.exists() else {}
        test_dir = str(test_path)
        sketch = test_path.name
        for chip in chips:
            tags = load_tags_for_test(ci, chip)
            if not test_enabled_for_target(ci, chip):
                continue
            # Skip tests that explicitly disable the hardware platform
            if not platform_allowed(ci, "hardware"):
                continue
            sdk = sdkconfig_path_for(chip, sketch, ci)
            if not args.dry_run and not sdk_meets_requirements(sdk, ci):
                continue
            key_tags = tags.copy()
            # SOC must always be one runner tag
            key_tags.add(chip)
            if len(key_tags) == 1:
                # Only SOC present, add generic
                key_tags.add("generic")
            key = (chip, frozenset(sorted(key_tags)), test_type)
            group_map.setdefault(key, []).append(test_dir)
            matched_count += 1

    print(f"Matched {matched_count} test entries into {len(group_map)} groups")

    # Load template job
    template_path = REPO_ROOT / ".gitlab/workflows/hw_test_template.yml"
    template = yaml.safe_load(template_path.read_text(encoding="utf-8"))
    if not isinstance(template, dict) or "hw-test-template" not in template:
        print("ERROR: hw_test_template.yml missing hw-test-template")
        sys.exit(2)
    base_job = template["hw-test-template"]

    # Build child pipeline YAML in deterministic order
    jobs_entries = []  # list of (sort_key, job_name, job_dict)
    for (chip, tagset, test_type), test_dirs in group_map.items():
        tag_list = sorted(tagset)
        # Build name suffix excluding the SOC itself to avoid duplication
        non_soc_tags = [t for t in tag_list if t != chip]
        tag_suffix = "-".join(non_soc_tags) if non_soc_tags else "generic"
        # GitLab job names are capped; truncate defensively.
        job_name = f"hw-{chip}-{test_type}-{tag_suffix}"[:255]

        # Clone base job and adjust (preserve key order using deepcopy)
        job = copy.deepcopy(base_job)
        # Ensure tags include SOC+extras
        job["tags"] = tag_list
        vars_block = job.get("variables", {})
        vars_block["TEST_CHIP"] = chip
        vars_block["TEST_TYPE"] = test_type
        # Provide list of test directories for this job
        vars_block["TEST_LIST"] = "\n".join(sorted(test_dirs))
        job["variables"] = vars_block

        sort_key = (chip, test_type, tag_suffix)
        jobs_entries.append((sort_key, job_name, job))

    # Order jobs by (chip, type, tag_suffix)
    jobs = {}
    for _, name, job in sorted(jobs_entries, key=lambda x: x[0]):
        jobs[name] = job

    if args.dry_run:
        print("Planned hardware test jobs:")
        for name, job in jobs.items():
            tags = job.get("tags", [])
            soc = job.get("variables", {}).get("TEST_CHIP")
            ttype = job.get("variables", {}).get("TEST_TYPE")
            tlist = job.get("variables", {}).get("TEST_LIST", "")
            tests = [p for p in tlist.split("\n") if p]
            print(f"- {name} tags={tags} soc={soc} type={ttype} tests={len(tests)}")
            for t in tests:
                print(f"  * {t}")

    # If no jobs matched, create a no-op job to avoid failing trigger
    if not jobs:
        jobs["no-op"] = {
            "stage": "test",
            "script": ["echo No matching hardware tests to run"],
            "rules": [{"when": "on_success"}],
        }

    # Ensure child pipeline defines stages
    child = {"stages": ["test"]}

    for name, job in jobs.items():
        child[name] = job

    # Register the multi-line string representer once for both output paths.
    PrettyDumper.add_representer(str, str_representer)

    if args.dry_run:
        print("\n--- Generated child pipeline YAML (dry run) ---")
        sys.stdout.write(yaml.dump(child, Dumper=PrettyDumper, sort_keys=False, width=4096))
        return 0

    out = Path(args.out)
    out.write_text(yaml.dump(child, Dumper=PrettyDumper, sort_keys=False, width=4096), encoding="utf-8")
    print(f"Wrote child pipeline with {len(jobs)} job(s) to {out}")
    # Return 0 explicitly (the original fell off the end, returning None).
    return 0


if __name__ == "__main__":
    sys.exit(main())
diff --git a/.gitlab/scripts/get_artifacts.sh b/.gitlab/scripts/get_artifacts.sh
new file mode 100644
index 00000000000..a29e76a9ba1
--- /dev/null
+++ b/.gitlab/scripts/get_artifacts.sh
@@ -0,0 +1,74 @@
#!/bin/bash
#
# Download the GitHub Actions artifact "test-bin-$TEST_CHIP-$TEST_TYPE" from
# workflow run $BINARIES_RUN_ID of $GITHUB_REPOSITORY and extract it into
# ~/.arduino/tests/$TEST_CHIP/.
#
# Required environment: GITHUB_DOWNLOAD_PAT, GITHUB_REPOSITORY,
# BINARIES_RUN_ID, TEST_CHIP, TEST_TYPE.

set -e
set -o pipefail

echo "Downloading test binaries for $TEST_CHIP from GitHub repository $GITHUB_REPOSITORY"
echo "Binaries run ID: $BINARIES_RUN_ID"
echo "Looking for artifact: test-bin-$TEST_CHIP-$TEST_TYPE"

# Check if GitHub token is available
if [ -z "$GITHUB_DOWNLOAD_PAT" ]; then
    echo "ERROR: GITHUB_DOWNLOAD_PAT not available in GitLab environment"
    echo "Please set up GITHUB_DOWNLOAD_PAT in GitLab CI/CD variables"
    exit 1
fi

# First, get the artifacts list and save it for debugging
echo "Fetching artifacts list from GitHub API..."
artifacts_response=$(curl -s -H "Authorization: token $GITHUB_DOWNLOAD_PAT" \
    -H "Accept: application/vnd.github.v3+json" \
    "https://api.github.com/repos/$GITHUB_REPOSITORY/actions/runs/$BINARIES_RUN_ID/artifacts")

# Check if we got a valid response
if [ -z "$artifacts_response" ]; then
    echo "ERROR: Empty response from GitHub API"
    exit 1
fi

# Check for API errors
error_message=$(echo "$artifacts_response" | jq -r '.message // empty' 2>/dev/null)
if [ -n "$error_message" ]; then
    echo "ERROR: GitHub API returned error: $error_message"
    exit 1
fi

# List all available artifacts for debugging
echo "Available artifacts:"
echo "$artifacts_response" | jq -r '.artifacts[]?.name // "No artifacts found"' 2>/dev/null || echo "Could not parse artifacts"

# Find the download URL for our specific artifact.
# Use --arg instead of interpolating shell text into the jq program, so
# artifact names containing quotes or jq metacharacters cannot break it.
download_url=$(echo "$artifacts_response" | jq -r --arg name "test-bin-$TEST_CHIP-$TEST_TYPE" \
    '.artifacts[] | select(.name==$name) | .archive_download_url' 2>/dev/null)

if [ "$download_url" = "null" ] || [ -z "$download_url" ]; then
    echo "ERROR: Could not find artifact 'test-bin-$TEST_CHIP-$TEST_TYPE'"
    echo "This could mean:"
    echo "1. The artifact name doesn't match exactly"
    echo "2. The artifacts haven't been uploaded yet"
    echo "3. The GitHub run ID is incorrect"
    exit 1
fi

echo "Found download URL: $download_url"

# Download the artifact. NOTE: under `set -e` a failing command aborts the
# script before any later `$?` test runs, so failure handling must be inline.
echo "Downloading artifact..."
if ! curl -H "Authorization: token $GITHUB_DOWNLOAD_PAT" -L "$download_url" -o test-binaries.zip \
    || [ ! -f test-binaries.zip ]; then
    echo "ERROR: Failed to download artifact"
    exit 1
fi

echo "Extracting binaries..."
if ! unzip -q -o test-binaries.zip -d ~/.arduino/tests/"$TEST_CHIP"/; then
    echo "ERROR: Failed to extract binaries"
    exit 1
fi

rm -f test-binaries.zip
echo "Successfully downloaded and extracted test binaries"
diff --git a/.gitlab/scripts/get_results.sh b/.gitlab/scripts/get_results.sh
new file mode 100644
index 00000000000..cdfe2e64f96
--- /dev/null
+++ b/.gitlab/scripts/get_results.sh
@@ -0,0 +1,60 @@
#!/bin/bash
#
# Aggregate test-result artifacts from the "trigger-hw-tests" child
# pipeline(s) of the current parent pipeline into the working directory.
#
# Required environment: CI_API_V4_URL, CI_PROJECT_ID, CI_PIPELINE_ID,
# CI_JOB_TOKEN (or GITLAB_API_TOKEN for a private token with wider scope).

set -euo pipefail

echo "Collecting artifacts from child pipeline(s)"

api="$CI_API_V4_URL"
proj="$CI_PROJECT_ID"
parent="$CI_PIPELINE_ID"

# Choose auth header (prefer PRIVATE-TOKEN if provided)
AUTH_HEADER="JOB-TOKEN: $CI_JOB_TOKEN"
if [ -n "${GITLAB_API_TOKEN:-}" ]; then
    AUTH_HEADER="PRIVATE-TOKEN: $GITLAB_API_TOKEN"
fi

# Verify project is reachable
if ! curl -sf --header "$AUTH_HEADER" "$api/projects/$proj" >/dev/null; then
    echo "WARNING: Unable to access project $proj via API (token scope?)"
    exit 1
fi

bridges=$(curl -s --header "$AUTH_HEADER" "$api/projects/$proj/pipelines/$parent/bridges")
# Ensure we got a JSON array
if ! echo "$bridges" | jq -e 'type=="array"' >/dev/null 2>&1; then
    echo "WARNING: Unexpected bridges response:"; echo "$bridges"
    exit 1
fi

child_ids=$(echo "$bridges" | jq -r '.[] | select(.name=="trigger-hw-tests") | .downstream_pipeline.id')
mkdir -p aggregated

# BUGFIX: initialize before the loop. Previously `failed` was set inside the
# per-child loop, so with no child pipelines it was unbound under `set -u`,
# and a failure in one child was overwritten by the next iteration.
failed=false

for cid in $child_ids; do
    echo "Child pipeline: $cid"

    jobs=$(curl -s --header "$AUTH_HEADER" "$api/projects/$proj/pipelines/$cid/jobs?per_page=100")
    if ! echo "$jobs" | jq -e 'type=="array"' >/dev/null 2>&1; then
        echo "WARNING: Unable to list jobs for child $cid"; echo "$jobs"
        exit 1
    fi

    ids=$(echo "$jobs" | jq -r '.[] | select(.artifacts_file!=null) | .id')
    for jid in $ids; do
        echo "Downloading artifacts from job $jid"
        # Best-effort download: a job may legitimately lack artifacts.
        curl --header "$AUTH_HEADER" -L -s "$api/projects/$proj/jobs/$jid/artifacts" -o artifact.zip || true
        if [ -f artifact.zip ]; then
            unzip -q -o artifact.zip -d . >/dev/null 2>&1 || true
        else
            echo "Job $jid has no artifacts"
            failed=true
        fi
        rm -f artifact.zip
    done
done

if $failed; then
    echo "Some jobs failed to download artifacts"
    exit 1
fi
diff --git a/.gitlab/workflows/common.yml b/.gitlab/workflows/common.yml
index c7a0c5468e4..debdfaf252c 100644
--- a/.gitlab/workflows/common.yml
+++ b/.gitlab/workflows/common.yml
@@ -4,8 +4,11 @@
stages:
- pre_check
+ - generate
- build
- test
+ - trigger
+ - collect
- result
variables:
diff --git a/.gitlab/workflows/hardware_tests_dynamic.yml b/.gitlab/workflows/hardware_tests_dynamic.yml
new file mode 100644
index 00000000000..2c137f092ab
--- /dev/null
+++ b/.gitlab/workflows/hardware_tests_dynamic.yml
@@ -0,0 +1,79 @@
+###############################
+# Dynamic Hardware Tests Parent
+###############################
+
+# This parent workflow generates a dynamic child pipeline with jobs grouped
+# by SOC + runner tags derived from tests' ci.json, then triggers it and waits.
+
+generate-hw-tests:
+ stage: generate
+ image: python:3.12-bookworm
+ rules:
+ - if: $CI_PIPELINE_SOURCE == "trigger"
+ when: on_success
+ variables:
+ DEBIAN_FRONTEND: "noninteractive"
+ TEST_TYPES: $TEST_TYPES
+ TEST_CHIPS: $TEST_CHIPS
+ before_script:
+ - pip install PyYAML
+ - apt-get update
+ - apt-get install -y jq unzip curl
+ script:
+ - mkdir -p ~/.arduino/tests
+ - |
+ # Download artifacts for all requested chips/types so sdkconfig exists for grouping
+ CHIPS=$(echo "$TEST_CHIPS" | tr -d "[]' " | tr ',' ' ')
+ TYPES=$(echo "$TEST_TYPES" | tr -d "[]' " | tr ',' ' ')
+ for chip in $CHIPS; do
+ for t in $TYPES; do
+ export TEST_CHIP="$chip"
+ export TEST_TYPE="$t"
+ echo "Fetching artifacts for chip=$chip type=$t"
+ bash .gitlab/scripts/get_artifacts.sh
+ done
+ done
+ - python3 .gitlab/scripts/gen_hw_jobs.py --chips "$TEST_CHIPS" --types "$TEST_TYPES" --out child-hw-jobs.yml
+ artifacts:
+ when: always
+ expire_in: 7 days
+ paths:
+ - child-hw-jobs.yml
+
+trigger-hw-tests:
+ stage: trigger
+ needs: ["generate-hw-tests"]
+ rules:
+ - if: $CI_PIPELINE_SOURCE == "trigger"
+ when: on_success
+ variables:
+ # Forward common context to children
+ BINARIES_RUN_ID: $BINARIES_RUN_ID
+ GITHUB_REPOSITORY: $GITHUB_REPOSITORY
+ PIPELINE_ID: $PIPELINE_ID
+ trigger:
+ include:
+ - artifact: child-hw-jobs.yml
+ job: generate-hw-tests
+ strategy: depend
+
+collect-hw-results:
+ stage: result
+ image: python:3.12-bookworm
+ needs: ["trigger-hw-tests"]
+ rules:
+ - if: $CI_PIPELINE_SOURCE == "trigger"
+ when: always
+ before_script:
+ - apt-get update && apt-get install -y jq curl unzip
+ script:
+ - bash .gitlab/scripts/get_results.sh
+ artifacts:
+ name: "hw-test-results-aggregated"
+ expire_in: 7 days
+ when: always
+ paths:
+ - "tests/**/*.xml"
+ - "tests/**/result_*.json"
+ reports:
+ junit: "tests/**/*.xml"
diff --git a/.gitlab/workflows/hw_test_template.yml b/.gitlab/workflows/hw_test_template.yml
new file mode 100644
index 00000000000..1b09c2cb7eb
--- /dev/null
+++ b/.gitlab/workflows/hw_test_template.yml
@@ -0,0 +1,65 @@
+########################
+# HW Test Job Template #
+########################
+
+# This template is used to generate the pipeline for each hardware test.
+# It is triggered in hardware_tests_dynamic.yml after being generated by gen_hw_jobs.py.
+
+include:
+ - local: ".gitlab/workflows/common.yml"
+
+# Single job template to be cloned by the dynamic generator
+hw-test-template:
+ stage: test
+ image: python:3.12-bookworm
+
+ rules:
+ - when: on_success
+
+ variables:
+ RUNNER_SCRIPT_TIMEOUT: 4h
+ RUNNER_AFTER_SCRIPT_TIMEOUT: 2h
+ DEBIAN_FRONTEND: "noninteractive"
+ TEST_TYPE: $TEST_TYPE
+ TEST_CHIP: $TEST_CHIP
+ PIPELINE_ID: $PIPELINE_ID
+ BINARIES_RUN_ID: $BINARIES_RUN_ID
+ GITHUB_REPOSITORY: $GITHUB_REPOSITORY
+
+ tags:
+ - $TEST_CHIP
+
+ before_script:
+ - echo "Running hardware tests for chip:$TEST_CHIP type:$TEST_TYPE"
+ - echo "Pipeline ID:$PIPELINE_ID"
+ - echo "Running hardware tests for chip:$TEST_CHIP"
+ - apt-get update
+ - apt-get install -y jq unzip curl
+ - rm -rf ~/.arduino/tests
+ - mkdir -p ~/.arduino/tests/$TEST_CHIP
+ - echo Fetching binaries for $TEST_CHIP $TEST_TYPE
+ - bash .gitlab/scripts/get_artifacts.sh
+ - pip install -r tests/requirements.txt --extra-index-url https://dl.espressif.com/pypi
+
+ script:
+ - echo "Using binaries for $TEST_CHIP"
+ - ls -laR ~/.arduino/tests || true
+ - |
+ set -e
+ rc=0
+ while IFS= read -r d; do
+ [ -z "$d" ] && continue;
+ sketch=$(basename "$d");
+ echo Running $sketch in $d;
+ bash .github/scripts/tests_run.sh -t $TEST_CHIP -s $sketch -e || rc=$?;
+ done <<< "$TEST_LIST"; exit $rc
+
+ artifacts:
+ name: "hw-test-results-$TEST_CHIP-$TEST_TYPE"
+ expire_in: 7 days
+ when: always
+ paths:
+ - "tests/**/*.xml"
+ - "tests/**/result_*.json"
+ reports:
+ junit: "tests/**/*.xml"
diff --git a/.gitlab/workflows/sample.yml b/.gitlab/workflows/sample.yml
deleted file mode 100644
index e20cecf9e9e..00000000000
--- a/.gitlab/workflows/sample.yml
+++ /dev/null
@@ -1,16 +0,0 @@
-hello-world:
- stage: test
- rules:
- - if: $CI_PIPELINE_SOURCE == "push"
- - if: $CI_PIPELINE_SOURCE == "web"
- - if: $CI_PIPELINE_SOURCE == "trigger"
- variables:
- PIPELINE_TRIGGER_TOKEN: $CI_PIPELINE_TRIGGER_TOKEN
- script:
- - echo "Hello, World from GitLab CI!"
- - echo "Hello World!" > sample_artifact.txt
- artifacts:
- name: "sample-artifact"
- paths:
- - sample_artifact.txt
- expire_in: 1 day
diff --git a/tests/performance/coremark/test_coremark.py b/tests/performance/coremark/test_coremark.py
index befd7c3a1c9..f314ebcfdfa 100644
--- a/tests/performance/coremark/test_coremark.py
+++ b/tests/performance/coremark/test_coremark.py
@@ -46,7 +46,7 @@ def test_coremark(dut, request):
current_folder = os.path.dirname(request.path)
file_index = 0
- report_file = os.path.join(current_folder, "result_coremark" + str(file_index) + ".json")
+ report_file = os.path.join(current_folder, dut.app.target, "result_coremark" + str(file_index) + ".json")
while os.path.exists(report_file):
report_file = report_file.replace(str(file_index) + ".json", str(file_index + 1) + ".json")
file_index += 1
diff --git a/tests/performance/fibonacci/test_fibonacci.py b/tests/performance/fibonacci/test_fibonacci.py
index cf560d9691c..c7df59bb3aa 100644
--- a/tests/performance/fibonacci/test_fibonacci.py
+++ b/tests/performance/fibonacci/test_fibonacci.py
@@ -68,7 +68,7 @@ def test_fibonacci(dut, request):
current_folder = os.path.dirname(request.path)
file_index = 0
- report_file = os.path.join(current_folder, "result_fibonacci" + str(file_index) + ".json")
+ report_file = os.path.join(current_folder, dut.app.target, "result_fibonacci" + str(file_index) + ".json")
while os.path.exists(report_file):
report_file = report_file.replace(str(file_index) + ".json", str(file_index + 1) + ".json")
file_index += 1
diff --git a/tests/performance/linpack_double/test_linpack_double.py b/tests/performance/linpack_double/test_linpack_double.py
index 0a6e2f90ef3..bd6c52cac17 100644
--- a/tests/performance/linpack_double/test_linpack_double.py
+++ b/tests/performance/linpack_double/test_linpack_double.py
@@ -49,7 +49,7 @@ def test_linpack_double(dut, request):
current_folder = os.path.dirname(request.path)
file_index = 0
- report_file = os.path.join(current_folder, "result_linpack_double" + str(file_index) + ".json")
+ report_file = os.path.join(current_folder, dut.app.target, "result_linpack_double" + str(file_index) + ".json")
while os.path.exists(report_file):
report_file = report_file.replace(str(file_index) + ".json", str(file_index + 1) + ".json")
file_index += 1
diff --git a/tests/performance/linpack_float/test_linpack_float.py b/tests/performance/linpack_float/test_linpack_float.py
index d11f6c74136..d4c333d8e70 100644
--- a/tests/performance/linpack_float/test_linpack_float.py
+++ b/tests/performance/linpack_float/test_linpack_float.py
@@ -49,7 +49,7 @@ def test_linpack_float(dut, request):
current_folder = os.path.dirname(request.path)
file_index = 0
- report_file = os.path.join(current_folder, "result_linpack_float" + str(file_index) + ".json")
+ report_file = os.path.join(current_folder, dut.app.target, "result_linpack_float" + str(file_index) + ".json")
while os.path.exists(report_file):
report_file = report_file.replace(str(file_index) + ".json", str(file_index + 1) + ".json")
file_index += 1
diff --git a/tests/performance/psramspeed/ci.json b/tests/performance/psramspeed/ci.json
index 341df103671..e981565f0ca 100644
--- a/tests/performance/psramspeed/ci.json
+++ b/tests/performance/psramspeed/ci.json
@@ -1,4 +1,18 @@
{
+ "soc_tags": {
+ "esp32": [
+ "psram"
+ ],
+ "esp32s2": [
+ "psram"
+ ],
+ "esp32s3": [
+ "octal_psram"
+ ],
+ "esp32c5": [
+ "psram"
+ ]
+ },
"platforms": {
"qemu": false,
"wokwi": false
diff --git a/tests/performance/psramspeed/test_psramspeed.py b/tests/performance/psramspeed/test_psramspeed.py
index 9e96e158504..68467478eba 100644
--- a/tests/performance/psramspeed/test_psramspeed.py
+++ b/tests/performance/psramspeed/test_psramspeed.py
@@ -93,7 +93,7 @@ def test_psramspeed(dut, request):
current_folder = os.path.dirname(request.path)
file_index = 0
- report_file = os.path.join(current_folder, "result_psramspeed" + str(file_index) + ".json")
+ report_file = os.path.join(current_folder, dut.app.target, "result_psramspeed" + str(file_index) + ".json")
while os.path.exists(report_file):
report_file = report_file.replace(str(file_index) + ".json", str(file_index + 1) + ".json")
file_index += 1
diff --git a/tests/performance/ramspeed/test_ramspeed.py b/tests/performance/ramspeed/test_ramspeed.py
index dbe1670d329..987b6c00066 100644
--- a/tests/performance/ramspeed/test_ramspeed.py
+++ b/tests/performance/ramspeed/test_ramspeed.py
@@ -93,7 +93,7 @@ def test_ramspeed(dut, request):
current_folder = os.path.dirname(request.path)
file_index = 0
- report_file = os.path.join(current_folder, "result_ramspeed" + str(file_index) + ".json")
+ report_file = os.path.join(current_folder, dut.app.target, "result_ramspeed" + str(file_index) + ".json")
while os.path.exists(report_file):
report_file = report_file.replace(str(file_index) + ".json", str(file_index + 1) + ".json")
file_index += 1
diff --git a/tests/performance/superpi/test_superpi.py b/tests/performance/superpi/test_superpi.py
index 0bd7a3477b6..4e99bbb1c1b 100644
--- a/tests/performance/superpi/test_superpi.py
+++ b/tests/performance/superpi/test_superpi.py
@@ -41,7 +41,7 @@ def test_superpi(dut, request):
current_folder = os.path.dirname(request.path)
file_index = 0
- report_file = os.path.join(current_folder, "result_superpi" + str(file_index) + ".json")
+ report_file = os.path.join(current_folder, dut.app.target, "result_superpi" + str(file_index) + ".json")
while os.path.exists(report_file):
report_file = report_file.replace(str(file_index) + ".json", str(file_index + 1) + ".json")
file_index += 1
diff --git a/tests/requirements.txt b/tests/requirements.txt
index 001b26855a9..29b7d531bd4 100644
--- a/tests/requirements.txt
+++ b/tests/requirements.txt
@@ -1,8 +1,8 @@
cryptography==44.0.1
--only-binary cryptography
pytest-cov==5.0.0
-pytest-embedded-serial-esp==2.0.0
-pytest-embedded-arduino==2.0.0
-pytest-embedded-wokwi==2.0.0
-pytest-embedded-qemu==2.0.0
+pytest-embedded-serial-esp==2.1.0
+pytest-embedded-arduino==2.1.0
+pytest-embedded-wokwi==2.1.0
+pytest-embedded-qemu==2.1.0
esptool==5.1.0
diff --git a/tests/validation/psram/ci.json b/tests/validation/psram/ci.json
index 999d3be953e..4d426d38c30 100644
--- a/tests/validation/psram/ci.json
+++ b/tests/validation/psram/ci.json
@@ -1,4 +1,18 @@
{
+ "soc_tags": {
+ "esp32": [
+ "psram"
+ ],
+ "esp32s2": [
+ "psram"
+ ],
+ "esp32s3": [
+ "octal_psram"
+ ],
+ "esp32c5": [
+ "psram"
+ ]
+ },
"platforms": {
"qemu": false
},
diff --git a/tests/validation/wifi/ci.json b/tests/validation/wifi/ci.json
index 36e91b221cb..54dd47ae9a9 100644
--- a/tests/validation/wifi/ci.json
+++ b/tests/validation/wifi/ci.json
@@ -1,5 +1,5 @@
{
- "extra_tags": [
+ "tags": [
"wifi"
],
"fqbn": {
From ea50bff5d53a862afd3d1fa023e221d6d6c93905 Mon Sep 17 00:00:00 2001
From: Lucas Saavedra Vaz <32426024+lucasssvaz@users.noreply.github.com>
Date: Thu, 2 Oct 2025 08:39:19 -0300
Subject: [PATCH 2/5] Potential fix for code scanning alert no. 492: Code
injection
Co-authored-by: Copilot Autofix powered by AI <62310815+github-advanced-security[bot]@users.noreply.github.com>
---
.github/workflows/tests_hw_wokwi.yml | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/.github/workflows/tests_hw_wokwi.yml b/.github/workflows/tests_hw_wokwi.yml
index d9937adcc87..fd30b768aa1 100644
--- a/.github/workflows/tests_hw_wokwi.yml
+++ b/.github/workflows/tests_hw_wokwi.yml
@@ -259,6 +259,8 @@ jobs:
- name: Wait for GitLab sync
if: ${{ steps.check-tests.outputs.enabled == 'true' }}
+ env:
+ PUSH_TIME: ${{ needs.get-artifacts.outputs.push_time }}
run: |
# A webhook to sync the repository is sent to GitLab when a commit is pushed to GitHub
# We wait for 10 minutes after the push to GitHub to be safe
@@ -266,7 +268,7 @@ jobs:
echo "Ensuring GitLab sync has completed before triggering pipeline..."
# Use push time determined in get-artifacts job
- push_time="${{ needs.get-artifacts.outputs.push_time }}"
+ push_time="$PUSH_TIME"
if [ -n "$push_time" ]; then
echo "Push time: $push_time"
From 9fc86438952b2ba8e2c2fe55898d72af1a7bd77e Mon Sep 17 00:00:00 2001
From: Lucas Saavedra Vaz <32426024+lucasssvaz@users.noreply.github.com>
Date: Thu, 2 Oct 2025 08:41:47 -0300
Subject: [PATCH 3/5] Potential fix for code scanning alert no. 500: Code
injection
Co-authored-by: Copilot Autofix powered by AI <62310815+github-advanced-security[bot]@users.noreply.github.com>
---
.github/workflows/tests_results.yml | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/.github/workflows/tests_results.yml b/.github/workflows/tests_results.yml
index 0a2e5250277..764aee0fd32 100644
--- a/.github/workflows/tests_results.yml
+++ b/.github/workflows/tests_results.yml
@@ -76,8 +76,10 @@ jobs:
echo "original_run_id = $original_run_id"
- name: Print links to other runs
+ env:
+ ORIGINAL_RUN_ID: ${{ steps.get-info.outputs.original_run_id }}
run: |
- echo "Build and QEMU tests: https://github.com/${{ github.repository }}/actions/runs/${{ steps.get-info.outputs.original_run_id }}"
+ echo "Build and QEMU tests: https://github.com/${{ github.repository }}/actions/runs/$ORIGINAL_RUN_ID"
echo "Hardware and Wokwi tests: https://github.com/${{ github.repository }}/actions/runs/${{ github.event.workflow_run.id }}"
unit-test-results:
From 87edc5006b2a8b3bfb8af6de484eac932a3cd2ab Mon Sep 17 00:00:00 2001
From: Lucas Saavedra Vaz <32426024+lucasssvaz@users.noreply.github.com>
Date: Thu, 2 Oct 2025 08:54:31 -0300
Subject: [PATCH 4/5] fix(ci): Fix CodeQL warnings
---
.github/workflows/tests_hw_wokwi.yml | 2 +-
.github/workflows/tests_results.yml | 15 ++++++++++----
.gitlab/scripts/gen_hw_jobs.py | 29 ++++++++++++++++------------
3 files changed, 29 insertions(+), 17 deletions(-)
diff --git a/.github/workflows/tests_hw_wokwi.yml b/.github/workflows/tests_hw_wokwi.yml
index fd30b768aa1..d5aa9d63da7 100644
--- a/.github/workflows/tests_hw_wokwi.yml
+++ b/.github/workflows/tests_hw_wokwi.yml
@@ -302,7 +302,7 @@ jobs:
- name: Trigger GitLab Pipeline and Download Artifacts
if: ${{ steps.check-tests.outputs.enabled == 'true' }}
- uses: digital-blueprint/gitlab-pipeline-trigger-action@v1.3.0
+ uses: digital-blueprint/gitlab-pipeline-trigger-action@20e77989b24af658ba138a0aa5291bdc657f1505 # v1.3.0
id: gitlab-trigger
with:
host: ${{ secrets.GITLAB_URL }}
diff --git a/.github/workflows/tests_results.yml b/.github/workflows/tests_results.yml
index 764aee0fd32..525b303e486 100644
--- a/.github/workflows/tests_results.yml
+++ b/.github/workflows/tests_results.yml
@@ -141,12 +141,16 @@ jobs:
- name: Clean up caches
if: always()
uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7.0.1
+ env:
+ ORIGINAL_REF: ${{ needs.get-artifacts.outputs.original_ref }}
+ ORIGINAL_EVENT: ${{ needs.get-artifacts.outputs.original_event }}
+ ORIGINAL_ACTION: ${{ needs.get-artifacts.outputs.original_action }}
with:
script: |
- const ref = '${{ needs.get-artifacts.outputs.original_ref }}';
+ const ref = process.env.ORIGINAL_REF;
const key_prefix = 'test-' + ref + '-';
- if ('${{ needs.get-artifacts.outputs.original_event }}' == 'pull_request' && '${{ needs.get-artifacts.outputs.original_action }}' != 'closed') {
+ if (process.env.ORIGINAL_EVENT == 'pull_request' && process.env.ORIGINAL_ACTION != 'closed') {
console.log('Skipping cache cleanup for open PR');
return;
}
@@ -172,16 +176,19 @@ jobs:
- name: Report conclusion
uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7.0.1
if: always()
+ env:
+ ORIGINAL_EVENT: ${{ needs.get-artifacts.outputs.original_event }}
+ ORIGINAL_SHA: ${{ needs.get-artifacts.outputs.original_sha }}
with:
script: |
const owner = '${{ github.repository_owner }}';
const repo = '${{ github.repository }}'.split('/')[1];
- const sha = '${{ needs.get-artifacts.outputs.original_sha }}';
+ const sha = process.env.ORIGINAL_SHA;
core.debug(`owner: ${owner}`);
core.debug(`repo: ${repo}`);
core.debug(`sha: ${sha}`);
const { context: name, state } = (await github.rest.repos.createCommitStatus({
- context: `Runtime Tests / Report results (${{ needs.get-artifacts.outputs.original_event }} -> workflow_run -> workflow_run)`,
+ context: `Runtime Tests / Report results (${process.env.ORIGINAL_EVENT} -> workflow_run -> workflow_run)`,
owner: owner,
repo: repo,
sha: sha,
diff --git a/.gitlab/scripts/gen_hw_jobs.py b/.gitlab/scripts/gen_hw_jobs.py
index 67d44c76e94..adefffc6181 100644
--- a/.gitlab/scripts/gen_hw_jobs.py
+++ b/.gitlab/scripts/gen_hw_jobs.py
@@ -5,20 +5,21 @@
import yaml
import os
import sys
-from pathlib import Path
import copy
+import traceback
+from pathlib import Path
-# Resolve repository root from this script location: .gitlab/scripts -> esp32 root
+# Resolve repository root from this script location
SCRIPT_DIR = Path(__file__).resolve().parent
REPO_ROOT = SCRIPT_DIR.parent.parent
+TESTS_ROOT = REPO_ROOT / "tests"
# Ensure we run from repo root so relative paths work consistently
try:
os.chdir(REPO_ROOT)
-except Exception:
- pass
-
-TESTS_ROOT = REPO_ROOT / "tests"
+except Exception as e:
+ sys.stderr.write(f"[WARN] Failed to chdir to repo root '{REPO_ROOT}': {e}\n")
+ sys.stderr.write(traceback.format_exc() + "\n")
class PrettyDumper(yaml.SafeDumper):
@@ -35,7 +36,9 @@ def read_json(p: Path):
try:
with p.open("r", encoding="utf-8") as f:
return json.load(f)
- except Exception:
+ except Exception as e:
+ sys.stderr.write(f"[WARN] Failed to parse JSON file '{p}': {e}\n")
+ sys.stderr.write(traceback.format_exc() + "\n")
return {}
@@ -155,7 +158,9 @@ def sdk_meets_requirements(sdkconfig: Path, ci_json: dict) -> bool:
if not ok:
return False
return True
- except Exception:
+ except Exception as e:
+ sys.stderr.write(f"[WARN] Failed to evaluate requirements against '{sdkconfig}': {e}\n")
+ sys.stderr.write(traceback.format_exc() + "\n")
return False
@@ -166,13 +171,13 @@ def parse_list_arg(s: str) -> list[str]:
if txt.startswith("[") and txt.endswith("]"):
try:
return [str(x).strip() for x in json.loads(txt)]
- except Exception:
- # Attempt single-quote JSON -> replace with double quotes
+ except Exception as e:
+ sys.stderr.write(f"[WARN] Failed to parse JSON list '{txt}': {e}. Retrying with quote normalization.\n")
try:
fixed = txt.replace("'", '"')
return [str(x).strip() for x in json.loads(fixed)]
- except Exception:
- pass
+ except Exception as e2:
+ sys.stderr.write(f"[WARN] Failed to parse JSON list after normalization: {e2}. Falling back to CSV parsing.\n")
# Fallback: comma-separated
return [part.strip() for part in txt.split(",") if part.strip()]
From 162ca2c5bb78a2fee5b2902600b3e70db81f3665 Mon Sep 17 00:00:00 2001
From: "pre-commit-ci-lite[bot]"
<117423508+pre-commit-ci-lite[bot]@users.noreply.github.com>
Date: Mon, 6 Oct 2025 07:55:11 +0000
Subject: [PATCH 5/5] ci(pre-commit): Apply automatic fixes
---
.gitlab/scripts/gen_hw_jobs.py | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/.gitlab/scripts/gen_hw_jobs.py b/.gitlab/scripts/gen_hw_jobs.py
index adefffc6181..804e245c18f 100644
--- a/.gitlab/scripts/gen_hw_jobs.py
+++ b/.gitlab/scripts/gen_hw_jobs.py
@@ -177,7 +177,9 @@ def parse_list_arg(s: str) -> list[str]:
fixed = txt.replace("'", '"')
return [str(x).strip() for x in json.loads(fixed)]
except Exception as e2:
- sys.stderr.write(f"[WARN] Failed to parse JSON list after normalization: {e2}. Falling back to CSV parsing.\n")
+ sys.stderr.write(
+ f"[WARN] Failed to parse JSON list after normalization: {e2}. Falling back to CSV parsing.\n"
+ )
# Fallback: comma-separated
return [part.strip() for part in txt.split(",") if part.strip()]