# CI: Re-run pr-quality.yml workflow on pull request updates (#386)
# Workflow file for this run.
# Workflow-level configuration: triggers, shared inputs, permissions,
# concurrency, and environment.
name: PR Quality Gate

on:
  pull_request:
    types: [opened, synchronize, reopened]
    branches:
      - leader
  workflow_dispatch:
    inputs:
      branch:
        description: 'Branch to check'
        required: true
        default: 'leader'
      pr_number:
        description: 'PR number to post comments to (optional)'
        required: false
        type: string
      has_changes:
        description: 'Whether the PR has file changes'
        required: true
        type: string
  workflow_call:
    inputs:
      branch:
        description: 'Branch to check'
        required: false
        type: string
        default: 'leader'
      pr_number:
        description: 'PR number to post comments to (optional)'
        required: false
        type: string
      has_changes:
        description: 'Whether the PR has file changes'
        required: true
        type: string
    secrets:
      GH_TOKEN:
        description: 'GitHub token (optional, uses github.token by default)'
        required: false

permissions:
  contents: read
  checks: write
  actions: read
  pull-requests: write
  issues: write

# One run per PR (falling back to ref, then run id); a newer run cancels
# an in-flight older one for the same group.
concurrency:
  group: pr-quality-${{ github.event.pull_request.number || github.ref || github.run_id }}
  cancel-in-progress: true

env:
  FORCE_COLOR: 1
jobs:
  # Sanitizes the session identifier used to namespace artifacts, and
  # self-tests the sanitize-string composite action against known cases.
  prepare-env:
    name: 📝 Prepare Environment
    # NOTE(review): `inputs` is empty on plain pull_request events, so this
    # condition only passes when the workflow is invoked via workflow_call /
    # workflow_dispatch with has_changes set — confirm this is intended.
    if: inputs.has_changes == 'true'
    runs-on: ubuntu-latest
    outputs:
      session_id: ${{ steps.sanitize_id.outputs.sanitized-value }}
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - name: Sanitize SESSION_ID
        id: sanitize_id
        uses: ./.github/actions/sanitize-string
        with:
          value: ${{ github.event.pull_request.number || github.ref_name || github.run_id }}
      # Test Composite Actions - Sanitize String validation.
      # Each test/verify pair is continue-on-error so a regression in the
      # composite action is reported without blocking the quality gate.
      - name: Test Sanitize Action - Special Chars and Truncation
        id: test_special_chars
        uses: ./.github/actions/sanitize-string
        with:
          value: 'feature/test-branch/with_special-chars!@#$%^&*()'
          max-length: '30'
        continue-on-error: true
      - name: Verify - Special Chars and Truncation
        run: |
          actual="${{ steps.test_special_chars.outputs.sanitized-value }}"
          expected="feature-test-branch-with_speci"
          if [ "$actual" != "$expected" ]; then
            echo "❌ Test Failed: Special Chars and Truncation"
            echo "Expected: '$expected'"
            echo "Actual: '$actual'"
            exit 1
          fi
          echo "✅ Test Passed: Special Chars and Truncation"
        continue-on-error: true
      - name: Test Sanitize Action - Empty String
        id: test_empty
        uses: ./.github/actions/sanitize-string
        with:
          value: ''
          max-length: '64'
        continue-on-error: true
      - name: Verify - Empty String
        run: |
          actual="${{ steps.test_empty.outputs.sanitized-value }}"
          expected=""
          if [ "$actual" != "$expected" ]; then
            echo "❌ Test Failed: Empty String"
            echo "Expected: '$expected'"
            echo "Actual: '$actual'"
            exit 1
          fi
          echo "✅ Test Passed: Empty String"
        continue-on-error: true
      - name: Test Sanitize Action - Already Sanitized
        id: test_clean
        uses: ./.github/actions/sanitize-string
        with:
          value: 'clean-string-123'
          max-length: '64'
        continue-on-error: true
      - name: Verify - Already Sanitized
        run: |
          actual="${{ steps.test_clean.outputs.sanitized-value }}"
          expected="clean-string-123"
          if [ "$actual" != "$expected" ]; then
            echo "❌ Test Failed: Already Sanitized"
            echo "Expected: '$expected'"
            echo "Actual: '$actual'"
            exit 1
          fi
          echo "✅ Test Passed: Already Sanitized"
        continue-on-error: true
      - name: Test Sanitize Action - Shorter than Max Length
        id: test_short
        uses: ./.github/actions/sanitize-string
        with:
          value: 'short/string'
          max-length: '64'
        continue-on-error: true
      - name: Verify - Shorter than Max Length
        run: |
          actual="${{ steps.test_short.outputs.sanitized-value }}"
          expected="short-string"
          if [ "$actual" != "$expected" ]; then
            echo "❌ Test Failed: Shorter than Max Length"
            echo "Expected: '$expected'"
            echo "Actual: '$actual'"
            exit 1
          fi
          echo "✅ Test Passed: Shorter than Max Length"
        continue-on-error: true
      - name: Test Sanitize Action - Duplicate Hyphens
        id: test_dupes
        uses: ./.github/actions/sanitize-string
        with:
          value: 'feature//branch'
          max-length: '64'
        continue-on-error: true
      - name: Verify - Duplicate Hyphens
        run: |
          actual="${{ steps.test_dupes.outputs.sanitized-value }}"
          expected="feature-branch"
          if [ "$actual" != "$expected" ]; then
            echo "❌ Test Failed: Duplicate Hyphens"
            echo "Expected: '$expected'"
            echo "Actual: '$actual'"
            exit 1
          fi
          echo "✅ Test Passed: Duplicate Hyphens"
        continue-on-error: true
      - name: Test Sanitize Action - Trailing Hyphen
        id: test_trailing
        uses: ./.github/actions/sanitize-string
        with:
          value: 'feature/branch-'
          max-length: '64'
        continue-on-error: true
      - name: Verify - Trailing Hyphen
        run: |
          actual="${{ steps.test_trailing.outputs.sanitized-value }}"
          expected="feature-branch"
          if [ "$actual" != "$expected" ]; then
            echo "❌ Test Failed: Trailing Hyphen"
            echo "Expected: '$expected'"
            echo "Actual: '$actual'"
            exit 1
          fi
          echo "✅ Test Passed: Trailing Hyphen"
        continue-on-error: true
      - name: Test Sanitize Action - At Max Length
        id: test_max
        uses: ./.github/actions/sanitize-string
        with:
          value: 'a-very-long-string-that-is-exactly-64-characters-long-and-valid'
          max-length: '64'
        continue-on-error: true
      - name: Verify - At Max Length
        run: |
          actual="${{ steps.test_max.outputs.sanitized-value }}"
          expected="a-very-long-string-that-is-exactly-64-characters-long-and-valid"
          if [ "$actual" != "$expected" ]; then
            echo "❌ Test Failed: At Max Length"
            echo "Expected: '$expected'"
            echo "Actual: '$actual'"
            exit 1
          fi
          echo "✅ Test Passed: At Max Length"
        continue-on-error: true
      - name: Test Sanitize Action - Only Invalid Chars
        id: test_invalid
        uses: ./.github/actions/sanitize-string
        with:
          value: '!@#$%^&*()'
          max-length: '64'
        continue-on-error: true
      - name: Verify - Only Invalid Chars
        run: |
          actual="${{ steps.test_invalid.outputs.sanitized-value }}"
          expected=""
          if [ "$actual" != "$expected" ]; then
            echo "❌ Test Failed: Only Invalid Chars"
            echo "Expected: '$expected'"
            echo "Actual: '$actual'"
            exit 1
          fi
          echo "✅ Test Passed: Only Invalid Chars"
        continue-on-error: true
      - name: Test Sanitize Action - Leading/Trailing Invalid Chars
        id: test_leading_trailing
        uses: ./.github/actions/sanitize-string
        with:
          value: '!!test-branch!!'
          max-length: '64'
        continue-on-error: true
      - name: Verify - Leading/Trailing Invalid Chars
        run: |
          actual="${{ steps.test_leading_trailing.outputs.sanitized-value }}"
          expected="test-branch"
          if [ "$actual" != "$expected" ]; then
            echo "❌ Test Failed: Leading/Trailing Invalid Chars"
            echo "Expected: '$expected'"
            echo "Actual: '$actual'"
            exit 1
          fi
          echo "✅ Test Passed: Leading/Trailing Invalid Chars"
        continue-on-error: true
script-tests:
name: 🧪 Script Tests
needs: [prepare-env]
if: inputs.has_changes == 'true'
runs-on: ubuntu-latest
timeout-minutes: 5
env:
SESSION_ID: ${{ needs.prepare-env.outputs.session_id }}
steps:
- name: Checkout Code
uses: actions/checkout@v4
with:
fetch-depth: 1
- name: Run Throttling Logic Integration Test
id: script-tests
shell: bash
run: |
set -o pipefail
mkdir -p logs
chmod +x ./tests/integration/test-decide-review-strategy.sh
# Pipe stderr to stdout to capture all output in the log
./tests/integration/test-decide-review-strategy.sh > >(tee -a logs/script-output.log) 2> >(tee -a logs/script-output.log >&2)
continue-on-error: true
- name: Upload Script Test Logs
if: always()
uses: actions/upload-artifact@v4
with:
name: script-test-logs-${{ env.SESSION_ID }}
path: |
logs/script-output.log
if-no-files-found: ignore
retention-days: 2
- name: Check Script Tests Result
if: always()
shell: bash
run: |
if [ "${{ steps.script-tests.outcome }}" = 'failure' ]; then
echo "::error::Script tests failed"
exit 1
fi
knip-check:
name: 🎒 Knip Check
needs: [prepare-env]
if: inputs.has_changes == 'true'
runs-on: ubuntu-latest
timeout-minutes: 5
env:
SESSION_ID: ${{ needs.prepare-env.outputs.session_id }}
steps:
- name: Checkout Code
uses: actions/checkout@v4
with:
fetch-depth: 1
- name: Setup Environment
uses: ./.github/actions/setup-env
- name: Run Knip
id: knip
shell: bash
run: |
set -o pipefail
pnpm run knip 2>&1 | tee knip-output.txt
continue-on-error: true
- name: Upload Knip Report
if: always() && steps.knip.outcome == 'failure' && hashFiles('knip-output.txt') != ''
uses: actions/upload-artifact@v4
with:
name: knip-report
path: knip-output.txt
retention-days: 2
- name: Fail on Knip Errors
if: steps.knip.outcome == 'failure'
continue-on-error: true
run: |
echo "::warning::Knip check failed but continuing workflow for now. Knip validation is under review."
exit 0
lint-check:
name: 🔍 Lint Check
needs: [prepare-env]
if: inputs.has_changes == 'true'
runs-on: ubuntu-latest
timeout-minutes: 5
env:
SESSION_ID: ${{ needs.prepare-env.outputs.session_id }}
steps:
- name: Checkout Code
uses: actions/checkout@v4
with:
fetch-depth: 1
- name: Setup Environment
uses: ./.github/actions/setup-env
- name: Lint Code
id: lint
shell: bash
run: |
set -o pipefail
./scripts/ci/run-linter.sh
continue-on-error: true
- name: Report Lint Failure
if: failure() && steps.lint.outcome == 'failure'
run: |
echo "### ❌ Lint Failure Report" >> $GITHUB_STEP_SUMMARY
echo '```text' >> $GITHUB_STEP_SUMMARY
cat logs/lint-output.log >> $GITHUB_STEP_SUMMARY
echo '```' >> $GITHUB_STEP_SUMMARY
- name: Check Lint Result
if: always()
shell: bash
run: |
if [ "${{ steps.lint.outcome }}" = 'failure' ]; then
echo "::error::Lint check failed"
exit 1
fi
- name: Upload Lint Report
if: always()
uses: actions/upload-artifact@v4
with:
name: lint-report
path: logs/lint-output.log
retention-days: 2
if-no-files-found: ignore
build-check:
name: 🏗️ Build Check
needs: [prepare-env]
if: inputs.has_changes == 'true'
runs-on: ubuntu-latest
timeout-minutes: 10
env:
SESSION_ID: ${{ needs.prepare-env.outputs.session_id }}
NEXTAUTH_URL: http://localhost:3000
NEXTAUTH_SECRET: build-verification-secret-do-not-use-in-production
steps:
- name: Checkout Code
uses: actions/checkout@v4
with:
fetch-depth: 1
- name: Setup Environment
uses: ./.github/actions/setup-env
- name: Verify Build
id: build
env:
NODE_ENV: production
NEXTAUTH_URL: http://localhost:3000
NEXTAUTH_SECRET: build-verification-secret-do-not-use-in-production
shell: bash
run: |
set -o pipefail
./scripts/ci/run-build.sh
continue-on-error: true
- name: Verify Artifacts
if: always() && steps.build.outcome == 'success'
run: |
if [ ! -d ".next_prod" ] || [ ! -d "dist" ]; then
echo "::error::Build artifacts were not created successfully."
exit 1
fi
echo "Build artifacts verified."
- name: Report Build Failure
if: failure() && steps.build.outcome == 'failure'
run: |
echo "### ❌ Build Failure Report" >> $GITHUB_STEP_SUMMARY
echo '```text' >> $GITHUB_STEP_SUMMARY
tail -n 50 logs/build-output.log >> $GITHUB_STEP_SUMMARY
echo '```' >> $GITHUB_STEP_SUMMARY
- name: Check Build Result
if: always()
shell: bash
run: |
if [ "${{ steps.build.outcome }}" = 'failure' ]; then
echo "::error::Build failed"
exit 1
fi
- name: Upload Build Report
if: always()
uses: actions/upload-artifact@v4
with:
name: build-report
path: logs/build-output.log
retention-days: 2
if-no-files-found: ignore
- name: 🔍 Pre-archive Inspection
if: always() && steps.build.outcome == 'success'
shell: bash
run: |
echo "=== Checking .next_prod directory structure ==="
if [ ! -d ".next_prod" ]; then
echo "❌ ERROR: .next_prod directory not found!"
exit 1
fi
echo "📁 .next_prod directory contents:"
ls -la .next_prod/
echo ""
echo "🔎 Checking critical files:"
if [ ! -f ".next_prod/BUILD_ID" ]; then
echo "❌ ERROR: BUILD_ID not found!"
exit 1
fi
echo "✅ BUILD_ID: $(cat .next_prod/BUILD_ID)"
if [ ! -d ".next_prod/server" ]; then
echo "❌ ERROR: server directory not found!"
exit 1
fi
echo "✅ server/ directory exists"
if [ ! -d ".next_prod/static" ]; then
echo "⚠️ WARNING: static directory not found"
else
echo "✅ static/ directory exists"
fi
echo ""
echo "📊 .next_prod directory size:"
du -sh .next_prod/
echo ""
echo "📋 Directory structure (truncated):"
(ls -R .next_prod/ || true) | head -50
- name: 📦 Create Release Archive
if: always() && steps.build.outcome == 'success'
shell: bash
run: |
echo "📦 Creating release archive with complete .next_prod directory..."
# Use explicit directory names (not wildcards) to preserve all files
# tar -c: create archive
# -z: gzip compression
# -f: output file
tar -czf release.tar.gz \
.next_prod \
dist \
scripts/start-production.sh \
scripts/deploy-artifact.sh \
scripts/verify-deployment.sh \
ecosystem.config.cjs \
package.json \
pnpm-lock.yaml \
.env.local \
server.ts \
proxy.ts \
public \
next.config.js
echo "✅ Release archive created:"
ls -lh release.tar.gz
echo ""
echo "📊 Archive contents verification:"
(tar -tzf release.tar.gz | grep -E "BUILD_ID|\.next_prod/server" || true) | head -10
- name: Upload Build Artifacts
if: always() && steps.build.outcome == 'success'
uses: actions/upload-artifact@v4
with:
name: build-artifacts-${{ env.SESSION_ID }}
path: release.tar.gz
retention-days: 2
infra-tests:
name: 🧪 Infrastructure Tests
needs: [prepare-env, knip-check, lint-check, build-check]
if: inputs.has_changes == 'true'
runs-on: ubuntu-latest
timeout-minutes: 15
env:
SESSION_ID: ${{ needs.prepare-env.outputs.session_id }}
NEXTAUTH_URL: http://localhost:3006
NEXTAUTH_SECRET: build-verification-secret-do-not-use-in-production
PORT: 3006
steps:
- name: Checkout Code
uses: actions/checkout@v4
with:
fetch-depth: 1
- name: Setup Environment
uses: ./.github/actions/setup-env
- name: Download Build Artifacts
uses: actions/download-artifact@v7
with:
name: build-artifacts-${{ env.SESSION_ID }}
path: ./
continue-on-error: true
- name: Extract Build Artifacts
shell: bash
run: |
echo "📦 Extracting release archive..."
if [ -f "release.tar.gz" ]; then
tar -xzf release.tar.gz
rm release.tar.gz
echo "✅ Archive extracted"
else
echo "⚠️ No release archive found"
fi
if [ -d ".next_prod" ] && [ -d "dist" ]; then
echo "✅ All build directories found"
else
echo "❌ Missing required build directories"
exit 1
fi
- name: Run Infra Tests
id: infra-tests
shell: bash
env:
NODE_ENV: production
run: |
set -o pipefail
./scripts/ci/run-infra-tests.sh prod
continue-on-error: true
- name: Publish Infrastructure Tests Results
uses: dorny/test-reporter@v1
if: always() && steps.infra-tests.outcome != 'skipped'
with:
name: Infrastructure (Prod)
path: test-results/infra-prod-results.xml
reporter: java-junit
continue-on-error: true
- name: Upload Infra Test Logs
if: always()
uses: actions/upload-artifact@v4
with:
name: infra-test-logs-${{ env.SESSION_ID }}
path: |
logs/infra-prod-output.log
test-results/infra-prod-results.xml
if-no-files-found: ignore
retention-days: 2
- name: Check Infra Tests Result
if: always()
shell: bash
run: |
if [ "${{ steps.infra-tests.outcome }}" = 'failure' ]; then
echo "::error::Infrastructure tests failed"
exit 1
fi
unit-tests:
name: 🧪 Unit Tests
needs: [prepare-env, knip-check, lint-check, build-check]
if: inputs.has_changes == 'true'
runs-on: ubuntu-latest
timeout-minutes: 15
env:
SESSION_ID: ${{ needs.prepare-env.outputs.session_id }}
steps:
- name: Checkout Code
uses: actions/checkout@v4
with:
fetch-depth: 1
- name: Setup Environment
uses: ./.github/actions/setup-env
- name: Download Build Artifacts
uses: actions/download-artifact@v7
with:
name: build-artifacts-${{ env.SESSION_ID }}
path: ./
continue-on-error: true
- name: Extract Build Artifacts
shell: bash
run: |
echo "📦 Extracting release archive..."
if [ -f "release.tar.gz" ]; then
tar -xzf release.tar.gz
rm release.tar.gz
echo "✅ Archive extracted"
else
echo "⚠️ No release archive found"
fi
if [ -d ".next_prod" ] && [ -d "dist" ]; then
echo "✅ All build directories found"
else
echo "❌ Missing required build directories"
exit 1
fi
- name: Run Unit Tests
id: unit-tests
shell: bash
run: |
set -o pipefail
./scripts/ci/run-unit-tests.sh
continue-on-error: true
- name: Publish Unit Tests Results
uses: dorny/test-reporter@v1
if: always() && steps.unit-tests.outcome != 'skipped'
with:
name: Unit Tests
path: test-results/unit-results.xml
reporter: java-junit
continue-on-error: true
- name: Upload Unit Test Logs
if: always()
uses: actions/upload-artifact@v4
with:
name: unit-test-logs-${{ env.SESSION_ID }}
path: |
logs/unit-output.log
test-results/unit-results.xml
if-no-files-found: ignore
retention-days: 2
- name: Check Unit Tests Result
if: always()
shell: bash
run: |
if [ "${{ steps.unit-tests.outcome }}" = 'failure' ]; then
echo "::error::Unit tests failed"
exit 1
fi
component-tests:
name: 🧪 Component Tests
needs: [prepare-env, knip-check, lint-check, build-check]
if: inputs.has_changes == 'true'
runs-on: ubuntu-latest
timeout-minutes: 15
env:
SESSION_ID: ${{ needs.prepare-env.outputs.session_id }}
steps:
- name: Checkout Code
uses: actions/checkout@v4
with:
fetch-depth: 1
- name: Setup Environment
uses: ./.github/actions/setup-env
- name: Download Build Artifacts
uses: actions/download-artifact@v7
with:
name: build-artifacts-${{ env.SESSION_ID }}
path: ./
continue-on-error: true
- name: Extract Build Artifacts
shell: bash
run: |
echo "📦 Extracting release archive..."
if [ -f "release.tar.gz" ]; then
tar -xzf release.tar.gz
rm release.tar.gz
echo "✅ Archive extracted"
else
echo "⚠️ No release archive found"
fi
if [ -d ".next_prod" ] && [ -d "dist" ]; then
echo "✅ All build directories found"
else
echo "❌ Missing required build directories"
exit 1
fi
- name: Run Component Tests
id: component-tests
shell: bash
run: |
set -o pipefail
./scripts/ci/run-component-tests.sh
continue-on-error: true
- name: Publish Component Tests Results
uses: dorny/test-reporter@v1
if: always() && steps.component-tests.outcome != 'skipped'
with:
name: Component Tests
path: test-results/component-results.xml
reporter: java-junit
continue-on-error: true
- name: Upload Component Test Logs
if: always()
uses: actions/upload-artifact@v4
with:
name: component-test-logs-${{ env.SESSION_ID }}
path: |
logs/component-output.log
test-results/component-results.xml
if-no-files-found: ignore
retention-days: 2
- name: Check Component Tests Result
if: always()
shell: bash
run: |
if [ "${{ steps.component-tests.outcome }}" = 'failure' ]; then
echo "::error::Component tests failed"
exit 1
fi
perf-tests:
name: 🧪 Performance Tests
needs: [prepare-env, knip-check, lint-check, build-check]
if: inputs.has_changes == 'true'
runs-on: ubuntu-latest
timeout-minutes: 30
env:
SESSION_ID: ${{ needs.prepare-env.outputs.session_id }}
NEXTAUTH_URL: http://localhost:3007
NEXTAUTH_SECRET: build-verification-secret-do-not-use-in-production
PORT: 3007
steps:
- name: Checkout Code
uses: actions/checkout@v4
with:
fetch-depth: 1
- name: Setup Environment
uses: ./.github/actions/setup-env
- name: Download Build Artifacts
uses: actions/download-artifact@v7
with:
name: build-artifacts-${{ env.SESSION_ID }}
path: ./
continue-on-error: true
- name: Extract Build Artifacts
shell: bash
run: |
echo "📦 Extracting release archive..."
if [ -f "release.tar.gz" ]; then
tar -xzf release.tar.gz
rm release.tar.gz
echo "✅ Archive extracted"
else
echo "⚠️ No release archive found"
fi
if [ -d ".next_prod" ] && [ -d "dist" ]; then
echo "✅ All build directories found"
else
echo "❌ Missing required build directories"
exit 1
fi
- name: Run Performance Test
id: perf-tests
shell: bash
run: |
set -o pipefail
./scripts/ci/run-perf-tests.sh
continue-on-error: true
- name: Publish Performance Test Results
uses: dorny/test-reporter@v1
if: always() && steps.perf-tests.outcome != 'skipped'
with:
name: Performance Tests Report
path: test-results/perf-results.xml
reporter: java-junit
continue-on-error: true
- name: Upload Performance Metrics
if: always()
uses: actions/upload-artifact@v4
with:
name: performance-metrics-${{ env.SESSION_ID }}
path: test-results/performance-metrics.json
if-no-files-found: ignore
- name: Upload Perf Test Logs
if: always()
uses: actions/upload-artifact@v4
with:
name: perf-test-logs-${{ env.SESSION_ID }}
path: |
logs/perf-output.log
test-results/perf-results.xml
if-no-files-found: ignore
retention-days: 2
- name: Check Performance Tests Result
if: always()
shell: bash
run: |
if [ "${{ steps.perf-tests.outcome }}" = 'failure' ]; then
echo "::error::Performance tests failed"
exit 1
fi
- name: Stop Application
if: always()
run: pnpm run kill-all || true
visual-tests:
name: 🧪 Visual Tests
needs: [prepare-env, knip-check, lint-check, build-check]
if: inputs.has_changes == 'true'
runs-on: ubuntu-latest
timeout-minutes: 30
env:
SESSION_ID: ${{ needs.prepare-env.outputs.session_id }}
NEXTAUTH_URL: http://localhost:3000
NEXTAUTH_SECRET: build-verification-secret-do-not-use-in-production
steps:
- name: Checkout Code
uses: actions/checkout@v4
with:
fetch-depth: 1
- name: Setup Environment
uses: ./.github/actions/setup-env
- name: Download Build Artifacts
uses: actions/download-artifact@v7
with:
name: build-artifacts-${{ env.SESSION_ID }}
path: ./
continue-on-error: true
- name: Extract Build Artifacts
shell: bash
run: |
echo "📦 Extracting release archive..."
if [ -f "release.tar.gz" ]; then
tar -xzf release.tar.gz
rm release.tar.gz
echo "✅ Archive extracted"
else
echo "⚠️ No release archive found"
fi
if [ -d ".next_prod" ] && [ -d "dist" ]; then
echo "✅ All build directories found"
else
echo "❌ Missing required build directories"
exit 1
fi
- name: Run Visual Tests (Playwright)
id: visual-tests
shell: bash
run: |
set -o pipefail
./scripts/ci/run-visual-tests.sh
continue-on-error: true
- name: Publish Visual Tests Results
uses: dorny/test-reporter@v1
if: always() && steps.visual-tests.outcome != 'skipped'
with:
name: Visual Tests Report
path: test-results/results.xml
reporter: java-junit
continue-on-error: true
- name: Merge Playwright Reports
if: always() && steps.visual-tests.outcome != 'skipped'
shell: bash
run: |
if [ -d "blob-report" ]; then
echo "📊 Merging Playwright blob reports..."
npx playwright merge-reports --reporter html ./blob-report || true
else
echo "⚠️ No blob-report directory found, skipping merge"
fi
continue-on-error: true
- name: Upload Visual Test Logs
if: always()
uses: actions/upload-artifact@v4
with:
name: visual-test-logs-${{ env.SESSION_ID }}
path: |
logs/visual-output.log
test-results/results.xml
playwright-report/
if-no-files-found: ignore
retention-days: 2
- name: Check Visual Tests Result
if: always()
shell: bash
run: |
if [ "${{ steps.visual-tests.outcome }}" = 'failure' ]; then
echo "::error::Visual tests failed"
exit 1
fi
copy-checks:
name: 📝 Copy checks from parent commit
if: inputs.has_changes == 'false'
runs-on: ubuntu-latest
permissions:
checks: write
steps:
- name: Checkout code
uses: actions/checkout@v4
with:
fetch-depth: 2
- name: Copy checks from parent commit
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
# Get the parent commit SHA, use HEAD~1 for compatibility
PARENT_SHA=$(git rev-parse HEAD~1)
# Get the current commit SHA
CURRENT_SHA=$(git rev-parse HEAD)
# --- Dynamically determine the list of jobs to copy ---
echo "🔎 Determining required checks for branch '${{ github.base_ref }}'..."
REQUIRED_CHECKS_JSON=$(gh api "repos/${{ github.repository }}/branches/${{ github.base_ref }}/protection/required_status_checks" --jq '.contexts' || echo "[]")
if [ "$(echo "$REQUIRED_CHECKS_JSON" | jq 'length')" -gt 0 ]; then
echo "Found protected branch rules. Required checks:"
echo "$REQUIRED_CHECKS_JSON" | jq -r '.[]'
# Convert JSON array to a bash array
mapfile -t JOBS_TO_COPY < <(echo "$REQUIRED_CHECKS_JSON" | jq -r '.[]')
else
echo "Branch is not protected or failed to fetch rules. Using fallback list."
JOBS_TO_COPY=(
"📝 Prepare Environment"
"🎒 Knip Check"
"🔍 Lint Check"
"🏗️ Build Check"
"🧪 Infrastructure Tests"
"🧪 Unit Tests"
"🧪 Component Tests"
"🧪 Performance Tests"
"🧪 Visual Tests"
"📊 Integration Tests Summary"
"📊 Quality Report"
)
fi
# --- Polling for parent commit checks ---
echo "⏳ Waiting for checks on parent commit ($PARENT_SHA) to complete..."
TIMEOUT=600 # 10 minutes
INTERVAL=30 # 30 seconds
ELAPSED=0
CHECKS_JSON=""
while [ $ELAPSED -lt $TIMEOUT ]; do
# Fetch check runs for the parent commit
PARENT_CHECKS_JSON=$(gh api "repos/${{ github.repository }}/commits/${PARENT_SHA}/check-runs" --jq '.check_runs')
# Filter for the specific jobs we care about
RELEVANT_CHECKS_JSON=$(echo "$PARENT_CHECKS_JSON" | jq -c --argjson jobs_to_copy "$(printf '%s\n' "${JOBS_TO_COPY[@]}" | jq -R . | jq -s .)" '[.[] | select(.name as $name | $jobs_to_copy | index($name))]')
# Count how many of the relevant checks are NOT completed
IN_PROGRESS_COUNT=$(echo "$RELEVANT_CHECKS_JSON" | jq '[.[] | select(.status != "completed")] | length')
# Count how many jobs we expect and how many we found
EXPECTED_COUNT=${#JOBS_TO_COPY[@]}
FOUND_COUNT=$(echo "$RELEVANT_CHECKS_JSON" | jq 'length')
if [ "$FOUND_COUNT" -gt 0 ] && [ "$IN_PROGRESS_COUNT" -eq 0 ]; then
echo "✅ All relevant checks on parent commit have completed."
CHECKS_JSON="$RELEVANT_CHECKS_JSON"
break
fi
echo "Still waiting... ($IN_PROGRESS_COUNT checks in progress, found $FOUND_COUNT of ~${EXPECTED_COUNT}). Elapsed: ${ELAPSED}s"
ELAPSED=$((ELAPSED + INTERVAL))
sleep $INTERVAL
done
if [ -z "$CHECKS_JSON" ]; then
echo "::error::Timed out waiting for parent commit checks to complete after ${TIMEOUT} seconds."
echo "Last fetched checks:"
echo "$PARENT_CHECKS_JSON"
exit 1
fi
# --- Create new checks for the current commit ---
echo "✒️ Copying final check statuses to current commit ($CURRENT_SHA)..."
for JOB_NAME in "${JOBS_TO_COPY[@]}"; do
CHECK=$(echo "${CHECKS_JSON}" | jq --arg name "${JOB_NAME}" '.[] | select(.name == $name)')
if [ -n "$CHECK" ]; then
# Use jq's // operator to provide a default value if .conclusion is null or not present
CONCLUSION=$(echo "${CHECK}" | jq -r '.conclusion // "skipped"')
# Also provide a default for the output object
OUTPUT=$(echo "${CHECK}" | jq -c '.output // {title: "Check Skipped", summary: "The parent check conclusion was indeterminate."}')
BODY=$(jq -n --arg name "$JOB_NAME" --arg head_sha "$CURRENT_SHA" --arg status "completed" --arg conclusion "$CONCLUSION" --argjson output "$OUTPUT" '{name: $name, head_sha: $head_sha, status: $status, conclusion: $conclusion, output: $output}')
echo "Creating check: $JOB_NAME -> $CONCLUSION"
gh api "repos/${{ github.repository }}/check-runs" --method POST --input - <<< "$BODY"
else
echo "Did not find check '${JOB_NAME}' on parent. Creating a skipped check."
OUTPUT=$(jq -n '{title: "Check Skipped", summary: "This check was not found on the parent commit."}')
BODY=$(jq -n --arg name "$JOB_NAME" --arg head_sha "$CURRENT_SHA" --arg status "completed" --arg conclusion "skipped" --argjson output "$OUTPUT" '{name: $name, head_sha: $head_sha, status: $status, conclusion: $conclusion, output: $output}')
gh api "repos/${{ github.repository }}/check-runs" --method POST --input - <<< "$BODY"
fi
done
echo "🎉 All checks copied."
integration-tests:
name: 📊 Integration Tests Summary
if: always() && inputs.has_changes == 'true'
needs:
[
prepare-env,
script-tests,
infra-tests,
unit-tests,
component-tests,
perf-tests,
visual-tests,
]
runs-on: ubuntu-latest
outputs:
script-tests-outcome: ${{ needs.script-tests.result }}
infra-tests-outcome: ${{ needs.infra-tests.result }}
unit-tests-outcome: ${{ needs.unit-tests.result }}
component-tests-outcome: ${{ needs.component-tests.result }}
perf-tests-outcome: ${{ needs.perf-tests.result }}
visual-tests-outcome: ${{ needs.visual-tests.result }}
steps:
- name: Determine Tests Status
shell: bash
run: |
echo "📊 All Integration Tests Summary:"
echo " Script Tests: ${{ needs.script-tests.result }}"
echo " Infra Tests: ${{ needs.infra-tests.result }}"
echo " Unit Tests: ${{ needs.unit-tests.result }}"
echo " Component Tests: ${{ needs.component-tests.result }}"
echo " Perf Tests: ${{ needs.perf-tests.result }}"
echo " Visual Tests: ${{ needs.visual-tests.result }}"
# Fail if any test failed
if [ "${{ needs.script-tests.result }}" = "failure" ] || \
[ "${{ needs.infra-tests.result }}" = "failure" ] || \
[ "${{ needs.unit-tests.result }}" = "failure" ] || \
[ "${{ needs.component-tests.result }}" = "failure" ] || \
[ "${{ needs.perf-tests.result }}" = "failure" ] || \
[ "${{ needs.visual-tests.result }}" = "failure" ]; then
echo "❌ Some integration tests failed"
exit 1
fi
echo "✅ All integration tests passed"
quality-report:
name: 📊 Quality Report
if: always() && inputs.has_changes == 'true' && needs.knip-check.result != 'cancelled' && needs.lint-check.result != 'cancelled' && needs.build-check.result != 'cancelled' && needs.integration-tests.result != 'cancelled'
needs: [prepare-env, knip-check, lint-check, build-check, script-tests, integration-tests]
runs-on: ubuntu-latest
outputs:
failure-report-json: ${{ steps.generate-failure-report.outputs.failure_report_json }}
steps:
- uses: actions/checkout@v4
- name: Download All Artifacts
uses: actions/download-artifact@v7
with:
path: artifacts/
continue-on-error: true
- name: Create test-results directory
run: mkdir -p test-results
- name: Aggregate Check Results
id: aggregate-results
shell: bash
env:
NEEDS_CONTEXT: ${{ toJSON(needs) }}
run: |
FINAL_RESULT="success" # Default to success
HAS_FAILURE=false
HAS_CANCELLATION=false
# Check all job results from the `needs` context
for job_name in $(echo "$NEEDS_CONTEXT" | jq -r 'keys[]'); do
# Skip the prepare-env job as it's just setup
if [[ "$job_name" == "prepare-env" ]]; then
continue
fi
result=$(echo "$NEEDS_CONTEXT" | jq -r --arg jname "$job_name" '.[$jname].result')
if [[ "$result" == "failure" ]]; then
HAS_FAILURE=true
elif [[ "$result" == "cancelled" ]]; then
HAS_CANCELLATION=true
fi
done
if [[ "$HAS_FAILURE" == true ]]; then
FINAL_RESULT="failure"
elif [[ "$HAS_CANCELLATION" == true ]]; then
FINAL_RESULT="cancelled"
fi
echo "final_result=${FINAL_RESULT}" >> $GITHUB_OUTPUT
- name: Generate Failure Report
id: generate-failure-report
if: always()
env:
NEEDS_CONTEXT: ${{ toJSON(needs) }}
SESSION_ID: ${{ needs.prepare-env.outputs.session_id }}
run: |
FAILURE_REPORT_JSON="[]"
# Function to add a failure to the JSON report
add_failure() {
local name=$1
local conclusion=$2
local log_file=$3
local log_content
# Ensure log_content is a valid JSON string even if log file is missing or empty
log_content=$(tail -n 50 "$log_file" 2>/dev/null | jq -s -R . || echo '""')
FAILURE_REPORT_JSON=$(echo "$FAILURE_REPORT_JSON" | jq \
--arg name "$name" \
--arg conclusion "$conclusion" \
--argjson logs "$log_content" \
'. + [{ "name": $name, "conclusion": $conclusion, "logs": $logs }]')
}
# Check standalone jobs
for job in "knip-check" "lint-check" "build-check"; do
# Safely get job result, default to "skipped" if not found
result=$(echo "$NEEDS_CONTEXT" | jq -r --arg jname "$job" '.[$jname].result // "skipped"')
if [[ "$result" == "failure" || "$result" == "cancelled" ]]; then
log_file=""
case $job in
"knip-check") log_file="artifacts/knip-report/knip-output.txt" ;;
"lint-check") log_file="artifacts/lint-report/lint-output.log" ;;
"build-check") log_file="artifacts/build-report/build-output.log" ;;
esac
add_failure "$job" "$result" "$log_file"
fi
done
# Check integration test suites from the outputs
for test_suite_outcome in "infra-tests-outcome" "unit-tests-outcome" "component-tests-outcome" "perf-tests-outcome" "visual-tests-outcome"; do
# Safely get test suite result from nested object, default to "skipped"
result=$(echo "$NEEDS_CONTEXT" | jq -r --arg tname "$test_suite_outcome" '."integration-tests".outputs.[$tname] // "skipped"')
if [[ "$result" == "failure" || "$result" == "cancelled" ]]; then
test_suite_name=${test_suite_outcome%-outcome}
log_file=""
case $test_suite_name in
"infra-tests") log_file="artifacts/infra-test-logs-${SESSION_ID}/logs/infra-prod-output.log" ;;
"unit-tests") log_file="artifacts/unit-test-logs-${SESSION_ID}/logs/unit-output.log" ;;
"component-tests") log_file="artifacts/component-test-logs-${SESSION_ID}/logs/component-output.log" ;;
"perf-tests") log_file="artifacts/perf-test-logs-${SESSION_ID}/logs/perf-output.log" ;;
"visual-tests") log_file="artifacts/visual-test-logs-${SESSION_ID}/logs/visual-output.log" ;;
esac
add_failure "$test_suite_name" "$result" "$log_file"
fi
done
# Use a block to safely append to GITHUB_OUTPUT
{
echo "failure_report_json<<EOF"
echo "$FAILURE_REPORT_JSON"
echo "EOF"
} >> "$GITHUB_OUTPUT"
# Final reporting step: renders a markdown results table (plus failure logs)
# and posts it to the PR when the aggregate result is a failure.
- name: Post Quality Summary with Details
# Run even when upstream steps failed -- that is exactly when the report matters.
if: always()
env:
KNIP_RESULT: ${{ needs.knip-check.result }}
LINT_RESULT: ${{ needs.lint-check.result }}
BUILD_RESULT: ${{ needs.build-check.result }}
# Per-suite outcomes are read from the integration-tests job's outputs.
INFRA_TESTS_OUTCOME: ${{ needs.integration-tests.outputs.infra-tests-outcome }}
UNIT_TESTS_OUTCOME: ${{ needs.integration-tests.outputs.unit-tests-outcome }}
COMPONENT_TESTS_OUTCOME: ${{ needs.integration-tests.outputs.component-tests-outcome }}
PERF_TESTS_OUTCOME: ${{ needs.integration-tests.outputs.perf-tests-outcome }}
VISUAL_TESTS_OUTCOME: ${{ needs.integration-tests.outputs.visual-tests-outcome }}
FINAL_RESULT: ${{ steps.aggregate-results.outputs.final_result }}
# Token consumed by the gh CLI when posting the PR comment.
GH_TOKEN: ${{ secrets.GH_TOKEN }}
run: |
# Best-effort reporting: never fail the workflow over a comment error.
set +e
PR_NUMBER="${{ github.event.pull_request.number }}"
# Skip commenting entirely when not running in a pull-request context.
if [ -z "$PR_NUMBER" ] || [ "$PR_NUMBER" = "null" ]; then
echo "::warning::No Pull Request found. Skipping PR comment."
exit 0
fi
# Function to add log details to the comment body
# Emit a markdown section containing the tail of a log file, with ANSI color
# codes stripped; falls back to a placeholder when the log is missing.
#   $1 - section title, $2 - path to the log file, $3 - max lines to include
add_log_details() {
local section_title="$1" source_log="$2" tail_count="$3"
# The section header and code fence are identical in both branches,
# so emit them once and vary only the fenced content.
echo ""
echo "### ❌ $section_title"
echo '```text'
if [ -f "$source_log" ]; then
# Strip ANSI escape sequences so the markdown renders cleanly.
tail -n "$tail_count" "$source_log" | sed -r 's/\x1b\[[0-9;]*m//g'
else
echo "Log file not found."
fi
echo '```'
}
# Prepare the main comment body: a markdown status table, one row per check.
# The '\n' sequences are literal here; they are expanded later by 'echo -e'.
COMMENT_BODY="## 📋 Quality Gate Results\n\n"
COMMENT_BODY+="| Check | Status |\n"
COMMENT_BODY+="|-------|--------|\n"
# Each row shows a green check on success, otherwise the raw result value.
COMMENT_BODY+="| Knip | $([ "$KNIP_RESULT" = "success" ] && echo '✅ success' || echo '❌ '$KNIP_RESULT) |\n"
COMMENT_BODY+="| Lint | $([ "$LINT_RESULT" = "success" ] && echo '✅ success' || echo '❌ '$LINT_RESULT) |\n"
COMMENT_BODY+="| Build | $([ "$BUILD_RESULT" = "success" ] && echo '✅ success' || echo '❌ '$BUILD_RESULT) |\n"
COMMENT_BODY+="| Infra Tests | $([ "$INFRA_TESTS_OUTCOME" = "success" ] && echo '✅ success' || echo '❌ '$INFRA_TESTS_OUTCOME) |\n"
COMMENT_BODY+="| Unit Tests | $([ "$UNIT_TESTS_OUTCOME" = "success" ] && echo '✅ success' || echo '❌ '$UNIT_TESTS_OUTCOME) |\n"
COMMENT_BODY+="| Component Tests | $([ "$COMPONENT_TESTS_OUTCOME" = "success" ] && echo '✅ success' || echo '❌ '$COMPONENT_TESTS_OUTCOME) |\n"
COMMENT_BODY+="| Perf Tests | $([ "$PERF_TESTS_OUTCOME" = "success" ] && echo '✅ success' || echo '❌ '$PERF_TESTS_OUTCOME) |\n"
COMMENT_BODY+="| Visual Tests | $([ "$VISUAL_TESTS_OUTCOME" = "success" ] && echo '✅ success' || echo '❌ '$VISUAL_TESTS_OUTCOME) |\n"
# Append log details for failed checks. Any non-"success" value (failure,
# cancelled, skipped) gets a log section; missing logs are handled by
# add_log_details itself.
LOG_DETAILS=""
if [ "$KNIP_RESULT" != "success" ]; then
LOG_DETAILS+=$(add_log_details "Knip Failure Details" "artifacts/knip-report/knip-output.txt" 50)
fi
if [ "$LINT_RESULT" != "success" ]; then
LOG_DETAILS+=$(add_log_details "Lint Failure Details" "artifacts/lint-report/lint-output.log" 50)
fi
if [ "$BUILD_RESULT" != "success" ]; then
LOG_DETAILS+=$(add_log_details "Build Failure Details" "artifacts/build-report/build-output.log" 50)
fi
# Test-suite log paths are namespaced by the sanitized session id from prepare-env.
if [ "$INFRA_TESTS_OUTCOME" != "success" ]; then
LOG_DETAILS+=$(add_log_details "Infrastructure Test Failure Details" "artifacts/infra-test-logs-${{ needs.prepare-env.outputs.session_id }}/logs/infra-prod-output.log" 50)
fi
if [ "$UNIT_TESTS_OUTCOME" != "success" ]; then
LOG_DETAILS+=$(add_log_details "Unit Test Failure Details" "artifacts/unit-test-logs-${{ needs.prepare-env.outputs.session_id }}/logs/unit-output.log" 50)
fi
if [ "$COMPONENT_TESTS_OUTCOME" != "success" ]; then
LOG_DETAILS+=$(add_log_details "Component Test Failure Details" "artifacts/component-test-logs-${{ needs.prepare-env.outputs.session_id }}/logs/component-output.log" 50)
fi
if [ "$VISUAL_TESTS_OUTCOME" != "success" ]; then
LOG_DETAILS+=$(add_log_details "Visual Test Failure Details" "artifacts/visual-test-logs-${{ needs.prepare-env.outputs.session_id }}/logs/visual-output.log" 50)
fi
if [ "$PERF_TESTS_OUTCOME" != "success" ]; then
LOG_DETAILS+=$(add_log_details "Performance Test Failure Details" "artifacts/perf-test-logs-${{ needs.prepare-env.outputs.session_id }}/logs/perf-output.log" 50)
fi
# Combine main body and log details (skip the extra newline when empty)
if [ -n "$LOG_DETAILS" ]; then
COMMENT_BODY+="\n${LOG_DETAILS}"
fi
# Determine the closing summary message from the aggregated result.
if [ "$FINAL_RESULT" = "success" ]; then
SUMMARY_MSG="✅ **All quality checks passed!**"
else
SUMMARY_MSG="⚠️ **Some checks failed.** Full logs available in [workflow artifacts](https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }})."
fi
COMMENT_BODY+="\n${SUMMARY_MSG}\n\n---\n> Report generated for commit: \`${{ github.event.pull_request.head.sha || github.sha }}\`"
# Write the final comment to a file. Ensure the target directory exists first:
# under 'set +e' a failed redirect would be silently ignored and gh would then
# be handed a missing --body-file.
mkdir -p test-results
echo -e "$COMMENT_BODY" > test-results/pr-comment.md
# Only post the comment to the PR when there are genuine failures,
# to avoid noise on green runs.
if [ "$FINAL_RESULT" = "failure" ]; then
gh pr comment "$PR_NUMBER" --body-file test-results/pr-comment.md || {
echo "::warning::Failed to post PR comment."
}
else
echo "Skipping PR comment because all checks passed or were cancelled."
fi
# Check if all passed
if [ "$FINAL_RESULT" = "success" ]; then
echo "✅ **All quality checks passed!**" >> $GITHUB_STEP_SUMMARY
else
echo "⚠️ **Some checks failed.** Review the workflow run logs for details." >> $GITHUB_STEP_SUMMARY
fi