Skip to content

Feat/new lookups for DNS #19

Feat/new lookups for DNS

Feat/new lookups for DNS #19

Workflow file for this run

name: 🧪 Run Tests

# NOTE(review): both `pull_request` and `pull_request_target` fire for PRs opened
# from branches in this repository, producing duplicate runs per PR — confirm the
# duplication is intended, or restrict one trigger (e.g. target-only for forks).
on:
  pull_request:
    types: [opened, synchronize, reopened]
  pull_request_target:
    types: [opened, synchronize, reopened]
  push:
    branches: [main]
  workflow_dispatch:

# Cancel superseded runs for the same ref.
# NOTE(review): for `pull_request_target`, github.ref is the *base* branch ref, so
# every such PR run shares one group and cancels the others — consider keying on
# `${{ github.event.pull_request.number || github.ref }}` instead.
concurrency:
  group: ci-${{ github.ref }}
  cancel-in-progress: true

permissions:
  contents: read
  id-token: write # For OIDC with Codecov
  checks: write # For test reporting

env:
  # PR context helpers for conditional logic (string 'true'/'false' at use sites)
  IS_PR: ${{ github.event_name == 'pull_request' || github.event_name == 'pull_request_target' }}
  IS_FORK: ${{ (github.event_name == 'pull_request' || github.event_name == 'pull_request_target') && github.event.pull_request.head.repo.full_name != github.repository }}
  HAS_BOT_TOKEN: ${{ secrets.BOT_TOKEN != '' }}
jobs:
  unit-tests:
    name: Unit Tests
    runs-on: ubuntu-latest
    env:
      NODE_OPTIONS: "--max_old_space_size=8192"
    steps:
      # For PRs via pull_request_target, check out the PR head so the PR's code is
      # what gets tested.
      # NOTE(review): under pull_request_target this runs untrusted fork code
      # (npm install scripts, tests) in a context that can reach secrets — the
      # classic "pwn request" pattern. Confirm mitigations (e.g. required approval
      # for outside collaborators) are in place.
      - uses: actions/checkout@v4
        with:
          ref: ${{ github.event_name == 'pull_request_target' && github.event.pull_request.head.sha || github.ref }}

      - uses: actions/setup-node@v4
        with:
          node-version: 22
          cache: npm

      - run: npm ci

      # Run tests but defer the failure until artifacts are uploaded (final step).
      - name: Run unit tests
        id: test
        run: |
          npx vitest run \
            --coverage \
            --reporter=github-actions \
            --reporter=verbose \
            --reporter=junit \
            --outputFile=test-results.xml
        continue-on-error: true

      - name: Save test result
        if: always()
        run: echo "${{ steps.test.outcome }}" > test-result-unit.txt

      - name: Upload test result
        if: always()
        uses: actions/upload-artifact@v4
        with:
          name: test-result-unit
          path: test-result-unit.txt

      # Skip Codecov for fork PRs, which cannot read CODECOV_TOKEN.
      - name: Upload coverage to Codecov
        if: github.event_name == 'push' || github.event_name == 'workflow_dispatch' || (env.IS_PR == 'true' && env.IS_FORK != 'true')
        uses: codecov/codecov-action@v4
        with:
          token: ${{ secrets.CODECOV_TOKEN }}
          files: ./coverage/lcov.info
          flags: unittests
          name: networking-toolbox
          fail_ci_if_error: false
        continue-on-error: true

      # Needs checks:write; for fork PRs that only works with the bot token.
      - name: Unit Test Report
        id: unit-test-report
        uses: dorny/test-reporter@v1
        if: always() && (env.IS_FORK != 'true' || env.HAS_BOT_TOKEN == 'true')
        with:
          name: Unit Tests
          path: test-results.xml
          reporter: java-junit
          fail-on-error: false
          token: ${{ env.IS_FORK == 'true' && secrets.BOT_TOKEN || secrets.GITHUB_TOKEN }}
        continue-on-error: true

      # Persist reporter counters for the summary job ('0' when the reporter was skipped).
      - name: Save unit test report data
        if: always()
        run: |
          echo "passed=${{ steps.unit-test-report.outputs.passed || '0' }}" > unit-report-data.txt
          echo "failed=${{ steps.unit-test-report.outputs.failed || '0' }}" >> unit-report-data.txt
          echo "skipped=${{ steps.unit-test-report.outputs.skipped || '0' }}" >> unit-report-data.txt
          echo "time=${{ steps.unit-test-report.outputs.time || '0' }}" >> unit-report-data.txt

      - name: Upload unit test report data
        if: always()
        uses: actions/upload-artifact@v4
        with:
          name: unit-report-data
          path: unit-report-data.txt

      - name: Upload coverage artifact
        if: always()
        uses: actions/upload-artifact@v4
        with:
          name: coverage
          path: coverage

      # Surface the deferred test failure so the job status is accurate.
      - name: Fail if tests failed
        if: steps.test.outcome == 'failure'
        run: exit 1
api-tests:
name: API Contract Tests
runs-on: ubuntu-latest
env:
NODE_OPTIONS: "--max_old_space_size=8192"
steps:
# Secure checkout for external PRs
- uses: actions/checkout@v4
with:
# For PRs from forks via pull_request_target, check out the PR head to prevent token exposure
ref: ${{ github.event_name == 'pull_request_target' && github.event.pull_request.head.sha || github.ref }}
- uses: actions/setup-node@v4
with:
node-version: 22
cache: npm
- run: npm ci
- name: Start development server
run: npm run dev -- --port 4175 &
- name: Wait for server
run: |
timeout 30 bash -c 'until curl -f http://localhost:4175/api >/dev/null 2>&1; do sleep 1; done'
- name: Run API contract tests
id: test
run: |
npm run test:api -- \
--reporter=github-actions \
--reporter=verbose \
--reporter=junit \
--outputFile=api-test-results.xml
continue-on-error: true
- name: Save test result
if: always()
run: echo "${{ steps.test.outcome }}" > test-result-api.txt
- name: Upload test result
if: always()
uses: actions/upload-artifact@v4
with:
name: test-result-api
path: test-result-api.txt
- name: API Test Report
id: api-test-report
uses: dorny/test-reporter@v1
if: always() && (env.IS_FORK != 'true' || env.HAS_BOT_TOKEN == 'true')
with:
name: API Contract Tests
path: api-test-results.xml
reporter: java-junit
fail-on-error: false
token: ${{ env.IS_FORK == 'true' && secrets.BOT_TOKEN || secrets.GITHUB_TOKEN }}
continue-on-error: true
- name: Save API test report data
if: always()
run: |
echo "passed=${{ steps.api-test-report.outputs.passed || '0' }}" > api-report-data.txt
echo "failed=${{ steps.api-test-report.outputs.failed || '0' }}" >> api-report-data.txt
echo "skipped=${{ steps.api-test-report.outputs.skipped || '0' }}" >> api-report-data.txt
echo "time=${{ steps.api-test-report.outputs.time || '0' }}" >> api-report-data.txt
- name: Upload API test report data
if: always()
uses: actions/upload-artifact@v4
with:
name: api-report-data
path: api-report-data.txt
- name: Fail if tests failed
if: steps.test.outcome == 'failure'
run: exit 1
e2e-tests:
name: E2E Tests
runs-on: ubuntu-latest
env:
NODE_OPTIONS: "--max_old_space_size=8192"
steps:
# Secure checkout for external PRs
- uses: actions/checkout@v4
with:
# For PRs from forks via pull_request_target, check out the PR head to prevent token exposure
ref: ${{ github.event_name == 'pull_request_target' && github.event.pull_request.head.sha || github.ref }}
- uses: actions/setup-node@v4
with:
node-version: 22
cache: npm
- run: npm ci
- name: Get Playwright version
id: playwright-version
run: echo "version=$(npx playwright --version | awk '{print $2}')" >> $GITHUB_OUTPUT
- uses: actions/cache@v4
id: playwright-cache
with:
path: ~/.cache/ms-playwright
key: playwright-${{ runner.os }}-${{ steps.playwright-version.outputs.version }}-chromium-firefox-${{ hashFiles('**/package-lock.json') }}
- name: Install Playwright
if: steps.playwright-cache.outputs.cache-hit != 'true'
run: npx playwright install --with-deps chromium firefox
- name: Install Playwright deps only
if: steps.playwright-cache.outputs.cache-hit == 'true'
run: npx playwright install-deps chromium firefox
- name: Run e2e tests
id: test
run: npx playwright test
continue-on-error: true
- name: Save test result
if: always()
run: echo "${{ steps.test.outcome }}" > test-result-e2e.txt
- name: Upload test result
if: always()
uses: actions/upload-artifact@v4
with:
name: test-result-e2e
path: test-result-e2e.txt
- name: E2E Test Report
id: e2e-test-report
uses: dorny/test-reporter@v1
if: always() && (env.IS_FORK != 'true' || env.HAS_BOT_TOKEN == 'true')
with:
name: E2E Tests
path: e2e-results.xml
reporter: java-junit
fail-on-error: false
token: ${{ env.IS_FORK == 'true' && secrets.BOT_TOKEN || secrets.GITHUB_TOKEN }}
continue-on-error: true
- name: Save e2e test report data
if: always()
run: |
echo "passed=${{ steps.e2e-test-report.outputs.passed || '0' }}" > e2e-report-data.txt
echo "failed=${{ steps.e2e-test-report.outputs.failed || '0' }}" >> e2e-report-data.txt
echo "skipped=${{ steps.e2e-test-report.outputs.skipped || '0' }}" >> e2e-report-data.txt
echo "time=${{ steps.e2e-test-report.outputs.time || '0' }}" >> e2e-report-data.txt
- name: Upload e2e test report data
if: always()
uses: actions/upload-artifact@v4
with:
name: e2e-report-data
path: e2e-report-data.txt
- name: Upload e2e artifacts
if: always() && steps.test.outcome != 'success'
uses: actions/upload-artifact@v4
with:
name: playwright-report
path: |
playwright-report
test-results
- name: Fail if tests failed
if: steps.test.outcome == 'failure'
run: exit 1
summary:
name: Summary
runs-on: ubuntu-latest
if: always()
needs: [unit-tests, api-tests, e2e-tests]
steps:
- name: Download test results
uses: actions/download-artifact@v4
with:
pattern: test-result-*
merge-multiple: true
- name: Download test report data
uses: actions/download-artifact@v4
with:
pattern: "*-report-data"
merge-multiple: true
- name: Write summary
env:
UNIT_JOB: ${{ needs.unit-tests.result }}
API_JOB: ${{ needs.api-tests.result }}
E2E_JOB: ${{ needs.e2e-tests.result }}
run: |
# Read actual test results from artifacts (or use job result as fallback)
UNIT=$(cat test-result-unit.txt 2>/dev/null || echo "$UNIT_JOB")
API=$(cat test-result-api.txt 2>/dev/null || echo "$API_JOB")
E2E=$(cat test-result-e2e.txt 2>/dev/null || echo "$E2E_JOB")
# Helper functions
em() { case "$1" in success) echo "✅";; failure) echo "❌";; cancelled|skipped) echo "⏭️";; *) echo "❔";; esac; }
line() { r="$1"; n="$2"; cmd="$3"; s="Passing"; [ "$r" = "failure" ] && s="**Failing** - run \`$cmd\`"; [ "$r" = "skipped" ] && s="Skipped"; echo "- $(em "$r") $n: $s"; }
# Parse test report data
parse_report() {
local file=$1
if [ -f "$file" ]; then
source "$file"
echo "passed=${passed:-0} failed=${failed:-0} skipped=${skipped:-0} time=${time:-0}"
else
echo "passed=0 failed=0 skipped=0 time=0"
fi
}
# Count failures
failures=0
[ "$UNIT" = "failure" ] && failures=$((failures + 1))
[ "$API" = "failure" ] && failures=$((failures + 1))
[ "$E2E" = "failure" ] && failures=$((failures + 1))
# Get detailed test data
UNIT_DATA=$(parse_report "unit-report-data.txt")
API_DATA=$(parse_report "api-report-data.txt")
E2E_DATA=$(parse_report "e2e-report-data.txt")
# Parse unit test data
eval "$UNIT_DATA"
UNIT_PASSED=$passed; UNIT_FAILED=$failed; UNIT_SKIPPED=$skipped; UNIT_TIME=$time
# Parse API test data
eval "$API_DATA"
API_PASSED=$passed; API_FAILED=$failed; API_SKIPPED=$skipped; API_TIME=$time
# Parse e2e test data
eval "$E2E_DATA"
E2E_PASSED=$passed; E2E_FAILED=$failed; E2E_SKIPPED=$skipped; E2E_TIME=$time
# Format time helper
format_time() {
local ms=$1
if [ "$ms" -gt 60000 ]; then
echo "$((ms / 60000))m $((ms % 60000 / 1000))s"
elif [ "$ms" -gt 1000 ]; then
echo "$((ms / 1000))s"
else
echo "${ms}ms"
fi
}
{
echo "## 🧪 Test Results"
echo ""
line "$UNIT" "Unit Tests" "npm run test:coverage"
line "$API" "API Contract Tests" "npm run test:api"
line "$E2E" "E2E Tests" "npm run test:e2e"
if [ "$failures" -eq 0 ]; then
echo -e "\n🎉 **All test suites passed!**"
[ "$UNIT" = "success" ] && echo "📈 Coverage report may be available in [Codecov](https://codecov.io)"
else
echo -e "\n⚠️ **$failures test suite(s) failed**"
fi
# Detailed Unit Test Report
if [ "$UNIT_PASSED" != "0" ] || [ "$UNIT_FAILED" != "0" ] || [ "$UNIT_SKIPPED" != "0" ]; then
echo -e "\n### 📊 Unit Test Details"
echo "| Metric | Count |"
echo "|--------|-------|"
[ "$UNIT_PASSED" != "0" ] && echo "| ✅ Passed | $UNIT_PASSED |"
[ "$UNIT_FAILED" != "0" ] && echo "| ❌ Failed | $UNIT_FAILED |"
[ "$UNIT_SKIPPED" != "0" ] && echo "| ⏭️ Skipped | $UNIT_SKIPPED |"
[ "$UNIT_TIME" != "0" ] && echo "| ⏱️ Duration | $(format_time $UNIT_TIME) |"
fi
# Detailed API Test Report
if [ "$API_PASSED" != "0" ] || [ "$API_FAILED" != "0" ] || [ "$API_SKIPPED" != "0" ]; then
echo -e "\n### 🔗 API Contract Test Details"
echo "| Metric | Count |"
echo "|--------|-------|"
[ "$API_PASSED" != "0" ] && echo "| ✅ Passed | $API_PASSED |"
[ "$API_FAILED" != "0" ] && echo "| ❌ Failed | $API_FAILED |"
[ "$API_SKIPPED" != "0" ] && echo "| ⏭️ Skipped | $API_SKIPPED |"
[ "$API_TIME" != "0" ] && echo "| ⏱️ Duration | $(format_time $API_TIME) |"
fi
# Detailed E2E Test Report
if [ "$E2E_PASSED" != "0" ] || [ "$E2E_FAILED" != "0" ] || [ "$E2E_SKIPPED" != "0" ]; then
echo -e "\n### 🎭 E2E Test Details"
echo "| Metric | Count |"
echo "|--------|-------|"
[ "$E2E_PASSED" != "0" ] && echo "| ✅ Passed | $E2E_PASSED |"
[ "$E2E_FAILED" != "0" ] && echo "| ❌ Failed | $E2E_FAILED |"
[ "$E2E_SKIPPED" != "0" ] && echo "| ⏭️ Skipped | $E2E_SKIPPED |"
[ "$E2E_TIME" != "0" ] && echo "| ⏱️ Duration | $(format_time $E2E_TIME) |"
fi
} >> "$GITHUB_STEP_SUMMARY"