Phase 1: Bulk generate workflows for 697 packages #1
# Package Test Template
#
# This is a TEMPLATE file stored in the tests/ directory - it will not run as a workflow.
# To use it:
# 1. Copy this file to .github/workflows/test-<your-package>.yml
# 2. Replace all H2 placeholders with your package name (e.g., "Redis")
# 3. Replace all h2 placeholders with your package slug (lowercase, e.g., "redis")
# 4. Update the install commands for your package
# 5. Update the version detection command
# 6. Add/modify/remove test steps as needed
# 7. Update package metadata in the JSON generation step
# 8. Uncomment the appropriate trigger(s) in the 'on:' section
#
# See .github/workflows/test-nginx.yml and test-envoy.yml for real examples.
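#
# For example, one hypothetical way to instantiate the template for Redis
# (the template filename below is an assumption - use your actual path):
#
#   sed -e 's/H2/Redis/g' -e 's/h2/redis/g' tests/package-test-template.yml \
#       > .github/workflows/test-redis.yml
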
name: Test H2 on Arm64

# NOTE: unlike the template, this generated workflow has an active 'push:'
# trigger. Uncomment the other triggers below if you also need them:
on:
  # workflow_dispatch:   # Uncomment for manual testing
  # workflow_call:       # Uncomment if called by other workflows
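  # schedule:            # Uncomment for periodic re-runs; the cadence below is
  #   - cron: '0 6 * * 1'  # an example only (weekly, Monday 06:00 UTC)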
  push:
    branches:
      - main
      - smoke_tests
    paths:
      - 'content/opensource_packages/h2.md'
      - '.github/workflows/test-h2.yml'

jobs:
  test-h2:
    runs-on: ubuntu-24.04-arm
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
      - name: Set test metadata
        id: metadata
        run: |
          echo "timestamp=$(date -u +%Y-%m-%dT%H:%M:%SZ)" >> $GITHUB_OUTPUT
          echo "package_slug=h2" >> $GITHUB_OUTPUT
          echo "dashboard_link=/opensource_packages/h2" >> $GITHUB_OUTPUT
      # ============================================================
      # CUSTOMIZE THIS: Install your package
      # ============================================================
      - name: Install H2
        id: install
        run: |
          echo "Installing H2..."
          # Example for apt packages:
          sudo apt-get update
          sudo apt-get install -y h2
          # Example for downloading binaries:
          # sudo curl -L -o /usr/local/bin/h2 https://github.com/org/repo/releases/download/v1.0.0/h2-linux-arm64
          # sudo chmod +x /usr/local/bin/h2
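          # Example for language-ecosystem packages (hypothetical - substitute
          # the real package name for your ecosystem):
          # python3 -m pip install --user h2
          # npm install -g h2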
          # Verify installation
          if command -v h2 &> /dev/null; then
            echo "H2 installed successfully"
            echo "install_status=success" >> $GITHUB_OUTPUT
          else
            echo "H2 installation failed"
            echo "install_status=failed" >> $GITHUB_OUTPUT
            exit 1
          fi
      # ============================================================
      # CUSTOMIZE THIS: Get the package version
      # ============================================================
      - name: Get H2 version
        id: version
        run: |
          # Adjust this command based on how your package reports its version.
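          # Other common patterns (assumptions - verify against your package):
          #   VERSION=$(h2 version 2>&1 | awk '{print $NF}')
          #   VERSION=$(dpkg -s h2 2>/dev/null | awk '/^Version:/{print $2}')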
          VERSION=$(h2 --version 2>&1 | grep -oP '[0-9.]+' | head -1)
          # A pipeline's exit status is head's (0 even on empty input), so the
          # "unknown" fallback must be applied here, not with '||' on the pipeline:
          echo "version=${VERSION:-unknown}" >> $GITHUB_OUTPUT
          echo "Detected H2 version: ${VERSION:-unknown}"
      # ============================================================
      # ADD YOUR TESTS BELOW
      # Each test should:
      # 1. Have a unique id (test1, test2, etc.)
      # 2. Track start/end time for duration
      # 3. Set status=passed or status=failed
      # 4. Exit 1 on failure
      # ============================================================
      - name: Test 1 - Check h2 binary exists
        id: test1
        run: |
          START_TIME=$(date +%s)
          if command -v h2 &> /dev/null; then
            echo "✓ h2 binary found"
            echo "status=passed" >> $GITHUB_OUTPUT
          else
            echo "✗ h2 binary not found"
            echo "status=failed" >> $GITHUB_OUTPUT
            exit 1
          fi
          END_TIME=$(date +%s)
          echo "duration=$((END_TIME - START_TIME))" >> $GITHUB_OUTPUT
      - name: Test 2 - Check h2 version command
        id: test2
        run: |
          START_TIME=$(date +%s)
          if h2 --version 2>&1 | grep -q "version\|[0-9]"; then
            echo "✓ h2 version command works"
            h2 --version
            echo "status=passed" >> $GITHUB_OUTPUT
          else
            echo "✗ h2 version command failed"
            echo "status=failed" >> $GITHUB_OUTPUT
            exit 1
          fi
          END_TIME=$(date +%s)
          echo "duration=$((END_TIME - START_TIME))" >> $GITHUB_OUTPUT
      - name: Test 3 - Check h2 help output
        id: test3
        run: |
          START_TIME=$(date +%s)
          if h2 --help 2>&1 | grep -qi "usage\|help\|options"; then
            echo "✓ h2 help command works"
            echo "status=passed" >> $GITHUB_OUTPUT
          else
            echo "✗ h2 help command failed"
            echo "status=failed" >> $GITHUB_OUTPUT
            exit 1
          fi
          END_TIME=$(date +%s)
          echo "duration=$((END_TIME - START_TIME))" >> $GITHUB_OUTPUT
      # Add more tests as needed (test4, test5, etc.); a commented sketch
      # follows this list. Examples:
      # - Run a simple command
      # - Check configuration files
      # - Start/stop a service
      # - Test basic functionality
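      # Hypothetical Test 4, following the same pattern ('h2 some-subcommand'
      # is a placeholder - replace it with a real command for your package):
      # - name: Test 4 - Run a simple command
      #   id: test4
      #   run: |
      #     START_TIME=$(date +%s)
      #     if h2 some-subcommand > /dev/null 2>&1; then
      #       echo "✓ h2 command ran"
      #       echo "status=passed" >> $GITHUB_OUTPUT
      #     else
      #       echo "✗ h2 command failed"
      #       echo "status=failed" >> $GITHUB_OUTPUT
      #       exit 1
      #     fi
      #     END_TIME=$(date +%s)
      #     echo "duration=$((END_TIME - START_TIME))" >> $GITHUB_OUTPUT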
      # ============================================================
      # UPDATE THIS: Calculate summary based on your number of tests
      # Add/remove test result checks to match your tests above
      # ============================================================
      - name: Calculate test summary
        if: always()
        id: summary
        run: |
          PASSED=0
          FAILED=0
          TOTAL_DURATION=0
          # Test 1
          if [ "${{ steps.test1.outputs.status }}" == "passed" ]; then
            PASSED=$((PASSED + 1))
          else
            FAILED=$((FAILED + 1))
          fi
          TOTAL_DURATION=$((TOTAL_DURATION + ${{ steps.test1.outputs.duration || 0 }}))
          # Test 2
          if [ "${{ steps.test2.outputs.status }}" == "passed" ]; then
            PASSED=$((PASSED + 1))
          else
            FAILED=$((FAILED + 1))
          fi
          TOTAL_DURATION=$((TOTAL_DURATION + ${{ steps.test2.outputs.duration || 0 }}))
          # Test 3
          if [ "${{ steps.test3.outputs.status }}" == "passed" ]; then
            PASSED=$((PASSED + 1))
          else
            FAILED=$((FAILED + 1))
          fi
          TOTAL_DURATION=$((TOTAL_DURATION + ${{ steps.test3.outputs.duration || 0 }}))
          # Add more checks here if you added test4, test5, etc. above, e.g.:
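          # (hypothetical test4 block, mirroring the ones above)
          # if [ "${{ steps.test4.outputs.status }}" == "passed" ]; then
          #   PASSED=$((PASSED + 1))
          # else
          #   FAILED=$((FAILED + 1))
          # fi
          # TOTAL_DURATION=$((TOTAL_DURATION + ${{ steps.test4.outputs.duration || 0 }}))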
| echo "passed=$PASSED" >> $GITHUB_OUTPUT | |
| echo "failed=$FAILED" >> $GITHUB_OUTPUT | |
| echo "duration=$TOTAL_DURATION" >> $GITHUB_OUTPUT | |
| if [ $FAILED -eq 0 ]; then | |
| echo "overall_status=success" >> $GITHUB_OUTPUT | |
| echo "badge_status=passing" >> $GITHUB_OUTPUT | |
| else | |
| echo "overall_status=failure" >> $GITHUB_OUTPUT | |
| echo "badge_status=failing" >> $GITHUB_OUTPUT | |
| fi | |
      # ============================================================
      # UPDATE THIS: Generate JSON with your package metadata and test details
      # ============================================================
      - name: Generate test results JSON
        if: always()
        run: |
          mkdir -p test-results
          cat > test-results/h2.json << EOF
          {
            "schema_version": "1.0",
            "package": {
              "name": "H2",
              "version": "${{ steps.version.outputs.version }}",
| "language": "Database", | |
| "category": "Database" | |
            },
            "run": {
              "id": "${{ github.run_id }}",
              "url": "${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}",
              "timestamp": "${{ steps.metadata.outputs.timestamp }}",
              "status": "${{ steps.summary.outputs.overall_status }}",
              "runner": {
                "os": "ubuntu-24.04",
                "arch": "arm64"
              }
            },
            "tests": {
              "passed": ${{ steps.summary.outputs.passed }},
              "failed": ${{ steps.summary.outputs.failed }},
              "skipped": 0,
              "duration_seconds": ${{ steps.summary.outputs.duration }},
              "details": [
                {
                  "name": "Check h2 binary exists",
                  "status": "${{ steps.test1.outputs.status }}",
                  "duration_seconds": ${{ steps.test1.outputs.duration || 0 }}
                },
                {
                  "name": "Check h2 version command",
                  "status": "${{ steps.test2.outputs.status }}",
                  "duration_seconds": ${{ steps.test2.outputs.duration || 0 }}
                },
                {
                  "name": "Check h2 help output",
                  "status": "${{ steps.test3.outputs.status }}",
                  "duration_seconds": ${{ steps.test3.outputs.duration || 0 }}
                }
              ]
            },
            "metadata": {
              "dashboard_link": "${{ steps.metadata.outputs.dashboard_link }}",
              "badge_status": "${{ steps.summary.outputs.badge_status }}"
            }
          }
          EOF
          echo "Generated test results:"
          cat test-results/h2.json
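          # Optionally validate the generated JSON (jq is preinstalled on
          # GitHub-hosted runners; this check is an optional suggestion):
          # jq empty test-results/h2.json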
      # ============================================================
      # STANDARD STEPS - Usually don't need to modify below here
      # ============================================================
      - name: Upload test results
        if: always()
        uses: actions/upload-artifact@v4
        with:
          name: h2-test-results
          path: test-results/h2.json
          retention-days: 90
      - name: Commit test results to repository
        if: always() && (github.ref == 'refs/heads/main' || github.ref == 'refs/heads/smoke_tests')
        run: |
          git config --global user.name 'github-actions[bot]'
          git config --global user.email 'github-actions[bot]@users.noreply.github.com'
          mkdir -p data/test-results
          cp test-results/h2.json data/test-results/h2.json
          git add data/test-results/h2.json
          if ! git diff --staged --quiet; then
            git commit -m "Update h2 test results [skip ci]"
            # Pull with rebase and push, retrying up to 5 times
            for i in {1..5}; do
              if git pull --rebase origin ${{ github.ref_name }}; then
                # Rebase succeeded, try to push
                if git push; then
                  echo "Successfully pushed test results"
                  break
                fi
              else
                # Rebase failed, likely due to a conflict
                echo "Rebase failed, resolving conflicts..."
                # Keep our new test results. During a rebase, '--theirs' refers
                # to the commit being replayed (our new results); '--ours' would
                # take the upstream version instead.
                git checkout --theirs data/test-results/h2.json
                git add data/test-results/h2.json
                # Continue the rebase non-interactively
                GIT_EDITOR=true git rebase --continue || true
              fi
              # Wait before retrying
              echo "Retry attempt $i of 5..."
              sleep $((i * 2))
            done
          else
            echo "No changes to commit"
          fi
      - name: Create test summary
        if: always()
        run: |
          echo "## H2 Test Results" >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY
          echo "- **Version:** ${{ steps.version.outputs.version }}" >> $GITHUB_STEP_SUMMARY
          echo "- **Status:** ${{ steps.summary.outputs.overall_status }}" >> $GITHUB_STEP_SUMMARY
          echo "- **Tests Passed:** ${{ steps.summary.outputs.passed }}" >> $GITHUB_STEP_SUMMARY
          echo "- **Tests Failed:** ${{ steps.summary.outputs.failed }}" >> $GITHUB_STEP_SUMMARY
          echo "- **Duration:** ${{ steps.summary.outputs.duration }}s" >> $GITHUB_STEP_SUMMARY
          echo "- **Runner:** ubuntu-24.04 (arm64)" >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY
          echo "### Test Details" >> $GITHUB_STEP_SUMMARY
          echo "1. Check h2 binary exists: ${{ steps.test1.outputs.status }}" >> $GITHUB_STEP_SUMMARY
          echo "2. Check h2 version command: ${{ steps.test2.outputs.status }}" >> $GITHUB_STEP_SUMMARY
          echo "3. Check h2 help output: ${{ steps.test3.outputs.status }}" >> $GITHUB_STEP_SUMMARY