
# Package Test Template
#
# This is a TEMPLATE file stored in the tests/ directory - it will not run as a workflow.
# To use it:
# 1. Copy this file to .github/workflows/test-<your-package>.yml
# 2. Replace every occurrence of "Libunwind" with your package name (e.g., "Redis")
# 3. Replace every occurrence of "libunwind" with your package slug (lowercase, e.g., "redis")
# 4. Update the install commands for your package
# 5. Update the version detection command
# 6. Add/modify/remove test steps as needed
# 7. Update package metadata in the JSON generation step
# 8. Uncomment the appropriate trigger(s) in the 'on:' section
#
# See .github/workflows/test-nginx.yml and test-envoy.yml for real examples.
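#
# For example, instantiating the template for a hypothetical "Redis" entry could look
# like this (the template filename below is a placeholder; assumes GNU sed):
#   cp tests/package-test-template.yml .github/workflows/test-redis.yml
#   sed -i 's/Libunwind/Redis/g; s/libunwind/redis/g' .github/workflows/test-redis.yml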

name: Test Libunwind on Arm64

# NOTE: the template itself ships without active triggers; the triggers below are the
# ones used for libunwind. Uncomment workflow_dispatch if you also want manual runs.
on:
  # workflow_dispatch: # Uncomment for manual testing
  workflow_call:
  push:
    branches:
      - main
      - smoke_tests
    paths:
      - 'content/opensource_packages/libunwind.md'
      - '.github/workflows/test-libunwind.yml'
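  # Optional: a periodic re-test could also be scheduled here, e.g. (commented-out
  # sketch; adjust the cron expression as needed):
  # schedule:
  #   - cron: '0 6 * * 1'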

jobs:
  test-libunwind:
    runs-on: ubuntu-24.04-arm
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Set test metadata
        id: metadata
        run: |
          echo "timestamp=$(date -u +%Y-%m-%dT%H:%M:%SZ)" >> $GITHUB_OUTPUT
          echo "package_slug=libunwind" >> $GITHUB_OUTPUT
          echo "dashboard_link=/opensource_packages/libunwind" >> $GITHUB_OUTPUT

      # ============================================================
      # CUSTOMIZE THIS: Install your package
      # ============================================================
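      # If your package is not in the Ubuntu archive, swap the apt-get lines in the step
      # below for your own install method. A hypothetical source-build sketch:
      #   git clone https://github.com/libunwind/libunwind.git && cd libunwind
      #   autoreconf -i && ./configure && make -j"$(nproc)" && sudo make install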
      - name: Install Libunwind
        id: install
        run: |
          echo "Installing Libunwind (libunwind-dev)..."
          sudo apt-get update
          sudo apt-get install -y libunwind-dev
          # Verify installation (checking for library)
          if [ -f "/usr/lib/aarch64-linux-gnu/libunwind.so" ] || [ -f "/usr/lib/libunwind.so" ]; then
            echo "Libunwind installed successfully"
            echo "install_status=success" >> $GITHUB_OUTPUT
          else
            echo "Libunwind installation failed (library not found)"
            echo "install_status=failed" >> $GITHUB_OUTPUT
            exit 1
          fi

      - name: Get Libunwind version
        id: version
        run: |
          VERSION=$(dpkg-query -W -f='${Version}' libunwind-dev 2>/dev/null | cut -d'-' -f1)
          echo "version=${VERSION:-unknown}" >> $GITHUB_OUTPUT
          echo "Detected Libunwind version: ${VERSION:-unknown}"

      - name: Test 1 - Check Libunwind library exists
        id: test1
        continue-on-error: true
        run: |
          START_TIME=$(date +%s)
          # Search in standard and multiarch paths
          if find /usr/lib -name "libunwind.so*" | grep -q "."; then
            echo "✓ Libunwind library found"
            echo "status=passed" >> $GITHUB_OUTPUT
          else
            echo "✗ Libunwind library not found"
            echo "status=failed" >> $GITHUB_OUTPUT
            exit 1
          fi
          END_TIME=$(date +%s)
          echo "duration=$((END_TIME - START_TIME))" >> $GITHUB_OUTPUT

      - name: Test 2 - Check Libunwind header files
        id: test2
        continue-on-error: true
        run: |
          START_TIME=$(date +%s)
          if find /usr/include -name "libunwind.h" | grep -q "."; then
            echo "✓ Libunwind header files found"
            echo "status=passed" >> $GITHUB_OUTPUT
          else
            echo "✗ Libunwind header files not found"
            echo "status=failed" >> $GITHUB_OUTPUT
            exit 1
          fi
          END_TIME=$(date +%s)
          echo "duration=$((END_TIME - START_TIME))" >> $GITHUB_OUTPUT

      - name: Test 3 - Verify Architecture Linkage
        id: test3
        continue-on-error: true
        run: |
          START_TIME=$(date +%s)
          LIB_PATH=$(find /usr/lib -name "libunwind.so*" | head -n 1)
          if [ -n "$LIB_PATH" ] && file "$LIB_PATH" | grep -iq "ARM aarch64\|ARM64"; then
            echo "✓ Library is correctly linked for ARM64"
            echo "status=passed" >> $GITHUB_OUTPUT
          else
            echo "✗ Library architecture mismatch or not found"
            if [ -n "$LIB_PATH" ]; then file "$LIB_PATH"; fi
            echo "status=failed" >> $GITHUB_OUTPUT
            exit 1
          fi
          END_TIME=$(date +%s)
          echo "duration=$((END_TIME - START_TIME))" >> $GITHUB_OUTPUT

      # Add more tests as needed (test4, test5, etc.); a commented-out sketch follows below.
      # Examples:
      # - Run a simple command
      # - Check configuration files
      # - Start/stop a service
      # - Test basic functionality
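      # A commented-out sketch of one more step in the same pattern (the 'mytool --version'
      # command is a placeholder; substitute a command your package actually provides):
      # - name: Test N - Run a simple command
      #   id: testN
      #   continue-on-error: true
      #   run: |
      #     START_TIME=$(date +%s)
      #     if mytool --version; then
      #       echo "✓ Command ran successfully"
      #       echo "status=passed" >> $GITHUB_OUTPUT
      #     else
      #       echo "✗ Command failed"
      #       echo "status=failed" >> $GITHUB_OUTPUT
      #       exit 1
      #     fi
      #     END_TIME=$(date +%s)
      #     echo "duration=$((END_TIME - START_TIME))" >> $GITHUB_OUTPUT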

      - name: Test 4 - Architecture Verification
        id: test4
        continue-on-error: true
        run: |
          START_TIME=$(date +%s)
          echo "Checking system architecture..."
          ARCH=$(uname -m)
          if [ "$ARCH" = "aarch64" ]; then
            echo "✓ System architecture is ARM64 ($ARCH)"
            echo "status=passed" >> $GITHUB_OUTPUT
          else
            echo "✗ System architecture is NOT ARM64 ($ARCH)"
            echo "status=failed" >> $GITHUB_OUTPUT
            exit 1
          fi
          END_TIME=$(date +%s)
          echo "duration=$((END_TIME - START_TIME))" >> $GITHUB_OUTPUT

      - name: Test 5 - Functional Validation (C Compilation Test)
        id: test5
        continue-on-error: true
        run: |
          START_TIME=$(date +%s)
          echo "Creating libunwind test program..."
          cat <<EOF > test_unwind.c
          #include <libunwind.h>
          #include <stdio.h>
          int main() {
              unw_context_t context;
              int ret = unw_getcontext(&context);
              if (ret == 0) {
                  printf("Successfully initialized libunwind context on ARM64\n");
                  return 0;
              }
              return 1;
          }
          EOF
          echo "Compiling and running test program..."
          if gcc test_unwind.c -lunwind -o test_unwind && ./test_unwind; then
            echo "✓ Functional test passed"
            echo "status=passed" >> $GITHUB_OUTPUT
          else
            echo "✗ Functional test failed"
            echo "status=failed" >> $GITHUB_OUTPUT
            exit 1
          fi
          END_TIME=$(date +%s)
          echo "duration=$((END_TIME - START_TIME))" >> $GITHUB_OUTPUT

      # ============================================================
      # UPDATE THIS: Calculate summary based on your number of tests
      # Add/remove test result checks to match your tests above
      # ============================================================
      - name: Calculate test summary
        if: always()
        id: summary
        run: |
          set +e # Disable exit on error for this step
          PASSED=0
          FAILED=0
          TOTAL_DURATION=0

          # Helper function to add a duration safely (ignores empty/non-numeric values)
          add_duration() {
            local val=$1
            if [[ "$val" =~ ^[0-9]+$ ]]; then
              TOTAL_DURATION=$((TOTAL_DURATION + val))
            fi
          }

          # For each test step: count it as passed if it reported status=passed, and as
          # failed if the step itself finished with a failure outcome/conclusion.
          # Check Test 1
          if [ "${{ steps.test1.outputs.status }}" == "passed" ]; then
            PASSED=$((PASSED + 1))
          elif [ "${{ steps.test1.conclusion }}" == "failure" ] || [ "${{ steps.test1.outcome }}" == "failure" ]; then
            FAILED=$((FAILED + 1))
          fi
          add_duration "${{ steps.test1.outputs.duration }}"

          # Check Test 2
          if [ "${{ steps.test2.outputs.status }}" == "passed" ]; then
            PASSED=$((PASSED + 1))
          elif [ "${{ steps.test2.conclusion }}" == "failure" ] || [ "${{ steps.test2.outcome }}" == "failure" ]; then
            FAILED=$((FAILED + 1))
          fi
          add_duration "${{ steps.test2.outputs.duration }}"

          # Check Test 3
          if [ "${{ steps.test3.outputs.status }}" == "passed" ]; then
            PASSED=$((PASSED + 1))
          elif [ "${{ steps.test3.conclusion }}" == "failure" ] || [ "${{ steps.test3.outcome }}" == "failure" ]; then
            FAILED=$((FAILED + 1))
          fi
          add_duration "${{ steps.test3.outputs.duration }}"

          # Check Test 4 - Architecture
          if [ "${{ steps.test4.outputs.status }}" == "passed" ]; then
            PASSED=$((PASSED + 1))
          elif [ "${{ steps.test4.conclusion }}" == "failure" ] || [ "${{ steps.test4.outcome }}" == "failure" ]; then
            FAILED=$((FAILED + 1))
          fi
          add_duration "${{ steps.test4.outputs.duration }}"

          # Check Test 5 - Functional
          if [ "${{ steps.test5.outputs.status }}" == "passed" ]; then
            PASSED=$((PASSED + 1))
          elif [ "${{ steps.test5.conclusion }}" == "failure" ] || [ "${{ steps.test5.outcome }}" == "failure" ]; then
            FAILED=$((FAILED + 1))
          fi
          add_duration "${{ steps.test5.outputs.duration }}"

          echo "passed=$PASSED" >> $GITHUB_OUTPUT
          echo "failed=$FAILED" >> $GITHUB_OUTPUT
          echo "duration=$TOTAL_DURATION" >> $GITHUB_OUTPUT

          # Determine overall status
          if [ $FAILED -eq 0 ] && [ $PASSED -gt 0 ]; then
            echo "overall_status=success" >> $GITHUB_OUTPUT
            echo "badge_status=passing" >> $GITHUB_OUTPUT
          else
            echo "overall_status=failure" >> $GITHUB_OUTPUT
            echo "badge_status=failing" >> $GITHUB_OUTPUT
            # Fail the job when any test failed
            exit 1
          fi

      - name: Generate test results JSON
        if: always()
        run: |
          # Fetch the direct job URL for deep-linking
          JOB_ID="${{ github.job }}"
          # Using GH_TOKEN to find the exact job URL.
          # Reusable workflows are often named "JobID / JobID" or just "JobID"
          JOB_URL=$(GH_TOKEN=${{ github.token }} gh api repos/${{ github.repository }}/actions/runs/${{ github.run_id }}/jobs --jq ".jobs[] | select(.name == \"$JOB_ID / $JOB_ID\" or .name == \"$JOB_ID\") | .html_url" | head -n 1)

          # Fallback if URL calculation fails
          if [ -z "$JOB_URL" ]; then
            JOB_URL="${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}?check_suite_focus=true&query=job:$JOB_ID"
          fi

          mkdir -p test-results
          cat > test-results/libunwind.json << EOF
          {
            "schema_version": "1.0",
            "package": {
              "name": "Libunwind",
              "version": "${{ steps.version.outputs.version }}",
              "language": "Compilers/Tools",
              "category": "Compilers/Tools"
            },
            "run": {
              "id": "${{ github.run_id }}",
              "url": "$JOB_URL",
              "timestamp": "${{ steps.metadata.outputs.timestamp }}",
              "status": "${{ steps.summary.outputs.overall_status }}",
              "runner": {
                "os": "ubuntu-24.04",
                "arch": "arm64"
              }
            },
            "tests": {
              "passed": ${{ steps.summary.outputs.passed }},
              "failed": ${{ steps.summary.outputs.failed }},
              "skipped": 0,
              "duration_seconds": ${{ steps.summary.outputs.duration || 0 }},
              "details": [
                {
                  "name": "Check libunwind library exists",
                  "status": "${{ steps.test1.outputs.status || 'skipped' }}",
                  "duration_seconds": ${{ steps.test1.outputs.duration || 0 }}
                },
                {
                  "name": "Check libunwind header files",
                  "status": "${{ steps.test2.outputs.status || 'skipped' }}",
                  "duration_seconds": ${{ steps.test2.outputs.duration || 0 }}
                },
                {
                  "name": "Verify Architecture Linkage",
                  "status": "${{ steps.test3.outputs.status || 'skipped' }}",
                  "duration_seconds": ${{ steps.test3.outputs.duration || 0 }}
                },
                {
                  "name": "Architecture Verification",
                  "status": "${{ steps.test4.outputs.status || 'skipped' }}",
                  "duration_seconds": ${{ steps.test4.outputs.duration || 0 }}
                },
                {
                  "name": "Functional Validation",
                  "status": "${{ steps.test5.outputs.status || 'skipped' }}",
                  "duration_seconds": ${{ steps.test5.outputs.duration || 0 }}
                }
              ]
            },
            "metadata": {
              "dashboard_link": "${{ steps.metadata.outputs.dashboard_link }}",
              "badge_status": "${{ steps.summary.outputs.badge_status }}"
            }
          }
          EOF

          echo "Generated test results:"
          cat test-results/libunwind.json

      # ============================================================
      # STANDARD STEPS - Usually don't need to modify below here
      # ============================================================
      - name: Upload test results
        if: always()
        uses: actions/upload-artifact@v4
        with:
          name: libunwind-test-results
          path: test-results/libunwind.json
          retention-days: 90

      - name: Create test summary
        if: always()
        run: |
          echo "## Libunwind Test Results" >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY
          echo "- **Version:** ${{ steps.version.outputs.version }}" >> $GITHUB_STEP_SUMMARY
          echo "- **Status:** ${{ steps.summary.outputs.overall_status }}" >> $GITHUB_STEP_SUMMARY
          echo "- **Tests Passed:** ${{ steps.summary.outputs.passed }}" >> $GITHUB_STEP_SUMMARY
          echo "- **Tests Failed:** ${{ steps.summary.outputs.failed }}" >> $GITHUB_STEP_SUMMARY
          echo "- **Duration:** ${{ steps.summary.outputs.duration || 0 }}s" >> $GITHUB_STEP_SUMMARY
          echo "- **Runner:** ubuntu-24.04 (arm64)" >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY
          echo "### Test Details" >> $GITHUB_STEP_SUMMARY
          echo "1. Check libunwind library exists: ${{ steps.test1.outputs.status || 'skipped' }}" >> $GITHUB_STEP_SUMMARY
          echo "2. Check libunwind header files: ${{ steps.test2.outputs.status || 'skipped' }}" >> $GITHUB_STEP_SUMMARY
          echo "3. Verify architecture linkage: ${{ steps.test3.outputs.status || 'skipped' }}" >> $GITHUB_STEP_SUMMARY
          echo "4. Architecture verification: ${{ steps.test4.outputs.status || 'skipped' }}" >> $GITHUB_STEP_SUMMARY
          echo "5. Functional validation (C compilation): ${{ steps.test5.outputs.status || 'skipped' }}" >> $GITHUB_STEP_SUMMARY