
feat: register wasmtime toolchain in MODULE.bazel for centralized ver… #180

Workflow file for this run

name: Performance Monitoring
on:
push:
branches: [ main ]
pull_request:
branches: [ main ]
schedule:
# Run performance tests nightly at 02:00 UTC (GitHub Actions cron is UTC)
- cron: '0 2 * * *'
workflow_dispatch:
inputs:
benchmark_type:
description: 'Type of benchmark to run'
required: false
default: 'all'
type: choice
options:
- all
- tinygo-only
- rust-only
- comparison
env:
# Bazelisk (used by setup-bazel below) reads USE_BAZEL_VERSION to pin Bazel
USE_BAZEL_VERSION: "8.4.0"
jobs:
# Performance benchmarking
performance-benchmark:
name: Performance Benchmark
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [ubuntu-latest, macos-latest]
steps:
- name: Checkout code
uses: actions/checkout@v5
- name: Setup Bazel
uses: bazel-contrib/[email protected]
with:
bazelisk-cache: true
disk-cache: ${{ github.workflow }}
repository-cache: true
- name: Setup Go
uses: actions/setup-go@v6
with:
go-version: '1.23'
- name: Setup TinyGo
uses: acifani/setup-tinygo@v2
with:
tinygo-version: "0.38.0"
- name: Setup Rust
uses: dtolnay/rust-toolchain@stable
with:
toolchain: "1.82.0"
targets: wasm32-wasi
- name: Install Performance Tools
run: |
# Install hyperfine for benchmarking
if [[ "${{ matrix.os }}" == "ubuntu-latest" ]]; then
curl -Lo hyperfine.deb https://github.com/sharkdp/hyperfine/releases/download/v1.18.0/hyperfine_1.18.0_amd64.deb
sudo dpkg -i hyperfine.deb
elif [[ "${{ matrix.os }}" == "macos-latest" ]]; then
brew install hyperfine
fi
# Use wasmtime from Bazel toolchain (registered in MODULE.bazel)
# This ensures version consistency and reuses Bazel's cached download
echo "Extracting wasmtime from Bazel toolchain..."
# Get Bazel output base and wasmtime relative path
OUTPUT_BASE=$(bazel info output_base 2>/dev/null)
WASMTIME_REL=$(bazel cquery --output=files @wasmtime_toolchain//:wasmtime 2>/dev/null | head -1)
WASMTIME_PATH="$OUTPUT_BASE/$WASMTIME_REL"
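# Note: for a source file in an external repo, cquery typically prints a
# path like external/wasmtime_toolchain/wasmtime; external repos live
# directly under the output base, so joining the two parts yields an
# absolute path (the exact layout depends on the toolchain repo's structure)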
if [ -f "$WASMTIME_PATH" ]; then
echo "Found wasmtime at: $WASMTIME_PATH"
sudo cp "$WASMTIME_PATH" /usr/local/bin/wasmtime
sudo chmod +x /usr/local/bin/wasmtime
wasmtime --version
else
echo "Error: Could not find wasmtime binary from Bazel at: $WASMTIME_PATH"
echo "Output base: $OUTPUT_BASE"
echo "Relative path: $WASMTIME_REL"
exit 1
fi
- name: Build All Components
run: |
# Build TinyGo component (regular version)
bazel build //tinygo:file_ops_component
# Build platform-specific AOT precompiled component
if [[ "${{ matrix.os }}" == "ubuntu-latest" ]]; then
bazel build //tinygo:file_ops_aot_linux_x64
elif [[ "${{ matrix.os }}" == "macos-latest" ]]; then
bazel build //tinygo:file_ops_aot_darwin_arm64
fi
# Note: Rust implementation is not yet available
# Future: Add Rust component builds when implemented
- name: Create Performance Test Data
run: |
mkdir -p perf_test_data
# Create test files of various sizes (explicit byte counts keep dd portable
# across GNU/Linux and BSD/macOS, which disagree on bs= size suffixes)
dd if=/dev/zero of=perf_test_data/small.txt bs=1024 count=1 # 1KB
dd if=/dev/zero of=perf_test_data/medium.txt bs=1048576 count=1 # 1MB
dd if=/dev/zero of=perf_test_data/large.txt bs=1048576 count=10 # 10MB
# Create directory with many small files
mkdir -p perf_test_data/many_files
for i in {1..100}; do
echo "File $i content" > perf_test_data/many_files/file_$i.txt
done
# Create JSON batch operation test file
cat > perf_test_data/batch_ops.json <<EOF
{
"operations": [
{"operation": "copy_file", "source": "perf_test_data/small.txt", "destination": "perf_test_data/small_copy.txt"},
{"operation": "read_file", "source": "perf_test_data/medium.txt"},
{"operation": "create_directory", "destination": "perf_test_data/test_dir"},
{"operation": "list_directory", "source": "perf_test_data/many_files"}
]
}
EOF
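# The schema above (an "operations" array of {operation, source, destination}
# objects) is assumed to match the component's process_json_batch contract;
# adjust the keys here if that contract changes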
- name: Benchmark TinyGo Implementation
if: github.event.inputs.benchmark_type == 'all' || github.event.inputs.benchmark_type == 'tinygo-only' || github.event.inputs.benchmark_type == ''
run: |
echo "## TinyGo Performance Benchmarks" >> perf_results.md
echo "| Operation | Time (ms) | Memory (MB) | Notes |" >> perf_results.md
echo "|-----------|-----------|-------------|-------|" >> perf_results.md
# WebAssembly runtime benchmark using the component
if command -v wasmtime &> /dev/null; then
# Test the command first to see if it works
echo "Testing WASM component execution..."
if wasmtime run --dir=. bazel-bin/tinygo/file_ops_component.wasm copy_file --src perf_test_data/small.txt --dest perf_test_data/wasm_copy.txt; then
echo "✅ WASM component test passed, running benchmarks..."
# Run benchmark with proper error handling
hyperfine --export-json tinygo_wasm_benchmark.json \
--warmup 3 \
--show-output \
'wasmtime run --dir=. bazel-bin/tinygo/file_ops_component.wasm copy_file --src perf_test_data/small.txt --dest perf_test_data/wasm_copy.txt' \
|| echo "⚠️ Benchmark failed but continuing" >> perf_results.md
echo "✅ TinyGo component benchmarks completed" >> perf_results.md
else
echo "⚠️ WASM component test failed, skipping benchmarks" >> perf_results.md
echo "Component may not be compatible with the test arguments" >> perf_results.md
fi
else
echo "⚠️ wasmtime not available, skipping runtime benchmarks" >> perf_results.md
fi
- name: Benchmark TinyGo AOT Implementation
if: github.event.inputs.benchmark_type == 'all' || github.event.inputs.benchmark_type == 'tinygo-only' || github.event.inputs.benchmark_type == ''
run: |
echo "" >> perf_results.md
echo "## TinyGo AOT Performance Benchmarks" >> perf_results.md
echo "| Operation | Time (ms) | Memory (MB) | Notes |" >> perf_results.md
echo "|-----------|-----------|-------------|-------|" >> perf_results.md
# Determine AOT file path based on platform
if [[ "${{ matrix.os }}" == "ubuntu-latest" ]]; then
AOT_FILE="bazel-bin/tinygo/file_ops_aot_linux_x64.cwasm"
elif [[ "${{ matrix.os }}" == "macos-latest" ]]; then
AOT_FILE="bazel-bin/tinygo/file_ops_aot_darwin_arm64.cwasm"
else
echo "⚠️ Unsupported platform for AOT benchmarks" >> perf_results.md
exit 0
fi
# WebAssembly AOT runtime benchmark
if command -v wasmtime &> /dev/null; then
# Test the AOT precompiled component first
echo "Testing AOT precompiled component execution..."
if wasmtime run --allow-precompiled --dir=. "$AOT_FILE" copy_file --src perf_test_data/small.txt --dest perf_test_data/wasm_aot_copy.txt; then
echo "✅ AOT component test passed, running benchmarks..."
# Run benchmark comparing regular vs AOT
echo "### AOT vs Regular Comparison" >> perf_results.md
# Benchmark AOT precompiled component
hyperfine --export-json tinygo_aot_benchmark.json \
--warmup 3 \
--show-output \
"wasmtime run --allow-precompiled --dir=. $AOT_FILE copy_file --src perf_test_data/small.txt --dest perf_test_data/wasm_aot_copy.txt" \
|| echo "⚠️ AOT benchmark failed but continuing" >> perf_results.md
# Extract and compare results
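# hyperfine's JSON export has the shape {"results": [{"mean": <seconds>, ...}]},
# so .results[0].mean below is the mean wall-clock time in seconds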
if [ -f tinygo_wasm_benchmark.json ] && [ -f tinygo_aot_benchmark.json ]; then
REGULAR_TIME=$(jq -r '.results[0].mean' tinygo_wasm_benchmark.json 2>/dev/null || echo "N/A")
AOT_TIME=$(jq -r '.results[0].mean' tinygo_aot_benchmark.json 2>/dev/null || echo "N/A")
echo "- **Regular WASM**: ${REGULAR_TIME}s" >> perf_results.md
echo "- **AOT WASM**: ${AOT_TIME}s" >> perf_results.md
# Calculate speedup if both values are available
if [[ "$REGULAR_TIME" != "N/A" ]] && [[ "$AOT_TIME" != "N/A" ]]; then
SPEEDUP=$(echo "scale=2; $REGULAR_TIME / $AOT_TIME" | bc)
echo "- **Speedup**: ${SPEEDUP}x faster with AOT" >> perf_results.md
fi
fi
echo "" >> perf_results.md
echo "✅ TinyGo AOT component benchmarks completed" >> perf_results.md
else
echo "⚠️ AOT component test failed, skipping benchmarks" >> perf_results.md
fi
else
echo "⚠️ wasmtime not available, skipping AOT benchmarks" >> perf_results.md
fi
- name: Benchmark Rust Implementation
if: (github.event.inputs.benchmark_type == 'all' || github.event.inputs.benchmark_type == 'rust-only' || github.event.inputs.benchmark_type == '') && hashFiles('rust/**') != ''
run: |
echo "## Rust Performance Benchmarks" >> perf_results.md
# File copy benchmarks
hyperfine --export-json rust_copy_benchmark.json \
--setup 'rm -f perf_test_data/rust_copy_target.txt' \
'./bazel-bin/rust/file_ops_rust copy_file --src perf_test_data/small.txt --dest perf_test_data/rust_copy_target.txt'
# JSON batch operation benchmark
hyperfine --export-json rust_batch_benchmark.json \
'./bazel-bin/rust/file_ops_rust process_json_batch --config perf_test_data/batch_ops.json'
# WebAssembly runtime benchmark
if command -v wasmtime &> /dev/null; then
hyperfine --export-json rust_wasm_benchmark.json \
'wasmtime run --dir=. bazel-bin/rust/file_ops_component_wasm.wasm copy_file --src perf_test_data/small.txt --dest perf_test_data/rust_wasm_copy.txt'
fi
- name: Performance Comparison Analysis
if: github.event.inputs.benchmark_type == 'all' || github.event.inputs.benchmark_type == 'comparison' || github.event.inputs.benchmark_type == ''
run: |
# Install jq for JSON processing
if [[ "${{ matrix.os }}" == "ubuntu-latest" ]]; then
sudo apt-get update && sudo apt-get install -y jq
elif [[ "${{ matrix.os }}" == "macos-latest" ]]; then
brew install jq
fi
echo "## 📊 Performance Comparison (${{ matrix.os }})" >> perf_results.md
echo "" >> perf_results.md
# Compare TinyGo vs Rust performance
if [ -f tinygo_wasm_benchmark.json ] && [ -f rust_copy_benchmark.json ]; then
TINYGO_TIME=$(jq -r '.results[0].mean' tinygo_wasm_benchmark.json)
RUST_TIME=$(jq -r '.results[0].mean' rust_copy_benchmark.json)
echo "### File Copy Performance" >> perf_results.md
echo "- TinyGo: ${TINYGO_TIME}s" >> perf_results.md
echo "- Rust: ${RUST_TIME}s" >> perf_results.md
echo "" >> perf_results.md
fi
# Binary size comparison
if [ -f bazel-bin/tinygo/file_ops_component.wasm ]; then
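# stat -c%s is GNU coreutils (Linux); the -f%z fallback covers BSD/macOS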
TINYGO_SIZE=$(stat -c%s bazel-bin/tinygo/file_ops_component.wasm 2>/dev/null || stat -f%z bazel-bin/tinygo/file_ops_component.wasm)
echo "### Binary Size Comparison" >> perf_results.md
echo "- TinyGo WASM: $(echo $TINYGO_SIZE | awk '{print int($1/1024)}') KB" >> perf_results.md
fi
# Rust component not yet implemented
# if [ -f bazel-bin/rust/file_ops_component.wasm ]; then
# RUST_SIZE=$(stat -c%s bazel-bin/rust/file_ops_component.wasm 2>/dev/null || stat -f%z bazel-bin/rust/file_ops_component.wasm)
# echo "- Rust WASM: $(echo $RUST_SIZE | awk '{print int($1/1024)}') KB" >> perf_results.md
# fi
- name: Upload Performance Results
uses: actions/upload-artifact@v4
with:
name: performance-results-${{ matrix.os }}
path: |
perf_results.md
*_benchmark.json
perf_test_data/
retention-days: 30
- name: Comment Performance Results on PR
if: github.event_name == 'pull_request'
uses: actions/github-script@v8
with:
script: |
const fs = require('fs');
if (fs.existsSync('perf_results.md')) {
const results = fs.readFileSync('perf_results.md', 'utf8');
await github.rest.issues.createComment({
issue_number: context.issue.number,
owner: context.repo.owner,
repo: context.repo.repo,
body: `## 🚀 Performance Benchmark Results\n\n${results}\n\n_Results from ${{ matrix.os }}_`
});
}
# Memory and resource usage profiling
resource-profiling:
name: Resource Usage Profiling
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v5
- name: Setup Bazel
uses: bazel-contrib/[email protected]
with:
bazelisk-cache: true
disk-cache: ${{ github.workflow }}
repository-cache: true
- name: Setup Go
uses: actions/setup-go@v6
with:
go-version: '1.23'
- name: Setup TinyGo
uses: acifani/setup-tinygo@v2
with:
tinygo-version: "0.38.0"
- name: Install Profiling Tools
run: |
# Install memory profiling tools
sudo apt-get update
sudo apt-get install -y valgrind time
# Install wasmtime with profiling support
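# (the install script unpacks wasmtime into $HOME/.wasmtime; appending its
# bin directory to GITHUB_PATH makes the binary visible to later steps)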
curl https://wasmtime.dev/install.sh -sSf | bash
echo "$HOME/.wasmtime/bin" >> $GITHUB_PATH
- name: Build Components for Profiling
run: |
bazel build //tinygo:file_ops_component
- name: Profile WebAssembly Component Memory Usage
run: |
echo "## Memory Usage Profiling" >> profile_results.md
echo "" >> profile_results.md
# Create test data
dd if=/dev/zero of=profile_test.dat bs=1M count=5
# Profile TinyGo WebAssembly component
echo "### TinyGo Component (WASM)" >> profile_results.md
/usr/bin/time -v wasmtime run --dir=. bazel-bin/tinygo/file_ops_component.wasm copy_file --src profile_test.dat --dest profile_copy.dat 2>&1 | \
grep -E "(Maximum resident set size|User time|System time)" >> profile_results.md || echo "Profiling data not available" >> profile_results.md
- name: Profile WebAssembly Runtime Memory
run: |
echo "" >> profile_results.md
echo "## WebAssembly Runtime Profiling (Detailed)" >> profile_results.md
echo "" >> profile_results.md
# Profile WASM component memory usage with detailed stats
/usr/bin/time -v wasmtime run --dir=. bazel-bin/tinygo/file_ops_component.wasm copy_file --src profile_test.dat --dest wasm_profile_copy.dat 2>&1 | \
grep -E "(Maximum resident set size|User time|System time)" >> profile_results.md || echo "Detailed profiling data not available" >> profile_results.md
- name: Upload Profiling Results
uses: actions/upload-artifact@v4
with:
name: resource-profiling-results
path: |
profile_results.md
retention-days: 30
# Performance regression detection
regression-detection:
name: Performance Regression Detection
runs-on: ubuntu-latest
if: github.event_name == 'pull_request'
steps:
- name: Checkout PR
uses: actions/checkout@v5
- name: Checkout Base Branch
run: |
git fetch origin ${{ github.base_ref }}
git checkout origin/${{ github.base_ref }}
mkdir -p baseline_results
- name: Setup Tools
uses: bazel-contrib/[email protected]
with:
bazelisk-cache: true
disk-cache: ${{ github.workflow }}-baseline
repository-cache: true
- name: Setup Go
uses: actions/setup-go@v6
with:
go-version: '1.23'
- name: Setup TinyGo
uses: acifani/setup-tinygo@v2
with:
tinygo-version: "0.38.0"
- name: Install Hyperfine and Wasmtime
run: |
# Install hyperfine for benchmarking
curl -Lo hyperfine.deb https://github.com/sharkdp/hyperfine/releases/download/v1.18.0/hyperfine_1.18.0_amd64.deb
sudo dpkg -i hyperfine.deb
# Install wasmtime for WASM component execution
curl https://wasmtime.dev/install.sh -sSf | bash
echo "$HOME/.wasmtime/bin" >> $GITHUB_PATH
- name: Benchmark Baseline Performance
run: |
bazel build //tinygo:file_ops_component
# Create test data
echo "Baseline test data" > baseline_test.txt
# Benchmark baseline using WASM component
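# --warmup 3 executes the command three times before timing, so the page
# cache (and any wasmtime code cache) is warm for both baseline and PR runs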
hyperfine --export-json baseline_results/baseline_benchmark.json \
--warmup 3 \
'wasmtime run --dir=. bazel-bin/tinygo/file_ops_component.wasm copy_file --src baseline_test.txt --dest baseline_copy.txt'
- name: Checkout PR Branch
run: |
git checkout ${{ github.sha }}
mkdir -p pr_results
- name: Benchmark PR Performance
run: |
bazel build //tinygo:file_ops_component
# Benchmark PR changes using WASM component
hyperfine --export-json pr_results/pr_benchmark.json \
--warmup 3 \
'wasmtime run --dir=. bazel-bin/tinygo/file_ops_component.wasm copy_file --src baseline_test.txt --dest pr_copy.txt'
- name: Analyze Performance Regression
run: |
# Install jq for JSON analysis
sudo apt-get update && sudo apt-get install -y jq
BASELINE_TIME=$(jq -r '.results[0].mean' baseline_results/baseline_benchmark.json)
PR_TIME=$(jq -r '.results[0].mean' pr_results/pr_benchmark.json)
# Calculate percentage change
CHANGE=$(echo "scale=2; (($PR_TIME - $BASELINE_TIME) / $BASELINE_TIME) * 100" | bc)
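# Example with hypothetical numbers: baseline 0.040s, PR 0.042s
# -> ((0.042 - 0.040) / 0.040) * 100 = 5.00 (i.e. 5% slower)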
echo "## 📈 Performance Regression Analysis" > regression_report.md
echo "" >> regression_report.md
echo "| Metric | Baseline | PR | Change |" >> regression_report.md
echo "|--------|----------|----| -------|" >> regression_report.md
echo "| Execution Time | ${BASELINE_TIME}s | ${PR_TIME}s | ${CHANGE}% |" >> regression_report.md
echo "" >> regression_report.md
# Check for significant regression (>5% slower)
if (( $(echo "$CHANGE > 5" | bc -l) )); then
echo "⚠️ **Performance Regression Detected**: ${CHANGE}% slower than baseline" >> regression_report.md
echo "PERFORMANCE_REGRESSION=true" >> $GITHUB_ENV
elif (( $(echo "$CHANGE < -5" | bc -l) )); then
echo "✅ **Performance Improvement**: ${CHANGE}% faster than baseline" >> regression_report.md
else
echo "✅ **No Significant Change**: Performance within acceptable range" >> regression_report.md
fi
- name: Comment Regression Analysis
uses: actions/github-script@v8
with:
script: |
const fs = require('fs');
if (fs.existsSync('regression_report.md')) {
const report = fs.readFileSync('regression_report.md', 'utf8');
const comment = await github.rest.issues.createComment({
issue_number: context.issue.number,
owner: context.repo.owner,
repo: context.repo.repo,
body: report
});
// Set as actionable comment if regression detected
if (process.env.PERFORMANCE_REGRESSION === 'true') {
await github.rest.reactions.createForIssueComment({
owner: context.repo.owner,
repo: context.repo.repo,
comment_id: comment.data.id,
content: 'eyes'
});
}
}