Skip to content
This repository was archived by the owner on Jan 29, 2026. It is now read-only.

feat: Implement comprehensive CI/CD modernization with advanced security practices #47

feat: Implement comprehensive CI/CD modernization with advanced security practices

feat: Implement comprehensive CI/CD modernization with advanced security practices #47

Workflow file for this run

# Performance benchmarking workflow: measures startup time, memory usage,
# CLI command latency, and build performance, then publishes a summary.
name: Performance Benchmarks

on:
  pull_request:
    branches: [main, develop]
    # Only benchmark when source or dependency manifests change.
    paths:
      - 'src/**'
      - 'package.json'
      - 'package-lock.json'
  push:
    branches: [main]
  schedule:
    - cron: '0 3 * * 0' # Weekly on Sunday at 3 AM UTC
  # Manual runs can narrow the benchmark to a single suite.
  workflow_dispatch:
    inputs:
      benchmark_type:
        description: 'Type of benchmark to run'
        required: false
        default: 'all'
        type: choice
        options:
          - all
          - startup
          - memory
          - cli
          - build

env:
  # Single Node.js version used by every job; quoted so YAML keeps it a string.
  NODE_VERSION: '20'

# Cancel superseded runs of this workflow for the same ref.
concurrency:
  group: performance-${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

jobs:
startup-benchmark:
name: Startup Performance
runs-on: ubuntu-latest
if: github.event.inputs.benchmark_type == 'startup' || github.event.inputs.benchmark_type == 'all' || github.event.inputs.benchmark_type == ''
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Setup Node.js
uses: actions/setup-node@v4
with:
node-version: ${{ env.NODE_VERSION }}
cache: 'npm'
- name: Install dependencies
run: npm ci
- name: Build project
run: npm run build
- name: Benchmark startup time
run: |
echo "Measuring startup time..."
# Warm up
for i in {1..3}; do
timeout 10s node dist/index.js --help > /dev/null 2>&1 || true
done
# Measure startup times
TIMES=()
for i in {1..10}; do
START=$(date +%s%N)
timeout 10s node dist/index.js --help > /dev/null 2>&1 || true
END=$(date +%s%N)
DURATION=$((($END - $START) / 1000000)) # Convert to milliseconds
TIMES+=($DURATION)
echo "Run $i: ${DURATION}ms"
done
# Calculate average
TOTAL=0
for TIME in "${TIMES[@]}"; do
TOTAL=$((TOTAL + TIME))
done
AVERAGE=$((TOTAL / ${#TIMES[@]}))
echo "Average startup time: ${AVERAGE}ms"
echo "startup_time_ms=$AVERAGE" >> $GITHUB_OUTPUT
# Store results
mkdir -p benchmarks
echo "{\"startup_time_ms\": $AVERAGE, \"runs\": [$(IFS=,; echo "${TIMES[*]}")]}" > benchmarks/startup.json
- name: Upload startup benchmark results
uses: actions/upload-artifact@v4
with:
name: startup-benchmark
path: benchmarks/startup.json
retention-days: 30
memory-benchmark:
name: Memory Usage
runs-on: ubuntu-latest
if: github.event.inputs.benchmark_type == 'memory' || github.event.inputs.benchmark_type == 'all' || github.event.inputs.benchmark_type == ''
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Setup Node.js
uses: actions/setup-node@v4
with:
node-version: ${{ env.NODE_VERSION }}
cache: 'npm'
- name: Install dependencies
run: npm ci
- name: Build project
run: npm run build
- name: Benchmark memory usage
run: |
echo "Measuring memory usage..."
# Create a simple benchmark script
cat > memory_benchmark.js << 'EOF'
const { spawn } = require('child_process');
function measureMemory() {
return new Promise((resolve) => {
const process = spawn('node', ['dist/index.js', '--help'], {
stdio: ['pipe', 'pipe', 'pipe']
});
let maxMemory = 0;
const interval = setInterval(() => {
try {
const usage = process.memoryUsage();
maxMemory = Math.max(maxMemory, usage.rss);
} catch (e) {
// Process might have ended
}
}, 10);
process.on('close', () => {
clearInterval(interval);
resolve(maxMemory);
});
process.on('error', () => {
clearInterval(interval);
resolve(maxMemory);
});
// Force close after 5 seconds
setTimeout(() => {
process.kill();
}, 5000);
});
}
async function runBenchmark() {
const measurements = [];
for (let i = 0; i < 5; i++) {
const memory = await measureMemory();
measurements.push(memory);
console.log(`Run ${i + 1}: ${(memory / 1024 / 1024).toFixed(2)} MB`);
}
const average = measurements.reduce((a, b) => a + b, 0) / measurements.length;
console.log(`Average memory usage: ${(average / 1024 / 1024).toFixed(2)} MB`);
require('fs').writeFileSync('benchmarks/memory.json', JSON.stringify({
average_memory_bytes: Math.round(average),
average_memory_mb: Math.round(average / 1024 / 1024 * 100) / 100,
measurements: measurements
}, null, 2));
}
runBenchmark().catch(console.error);
EOF
mkdir -p benchmarks
node memory_benchmark.js
- name: Upload memory benchmark results
uses: actions/upload-artifact@v4
with:
name: memory-benchmark
path: benchmarks/memory.json
retention-days: 30
  # Times a fixed set of CLI commands and records per-command averages.
  cli-benchmark:
    name: CLI Commands Performance
    runs-on: ubuntu-latest
    # Run for manual 'cli'/'all' dispatches and all non-dispatch events.
    if: github.event.inputs.benchmark_type == 'cli' || github.event.inputs.benchmark_type == 'all' || github.event.inputs.benchmark_type == ''
    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Setup Node.js
        uses: actions/setup-node@v4
        with:
          node-version: ${{ env.NODE_VERSION }}
          cache: 'npm'

      - name: Install dependencies
        run: npm ci

      - name: Build project
        run: npm run build

      - name: Benchmark CLI commands
        run: |
          echo "Benchmarking CLI commands..."
          mkdir -p benchmarks
          # Test common CLI commands
          commands=(
            "--help"
            "--version"
            "hive-mind --help"
            "swarm --help"
          )
          # JSON document is assembled by string concatenation below; keys and
          # values are shell-generated integers/identifiers, then validated by
          # the jq pretty-print at the end.
          results="{"
          for cmd in "${commands[@]}"; do
            echo "Testing: $cmd"
            # Measure execution time
            TIMES=()
            for i in {1..5}; do
              START=$(date +%s%N)
              # NOTE(review): entry point here is dist/cli/index.js while the
              # startup and memory jobs invoke dist/index.js — confirm which
              # path is the real CLI build output.
              # $cmd is deliberately unquoted so multi-word commands
              # (e.g. "swarm --help") word-split into separate arguments.
              timeout 10s node dist/cli/index.js $cmd > /dev/null 2>&1 || true
              END=$(date +%s%N)
              DURATION=$((($END - $START) / 1000000)) # Convert to milliseconds
              TIMES+=($DURATION)
            done
            # Calculate average
            TOTAL=0
            for TIME in "${TIMES[@]}"; do
              TOTAL=$((TOTAL + TIME))
            done
            AVERAGE=$((TOTAL / ${#TIMES[@]}))
            echo " Average: ${AVERAGE}ms"
            # Add to results (escape spaces in command names)
            # e.g. "hive-mind --help" -> "hive_mind___help"
            cmd_key=$(echo "$cmd" | sed 's/ /_/g' | sed 's/-/_/g')
            if [[ "$results" != "{" ]]; then
              results="$results,"
            fi
            results="$results\"$cmd_key\": {\"average_ms\": $AVERAGE, \"runs\": [$(IFS=,; echo "${TIMES[*]}")]}"
          done
          results="$results}"
          echo "$results" > benchmarks/cli.json
          echo "CLI benchmark results:"
          cat benchmarks/cli.json | jq '.'

      - name: Upload CLI benchmark results
        uses: actions/upload-artifact@v4
        with:
          name: cli-benchmark
          path: benchmarks/cli.json
          retention-days: 30
build-benchmark:
name: Build Performance
runs-on: ubuntu-latest
if: github.event.inputs.benchmark_type == 'build' || github.event.inputs.benchmark_type == 'all' || github.event.inputs.benchmark_type == ''
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Setup Node.js
uses: actions/setup-node@v4
with:
node-version: ${{ env.NODE_VERSION }}
cache: 'npm'
- name: Install dependencies
run: npm ci
- name: Benchmark build time
run: |
echo "Benchmarking build performance..."
mkdir -p benchmarks
# Clean build multiple times and measure
TIMES=()
for i in {1..3}; do
echo "Build run $i..."
npm run clean
START=$(date +%s%N)
npm run build
END=$(date +%s%N)
DURATION=$((($END - $START) / 1000000)) # Convert to milliseconds
TIMES+=($DURATION)
echo " Duration: ${DURATION}ms"
done
# Calculate average
TOTAL=0
for TIME in "${TIMES[@]}"; do
TOTAL=$((TOTAL + TIME))
done
AVERAGE=$((TOTAL / ${#TIMES[@]}))
echo "Average build time: ${AVERAGE}ms"
# Store results
echo "{\"average_build_time_ms\": $AVERAGE, \"runs\": [$(IFS=,; echo "${TIMES[*]}")]}" > benchmarks/build.json
# Measure build output size
if [ -d "dist" ]; then
BUILD_SIZE=$(du -sb dist | cut -f1)
echo "Build output size: $BUILD_SIZE bytes"
# Update results with size info
jq ". + {\"build_size_bytes\": $BUILD_SIZE, \"build_size_mb\": $(echo "scale=2; $BUILD_SIZE / 1024 / 1024" | bc)}" benchmarks/build.json > benchmarks/build_tmp.json
mv benchmarks/build_tmp.json benchmarks/build.json
fi
echo "Build benchmark results:"
cat benchmarks/build.json | jq '.'
- name: Upload build benchmark results
uses: actions/upload-artifact@v4
with:
name: build-benchmark
path: benchmarks/build.json
retention-days: 30
performance-summary:
name: Performance Summary
runs-on: ubuntu-latest
needs: [startup-benchmark, memory-benchmark, cli-benchmark, build-benchmark]
if: always()
steps:
- name: Download all benchmark results
uses: actions/download-artifact@v4
with:
path: benchmarks
- name: Generate performance report
run: |
echo "## 🚀 Performance Benchmark Results" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
# Startup performance
if [ -f "benchmarks/startup-benchmark/startup.json" ]; then
STARTUP_TIME=$(jq -r '.startup_time_ms' benchmarks/startup-benchmark/startup.json)
echo "### ⚡ Startup Performance" >> $GITHUB_STEP_SUMMARY
echo "- **Average startup time**: ${STARTUP_TIME}ms" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
fi
# Memory usage
if [ -f "benchmarks/memory-benchmark/memory.json" ]; then
MEMORY_MB=$(jq -r '.average_memory_mb' benchmarks/memory-benchmark/memory.json)
echo "### 🧠 Memory Usage" >> $GITHUB_STEP_SUMMARY
echo "- **Average memory usage**: ${MEMORY_MB} MB" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
fi
# Build performance
if [ -f "benchmarks/build-benchmark/build.json" ]; then
BUILD_TIME=$(jq -r '.average_build_time_ms' benchmarks/build-benchmark/build.json)
BUILD_SIZE_MB=$(jq -r '.build_size_mb // "N/A"' benchmarks/build-benchmark/build.json)
echo "### 🏗️ Build Performance" >> $GITHUB_STEP_SUMMARY
echo "- **Average build time**: ${BUILD_TIME}ms" >> $GITHUB_STEP_SUMMARY
echo "- **Build output size**: ${BUILD_SIZE_MB} MB" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
fi
# CLI performance
if [ -f "benchmarks/cli-benchmark/cli.json" ]; then
echo "### 💻 CLI Performance" >> $GITHUB_STEP_SUMMARY
jq -r 'to_entries[] | "- **\(.key)**: \(.value.average_ms)ms"' benchmarks/cli-benchmark/cli.json >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
fi
echo "📊 Detailed results are available in the workflow artifacts." >> $GITHUB_STEP_SUMMARY
- name: Comment PR with performance results
if: github.event_name == 'pull_request'
uses: actions/github-script@v7
with:
script: |
const fs = require('fs');
let comment = '## 🚀 Performance Benchmark Results\n\n';
try {
if (fs.existsSync('benchmarks/startup-benchmark/startup.json')) {
const startup = JSON.parse(fs.readFileSync('benchmarks/startup-benchmark/startup.json'));
comment += `### ⚡ Startup Performance\n- **Average startup time**: ${startup.startup_time_ms}ms\n\n`;
}
if (fs.existsSync('benchmarks/memory-benchmark/memory.json')) {
const memory = JSON.parse(fs.readFileSync('benchmarks/memory-benchmark/memory.json'));
comment += `### 🧠 Memory Usage\n- **Average memory usage**: ${memory.average_memory_mb} MB\n\n`;
}
if (fs.existsSync('benchmarks/build-benchmark/build.json')) {
const build = JSON.parse(fs.readFileSync('benchmarks/build-benchmark/build.json'));
comment += `### 🏗️ Build Performance\n- **Average build time**: ${build.average_build_time_ms}ms\n`;
if (build.build_size_mb) {
comment += `- **Build output size**: ${build.build_size_mb} MB\n`;
}
comment += '\n';
}
comment += '📊 Detailed results are available in the workflow artifacts.';
github.rest.issues.createComment({
issue_number: context.issue.number,
owner: context.repo.owner,
repo: context.repo.repo,
body: comment
});
} catch (error) {
console.log('Error posting comment:', error.message);
}