diff --git a/perf-runner/.dockerignore b/perf-runner/.dockerignore
new file mode 100644
index 0000000..9bfeb33
--- /dev/null
+++ b/perf-runner/.dockerignore
@@ -0,0 +1,15 @@
+# Ignore files not needed in Docker image
+node_modules/
+npm-debug.log
+.git/
+.gitignore
+*.md
+!README.md
+.DS_Store
+*.log
+.env
+.env.local
+.env.development.local
+.env.test.local
+.env.production.local
+run-tests.mjs.original
diff --git a/perf-runner/Dockerfile b/perf-runner/Dockerfile
new file mode 100644
index 0000000..dbd3428
--- /dev/null
+++ b/perf-runner/Dockerfile
@@ -0,0 +1,24 @@
+FROM debian:bullseye-slim
+
+WORKDIR /runner
+
+# Install dependencies
+RUN apt-get update && \
+    apt-get install -y --no-install-recommends \
+    curl \
+    git \
+    ca-certificates \
+    bash \
+    && rm -rf /var/lib/apt/lists/*
+
+# Install NVM (just the tool, no Node yet)
+ENV NVM_DIR=/root/.nvm
+
+RUN curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.39.7/install.sh | bash
+
+# Copy all performance runner files
+COPY run-tests.mjs .
+COPY src/ ./src/
+
+# Default command
+CMD ["bash"]
diff --git a/perf-runner/readme.md b/perf-runner/readme.md
new file mode 100644
index 0000000..f3d71f3
--- /dev/null
+++ b/perf-runner/readme.md
@@ -0,0 +1,284 @@
+# Performance Runner Proposal
+
+> **Proposal Status**: Under Review
+> **Working Group**: Express Performance Testing Working Group
+
+## Overview
+
+This proposal outlines a standardized approach to performance testing for JavaScript packages using containerized test runners. The perf-runner tool aims to provide consistent, reproducible performance testing across different Node.js versions and environments.
+
+## Motivation
+
+Performance testing in JavaScript ecosystems often lacks standardization, making it difficult to compare results across different projects, environments, and Node.js versions. This proposal introduces a Docker-based solution that addresses these challenges by providing:
+
+- Consistent testing environments
+- Multi-version Node.js support
+- Automated CI/CD integration
+- Standardized reporting formats
+
+## Build
+
+```
+docker build -t <dockerhub-username>/perf-runner:latest -f perf-runner/Dockerfile perf-runner
+```
+
+## Publish
+
+```
+docker push <dockerhub-username>/perf-runner:latest
+```
+
+## Proposed Usage
+
+### Steps
+
+1. Set up a CI workflow to run the Docker image
+2. Write your first performance test
+3. Write a template and use it in performance tests
+
+#### Setup CI Workflow
+
+- You should put a valid PAT (Personal Access Token) in your repository secrets as `COMMENTTOKEN` to allow the bot (or a real account) to comment on PRs.
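+
+For example, if you use the GitHub CLI, the secret can be added with the command below (the token value and repository name are placeholders); you can also add it manually under the repository's Settings → Secrets.
+
+```
+gh secret set COMMENTTOKEN --body "<your-pat>" --repo <owner>/<repo>
+```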
+
+`.github/workflows/expf-testing.yml`
+
+```yaml
+name: Performance Test
+
+on:
+  pull_request:
+    branches:
+      - master # or your main branch
+  push:
+    branches:
+      - master # or your main branch
+
+jobs:
+  perf-test:
+    runs-on: ubuntu-latest
+
+    strategy:
+      fail-fast: false
+      matrix:
+        node-version: [18, 19, 20, 21, 22, 23, 24] # Add or remove Node.js versions as needed
+
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@v3
+
+      - name: Setup Node.js ${{ matrix.node-version }}
+        uses: actions/setup-node@v3
+        with:
+          node-version: ${{ matrix.node-version }}
+
+      - name: Build
+        run: npm install
+
+      - name: Pull perf-runner image
+        run: docker pull <dockerhub-username>/perf-runner:latest
+
+      - name: Run perf-runner tests on Node.js ${{ matrix.node-version }}
+        env:
+          NODE_VERSION: ${{ matrix.node-version }}
+          PACKAGE_NAME: body-parser # Replace with your (NPM) package name
+          TEST_DIR: 'expf-tests' # Replace with your test directory name (expf-tests recommended as a standard)
+          REPOSITORY_OWNER: ${{ github.repository_owner }}
+          REPOSITORY: ${{ github.event.repository.name }}
+          PR_ID: ${{ github.event.pull_request.number || '' }}
+          COMMENTTOKEN: ${{ secrets.COMMENTTOKEN }}
+        run: |
+          docker run --rm \
+            -v ${{ github.workspace }}:/app \
+            -e NODE_VERSION=$NODE_VERSION \
+            -e PACKAGE_NAME=$PACKAGE_NAME \
+            -e TEST_DIR=$TEST_DIR \
+            -e PR_ID=$PR_ID \
+            -e COMMENTTOKEN=$COMMENTTOKEN \
+            -e REPOSITORY_OWNER=$REPOSITORY_OWNER \
+            -e REPOSITORY=$REPOSITORY \
+            <dockerhub-username>/perf-runner:latest \
+            bash -c "source /root/.nvm/nvm.sh && \
+              nvm install $NODE_VERSION && \
+              nvm use $NODE_VERSION && \
+              npm install -g autocannon && \
+              node run-tests.mjs"
+```
+
+- Replace `<dockerhub-username>` with your Docker Hub username.
+- The main branch is `master` in this example; change it to your repository's default branch if needed.
+- `PACKAGE_NAME` is the name of the package you want to test.
+- `TEST_DIR` is the directory where your performance tests are located. It is recommended to use `expf-tests` as a standard directory name.
+
+### Write First Performance Test
+
+- Create a new folder in the `expf-tests` directory.
+  - Each **folder** in `expf-tests` is a separate performance test.
+  - Each **file** at the top level of `expf-tests` is a template that tests can use.
+- Each test should have its own `package.json` to separate test dependencies.
+
+- Create a `package.json` file in the new folder with the following content:
+
+/expf-tests/test-sample/package.json
+```json
+{
+  "name": "perf-test-lib---test-sample",
+  "version": "1.0.0",
+  "main": "run-test.mjs",
+  "type": "module",
+  "dependencies": {
+    "autocannon": "^8.0.0",
+    "express": "^5.1.0"
+  }
+}
+```
+
+/expf-tests/test-sample/run-test.mjs
+```javascript
+import autocannon from "autocannon";
+import { argv } from 'process';
+
+const label = argv[2];
+
+async function run() {
+  console.log(`Running performance test with label: ${label}`);
+
+  // Start server
+  const test = await import('./start-server.mjs');
+  const {
+    server,
+    url
+  } = await test.default(label);
+
+  try {
+    const result = await autocannon({
+      url,
+      connections: 10,
+      duration: 5,
+    });
+
+    console.log(autocannon.printResult(result));
+    console.log('Raw Data');
+    console.log('---start:expf-autocanon-data---');
+    console.log(JSON.stringify(result, null, 2));
+    console.log('---end:expf-autocanon-data---');
+
+  } catch (err) {
+    console.error("Autocannon error:", err);
+  } finally {
+    server.close();
+  }
+}
+
+run();
+```
+
+- `run-test.mjs` imports a `start-server.mjs` helper that is not shown above; a sketch of it follows below.
+- As you can see, this approach is fairly verbose; we can improve it with a template.
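+
+For completeness, here is a minimal sketch of the `start-server.mjs` referenced above. This file is not part of the proposal, and the `perf-test-lib` import name is a placeholder for the package under test; it follows the same candidate/latest convention the template below uses.
+
+/expf-tests/test-sample/start-server.mjs
+```javascript
+import { pathToFileURL } from 'url';
+
+// Must export a default async function that returns { server, url }.
+export default async function startServer(label) {
+  // "candidate" loads the local checkout mounted at /app; "latest" loads the published package.
+  const lib = label === 'candidate'
+    ? await import(pathToFileURL('/app/index.js').href)
+    : await import('perf-test-lib');
+
+  const server = lib.http.createServer((req, res) => {
+    res.writeHead(200, { 'Content-Type': 'text/plain' });
+    res.end('Hello, world!\n');
+  });
+
+  const port = 3000;
+  await new Promise((resolve) => server.listen(port, resolve));
+  return { server, url: `http://localhost:${port}` };
+}
+```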
+
+### Write a Template and Use It in Performance Test
+
+- Create a template file (for example `autocannon.mjs`) in the `expf-tests` directory.
+- Autocannon is used as the example template here; you can swap in your own load-testing library.
+
+/expf-tests/autocannon.mjs
+```javascript
+import autocannon from 'autocannon';
+import { argv } from 'process';
+import { pathToFileURL } from 'url';
+
+class PerfTestTemplate {
+  constructor(label, config) {
+    this.label = label;
+    this.server = null;
+    this.config = config;
+    this.url = `http://localhost:${config.port}`;
+    this.lib = null;
+
+    console.log(`Running performance test with label: ${label}`);
+  }
+
+  async loadLib() {
+    if (this.label === 'candidate') {
+      this.lib = await import(pathToFileURL('/app/index.js').href);
+    } else if (this.label === 'latest') {
+      this.lib = await import('perf-test-lib');
+    } else {
+      throw new Error(`Unknown label: ${this.label}`);
+    }
+  }
+
+  async startServer(serverFactory) {
+    await this.loadLib();
+    this.server = serverFactory(this.lib);
+    await new Promise((resolve) => this.server.listen(this.config.port, resolve));
+    console.log(`Server is running at ${this.url}`);
+  }
+
+  async run() {
+    try {
+      const result = await autocannon({
+        url: this.url,
+        connections: 10,
+        duration: 5,
+      });
+
+      console.log(autocannon.printResult(result));
+      return result;
+    } catch (err) {
+      console.error('Autocannon error:', err);
+    }
+  }
+
+  async report(result) {
+    console.log('Raw Data');
+    console.log('---start:expf-autocanon-data---');
+    console.log(JSON.stringify(result, null, 2));
+    console.log('---end:expf-autocanon-data---');
+  }
+
+  async stop() {
+    if (this.server) {
+      this.server.close(() => {
+        console.log('Server closed');
+      });
+    } else {
+      console.warn('No server to close');
+    }
+  }
+
+  static async runTest(serverFactory, config = { port: 3000 }) {
+    const label = argv[2];
+    const test = new PerfTestTemplate(label, config);
+
+    try {
+      await test.startServer(serverFactory);
+      const data = await test.run();
+      await test.report(data);
+      await test.stop();
+    } catch (error) {
+      console.error('Test execution error:', error);
+      await test.stop();
+      process.exit(1);
+    }
+  }
+}
+
+export { PerfTestTemplate };
+```
+
+- In `loadLib`, replace `perf-test-lib` with the name of the package you are testing (your `PACKAGE_NAME`).
+
+Then `run-test.mjs` becomes:
+/expf-tests/test-sample/run-test.mjs
+```javascript
+import { PerfTestTemplate } from './templates/autocannon.mjs';
+
+function createSimpleServer(lib) {
+  return lib.http.createServer((req, res) => {
+    res.writeHead(200, { 'Content-Type': 'text/plain' });
+    res.end('Hello, world!\n');
+  });
+}
+
+PerfTestTemplate.runTest(createSimpleServer);
+```
+
+- Now you can use the `PerfTestTemplate` class to create your performance tests easily.
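+
+The `lib.http.createServer` call above assumes the package under test exposes a Node `http` helper. For an Express-style package, a server factory might look like the following sketch (hypothetical; `createExpressServer` is an illustrative name, and the app is wrapped in a plain `http.Server` so the template's `listen()`/`close()` calls keep working):
+
+```javascript
+import { createServer } from 'http';
+import { PerfTestTemplate } from './templates/autocannon.mjs';
+
+// Hypothetical factory for an Express-style package: `lib` is whatever the
+// template imported (the local candidate build or the latest published release).
+function createExpressServer(lib) {
+  const express = lib.default ?? lib; // handle both ESM default and CJS exports
+  const app = express();
+  app.get('/', (req, res) => res.send('Hello, world!\n'));
+  // Return a plain http.Server so the template can call .listen() and .close() on it.
+  return createServer(app);
+}
+
+PerfTestTemplate.runTest(createExpressServer);
+```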
+
diff --git a/perf-runner/run-tests.mjs b/perf-runner/run-tests.mjs
new file mode 100644
index 0000000..e157062
--- /dev/null
+++ b/perf-runner/run-tests.mjs
@@ -0,0 +1,62 @@
+import { config, validateConfig } from './src/config.mjs';
+import { getTestFolders } from './src/utils.mjs';
+import { runTest } from './src/test-runner.mjs';
+import { compareResults } from './src/result-processor.mjs';
+import { postComment, generatePRComment } from './src/github-api.mjs';
+
+/**
+ * Main orchestration function
+ */
+async function main() {
+  // Validate configuration first
+  validateConfig();
+
+  const testFolders = getTestFolders(`${config.PATH_PREFIX}/${config.TEST_DIR}`);
+  const compareList = [];
+
+  for (const testSubfolder of testFolders) {
+    console.log(`\n--- Starting tests for: ${testSubfolder} ---`);
+
+    const latestResult = await runTest(
+      'latest',
+      `npm install ${config.PACKAGE_NAME}@latest`,
+      testSubfolder
+    );
+
+    const candidateResult = await runTest(
+      'candidate',
+      `npm install ${config.PATH_PREFIX}`,
+      testSubfolder
+    );
+
+    console.log(
+      `\n--- Comparing results for test folder: ${testSubfolder} ---`
+    );
+
+    const { output } = compareResults(
+      testSubfolder,
+      latestResult.resultFile,
+      candidateResult.resultFile
+    );
+
+    compareList.push({
+      testSubfolder,
+      output,
+    });
+  }
+
+  if (config.isPR) {
+    console.log('\n--- Posting PR comment ---');
+
+    const message = generatePRComment(compareList);
+    console.log(`Posting comment: ${message}`);
+
+    await postComment(message);
+    console.log('PR comment posted.');
+  } else {
+    console.log('\n--- No PR comment posted, running in non-PR mode ---');
+  }
+}
+
+// Run the main function and exit non-zero on failure so CI fails too
+main().catch((err) => { console.error(err); process.exit(1); });
diff --git a/perf-runner/src/config.mjs b/perf-runner/src/config.mjs
new file mode 100644
index 0000000..e3cf8ad
--- /dev/null
+++ b/perf-runner/src/config.mjs
@@ -0,0 +1,66 @@
+/**
+ * Configuration and environment variable handling
+ */
+
+export const config = {
+  // Required environment variables
+  PACKAGE_NAME: process.env.PACKAGE_NAME,
+  NODE_VERSION: process.env.NODE_VERSION,
+
+  // Optional environment variables
+  TEST_DIR: process.env.TEST_DIR || 'expf-tests',
+  RESULT_UPLOAD_URL: process.env.RESULT_UPLOAD_URL,
+  PATH_PREFIX: process.env.PATH_PREFIX || '/app',
+
+  // PR-related configuration
+  PR_ID: process.env.PR_ID || '',
+  GITHUB_TOKEN: process.env.COMMENTTOKEN || '',
+  REPOSITORY_OWNER: process.env.REPOSITORY_OWNER || '',
+  REPOSITORY: process.env.REPOSITORY || '',
+
+  // Derived values
+  get isPR() {
+    return !!this.PR_ID;
+  },
+
+  get tempDir() {
+    return '/tmp/perf-test';
+  }
+};
+
+/**
+ * Validate required configuration
+ */
+export function validateConfig() {
+  console.log('PR', {
+    isPR: config.isPR,
+    PR_ID: config.PR_ID,
+    REPOSITORY_OWNER: config.REPOSITORY_OWNER,
+    REPOSITORY: config.REPOSITORY,
+    GITHUB_TOKEN: config.GITHUB_TOKEN
+      ? config.GITHUB_TOKEN.slice(0, 4) + '... (hidden)'
+      : '-NOT FOUND (undefined or null)-',
+  });
+
+  if (!config.PACKAGE_NAME) {
+    throw new Error('PACKAGE_NAME env var not set!');
+  }
+
+  if (!config.NODE_VERSION) {
+    throw new Error('NODE_VERSION env var not set!');
+  }
+
+  if (config.isPR) {
+    if (!config.GITHUB_TOKEN) {
+      throw new Error('COMMENTTOKEN env var must be set for PR runs!');
+    }
+
+    if (!config.REPOSITORY_OWNER) {
+      throw new Error('REPOSITORY_OWNER env var must be set for PR runs!');
+    }
+
+    if (!config.REPOSITORY) {
+      throw new Error('REPOSITORY env var must be set for PR runs!');
+    }
+  }
+}
diff --git a/perf-runner/src/github-api.mjs b/perf-runner/src/github-api.mjs
new file mode 100644
index 0000000..2f250ad
--- /dev/null
+++ b/perf-runner/src/github-api.mjs
@@ -0,0 +1,41 @@
+import { config } from './config.mjs';
+
+/**
+ * Post a comment to a GitHub PR
+ */
+export async function postComment(message) {
+  const url = `https://api.github.com/repos/${config.REPOSITORY_OWNER}/${config.REPOSITORY}/issues/${config.PR_ID}/comments`;
+
+  const response = await fetch(url, {
+    method: 'POST',
+    headers: {
+      Authorization: `Bearer ${config.GITHUB_TOKEN}`,
+      Accept: 'application/vnd.github.v3+json',
+      'User-Agent': 'perf-bot',
+    },
+    body: JSON.stringify({ body: message }),
+  });
+
+  if (!response.ok) {
+    const text = await response.text();
+    console.error(`Failed to post comment: ${response.status} ${text}`);
+  } else {
+    const data = await response.json();
+    console.log('Comment posted:', data.html_url);
+  }
+}
+
+/**
+ * Generate PR comment message from comparison results
+ */
+export function generatePRComment(compareList) {
+  let message = '[This comment is auto-generated by the perf runner]\n\n';
+  message += `## Performance Comparison for PR #${config.PR_ID}, Node.js ${config.NODE_VERSION}\n\n`;
+
+  compareList.forEach(({ testSubfolder, output }) => {
+    message += `### Test Folder: ${testSubfolder}\n\n`;
+    message += output.trim() + '\n\n';
+  });
+
+  return message;
+}
diff --git a/perf-runner/src/result-processor.mjs b/perf-runner/src/result-processor.mjs
new file mode 100644
index 0000000..01170a3
--- /dev/null
+++ b/perf-runner/src/result-processor.mjs
@@ -0,0 +1,137 @@
+import { readFileSync } from 'fs';
+import { config } from './config.mjs';
+import { formatTable } from './utils.mjs';
+
+/**
+ * Compare performance results between latest and candidate versions
+ */
+export function compareResults(testSubfolder, latestFile, candidateFile) {
+  console.log(`\n--- Comparing results for: ${testSubfolder} ---`);
+  console.log(`Latest result file: ${latestFile}`);
+  console.log(`Candidate result file: ${candidateFile}`);
+
+  const latest = JSON.parse(
+    readFileSync(`${config.tempDir}-latest-${testSubfolder}/${latestFile}`, 'utf8')
+  );
+  const candidate = JSON.parse(
+    readFileSync(`${config.tempDir}-candidate-${testSubfolder}/${candidateFile}`, 'utf8')
+  );
+
+  const latestTime = latest.serverResults.executionTimeMs;
+  const candidateTime = candidate.serverResults.executionTimeMs;
+
+  console.log(`\n## 📊 Performance Comparison (Node.js ${config.NODE_VERSION})\n`);
+
+  // Prepare table data
+  const tableData = [];
+
+  // Execution time comparison
+  const timeDiff = latestTime - candidateTime;
+  const timeRatio = latestTime / candidateTime; // Lower is better, so latest/candidate
+  const timeDiffFormatted = timeDiff > 0 ? `+${timeDiff.toFixed(2)} ms` : `${timeDiff.toFixed(2)} ms`;
+  const timeStatus = timeDiff > 0 ? '✅ Improved' : timeDiff < 0 ? '❌ Regressed' : '✅ Unchanged';
+
+  tableData.push([
+    'Execution Time',
+    `${latestTime.toFixed(2)} ms`,
+    `${candidateTime.toFixed(2)} ms`,
+    timeDiffFormatted,
+    `×${timeRatio.toFixed(2)}`,
+    timeStatus,
+  ]);
+
+  // Check if both have autocannon data
+  const hasLatestAutocannon = hasAutocannonData(latest);
+  const hasCandidateAutocannon = hasAutocannonData(candidate);
+
+  if (hasLatestAutocannon && hasCandidateAutocannon) {
+    addAutocannonComparisons(tableData, latest, candidate);
+  }
+
+  // Output markdown table
+  const headers = [
+    'Metric',
+    'Latest',
+    'Candidate',
+    'Difference',
+    'Ratio',
+    'Status',
+  ];
+
+  let output = formatTable(headers, tableData);
+
+  if (!hasLatestAutocannon || !hasCandidateAutocannon) {
+    output += '\n*Note: Autocannon data not available for comparison*\n';
+  }
+
+  console.log(output);
+  return { output };
+}
+
+/**
+ * Check if result has autocannon data
+ */
+function hasAutocannonData(result) {
+  return result.clientResults &&
+    result.clientResults.latency &&
+    result.clientResults.requestsPerSecond;
+}
+
+/**
+ * Add autocannon-specific comparisons to table data
+ */
+function addAutocannonComparisons(tableData, latest, candidate) {
+  // Latency comparison
+  const latestLatency = latest.clientResults.latency.averageMs;
+  const candidateLatency = candidate.clientResults.latency.averageMs;
+  const latencyDiff = latestLatency - candidateLatency;
+  const latencyRatio = latestLatency / candidateLatency; // Lower is better
+  const latencyDiffFormatted = latencyDiff > 0 ? `+${latencyDiff.toFixed(2)} ms` : `${latencyDiff.toFixed(2)} ms`;
+  const latencyStatus = latencyDiff > 0 ? '✅ Improved' : latencyDiff < 0 ? '❌ Regressed' : '✅ Unchanged';
+
+  tableData.push([
+    'Average Latency',
+    `${latestLatency.toFixed(2)} ms`,
+    `${candidateLatency.toFixed(2)} ms`,
+    latencyDiffFormatted,
+    `×${latencyRatio.toFixed(2)}`,
+    latencyStatus,
+  ]);
+
+  // Requests per second comparison
+  const latestRps = latest.clientResults.requestsPerSecond;
+  const candidateRps = candidate.clientResults.requestsPerSecond;
+  const rpsDiff = candidateRps - latestRps;
+  const rpsRatio = candidateRps / latestRps; // Higher is better
+  const rpsDiffFormatted = rpsDiff > 0 ? `+${rpsDiff.toFixed(2)} rps` : `${rpsDiff.toFixed(2)} rps`;
+  const rpsStatus = rpsDiff > 0 ? '✅ Improved' : rpsDiff < 0 ? '❌ Regressed' : '✅ Unchanged';
+
+  tableData.push([
+    'Requests/Second',
+    `${latestRps.toFixed(2)} rps`,
+    `${candidateRps.toFixed(2)} rps`,
+    rpsDiffFormatted,
+    `×${rpsRatio.toFixed(2)}`,
+    rpsStatus,
+  ]);
+
+  // Errors comparison (if available)
+  if (latest.clientResults.errors !== undefined &&
+    candidate.clientResults.errors !== undefined) {
+    const latestErrors = latest.clientResults.errors;
+    const candidateErrors = candidate.clientResults.errors;
+    const errorsDiff = candidateErrors - latestErrors;
+    const errorsRatio = candidateErrors > 0 ? latestErrors / candidateErrors : 'N/A'; // Lower is better; avoid dividing by zero
+    const errorsDiffFormatted = errorsDiff > 0 ? `+${errorsDiff}` : `${errorsDiff}`;
+    const errorsStatus = errorsDiff < 0 ? '✅ Improved' : errorsDiff > 0 ? '❌ Regressed' : '✅ Unchanged';
+
+    tableData.push([
+      'Errors',
+      latestErrors.toString(),
+      candidateErrors.toString(),
+      errorsDiffFormatted,
+      typeof errorsRatio === 'number' ? `×${errorsRatio.toFixed(2)}` : errorsRatio,
+      errorsStatus,
+    ]);
+  }
+}
diff --git a/perf-runner/src/test-runner.mjs b/perf-runner/src/test-runner.mjs
new file mode 100644
index 0000000..903c5dc
--- /dev/null
+++ b/perf-runner/src/test-runner.mjs
@@ -0,0 +1,116 @@
+import { execSync } from 'child_process';
+import { writeFileSync } from 'fs';
+import os from 'os';
+import { performance } from 'perf_hooks';
+import { config } from './config.mjs';
+
+/**
+ * Run a performance test for a specific configuration
+ */
+export async function runTest(label, installCommand, testSubfolder) {
+  console.log(
+    `\n--- Running test for: ${label} - test folder: ${testSubfolder} ---`
+  );
+
+  const testTempDir = `${config.tempDir}-${label}-${testSubfolder}`;
+  execSync(`rm -rf ${testTempDir} && mkdir -p ${testTempDir}`, {
+    stdio: 'inherit',
+  });
+
+  process.chdir(testTempDir);
+  // Copy test files to the temp directory
+  execSync(
+    `cp -r ${config.PATH_PREFIX}/${config.TEST_DIR}/${testSubfolder}/* .`,
+    {
+      stdio: 'inherit',
+    }
+  );
+  // Copy all template files to the templates directory
+  execSync('mkdir -p ./templates');
+  execSync(`cp ${config.PATH_PREFIX}/${config.TEST_DIR}/*.mjs ./templates/`, {
+    stdio: 'inherit',
+  });
+
+  execSync('npm i', { stdio: 'inherit' });
+  execSync(installCommand, { stdio: 'inherit' });
+
+  const start = performance.now();
+  const output = execSync(`node run-test.mjs ${label}`, { encoding: 'utf8' });
+  const end = performance.now();
+
+  const result = createResultObject(start, end, output);
+  const filename = `result-${label}-${testSubfolder}-${Date.now()}.json`;
+
+  writeFileSync(filename, JSON.stringify(result, null, 2));
+  console.log(`Saved result to: ${filename}`);
+
+  if (config.RESULT_UPLOAD_URL) {
+    execSync(
+      `curl -X POST -H "Content-Type: application/json" -d @${filename} ${config.RESULT_UPLOAD_URL}`
+    );
+    console.log('Result uploaded.');
+  }
+
+  return {
+    resultFile: filename,
+  };
+}
+
+/**
+ * Create a result object with performance data
+ */
+function createResultObject(start, end, output) {
+  const result = {
+    schemaVersion: '1.0.0',
+    timestamp: Date.now(),
+    runMetadata: {
+      repo: `https://github.com/expressjs/${config.PACKAGE_NAME}`,
+      gitRef: process.env.GIT_REF || 'unknown',
+      toolSettings: {
+        connections: 10,
+        duration: 10,
+      },
+    },
+    serverMetadata: {
+      platform: os.platform(),
+      arch: os.arch(),
+      cpus: os.cpus(),
+      totalmem: os.totalmem(),
+    },
+    clientMetadata: {
+      platform: os.platform(),
+      arch: os.arch(),
+      cpus: os.cpus(),
+    },
+    serverResults: {
+      executionTimeMs: end - start,
+    },
+    clientResults: {
+      latency: {
+        averageMs: 0,
+      },
+    },
+  };
+
+  // Extract autocannon data from output
+  const autocannonData = output.match(
+    /---start:expf-autocanon-data---([\s\S]*?)---end:expf-autocanon-data---/
+  );
+
+  if (!autocannonData) {
+    throw new Error('No autocannon data found in output!');
+  } else {
+    const autocannonOutput = autocannonData[1].trim();
+    try {
+      const parsedData = JSON.parse(autocannonOutput);
+      result.clientResults.latency.averageMs = parsedData.latency.average;
+      result.clientResults.requestsPerSecond =
+        parsedData.requests.total / parsedData.duration; // autocannon reports duration in seconds
+      result.clientResults.errors = parsedData.errors;
+    } catch (error) {
+      console.error('Failed to parse autocannon data:', error);
+    }
+  }
+
+  return result;
+}
diff --git a/perf-runner/src/utils.mjs b/perf-runner/src/utils.mjs
new file mode 100644
index 0000000..e8df13b
--- /dev/null
+++ b/perf-runner/src/utils.mjs
@@ -0,0 +1,47 @@
+import { readdirSync, statSync } from 'fs';
+
+/**
+ * Get all subfolders (tests) inside TEST_DIR
+ */
+export function getTestFolders(path) {
+  return readdirSync(path).filter((name) => {
+    const fullPath = `${path}/${name}`;
+    return statSync(fullPath).isDirectory();
+  });
+}
+
+/**
+ * Create table formatting utilities
+ */
+export function formatTable(headers, tableData) {
+  const colWidths = [20, 12, 12, 12, 12, 12];
+
+  // Calculate actual column widths based on content
+  tableData.forEach((row) => {
+    headers.forEach((header, i) => {
+      const content = row[i];
+      colWidths[i] = Math.max(colWidths[i], content.toString().length + 2);
+    });
+  });
+
+  // Print header
+  let output = '';
+
+  const headerRow = headers
+    .map((header, i) => header.padEnd(colWidths[i]))
+    .join('| ');
+  const separatorRow = colWidths.map((width) => '-'.repeat(width)).join('|-');
+
+  output += `| ${headerRow}|\n`;
+  output += `|-${separatorRow}|\n`;
+
+  // Print data rows
+  tableData.forEach((row) => {
+    const dataRow = row
+      .map((cell, i) => cell.toString().padEnd(colWidths[i]))
+      .join('| ');
+    output += `| ${dataRow}|\n`;
+  });
+
+  return output;
+}
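+
+// Usage sketch (hypothetical values) — formatTable returns a markdown-style table string:
+//
+//   formatTable(
+//     ['Metric', 'Latest', 'Candidate', 'Difference', 'Ratio', 'Status'],
+//     [['Execution Time', '120.00 ms', '110.00 ms', '+10.00 ms', '×1.09', '✅ Improved']]
+//   );
+//
+// produces rows like "| Metric | Latest | ... |" with each column padded to at least 12–20 characters.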