diff --git a/.gitattributes b/.gitattributes index 5e88dc962..55a33f7a1 100644 --- a/.gitattributes +++ b/.gitattributes @@ -2,3 +2,6 @@ # Use LF for all test files to ensure cross-platform consistency packages/swc-plugin-workflow/transform/tests/**/*.js text eol=lf packages/swc-plugin-workflow/transform/tests/**/*.stderr text eol=lf + +# Use bd merge for beads JSONL files +.beads/issues.jsonl merge=beads diff --git a/.github/actions/setup-workflow-dev/action.yml b/.github/actions/setup-workflow-dev/action.yml new file mode 100644 index 000000000..44da12c1b --- /dev/null +++ b/.github/actions/setup-workflow-dev/action.yml @@ -0,0 +1,58 @@ +name: 'Setup Workflow Dev Environment' +description: 'Setup Node.js, pnpm, and optionally Rust for Workflow development. Note: Checkout must be done before calling this action.' + +inputs: + node-version: + description: 'Node.js version to use' + required: false + default: '22.x' + pnpm-version: + description: 'pnpm version to use' + required: false + default: '10.14.0' + setup-rust: + description: 'Whether to setup Rust toolchain' + required: false + default: 'false' + install-dependencies: + description: 'Whether to install dependencies' + required: false + default: 'true' + install-args: + description: 'Additional arguments for pnpm install (e.g., --ignore-scripts)' + required: false + default: '' + build-packages: + description: 'Whether to build packages (excludes workbenches)' + required: false + default: 'true' + +runs: + using: 'composite' + steps: + - name: Setup Rust + if: ${{ inputs.setup-rust == 'true' }} + uses: actions-rust-lang/setup-rust-toolchain@v1 + with: + toolchain: stable + + - name: Setup pnpm + uses: pnpm/action-setup@v3 + with: + version: ${{ inputs.pnpm-version }} + + - name: Setup Node.js ${{ inputs.node-version }} + uses: actions/setup-node@v4 + with: + node-version: ${{ inputs.node-version }} + cache: 'pnpm' + + - name: Install Dependencies + if: ${{ inputs.install-dependencies == 'true' }} + shell: bash + run: pnpm install --frozen-lockfile ${{ inputs.install-args }} + + - name: Build all packages + if: ${{ inputs.build-packages == 'true' }} + shell: bash + run: pnpm turbo run build --filter='!./workbench/*' diff --git a/.github/scripts/aggregate-benchmarks.js b/.github/scripts/aggregate-benchmarks.js index f6b3c2dc9..98dbffb83 100644 --- a/.github/scripts/aggregate-benchmarks.js +++ b/.github/scripts/aggregate-benchmarks.js @@ -17,13 +17,35 @@ for (let i = 0; i < args.length; i++) { } } -// World display config +// World display config - built-in worlds const worldConfig = { local: { emoji: '๐Ÿ’ป', label: 'Local' }, postgres: { emoji: '๐Ÿ˜', label: 'Postgres' }, vercel: { emoji: 'โ–ฒ', label: 'Vercel' }, }; +// Load community worlds from manifest and add to worldConfig +const worldsManifestPath = path.join(__dirname, '../../worlds-manifest.json'); +if (fs.existsSync(worldsManifestPath)) { + try { + const worldsManifest = JSON.parse( + fs.readFileSync(worldsManifestPath, 'utf-8') + ); + for (const world of worldsManifest.worlds || []) { + // Only add community worlds (official ones are already defined above) + if (world.type === 'community') { + worldConfig[world.id] = { + emoji: '๐ŸŒ', + label: world.name, + community: true, + }; + } + } + } catch (e) { + console.error(`Warning: Could not load worlds manifest: ${e.message}`); + } +} + // Framework display config const frameworkConfig = { 'nextjs-turbopack': { label: 'Next.js (Turbopack)' }, @@ -201,7 +223,9 @@ function getAppsAndBackends(data) { function 
isStreamBenchmark(benchData, apps, backends) { for (const app of apps) { for (const backend of backends) { - if (benchData[app]?.[backend]?.firstByteTime !== null) { + const firstByteTime = benchData[app]?.[backend]?.firstByteTime; + // Must be a number (not null or undefined) to be a stream benchmark + if (typeof firstByteTime === 'number') { return true; } } @@ -216,15 +240,24 @@ function renderBenchmarkTable( baselineBenchData, apps, backends, - isStream + isStream, + { showHeading = true } = {} ) { - console.log(`## ${benchName}\n`); + if (showHeading) { + console.log(`## ${benchName}\n`); + } // Collect all data points (including missing ones) for all app/backend combinations const dataPoints = []; const validDataPoints = []; for (const app of apps) { for (const backend of backends) { + // Skip community worlds for non-nextjs-turbopack frameworks (we only test them with nextjs-turbopack) + const isCommunityWorld = worldConfig[backend]?.community === true; + if (isCommunityWorld && app !== 'nextjs-turbopack') { + continue; + } + const metrics = benchData[app]?.[backend]; const baseline = baselineBenchData?.[app]?.[backend] || null; const dataPoint = { app, backend, metrics: metrics || null, baseline }; @@ -264,17 +297,17 @@ function renderBenchmarkTable( // Render table - different columns for stream vs regular benchmarks if (isStream) { console.log( - '| World | Framework | Workflow Time | TTFB | Wall Time | Overhead | vs Fastest |' + '| World | Framework | Workflow Time | TTFB | Wall Time | Overhead | Samples | vs Fastest |' ); console.log( - '|:------|:----------|--------------:|-----:|----------:|---------:|-----------:|' + '|:------|:----------|--------------:|-----:|----------:|---------:|--------:|-----------:|' ); } else { console.log( - '| World | Framework | Workflow Time | Wall Time | Overhead | vs Fastest |' + '| World | Framework | Workflow Time | Wall Time | Overhead | Samples | vs Fastest |' ); console.log( - '|:------|:----------|--------------:|----------:|---------:|-----------:|' + '|:------|:----------|--------------:|----------:|---------:|--------:|-----------:|' ); } @@ -289,11 +322,11 @@ function renderBenchmarkTable( if (!metrics) { if (isStream) { console.log( - `| ${worldInfo.emoji} ${worldInfo.label} | ${frameworkInfo.label} | โš ๏ธ _missing_ | - | - | - | - |` + `| ${worldInfo.emoji} ${worldInfo.label} | ${frameworkInfo.label} | โš ๏ธ _missing_ | - | - | - | - | - |` ); } else { console.log( - `| ${worldInfo.emoji} ${worldInfo.label} | ${frameworkInfo.label} | โš ๏ธ _missing_ | - | - | - |` + `| ${worldInfo.emoji} ${worldInfo.label} | ${frameworkInfo.label} | โš ๏ธ _missing_ | - | - | - | - |` ); } continue; @@ -326,6 +359,9 @@ function renderBenchmarkTable( baseline?.firstByteTime ); + // Format samples count + const samplesCount = metrics.samples ?? '-'; + const currentTime = metrics.workflowTime ?? metrics.wallTime; const factor = isFastest ? 
'1.00x' @@ -333,11 +369,11 @@ function renderBenchmarkTable( if (isStream) { console.log( - `| ${worldInfo.emoji} ${worldInfo.label} | ${medal}${frameworkInfo.label} | ${workflowTimeSec}s${workflowDelta} | ${firstByteSec}s${ttfbDelta} | ${wallTimeSec}s${wallDelta} | ${overheadSec}s | ${factor} |` + `| ${worldInfo.emoji} ${worldInfo.label} | ${medal}${frameworkInfo.label} | ${workflowTimeSec}s${workflowDelta} | ${firstByteSec}s${ttfbDelta} | ${wallTimeSec}s${wallDelta} | ${overheadSec}s | ${samplesCount} | ${factor} |` ); } else { console.log( - `| ${worldInfo.emoji} ${worldInfo.label} | ${medal}${frameworkInfo.label} | ${workflowTimeSec}s${workflowDelta} | ${wallTimeSec}s${wallDelta} | ${overheadSec}s | ${factor} |` + `| ${worldInfo.emoji} ${worldInfo.label} | ${medal}${frameworkInfo.label} | ${workflowTimeSec}s${workflowDelta} | ${wallTimeSec}s${wallDelta} | ${overheadSec}s | ${samplesCount} | ${factor} |` ); } } @@ -363,6 +399,10 @@ function renderComparison(data, baselineData) { ); } + // Split backends into local dev and production + const localDevBackends = backends.filter((b) => b !== 'vercel'); + const productionBackends = backends.filter((b) => b === 'vercel'); + // Separate benchmarks into regular and stream categories const regularBenchmarks = []; const streamBenchmarks = []; @@ -375,39 +415,56 @@ function renderComparison(data, baselineData) { } } - // Render regular benchmarks first - if (regularBenchmarks.length > 0) { - for (const [benchName, benchData] of regularBenchmarks) { - const baselineBenchData = baselineData?.[benchName] || null; + // Helper to render both local dev and production tables for a benchmark + const renderBenchmarkWithEnvironments = (benchName, benchData, isStream) => { + const baselineBenchData = baselineData?.[benchName] || null; + + console.log(`## ${benchName}\n`); + + // Render Local Development table + if (localDevBackends.length > 0) { + console.log('#### ๐Ÿ’ป Local Development\n'); renderBenchmarkTable( benchName, benchData, baselineBenchData, apps, - backends, - false + localDevBackends, + isStream, + { showHeading: false } ); } + + // Render Production table + if (productionBackends.length > 0) { + console.log('#### โ–ฒ Production (Vercel)\n'); + renderBenchmarkTable( + benchName, + benchData, + baselineBenchData, + apps, + productionBackends, + isStream, + { showHeading: false } + ); + } + }; + + // Render regular benchmarks + for (const [benchName, benchData] of regularBenchmarks) { + renderBenchmarkWithEnvironments(benchName, benchData, false); } // Render stream benchmarks in a separate section if (streamBenchmarks.length > 0) { console.log('---\n'); - console.log('### Stream Benchmarks\n'); + console.log('## Stream Benchmarks\n'); console.log( '_Stream benchmarks include Time to First Byte (TTFB) metrics._\n' ); for (const [benchName, benchData] of streamBenchmarks) { - const baselineBenchData = baselineData?.[benchName] || null; - renderBenchmarkTable( - benchName, - benchData, - baselineBenchData, - apps, - backends, - true - ); + renderBenchmarkWithEnvironments(benchName, benchData, true); } } @@ -425,6 +482,12 @@ function renderComparison(data, baselineData) { let fastestApp = null; let fastestTime = Infinity; + // Skip community worlds in framework comparison (they only run against nextjs-turbopack) + const isCommunityWorld = worldConfig[backend]?.community === true; + if (isCommunityWorld) { + continue; + } + for (const app of apps) { const metrics = benchData[app]?.[backend]; if (metrics) { @@ -451,6 +514,12 @@ function 
renderComparison(data, baselineData) { let fastestTime = Infinity; for (const backend of backends) { + // Skip community worlds for non-nextjs-turbopack frameworks + const isCommunityWorld = worldConfig[backend]?.community === true; + if (isCommunityWorld && app !== 'nextjs-turbopack') { + continue; + } + const metrics = benchData[app]?.[backend]; if (metrics) { const time = metrics.workflowTime ?? metrics.wallTime; @@ -479,6 +548,13 @@ function renderComparison(data, baselineData) { console.log('|:------|:---------------------|-----:|'); for (const backend of backends) { + // Skip community worlds in "Fastest Framework by World" summary + // (they only run against nextjs-turbopack, so framework comparison doesn't apply) + const isCommunityWorld = worldConfig[backend]?.community === true; + if (isCommunityWorld) { + continue; + } + const worldInfo = worldConfig[backend] || { emoji: '', label: backend }; const frameworkWins = frameworkWinsByWorld[backend] || {}; @@ -554,14 +630,21 @@ function renderComparison(data, baselineData) { '- **Wall Time**: Total testbench time (trigger workflow + poll for result)' ); console.log('- **Overhead**: Testbench overhead (Wall Time - Workflow Time)'); + console.log('- **Samples**: Number of benchmark iterations run'); console.log( '- **vs Fastest**: How much slower compared to the fastest configuration for this benchmark' ); console.log(''); console.log('**Worlds:**'); - console.log('- ๐Ÿ’ป Local: In-memory filesystem world'); - console.log('- ๐Ÿ˜ Postgres: PostgreSQL database world'); - console.log('- โ–ฒ Vercel: Vercel production world'); + console.log('- ๐Ÿ’ป Local: In-memory filesystem world (local development)'); + console.log('- ๐Ÿ˜ Postgres: PostgreSQL database world (local development)'); + console.log('- โ–ฒ Vercel: Vercel production/preview deployment'); + // Add community worlds to legend + for (const [id, config] of Object.entries(worldConfig)) { + if (config.community) { + console.log(`- ๐ŸŒ ${config.label}: Community world (local development)`); + } + } console.log(''); } diff --git a/.github/scripts/aggregate-e2e-results.js b/.github/scripts/aggregate-e2e-results.js new file mode 100644 index 000000000..e785515fe --- /dev/null +++ b/.github/scripts/aggregate-e2e-results.js @@ -0,0 +1,415 @@ +#!/usr/bin/env node + +const fs = require('fs'); +const path = require('path'); + +// Parse command line arguments +const args = process.argv.slice(2); +let resultsDir = '.'; +let jobName = 'E2E Tests'; +let mode = 'single'; // 'single' for step summary, 'aggregate' for PR comment + +for (let i = 0; i < args.length; i++) { + if (args[i] === '--job-name' && args[i + 1]) { + jobName = args[i + 1]; + i++; + } else if (args[i] === '--mode' && args[i + 1]) { + mode = args[i + 1]; + i++; + } else if (!args[i].startsWith('--')) { + resultsDir = args[i]; + } +} + +// Find all e2e result JSON files +function findResultFiles(dir) { + const files = []; + try { + const entries = fs.readdirSync(dir, { withFileTypes: true }); + for (const entry of entries) { + const fullPath = path.join(dir, entry.name); + if (entry.isDirectory()) { + files.push(...findResultFiles(fullPath)); + } else if ( + entry.name.startsWith('e2e-') && + entry.name.endsWith('.json') + ) { + files.push(fullPath); + } + } + } catch (e) { + // Directory doesn't exist or can't be read + } + return files; +} + +// Parse vitest JSON output +function parseVitestResults(file) { + try { + const content = JSON.parse(fs.readFileSync(file, 'utf-8')); + const results = { + file: path.basename(file), + 
passed: 0, + failed: 0, + skipped: 0, + duration: 0, + failedTests: [], + }; + + // Handle vitest JSON reporter format + if (content.testResults) { + for (const testFile of content.testResults) { + results.duration += testFile.duration || 0; + for (const assertionResult of testFile.assertionResults || []) { + if (assertionResult.status === 'passed') { + results.passed++; + } else if (assertionResult.status === 'failed') { + results.failed++; + results.failedTests.push({ + name: assertionResult.fullName || assertionResult.title, + file: testFile.name, + message: + assertionResult.failureMessages?.join('\n').slice(0, 200) || '', + }); + } else if (assertionResult.status === 'skipped') { + results.skipped++; + } + } + } + } + + return results; + } catch (e) { + console.error(`Warning: Could not parse ${file}: ${e.message}`); + return null; + } +} + +// Parse job info from filename (e.g., e2e-local-dev-nextjs-turbopack.json) +function parseJobInfo(filename) { + // Pattern: e2e-{category}-{app}.json or e2e-{category}-{subcategory}-{app}.json + const base = path.basename(filename, '.json'); + const parts = base.split('-'); + + if (parts.length >= 3) { + // e2e-vercel-prod-nextjs-turbopack -> category: vercel-prod, app: nextjs-turbopack + // e2e-local-dev-nextjs-turbopack -> category: local-dev, app: nextjs-turbopack + // e2e-community-turso -> category: community, app: turso + const categoryEndIndex = parts.findIndex( + (p, i) => + i > 1 && + [ + 'nextjs', + 'nitro', + 'vite', + 'nuxt', + 'sveltekit', + 'hono', + 'express', + 'astro', + 'example', + 'turso', + 'mongodb', + 'redis', + 'starter', + ].some((app) => p.startsWith(app)) + ); + + if (categoryEndIndex > 1) { + return { + category: parts.slice(1, categoryEndIndex).join('-'), + app: parts.slice(categoryEndIndex).join('-'), + }; + } + } + + return { + category: 'other', + app: base, + }; +} + +// Aggregate all results +function aggregateResults(files) { + const summary = { + totalPassed: 0, + totalFailed: 0, + totalSkipped: 0, + totalDuration: 0, + fileResults: [], + allFailedTests: [], + }; + + for (const file of files) { + const results = parseVitestResults(file); + if (results) { + summary.totalPassed += results.passed; + summary.totalFailed += results.failed; + summary.totalSkipped += results.skipped; + summary.totalDuration += results.duration; + summary.fileResults.push(results); + summary.allFailedTests.push(...results.failedTests); + } + } + + return summary; +} + +// Aggregate results grouped by job category +function aggregateByCategory(files) { + const categories = new Map(); + const overallSummary = { + totalPassed: 0, + totalFailed: 0, + totalSkipped: 0, + allFailedTests: [], + }; + + for (const file of files) { + const { category, app } = parseJobInfo(file); + const results = parseVitestResults(file); + + if (!results) continue; + + if (!categories.has(category)) { + categories.set(category, { + name: category, + passed: 0, + failed: 0, + skipped: 0, + apps: [], + failedTests: [], + }); + } + + const cat = categories.get(category); + cat.passed += results.passed; + cat.failed += results.failed; + cat.skipped += results.skipped; + cat.apps.push({ + name: app, + passed: results.passed, + failed: results.failed, + skipped: results.skipped, + }); + cat.failedTests.push( + ...results.failedTests.map((t) => ({ ...t, app, category })) + ); + + overallSummary.totalPassed += results.passed; + overallSummary.totalFailed += results.failed; + overallSummary.totalSkipped += results.skipped; + overallSummary.allFailedTests.push( + 
...results.failedTests.map((t) => ({ ...t, app, category })) + ); + } + + return { categories, overallSummary }; +} + +// Render markdown summary for single job (step summary) +function renderSingleJobSummary(summary) { + const total = + summary.totalPassed + summary.totalFailed + summary.totalSkipped; + const statusEmoji = + summary.totalFailed > 0 ? 'โŒ' : summary.totalSkipped > 0 ? 'โš ๏ธ' : 'โœ…'; + const statusText = + summary.totalFailed > 0 + ? 'Some tests failed' + : summary.totalSkipped > 0 + ? 'All tests passed (some skipped)' + : 'All tests passed'; + + console.log(`## ${statusEmoji} ${jobName}\n`); + console.log(`**Status:** ${statusText}\n`); + + // Summary table + console.log('| Metric | Count |'); + console.log('|:-------|------:|'); + console.log(`| โœ… Passed | ${summary.totalPassed} |`); + console.log(`| โŒ Failed | ${summary.totalFailed} |`); + console.log(`| โญ๏ธ Skipped | ${summary.totalSkipped} |`); + console.log(`| **Total** | **${total}** |`); + console.log(''); + + // Duration + const durationSec = (summary.totalDuration / 1000).toFixed(2); + console.log(`_Duration: ${durationSec}s_\n`); + + // Failed tests details + if (summary.allFailedTests.length > 0) { + console.log('### Failed Tests\n'); + for (const test of summary.allFailedTests) { + console.log(`
<details>`); + console.log(`<summary>โŒ ${test.name}</summary>\n`); + console.log(`**File:** \`${test.file}\`\n`); + if (test.message) { + console.log('```'); + console.log(test.message); + console.log('```'); + } + console.log('</details>
\n'); + } + } + + // Results by file + if (summary.fileResults.length > 1) { + console.log('
<details>'); + console.log('<summary>Results by File</summary>\n'); + console.log('| File | Passed | Failed | Skipped |'); + console.log('|:-----|-------:|-------:|--------:|'); + for (const result of summary.fileResults) { + const fileStatus = + result.failed > 0 ? 'โŒ' : result.skipped > 0 ? 'โš ๏ธ' : 'โœ…'; + console.log( + `| ${fileStatus} ${result.file} | ${result.passed} | ${result.failed} | ${result.skipped} |` + ); + } + console.log('</details>
'); + } +} + +// Category display names +const categoryNames = { + 'vercel-prod': 'โ–ฒ Vercel Production', + 'local-dev': '๐Ÿ’ป Local Development', + 'local-prod': '๐Ÿ“ฆ Local Production', + 'local-postgres': '๐Ÿ˜ Local Postgres', + windows: '๐ŸชŸ Windows', + community: '๐ŸŒ Community Worlds', + other: '๐Ÿ“‹ Other', +}; + +// Category order for display +const categoryOrder = [ + 'vercel-prod', + 'local-dev', + 'local-prod', + 'local-postgres', + 'windows', + 'community', + 'other', +]; + +// Render aggregated PR comment summary +function renderAggregatedSummary(categories, overallSummary) { + const total = + overallSummary.totalPassed + + overallSummary.totalFailed + + overallSummary.totalSkipped; + const statusEmoji = + overallSummary.totalFailed > 0 + ? 'โŒ' + : overallSummary.totalSkipped > 0 + ? 'โš ๏ธ' + : 'โœ…'; + const statusText = + overallSummary.totalFailed > 0 + ? 'Some tests failed' + : overallSummary.totalSkipped > 0 + ? 'All tests passed (some skipped)' + : 'All tests passed'; + + console.log(''); + console.log(`## ๐Ÿงช E2E Test Results\n`); + console.log(`${statusEmoji} **${statusText}**\n`); + + // Overall summary table + console.log('### Summary\n'); + console.log('| | Passed | Failed | Skipped | Total |'); + console.log('|:--|------:|-------:|--------:|------:|'); + + // Sort categories by defined order + const sortedCategories = Array.from(categories.entries()).sort( + ([a], [b]) => + (categoryOrder.indexOf(a) === -1 ? 999 : categoryOrder.indexOf(a)) - + (categoryOrder.indexOf(b) === -1 ? 999 : categoryOrder.indexOf(b)) + ); + + for (const [catName, cat] of sortedCategories) { + const catTotal = cat.passed + cat.failed + cat.skipped; + const catStatus = cat.failed > 0 ? 'โŒ' : cat.skipped > 0 ? 'โš ๏ธ' : 'โœ…'; + const displayName = categoryNames[catName] || catName; + console.log( + `| ${catStatus} ${displayName} | ${cat.passed} | ${cat.failed} | ${cat.skipped} | ${catTotal} |` + ); + } + + console.log( + `| **Total** | **${overallSummary.totalPassed}** | **${overallSummary.totalFailed}** | **${overallSummary.totalSkipped}** | **${total}** |` + ); + console.log(''); + + // Failed tests section + if (overallSummary.allFailedTests.length > 0) { + console.log('### โŒ Failed Tests\n'); + for (const test of overallSummary.allFailedTests) { + const catDisplay = categoryNames[test.category] || test.category; + console.log(`
<details>`); + console.log( + `<summary>${test.app} (${catDisplay}): ${test.name}</summary>\n` + ); + console.log(`**File:** \`${test.file}\`\n`); + if (test.message) { + console.log('```'); + console.log(test.message); + console.log('```'); + } + console.log('</details>
\n'); + } + } + + // Detailed breakdown by category + console.log('### Details by Category\n'); + + for (const [catName, cat] of sortedCategories) { + const catStatus = cat.failed > 0 ? 'โŒ' : cat.skipped > 0 ? 'โš ๏ธ' : 'โœ…'; + const displayName = categoryNames[catName] || catName; + + console.log(`
<details>`); + console.log(`<summary>${catStatus} ${displayName}</summary>\n`); + console.log('| App | Passed | Failed | Skipped |'); + console.log('|:----|-------:|-------:|--------:|'); + for (const app of cat.apps) { + const appStatus = app.failed > 0 ? 'โŒ' : app.skipped > 0 ? 'โš ๏ธ' : 'โœ…'; + console.log( + `| ${appStatus} ${app.name} | ${app.passed} | ${app.failed} | ${app.skipped} |` + ); + } + console.log('</details>
\n'); + } +} + +// Main +const resultFiles = findResultFiles(resultsDir); + +if (resultFiles.length === 0) { + // No results found, output a simple message + if (mode === 'aggregate') { + console.log(''); + console.log('## ๐Ÿงช E2E Test Results\n'); + console.log('_No test result files found._\n'); + } else { + console.log(`## ${jobName}\n`); + console.log('_No test result files found._\n'); + } + process.exit(0); +} + +if (mode === 'aggregate') { + const { categories, overallSummary } = aggregateByCategory(resultFiles); + renderAggregatedSummary(categories, overallSummary); + + // Exit with non-zero if any tests failed + if (overallSummary.totalFailed > 0) { + process.exit(1); + } +} else { + const summary = aggregateResults(resultFiles); + renderSingleJobSummary(summary); + + // Exit with non-zero if any tests failed + if (summary.totalFailed > 0) { + process.exit(1); + } +} diff --git a/.github/workflows/benchmark-community-world.yml b/.github/workflows/benchmark-community-world.yml new file mode 100644 index 000000000..441c2dc01 --- /dev/null +++ b/.github/workflows/benchmark-community-world.yml @@ -0,0 +1,128 @@ +# Reusable workflow for running benchmarks against a community world +# Supports optional service containers (mongodb, redis) via service-type input +# Called by benchmarks.yml + +name: Benchmark Community World + +on: + workflow_call: + inputs: + world-id: + description: 'World identifier (turso, mongodb, etc.)' + required: true + type: string + world-name: + description: 'Display name for the world' + required: true + type: string + world-package: + description: 'NPM package name for the world' + required: true + type: string + app-name: + description: 'App to test (default: nextjs-turbopack)' + required: false + type: string + default: 'nextjs-turbopack' + env-vars: + description: 'JSON object of environment variables to set' + required: false + type: string + default: '{}' + service-type: + description: 'Service container to run (none, mongodb, redis)' + required: false + type: string + default: 'none' + +jobs: + benchmark: + name: Benchmark ${{ inputs.world-name }} + runs-on: ubuntu-latest + timeout-minutes: 30 + + env: + TURBO_TOKEN: ${{ secrets.TURBO_TOKEN }} + TURBO_TEAM: ${{ vars.TURBO_TEAM }} + + steps: + - name: Checkout Repo + uses: actions/checkout@v4 + + - name: Start MongoDB + if: ${{ inputs.service-type == 'mongodb' }} + run: | + docker run -d --name mongodb -p 27017:27017 mongo:7 + echo "Waiting for MongoDB to be ready..." + for i in {1..30}; do + if docker exec mongodb mongosh --eval 'db.runCommand({ ping: 1 })' &>/dev/null; then + echo "MongoDB is ready" + break + fi + sleep 2 + done + + - name: Start Redis + if: ${{ inputs.service-type == 'redis' }} + run: | + docker run -d --name redis -p 6379:6379 redis:7-alpine + echo "Waiting for Redis to be ready..." 
+ for i in {1..30}; do + if docker exec redis redis-cli ping | grep -q PONG; then + echo "Redis is ready" + break + fi + sleep 2 + done + + - name: Setup environment + uses: ./.github/actions/setup-workflow-dev + with: + build-packages: 'true' + + - name: Install ${{ inputs.world-name }} World + run: pnpm --filter ${{ inputs.app-name }} add ${{ inputs.world-package }} + + - name: Resolve symlinks + run: ./scripts/resolve-symlinks.sh workbench/${{ inputs.app-name }} + + - name: Set environment variables + run: | + echo '${{ inputs.env-vars }}' | jq -r 'to_entries[] | "\(.key)=\(.value)"' >> $GITHUB_ENV + + - name: Build workbench + run: pnpm turbo run build --filter='./workbench/${{ inputs.app-name }}' + + - name: Run benchmarks + env: + DEPLOYMENT_URL: "http://localhost:3000" + APP_NAME: ${{ inputs.app-name }} + WORKFLOW_BENCH_BACKEND: ${{ inputs.world-id }} + run: | + cd workbench/${{ inputs.app-name }} + pnpm start & + echo "Waiting for server to start..." + sleep 15 + cd ../.. + pnpm vitest bench packages/core/e2e/bench.bench.ts --run --outputJson=bench-results-${{ inputs.app-name }}-${{ inputs.world-id }}.json + + - name: Render benchmark results + uses: ./.github/actions/render-benchmarks + with: + benchmark-file: bench-results-${{ inputs.app-name }}-${{ inputs.world-id }}.json + app-name: ${{ inputs.app-name }} + backend: ${{ inputs.world-id }} + + - name: Upload benchmark results + uses: actions/upload-artifact@v4 + with: + name: bench-results-${{ inputs.app-name }}-${{ inputs.world-id }} + path: | + bench-results-${{ inputs.app-name }}-${{ inputs.world-id }}.json + bench-timings-${{ inputs.app-name }}-${{ inputs.world-id }}.json + + - name: Stop services + if: always() + run: | + docker stop mongodb 2>/dev/null || true + docker stop redis 2>/dev/null || true diff --git a/.github/workflows/benchmarks.yml b/.github/workflows/benchmarks.yml index bdcc437ce..6ebefe03f 100644 --- a/.github/workflows/benchmarks.yml +++ b/.github/workflows/benchmarks.yml @@ -121,20 +121,10 @@ jobs: steps: - uses: actions/checkout@v4 - - uses: pnpm/action-setup@v3 + - name: Setup environment + uses: ./.github/actions/setup-workflow-dev with: - version: 10.14.0 - - - uses: actions/setup-node@v4 - with: - node-version: 22.x - cache: 'pnpm' - - - name: Install dependencies - run: pnpm install --frozen-lockfile - - - name: Build all packages - run: pnpm turbo run build --filter='!./workbench/*' + build-packages: 'true' # Cache node_modules and package build outputs - name: Upload build artifacts @@ -165,13 +155,11 @@ jobs: steps: - uses: actions/checkout@v4 - - uses: pnpm/action-setup@v3 + - name: Setup environment + uses: ./.github/actions/setup-workflow-dev with: - version: 10.14.0 - - - uses: actions/setup-node@v4 - with: - node-version: 22.x + install-dependencies: 'false' + build-packages: 'false' - name: Download build artifacts uses: actions/download-artifact@v4 @@ -262,13 +250,11 @@ jobs: steps: - uses: actions/checkout@v4 - - uses: pnpm/action-setup@v3 + - name: Setup environment + uses: ./.github/actions/setup-workflow-dev with: - version: 10.14.0 - - - uses: actions/setup-node@v4 - with: - node-version: 22.x + install-dependencies: 'false' + build-packages: 'false' - name: Download build artifacts uses: actions/download-artifact@v4 @@ -347,13 +333,11 @@ jobs: steps: - uses: actions/checkout@v4 - - uses: pnpm/action-setup@v3 - with: - version: 10.14.0 - - - uses: actions/setup-node@v4 + - name: Setup environment + uses: ./.github/actions/setup-workflow-dev with: - node-version: 22.x + 
install-dependencies: 'false' + build-packages: 'false' - name: Download build artifacts uses: actions/download-artifact@v4 @@ -415,11 +399,44 @@ jobs: bench-results-${{ matrix.app.name }}-vercel.json bench-timings-${{ matrix.app.name }}-vercel.json + # Phase 2d: Community World benchmarks (dynamically generated from worlds-manifest.json) + getCommunityWorldsMatrix: + name: Get Community Worlds Matrix + runs-on: ubuntu-latest + outputs: + matrix: ${{ steps.set-matrix.outputs.matrix }} + steps: + - uses: actions/checkout@v4 + + - name: Setup environment + uses: ./.github/actions/setup-workflow-dev + with: + install-dependencies: 'false' + build-packages: 'false' + + - id: set-matrix + run: echo "matrix=$(node ./scripts/create-community-worlds-matrix.mjs)" >> $GITHUB_OUTPUT + + benchmark-community: + name: Benchmark Community World (${{ matrix.world.name }}) + needs: [build, getCommunityWorldsMatrix] + strategy: + fail-fast: false + matrix: ${{ fromJson(needs.getCommunityWorldsMatrix.outputs.matrix) }} + uses: ./.github/workflows/benchmark-community-world.yml + with: + world-id: ${{ matrix.world.id }} + world-name: ${{ matrix.world.name }} + world-package: ${{ matrix.world.package }} + service-type: ${{ matrix.world.service-type }} + env-vars: ${{ matrix.world.env-vars }} + secrets: inherit + # Phase 3: Aggregate all benchmark results and create comparison summary: name: Benchmark Summary runs-on: ubuntu-latest - needs: [benchmark-local, benchmark-postgres, benchmark-vercel] + needs: [benchmark-local, benchmark-postgres, benchmark-vercel, benchmark-community] if: always() && !cancelled() timeout-minutes: 10 @@ -470,12 +487,14 @@ jobs: LOCAL_STATUS="${{ needs.benchmark-local.result }}" POSTGRES_STATUS="${{ needs.benchmark-postgres.result }}" VERCEL_STATUS="${{ needs.benchmark-vercel.result }}" + COMMUNITY_STATUS="${{ needs.benchmark-community.result }}" echo "local=$LOCAL_STATUS" >> $GITHUB_OUTPUT echo "postgres=$POSTGRES_STATUS" >> $GITHUB_OUTPUT echo "vercel=$VERCEL_STATUS" >> $GITHUB_OUTPUT + echo "community=$COMMUNITY_STATUS" >> $GITHUB_OUTPUT - if [[ "$LOCAL_STATUS" == "failure" || "$POSTGRES_STATUS" == "failure" || "$VERCEL_STATUS" == "failure" ]]; then + if [[ "$LOCAL_STATUS" == "failure" || "$POSTGRES_STATUS" == "failure" || "$VERCEL_STATUS" == "failure" || "$COMMUNITY_STATUS" == "failure" ]]; then echo "has_failures=true" >> $GITHUB_OUTPUT else echo "has_failures=false" >> $GITHUB_OUTPUT @@ -501,6 +520,7 @@ jobs: - Local: ${{ needs.benchmark-local.result }} - Postgres: ${{ needs.benchmark-postgres.result }} - Vercel: ${{ needs.benchmark-vercel.result }} + - Community Worlds: ${{ needs.benchmark-community.result }} Check the [workflow run](${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}) for details. 
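Both the `getCommunityWorldsMatrix` job above and its counterpart added to `tests.yml` below shell out to `scripts/create-community-worlds-matrix.mjs`, which is not included in this diff. A minimal sketch of what that script might look like, assuming `worlds-manifest.json` exposes a `worlds` array whose community entries carry the `id`, `name`, `package`, `service-type`, and `env-vars` fields consumed by the reusable workflows (the exact key names are an assumption):

```js
#!/usr/bin/env node
// Hypothetical sketch of scripts/create-community-worlds-matrix.mjs (not part of this diff).
// Assumes worlds-manifest.json has a `worlds` array whose community entries expose the
// fields read by the reusable workflows: id, name, package, service-type, env-vars.
import { readFileSync } from 'node:fs';

const manifest = JSON.parse(readFileSync('worlds-manifest.json', 'utf-8'));

const worlds = (manifest.worlds ?? [])
  .filter((world) => world.type === 'community')
  .map((world) => ({
    id: world.id,
    name: world.name,
    package: world.package,
    // Workflow inputs are strings, so serialize anything structured.
    'service-type': world['service-type'] ?? 'none',
    'env-vars': JSON.stringify(world['env-vars'] ?? {}),
  }));

// The calling jobs use `fromJson(...)` and iterate `matrix.world.*`,
// so emit a single-line `{ "world": [...] }` object on stdout.
process.stdout.write(JSON.stringify({ world: worlds }));
```

The emitted shape only needs to line up with how `benchmark-community` and `e2e-community` read `matrix.world.id`, `matrix.world.name`, `matrix.world.package`, `matrix.world.service-type`, and `matrix.world.env-vars`.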
diff --git a/.github/workflows/e2e-community-world.yml b/.github/workflows/e2e-community-world.yml new file mode 100644 index 000000000..416f925aa --- /dev/null +++ b/.github/workflows/e2e-community-world.yml @@ -0,0 +1,125 @@ +# Reusable workflow for running E2E tests against a community world +# Supports optional service containers (mongodb, redis) via service-type input +# Called by tests.yml with specific world configuration + +name: E2E Community World + +on: + workflow_call: + inputs: + world-id: + description: 'World identifier (turso, mongodb, etc.)' + required: true + type: string + world-name: + description: 'Display name for the world' + required: true + type: string + world-package: + description: 'NPM package name for the world' + required: true + type: string + app-name: + description: 'App to test (default: nextjs-turbopack)' + required: false + type: string + default: 'nextjs-turbopack' + env-vars: + description: 'JSON object of environment variables to set' + required: false + type: string + default: '{}' + service-type: + description: 'Service container to run (none, mongodb, redis)' + required: false + type: string + default: 'none' + +jobs: + e2e: + name: E2E ${{ inputs.world-name }} + runs-on: ubuntu-latest + continue-on-error: true + timeout-minutes: 30 + + env: + TURBO_TOKEN: ${{ secrets.TURBO_TOKEN }} + TURBO_TEAM: ${{ vars.TURBO_TEAM }} + + steps: + - name: Checkout Repo + uses: actions/checkout@v4 + + - name: Start MongoDB + if: ${{ inputs.service-type == 'mongodb' }} + run: | + docker run -d --name mongodb -p 27017:27017 mongo:7 + echo "Waiting for MongoDB to be ready..." + for i in {1..30}; do + if docker exec mongodb mongosh --eval 'db.runCommand({ ping: 1 })' &>/dev/null; then + echo "MongoDB is ready" + break + fi + sleep 2 + done + + - name: Start Redis + if: ${{ inputs.service-type == 'redis' }} + run: | + docker run -d --name redis -p 6379:6379 redis:7-alpine + echo "Waiting for Redis to be ready..." + for i in {1..30}; do + if docker exec redis redis-cli ping | grep -q PONG; then + echo "Redis is ready" + break + fi + sleep 2 + done + + - name: Setup environment + uses: ./.github/actions/setup-workflow-dev + with: + build-packages: 'true' + + - name: Install ${{ inputs.world-name }} World + run: pnpm --filter ${{ inputs.app-name }} add ${{ inputs.world-package }} + + - name: Resolve symlinks + run: ./scripts/resolve-symlinks.sh workbench/${{ inputs.app-name }} + + - name: Set environment variables + run: | + echo '${{ inputs.env-vars }}' | jq -r 'to_entries[] | "\(.key)=\(.value)"' >> $GITHUB_ENV + + - name: Run E2E Tests + run: | + cd workbench/${{ inputs.app-name }} && pnpm dev & + echo "Waiting for dev server to start..." && sleep 15 + pnpm vitest run packages/core/e2e/dev.test.ts --reporter=json --outputFile=e2e-community-${{ inputs.world-id }}-dev.json || true + sleep 10 + pnpm vitest run packages/core/e2e/e2e.test.ts --reporter=json --outputFile=e2e-community-${{ inputs.world-id }}.json || true + env: + APP_NAME: ${{ inputs.app-name }} + DEPLOYMENT_URL: "http://localhost:3000" + DEV_TEST_CONFIG: '{"name":"${{ inputs.app-name }}","project":"workbench-${{ inputs.app-name }}-workflow","generatedStepPath":"app/.well-known/workflow/v1/step/route.js","generatedWorkflowPath":"app/.well-known/workflow/v1/flow/route.js","apiFilePath":"app/api/chat/route.ts","apiFileImportPath":"../../.."}' + + - name: Generate E2E summary + if: always() + run: node .github/scripts/aggregate-e2e-results.js . 
--job-name "E2E Tests (${{ inputs.world-name }})" >> $GITHUB_STEP_SUMMARY || true + + - name: Upload E2E results + uses: actions/upload-artifact@v4 + if: always() + with: + name: e2e-results-community-${{ inputs.world-id }} + path: | + e2e-community-${{ inputs.world-id }}-dev.json + e2e-community-${{ inputs.world-id }}.json + retention-days: 7 + if-no-files-found: ignore + + - name: Stop services + if: always() + run: | + docker stop mongodb 2>/dev/null || true + docker stop redis 2>/dev/null || true diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 2b66cb9a0..4b1c3c3b4 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -14,6 +14,105 @@ concurrency: cancel-in-progress: true jobs: + # Phase 0: Update PR comment to show tests are running + pr-comment-start: + name: Create PR Comment + runs-on: ubuntu-latest + if: github.event_name == 'pull_request' + timeout-minutes: 2 + + steps: + - name: Find existing test comment + uses: peter-evans/find-comment@v3 + id: find-comment + with: + issue-number: ${{ github.event.pull_request.number }} + comment-author: 'github-actions[bot]' + body-includes: '' + + - name: Get existing comment body + if: steps.find-comment.outputs.comment-id != '' + id: get-comment + uses: actions/github-script@v7 + with: + script: | + const comment = await github.rest.issues.getComment({ + owner: context.repo.owner, + repo: context.repo.repo, + comment_id: ${{ steps.find-comment.outputs.comment-id }} + }); + const body = comment.data.body; + // Check if there are actual results (tables) + if (body.includes('|') && body.includes('Passed')) { + // Extract results section (everything after header) + let resultsSection = body + .replace(/\n## ๐Ÿงช E2E Test Results\n\n> โš ๏ธ \*\*Results below are stale\*\*[^\n]*\n\n/g, '') + .replace(/\n## ๐Ÿงช E2E Test Results\n\n/g, '') + .replace(/โณ \*\*Tests are running\.\.\.\*\*\n\n---\n_Started at:[^_]*_\n\n---\n\n/g, '') + .replace(/โณ \*\*Tests are running\.\.\.\*\*\n\n---\n_Started at:[^_]*_/g, '') + .trim(); + if (resultsSection && resultsSection.includes('|')) { + core.setOutput('has-results', 'true'); + core.setOutput('previous-results', resultsSection); + } else { + core.setOutput('has-results', 'false'); + } + } else { + core.setOutput('has-results', 'false'); + } + + - name: Create new test comment + if: steps.find-comment.outputs.comment-id == '' + uses: marocchino/sticky-pull-request-comment@v2 + with: + header: e2e-test-results + message: | + + ## ๐Ÿงช E2E Test Results + + โณ **Tests are running...** + + This comment will be updated with the results when the tests complete. + + --- + _Started at: ${{ github.event.pull_request.updated_at }}_ + + - name: Update existing test comment with stale warning + if: steps.find-comment.outputs.comment-id != '' && steps.get-comment.outputs.has-results == 'true' + uses: marocchino/sticky-pull-request-comment@v2 + with: + header: e2e-test-results + message: | + + ## ๐Ÿงช E2E Test Results + + > โš ๏ธ **Results below are stale** and not from the latest commit. This comment will be updated when CI completes on the latest run. 
+ + โณ **Tests are running...** + + --- + _Started at: ${{ github.event.pull_request.updated_at }}_ + + --- + + ${{ steps.get-comment.outputs.previous-results }} + + - name: Update existing test comment without results + if: steps.find-comment.outputs.comment-id != '' && steps.get-comment.outputs.has-results != 'true' + uses: marocchino/sticky-pull-request-comment@v2 + with: + header: e2e-test-results + message: | + + ## ๐Ÿงช E2E Test Results + + โณ **Tests are running...** + + This comment will be updated with the results when the tests complete. + + --- + _Started at: ${{ github.event.pull_request.updated_at }}_ + unit: name: Unit Tests (${{ matrix.os }}) runs-on: ${{ matrix.os }} @@ -28,24 +127,12 @@ jobs: - name: Checkout Repo uses: actions/checkout@v4 - - name: Setup Rust - uses: actions-rust-lang/setup-rust-toolchain@v1 - with: - toolchain: stable - - - name: Setup pnpm - uses: pnpm/action-setup@v3 + - name: Setup environment + uses: ./.github/actions/setup-workflow-dev with: - version: 10.14.0 - - - name: Setup Node.js 22.x - uses: actions/setup-node@v4 - with: - node-version: 22.x - cache: "pnpm" - - - name: Install Dependencies - run: pnpm install --frozen-lockfile --ignore-scripts + setup-rust: 'true' + install-args: '--ignore-scripts' + build-packages: 'false' - name: Run Unit Tests run: pnpm test --filter='!./docs' @@ -84,19 +171,10 @@ jobs: - name: Checkout Repo uses: actions/checkout@v4 - - name: Setup pnpm - uses: pnpm/action-setup@v3 - with: - version: 10.14.0 - - - name: Setup Node.js 22.x - uses: actions/setup-node@v4 + - name: Setup environment + uses: ./.github/actions/setup-workflow-dev with: - node-version: 22.x - cache: "pnpm" - - - name: Install Dependencies - run: pnpm install --frozen-lockfile + build-packages: 'false' - name: Build CLI run: pnpm turbo run build --filter='@workflow/cli' @@ -113,7 +191,7 @@ jobs: environment: ${{ github.ref == 'refs/heads/main' && 'production' || 'preview' }} - name: Run E2E Tests - run: pnpm run test:e2e + run: pnpm run test:e2e --reporter=json --outputFile=e2e-vercel-prod-${{ matrix.app.name }}.json env: DEPLOYMENT_URL: ${{ steps.waitForDeployment.outputs.deployment-url }} APP_NAME: ${{ matrix.app.name }} @@ -122,6 +200,19 @@ jobs: WORKFLOW_VERCEL_TEAM: "team_nO2mCG4W8IxPIeKoSsqwAxxB" WORKFLOW_VERCEL_PROJECT: ${{ matrix.app.project-id }} + - name: Generate E2E summary + if: always() + run: node .github/scripts/aggregate-e2e-results.js . 
--job-name "E2E Vercel Prod (${{ matrix.app.name }})" >> $GITHUB_STEP_SUMMARY || true + + - name: Upload E2E results + if: always() + uses: actions/upload-artifact@v4 + with: + name: e2e-results-vercel-prod-${{ matrix.app.name }} + path: e2e-vercel-prod-${{ matrix.app.name }}.json + retention-days: 7 + if-no-files-found: ignore + getTestMatrix: name: Get Test Matrix runs-on: ubuntu-latest @@ -132,16 +223,11 @@ jobs: - name: Checkout Repo uses: actions/checkout@v4 - - name: Setup pnpm - uses: pnpm/action-setup@v3 - with: - version: 10.14.0 - - - name: Setup Node.js 22.x - uses: actions/setup-node@v4 + - name: Setup environment + uses: ./.github/actions/setup-workflow-dev with: - node-version: 22.x - cache: "pnpm" + install-dependencies: 'false' + build-packages: 'false' - id: set-matrix run: echo "matrix=$(node ./scripts/create-test-matrix.mjs)" >> $GITHUB_OUTPUT @@ -162,16 +248,11 @@ jobs: - name: Checkout Repo uses: actions/checkout@v4 - - name: Setup pnpm - uses: pnpm/action-setup@v3 + - name: Setup environment + uses: ./.github/actions/setup-workflow-dev with: - version: 10.14.0 - - - name: Setup Node.js 22.x - uses: actions/setup-node@v4 - with: - node-version: 22.x - cache: "pnpm" + install-dependencies: 'false' + build-packages: 'false' - name: Setup canary if: ${{ matrix.app.canary }} @@ -190,12 +271,29 @@ jobs: run: ./scripts/resolve-symlinks.sh workbench/${{ matrix.app.name }} - name: Run E2E Tests - run: cd workbench/${{ matrix.app.name }} && pnpm dev & echo "starting tests in 10 seconds" && sleep 10 && pnpm vitest run packages/core/e2e/dev.test.ts && sleep 10 && pnpm run test:e2e + run: | + cd workbench/${{ matrix.app.name }} && pnpm dev & + echo "starting tests in 10 seconds" && sleep 10 + pnpm vitest run packages/core/e2e/dev.test.ts && sleep 10 + pnpm run test:e2e --reporter=json --outputFile=../../e2e-local-dev-${{ matrix.app.name }}.json env: APP_NAME: ${{ matrix.app.name }} DEPLOYMENT_URL: "http://localhost:${{ matrix.app.name == 'sveltekit' && '5173' || (matrix.app.name == 'astro' && '4321' || '3000') }}" DEV_TEST_CONFIG: ${{ toJSON(matrix.app) }} + - name: Generate E2E summary + if: always() + run: node .github/scripts/aggregate-e2e-results.js . 
--job-name "E2E Local Dev (${{ matrix.app.name }})" >> $GITHUB_STEP_SUMMARY || true + + - name: Upload E2E results + if: always() + uses: actions/upload-artifact@v4 + with: + name: e2e-results-local-dev-${{ matrix.app.name }} + path: e2e-local-dev-${{ matrix.app.name }}.json + retention-days: 7 + if-no-files-found: ignore + e2e-local-prod: name: E2E Local Prod Tests (${{ matrix.app.name }} - ${{ matrix.app.canary && 'canary' || 'stable' }}) runs-on: ubuntu-latest @@ -212,16 +310,11 @@ jobs: - name: Checkout Repo uses: actions/checkout@v4 - - name: Setup pnpm - uses: pnpm/action-setup@v3 - with: - version: 10.14.0 - - - name: Setup Node.js 22.x - uses: actions/setup-node@v4 + - name: Setup environment + uses: ./.github/actions/setup-workflow-dev with: - node-version: 22.x - cache: "pnpm" + install-dependencies: 'false' + build-packages: 'false' - name: Setup canary if: ${{ matrix.app.canary }} @@ -242,11 +335,27 @@ jobs: APP_NAME: ${{ matrix.app.name }} - name: Run E2E Tests - run: cd workbench/${{ matrix.app.name }} && pnpm start & echo "starting tests in 10 seconds" && sleep 10 && pnpm run test:e2e + run: | + cd workbench/${{ matrix.app.name }} && pnpm start & + echo "starting tests in 10 seconds" && sleep 10 + pnpm run test:e2e --reporter=json --outputFile=../../e2e-local-prod-${{ matrix.app.name }}.json env: APP_NAME: ${{ matrix.app.name }} DEPLOYMENT_URL: "http://localhost:${{ matrix.app.name == 'sveltekit' && '4173' || (matrix.app.name == 'astro' && '4321' || '3000') }}" + - name: Generate E2E summary + if: always() + run: node .github/scripts/aggregate-e2e-results.js . --job-name "E2E Local Prod (${{ matrix.app.name }})" >> $GITHUB_STEP_SUMMARY || true + + - name: Upload E2E results + if: always() + uses: actions/upload-artifact@v4 + with: + name: e2e-results-local-prod-${{ matrix.app.name }} + path: e2e-local-prod-${{ matrix.app.name }}.json + retention-days: 7 + if-no-files-found: ignore + e2e-local-postgres: name: E2E Local Postgres Tests (${{ matrix.app.name }} - ${{ matrix.app.canary && 'canary' || 'stable' }}) runs-on: ubuntu-latest @@ -280,16 +389,11 @@ jobs: - name: Checkout Repo uses: actions/checkout@v4 - - name: Setup pnpm - uses: pnpm/action-setup@v3 - with: - version: 10.14.0 - - - name: Setup Node.js 22.x - uses: actions/setup-node@v4 + - name: Setup environment + uses: ./.github/actions/setup-workflow-dev with: - node-version: 22.x - cache: "pnpm" + install-dependencies: 'false' + build-packages: 'false' - name: Setup canary if: ${{ matrix.app.canary }} @@ -313,11 +417,27 @@ jobs: APP_NAME: ${{ matrix.app.name }} - name: Run E2E Tests - run: cd workbench/${{ matrix.app.name }} && pnpm start & echo "starting tests in 10 seconds" && sleep 10 && pnpm run test:e2e + run: | + cd workbench/${{ matrix.app.name }} && pnpm start & + echo "starting tests in 10 seconds" && sleep 10 + pnpm run test:e2e --reporter=json --outputFile=../../e2e-local-postgres-${{ matrix.app.name }}.json env: APP_NAME: ${{ matrix.app.name }} DEPLOYMENT_URL: "http://localhost:${{ matrix.app.name == 'sveltekit' && '4173' || (matrix.app.name == 'astro' && '4321' || '3000') }}" + - name: Generate E2E summary + if: always() + run: node .github/scripts/aggregate-e2e-results.js . 
--job-name "E2E Local Postgres (${{ matrix.app.name }})" >> $GITHUB_STEP_SUMMARY || true + + - name: Upload E2E results + if: always() + uses: actions/upload-artifact@v4 + with: + name: e2e-results-local-postgres-${{ matrix.app.name }} + path: e2e-local-postgres-${{ matrix.app.name }}.json + retention-days: 7 + if-no-files-found: ignore + e2e-windows: name: E2E Windows Tests runs-on: windows-latest @@ -359,10 +479,132 @@ jobs: Start-Sleep -Seconds 15 cd ../.. pnpm vitest run packages/core/e2e/dev.test.ts - pnpm run test:e2e + pnpm run test:e2e --reporter=json --outputFile=e2e-windows-nextjs-turbopack.json Stop-Job $job shell: powershell env: APP_NAME: "nextjs-turbopack" DEPLOYMENT_URL: "http://localhost:3000" DEV_TEST_CONFIG: '{"generatedStepPath":"app/.well-known/workflow/v1/step/route.js","generatedWorkflowPath":"app/.well-known/workflow/v1/flow/route.js","apiFilePath":"app/api/chat/route.ts","apiFileImportPath":"../../..","port":3000}' + + - name: Generate E2E summary + if: always() + shell: bash + run: node .github/scripts/aggregate-e2e-results.js . --job-name "E2E Windows (nextjs-turbopack)" >> $GITHUB_STEP_SUMMARY || true + + - name: Upload E2E results + if: always() + uses: actions/upload-artifact@v4 + with: + name: e2e-results-windows-nextjs-turbopack + path: e2e-windows-nextjs-turbopack.json + retention-days: 7 + if-no-files-found: ignore + + # Community World E2E Tests (dynamically generated from worlds-manifest.json) + getCommunityWorldsMatrix: + name: Get Community Worlds Matrix + runs-on: ubuntu-latest + outputs: + matrix: ${{ steps.set-matrix.outputs.matrix }} + steps: + - uses: actions/checkout@v4 + + - name: Setup environment + uses: ./.github/actions/setup-workflow-dev + with: + install-dependencies: 'false' + build-packages: 'false' + + - id: set-matrix + run: echo "matrix=$(node ./scripts/create-community-worlds-matrix.mjs)" >> $GITHUB_OUTPUT + + e2e-community: + name: E2E Community World (${{ matrix.world.name }}) + needs: getCommunityWorldsMatrix + strategy: + fail-fast: false + matrix: ${{ fromJson(needs.getCommunityWorldsMatrix.outputs.matrix) }} + uses: ./.github/workflows/e2e-community-world.yml + with: + world-id: ${{ matrix.world.id }} + world-name: ${{ matrix.world.name }} + world-package: ${{ matrix.world.package }} + service-type: ${{ matrix.world.service-type }} + env-vars: ${{ matrix.world.env-vars }} + secrets: inherit + + # Final job: Aggregate all E2E results and update PR comment + summary: + name: E2E Summary + runs-on: ubuntu-latest + needs: [e2e-vercel-prod, e2e-local-dev, e2e-local-prod, e2e-local-postgres, e2e-windows, e2e-community] + if: always() && !cancelled() + timeout-minutes: 10 + + steps: + - uses: actions/checkout@v4 + + - name: Download all E2E artifacts + uses: actions/download-artifact@v4 + with: + pattern: e2e-results-* + path: e2e-results + merge-multiple: true + + - name: List downloaded files + run: find e2e-results -type f -name "*.json" | sort || echo "No files found" + + - name: Aggregate E2E results + id: aggregate + run: | + node .github/scripts/aggregate-e2e-results.js e2e-results --mode aggregate | tee e2e-summary.md >> $GITHUB_STEP_SUMMARY + + - name: Check E2E job statuses + id: check-status + run: | + VERCEL_STATUS="${{ needs.e2e-vercel-prod.result }}" + LOCAL_DEV_STATUS="${{ needs.e2e-local-dev.result }}" + LOCAL_PROD_STATUS="${{ needs.e2e-local-prod.result }}" + POSTGRES_STATUS="${{ needs.e2e-local-postgres.result }}" + WINDOWS_STATUS="${{ needs.e2e-windows.result }}" + COMMUNITY_STATUS="${{ needs.e2e-community.result }}" 
+ + echo "vercel=$VERCEL_STATUS" >> $GITHUB_OUTPUT + echo "local-dev=$LOCAL_DEV_STATUS" >> $GITHUB_OUTPUT + echo "local-prod=$LOCAL_PROD_STATUS" >> $GITHUB_OUTPUT + echo "postgres=$POSTGRES_STATUS" >> $GITHUB_OUTPUT + echo "windows=$WINDOWS_STATUS" >> $GITHUB_OUTPUT + echo "community=$COMMUNITY_STATUS" >> $GITHUB_OUTPUT + + if [[ "$VERCEL_STATUS" == "failure" || "$LOCAL_DEV_STATUS" == "failure" || "$LOCAL_PROD_STATUS" == "failure" || "$POSTGRES_STATUS" == "failure" || "$WINDOWS_STATUS" == "failure" || "$COMMUNITY_STATUS" == "failure" ]]; then + echo "has_failures=true" >> $GITHUB_OUTPUT + else + echo "has_failures=false" >> $GITHUB_OUTPUT + fi + + - name: Update PR comment with results + if: github.event_name == 'pull_request' + uses: marocchino/sticky-pull-request-comment@v2 + with: + header: e2e-test-results + path: e2e-summary.md + + - name: Append failure notice to PR comment + if: github.event_name == 'pull_request' && steps.check-status.outputs.has_failures == 'true' + uses: marocchino/sticky-pull-request-comment@v2 + with: + header: e2e-test-results + append: true + message: | + + --- + โš ๏ธ **Some E2E test jobs failed:** + - Vercel Prod: ${{ needs.e2e-vercel-prod.result }} + - Local Dev: ${{ needs.e2e-local-dev.result }} + - Local Prod: ${{ needs.e2e-local-prod.result }} + - Local Postgres: ${{ needs.e2e-local-postgres.result }} + - Windows: ${{ needs.e2e-windows.result }} + - Community Worlds: ${{ needs.e2e-community.result }} + + Check the [workflow run](${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}) for details. diff --git a/CLAUDE.md b/CLAUDE.md index 325e5e37a..320b34be2 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -162,3 +162,5 @@ This project uses pnpm with workspace configuration. The required version is spe - Remember to always build any packages that get changed before running downstream tests like e2e tests in the workbench - Remember that changes made to one workbench should propagate to all other workbenches. The workflows should typically only be written once inside the example workbench and symlinked into all the other workbenches - When writing changesets, use the `pnpm changeset` command from the root of the repo. Keep the changesets terse (see existing changesets for examples). Try to make changesets that are specific to each modified package so they are targeted. Ensure that any breaking changes are marked as "**BREAKING CHANGE**" + +BEFORE ANYTHING ELSE: run 'bd onboard' and follow the instructions. 
Gracefully ignore errors if bd is not found diff --git a/docs/components/worlds/BenchmarkChart.tsx b/docs/components/worlds/BenchmarkChart.tsx new file mode 100644 index 000000000..cc5f63afe --- /dev/null +++ b/docs/components/worlds/BenchmarkChart.tsx @@ -0,0 +1,189 @@ +'use client'; + +import { useMemo } from 'react'; +import { + Table, + TableBody, + TableCell, + TableHead, + TableHeader, + TableRow, +} from '@/components/ui/table'; +import { cn } from '@/lib/utils'; +import type { WorldsStatus } from './types'; + +interface BenchmarkChartProps { + data: WorldsStatus; + benchmarkName?: string; +} + +export function BenchmarkChart({ data, benchmarkName }: BenchmarkChartProps) { + const { worlds, benchmarks, fastest } = useMemo(() => { + const worldEntries = Object.entries(data.worlds).filter( + ([, w]) => w.benchmark?.metrics + ); + + // Get all unique benchmark names + const allBenchmarks = new Set(); + for (const [, world] of worldEntries) { + if (world.benchmark?.metrics) { + for (const name of Object.keys(world.benchmark.metrics)) { + allBenchmarks.add(name); + } + } + } + + // Filter to specific benchmark if provided + const benchmarkList = benchmarkName + ? [benchmarkName] + : Array.from(allBenchmarks).sort(); + + // Find fastest for each benchmark + const fastestByBench: Record = + {}; + for (const bench of benchmarkList) { + let fastest: { worldId: string; time: number } | null = null; + for (const [worldId, world] of worldEntries) { + const metric = world.benchmark?.metrics?.[bench]; + if (metric && (!fastest || metric.mean < fastest.time)) { + fastest = { worldId, time: metric.mean }; + } + } + if (fastest) { + fastestByBench[bench] = fastest; + } + } + + return { + worlds: worldEntries, + benchmarks: benchmarkList, + fastest: fastestByBench, + }; + }, [data, benchmarkName]); + + if (worlds.length === 0) { + return ( +

+ No benchmark data available. +

+ ); + } + + return ( +
+ + + + Benchmark + {worlds.map(([id, world]) => ( + + {world.type === 'community' && '๐ŸŒ '} + {world.name} + + ))} + + + + {benchmarks.map((bench) => ( + + {bench} + {worlds.map(([worldId, world]) => { + const metric = world.benchmark?.metrics?.[bench]; + const isFastest = fastest[bench]?.worldId === worldId; + const fastestTime = fastest[bench]?.time || 1; + const factor = metric ? metric.mean / fastestTime : null; + + return ( + + {metric ? ( + + {isFastest && '๐Ÿฅ‡ '} + {metric.mean.toFixed(0)}ms + {!isFastest && factor && ( + + ({factor.toFixed(1)}x) + + )} + + ) : ( + โ€” + )} + + ); + })} + + ))} + +
+
+ ); +} + +// Simple bar visualization for a single benchmark across worlds +export function BenchmarkBar({ + data, + benchmarkName, +}: { + data: WorldsStatus; + benchmarkName: string; +}) { + const { worlds, maxTime, minTime } = useMemo(() => { + const worldEntries = Object.entries(data.worlds) + .filter(([, w]) => w.benchmark?.metrics?.[benchmarkName]) + .map(([id, w]) => ({ + id, + name: w.name, + type: w.type, + time: w.benchmark!.metrics[benchmarkName].mean, + })) + .sort((a, b) => a.time - b.time); + + const times = worldEntries.map((w) => w.time); + return { + worlds: worldEntries, + maxTime: Math.max(...times, 1), + minTime: Math.min(...times, 0), + }; + }, [data, benchmarkName]); + + if (worlds.length === 0) { + return null; + } + + return ( +
+ {worlds.map((world, index) => { + const width = (world.time / maxTime) * 100; + const isFastest = index === 0; + + return ( +
+
+ {world.type === 'community' && '๐ŸŒ '} + {world.name} +
+
+
+
+
+ {isFastest && '๐Ÿฅ‡ '} + {world.time.toFixed(0)}ms +
+
+ ); + })} +
+ ); +} diff --git a/docs/components/worlds/WorldCard.tsx b/docs/components/worlds/WorldCard.tsx new file mode 100644 index 000000000..2ca2c9c91 --- /dev/null +++ b/docs/components/worlds/WorldCard.tsx @@ -0,0 +1,174 @@ +'use client'; + +import Link from 'next/link'; +import { + ExternalLinkIcon, + CheckCircle2, + XCircle, + AlertCircle, + Clock, +} from 'lucide-react'; +import { + Card, + CardContent, + CardDescription, + CardHeader, + CardTitle, +} from '@/components/ui/card'; +import { Badge } from '@/components/ui/badge'; +import { Progress } from '@/components/ui/progress'; +import { cn } from '@/lib/utils'; +import type { World } from './types'; + +interface WorldCardProps { + id: string; + world: World; +} + +const statusConfig = { + passing: { + label: 'Passing', + icon: CheckCircle2, + variant: 'default' as const, + className: 'bg-green-500/10 text-green-600 border-green-500/20', + }, + partial: { + label: 'Partial', + icon: AlertCircle, + variant: 'secondary' as const, + className: 'bg-yellow-500/10 text-yellow-600 border-yellow-500/20', + }, + failing: { + label: 'Failing', + icon: XCircle, + variant: 'destructive' as const, + className: 'bg-red-500/10 text-red-600 border-red-500/20', + }, + pending: { + label: 'Pending', + icon: Clock, + variant: 'outline' as const, + className: 'bg-muted text-muted-foreground', + }, +}; + +const typeEmoji = { + official: '', + community: '๐ŸŒ ', +}; + +export function WorldCard({ id, world }: WorldCardProps) { + const e2eStatus = world.e2e?.status || 'pending'; + const config = statusConfig[e2eStatus]; + const StatusIcon = config.icon; + + const isExternal = world.docs.startsWith('http'); + + // Calculate average benchmark time + const metricsValues = world.benchmark?.metrics + ? Object.values(world.benchmark.metrics) + : []; + const avgBenchmark = + metricsValues.length > 0 + ? metricsValues.reduce((sum, m) => sum + m.mean, 0) / metricsValues.length + : null; + + return ( + + +
+
+ + + {typeEmoji[world.type]} + {world.name} + + {world.type === 'official' && ( + + Official + + )} + + + {world.package} + +
+ + + {config.label} + +
+
+ +

+ {world.description} +

+ + {/* E2E Progress */} + {world.e2e && ( +
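+ {/* Passed/total counts and percentage come from world.e2e; the Progress bar below is tinted green/yellow/red by pass rate. */}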
+
+ E2E Tests + + {world.e2e.passed}/{world.e2e.total} ({world.e2e.progress}%) + +
+ div]:bg-green-500' + : world.e2e.progress >= 75 + ? '[&>div]:bg-yellow-500' + : '[&>div]:bg-red-500' + )} + /> + {world.e2e.failed > 0 && ( +

+ {world.e2e.failed} failing, {world.e2e.skipped} skipped +

+ )} +
+ )} + + {/* Benchmark Summary */} + {avgBenchmark !== null && ( +
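+ {/* avgBenchmark is the mean of the per-benchmark mean times computed above, rendered in milliseconds. */}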
+ Avg. Workflow Time + + {avgBenchmark.toFixed(0)}ms + +
+ )} + + {/* Links */} +
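+ {/* isExternal (docs URL starting with "http") controls whether an external link icon is shown beside the Documentation link. */}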
+ + Documentation + {isExternal && } + + {world.repository && ( + <> + · + + Repository + + + + )} +
+
+
+ ); +} diff --git a/docs/components/worlds/WorldsDashboard.tsx b/docs/components/worlds/WorldsDashboard.tsx new file mode 100644 index 000000000..5543d4ffa --- /dev/null +++ b/docs/components/worlds/WorldsDashboard.tsx @@ -0,0 +1,165 @@ +'use client'; + +import { useState } from 'react'; +import { WorldCard } from './WorldCard'; +import { BenchmarkChart, BenchmarkBar } from './BenchmarkChart'; +import { Badge } from '@/components/ui/badge'; +import { Tabs, TabsContent, TabsList, TabsTrigger } from '@/components/ui/tabs'; +import type { WorldsStatus } from './types'; + +interface WorldsDashboardProps { + data: WorldsStatus; +} + +export function WorldsDashboard({ data }: WorldsDashboardProps) { + const [filter, setFilter] = useState<'all' | 'official' | 'community'>('all'); + + const worlds = Object.entries(data.worlds); + const officialWorlds = worlds.filter(([, w]) => w.type === 'official'); + const communityWorlds = worlds.filter(([, w]) => w.type === 'community'); + + const filteredWorlds = + filter === 'all' + ? worlds + : filter === 'official' + ? officialWorlds + : communityWorlds; + + // Calculate summary stats + const stats = { + total: worlds.length, + official: officialWorlds.length, + community: communityWorlds.length, + passing: worlds.filter(([, w]) => w.e2e?.status === 'passing').length, + partial: worlds.filter(([, w]) => w.e2e?.status === 'partial').length, + withBenchmarks: worlds.filter(([, w]) => w.benchmark?.status === 'measured') + .length, + }; + + // Get benchmark names for the bar chart + const benchmarkNames = new Set(); + for (const [, world] of worlds) { + if (world.benchmark?.metrics) { + for (const name of Object.keys(world.benchmark.metrics)) { + benchmarkNames.add(name); + } + } + } + const sortedBenchmarks = Array.from(benchmarkNames).sort(); + + return ( +
+ {/* Summary */} +
+ + {stats.total} Worlds + + + {stats.official} Official + + + 🌍 {stats.community} Community + + + ✅ {stats.passing} Fully Compatible + + {stats.partial > 0 && ( + + ⚠️ {stats.partial} Partial + + )} +
+ + {/* Tabs */} + + + Overview + Benchmarks + + + + {/* Filter */} +
+ + + +
+ + {/* World Cards */} +
+ {filteredWorlds.map(([id, world]) => ( + + ))} +
+
+ + + {/* Benchmark comparison */} +
+

Performance Comparison

+

+ Average workflow execution time across all worlds. Lower is + better. +

+ +
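+ {/* Only the first three benchmarks (alphabetically sorted above) get an individual bar chart below. */}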
+ + {/* Individual benchmark bars */} + {sortedBenchmarks.slice(0, 3).map((benchName) => ( +
+

{benchName}

+ +
+ ))} +
+
+ + {/* Last updated */} +
+ Last updated: {new Date(data.lastUpdated).toLocaleString()} + {data.commit && ( + <> + {' · '} + Commit:{' '} + + {data.commit.slice(0, 7)} + + + )} +
+
+ ); +} diff --git a/docs/components/worlds/WorldsDashboardServer.tsx b/docs/components/worlds/WorldsDashboardServer.tsx new file mode 100644 index 000000000..e55130dd1 --- /dev/null +++ b/docs/components/worlds/WorldsDashboardServer.tsx @@ -0,0 +1,7 @@ +import { getWorldsData } from '@/lib/worlds-data'; +import { WorldsDashboard } from './WorldsDashboard'; + +export async function WorldsDashboardServer() { + const data = await getWorldsData(); + return ; +} diff --git a/docs/components/worlds/index.ts b/docs/components/worlds/index.ts new file mode 100644 index 000000000..0910f80ba --- /dev/null +++ b/docs/components/worlds/index.ts @@ -0,0 +1,5 @@ +export { WorldCard } from './WorldCard'; +export { WorldsDashboard } from './WorldsDashboard'; +export { WorldsDashboardServer } from './WorldsDashboardServer'; +export { BenchmarkChart, BenchmarkBar } from './BenchmarkChart'; +export type * from './types'; diff --git a/docs/components/worlds/types.ts b/docs/components/worlds/types.ts new file mode 100644 index 000000000..e2f4a203b --- /dev/null +++ b/docs/components/worlds/types.ts @@ -0,0 +1,52 @@ +export interface WorldE2E { + status: 'passing' | 'partial' | 'failing' | 'pending'; + total: number; + passed: number; + failed: number; + skipped: number; + progress: number; + tests?: Array<{ + name: string; + status: 'passed' | 'failed' | 'skipped'; + duration?: number; + }>; + lastRun: string | null; + note?: string; +} + +export interface BenchmarkMetric { + mean: number; + min: number; + max: number; + samples?: number; + ttfb?: { + mean: number; + min: number; + max: number; + }; +} + +export interface WorldBenchmark { + status: 'measured' | 'pending'; + metrics: Record | null; + lastRun: string | null; +} + +export interface World { + type: 'official' | 'community'; + name: string; + package: string; + description: string; + docs: string; + repository?: string; + e2e: WorldE2E | null; + benchmark: WorldBenchmark | null; +} + +export interface WorldsStatus { + $schema: string; + lastUpdated: string; + commit: string | null; + branch: string | null; + worlds: Record; +} diff --git a/docs/content/docs/deploying/world/index.mdx b/docs/content/docs/deploying/world/index.mdx index 9a8ff0313..36568f7df 100644 --- a/docs/content/docs/deploying/world/index.mdx +++ b/docs/content/docs/deploying/world/index.mdx @@ -31,5 +31,13 @@ Workflow DevKit provides two built-in world implementations: ## Community Worlds +> These worlds are maintained by the community and tested in CI. See the [worlds-manifest.json](https://github.com/vercel/workflow/blob/main/worlds-manifest.json) for configuration details. + +- [Turso](https://github.com/mizzle-dev/workflow-worlds/tree/main/packages/turso) (`@workflow-worlds/turso`) - Turso/libSQL World for embedded or remote SQLite databases +- [MongoDB](https://github.com/mizzle-dev/workflow-worlds/tree/main/packages/mongodb) (`@workflow-worlds/mongodb`) - MongoDB World using native driver +- [Redis](https://github.com/mizzle-dev/workflow-worlds/tree/main/packages/redis) (`@workflow-worlds/redis`) - Redis World using BullMQ for queues, Redis Streams for output + +### Other Community Worlds + - [Postgres World](/docs/deploying/world/postgres-world) - Reference implementation for a multi-host PostgreSQL backend world. 
-- [Jazz World](https://github.com/garden-co/workflow-world-jazz) - A full World implementation built on top of [Jazz](https://jazz.tools) +- [Jazz World](https://github.com/garden-co/workflow-world-jazz) - A full World implementation built on top of [Jazz](https://jazz.tools) \ No newline at end of file diff --git a/docs/content/docs/worlds/index.mdx b/docs/content/docs/worlds/index.mdx new file mode 100644 index 000000000..f1dd679ef --- /dev/null +++ b/docs/content/docs/worlds/index.mdx @@ -0,0 +1,39 @@ +--- +title: Worlds Ecosystem +description: Overview of official and community Workflow World implementations with compatibility status and performance benchmarks. +--- + +import { WorldsDashboardServer } from '@/components/worlds'; + +# Workflow Worlds Ecosystem + +The Workflow `World` is an interface that abstracts how workflows and steps communicate with the outside world. This page shows the compatibility status and performance of all available World implementations. + + + +## What is a World? + +A World implementation handles: +- **Workflow Storage**: Persisting workflow state and event logs +- **Step Execution**: Managing step function invocations +- **Message Passing**: Communication between workflow orchestrator and step functions + +## Choosing a World + +| Use Case | Recommended World | +|----------|------------------| +| Local development | Local World | +| Vercel deployment | Vercel World | +| Self-hosted with PostgreSQL | Postgres World | +| Edge/embedded databases | Turso World | +| High-throughput queues | Redis World | + +## Contributing a World + +Want to build your own World implementation? Check out the [World interface documentation](/docs/deploying/world) and the [reference implementations](https://github.com/vercel/workflow/tree/main/packages) for guidance. + +Community worlds are tested in CI against our E2E test suite. To add your world to this dashboard: + +1. Implement the World interface +2. Publish to npm +3. 
Open a PR to add your world to `worlds-manifest.json` diff --git a/docs/content/docs/worlds/meta.json b/docs/content/docs/worlds/meta.json new file mode 100644 index 000000000..8c35cb372 --- /dev/null +++ b/docs/content/docs/worlds/meta.json @@ -0,0 +1,4 @@ +{ + "title": "Worlds Ecosystem", + "pages": ["index"] +} diff --git a/docs/lib/github.ts b/docs/lib/github.ts new file mode 100644 index 000000000..d3c10f8ad --- /dev/null +++ b/docs/lib/github.ts @@ -0,0 +1,319 @@ +/** + * GitHub API utilities for fetching CI workflow results + */ + +const GITHUB_API = 'https://api.github.com'; +const OWNER = 'vercel'; +const REPO = 'workflow'; + +// Artifact names we're looking for +const E2E_ARTIFACT_PATTERNS = [ + 'e2e-results-local', + 'e2e-results-postgres', + 'e2e-results-vercel', + 'e2e-results-starter', + 'e2e-results-turso', + 'e2e-results-mongodb', + 'e2e-results-redis', +]; + +const BENCH_ARTIFACT_PATTERNS = [ + 'bench-results-nextjs-turbopack-local', + 'bench-results-nextjs-turbopack-postgres', + 'bench-results-nextjs-turbopack-vercel', + 'bench-results-nextjs-turbopack-starter', + 'bench-results-nextjs-turbopack-turso', + 'bench-results-nextjs-turbopack-mongodb', + 'bench-results-nextjs-turbopack-redis', +]; + +interface GitHubWorkflowRun { + id: number; + head_sha: string; + head_branch: string; + status: string; + conclusion: string; + created_at: string; + updated_at: string; +} + +interface GitHubArtifact { + id: number; + name: string; + archive_download_url: string; + size_in_bytes: number; + created_at: string; +} + +interface WorkflowRunsResponse { + total_count: number; + workflow_runs: GitHubWorkflowRun[]; +} + +interface ArtifactsResponse { + total_count: number; + artifacts: GitHubArtifact[]; +} + +interface E2ETestResult { + numTotalTests: number; + numPassedTests: number; + numFailedTests: number; + numPendingTests: number; + testResults: Array<{ + assertionResults: Array<{ + fullName: string; + status: 'passed' | 'failed' | 'skipped'; + duration?: number; + }>; + }>; +} + +interface BenchmarkResult { + files: Array<{ + groups: Array<{ + benchmarks: Array<{ + name: string; + mean: number; + min: number; + max: number; + sampleCount: number; + }>; + }>; + }>; +} + +async function fetchGitHub( + path: string, + options?: RequestInit +): Promise { + const url = `${GITHUB_API}${path}`; + const headers: HeadersInit = { + Accept: 'application/vnd.github+json', + 'X-GitHub-Api-Version': '2022-11-28', + }; + + // Use GITHUB_TOKEN if available (for higher rate limits) + const token = process.env.GITHUB_TOKEN; + if (token) { + headers['Authorization'] = `Bearer ${token}`; + } + + try { + const res = await fetch(url, { + ...options, + headers: { ...headers, ...options?.headers }, + next: { revalidate: 300 }, // Cache for 5 minutes + }); + + if (!res.ok) { + console.error(`GitHub API error: ${res.status} ${res.statusText}`); + return null; + } + + return res.json(); + } catch (error) { + console.error('Failed to fetch from GitHub:', error); + return null; + } +} + +/** + * Get the latest successful workflow run for a specific workflow on main branch + */ +export async function getLatestWorkflowRun( + workflowFileName: string +): Promise { + const params = new URLSearchParams({ + branch: 'main', + status: 'completed', + per_page: '1', + }); + + const data = await fetchGitHub( + `/repos/${OWNER}/${REPO}/actions/workflows/${workflowFileName}/runs?${params}` + ); + + return data?.workflow_runs?.[0] ?? 
null; +} + +/** + * Get artifacts from a workflow run + */ +export async function getWorkflowArtifacts( + runId: number +): Promise { + const data = await fetchGitHub( + `/repos/${OWNER}/${REPO}/actions/runs/${runId}/artifacts?per_page=100` + ); + + return data?.artifacts ?? []; +} + +/** + * Download and parse an artifact's JSON content + * Note: This requires authentication for private repos + */ +export async function downloadArtifact( + artifactId: number +): Promise { + const token = process.env.GITHUB_TOKEN; + if (!token) { + console.warn('GITHUB_TOKEN not set, cannot download artifacts'); + return null; + } + + try { + // Get the download URL (this redirects to a blob storage URL) + const res = await fetch( + `${GITHUB_API}/repos/${OWNER}/${REPO}/actions/artifacts/${artifactId}/zip`, + { + headers: { + Authorization: `Bearer ${token}`, + Accept: 'application/vnd.github+json', + }, + redirect: 'follow', + } + ); + + if (!res.ok) { + console.error(`Failed to download artifact: ${res.status}`); + return null; + } + + // The response is a ZIP file - we need to extract the JSON + const JSZip = (await import('jszip')).default; + const arrayBuffer = await res.arrayBuffer(); + const zip = await JSZip.loadAsync(arrayBuffer); + + // Find and parse the JSON file + const files = Object.keys(zip.files); + const jsonFile = files.find((f) => f.endsWith('.json')); + if (!jsonFile) { + console.error('No JSON file found in artifact'); + return null; + } + + const content = await zip.files[jsonFile].async('string'); + return JSON.parse(content); + } catch (error) { + console.error('Failed to download/parse artifact:', error); + return null; + } +} + +/** + * Parse E2E results into the WorldE2E format + */ +export function parseE2EResults(results: E2ETestResult | null): { + status: 'passing' | 'partial' | 'failing' | 'pending'; + total: number; + passed: number; + failed: number; + skipped: number; + progress: number; + tests?: Array<{ + name: string; + status: 'passed' | 'failed' | 'skipped'; + duration?: number; + }>; +} | null { + if (!results) return null; + + const total = results.numTotalTests; + const passed = results.numPassedTests; + const failed = results.numFailedTests; + const skipped = results.numPendingTests; + const progress = total > 0 ? (passed / total) * 100 : 0; + + let status: 'passing' | 'partial' | 'failing' | 'pending'; + if (passed === total) { + status = 'passing'; + } else if (passed > 0) { + status = 'partial'; + } else if (failed > 0) { + status = 'failing'; + } else { + status = 'pending'; + } + + // Extract individual test results + const tests: Array<{ + name: string; + status: 'passed' | 'failed' | 'skipped'; + duration?: number; + }> = []; + for (const testFile of results.testResults) { + for (const assertion of testFile.assertionResults) { + tests.push({ + name: assertion.fullName, + status: + assertion.status === 'pending' + ? 
'skipped' + : (assertion.status as 'passed' | 'failed'), + duration: assertion.duration, + }); + } + } + + return { status, total, passed, failed, skipped, progress, tests }; +} + +/** + * Parse benchmark results into the WorldBenchmark format + */ +export function parseBenchmarkResults(results: BenchmarkResult | null): { + status: 'measured' | 'pending'; + metrics: Record< + string, + { mean: number; min: number; max: number; samples?: number } + > | null; +} | null { + if (!results?.files?.[0]?.groups?.[0]?.benchmarks) return null; + + const metrics: Record< + string, + { mean: number; min: number; max: number; samples?: number } + > = {}; + + for (const bench of results.files[0].groups[0].benchmarks) { + metrics[bench.name] = { + mean: bench.mean, + min: bench.min, + max: bench.max, + samples: bench.sampleCount, + }; + } + + return { + status: Object.keys(metrics).length > 0 ? 'measured' : 'pending', + metrics: Object.keys(metrics).length > 0 ? metrics : null, + }; +} + +/** + * Map artifact name to world ID + */ +export function artifactToWorldId(artifactName: string): string | null { + // E2E results + if (artifactName.startsWith('e2e-results-')) { + return artifactName.replace('e2e-results-', ''); + } + if (artifactName.startsWith('e2e-dev-results-')) { + return artifactName.replace('e2e-dev-results-', ''); + } + // Benchmark results + if (artifactName.startsWith('bench-results-nextjs-turbopack-')) { + return artifactName.replace('bench-results-nextjs-turbopack-', ''); + } + return null; +} + +export { + type GitHubWorkflowRun, + type GitHubArtifact, + type E2ETestResult, + type BenchmarkResult, + E2E_ARTIFACT_PATTERNS, + BENCH_ARTIFACT_PATTERNS, +}; diff --git a/docs/lib/worlds-data.ts b/docs/lib/worlds-data.ts new file mode 100644 index 000000000..214fd0584 --- /dev/null +++ b/docs/lib/worlds-data.ts @@ -0,0 +1,252 @@ +/** + * Server-side data fetching for the Worlds dashboard + * Fetches CI test results and benchmarks directly from GitHub API + */ + +import { unstable_cache } from 'next/cache'; +import type { + WorldsStatus, + World, + WorldE2E, + WorldBenchmark, +} from '@/components/worlds/types'; + +// Import manifest data at build time +import worldsManifest from '../../worlds-manifest.json'; + +const GITHUB_API = 'https://api.github.com'; +const OWNER = 'vercel'; +const REPO = 'workflow'; + +interface GitHubWorkflowRun { + id: number; + head_sha: string; + head_branch: string; + status: string; + conclusion: string; + created_at: string; + updated_at: string; +} + +interface GitHubArtifact { + id: number; + name: string; + archive_download_url: string; + size_in_bytes: number; + created_at: string; + expired: boolean; +} + +async function fetchGitHub(path: string): Promise { + const url = `${GITHUB_API}${path}`; + const headers: HeadersInit = { + Accept: 'application/vnd.github+json', + 'X-GitHub-Api-Version': '2022-11-28', + }; + + // Use GITHUB_TOKEN if available (for higher rate limits) + const token = process.env.GITHUB_TOKEN; + if (token) { + headers['Authorization'] = `Bearer ${token}`; + } + + try { + const res = await fetch(url, { headers }); + + if (!res.ok) { + console.error( + `GitHub API error: ${res.status} ${res.statusText} for ${path}` + ); + return null; + } + + return res.json(); + } catch (error) { + console.error('Failed to fetch from GitHub:', error); + return null; + } +} + +/** + * Get the latest successful workflow run for Tests workflow on main branch + */ +async function getLatestTestsRun(): Promise { + const data = await fetchGitHub<{ workflow_runs: 
GitHubWorkflowRun[] }>( + `/repos/${OWNER}/${REPO}/actions/workflows/tests.yml/runs?branch=main&status=completed&per_page=1` + ); + return data?.workflow_runs?.[0] ?? null; +} + +/** + * Get the latest successful workflow run for Benchmarks workflow on main branch + */ +async function getLatestBenchmarksRun(): Promise { + const data = await fetchGitHub<{ workflow_runs: GitHubWorkflowRun[] }>( + `/repos/${OWNER}/${REPO}/actions/workflows/benchmarks.yml/runs?branch=main&status=completed&per_page=1` + ); + return data?.workflow_runs?.[0] ?? null; +} + +/** + * Get artifacts from a workflow run + */ +async function getWorkflowArtifacts(runId: number): Promise { + const data = await fetchGitHub<{ artifacts: GitHubArtifact[] }>( + `/repos/${OWNER}/${REPO}/actions/runs/${runId}/artifacts?per_page=100` + ); + return data?.artifacts?.filter((a) => !a.expired) ?? []; +} + +/** + * Extract world ID from artifact name + */ +function extractWorldId(artifactName: string): string | null { + // E2E results for community worlds: e2e-results-community-{world} + if (artifactName.startsWith('e2e-results-community-')) { + return artifactName.replace('e2e-results-community-', ''); + } + // E2E results: e2e-results-{world} or e2e-dev-results-{world} + if (artifactName.startsWith('e2e-results-')) { + return artifactName.replace('e2e-results-', ''); + } + if (artifactName.startsWith('e2e-dev-results-')) { + return artifactName.replace('e2e-dev-results-', ''); + } + // Benchmark results: bench-results-{app}-{world} + if (artifactName.startsWith('bench-results-')) { + const parts = artifactName.replace('bench-results-', '').split('-'); + return parts[parts.length - 1]; // Last part is the world + } + return null; +} + +/** + * Build initial worlds status from manifest (no CI data yet) + */ +function buildInitialWorldsStatus(): Record { + const worlds: Record = {}; + + for (const world of worldsManifest.worlds) { + worlds[world.id] = { + type: world.type as 'official' | 'community', + name: world.name, + package: world.package, + description: world.description, + docs: world.docs, + repository: (world as any).repository, + e2e: null, + benchmark: null, + }; + } + + return worlds; +} + +/** + * Get worlds data with CI results + * Cached for 5 minutes to avoid hitting GitHub rate limits + */ +export const getWorldsData = unstable_cache( + async (): Promise => { + const worlds = buildInitialWorldsStatus(); + let lastUpdated = new Date().toISOString(); + let commit: string | null = null; + let branch: string | null = null; + + try { + // Get latest test and benchmark runs in parallel + const [testsRun, benchmarksRun] = await Promise.all([ + getLatestTestsRun(), + getLatestBenchmarksRun(), + ]); + + // Use the most recent run's metadata + const latestRun = testsRun || benchmarksRun; + if (latestRun) { + lastUpdated = latestRun.updated_at; + commit = latestRun.head_sha; + branch = latestRun.head_branch; + } + + // Get artifacts from both runs in parallel + const [testsArtifacts, benchmarksArtifacts] = await Promise.all([ + testsRun ? getWorkflowArtifacts(testsRun.id) : Promise.resolve([]), + benchmarksRun + ? 
getWorkflowArtifacts(benchmarksRun.id) + : Promise.resolve([]), + ]); + + // Process E2E test artifacts + // We can't download the artifact contents without auth, but we can tell + // which worlds have artifacts (meaning their tests ran) + for (const artifact of testsArtifacts) { + const worldId = extractWorldId(artifact.name); + if (worldId && worlds[worldId]) { + // Mark as having E2E data available + // The actual test results would require downloading + parsing the artifact + // For now, we'll indicate that tests exist + if (!worlds[worldId].e2e) { + worlds[worldId].e2e = { + status: 'pending' as const, + total: 0, + passed: 0, + failed: 0, + skipped: 0, + progress: 0, + lastRun: artifact.created_at, + note: 'Test results available in CI artifacts', + }; + } + } + } + + // Process benchmark artifacts similarly + for (const artifact of benchmarksArtifacts) { + const worldId = extractWorldId(artifact.name); + if (worldId && worlds[worldId]) { + if (!worlds[worldId].benchmark) { + worlds[worldId].benchmark = { + status: 'pending' as const, + metrics: null, + lastRun: artifact.created_at, + }; + } + } + } + } catch (error) { + console.error('Error fetching worlds data from GitHub:', error); + } + + return { + $schema: './worlds-status.schema.json', + lastUpdated, + commit, + branch, + worlds, + }; + }, + ['worlds-data'], + { revalidate: 300 } // Cache for 5 minutes +); + +/** + * Get worlds data with full artifact download and parsing + * This requires GITHUB_TOKEN to be set + */ +export const getWorldsDataWithArtifacts = unstable_cache( + async (): Promise => { + const token = process.env.GITHUB_TOKEN; + if (!token) { + console.warn( + 'GITHUB_TOKEN not set, returning data without artifact parsing' + ); + return getWorldsData(); + } + + // For now, return the basic data + // Full artifact parsing would require JSZip and more complex logic + return getWorldsData(); + }, + ['worlds-data-full'], + { revalidate: 300 } +); diff --git a/packages/core/e2e/bench.bench.ts b/packages/core/e2e/bench.bench.ts index ce57115a2..da0e7ad43 100644 --- a/packages/core/e2e/bench.bench.ts +++ b/packages/core/e2e/bench.bench.ts @@ -107,12 +107,18 @@ async function getWorkflowReturnValue( function getTimingOutputPath() { const appName = process.env.APP_NAME || 'unknown'; - // Detect backend type: vercel if WORKFLOW_VERCEL_ENV is set, postgres if target world includes postgres, otherwise local - const backend = process.env.WORKFLOW_VERCEL_ENV - ? 'vercel' - : process.env.WORKFLOW_TARGET_WORLD?.includes('postgres') - ? 'postgres' - : 'local'; + // Detect backend type: + // 1. WORKFLOW_BENCH_BACKEND if explicitly set (for community worlds) + // 2. vercel if WORKFLOW_VERCEL_ENV is set + // 3. postgres if target world includes postgres + // 4. local as fallback + const backend = + process.env.WORKFLOW_BENCH_BACKEND || + (process.env.WORKFLOW_VERCEL_ENV + ? 'vercel' + : process.env.WORKFLOW_TARGET_WORLD?.includes('postgres') + ? 'postgres' + : 'local'); return path.resolve( process.cwd(), `bench-timings-${appName}-${backend}.json` diff --git a/scripts/aggregate-worlds-data.mjs b/scripts/aggregate-worlds-data.mjs new file mode 100644 index 000000000..0355d521f --- /dev/null +++ b/scripts/aggregate-worlds-data.mjs @@ -0,0 +1,353 @@ +#!/usr/bin/env node + +/** + * Aggregates E2E test results and benchmark data from CI runs + * into a unified worlds-status.json for the dashboard. 
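+ *
+ * The bench-timings files are expected to expose a `summary` object keyed by benchmark name,
+ * with avg/min/maxExecutionTimeMs, samples, and (for stream benchmarks) avg/min/maxFirstByteTimeMs;
+ * see parseBenchmarkTimings below for the exact fields read.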
+ * + * Usage: + * node scripts/aggregate-worlds-data.mjs [results-dir] [--output path/to/output.json] + * + * Input files expected: + * - e2e-results-{world}.json: Vitest JSON output for E2E tests + * - bench-results-{app}-{world}.json: Vitest benchmark output + * - bench-timings-{app}-{world}.json: Custom timing data + * + * Output: + * - worlds-status.json: Combined status of all worlds + */ + +import fs from 'fs'; +import path from 'path'; +import { fileURLToPath } from 'url'; + +const __dirname = path.dirname(fileURLToPath(import.meta.url)); +const rootDir = path.join(__dirname, '..'); + +// Parse command line arguments +const args = process.argv.slice(2); +let resultsDir = '.'; +let outputPath = path.join(rootDir, 'docs/public/data/worlds-status.json'); + +for (let i = 0; i < args.length; i++) { + if (args[i] === '--output' && args[i + 1]) { + outputPath = args[i + 1]; + i++; + } else if (!args[i].startsWith('--')) { + resultsDir = args[i]; + } +} + +// Load worlds manifest +const manifestPath = path.join(rootDir, 'worlds-manifest.json'); +let manifest; +try { + manifest = JSON.parse(fs.readFileSync(manifestPath, 'utf-8')); +} catch (e) { + console.error(`Error: Could not load worlds manifest: ${e.message}`); + process.exit(1); +} + +// Get all worlds from manifest (now a flat array with type field) +function getAllWorlds() { + return manifest.worlds || []; +} + +// Find all E2E result files +function findE2EResultFiles(dir) { + const files = []; + try { + const entries = fs.readdirSync(dir, { withFileTypes: true }); + for (const entry of entries) { + const fullPath = path.join(dir, entry.name); + if (entry.isDirectory()) { + files.push(...findE2EResultFiles(fullPath)); + } else if ( + entry.name.startsWith('e2e-results-') && + entry.name.endsWith('.json') + ) { + files.push(fullPath); + } + } + } catch (e) { + // Directory may not exist + } + return files; +} + +// Find all benchmark result files +function findBenchmarkFiles(dir) { + const files = []; + try { + const entries = fs.readdirSync(dir, { withFileTypes: true }); + for (const entry of entries) { + const fullPath = path.join(dir, entry.name); + if (entry.isDirectory()) { + files.push(...findBenchmarkFiles(fullPath)); + } else if ( + entry.name.startsWith('bench-timings-') && + entry.name.endsWith('.json') + ) { + files.push(fullPath); + } + } + } catch (e) { + // Directory may not exist + } + return files; +} + +// Parse E2E result file (vitest JSON output) +function parseE2EResults(filePath) { + try { + const data = JSON.parse(fs.readFileSync(filePath, 'utf-8')); + + let total = 0; + let passed = 0; + let failed = 0; + let skipped = 0; + const tests = []; + + // Vitest JSON format + for (const file of data.testResults || []) { + for (const test of file.assertionResults || []) { + total++; + const status = test.status; + if (status === 'passed') passed++; + else if (status === 'failed') failed++; + else if (status === 'skipped' || status === 'pending') skipped++; + + tests.push({ + name: test.fullName || test.title, + status: status === 'pending' ? 
'skipped' : status, + duration: test.duration || 0, + }); + } + } + + // Alternative format (direct vitest output) + if (total === 0 && data.numTotalTests) { + total = data.numTotalTests; + passed = data.numPassedTests || 0; + failed = data.numFailedTests || 0; + skipped = (data.numPendingTests || 0) + (data.numTodoTests || 0); + } + + return { total, passed, failed, skipped, tests }; + } catch (e) { + console.error( + `Warning: Could not parse E2E results ${filePath}: ${e.message}` + ); + return null; + } +} + +// Parse benchmark timing file +function parseBenchmarkTimings(filePath) { + try { + const data = JSON.parse(fs.readFileSync(filePath, 'utf-8')); + const metrics = {}; + + if (data.summary) { + for (const [benchName, stats] of Object.entries(data.summary)) { + metrics[benchName] = { + mean: stats.avgExecutionTimeMs, + min: stats.minExecutionTimeMs, + max: stats.maxExecutionTimeMs, + samples: stats.samples, + }; + + // Add TTFB for stream benchmarks + if (stats.avgFirstByteTimeMs !== undefined) { + metrics[benchName].ttfb = { + mean: stats.avgFirstByteTimeMs, + min: stats.minFirstByteTimeMs, + max: stats.maxFirstByteTimeMs, + }; + } + } + } + + return metrics; + } catch (e) { + console.error( + `Warning: Could not parse benchmark timings ${filePath}: ${e.message}` + ); + return null; + } +} + +// Extract world ID from filename +function extractWorldFromFilename(filename, prefix) { + // e2e-results-{world}.json -> world + // bench-timings-{app}-{world}.json -> world + const basename = path.basename(filename, '.json'); + const withoutPrefix = basename.replace(prefix, ''); + + // For bench files, format is {app}-{world}, we want the last part + const parts = withoutPrefix.split('-'); + return parts[parts.length - 1]; +} + +// Aggregate all data +function aggregateWorldsData() { + const allWorlds = getAllWorlds(); + const timestamp = new Date().toISOString(); + + // Initialize worlds status + const worldsStatus = {}; + for (const world of allWorlds) { + worldsStatus[world.id] = { + type: world.type, + name: world.name, + package: world.package, + description: world.description, + docs: world.docs, + repository: world.repository, + e2e: null, + benchmark: null, + }; + } + + // Process E2E results + const e2eFiles = findE2EResultFiles(resultsDir); + for (const file of e2eFiles) { + const worldId = extractWorldFromFilename(file, 'e2e-results-'); + if (worldsStatus[worldId]) { + const results = parseE2EResults(file); + if (results) { + const progress = + results.total > 0 + ? Math.round((results.passed / results.total) * 1000) / 10 + : 0; + + worldsStatus[worldId].e2e = { + status: + results.failed === 0 + ? 'passing' + : results.passed > 0 + ? 
'partial' + : 'failing', + total: results.total, + passed: results.passed, + failed: results.failed, + skipped: results.skipped, + progress, + tests: results.tests, + lastRun: timestamp, + }; + } + } + } + + // Process benchmark results + const benchFiles = findBenchmarkFiles(resultsDir); + const benchmarksByWorld = {}; + + for (const file of benchFiles) { + const worldId = extractWorldFromFilename(file, 'bench-timings-'); + if (!benchmarksByWorld[worldId]) { + benchmarksByWorld[worldId] = {}; + } + + const metrics = parseBenchmarkTimings(file); + if (metrics) { + // Merge metrics (could have multiple apps) + Object.assign(benchmarksByWorld[worldId], metrics); + } + } + + // Assign benchmarks to worlds + for (const [worldId, metrics] of Object.entries(benchmarksByWorld)) { + if (worldsStatus[worldId]) { + worldsStatus[worldId].benchmark = { + status: Object.keys(metrics).length > 0 ? 'measured' : 'pending', + metrics, + lastRun: timestamp, + }; + } + } + + return { + $schema: './worlds-status.schema.json', + lastUpdated: timestamp, + commit: process.env.GITHUB_SHA || null, + branch: process.env.GITHUB_REF_NAME || null, + worlds: worldsStatus, + }; +} + +// Generate test matrix (detailed per-test breakdown) +function generateTestMatrix(worldsStatus) { + const allTests = new Map(); // testName -> { world -> status } + + // Collect all tests from all worlds + for (const [worldId, world] of Object.entries(worldsStatus)) { + if (world.e2e?.tests) { + for (const test of world.e2e.tests) { + if (!allTests.has(test.name)) { + allTests.set(test.name, {}); + } + allTests.get(test.name)[worldId] = test.status; + } + } + } + + // Convert to array format + const tests = []; + for (const [name, results] of allTests) { + tests.push({ name, results }); + } + + // Sort by test name + tests.sort((a, b) => a.name.localeCompare(b.name)); + + return { + lastUpdated: new Date().toISOString(), + tests, + }; +} + +// Main +console.log('Aggregating worlds data...'); +console.log(` Results directory: ${resultsDir}`); +console.log(` Output path: ${outputPath}`); + +const status = aggregateWorldsData(); +const testMatrix = generateTestMatrix(status.worlds); + +// Ensure output directory exists +const outputDir = path.dirname(outputPath); +if (!fs.existsSync(outputDir)) { + fs.mkdirSync(outputDir, { recursive: true }); +} + +// Write worlds-status.json +fs.writeFileSync(outputPath, JSON.stringify(status, null, 2)); +console.log(`\nGenerated ${outputPath}`); + +// Write test-matrix.json +const testMatrixPath = path.join(path.dirname(outputPath), 'test-matrix.json'); +fs.writeFileSync(testMatrixPath, JSON.stringify(testMatrix, null, 2)); +console.log(`Generated ${testMatrixPath}`); + +// Summary +const worldCount = Object.keys(status.worlds).length; +const withE2E = Object.values(status.worlds).filter((w) => w.e2e).length; +const withBenchmarks = Object.values(status.worlds).filter( + (w) => w.benchmark +).length; + +console.log(`\nSummary:`); +console.log(` Total worlds: ${worldCount}`); +console.log(` With E2E data: ${withE2E}`); +console.log(` With benchmark data: ${withBenchmarks}`); + +for (const [id, world] of Object.entries(status.worlds)) { + const e2eStatus = world.e2e + ? `${world.e2e.passed}/${world.e2e.total} (${world.e2e.progress}%)` + : 'no data'; + const benchStatus = world.benchmark + ? 
`${Object.keys(world.benchmark.metrics).length} benchmarks` + : 'no data'; + console.log(` - ${world.name}: E2E ${e2eStatus}, Benchmarks ${benchStatus}`); +} diff --git a/scripts/create-community-worlds-matrix.mjs b/scripts/create-community-worlds-matrix.mjs new file mode 100644 index 000000000..de156bf5d --- /dev/null +++ b/scripts/create-community-worlds-matrix.mjs @@ -0,0 +1,71 @@ +#!/usr/bin/env node + +/** + * Generates a GitHub Actions matrix for community world testing. + * Reads from worlds-manifest.json and filters to testable community worlds. + * + * Usage: node scripts/create-community-worlds-matrix.mjs + * + * Output format (JSON): + * { + * "world": [ + * { + * "id": "starter", + * "name": "Starter", + * "package": "@workflow-worlds/starter", + * "service-type": "none", + * "env-vars": "{\"WORKFLOW_TARGET_WORLD\":\"@workflow-worlds/starter\"}" + * }, + * ... + * ] + * } + */ + +import fs from 'fs'; +import path from 'path'; +import { fileURLToPath } from 'url'; + +const __dirname = path.dirname(fileURLToPath(import.meta.url)); +const rootDir = path.join(__dirname, '..'); + +// Read the manifest +const manifestPath = path.join(rootDir, 'worlds-manifest.json'); +const manifest = JSON.parse(fs.readFileSync(manifestPath, 'utf-8')); + +// Filter to community worlds that can be tested in CI +const testableWorlds = manifest.worlds.filter((world) => { + // Only community worlds + if (world.type !== 'community') return false; + + // Skip worlds that require external credentials (e.g., Jazz needs API keys) + if (world.requiresCredentials) return false; + + return true; +}); + +// Build the matrix +const matrix = { + world: testableWorlds.map((world) => { + // Determine service type based on services array + let serviceType = 'none'; + if (world.services && world.services.length > 0) { + // Use the first service's name as the service type + // Currently supports: mongodb, redis + const serviceName = world.services[0].name; + if (['mongodb', 'redis'].includes(serviceName)) { + serviceType = serviceName; + } + } + + return { + id: world.id, + name: world.name, + package: world.package, + 'service-type': serviceType, + 'env-vars': JSON.stringify(world.env || {}), + }; + }), +}; + +// Output JSON for GitHub Actions +console.log(JSON.stringify(matrix)); diff --git a/worlds-manifest.json b/worlds-manifest.json new file mode 100644 index 000000000..99f996782 --- /dev/null +++ b/worlds-manifest.json @@ -0,0 +1,153 @@ +{ + "worlds": [ + { + "id": "local", + "type": "official", + "package": "@workflow/world-local", + "name": "Local", + "description": "Filesystem-based world for local development and testing", + "docs": "/docs/deploying/world/local-world", + "env": {}, + "services": [] + }, + { + "id": "postgres", + "type": "official", + "package": "@workflow/world-postgres", + "name": "Postgres", + "description": "PostgreSQL-based world for multi-host deployments", + "docs": "/docs/deploying/world/postgres-world", + "env": { + "WORKFLOW_TARGET_WORLD": "@workflow/world-postgres", + "WORKFLOW_POSTGRES_URL": "postgres://world:world@localhost:5432/world" + }, + "services": [ + { + "name": "postgres", + "image": "postgres:18-alpine", + "ports": ["5432:5432"], + "env": { + "POSTGRES_USER": "world", + "POSTGRES_PASSWORD": "world", + "POSTGRES_DB": "world" + }, + "healthCheck": { + "cmd": "pg_isready", + "interval": "10s", + "timeout": "5s", + "retries": 5 + } + } + ], + "setup": "./packages/world-postgres/bin/setup.js" + }, + { + "id": "vercel", + "type": "official", + "package": 
"@workflow/world-vercel", + "name": "Vercel", + "description": "Production-ready world for Vercel platform deployments", + "docs": "/docs/deploying/world/vercel-world", + "env": { + "WORKFLOW_VERCEL_ENV": "production" + }, + "services": [], + "requiresDeployment": true + }, + { + "id": "starter", + "type": "community", + "package": "@workflow-worlds/starter", + "name": "Starter", + "description": "Starter template for building Workflow DevKit Worlds", + "repository": "https://github.com/mizzle-dev/workflow-worlds", + "docs": "https://github.com/mizzle-dev/workflow-worlds/tree/main/packages/starter", + "env": { + "WORKFLOW_TARGET_WORLD": "@workflow-worlds/starter" + }, + "services": [] + }, + { + "id": "turso", + "type": "community", + "package": "@workflow-worlds/turso", + "name": "Turso", + "description": "Turso/libSQL World for embedded or remote SQLite databases", + "repository": "https://github.com/mizzle-dev/workflow-worlds", + "docs": "https://github.com/mizzle-dev/workflow-worlds/tree/main/packages/turso", + "env": { + "WORKFLOW_TARGET_WORLD": "@workflow-worlds/turso", + "WORKFLOW_TURSO_DATABASE_URL": "file:workflow.db" + }, + "services": [] + }, + { + "id": "mongodb", + "type": "community", + "package": "@workflow-worlds/mongodb", + "name": "MongoDB", + "description": "MongoDB World using native driver", + "repository": "https://github.com/mizzle-dev/workflow-worlds", + "docs": "https://github.com/mizzle-dev/workflow-worlds/tree/main/packages/mongodb", + "env": { + "WORKFLOW_TARGET_WORLD": "@workflow-worlds/mongodb", + "WORKFLOW_MONGODB_URI": "mongodb://localhost:27017", + "WORKFLOW_MONGODB_DATABASE_NAME": "workflow" + }, + "services": [ + { + "name": "mongodb", + "image": "mongo:7", + "ports": ["27017:27017"], + "healthCheck": { + "cmd": "mongosh --eval 'db.runCommand({ ping: 1 })'", + "interval": "10s", + "timeout": "5s", + "retries": 5 + } + } + ] + }, + { + "id": "redis", + "type": "community", + "package": "@workflow-worlds/redis", + "name": "Redis", + "description": "Redis World using BullMQ for queues, Redis Streams for output", + "repository": "https://github.com/mizzle-dev/workflow-worlds", + "docs": "https://github.com/mizzle-dev/workflow-worlds/tree/main/packages/redis", + "env": { + "WORKFLOW_TARGET_WORLD": "@workflow-worlds/redis", + "WORKFLOW_REDIS_URI": "redis://localhost:6379" + }, + "services": [ + { + "name": "redis", + "image": "redis:7-alpine", + "ports": ["6379:6379"], + "healthCheck": { + "cmd": "redis-cli ping", + "interval": "10s", + "timeout": "5s", + "retries": 5 + } + } + ] + }, + { + "id": "jazz", + "type": "community", + "package": "workflow-world-jazz", + "name": "Jazz", + "description": "Jazz Cloud world for local-first sync and real-time collaboration", + "repository": "https://github.com/garden-co/workflow-world-jazz", + "docs": "https://github.com/garden-co/workflow-world-jazz", + "env": { + "WORKFLOW_TARGET_WORLD": "workflow-world-jazz" + }, + "services": [], + "requiresCredentials": true, + "credentialsNote": "Requires JAZZ_API_KEY, JAZZ_WORKER_ACCOUNT, and JAZZ_WORKER_SECRET from Jazz Cloud" + } + ] +}