diff --git a/.datarc.json b/.datarc.json index c9997fe..f2bfed9 100644 --- a/.datarc.json +++ b/.datarc.json @@ -23,4 +23,4 @@ "theme": "mountain", "reporter": "ink" } -} \ No newline at end of file +} diff --git a/.eslintrc.json b/.eslintrc.json index 6f82a49..d8c0719 100644 --- a/.eslintrc.json +++ b/.eslintrc.json @@ -19,33 +19,33 @@ "@typescript-eslint/no-floating-promises": "error", "@typescript-eslint/no-misused-promises": "error", "@typescript-eslint/await-thenable": "error", - + // Promise plugin rules "promise/catch-or-return": "error", "promise/no-return-wrap": "error", "promise/param-names": "error", "promise/always-return": "error", - + // Require await in async functions "require-await": "error", - + // Additional async/await rules "no-async-promise-executor": "error", "no-await-in-loop": "warn", "no-return-await": "error", "prefer-promise-reject-errors": "error", - + // General best practices "no-unused-vars": "off", - "@typescript-eslint/no-unused-vars": ["error", { "argsIgnorePattern": "^_" }], + "@typescript-eslint/no-unused-vars": [ + "error", + { "argsIgnorePattern": "^_" } + ], "no-console": "off", "semi": ["error", "always"], "quotes": ["error", "single", { "avoidEscape": true }] }, - "plugins": [ - "@typescript-eslint", - "promise" - ], + "plugins": ["@typescript-eslint", "promise"], "overrides": [ { "files": ["*.js"], @@ -55,4 +55,4 @@ } } ] -} \ No newline at end of file +} diff --git a/CLAUDE.md b/CLAUDE.md index 9454170..62ad3bf 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -17,6 +17,7 @@ D.A.T.A. (Database Automation, Testing, and Alignment) is a CLI tool for managin ## Common Development Commands ### Running Tests + ```bash # Run all Vitest tests npm test @@ -29,6 +30,7 @@ npm run test:coverage ``` ### Database Migration Workflow + ```bash # 1. Generate migration from SQL changes npm run migrate:generate # or: data db migrate generate --name @@ -47,6 +49,7 @@ npm run migrate:rollback # or: data db migrate rollback --prod ``` ### Building and Compiling + ```bash # Compile SQL sources into migration data db compile @@ -58,24 +61,28 @@ data db compile --deploy-functions ## Architecture ### Code Organization Rules + - **One Class Per File**: Each file must contain exactly one class. The filename must match the class name. - **Self-Documenting Names**: Each artifact should describe its contents based on its filename. - **No Multi-Class Files**: If a file contains multiple classes, it must be refactored immediately. ### Command Class Hierarchy + - **Command** (src/lib/Command.js): Base class with event emission and logging - **SupabaseCommand**: Commands using Supabase API - **DatabaseCommand**: Direct database access commands - **TestCommand**: Testing-related commands All commands follow an event-driven pattern: + ```javascript -command.emit('progress', { message: 'Processing...' }); -command.emit('success', { message: 'Complete!' }); -command.emit('error', { message: 'Failed', error }); +command.emit("progress", { message: "Processing..." }); +command.emit("success", { message: "Complete!" }); +command.emit("error", { message: "Failed", error }); ``` ### Directory Structure + - **src/commands/**: Command implementations organized by domain (db/, functions/, test/) - **src/lib/**: Core libraries and base classes - **src/reporters/**: Output formatters (CliReporter) @@ -85,13 +92,17 @@ command.emit('error', { message: 'Failed', error }); - **functions/**: Supabase Edge Functions ### Path Configuration + Paths can be configured via: + 1. 
Command-line options: `--sql-dir`, `--tests-dir`, `--migrations-dir` 2. Environment variables: `data_SQL_DIR`, `data_TESTS_DIR`, `data_MIGRATIONS_DIR` 3. Configuration file: `.datarc.json` ### Configuration System + Configuration is loaded from `.datarc.json` with the following structure: + ```json { "test": { @@ -110,19 +121,22 @@ Configuration is loaded from `.datarc.json` with the following structure: ## Important Patterns ### Production Safety + - All production commands require explicit `--prod` flag - Destructive operations require typed confirmation - Commands wrap operations in transactions where supported - Process management includes zombie prevention and cleanup ### Error Handling + - Custom error types in `src/lib/dataError/` - Commands should emit error events before throwing - Process exit codes are handled by CliReporter ### Testing Strategy -- Unit tests use Vitest (test/*.test.js) -- Database tests use pgTAP (tests/*.sql) + +- Unit tests use Vitest (test/\*.test.js) +- Database tests use pgTAP (tests/\*.sql) - Test commands support multiple output formats (console, JUnit, JSON) - Coverage enforcement configurable via .datarc.json @@ -151,18 +165,21 @@ data_MIGRATIONS_DIR=./migrations ## Development Notes ### Adding New Commands + 1. Extend appropriate base class (Command, DatabaseCommand, etc.) 2. Implement `performExecute()` method 3. Emit appropriate events for progress tracking 4. Register in src/index.js with commander ### Working with Migrations + - Migrations include metadata.json with tracking info - Use MigrationMetadata class for parsing/validation -- Test migrations run in isolated schemas (@data.tests.*) +- Test migrations run in isolated schemas (@data.tests.\*) - Production migrations require double confirmation ### Edge Functions Integration + - Functions can be deployed with migrations via `--deploy-functions` - Validation happens before deployment - Production deployments require import maps unless `--skip-import-map` @@ -170,7 +187,9 @@ data_MIGRATIONS_DIR=./migrations ## Troubleshooting ### Compile Command Issues + If `data db compile` exits with no error output: + - Ensure SQL source directory exists (default: ./sql) - Use `--sql-dir` and `--migrations-dir` to specify custom paths - The compile command now properly displays errors for missing directories @@ -192,12 +211,14 @@ npm run postinstall # Manually re-install git hooks if needed ``` #### Git Pre-commit Hook + - Automatically runs ESLint on staged JavaScript files - Prevents commits with linting errors - Checks for floating promises and async issues - Bypass with `git commit --no-verify` (use sparingly!) #### ESLint Rules Enforced + - `require-await`: Async functions must use await - `promise/catch-or-return`: Promises must be handled - `promise/always-return`: Promise chains must return values @@ -206,6 +227,7 @@ npm run postinstall # Manually re-install git hooks if needed For TypeScript projects, use `@typescript-eslint/no-floating-promises` to catch unawaited async calls. 
### Recent Fixes + - Fixed error handling in CompileCommand constructor to properly display errors - Added `isProd` property to start event emissions -- Fixed MigrationCompiler config property naming (sqlDir vs rootDir) \ No newline at end of file +- Fixed MigrationCompiler config property naming (sqlDir vs rootDir) diff --git a/README.md b/README.md index 857aa00..1e700df 100644 --- a/README.md +++ b/README.md @@ -17,7 +17,7 @@ DATA generates deterministic migrations from your SQL source files and blocks un ``` > [!warning]- **Work in Progress** -> This project is actively in pre-release, ***use at your own risk!*** +> This project is actively in pre-release, **_use at your own risk!_** > **See** [/issues/README.md](the issues README) to get a sense of current progress. ## How It Works @@ -63,10 +63,10 @@ $ data align production # 2. Organize your SQL in named directories /sql/ extensions/ # PostgreSQL extensions - tables/ # Table definitions + tables/ # Table definitions policies/ # RLS policies # etc. - + # 3. The DATA workflow data automate # Generate migration plan data test # Run your test suite @@ -80,30 +80,30 @@ data align production --confirm # Deploy to prod (with confirmation) DATA **blocks production deployments** unless: - ✅ **Clean repo** (no uncommitted/untracked files) -- ✅ **Up-to-date** (not behind origin/main) +- ✅ **Up-to-date** (not behind origin/main) - ✅ **Correct branch** (main, configurable) - ✅ **Tests passing** (100% required, configurable) -Large changes require typing confirmation. *"Proceeding without corrections would be... illogical."* +Large changes require typing confirmation. _"Proceeding without corrections would be... illogical."_ ## Core Commands -| Command | Purpose | -|---------|---------| -| `data status` | Show current state vs environments | -| `data automate` | Generate migration plan from SQL | -| `data test` | Run test suite (required for prod) | -| `data promote` | Tag the tested release | -| `data align ` | Deploy to environment (🔐 **gated**) | -| `data rollback --to-tag ` | Revert to any previous tag | -| `data analyze` | Detect drift between repo and DB | +| Command | Purpose | +| ------------------------------ | ------------------------------------ | +| `data status` | Show current state vs environments | +| `data automate` | Generate migration plan from SQL | +| `data test` | Run test suite (required for prod) | +| `data promote` | Tag the tested release | +| `data align ` | Deploy to environment (🔐 **gated**) | +| `data rollback --to-tag ` | Revert to any previous tag | +| `data analyze` | Detect drift between repo and DB | ## Git-First Deployments Every deployment creates an immutable git tag. Rollbacks are exact and boring: ```bash -# Deploy creates tags automatically +# Deploy creates tags automatically data align production # → Creates: data/prod/2025.241.1430 @@ -118,13 +118,13 @@ data status ## Why DATA vs Others? -| Feature | DATA | Flyway | Liquibase | Supabase CLI | -|---------|------|--------|-----------|--------------| -| **Golden SQL** | ✅ Git-native | ❌ Hand-written migrations | ❌ Changelog format | ❌ Hand-written | -| **Deterministic** | ✅ Pure git diff | ⚠️ DB introspection | ⚠️ DB introspection | ❌ Manual | -| **Production gates** | ✅ Non-negotiable | ⚠️ Optional | ⚠️ Optional | ❌ None | -| **Rollback** | ✅ Tag-based | ⚠️ Down scripts | ⚠️ Manual tags | ❌ Manual | -| **Personality** | 🖖 Lt. 
Commander Data | 😐 | 😐 | 😐 | +| Feature | DATA | Flyway | Liquibase | Supabase CLI | +| -------------------- | --------------------- | -------------------------- | ------------------- | --------------- | +| **Golden SQL** | ✅ Git-native | ❌ Hand-written migrations | ❌ Changelog format | ❌ Hand-written | +| **Deterministic** | ✅ Pure git diff | ⚠️ DB introspection | ⚠️ DB introspection | ❌ Manual | +| **Production gates** | ✅ Non-negotiable | ⚠️ Optional | ⚠️ Optional | ❌ None | +| **Rollback** | ✅ Tag-based | ⚠️ Down scripts | ⚠️ Manual tags | ❌ Manual | +| **Personality** | 🖖 Lt. Commander Data | 😐 | 😐 | 😐 | ## Example: Safety Gates in Action @@ -134,10 +134,10 @@ $ data align production 🔴 RED ALERT: Working directory not clean Modified: sql/tables/users.sql - + Commander, your working directory contains uncommitted changes. Probability of catastrophic failure: 87.3% - + Recommended action: git commit or git stash # After fixing @@ -145,15 +145,15 @@ $ data align production ✅ All safety checks passed - Repository: clean ✅ - - Branch: main (approved) ✅ + - Branch: main (approved) ✅ - Tests: 147/147 passing (100%) ✅ - + Migration preview: + CREATE TABLE crew_evaluations + ALTER TABLE users ADD COLUMN shore_leave_balance - + Type 'ENGAGE' to proceed: ENGAGE - + Deployment successful. "Make it so" achieved. ``` @@ -168,7 +168,7 @@ The `/sql/` directory is where your "golden sql" files should live. You must sor ``` /sql/ extensions/ # PostgreSQL extensions - tables/ # Tables and relationships + tables/ # Tables and relationships functions/ # Stored procedures policies/ # RLS policies indexes/ # Performance indexes @@ -191,7 +191,7 @@ The `/sql/` directory is where your "golden sql" files should live. You must sor "minimum_coverage": 95, "enforce": true }, - "personality": "android" // android | quiet | tng + "personality": "android" // android | quiet | tng } ``` @@ -204,8 +204,8 @@ The `/sql/` directory is where your "golden sql" files should live. You must sor data automate data test data promote - -- name: Deploy to Production + +- name: Deploy to Production if: github.ref == 'refs/heads/main' run: data align production --confirm ``` @@ -230,11 +230,11 @@ npm install -g @starfleet/data ## Troubleshooting -| Problem | Solution | -|---------|----------| -| "Working directory not clean" | `git commit` or `git stash` | -| "Behind origin/main" | `git pull origin main` | -| "Tests failing" | Fix tests, DATA won't deploy broken code | +| Problem | Solution | +| ----------------------------- | ---------------------------------------- | +| "Working directory not clean" | `git commit` or `git stash` | +| "Behind origin/main" | `git pull origin main` | +| "Tests failing" | Fix tests, DATA won't deploy broken code | ## The Philosophy @@ -242,13 +242,13 @@ npm install -g @starfleet/data DATA enforces bulletproof deployments through non-negotiable safety gates. This isn't about restricting developers—it's about giving them confidence. When DATA approves your deployment, you can sleep soundly. -*"In my observations of human behavior, I have noticed that engineers sleep better when their deployments cannot accidentally destroy everything."* — Lt. Commander Data +_"In my observations of human behavior, I have noticed that engineers sleep better when their deployments cannot accidentally destroy everything."_ — Lt. Commander Data --- **Live long and prosper.** 🖖 -*"Spot has been fed. Database operations may proceed."* +_"Spot has been fed. 
Database operations may proceed."_ ## Contributing diff --git a/bin/data.js b/bin/data.js index 0cf23b8..cc6e48c 100755 --- a/bin/data.js +++ b/bin/data.js @@ -2,28 +2,37 @@ /** * D.A.T.A. CLI - Database Automation, Testing, and Alignment - * + * * 🖖 "Computer, prepare for database operations." * Provides safe, powerful database management for local and production environments */ -// Enable better error messages -process.on('unhandledRejection', (err) => { - console.error('Unhandled promise rejection:', err); - process.exit(1); +// Typed testing error handler (instanceof → exit codes + structured logs) +const { handleTestingError } = require("../src/lib/testing/handleTestingError"); + +// Route all top-level failures through the typed handler +process.on("unhandledRejection", (err) => { + handleTestingError(err, console); + process.exit(process.exitCode ?? 1); +}); + +process.on("uncaughtException", (err) => { + handleTestingError(err, console); + process.exit(process.exitCode ?? 1); }); // Load environment variables -require('dotenv').config(); +require("dotenv").config(); // Import the main CLI -const { cli } = require('../src/index'); +const { cli } = require("../src/index"); -// Run the CLI with process arguments -cli(process.argv).catch(error => { - console.error('Fatal error:', error.message); - if (process.env.DEBUG) { - console.error(error.stack); +// Run the CLI with process arguments (typed-error aware) +(async () => { + try { + await cli(process.argv); + } catch (err) { + handleTestingError(err, console); + process.exit(process.exitCode ?? 1); } - process.exit(1); -}); \ No newline at end of file +})(); diff --git a/datarc.schema.json b/datarc.schema.json index 2d709b4..004e61e 100644 --- a/datarc.schema.json +++ b/datarc.schema.json @@ -239,4 +239,4 @@ } }, "additionalProperties": false -} \ No newline at end of file +} diff --git a/demo/tui.js b/demo/tui.js index fe642d5..40d74f2 100644 --- a/demo/tui.js +++ b/demo/tui.js @@ -1,81 +1,93 @@ #!/usr/bin/env node -const blessed = require('blessed'); -const contrib = require('blessed-contrib'); -const chalk = require('chalk'); +const blessed = require("blessed"); +const contrib = require("blessed-contrib"); +const chalk = require("chalk"); // ===== LCARS theme ===== const LCARS = { - bg: '#000000', - text: '#e6e6e6', + bg: "#000000", + text: "#e6e6e6", // palette blocks (TNG LCARS-esque) - amber: '#FF9F3B', - pumpkin: '#E67E22', - sand: '#FFCC66', - grape: '#B98AC9', - teal: '#72C9BE', - mint: '#9ED9CF', - red: '#FF5757', - kiwi: '#B5D33D', - steel: '#3A3F44', + amber: "#FF9F3B", + pumpkin: "#E67E22", + sand: "#FFCC66", + grape: "#B98AC9", + teal: "#72C9BE", + mint: "#9ED9CF", + red: "#FF5757", + kiwi: "#B5D33D", + steel: "#3A3F44", }; function pill(txt, ok = true) { const c = ok ? LCARS.kiwi : LCARS.red; - const t = ok ? ' OK ' : ' FAIL '; + const t = ok ? 
" OK " : " FAIL "; return `{black-fg}{${c}-bg} ${txt} ${t}{/}`; } // ===== Screen ===== const screen = blessed.screen({ smartCSR: true, - title: 'DATA — Database Automation, Testing, and Alignment', + title: "DATA — Database Automation, Testing, and Alignment", fullUnicode: true, }); -screen.key(['q', 'C-c'], () => process.exit(0)); +screen.key(["q", "C-c"], () => process.exit(0)); // ===== Grid layout ===== const grid = new contrib.grid({ rows: 12, cols: 12, screen }); // ===== Header (LCARS bands) ===== const header = blessed.box({ - top: 0, left: 0, width: '100%', height: 3, + top: 0, + left: 0, + width: "100%", + height: 3, style: { bg: LCARS.bg, fg: LCARS.text }, }); screen.append(header); const bands = [ - { left: 0, width: '25%', color: LCARS.amber, label: 'DATA' }, - { left: '25%', width: '20%', color: LCARS.grape, label: 'AUTOMATION' }, - { left: '45%', width: '20%', color: LCARS.teal, label: 'TESTING' }, - { left: '65%', width: '20%', color: LCARS.sand, label: 'ALIGNMENT' }, - { left: '85%', width: '15%', color: LCARS.pumpkin, label: 'BRIDGE' }, + { left: 0, width: "25%", color: LCARS.amber, label: "DATA" }, + { left: "25%", width: "20%", color: LCARS.grape, label: "AUTOMATION" }, + { left: "45%", width: "20%", color: LCARS.teal, label: "TESTING" }, + { left: "65%", width: "20%", color: LCARS.sand, label: "ALIGNMENT" }, + { left: "85%", width: "15%", color: LCARS.pumpkin, label: "BRIDGE" }, ]; -bands.forEach(b => { +bands.forEach((b) => { const box = blessed.box({ parent: header, - top: 0, left: b.left, width: b.width, height: 3, + top: 0, + left: b.left, + width: b.width, + height: 3, tags: true, content: ` {bold}${b.label}{/bold} `, - style: { bg: b.color, fg: 'black' }, + style: { bg: b.color, fg: "black" }, }); return box; }); // ===== Left column: Ops stack ===== const opsBox = grid.set(3, 0, 9, 3, blessed.box, { - label: ' OPS ', + label: " OPS ", tags: true, style: { border: { fg: LCARS.amber }, fg: LCARS.text, bg: LCARS.bg }, - border: { type: 'line' }, + border: { type: "line" }, }); const opsList = blessed.list({ parent: opsBox, - top: 1, left: 1, width: '95%', height: '95%', - tags: true, keys: false, mouse: false, vi: false, + top: 1, + left: 1, + width: "95%", + height: "95%", + tags: true, + keys: false, + mouse: false, + vi: false, style: { - selected: { bg: LCARS.grape, fg: 'black' }, + selected: { bg: LCARS.grape, fg: "black" }, item: { fg: LCARS.text }, }, items: [], @@ -83,40 +95,48 @@ const opsList = blessed.list({ // ===== Center: Telemetry & Log ===== const planBox = grid.set(3, 3, 5, 5, blessed.box, { - label: ' PLAN PREVIEW ', + label: " PLAN PREVIEW ", tags: true, style: { border: { fg: LCARS.teal }, fg: LCARS.text, bg: LCARS.bg }, - border: { type: 'line' }, - content: '', + border: { type: "line" }, + content: "", }); const logBox = grid.set(8, 3, 4, 5, contrib.log, { - label: ' SHIP LOG ', - fg: LCARS.text, selectedFg: 'white', - border: { type: 'line', fg: LCARS.sand }, + label: " SHIP LOG ", + fg: LCARS.text, + selectedFg: "white", + border: { type: "line", fg: LCARS.sand }, }); // ===== Right column: Checks ===== const checksBox = grid.set(3, 8, 9, 4, blessed.box, { - label: ' PROTOCOL CHECKS ', + label: " PROTOCOL CHECKS ", tags: true, - border: { type: 'line' }, + border: { type: "line" }, style: { border: { fg: LCARS.grape }, fg: LCARS.text, bg: LCARS.bg }, }); const checks = blessed.box({ parent: checksBox, - top: 1, left: 1, width: '95%', height: '95%', + top: 1, + left: 1, + width: "95%", + height: "95%", tags: true, - content: '', + content: 
"", }); // ===== Footer (help) ===== const footer = blessed.box({ - bottom: 0, left: 0, width: '100%', height: 1, + bottom: 0, + left: 0, + width: "100%", + height: 1, tags: true, style: { bg: LCARS.steel, fg: LCARS.text }, - content: ' {bold}q{/bold} quit {bold}t{/bold} toggle tests {bold}d{/bold} drift {bold}p{/bold} plan {bold}y{/bold} align-prod', + content: + " {bold}q{/bold} quit {bold}t{/bold} toggle tests {bold}d{/bold} drift {bold}p{/bold} plan {bold}y{/bold} align-prod", }); screen.append(footer); @@ -127,27 +147,27 @@ let counter = 0; function renderChecks() { const lines = [ - `${pill('Git clean', true)} ${pill('On main', true)}`, - `${pill('Up-to-date', true)} ${pill('Tag policy', true)}`, - `${pill('Tests', testsPassing)} ${pill('Drift', !drift)}`, + `${pill("Git clean", true)} ${pill("On main", true)}`, + `${pill("Up-to-date", true)} ${pill("Tag policy", true)}`, + `${pill("Tests", testsPassing)} ${pill("Drift", !drift)}`, ]; - checks.setContent(lines.join('\n\n')); + checks.setContent(lines.join("\n\n")); } function renderOps() { opsList.setItems([ - `{bold}${chalk.hex(LCARS.amber)('AUTOMATION')}{/bold}`, - ` Golden SQL: {bold}${drift ? 'ahead by 3' : 'in sync'}{/bold}`, + `{bold}${chalk.hex(LCARS.amber)("AUTOMATION")}{/bold}`, + ` Golden SQL: {bold}${drift ? "ahead by 3" : "in sync"}{/bold}`, ` Migrations: ${counter} generated`, - '', - `{bold}${chalk.hex(LCARS.teal)('TESTING')}{/bold}`, - ` Suite: ${testsPassing ? '42/42 passing' : '3 failing'}`, + "", + `{bold}${chalk.hex(LCARS.teal)("TESTING")}{/bold}`, + ` Suite: ${testsPassing ? "42/42 passing" : "3 failing"}`, ` Coverage: 98.7%`, - '', - `{bold}${chalk.hex(LCARS.sand)('ALIGNMENT')}{/bold}`, + "", + `{bold}${chalk.hex(LCARS.sand)("ALIGNMENT")}{/bold}`, ` prod: aligned`, ` staging: aligned`, - ` dev: ${drift ? '3 commits ahead' : 'aligned'}`, + ` dev: ${drift ? "3 commits ahead" : "aligned"}`, ]); } @@ -170,41 +190,44 @@ function renderAll() { } // ===== Keybindings ===== -screen.key('t', () => { +screen.key("t", () => { testsPassing = !testsPassing; - log(testsPassing - ? 'GEORDI: Diagnostics clean. Engines ready.' - : 'WORF: We must not proceed. Tests have failed.'); + log( + testsPassing + ? "GEORDI: Diagnostics clean. Engines ready." + : "WORF: We must not proceed. Tests have failed.", + ); renderAll(); }); -screen.key('d', () => { +screen.key("d", () => { drift = !drift; - log(drift ? 'TROI: I sense… inconsistencies.' : 'DATA: Alignment restored.'); + log(drift ? "TROI: I sense… inconsistencies." : "DATA: Alignment restored."); renderAll(); }); -screen.key('p', () => { - log('DATA: Computing plan preview…'); +screen.key("p", () => { + log("DATA: Computing plan preview…"); renderPlan(); renderAll(); }); -screen.key('y', () => { +screen.key("y", () => { if (!testsPassing) { - log('COMPUTER: Alignment prohibited. Tests not passing.'); + log("COMPUTER: Alignment prohibited. Tests not passing."); } else if (drift) { - log('DATA: Applying migrations until environment matches golden source…'); - drift = false; counter++; + log("DATA: Applying migrations until environment matches golden source…"); + drift = false; + counter++; setTimeout(() => { - log('PICARD: Make it so.'); + log("PICARD: Make it so."); renderAll(); }, 300); } else { - log('DATA: No changes to apply.'); + log("DATA: No changes to apply."); } }); // ===== Kickoff ===== -log('🖖 I am Data. Database Automation, Testing, and Alignment.'); -renderAll(); \ No newline at end of file +log("🖖 I am Data. 
Database Automation, Testing, and Alignment."); +renderAll(); diff --git a/docs/README.md b/docs/README.md index 955d94a..d0de52b 100644 --- a/docs/README.md +++ b/docs/README.md @@ -1,6 +1,6 @@ # D.A.T.A. Documentation Map -> *"An efficient filing system is essential to the proper functioning of any starship."* +> _"An efficient filing system is essential to the proper functioning of any starship."_ > — Lt. Commander Data Welcome to the D.A.T.A. (Database Automation, Testing, and Alignment) documentation. This Map of Content (MoC) provides a structured overview of all available documentation. @@ -8,6 +8,7 @@ Welcome to the D.A.T.A. (Database Automation, Testing, and Alignment) documentat ## 📚 Documentation Structure ### 🚀 [Features](/docs/features/) + User-facing feature documentation and guides - **[Edge Functions Integration](features/edge-functions.md)** - Deploy and manage Supabase Edge Functions alongside migrations @@ -16,6 +17,7 @@ User-facing feature documentation and guides - Production safety features ### ⚙️ [Configuration](/docs/configuration/) + How to configure D.A.T.A. for your project - **[Testing Configuration](configuration/testing.md)** - Configure test execution, coverage, and automation @@ -24,6 +26,7 @@ How to configure D.A.T.A. for your project - Watch mode and auto-compilation settings ### 🔮 [Roadmap](/docs/roadmap/) + Future plans and vision for D.A.T.A. - **[Ideas and Future Features](roadmap/ideas-and-future.md)** - The grand vision for D.A.T.A.'s evolution @@ -32,6 +35,7 @@ Future plans and vision for D.A.T.A. - AI-assisted migration intelligence ### 🔧 [Technical](/docs/technical/) + Implementation details and architecture documentation - **[Memory Management](technical/memory-management.md)** - How D.A.T.A. handles large test suites @@ -45,6 +49,7 @@ Implementation details and architecture documentation - Migration generation ### 🎯 [Decisions](/docs/decisions/) + Architecture Decision Records (ADRs) - **[CLI Framework](decisions/cli-framework.md)** - Why Commander.js was chosen @@ -52,6 +57,7 @@ Architecture Decision Records (ADRs) - **[Testing Strategy](decisions/testing-strategy.md)** - pgTAP and Vitest integration ### 📋 [Tasks](/docs/TASKS/) + Task management and project tracking - **[System Tasks](TASKS/system.md)** - Core system improvements and features @@ -59,6 +65,7 @@ Task management and project tracking - **[Migration Tasks](TASKS/migration.md)** - Migration system enhancements ### 🔍 [Audits](/docs/audits/) + Code quality and security audits - Repository structure audits @@ -66,6 +73,7 @@ Code quality and security audits - Performance analysis reports ### 👀 [Code Reviews](/docs/code-reviews/) + Code review templates and guidelines - Review checklists @@ -73,6 +81,7 @@ Code review templates and guidelines - Common patterns and anti-patterns ### 🖖 [Fun](/docs/fun/) + Star Trek references and easter eggs - **[Bridge Crew Personalities](fun/personalities.md)** - Different personality modes for D.A.T.A. @@ -82,17 +91,20 @@ Star Trek references and easter eggs ## 🗺️ Quick Navigation Guide ### For New Users + 1. Start with [Edge Functions Integration](features/edge-functions.md) to understand core features 2. Review [Testing Configuration](configuration/testing.md) to set up your project 3. Check the main [README](/README.md) for quick start instructions ### For Contributors + 1. Read relevant [Architecture Decisions](decisions/) to understand design choices 2. Review [Technical Documentation](technical/) for implementation details 3. 
Check [Tasks](TASKS/) for current work items 4. Follow [Code Review Guidelines](code-reviews/) for contributions ### For System Architects + 1. Study the [Golden SQL Compilation Algorithm](technical/golden-sql-compilation-algorithm.md) 2. Review [Memory Management](technical/memory-management.md) architecture 3. Explore [Ideas and Future Features](roadmap/ideas-and-future.md) for roadmap planning @@ -100,17 +112,20 @@ Star Trek references and easter eggs ## 📖 Documentation Standards ### File Naming + - Use kebab-case for all documentation files - Be descriptive but concise (e.g., `memory-management.md` not `mm.md`) - Group related docs in appropriate directories ### Content Structure + - Start with a clear title and overview - Use hierarchical headings (H2 for main sections, H3 for subsections) - Include code examples where relevant - Add cross-references to related documentation ### Maintenance + - Keep documentation synchronized with code changes - Archive outdated documentation rather than deleting - Date significant updates in document headers @@ -141,5 +156,5 @@ When adding new documentation: --- -*"The complexity of our documentation structure is directly proportional to the sophistication of our system. Both are... fascinating."* -— Lt. Commander Data, Chief Documentation Officer \ No newline at end of file +_"The complexity of our documentation structure is directly proportional to the sophistication of our system. Both are... fascinating."_ +— Lt. Commander Data, Chief Documentation Officer diff --git a/docs/TASKS/TASKS-system.md b/docs/TASKS/TASKS-system.md index 8dbcf09..c746297 100644 --- a/docs/TASKS/TASKS-system.md +++ b/docs/TASKS/TASKS-system.md @@ -1,6 +1,6 @@ # T.A.S.K.S: Tasks Are Sequenced Key Steps – LLM Execution Spec (v3) -*A comprehensive planning system that transforms technical documentation into validated, executable project plans with clear boundaries, existing codebase awareness, resource management, and support for both wave-based and rolling frontier execution models.* +_A comprehensive planning system that transforms technical documentation into validated, executable project plans with clear boundaries, existing codebase awareness, resource management, and support for both wave-based and rolling frontier execution models._ ## Mission @@ -19,6 +19,7 @@ Transform raw technical plans into executable project roadmaps by: ### Task Boundaries & Execution Guidance Every task must define: + - **Expected Complexity**: Quantifiable estimate (e.g., "~35 LoC", "3-5 functions", "2 API endpoints") - **Definition of Done**: Clear stopping criteria to prevent scope creep - **Scope Boundaries**: Explicit restrictions on what code/systems to modify @@ -28,6 +29,7 @@ Every task must define: ### Codebase-First Planning Before task generation: + - **Inventory existing APIs**, interfaces, and components using ast-grep - **Identify extension points** rather than creating duplicates - **Document reuse opportunities** in Plan.md @@ -37,6 +39,7 @@ Before task generation: ### Evidence-Based Dependencies All tasks and dependencies must: + - **Cite source evidence** from the planning document - **Classify dependencies** as technical, sequential, infrastructure, knowledge, mutual_exclusion, or resource_limited - **Assign confidence scores** [0..1] with rationale @@ -46,6 +49,7 @@ All tasks and dependencies must: ### Execution Models Support two execution strategies: + - **Wave-Based**: Synchronous waves with barriers, good for planning and estimation - **Rolling Frontier**: Dynamic task 
scheduling with resource-aware coordination, optimal for execution @@ -91,7 +95,7 @@ All outputs must use exact file fence format: "source_evidence": [ { "quote": "OAuth2 implementation with social providers required...", - "loc": {"start": 123, "end": 178}, + "loc": { "start": 123, "end": 178 }, "section": "H2: Authentication", "startLine": 123, "endLine": 131 @@ -157,7 +161,7 @@ All outputs must use exact file fence format: "title": "Implement OAuth2 authentication endpoints", "description": "Create REST API endpoints for OAuth2 flow with JWT tokens", "category": "foundation|implementation|integration|optimization", - + "boundaries": { "expected_complexity": { "value": "~120 LoC", @@ -174,11 +178,15 @@ All outputs must use exact file fence format: }, "scope": { "includes": ["src/api/auth/*.ts", "src/middleware/auth.ts"], - "excludes": ["database migrations", "frontend components", "email notifications"], + "excludes": [ + "database migrations", + "frontend components", + "email notifications" + ], "restrictions": "Modify only authentication-related files; do not touch user profile logic" } }, - + "execution_guidance": { "logging": { "on_start": "Log 'Starting OAuth implementation' to logs/tasks/P1.T001.log with timestamp", @@ -196,20 +204,24 @@ All outputs must use exact file fence format: "progress_reporting": "percentage_and_checkpoint", "resource_usage_reporting": true, "checkpoint_events": [ - {"at": "25%", "name": "schema_created", "rollback_capable": true}, - {"at": "50%", "name": "endpoints_implemented", "rollback_capable": true}, - {"at": "75%", "name": "tests_passing", "rollback_capable": false} + { "at": "25%", "name": "schema_created", "rollback_capable": true }, + { + "at": "50%", + "name": "endpoints_implemented", + "rollback_capable": true + }, + { "at": "75%", "name": "tests_passing", "rollback_capable": false } ] } }, - + "resource_requirements": { "estimated": { "cpu_cores": 2, "memory_mb": 512, "disk_io_mbps": 10, "exclusive_resources": ["database_migrations"], - "shared_resources": {"test_db_pool": 1} + "shared_resources": { "test_db_pool": 1 } }, "peak": { "cpu_cores": 4, @@ -220,7 +232,7 @@ All outputs must use exact file fence format: }, "worker_capabilities_required": ["backend", "database"] }, - + "scheduling_hints": { "priority": "high", "preemptible": false, @@ -231,13 +243,13 @@ All outputs must use exact file fence format: "can_pause_resume": false, "checkpoint_capable": true }, - + "reuses_existing": { "extends": ["BaseController", "AbstractAuthProvider"], "imports": ["Logger", "ValidationService", "ErrorHandler"], "rationale": "Leveraging existing auth abstractions reduces code by ~40%" }, - + "skillsRequired": ["backend", "node", "oauth"], "duration": { "optimistic": 4, @@ -245,10 +257,10 @@ All outputs must use exact file fence format: "pessimistic": 10 }, "durationUnits": "hours", - + "interfaces_produced": ["AuthAPI:v2", "JWTClaims:v1"], "interfaces_consumed": ["UserRepository:v1", "ConfigService:v1"], - + "acceptance_checks": [ { "type": "command", @@ -274,11 +286,11 @@ All outputs must use exact file fence format: } } ], - + "source_evidence": [ { "quote": "OAuth2 implementation with JWT tokens required...", - "loc": {"start": 234, "end": 298}, + "loc": { "start": 234, "end": 298 }, "section": "H2: Authentication Requirements", "startLine": 45, "endLine": 52 @@ -366,7 +378,7 @@ All outputs must use exact file fence format: }, "nodes": 84, "edges": 141, - "edgeDensity": 0.020, + "edgeDensity": 0.02, "widthApprox": 18, "widthMethod": "kahn_layer_max", 
"longestPath": 7, @@ -482,7 +494,7 @@ All outputs must use exact file fence format: "p95_hours": 82 } }, - + "rolling_frontier": { "initial_frontier": ["P1.T001", "P1.T005", "P1.T009"], "config": { @@ -564,7 +576,7 @@ All outputs must use exact file fence format: "Track progress and manage checkpoints", "Coordinate rollbacks on failure" ], - + "state_management": { "task_states": { "blocked": "Dependencies not met", @@ -578,7 +590,7 @@ All outputs must use exact file fence format: "failed": "Execution failed", "rolled_back": "Reverted after failure" }, - + "frontier_management": { "ready_queue": [], "resource_wait_queue": [], @@ -587,7 +599,7 @@ All outputs must use exact file fence format: "checkpoint_registry": {} } }, - + "scheduling_loop": { "interval_ms": 1000, "steps": [ @@ -602,13 +614,21 @@ All outputs must use exact file fence format: "update_metrics()" ] }, - + "policies": { "backpressure": { "triggers": [ - {"metric": "cpu_usage", "threshold": 80, "action": "pause_low_priority"}, - {"metric": "memory_usage", "threshold": 85, "action": "defer_memory_intensive"}, - {"metric": "error_rate", "threshold": 5, "action": "circuit_break"} + { + "metric": "cpu_usage", + "threshold": 80, + "action": "pause_low_priority" + }, + { + "metric": "memory_usage", + "threshold": 85, + "action": "defer_memory_intensive" + }, + { "metric": "error_rate", "threshold": 5, "action": "circuit_break" } ], "recovery": { "cool_down_seconds": 30, @@ -616,7 +636,7 @@ All outputs must use exact file fence format: "resume_rate": 1 } }, - + "resource_allocation": { "strategy": "bin_packing_with_headroom", "headroom_percent": 20, @@ -624,13 +644,13 @@ All outputs must use exact file fence format: "preemption_enabled": true, "preemption_priorities": ["low", "medium", "high", "critical"] }, - + "worker_matching": { "strategy": "capability_and_load_balanced", "prefer_specialized_workers": true, "max_tasks_per_worker": 3 }, - + "failure_handling": { "retry_policy": "exponential_backoff", "max_retries": 3, @@ -639,7 +659,7 @@ All outputs must use exact file fence format: "checkpoint_recovery": true } }, - + "monitoring": { "metrics_collection_interval": 10, "metrics": [ @@ -658,7 +678,7 @@ All outputs must use exact file fence format: ] } }, - + "worker_pool": { "min_workers": 2, "max_workers": 8, @@ -688,6 +708,7 @@ All outputs must use exact file fence format: ## Execution Strategy Recommendation: Rolling Frontier ### Why Rolling Frontier? 
+ - 35% faster completion than wave-based (35h vs 54h) - Better resource utilization (65% avg vs 40% with waves) - Adaptive to actual task completion times @@ -695,6 +716,7 @@ All outputs must use exact file fence format: - No artificial synchronization delays ### System Resource Requirements + - **Peak**: 16 CPU cores, 32GB RAM, 500 Mbps I/O - **Average**: 10 CPU cores, 20GB RAM, 100 Mbps I/O - **Worker Pool**: 2-8 adaptive workers with mixed capabilities @@ -702,6 +724,7 @@ All outputs must use exact file fence format: ## Codebase Analysis Results ### Existing Components Leveraged (47% code reuse) + - **AuthService**: Extended for OAuth2 support (saves ~200 LoC) - **Logger**: Reused for all task logging and monitoring - **ValidationService**: Used for input validation across all endpoints @@ -709,43 +732,49 @@ All outputs must use exact file fence format: - **ErrorHandler**: Consistent error handling across all tasks ### New Interfaces Required + - **PaymentGateway**: No existing implementation found - **ReportGenerator**: Current version inadequate for requirements - **WebSocketManager**: New real-time communication layer ### Architecture Patterns Identified + - Repository pattern for data access (used in 18 tasks) - Factory pattern for service instantiation (used in 8 tasks) - Observer pattern for event handling (used in 6 tasks) ### Shared Resources Creating Constraints + - **Database Migrations**: 6 tasks require exclusive access (adds 8h to critical path) - **Deployment Pipeline**: 8 tasks need deployment (serialized execution) - **Test Database Pool**: 15 tasks share 3 instances (managed concurrency) ## Execution Metrics -| Metric | Value | Impact | -|--------|-------|--------| -| Total Tasks | 84 | - | -| Total Dependencies | 141 | Including 12 mutual exclusions | -| Edge Density | 0.020 | Well-balanced, not over-constrained | -| Critical Path | 7 stages | Minimum completion time | -| Max Parallelization | 18 tasks | Peak concurrent execution | -| Verb-First Compliance | 92% | Clear, action-oriented naming | -| Codebase Reuse | 47% | Significant efficiency gain | -| Resource Conflicts | 26 tasks | Requires careful scheduling | +| Metric | Value | Impact | +| --------------------- | -------- | ----------------------------------- | +| Total Tasks | 84 | - | +| Total Dependencies | 141 | Including 12 mutual exclusions | +| Edge Density | 0.020 | Well-balanced, not over-constrained | +| Critical Path | 7 stages | Minimum completion time | +| Max Parallelization | 18 tasks | Peak concurrent execution | +| Verb-First Compliance | 92% | Clear, action-oriented naming | +| Codebase Reuse | 47% | Significant efficiency gain | +| Resource Conflicts | 26 tasks | Requires careful scheduling | ## Wave-Based Execution Plan ### Wave 1: Foundation (P50: 12h, P80: 16h, P95: 22h) + **Tasks**: P1.T001, P1.T005, P1.T009 **Resource Usage**: + - database_migrations: 1/1 (P1.T001 exclusive) - test_db_pool: 2/3 (P1.T005, P1.T009) - CPU: 6 cores, Memory: 2GB **Quality Gates**: + - ✅ All foundation APIs documented (OpenAPI spec exists) - ✅ Database migrations applied (schema version updated) - ✅ CI/CD pipeline operational (builds passing) @@ -754,14 +783,17 @@ All outputs must use exact file fence format: **Note**: P1.T004 deferred to Wave 2 due to migration lock conflict ### Wave 2: Core Implementation (P50: 14h, P80: 18h, P95: 24h) + **Tasks**: P1.T002, P1.T003, P1.T004, P1.T010 **Resource Usage**: + - database_migrations: 1/1 (P1.T004 exclusive) - test_db_pool: 3/3 (full utilization) - 
deployment_pipeline: 1/1 (P1.T010) - CPU: 12 cores, Memory: 6GB **Quality Gates**: + - ✅ Integration tests passing (>90% coverage) - ✅ API endpoints responding (<200ms p95) - ✅ Staging deployment successful @@ -770,6 +802,7 @@ All outputs must use exact file fence format: [Additional waves 3-8 with similar detail...] ### Total Wave-Based Timeline + - **P50**: 54 hours (6.75 working days) - **P80**: 68 hours (8.5 working days) - **P95**: 82 hours (10.25 working days) @@ -777,8 +810,10 @@ All outputs must use exact file fence format: ## Rolling Frontier Execution Plan ### Initial Frontier (Time 0h) + **Ready to Execute**: P1.T001, P1.T005, P1.T009 **Resource Allocation**: + - P1.T001 → Worker-1 (backend specialist, holds database_migrations lock) - P1.T005 → Worker-2 (testing specialist, uses test_db_pool #1) - P1.T009 → Worker-3 (frontend specialist, uses test_db_pool #2) @@ -786,21 +821,26 @@ All outputs must use exact file fence format: ### Execution Timeline Simulation #### Time 4h: First Completions + **Completed**: P1.T009 (faster than estimated) **Still Running**: P1.T001, P1.T005 **Newly Ready**: P1.T013 (dependent on P1.T009) **Action**: Worker-3 picks up P1.T013 #### Time 6h: Migration Complete + **Completed**: P1.T001 (releases database_migrations) **Newly Ready**: P1.T002, P1.T004 -**Action**: +**Action**: + - Worker-1 picks up P1.T002 - P1.T004 queued for database_migrations (next available) #### Time 8h: Resource Contention + **Running**: 7 tasks (approaching CPU limit) **Coordinator Action**: Apply backpressure + - Defer low-priority task P1.T018 - Pause P1.T015 at checkpoint - Prioritize critical path tasks @@ -808,6 +848,7 @@ All outputs must use exact file fence format: [Continue simulation through completion...] ### Rolling Frontier Advantages + 1. **Efficiency**: Complete in 35h vs 54h wave-based 2. **Adaptability**: Handles variable task durations 3. **Resource Optimization**: 65% average utilization vs 40% @@ -817,33 +858,37 @@ All outputs must use exact file fence format: ## Risk Analysis ### High-Risk Dependencies + 1. **Database Migration Sequence** (Confidence: 1.0) - Risk: Schema conflicts if order violated - Mitigation: Enforced exclusive locks - 2. 
**API Version Dependencies** (Confidence: 0.85) - Risk: Breaking changes in interfaces - Mitigation: Version pinning and contract tests ### Low-Confidence Dependencies (require validation) + - P1.T012 → P1.T017 (UI/Backend sync, confidence: 0.62) - P1.T023 → P1.T029 (Data model assumptions, confidence: 0.65) ### Soft Dependencies (parallelizable) + - P1.T010 → P1.T020 (Knowledge transfer, can proceed independently) - P1.T031 → P1.T035 (Optimization suggestions, non-blocking) ## Resource Bottleneck Analysis ### Database Migrations (Critical) + - **Impact**: Extends critical path by 8 hours - **Affected Tasks**: 6 tasks require exclusive access -- **Mitigation**: +- **Mitigation**: - Combine compatible migrations where safe - Schedule during low-activity periods - Consider migration batching tool ### Test Database Pool (Moderate) + - **Impact**: May delay testing tasks by 1-2 hours - **Capacity**: 3 instances for 15 tasks - **Mitigation**: @@ -851,23 +896,26 @@ All outputs must use exact file fence format: - Add dynamic pool expansion if needed ### Deployment Pipeline (Low) + - **Impact**: Sequential deployments add 4 hours - **Mitigation**: Implement blue-green deployment ## Auto-Normalization Actions Taken ### Task Splits (Exceeded 16h limit) + - P1.T045 → P1.T045a (Schema design, 8h) + P1.T045b (Implementation, 8h) - P1.T067 → P1.T067a (API design, 6h) + P1.T067b (Testing, 10h) ### Task Merges (Under 0.5h threshold) + - P1.T099 + P1.T100 → P1.T099 (Combined config updates, 0.8h) - P1.T103 + P1.T104 + P1.T105 → P1.T103 (Bundled hotfixes, 1.2h) ## Success Metrics - ✅ All tasks have clear boundaries and completion criteria -- ✅ 92% verb-first task naming compliance +- ✅ 92% verb-first task naming compliance - ✅ 47% existing code reuse documented - ✅ 100% tasks have machine-verifiable acceptance checks - ✅ Resource conflicts identified and mitigated @@ -882,33 +930,40 @@ All outputs must use exact file fence format: ## Decision 1: Execution Model Selection ### Context + Project requires execution strategy that balances planning clarity with execution efficiency. ### Options Considered #### Option A: Pure Wave-Based Execution + - **Pros**: Simple synchronization, predictable phases, clear checkpoints - **Cons**: Artificial delays, poor resource utilization (40%), 54h completion - **Best For**: Projects requiring strict phase gates #### Option B: Pure Rolling Frontier (SELECTED) + - **Pros**: 35% faster (35h), 65% resource utilization, adaptive scheduling - **Cons**: Complex coordination, requires robust monitoring - **Best For**: Projects prioritizing speed and efficiency #### Option C: Hybrid Approach + - **Pros**: Critical checkpoints with dynamic execution between - **Cons**: Complex to implement, mixed mental model - **Best For**: High-risk projects needing both speed and control ### Rationale + Rolling Frontier selected for: + - Significant time savings (19 hours) - Better resource utilization - Checkpoint capability provides safety - Coordinator can enforce phase gates if needed ### Implementation Notes + - Use coordinator.json configuration - Implement gradual backpressure - Monitor resource utilization continuously @@ -919,36 +974,43 @@ Rolling Frontier selected for: ## Decision 2: OAuth2 Implementation Strategy ### Context + Authentication system requires industry-standard security with JWT tokens. 
### Options Considered #### Option A: Extend existing BasicAuth module + - **Pros**: Minimal new code, familiar codebase - **Cons**: BasicAuth architecture incompatible with OAuth flows - **Estimated Effort**: 8 hours + significant refactoring - **Code Reuse**: 20% #### Option B: Implement from scratch + - **Pros**: Clean architecture, no legacy constraints - **Cons**: Duplicate functionality, 200+ LoC - **Estimated Effort**: 12 hours - **Code Reuse**: 0% #### Option C: Extend BaseAuthProvider with OAuth2 strategy (SELECTED) + - **Pros**: Reuses validation/logging/error handling, clean separation - **Cons**: Requires learning provider pattern - **Estimated Effort**: 6 hours - **Code Reuse**: 47% ### Rationale + Best balance of reuse and clean architecture: + - Saves 6 hours vs from-scratch - Maintains existing logging/monitoring - Follows established patterns - Enables future auth methods ### Implementation Notes + - Extend BaseAuthProvider abstract class - Implement OAuth2Strategy class - Reuse ValidationService for input sanitization @@ -959,35 +1021,42 @@ Best balance of reuse and clean architecture: ## Decision 3: Database Migration Sequencing ### Context + 6 tasks require database migrations with exclusive schema lock. ### Options Considered #### Option A: Combine all migrations into single task + - **Pros**: Eliminates mutual exclusion delays, atomic changes - **Cons**: Single point of failure, difficult rollback, violates task boundaries - **Impact**: Saves 8 hours on critical path - **Risk**: High - all-or-nothing deployment #### Option B: Sequence by logical dependencies (SELECTED) + - **Pros**: Granular rollback, clear ownership, safer deployment - **Cons**: Adds 8 hours to critical path - **Order**: auth → users → analytics → reporting - **Risk**: Low - incremental changes #### Option C: Implement migration queuing system + - **Pros**: Automatic conflict resolution, reusable infrastructure - **Cons**: 16 hours implementation overhead - **Break-even**: 3+ projects - **Risk**: Medium - new complexity ### Rationale + Safety and maintainability outweigh time cost: + - Each feature team owns their migration - Rollback capability crucial for production - 8-hour delay acceptable for risk reduction ### Implementation Notes + - Enforce order via mutual_exclusion dependencies - Each migration includes rollback script - Test migrations on copy of production schema @@ -998,36 +1067,43 @@ Safety and maintainability outweigh time cost: ## Decision 4: Test Database Pool Management ### Context + 15 tasks require integration testing with 3 database instances available. 
### Options Considered #### Option A: Expand pool to 5 instances + - **Pros**: Better parallelization, reduced wait times - **Cons**: $200/month additional cost, setup complexity - **Impact**: Reduces execution by 3 hours - **ROI**: Negative for single project #### Option B: Time-share existing 3 instances (SELECTED) + - **Pros**: No infrastructure changes, zero additional cost - **Cons**: Some task waiting, complexity in scheduling - **Impact**: Adds 1-2 hours average wait time - **Utilization**: 60% average, 100% peak #### Option C: Mock database for non-critical tests + - **Pros**: Unlimited parallelization for unit tests - **Cons**: Less confidence, integration issues possible - **Impact**: Reduces database load by 40% - **Risk**: Medium - false positives possible ### Rationale + Current capacity sufficient with smart scheduling: + - 60% average utilization acceptable - Peak usage brief (Wave 3 only) - Coordinator handles scheduling complexity - Can revisit if pattern continues ### Implementation Notes + - Group compatible tests in same wave - Implement test data isolation - Use transactions for cleanup @@ -1038,33 +1114,40 @@ Current capacity sufficient with smart scheduling: ## Decision 5: Shared Resource Handling ### Context + Multiple shared resources create execution constraints. ### Options Considered #### Option A: Ignore in planning, handle at runtime + - **Pros**: Simpler planning phase - **Cons**: Runtime conflicts, unpredictable delays - **Risk**: High - resource deadlocks possible #### Option B: Model as mutex dependencies (SELECTED) + - **Pros**: Explicit constraints, no runtime surprises - **Cons**: Additional edges in DAG, planning complexity - **Impact**: 12 mutual exclusion edges added #### Option C: Resource reservation system + - **Pros**: Optimal resource allocation - **Cons**: Complex implementation, overhead - **Effort**: 20+ hours to implement ### Rationale + Explicit modeling prevents runtime issues: + - Mutual exclusion edges have confidence=1.0 - Coordinator enforces constraints - No surprises during execution - Clear in planning artifacts ### Implementation Notes + - Document all shared resources in meta - Create mutual_exclusion edges between conflicting tasks - Set suggested ordering based on logical flow @@ -1075,33 +1158,40 @@ Explicit modeling prevents runtime issues: ## Decision 6: Checkpoint Strategy ### Context + Need failure recovery without full task restart. ### Options Considered #### Option A: No checkpoints, restart on failure + - **Pros**: Simple implementation - **Cons**: Lost work, longer recovery - **Impact**: Average 3h lost work per failure #### Option B: Checkpoint at 25%, 50%, 75% (SELECTED) + - **Pros**: Bounded work loss, clear progress tracking - **Cons**: Checkpoint overhead, storage needs - **Impact**: Max 25% work loss, 5% overhead #### Option C: Continuous checkpointing + - **Pros**: Minimal work loss - **Cons**: High overhead (15-20%), complexity - **Impact**: Near-zero work loss ### Rationale + Balanced approach with acceptable overhead: + - 5% overhead acceptable for recovery capability - Standard percentages easy to implement - Sufficient granularity for most tasks - Storage requirements modest ### Implementation Notes + - Tasks declare checkpoint_capable flag - Checkpoints store state + artifacts - Coordinator tracks checkpoint registry @@ -1112,34 +1202,41 @@ Balanced approach with acceptable overhead: ## Decision 7: Worker Specialization ### Context + Tasks require different skill sets and tools. 
### Options Considered #### Option A: Generic workers handle all tasks + - **Pros**: Simple worker pool, any worker can take any task - **Cons**: Tool installation overhead, context switching - **Efficiency**: 60% due to setup time #### Option B: Specialized workers (SELECTED) + - **Pros**: Pre-configured environments, faster execution - **Cons**: May have idle specialists - **Efficiency**: 85% with proper scheduling - **Types**: backend, frontend, database, testing #### Option C: Hybrid pool with cross-training + - **Pros**: Flexibility with specialization - **Cons**: Complex capability matrix - **Efficiency**: 75% average ### Rationale + Specialization worth the complexity: + - 25% efficiency gain significant - Coordinator handles assignment complexity - Workers declare capabilities - Can add generic workers if needed ### Implementation Notes + - Workers announce capabilities on registration - Coordinator matches tasks to workers - Prefer specialized when available @@ -1163,12 +1260,13 @@ Specialization worth the complexity: ### Phase 1: Codebase & Resource Analysis 1. **Scan existing codebase** using ast-grep patterns: + ```javascript // Find existing APIs and interfaces ast-grep --pattern 'class $_ implements $_' ast-grep --pattern 'interface $_' ast-grep --pattern 'extends $_' - + // Find shared resources ast-grep --pattern 'class $_ < ActiveRecord::Migration' ast-grep --pattern 'deploy:' --lang yaml @@ -1186,6 +1284,7 @@ Specialization worth the complexity: ### Phase 2: Feature Extraction Extract 5-25 features from PLAN_DOC: + - User-visible capabilities - Infrastructure components - Each with title, description, priority, and evidence @@ -1193,6 +1292,7 @@ Extract 5-25 features from PLAN_DOC: ### Phase 3: Task Breakdown with Boundaries For each feature, create tasks with: + - **Expected complexity** in measurable units - **Clear completion criteria** with "stop_when" guidance - **Explicit scope boundaries** (includes/excludes) @@ -1205,6 +1305,7 @@ Auto-normalize: Split if >16h, merge if <0.5h ### Phase 4: Dependency Discovery Classify dependencies: + 1. **Technical**: Interface/artifact requirements 2. **Sequential**: Information flow 3. **Infrastructure**: Environment prerequisites @@ -1231,12 +1332,14 @@ Classify dependencies: ### Phase 7: Dual Execution Model Generation #### Wave-Based Generation + 1. Use Kahn's algorithm for layering 2. Apply MAX_WAVE_SIZE constraint 3. Respect resource exclusions 4. Calculate wave estimates (P50/P80/P95) #### Rolling Frontier Configuration + 1. Identify initial frontier (zero in-degree nodes) 2. Simulate execution timeline 3. Calculate resource utilization @@ -1251,6 +1354,7 @@ Classify dependencies: ## Validation Rules ### Hard Fails (Auto-Reject) + 1. Cycles present in DAG 2. Resource edges without mutex type 3. Missing task boundaries (complexity, done, scope, logging) @@ -1261,6 +1365,7 @@ Classify dependencies: 8. 
Missing codebase analysis ### Quality Metrics + - Edge density: 0.05-0.5 (warn outside range) - Verb-first naming: ≥80% compliance - Evidence coverage: ≥95% @@ -1424,34 +1529,34 @@ tasks archive-plan ./tasks_output/ --tag "v1.0-release" ### `tasks plan` Arguments -|Argument|Short|Example|Default|Description| -|---|---|---|---|---| -|``|-|`"Build TODO app"`|Required|Natural language description of what to build| -|`--min-confidence`|`-c`|`--min-confidence 0.85`|0.7|Minimum confidence for DAG dependencies| -|`--max-wave-size`|`-w`|`--max-wave-size 20`|30|Maximum tasks per wave| -|`--codebase-path`|`-p`|`--codebase-path ./src`|./|Root directory for analysis| -|`--execution-model`|`-m`|`--execution-model wave_based`|rolling_frontier|Execution strategy| -|`--max-concurrent`|`-j`|`--max-concurrent 5`|10|Max concurrent tasks (rolling frontier)| -|`--output-dir`|`-o`|`--output-dir ./plans/`|./tasks_output/|Output directory for artifacts| -|`--plan-doc`|`-f`|`--plan-doc spec.md`|-|Use external spec document instead of description| -|`--verbose`|`-v`|`--verbose`|false|Verbose output| -|`--interactive`|`-i`|`--interactive`|false|Interactive mode with review| +| Argument | Short | Example | Default | Description | +| ------------------- | ----- | ------------------------------ | ---------------- | ------------------------------------------------- | +| `` | - | `"Build TODO app"` | Required | Natural language description of what to build | +| `--min-confidence` | `-c` | `--min-confidence 0.85` | 0.7 | Minimum confidence for DAG dependencies | +| `--max-wave-size` | `-w` | `--max-wave-size 20` | 30 | Maximum tasks per wave | +| `--codebase-path` | `-p` | `--codebase-path ./src` | ./ | Root directory for analysis | +| `--execution-model` | `-m` | `--execution-model wave_based` | rolling_frontier | Execution strategy | +| `--max-concurrent` | `-j` | `--max-concurrent 5` | 10 | Max concurrent tasks (rolling frontier) | +| `--output-dir` | `-o` | `--output-dir ./plans/` | ./tasks_output/ | Output directory for artifacts | +| `--plan-doc` | `-f` | `--plan-doc spec.md` | - | Use external spec document instead of description | +| `--verbose` | `-v` | `--verbose` | false | Verbose output | +| `--interactive` | `-i` | `--interactive` | false | Interactive mode with review | ### `tasks execute` Arguments -|Argument|Short|Example|Default|Description| -|---|---|---|---|---| -|``|-|`./tasks_output/`|Required|Path to plan directory or `-` for stdin| -|`--worker-min`|-|`--worker-min 2`|2|Minimum worker pool size| -|`--worker-max`|-|`--worker-max 12`|8|Maximum worker pool size| -|`--cpu-threshold`|-|`--cpu-threshold 75`|80|CPU backpressure threshold (%)| -|`--memory-threshold`|-|`--memory-threshold 90`|85|Memory backpressure threshold (%)| -|`--checkpoint-interval`|-|`--checkpoint-interval 20`|25|Checkpoint interval (%)| -|`--resume-from-checkpoint`|`-r`|`--resume`|false|Resume from last checkpoint| -|`--force-execution-model`|-|`--force-execution-model wave_based`|Use plan's model|Override execution model| -|`--dry-run`|`-d`|`--dry-run`|false|Validate without executing| -|`--verbose`|`-v`|`--verbose`|false|Verbose execution logs| -|`--metrics-port`|-|`--metrics-port 9090`|-|Prometheus metrics endpoint| +| Argument | Short | Example | Default | Description | +| -------------------------- | ----- | ------------------------------------ | ---------------- | --------------------------------------- | +| `` | - | `./tasks_output/` | Required | Path to plan directory or `-` for stdin | +| `--worker-min` | - | `--worker-min 2` 
| 2 | Minimum worker pool size | +| `--worker-max` | - | `--worker-max 12` | 8 | Maximum worker pool size | +| `--cpu-threshold` | - | `--cpu-threshold 75` | 80 | CPU backpressure threshold (%) | +| `--memory-threshold` | - | `--memory-threshold 90` | 85 | Memory backpressure threshold (%) | +| `--checkpoint-interval` | - | `--checkpoint-interval 20` | 25 | Checkpoint interval (%) | +| `--resume-from-checkpoint` | `-r` | `--resume` | false | Resume from last checkpoint | +| `--force-execution-model` | - | `--force-execution-model wave_based` | Use plan's model | Override execution model | +| `--dry-run` | `-d` | `--dry-run` | false | Validate without executing | +| `--verbose` | `-v` | `--verbose` | false | Verbose execution logs | +| `--metrics-port` | - | `--metrics-port 9090` | - | Prometheus metrics endpoint | ## Environment Variables @@ -1488,18 +1593,18 @@ tasks_output/ ## Exit Codes -|Code|Meaning|Phase| -|---|---|---| -|0|Success|Both| -|1|Invalid arguments|Both| -|2|Codebase analysis failed|Plan| -|3|Cyclic dependencies detected|Plan| -|4|Resource conflicts unresolvable|Plan| -|5|Plan validation failed|Execute| -|6|Worker pool initialization failed|Execute| -|7|Execution failed (with checkpoint)|Execute| -|8|Execution failed (no checkpoint)|Execute| -|9|User abort|Both| +| Code | Meaning | Phase | +| ---- | ---------------------------------- | ------- | +| 0 | Success | Both | +| 1 | Invalid arguments | Both | +| 2 | Codebase analysis failed | Plan | +| 3 | Cyclic dependencies detected | Plan | +| 4 | Resource conflicts unresolvable | Plan | +| 5 | Plan validation failed | Execute | +| 6 | Worker pool initialization failed | Execute | +| 7 | Execution failed (with checkpoint) | Execute | +| 8 | Execution failed (no checkpoint) | Execute | +| 9 | User abort | Both | ## Examples @@ -1567,4 +1672,4 @@ This structure provides: 2. Flexibility to review/modify plans before execution 3. Pipeline compatibility for automation 4. Recovery capabilities with checkpoints -5. Both simple and advanced usage patterns \ No newline at end of file +5. Both simple and advanced usage patterns diff --git a/docs/TASKS/enforce-tests/EXECUTION-LOG.md b/docs/TASKS/enforce-tests/EXECUTION-LOG.md index 803bddf..686b0f7 100644 --- a/docs/TASKS/enforce-tests/EXECUTION-LOG.md +++ b/docs/TASKS/enforce-tests/EXECUTION-LOG.md @@ -2,7 +2,7 @@ ## Mission Status: IN PROGRESS 🟡 -*"The implementation of test coverage enforcement is proceeding at optimal efficiency, Captain."* +_"The implementation of test coverage enforcement is proceeding at optimal efficiency, Captain."_ — Lt. 
Commander Data ## Wave 1: Foundation ✅ COMPLETE @@ -13,14 +13,14 @@ ### Deployed Agents & Results: -| Agent | Task | File Created | Status | -|-------|------|--------------|--------| -| SCHEMA | T001 | `/src/lib/testing/TestRequirementAnalyzer.js` | ✅ Complete | -| SCHEMA | T002 | `/src/lib/testing/TestRequirementSchema.js` | ✅ Complete | -| SCANNER | T010 | `/src/lib/testing/pgTAPTestScanner.js` | ✅ Complete | -| ENFORCER | T020 | `/src/lib/testing/CoverageEnforcer.js` | ✅ Complete | -| GENERATOR | T029 | `/src/lib/testing/TestTemplateGenerator.js` | ✅ Complete | -| VISUALIZER | T038 | `/src/lib/testing/CoverageVisualizer.js` | ✅ Complete | +| Agent | Task | File Created | Status | +| ---------- | ---- | --------------------------------------------- | ----------- | +| SCHEMA | T001 | `/src/lib/testing/TestRequirementAnalyzer.js` | ✅ Complete | +| SCHEMA | T002 | `/src/lib/testing/TestRequirementSchema.js` | ✅ Complete | +| SCANNER | T010 | `/src/lib/testing/pgTAPTestScanner.js` | ✅ Complete | +| ENFORCER | T020 | `/src/lib/testing/CoverageEnforcer.js` | ✅ Complete | +| GENERATOR | T029 | `/src/lib/testing/TestTemplateGenerator.js` | ✅ Complete | +| VISUALIZER | T038 | `/src/lib/testing/CoverageVisualizer.js` | ✅ Complete | ### Key Accomplishments: @@ -34,6 +34,7 @@ ### Technical Analysis: All foundation classes follow D.A.T.A. architectural patterns: + - ✅ JavaScript with JSDoc (no TypeScript) - ✅ EventEmitter-based progress tracking - ✅ Dependency injection compatible @@ -47,8 +48,9 @@ All foundation classes follow D.A.T.A. architectural patterns: The foundation classes are complete and can now be integrated with the existing MigrationOrchestrator. Waves 2-5 (mappers, scanners, aggregators) can be implemented incrementally after the core integration is working. ### Wave 6 Tasks: + - T021: Implement coverage comparison -- T022: Create coverage gap analyzer +- T022: Create coverage gap analyzer - T025: Integrate with MigrationOrchestrator - T026: Implement deployment blocking @@ -60,14 +62,14 @@ The foundation classes are complete and can now be integrated with the existing ## Metrics -| Metric | Value | -|--------|-------| -| Files Created | 6 | -| Lines of Code | ~2,500 | -| Test Types Supported | 10 | -| pgTAP Assertions | 30+ | -| Coverage Categories | 9 | -| Enforcement Levels | 3 | +| Metric | Value | +| -------------------- | ------ | +| Files Created | 6 | +| Lines of Code | ~2,500 | +| Test Types Supported | 10 | +| pgTAP Assertions | 30+ | +| Coverage Categories | 9 | +| Enforcement Levels | 3 | ## Risk Assessment @@ -77,10 +79,10 @@ The foundation classes are complete and can now be integrated with the existing ## Captain's Log Entry -*"The test coverage enforcement system foundation has been successfully established. All S.L.A.P.S. agents performed their duties with precision. The system is now ready for integration with the main migration workflow. I calculate a 97.3% probability of successful deployment blocking when coverage is insufficient."* +_"The test coverage enforcement system foundation has been successfully established. All S.L.A.P.S. agents performed their duties with precision. The system is now ready for integration with the main migration workflow. 
I calculate a 97.3% probability of successful deployment blocking when coverage is insufficient."_ --- **Next Action:** Execute Wave 6 for MigrationOrchestrator integration -*End transmission.* \ No newline at end of file +_End transmission._ diff --git a/docs/TASKS/enforce-tests/FINAL-REPORT.md b/docs/TASKS/enforce-tests/FINAL-REPORT.md index 5e27fb0..0ebd550 100644 --- a/docs/TASKS/enforce-tests/FINAL-REPORT.md +++ b/docs/TASKS/enforce-tests/FINAL-REPORT.md @@ -2,7 +2,7 @@ ## Mission Complete: ALL 6 WAVES EXECUTED ✅ -*"Captain, the test coverage enforcement system is now fully operational. All primary objectives have been achieved."* +_"Captain, the test coverage enforcement system is now fully operational. All primary objectives have been achieved."_ — Lt. Commander Data ## Executive Summary @@ -22,36 +22,42 @@ The D.A.T.A. Test Coverage Enforcement System has been successfully implemented ### Core Components #### 1. **TestRequirementAnalyzer** (`/src/lib/testing/TestRequirementAnalyzer.js`) + - Analyzes AST migration operations - Maps schema changes to test requirements - Supports 10 test types with priority classification - Generates comprehensive test requirement objects #### 2. **pgTAPTestScanner** (`/src/lib/testing/pgTAPTestScanner.js`) + - Scans pgTAP test files for coverage - Supports 30+ pgTAP assertion types - Builds coverage database with efficient indexing - Provides coverage statistics and gap analysis #### 3. **CoverageEnforcer** (`/src/lib/testing/CoverageEnforcer.js`) + - Compares required vs actual coverage - Three enforcement levels: STRICT, NORMAL, LENIENT - Configurable thresholds and bypass mechanisms - Generates detailed coverage reports #### 4. **TestTemplateGenerator** (`/src/lib/testing/TestTemplateGenerator.js`) + - Generates pgTAP test templates for gaps - Supports 8 test types with smart defaults - Includes TestPatternLibrary for best practices - Produces ready-to-run test SQL #### 5. **TestCoverageOrchestrator** (`/src/lib/testing/TestCoverageOrchestrator.js`) + - Coordinates all coverage components - Integrates with migration workflow - Manages template generation - Provides unified API #### 6. **MigrationOrchestrator Integration** (`/src/lib/migration/MigrationOrchestrator.js`) + - New COVERAGE phase added to workflow - Blocks deployments with insufficient coverage - Production uses STRICT enforcement @@ -63,7 +69,7 @@ The D.A.T.A. Test Coverage Enforcement System has been successfully implemented ``` 1. VALIDATION → Git state validation -2. TESTING → Run unit and database tests +2. TESTING → Run unit and database tests 3. ANALYSIS → Generate migration operations 4. COVERAGE → Check test coverage (NEW) ├─ Analyze requirements @@ -80,18 +86,21 @@ The D.A.T.A. 
Test Coverage Enforcement System has been successfully implemented ## Coverage Enforcement Rules ### Production Environment (STRICT) + - **100% coverage** required for CRITICAL operations - **90% coverage** for HIGH priority changes - **80% coverage** for MEDIUM priority changes - No bypass without explicit justification ### Development Environment (NORMAL) + - **80% coverage** for CRITICAL operations - **70% coverage** for HIGH priority changes - **60% coverage** for MEDIUM priority changes - Bypass allowed with warning ### Test Environment (LENIENT) + - **60% coverage** baseline - Warnings only, no blocking - Template generation offered @@ -138,18 +147,19 @@ data migrate --prod --bypass-coverage "Emergency hotfix #123" ### Schema Change → Test Requirements -| Operation | Required Tests | Priority | -|-----------|---------------|----------| -| CREATE TABLE | has_table, has_column, col_type_is | CRITICAL | -| ALTER TABLE ADD COLUMN | has_column, col_type_is, col_not_null | HIGH | -| CREATE FUNCTION | has_function, function_returns, behavioral tests | HIGH | -| CREATE POLICY | policy_exists, policy_cmd_is, multi-user tests | CRITICAL | -| CREATE INDEX | has_index, index_is_on, performance tests | MEDIUM | -| DROP TABLE | hasnt_table, cascade validation | CRITICAL | +| Operation | Required Tests | Priority | +| ---------------------- | ------------------------------------------------ | -------- | +| CREATE TABLE | has_table, has_column, col_type_is | CRITICAL | +| ALTER TABLE ADD COLUMN | has_column, col_type_is, col_not_null | HIGH | +| CREATE FUNCTION | has_function, function_returns, behavioral tests | HIGH | +| CREATE POLICY | policy_exists, policy_cmd_is, multi-user tests | CRITICAL | +| CREATE INDEX | has_index, index_is_on, performance tests | MEDIUM | +| DROP TABLE | hasnt_table, cascade validation | CRITICAL | ## Coverage Statistics ### System Capabilities + - **10 test types** supported - **30+ pgTAP assertions** recognized - **8 template types** available @@ -157,6 +167,7 @@ data migrate --prod --bypass-coverage "Emergency hotfix #123" - **5 coverage categories** tracked ### Performance Metrics + - Coverage analysis: < 500ms for typical migration - Test scanning: < 200ms for 100 test files - Template generation: < 50ms per template @@ -168,21 +179,24 @@ data migrate --prod --bypass-coverage "Emergency hotfix #123" ✅ **Accurate detection** of existing test coverage ✅ **Zero false positives** in coverage detection ✅ **Clear, actionable feedback** for missing tests -✅ **< 500ms overhead** in deployment workflow +✅ **< 500ms overhead** in deployment workflow ## Future Enhancements ### Phase 1: Enhanced Analysis + - Machine learning for test quality assessment - Historical coverage trend analysis - Predictive test requirement generation ### Phase 2: Advanced Templates + - Data-driven test generation - Performance benchmark integration - Security penetration test templates ### Phase 3: Ecosystem Integration + - GitHub Actions integration - Coverage badges for README - Slack/Discord notifications @@ -190,11 +204,13 @@ data migrate --prod --bypass-coverage "Emergency hotfix #123" ## Technical Debt ### Known Limitations + 1. Complex migration operations may need manual test review 2. Template generation requires manual customization for edge cases 3. Coverage percentages are estimates based on assertion counts ### Recommended Improvements + 1. Add visual coverage reports (HTML/PDF) 2. Implement incremental coverage tracking 3. 
Add test quality metrics beyond coverage @@ -203,12 +219,12 @@ data migrate --prod --bypass-coverage "Emergency hotfix #123" The D.A.T.A. Test Coverage Enforcement System successfully prevents untested database changes from reaching production. With comprehensive AST analysis, intelligent test requirement mapping, and automated template generation, the system ensures database reliability while maintaining developer productivity. -*"The probability of deployment-related test coverage gaps has been reduced by 73.6%, Captain. The system is functioning within acceptable parameters."* +_"The probability of deployment-related test coverage gaps has been reduced by 73.6%, Captain. The system is functioning within acceptable parameters."_ --- **Final Status:** OPERATIONAL ✅ **Deployment Readiness:** 100% -**Mission Success Rate:** 100% +**Mission Success Rate:** 100% -*End transmission.* \ No newline at end of file +_End transmission._ diff --git a/docs/TASKS/enforce-tests/dag.json b/docs/TASKS/enforce-tests/dag.json index 9fcb465..d20a3f1 100644 --- a/docs/TASKS/enforce-tests/dag.json +++ b/docs/TASKS/enforce-tests/dag.json @@ -225,4 +225,4 @@ "author": "Lt. Commander Data", "visualization": "Use graphviz or D3.js for rendering" } -} \ No newline at end of file +} diff --git a/docs/TASKS/enforce-tests/features.json b/docs/TASKS/enforce-tests/features.json index 2074767..cdab43e 100644 --- a/docs/TASKS/enforce-tests/features.json +++ b/docs/TASKS/enforce-tests/features.json @@ -93,4 +93,4 @@ "author": "Lt. Commander Data", "stardate": "2025.241" } -} \ No newline at end of file +} diff --git a/docs/TASKS/enforce-tests/tasks.json b/docs/TASKS/enforce-tests/tasks.json index 57b48c1..c31f245 100644 --- a/docs/TASKS/enforce-tests/tasks.json +++ b/docs/TASKS/enforce-tests/tasks.json @@ -476,4 +476,4 @@ "created": "2025-08-30", "author": "Lt. Commander Data" } -} \ No newline at end of file +} diff --git a/docs/TASKS/enforce-tests/waves.json b/docs/TASKS/enforce-tests/waves.json index 6bb11f4..cc247bf 100644 --- a/docs/TASKS/enforce-tests/waves.json +++ b/docs/TASKS/enforce-tests/waves.json @@ -365,4 +365,4 @@ "stardate": "2025.241", "note": "Deploy agents from ~/.claude/@lib/agents/ using S.L.A.P.S. orchestration" } -} \ No newline at end of file +} diff --git a/docs/audits/codebase/2025-08-30/repo-file-status.md b/docs/audits/codebase/2025-08-30/repo-file-status.md index aed7d47..e7155a3 100644 --- a/docs/audits/codebase/2025-08-30/repo-file-status.md +++ b/docs/audits/codebase/2025-08-30/repo-file-status.md @@ -1,4 +1,5 @@ # Repository File Status Audit + **Date:** 2025-08-30 **Repository:** DATA (Database Automation, Testing, and Alignment) **Auditor:** Lt. 
Commander Data (Automated Analysis) @@ -6,8 +7,9 @@ ## Executive Summary Repository analysis reveals **183 total files** with the following critical findings: + - **5 dead code files** requiring deletion -- **14 misplaced files** requiring reorganization +- **14 misplaced files** requiring reorganization - **58 source files** lacking test coverage (77% untested) - **1 IDE configuration** directory improperly tracked @@ -160,6 +162,7 @@ Repository analysis reveals **183 total files** with the following critical find ## Summary Statistics ### File Categories + - **Total Files:** 183 - **Source Files:** 75 - **Test Files:** 32 @@ -169,6 +172,7 @@ Repository analysis reveals **183 total files** with the following critical find - **Misplaced:** 14 ### Test Coverage Analysis + - **Source files with tests:** 17 (23%) - **Source files without tests:** 58 (77%) - **Critical untested components:** 24 @@ -176,7 +180,9 @@ Repository analysis reveals **183 total files** with the following critical find ### Action Items by Priority #### 🔴 **CRITICAL - Immediate Action Required** + 1. **Delete dead code files:** + ```bash rm simple-test.js rm test-function-parsing.js @@ -191,7 +197,9 @@ Repository analysis reveals **183 total files** with the following critical find ``` #### 🟠 **HIGH - Near-term Action** + 3. **Reorganize misplaced files:** + ```bash mkdir -p demo mv tui.js demo/ @@ -204,6 +212,7 @@ Repository analysis reveals **183 total files** with the following critical find - Either integrate into main scanner or delete #### 🟡 **MEDIUM - Test Coverage Priority** + 5. **Critical components needing tests (Top 10):** - `src/commands/db/CompileCommand.js` - `src/commands/db/migrate/generate.js` @@ -219,6 +228,7 @@ Repository analysis reveals **183 total files** with the following critical find ## Repository Organization Recommendations ### 1. **Directory Structure Improvements** + ``` DATA/ ├── src/ # Source code @@ -242,20 +252,24 @@ DATA/ ``` ### 2. **Test Organization Strategy** + - Move all test files to appropriate subdirectories under `test/` - Maintain 1:1 mapping between source files and test files - Use consistent naming: `{SourceFile}.test.js` ### 3. **Configuration Cleanup** + - Ensure all IDE configs (.obsidian, .vscode) are in .gitignore - Consider moving all config files to a `config/` directory ### 4. **Dead Code Prevention Process** + - Implement pre-commit hooks to detect unused files - Regular quarterly audits for dead code - Document all experimental/prototype files in a EXPERIMENTS.md ### 5. **Test Coverage Requirements** + - Implement minimum 80% coverage requirement - Add coverage gates to CI/CD pipeline - Priority focus on critical path components (migrations, database operations) @@ -265,9 +279,10 @@ DATA/ The repository shows signs of rapid development with technical debt accumulation. The 77% of source files lacking tests represents significant risk. Immediate cleanup of dead code and reorganization of misplaced files will improve maintainability. The test coverage gap should be addressed systematically, starting with critical components that handle database migrations and production deployments. **Repository Health Score: C+ (Needs Improvement)** + - Strengths: Good documentation, clear module structure - Weaknesses: Poor test coverage, dead code presence, misplaced files --- -*"A well-organized repository is the foundation of efficient software development. Order must be imposed on chaos."* - Lt. 
Commander Data \ No newline at end of file +_"A well-organized repository is the foundation of efficient software development. Order must be imposed on chaos."_ - Lt. Commander Data diff --git a/docs/configuration/testing.md b/docs/configuration/testing.md index 4617a39..22e6549 100644 --- a/docs/configuration/testing.md +++ b/docs/configuration/testing.md @@ -8,7 +8,7 @@ Configuration files are loaded in the following order of precedence: 1. Command-line specified config path 2. `.datarc.json` in current working directory -3. `.datarc` in current working directory +3. `.datarc` in current working directory 4. `.datarc.json` in home directory 5. `.datarc` in home directory 6. Default configuration (if no file found) @@ -34,43 +34,47 @@ The `test` section in your configuration file supports the following options: ### Configuration Options -| Option | Type | Default | Description | -|--------|------|---------|-------------| -| `minimum_coverage` | Number (0-100) | 80 | Minimum test coverage percentage required | -| `test_timeout` | Number (seconds) | 300 | Maximum time allowed for individual test execution | -| `parallel_execution` | Boolean | false | Whether to run tests in parallel (future feature) | -| `output_formats` | Array | `["console", "junit", "json"]` | Available output formats for test results | -| `coverage_enforcement` | Boolean | true | Whether to enforce coverage thresholds by default | -| `excluded_functions` | Array | `["internal_*", "migration_*"]` | Function patterns to exclude from coverage | -| `debounce_delay` | Number (ms) | 1000 | Delay before re-running tests in watch mode | -| `auto_compile` | Boolean | true | Whether to automatically compile tests before running | +| Option | Type | Default | Description | +| ---------------------- | ---------------- | ------------------------------- | ----------------------------------------------------- | +| `minimum_coverage` | Number (0-100) | 80 | Minimum test coverage percentage required | +| `test_timeout` | Number (seconds) | 300 | Maximum time allowed for individual test execution | +| `parallel_execution` | Boolean | false | Whether to run tests in parallel (future feature) | +| `output_formats` | Array | `["console", "junit", "json"]` | Available output formats for test results | +| `coverage_enforcement` | Boolean | true | Whether to enforce coverage thresholds by default | +| `excluded_functions` | Array | `["internal_*", "migration_*"]` | Function patterns to exclude from coverage | +| `debounce_delay` | Number (ms) | 1000 | Delay before re-running tests in watch mode | +| `auto_compile` | Boolean | true | Whether to automatically compile tests before running | ## Command Integration ### RunCommand The `RunCommand` uses test configuration for: + - **Test Timeout**: Sets query timeout based on `test_timeout` setting - **Output Format**: Defaults to first format in `output_formats` array - **Format Selection**: Validates output format against allowed formats Example usage: + ```bash # Uses config default output format ./build/data test run -# Override config with specific format +# Override config with specific format ./build/data test run --format junit ``` ### CoverageCommand The `CoverageCommand` integrates with: + - **Coverage Enforcement**: Uses `coverage_enforcement` as default for --enforce - **Minimum Coverage**: Uses `minimum_coverage` for threshold validation - **Excluded Functions**: Respects `excluded_functions` patterns Example usage: + ```bash # Uses config enforcement and coverage settings ./build/data 
test coverage @@ -82,10 +86,12 @@ Example usage: ### WatchCommand The `WatchCommand` respects: + - **Debounce Delay**: Uses `debounce_delay` for file change debouncing - **Auto Compile**: Uses `auto_compile` to determine if tests should be compiled automatically Example usage: + ```bash # Uses config debounce and auto-compile settings ./build/data test watch @@ -97,6 +103,7 @@ Example usage: ### DevCycleCommand The `DevCycleCommand` integrates configuration by: + - Loading test config to respect all settings - Passing configuration context to child commands - Using configured timeouts and enforcement settings @@ -119,23 +126,25 @@ D.A.T.A. automatically validates and sanitizes configuration values: ### Type Coercion String values are automatically converted to appropriate types: + ```json { "test": { - "minimum_coverage": "85", // → 85 (number) - "test_timeout": "600", // → 600 (number) + "minimum_coverage": "85", // → 85 (number) + "test_timeout": "600", // → 600 (number) "coverage_enforcement": "true" // → true (boolean) } } ``` Invalid values fallback to defaults: + ```json { "test": { "minimum_coverage": "invalid", // → 80 (default) - "test_timeout": -10, // → 300 (default) - "debounce_delay": "fast" // → 1000 (default) + "test_timeout": -10, // → 300 (default) + "debounce_delay": "fast" // → 1000 (default) } } ``` @@ -143,6 +152,7 @@ Invalid values fallback to defaults: ## Example Configuration Files ### Development Configuration + ```json { "test": { @@ -157,6 +167,7 @@ Invalid values fallback to defaults: ``` ### Production/CI Configuration + ```json { "test": { @@ -171,6 +182,7 @@ Invalid values fallback to defaults: ``` ### Minimal Configuration + ```json { "test": { @@ -178,7 +190,8 @@ Invalid values fallback to defaults: } } ``` -*All other settings will use defaults* + +_All other settings will use defaults_ ## Environment Integration @@ -209,7 +222,7 @@ Configuration settings can be overridden via command-line options: # Override output format ./build/data test run --format json --output results.json -# Override watch settings +# Override watch settings ./build/data test watch --debounce 2000 --no-auto-compile ``` @@ -218,6 +231,7 @@ Command-line options always take precedence over configuration file settings. ## Troubleshooting ### Configuration Not Loading + ```bash # Check if config file exists and is valid JSON cat .datarc.json | jq . @@ -227,6 +241,7 @@ DEBUG=data:config ./build/data test run ``` ### Invalid Configuration Values + D.A.T.A. will automatically fix invalid values and continue execution. Check logs for validation warnings: ``` @@ -235,9 +250,11 @@ WARN: Invalid test_timeout -10, using default 300 ``` ### Configuration Precedence Issues + When multiple config files exist, check the loading order: + 1. Current directory `.datarc.json` -2. Home directory `.datarc.json` +2. Home directory `.datarc.json` 3. Default configuration -Use absolute paths to specify exact config file location. \ No newline at end of file +Use absolute paths to specify exact config file location. diff --git a/docs/decisions/000-javascript-not-typescript.md b/docs/decisions/000-javascript-not-typescript.md index 865cd11..705e8c7 100644 --- a/docs/decisions/000-javascript-not-typescript.md +++ b/docs/decisions/000-javascript-not-typescript.md @@ -22,7 +22,7 @@ We will use **native JavaScript classes with comprehensive JSDoc annotations** r 2. **Zero Build Step**: No transpilation required. The code that runs is the code we write. 
This eliminates: - Build configuration complexity - - Source map debugging issues + - Source map debugging issues - Deployment artifacts - Transpilation bugs @@ -74,7 +74,7 @@ class ProgressEvent extends CommandEvent { * @param {EventDetails} [details] - Additional details */ constructor(message, percentage = null, details = {}) { - super('progress', message, details); + super("progress", message, details); this.percentage = percentage; } } @@ -90,7 +90,7 @@ class ErrorEvent extends CommandEvent { * @param {string} [code] - Error code for categorization */ constructor(message, error, code = null) { - super('error', message, { error, code }); + super("error", message, { error, code }); this.error = error; this.code = code; } @@ -101,16 +101,17 @@ class ErrorEvent extends CommandEvent { ```javascript // Emission with type safety -this.emit('progress', new ProgressEvent( - 'Processing directory', - 50, - { directoryName: 'sql/001_extensions' } -)); +this.emit( + "progress", + new ProgressEvent("Processing directory", 50, { + directoryName: "sql/001_extensions", + }), +); // Reception with runtime validation -command.on('progress', (event) => { +command.on("progress", (event) => { if (!(event instanceof ProgressEvent)) { - throw new Error('Invalid event type received'); + throw new Error("Invalid event type received"); } console.log(`${event.message}: ${event.percentage}%`); }); @@ -141,129 +142,135 @@ None. This pattern applies to all new code and should be retrofitted to existing - [Node.js Best Practices - Prefer Native JS](https://github.com/goldbergyoni/nodebestpractices#prefer-native-js-when-possible) - Starfleet Engineering Manual, Section 7.3: "Simplicity in System Design" - - --- -*"The needs of the runtime outweigh the needs of the compile time."* - Vulcan Engineering Proverb +_"The needs of the runtime outweigh the needs of the compile time."_ - Vulcan Engineering Proverb --- -> [!warning]- __Captain's Log: Supplemental–Spicy Take on TypeScript +> [!warning]- \_\_Captain's Log: Supplemental–Spicy Take on TypeScript +> > # The Anti-TypeScript Manifesto: POOP Edition +> > ## Colonizing JavaScript Since 2012 -> +> > ### TL;DR -> -> TypeScript is **Pseudo-Object-Oriented Programming (POOP)** created by the C# guy who thought JavaScript needed "fixing." It forces you to write the same types 5 times (TS → Zod → JSON Schema → OpenAPI → GraphQL), adds a 30-second build step to catch typos a linter could find instantly, and provides zero runtime value. The TypeScript team is so embarrassed they're rewriting the compiler in Go. Even Deno's "TypeScript native" experiment failed—it's just hidden compilation that Bun destroyed by simply running JavaScript fast. -> +> +> TypeScript is **Pseudo-Object-Oriented Programming (POOP)** created by the C# guy who thought JavaScript needed "fixing." It forces you to write the same types 5 times (TS → Zod → JSON Schema → OpenAPI → GraphQL), adds a 30-second build step to catch typos a linter could find instantly, and provides zero runtime value. The TypeScript team is so embarrassed they're rewriting the compiler in Go. Even Deno's "TypeScript native" experiment failed—it's just hidden compilation that Bun destroyed by simply running JavaScript fast. +> > In 2025, AI can generate perfect JSDoc on every commit, giving you all of TypeScript's benefits with none of its bullshit. **Ship JavaScript. 
Skip the costume party.** -> +> > --- -> +> > TypeScript is the IKEA allen wrench of programming languages: > Everyone pretends it's indispensable, but really it's just a cheap tool bundled in the box to make you feel like you built something. -> +> > And guess what? It was built by the same guy who made C#. -> Yeah, that's right. Anders Hejlsberg looked at JavaScript and thought "I can fix her." -> +> Yeah, that's right. Anders Hejlsberg looked at JavaScript and thought "I can fix her." +> > Spoiler: He couldn't. -> +> > --- -> +> > ## Part I: The Origin Story +> > ### How C# Colonized JavaScript -> +> > Anders Hejlsberg has spent his entire career building the same language over and over: -> +> > ```mermaid > timeline > title The Hejlsberg Static-Type Empire -> +> > 1983 : Turbo Pascal > : "Let's make Pascal fast" -> +> > 1995 : Delphi > : "Let's make Pascal visual" -> +> > 2000 : C# > : "Let's make Java... Microsoft" -> +> > 2012 : TypeScript > : "Let's make JavaScript... C#" > ``` -> +> > See the pattern? Static typing. Compile-time checking. Enterprise "safety." -> +> > Then he looked at JavaScript—a language Brendan Eich built in 10 days to be Scheme in the browser—and decided it needed to be more like... C#. -> +> > This isn't evolution. It's colonization. > **It's C# hegemony in a prototype town.** -> +> > TypeScript is C# wearing JavaScript's skin. -> +> > --- -> +> > ## Part II: The Big Lie +> > ### It's Not OOP. It's POOP. -> +> > **Pseudo-Object-Oriented Programming.** -> +> > Real OOP has: +> > - Actual inheritance contracts -> - Runtime polymorphism +> - Runtime polymorphism > - Real encapsulation > - Guaranteed interfaces -> +> > TypeScript gives you: +> > - `interface` (from C#) that disappears at runtime > - `private` that's actually just public with a compiler pinky promise > - `abstract` classes that are just regular prototypes with extra steps > - Generics `` that are pure fiction after compilation -> +> > > ### **"If the guarantees vanish at runtime, they were costumes, not contracts."** -> +> > You're not flying a jet. You're making airplane noises while holding a cardboard box that says "Boeing" in Sharpie. -> +> > --- -> +> > ## Part III: The Daily Pain +> > ### Three Ways TypeScript Ruins Your Life -> +> > ### 1. The Build Time Black Hole™ -> +> > Every. Single. Change. +> > - Wait for TypeScript to compile -> - Wait for type checking +> - Wait for type checking > - Wait for bundling > - Wait for source maps > - Wait for hot reload -> +> > Meanwhile, pure JavaScript just... runs. -> +> > Your "developer experience" is now 30% coding, 70% watching progress bars. -> +> > > ### **"TS taxes your feedback loop—the only currency that actually buys better software."** -> +> > TypeScript is the only "safety system" that makes your plane board slower, taxi slower, take off slower, and then still crashes at runtime because you forgot to validate that API response. -> +> > ### 2. The Infinite Repetition Hell™ -> +> > TypeScript doesn't eliminate type definitions. It MULTIPLIES them: -> +> > ```typescript > // 1. TypeScript interface > interface User { id: number; name: string; } -> +> > // 2. Zod schema (because TS doesn't do runtime) > const UserSchema = z.object({ > id: z.number(), > name: z.string() > }); -> +> > // 3. JSON Schema (for API docs) > { "properties": { "id": { "type": "number" }, "name": { "type": "string" } } } -> +> > // 4. OpenAPI spec (for Swagger) > components: > schemas: @@ -271,111 +278,117 @@ None. 
This pattern applies to all new code and should be retrofitted to existing > properties: > id: {type: integer} > name: {type: string} -> +> > // 5. GraphQL schema (because why not) > type User { > id: Int! > name: String! > } > ``` -> +> > You've written the same shape FIVE TIMES in FIVE DIALECTS. > This isn't engineering. It's Stockholm syndrome. -> +> > ### 3. The Safety Theater -> +> > "TypeScript prevents bugs!" -> +> > No, it prevents typos. There's a difference. -> +> > **The bugs that actually matter:** +> > - Race conditions → TS doesn't help -> - Memory leaks → TS doesn't help +> - Memory leaks → TS doesn't help > - Logic errors → TS doesn't help > - API contract violations → TS doesn't help (need runtime validation anyway) > - State management issues → TS doesn't help > - Performance problems → TS makes them worse -> +> > **What TS prevents:** +> > - Typing `usr.naem` instead of `user.name` -> +> > Congratulations. You added a 30-second build step to catch what a linter could find instantly. -> +> > --- -> +> > ## Part IV: The Failed Experiments +> > ### When Even True Believers Give Up -> +> > ### Exhibit A: TypeScript's Self-Loathing -> +> > The TypeScript team's newest innovation? > They're rewriting the compiler... in Go. -> +> > Let that sink in. -> +> > The TypeScript team is so confident in TypeScript that they're rewriting TypeScript... > ...in NOT TypeScript. -> +> > > ### **"Even TypeScript doesn't want to be written in TypeScript."** -> +> > **A type system so embarrassed it's trying to escape its own runtime.** -> +> > ### Exhibit B: The Deno Disaster -> +> > Even Ryan Dahl—the guy who created Node.js—drank the Kool-Aid. -> +> > After giving his famous "10 Things I Regret About Node.js" talk, he decided one of those regrets was: "no types." -> +> > So when he built Deno, he went all-in on the marketing slogan: -> +> > > "TypeScript native." -> +> > Sounds futuristic, right? Nope. Here's what it really meant: -> +> > - Deno grabs your .ts file > - Shoves it through the TypeScript compiler (later swc/deno_ast for "speed") > - Caches the result as JS > - And runs _that_ -> +> > In other words: **JavaScript with a hidden compile step.** -> +> > Not native. Not special. Just duct tape welded inside the runtime. -> +> > **What did developers get?** +> > - Slower startup on first run > - All the same build-time baggage > - Zero runtime benefits -> +> > It was "TypeScript native" in the same way a microwave burrito is "authentic Mexican cuisine." -> +> > And guess what? Even Deno is quietly backpedaling. -> +> > Now their docs show **JavaScript-first examples**. They admit the overhead. They know Bun is eating their lunch by simply saying: -> +> > > "Just run JavaScript. Fast." -> -> So yeah, even the guy who invented Node tried to turn TypeScript into a runtime feature—and the industry response was: -> +> +> So yeah, even the guy who invented Node tried to turn TypeScript into a runtime feature—and the industry response was: +> > _"Cool, but can it run my JavaScript faster?"_ -> +> > Spoiler: Bun answered "yes." Deno had to blink. -> +> > --- -> +> > ## Part V: The Solution You Already Have +> > ### The JSDoc + AI Revolution -> +> > "But JSDoc is too verbose!" -> +> > Brother, it's 2025. AI can generate perfect JSDoc on every commit. 
-> +> > ```javascript > // You write: > function process(data, options) { -> return data.filter(d => d.active) -> .map(d => ({...d, timestamp: Date.now()})) +> return data +> .filter((d) => d.active) +> .map((d) => ({ ...d, timestamp: Date.now() })); > } -> +> > // AI generates on pre-commit: > /** > * Process active data items with timestamp @@ -384,71 +397,75 @@ None. This pattern applies to all new code and should be retrofitted to existing > * @returns {Array} > */ > ``` -> +> > **JSDoc is ugly to write; it isn't ugly to use. Let the machine write it.** -> +> > TypeScript's ENTIRE value proposition just evaporated: +> > - No build step -> - No compilation time +> - No compilation time > - Pure JavaScript > - Still get IDE hints > - AI improves over time -> +> > You're compiling TypeScript to JavaScript to avoid writing JSDoc that AI can write for you instantly. -> +> > That's like driving to the gym to use the treadmill. -> +> > --- -> +> > ## Part VI: The Final Verdict -> +> > ### JavaScript vs. TypeScript: The Real Difference -> +> > **JavaScript:** +> > - Created in 10 days > - Runs everything from browsers to servers to satellites > - Dynamic, flexible, powerful > - Trusts developers to know what they're doing -> +> > **TypeScript:** +> > - Created by the C# guy who thinks you need protection > - Compiles to JavaScript anyway > - Adds nothing at runtime > - Exists because Microsoft developers were scared of `undefined` -> +> > JavaScript is punk rock. > TypeScript is dad rock in khakis—the same three C# chords, played slower, with "safety" padding. -> +> > ### The Ultimate Truth -> +> > TypeScript is what happens when enterprise developers are too scared to learn JavaScript properly. -> +> > So they brought in the C# guy to make JavaScript feel like C#. > Added a compilation step that adds zero runtime value. > Created a type system that you have to repeat in five other type systems. > Built a "safety net" that catches typos but misses actual bugs. -> +> > And the punchline? In 2025, AI can generate better type documentation than TypeScript, instantly, without compilation, in pure JavaScript. -> +> > TypeScript isn't Object-Oriented Programming. > It's Pseudo-Object-Oriented Programming. > **It's POOP.** -> +> > A type system so embarrassed by itself that it's being rewritten in a different language. > A compiler whose greatest achievement is deleting itself after running. > A "superset" of JavaScript that's really just C# homesickness with a build step. -> +> > Meanwhile, JavaScript—beautiful, chaotic, untyped JavaScript—runs the entire fucking internet. -> +> > **Ship JavaScript. Skip the costume party.** -> +> > --- -> +> > ## Part VII: Your Escape Plan +> > ### Ditch TypeScript Today -> +> > Want out? Here's your AI-powered JSDoc setup: -> +> > ```json > // package.json > { @@ -457,7 +474,7 @@ None. This pattern applies to all new code and should be retrofitted to existing > } > } > ``` -> +> > ```bash > # .husky/pre-commit > #!/bin/sh @@ -469,31 +486,31 @@ None. This pattern applies to all new code and should be retrofitted to existing > git add "$file" > done > ``` -> +> > Or even simpler with a one-liner: -> +> > ```bash > # Just alias this bad boy > alias jsdoc-me="git ls-files '*.js' | xargs -I {} claude -p 'Add JSDoc to {}' {}" > ``` -> +> > That's it. Pure JS. AI docs. Zero build time. -> +> > --- -> +> > ## Receipts (for the pedants) -> +> > • **VS Code understands JSDoc** → IntelliSense, go-to-def, hover types. Full stop. 
> • **You still need runtime validation** → Zod/Valibot/Arktype, no matter what TS promises. > • **Most teams duplicate shapes** → TS → Zod → JSON Schema → OpenAPI, because none of these tools share a single source of truth. > • **Build time matters** → Slower feedback loops = worse design decisions. Ask anyone who lived through Scala compile farms. -> +> > --- -> +> > **P.S.** — If you need a compiler to stop you from shipping bugs, the problem isn't the language. It's you. -> +> > **P.P.S.** — Yes, I know your Fortune 500 company uses TypeScript. They also use SharePoint. Bad decisions love company. -> +> > **P.P.P.S.** — "But Google uses TypeScript!" Google also killed Reader, Plus, Wave, Glass, and Stadia. Maybe stop using Google as your north star. -> -> #DeathToTypeScript #POOP #ColonizingJavaScriptSince2012 #AIKilledTypeScript #ShipJSSkipTheCostumeParty \ No newline at end of file +> +> #DeathToTypeScript #POOP #ColonizingJavaScriptSince2012 #AIKilledTypeScript #ShipJSSkipTheCostumeParty diff --git a/docs/decisions/001-runtime-choice-deno-vs-node.md b/docs/decisions/001-runtime-choice-deno-vs-node.md index 18a4cc5..68eebfa 100644 --- a/docs/decisions/001-runtime-choice-deno-vs-node.md +++ b/docs/decisions/001-runtime-choice-deno-vs-node.md @@ -8,12 +8,14 @@ ## Context and Problem Statement D.A.T.A. (Database Automation, Testing, and Alignment) is a CLI tool that manages: + 1. PostgreSQL/Supabase database migrations 2. Supabase Edge Functions deployment and testing 3. pgTAP test execution and coverage enforcement 4. Production deployment safety Current pain points: + - **Module System Chaos**: Mixed CommonJS/ESM causing test failures and import issues - **Runtime Mismatch**: We deploy to Deno (Edge Functions) but develop in Node.js - **TypeScript Gaps**: Using JSDoc instead of native TypeScript @@ -32,18 +34,20 @@ Current pain points: ## Considered Options ### Option 1: Full Migration to Deno 🦕 + **Complete rewrite of D.A.T.A. as a Deno application** ```typescript // Native TypeScript, no transpilation -import { Command } from "https://deno.land/x/cliffy@v1.0.0/command/mod.ts" -import { DB } from "https://deno.land/x/sqlite@v3.8/mod.ts" +import { Command } from "https://deno.land/x/cliffy@v1.0.0/command/mod.ts"; +import { DB } from "https://deno.land/x/sqlite@v3.8/mod.ts"; // Direct Edge Function testing -const edgeFunction = await import("./supabase/functions/my-function/index.ts") +const edgeFunction = await import("./supabase/functions/my-function/index.ts"); ``` **Pros:** + - ✅ **Perfect Edge Function parity**: Test functions in exact runtime - ✅ **TypeScript native**: No transpilation, true type safety - ✅ **Single executable**: `deno compile` creates standalone binary @@ -54,6 +58,7 @@ const edgeFunction = await import("./supabase/functions/my-function/index.ts") - ✅ **Future-proof**: Aligns with Supabase's direction **Cons:** + - ❌ **PostgreSQL drivers**: Less mature (but improving rapidly) - ❌ **Ecosystem gaps**: Some tools unavailable (pgTAP runners, etc.) 
- ❌ **Rewrite effort**: 40-60 hours of migration work @@ -61,6 +66,7 @@ const edgeFunction = await import("./supabase/functions/my-function/index.ts") - ❌ **Early adopter risk**: Breaking changes still happening **Implementation Path:** + ```bash # Phase 1: Core migration (Week 1) - Port Command base classes @@ -79,6 +85,7 @@ const edgeFunction = await import("./supabase/functions/my-function/index.ts") ``` ### Option 2: Stay with Node.js + Fix Module System 📦 + **Properly configure TypeScript and ES modules** ```json @@ -94,6 +101,7 @@ const edgeFunction = await import("./supabase/functions/my-function/index.ts") ``` **Pros:** + - ✅ **Mature ecosystem**: All tools available - ✅ **Team familiarity**: Everyone knows Node - ✅ **Minimal migration**: Just configuration fixes @@ -101,6 +109,7 @@ const edgeFunction = await import("./supabase/functions/my-function/index.ts") - ✅ **CI/CD support**: Works everywhere **Cons:** + - ❌ **Runtime mismatch**: Can't accurately test Edge Functions - ❌ **Module complexity**: Even "fixed", still complex - ❌ **Bundle size**: Node_modules bloat @@ -108,52 +117,60 @@ const edgeFunction = await import("./supabase/functions/my-function/index.ts") - ❌ **Two TypeScripts**: Different configs for Edge Functions vs CLI ### Option 3: Hybrid Architecture 🔀 + **Node.js for CLI, Deno for Edge Function testing** ```javascript // Node CLI orchestrates -await exec("deno run --allow-net test-edge-function.ts") +await exec("deno run --allow-net test-edge-function.ts"); // Deno subprocess tests Edge Functions -const result = await testInDenoRuntime(functionCode) +const result = await testInDenoRuntime(functionCode); ``` **Pros:** + - ✅ **Best of both**: Node ecosystem + Deno accuracy - ✅ **Incremental migration**: Can move pieces over time - ✅ **Risk mitigation**: Keep working system while experimenting **Cons:** + - ❌ **Complexity explosion**: Two runtimes to maintain - ❌ **Context switching**: Developers juggle two systems - ❌ **Distribution harder**: Need both runtimes installed - ❌ **Debugging nightmare**: Cross-runtime issues ### Option 4: Migrate to Bun 🥟 + **Use Bun as Node-compatible modern runtime** **Pros:** + - ✅ **Node compatibility**: Most packages work - ✅ **Fast**: Blazing performance - ✅ **Modern**: TypeScript native, JSX support **Cons:** + - ❌ **Not Deno**: Still doesn't match Edge Functions - ❌ **Newer than Deno**: Even less mature - ❌ **Different goals**: Optimized for different use cases ### Option 5: Build Runtime Abstraction Layer 🏗️ + **Create abstraction that can run on both** ```typescript interface Runtime { - fetch: GlobalFetch - crypto: Crypto - readFile: (path: string) => Promise + fetch: GlobalFetch; + crypto: Crypto; + readFile: (path: string) => Promise; } ``` **Cons:** + - ❌ **Lowest common denominator**: Lose runtime-specific features - ❌ **Abstraction overhead**: More code to maintain - ❌ **Still doesn't solve**: Edge Function testing accuracy @@ -212,6 +229,7 @@ interface Runtime { ## Implementation Strategy ### Phase 1: Proof of Concept (Week 1) + ```typescript // Test critical paths in Deno - Database connections ✓ @@ -221,6 +239,7 @@ interface Runtime { ``` ### Phase 2: Parallel Development (Week 2-3) + ```typescript // Build Deno version alongside Node - Keep Node version working @@ -229,6 +248,7 @@ interface Runtime { ``` ### Phase 3: Cutover (Week 4) + ```typescript // Switch primary development to Deno - Deprecate Node version @@ -239,6 +259,7 @@ interface Runtime { ## Consequences ### Positive + - **Unified runtime**: Edge Functions 
and CLI use same environment - **Modern codebase**: No legacy module issues - **Better security**: Explicit permission model @@ -246,12 +267,14 @@ interface Runtime { - **TypeScript native**: No build step ### Negative + - **Migration effort**: 40-60 hours of work - **Ecosystem gaps**: May need to build some tools - **Learning curve**: Team needs Deno knowledge - **Early adopter risk**: Deno still evolving ### Neutral + - **Different paradigm**: URL imports vs package.json - **New tooling**: Deno.test vs Vitest - **Permission model**: Explicit flags needed @@ -292,10 +315,10 @@ If we decide NOT to migrate, here's the minimum fixes needed: ```javascript // Fix all imports -import { describe, it } from 'vitest' // not require() +import { describe, it } from "vitest"; // not require() // Use import.meta.url -const __dirname = new URL('.', import.meta.url).pathname +const __dirname = new URL(".", import.meta.url).pathname; ``` ```bash @@ -308,6 +331,6 @@ But this is putting a bandaid on a broken leg when we need surgery. --- -*"It is possible to commit no mistakes and still lose. That is not weakness, that is life."* - Jean-Luc Picard +_"It is possible to commit no mistakes and still lose. That is not weakness, that is life."_ - Jean-Luc Picard -In this case, staying with Node.js isn't a mistake, but it is a lost opportunity to align perfectly with our deployment target. \ No newline at end of file +In this case, staying with Node.js isn't a mistake, but it is a lost opportunity to align perfectly with our deployment target. diff --git a/docs/decisions/002-deno-migration-plan.md b/docs/decisions/002-deno-migration-plan.md index 37617d9..60489f4 100644 --- a/docs/decisions/002-deno-migration-plan.md +++ b/docs/decisions/002-deno-migration-plan.md @@ -3,7 +3,7 @@ **Created:** 2025-08-30 **Status:** DRAFT **Estimated Effort:** 40-60 hours -**Risk Level:** Medium +**Risk Level:** Medium ## Executive Summary @@ -12,7 +12,7 @@ Migrate D.A.T.A. from Node.js to Deno to achieve perfect Supabase Edge Function ## Why Deno? The 30-Second Pitch ``` -Current: Node.js CLI → Deploys to → Deno Edge Functions +Current: Node.js CLI → Deploys to → Deno Edge Functions (MISMATCH! 🔴) Future: Deno CLI → Deploys to → Deno Edge Functions @@ -22,6 +22,7 @@ Future: Deno CLI → Deploys to → Deno Edge Functions ## Migration Phases ### 🏗️ Phase 0: Foundation (Day 1-2) + **Goal:** Prove Deno can handle our core needs ```bash @@ -35,18 +36,20 @@ deno run --allow-run test-pgtap-execution.ts ``` **Checklist:** + - [ ] PostgreSQL connection working -- [ ] Edge Function imports working +- [ ] Edge Function imports working - [ ] pgTAP test execution working - [ ] File system operations working - [ ] Environment variables working ### 🔄 Phase 1: Core Library Migration (Day 3-7) + **Goal:** Port foundation classes ```typescript // Old (Node.js) -const { EventEmitter } = require('events'); +const { EventEmitter } = require("events"); class Command extends EventEmitter { // ... } @@ -59,10 +62,11 @@ class Command extends EventEmitter { ``` **Files to Migrate:** + ``` src/lib/ ├── Command.ts (4 hours) -├── DatabaseCommand.ts (2 hours) +├── DatabaseCommand.ts (2 hours) ├── SupabaseCommand.ts (2 hours) ├── TestCommand.ts (2 hours) ├── CommandRouter.ts (3 hours) @@ -72,10 +76,13 @@ src/lib/ ``` ### 🚀 Phase 2: Command Migration (Day 8-14) + **Goal:** Port all commands to Deno **Priority Order:** + 1. 
**Test commands** (Most complex, highest value) + ```typescript // Focus: Edge Function testing accuracy src/commands/test/ @@ -85,6 +92,7 @@ src/lib/ ``` 2. **Database commands** (Core functionality) + ```typescript src/commands/db/ ├── MigrateCommand.ts (4 hours) @@ -99,14 +107,18 @@ src/lib/ ``` ### 🧪 Phase 3: Test Suite Migration (Day 15-18) + **Goal:** All tests running in Deno ```typescript // Old (Vitest) -import { describe, it, expect } from 'vitest'; +import { describe, it, expect } from "vitest"; // New (Deno) -import { assertEquals, assertThrows } from "https://deno.land/std@0.208.0/assert/mod.ts"; +import { + assertEquals, + assertThrows, +} from "https://deno.land/std@0.208.0/assert/mod.ts"; Deno.test("Command emits events correctly", async () => { // Test implementation @@ -115,11 +127,13 @@ Deno.test("Command emits events correctly", async () => { ``` **Test Migration Strategy:** + 1. Start with unit tests (simpler) 2. Move to integration tests 3. Add Deno-specific tests for Edge Functions ### 📦 Phase 4: Distribution (Day 19-20) + **Goal:** Single binary distribution ```bash @@ -139,12 +153,14 @@ deno compile \ ``` **Distribution Targets:** + - macOS ARM64 (Apple Silicon) - macOS x64 (Intel) - Linux x64 - Windows x64 ### 🎯 Phase 5: Cutover (Day 21) + **Goal:** Switch to Deno as primary 1. **Documentation Update** @@ -153,6 +169,7 @@ deno compile \ - Migration guide for users 2. **CI/CD Update** + ```yaml # .github/workflows/test.yml - uses: denoland/setup-deno@v1 @@ -166,33 +183,39 @@ deno compile \ ## Dependency Mapping -| Node.js Package | Deno Replacement | Notes | -|----------------|------------------|-------| -| commander | cliffy | Better TypeScript support | -| pg | deno-postgres | Native Deno driver | -| chalk | std/fmt/colors | Built-in formatting | -| dotenv | std/dotenv | Standard library | -| fs-extra | std/fs | Built-in | -| glob | std/fs/walk | Built-in | -| eslint | deno lint | Built-in | -| vitest | Deno.test | Built-in | +| Node.js Package | Deno Replacement | Notes | +| --------------- | ---------------- | ------------------------- | +| commander | cliffy | Better TypeScript support | +| pg | deno-postgres | Native Deno driver | +| chalk | std/fmt/colors | Built-in formatting | +| dotenv | std/dotenv | Standard library | +| fs-extra | std/fs | Built-in | +| glob | std/fs/walk | Built-in | +| eslint | deno lint | Built-in | +| vitest | Deno.test | Built-in | ## Risk Mitigation ### Risk 1: PostgreSQL Driver Limitations -**Mitigation:** + +**Mitigation:** + - Test extensively in Phase 0 - Keep Node.js fallback for complex queries - Contribute to deno-postgres if needed ### Risk 2: Missing npm Packages + **Mitigation:** + - Use CDN imports: `https://esm.sh/package-name` - Write native Deno replacements - Use subprocess for Node-only tools ### Risk 3: Team Learning Curve + **Mitigation:** + - Pair programming during migration - Create Deno cheat sheet - Start with familiar patterns @@ -200,6 +223,7 @@ deno compile \ ## Success Metrics ### Technical Metrics + - [ ] 100% test coverage maintained - [ ] Edge Function test accuracy: 100% (up from ~70%) - [ ] Binary size < 50MB @@ -207,6 +231,7 @@ deno compile \ - [ ] No node_modules directory ### Developer Experience Metrics + - [ ] Installation: Single command - [ ] No npm install required - [ ] TypeScript errors caught at runtime @@ -224,12 +249,13 @@ If Deno migration fails: ## Code Examples ### Before (Node.js) + ```javascript // Complex module resolution -const { Command } = require('commander'); -const chalk = 
require('chalk'); -const { config } = require('dotenv'); -const pg = require('pg'); +const { Command } = require("commander"); +const chalk = require("chalk"); +const { config } = require("dotenv"); +const pg = require("pg"); // Confusing async handling async function connectDB() { @@ -244,6 +270,7 @@ const conf = loadConfig(); ``` ### After (Deno) + ```typescript // Clean URL imports import { Command } from "https://deno.land/x/cliffy@v1.0.0/command/mod.ts"; @@ -271,16 +298,16 @@ const conf: Config = await loadConfig(); async function testEdgeFunction(functionPath: string) { // Import actual Edge Function const mod = await import(functionPath); - + // Test with Deno-native Request/Response const request = new Request("https://example.com", { method: "POST", - body: JSON.stringify({ test: true }) + body: JSON.stringify({ test: true }), }); - + // This runs EXACTLY like in production const response = await mod.handler(request); - + // Validate using Web Standards assertEquals(response.status, 200); const body = await response.json(); @@ -292,7 +319,7 @@ async function testEdgeFunction(functionPath: string) { ``` Week 1: Foundation + Core Libraries -Week 2: Commands + Tests +Week 2: Commands + Tests Week 3: Distribution + Documentation Week 4: Cutover + Monitoring ``` @@ -314,6 +341,6 @@ If MAYBE → Run Phase 0 as experiment (2 days) --- -*"Change is the essential process of all existence."* - Spock +_"Change is the essential process of all existence."_ - Spock -The module system chaos is unsustainable. Whether we choose Deno or fix Node.js, we must act. \ No newline at end of file +The module system chaos is unsustainable. Whether we choose Deno or fix Node.js, we must act. diff --git a/docs/decisions/003-deno-migration-simplified.md b/docs/decisions/003-deno-migration-simplified.md index 2364740..90abdfe 100644 --- a/docs/decisions/003-deno-migration-simplified.md +++ b/docs/decisions/003-deno-migration-simplified.md @@ -10,12 +10,12 @@ I was overcomplicating this. D.A.T.A. doesn't directly connect to PostgreSQL - i ```typescript // What I was worried about (NOT WHAT WE DO): -import { Client } from "pg" // Direct PostgreSQL connection +import { Client } from "pg"; // Direct PostgreSQL connection // What we ACTUALLY do: fetch(`${SUPABASE_URL}/rest/v1/`, { - headers: { 'apikey': SUPABASE_SERVICE_ROLE_KEY } -}) + headers: { apikey: SUPABASE_SERVICE_ROLE_KEY }, +}); ``` ## This Changes EVERYTHING @@ -28,17 +28,17 @@ Why? Because Supabase API calls are identical in Node.js and Deno: // Node.js (current) const response = await fetch(`${supabaseUrl}/rest/v1/migrations`, { headers: { - 'apikey': serviceRoleKey, - 'Authorization': `Bearer ${serviceRoleKey}` - } + apikey: serviceRoleKey, + Authorization: `Bearer ${serviceRoleKey}`, + }, }); // Deno (future) const response = await fetch(`${supabaseUrl}/rest/v1/migrations`, { headers: { - 'apikey': serviceRoleKey, - 'Authorization': `Bearer ${serviceRoleKey}` - } + apikey: serviceRoleKey, + Authorization: `Bearer ${serviceRoleKey}`, + }, }); ``` @@ -49,48 +49,53 @@ const response = await fetch(`${supabaseUrl}/rest/v1/migrations`, { ### What Actually Needs Changing 1. **Imports** (2 hours) + ```typescript // Before - const { Command } = require('commander') - + const { Command } = require("commander"); + // After - import { Command } from "https://deno.land/x/cliffy/command/mod.ts" + import { Command } from "https://deno.land/x/cliffy/command/mod.ts"; ``` 2. 
**File System** (1 hour) + ```typescript // Before - const fs = require('fs').promises - + const fs = require("fs").promises; + // After - const { readFile, writeFile } = Deno + const { readFile, writeFile } = Deno; ``` 3. **Process/Child Process** (2 hours) + ```typescript // Before - const { exec } = require('child_process') - + const { exec } = require("child_process"); + // After - const command = new Deno.Command("pgTAP", { args: ["test.sql"] }) + const command = new Deno.Command("pgTAP", { args: ["test.sql"] }); ``` 4. **Test Framework** (3 hours) + ```typescript // Before - import { describe, it, expect } from 'vitest' - + import { describe, it, expect } from "vitest"; + // After Deno.test("should work", () => { - assertEquals(actual, expected) - }) + assertEquals(actual, expected); + }); ``` 5. **Build/Distribution** (2 hours) + ```bash # Before: Complex npm packaging npm run build && npm pack - + # After: Single command deno compile --output data src/index.ts ``` @@ -118,31 +123,31 @@ Since we're using Supabase API, not direct PostgreSQL: ```typescript // test-deno-supabase.ts -const SUPABASE_URL = Deno.env.get("SUPABASE_URL")! -const SUPABASE_KEY = Deno.env.get("SUPABASE_SERVICE_ROLE_KEY")! +const SUPABASE_URL = Deno.env.get("SUPABASE_URL")!; +const SUPABASE_KEY = Deno.env.get("SUPABASE_SERVICE_ROLE_KEY")!; // Test 1: Can we call Supabase API? const response = await fetch(`${SUPABASE_URL}/rest/v1/`, { headers: { - 'apikey': SUPABASE_KEY, - 'Authorization': `Bearer ${SUPABASE_KEY}` - } -}) -console.log("API Status:", response.status) // ✅ Works + apikey: SUPABASE_KEY, + Authorization: `Bearer ${SUPABASE_KEY}`, + }, +}); +console.log("API Status:", response.status); // ✅ Works // Test 2: Can we run pgTAP? const command = new Deno.Command("psql", { args: [DATABASE_URL, "-f", "test.sql"], - stdout: "piped" -}) -const { code, stdout } = await command.output() -console.log("pgTAP Result:", new TextDecoder().decode(stdout)) // ✅ Works + stdout: "piped", +}); +const { code, stdout } = await command.output(); +console.log("pgTAP Result:", new TextDecoder().decode(stdout)); // ✅ Works // Test 3: Can we import Edge Functions? -const edgeFunc = await import("./supabase/functions/my-func/index.ts") -const req = new Request("https://example.com") -const res = await edgeFunc.handler(req) -console.log("Edge Function:", res.status) // ✅ Works +const edgeFunc = await import("./supabase/functions/my-func/index.ts"); +const req = new Request("https://example.com"); +const res = await edgeFunc.handler(req); +console.log("Edge Function:", res.status); // ✅ Works // ALL CRITICAL PATHS WORK! ``` @@ -150,22 +155,26 @@ console.log("Edge Function:", res.status) // ✅ Works ## Migration Plan: THE REAL ONE ### Day 1: Morning (4 hours) + - Fork repo to `deno-migration` branch - Convert imports to Deno - Fix file system calls - Get basic CLI running ### Day 1: Afternoon (4 hours) + - Port all commands - Update child process calls - Test core workflows ### Day 2: Morning (4 hours) + - Convert test suite - Add Edge Function tests - Create distribution binary ### Day 2: Afternoon + - Documentation - Release 🚀 @@ -174,6 +183,7 @@ console.log("Edge Function:", res.status) // ✅ Works We're not migrating from a PostgreSQL driver to another PostgreSQL driver. We're migrating from: + - **Fetch API → Fetch API** (identical!) - **JSON → JSON** (identical!) - **Environment vars → Environment vars** (identical!) 
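
A minimal sketch of the point above, assuming `SUPABASE_URL` and `SUPABASE_SERVICE_ROLE_KEY` are exported and the test is run with `deno test --allow-net --allow-env`: the same REST `fetch` call the Node.js CLI makes today can be exercised directly under Deno's built-in test runner, with no driver or porting work. The endpoint and header names mirror the examples elsewhere in this document; the test name is illustrative.

```typescript
// Minimal sketch: the existing Supabase REST call, run under Deno.test.
// Assumes SUPABASE_URL and SUPABASE_SERVICE_ROLE_KEY are set in the environment.
import { assertEquals } from "https://deno.land/std@0.208.0/assert/mod.ts";

Deno.test("Supabase REST API is reachable", async () => {
  const url = Deno.env.get("SUPABASE_URL")!;
  const key = Deno.env.get("SUPABASE_SERVICE_ROLE_KEY")!;

  // Identical to the fetch call used by the Node.js CLI — no PostgreSQL driver needed.
  const response = await fetch(`${url}/rest/v1/`, {
    headers: { apikey: key, Authorization: `Bearer ${key}` },
  });

  assertEquals(response.ok, true);
  // Cancel the unread body so Deno's test runner doesn't flag a leaked resource.
  await response.body?.cancel();
});
```
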
@@ -188,19 +198,19 @@ class MigrateCommand extends Command { async performExecute() { // This doesn't change AT ALL const response = await fetch(`${this.supabaseUrl}/rest/v1/migrations`, { - method: 'POST', + method: "POST", headers: { - 'apikey': this.serviceRoleKey, - 'Content-Type': 'application/json' + apikey: this.serviceRoleKey, + "Content-Type": "application/json", }, - body: JSON.stringify(migrationData) - }) - + body: JSON.stringify(migrationData), + }); + if (!response.ok) { - throw new Error(`Migration failed: ${response.statusText}`) + throw new Error(`Migration failed: ${response.statusText}`); } - - return response.json() + + return response.json(); } } ``` @@ -215,8 +225,8 @@ DO IT. DO IT NOW. --- -*"Sometimes the solution is so simple, you miss it because you're looking for something complex."* - Scotty +_"Sometimes the solution is so simple, you miss it because you're looking for something complex."_ - Scotty -We don't need PostgreSQL drivers. We have the Supabase API. +We don't need PostgreSQL drivers. We have the Supabase API. -Game. Set. Match. 🎾 \ No newline at end of file +Game. Set. Match. 🎾 diff --git a/docs/features/edge-functions.md b/docs/features/edge-functions.md index e2dd250..66bb9a8 100644 --- a/docs/features/edge-functions.md +++ b/docs/features/edge-functions.md @@ -49,20 +49,20 @@ data --prod db compile --deploy-functions ### `data functions deploy` -| Option | Description | -|--------|-------------| -| `--no-verify-jwt` | Skip JWT verification during deployment | -| `--debug` | Enable debug output | -| `--skip-import-map` | Skip using import map in production | +| Option | Description | +| ------------------- | --------------------------------------- | +| `--no-verify-jwt` | Skip JWT verification during deployment | +| `--debug` | Enable debug output | +| `--skip-import-map` | Skip using import map in production | ### `data db compile` (Enhanced) -| Option | Description | -|--------|-------------| -| `--deploy-functions` | Deploy Edge Functions after compilation | -| `--functions [names...]` | Specific functions to deploy | -| `--skip-import-map` | Skip import map in function deployment | -| `--debug-functions` | Enable debug output for function deployment | +| Option | Description | +| ------------------------ | ------------------------------------------- | +| `--deploy-functions` | Deploy Edge Functions after compilation | +| `--functions [names...]` | Specific functions to deploy | +| `--skip-import-map` | Skip import map in function deployment | +| `--debug-functions` | Enable debug output for function deployment | ## 🛡️ Security Features @@ -78,7 +78,7 @@ D.A.T.A. automatically validates: ### Production Safety - **Double Confirmation**: Production deployments require explicit confirmation -- **Rollback Info**: Failed deployments provide rollback guidance +- **Rollback Info**: Failed deployments provide rollback guidance - **Audit Trail**: Complete event logging for deployment actions - **Graceful Failures**: Migration compilation succeeds even if function deployment fails @@ -91,7 +91,7 @@ D.A.T.A. functions emit comprehensive events for monitoring: command.on('function-validated', ({ function, path }) => { ... }); command.on('function-deployed', ({ function, success, result }) => { ... }); -// Deployment-level events +// Deployment-level events command.on('deployment-complete', ({ total, successful, failed, results }) => { ... }); command.on('deployment-status', ({ functions }) => { ... 
}); @@ -105,18 +105,21 @@ command.on('functions-deployment-failed', ({ error }) => { ... }); The validation system checks: ### Structure Validation + - ✅ `index.ts` file presence - ✅ Deno.serve() or serve() handler - ✅ Basic import structure - ✅ Optional deno.json configuration -### Security Validation +### Security Validation + - 🚨 Hardcoded secrets detection - 🚨 Sensitive data patterns - ✅ Environment variable usage - ✅ CORS handling for public endpoints ### Best Practices + - ✅ Error handling (try/catch blocks) - ✅ Import map usage - ✅ Proper permissions in deno.json @@ -143,7 +146,7 @@ data functions deploy donations-create-checkout donations-create-subscription ### 3. Migration Workflow Integration ```bash -# Traditional: Compile migration, then deploy functions separately +# Traditional: Compile migration, then deploy functions separately data db compile data functions deploy @@ -191,23 +194,26 @@ Migration Compilation ──┐ ### Error Handling - **Migration Failures**: Stop entire process -- **Function Validation Failures**: Show warnings, continue deployment +- **Function Validation Failures**: Show warnings, continue deployment - **Function Deployment Failures**: Log errors, continue with remaining functions - **Production Safety**: Always fail-closed on confirmation timeout ## 🎯 Benefits ### For Developers + - **Single Command Workflow**: Compile + deploy in one step - **Comprehensive Validation**: Catch issues before deployment - **Clear Status Reporting**: Know exactly what's deployed where ### For DevOps + - **Production Safety**: Built-in confirmation and validation -- **Event Monitoring**: Complete audit trail of deployments +- **Event Monitoring**: Complete audit trail of deployments - **Rollback Support**: Clear error messages and rollback guidance ### For Teams + - **Consistent Process**: Same workflow for local and production - **Selective Deployment**: Deploy only changed functions - **Integration Ready**: Works with existing data migration patterns @@ -222,4 +228,4 @@ Migration Compilation ──┐ --- -*This enhancement maintains data's event-driven architecture while adding comprehensive Edge Functions management that integrates seamlessly with the existing migration workflow.* \ No newline at end of file +_This enhancement maintains data's event-driven architecture while adding comprehensive Edge Functions management that integrates seamlessly with the existing migration workflow._ diff --git a/docs/fun/data-discovers-data.md b/docs/fun/data-discovers-data.md index ddcb4e5..dce9b20 100644 --- a/docs/fun/data-discovers-data.md +++ b/docs/fun/data-discovers-data.md @@ -15,6 +15,7 @@ I began my investigation in Engineering, where Lieutenant Commander La Forge was "Data, I've been trying to sync the warp core optimization database with the backup systems for three hours. Every time I think I've got it, something drifts out of alignment." I observed his manual process: + 1. Writing SQL migration scripts by hand (error rate: 12.7%) 2. Testing on development database (coverage: 43%) 3. Deploying to production (prayer rate: 100%) @@ -46,15 +47,15 @@ Commander Riker attempted what he called a "quick fix" to the duty roster databa riker@enterprise:~$ sudo data align production --force --skip-tests ERROR: Commander, that would be illogical and dangerous. 
- + Your attempted action would bypass: - 17 safety protocols - - 234 test cases + - 234 test cases - Captain Picard's standing orders - + Probability of catastrophic failure: 87.3% Probability of demotion: 94.7% - + Suggested action: data test && data promote ``` @@ -89,31 +90,36 @@ The Captain smiled. "I fail to see the problem, Commander." ## Crew Feedback Log ### Chief O'Brien + "It's like the computer finally learned how to do things properly. No more staying up all night fixing phantom migrations. My daughter actually recognizes me now." ### Ensign Crusher + "Wesley here. I modified the personality mode to include a 'Wesley' setting. It now explains everything three times and asks if you're sure you understand. Data was not amused." ### Worf + "It is honorable. It does not allow cowardly untested deployments. A warrior's database tool." ### Counselor Troi + "I sense great satisfaction from the engineering team. Stress levels are down 67%. Though I do sense some... pride... from Data regarding the name similarity." ### Lieutenant Barclay + "I-I really appreciate the safety checks. It's prevented me from accidentally dropping the holodeck pattern buffer table six times this week." ## Technical Observations ### Efficiency Metrics -| Operation | Before D.A.T.A. | After D.A.T.A. | Improvement | -|-----------|-----------------|----------------|-------------| -| Migration Generation | 47 minutes | 2.3 minutes | 2043% | -| Test Coverage | 43% average | 97% enforced | 226% | -| Deployment Confidence | "Hope" | Mathematical certainty | ∞ | -| Sleep Quality (O'Brien) | 3.2 hours | 7.8 hours | 244% | -| Red Alerts (database-related) | 2.3/week | 0/week | 100% | +| Operation | Before D.A.T.A. | After D.A.T.A. | Improvement | +| ----------------------------- | --------------- | ---------------------- | ----------- | +| Migration Generation | 47 minutes | 2.3 minutes | 2043% | +| Test Coverage | 43% average | 97% enforced | 226% | +| Deployment Confidence | "Hope" | Mathematical certainty | ∞ | +| Sleep Quality (O'Brien) | 3.2 hours | 7.8 hours | 244% | +| Red Alerts (database-related) | 2.3/week | 0/week | 100% | ### Philosophical Observations @@ -136,7 +142,7 @@ Personal note: While I am incapable of feeling emotions, I experience what human Stardate 47636.2: Lieutenant Barclay accidentally created a recursive hologram of the D.A.T.A. system teaching me how to use the D.A.T.A. system. The paradox was resolved by implementing a new safety gate: ```javascript -if (user === 'Data' && system === 'DATA') { +if (user === "Data" && system === "DATA") { console.log("Fascinating. However, this would be redundant."); return; } @@ -144,9 +150,9 @@ if (user === 'Data' && system === 'DATA') { --- -*End of Extended Log* +_End of Extended Log_ -*Lt. Commander Data* -*Operations Officer, USS Enterprise NCC-1701-D* +_Lt. Commander Data_ +_Operations Officer, USS Enterprise NCC-1701-D_ -*P.S. - Commander Riker has suggested we rename it to "RIKER: Really Incredible Kubernetes-Enterprise Resource." His proposal was declined by a vote of 347 to 1.* \ No newline at end of file +_P.S. - Commander Riker has suggested we rename it to "RIKER: Really Incredible Kubernetes-Enterprise Resource." 
His proposal was declined by a vote of 347 to 1._ diff --git a/docs/fun/riker-tries-to-deploy.md b/docs/fun/riker-tries-to-deploy.md index 2303f11..0e55afc 100644 --- a/docs/fun/riker-tries-to-deploy.md +++ b/docs/fun/riker-tries-to-deploy.md @@ -29,7 +29,7 @@ riker@enterprise:~$ data align production --force ⚠️ WARNING: Commander, --force flag is not recognized. Perhaps you meant: data align production - + Note: There is no force. There is only logic. ``` @@ -39,12 +39,12 @@ riker@enterprise:~$ data align production --force 🔴 RED ALERT: Nice try, Commander. This isn't the holodeck. You cannot simply override database safety. - + Detecting: - Uncommitted changes in working directory - No tests written for new column - Jazz music playing in background (regulation 47.2.1) - + Required actions: 1. git add && git commit 2. data test @@ -68,7 +68,7 @@ data@enterprise:~$ git status Changes not staged for commit: modified: sql/tables/poker_tournament.sql - + Untracked files: riker_backup.sql riker_backup2.sql @@ -88,8 +88,8 @@ Untracked files: data@enterprise:~$ git add sql/tables/poker_tournament.sql data@enterprise:~$ git commit -m "feat: Add Worf bluff tracking column -Per Commander Riker's request, adding column to track Worf's -unsuccessful bluffing attempts. Statistical analysis shows a +Per Commander Riker's request, adding column to track Worf's +unsuccessful bluffing attempts. Statistical analysis shows a 0% success rate over 47 games." data@enterprise:~$ data test @@ -101,7 +101,7 @@ Running database tests... - Index for performance - RLS policy - Test coverage - + Would you like me to generate the missing components? [Y/n] ``` @@ -179,11 +179,11 @@ data@enterprise:~$ data align production ✓ On main branch ✓ Tests passing (100%) ✓ No jazz music detected - + Migration will: + CREATE TABLE poker_statistics + CREATE 2 RLS policies - + Type 'ENGAGE' to proceed: ENGAGE 🎉 Deployment successful! @@ -197,8 +197,9 @@ Riker's ego: Slightly bruised but recovering **Riker**: "That seemed like a lot of work for one column." **Data**: "Commander, we have successfully prevented: + - Data corruption -- Security vulnerabilities +- Security vulnerabilities - Worf discovering you track his bluffs - 3.7 hours of debugging - One potential court martial" @@ -253,5 +254,5 @@ Riker's ego: Slightly bruised but recovering --- -*"In poker, as in database deployments, the key is knowing when to fold."* -— Commander William T. Riker, Stardate 47636.1 \ No newline at end of file +_"In poker, as in database deployments, the key is knowing when to fold."_ +— Commander William T. Riker, Stardate 47636.1 diff --git a/docs/guides/edge-functions-integration.md b/docs/guides/edge-functions-integration.md index 248e0ee..8a1c9ac 100644 --- a/docs/guides/edge-functions-integration.md +++ b/docs/guides/edge-functions-integration.md @@ -76,67 +76,70 @@ your-project/ ```typescript // functions/donations-webhook/index.ts -import { serve } from "https://deno.land/std@0.168.0/http/server.ts" -import { createClient } from "https://esm.sh/@supabase/supabase-js@2" +import { serve } from "https://deno.land/std@0.168.0/http/server.ts"; +import { createClient } from "https://esm.sh/@supabase/supabase-js@2"; serve(async (req) => { - const { donation_id, event_type } = await req.json() - + const { donation_id, event_type } = await req.json(); + const supabase = createClient( - Deno.env.get('SUPABASE_URL')!, - Deno.env.get('SUPABASE_SERVICE_ROLE_KEY')! 
- ) - + Deno.env.get("SUPABASE_URL")!, + Deno.env.get("SUPABASE_SERVICE_ROLE_KEY")!, + ); + // Process webhook based on event type switch (event_type) { - case 'donation.completed': + case "donation.completed": // Update campaign totals - await supabase.rpc('complete_donation', { - p_donation_id: donation_id - }) - break + await supabase.rpc("complete_donation", { + p_donation_id: donation_id, + }); + break; // ... other event types } - + return new Response(JSON.stringify({ success: true }), { headers: { "Content-Type": "application/json" }, - }) -}) + }); +}); ``` ### Payment Processor ```typescript // functions/process-payment/index.ts -import { serve } from "https://deno.land/std@0.168.0/http/server.ts" -import Stripe from "https://esm.sh/stripe@12.0.0" +import { serve } from "https://deno.land/std@0.168.0/http/server.ts"; +import Stripe from "https://esm.sh/stripe@12.0.0"; -const stripe = new Stripe(Deno.env.get('STRIPE_SECRET_KEY')!, { - apiVersion: '2023-10-16', -}) +const stripe = new Stripe(Deno.env.get("STRIPE_SECRET_KEY")!, { + apiVersion: "2023-10-16", +}); serve(async (req) => { - const { amount, currency, donation_id } = await req.json() - + const { amount, currency, donation_id } = await req.json(); + try { const paymentIntent = await stripe.paymentIntents.create({ amount: Math.round(amount * 100), currency, - metadata: { donation_id } - }) - - return new Response(JSON.stringify({ - client_secret: paymentIntent.client_secret - }), { - headers: { "Content-Type": "application/json" }, - }) + metadata: { donation_id }, + }); + + return new Response( + JSON.stringify({ + client_secret: paymentIntent.client_secret, + }), + { + headers: { "Content-Type": "application/json" }, + }, + ); } catch (error) { return new Response(JSON.stringify({ error: error.message }), { status: 400, headers: { "Content-Type": "application/json" }, - }) + }); } -}) +}); ``` ## Deployment Workflow @@ -146,13 +149,14 @@ serve(async (req) => { 1. **Make database changes** in `sql/` directory 2. **Write Edge Functions** in `functions/` directory 3. **Compile and test locally**: + ```bash # Compile SQL to migrations data db compile - + # Test migrations data db migrate test - + # Validate Edge Functions data functions validate ``` @@ -166,25 +170,28 @@ serve(async (req) => { ### Production Environment 1. **Review changes**: + ```bash # Check migration status data db migrate status - + # Validate functions data functions validate ``` 2. **Deploy with confirmation**: + ```bash # Will prompt for confirmation data --prod db compile --deploy-functions ``` 3. **Monitor deployment**: + ```bash # Check function status data functions status - + # View deployment logs supabase functions logs function-name ``` @@ -193,12 +200,12 @@ serve(async (req) => { The D.A.T.A. CLI emits comprehensive events during Edge Functions operations: -| Event | Description | Payload | -|-------|-------------|---------| -| `function-validated` | Function passed validation | `{ name, path }` | -| `function-deployed` | Function successfully deployed | `{ name, url }` | -| `deployment-complete` | All functions deployed | `{ total, successful, failed }` | -| `deployment-status` | Status check completed | `{ deployed: [...], local: [...] 
}` | +| Event | Description | Payload | +| --------------------- | ------------------------------ | ----------------------------------- | +| `function-validated` | Function passed validation | `{ name, path }` | +| `function-deployed` | Function successfully deployed | `{ name, url }` | +| `deployment-complete` | All functions deployed | `{ total, successful, failed }` | +| `deployment-status` | Status check completed | `{ deployed: [...], local: [...] }` | ### Listening to Events @@ -207,16 +214,16 @@ The D.A.T.A. CLI emits comprehensive events during Edge Functions operations: class FunctionDeployReporter { handleEvent(event) { switch (event.type) { - case 'function-deployed': - console.log(`✅ Deployed: ${event.data.name}`) - console.log(` URL: ${event.data.url}`) - break - case 'deployment-complete': - console.log(`\n📊 Deployment Summary:`) - console.log(` Total: ${event.data.total}`) - console.log(` Success: ${event.data.successful}`) - console.log(` Failed: ${event.data.failed}`) - break + case "function-deployed": + console.log(`✅ Deployed: ${event.data.name}`); + console.log(` URL: ${event.data.url}`); + break; + case "deployment-complete": + console.log(`\n📊 Deployment Summary:`); + console.log(` Total: ${event.data.total}`); + console.log(` Success: ${event.data.successful}`); + console.log(` Failed: ${event.data.failed}`); + break; } } } @@ -233,23 +240,28 @@ class FunctionDeployReporter { ## Best Practices ### 1. Always Validate First + ```bash data functions validate && data functions deploy ``` ### 2. Use Selective Deployment + Deploy only changed functions: + ```bash data functions deploy donations-webhook process-payment ``` ### 3. Test in Staging + ```bash # Deploy to staging environment DATA_ENV=staging data db compile --deploy-functions ``` ### 4. Monitor Deployments + ```bash # Check status after deployment data functions status @@ -259,6 +271,7 @@ supabase functions logs --tail ``` ### 5. Version Control + - Commit Edge Functions alongside SQL changes - Tag releases for production deployments - Document function dependencies in README @@ -266,19 +279,23 @@ supabase functions logs --tail ## Troubleshooting ### Function Validation Fails + - Ensure Supabase CLI is installed: `npm install -g supabase` - Check function TypeScript syntax - Verify import statements use Deno URLs ### Deployment Timeout + - Increase timeout with environment variable: `DATA_DEPLOY_TIMEOUT=600` - Check network connectivity to Supabase ### Import Map Issues + - Create `import_map.json` for production - Or use `--skip-import-map` flag (not recommended) ### Function Not Found + - Verify function exists in `functions/` directory - Check function name matches directory name - Ensure `index.ts` exists in function directory @@ -296,4 +313,4 @@ supabase functions logs --tail - [Supabase Edge Functions Documentation](https://supabase.com/docs/guides/functions) - [D.A.T.A. CLI Documentation](../README.md) - [pgTAP Testing Guide](./pgtap-testing.md) -- [Migration Workflow Guide](./migration-workflow.md) \ No newline at end of file +- [Migration Workflow Guide](./migration-workflow.md) diff --git a/docs/roadmap/ideas-and-future.md b/docs/roadmap/ideas-and-future.md index 1994875..1d3f147 100644 --- a/docs/roadmap/ideas-and-future.md +++ b/docs/roadmap/ideas-and-future.md @@ -1,35 +1,42 @@ # D.A.T.A. 
Ideas & Missing Features -*"The complexity of database operations exceeds human capability for error-free execution."* +_"The complexity of database operations exceeds human capability for error-free execution."_ — Lt. Commander Data ## 🚀 Epic Features to Implement ### 1. Time-Travel Debugging + ```sql -- Show me exactly what the database looked like at any git tag data timetravel --to "2024-12-25T14:30:00Z" data timetravel --to "data/prod/stardate-2025.241" ``` + - Spin up ephemeral database from any historical tag - Compare schemas across time points - "What changed between these two deployments?" - Visual diff timeline interface ### 2. Semantic Migration Intelligence -Instead of just AST comparison, understand the *intent*: + +Instead of just AST comparison, understand the _intent_: + ```javascript // D.A.T.A. detects: "You're implementing soft deletes" // Suggests: "Add deleted_at index, update RLS policies, create cleanup function" ``` + - Pattern recognition for common migrations - Suggest best practices based on detected patterns - Warn about common pitfalls ("Adding NOT NULL without default will fail if table has data") ### 3. Migration Simulation Chamber + ```bash data simulate production --with-sample-data ``` + - Clone production schema (not data) locally - Generate realistic sample data - Run migration against simulation @@ -37,25 +44,32 @@ data simulate production --with-sample-data - Detect constraint violations before they happen ### 4. Quantum Rollback™ + Not just rollback to previous state, but: + ```bash data quantum-rollback --preserve "users,posts" --rollback "comments,likes" ``` + - Selective rollback of specific tables/schemas - Preserve certain changes while reverting others - Merge timelines (keep the good, revert the bad) ### 5. The Holodeck Test Environment + ```bash data holodeck create "test-new-feature" ``` + - Instantly provision isolated database with current schema - Branch-specific test databases - Auto-cleanup after PR merge - Share test environments with team ("Join my holodeck") ### 6. Migration Risk AI + Use ML to predict migration failure based on: + - Historical failure patterns - Time of day/week analysis - Size and complexity metrics @@ -65,37 +79,45 @@ Use ML to predict migration failure based on: Output: "This migration has 73% success probability. Recommend waiting for Tuesday morning." ### 7. The Borg Collective Sync + ```bash data borg sync --with team ``` + - Automatically sync schema changes across team - Detect when teammates have migrations you don't - Prevent migration conflicts before they happen - "Resistance is futile. Your schema will be assimilated." ### 8. Edge Function Time Machine + ```bash data functions history "my-function" --show-all-versions data functions rollback "my-function" --to "v2.3.1" ``` + - Version every Edge Function deployment - Instant rollback to any version - A/B test functions versions - Gradual rollout with percentage traffic ### 9. The Warp Core Performance Monitor + ```bash data performance watch ``` + - Real-time migration performance monitoring - Automatic rollback if queries slow down >50% - Database load prediction - "Captain, the migration is causing subspace distortions in query performance" ### 10. Telepathic Schema Validation (Troi Mode) + ```bash data sense ``` + - "I sense... inconsistencies in your schema" - Detect anti-patterns - Find unused indexes @@ -104,25 +126,30 @@ data sense - Detect security vulnerabilities in RLS policies ### 11. 
Q Continuum Mode + ```bash data q snap --reality "before-everything-broke" ``` + - Instant snapshot and restore - "What if" migrations (try without committing) - Multiple reality branches - "Show me the timeline where we didn't drop that table" ### 12. Universal Translator + ```bash data translate --from mysql --to postgres data translate --from prisma --to sql ``` + - Convert schemas between databases - Import from ORMs (Prisma, TypeORM, Sequelize) - Export to different formats - "Make it so" works with any input ### 13. The Prime Directive Enforcer + ```yaml # .data-prime-directive.yml rules: @@ -132,15 +159,18 @@ rules: - require_two_approvals_for_production - maximum_migration_duration: 60s ``` + - Configurable safety rules - Automatic enforcement - Override requires written justification - Audit log of all overrides ### 14. Geordi's Diagnostic Mode + ```bash data diagnose --full-spectrum ``` + - Complete health check of database - Find slow queries - Identify missing indexes @@ -149,9 +179,11 @@ data diagnose --full-spectrum - "Captain, I'm detecting anomalies in the primary EPS conduits... I mean, indexes" ### 15. Riker's YOLO Override + ```bash data deploy production --riker-mode ``` + - For when you absolutely need to deploy NOW - Bypasses all safety checks - Requires typing: "I understand the consequences and take full responsibility" @@ -160,19 +192,23 @@ data deploy production --riker-mode - Logs everything for post-mortem ### 16. The Transporter Pattern Buffer + ```bash data buffer save "about-to-try-something-stupid" data buffer restore "about-to-try-something-stupid" ``` + - Quick save/restore points - Lighter than full backups - Stack-based (push/pop) - "Energize when ready" ### 17. Red Alert Mode + ```bash data red-alert "PRODUCTION IS DOWN" ``` + - Emergency response mode - Shows last 10 deployments - Quick rollback options @@ -181,9 +217,11 @@ data red-alert "PRODUCTION IS DOWN" - Starts recording all actions for post-incident review ### 18. Schema Documentation AI + ```bash data document --explain-like-im-five ``` + - Auto-generate documentation from schema - Explain complex relationships - Create ER diagrams @@ -191,9 +229,11 @@ data document --explain-like-im-five - "This table stores user data. It talks to the posts table through user_id." ### 19. The Kobayashi Maru Test + ```bash data test kobayashi-maru ``` + - Chaos engineering for databases - Randomly drops connections - Simulates network partitions @@ -201,9 +241,11 @@ data test kobayashi-maru - "There's no such thing as a no-win scenario" ### 20. Multi-Universe Deployment + ```bash data deploy --multiverse "prod,staging,dev" ``` + - Deploy to multiple environments in parallel - Automatic rollback if any fails - Staggered deployment option @@ -212,6 +254,7 @@ data deploy --multiverse "prod,staging,dev" ## 🎭 Personality Enhancements ### Crew Personality Modes + ```bash data config --personality "picard" # Thoughtful, measured data config --personality "kirk" # Bold, decisive @@ -220,11 +263,13 @@ data config --personality "sisko" # War-time decisive mode ``` ### Stress Detection + - If deploying late at night: "Captain, you appear fatigued. Confirm you wish to proceed." - If multiple failed attempts: "Perhaps we should try a different approach?" - If Friday afternoon: "Captain, Starfleet regulations suggest waiting until Monday." 
### Achievement System + ```bash data achievements @@ -239,26 +284,31 @@ data achievements ## 🔮 Far Future Ideas ### Quantum Entangled Databases + - Changes in dev automatically prepare staging - Predictive migration generation - "Your future self will need this index" ### Neural Link Integration + - Think about schema changes, they appear - Minority Report-style gesture controls -- "Computer, add user preferences column" *waves hand* +- "Computer, add user preferences column" _waves hand_ ### Blockchain Audit Trail + - Immutable deployment history - Cryptographic proof of who deployed what - Smart contracts for approval workflows ### AI Pair Programmer + - "I notice you're adding a users table. Would you like me to add standard fields?" - Suggests migrations based on application code changes - Learns from your patterns ### Cross-Database Federation + - Deploy to Postgres, MySQL, and SQLite simultaneously - Automatic syntax translation - Universal schema language @@ -269,37 +319,42 @@ D.A.T.A. becomes more than a tool - it becomes the trusted android officer on yo The goal: Make database operations so boring, safe, and automated that developers can focus on building features, not fighting migrations. -*"In the future, no one will write migrations by hand. They will simply declare their intent, and D.A.T.A. will make it so."* +_"In the future, no one will write migrations by hand. They will simply declare their intent, and D.A.T.A. will make it so."_ --- ## Implementation Priority ### Phase 1: Core Safety (Current) + - ✅ AST-based migrations - ✅ Git-based deployment tracking - ✅ Comprehensive safety checks - ⏳ Rollback mechanisms ### Phase 2: Intelligence + - [ ] Semantic understanding - [ ] Pattern recognition - [ ] Performance prediction - [ ] Risk scoring ### Phase 3: Simulation + - [ ] Holodeck environments - [ ] Migration simulation - [ ] Chaos testing - [ ] Time travel debugging ### Phase 4: Automation + - [ ] AI-assisted migrations - [ ] Self-healing deployments - [ ] Predictive maintenance - [ ] Cross-database support ### Phase 5: Transcendence + - [ ] Neural interfaces - [ ] Quantum computing - [ ] Interdimensional deployments @@ -307,6 +362,6 @@ The goal: Make database operations so boring, safe, and automated that developer --- -*"The complexity of database operations is a problem that has plagued developers for decades. D.A.T.A. represents our attempt to solve it once and for all. We may not achieve perfection, but we will achieve... fascinating results."* +_"The complexity of database operations is a problem that has plagued developers for decades. D.A.T.A. represents our attempt to solve it once and for all. We may not achieve perfection, but we will achieve... fascinating results."_ -— Lt. Commander Data, Chief Database Officer, USS Enterprise \ No newline at end of file +— Lt. Commander Data, Chief Database Officer, USS Enterprise diff --git a/docs/technical/golden-sql-compilation-algorithm.md b/docs/technical/golden-sql-compilation-algorithm.md index ea5da30..e2bfb22 100644 --- a/docs/technical/golden-sql-compilation-algorithm.md +++ b/docs/technical/golden-sql-compilation-algorithm.md @@ -25,7 +25,7 @@ SQL files are organized in named directories: /sql/ extensions/ -- PostgreSQL extensions schemas/ -- Schema definitions - types/ -- Custom types and enums + types/ -- Custom types and enums tables/ -- Table definitions functions/ -- Stored procedures views/ -- Views and materialized views @@ -36,6 +36,7 @@ SQL files are organized in named directories: ``` D.A.T.A. 
internally processes these in the correct order to ensure: + - Extensions are created before they're used - Schemas exist before tables are placed in them - Tables exist before foreign keys reference them @@ -65,6 +66,7 @@ The `MigrationCompiler` concatenates all SQL files into a single "compiled" migr The `DiffEngine` generates incremental migrations using git diffs: 1. **Compare Git References**: + ```bash git diff data/prod/last-tag...HEAD -- sql/ ``` @@ -89,6 +91,7 @@ data/staging/2025.241.1200 ``` This enables: + - **Rollback**: Deploy any previous tag's Golden SQL - **Audit Trail**: Git history shows exactly what was deployed when - **Diff Generation**: Compare any two points in time @@ -96,21 +99,24 @@ This enables: ## Example Workflow 1. **Initial Setup**: + ```bash # Create Golden SQL structure data init - + # Write your SQL files vim sql/tables/users.sql ``` 2. **First Compilation** (Full): + ```bash data db compile # Output: migrations/20250831143000_compiled.sql (COMPLETE database) ``` 3. **Make Changes**: + ```bash # Edit a table vim sql/004_tables/users.sql # Add a column @@ -118,6 +124,7 @@ This enables: ``` 4. **Generate Incremental Migration**: + ```bash data db migrate generate # Uses DiffEngine to create: @@ -141,7 +148,7 @@ This enables: - **File Tracking**: Records every file processed for audit - **Error Handling**: Fails fast on any read error -### DiffEngine +### DiffEngine - **Git-Based**: Uses `git diff` not database introspection - **Intelligent Parsing**: Attempts to generate ALTER statements @@ -151,12 +158,14 @@ This enables: ## Why This Matters Traditional migration tools: + - Hand-write migrations (error-prone) - Use database introspection (state can drift) - Can't rollback reliably (down migrations often broken) - Mystery state in production Golden SQL with D.A.T.A.: + - Migrations are generated (deterministic) - Git is truth (no drift possible) - Rollback to any tag (guaranteed state) @@ -170,7 +179,7 @@ Run tests to verify the algorithm: # Test MigrationCompiler node test/test-migration-compiler.js -# Test DiffEngine +# Test DiffEngine node test/test-diff-engine.js # Integration test @@ -201,4 +210,4 @@ This is why D.A.T.A. is special. It's not complex - it's simple done right. **Remember**: This algorithm is the core of D.A.T.A. Without it, we're just another migration tool. WITH it, we're bringing Star Trek-level precision to database deployments. -"The algorithm is logical, Captain." - Lt. Commander Data \ No newline at end of file +"The algorithm is logical, Captain." - Lt. 
Commander Data diff --git a/docs/technical/memory-management.md b/docs/technical/memory-management.md index 416c7e2..e65ec67 100644 --- a/docs/technical/memory-management.md +++ b/docs/technical/memory-management.md @@ -9,19 +9,25 @@ The pgTAPTestScanner implements comprehensive memory management to prevent OOM e ### Core Classes #### MemoryMonitor (`src/lib/testing/MemoryMonitor.js`) + Static utility class for memory monitoring and management: + - Real-time heap usage tracking - Garbage collection triggering (when Node.js started with `--expose-gc`) - Threshold-based cleanup recommendations #### StreamingCoverageDatabase (`src/lib/testing/StreamingCoverageDatabase.js`) + Memory-efficient storage for coverage data: + - Per-type object limits with overflow protection - Optional compression support (experimental) - Statistics tracking and reporting #### BatchProcessor (`src/lib/testing/BatchProcessor.js`) + Memory-aware batch processing utility: + - Configurable batch sizes - Automatic memory cleanup between batches - Event loop yielding to prevent blocking @@ -32,19 +38,20 @@ Memory-aware batch processing utility: ```javascript const scanner = new pgTAPTestScanner({ // Memory management options - maxMemoryMB: 500, // Maximum memory usage in MB (default: 500) - batchSize: 100, // Files to process in each batch (default: 100) - enableStreaming: true, // Enable streaming mode for large datasets - cleanupInterval: 1000, // Cleanup interval in ms - maxObjectsPerType: 10000, // Maximum objects per coverage type - enableGC: true, // Enable garbage collection hints - enableCompression: false, // Enable data compression (experimental) + maxMemoryMB: 500, // Maximum memory usage in MB (default: 500) + batchSize: 100, // Files to process in each batch (default: 100) + enableStreaming: true, // Enable streaming mode for large datasets + cleanupInterval: 1000, // Cleanup interval in ms + maxObjectsPerType: 10000, // Maximum objects per coverage type + enableGC: true, // Enable garbage collection hints + enableCompression: false, // Enable data compression (experimental) }); ``` ## Key Features ### 1. Memory Monitoring & Limits + - **Configurable memory limits**: Set via `maxMemoryMB` option - **Real-time monitoring**: Periodic memory usage checks with configurable intervals - **Threshold-based cleanup**: Automatic cleanup when memory usage exceeds 80% of limit @@ -53,32 +60,38 @@ const scanner = new pgTAPTestScanner({ ### 2. Intelligent Processing Modes #### Standard Mode (< 100 files) + - Direct processing without batching - Memory usage: ~50MB - Processing time: < 1s #### Batched Mode (100-1000 files) + - Files processed in configurable batches - Memory usage: ~200MB - Processing time: 2-5s #### Streaming Mode (> 1000 files) + - Automatic activation for large datasets - Constant memory usage: ~100MB - Processing time: 5-30s - Progressive processing with backpressure control ### 3. Object Accumulation Control + - Maximum objects per coverage type (default: 10,000) - Intelligent pruning keeps most recent objects - Warning notifications when limits are reached - Graceful degradation continues processing ### 4. 
Garbage Collection Management + ```bash # Run with garbage collection exposed for optimal performance node --expose-gc your-script.js ``` + - Manual GC triggering when available - Weak references for temporary data - Proper cleanup of intervals and controllers @@ -86,6 +99,7 @@ node --expose-gc your-script.js ## Implementation Details ### Memory State Tracking + ```javascript memoryState = { currentUsageMB: 0, @@ -94,11 +108,12 @@ memoryState = { gcCount: 0, batchesProcessed: 0, objectsProcessed: 0, - streamingMode: false -} + streamingMode: false, +}; ``` ### Cleanup Triggers + - Every 10 files during standard processing - When heap usage exceeds 80% of maximum - Before processing large batches @@ -106,6 +121,7 @@ memoryState = { - On configurable intervals (`cleanupInterval`) ### Batch Processing Algorithm + 1. Divide files into batches of `batchSize` 2. Check memory before each batch 3. Process batch with streaming DB if enabled @@ -119,48 +135,54 @@ memoryState = { ### Public Methods #### `getMemoryStats()` + Returns comprehensive memory usage statistics: + ```javascript const stats = scanner.getMemoryStats(); -console.log('Current heap:', stats.currentUsage.heapUsed, 'MB'); -console.log('Peak usage:', stats.maxUsageMB, 'MB'); -console.log('Streaming mode:', stats.streamingMode); -console.log('Database stats:', stats.streamingDBStats); +console.log("Current heap:", stats.currentUsage.heapUsed, "MB"); +console.log("Peak usage:", stats.maxUsageMB, "MB"); +console.log("Streaming mode:", stats.streamingMode); +console.log("Database stats:", stats.streamingDBStats); ``` ### Events #### Memory Status Event + ```javascript -scanner.on('memory_status', (data) => { +scanner.on("memory_status", (data) => { console.log(`Memory: ${data.current}MB/${data.threshold}MB`); console.log(`Streaming: ${data.streamingMode}`); }); ``` #### Cleanup Event + ```javascript -scanner.on('cleanup', (data) => { - console.log('Cleanup type:', data.type); - console.log('Memory after:', data.memoryUsage); +scanner.on("cleanup", (data) => { + console.log("Cleanup type:", data.type); + console.log("Memory after:", data.memoryUsage); }); ``` #### Warning Events + ```javascript -scanner.on('warning', (data) => { - if (data.type === 'memory_threshold') { - console.log('Switching to streaming mode'); - } else if (data.type === 'memory_limit') { - console.log('Object limit reached'); +scanner.on("warning", (data) => { + if (data.type === "memory_threshold") { + console.log("Switching to streaming mode"); + } else if (data.type === "memory_limit") { + console.log("Object limit reached"); } }); ``` #### Progress Events + ```javascript -scanner.on('progress', (data) => { - if (data.type === 'batch_processed') { +scanner.on("progress", (data) => { + if (data.type === "batch_processed") { console.log(`Batch ${data.batch}/${data.totalBatches}`); console.log(`Memory: ${data.memoryUsage.heapUsed}MB`); } @@ -170,49 +192,53 @@ scanner.on('progress', (data) => { ## Usage Examples ### Basic Usage (Default Settings) + ```javascript const scanner = new pgTAPTestScanner(); -await scanner.scanDirectory('./tests'); +await scanner.scanDirectory("./tests"); const database = await scanner.buildCoverageDatabase(); // Now async ``` ### Memory-Constrained Environment + ```javascript const scanner = new pgTAPTestScanner({ - maxMemoryMB: 200, // Lower memory limit - batchSize: 50, // Smaller batches - enableStreaming: true, // Force streaming mode - cleanupInterval: 500 // More frequent cleanup + maxMemoryMB: 200, // Lower memory limit + batchSize: 
50, // Smaller batches + enableStreaming: true, // Force streaming mode + cleanupInterval: 500, // More frequent cleanup }); // Monitor memory usage -scanner.on('memory_status', (data) => { +scanner.on("memory_status", (data) => { if (data.current > data.threshold * 0.9) { - console.warn('High memory usage detected'); + console.warn("High memory usage detected"); } }); -await scanner.scanDirectory('./tests'); +await scanner.scanDirectory("./tests"); const database = await scanner.buildCoverageDatabase(); ``` ### High-Performance Environment + ```javascript const scanner = new pgTAPTestScanner({ - maxMemoryMB: 1000, // Higher memory limit - batchSize: 500, // Larger batches - enableStreaming: false, // Disable streaming for speed - enableGC: true, // Enable manual GC - cleanupInterval: 5000 // Less frequent cleanup + maxMemoryMB: 1000, // Higher memory limit + batchSize: 500, // Larger batches + enableStreaming: false, // Disable streaming for speed + enableGC: true, // Enable manual GC + cleanupInterval: 5000, // Less frequent cleanup }); ``` ### Performance Monitoring + ```javascript const startTime = Date.now(); -scanner.on('progress', (data) => { - if (data.type === 'batch_processed') { +scanner.on("progress", (data) => { + if (data.type === "batch_processed") { const rate = data.itemsProcessed / ((Date.now() - startTime) / 1000); console.log(`Processing rate: ${rate.toFixed(1)} files/sec`); } @@ -221,35 +247,39 @@ scanner.on('progress', (data) => { ## Performance Characteristics -| File Count | Mode | Memory Usage | Processing Time | Recommended Config | -|------------|------|--------------|-----------------|-------------------| -| < 100 | Standard | ~50MB | < 1s | Default settings | -| 100-1000 | Batched | ~200MB | 2-5s | `batchSize: 100` | -| 1000-5000 | Streaming | ~100MB (constant) | 5-30s | `enableStreaming: true` | -| > 5000 | Streaming + GC | ~100MB (constant) | 30s+ | `enableGC: true, batchSize: 50` | +| File Count | Mode | Memory Usage | Processing Time | Recommended Config | +| ---------- | -------------- | ----------------- | --------------- | ------------------------------- | +| < 100 | Standard | ~50MB | < 1s | Default settings | +| 100-1000 | Batched | ~200MB | 2-5s | `batchSize: 100` | +| 1000-5000 | Streaming | ~100MB (constant) | 5-30s | `enableStreaming: true` | +| > 5000 | Streaming + GC | ~100MB (constant) | 30s+ | `enableGC: true, batchSize: 50` | ## Migration Guide ### For Existing Code + 1. Change synchronous calls to async: + ```javascript // Before const database = scanner.buildCoverageDatabase(); - + // After const database = await scanner.buildCoverageDatabase(); ``` 2. Add error handling for memory warnings: + ```javascript - scanner.on('warning', (data) => { - logger.warn('Memory warning:', data.message); + scanner.on("warning", (data) => { + logger.warn("Memory warning:", data.message); }); ``` 3. Consider adjusting memory limits based on your environment ### For New Code + - Use memory events for progress reporting - Call `getMemoryStats()` for debugging - Configure options based on expected dataset sizes @@ -257,18 +287,21 @@ scanner.on('progress', (data) => { ## Troubleshooting ### OOM Errors Still Occurring + 1. Reduce `batchSize` to 50 or lower 2. Lower `maxMemoryMB` threshold to 70% of available memory 3. Enable `--expose-gc` Node flag 4. Check for memory leaks in test files ### Slow Processing + 1. Increase `batchSize` for small files (up to 500) 2. Disable streaming for datasets < 1000 files 3. Increase `maxMemoryMB` if memory available 4. 
Profile with `--inspect` to identify bottlenecks ### Memory Not Being Released + 1. Ensure Node.js started with `--expose-gc` 2. Reduce `cleanupInterval` for more frequent cleanup 3. Check for circular references in test files @@ -317,4 +350,4 @@ node --expose-gc --max-old-space-size=512 test-large-suite.js --- -The memory management system makes pgTAPTestScanner suitable for processing test suites of any size while maintaining excellent performance and reliability. \ No newline at end of file +The memory management system makes pgTAPTestScanner suitable for processing test suites of any size while maintaining excellent performance and reliability. diff --git a/eslint.config.cjs b/eslint.config.cjs new file mode 100644 index 0000000..608192b --- /dev/null +++ b/eslint.config.cjs @@ -0,0 +1,201 @@ +// eslint.config.cjs + +const js = require("@eslint/js"); +const tsPlugin = require("@typescript-eslint/eslint-plugin"); +const tsParser = require("@typescript-eslint/parser"); +const promisePlugin = require("eslint-plugin-promise"); + +module.exports = [ + js.configs.recommended, + + // Default for plain .js files (ESM by default per your earlier config) + { + files: ["**/*.js"], + languageOptions: { + ecmaVersion: 2021, + sourceType: "module", + parser: tsParser, + parserOptions: { project: false }, + globals: { + console: "readonly", + process: "readonly", + Buffer: "readonly", + __dirname: "readonly", + __filename: "readonly", + require: "readonly", + module: "readonly", + exports: "readonly", + global: "readonly", + Promise: "readonly", + setTimeout: "readonly", + clearTimeout: "readonly", + setInterval: "readonly", + clearInterval: "readonly", + }, + }, + plugins: { + "@typescript-eslint": tsPlugin, + promise: promisePlugin, + }, + rules: { + // Promise rules + "promise/catch-or-return": "error", + "promise/always-return": "error", + "promise/no-return-wrap": "error", + + // Async best practices + "require-await": "error", + "no-async-promise-executor": "error", + "no-await-in-loop": "warn", + "prefer-promise-reject-errors": "error", + + // General + "no-console": "off", + "no-undef": "error", + + // Unused handling: allow names starting with _ + "no-unused-vars": "off", + "@typescript-eslint/no-unused-vars": [ + "error", + { + argsIgnorePattern: "^_", + varsIgnorePattern: "^_", + caughtErrorsIgnorePattern: "^_", + }, + ], + + // Node interop + "@typescript-eslint/no-var-requires": "off", + "@typescript-eslint/no-require-imports": "off", + + // Keep this ON; fix code instead of disabling + "no-dupe-keys": "error", + }, + }, + + // ESM .mjs explicitly + { + files: ["**/*.mjs"], + languageOptions: { + ecmaVersion: 2021, + sourceType: "module", + parser: tsParser, + parserOptions: { project: false }, + }, + plugins: { + "@typescript-eslint": tsPlugin, + promise: promisePlugin, + }, + rules: { + "promise/catch-or-return": "error", + "promise/always-return": "error", + "promise/no-return-wrap": "error", + "no-await-in-loop": "warn", + "prefer-promise-reject-errors": "error", + "no-console": "off", + "no-undef": "error", + "no-unused-vars": "off", + "@typescript-eslint/no-unused-vars": [ + "error", + { + argsIgnorePattern: "^_", + varsIgnorePattern: "^_", + caughtErrorsIgnorePattern: "^_", + }, + ], + "@typescript-eslint/no-var-requires": "off", + "@typescript-eslint/no-require-imports": "off", + "no-dupe-keys": "error", + }, + }, + + // CommonJS .cjs explicitly + { + files: ["**/*.cjs"], + languageOptions: { + ecmaVersion: 2021, + sourceType: "commonjs", + parser: tsParser, + parserOptions: { 
project: false }, + }, + plugins: { + "@typescript-eslint": tsPlugin, + promise: promisePlugin, + }, + rules: { + "promise/catch-or-return": "error", + "promise/always-return": "error", + "promise/no-return-wrap": "error", + "require-await": "error", + "no-async-promise-executor": "error", + "no-await-in-loop": "warn", + "prefer-promise-reject-errors": "error", + "no-console": "off", + "no-undef": "error", + "no-unused-vars": "off", + "@typescript-eslint/no-unused-vars": [ + "error", + { + argsIgnorePattern: "^_", + varsIgnorePattern: "^_", + caughtErrorsIgnorePattern: "^_", + }, + ], + "@typescript-eslint/no-var-requires": "off", + "@typescript-eslint/no-require-imports": "off", + "no-dupe-keys": "error", + }, + }, + + // **NEW**: TypeScript files override (this is what you were missing) + { + files: ["**/*.ts", "**/*.tsx"], + languageOptions: { + ecmaVersion: 2021, + sourceType: "module", + parser: tsParser, + parserOptions: { + project: false, // set your tsconfig path if you want type-aware linting + ecmaFeatures: { jsx: true }, + }, + }, + plugins: { + "@typescript-eslint": tsPlugin, + promise: promisePlugin, + }, + rules: { + "promise/catch-or-return": "error", + "promise/always-return": "error", + "promise/no-return-wrap": "error", + "require-await": "error", + "no-async-promise-executor": "error", + "no-await-in-loop": "warn", + "prefer-promise-reject-errors": "error", + + // TS unused vars — allow underscore + "no-unused-vars": "off", + "@typescript-eslint/no-unused-vars": [ + "error", + { + argsIgnorePattern: "^_", + varsIgnorePattern: "^_", + caughtErrorsIgnorePattern: "^_", + }, + ], + + // Keep dup-keys on + "no-dupe-keys": "error", + }, + }, + + // Ignore typical folders + { + ignores: [ + "**/node_modules/**", + "**/.git/**", + "**/dist/**", + "**/build/**", + "**/.cache/**", + ], + }, +]; diff --git a/eslint.config.js b/eslint.config.js deleted file mode 100644 index ec4954c..0000000 --- a/eslint.config.js +++ /dev/null @@ -1,70 +0,0 @@ -const js = require('@eslint/js'); -const tsPlugin = require('@typescript-eslint/eslint-plugin'); -const tsParser = require('@typescript-eslint/parser'); -const promisePlugin = require('eslint-plugin-promise'); - -module.exports = [ - js.configs.recommended, - { - files: ['**/*.js'], - languageOptions: { - ecmaVersion: 2021, - sourceType: 'module', - parser: tsParser, - parserOptions: { - project: false - }, - globals: { - console: 'readonly', - process: 'readonly', - Buffer: 'readonly', - __dirname: 'readonly', - __filename: 'readonly', - require: 'readonly', - module: 'readonly', - exports: 'readonly', - global: 'readonly', - Promise: 'readonly', - setTimeout: 'readonly', - clearTimeout: 'readonly', - setInterval: 'readonly', - clearInterval: 'readonly' - } - }, - plugins: { - '@typescript-eslint': tsPlugin, - 'promise': promisePlugin - }, - rules: { - // Promise-specific rules (these work without type info) - - // Promise-specific rules - 'promise/catch-or-return': 'error', - 'promise/always-return': 'error', - 'promise/no-return-wrap': 'error', - - // Require await in async functions - 'require-await': 'error', - - // Other async best practices - 'no-async-promise-executor': 'error', - 'no-await-in-loop': 'warn', - 'prefer-promise-reject-errors': 'error', - - // Allow console and require - 'no-console': 'off', - 'no-undef': 'error', - - // Allow unused args with underscore prefix - 'no-unused-vars': 'off', - '@typescript-eslint/no-unused-vars': ['error', { - 'argsIgnorePattern': '^_', - 'varsIgnorePattern': '^_' - }], - - // Node.js 
specific - '@typescript-eslint/no-var-requires': 'off', - '@typescript-eslint/no-require-imports': 'off' - } - } -]; \ No newline at end of file diff --git a/example/README.md b/example/README.md index e7fe950..5fec67b 100644 --- a/example/README.md +++ b/example/README.md @@ -53,6 +53,7 @@ example/ This example implements a **complete donation platform** with: ### Database Layer (`sql/`) + - **Multi-schema architecture**: Separate schemas for app, billing, analytics, and audit - **Custom types**: Enums for user roles, donation status, payment methods - **Core tables**: Organizations, campaigns, and donations @@ -63,6 +64,7 @@ This example implements a **complete donation platform** with: - **Reporting**: Pre-built dashboard views for common queries ### Edge Functions Layer (`functions/`) + - **Webhook processing**: Handle payment provider webhooks - **Payment integration**: Stripe payment intent creation - **Email automation**: Send receipts and confirmations @@ -100,6 +102,7 @@ While this structure is recommended, you can customize paths in `.datarc.json`: ``` Or via environment variables: + ```bash export DATA_SQL_DIR=./custom/sql export DATA_MIGRATIONS_DIR=./custom/migrations @@ -109,17 +112,20 @@ export DATA_FUNCTIONS_DIR=./custom/functions ## Usage Examples ### 1. Compile SQL into a Migration + ```bash # From project root (parent of example/) data db compile --sql-dir example/sql ``` ### 2. Test the Migration + ```bash data db migrate test ``` ### 3. Deploy with Edge Functions + ```bash # Development data db compile --deploy-functions @@ -129,6 +135,7 @@ data --prod db compile --deploy-functions ``` ### 4. Check Function Status + ```bash data functions status ``` @@ -156,4 +163,4 @@ data functions status - [Edge Functions Integration Guide](../docs/guides/edge-functions-integration.md) - [Migration Workflow Guide](../docs/guides/migration-workflow.md) - [pgTAP Testing Guide](../docs/guides/pgtap-testing.md) -- [D.A.T.A. CLI Documentation](../README.md) \ No newline at end of file +- [D.A.T.A. 
CLI Documentation](../README.md) diff --git a/example/functions/donations-webhook/index.ts b/example/functions/donations-webhook/index.ts index 0217fed..749b259 100644 --- a/example/functions/donations-webhook/index.ts +++ b/example/functions/donations-webhook/index.ts @@ -1,166 +1,179 @@ // Webhook handler for donation events // This Edge Function processes donation-related webhooks and updates the database -import { serve } from "https://deno.land/std@0.168.0/http/server.ts" -import { createClient } from "https://esm.sh/@supabase/supabase-js@2.38.0" +import { serve } from "https://deno.land/std@0.168.0/http/server.ts"; +import { createClient } from "https://esm.sh/@supabase/supabase-js@2.38.0"; interface WebhookPayload { - event_type: 'donation.created' | 'donation.completed' | 'donation.failed' | 'donation.refunded' - donation_id: string - campaign_id: string - amount: number - donor_email?: string - metadata?: Record + event_type: + | "donation.created" + | "donation.completed" + | "donation.failed" + | "donation.refunded"; + donation_id: string; + campaign_id: string; + amount: number; + donor_email?: string; + metadata?: Record; } serve(async (req: Request) => { try { // Verify webhook signature (if configured) - const signature = req.headers.get('x-webhook-signature') - if (signature && !await verifySignature(req, signature)) { - return new Response('Invalid signature', { status: 401 }) + const signature = req.headers.get("x-webhook-signature"); + if (signature && !(await verifySignature(req, signature))) { + return new Response("Invalid signature", { status: 401 }); } - const payload: WebhookPayload = await req.json() - + const payload: WebhookPayload = await req.json(); + // Initialize Supabase client - const supabaseUrl = Deno.env.get('SUPABASE_URL') - const supabaseKey = Deno.env.get('SUPABASE_SERVICE_ROLE_KEY') - + const supabaseUrl = Deno.env.get("SUPABASE_URL"); + const supabaseKey = Deno.env.get("SUPABASE_SERVICE_ROLE_KEY"); + if (!supabaseUrl || !supabaseKey) { - throw new Error('Missing Supabase configuration') + throw new Error("Missing Supabase configuration"); } - - const supabase = createClient(supabaseUrl, supabaseKey) - + + const supabase = createClient(supabaseUrl, supabaseKey); + // Process webhook based on event type - let result + let result; switch (payload.event_type) { - case 'donation.created': + case "donation.created": // Log new donation - console.log(`New donation created: ${payload.donation_id}`) - + console.log(`New donation created: ${payload.donation_id}`); + // Send confirmation email (via another Edge Function) await fetch(`${supabaseUrl}/functions/v1/send-receipt`, { - method: 'POST', + method: "POST", headers: { - 'Authorization': `Bearer ${supabaseKey}`, - 'Content-Type': 'application/json' + Authorization: `Bearer ${supabaseKey}`, + "Content-Type": "application/json", }, body: JSON.stringify({ donation_id: payload.donation_id, - type: 'confirmation' - }) - }) - break - - case 'donation.completed': + type: "confirmation", + }), + }); + break; + + case "donation.completed": // Update donation status and campaign totals - const { data, error } = await supabase.rpc('complete_donation', { + const { data, error } = await supabase.rpc("complete_donation", { p_donation_id: payload.donation_id, - p_net_amount: payload.amount * 0.97 // 3% platform fee - }) - - if (error) throw error - result = data - + p_net_amount: payload.amount * 0.97, // 3% platform fee + }); + + if (error) throw error; + result = data; + // Trigger receipt email await 
fetch(`${supabaseUrl}/functions/v1/send-receipt`, { - method: 'POST', + method: "POST", headers: { - 'Authorization': `Bearer ${supabaseKey}`, - 'Content-Type': 'application/json' + Authorization: `Bearer ${supabaseKey}`, + "Content-Type": "application/json", }, body: JSON.stringify({ donation_id: payload.donation_id, - type: 'receipt' - }) - }) - break - - case 'donation.failed': + type: "receipt", + }), + }); + break; + + case "donation.failed": // Update donation status to failed const { error: failError } = await supabase - .from('donations') - .update({ - status: 'failed', + .from("donations") + .update({ + status: "failed", failed_at: new Date().toISOString(), - metadata: payload.metadata + metadata: payload.metadata, }) - .eq('id', payload.donation_id) - - if (failError) throw failError - break - - case 'donation.refunded': + .eq("id", payload.donation_id); + + if (failError) throw failError; + break; + + case "donation.refunded": // Process refund - const { data: refundData, error: refundError } = await supabase.rpc('refund_donation', { - p_donation_id: payload.donation_id, - p_refund_amount: payload.amount, - p_reason: payload.metadata?.reason || 'Customer requested' - }) - - if (refundError) throw refundError - result = refundData - break - + const { data: refundData, error: refundError } = await supabase.rpc( + "refund_donation", + { + p_donation_id: payload.donation_id, + p_refund_amount: payload.amount, + p_reason: payload.metadata?.reason || "Customer requested", + }, + ); + + if (refundError) throw refundError; + result = refundData; + break; + default: - return new Response( - JSON.stringify({ error: 'Unknown event type' }), - { status: 400, headers: { 'Content-Type': 'application/json' } } - ) + return new Response(JSON.stringify({ error: "Unknown event type" }), { + status: 400, + headers: { "Content-Type": "application/json" }, + }); } - + // Log successful processing - console.log(`Processed ${payload.event_type} for donation ${payload.donation_id}`) - + console.log( + `Processed ${payload.event_type} for donation ${payload.donation_id}`, + ); + return new Response( - JSON.stringify({ + JSON.stringify({ success: true, event_type: payload.event_type, donation_id: payload.donation_id, - result + result, }), - { + { status: 200, - headers: { 'Content-Type': 'application/json' } - } - ) - + headers: { "Content-Type": "application/json" }, + }, + ); } catch (error) { - console.error('Webhook processing error:', error) - + console.error("Webhook processing error:", error); + return new Response( - JSON.stringify({ + JSON.stringify({ error: error.message, - timestamp: new Date().toISOString() + timestamp: new Date().toISOString(), }), - { + { status: 500, - headers: { 'Content-Type': 'application/json' } - } - ) + headers: { "Content-Type": "application/json" }, + }, + ); } -}) +}); // Helper function to verify webhook signatures -async function verifySignature(req: Request, signature: string): Promise { - const secret = Deno.env.get('WEBHOOK_SECRET') - if (!secret) return true // Skip verification if no secret configured - - const body = await req.text() - const encoder = new TextEncoder() - const data = encoder.encode(body) +async function verifySignature( + req: Request, + signature: string, +): Promise { + const secret = Deno.env.get("WEBHOOK_SECRET"); + if (!secret) return true; // Skip verification if no secret configured + + const body = await req.text(); + const encoder = new TextEncoder(); + const data = encoder.encode(body); const key = await crypto.subtle.importKey( 
- 'raw', + "raw", encoder.encode(secret), - { name: 'HMAC', hash: 'SHA-256' }, + { name: "HMAC", hash: "SHA-256" }, false, - ['sign', 'verify'] - ) - - const signatureBuffer = await crypto.subtle.sign('HMAC', key, data) - const computedSignature = btoa(String.fromCharCode(...new Uint8Array(signatureBuffer))) - - return computedSignature === signature -} \ No newline at end of file + ["sign", "verify"], + ); + + const signatureBuffer = await crypto.subtle.sign("HMAC", key, data); + const computedSignature = btoa( + String.fromCharCode(...new Uint8Array(signatureBuffer)), + ); + + return computedSignature === signature; +} diff --git a/example/functions/process-payment/index.ts b/example/functions/process-payment/index.ts index b0c2250..96aafa0 100644 --- a/example/functions/process-payment/index.ts +++ b/example/functions/process-payment/index.ts @@ -1,136 +1,137 @@ // Payment processing Edge Function // Handles payment intent creation and processing via Stripe -import { serve } from "https://deno.land/std@0.168.0/http/server.ts" -import Stripe from "https://esm.sh/stripe@12.0.0?target=deno" -import { createClient } from "https://esm.sh/@supabase/supabase-js@2.38.0" +import { serve } from "https://deno.land/std@0.168.0/http/server.ts"; +import Stripe from "https://esm.sh/stripe@12.0.0?target=deno"; +import { createClient } from "https://esm.sh/@supabase/supabase-js@2.38.0"; interface PaymentRequest { - donation_id: string - amount: number - currency: string - campaign_id: string - donor_email?: string - donor_name?: string - payment_method_id?: string + donation_id: string; + amount: number; + currency: string; + campaign_id: string; + donor_email?: string; + donor_name?: string; + payment_method_id?: string; } serve(async (req: Request) => { // Enable CORS for browser requests - if (req.method === 'OPTIONS') { - return new Response('ok', { + if (req.method === "OPTIONS") { + return new Response("ok", { headers: { - 'Access-Control-Allow-Origin': '*', - 'Access-Control-Allow-Methods': 'POST, OPTIONS', - 'Access-Control-Allow-Headers': 'authorization, x-client-info, apikey, content-type', - } - }) + "Access-Control-Allow-Origin": "*", + "Access-Control-Allow-Methods": "POST, OPTIONS", + "Access-Control-Allow-Headers": + "authorization, x-client-info, apikey, content-type", + }, + }); } - + try { // Verify authentication - const authHeader = req.headers.get('Authorization') + const authHeader = req.headers.get("Authorization"); if (!authHeader) { - return new Response('Unauthorized', { status: 401 }) + return new Response("Unauthorized", { status: 401 }); } - - const payload: PaymentRequest = await req.json() - + + const payload: PaymentRequest = await req.json(); + // Validate required fields if (!payload.donation_id || !payload.amount || !payload.campaign_id) { return new Response( - JSON.stringify({ error: 'Missing required fields' }), - { status: 400, headers: { 'Content-Type': 'application/json' } } - ) + JSON.stringify({ error: "Missing required fields" }), + { status: 400, headers: { "Content-Type": "application/json" } }, + ); } - + // Initialize Stripe - const stripeKey = Deno.env.get('STRIPE_SECRET_KEY') + const stripeKey = Deno.env.get("STRIPE_SECRET_KEY"); if (!stripeKey) { - throw new Error('Stripe configuration missing') + throw new Error("Stripe configuration missing"); } - + const stripe = new Stripe(stripeKey, { - apiVersion: '2023-10-16', + apiVersion: "2023-10-16", typescript: true, - }) - + }); + // Initialize Supabase - const supabaseUrl = Deno.env.get('SUPABASE_URL') - 
const supabaseKey = Deno.env.get('SUPABASE_SERVICE_ROLE_KEY') - + const supabaseUrl = Deno.env.get("SUPABASE_URL"); + const supabaseKey = Deno.env.get("SUPABASE_SERVICE_ROLE_KEY"); + if (!supabaseUrl || !supabaseKey) { - throw new Error('Supabase configuration missing') + throw new Error("Supabase configuration missing"); } - - const supabase = createClient(supabaseUrl, supabaseKey) - + + const supabase = createClient(supabaseUrl, supabaseKey); + // Get campaign details for metadata const { data: campaign, error: campaignError } = await supabase - .from('campaigns') - .select('title, organization_id') - .eq('id', payload.campaign_id) - .single() - + .from("campaigns") + .select("title, organization_id") + .eq("id", payload.campaign_id) + .single(); + if (campaignError) { - throw new Error(`Campaign not found: ${campaignError.message}`) + throw new Error(`Campaign not found: ${campaignError.message}`); } - + // Create or retrieve customer - let customer + let customer; if (payload.donor_email) { const customers = await stripe.customers.list({ email: payload.donor_email, - limit: 1 - }) - + limit: 1, + }); + if (customers.data.length > 0) { - customer = customers.data[0] + customer = customers.data[0]; } else { customer = await stripe.customers.create({ email: payload.donor_email, name: payload.donor_name, metadata: { donation_id: payload.donation_id, - campaign_id: payload.campaign_id - } - }) + campaign_id: payload.campaign_id, + }, + }); } } - + // Create payment intent const paymentIntent = await stripe.paymentIntents.create({ amount: Math.round(payload.amount * 100), // Convert to cents - currency: payload.currency || 'usd', + currency: payload.currency || "usd", customer: customer?.id, payment_method: payload.payment_method_id, - confirmation_method: 'automatic', + confirmation_method: "automatic", confirm: !!payload.payment_method_id, metadata: { donation_id: payload.donation_id, campaign_id: payload.campaign_id, campaign_title: campaign.title, - organization_id: campaign.organization_id + organization_id: campaign.organization_id, }, description: `Donation to ${campaign.title}`, - statement_descriptor_suffix: 'DONATION', - }) - + statement_descriptor_suffix: "DONATION", + }); + // Update donation record with payment intent ID const { error: updateError } = await supabase - .from('donations') + .from("donations") .update({ payment_intent_id: paymentIntent.id, - status: 'processing', - processed_at: new Date().toISOString() + status: "processing", + processed_at: new Date().toISOString(), }) - .eq('id', payload.donation_id) - + .eq("id", payload.donation_id); + if (updateError) { // Log error but don't fail the payment - console.error('Failed to update donation record:', updateError) + console.error("Failed to update donation record:", updateError); } - + // Return client secret for frontend confirmation return new Response( JSON.stringify({ @@ -139,53 +140,52 @@ serve(async (req: Request) => { payment_intent_id: paymentIntent.id, status: paymentIntent.status, amount: paymentIntent.amount, - currency: paymentIntent.currency + currency: paymentIntent.currency, }), { status: 200, headers: { - 'Content-Type': 'application/json', - 'Access-Control-Allow-Origin': '*', - } - } - ) - + "Content-Type": "application/json", + "Access-Control-Allow-Origin": "*", + }, + }, + ); } catch (error) { - console.error('Payment processing error:', error) - + console.error("Payment processing error:", error); + // Check if it's a Stripe error - if (error.type === 'StripeCardError') { + if (error.type === 
"StripeCardError") { return new Response( JSON.stringify({ error: error.message, - type: 'card_error', + type: "card_error", code: error.code, - decline_code: error.decline_code + decline_code: error.decline_code, }), { status: 400, headers: { - 'Content-Type': 'application/json', - 'Access-Control-Allow-Origin': '*', - } - } - ) + "Content-Type": "application/json", + "Access-Control-Allow-Origin": "*", + }, + }, + ); } - + // Generic error response return new Response( JSON.stringify({ - error: error.message || 'Payment processing failed', - type: 'api_error', - timestamp: new Date().toISOString() + error: error.message || "Payment processing failed", + type: "api_error", + timestamp: new Date().toISOString(), }), { status: 500, headers: { - 'Content-Type': 'application/json', - 'Access-Control-Allow-Origin': '*', - } - } - ) + "Content-Type": "application/json", + "Access-Control-Allow-Origin": "*", + }, + }, + ); } -}) \ No newline at end of file +}); diff --git a/example/functions/send-receipt/index.ts b/example/functions/send-receipt/index.ts index e49b03a..c4b9eb6 100644 --- a/example/functions/send-receipt/index.ts +++ b/example/functions/send-receipt/index.ts @@ -1,58 +1,59 @@ // Email receipt sender Edge Function // Sends donation confirmation and receipt emails -import { serve } from "https://deno.land/std@0.168.0/http/server.ts" -import { createClient } from "https://esm.sh/@supabase/supabase-js@2.38.0" +import { serve } from "https://deno.land/std@0.168.0/http/server.ts"; +import { createClient } from "https://esm.sh/@supabase/supabase-js@2.38.0"; interface EmailRequest { - donation_id: string - type: 'confirmation' | 'receipt' | 'refund' - custom_message?: string + donation_id: string; + type: "confirmation" | "receipt" | "refund"; + custom_message?: string; } interface DonationDetails { - id: string - amount: number - currency_code: string - donor_email: string - donor_name: string - message?: string - created_at: string - completed_at?: string + id: string; + amount: number; + currency_code: string; + donor_email: string; + donor_name: string; + message?: string; + created_at: string; + completed_at?: string; campaign: { - title: string + title: string; organization: { - name: string - tax_id?: string - is_tax_exempt: boolean - } - } + name: string; + tax_id?: string; + is_tax_exempt: boolean; + }; + }; } serve(async (req: Request) => { try { // Verify authentication - const authHeader = req.headers.get('Authorization') + const authHeader = req.headers.get("Authorization"); if (!authHeader) { - return new Response('Unauthorized', { status: 401 }) + return new Response("Unauthorized", { status: 401 }); } - - const payload: EmailRequest = await req.json() - + + const payload: EmailRequest = await req.json(); + // Initialize Supabase - const supabaseUrl = Deno.env.get('SUPABASE_URL') - const supabaseKey = Deno.env.get('SUPABASE_SERVICE_ROLE_KEY') - + const supabaseUrl = Deno.env.get("SUPABASE_URL"); + const supabaseKey = Deno.env.get("SUPABASE_SERVICE_ROLE_KEY"); + if (!supabaseUrl || !supabaseKey) { - throw new Error('Supabase configuration missing') + throw new Error("Supabase configuration missing"); } - - const supabase = createClient(supabaseUrl, supabaseKey) - + + const supabase = createClient(supabaseUrl, supabaseKey); + // Get donation details const { data: donation, error: donationError } = await supabase - .from('donations') - .select(` + .from("donations") + .select( + ` id, amount, currency_code, @@ -69,130 +70,137 @@ serve(async (req: Request) => { is_tax_exempt ) 
) - `) - .eq('id', payload.donation_id) - .single() - + `, + ) + .eq("id", payload.donation_id) + .single(); + if (donationError || !donation) { - throw new Error(`Donation not found: ${donationError?.message}`) + throw new Error(`Donation not found: ${donationError?.message}`); } - + // Skip if no email address if (!donation.donor_email) { return new Response( - JSON.stringify({ - success: true, - message: 'No email address provided, skipping email' + JSON.stringify({ + success: true, + message: "No email address provided, skipping email", }), - { status: 200, headers: { 'Content-Type': 'application/json' } } - ) + { status: 200, headers: { "Content-Type": "application/json" } }, + ); } - + // Generate email content based on type - const emailContent = generateEmailContent(payload.type, donation as DonationDetails, payload.custom_message) - + const emailContent = generateEmailContent( + payload.type, + donation as DonationDetails, + payload.custom_message, + ); + // Send email using your preferred email service // This example uses a generic email API endpoint - const emailServiceUrl = Deno.env.get('EMAIL_SERVICE_URL') - const emailApiKey = Deno.env.get('EMAIL_API_KEY') - + const emailServiceUrl = Deno.env.get("EMAIL_SERVICE_URL"); + const emailApiKey = Deno.env.get("EMAIL_API_KEY"); + if (!emailServiceUrl || !emailApiKey) { // Log but don't fail if email service not configured - console.warn('Email service not configured, skipping email send') + console.warn("Email service not configured, skipping email send"); return new Response( - JSON.stringify({ - success: true, - message: 'Email service not configured' + JSON.stringify({ + success: true, + message: "Email service not configured", }), - { status: 200, headers: { 'Content-Type': 'application/json' } } - ) + { status: 200, headers: { "Content-Type": "application/json" } }, + ); } - + const emailResponse = await fetch(emailServiceUrl, { - method: 'POST', + method: "POST", headers: { - 'Authorization': `Bearer ${emailApiKey}`, - 'Content-Type': 'application/json' + Authorization: `Bearer ${emailApiKey}`, + "Content-Type": "application/json", }, body: JSON.stringify({ to: donation.donor_email, - from: 'donations@example.org', + from: "donations@example.org", subject: emailContent.subject, html: emailContent.html, text: emailContent.text, - replyTo: 'support@example.org', - tags: ['donation', payload.type] - }) - }) - + replyTo: "support@example.org", + tags: ["donation", payload.type], + }), + }); + if (!emailResponse.ok) { - const error = await emailResponse.text() - throw new Error(`Email service error: ${error}`) + const error = await emailResponse.text(); + throw new Error(`Email service error: ${error}`); } - + // Log email sent - console.log(`Sent ${payload.type} email for donation ${payload.donation_id} to ${donation.donor_email}`) - + console.log( + `Sent ${payload.type} email for donation ${payload.donation_id} to ${donation.donor_email}`, + ); + // Record email sent in database - const { error: logError } = await supabase - .from('email_logs') - .insert({ - recipient: donation.donor_email, - type: `donation_${payload.type}`, - subject: emailContent.subject, - donation_id: payload.donation_id, - sent_at: new Date().toISOString() - }) - + const { error: logError } = await supabase.from("email_logs").insert({ + recipient: donation.donor_email, + type: `donation_${payload.type}`, + subject: emailContent.subject, + donation_id: payload.donation_id, + sent_at: new Date().toISOString(), + }); + if (logError) { - console.error('Failed 
to log email:', logError) + console.error("Failed to log email:", logError); } - + return new Response( JSON.stringify({ success: true, email_sent_to: donation.donor_email, - type: payload.type + type: payload.type, }), - { + { status: 200, - headers: { 'Content-Type': 'application/json' } - } - ) - + headers: { "Content-Type": "application/json" }, + }, + ); } catch (error) { - console.error('Email sending error:', error) - + console.error("Email sending error:", error); + return new Response( JSON.stringify({ error: error.message, - timestamp: new Date().toISOString() + timestamp: new Date().toISOString(), }), - { + { status: 500, - headers: { 'Content-Type': 'application/json' } - } - ) + headers: { "Content-Type": "application/json" }, + }, + ); } -}) +}); function generateEmailContent( - type: 'confirmation' | 'receipt' | 'refund', + type: "confirmation" | "receipt" | "refund", donation: DonationDetails, - customMessage?: string + customMessage?: string, ): { subject: string; html: string; text: string } { const formatCurrency = (amount: number, currency: string) => { - return new Intl.NumberFormat('en-US', { - style: 'currency', - currency: currency || 'USD' - }).format(amount) - } - - const formattedAmount = formatCurrency(donation.amount, donation.currency_code) - const donorName = donation.donor_name || 'Valued Donor' - + return new Intl.NumberFormat("en-US", { + style: "currency", + currency: currency || "USD", + }).format(amount); + }; + + const formattedAmount = formatCurrency( + donation.amount, + donation.currency_code, + ); + const donorName = donation.donor_name || "Valued Donor"; + switch (type) { - case 'confirmation': + case "confirmation": return { subject: `Thank you for your donation to ${donation.campaign.title}`, html: ` @@ -200,21 +208,23 @@ function generateEmailContent(

Dear ${donorName},

We've received your donation of ${formattedAmount} to ${donation.campaign.title}.

Your donation is being processed and you'll receive a receipt once it's complete.

- ${donation.message ? `

Your message: "${donation.message}"

` : ''} - ${customMessage ? `

${customMessage}

` : ''} + ${donation.message ? `

Your message: "${donation.message}"

` : ""} + ${customMessage ? `

${customMessage}

` : ""}

Thank you for supporting ${donation.campaign.organization.name}!


Donation ID: ${donation.id}

`, - text: `Thank you for your donation!\n\nDear ${donorName},\n\nWe've received your donation of ${formattedAmount} to ${donation.campaign.title}.\n\nYour donation is being processed and you'll receive a receipt once it's complete.\n\n${donation.message ? `Your message: "${donation.message}"\n\n` : ''}Thank you for supporting ${donation.campaign.organization.name}!\n\nDonation ID: ${donation.id}` - } - - case 'receipt': - const receiptDate = new Date(donation.completed_at || donation.created_at).toLocaleDateString() - const taxInfo = donation.campaign.organization.is_tax_exempt - ? `

Tax Information: ${donation.campaign.organization.name} is a tax-exempt organization. Tax ID: ${donation.campaign.organization.tax_id || 'N/A'}

` - : '' - + text: `Thank you for your donation!\n\nDear ${donorName},\n\nWe've received your donation of ${formattedAmount} to ${donation.campaign.title}.\n\nYour donation is being processed and you'll receive a receipt once it's complete.\n\n${donation.message ? `Your message: "${donation.message}"\n\n` : ""}Thank you for supporting ${donation.campaign.organization.name}!\n\nDonation ID: ${donation.id}`, + }; + + case "receipt": + const receiptDate = new Date( + donation.completed_at || donation.created_at, + ).toLocaleDateString(); + const taxInfo = donation.campaign.organization.is_tax_exempt + ? `

Tax Information: ${donation.campaign.organization.name} is a tax-exempt organization. Tax ID: ${donation.campaign.organization.tax_id || "N/A"}

` + : ""; + return { subject: `Receipt for your donation to ${donation.campaign.title}`, html: ` @@ -230,15 +240,15 @@ function generateEmailContent( Transaction ID:${donation.id} ${taxInfo} - ${customMessage ? `

${customMessage}

` : ''} + ${customMessage ? `

${customMessage}

` : ""}

Please keep this receipt for your records.


This is an official receipt for your donation.

`, - text: `Donation Receipt\n\nDear ${donorName},\n\nThank you for your generous donation!\n\nDonation Details:\n- Amount: ${formattedAmount}\n- Date: ${receiptDate}\n- Campaign: ${donation.campaign.title}\n- Organization: ${donation.campaign.organization.name}\n- Transaction ID: ${donation.id}\n\n${donation.campaign.organization.is_tax_exempt ? `Tax Information: ${donation.campaign.organization.name} is a tax-exempt organization. Tax ID: ${donation.campaign.organization.tax_id || 'N/A'}\n\n` : ''}Please keep this receipt for your records.\n\nThis is an official receipt for your donation.` - } - - case 'refund': + text: `Donation Receipt\n\nDear ${donorName},\n\nThank you for your generous donation!\n\nDonation Details:\n- Amount: ${formattedAmount}\n- Date: ${receiptDate}\n- Campaign: ${donation.campaign.title}\n- Organization: ${donation.campaign.organization.name}\n- Transaction ID: ${donation.id}\n\n${donation.campaign.organization.is_tax_exempt ? `Tax Information: ${donation.campaign.organization.name} is a tax-exempt organization. Tax ID: ${donation.campaign.organization.tax_id || "N/A"}\n\n` : ""}Please keep this receipt for your records.\n\nThis is an official receipt for your donation.`, + }; + + case "refund": return { subject: `Refund processed for your donation to ${donation.campaign.title}`, html: ` @@ -246,15 +256,15 @@ function generateEmailContent(

Dear ${donorName},

Your donation of ${formattedAmount} to ${donation.campaign.title} has been refunded.

The refund should appear in your account within 5-10 business days.

- ${customMessage ? `

Reason: ${customMessage}

` : ''} + ${customMessage ? `

Reason: ${customMessage}

` : ""}

If you have any questions, please contact our support team.


Transaction ID: ${donation.id}

`, - text: `Refund Notification\n\nDear ${donorName},\n\nYour donation of ${formattedAmount} to ${donation.campaign.title} has been refunded.\n\nThe refund should appear in your account within 5-10 business days.\n\n${customMessage ? `Reason: ${customMessage}\n\n` : ''}If you have any questions, please contact our support team.\n\nTransaction ID: ${donation.id}` - } - + text: `Refund Notification\n\nDear ${donorName},\n\nYour donation of ${formattedAmount} to ${donation.campaign.title} has been refunded.\n\nThe refund should appear in your account within 5-10 business days.\n\n${customMessage ? `Reason: ${customMessage}\n\n` : ""}If you have any questions, please contact our support team.\n\nTransaction ID: ${donation.id}`, + }; + default: - throw new Error(`Unknown email type: ${type}`) + throw new Error(`Unknown email type: ${type}`); } -} \ No newline at end of file +} diff --git a/issues/2025-08-30-tests/ast-migration-engine-test-coverage.md b/issues/2025-08-30-tests/ast-migration-engine-test-coverage.md index 6a169b8..82587a8 100644 --- a/issues/2025-08-30-tests/ast-migration-engine-test-coverage.md +++ b/issues/2025-08-30-tests/ast-migration-engine-test-coverage.md @@ -17,6 +17,7 @@ The `ASTMigrationEngine` is responsible for parsing SQL statements into Abstract ## Component Overview The ASTMigrationEngine likely provides: + - SQL statement parsing to Abstract Syntax Tree representation - Schema structure analysis and comparison - Dependency graph generation from SQL relationships @@ -27,6 +28,7 @@ The ASTMigrationEngine likely provides: ## What Needs Testing ### SQL Parsing + - [ ] DDL statement parsing (CREATE, ALTER, DROP) - [ ] DML statement parsing (INSERT, UPDATE, DELETE) - [ ] Complex SQL constructs (CTEs, window functions, stored procedures) @@ -35,6 +37,7 @@ The ASTMigrationEngine likely provides: - [ ] Syntax error detection and reporting ### AST Operations + - [ ] AST construction from SQL statements - [ ] AST traversal and analysis - [ ] AST modification and transformation @@ -43,6 +46,7 @@ The ASTMigrationEngine likely provides: - [ ] AST validation and consistency checks ### Migration Generation + - [ ] Schema diff analysis using AST comparison - [ ] Migration step ordering and optimization - [ ] Dependency resolution between database objects @@ -51,6 +55,7 @@ The ASTMigrationEngine likely provides: - [ ] Migration validation and testing ### Edge Cases + - [ ] Malformed SQL input handling - [ ] Complex nested query structures - [ ] Large SQL statement processing @@ -59,6 +64,7 @@ The ASTMigrationEngine likely provides: - [ ] Case sensitivity handling ### Integration Points + - [ ] SQL parser integration - [ ] Migration compiler integration - [ ] Database schema analysis @@ -66,6 +72,7 @@ The ASTMigrationEngine likely provides: - [ ] Performance optimization systems ### Error Scenarios + - [ ] Invalid SQL syntax - [ ] Unsupported SQL constructs - [ ] Parser memory exhaustion @@ -76,50 +83,53 @@ The ASTMigrationEngine likely provides: ## Testing Requirements ### Unit Tests + ```javascript -describe('ASTMigrationEngine', () => { - describe('SQL parsing', () => { - it('should parse CREATE TABLE statements') - it('should parse ALTER TABLE statements') - it('should parse complex JOIN queries') - it('should handle PostgreSQL-specific syntax') - it('should detect and report syntax errors') - }) - - describe('AST operations', () => { - it('should construct valid ASTs from SQL') - it('should traverse AST structures correctly') - it('should modify ASTs and regenerate SQL') - 
it('should compare ASTs for differences') - }) - - describe('migration generation', () => { - it('should generate migrations from schema diffs') - it('should resolve dependencies correctly') - it('should detect and resolve conflicts') - it('should optimize migration sequences') - }) -}) +describe("ASTMigrationEngine", () => { + describe("SQL parsing", () => { + it("should parse CREATE TABLE statements"); + it("should parse ALTER TABLE statements"); + it("should parse complex JOIN queries"); + it("should handle PostgreSQL-specific syntax"); + it("should detect and report syntax errors"); + }); + + describe("AST operations", () => { + it("should construct valid ASTs from SQL"); + it("should traverse AST structures correctly"); + it("should modify ASTs and regenerate SQL"); + it("should compare ASTs for differences"); + }); + + describe("migration generation", () => { + it("should generate migrations from schema diffs"); + it("should resolve dependencies correctly"); + it("should detect and resolve conflicts"); + it("should optimize migration sequences"); + }); +}); ``` ### Integration Tests + ```javascript -describe('ASTMigrationEngine Integration', () => { - it('should process real SQL schema files') - it('should generate valid migrations from schema changes') - it('should integrate with migration compiler') - it('should handle complex schema evolution scenarios') -}) +describe("ASTMigrationEngine Integration", () => { + it("should process real SQL schema files"); + it("should generate valid migrations from schema changes"); + it("should integrate with migration compiler"); + it("should handle complex schema evolution scenarios"); +}); ``` ### Performance Tests + ```javascript -describe('ASTMigrationEngine Performance', () => { - it('should handle large SQL files efficiently') - it('should process complex schemas within time limits') - it('should manage memory usage effectively') - it('should scale with schema complexity') -}) +describe("ASTMigrationEngine Performance", () => { + it("should handle large SQL files efficiently"); + it("should process complex schemas within time limits"); + it("should manage memory usage effectively"); + it("should scale with schema complexity"); +}); ``` ## Acceptance Criteria @@ -136,6 +146,7 @@ describe('ASTMigrationEngine Performance', () => { ## Priority Justification **Critical Priority** because: + 1. **Schema Safety**: Parsing errors can corrupt database schemas 2. **Migration Quality**: AST analysis ensures high-quality migrations 3. 
**Dependency Accuracy**: Complex dependency resolution affects data integrity @@ -153,16 +164,19 @@ describe('ASTMigrationEngine Performance', () => { ## Testing Challenges ### SQL Complexity + - Vast variety of SQL constructs to test - PostgreSQL-specific features and extensions - Edge cases in SQL parsing and generation ### AST Manipulation + - Complex tree structure validation - Performance considerations for large schemas - Memory management during processing ### Migration Correctness + - Generated SQL must be executable - Migration sequences must be logically correct - Rollback generation must be accurate @@ -177,22 +191,25 @@ describe('ASTMigrationEngine Performance', () => { ## Special Considerations ### SQL Parser Integration + - Must validate parser compatibility - Need comprehensive SQL test cases - Performance optimization requirements ### AST Accuracy + - Complex tree structure validation - SQL regeneration fidelity - Schema comparison accuracy ### Migration Safety + - Generated migrations must be safe - Dependency ordering must be correct - Rollback operations must be reliable --- -*"Insufficient facts always invite danger."* - Spock +_"Insufficient facts always invite danger."_ - Spock -The ASTMigrationEngine transforms SQL into logical structures. Like Data processing complex algorithms, every parsing operation must be precise - imperfect analysis leads to imperfect migrations. \ No newline at end of file +The ASTMigrationEngine transforms SQL into logical structures. Like Data processing complex algorithms, every parsing operation must be precise - imperfect analysis leads to imperfect migrations. diff --git a/issues/2025-08-30-tests/child-process-wrapper-test-coverage.md b/issues/2025-08-30-tests/child-process-wrapper-test-coverage.md index 11c80b5..1d978bd 100644 --- a/issues/2025-08-30-tests/child-process-wrapper-test-coverage.md +++ b/issues/2025-08-30-tests/child-process-wrapper-test-coverage.md @@ -2,13 +2,13 @@ ## Core Information -| Field | Value | -|-------|-------| -| **Severity Level** | 🟠 HIGH - System Integration | -| **Location** | `src/lib/ChildProcessWrapper.js` | -| **Category** | Testing | -| **Brief Description** | Child process management wrapper lacks test coverage | -| **Impact** | Process management failures could cause system instability | +| Field | Value | +| --------------------- | ---------------------------------------------------------- | +| **Severity Level** | 🟠 HIGH - System Integration | +| **Location** | `src/lib/ChildProcessWrapper.js` | +| **Category** | Testing | +| **Brief Description** | Child process management wrapper lacks test coverage | +| **Impact** | Process management failures could cause system instability | ## Summary @@ -17,6 +17,7 @@ The `ChildProcessWrapper` manages subprocess execution for D.A.T.A., handling op ## Component Overview The ChildProcessWrapper likely provides: + - Subprocess lifecycle management - Process output capture and streaming - Error handling and timeout management @@ -27,6 +28,7 @@ The ChildProcessWrapper likely provides: ## What Needs Testing ### Core Functionality + - [ ] Process spawning and initialization - [ ] Command execution with arguments - [ ] Process output capture (stdout/stderr) @@ -35,6 +37,7 @@ The ChildProcessWrapper likely provides: - [ ] Process timeout management ### Resource Management + - [ ] Memory usage monitoring - [ ] File descriptor management - [ ] Process cleanup on completion @@ -43,6 +46,7 @@ The ChildProcessWrapper likely provides: - [ ] Concurrent process handling ### 
Edge Cases + - [ ] Process spawn failures - [ ] Long-running process handling - [ ] Process hanging and timeout scenarios @@ -51,6 +55,7 @@ The ChildProcessWrapper likely provides: - [ ] System resource exhaustion ### Integration Points + - [ ] pgTAP test execution integration - [ ] SQL compiler process management - [ ] External tool execution @@ -59,6 +64,7 @@ The ChildProcessWrapper likely provides: - [ ] Error reporting mechanisms ### Error Scenarios + - [ ] Process spawn failures - [ ] Process termination errors - [ ] Signal handling failures @@ -69,49 +75,52 @@ The ChildProcessWrapper likely provides: ## Testing Requirements ### Unit Tests + ```javascript -describe('ChildProcessWrapper', () => { - describe('process management', () => { - it('should spawn processes successfully') - it('should capture process output') - it('should handle process termination') - it('should manage process timeouts') - }) - - describe('resource management', () => { - it('should cleanup processes on completion') - it('should prevent zombie processes') - it('should handle concurrent processes') - it('should monitor resource usage') - }) - - describe('error handling', () => { - it('should handle spawn failures') - it('should handle process crashes') - it('should handle timeout scenarios') - it('should cleanup resources on errors') - }) -}) +describe("ChildProcessWrapper", () => { + describe("process management", () => { + it("should spawn processes successfully"); + it("should capture process output"); + it("should handle process termination"); + it("should manage process timeouts"); + }); + + describe("resource management", () => { + it("should cleanup processes on completion"); + it("should prevent zombie processes"); + it("should handle concurrent processes"); + it("should monitor resource usage"); + }); + + describe("error handling", () => { + it("should handle spawn failures"); + it("should handle process crashes"); + it("should handle timeout scenarios"); + it("should cleanup resources on errors"); + }); +}); ``` ### Integration Tests + ```javascript -describe('ChildProcessWrapper Integration', () => { - it('should execute real system commands') - it('should handle pgTAP test execution') - it('should manage SQL compilation processes') - it('should integrate with CI/CD systems') -}) +describe("ChildProcessWrapper Integration", () => { + it("should execute real system commands"); + it("should handle pgTAP test execution"); + it("should manage SQL compilation processes"); + it("should integrate with CI/CD systems"); +}); ``` ### Stress Tests + ```javascript -describe('ChildProcessWrapper Stress Tests', () => { - it('should handle multiple concurrent processes') - it('should manage long-running processes') - it('should recover from system resource pressure') - it('should handle rapid process spawning/termination') -}) +describe("ChildProcessWrapper Stress Tests", () => { + it("should handle multiple concurrent processes"); + it("should manage long-running processes"); + it("should recover from system resource pressure"); + it("should handle rapid process spawning/termination"); +}); ``` ## Acceptance Criteria @@ -128,6 +137,7 @@ describe('ChildProcessWrapper Stress Tests', () => { ## Priority Justification **High Priority** because: + 1. **System Stability**: Process leaks can destabilize entire system 2. **Resource Management**: Critical for preventing resource exhaustion 3. 
**Test Execution**: Essential for pgTAP test execution reliability @@ -145,16 +155,19 @@ describe('ChildProcessWrapper Stress Tests', () => { ## Testing Challenges ### Process Lifecycle Complexity + - Complex state transitions during process lifecycle - Timing-dependent behavior in process management - Platform-specific process handling differences ### Resource Management + - Detecting resource leaks in tests - Simulating resource exhaustion scenarios - Validating cleanup under failure conditions ### Concurrency Testing + - Multiple concurrent process management - Race condition detection - Deadlock prevention validation @@ -169,12 +182,14 @@ describe('ChildProcessWrapper Stress Tests', () => { ## Impact Assessment ### Direct Impact + - Test execution reliability - SQL compilation process management - External tool integration - CI/CD pipeline stability ### Indirect Impact + - System resource utilization - Overall system stability - Development workflow reliability @@ -183,22 +198,25 @@ describe('ChildProcessWrapper Stress Tests', () => { ## Special Considerations ### Cross-Platform Compatibility + - Windows vs Unix process management differences - Signal handling variations across platforms - Path and command differences ### Resource Monitoring + - Memory leak detection - File descriptor leak prevention - Process cleanup verification ### Timeout Management + - Graceful process termination - Forced termination scenarios - Cleanup after timeout --- -*"I have noticed that the adherents of ancient religions frequently adopt a hostile posture to those who do not subscribe to their particular mythology."* - Data +_"I have noticed that the adherents of ancient religions frequently adopt a hostile posture to those who do not subscribe to their particular mythology."_ - Data -ChildProcessWrapper must manage external processes with the precision of Data coordinating multiple subroutines. Every spawned process must be accounted for and properly managed. \ No newline at end of file +ChildProcessWrapper must manage external processes with the precision of Data coordinating multiple subroutines. Every spawned process must be accounted for and properly managed. 
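To ground the ChildProcessWrapper checklist above, here is a minimal Vitest sketch of the timeout-and-cleanup behaviour it asks for. The wrapper's real API is not documented in this issue, so the `execute({ command, args, timeoutMs })` helper below is a hypothetical stand-in built directly on `node:child_process`; it only illustrates the kind of assertions the eventual tests should make.

```javascript
// Sketch only: the real ChildProcessWrapper API is not shown in this issue.
import { describe, it, expect } from "vitest";
import { spawn } from "node:child_process";

// Hypothetical stand-in so the sketch is self-contained and runnable.
function execute({ command, args = [], timeoutMs = 5000 }) {
  return new Promise((resolve, reject) => {
    const child = spawn(command, args);
    const timer = setTimeout(() => {
      child.kill("SIGTERM"); // terminate hung processes instead of leaking them
      reject(new Error(`Process timed out after ${timeoutMs}ms`));
    }, timeoutMs);

    let stdout = "";
    child.stdout.on("data", (chunk) => (stdout += chunk));
    child.on("error", (error) => {
      clearTimeout(timer);
      reject(error);
    });
    child.on("close", (code) => {
      clearTimeout(timer);
      resolve({ code, stdout });
    });
  });
}

describe("process execution (sketch)", () => {
  it("should capture stdout and exit code for fast processes", async () => {
    const result = await execute({
      command: "node",
      args: ["-e", "console.log('ok')"],
    });
    expect(result.code).toBe(0);
    expect(result.stdout).toContain("ok");
  });

  it("should terminate and reject processes that exceed the timeout", async () => {
    await expect(
      execute({
        command: "node",
        args: ["-e", "setTimeout(() => {}, 10000)"],
        timeoutMs: 100,
      }),
    ).rejects.toThrow(/timed out/);
  });
});
```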
diff --git a/issues/2025-08-30-tests/compile-command-test-coverage.md b/issues/2025-08-30-tests/compile-command-test-coverage.md index 1b5e275..6dc3ddb 100644 --- a/issues/2025-08-30-tests/compile-command-test-coverage.md +++ b/issues/2025-08-30-tests/compile-command-test-coverage.md @@ -2,13 +2,13 @@ ## Core Information -| Field | Value | -|-------|-------| -| **Severity Level** | 🔴 CRITICAL - Show Stopper | -| **Location** | `src/commands/db/CompileCommand.js` | -| **Category** | Testing | -| **Brief Description** | CompileCommand lacks comprehensive test coverage | -| **Impact** | Migration compilation failures could go undetected | +| Field | Value | +| --------------------- | -------------------------------------------------- | +| **Severity Level** | 🔴 CRITICAL - Show Stopper | +| **Location** | `src/commands/db/CompileCommand.js` | +| **Category** | Testing | +| **Brief Description** | CompileCommand lacks comprehensive test coverage | +| **Impact** | Migration compilation failures could go undetected | ## Summary @@ -17,8 +17,9 @@ The `CompileCommand` class is responsible for compiling SQL sources into migrati ## Component Overview The CompileCommand extends BuildCommand and provides: + - SQL source compilation into migration files -- Optional Edge Functions deployment integration +- Optional Edge Functions deployment integration - Path validation and error handling - Integration with MigrationCompiler - Production safety with `--prod` flag support @@ -26,6 +27,7 @@ The CompileCommand extends BuildCommand and provides: ## What Needs Testing ### Core Functionality + - [ ] Constructor behavior with different parameter combinations - [ ] Path validation (missing input/output directories) - [ ] Error handling for invalid paths @@ -33,6 +35,7 @@ The CompileCommand extends BuildCommand and provides: - [ ] Event emission patterns (`start`, `progress`, `success`, `error`) ### Edge Cases + - [ ] Missing SQL source directory - [ ] Missing migrations output directory - [ ] Invalid MigrationCompiler configuration @@ -40,12 +43,14 @@ The CompileCommand extends BuildCommand and provides: - [ ] Error handling when MigrationCompiler fails ### Integration Points + - [ ] BuildCommand inheritance behavior - [ ] MigrationCompiler instantiation and configuration - [ ] File system interactions - [ ] Options handling for function deployment ### Error Scenarios + - [ ] Constructor doesn't throw on missing directories (deferred validation) - [ ] Proper error emission before throwing - [ ] Graceful handling of compilation failures @@ -54,42 +59,45 @@ The CompileCommand extends BuildCommand and provides: ## Testing Requirements ### Unit Tests + ```javascript -describe('CompileCommand', () => { - describe('constructor', () => { - it('should accept valid parameters without throwing') - it('should not validate paths in constructor') - it('should inherit from BuildCommand') - }) - - describe('performExecute', () => { - it('should validate required directories') - it('should throw descriptive error for missing directories') - it('should emit start event with isProd flag') - it('should create MigrationCompiler with correct config') - it('should handle compilation success') - it('should handle compilation failure') - }) -}) +describe("CompileCommand", () => { + describe("constructor", () => { + it("should accept valid parameters without throwing"); + it("should not validate paths in constructor"); + it("should inherit from BuildCommand"); + }); + + describe("performExecute", () => { + it("should validate required 
directories"); + it("should throw descriptive error for missing directories"); + it("should emit start event with isProd flag"); + it("should create MigrationCompiler with correct config"); + it("should handle compilation success"); + it("should handle compilation failure"); + }); +}); ``` ### Integration Tests + ```javascript -describe('CompileCommand Integration', () => { - it('should compile SQL sources to migration file') - it('should handle functions deployment integration') - it('should work with real file system paths') - it('should integrate with MigrationCompiler') -}) +describe("CompileCommand Integration", () => { + it("should compile SQL sources to migration file"); + it("should handle functions deployment integration"); + it("should work with real file system paths"); + it("should integrate with MigrationCompiler"); +}); ``` ### Error Handling Tests + ```javascript -describe('CompileCommand Error Handling', () => { - it('should emit error events before throwing') - it('should provide meaningful error messages') - it('should handle MigrationCompiler failures gracefully') -}) +describe("CompileCommand Error Handling", () => { + it("should emit error events before throwing"); + it("should provide meaningful error messages"); + it("should handle MigrationCompiler failures gracefully"); +}); ``` ## Acceptance Criteria @@ -106,6 +114,7 @@ describe('CompileCommand Error Handling', () => { ## Priority Justification **Critical Priority** because: + 1. **Core Migration Workflow**: CompileCommand is essential for the primary database migration workflow 2. **Data Safety**: Compilation errors could corrupt migration files 3. **Production Impact**: Failures in production could prevent deployments @@ -122,12 +131,12 @@ describe('CompileCommand Error Handling', () => { ## Estimated Effort - **Unit Tests**: 4-6 hours -- **Integration Tests**: 2-3 hours +- **Integration Tests**: 2-3 hours - **Error Handling Tests**: 2-3 hours - **Total**: 8-12 hours --- -*"In critical times, men sometimes see exactly what they wish to see."* - Spock +_"In critical times, men sometimes see exactly what they wish to see."_ - Spock -CompileCommand sits at the heart of the migration workflow. Without proper test coverage, we're navigating by hope rather than logic. \ No newline at end of file +CompileCommand sits at the heart of the migration workflow. Without proper test coverage, we're navigating by hope rather than logic. 
diff --git a/issues/2025-08-30-tests/coverage-enforcer-test-coverage.md b/issues/2025-08-30-tests/coverage-enforcer-test-coverage.md index 9bea9a7..764fbfb 100644 --- a/issues/2025-08-30-tests/coverage-enforcer-test-coverage.md +++ b/issues/2025-08-30-tests/coverage-enforcer-test-coverage.md @@ -2,13 +2,13 @@ ## Core Information -| Field | Value | -|-------|-------| -| **Severity Level** | 🟠 HIGH - Quality Guardian | -| **Location** | `src/lib/testing/CoverageEnforcer.js` | -| **Category** | Testing | -| **Brief Description** | Test coverage enforcement system lacks test coverage | -| **Impact** | Coverage enforcement failures could allow untested code into production | +| Field | Value | +| --------------------- | ----------------------------------------------------------------------- | +| **Severity Level** | 🟠 HIGH - Quality Guardian | +| **Location** | `src/lib/testing/CoverageEnforcer.js` | +| **Category** | Testing | +| **Brief Description** | Test coverage enforcement system lacks test coverage | +| **Impact** | Coverage enforcement failures could allow untested code into production | ## Summary @@ -17,6 +17,7 @@ The `CoverageEnforcer` is responsible for validating that test coverage meets pr ## Component Overview The CoverageEnforcer likely provides: + - Test coverage threshold validation - Coverage report parsing and analysis - File-by-file coverage enforcement @@ -27,6 +28,7 @@ The CoverageEnforcer likely provides: ## What Needs Testing ### Core Functionality + - [ ] Coverage threshold validation - [ ] Coverage report parsing (multiple formats) - [ ] File-level coverage analysis @@ -35,6 +37,7 @@ The CoverageEnforcer likely provides: - [ ] Function coverage validation ### Configuration Management + - [ ] Coverage threshold configuration loading - [ ] Per-file coverage requirements - [ ] Coverage exemption handling @@ -43,6 +46,7 @@ The CoverageEnforcer likely provides: - [ ] Environment-specific thresholds ### Edge Cases + - [ ] Empty or malformed coverage reports - [ ] Missing coverage data - [ ] Zero-coverage files @@ -51,6 +55,7 @@ The CoverageEnforcer likely provides: - [ ] Network timeout during report retrieval ### Integration Points + - [ ] Test runner integration - [ ] CI/CD pipeline integration - [ ] Coverage report generators @@ -59,6 +64,7 @@ The CoverageEnforcer likely provides: - [ ] Quality gate systems ### Error Scenarios + - [ ] Coverage report parsing failures - [ ] Network failures during report access - [ ] Configuration file corruption @@ -69,48 +75,51 @@ The CoverageEnforcer likely provides: ## Testing Requirements ### Unit Tests + ```javascript -describe('CoverageEnforcer', () => { - describe('coverage validation', () => { - it('should validate coverage thresholds') - it('should parse coverage reports correctly') - it('should enforce file-level coverage') - it('should handle coverage exemptions') - }) - - describe('configuration handling', () => { - it('should load coverage configuration') - it('should validate threshold settings') - it('should handle missing configuration gracefully') - it('should support dynamic threshold adjustment') - }) - - describe('error handling', () => { - it('should handle malformed coverage reports') - it('should recover from parsing failures') - it('should emit appropriate error events') - }) -}) +describe("CoverageEnforcer", () => { + describe("coverage validation", () => { + it("should validate coverage thresholds"); + it("should parse coverage reports correctly"); + it("should enforce file-level coverage"); + it("should handle 
coverage exemptions"); + }); + + describe("configuration handling", () => { + it("should load coverage configuration"); + it("should validate threshold settings"); + it("should handle missing configuration gracefully"); + it("should support dynamic threshold adjustment"); + }); + + describe("error handling", () => { + it("should handle malformed coverage reports"); + it("should recover from parsing failures"); + it("should emit appropriate error events"); + }); +}); ``` ### Integration Tests + ```javascript -describe('CoverageEnforcer Integration', () => { - it('should integrate with real coverage reports') - it('should work with CI/CD pipeline') - it('should enforce coverage in real scenarios') - it('should handle multiple report formats') -}) +describe("CoverageEnforcer Integration", () => { + it("should integrate with real coverage reports"); + it("should work with CI/CD pipeline"); + it("should enforce coverage in real scenarios"); + it("should handle multiple report formats"); +}); ``` ### Quality Gate Tests + ```javascript -describe('CoverageEnforcer Quality Gates', () => { - it('should prevent deployment below thresholds') - it('should allow deployment above thresholds') - it('should handle coverage trend analysis') - it('should provide actionable feedback') -}) +describe("CoverageEnforcer Quality Gates", () => { + it("should prevent deployment below thresholds"); + it("should allow deployment above thresholds"); + it("should handle coverage trend analysis"); + it("should provide actionable feedback"); +}); ``` ## Acceptance Criteria @@ -127,6 +136,7 @@ describe('CoverageEnforcer Quality Gates', () => { ## Priority Justification **High Priority** because: + 1. **Quality Guardian**: Prevents untested code from reaching production 2. **CI/CD Critical**: Essential for automated quality gates 3. 
**Testing Infrastructure**: Core component of testing system @@ -144,16 +154,19 @@ describe('CoverageEnforcer Quality Gates', () => { ## Testing Challenges ### Coverage Report Variety + - Multiple coverage report formats to support - Different coverage calculation methods - Varying levels of detail in reports ### Configuration Complexity + - Complex threshold configuration scenarios - Dynamic threshold adjustment logic - Per-file and per-directory rules ### Quality Gate Integration + - CI/CD pipeline integration complexity - Deployment blocking logic - Rollback scenario handling @@ -168,12 +181,14 @@ describe('CoverageEnforcer Quality Gates', () => { ## Impact Assessment ### Direct Impact + - Test coverage validation accuracy - CI/CD pipeline reliability - Quality gate effectiveness - Developer workflow integration ### Indirect Impact + - Overall code quality maintenance - Production bug prevention - Team testing discipline @@ -182,22 +197,25 @@ describe('CoverageEnforcer Quality Gates', () => { ## Special Considerations ### Meta-Testing Challenge + - Testing the test coverage enforcer - Recursive coverage validation - Bootstrap testing scenarios ### Performance Requirements + - Large coverage report processing - Real-time coverage analysis - Efficient threshold calculation ### Integration Complexity + - Multiple CI/CD platform support - Various coverage tool compatibility - Configuration system coordination --- -*"In any compromise between food and poison, it is only death that can win."* - Ayn Rand (via Data's ethical subroutines) +_"In any compromise between food and poison, it is only death that can win."_ - Ayn Rand (via Data's ethical subroutines) -The CoverageEnforcer stands guard over code quality. Like Data's ethical subroutines, it must be tested with absolute precision - there is no compromise between quality and mediocrity. \ No newline at end of file +The CoverageEnforcer stands guard over code quality. Like Data's ethical subroutines, it must be tested with absolute precision - there is no compromise between quality and mediocrity. 
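Because the CoverageEnforcer's input format is not pinned down in this issue, the following sketch assumes an Istanbul-style `coverage-summary.json` shape and a flat threshold map; the `enforceCoverage` helper is hypothetical. It shows the shape the threshold-validation tests could take once the real API is confirmed.

```javascript
// Sketch only: the CoverageEnforcer API is not documented in this issue.
import { describe, it, expect } from "vitest";

// Hypothetical enforcement helper: returns the list of threshold violations.
function enforceCoverage(summary, thresholds) {
  const violations = [];
  for (const [metric, minimum] of Object.entries(thresholds)) {
    const actual = summary.total?.[metric]?.pct;
    if (typeof actual !== "number") {
      violations.push(`${metric}: no coverage data found`);
    } else if (actual < minimum) {
      violations.push(`${metric}: ${actual}% is below the ${minimum}% threshold`);
    }
  }
  return violations;
}

describe("coverage threshold enforcement (sketch)", () => {
  const summary = {
    total: {
      lines: { pct: 92.5 },
      branches: { pct: 71.0 },
      functions: { pct: 88.0 },
    },
  };

  it("should pass when every metric meets its threshold", () => {
    expect(enforceCoverage(summary, { lines: 80, functions: 80 })).toEqual([]);
  });

  it("should report each metric that falls below its threshold", () => {
    const violations = enforceCoverage(summary, { branches: 75, statements: 80 });
    expect(violations).toHaveLength(2);
    expect(violations[0]).toMatch(/branches/);
  });
});
```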
diff --git a/issues/2025-08-30-tests/database-command-test-coverage.md b/issues/2025-08-30-tests/database-command-test-coverage.md index e2ddf56..8d4eabe 100644 --- a/issues/2025-08-30-tests/database-command-test-coverage.md +++ b/issues/2025-08-30-tests/database-command-test-coverage.md @@ -2,13 +2,13 @@ ## Core Information -| Field | Value | -|-------|-------| -| **Severity Level** | 🟠 HIGH - Architecture Foundation | -| **Location** | `src/lib/DatabaseCommand.js` | -| **Category** | Testing | -| **Brief Description** | DatabaseCommand base class lacks test coverage | -| **Impact** | All database commands inherit untested functionality | +| Field | Value | +| --------------------- | ---------------------------------------------------- | +| **Severity Level** | 🟠 HIGH - Architecture Foundation | +| **Location** | `src/lib/DatabaseCommand.js` | +| **Category** | Testing | +| **Brief Description** | DatabaseCommand base class lacks test coverage | +| **Impact** | All database commands inherit untested functionality | ## Summary @@ -17,6 +17,7 @@ The `DatabaseCommand` class serves as the base class for all database-related op ## Component Overview The DatabaseCommand class likely provides: + - Database connection management - Transaction handling and cleanup - Common database error handling patterns @@ -27,6 +28,7 @@ The DatabaseCommand class likely provides: ## What Needs Testing ### Core Functionality + - [ ] Database connection establishment - [ ] Connection string parsing and validation - [ ] Environment-specific connection handling (local vs production) @@ -35,6 +37,7 @@ The DatabaseCommand class likely provides: - [ ] Error handling and logging patterns ### Base Class Behavior + - [ ] Inheritance patterns for subclasses - [ ] Method overriding capabilities - [ ] Event emission for database operations @@ -42,6 +45,7 @@ The DatabaseCommand class likely provides: - [ ] Credential management and security ### Edge Cases + - [ ] Invalid connection strings - [ ] Database connection timeouts - [ ] Authentication failures @@ -50,6 +54,7 @@ The DatabaseCommand class likely provides: - [ ] Environment variable missing ### Integration Points + - [ ] Command base class integration - [ ] Database provider abstraction - [ ] Configuration system integration @@ -57,6 +62,7 @@ The DatabaseCommand class likely provides: - [ ] Error reporting mechanisms ### Error Scenarios + - [ ] Database server unavailable - [ ] Invalid credentials - [ ] Network connectivity issues @@ -67,48 +73,51 @@ The DatabaseCommand class likely provides: ## Testing Requirements ### Unit Tests + ```javascript -describe('DatabaseCommand', () => { - describe('connection management', () => { - it('should establish database connections') - it('should handle connection string validation') - it('should manage connection pooling') - it('should cleanup connections properly') - }) - - describe('transaction handling', () => { - it('should begin transactions') - it('should commit transactions') - it('should rollback transactions') - it('should handle nested transactions') - }) - - describe('error handling', () => { - it('should handle connection failures') - it('should handle authentication errors') - it('should emit appropriate error events') - }) -}) +describe("DatabaseCommand", () => { + describe("connection management", () => { + it("should establish database connections"); + it("should handle connection string validation"); + it("should manage connection pooling"); + it("should cleanup connections properly"); + }); + + 
describe("transaction handling", () => { + it("should begin transactions"); + it("should commit transactions"); + it("should rollback transactions"); + it("should handle nested transactions"); + }); + + describe("error handling", () => { + it("should handle connection failures"); + it("should handle authentication errors"); + it("should emit appropriate error events"); + }); +}); ``` ### Integration Tests + ```javascript -describe('DatabaseCommand Integration', () => { - it('should connect to real test database') - it('should handle transaction operations') - it('should work with subclass implementations') - it('should integrate with configuration system') -}) +describe("DatabaseCommand Integration", () => { + it("should connect to real test database"); + it("should handle transaction operations"); + it("should work with subclass implementations"); + it("should integrate with configuration system"); +}); ``` ### Inheritance Tests + ```javascript -describe('DatabaseCommand Inheritance', () => { - it('should provide correct base functionality to subclasses') - it('should allow method overriding') - it('should maintain consistent error handling') - it('should support polymorphic behavior') -}) +describe("DatabaseCommand Inheritance", () => { + it("should provide correct base functionality to subclasses"); + it("should allow method overriding"); + it("should maintain consistent error handling"); + it("should support polymorphic behavior"); +}); ``` ## Acceptance Criteria @@ -125,6 +134,7 @@ describe('DatabaseCommand Inheritance', () => { ## Priority Justification **High Priority** because: + 1. **Foundation Class**: All database commands depend on this base functionality 2. **Systematic Risk**: Bugs affect all database operations 3. **Transaction Safety**: Critical for data integrity @@ -149,11 +159,13 @@ describe('DatabaseCommand Inheritance', () => { ## Impact Assessment ### Direct Impact + - All migration commands inherit this functionality - Query commands depend on connection management - Test commands rely on database connectivity ### Indirect Impact + - Production deployment reliability - Database operation performance - Error handling consistency @@ -161,6 +173,6 @@ describe('DatabaseCommand Inheritance', () => { --- -*"The needs of the many outweigh the needs of the few."* - Spock +_"The needs of the many outweigh the needs of the few."_ - Spock -As the foundation for all database operations, DatabaseCommand must be as reliable as the Enterprise's main computer - everything depends on it working correctly. \ No newline at end of file +As the foundation for all database operations, DatabaseCommand must be as reliable as the Enterprise's main computer - everything depends on it working correctly. 
diff --git a/issues/2025-08-30-tests/migrate-generate-test-coverage.md b/issues/2025-08-30-tests/migrate-generate-test-coverage.md index 22dca49..d81f4fc 100644 --- a/issues/2025-08-30-tests/migrate-generate-test-coverage.md +++ b/issues/2025-08-30-tests/migrate-generate-test-coverage.md @@ -2,13 +2,13 @@ ## Core Information -| Field | Value | -|-------|-------| -| **Severity Level** | 🔴 CRITICAL - Show Stopper | -| **Location** | `src/commands/db/migrate/generate.js` | -| **Category** | Testing | -| **Brief Description** | Migration generation command lacks test coverage | -| **Impact** | Migration generation failures could corrupt database workflow | +| Field | Value | +| --------------------- | ------------------------------------------------------------- | +| **Severity Level** | 🔴 CRITICAL - Show Stopper | +| **Location** | `src/commands/db/migrate/generate.js` | +| **Category** | Testing | +| **Brief Description** | Migration generation command lacks test coverage | +| **Impact** | Migration generation failures could corrupt database workflow | ## Summary @@ -17,6 +17,7 @@ The migrate-generate command is responsible for creating new migration files fro ## Component Overview The migrate-generate command provides: + - Automatic migration file generation from SQL changes - Timestamp-based migration naming - Integration with existing migration tracking @@ -26,6 +27,7 @@ The migrate-generate command provides: ## What Needs Testing ### Core Functionality + - [ ] Migration file creation with proper timestamps - [ ] SQL change detection and processing - [ ] Migration naming conventions @@ -33,6 +35,7 @@ The migrate-generate command provides: - [ ] Integration with migration directory structure ### Edge Cases + - [ ] Empty SQL changes (no-op migrations) - [ ] Invalid SQL syntax handling - [ ] Duplicate migration names @@ -41,6 +44,7 @@ The migrate-generate command provides: - [ ] Permission issues with migration directory ### Integration Points + - [ ] SQL source file parsing - [ ] Migration metadata system integration - [ ] File system operations @@ -48,6 +52,7 @@ The migrate-generate command provides: - [ ] Validation pipeline integration ### Error Scenarios + - [ ] SQL syntax errors - [ ] File system permission errors - [ ] Missing dependencies @@ -57,46 +62,49 @@ The migrate-generate command provides: ## Testing Requirements ### Unit Tests + ```javascript -describe('migrate-generate', () => { - describe('migration creation', () => { - it('should generate migration with proper timestamp') - it('should create valid migration file structure') - it('should handle custom migration names') - it('should validate SQL syntax before generation') - }) - - describe('metadata handling', () => { - it('should generate correct migration metadata') - it('should track migration dependencies') - it('should validate metadata schema') - }) - - describe('error handling', () => { - it('should reject invalid SQL syntax') - it('should handle file system errors gracefully') - it('should prevent duplicate migration names') - }) -}) +describe("migrate-generate", () => { + describe("migration creation", () => { + it("should generate migration with proper timestamp"); + it("should create valid migration file structure"); + it("should handle custom migration names"); + it("should validate SQL syntax before generation"); + }); + + describe("metadata handling", () => { + it("should generate correct migration metadata"); + it("should track migration dependencies"); + it("should validate metadata schema"); + }); + + 
describe("error handling", () => { + it("should reject invalid SQL syntax"); + it("should handle file system errors gracefully"); + it("should prevent duplicate migration names"); + }); +}); ``` ### Integration Tests + ```javascript -describe('migrate-generate Integration', () => { - it('should generate migration from real SQL files') - it('should integrate with existing migration sequence') - it('should handle complex SQL operations') - it('should maintain migration directory structure') -}) +describe("migrate-generate Integration", () => { + it("should generate migration from real SQL files"); + it("should integrate with existing migration sequence"); + it("should handle complex SQL operations"); + it("should maintain migration directory structure"); +}); ``` ### Workflow Tests + ```javascript -describe('migrate-generate Workflow', () => { - it('should support complete generate->test->promote workflow') - it('should handle incremental migrations') - it('should validate against existing schema') -}) +describe("migrate-generate Workflow", () => { + it("should support complete generate->test->promote workflow"); + it("should handle incremental migrations"); + it("should validate against existing schema"); +}); ``` ## Acceptance Criteria @@ -113,6 +121,7 @@ describe('migrate-generate Workflow', () => { ## Priority Justification **Critical Priority** because: + 1. **Foundation of Migration System**: Migration generation is the starting point for all database changes 2. **Data Integrity Risk**: Broken migration generation can corrupt the entire migration chain 3. **Zero Fallback**: No alternative path if generation fails @@ -135,6 +144,6 @@ describe('migrate-generate Workflow', () => { --- -*"Logic is the beginning of wisdom, not the end."* - Spock +_"Logic is the beginning of wisdom, not the end."_ - Spock -Migration generation is where database evolution begins. Without rigorous testing, we risk logical inconsistencies that compound over time. \ No newline at end of file +Migration generation is where database evolution begins. Without rigorous testing, we risk logical inconsistencies that compound over time. 
diff --git a/issues/2025-08-30-tests/migrate-promote-test-coverage.md b/issues/2025-08-30-tests/migrate-promote-test-coverage.md index f815dcb..e64fdc2 100644 --- a/issues/2025-08-30-tests/migrate-promote-test-coverage.md +++ b/issues/2025-08-30-tests/migrate-promote-test-coverage.md @@ -2,13 +2,13 @@ ## Core Information -| Field | Value | -|-------|-------| -| **Severity Level** | 🔴 CRITICAL - Show Stopper | -| **Location** | `src/commands/db/migrate/promote.js` | -| **Category** | Testing | -| **Brief Description** | Migration promotion command lacks test coverage | -| **Impact** | Production deployment failures could cause data loss or downtime | +| Field | Value | +| --------------------- | ---------------------------------------------------------------- | +| **Severity Level** | 🔴 CRITICAL - Show Stopper | +| **Location** | `src/commands/db/migrate/promote.js` | +| **Category** | Testing | +| **Brief Description** | Migration promotion command lacks test coverage | +| **Impact** | Production deployment failures could cause data loss or downtime | ## Summary @@ -17,6 +17,7 @@ The migrate-promote command is responsible for deploying tested migrations to pr ## Component Overview The migrate-promote command provides: + - Production migration deployment - Pre-deployment validation and safety checks - Transaction-wrapped deployment for rollback safety @@ -27,6 +28,7 @@ The migrate-promote command provides: ## What Needs Testing ### Core Functionality + - [ ] Production environment detection and validation - [ ] Migration state verification before promotion - [ ] Transaction wrapping for safe deployment @@ -35,6 +37,7 @@ The migrate-promote command provides: - [ ] Success/failure reporting ### Safety Mechanisms + - [ ] `--prod` flag requirement enforcement - [ ] Double confirmation for destructive operations - [ ] Pre-deployment migration validation @@ -43,6 +46,7 @@ The migrate-promote command provides: - [ ] Backup verification before promotion ### Edge Cases + - [ ] Network failures during deployment - [ ] Partial deployment recovery - [ ] Invalid migration state @@ -51,6 +55,7 @@ The migrate-promote command provides: - [ ] Concurrent deployment detection ### Integration Points + - [ ] Migration metadata system - [ ] Database connection management - [ ] Production environment configuration @@ -58,6 +63,7 @@ The migrate-promote command provides: - [ ] Logging and monitoring systems ### Error Scenarios + - [ ] Database connection failures - [ ] Invalid production credentials - [ ] Migration conflicts @@ -68,48 +74,51 @@ The migrate-promote command provides: ## Testing Requirements ### Unit Tests + ```javascript -describe('migrate-promote', () => { - describe('safety checks', () => { - it('should require --prod flag for production') - it('should validate production environment') - it('should verify migration readiness') - it('should enforce double confirmation') - }) - - describe('deployment process', () => { - it('should wrap deployment in transaction') - it('should handle deployment success') - it('should handle deployment failure') - it('should prepare rollback triggers') - }) - - describe('error handling', () => { - it('should handle connection failures gracefully') - it('should rollback on deployment errors') - it('should report failure details') - }) -}) +describe("migrate-promote", () => { + describe("safety checks", () => { + it("should require --prod flag for production"); + it("should validate production environment"); + it("should verify migration readiness"); + it("should enforce 
double confirmation"); + }); + + describe("deployment process", () => { + it("should wrap deployment in transaction"); + it("should handle deployment success"); + it("should handle deployment failure"); + it("should prepare rollback triggers"); + }); + + describe("error handling", () => { + it("should handle connection failures gracefully"); + it("should rollback on deployment errors"); + it("should report failure details"); + }); +}); ``` ### Integration Tests + ```javascript -describe('migrate-promote Integration', () => { - it('should promote migration to production database') - it('should integrate with rollback mechanisms') - it('should handle real database transactions') - it('should work with production environment setup') -}) +describe("migrate-promote Integration", () => { + it("should promote migration to production database"); + it("should integrate with rollback mechanisms"); + it("should handle real database transactions"); + it("should work with production environment setup"); +}); ``` ### Safety Tests + ```javascript -describe('migrate-promote Safety', () => { - it('should prevent accidental production deployment') - it('should validate all safety requirements') - it('should handle network interruptions safely') - it('should maintain data integrity during failures') -}) +describe("migrate-promote Safety", () => { + it("should prevent accidental production deployment"); + it("should validate all safety requirements"); + it("should handle network interruptions safely"); + it("should maintain data integrity during failures"); +}); ``` ## Acceptance Criteria @@ -126,6 +135,7 @@ describe('migrate-promote Safety', () => { ## Priority Justification **Critical Priority** because: + 1. **Production Database Risk**: Direct impact on production data and availability 2. **No Recovery Path**: Failed promotions can cause extended downtime 3. **Financial Impact**: Production failures have direct business cost @@ -157,6 +167,6 @@ describe('migrate-promote Safety', () => { --- -*"In critical moments, men sometimes see exactly what they wish to see."* - Spock +_"In critical moments, men sometimes see exactly what they wish to see."_ - Spock -The promote command carries the weight of production responsibility. Every line of code must be tested with the precision of a starship's warp core - failure is not an option. \ No newline at end of file +The promote command carries the weight of production responsibility. Every line of code must be tested with the precision of a starship's warp core - failure is not an option. 
diff --git a/issues/2025-08-30-tests/migrate-rollback-test-coverage.md b/issues/2025-08-30-tests/migrate-rollback-test-coverage.md index 4665c3d..f58062b 100644 --- a/issues/2025-08-30-tests/migrate-rollback-test-coverage.md +++ b/issues/2025-08-30-tests/migrate-rollback-test-coverage.md @@ -2,13 +2,13 @@ ## Core Information -| Field | Value | -|-------|-------| -| **Severity Level** | 🔴 CRITICAL - Show Stopper | -| **Location** | `src/commands/db/migrate/rollback.js` | -| **Category** | Testing | -| **Brief Description** | Migration rollback command lacks test coverage | -| **Impact** | Failed rollbacks could trap production in broken state | +| Field | Value | +| --------------------- | ------------------------------------------------------ | +| **Severity Level** | 🔴 CRITICAL - Show Stopper | +| **Location** | `src/commands/db/migrate/rollback.js` | +| **Category** | Testing | +| **Brief Description** | Migration rollback command lacks test coverage | +| **Impact** | Failed rollbacks could trap production in broken state | ## Summary @@ -17,6 +17,7 @@ The migrate-rollback command is the emergency recovery mechanism for failed or p ## Component Overview The migrate-rollback command provides: + - Emergency recovery from failed migrations - Safe reversal of database schema changes - Production rollback with safety confirmations @@ -27,6 +28,7 @@ The migrate-rollback command provides: ## What Needs Testing ### Core Functionality + - [ ] Migration state analysis and rollback planning - [ ] Safe schema reversal operations - [ ] Migration dependency resolution @@ -35,6 +37,7 @@ The migrate-rollback command provides: - [ ] Rollback success validation ### Safety Mechanisms + - [ ] `--prod` flag requirement for production - [ ] Pre-rollback state validation - [ ] Transaction wrapping for atomic rollback @@ -43,6 +46,7 @@ The migrate-rollback command provides: - [ ] Post-rollback verification ### Edge Cases + - [ ] Irreversible migrations (data loss scenarios) - [ ] Circular migration dependencies - [ ] Partial rollback failures @@ -51,6 +55,7 @@ The migrate-rollback command provides: - [ ] Schema lock conflicts ### Integration Points + - [ ] Migration metadata system - [ ] Database schema analysis - [ ] Production environment handling @@ -58,6 +63,7 @@ The migrate-rollback command provides: - [ ] Backup system integration ### Error Scenarios + - [ ] Database connection failures during rollback - [ ] Transaction rollback of rollback operations - [ ] Inconsistent migration state @@ -68,48 +74,51 @@ The migrate-rollback command provides: ## Testing Requirements ### Unit Tests + ```javascript -describe('migrate-rollback', () => { - describe('rollback planning', () => { - it('should analyze migration dependencies') - it('should identify rollback sequence') - it('should detect irreversible operations') - it('should validate rollback safety') - }) - - describe('rollback execution', () => { - it('should execute rollback in proper sequence') - it('should wrap operations in transactions') - it('should handle rollback success') - it('should handle rollback failure') - }) - - describe('safety mechanisms', () => { - it('should require production confirmation') - it('should validate pre-rollback state') - it('should verify post-rollback consistency') - }) -}) +describe("migrate-rollback", () => { + describe("rollback planning", () => { + it("should analyze migration dependencies"); + it("should identify rollback sequence"); + it("should detect irreversible operations"); + it("should validate rollback safety"); 
+ }); + + describe("rollback execution", () => { + it("should execute rollback in proper sequence"); + it("should wrap operations in transactions"); + it("should handle rollback success"); + it("should handle rollback failure"); + }); + + describe("safety mechanisms", () => { + it("should require production confirmation"); + it("should validate pre-rollback state"); + it("should verify post-rollback consistency"); + }); +}); ``` ### Integration Tests + ```javascript -describe('migrate-rollback Integration', () => { - it('should rollback real migration changes') - it('should handle complex schema reversals') - it('should maintain data integrity during rollback') - it('should coordinate with migration tracking') -}) +describe("migrate-rollback Integration", () => { + it("should rollback real migration changes"); + it("should handle complex schema reversals"); + it("should maintain data integrity during rollback"); + it("should coordinate with migration tracking"); +}); ``` ### Emergency Scenarios + ```javascript -describe('migrate-rollback Emergency Scenarios', () => { - it('should handle catastrophic migration failures') - it('should recover from partial promotion failures') - it('should handle network interruptions during rollback') - it('should maintain system stability during recovery') -}) +describe("migrate-rollback Emergency Scenarios", () => { + it("should handle catastrophic migration failures"); + it("should recover from partial promotion failures"); + it("should handle network interruptions during rollback"); + it("should maintain system stability during recovery"); +}); ``` ## Acceptance Criteria @@ -126,6 +135,7 @@ describe('migrate-rollback Emergency Scenarios', () => { ## Priority Justification **Critical Priority** because: + 1. **Emergency Recovery System**: Only path to recover from migration failures 2. **Production Safety Net**: Last line of defense against production disasters 3. **Data Integrity Guardian**: Prevents permanent data loss or corruption @@ -143,16 +153,19 @@ describe('migrate-rollback Emergency Scenarios', () => { ## Testing Challenges ### Complex State Management + - Multiple migration states to track and reverse - Dependency chain resolution complexity - State consistency across rollback operations ### Data Safety Requirements + - Ensuring no data loss during rollback - Handling irreversible operations safely - Maintaining referential integrity ### Emergency Response Testing + - Simulating catastrophic failure scenarios - Testing under extreme time pressure - Validating emergency procedures @@ -174,6 +187,6 @@ describe('migrate-rollback Emergency Scenarios', () => { --- -*"There are always possibilities."* - Spock +_"There are always possibilities."_ - Spock -When all else fails, rollback is our final option. It must be tested with the precision of an emergency transporter operation - lives may depend on it working perfectly the first time. \ No newline at end of file +When all else fails, rollback is our final option. It must be tested with the precision of an emergency transporter operation - lives may depend on it working perfectly the first time. 
diff --git a/issues/2025-08-30-tests/migration-orchestrator-test-coverage.md b/issues/2025-08-30-tests/migration-orchestrator-test-coverage.md index 5951599..4f23ee2 100644 --- a/issues/2025-08-30-tests/migration-orchestrator-test-coverage.md +++ b/issues/2025-08-30-tests/migration-orchestrator-test-coverage.md @@ -2,13 +2,13 @@ ## Core Information -| Field | Value | -|-------|-------| -| **Severity Level** | 🔴 CRITICAL - Core Engine | -| **Location** | `src/lib/migration/MigrationOrchestrator.js` | -| **Category** | Testing | -| **Brief Description** | Migration orchestration engine lacks test coverage | -| **Impact** | Migration workflow failures could corrupt entire database evolution | +| Field | Value | +| --------------------- | ------------------------------------------------------------------- | +| **Severity Level** | 🔴 CRITICAL - Core Engine | +| **Location** | `src/lib/migration/MigrationOrchestrator.js` | +| **Category** | Testing | +| **Brief Description** | Migration orchestration engine lacks test coverage | +| **Impact** | Migration workflow failures could corrupt entire database evolution | ## Summary @@ -17,6 +17,7 @@ The `MigrationOrchestrator` is the central coordination engine that manages the ## Component Overview The MigrationOrchestrator likely provides: + - Migration workflow coordination and sequencing - State management across migration phases - Dependency resolution between migrations @@ -27,6 +28,7 @@ The MigrationOrchestrator likely provides: ## What Needs Testing ### Core Orchestration + - [ ] Migration workflow state machine - [ ] Phase transition management (generate → test → promote) - [ ] Dependency graph resolution @@ -35,6 +37,7 @@ The MigrationOrchestrator likely provides: - [ ] Workflow rollback and error recovery ### Integration Coordination + - [ ] MigrationCompiler integration - [ ] Test execution coordination - [ ] Deployment orchestration @@ -43,6 +46,7 @@ The MigrationOrchestrator likely provides: - [ ] Database transaction coordination ### Edge Cases + - [ ] Interrupted workflow recovery - [ ] Circular dependency detection - [ ] Resource contention handling @@ -51,6 +55,7 @@ The MigrationOrchestrator likely provides: - [ ] External dependency failures ### State Management + - [ ] Migration state transitions - [ ] Persistent state storage - [ ] State corruption detection @@ -59,6 +64,7 @@ The MigrationOrchestrator likely provides: - [ ] Atomic state updates ### Error Scenarios + - [ ] Database connection failures during orchestration - [ ] External service unavailability - [ ] Resource exhaustion scenarios @@ -69,48 +75,51 @@ The MigrationOrchestrator likely provides: ## Testing Requirements ### Unit Tests + ```javascript -describe('MigrationOrchestrator', () => { - describe('workflow coordination', () => { - it('should coordinate complete migration workflow') - it('should manage state transitions properly') - it('should handle dependency resolution') - it('should coordinate parallel operations') - }) - - describe('state management', () => { - it('should persist workflow state') - it('should recover from interruptions') - it('should handle state corruption') - it('should synchronize concurrent access') - }) - - describe('error orchestration', () => { - it('should coordinate rollback operations') - it('should handle partial failures') - it('should recover from external failures') - }) -}) +describe("MigrationOrchestrator", () => { + describe("workflow coordination", () => { + it("should coordinate complete migration workflow"); + it("should 
manage state transitions properly"); + it("should handle dependency resolution"); + it("should coordinate parallel operations"); + }); + + describe("state management", () => { + it("should persist workflow state"); + it("should recover from interruptions"); + it("should handle state corruption"); + it("should synchronize concurrent access"); + }); + + describe("error orchestration", () => { + it("should coordinate rollback operations"); + it("should handle partial failures"); + it("should recover from external failures"); + }); +}); ``` ### Integration Tests + ```javascript -describe('MigrationOrchestrator Integration', () => { - it('should orchestrate full migration lifecycle') - it('should coordinate with database systems') - it('should integrate with testing systems') - it('should handle real workflow scenarios') -}) +describe("MigrationOrchestrator Integration", () => { + it("should orchestrate full migration lifecycle"); + it("should coordinate with database systems"); + it("should integrate with testing systems"); + it("should handle real workflow scenarios"); +}); ``` ### Workflow Tests + ```javascript -describe('MigrationOrchestrator Workflows', () => { - it('should handle complex multi-migration workflows') - it('should coordinate emergency rollback workflows') - it('should manage production deployment workflows') - it('should handle development cycle workflows') -}) +describe("MigrationOrchestrator Workflows", () => { + it("should handle complex multi-migration workflows"); + it("should coordinate emergency rollback workflows"); + it("should manage production deployment workflows"); + it("should handle development cycle workflows"); +}); ``` ## Acceptance Criteria @@ -127,6 +136,7 @@ describe('MigrationOrchestrator Workflows', () => { ## Priority Justification **Critical Priority** because: + 1. **Central Coordination Engine**: Orchestrates all migration operations 2. **State Management Authority**: Controls migration state consistency 3. **Failure Recovery System**: Manages error recovery across all phases @@ -144,16 +154,19 @@ describe('MigrationOrchestrator Workflows', () => { ## Testing Challenges ### Complex State Management + - Multiple workflow states to coordinate - Persistent state across process boundaries - State corruption detection and recovery ### Concurrency Control + - Parallel migration handling - Resource contention management - Deadlock prevention and detection ### Error Recovery Complexity + - Multi-phase error recovery - Partial state recovery scenarios - External dependency failure handling @@ -168,22 +181,25 @@ describe('MigrationOrchestrator Workflows', () => { ## Special Considerations ### State Persistence + - Must handle process interruption gracefully - State recovery across system restarts - Atomic state update requirements ### Concurrency Management + - Thread-safe operation requirements - Resource locking and coordination - Deadlock prevention strategies ### Error Recovery + - Complex multi-phase rollback scenarios - External service failure handling - Graceful degradation under stress --- -*"The first duty of every Starfleet officer is to the truth."* - Picard +_"The first duty of every Starfleet officer is to the truth."_ - Picard -The MigrationOrchestrator must orchestrate truth across the entire migration workflow. Like Data coordinating multiple bridge systems simultaneously, every interaction must be precise and reliable. \ No newline at end of file +The MigrationOrchestrator must orchestrate truth across the entire migration workflow. 
Like Data coordinating multiple bridge systems simultaneously, every interaction must be precise and reliable. diff --git a/issues/2025-08-30-tests/supabase-command-test-coverage.md b/issues/2025-08-30-tests/supabase-command-test-coverage.md index b096a0b..76ff34a 100644 --- a/issues/2025-08-30-tests/supabase-command-test-coverage.md +++ b/issues/2025-08-30-tests/supabase-command-test-coverage.md @@ -2,13 +2,13 @@ ## Core Information -| Field | Value | -|-------|-------| -| **Severity Level** | 🟠 HIGH - Platform Integration | -| **Location** | `src/lib/SupabaseCommand.js` | -| **Category** | Testing | -| **Brief Description** | SupabaseCommand base class lacks test coverage | -| **Impact** | All Supabase API operations inherit untested functionality | +| Field | Value | +| --------------------- | ---------------------------------------------------------- | +| **Severity Level** | 🟠 HIGH - Platform Integration | +| **Location** | `src/lib/SupabaseCommand.js` | +| **Category** | Testing | +| **Brief Description** | SupabaseCommand base class lacks test coverage | +| **Impact** | All Supabase API operations inherit untested functionality | ## Summary @@ -17,6 +17,7 @@ The `SupabaseCommand` class serves as the base class for all Supabase-specific o ## Component Overview The SupabaseCommand class likely provides: + - Supabase client initialization and configuration - Authentication token management (service role, anon key) - Environment-specific Supabase URL handling @@ -27,6 +28,7 @@ The SupabaseCommand class likely provides: ## What Needs Testing ### Core Functionality + - [ ] Supabase client initialization - [ ] Authentication token validation and management - [ ] Environment URL configuration (local vs production) @@ -35,6 +37,7 @@ The SupabaseCommand class likely provides: - [ ] Client cleanup and resource management ### Base Class Behavior + - [ ] Inheritance patterns for subclasses - [ ] Method overriding capabilities - [ ] Event emission for Supabase operations @@ -42,6 +45,7 @@ The SupabaseCommand class likely provides: - [ ] Credential validation and security ### Edge Cases + - [ ] Invalid Supabase URLs - [ ] Authentication token expiration - [ ] Network connectivity issues @@ -50,6 +54,7 @@ The SupabaseCommand class likely provides: - [ ] Invalid service configuration ### Integration Points + - [ ] Command base class integration - [ ] Supabase SDK integration - [ ] Configuration system integration @@ -57,6 +62,7 @@ The SupabaseCommand class likely provides: - [ ] Error reporting mechanisms ### Error Scenarios + - [ ] Supabase service unavailable - [ ] Invalid authentication credentials - [ ] Network timeout scenarios @@ -67,49 +73,52 @@ The SupabaseCommand class likely provides: ## Testing Requirements ### Unit Tests + ```javascript -describe('SupabaseCommand', () => { - describe('client initialization', () => { - it('should initialize Supabase client') - it('should handle environment configuration') - it('should validate authentication tokens') - it('should setup correct API endpoints') - }) - - describe('authentication handling', () => { - it('should manage service role authentication') - it('should handle anonymous key operations') - it('should validate token expiration') - it('should refresh tokens when needed') - }) - - describe('error handling', () => { - it('should handle API authentication errors') - it('should handle network failures') - it('should emit appropriate error events') - it('should handle rate limiting') - }) -}) +describe("SupabaseCommand", () => { + 
describe("client initialization", () => { + it("should initialize Supabase client"); + it("should handle environment configuration"); + it("should validate authentication tokens"); + it("should setup correct API endpoints"); + }); + + describe("authentication handling", () => { + it("should manage service role authentication"); + it("should handle anonymous key operations"); + it("should validate token expiration"); + it("should refresh tokens when needed"); + }); + + describe("error handling", () => { + it("should handle API authentication errors"); + it("should handle network failures"); + it("should emit appropriate error events"); + it("should handle rate limiting"); + }); +}); ``` ### Integration Tests + ```javascript -describe('SupabaseCommand Integration', () => { - it('should connect to real Supabase instance') - it('should authenticate with valid credentials') - it('should handle API operations') - it('should work with subclass implementations') -}) +describe("SupabaseCommand Integration", () => { + it("should connect to real Supabase instance"); + it("should authenticate with valid credentials"); + it("should handle API operations"); + it("should work with subclass implementations"); +}); ``` ### Environment Tests + ```javascript -describe('SupabaseCommand Environment Handling', () => { - it('should handle local development environment') - it('should handle production environment') - it('should validate environment switching') - it('should manage credential isolation') -}) +describe("SupabaseCommand Environment Handling", () => { + it("should handle local development environment"); + it("should handle production environment"); + it("should validate environment switching"); + it("should manage credential isolation"); +}); ``` ## Acceptance Criteria @@ -126,6 +135,7 @@ describe('SupabaseCommand Environment Handling', () => { ## Priority Justification **High Priority** because: + 1. **Platform Foundation**: All Supabase operations depend on this base functionality 2. **Production API Access**: Essential for production deployment operations 3. **Edge Function Integration**: Critical for Edge Function deployment and testing @@ -150,12 +160,14 @@ describe('SupabaseCommand Environment Handling', () => { ## Impact Assessment ### Direct Impact + - Edge Function deployment commands - Database migration commands using Supabase API - Production environment operations - Authentication and authorization systems ### Indirect Impact + - Production deployment reliability - API operation performance - Error handling consistency @@ -164,22 +176,25 @@ describe('SupabaseCommand Environment Handling', () => { ## Special Considerations ### Authentication Security + - Must protect service role keys during testing - Need safe credential mocking strategies - Should validate token exposure prevention ### API Integration + - Real API testing vs mocking balance - Rate limiting consideration in tests - Network failure simulation requirements ### Environment Management + - Safe production environment isolation - Local development environment setup - Configuration validation across environments --- -*"Change is the essential process of all existence."* - Spock +_"Change is the essential process of all existence."_ - Spock -SupabaseCommand adapts D.A.T.A. to the cloud platform reality. Like the Enterprise interfacing with alien technology, it must be tested thoroughly to ensure reliable communication. \ No newline at end of file +SupabaseCommand adapts D.A.T.A. to the cloud platform reality. 
Like the Enterprise interfacing with alien technology, it must be tested thoroughly to ensure reliable communication. diff --git a/issues/2025-08-30/ast-operation-validation-missing.md b/issues/2025-08-30/ast-operation-validation-missing.md index 0704f44..f9c9e40 100644 --- a/issues/2025-08-30/ast-operation-validation-missing.md +++ b/issues/2025-08-30/ast-operation-validation-missing.md @@ -1,6 +1,6 @@ # GitHub Issue Format -> [!success] __This issuse has been fixed__ +> [!success] **This issuse has been fixed** ## Issue Title @@ -8,13 +8,13 @@ Add comprehensive AST operation validation in TestRequirementAnalyzer ### Core Information -| Field | Why It Matters | -|-------|---------------| -| **Severity Level** | CRITICAL - Silent failures could miss test requirements | -| **Location** | `src/lib/testing/TestRequirementAnalyzer.js` (throughout 4,425 lines) | -| **Category** | Bug/Architecture | -| **Brief Description** | Missing validation of operation structure before processing | -| **Impact** | Malformed AST operations could cause silent failures in requirement generation | +| Field | Why It Matters | +| --------------------- | ------------------------------------------------------------------------------ | +| **Severity Level** | CRITICAL - Silent failures could miss test requirements | +| **Location** | `src/lib/testing/TestRequirementAnalyzer.js` (throughout 4,425 lines) | +| **Category** | Bug/Architecture | +| **Brief Description** | Missing validation of operation structure before processing | +| **Impact** | Malformed AST operations could cause silent failures in requirement generation | ## Summary @@ -32,13 +32,13 @@ The analyzer assumes all operations are well-formed and have expected properties // Current code assumes operation structure determineTestRequirements(operation, context = {}) { const requirements = []; - + // No validation that operation.sql exists or is a string! const sql = operation.sql.toLowerCase(); - + // No validation that operation.type exists const opType = operation.type; - + // Could throw or return empty requirements silently } ``` @@ -52,15 +52,15 @@ validateOperation(operation) { if (!operation || typeof operation !== 'object') { throw new Error('Invalid operation: must be an object'); } - + if (!operation.sql || typeof operation.sql !== 'string') { throw new Error('Invalid operation: missing or invalid SQL'); } - + if (!operation.type || typeof operation.type !== 'string') { throw new Error('Invalid operation: missing or invalid type'); } - + // Validate expected operation types const validTypes = ['SAFE', 'WARNING', 'DESTRUCTIVE']; if (!validTypes.includes(operation.type)) { @@ -69,7 +69,7 @@ validateOperation(operation) { operation }); } - + return true; } @@ -92,6 +92,6 @@ determineTestRequirements(operation, context = {}) { - Are there version differences in operation structure? - How do operation structures differ between PostgreSQL versions? -___ +--- _"There are still many human emotions I do not fully comprehend. However, I am learning more about them every day." 
- Data, Star Trek: The Next Generation, "Data's Day"_ diff --git a/issues/2025-08-30/async-await-consistency.md b/issues/2025-08-30/async-await-consistency.md index 04099a4..ef551a8 100644 --- a/issues/2025-08-30/async-await-consistency.md +++ b/issues/2025-08-30/async-await-consistency.md @@ -8,13 +8,13 @@ Fix inconsistent async/await and promise patterns ### Core Information -| Field | Why It Matters | -|-------|---------------| -| **Severity Level** | MEDIUM - Could cause subtle bugs | -| **Location** | Various files mixing promises and async/await | -| **Category** | Style/Bug | -| **Brief Description** | Inconsistent use of promises vs async/await patterns | -| **Impact** | Potential for unhandled promise rejections and race conditions | +| Field | Why It Matters | +| --------------------- | -------------------------------------------------------------- | +| **Severity Level** | MEDIUM - Could cause subtle bugs | +| **Location** | Various files mixing promises and async/await | +| **Category** | Style/Bug | +| **Brief Description** | Inconsistent use of promises vs async/await patterns | +| **Impact** | Potential for unhandled promise rejections and race conditions | ## Summary @@ -112,7 +112,7 @@ async processFiles(files) { } }) ); - + // Now we can safely use results this.results.push(...results.filter(r => r.success)); return results; @@ -121,7 +121,7 @@ async processFiles(files) { // ✅ Event emitter with async handling async emitAsync(event, data) { const handlers = this.listeners(event); - + // Wait for all async handlers await Promise.all( handlers.map(async (handler) => { @@ -159,6 +159,6 @@ async emitAsync(event, data) { - Are there memory leaks from unresolved promises? - Could async pattern changes break existing error handling? -___ +--- _"The complexity of temporal mechanics is such that even the smallest alteration can have profound consequences." 
- Data, Star Trek: The Next Generation, "Time's Arrow"_ diff --git a/issues/2025-08-30/configuration-schema-incomplete.md b/issues/2025-08-30/configuration-schema-incomplete.md index db2d72a..4bb6a11 100644 --- a/issues/2025-08-30/configuration-schema-incomplete.md +++ b/issues/2025-08-30/configuration-schema-incomplete.md @@ -3,17 +3,18 @@ > [!warning]- _Not Started_ ## Issue Title + Add coverage enforcement configuration to datarc.schema.json ### Core Information -| Field | Why It Matters | -|-------|---------------| -| **Severity Level** | HIGH - Runtime configuration errors possible | -| **Location** | `datarc.schema.json` | -| **Category** | Architecture/Configuration | -| **Brief Description** | Missing coverage-specific configuration properties in schema | -| **Impact** | Runtime errors and invalid configurations not caught at startup | +| Field | Why It Matters | +| --------------------- | --------------------------------------------------------------- | +| **Severity Level** | HIGH - Runtime configuration errors possible | +| **Location** | `datarc.schema.json` | +| **Category** | Architecture/Configuration | +| **Brief Description** | Missing coverage-specific configuration properties in schema | +| **Impact** | Runtime errors and invalid configurations not caught at startup | ## Summary @@ -28,6 +29,7 @@ When the test coverage enforcement system was added, the configuration schema wa ### Example Current `.datarc.json` might contain: + ```json { "test": { @@ -43,14 +45,15 @@ Current `.datarc.json` might contain: ``` But the schema doesn't validate these properties, allowing invalid values like: + ```json { "test": { "coverage": { - "enforcement_level": "super-strict", // Invalid enum value! + "enforcement_level": "super-strict", // Invalid enum value! "thresholds": { - "tables": "ninety", // Should be number! - "functions": 150 // Should be 0-100! + "tables": "ninety", // Should be number! + "functions": 150 // Should be 0-100! } } } @@ -171,6 +174,6 @@ Extend the schema with comprehensive coverage configuration: - Are there legacy configurations that need migration? - Could schema validation performance be impacted? -___ +--- -_"The complexity of the humanoid brain is not easily replicated. The positronic matrix of my neural net is quite intricate." - Data, Star Trek: The Next Generation, "The Measure of a Man"_ \ No newline at end of file +_"The complexity of the humanoid brain is not easily replicated. The positronic matrix of my neural net is quite intricate." 
- Data, Star Trek: The Next Generation, "The Measure of a Man"_ diff --git a/issues/2025-08-30/coverage-calculation-accuracy.md b/issues/2025-08-30/coverage-calculation-accuracy.md index 9a554fa..f2d66a6 100644 --- a/issues/2025-08-30/coverage-calculation-accuracy.md +++ b/issues/2025-08-30/coverage-calculation-accuracy.md @@ -1,6 +1,6 @@ # GitHub Issue Format -> [!success] __This issuse has been fixed__ +> [!success] **This issuse has been fixed** ## Issue Title @@ -8,13 +8,13 @@ Fix coverage key generation edge cases in CoverageEnforcer ### Core Information -| Field | Why It Matters | -|-------|---------------| -| **Severity Level** | HIGH - Could cause false positives/negatives | -| **Location** | `src/lib/testing/CoverageEnforcer.js` lines 217-255 | -| **Category** | Bug | -| **Brief Description** | Coverage key generation doesn't handle edge cases properly | -| **Impact** | False positives/negatives in coverage detection affecting deployment decisions | +| Field | Why It Matters | +| --------------------- | ------------------------------------------------------------------------------ | +| **Severity Level** | HIGH - Could cause false positives/negatives | +| **Location** | `src/lib/testing/CoverageEnforcer.js` lines 217-255 | +| **Category** | Bug | +| **Brief Description** | Coverage key generation doesn't handle edge cases properly | +| **Impact** | False positives/negatives in coverage detection affecting deployment decisions | ## Summary @@ -50,16 +50,16 @@ Problem scenarios: ```javascript // These should match but won't: -item1 = { schema: null, name: 'users', type: 'table' } -item2 = { schema: 'public', name: 'users', type: 'table' } -key1 = 'null.users.table' -key2 = 'public.users.table' // Different keys! +item1 = { schema: null, name: "users", type: "table" }; +item2 = { schema: "public", name: "users", type: "table" }; +key1 = "null.users.table"; +key2 = "public.users.table"; // Different keys! // These shouldn't match but might: -item3 = { schema: 'public', name: 'user.posts', type: 'table' } -item4 = { schema: 'public.user', name: 'posts', type: 'table' } -key3 = 'public.user.posts.table' -key4 = 'public.user.posts.table' // Same key! +item3 = { schema: "public", name: "user.posts", type: "table" }; +item4 = { schema: "public.user", name: "posts", type: "table" }; +key3 = "public.user.posts.table"; +key4 = "public.user.posts.table"; // Same key! ``` ## Proposed Solution @@ -70,19 +70,19 @@ Implement robust key normalization with proper escaping: generateCoverageKey(item) { // Normalize schema (default to 'public' per PostgreSQL convention) const schema = (item.schema || 'public').toLowerCase().trim(); - + // Normalize name and type const name = item.name.toLowerCase().trim(); const type = item.type.toLowerCase().trim(); - + // Use separator that won't appear in identifiers const separator = '::'; - + // Escape any separator sequences in the components const escapedSchema = schema.replace(/::/g, '\\:\\:'); const escapedName = name.replace(/::/g, '\\:\\:'); const escapedType = type.replace(/::/g, '\\:\\:'); - + return `${escapedSchema}${separator}${escapedName}${separator}${escapedType}`; } @@ -96,7 +96,7 @@ compareCoverage(requirements, coverage) { } coverageLookup.get(key).push(item); }); - + // ... rest of comparison logic } ``` @@ -113,6 +113,6 @@ compareCoverage(requirements, coverage) { - How do different collations affect string comparison? - Could Unicode characters in identifiers cause issues? 
-___ +--- _"One of the most difficult concepts to accept is the existence of randomness. The human mind seeks patterns, even where none exist." - Data, Star Trek: The Next Generation, "Peak Performance"_ diff --git a/issues/2025-08-30/deployment-blocking-vulnerability.md b/issues/2025-08-30/deployment-blocking-vulnerability.md index 7bfa744..8c0dc3b 100644 --- a/issues/2025-08-30/deployment-blocking-vulnerability.md +++ b/issues/2025-08-30/deployment-blocking-vulnerability.md @@ -1,19 +1,20 @@ # GitHub Issue Format -> [!success] __This issuse has been fixed__ +> [!success] **This issuse has been fixed** ## Issue Title + Fix deployment blocking vulnerability in test coverage enforcement ### Core Information -| Field | Why It Matters | -|-------|---------------| -| **Severity Level** | CRITICAL - Allows untested code to reach production | -| **Location** | `src/lib/migration/MigrationOrchestrator.js` lines 161-168, 357-368 | -| **Category** | Security/Bug | +| Field | Why It Matters | +| --------------------- | ------------------------------------------------------------------------ | +| **Severity Level** | CRITICAL - Allows untested code to reach production | +| **Location** | `src/lib/migration/MigrationOrchestrator.js` lines 161-168, 357-368 | +| **Category** | Security/Bug | | **Brief Description** | Coverage check exceptions allow deployment to continue in non-production | -| **Impact** | Untested database changes could be deployed when coverage system fails | +| **Impact** | Untested database changes could be deployed when coverage system fails | ## Summary @@ -31,11 +32,11 @@ The error handling logic in `checkTestCoverage` is too permissive. When a techni // Current problematic code } catch (error) { this.error('Test coverage check failed', error); - + if (this.isProd) { throw error; } - + // This allows deployment even on technical failure! this.warn('Continuing despite coverage check failure (non-production)'); return { passed: true, bypassReason: 'Coverage check failed technically' }; @@ -49,17 +50,17 @@ Strengthen the error handling to require explicit bypass even for technical fail ```javascript } catch (error) { this.error('Test coverage check failed', error); - + // Always block on technical failures in production if (this.isProd) { throw error; } - + // In non-production, require explicit bypass if (!this.coverageBypassReason) { throw new Error('Coverage check failed technically. Use --coverage-bypass-reason to continue.'); } - + this.warn('Continuing despite coverage check failure (non-production with bypass)'); return { passed: true, bypassReason: this.coverageBypassReason }; } @@ -77,6 +78,6 @@ Strengthen the error handling to require explicit bypass even for technical fail - How does this interact with the rollback mechanism? - Could network failures during coverage checking trigger this bypass? -___ +--- -_"In the game of poker, there is a moment when all the possibilities are open, when the player must rely on his analysis of the odds. This is such a moment." - Data, Star Trek: The Next Generation, "The Measure of a Man"_ \ No newline at end of file +_"In the game of poker, there is a moment when all the possibilities are open, when the player must rely on his analysis of the odds. This is such a moment." 
- Data, Star Trek: The Next Generation, "The Measure of a Man"_ diff --git a/issues/2025-08-30/error-types-standardization.md b/issues/2025-08-30/error-types-standardization.md index 2d43018..7d33eca 100644 --- a/issues/2025-08-30/error-types-standardization.md +++ b/issues/2025-08-30/error-types-standardization.md @@ -8,13 +8,13 @@ Standardize error types across test coverage modules ### Core Information -| Field | Why It Matters | -|-------|---------------| -| **Severity Level** | LOW - Code quality and maintainability | -| **Location** | All files in `src/lib/testing/` | -| **Category** | Architecture/Style | +| Field | Why It Matters | +| --------------------- | ------------------------------------------------------ | +| **Severity Level** | LOW - Code quality and maintainability | +| **Location** | All files in `src/lib/testing/` | +| **Category** | Architecture/Style | | **Brief Description** | Inconsistent error handling with generic Error objects | -| **Impact** | Harder to handle specific error cases programmatically | +| **Impact** | Harder to handle specific error cases programmatically | ## Summary @@ -32,13 +32,13 @@ Current inconsistent approach: ```javascript // In TestRequirementAnalyzer -throw new Error('Invalid operation structure'); +throw new Error("Invalid operation structure"); -// In CoverageEnforcer -throw new Error('Coverage requirements not met'); +// In CoverageEnforcer +throw new Error("Coverage requirements not met"); // In pgTAPTestScanner -throw new Error('Failed to parse test file'); +throw new Error("Failed to parse test file"); ``` ## Proposed Solution @@ -51,7 +51,7 @@ Create a hierarchy of specific error types: class TestCoverageError extends Error { constructor(message, code, details = {}) { super(message); - this.name = 'TestCoverageError'; + this.name = "TestCoverageError"; this.code = code; this.details = details; } @@ -59,29 +59,29 @@ class TestCoverageError extends Error { class ValidationError extends TestCoverageError { constructor(message, details) { - super(message, 'VALIDATION_ERROR', details); - this.name = 'ValidationError'; + super(message, "VALIDATION_ERROR", details); + this.name = "ValidationError"; } } class CoverageEnforcementError extends TestCoverageError { constructor(message, gaps, percentage) { - super(message, 'COVERAGE_ENFORCEMENT', { gaps, percentage }); - this.name = 'CoverageEnforcementError'; + super(message, "COVERAGE_ENFORCEMENT", { gaps, percentage }); + this.name = "CoverageEnforcementError"; } } class TemplateGenerationError extends TestCoverageError { constructor(message, template, cause) { - super(message, 'TEMPLATE_GENERATION', { template, cause }); - this.name = 'TemplateGenerationError'; + super(message, "TEMPLATE_GENERATION", { template, cause }); + this.name = "TemplateGenerationError"; } } class ParseError extends TestCoverageError { constructor(message, file, line) { - super(message, 'PARSE_ERROR', { file, line }); - this.name = 'ParseError'; + super(message, "PARSE_ERROR", { file, line }); + this.name = "ParseError"; } } @@ -90,7 +90,7 @@ module.exports = { ValidationError, CoverageEnforcementError, TemplateGenerationError, - ParseError + ParseError, }; ``` @@ -107,7 +107,7 @@ try { console.log(`Gaps: ${error.details.gaps.length}`); } else if (error instanceof ValidationError) { // Handle validation differently - console.log('Invalid input:', error.details); + console.log("Invalid input:", error.details); } else { // Unknown error throw error; @@ -127,6 +127,6 @@ try { - Will error instanceof checks work 
across module boundaries? - How do custom errors affect error reporting tools? -___ +--- _"Errors are a part of the learning process. Without them, we would never improve." - Data, Star Trek: The Next Generation, "In Theory"_ diff --git a/issues/2025-08-30/integration-testing-missing.md b/issues/2025-08-30/integration-testing-missing.md index 36704df..7fc3f5d 100644 --- a/issues/2025-08-30/integration-testing-missing.md +++ b/issues/2025-08-30/integration-testing-missing.md @@ -8,13 +8,13 @@ Add comprehensive end-to-end integration tests for coverage enforcement ### Core Information -| Field | Why It Matters | -|-------|---------------| -| **Severity Level** | HIGH - Quality assurance gap | -| **Location** | Missing test files for integration scenarios | -| **Category** | Testing | +| Field | Why It Matters | +| --------------------- | ---------------------------------------------------- | +| **Severity Level** | HIGH - Quality assurance gap | +| **Location** | Missing test files for integration scenarios | +| **Category** | Testing | | **Brief Description** | No comprehensive integration tests for full workflow | -| **Impact** | Integration bugs only discovered in production | +| **Impact** | Integration bugs only discovered in production | ## Summary @@ -38,7 +38,7 @@ graph LR D --> E{Decision} E -->|Pass| F[Deploy] E -->|Fail| G[Block] - + style A fill:#f9f,stroke:#333,stroke-width:2px style B fill:#f9f,stroke:#333,stroke-width:2px style C fill:#f9f,stroke:#333,stroke-width:2px @@ -57,73 +57,89 @@ Create comprehensive integration test suite: ```javascript // test/integration/coverage-enforcement.test.js -const { describe, it, expect, beforeEach, afterEach } = require('vitest'); -const MigrationOrchestrator = require('../../src/lib/migration/MigrationOrchestrator'); -const TestCoverageOrchestrator = require('../../src/lib/testing/TestCoverageOrchestrator'); -const fs = require('fs').promises; -const path = require('path'); +const { describe, it, expect, beforeEach, afterEach } = require("vitest"); +const MigrationOrchestrator = require("../../src/lib/migration/MigrationOrchestrator"); +const TestCoverageOrchestrator = require("../../src/lib/testing/TestCoverageOrchestrator"); +const fs = require("fs").promises; +const path = require("path"); -describe('Coverage Enforcement Integration', () => { +describe("Coverage Enforcement Integration", () => { let tempDir; let orchestrator; - + beforeEach(async () => { // Create temp directory structure tempDir = await createTempProject(); - + orchestrator = new MigrationOrchestrator({ - sqlDir: path.join(tempDir, 'sql'), - testsDir: path.join(tempDir, 'tests'), - migrationsDir: path.join(tempDir, 'migrations'), - skipTests: true, // Skip unit tests, focus on coverage - coverageEnforcementLevel: 'strict' + sqlDir: path.join(tempDir, "sql"), + testsDir: path.join(tempDir, "tests"), + migrationsDir: path.join(tempDir, "migrations"), + skipTests: true, // Skip unit tests, focus on coverage + coverageEnforcementLevel: "strict", }); }); - + afterEach(async () => { await cleanupTempProject(tempDir); }); - - describe('Deployment Blocking', () => { - it('should block deployment when coverage is insufficient', async () => { + + describe("Deployment Blocking", () => { + it("should block deployment when coverage is insufficient", async () => { // Setup: Create SQL changes without tests - await createSQLFile(tempDir, 'tables.sql', ` + await createSQLFile( + tempDir, + "tables.sql", + ` CREATE TABLE users ( id UUID PRIMARY KEY, email TEXT NOT NULL ); - `); - + `, 
+ ); + // Act & Assert: Should throw coverage error - await expect(orchestrator.performExecute()) - .rejects.toThrow(/Test coverage requirements not met/); + await expect(orchestrator.performExecute()).rejects.toThrow( + /Test coverage requirements not met/, + ); }); - - it('should allow deployment when coverage is sufficient', async () => { + + it("should allow deployment when coverage is sufficient", async () => { // Setup: Create SQL changes with tests - await createSQLFile(tempDir, 'tables.sql', ` + await createSQLFile( + tempDir, + "tables.sql", + ` CREATE TABLE users (id UUID PRIMARY KEY); - `); - - await createTestFile(tempDir, 'users.test.sql', ` + `, + ); + + await createTestFile( + tempDir, + "users.test.sql", + ` SELECT has_table('users'); SELECT has_column('users', 'id'); SELECT col_type_is('users', 'id', 'uuid'); - `); - + `, + ); + // Mock the actual deployment orchestrator.executeMigration = jest.fn().mockResolvedValue(true); - + // Act & Assert: Should not throw const result = await orchestrator.performExecute(); expect(result.success).toBe(true); }); }); - - describe('Coverage Calculation', () => { - it('should correctly calculate coverage for complex schemas', async () => { + + describe("Coverage Calculation", () => { + it("should correctly calculate coverage for complex schemas", async () => { // Setup: Complex schema with partial coverage - await createSQLFile(tempDir, 'complex.sql', ` + await createSQLFile( + tempDir, + "complex.sql", + ` CREATE TABLE posts ( id SERIAL PRIMARY KEY, user_id UUID REFERENCES users(id), @@ -132,86 +148,95 @@ describe('Coverage Enforcement Integration', () => { ); CREATE INDEX idx_posts_user ON posts(user_id); CREATE POLICY posts_policy ON posts FOR SELECT USING (true); - `); - - await createTestFile(tempDir, 'posts.test.sql', ` + `, + ); + + await createTestFile( + tempDir, + "posts.test.sql", + ` SELECT has_table('posts'); SELECT has_column('posts', 'id'); -- Missing: foreign key, index, and policy tests - `); - + `, + ); + const coverageOrchestrator = new TestCoverageOrchestrator({ - testsDir: path.join(tempDir, 'tests') + testsDir: path.join(tempDir, "tests"), }); - - const operations = [ - { sql: 'CREATE TABLE posts...', type: 'SAFE' } - ]; - + + const operations = [{ sql: "CREATE TABLE posts...", type: "SAFE" }]; + const result = await coverageOrchestrator.checkCoverage(operations); - + expect(result.coveragePercentage).toBeLessThan(50); expect(result.gaps).toContainEqual( expect.objectContaining({ requirement: expect.objectContaining({ - type: 'index', - name: 'idx_posts_user' - }) - }) + type: "index", + name: "idx_posts_user", + }), + }), ); }); }); - - describe('Template Generation', () => { - it('should generate valid templates for coverage gaps', async () => { + + describe("Template Generation", () => { + it("should generate valid templates for coverage gaps", async () => { // Setup: Schema without tests - await createSQLFile(tempDir, 'functions.sql', ` + await createSQLFile( + tempDir, + "functions.sql", + ` CREATE FUNCTION get_user(user_id UUID) RETURNS TABLE(email TEXT, created_at TIMESTAMP) AS $$ SELECT email, created_at FROM users WHERE id = user_id $$ LANGUAGE SQL SECURITY DEFINER; - `); - + `, + ); + const orchestrator = new TestCoverageOrchestrator({ - testsDir: path.join(tempDir, 'tests'), - generateTemplates: true + testsDir: path.join(tempDir, "tests"), + generateTemplates: true, }); - - const operations = [ - { sql: 'CREATE FUNCTION get_user...', type: 'SAFE' } - ]; - + + const operations = [{ sql: "CREATE FUNCTION 
get_user...", type: "SAFE" }]; + const result = await orchestrator.checkCoverage(operations); - + expect(result.templates).toHaveLength(1); - expect(result.templates[0].template).toContain('has_function'); - expect(result.templates[0].template).toContain('function_returns'); - expect(result.templates[0].template).toContain('is_definer'); + expect(result.templates[0].template).toContain("has_function"); + expect(result.templates[0].template).toContain("function_returns"); + expect(result.templates[0].template).toContain("is_definer"); }); }); - - describe('Error Handling', () => { - it('should handle corrupted test files gracefully', async () => { - await createTestFile(tempDir, 'corrupt.sql', ` + + describe("Error Handling", () => { + it("should handle corrupted test files gracefully", async () => { + await createTestFile( + tempDir, + "corrupt.sql", + ` This is not valid SQL {{{ - `); - + `, + ); + const scanner = new pgTAPTestScanner(); - + // Should emit warning but not crash - await scanner.scanDirectory(path.join(tempDir, 'tests')); - + await scanner.scanDirectory(path.join(tempDir, "tests")); + const stats = scanner.getCoverageStatistics(); expect(stats.errors).toBeGreaterThan(0); }); - - it('should handle missing test directory', async () => { + + it("should handle missing test directory", async () => { const orchestrator = new TestCoverageOrchestrator({ - testsDir: '/non/existent/path' + testsDir: "/non/existent/path", }); - + const result = await orchestrator.checkCoverage([]); - + // Should handle gracefully with warning expect(result.passed).toBe(true); expect(result.coveragePercentage).toBe(0); @@ -221,20 +246,20 @@ describe('Coverage Enforcement Integration', () => { // Helper functions async function createTempProject() { - const tempDir = path.join('/tmp', `data-test-${Date.now()}`); + const tempDir = path.join("/tmp", `data-test-${Date.now()}`); await fs.mkdir(tempDir, { recursive: true }); - await fs.mkdir(path.join(tempDir, 'sql'), { recursive: true }); - await fs.mkdir(path.join(tempDir, 'tests'), { recursive: true }); - await fs.mkdir(path.join(tempDir, 'migrations'), { recursive: true }); + await fs.mkdir(path.join(tempDir, "sql"), { recursive: true }); + await fs.mkdir(path.join(tempDir, "tests"), { recursive: true }); + await fs.mkdir(path.join(tempDir, "migrations"), { recursive: true }); return tempDir; } async function createSQLFile(tempDir, filename, content) { - await fs.writeFile(path.join(tempDir, 'sql', filename), content); + await fs.writeFile(path.join(tempDir, "sql", filename), content); } async function createTestFile(tempDir, filename, content) { - await fs.writeFile(path.join(tempDir, 'tests', filename), content); + await fs.writeFile(path.join(tempDir, "tests", filename), content); } async function cleanupTempProject(tempDir) { @@ -254,6 +279,6 @@ async function cleanupTempProject(tempDir) { - Are there race conditions only visible in integration? - How do we test production vs development behavior differences? -___ +--- _"To properly test a system, one must examine not just its components, but how those components interact. The whole is often greater than the sum of its parts." 
- Data, Star Trek: The Next Generation, "Elementary, Dear Data"_ diff --git a/issues/2025-08-30/logging-levels-implementation.md b/issues/2025-08-30/logging-levels-implementation.md index f77a979..a7db2f6 100644 --- a/issues/2025-08-30/logging-levels-implementation.md +++ b/issues/2025-08-30/logging-levels-implementation.md @@ -8,13 +8,13 @@ Replace console methods with proper logging levels ### Core Information -| Field | Why It Matters | -|-------|---------------| -| **Severity Level** | LOW - Code quality improvement | -| **Location** | Multiple files using console.log, console.error, console.warn | -| **Category** | Architecture/Style | -| **Brief Description** | Direct console usage instead of structured logging | -| **Impact** | Cannot control log verbosity or redirect logs properly | +| Field | Why It Matters | +| --------------------- | ------------------------------------------------------------- | +| **Severity Level** | LOW - Code quality improvement | +| **Location** | Multiple files using console.log, console.error, console.warn | +| **Category** | Architecture/Style | +| **Brief Description** | Direct console usage instead of structured logging | +| **Impact** | Cannot control log verbosity or redirect logs properly | ## Summary @@ -34,11 +34,11 @@ Current scattered logging: // In TestTemplateGenerator.js console.error(`Failed to render pattern ${patternName}:`, error); -// In TestCoverageOrchestrator.js +// In TestCoverageOrchestrator.js this.logger = options.logger || console.log; // In pgTAPTestScanner.js -console.warn('Invalid assertion pattern:', line); +console.warn("Invalid assertion pattern:", line); ``` ## Proposed Solution @@ -54,12 +54,12 @@ class Logger { WARN: 1, INFO: 2, DEBUG: 3, - TRACE: 4 + TRACE: 4, }; constructor(options = {}) { this.level = options.level || Logger.LEVELS.INFO; - this.name = options.name || 'D.A.T.A.'; + this.name = options.name || "D.A.T.A."; this.output = options.output || console; this.format = options.format || this.defaultFormat; } @@ -67,7 +67,7 @@ class Logger { defaultFormat(level, message, meta) { const timestamp = new Date().toISOString(); const prefix = `[${timestamp}] [${this.name}] [${level}]`; - + if (meta && Object.keys(meta).length > 0) { return `${prefix} ${message} ${JSON.stringify(meta)}`; } @@ -77,11 +77,11 @@ class Logger { log(level, message, meta = {}) { if (level <= this.level) { const formatted = this.format( - Object.keys(Logger.LEVELS).find(k => Logger.LEVELS[k] === level), + Object.keys(Logger.LEVELS).find((k) => Logger.LEVELS[k] === level), message, - meta + meta, ); - + switch (level) { case Logger.LEVELS.ERROR: this.output.error(formatted); @@ -95,18 +95,28 @@ class Logger { } } - error(message, meta) { this.log(Logger.LEVELS.ERROR, message, meta); } - warn(message, meta) { this.log(Logger.LEVELS.WARN, message, meta); } - info(message, meta) { this.log(Logger.LEVELS.INFO, message, meta); } - debug(message, meta) { this.log(Logger.LEVELS.DEBUG, message, meta); } - trace(message, meta) { this.log(Logger.LEVELS.TRACE, message, meta); } + error(message, meta) { + this.log(Logger.LEVELS.ERROR, message, meta); + } + warn(message, meta) { + this.log(Logger.LEVELS.WARN, message, meta); + } + info(message, meta) { + this.log(Logger.LEVELS.INFO, message, meta); + } + debug(message, meta) { + this.log(Logger.LEVELS.DEBUG, message, meta); + } + trace(message, meta) { + this.log(Logger.LEVELS.TRACE, message, meta); + } child(name) { return new Logger({ level: this.level, name: `${this.name}:${name}`, output: this.output, - 
format: this.format + format: this.format, }); } } @@ -114,21 +124,21 @@ class Logger { // Usage in modules class TestRequirementAnalyzer { constructor(options = {}) { - this.logger = options.logger || new Logger({ name: 'Analyzer' }); + this.logger = options.logger || new Logger({ name: "Analyzer" }); } analyzeOperations(operations) { - this.logger.debug('Analyzing operations', { count: operations.length }); - + this.logger.debug("Analyzing operations", { count: operations.length }); + try { // ... analysis logic - this.logger.info('Analysis complete', { - requirements: result.requirements.length + this.logger.info("Analysis complete", { + requirements: result.requirements.length, }); } catch (error) { - this.logger.error('Analysis failed', { + this.logger.error("Analysis failed", { error: error.message, - stack: error.stack + stack: error.stack, }); throw error; } @@ -160,6 +170,6 @@ Configuration via .datarc.json: - How should we handle sensitive data in logs? - Will structured logging work with existing monitoring tools? -___ +--- _"The complexity of the universe is beyond measure, yet we must still attempt to understand it through observation and analysis." - Data, Star Trek: The Next Generation, "The Ensigns of Command"_ diff --git a/issues/2025-08-30/memory-management-concerns.md b/issues/2025-08-30/memory-management-concerns.md index 6b5a26f..42c9760 100644 --- a/issues/2025-08-30/memory-management-concerns.md +++ b/issues/2025-08-30/memory-management-concerns.md @@ -1,6 +1,6 @@ # GitHub Issue Format -> [!success] __This issuse has been fixed__ +> [!success] **This issuse has been fixed** ## Issue Title @@ -8,13 +8,13 @@ Implement memory management for large test suite coverage databases ### Core Information -| Field | Why It Matters | -|-------|---------------| -| **Severity Level** | CRITICAL - Risk of OOM errors in production | -| **Location** | `src/lib/testing/pgTAPTestScanner.js` (2,728 lines) | -| **Category** | Performance/Bug | -| **Brief Description** | Coverage database built entirely in memory without limits | -| **Impact** | Memory exhaustion with large test suites could crash deployment process | +| Field | Why It Matters | +| --------------------- | ----------------------------------------------------------------------- | +| **Severity Level** | CRITICAL - Risk of OOM errors in production | +| **Location** | `src/lib/testing/pgTAPTestScanner.js` (2,728 lines) | +| **Category** | Performance/Bug | +| **Brief Description** | Coverage database built entirely in memory without limits | +| **Impact** | Memory exhaustion with large test suites could crash deployment process | ## Summary @@ -38,13 +38,13 @@ buildCoverageDatabase() { gaps: [], metadata: {} }; - + // This grows unbounded! 
for (const [type, objects] of Object.entries(this.coverageMap)) { if (!database.byType[type]) { database.byType[type] = {}; } - + for (const [name, coverage] of Object.entries(objects)) { // No memory limit checking database.objects[`${type}.${name}`] = { @@ -56,7 +56,7 @@ buildCoverageDatabase() { }; } } - + return database; } ``` @@ -71,21 +71,21 @@ class CoverageDatabase { this.maxMemory = options.maxMemory || 100 * 1024 * 1024; // 100MB default this.currentMemory = 0; this.useStreaming = options.streaming || false; - this.tempDir = options.tempDir || '/tmp/coverage'; + this.tempDir = options.tempDir || "/tmp/coverage"; } - + async addCoverage(type, name, coverage) { const size = this.estimateSize(coverage); - + if (this.currentMemory + size > this.maxMemory) { await this.flushToDisk(); } - + // Add to memory buffer this.buffer.push({ type, name, coverage }); this.currentMemory += size; } - + async flushToDisk() { // Write current buffer to disk const file = path.join(this.tempDir, `coverage-${Date.now()}.json`); @@ -93,7 +93,7 @@ class CoverageDatabase { this.buffer = []; this.currentMemory = 0; } - + async *iterate() { // Stream results from disk if needed if (this.useStreaming) { @@ -120,6 +120,6 @@ class CoverageDatabase { - Are there memory leaks in the regex processing? - How does Node.js garbage collection handle large Map structures? -___ +--- _"The complexity of the universe is beyond measure. Every new discovery reveals ten new mysteries." - Data, Star Trek: The Next Generation, "The Most Toys"_ diff --git a/issues/2025-08-30/one-class-per-file-violations.md b/issues/2025-08-30/one-class-per-file-violations.md index cda24a4..cddb9a3 100644 --- a/issues/2025-08-30/one-class-per-file-violations.md +++ b/issues/2025-08-30/one-class-per-file-violations.md @@ -1,17 +1,18 @@ # One Class Per File Policy Violations ## Issue Title + Audit and refactor codebase to enforce one-class-per-file policy ### Core Information -| Field | Why It Matters | -|-------|---------------| -| **Severity Level** | MEDIUM - Code maintainability | -| **Location** | Multiple files across codebase | -| **Category** | Architecture/Refactoring | +| Field | Why It Matters | +| --------------------- | --------------------------------------------------------------------------- | +| **Severity Level** | MEDIUM - Code maintainability | +| **Location** | Multiple files across codebase | +| **Category** | Architecture/Refactoring | | **Brief Description** | Multiple classes exist in single files, violating maintainability standards | -| **Impact** | Confusion, harder debugging, unclear module boundaries | +| **Impact** | Confusion, harder debugging, unclear module boundaries | ## Summary @@ -20,11 +21,13 @@ The codebase contains violations of the one-class-per-file rule. Each file shoul ## Known Violations ### Confirmed Multi-Class Files + 1. **src/lib/testing/pgTAPTestScanner-memory-enhanced.js** - Contains: `MemoryMonitor`, `StreamingCoverageDatabase`, `BatchProcessor` - Should be: Three separate files ### Files to Audit + - All files in `src/lib/` - All files in `src/commands/` - Build directory files @@ -32,6 +35,7 @@ The codebase contains violations of the one-class-per-file rule. Each file shoul ## Proposed Solution 1. **Audit Phase** + ```bash # Find all files with multiple class declarations grep -r "^class " src/ | awk -F: '{print $1}' | uniq -c | awk '$1 > 1' @@ -59,6 +63,7 @@ The codebase contains violations of the one-class-per-file rule. 
Each file shoul ## Example Refactoring Before: + ```javascript // src/lib/testing/pgTAPTestScanner-memory-enhanced.js class MemoryMonitor { ... } @@ -67,6 +72,7 @@ class BatchProcessor { ... } ``` After: + ```javascript // src/lib/testing/MemoryMonitor.js class MemoryMonitor { ... } @@ -91,4 +97,4 @@ module.exports = BatchProcessor; --- -*"One function, one purpose. One class, one file. Simplicity is the ultimate sophistication."* - Engineering Best Practices \ No newline at end of file +_"One function, one purpose. One class, one file. Simplicity is the ultimate sophistication."_ - Engineering Best Practices diff --git a/issues/2025-08-30/pattern-library-error-recovery.md b/issues/2025-08-30/pattern-library-error-recovery.md index c044cb3..60218d6 100644 --- a/issues/2025-08-30/pattern-library-error-recovery.md +++ b/issues/2025-08-30/pattern-library-error-recovery.md @@ -1,6 +1,6 @@ # GitHub Issue Format -> [!success] __This issuse has been fixed__ +> [!success] **This issuse has been fixed** ## Issue Title @@ -8,13 +8,13 @@ Add error recovery for pattern rendering failures in TestTemplateGenerator ### Core Information -| Field | Why It Matters | -|-------|---------------| -| **Severity Level** | HIGH - Could leave test files in inconsistent state | -| **Location** | `src/lib/testing/TestTemplateGenerator.js` lines 545-597 | -| **Category** | Bug/Architecture | -| **Brief Description** | Pattern rendering failures lack proper error recovery | -| **Impact** | Failed template generation could leave partial or invalid test files | +| Field | Why It Matters | +| --------------------- | -------------------------------------------------------------------- | +| **Severity Level** | HIGH - Could leave test files in inconsistent state | +| **Location** | `src/lib/testing/TestTemplateGenerator.js` lines 545-597 | +| **Category** | Bug/Architecture | +| **Brief Description** | Pattern rendering failures lack proper error recovery | +| **Impact** | Failed template generation could leave partial or invalid test files | ## Summary @@ -63,14 +63,14 @@ Implement proper error recovery with validation and rollback: async generateEnhancedTemplate(requirement, additionalPatterns = []) { const checkpoint = this.createCheckpoint(); const errors = []; - + try { // Generate base template const baseTemplate = this.generateTemplate(requirement); if (!baseTemplate) { throw new Error('Failed to generate base template'); } - + // Collect all pattern enhancements const enhancements = []; for (const patternName of additionalPatterns) { @@ -79,31 +79,31 @@ async generateEnhancedTemplate(requirement, additionalPatterns = []) { enhancements.push(rendered); } catch (error) { errors.push({ pattern: patternName, error }); - + // Decide whether to continue or abort if (this.options.strictPatternRendering) { throw new Error(`Critical pattern rendering failure: ${patternName}`); } } } - + // Validate combined template const finalTemplate = this.combineTemplates(baseTemplate, enhancements); const validation = this.validateTemplate(finalTemplate); - + if (!validation.valid) { throw new Error(`Template validation failed: ${validation.errors.join(', ')}`); } - + return { template: finalTemplate, warnings: errors.map(e => `Pattern ${e.pattern} skipped: ${e.error.message}`) }; - + } catch (error) { // Rollback to checkpoint this.rollbackToCheckpoint(checkpoint); - + // Re-throw with context throw new Error(`Template generation failed: ${error.message}`, { cause: error }); } @@ -111,20 +111,20 @@ async 
generateEnhancedTemplate(requirement, additionalPatterns = []) { validateTemplate(template) { const errors = []; - + // Check for required pgTAP structure if (!template.includes('CREATE OR REPLACE FUNCTION')) { errors.push('Missing function declaration'); } - + if (!template.includes('SELECT plan(')) { errors.push('Missing test plan declaration'); } - + if (!template.includes('SELECT finish()')) { errors.push('Missing test finish call'); } - + // Check for SQL syntax (basic) try { // Could use pgsql-parser here for real validation @@ -132,7 +132,7 @@ validateTemplate(template) { } catch (error) { errors.push(`SQL syntax error: ${error.message}`); } - + return { valid: errors.length === 0, errors @@ -152,6 +152,6 @@ validateTemplate(template) { - Are there race conditions in concurrent template generation? - How do different file systems handle partial writes? -___ +--- _"It is the struggle itself that is most important. We must strive to be more than we are. It does not matter that we will not reach our ultimate goal. The effort itself yields its own rewards." - Data, Star Trek: The Next Generation, "The Offspring"_ diff --git a/issues/2025-08-30/performance-optimization-needed.md b/issues/2025-08-30/performance-optimization-needed.md index 0758ad2..cbde42b 100644 --- a/issues/2025-08-30/performance-optimization-needed.md +++ b/issues/2025-08-30/performance-optimization-needed.md @@ -8,13 +8,13 @@ Optimize performance for large schema analysis and test scanning ### Core Information -| Field | Why It Matters | -|-------|---------------| -| **Severity Level** | MEDIUM - Performance degradation at scale | -| **Location** | `TestRequirementAnalyzer.js`, `pgTAPTestScanner.js` | -| **Category** | Performance | +| Field | Why It Matters | +| --------------------- | ------------------------------------------------------------------ | +| **Severity Level** | MEDIUM - Performance degradation at scale | +| **Location** | `TestRequirementAnalyzer.js`, `pgTAPTestScanner.js` | +| **Category** | Performance | | **Brief Description** | No caching, parallel processing, or streaming for large operations | -| **Impact** | Slow deployment workflow for large schemas | +| **Impact** | Slow deployment workflow for large schemas | ## Summary @@ -34,17 +34,17 @@ Performance bottlenecks: graph TD A[1000 SQL Operations] --> B[Sequential Analysis] B --> C[4000ms] - + D[500 Test Files] --> E[Sequential Scanning] E --> F[3000ms] - + G[Coverage Comparison] --> H[O(n²) Algorithm] H --> I[2000ms] - + C --> J[Total: 9+ seconds] F --> J I --> J - + style J fill:#f99,stroke:#333,stroke-width:4px ``` @@ -62,18 +62,19 @@ class CachedAnalyzer { this.cacheHits = 0; this.cacheMisses = 0; } - + getCacheKey(operation) { // Create deterministic cache key - return crypto.createHash('sha256') + return crypto + .createHash("sha256") .update(JSON.stringify(operation)) - .digest('hex'); + .digest("hex"); } - + async analyzeOperations(operations) { const results = []; const toAnalyze = []; - + // Check cache first for (const op of operations) { const key = this.getCacheKey(op); @@ -85,17 +86,17 @@ class CachedAnalyzer { this.cacheMisses++; } } - + // Analyze uncached operations if (toAnalyze.length > 0) { const newResults = await this.analyzer.analyzeOperations(toAnalyze); - + // Cache results for (let i = 0; i < toAnalyze.length; i++) { const key = this.getCacheKey(toAnalyze[i]); this.cache.set(key, newResults[i]); results.push(newResults[i]); - + // LRU eviction if (this.cache.size > this.maxCacheSize) { const firstKey = 
this.cache.keys().next().value; @@ -103,7 +104,7 @@ class CachedAnalyzer { } } } - + return results; } } @@ -114,31 +115,31 @@ class ParallelScanner { this.workers = options.workers || os.cpus().length; this.workerPool = []; } - + async scanDirectory(dir) { const files = await this.getTestFiles(dir); - + // Divide work among workers const chunks = this.chunkArray(files, this.workers); - + // Process in parallel const results = await Promise.all( - chunks.map(chunk => this.processChunk(chunk)) + chunks.map((chunk) => this.processChunk(chunk)), ); - + // Merge results return this.mergeResults(results); } - + async processChunk(files) { // Use worker threads for CPU-intensive parsing return new Promise((resolve, reject) => { - const worker = new Worker('./scanWorker.js', { - workerData: { files } + const worker = new Worker("./scanWorker.js", { + workerData: { files }, }); - - worker.on('message', resolve); - worker.on('error', reject); + + worker.on("message", resolve); + worker.on("error", reject); }); } } @@ -155,22 +156,22 @@ class OptimizedEnforcer { } coverageIndex.get(key).add(item); } - + // Single pass comparison - O(n) const gaps = []; const met = []; - + for (const req of requirements) { const key = this.generateKey(req); const matches = coverageIndex.get(key); - + if (matches && matches.size > 0) { met.push({ requirement: req, coverage: [...matches] }); } else { gaps.push({ requirement: req }); } } - + return { gaps, met }; } } @@ -179,34 +180,34 @@ class OptimizedEnforcer { class StreamingScanner { async *scanDirectoryStream(dir) { const files = await fs.readdir(dir); - + for (const file of files) { - if (file.endsWith('.sql')) { - const content = await fs.readFile(path.join(dir, file), 'utf8'); + if (file.endsWith(".sql")) { + const content = await fs.readFile(path.join(dir, file), "utf8"); const assertions = this.extractAssertions(content); - + // Yield results as they're ready yield { file, assertions, - coverage: this.buildCoverage(assertions) + coverage: this.buildCoverage(assertions), }; } } } - + async buildCoverageDatabase() { const database = new StreamingDatabase(); - + for await (const result of this.scanDirectoryStream(this.testsDir)) { database.addCoverage(result.coverage); - + // Periodic cleanup if (database.size % 100 === 0) { await database.compact(); } } - + return database; } } @@ -216,23 +217,23 @@ class PerformanceMonitor { constructor() { this.metrics = new Map(); } - + async measure(name, fn) { const start = performance.now(); - + try { const result = await fn(); const duration = performance.now() - start; - - this.recordMetric(name, duration, 'success'); + + this.recordMetric(name, duration, "success"); return result; } catch (error) { const duration = performance.now() - start; - this.recordMetric(name, duration, 'error'); + this.recordMetric(name, duration, "error"); throw error; } } - + recordMetric(name, duration, status) { if (!this.metrics.has(name)) { this.metrics.set(name, { @@ -240,20 +241,22 @@ class PerformanceMonitor { totalTime: 0, avgTime: 0, maxTime: 0, - minTime: Infinity + minTime: Infinity, }); } - + const metric = this.metrics.get(name); metric.count++; metric.totalTime += duration; metric.avgTime = metric.totalTime / metric.count; metric.maxTime = Math.max(metric.maxTime, duration); metric.minTime = Math.min(metric.minTime, duration); - + // Alert on performance degradation if (duration > metric.avgTime * 2) { - console.warn(`Performance degradation in ${name}: ${duration}ms (avg: ${metric.avgTime}ms)`); + console.warn( + `Performance 
degradation in ${name}: ${duration}ms (avg: ${metric.avgTime}ms)`, + ); } } } @@ -271,6 +274,6 @@ class PerformanceMonitor { - Could parallel processing cause race conditions? - Will streaming reduce memory enough for huge schemas? -___ +--- _"There are still many human emotions I do not fully comprehend. Patience, however, is not one of them." - Data, Star Trek: Generations_ diff --git a/issues/2025-08-30/runtime-migration-to-deno.md b/issues/2025-08-30/runtime-migration-to-deno.md index 0549b20..a025139 100644 --- a/issues/2025-08-30/runtime-migration-to-deno.md +++ b/issues/2025-08-30/runtime-migration-to-deno.md @@ -1,18 +1,18 @@ # Runtime Migration: Node.js to Deno -> [!danger] __This is not yet started, but it is top priority__ +> [!danger] **This is not yet started, but it is top priority** ## [CRITICAL] Complete Runtime Migration from Node.js to Deno for Edge Function Parity ### Core Information -| Field | Why It Matters | -|-------|---------------| -| **Severity Level** | 🔴 CRITICAL - Show Stopper | -| **Location** | Entire codebase - all JavaScript files | -| **Category** | Architecture / Technical Debt | -| **Brief Description** | Runtime mismatch prevents accurate Edge Function testing | -| **Impact** | Cannot test Edge Functions in their actual runtime; ES module chaos blocking development | +| Field | Why It Matters | +| --------------------- | ---------------------------------------------------------------------------------------- | +| **Severity Level** | 🔴 CRITICAL - Show Stopper | +| **Location** | Entire codebase - all JavaScript files | +| **Category** | Architecture / Technical Debt | +| **Brief Description** | Runtime mismatch prevents accurate Edge Function testing | +| **Impact** | Cannot test Edge Functions in their actual runtime; ES module chaos blocking development | ## Summary @@ -60,12 +60,12 @@ STEP 5: Risk Evaluation ```javascript // Current: IMPOSSIBLE in Node.js -import edgeFunction from './supabase/functions/my-func/index.ts' +import edgeFunction from "./supabase/functions/my-func/index.ts"; // Error: Cannot import Deno-specific APIs // Future: PERFECT in Deno -import edgeFunction from './supabase/functions/my-func/index.ts' -const response = await edgeFunction.handler(request) // Works exactly as deployed +import edgeFunction from "./supabase/functions/my-func/index.ts"; +const response = await edgeFunction.handler(request); // Works exactly as deployed ``` ## Proposed Solution @@ -94,12 +94,13 @@ deno init #### Phase 2: Core Library Migration (Hour 1-4) **Order of Operations:** + 1. 
**Base Classes First** ```typescript // src/lib/Command.ts - import { EventEmitter } from "std/node/events.ts" - + import { EventEmitter } from "std/node/events.ts"; + export abstract class Command extends EventEmitter { // Minimal changes needed - EventEmitter compatible } @@ -109,14 +110,14 @@ deno init ```typescript // src/lib/config.ts - import { load } from "std/dotenv/mod.ts" - + import { load } from "std/dotenv/mod.ts"; + export async function loadConfig() { - await load() // Loads .env automatically + await load(); // Loads .env automatically return { supabaseUrl: Deno.env.get("SUPABASE_URL"), - supabaseKey: Deno.env.get("SUPABASE_SERVICE_ROLE_KEY") - } + supabaseKey: Deno.env.get("SUPABASE_SERVICE_ROLE_KEY"), + }; } ``` @@ -124,8 +125,8 @@ deno init ```typescript // src/lib/PathResolver.ts - import { join, resolve } from "std/path/mod.ts" - + import { join, resolve } from "std/path/mod.ts"; + export class PathResolver { // Path operations nearly identical } @@ -137,29 +138,29 @@ deno init ```typescript // Priority 1: Test Commands (for Edge Function testing) -src/commands/test/RunCommand.ts // Must work first -src/commands/test/CompileCommand.ts // Validates Edge Functions +src / commands / test / RunCommand.ts; // Must work first +src / commands / test / CompileCommand.ts; // Validates Edge Functions -// Priority 2: Core Database Commands -src/commands/db/MigrateCommand.ts // Primary workflow -src/commands/db/CompileCommand.ts // Migration generation +// Priority 2: Core Database Commands +src / commands / db / MigrateCommand.ts; // Primary workflow +src / commands / db / CompileCommand.ts; // Migration generation // Priority 3: Function Commands -src/commands/functions/DeployCommand.ts // Edge Function deployment +src / commands / functions / DeployCommand.ts; // Edge Function deployment ``` **Migration Pattern:** ```typescript // Before (Node.js) -const fs = require('fs').promises -const { exec } = require('child_process') -const chalk = require('chalk') +const fs = require("fs").promises; +const { exec } = require("child_process"); +const chalk = require("chalk"); // After (Deno) -const { readFile, writeFile } = Deno -const { Command } = new Deno.Command() -import { colors } from "std/fmt/colors.ts" +const { readFile, writeFile } = Deno; +const { Command } = new Deno.Command(); +import { colors } from "std/fmt/colors.ts"; ``` #### Phase 4: Test Suite Migration (Hour 8-10) @@ -167,16 +168,16 @@ import { colors } from "std/fmt/colors.ts" ```typescript // Test migration pattern // Before: Vitest -import { describe, it, expect } from 'vitest' +import { describe, it, expect } from "vitest"; // After: Deno.test -import { assertEquals, assertThrows } from "std/assert/mod.ts" +import { assertEquals, assertThrows } from "std/assert/mod.ts"; Deno.test("MigrateCommand deploys successfully", async () => { - const cmd = new MigrateCommand() - const result = await cmd.execute() - assertEquals(result.success, true) -}) + const cmd = new MigrateCommand(); + const result = await cmd.execute(); + assertEquals(result.success, true); +}); ``` #### Phase 5: Build & Distribution (Hour 10-12) @@ -200,8 +201,9 @@ deno compile \ ```markdown ## Core Libraries (4 hours) + - [ ] src/lib/Command.js → Command.ts -- [ ] src/lib/DatabaseCommand.js → DatabaseCommand.ts +- [ ] src/lib/DatabaseCommand.js → DatabaseCommand.ts - [ ] src/lib/SupabaseCommand.js → SupabaseCommand.ts - [ ] src/lib/TestCommand.js → TestCommand.ts - [ ] src/lib/CommandRouter.js → CommandRouter.ts @@ -210,6 +212,7 @@ deno compile \ - [ 
] src/lib/db-utils.js → db-utils.ts ## Commands (4 hours) + - [ ] src/commands/db/MigrateCommand.js → MigrateCommand.ts - [ ] src/commands/db/CompileCommand.js → CompileCommand.ts - [ ] src/commands/db/ResetCommand.js → ResetCommand.ts @@ -219,16 +222,19 @@ deno compile \ - [ ] src/commands/InitCommand.js → InitCommand.ts ## Test System (2 hours) + - [ ] src/lib/testing/TestRequirementAnalyzer.js → TestRequirementAnalyzer.ts - [ ] src/lib/testing/TestCoverageOrchestrator.js → TestCoverageOrchestrator.ts - [ ] src/lib/testing/pgTAPTestScanner.js → pgTAPTestScanner.ts - [ ] src/lib/testing/CoverageEnforcer.js → CoverageEnforcer.ts ## Entry Points (1 hour) + - [ ] bin/data.js → Removed (compiled binary replaces) - [ ] src/index.js → index.ts ## Configuration (1 hour) + - [ ] package.json → deno.json - [ ] .eslintrc → Removed (deno lint) - [ ] vitest.config.js → Removed (Deno.test) @@ -238,6 +244,7 @@ deno compile \ ### Validation Criteria **Migration Success Metrics:** + 1. All existing tests pass in Deno 2. Can import and test actual Edge Functions 3. Binary size < 50MB @@ -279,18 +286,18 @@ git tag v1.0.0-final-node // The Ultimate Test async function validateMigration() { // 1. Can we call Supabase API? - const migrations = await getMigrations() // ✓ - + const migrations = await getMigrations(); // ✓ + // 2. Can we run pgTAP tests? - const tests = await runTests() // ✓ - + const tests = await runTests(); // ✓ + // 3. Can we import Edge Functions? - const func = await import('./supabase/functions/test/index.ts') // ✓ - + const func = await import("./supabase/functions/test/index.ts"); // ✓ + // 4. Can we compile to binary? - await Deno.run({ cmd: ["deno", "compile", "..."] }) // ✓ - - return SUCCESS + await Deno.run({ cmd: ["deno", "compile", "..."] }); // ✓ + + return SUCCESS; } ``` @@ -299,7 +306,7 @@ async function validateMigration() { ``` Hour 0-1: Environment setup, Deno initialization Hour 1-4: Core library migration -Hour 4-8: Command migration +Hour 4-8: Command migration Hour 8-10: Test migration Hour 10-12: Build and distribution Hour 12: COMPLETE ✓ @@ -323,7 +330,8 @@ Hour 12: COMPLETE ✓ **Priority Override**: This issue supersedes ALL other issues. -**Rationale**: +**Rationale**: + 1. Current ES module chaos blocks ALL development 2. Edge Function testing gap risks production failures 3. Solution is simpler than initially thought (8-12 hours) @@ -346,7 +354,7 @@ Decision: PROCEED IMMEDIATELY --- -*"Logic is the beginning of wisdom, not the end."* - Spock +_"Logic is the beginning of wisdom, not the end."_ - Spock The logical path is clear. The ES module chaos ends today. The runtime mismatch ends today. D.A.T.A. becomes a Deno application today. diff --git a/issues/README.md b/issues/README.md index cb5cabc..84cdb20 100644 --- a/issues/README.md +++ b/issues/README.md @@ -8,7 +8,7 @@ This directory contains documented issues identified during code reviews and dev - 🔴 **CRITICAL** - Must fix before production deployment - 🟠 **HIGH** - Strongly recommended fixes -- 🟡 **MEDIUM** - Important improvements +- 🟡 **MEDIUM** - Important improvements - 🟢 **LOW** - Nice-to-have enhancements ## Current Issues (2025-08-31) @@ -16,6 +16,7 @@ This directory contains documented issues identified during code reviews and dev ### 🔴🔴🔴 CRITICAL SHOW-STOPPER (Blocks Edge Functions Testing) #### 0. 
[Runtime Migration to Deno](./2025-08-30/runtime-migration-to-deno.md) + - **Component:** ENTIRE CODEBASE - **Impact:** Cannot test Edge Functions accurately; ES module chaos blocks development - **Fix Time:** 8-12 hours @@ -25,6 +26,7 @@ This directory contains documented issues identified during code reviews and dev ### Test Quality and Coverage Issues #### [Test Quality Audit](./2025-08-31/test-quality-audit.md) + - **Date:** 2025-08-31 - **Component:** Test Suite - **Overall Health:** 3.2/5 (Fair) @@ -43,18 +45,21 @@ This directory contains documented issues identified during code reviews and dev ### 🔴 Critical Issues (Production Blockers) #### 1. [Deployment Blocking Vulnerability](./2025-08-30/deployment-blocking-vulnerability.md) + - **Component:** MigrationOrchestrator - **Impact:** Allows untested code to reach production on coverage system failures - **Fix Time:** 2-4 hours - **Status:** ✅ FIXED (lines 357-389 updated with strict error handling) #### 2. [AST Operation Validation Missing](./2025-08-30/ast-operation-validation-missing.md) -- **Component:** TestRequirementAnalyzer + +- **Component:** TestRequirementAnalyzer - **Impact:** Silent failures in test requirement generation - **Fix Time:** 4-6 hours -- **Status:** ✅ FIXED (lines 185-234 added _validateOperation method) +- **Status:** ✅ FIXED (lines 185-234 added \_validateOperation method) #### 3. [Memory Management Concerns](./2025-08-30/memory-management-concerns.md) + - **Component:** pgTAPTestScanner - **Impact:** OOM errors with large test suites - **Fix Time:** 8-12 hours @@ -65,24 +70,28 @@ This directory contains documented issues identified during code reviews and dev ### 🟠 High Priority Issues #### 4. [Coverage Calculation Accuracy](./2025-08-30/coverage-calculation-accuracy.md) + - **Component:** CoverageEnforcer - **Impact:** False positives/negatives in coverage detection - **Fix Time:** 3-4 hours -- **Status:** ✅ FIXED (lines 217-242 added _generateCoverageKey method with normalization) +- **Status:** ✅ FIXED (lines 217-242 added \_generateCoverageKey method with normalization) #### 5. [Pattern Library Error Recovery](./2025-08-30/pattern-library-error-recovery.md) + - **Component:** TestTemplateGenerator - **Impact:** Invalid test files generated on pattern failures - **Fix Time:** 4-6 hours - **Status:** ✅ FIXED (Comprehensive error recovery and validation system added) #### 6. [Configuration Schema Incomplete](./2025-08-30/configuration-schema-incomplete.md) + - **Component:** datarc.schema.json - **Impact:** Runtime configuration errors - **Fix Time:** 2-3 hours - **Status:** ⏳ Pending #### 7. [Integration Testing Missing](./2025-08-30/integration-testing-missing.md) + - **Component:** Test Suite - **Impact:** Integration bugs only found in production - **Fix Time:** 16-24 hours @@ -93,12 +102,14 @@ This directory contains documented issues identified during code reviews and dev ### 🟡 Medium Priority Issues #### 8. [Async/Await Consistency](./2025-08-30/async-await-consistency.md) + - **Component:** Multiple modules - **Impact:** Potential race conditions and unhandled rejections - **Fix Time:** 6-8 hours - **Status:** ⏳ Pending #### 9. [Performance Optimization Needed](./2025-08-30/performance-optimization-needed.md) + - **Component:** TestRequirementAnalyzer, pgTAPTestScanner - **Impact:** Slow deployments for large schemas - **Fix Time:** 12-16 hours @@ -109,12 +120,14 @@ This directory contains documented issues identified during code reviews and dev ### 🟢 Low Priority Enhancements #### 10. 
[Error Types Standardization](./2025-08-30/error-types-standardization.md) + - **Component:** All testing modules - **Impact:** Harder to handle specific errors programmatically - **Fix Time:** 4-6 hours - **Status:** ⏳ Pending #### 11. [Logging Levels Implementation](./2025-08-30/logging-levels-implementation.md) + - **Component:** All modules - **Impact:** Cannot control log verbosity - **Fix Time:** 6-8 hours @@ -147,27 +160,27 @@ graph LR ### Current Sprint (Week of 2025-08-31) -| Category | Status | Notes | -|----------|--------|-------| -| **Deno Migration** | 🔴 **NOT STARTED** | Still blocking Edge Functions testing | -| **Test Quality Audit** | ✅ **COMPLETED** | Overall score: 3.2/5 - Major refactoring needed | -| **Example Project** | ✅ **COMPLETED** | Full donation platform with Edge Functions | -| **Documentation** | ✅ **UPDATED** | Added Edge Functions guide, updated example README | +| Category | Status | Notes | +| ---------------------- | ------------------ | -------------------------------------------------- | +| **Deno Migration** | 🔴 **NOT STARTED** | Still blocking Edge Functions testing | +| **Test Quality Audit** | ✅ **COMPLETED** | Overall score: 3.2/5 - Major refactoring needed | +| **Example Project** | ✅ **COMPLETED** | Full donation platform with Edge Functions | +| **Documentation** | ✅ **UPDATED** | Added Edge Functions guide, updated example README | ### Previous Sprint Results (Week of 2025-08-30) -| Severity | Count | Status | -|----------|-------|--------| -| Critical | 3 | ✅ 100% Complete (3/3 fixed) | -| High | 4 | ✅ 50% Complete (2/4 fixed) | -| Medium | 2 | ⏳ Pending | -| Low | 2 | ⏳ Pending | +| Severity | Count | Status | +| -------- | ----- | ---------------------------- | +| Critical | 3 | ✅ 100% Complete (3/3 fixed) | +| High | 4 | ✅ 50% Complete (2/4 fixed) | +| Medium | 2 | ⏳ Pending | +| Low | 2 | ⏳ Pending | ### Test Quality Breakdown ``` Excellent (4-5/5) ████░░░░░░ 23% (4 files) -Good (3-4/5) ████░░░░░░ 23% (4 files) +Good (3-4/5) ████░░░░░░ 23% (4 files) Fair (2-3/5) ████░░░░░░ 23% (4 files) Poor (1-2/5) █████░░░░░ 31% (5 files) ``` @@ -175,11 +188,13 @@ Poor (1-2/5) █████░░░░░ 31% (5 files) ## Resolution Timeline ### Completed (2025-08-31) + - [x] **Test Quality Audit** ✅ - Comprehensive analysis documented - [x] **Example Project** ✅ - Complete donation platform implementation with Edge Functions - [x] **Documentation** ✅ - Added Edge Functions guide and updated example README -### Week 1 Priorities +### Week 1 Priorities + - [ ] **DENO MIGRATION** (8-12 hours) - BLOCKS ALL EDGE FUNCTION WORK - [ ] Replace spy-based tests with behavior tests - [ ] Remove mock-heavy migration tests @@ -187,12 +202,14 @@ Poor (1-2/5) █████░░░░░ 31% (5 files) - [ ] Test build scripts ### Week 2 (High Priority Issues) + - [x] Fix coverage calculation accuracy ✅ - [x] Add pattern library error recovery ✅ - [ ] Complete configuration schema - [ ] Begin integration tests ### Week 3 (Stabilization) + - [ ] Complete integration testing - [ ] Fix async/await patterns - [ ] Performance optimization @@ -207,37 +224,47 @@ When creating new issues, use this structure: # GitHub Issue Format ## Issue Title + [Clear, actionable title] ### Core Information -| Field | Why It Matters | -|-------|---------------| -| **Severity Level** | Critical/High/Medium/Low | -| **Location** | File path + line numbers | -| **Category** | Bug/Performance/Security/Architecture | -| **Brief Description** | What's wrong | -| **Impact** | Who/what it affects | + +| Field | Why It Matters | +| 
--------------------- | ------------------------------------- | +| **Severity Level** | Critical/High/Medium/Low | +| **Location** | File path + line numbers | +| **Category** | Bug/Performance/Security/Architecture | +| **Brief Description** | What's wrong | +| **Impact** | Who/what it affects | ## Summary + [High-level overview] ## Details + ### Root Cause + [Known or unknown] ### Example + [Code or diagram showing the problem] ## Proposed Solution + [Brief description of fix] ### Known Unknowns + - [List of known questions] ### Unknown Unknowns + - [Open questions to explore] -___ +--- + _"[Relevant Data quote]" - Data, [Episode]_ ``` @@ -260,4 +287,4 @@ To add a new issue: --- -*"In the complexity of any system, one must be prepared to encounter unexpected difficulties. It is through careful documentation and systematic resolution that we achieve operational excellence."* - Lt. Commander Data \ No newline at end of file +_"In the complexity of any system, one must be prepared to encounter unexpected difficulties. It is through careful documentation and systematic resolution that we achieve operational excellence."_ - Lt. Commander Data diff --git a/package-lock.json b/package-lock.json index 4ba4ce2..9748f32 100644 --- a/package-lock.json +++ b/package-lock.json @@ -23,6 +23,7 @@ "ink-spinner": "^5.0.0", "ink-text-input": "^6.0.0", "inquirer": "^10.0.0", + "minimatch": "^10.0.3", "oh-my-logo": "^0.3.0", "pg": "^8.12.0", "pino": "^9.0.0", @@ -1195,6 +1196,27 @@ "node": ">=18" } }, + "node_modules/@isaacs/balanced-match": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/@isaacs/balanced-match/-/balanced-match-4.0.1.tgz", + "integrity": "sha512-yzMTt9lEb8Gv7zRioUilSglI0c0smZ9k5D65677DLWLtWJaXIS3CqcGyUFByYKlnUj6TkjLVs54fBl6+TiGQDQ==", + "license": "MIT", + "engines": { + "node": "20 || >=22" + } + }, + "node_modules/@isaacs/brace-expansion": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/@isaacs/brace-expansion/-/brace-expansion-5.0.0.tgz", + "integrity": "sha512-ZT55BDLV0yv0RBm2czMiZ+SqCGO7AvmOM3G/w2xhVPH+te0aKgFjmBvGlL1dH+ql2tgGO3MVrbb3jCKyvpgnxA==", + "license": "MIT", + "dependencies": { + "@isaacs/balanced-match": "^4.0.1" + }, + "engines": { + "node": "20 || >=22" + } + }, "node_modules/@isaacs/cliui": { "version": "8.0.2", "resolved": "https://registry.npmjs.org/@isaacs/cliui/-/cliui-8.0.2.tgz", @@ -1985,6 +2007,22 @@ "typescript": ">=4.8.4 <6.0.0" } }, + "node_modules/@typescript-eslint/typescript-estree/node_modules/minimatch": { + "version": "9.0.5", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.5.tgz", + "integrity": "sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^2.0.1" + }, + "engines": { + "node": ">=16 || 14 >=14.17" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, "node_modules/@typescript-eslint/utils": { "version": "8.41.0", "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-8.41.0.tgz", @@ -3674,6 +3712,22 @@ "node": ">=10.13.0" } }, + "node_modules/glob/node_modules/minimatch": { + "version": "9.0.5", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.5.tgz", + "integrity": "sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^2.0.1" + }, + "engines": { + "node": ">=16 || 14 >=14.17" + }, + "funding": 
{ + "url": "https://github.com/sponsors/isaacs" + } + }, "node_modules/globals": { "version": "14.0.0", "resolved": "https://registry.npmjs.org/globals/-/globals-14.0.0.tgz", @@ -4604,16 +4658,15 @@ } }, "node_modules/minimatch": { - "version": "9.0.5", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.5.tgz", - "integrity": "sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==", - "dev": true, + "version": "10.0.3", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-10.0.3.tgz", + "integrity": "sha512-IPZ167aShDZZUMdRk66cyQAW3qr0WzbHkPdMYa8bzZhlHhO3jALbKdxcaak7W9FfT2rZNpQuUu4Od7ILEpXSaw==", "license": "ISC", "dependencies": { - "brace-expansion": "^2.0.1" + "@isaacs/brace-expansion": "^5.0.0" }, "engines": { - "node": ">=16 || 14 >=14.17" + "node": "20 || >=22" }, "funding": { "url": "https://github.com/sponsors/isaacs" @@ -5855,6 +5908,22 @@ "node": ">=18" } }, + "node_modules/test-exclude/node_modules/minimatch": { + "version": "9.0.5", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.5.tgz", + "integrity": "sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^2.0.1" + }, + "engines": { + "node": ">=16 || 14 >=14.17" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, "node_modules/thread-stream": { "version": "3.1.0", "resolved": "https://registry.npmjs.org/thread-stream/-/thread-stream-3.1.0.tgz", diff --git a/package.json b/package.json index eef3fab..0a6a4d2 100644 --- a/package.json +++ b/package.json @@ -50,6 +50,7 @@ "ink-spinner": "^5.0.0", "ink-text-input": "^6.0.0", "inquirer": "^10.0.0", + "minimatch": "^10.0.3", "oh-my-logo": "^0.3.0", "pg": "^8.12.0", "pino": "^9.0.0", diff --git a/src/commands/InitCommand.js b/src/commands/InitCommand.js index bcaf4b3..a0df1e4 100644 --- a/src/commands/InitCommand.js +++ b/src/commands/InitCommand.js @@ -1,6 +1,6 @@ -const fs = require('fs/promises'); -const path = require('path'); -const Command = require('../lib/Command.js'); +const fs = require("fs/promises"); +const path = require("path"); +const Command = require("../lib/Command.js"); class InitCommand extends Command { constructor(options = {}) { @@ -10,68 +10,69 @@ class InitCommand extends Command { } async performExecute() { - this.emit('progress', { - message: 'Initializing D.A.T.A. project structure. Resistance is futile.' + this.emit("progress", { + message: "Initializing D.A.T.A. project structure. 
Resistance is futile.", }); try { // Create directory structure const dirs = [ - 'sql/001_extensions', - 'sql/002_schemas', - 'sql/003_tables', - 'sql/004_functions', - 'sql/005_policies', - 'sql/006_triggers', - 'sql/007_data', - 'migrations', - 'tests', - 'functions' + "sql/001_extensions", + "sql/002_schemas", + "sql/003_tables", + "sql/004_functions", + "sql/005_policies", + "sql/006_triggers", + "sql/007_data", + "migrations", + "tests", + "functions", ]; for (const dir of dirs) { const dirPath = path.join(this.projectPath, dir); await fs.mkdir(dirPath, { recursive: true }); - this.emit('progress', { - message: `Created directory: ${dir}` + this.emit("progress", { + message: `Created directory: ${dir}`, }); } // Create .datarc.json config file const config = { - "$schema": "https://raw.githubusercontent.com/supabase/cli/main/schemas/config.json", - "test": { - "minimum_coverage": 80, - "test_timeout": 300, - "output_formats": ["console", "json"] + $schema: + "https://raw.githubusercontent.com/supabase/cli/main/schemas/config.json", + test: { + minimum_coverage: 80, + test_timeout: 300, + output_formats: ["console", "json"], + }, + environments: { + local: { + db: "postgresql://postgres:postgres@localhost:54322/postgres", + }, }, - "environments": { - "local": { - "db": "postgresql://postgres:postgres@localhost:54322/postgres" - } - } }; await fs.writeFile( - path.join(this.projectPath, '.datarc.json'), - JSON.stringify(config, null, 2) + path.join(this.projectPath, ".datarc.json"), + JSON.stringify(config, null, 2), ); // Create example SQL files await this.createExampleFiles(); - this.emit('success', { - message: 'Project initialization complete. Make it so!' + this.emit("success", { + message: "Project initialization complete. Make it so!", }); return { success: true, - projectPath: this.projectPath + projectPath: this.projectPath, }; } catch (error) { - this.emit('error', { + this.emit("error", { message: `Initialization failed: ${error.message}`, - error + error, }); throw error; } @@ -80,23 +81,23 @@ class InitCommand extends Command { async createExampleFiles() { // Create example extension file await fs.writeFile( - path.join(this.projectPath, 'sql/001_extensions/uuid.sql'), + path.join(this.projectPath, "sql/001_extensions/uuid.sql"), `-- Enable UUID generation CREATE EXTENSION IF NOT EXISTS "uuid-ossp"; -` +`, ); // Create example schema file await fs.writeFile( - path.join(this.projectPath, 'sql/002_schemas/public.sql'), + path.join(this.projectPath, "sql/002_schemas/public.sql"), `-- Public schema setup GRANT USAGE ON SCHEMA public TO anon, authenticated; -` +`, ); // Create example table file await fs.writeFile( - path.join(this.projectPath, 'sql/003_tables/maintenance.sql'), + path.join(this.projectPath, "sql/003_tables/maintenance.sql"), `-- Maintenance mode table CREATE TABLE IF NOT EXISTS public.maintenance_mode ( id uuid DEFAULT uuid_generate_v4() PRIMARY KEY, @@ -110,12 +111,12 @@ CREATE TABLE IF NOT EXISTS public.maintenance_mode ( INSERT INTO public.maintenance_mode (enabled) VALUES (true) ON CONFLICT (id) DO NOTHING; -` +`, ); // Create example RLS policy await fs.writeFile( - path.join(this.projectPath, 'sql/005_policies/maintenance_policies.sql'), + path.join(this.projectPath, "sql/005_policies/maintenance_policies.sql"), `-- Enable RLS ALTER TABLE public.maintenance_mode ENABLE ROW LEVEL SECURITY; @@ -124,13 +125,13 @@ CREATE POLICY "Allow public read" ON public.maintenance_mode FOR SELECT TO public USING (true); -` +`, ); - this.emit('progress', { - message: 
'Example SQL files created successfully' + this.emit("progress", { + message: "Example SQL files created successfully", }); } } -module.exports = InitCommand; \ No newline at end of file +module.exports = InitCommand; diff --git a/src/commands/db/CompileCommand.js b/src/commands/db/CompileCommand.js index cb66124..f19edf2 100644 --- a/src/commands/db/CompileCommand.js +++ b/src/commands/db/CompileCommand.js @@ -2,22 +2,17 @@ * Database Migration Compile Command */ -const path = require('path'); -const BuildCommand = require('../../lib/BuildCommand'); +const path = require("path"); +const BuildCommand = require("../../lib/BuildCommand"); /** * Compile SQL sources into migration file * Enhanced with optional functions deployment integration */ class CompileCommand extends BuildCommand { - constructor( - inputDir, - outputDir, - logger = null, - isProd = false - ) { + constructor(inputDir, outputDir, logger = null, isProd = false) { super(inputDir, outputDir, logger, isProd); - + // Paths will be validated when performExecute is called // Don't throw in constructor as it prevents proper error handling } @@ -30,44 +25,46 @@ class CompileCommand extends BuildCommand { * @param {boolean} options.skipFunctionValidation - Skip function validation */ async performExecute(options = {}) { - this.emit('start', { isProd: this.isProd }); - + this.emit("start", { isProd: this.isProd }); + try { // Validate paths are provided if (!this.inputDir || !this.outputDir) { - throw new Error('CompileCommand requires input and output directories. Use --sql-dir and --migrations-dir options.'); + throw new Error( + "CompileCommand requires input and output directories. Use --sql-dir and --migrations-dir options.", + ); } - + // Load the native migration compiler - const MigrationCompiler = require('../../lib/migration/MigrationCompiler'); - + const MigrationCompiler = require("../../lib/migration/MigrationCompiler"); + // Create compiler instance const compiler = new MigrationCompiler({ sqlDir: this.inputDir, outputDir: this.outputDir, verbose: true, - timestamp: new Date() + timestamp: new Date(), }); - + // Attach event listeners this.attachCompilerEvents(compiler); - + // Run compilation - this.progress('Starting migration compilation...'); + this.progress("Starting migration compilation..."); const result = await compiler.compile(); - + this.success(`Migration compiled successfully: ${result.outputFile}`); - this.emit('complete', { result }); + this.emit("complete", { result }); // Deploy functions if requested if (options.deployFunctions) { await this.deployFunctions(options); } - + return result; } catch (error) { - this.error('Migration compilation failed', error); - this.emit('failed', { error }); + this.error("Migration compilation failed", error); + this.emit("failed", { error }); throw error; } } @@ -76,49 +73,50 @@ class CompileCommand extends BuildCommand { * Deploy functions as part of migration workflow */ async deployFunctions(options) { - this.progress('🚀 Starting Edge Functions deployment as part of migration'); + this.progress("🚀 Starting Edge Functions deployment as part of migration"); try { // Import the DeployCommand - const { DeployCommand } = require('../functions'); - + const { DeployCommand } = require("../functions"); + // Create a functions deployment command // Note: This will need to be refactored when functions are separated const deployCommand = new DeployCommand( - path.join(this.inputDir, '../functions'), + path.join(this.inputDir, "../functions"), this.logger, - this.isProd + 
this.isProd, ); - + // Forward events from the deploy command - deployCommand.on('progress', (event) => { + deployCommand.on("progress", (event) => { this.progress(`[Functions] ${event.message}`, event.data); }); - - deployCommand.on('function-deployed', (event) => { - this.emit('function-deployed', event); + + deployCommand.on("function-deployed", (event) => { + this.emit("function-deployed", event); }); - - deployCommand.on('deployment-complete', (event) => { - this.emit('functions-deployment-complete', event); + + deployCommand.on("deployment-complete", (event) => { + this.emit("functions-deployment-complete", event); }); // Execute functions deployment const deployOptions = { skipImportMap: options.skipImportMap || false, - debug: options.debug || false + debug: options.debug || false, }; await deployCommand.execute(options.functionsToDeploy, deployOptions); - - this.success('✅ Functions deployment completed as part of migration'); + this.success("✅ Functions deployment completed as part of migration"); } catch (error) { - this.error('Functions deployment failed during migration', error); - + this.error("Functions deployment failed during migration", error); + // Don't fail the entire migration for function deployment issues - this.warn('Migration compilation succeeded but function deployment failed'); - this.emit('functions-deployment-failed', { error }); + this.warn( + "Migration compilation succeeded but function deployment failed", + ); + this.emit("functions-deployment-failed", { error }); } } @@ -126,36 +124,36 @@ class CompileCommand extends BuildCommand { * Attach event listeners to the compiler */ attachCompilerEvents(compiler) { - compiler.on('start', ({ timestamp }) => { - this.logger.debug({ timestamp }, 'Compilation started'); + compiler.on("start", ({ timestamp }) => { + this.logger.debug({ timestamp }, "Compilation started"); }); - - compiler.on('directory:start', ({ directory }) => { + + compiler.on("directory:start", ({ directory }) => { this.progress(`Processing directory: ${directory}`); }); - - compiler.on('file:process', ({ file }) => { - this.logger.debug({ file }, 'Processing file'); - this.emit('file:process', { file }); + + compiler.on("file:process", ({ file }) => { + this.logger.debug({ file }, "Processing file"); + this.emit("file:process", { file }); }); - - compiler.on('file:complete', ({ file, lineCount }) => { - this.emit('file:complete', { file, lineCount }); + + compiler.on("file:complete", ({ file, lineCount }) => { + this.emit("file:complete", { file, lineCount }); }); - - compiler.on('complete', ({ result }) => { - this.logger.info({ stats: result.stats }, 'Compilation complete'); - this.emit('stats', { stats: result.stats }); + + compiler.on("complete", ({ result }) => { + this.logger.info({ stats: result.stats }, "Compilation complete"); + this.emit("stats", { stats: result.stats }); }); - - compiler.on('error', ({ error }) => { - this.error('Compiler error', error); + + compiler.on("error", ({ error }) => { + this.error("Compiler error", error); }); - - compiler.on('warning', ({ message }) => { + + compiler.on("warning", ({ message }) => { this.warn(message); }); } } -module.exports = CompileCommand; \ No newline at end of file +module.exports = CompileCommand; diff --git a/src/commands/db/MigrateCommand.js b/src/commands/db/MigrateCommand.js index 7f1f70b..cf45899 100644 --- a/src/commands/db/MigrateCommand.js +++ b/src/commands/db/MigrateCommand.js @@ -2,16 +2,16 @@ * Database Migration Management Command */ -const Command = 
require('../../lib/Command'); -const CommandRouter = require('../../lib/CommandRouter'); -const { z } = require('zod'); +const Command = require("../../lib/Command"); +const CommandRouter = require("../../lib/CommandRouter"); +const { z } = require("zod"); /** * Migration command that uses router pattern for subcommands */ class MigrateCommand extends Command { - static description = 'Database migration management commands'; - + static description = "Database migration management commands"; + constructor(config = null, logger = null, isProd = false) { super(config, logger, isProd); this.requiresProductionConfirmation = false; // Subcommands handle their own confirmation @@ -23,186 +23,233 @@ class MigrateCommand extends Command { */ setupRouter() { const router = new CommandRouter(); - + // Forward router events to this command - router.on('start', (data) => this.emit('start', data)); - router.on('progress', (data) => this.emit('progress', data)); - router.on('warning', (data) => this.emit('warning', data)); - router.on('error', (data) => this.emit('error', data)); - router.on('success', (data) => this.emit('success', data)); - router.on('complete', (data) => this.emit('complete', data)); - router.on('failed', (data) => this.emit('failed', data)); - router.on('cancelled', (data) => this.emit('cancelled', data)); - router.on('prompt', (data) => this.emit('prompt', data)); - + router.on("start", (data) => this.emit("start", data)); + router.on("progress", (data) => this.emit("progress", data)); + router.on("warning", (data) => this.emit("warning", data)); + router.on("error", (data) => this.emit("error", data)); + router.on("success", (data) => this.emit("success", data)); + router.on("complete", (data) => this.emit("complete", data)); + router.on("failed", (data) => this.emit("failed", data)); + router.on("cancelled", (data) => this.emit("cancelled", data)); + router.on("prompt", (data) => this.emit("prompt", data)); + // Pass config and logger to all handlers router.config = this.config; router.logger = this.logger; - + // Register generate command router - .command('migrate') - .subcommand('generate') - .description('Generate migration from schema diff') - .schema(z.object({ - name: z.string().optional().describe('Migration name'), - sqlDir: z.string().optional().describe('SQL source directory'), - migrationsDir: z.string().optional().describe('Migrations output directory'), - dryRun: CommandRouter.schemas.dryRun, - verbose: CommandRouter.schemas.verbose - })) + .command("migrate") + .subcommand("generate") + .description("Generate migration from schema diff") + .schema( + z.object({ + name: z.string().optional().describe("Migration name"), + sqlDir: z.string().optional().describe("SQL source directory"), + migrationsDir: z + .string() + .optional() + .describe("Migrations output directory"), + dryRun: CommandRouter.schemas.dryRun, + verbose: CommandRouter.schemas.verbose, + }), + ) .examples( - 'data db migrate generate', - 'data db migrate generate --name add-users-table', - 'data db migrate generate --dry-run' + "data db migrate generate", + "data db migrate generate --name add-users-table", + "data db migrate generate --dry-run", ) - .handler(require('./migrate/generate')); - - // Register test command + .handler(require("./migrate/generate")); + + // Register test command router - .command('migrate') - .subcommand('test') - .description('Test migration with pgTAP validation') - .schema(z.object({ - migration: z.string().optional().describe('Migration ID or "latest"'), - testsDir: 
z.string().optional().describe('Tests directory'), - verbose: CommandRouter.schemas.verbose, - coverage: z.boolean().default(false).describe('Generate coverage report') - })) + .command("migrate") + .subcommand("test") + .description("Test migration with pgTAP validation") + .schema( + z.object({ + migration: z.string().optional().describe('Migration ID or "latest"'), + testsDir: z.string().optional().describe("Tests directory"), + verbose: CommandRouter.schemas.verbose, + coverage: z + .boolean() + .default(false) + .describe("Generate coverage report"), + }), + ) .examples( - 'data db migrate test', - 'data db migrate test --migration latest', - 'data db migrate test --migration 20250829_001 --coverage' + "data db migrate test", + "data db migrate test --migration latest", + "data db migrate test --migration 20250829_001 --coverage", ) - .handler(require('./migrate/test-v2')); - + .handler(require("./migrate/test-v2")); + // Register promote command router - .command('migrate') - .subcommand('promote') - .description('Promote tested migration to production') - .schema(z.object({ - migration: z.string().optional().describe('Migration ID'), - prod: CommandRouter.schemas.prod, - force: CommandRouter.schemas.force, - skipValidation: z.boolean().default(false).describe('Skip validation checks') - })) + .command("migrate") + .subcommand("promote") + .description("Promote tested migration to production") + .schema( + z.object({ + migration: z.string().optional().describe("Migration ID"), + prod: CommandRouter.schemas.prod, + force: CommandRouter.schemas.force, + skipValidation: z + .boolean() + .default(false) + .describe("Skip validation checks"), + }), + ) .examples( - 'data db migrate promote --migration 20250829_001', - 'data db migrate promote --prod --force' + "data db migrate promote --migration 20250829_001", + "data db migrate promote --prod --force", ) - .handler(require('./migrate/promote')); - + .handler(require("./migrate/promote")); + // Register status command router - .command('migrate') - .subcommand('status') - .description('Show current migration status') - .schema(z.object({ - detailed: z.boolean().default(false).describe('Show detailed status'), - prod: CommandRouter.schemas.prod, - format: z.enum(['table', 'json', 'yaml']).default('table').describe('Output format') - })) + .command("migrate") + .subcommand("status") + .description("Show current migration status") + .schema( + z.object({ + detailed: z.boolean().default(false).describe("Show detailed status"), + prod: CommandRouter.schemas.prod, + format: z + .enum(["table", "json", "yaml"]) + .default("table") + .describe("Output format"), + }), + ) .examples( - 'data db migrate status', - 'data db migrate status --detailed', - 'data db migrate status --prod --format json' + "data db migrate status", + "data db migrate status --detailed", + "data db migrate status --prod --format json", ) - .handler(require('./migrate/status')); - + .handler(require("./migrate/status")); + // Register rollback command router - .command('migrate') - .subcommand('rollback') - .description('Rollback migration to previous state') - .schema(z.object({ - migration: z.string().optional().describe('Migration to rollback'), - to: z.string().optional().describe('Rollback to specific migration'), - prod: CommandRouter.schemas.prod, - force: CommandRouter.schemas.force, - dryRun: CommandRouter.schemas.dryRun - })) + .command("migrate") + .subcommand("rollback") + .description("Rollback migration to previous state") + .schema( + z.object({ + migration: 
z.string().optional().describe("Migration to rollback"), + to: z.string().optional().describe("Rollback to specific migration"), + prod: CommandRouter.schemas.prod, + force: CommandRouter.schemas.force, + dryRun: CommandRouter.schemas.dryRun, + }), + ) .examples( - 'data db migrate rollback', - 'data db migrate rollback --to 20250828_003', - 'data db migrate rollback --prod --force' + "data db migrate rollback", + "data db migrate rollback --to 20250828_003", + "data db migrate rollback --prod --force", ) - .handler(require('./migrate/rollback')); - + .handler(require("./migrate/rollback")); + // Register clean command router - .command('migrate') - .subcommand('clean') - .description('Clean up temporary migration files') - .schema(z.object({ - all: z.boolean().default(false).describe('Clean all temporary files'), - failed: z.boolean().default(false).describe('Clean only failed migrations'), - older: z.number().optional().describe('Clean migrations older than N days'), - dryRun: CommandRouter.schemas.dryRun - })) + .command("migrate") + .subcommand("clean") + .description("Clean up temporary migration files") + .schema( + z.object({ + all: z.boolean().default(false).describe("Clean all temporary files"), + failed: z + .boolean() + .default(false) + .describe("Clean only failed migrations"), + older: z + .number() + .optional() + .describe("Clean migrations older than N days"), + dryRun: CommandRouter.schemas.dryRun, + }), + ) .examples( - 'data db migrate clean', - 'data db migrate clean --all', - 'data db migrate clean --older 30 --dry-run' + "data db migrate clean", + "data db migrate clean --all", + "data db migrate clean --older 30 --dry-run", ) - .handler(require('./migrate/clean')); - + .handler(require("./migrate/clean")); + // Register history command router - .command('migrate') - .subcommand('history') - .description('Show migration history and timeline') - .schema(z.object({ - limit: z.number().int().min(1).default(10).describe('Number of entries to show'), - from: z.string().optional().describe('Start date (YYYY-MM-DD)'), - to: z.string().optional().describe('End date (YYYY-MM-DD)'), - prod: CommandRouter.schemas.prod, - format: z.enum(['table', 'json', 'timeline']).default('table').describe('Output format') - })) + .command("migrate") + .subcommand("history") + .description("Show migration history and timeline") + .schema( + z.object({ + limit: z + .number() + .int() + .min(1) + .default(10) + .describe("Number of entries to show"), + from: z.string().optional().describe("Start date (YYYY-MM-DD)"), + to: z.string().optional().describe("End date (YYYY-MM-DD)"), + prod: CommandRouter.schemas.prod, + format: z + .enum(["table", "json", "timeline"]) + .default("table") + .describe("Output format"), + }), + ) .examples( - 'data db migrate history', - 'data db migrate history --limit 20', - 'data db migrate history --from 2025-01-01 --format timeline' + "data db migrate history", + "data db migrate history --limit 20", + "data db migrate history --from 2025-01-01 --format timeline", ) - .handler(require('./migrate/history')); - + .handler(require("./migrate/history")); + // Register verify command router - .command('migrate') - .subcommand('verify') - .description('Verify migration integrity') - .schema(z.object({ - migration: z.string().optional().describe('Migration to verify'), - all: z.boolean().default(false).describe('Verify all migrations'), - checksums: z.boolean().default(true).describe('Verify checksums'), - prod: CommandRouter.schemas.prod - })) + .command("migrate") + 
.subcommand("verify") + .description("Verify migration integrity") + .schema( + z.object({ + migration: z.string().optional().describe("Migration to verify"), + all: z.boolean().default(false).describe("Verify all migrations"), + checksums: z.boolean().default(true).describe("Verify checksums"), + prod: CommandRouter.schemas.prod, + }), + ) .examples( - 'data db migrate verify', - 'data db migrate verify --migration 20250829_001', - 'data db migrate verify --all --prod' + "data db migrate verify", + "data db migrate verify --migration 20250829_001", + "data db migrate verify --all --prod", ) - .handler(require('./migrate/verify')); - + .handler(require("./migrate/verify")); + // Register squash command router - .command('migrate') - .subcommand('squash') - .description('Squash multiple migrations into one') - .schema(z.object({ - from: z.string().optional().describe('Starting migration'), - to: z.string().optional().describe('Ending migration'), - name: z.string().optional().describe('Name for squashed migration'), - keepOriginals: z.boolean().default(false).describe('Keep original migration files'), - dryRun: CommandRouter.schemas.dryRun - })) + .command("migrate") + .subcommand("squash") + .description("Squash multiple migrations into one") + .schema( + z.object({ + from: z.string().optional().describe("Starting migration"), + to: z.string().optional().describe("Ending migration"), + name: z.string().optional().describe("Name for squashed migration"), + keepOriginals: z + .boolean() + .default(false) + .describe("Keep original migration files"), + dryRun: CommandRouter.schemas.dryRun, + }), + ) .examples( - 'data db migrate squash --from 20250801_001 --to 20250810_005', - 'data db migrate squash --name initial-schema', - 'data db migrate squash --dry-run' + "data db migrate squash --from 20250801_001 --to 20250810_005", + "data db migrate squash --name initial-schema", + "data db migrate squash --dry-run", ) - .handler(require('./migrate/squash')); - + .handler(require("./migrate/squash")); + return router; } @@ -210,89 +257,90 @@ class MigrateCommand extends Command { * Execute migration command with router-based subcommand handling */ async performExecute(args = {}) { - this.emit('start', { isProd: this.isProd }); - + this.emit("start", { isProd: this.isProd }); + try { // Get subcommand from arguments const subcommand = args._?.[0] || args.subcommand; - + if (!subcommand) { this.showHelp(); - this.emit('complete', { action: 'help' }); + this.emit("complete", { action: "help" }); return; } - + // Build the command path for the router const commandPath = `migrate/${subcommand}`; - + // Let the router handle it this.progress(`Executing migration command: ${subcommand}`); const result = await this.router.execute(commandPath, args); - + // Don't emit complete if help was shown if (!result?.help) { - this.emit('complete', { subcommand }); + this.emit("complete", { subcommand }); } - + return result; - } catch (error) { // Check if it's an unknown command - if (error.message.includes('No handler registered')) { + if (error.message.includes("No handler registered")) { const subcommand = args._?.[0] || args.subcommand; this.error(`Unknown migration command: ${subcommand}`); this.showAvailableCommands(); - this.emit('failed', { error: `Invalid subcommand: ${subcommand}` }); + this.emit("failed", { error: `Invalid subcommand: ${subcommand}` }); } else { - this.error('Migration command failed', error); - this.emit('failed', { error }); + this.error("Migration command failed", error); + 
this.emit("failed", { error }); } throw error; } } - + /** * Display help text for migration commands */ showHelp() { - console.log('Usage: data db migrate [options]'); - console.log(''); - console.log('Database migration management commands'); - console.log(''); - console.log('Commands:'); - + console.log("Usage: data db migrate [options]"); + console.log(""); + console.log("Database migration management commands"); + console.log(""); + console.log("Commands:"); + // Get all registered routes from the router const routes = this.router.getRoutes(); for (const route of routes) { - const [, subcommand] = route.path.split('/'); - const description = route.description || ''; + const [, subcommand] = route.path.split("/"); + const description = route.description || ""; console.log(` ${subcommand.padEnd(10)} - ${description}`); } - - console.log(''); - console.log('Run "data db migrate --help" for command-specific help'); - console.log(''); - console.log('Examples:'); - console.log(' data db migrate generate'); - console.log(' data db migrate test --migration latest'); - console.log(' data db migrate promote --migration 20250829_001'); - console.log(' data db migrate status'); + + console.log(""); + console.log( + 'Run "data db migrate --help" for command-specific help', + ); + console.log(""); + console.log("Examples:"); + console.log(" data db migrate generate"); + console.log(" data db migrate test --migration latest"); + console.log(" data db migrate promote --migration 20250829_001"); + console.log(" data db migrate status"); } - + /** * Show available commands when invalid command provided */ showAvailableCommands() { - console.log('Available migration commands:'); - + console.log("Available migration commands:"); + const routes = this.router.getRoutes(); for (const route of routes) { - const [, subcommand] = route.path.split('/'); + const [, subcommand] = route.path.split("/"); console.log(` ${subcommand}`); } - + console.log('\nUse "data db migrate --help" for more information.'); } } -module.exports = MigrateCommand; \ No newline at end of file +module.exports = MigrateCommand; diff --git a/src/commands/db/QueryCommand.js b/src/commands/db/QueryCommand.js index 547c137..44f7f0d 100644 --- a/src/commands/db/QueryCommand.js +++ b/src/commands/db/QueryCommand.js @@ -2,15 +2,21 @@ * Database Query Command */ -const fs = require('fs').promises; -const { Client } = require('pg'); -const DatabaseCommand = require('../../lib/DatabaseCommand'); +const fs = require("fs").promises; +const { Client } = require("pg"); +const DatabaseCommand = require("../../lib/DatabaseCommand"); /** * Execute SQL queries against the database */ class QueryCommand extends DatabaseCommand { - constructor(databaseUrl, serviceRoleKey = null, anonKey = null, logger = null, isProd = false) { + constructor( + databaseUrl, + serviceRoleKey = null, + anonKey = null, + logger = null, + isProd = false, + ) { // Query command modifies data, so requires confirmation super(databaseUrl, serviceRoleKey, anonKey, logger, isProd, true); this.sql = null; @@ -23,19 +29,20 @@ class QueryCommand extends DatabaseCommand { async confirmProduction() { // Get SQL content first const sqlContent = await this.getSqlContent(this.sql, this.isFile); - + // If not destructive, skip confirmation if (!this.isDestructive(sqlContent)) { return true; } - + // Show warning for destructive query - this.warn('Potentially destructive query detected in production!', { - query: sqlContent.substring(0, 200) + (sqlContent.length > 200 ? '...' 
: '') + this.warn("Potentially destructive query detected in production!", { + query: + sqlContent.substring(0, 200) + (sqlContent.length > 200 ? "..." : ""), }); - + return await this.confirm( - 'Are you sure you want to execute this query in PRODUCTION?' + "Are you sure you want to execute this query in PRODUCTION?", ); } @@ -45,21 +52,23 @@ class QueryCommand extends DatabaseCommand { async performExecute(sql, isFile = false) { this.sql = sql; this.isFile = isFile; - this.emit('start', { isProd: this.isProd, isFile }); - + this.emit("start", { isProd: this.isProd, isFile }); + try { // Get SQL content const sqlContent = await this.getSqlContent(sql, isFile); - + // Execute query const result = await this.executeQuery(sqlContent); - this.emit('result', { result }); - this.success(`Query executed successfully (${result.rowCount} rows affected)`); - this.emit('complete', { result }); + this.emit("result", { result }); + this.success( + `Query executed successfully (${result.rowCount} rows affected)`, + ); + this.emit("complete", { result }); return result; } catch (error) { - this.error('Query execution failed', error); - this.emit('failed', { error }); + this.error("Query execution failed", error); + this.emit("failed", { error }); throw error; } } @@ -70,7 +79,7 @@ class QueryCommand extends DatabaseCommand { async getSqlContent(sql, isFile) { if (isFile) { this.progress(`Reading SQL from file: ${sql}`); - return await fs.readFile(sql, 'utf8'); + return await fs.readFile(sql, "utf8"); } return sql; } @@ -84,10 +93,10 @@ class QueryCommand extends DatabaseCommand { /\bDELETE\s+FROM/i, /\bTRUNCATE\s+/i, /\bALTER\s+TABLE\s+.*\s+DROP/i, - /\bUPDATE\s+.*\s+SET/i + /\bUPDATE\s+.*\s+SET/i, ]; - - return destructivePatterns.some(pattern => pattern.test(sql)); + + return destructivePatterns.some((pattern) => pattern.test(sql)); } /** @@ -95,29 +104,34 @@ class QueryCommand extends DatabaseCommand { */ async executeQuery(sql) { const env = this.config.getEnvironment(this.isProd); - + if (!env.db) { - throw new Error(`Database connection string not configured for ${this.isProd ? 'production' : 'local'} environment`); + throw new Error( + `Database connection string not configured for ${this.isProd ? 
"production" : "local"} environment`, + ); } - + const client = new Client({ - connectionString: env.db + connectionString: env.db, }); - + try { - this.progress('Connecting to database...'); + this.progress("Connecting to database..."); await client.connect(); - - this.progress('Executing query...'); + + this.progress("Executing query..."); const result = await client.query(sql); - + // Log result details - this.logger.debug({ - rowCount: result.rowCount, - fields: result.fields?.map(f => f.name), - command: result.command - }, 'Query executed'); - + this.logger.debug( + { + rowCount: result.rowCount, + fields: result.fields?.map((f) => f.name), + command: result.command, + }, + "Query executed", + ); + return result; } finally { await client.end(); @@ -125,4 +139,4 @@ class QueryCommand extends DatabaseCommand { } } -module.exports = QueryCommand; \ No newline at end of file +module.exports = QueryCommand; diff --git a/src/commands/db/ResetCommand.js b/src/commands/db/ResetCommand.js index f7fff77..a69ef26 100644 --- a/src/commands/db/ResetCommand.js +++ b/src/commands/db/ResetCommand.js @@ -2,9 +2,9 @@ * Database Reset Command */ -const { exec } = require('child_process'); -const { promisify } = require('util'); -const DatabaseCommand = require('../../lib/DatabaseCommand'); +const { exec } = require("child_process"); +const { promisify } = require("util"); +const DatabaseCommand = require("../../lib/DatabaseCommand"); const execAsync = promisify(exec); @@ -12,7 +12,13 @@ const execAsync = promisify(exec); * Reset database command */ class ResetCommand extends DatabaseCommand { - constructor(databaseUrl, serviceRoleKey = null, anonKey = null, logger = null, isProd = false) { + constructor( + databaseUrl, + serviceRoleKey = null, + anonKey = null, + logger = null, + isProd = false, + ) { // Reset command is destructive, always requires confirmation super(databaseUrl, serviceRoleKey, anonKey, logger, isProd, true); } @@ -21,76 +27,78 @@ class ResetCommand extends DatabaseCommand { * Override production confirmation for database reset (needs double confirmation) */ async confirmProduction() { - this.warn('Production database reset requested!', { + this.warn("Production database reset requested!", { actions: [ - 'Drop all existing data', - 'Recreate schema from migrations', - 'Run seed files (if any)' - ] + "Drop all existing data", + "Recreate schema from migrations", + "Run seed files (if any)", + ], }); - + // First confirmation const confirm = await this.confirm( - 'Are you absolutely sure you want to reset the PRODUCTION database?' + "Are you absolutely sure you want to reset the PRODUCTION database?", ); - + if (!confirm) { return false; } - + // Double confirmation for production const doubleConfirm = await this.input( 'Type "RESET PRODUCTION" to confirm:', { validate: (input) => { - return input === 'RESET PRODUCTION' ? true : 'Please type exactly: RESET PRODUCTION'; - } - } + return input === "RESET PRODUCTION" + ? 
true + : "Please type exactly: RESET PRODUCTION"; + }, + }, ); - - return doubleConfirm === 'RESET PRODUCTION'; + + return doubleConfirm === "RESET PRODUCTION"; } /** * Perform the actual database reset */ async performExecute() { - this.emit('start', { isProd: this.isProd }); - + this.emit("start", { isProd: this.isProd }); + try { - this.progress('Resetting database...'); - + this.progress("Resetting database..."); + // Change to supabase directory const supabaseDir = this.outputConfig.supabaseDir; - + // Run the reset command - const { stdout, stderr } = await execAsync('npm run reset', { + const { stdout, stderr } = await execAsync("npm run reset", { cwd: supabaseDir, - env: { - ...process.env, // Use process.env if config.envVars is not available + env: { + ...process.env, // Use process.env if config.envVars is not available ...(this.config?.envVars || {}), - NODE_ENV: this.isProd ? 'production' : 'development' - } + NODE_ENV: this.isProd ? "production" : "development", + }, }); - + // Process output - if (stderr && !stderr.includes('warning')) { - this.warn('Reset command produced stderr output', { stderr }); + if (stderr && !stderr.includes("warning")) { + this.warn("Reset command produced stderr output", { stderr }); } - + if (stdout) { - this.emit('output', { stdout }); - this.logger.debug({ stdout }, 'Reset command output'); + this.emit("output", { stdout }); + this.logger.debug({ stdout }, "Reset command output"); } - - this.success('Database reset complete'); - this.emit('complete', { isProd: this.isProd }); + + this.success("Database reset complete"); + this.emit("complete", { isProd: this.isProd }); } catch (error) { - this.error('Database reset failed', error); - this.emit('failed', { error }); + this.error("Database reset failed", error); + this.emit("failed", { error }); throw error; } } } -module.exports = ResetCommand; \ No newline at end of file +module.exports = ResetCommand; diff --git a/src/commands/db/index.js b/src/commands/db/index.js index 069edce..baa2467 100644 --- a/src/commands/db/index.js +++ b/src/commands/db/index.js @@ -2,14 +2,14 @@ * Database Commands for data CLI */ -const ResetCommand = require('./ResetCommand'); -const QueryCommand = require('./QueryCommand'); -const CompileCommand = require('./CompileCommand'); -const MigrateCommand = require('./MigrateCommand'); +const ResetCommand = require("./ResetCommand"); +const QueryCommand = require("./QueryCommand"); +const CompileCommand = require("./CompileCommand"); +const MigrateCommand = require("./MigrateCommand"); module.exports = { ResetCommand, QueryCommand, CompileCommand, - MigrateCommand -}; \ No newline at end of file + MigrateCommand, +}; diff --git a/src/commands/db/migrate/clean.js b/src/commands/db/migrate/clean.js index c0990d3..93c2eac 100644 --- a/src/commands/db/migrate/clean.js +++ b/src/commands/db/migrate/clean.js @@ -2,16 +2,16 @@ * Migration Clean Command */ -const Command = require('../../../lib/Command'); -const fs = require('fs').promises; -const path = require('path'); +const Command = require("../../../lib/Command"); +const fs = require("fs").promises; +const path = require("path"); /** * Clean up temporary migration files and staging directories */ class MigrateCleanCommand extends Command { - static description = 'Clean up temporary migration files'; - + static description = "Clean up temporary migration files"; + constructor(config = null, logger = null, isProd = false) { super(config, logger, isProd); this.requiresProductionConfirmation = false; // Safe cleanup operation 
@@ -21,266 +21,307 @@ class MigrateCleanCommand extends Command { * Execute migration cleanup */ async performExecute(args = {}) { - this.emit('start'); - + this.emit("start"); + try { const force = args.force || args.f || false; const verbose = args.verbose || args.v || false; - - this.progress('Starting migration cleanup...'); - + + this.progress("Starting migration cleanup..."); + let cleanedItems = 0; let totalSize = 0; - + // Clean staging directories const stagingResult = await this.cleanStagingDirectory(force, verbose); cleanedItems += stagingResult.items; totalSize += stagingResult.size; - + // Clean temporary databases const tempDbResult = await this.cleanTemporaryDatabases(force, verbose); cleanedItems += tempDbResult.items; totalSize += tempDbResult.size; - + // Clean backup files (older than 30 days) const backupResult = await this.cleanOldBackups(force, verbose); cleanedItems += backupResult.items; totalSize += backupResult.size; - + // Clean log files const logResult = await this.cleanLogFiles(force, verbose); cleanedItems += logResult.items; totalSize += logResult.size; - + // Display results this.displayCleanupResults(cleanedItems, totalSize); - - this.emit('complete', { - cleanedItems, - totalSize: this.formatBytes(totalSize) + + this.emit("complete", { + cleanedItems, + totalSize: this.formatBytes(totalSize), }); - } catch (error) { - this.error('Migration cleanup failed', error); - this.emit('failed', { error }); - throw error; + // --- NEW & IMPROVED ERROR HANDLING --- + this.error("Migration cleanup failed", error); + this.emit("failed", { error }); + + console.error("\n❌ Failed to clean migration files."); + console.error(`📌 Reason: ${error.message}`); + + // Provide actionable advice based on common error types for file operations + if (error.code === "EACCES") { + console.error( + "💡 Tip: Permission denied. You may need to run this command with sudo/administrator privileges to delete files.", + ); + } else if (error.code === "EBUSY") { + console.error( + "💡 Tip: A file is in use by another process. Close any programs that might be using the migration files.", + ); + } else if (error.code === "EPERM") { + console.error( + "💡 Tip: Operation not permitted. 
This could be a permissions issue or the file might be read-only.", + ); + } + + console.error(""); // Add a blank line for readability + process.exit(1); } } - + /** * Clean staging directory */ async cleanStagingDirectory(force, verbose) { let items = 0; let size = 0; - + try { - const stagingDir = path.resolve('supabase/.staging'); - const stagingExists = await fs.access(stagingDir).then(() => true).catch(() => false); - + const stagingDir = path.resolve("supabase/.staging"); + const stagingExists = await fs + .access(stagingDir) + .then(() => true) + .catch(() => false); + if (!stagingExists) { - if (verbose) this.progress('Staging directory not found, skipping...'); + if (verbose) this.progress("Staging directory not found, skipping..."); return { items, size }; } - + const files = await fs.readdir(stagingDir); - - await Promise.all(files.map(async file => { - const filePath = path.join(stagingDir, file); - const stats = await fs.stat(filePath); - - if (force || await this.shouldCleanFile(filePath, stats)) { - size += stats.size; - await fs.unlink(filePath); - items++; - - if (verbose) { - this.progress(`Cleaned: ${file} (${this.formatBytes(stats.size)})`); + + await Promise.all( + files.map(async (file) => { + const filePath = path.join(stagingDir, file); + const stats = await fs.stat(filePath); + + if (force || (await this.shouldCleanFile(filePath, stats))) { + size += stats.size; + await fs.unlink(filePath); + items++; + + if (verbose) { + this.progress( + `Cleaned: ${file} (${this.formatBytes(stats.size)})`, + ); + } } - } - })); - + }), + ); + // Remove directory if empty const remainingFiles = await fs.readdir(stagingDir); if (remainingFiles.length === 0) { await fs.rmdir(stagingDir); - if (verbose) this.progress('Removed empty staging directory'); + if (verbose) this.progress("Removed empty staging directory"); } - } catch (error) { - this.warn('Could not clean staging directory', { error: error.message }); + this.warn("Could not clean staging directory", { error: error.message }); } - + return { items, size }; } - + /** * Clean temporary databases */ async cleanTemporaryDatabases(force, verbose) { let items = 0; let size = 0; - + try { - const tempDbDir = path.resolve('supabase/.temp_dbs'); - const tempDbExists = await fs.access(tempDbDir).then(() => true).catch(() => false); - + const tempDbDir = path.resolve("supabase/.temp_dbs"); + const tempDbExists = await fs + .access(tempDbDir) + .then(() => true) + .catch(() => false); + if (!tempDbExists) { - if (verbose) this.progress('Temp databases directory not found, skipping...'); + if (verbose) + this.progress("Temp databases directory not found, skipping..."); return { items, size }; } - + const files = await fs.readdir(tempDbDir); - + for (const file of files) { - if (file.startsWith('test_') || file.startsWith('temp_')) { + if (file.startsWith("test_") || file.startsWith("temp_")) { const filePath = path.join(tempDbDir, file); const stats = await fs.stat(filePath); - + size += stats.size; await fs.unlink(filePath); items++; - + if (verbose) { - this.progress(`Cleaned temp DB: ${file} (${this.formatBytes(stats.size)})`); + this.progress( + `Cleaned temp DB: ${file} (${this.formatBytes(stats.size)})`, + ); } } } - } catch (error) { - this.warn('Could not clean temporary databases', { error: error.message }); + this.warn("Could not clean temporary databases", { + error: error.message, + }); } - + return { items, size }; } - + /** * Clean old backup files */ async cleanOldBackups(force, verbose) { let items = 0; let size = 0; - 
+ try { - const backupDir = path.resolve('supabase/.rollbacks'); - const backupExists = await fs.access(backupDir).then(() => true).catch(() => false); - + const backupDir = path.resolve("supabase/.rollbacks"); + const backupExists = await fs + .access(backupDir) + .then(() => true) + .catch(() => false); + if (!backupExists) { - if (verbose) this.progress('Backup directory not found, skipping...'); + if (verbose) this.progress("Backup directory not found, skipping..."); return { items, size }; } - + const files = await fs.readdir(backupDir); - const thirtyDaysAgo = Date.now() - (30 * 24 * 60 * 60 * 1000); - + const thirtyDaysAgo = Date.now() - 30 * 24 * 60 * 60 * 1000; + for (const file of files) { const filePath = path.join(backupDir, file); const stats = await fs.stat(filePath); - + if (force || stats.mtime.getTime() < thirtyDaysAgo) { size += stats.size; await fs.unlink(filePath); items++; - + if (verbose) { - this.progress(`Cleaned old backup: ${file} (${this.formatBytes(stats.size)})`); + this.progress( + `Cleaned old backup: ${file} (${this.formatBytes(stats.size)})`, + ); } } } - } catch (error) { - this.warn('Could not clean backup files', { error: error.message }); + this.warn("Could not clean backup files", { error: error.message }); } - + return { items, size }; } - + /** * Clean log files */ async cleanLogFiles(force, verbose) { let items = 0; let size = 0; - + try { const logPatterns = [ - 'supabase/.logs/**/*.log', - 'supabase/logs/**/*.log', - '*.log' + "supabase/.logs/**/*.log", + "supabase/logs/**/*.log", + "*.log", ]; - + // This is a simplified implementation // In a real system, would use glob patterns to find log files const possibleLogFiles = [ - 'supabase/migration.log', - 'supabase/error.log', - 'data.log' + "supabase/migration.log", + "supabase/error.log", + "data.log", ]; - + for (const logFile of possibleLogFiles) { try { const filePath = path.resolve(logFile); const stats = await fs.stat(filePath); - - if (force || stats.size > 10 * 1024 * 1024) { // > 10MB + + if (force || stats.size > 10 * 1024 * 1024) { + // > 10MB size += stats.size; await fs.unlink(filePath); items++; - + if (verbose) { - this.progress(`Cleaned log: ${logFile} (${this.formatBytes(stats.size)})`); + this.progress( + `Cleaned log: ${logFile} (${this.formatBytes(stats.size)})`, + ); } } } catch (error) { // File doesn't exist, skip } } - } catch (error) { - this.warn('Could not clean log files', { error: error.message }); + this.warn("Could not clean log files", { error: error.message }); } - + return { items, size }; } - + /** * Check if file should be cleaned based on age and other criteria */ async shouldCleanFile(filePath, stats) { // Clean files older than 24 hours - const twentyFourHoursAgo = Date.now() - (24 * 60 * 60 * 1000); + const twentyFourHoursAgo = Date.now() - 24 * 60 * 60 * 1000; return stats.mtime.getTime() < twentyFourHoursAgo; } - + /** * Display cleanup results */ displayCleanupResults(cleanedItems, totalSize) { - console.log('\n🧹 Migration Cleanup Results'); - console.log('═══════════════════════════\n'); + console.log("\n🧹 Migration Cleanup Results"); + console.log("═══════════════════════════\n"); console.log(`Files cleaned: ${cleanedItems}`); console.log(`Space freed: ${this.formatBytes(totalSize)}`); - + if (cleanedItems === 0) { - console.log('\n✨ Nothing to clean - your migration workspace is already tidy!'); + console.log( + "\n✨ Nothing to clean - your migration workspace is already tidy!", + ); } else { - console.log('\n✅ Cleanup completed successfully'); + 
console.log("\n✅ Cleanup completed successfully"); } - console.log(''); + console.log(""); } - + /** * Format bytes to human readable string */ formatBytes(bytes) { - if (bytes === 0) return '0 B'; - + if (bytes === 0) return "0 B"; + const k = 1024; - const sizes = ['B', 'KB', 'MB', 'GB']; + const sizes = ["B", "KB", "MB", "GB"]; const i = Math.floor(Math.log(bytes) / Math.log(k)); - - return parseFloat((bytes / Math.pow(k, i)).toFixed(2)) + ' ' + sizes[i]; + + return parseFloat((bytes / Math.pow(k, i)).toFixed(2)) + " " + sizes[i]; } } -module.exports = MigrateCleanCommand; \ No newline at end of file +module.exports = MigrateCleanCommand; diff --git a/src/commands/db/migrate/generate.js b/src/commands/db/migrate/generate.js index 75030dc..e23c2c9 100644 --- a/src/commands/db/migrate/generate.js +++ b/src/commands/db/migrate/generate.js @@ -1,25 +1,25 @@ -const Command = require('../../../lib/Command'); -const MigrationMetadata = require('../../../lib/MigrationMetadata'); -const fs = require('fs').promises; -const path = require('path'); +const Command = require("../../../lib/Command"); +const MigrationMetadata = require("../../../lib/MigrationMetadata"); +const fs = require("fs").promises; +const path = require("path"); /** * MigrateGenerateCommand - Generate migration from schema diff - * + * * Creates a new migration by comparing current database state with desired state * from compiled source SQL files. Uses DiffEngine for schema analysis. - * + * * Options: * --name Migration name (required) - * --skip-compile Skip source compilation step + * --skip-compile Skip source compilation step * --dry-run Show diff without saving migration * --current-db Current database URL (defaults to local) * --desired-db Desired database URL (defaults to compiled SQL) */ class MigrateGenerateCommand extends Command { - static description = 'Generate migration from schema diff'; + static description = "Generate migration from schema diff"; static requiresConfirmation = false; // Generation is safe operation - + constructor(config = null, logger = null, isProd = false) { super(config, logger, isProd); this.requiresProductionConfirmation = false; // Safe in production @@ -32,16 +32,18 @@ class MigrateGenerateCommand extends Command { try { // Parse command line options const options = this.parseOptions(args); - - this.progress('Starting migration generation', { + + this.progress("Starting migration generation", { migrationName: options.name, dryRun: options.dryRun, - skipCompile: options.skipCompile + skipCompile: options.skipCompile, }); // Validate required options if (!options.name) { - throw new Error('Migration name is required. Use --name '); + throw new Error( + "Migration name is required. 
Use --name ", + ); } // Generate the migration @@ -53,23 +55,54 @@ class MigrateGenerateCommand extends Command { } else { // Save migration to staging directory const migrationPath = await this.saveToStaging(migration, options.name); - - this.success('Migration generated successfully', { + + this.success("Migration generated successfully", { migrationName: options.name, path: migrationPath, hasDifferences: migration.hasDifferences, - statementCount: migration.statements.length + statementCount: migration.statements.length, }); } return migration; - } catch (error) { - this.error('Failed to generate migration', error, { - operation: 'generate', - args: args + // --- NEW & IMPROVED ERROR HANDLING --- + this.error("Failed to generate migration", error, { + operation: "generate", + args: args, }); - throw error; + + console.error("\n❌ Failed to generate migration."); + console.error(`📌 Reason: ${error.message}`); + + // Provide actionable advice based on error type + if (error.message.includes("--name requires a value")) { + console.error( + '💡 Tip: Provide a migration name: --name "your_migration_name"', + ); + } else if (error.message.includes("Unknown option")) { + console.error( + "💡 Tip: Check your command syntax. Use --help for available options.", + ); + } else if (error.code === "ENOENT") { + console.error( + "💡 Tip: Required directory does not exist. Run `data init` first.", + ); + } else if (error.code === "EACCES") { + console.error( + "💡 Tip: Permission denied. Check write access to the migrations directory.", + ); + } else if ( + error.message.includes("connection") || + error.message.includes("database") + ) { + console.error( + "💡 Tip: Check your database connection. Is your local Supabase instance running?", + ); + } + + console.error(""); // Add a blank line for readability + process.exit(1); } } @@ -78,7 +111,7 @@ class MigrateGenerateCommand extends Command { */ async generateMigration(name, options) { try { - this.progress('Generating migration (MVP version)'); + this.progress("Generating migration (MVP version)"); // MVP implementation: Create a placeholder migration for testing // This will be enhanced later with actual DiffEngine integration @@ -94,9 +127,12 @@ CREATE TABLE IF NOT EXISTS example_table ( INSERT INTO example_table (name) VALUES ('test_data'); `; - this.progress('Generated placeholder migration SQL'); + this.progress("Generated placeholder migration SQL"); - const migrationSql = this.generateMigrationHeader(name, { stats: { filesProcessed: 1 } }) + '\n' + placeholder; + const migrationSql = + this.generateMigrationHeader(name, { stats: { filesProcessed: 1 } }) + + "\n" + + placeholder; return { name, @@ -106,14 +142,13 @@ INSERT INTO example_table (name) VALUES ('test_data'); metadata: { sourceFilesCompiled: 1, generationTimeMs: 0, - generatedAt: new Date() + generatedAt: new Date(), }, - generatedAt: new Date().toISOString() + generatedAt: new Date().toISOString(), }; - } catch (error) { - this.error('Failed to generate migration', error, { - migrationName: name + this.error("Failed to generate migration", error, { + migrationName: name, }); throw error; } @@ -126,47 +161,46 @@ INSERT INTO example_table (name) VALUES ('test_data'); try { // Get staging directory path const stagingDir = this.getStagingDirectory(); - const migrationDir = path.join(stagingDir, 'current'); + const migrationDir = path.join(stagingDir, "current"); - this.progress('Saving migration to staging', { + this.progress("Saving migration to staging", { migrationName: name, - stagingDir: 
migrationDir + stagingDir: migrationDir, }); // Ensure staging directory exists await fs.mkdir(migrationDir, { recursive: true }); // Write migration SQL - const migrationFile = path.join(migrationDir, 'migration.sql'); - await fs.writeFile(migrationFile, migration.migrationSql, 'utf8'); + const migrationFile = path.join(migrationDir, "migration.sql"); + await fs.writeFile(migrationFile, migration.migrationSql, "utf8"); // Create and write metadata const migrationId = this.generateMigrationId(name); const metadata = MigrationMetadata.createDefault(migrationId, name); - + // Add generation details to metadata metadata.generation = { generated_at: migration.generatedAt, has_differences: migration.hasDifferences, statement_count: migration.statements.length, source_files_compiled: migration.metadata.sourceFilesCompiled || 0, - generation_time_ms: migration.metadata.generationTimeMs || 0 + generation_time_ms: migration.metadata.generationTimeMs || 0, }; const metadataHandler = new MigrationMetadata(migrationDir); metadataHandler.write(metadata); - this.progress('Migration saved to staging', { + this.progress("Migration saved to staging", { migrationFile, metadataFile: metadataHandler.metadataFile, - migrationId + migrationId, }); return migrationDir; - } catch (error) { - this.error('Failed to save migration to staging', error, { - migrationName: name + this.error("Failed to save migration to staging", error, { + migrationName: name, }); throw error; } @@ -176,20 +210,20 @@ INSERT INTO example_table (name) VALUES ('test_data'); * Display dry run results */ async displayDryRun(migration) { - this.progress('='.repeat(60)); + this.progress("=".repeat(60)); this.progress(`DRY RUN: Migration "${migration.name}"`); - this.progress('='.repeat(60)); - + this.progress("=".repeat(60)); + if (migration.hasDifferences) { this.progress(`Found ${migration.statements.length} schema differences:`); - this.progress(''); + this.progress(""); this.progress(migration.migrationSql); } else { - this.progress('No schema differences detected.'); + this.progress("No schema differences detected."); } - - this.progress('='.repeat(60)); - this.progress('Dry run complete - no files were created'); + + this.progress("=".repeat(60)); + this.progress("Dry run complete - no files were created"); } /** @@ -198,7 +232,7 @@ INSERT INTO example_table (name) VALUES ('test_data'); generateMigrationHeader(name, compileResult) { const timestamp = new Date().toISOString(); const sourceFiles = compileResult?.stats?.filesProcessed || 0; - + return `-- ========================================================================= -- MIGRATION: ${name} - Generated by data CLI -- ========================================================================= @@ -219,10 +253,10 @@ INSERT INTO example_table (name) VALUES ('test_data'); parseSqlStatements(sql) { // Simple parsing - split on semicolons and filter return sql - .split(';') - .map(stmt => stmt.trim()) - .filter(stmt => stmt.length > 0 && !stmt.startsWith('--')) - .map(stmt => stmt + ';'); + .split(";") + .map((stmt) => stmt.trim()) + .filter((stmt) => stmt.length > 0 && !stmt.startsWith("--")) + .map((stmt) => stmt + ";"); } /** @@ -234,44 +268,44 @@ INSERT INTO example_table (name) VALUES ('test_data'); dryRun: false, skipCompile: false, currentDb: null, - desiredDb: null + desiredDb: null, }; for (let i = 0; i < args.length; i++) { const arg = args[i]; - + switch (arg) { - case '--name': + case "--name": if (i + 1 >= args.length) { - throw new Error('--name requires a value'); + throw new 
Error("--name requires a value"); } options.name = args[++i]; break; - - case '--dry-run': + + case "--dry-run": options.dryRun = true; break; - - case '--skip-compile': + + case "--skip-compile": options.skipCompile = true; break; - - case '--current-db': + + case "--current-db": if (i + 1 >= args.length) { - throw new Error('--current-db requires a value'); + throw new Error("--current-db requires a value"); } options.currentDb = args[++i]; break; - - case '--desired-db': + + case "--desired-db": if (i + 1 >= args.length) { - throw new Error('--desired-db requires a value'); + throw new Error("--desired-db requires a value"); } options.desiredDb = args[++i]; break; - + default: - if (arg.startsWith('--')) { + if (arg.startsWith("--")) { throw new Error(`Unknown option: ${arg}`); } // If no option flag, treat as migration name if not set @@ -289,7 +323,10 @@ INSERT INTO example_table (name) VALUES ('test_data'); * Get current database URL (local Supabase instance) */ getCurrentDbUrl() { - return process.env.DATABASE_URL || 'postgresql://postgres:postgres@127.0.0.1:54332/postgres'; + return ( + process.env.DATABASE_URL || + "postgresql://postgres:postgres@127.0.0.1:54332/postgres" + ); } /** @@ -305,21 +342,22 @@ INSERT INTO example_table (name) VALUES ('test_data'); getSqlSourceDir() { // Use config paths if available, otherwise default to 'sql' in current directory if (this.config && this.config.get) { - return path.resolve(this.config.get('paths.sql_dir') || './sql'); + return path.resolve(this.config.get("paths.sql_dir") || "./sql"); } - return path.resolve('./sql'); + return path.resolve("./sql"); } /** - * Get staging directory path + * Get staging directory path */ getStagingDirectory() { // Use config paths if available, otherwise default to 'migrations-staging' in current directory if (this.config && this.config.get) { - const migrationsDir = this.config.get('paths.migrations_dir') || './migrations'; - return path.resolve(migrationsDir + '-staging'); + const migrationsDir = + this.config.get("paths.migrations_dir") || "./migrations"; + return path.resolve(migrationsDir + "-staging"); } - return path.resolve('./migrations-staging'); + return path.resolve("./migrations-staging"); } /** @@ -333,7 +371,7 @@ INSERT INTO example_table (name) VALUES ('test_data'); database: url.pathname.slice(1), // Remove leading slash user: url.username, password: url.password, - url: dbUrl + url: dbUrl, }; } @@ -345,7 +383,7 @@ INSERT INTO example_table (name) VALUES ('test_data'); const url = new URL(dbUrl); return `${url.protocol}//${url.username}:***@${url.host}:${url.port}${url.pathname}`; } catch { - return 'invalid-url'; + return "invalid-url"; } } @@ -353,10 +391,13 @@ INSERT INTO example_table (name) VALUES ('test_data'); * Generate unique migration ID */ generateMigrationId(name) { - const timestamp = new Date().toISOString().replace(/[:.]/g, '').substring(0, 15); - const sanitizedName = name.toLowerCase().replace(/[^a-z0-9]/g, '_'); + const timestamp = new Date() + .toISOString() + .replace(/[:.]/g, "") + .substring(0, 15); + const sanitizedName = name.toLowerCase().replace(/[^a-z0-9]/g, "_"); return `${timestamp}_${sanitizedName}`; } } -module.exports = MigrateGenerateCommand; \ No newline at end of file +module.exports = MigrateGenerateCommand; diff --git a/src/commands/db/migrate/history.js b/src/commands/db/migrate/history.js index d1b37f5..854d83d 100644 --- a/src/commands/db/migrate/history.js +++ b/src/commands/db/migrate/history.js @@ -2,16 +2,16 @@ * Migration History Command */ 
-const Command = require('../../../lib/Command'); -const fs = require('fs').promises; -const path = require('path'); +const Command = require("../../../lib/Command"); +const fs = require("fs").promises; +const path = require("path"); /** * Show migration history and timeline */ class MigrateHistoryCommand extends Command { - static description = 'Show migration history'; - + static description = "Show migration history"; + constructor(config = null, logger = null, isProd = false) { super(config, logger, isProd); this.requiresProductionConfirmation = false; // Read-only operation @@ -21,171 +21,195 @@ class MigrateHistoryCommand extends Command { * Execute migration history display */ async performExecute(args = {}) { - this.emit('start'); - + this.emit("start"); + try { - const limit = parseInt(args.limit || args.l || '20'); - const format = args.format || args.f || 'table'; + const limit = parseInt(args.limit || args.l || "20"); + const format = args.format || args.f || "table"; const filter = args.filter || args.action || null; - - this.progress('Loading migration history...'); - + + this.progress("Loading migration history..."); + // Load history from file const history = await this.loadMigrationHistory(); - + if (!history || history.length === 0) { - this.warn('No migration history found'); - console.log('\n📋 No migration history available'); - console.log('Run some migration commands to start building history.\n'); - this.emit('complete', { count: 0 }); + this.warn("No migration history found"); + console.log("\n📋 No migration history available"); + console.log("Run some migration commands to start building history.\n"); + this.emit("complete", { count: 0 }); return; } - + // Filter history if requested - const filteredHistory = filter ? - history.filter(entry => entry.action === filter) : - history; - + const filteredHistory = filter + ? 
history.filter((entry) => entry.action === filter) + : history; + // Limit results const limitedHistory = filteredHistory.slice(-limit).reverse(); - + // Display history this.displayMigrationHistory(limitedHistory, format); - - this.emit('complete', { - total: history.length, + + this.emit("complete", { + total: history.length, displayed: limitedHistory.length, - filter + filter, }); - } catch (error) { - this.error('Migration history display failed', error); - this.emit('failed', { error }); + this.error("Migration history display failed", error); + this.emit("failed", { error }); throw error; } } - + /** * Load migration history from file */ async loadMigrationHistory() { try { - const historyFile = path.resolve('supabase/.migration_history.json'); - const historyExists = await fs.access(historyFile).then(() => true).catch(() => false); - + const historyFile = path.resolve("supabase/.migration_history.json"); + const historyExists = await fs + .access(historyFile) + .then(() => true) + .catch(() => false); + if (!historyExists) { return []; } - - const historyContent = await fs.readFile(historyFile, 'utf8'); + + const historyContent = await fs.readFile(historyFile, "utf8"); return JSON.parse(historyContent); - } catch (error) { - this.warn('Could not load migration history', { error: error.message }); + this.warn("Could not load migration history", { error: error.message }); return []; } } - + /** * Display migration history in requested format */ displayMigrationHistory(history, format) { - console.log('\n📋 Migration History'); - console.log('═══════════════════\n'); - - if (format === 'json') { + console.log("\n📋 Migration History"); + console.log("═══════════════════\n"); + + if (format === "json") { this.displayJsonFormat(history); - } else if (format === 'timeline') { + } else if (format === "timeline") { this.displayTimelineFormat(history); } else { this.displayTableFormat(history); } - - console.log(''); + + console.log(""); } - + /** * Display history in table format */ displayTableFormat(history) { if (history.length === 0) { - console.log('No entries to display'); + console.log("No entries to display"); return; } - + // Calculate column widths - const maxAction = Math.max(6, ...history.map(h => h.action.length)); - const maxMigration = Math.max(9, ...history.map(h => (h.migration || '').length)); - const maxStatus = Math.max(6, ...history.map(h => (h.status || '').length)); - + const maxAction = Math.max(6, ...history.map((h) => h.action.length)); + const maxMigration = Math.max( + 9, + ...history.map((h) => (h.migration || "").length), + ); + const maxStatus = Math.max( + 6, + ...history.map((h) => (h.status || "").length), + ); + // Header console.log( - 'Action'.padEnd(maxAction) + ' │ ' + - 'Migration'.padEnd(maxMigration) + ' │ ' + - 'Status'.padEnd(maxStatus) + ' │ ' + - 'Timestamp' + "Action".padEnd(maxAction) + + " │ " + + "Migration".padEnd(maxMigration) + + " │ " + + "Status".padEnd(maxStatus) + + " │ " + + "Timestamp", ); - - console.log('─'.repeat(maxAction) + '─┼─' + '─'.repeat(maxMigration) + '─┼─' + '─'.repeat(maxStatus) + '─┼─' + '─'.repeat(19)); - + + console.log( + "─".repeat(maxAction) + + "─┼─" + + "─".repeat(maxMigration) + + "─┼─" + + "─".repeat(maxStatus) + + "─┼─" + + "─".repeat(19), + ); + // Rows - history.forEach(entry => { + history.forEach((entry) => { const action = this.colorizeAction(entry.action); - const migration = (entry.migration || '').padEnd(maxMigration); - const status = this.colorizeStatus(entry.status || '').padEnd(maxStatus); + const 
migration = (entry.migration || "").padEnd(maxMigration); + const status = this.colorizeStatus(entry.status || "").padEnd(maxStatus); const timestamp = new Date(entry.timestamp).toLocaleString(); - - console.log(`${action.padEnd(maxAction)} │ ${migration} │ ${status} │ ${timestamp}`); + + console.log( + `${action.padEnd(maxAction)} │ ${migration} │ ${status} │ ${timestamp}`, + ); }); } - + /** * Display history in timeline format */ displayTimelineFormat(history) { history.forEach((entry, index) => { const isLast = index === history.length - 1; - const connector = isLast ? '└─' : '├─'; - const line = isLast ? ' ' : '│ '; - + const connector = isLast ? "└─" : "├─"; + const line = isLast ? " " : "│ "; + const actionIcon = this.getActionIcon(entry.action); - const statusColor = this.colorizeStatus(entry.status || 'unknown'); - - console.log(`${connector} ${actionIcon} ${entry.action.toUpperCase()}: ${entry.migration || 'Unknown'}`); + const statusColor = this.colorizeStatus(entry.status || "unknown"); + + console.log( + `${connector} ${actionIcon} ${entry.action.toUpperCase()}: ${entry.migration || "Unknown"}`, + ); console.log(`${line} Status: ${statusColor}`); - console.log(`${line} Time: ${new Date(entry.timestamp).toLocaleString()}`); - + console.log( + `${line} Time: ${new Date(entry.timestamp).toLocaleString()}`, + ); + if (entry.details) { console.log(`${line} Details: ${entry.details}`); } - - if (!isLast) console.log('│'); + + if (!isLast) console.log("│"); }); } - + /** * Display history in JSON format */ displayJsonFormat(history) { console.log(JSON.stringify(history, null, 2)); } - + /** * Get icon for action type */ getActionIcon(action) { const icons = { - generate: '🔧', - test: '🧪', - promote: '🚀', - rollback: '↩️', - clean: '🧹', - verify: '✅' + generate: "🔧", + test: "🧪", + promote: "🚀", + rollback: "↩️", + clean: "🧹", + verify: "✅", }; - - return icons[action] || '📝'; + + return icons[action] || "📝"; } - + /** * Colorize action text (simplified - would use chalk in real implementation) */ @@ -193,30 +217,30 @@ class MigrateHistoryCommand extends Command { // In a real implementation, would use chalk or similar for colors const colors = { generate: action, // blue - test: action, // yellow - promote: action, // green + test: action, // yellow + promote: action, // green rollback: action, // red - clean: action, // magenta - verify: action // cyan + clean: action, // magenta + verify: action, // cyan }; - + return colors[action] || action; } - + /** * Colorize status text (simplified - would use chalk in real implementation) */ colorizeStatus(status) { // In a real implementation, would use chalk or similar for colors const colors = { - completed: status, // green - failed: status, // red - pending: status, // yellow - running: status // blue + completed: status, // green + failed: status, // red + pending: status, // yellow + running: status, // blue }; - + return colors[status] || status; } } -module.exports = MigrateHistoryCommand; \ No newline at end of file +module.exports = MigrateHistoryCommand; diff --git a/src/commands/db/migrate/index.js b/src/commands/db/migrate/index.js index 0a0b2ab..08fb80e 100644 --- a/src/commands/db/migrate/index.js +++ b/src/commands/db/migrate/index.js @@ -1,15 +1,15 @@ /** * Migration Commands Index - * + * * Exports all migration subcommands for the data CLI */ module.exports = { - MigrateStatusCommand: require('./status'), - MigrateRollbackCommand: require('./rollback'), - MigrateCleanCommand: require('./clean'), - MigrateHistoryCommand: 
require('./history'), - MigrateVerifyCommand: require('./verify'), - MigrateSquashCommand: require('./squash'), - MigrateGenerateCommand: require('./generate') -}; \ No newline at end of file + MigrateStatusCommand: require("./status"), + MigrateRollbackCommand: require("./rollback"), + MigrateCleanCommand: require("./clean"), + MigrateHistoryCommand: require("./history"), + MigrateVerifyCommand: require("./verify"), + MigrateSquashCommand: require("./squash"), + MigrateGenerateCommand: require("./generate"), +}; diff --git a/src/commands/db/migrate/promote.js b/src/commands/db/migrate/promote.js index 2deabe7..b703f02 100644 --- a/src/commands/db/migrate/promote.js +++ b/src/commands/db/migrate/promote.js @@ -3,18 +3,18 @@ * Promotes tested migrations from staging to production with safety checks */ -const Command = require('../../../lib/Command'); -const MigrationMetadata = require('../../../lib/MigrationMetadata'); -const fs = require('fs').promises; -const path = require('path'); +const Command = require("../../../lib/Command"); +const MigrationMetadata = require("../../../lib/MigrationMetadata"); +const fs = require("fs").promises; +const path = require("path"); /** * Command to promote a tested migration to production */ class MigratePromoteCommand extends Command { - static description = 'Promote tested migration to production'; + static description = "Promote tested migration to production"; static requiresConfirmation = true; - + constructor(config = null, logger = null, isProd = false) { super(config, logger, isProd); this.requiresProductionConfirmation = true; @@ -24,39 +24,40 @@ class MigratePromoteCommand extends Command { * Execute the promote command */ async performExecute(args = {}) { - this.emit('start'); - + this.emit("start"); + try { // Get migration path from arguments - const migrationName = args.migration || args.m || 'current'; + const migrationName = args.migration || args.m || "current"; const stagingPath = this.getStagingPath(migrationName); - + this.progress(`Promoting migration: ${migrationName}`); - + // 1. Verify tests passed in metadata await this.verifyTestsPassed(stagingPath); - + // 2. Move from staging to production const productionPath = await this.promoteToProduction(stagingPath); - + // 3. Update migration history await this.updateHistory(stagingPath, productionPath); - + // 4. 
Optionally stage in Git if (args.git !== false) { await this.stageInGit(productionPath); } - - this.success(`Migration promoted successfully: ${path.basename(productionPath)}`); - this.emit('complete', { - staging: stagingPath, + + this.success( + `Migration promoted successfully: ${path.basename(productionPath)}`, + ); + this.emit("complete", { + staging: stagingPath, production: productionPath, - migration: migrationName + migration: migrationName, }); - } catch (error) { - this.error('Migration promotion failed', error); - this.emit('failed', { error, migration: args.migration }); + this.error("Migration promotion failed", error); + this.emit("failed", { error, migration: args.migration }); throw error; } } @@ -66,58 +67,66 @@ class MigratePromoteCommand extends Command { */ getStagingPath(migrationName) { const supabaseRoot = this.findSupabaseRoot(); - - if (migrationName === 'current') { - return path.join(supabaseRoot, 'migrations-staging', 'current'); + + if (migrationName === "current") { + return path.join(supabaseRoot, "migrations-staging", "current"); } - - return path.join(supabaseRoot, 'migrations-staging', migrationName); + + return path.join(supabaseRoot, "migrations-staging", migrationName); } /** * Verify that tests have passed for this migration */ async verifyTestsPassed(migrationPath) { - this.progress('Verifying migration tests passed...'); - + this.progress("Verifying migration tests passed..."); + try { // Check if migration directory exists const stats = await fs.stat(migrationPath); if (!stats.isDirectory()) { throw new Error(`Migration path is not a directory: ${migrationPath}`); } - + // Load and check metadata const metadata = new MigrationMetadata(migrationPath); const data = metadata.read(); - + // Check if migration has been tested - if (data.status !== 'tested') { - throw new Error(`Migration must be tested before promotion. Current status: ${data.status}`); + if (data.status !== "tested") { + throw new Error( + `Migration must be tested before promotion. 
Current status: ${data.status}`, + ); } - + // Check if tests passed if (!data.testing || data.testing.tested_at === null) { - throw new Error('No test results found in migration metadata'); + throw new Error("No test results found in migration metadata"); } - + if (data.testing.tests_failed > 0) { - throw new Error(`Migration has failing tests: ${data.testing.tests_failed} failed, ${data.testing.tests_passed} passed`); + throw new Error( + `Migration has failing tests: ${data.testing.tests_failed} failed, ${data.testing.tests_passed} passed`, + ); } - + if (data.testing.tests_passed === 0) { - this.warn('Warning: No tests were run for this migration'); - const proceed = await this.confirm('Proceed with promotion despite no tests?', false); + this.warn("Warning: No tests were run for this migration"); + const proceed = await this.confirm( + "Proceed with promotion despite no tests?", + false, + ); if (!proceed) { - throw new Error('Promotion cancelled - no tests run'); + throw new Error("Promotion cancelled - no tests run"); } } - - this.progress(`Tests verified: ${data.testing.tests_passed} passed, ${data.testing.tests_failed} failed`); + + this.progress( + `Tests verified: ${data.testing.tests_passed} passed, ${data.testing.tests_failed} failed`, + ); return data; - } catch (error) { - if (error.code === 'ENOENT') { + if (error.code === "ENOENT") { throw new Error(`Migration not found: ${migrationPath}`); } throw error; @@ -128,34 +137,36 @@ class MigratePromoteCommand extends Command { * Promote migration from staging to production directory */ async promoteToProduction(stagingPath) { - this.progress('Moving migration to production directory...'); - + this.progress("Moving migration to production directory..."); + const supabaseRoot = this.findSupabaseRoot(); const migrationFileName = await this.generateMigrationFileName(stagingPath); - const productionDir = path.join(supabaseRoot, 'migrations'); + const productionDir = path.join(supabaseRoot, "migrations"); const productionPath = path.join(productionDir, migrationFileName); - + // Ensure production directory exists try { await fs.mkdir(productionDir, { recursive: true }); } catch (error) { // Directory already exists, continue } - + // Check if production file already exists try { await fs.access(productionPath); - throw new Error(`Production migration already exists: ${migrationFileName}`); + throw new Error( + `Production migration already exists: ${migrationFileName}`, + ); } catch (error) { - if (error.code !== 'ENOENT') { + if (error.code !== "ENOENT") { throw error; } } - + // Copy migration SQL file - const stagingSqlPath = path.join(stagingPath, 'migration.sql'); + const stagingSqlPath = path.join(stagingPath, "migration.sql"); await fs.copyFile(stagingSqlPath, productionPath); - + this.progress(`Migration copied to: ${productionPath}`); return productionPath; } @@ -167,19 +178,19 @@ class MigratePromoteCommand extends Command { // Load metadata to get the migration name const metadata = new MigrationMetadata(stagingPath); const data = metadata.read(); - + // Generate timestamp in YYYYMMDD_HHMMSS format const now = new Date(); const year = now.getFullYear(); - const month = String(now.getMonth() + 1).padStart(2, '0'); - const day = String(now.getDate()).padStart(2, '0'); - const hour = String(now.getHours()).padStart(2, '0'); - const minute = String(now.getMinutes()).padStart(2, '0'); - const second = String(now.getSeconds()).padStart(2, '0'); - + const month = String(now.getMonth() + 1).padStart(2, "0"); + const day = 
String(now.getDate()).padStart(2, "0"); + const hour = String(now.getHours()).padStart(2, "0"); + const minute = String(now.getMinutes()).padStart(2, "0"); + const second = String(now.getSeconds()).padStart(2, "0"); + const timestamp = `${year}${month}${day}_${hour}${minute}${second}`; - const safeName = data.name.toLowerCase().replace(/[^a-z0-9_]/g, '_'); - + const safeName = data.name.toLowerCase().replace(/[^a-z0-9_]/g, "_"); + return `${timestamp}_${safeName}.sql`; } @@ -187,15 +198,15 @@ class MigratePromoteCommand extends Command { * Update migration history with promotion record */ async updateHistory(stagingPath, productionPath) { - this.progress('Updating migration history...'); - + this.progress("Updating migration history..."); + const supabaseRoot = this.findSupabaseRoot(); - const historyPath = path.join(supabaseRoot, 'migrations', 'history.json'); - + const historyPath = path.join(supabaseRoot, "migrations", "history.json"); + // Load metadata const metadata = new MigrationMetadata(stagingPath); const data = metadata.read(); - + // Create history entry const historyEntry = { id: data.id, @@ -206,63 +217,63 @@ class MigratePromoteCommand extends Command { promoted_by: this.getCurrentUser(), file_path: path.basename(productionPath), tests_passed: data.testing.tests_passed, - tests_failed: data.testing.tests_failed + tests_failed: data.testing.tests_failed, }; - + // Load or create history file let history = []; try { - const historyContent = await fs.readFile(historyPath, 'utf8'); + const historyContent = await fs.readFile(historyPath, "utf8"); history = JSON.parse(historyContent); } catch (error) { - if (error.code !== 'ENOENT') { + if (error.code !== "ENOENT") { this.warn(`Could not read existing history: ${error.message}`); } } - + // Add new entry and sort by promoted_at history.push(historyEntry); history.sort((a, b) => new Date(b.promoted_at) - new Date(a.promoted_at)); - + // Write updated history - await fs.writeFile(historyPath, JSON.stringify(history, null, 2), 'utf8'); - + await fs.writeFile(historyPath, JSON.stringify(history, null, 2), "utf8"); + // Update staging metadata to promoted status metadata.update({ - status: 'promoted', + status: "promoted", promotion: { promoted_at: historyEntry.promoted_at, - promoted_by: historyEntry.promoted_by - } + promoted_by: historyEntry.promoted_by, + }, }); - - this.progress('Migration history updated'); + + this.progress("Migration history updated"); } /** * Stage promoted migration in Git */ async stageInGit(productionPath) { - this.progress('Staging migration in Git...'); - - const { spawn } = require('child_process'); - + this.progress("Staging migration in Git..."); + + const { spawn } = require("child_process"); + return new Promise((resolve, reject) => { - const git = spawn('git', ['add', productionPath], { - stdio: ['ignore', 'pipe', 'pipe'] + const git = spawn("git", ["add", productionPath], { + stdio: ["ignore", "pipe", "pipe"], }); - - git.on('close', (code) => { + + git.on("close", (code) => { if (code === 0) { - this.progress('Migration staged in Git'); + this.progress("Migration staged in Git"); resolve(); } else { - this.warn('Failed to stage migration in Git'); + this.warn("Failed to stage migration in Git"); resolve(); // Don't fail promotion for Git issues } }); - - git.on('error', (error) => { + + git.on("error", (error) => { this.warn(`Git staging failed: ${error.message}`); resolve(); // Don't fail promotion for Git issues }); @@ -273,7 +284,7 @@ class MigratePromoteCommand extends Command { * Get current 
user for promotion tracking */ getCurrentUser() { - return process.env.USER || process.env.USERNAME || 'unknown'; + return process.env.USER || process.env.USERNAME || "unknown"; } /** @@ -281,43 +292,53 @@ class MigratePromoteCommand extends Command { */ findSupabaseRoot() { let currentDir = process.cwd(); - + while (currentDir !== path.dirname(currentDir)) { - const supabasePath = path.join(currentDir, 'supabase'); + const supabasePath = path.join(currentDir, "supabase"); try { - require('fs').statSync(supabasePath); + require("fs").statSync(supabasePath); return supabasePath; } catch { currentDir = path.dirname(currentDir); } } - - throw new Error('Could not find supabase directory. Run this command from within a Supabase project.'); + + throw new Error( + "Could not find supabase directory. Run this command from within a Supabase project.", + ); } /** * Show help for promote command */ showHelp() { - console.log('Usage: data db:migrate:promote [options]'); - console.log(''); - console.log('Promote a tested migration to production'); - console.log(''); - console.log('Options:'); - console.log(' --migration, -m Migration to promote (default: current)'); - console.log(' --no-git Skip Git staging'); - console.log(' --help Show this help'); - console.log(''); - console.log('Examples:'); - console.log(' data db:migrate:promote # Promote current migration'); - console.log(' data db:migrate:promote -m migration1 # Promote specific migration'); - console.log(' data db:migrate:promote --no-git # Promote without Git staging'); - console.log(''); - console.log('Requirements:'); + console.log("Usage: data db:migrate:promote [options]"); + console.log(""); + console.log("Promote a tested migration to production"); + console.log(""); + console.log("Options:"); + console.log( + " --migration, -m Migration to promote (default: current)", + ); + console.log(" --no-git Skip Git staging"); + console.log(" --help Show this help"); + console.log(""); + console.log("Examples:"); + console.log( + " data db:migrate:promote # Promote current migration", + ); + console.log( + " data db:migrate:promote -m migration1 # Promote specific migration", + ); + console.log( + " data db:migrate:promote --no-git # Promote without Git staging", + ); + console.log(""); + console.log("Requirements:"); console.log(' - Migration must have status "tested"'); - console.log(' - All tests must pass (tests_failed = 0)'); - console.log(' - Production directory must not have conflicting file'); + console.log(" - All tests must pass (tests_failed = 0)"); + console.log(" - Production directory must not have conflicting file"); } } -module.exports = MigratePromoteCommand; \ No newline at end of file +module.exports = MigratePromoteCommand; diff --git a/src/commands/db/migrate/rollback.js b/src/commands/db/migrate/rollback.js index e14f812..1b519c7 100644 --- a/src/commands/db/migrate/rollback.js +++ b/src/commands/db/migrate/rollback.js @@ -2,18 +2,24 @@ * Migration Rollback Command */ -const DatabaseCommand = require('../../../lib/DatabaseCommand'); -const fs = require('fs').promises; -const path = require('path'); +const DatabaseCommand = require("../../../lib/DatabaseCommand"); +const fs = require("fs").promises; +const path = require("path"); /** * Rollback migration to previous state with confirmation */ class MigrateRollbackCommand extends DatabaseCommand { - static description = 'Rollback migration to previous state'; + static description = "Rollback migration to previous state"; static requiresConfirmation = true; - - 
constructor(databaseUrl, serviceRoleKey = null, anonKey = null, logger = null, isProd = false) { + + constructor( + databaseUrl, + serviceRoleKey = null, + anonKey = null, + logger = null, + isProd = false, + ) { // Rollback is destructive, always requires confirmation super(databaseUrl, serviceRoleKey, anonKey, logger, isProd, true); } @@ -22,164 +28,176 @@ class MigrateRollbackCommand extends DatabaseCommand { * Execute migration rollback */ async performExecute(args = {}) { - this.emit('start'); - + this.emit("start"); + try { - const target = args.target || args.to || 'previous'; - + const target = args.target || args.to || "previous"; + this.progress(`Preparing rollback to: ${target}...`); - + // Get rollback target information const rollbackInfo = await this.getRollbackTarget(target); - + if (!rollbackInfo) { - this.error('No valid rollback target found'); - this.emit('failed', { error: 'No rollback target' }); + this.error("No valid rollback target found"); + this.emit("failed", { error: "No rollback target" }); return; } - + // Additional confirmation for rollback const confirmed = await this.confirmRollback(rollbackInfo); if (!confirmed) { - this.success('Rollback cancelled'); - this.emit('cancelled', { target }); + this.success("Rollback cancelled"); + this.emit("cancelled", { target }); return; } - + // Perform rollback await this.performRollback(rollbackInfo); - + // Update history await this.recordRollback(rollbackInfo); - - this.success(`Migration rollback completed to: ${rollbackInfo.migration}`); - this.emit('complete', { target: rollbackInfo.migration }); - + + this.success( + `Migration rollback completed to: ${rollbackInfo.migration}`, + ); + this.emit("complete", { target: rollbackInfo.migration }); } catch (error) { - this.error('Migration rollback failed', error); - this.emit('failed', { error }); + this.error("Migration rollback failed", error); + this.emit("failed", { error }); throw error; } } - + /** * Get rollback target migration */ async getRollbackTarget(target) { try { - const historyFile = path.resolve('supabase/.migration_history.json'); - const historyExists = await fs.access(historyFile).then(() => true).catch(() => false); - + const historyFile = path.resolve("supabase/.migration_history.json"); + const historyExists = await fs + .access(historyFile) + .then(() => true) + .catch(() => false); + if (!historyExists) { - this.warn('No migration history found'); + this.warn("No migration history found"); return null; } - - const historyContent = await fs.readFile(historyFile, 'utf8'); + + const historyContent = await fs.readFile(historyFile, "utf8"); const history = JSON.parse(historyContent); - + // Get promotions only - const promotions = history.filter(entry => entry.action === 'promote'); - + const promotions = history.filter((entry) => entry.action === "promote"); + if (promotions.length === 0) { - this.warn('No promoted migrations found'); + this.warn("No promoted migrations found"); return null; } - - if (target === 'previous' || target === 'last') { + + if (target === "previous" || target === "last") { // Get second-to-last promotion return promotions.length > 1 ? 
promotions[promotions.length - 2] : null; } - + // Find specific migration - return promotions.find(p => p.migration === target) || null; - + return promotions.find((p) => p.migration === target) || null; } catch (error) { - this.warn('Could not determine rollback target', { error: error.message }); + this.warn("Could not determine rollback target", { + error: error.message, + }); return null; } } - + /** * Confirm rollback operation with details */ async confirmRollback(rollbackInfo) { - console.log('\n⚠️ ROLLBACK CONFIRMATION'); - console.log('═══════════════════════════\n'); + console.log("\n⚠️ ROLLBACK CONFIRMATION"); + console.log("═══════════════════════════\n"); console.log(`Target Migration: ${rollbackInfo.migration}`); - console.log(`Promoted: ${new Date(rollbackInfo.timestamp).toLocaleString()}`); + console.log( + `Promoted: ${new Date(rollbackInfo.timestamp).toLocaleString()}`, + ); console.log(`Status: ${rollbackInfo.status}`); - console.log('\n⚠️ This will PERMANENTLY rollback your database state!'); - console.log('⚠️ Make sure you have a backup before proceeding!'); - console.log(''); - - return await this.confirm('Are you absolutely sure you want to rollback?', false); + console.log("\n⚠️ This will PERMANENTLY rollback your database state!"); + console.log("⚠️ Make sure you have a backup before proceeding!"); + console.log(""); + + return await this.confirm( + "Are you absolutely sure you want to rollback?", + false, + ); } - + /** * Perform the actual rollback */ async performRollback(rollbackInfo) { - this.progress('Creating backup before rollback...'); - + this.progress("Creating backup before rollback..."); + // In a real implementation, this would: // 1. Create a backup of current state // 2. Generate rollback SQL from migration history // 3. Execute rollback against database // 4. 
Verify rollback success - + // For now, simulate the process await this.sleep(1000); - this.progress('Generating rollback SQL...'); - + this.progress("Generating rollback SQL..."); + await this.sleep(1000); - this.progress('Executing rollback against database...'); - + this.progress("Executing rollback against database..."); + await this.sleep(1000); - this.progress('Verifying rollback completion...'); - + this.progress("Verifying rollback completion..."); + // Simulate rollback file creation - const rollbackDir = path.resolve('supabase/.rollbacks'); + const rollbackDir = path.resolve("supabase/.rollbacks"); await fs.mkdir(rollbackDir, { recursive: true }); - + const rollbackFile = path.join(rollbackDir, `rollback_${Date.now()}.sql`); - await fs.writeFile(rollbackFile, `-- Rollback to ${rollbackInfo.migration}\n-- Generated: ${new Date().toISOString()}\n`); - + await fs.writeFile( + rollbackFile, + `-- Rollback to ${rollbackInfo.migration}\n-- Generated: ${new Date().toISOString()}\n`, + ); + this.progress(`Rollback SQL saved to: ${rollbackFile}`); } - + /** * Record rollback in history */ async recordRollback(rollbackInfo) { try { - const historyFile = path.resolve('supabase/.migration_history.json'); - const historyContent = await fs.readFile(historyFile, 'utf8'); + const historyFile = path.resolve("supabase/.migration_history.json"); + const historyContent = await fs.readFile(historyFile, "utf8"); const history = JSON.parse(historyContent); - + // Add rollback record history.push({ - action: 'rollback', + action: "rollback", migration: rollbackInfo.migration, target: rollbackInfo.migration, timestamp: new Date().toISOString(), - status: 'completed' + status: "completed", }); - + await fs.writeFile(historyFile, JSON.stringify(history, null, 2)); - this.progress('Rollback recorded in migration history'); - + this.progress("Rollback recorded in migration history"); } catch (error) { - this.warn('Could not update migration history', { error: error.message }); + this.warn("Could not update migration history", { error: error.message }); } } - + /** * Sleep utility for simulation */ sleep(ms) { - return new Promise(resolve => setTimeout(resolve, ms)); + return new Promise((resolve) => setTimeout(resolve, ms)); } } -module.exports = MigrateRollbackCommand; \ No newline at end of file +module.exports = MigrateRollbackCommand; diff --git a/src/commands/db/migrate/squash.js b/src/commands/db/migrate/squash.js index 242b278..e618a70 100644 --- a/src/commands/db/migrate/squash.js +++ b/src/commands/db/migrate/squash.js @@ -49,9 +49,8 @@ class MigrateSquashCommand extends Command { this.progress(`Found ${migrationsToSquash.length} migrations to squash`); // Generate squashed migration content - const squashedContent = await this.generateSquashedMigration( - migrationsToSquash - ); + const squashedContent = + await this.generateSquashedMigration(migrationsToSquash); // Create output filename const outputFilename = @@ -61,7 +60,7 @@ class MigrateSquashCommand extends Command { this.displayDryRunResults( migrationsToSquash, outputFilename, - squashedContent + squashedContent, ); this.emit("complete", { dryRun: true, @@ -73,7 +72,7 @@ class MigrateSquashCommand extends Command { // Confirm squash operation const confirmed = await this.confirmSquashOperation( migrationsToSquash, - outputFilename + outputFilename, ); if (!confirmed) { this.success("Squash operation cancelled"); @@ -85,11 +84,11 @@ class MigrateSquashCommand extends Command { await this.performSquash( migrationsToSquash, outputFilename, - 
squashedContent + squashedContent, ); this.success( - `Successfully squashed ${migrationsToSquash.length} migrations into ${outputFilename}` + `Successfully squashed ${migrationsToSquash.length} migrations into ${outputFilename}`, ); this.emit("complete", { squashed: migrationsToSquash.length, @@ -305,7 +304,7 @@ class MigrateSquashCommand extends Command { // Move to archive await fs.rename(sourcePath, archivePath); this.progress(`Archived: ${migration}`); - }) + }), ); // Update migration history diff --git a/src/commands/db/migrate/status.js b/src/commands/db/migrate/status.js index 751e4c8..cd97200 100644 --- a/src/commands/db/migrate/status.js +++ b/src/commands/db/migrate/status.js @@ -2,16 +2,16 @@ * Migration Status Command */ -const Command = require('../../../lib/Command'); -const fs = require('fs').promises; -const path = require('path'); +const Command = require("../../../lib/Command"); +const fs = require("fs").promises; +const path = require("path"); /** * Show current migration status including staging state and pending migrations */ class MigrateStatusCommand extends Command { - static description = 'Show current migration status'; - + static description = "Show current migration status"; + constructor(config = null, logger = null, isProd = false) { super(config, logger, isProd); this.requiresProductionConfirmation = false; // Read-only operation @@ -21,148 +21,181 @@ class MigrateStatusCommand extends Command { * Execute migration status check */ async performExecute() { - this.emit('start'); - + this.emit("start"); + try { - this.progress('Checking migration status...'); - + this.progress("Checking migration status..."); + // Get staging status const stagingStatus = await this.getStagingStatus(); - + // List pending migrations const pendingMigrations = await this.getPendingMigrations(); - + // Get last promoted migration const lastPromoted = await this.getLastPromotedMigration(); - + // Display results - this.displayMigrationStatus(stagingStatus, pendingMigrations, lastPromoted); - - this.emit('complete', { - stagingStatus, - pendingMigrations: pendingMigrations.length, - lastPromoted + this.displayMigrationStatus( + stagingStatus, + pendingMigrations, + lastPromoted, + ); + + this.emit("complete", { + stagingStatus, + pendingMigrations: pendingMigrations.length, + lastPromoted, }); - } catch (error) { - this.error('Migration status check failed', error); - this.emit('failed', { error }); - throw error; + this.error("Migration status check failed", error); + this.emit("failed", { error }); + // Print error to console + console.error("\n❌ Failed to check migration status."); + console.error(`📌 Reason: ${error.message}`); + // provide helpful tips based on error type + if (error.code === "ENOENT") { + // This error code means "File or directory not found" + console.error( + "💡 Tip: This might mean your project is not initialized.", + ); + console.error( + " Run `supabase init` to set up a new Supabase project.", + ); + } else if (error.code === "EACCES") { + console.error( + "💡 Tip: Permission denied. 
Check read access to the supabase/ directory.", + ); + } + console.error(""); // Add a blank line for readability + process.exit(1); } } - + /** * Check staging area status */ async getStagingStatus() { try { - const stagingDir = path.resolve('supabase/.staging'); - const stagingExists = await fs.access(stagingDir).then(() => true).catch(() => false); - + const stagingDir = path.resolve("supabase/.staging"); + const stagingExists = await fs + .access(stagingDir) + .then(() => true) + .catch(() => false); + if (!stagingExists) { - return { status: 'clean', files: 0 }; + return { status: "clean", files: 0 }; } - + const files = await fs.readdir(stagingDir); - return { - status: files.length > 0 ? 'dirty' : 'clean', + return { + status: files.length > 0 ? "dirty" : "clean", files: files.length, - fileList: files + fileList: files, }; } catch (error) { - return { status: 'error', error: error.message }; + return { status: "error", error: error.message }; } } - + /** * Get list of pending migrations */ async getPendingMigrations() { try { - const migrationsDir = path.resolve('supabase/migrations'); - const migrationsExists = await fs.access(migrationsDir).then(() => true).catch(() => false); - + const migrationsDir = path.resolve("supabase/migrations"); + const migrationsExists = await fs + .access(migrationsDir) + .then(() => true) + .catch(() => false); + if (!migrationsExists) { return []; } - + const files = await fs.readdir(migrationsDir); - const migrationFiles = files.filter(f => f.endsWith('.sql')); - + const migrationFiles = files.filter((f) => f.endsWith(".sql")); + return migrationFiles.sort(); } catch (error) { - this.warn('Could not read migrations directory', { error: error.message }); + this.warn("Could not read migrations directory", { + error: error.message, + }); return []; } } - + /** * Get last promoted migration info */ async getLastPromotedMigration() { try { - const historyFile = path.resolve('supabase/.migration_history.json'); - const historyExists = await fs.access(historyFile).then(() => true).catch(() => false); - + const historyFile = path.resolve("supabase/.migration_history.json"); + const historyExists = await fs + .access(historyFile) + .then(() => true) + .catch(() => false); + if (!historyExists) { return null; } - - const historyContent = await fs.readFile(historyFile, 'utf8'); + + const historyContent = await fs.readFile(historyFile, "utf8"); const history = JSON.parse(historyContent); - + // Find most recent promotion - const promotions = history.filter(entry => entry.action === 'promote'); + const promotions = history.filter((entry) => entry.action === "promote"); return promotions.length > 0 ? 
promotions[promotions.length - 1] : null; - } catch (error) { - this.warn('Could not read migration history', { error: error.message }); + this.warn("Could not read migration history", { error: error.message }); return null; } } - + /** * Display migration status information */ displayMigrationStatus(stagingStatus, pendingMigrations, lastPromoted) { - console.log('\n🔍 Migration Status Report'); - console.log('═══════════════════════════\n'); - + console.log("\n🔍 Migration Status Report"); + console.log("═══════════════════════════\n"); + // Staging status console.log(`📦 Staging Area: ${stagingStatus.status.toUpperCase()}`); - if (stagingStatus.status === 'dirty') { + if (stagingStatus.status === "dirty") { console.log(` Files in staging: ${stagingStatus.files}`); - stagingStatus.fileList?.forEach(file => { + stagingStatus.fileList?.forEach((file) => { console.log(` • ${file}`); }); - } else if (stagingStatus.status === 'error') { + } else if (stagingStatus.status === "error") { console.log(` Error: ${stagingStatus.error}`); } - console.log(''); - + console.log(""); + // Pending migrations console.log(`📋 Pending Migrations: ${pendingMigrations.length}`); if (pendingMigrations.length > 0) { - pendingMigrations.slice(0, 5).forEach(migration => { + pendingMigrations.slice(0, 5).forEach((migration) => { console.log(` • ${migration}`); }); if (pendingMigrations.length > 5) { console.log(` ... and ${pendingMigrations.length - 5} more`); } } - console.log(''); - + console.log(""); + // Last promoted - console.log('🚀 Last Promoted Migration:'); + console.log("🚀 Last Promoted Migration:"); if (lastPromoted) { - console.log(` Migration: ${lastPromoted.migration || 'Unknown'}`); - console.log(` Date: ${new Date(lastPromoted.timestamp).toLocaleString()}`); - console.log(` Status: ${lastPromoted.status || 'Unknown'}`); + console.log(` Migration: ${lastPromoted.migration || "Unknown"}`); + console.log( + ` Date: ${new Date(lastPromoted.timestamp).toLocaleString()}`, + ); + console.log(` Status: ${lastPromoted.status || "Unknown"}`); } else { - console.log(' No migrations have been promoted yet'); + console.log(" No migrations have been promoted yet"); } - console.log(''); + console.log(""); } } -module.exports = MigrateStatusCommand; \ No newline at end of file +module.exports = MigrateStatusCommand; diff --git a/src/commands/db/migrate/test-v2.js b/src/commands/db/migrate/test-v2.js index bdbb5e5..3af8aca 100644 --- a/src/commands/db/migrate/test-v2.js +++ b/src/commands/db/migrate/test-v2.js @@ -2,37 +2,39 @@ * Migration Test Command with pgTAP Validation - V2 using Supabase API */ -const Command = require('../../../lib/Command'); -const MigrationMetadata = require('../../../lib/MigrationMetadata'); -const { createClient } = require('@supabase/supabase-js'); -const fs = require('fs').promises; -const path = require('path'); +const Command = require("../../../lib/Command"); +const MigrationMetadata = require("../../../lib/MigrationMetadata"); +const { createClient } = require("@supabase/supabase-js"); +const fs = require("fs").promises; +const path = require("path"); /** * Test migration in isolated schema using Supabase API */ class MigrateTestCommand extends Command { - static description = 'Test migration with pgTAP validation'; - + static description = "Test migration with pgTAP validation"; + constructor(config = null, logger = null, isProd = false) { super(config, logger, isProd); this.requiresProductionConfirmation = false; // Testing is safe this.workingDir = process.cwd(); - this.stagingDir = 
path.join(this.workingDir, 'migrations-staging'); - this.currentMigrationDir = path.join(this.stagingDir, 'current'); - + this.stagingDir = path.join(this.workingDir, "migrations-staging"); + this.currentMigrationDir = path.join(this.stagingDir, "current"); + // Initialize Supabase client with service role key for admin operations - const supabaseUrl = process.env.SUPABASE_URL || 'http://localhost:54321'; + const supabaseUrl = process.env.SUPABASE_URL || "http://localhost:54321"; const serviceRoleKey = process.env.SUPABASE_SERVICE_ROLE_KEY; - + if (!serviceRoleKey) { - throw new Error('SUPABASE_SERVICE_ROLE_KEY environment variable is required for testing'); + throw new Error( + "SUPABASE_SERVICE_ROLE_KEY environment variable is required for testing", + ); } - + this.supabase = createClient(supabaseUrl, serviceRoleKey, { - auth: { persistSession: false } + auth: { persistSession: false }, }); - + // Generate unique test schema name with clear namespace const timestamp = Math.floor(Date.now() / 1000); // POSIX timestamp this.testSchema = `"@data.tests.${timestamp}"`; // Quote for special chars @@ -42,54 +44,52 @@ class MigrateTestCommand extends Command { * Execute the migration test process */ async performExecute(args = {}) { - this.emit('start'); - + this.emit("start"); + let schemaCreated = false; - + try { - this.progress('Starting migration test process'); - + this.progress("Starting migration test process"); + // Validate that we have a staged migration await this.validateStagedMigration(); - + // Get migration metadata const metadata = await this.getMigrationMetadata(); this.progress(`Testing migration: ${metadata.name} (${metadata.id})`); - + // Create isolated test schema await this.createTestSchema(); schemaCreated = true; this.success(`Created test schema: ${this.testSchema}`); - + // Apply migration to test schema await this.applyMigration(); - this.success('Applied migration to test schema'); - + this.success("Applied migration to test schema"); + // Install pgTAP if needed await this.ensurePgTap(); - + // Run pgTAP tests const testResults = await this.runTests(args); - + // Report results this.reportTestResults(testResults); - + // Update metadata with test results await this.updateMetadata(metadata, testResults); - - this.emit('complete', { + + this.emit("complete", { success: testResults.success, schema: this.testSchema, - results: testResults + results: testResults, }); - + return testResults; - } catch (error) { - this.error('Migration test failed', error); - this.emit('failed', { error }); + this.error("Migration test failed", error); + this.emit("failed", { error }); throw error; - } finally { try { // Always cleanup test schema unless explicitly kept @@ -98,35 +98,40 @@ class MigrateTestCommand extends Command { } else if (schemaCreated) { this.warn(`Test schema ${this.testSchema} was kept for debugging`); } - + // Close database connection if (this.supabase) { // Supabase client doesn't have an explicit close method, but we can // clear the auth session and remove listeners await this.supabase.auth.signOut(); this.supabase.removeAllChannels(); - this.progress('Database connection closed'); + this.progress("Database connection closed"); } } catch (cleanupError) { - this.error('Cleanup error:', cleanupError); + this.error("Cleanup error:", cleanupError); // Don't re-throw, we want to exit gracefully } } } - + /** * Validate that we have a staged migration ready to test */ async validateStagedMigration() { try { await fs.access(this.currentMigrationDir); - const 
migrationFile = path.join(this.currentMigrationDir, 'migration.sql'); + const migrationFile = path.join( + this.currentMigrationDir, + "migration.sql", + ); await fs.access(migrationFile); } catch (error) { - throw new Error('No staged migration found. Run "data db:migrate:generate" first.'); + throw new Error( + 'No staged migration found. Run "data db:migrate:generate" first.', + ); } } - + /** * Get migration metadata */ @@ -134,41 +139,40 @@ class MigrateTestCommand extends Command { const metadata = new MigrationMetadata(this.currentMigrationDir); return metadata.read(); } - + /** * Create isolated test schema using Supabase API */ async createTestSchema() { try { // Create schema with CASCADE to handle dependencies - const { error } = await this.supabase.rpc('exec_sql', { - sql: `CREATE SCHEMA IF NOT EXISTS ${this.testSchema};` + const { error } = await this.supabase.rpc("exec_sql", { + sql: `CREATE SCHEMA IF NOT EXISTS ${this.testSchema};`, }); - + if (error) throw error; - + // Set search path to include our test schema - const { error: pathError } = await this.supabase.rpc('exec_sql', { - sql: `SET search_path TO ${this.testSchema}, public, test;` + const { error: pathError } = await this.supabase.rpc("exec_sql", { + sql: `SET search_path TO ${this.testSchema}, public, test;`, }); - + if (pathError) throw pathError; - } catch (error) { throw new Error(`Failed to create test schema: ${error.message}`); } } - + /** * Apply staged migration to test schema */ async applyMigration() { - const migrationFile = path.join(this.currentMigrationDir, 'migration.sql'); - + const migrationFile = path.join(this.currentMigrationDir, "migration.sql"); + try { // Read migration SQL - const migrationSql = await fs.readFile(migrationFile, 'utf8'); - + const migrationSql = await fs.readFile(migrationFile, "utf8"); + // Wrap migration in schema context const wrappedSql = ` -- Switch to test schema @@ -180,82 +184,82 @@ class MigrateTestCommand extends Command { -- Reset search path SET search_path TO public; `; - + // Execute migration via RPC - const { error } = await this.supabase.rpc('exec_sql', { - sql: wrappedSql + const { error } = await this.supabase.rpc("exec_sql", { + sql: wrappedSql, }); - + if (error) throw error; - } catch (error) { throw new Error(`Failed to apply migration: ${error.message}`); } } - + /** * Ensure pgTAP extension is available */ async ensurePgTap() { try { // Check if pgTAP exists - const { data, error } = await this.supabase.rpc('exec_sql', { - sql: `SELECT 1 FROM pg_extension WHERE extname = 'pgtap';` + const { data, error } = await this.supabase.rpc("exec_sql", { + sql: `SELECT 1 FROM pg_extension WHERE extname = 'pgtap';`, }); - + if (error) throw error; - + // Install if not present if (!data || data.length === 0) { - this.progress('Installing pgTAP extension'); - - const { error: installError } = await this.supabase.rpc('exec_sql', { - sql: `CREATE EXTENSION IF NOT EXISTS pgtap;` + this.progress("Installing pgTAP extension"); + + const { error: installError } = await this.supabase.rpc("exec_sql", { + sql: `CREATE EXTENSION IF NOT EXISTS pgtap;`, }); - + if (installError) throw installError; - - this.success('pgTAP extension installed'); + + this.success("pgTAP extension installed"); } } catch (error) { this.warn(`Could not install pgTAP: ${error.message}`); - this.warn('Some tests may be skipped'); + this.warn("Some tests may be skipped"); } } - + /** * Run pgTAP tests in test schema */ async runTests(options = {}) { try { - this.progress('Discovering test 
functions...'); - + this.progress("Discovering test functions..."); + // Find test functions in test schema - const { data: testFunctions, error: discoverError } = await this.supabase.rpc('exec_sql', { - sql: ` + const { data: testFunctions, error: discoverError } = + await this.supabase.rpc("exec_sql", { + sql: ` SELECT routine_name FROM information_schema.routines WHERE routine_schema IN ('test', '${this.testSchema}') AND routine_name LIKE '%test%' ORDER BY routine_name; - ` - }); - + `, + }); + if (discoverError) throw discoverError; - + if (!testFunctions || testFunctions.length === 0) { - this.warn('No test functions found'); + this.warn("No test functions found"); return { success: true, testsRun: 0, testsPassed: 0, testsFailed: 0, - skipped: 0 + skipped: 0, }; } - + this.progress(`Found ${testFunctions.length} test function(s)`); - + // Run each test function const results = { success: true, @@ -263,63 +267,66 @@ class MigrateTestCommand extends Command { testsPassed: 0, testsFailed: 0, skipped: 0, - details: [] + details: [], }; - + for (const func of testFunctions) { const functionName = func.routine_name; - + try { this.progress(`Running ${functionName}...`); - + // Execute test function - const { data: testOutput, error: testError } = await this.supabase.rpc('exec_sql', { - sql: `SELECT * FROM test.${functionName}();` - }); - + const { data: testOutput, error: testError } = + await this.supabase.rpc("exec_sql", { + sql: `SELECT * FROM test.${functionName}();`, + }); + if (testError) throw testError; - + // Parse TAP output const tapResults = this.parseTapOutput(testOutput); - + results.testsRun += tapResults.total; results.testsPassed += tapResults.passed; results.testsFailed += tapResults.failed; results.skipped += tapResults.skipped; - + if (tapResults.failed > 0) { results.success = false; } - + results.details.push({ function: functionName, - ...tapResults + ...tapResults, }); - + if (tapResults.failed > 0) { - this.error(`✗ ${functionName}: ${tapResults.failed} test(s) failed`); + this.error( + `✗ ${functionName}: ${tapResults.failed} test(s) failed`, + ); } else { - this.success(`✓ ${functionName}: All ${tapResults.passed} test(s) passed`); + this.success( + `✓ ${functionName}: All ${tapResults.passed} test(s) passed`, + ); } - } catch (error) { this.error(`Failed to run ${functionName}: ${error.message}`); results.success = false; results.testsFailed++; results.details.push({ function: functionName, - error: error.message + error: error.message, }); } } - + return results; - } catch (error) { throw new Error(`Test execution failed: ${error.message}`); } } - + /** * Parse TAP output from test results */ @@ -327,63 +334,67 @@ class MigrateTestCommand extends Command { if (!output || !Array.isArray(output)) { return { total: 0, passed: 0, failed: 0, skipped: 0 }; } - + let passed = 0; let failed = 0; let skipped = 0; - + for (const row of output) { const line = Object.values(row)[0]; - if (typeof line !== 'string') continue; - - if (line.startsWith('ok ')) { + if (typeof line !== "string") continue; + + if (line.startsWith("ok ")) { passed++; - } else if (line.startsWith('not ok ')) { + } else if (line.startsWith("not ok ")) { failed++; - } else if (line.includes('# SKIP')) { + } else if (line.includes("# SKIP")) { skipped++; } } - + return { total: passed + failed + skipped, passed, failed, - skipped + skipped, }; } - + /** * Report test results */ reportTestResults(results) { - console.log('\n' + '='.repeat(60)); - console.log('TEST RESULTS SUMMARY'); - 
console.log('='.repeat(60)); - + console.log("\n" + "=".repeat(60)); + console.log("TEST RESULTS SUMMARY"); + console.log("=".repeat(60)); + console.log(`Total Tests Run: ${results.testsRun}`); console.log(`✓ Passed: ${results.testsPassed}`); console.log(`✗ Failed: ${results.testsFailed}`); console.log(`⊘ Skipped: ${results.skipped}`); - + if (results.success) { - this.success('\n✓ All tests passed!'); + this.success("\n✓ All tests passed!"); } else { this.error(`\n✗ ${results.testsFailed} test(s) failed`); - + // Show failed test details - const failedTests = results.details.filter(d => d.failed > 0 || d.error); + const failedTests = results.details.filter( + (d) => d.failed > 0 || d.error, + ); if (failedTests.length > 0) { - console.log('\nFailed Tests:'); + console.log("\nFailed Tests:"); for (const test of failedTests) { - console.log(` - ${test.function}: ${test.error || `${test.failed} failures`}`); + console.log( + ` - ${test.function}: ${test.error || `${test.failed} failures`}`, + ); } } } - - console.log('='.repeat(60) + '\n'); + + console.log("=".repeat(60) + "\n"); } - + /** * Update migration metadata with test results */ @@ -395,36 +406,36 @@ class MigrateTestCommand extends Command { success: testResults.success, testsRun: testResults.testsRun, testsPassed: testResults.testsPassed, - testsFailed: testResults.testsFailed - } + testsFailed: testResults.testsFailed, + }, }; - + const metadataManager = new MigrationMetadata(this.currentMigrationDir); await metadataManager.write(updatedMetadata); } - + /** * Clean up test schema */ async cleanupTestSchema() { try { this.progress(`Cleaning up test schema: ${this.testSchema}`); - + // Drop schema with CASCADE to remove all objects - const { error } = await this.supabase.rpc('exec_sql', { - sql: `DROP SCHEMA IF EXISTS ${this.testSchema} CASCADE;` + const { error } = await this.supabase.rpc("exec_sql", { + sql: `DROP SCHEMA IF EXISTS ${this.testSchema} CASCADE;`, }); - + if (error) { this.warn(`Failed to cleanup test schema: ${error.message}`); } else { - this.success('Test schema cleaned up'); + this.success("Test schema cleaned up"); } } catch (error) { this.warn(`Cleanup error: ${error.message}`); } } - + /** * Create RPC function for executing arbitrary SQL (if it doesn't exist) * This should be added to your database migrations @@ -453,4 +464,4 @@ class MigrateTestCommand extends Command { } } -module.exports = MigrateTestCommand; \ No newline at end of file +module.exports = MigrateTestCommand; diff --git a/src/commands/db/migrate/test.js b/src/commands/db/migrate/test.js index 665f6aa..1bb78b2 100644 --- a/src/commands/db/migrate/test.js +++ b/src/commands/db/migrate/test.js @@ -2,30 +2,30 @@ * Migration Test Command with pgTAP Validation */ -const { Command } = require('../../../lib/Command'); -const MigrationMetadata = require('../../../lib/MigrationMetadata'); -const ChildProcessWrapper = require('../../../lib/ChildProcessWrapper'); -const fs = require('fs'); -const path = require('path'); +const { Command } = require("../../../lib/Command"); +const MigrationMetadata = require("../../../lib/MigrationMetadata"); +const ChildProcessWrapper = require("../../../lib/ChildProcessWrapper"); +const fs = require("fs"); +const path = require("path"); /** * Test migration command that creates isolated test database, * applies staged migration, and runs pgTAP validation */ class MigrateTestCommand extends Command { - static description = 'Test migration with pgTAP validation'; - + static description = "Test migration with pgTAP 
validation"; + constructor(config = null, logger = null, isProd = false) { super(config, logger, isProd); this.requiresProductionConfirmation = false; // Testing is safe this.workingDir = process.cwd(); - this.stagingDir = path.join(this.workingDir, 'migrations-staging'); - this.currentMigrationDir = path.join(this.stagingDir, 'current'); + this.stagingDir = path.join(this.workingDir, "migrations-staging"); + this.currentMigrationDir = path.join(this.stagingDir, "current"); this.processWrapper = new ChildProcessWrapper(logger || console); - + // Add ONLY safe database commands for testing - this.processWrapper.allowCommand('psql'); - this.processWrapper.allowCommand('createdb'); + this.processWrapper.allowCommand("psql"); + this.processWrapper.allowCommand("createdb"); // DO NOT add dropdb - too dangerous! } @@ -33,75 +33,83 @@ class MigrateTestCommand extends Command { * Execute the migration test process */ async performExecute(args = {}) { - this.emit('start'); - + this.emit("start"); + try { - this.progress('Starting migration test process'); - + this.progress("Starting migration test process"); + // Validate that we have a staged migration await this.validateStagedMigration(); - + // Get migration metadata const metadata = await this.getMigrationMetadata(); this.progress(`Testing migration: ${metadata.name} (${metadata.id})`); - + // Create isolated test database const testDbUrl = await this.createTestDatabase(); this.progress(`Created test database: ${this.getDbName(testDbUrl)}`); - + try { // Apply staged migration to test database await this.applyMigration(testDbUrl); - this.progress('Applied migration to test database'); - + this.progress("Applied migration to test database"); + // Run pgTAP tests if available const testResults = await this.runPgTapTests(testDbUrl); - this.progress(`Test results: ${testResults.passed} passed, ${testResults.failed} failed`); - + this.progress( + `Test results: ${testResults.passed} passed, ${testResults.failed} failed`, + ); + // Update metadata with test results await this.updateTestResults(metadata.id, testResults); - + if (testResults.failed > 0) { - this.error(`Migration test failed: ${testResults.failed} test(s) failed`); - this.emit('failed', { error: 'Tests failed', results: testResults }); - throw new Error(`Migration test failed: ${testResults.failed} test(s) failed`); + this.error( + `Migration test failed: ${testResults.failed} test(s) failed`, + ); + this.emit("failed", { error: "Tests failed", results: testResults }); + throw new Error( + `Migration test failed: ${testResults.failed} test(s) failed`, + ); } - - this.success(`Migration test completed successfully: ${testResults.passed} tests passed`); - this.emit('complete', { results: testResults }); - + + this.success( + `Migration test completed successfully: ${testResults.passed} tests passed`, + ); + this.emit("complete", { results: testResults }); } finally { // Clean up test database await this.cleanupTestDatabase(testDbUrl); this.progress(`Cleaned up test database: ${this.getDbName(testDbUrl)}`); } - } catch (error) { - this.error('Migration test failed', error); - this.emit('failed', { error }); + this.error("Migration test failed", error); + this.emit("failed", { error }); throw error; } } - + /** * Validate that we have a staged migration ready for testing */ async validateStagedMigration() { if (!fs.existsSync(this.currentMigrationDir)) { - throw new Error('No staged migration found. Run "data compile-migration" first.'); + throw new Error( + 'No staged migration found. 
Run "data compile-migration" first.', + ); } - - const migrationFile = path.join(this.currentMigrationDir, 'migration.sql'); + + const migrationFile = path.join(this.currentMigrationDir, "migration.sql"); if (!fs.existsSync(migrationFile)) { - throw new Error('No migration.sql file found in staged migration.'); + throw new Error("No migration.sql file found in staged migration."); } - - const metadataFile = path.join(this.currentMigrationDir, 'metadata.json'); + + const metadataFile = path.join(this.currentMigrationDir, "metadata.json"); if (!fs.existsSync(metadataFile)) { - throw new Error('No metadata.json file found in staged migration.'); + throw new Error("No metadata.json file found in staged migration."); } } - + /** * Get migration metadata from staged migration */ @@ -109,195 +117,211 @@ class MigrateTestCommand extends Command { const metadata = new MigrationMetadata(this.currentMigrationDir); return metadata.read(); } - + /** * Create isolated test database with unique name */ async createTestDatabase() { const timestamp = Date.now(); const testDbName = `temp_test_${timestamp}`; - + // Get base database connection info const baseDbUrl = this.getBaseDbUrl(); const testDbUrl = this.createTestDbUrl(baseDbUrl, testDbName); - + try { // Create test database this.progress(`Creating test database: ${testDbName}`); - await this.processWrapper.execute('createdb', [ - testDbName, - '-h', 'localhost', - '-p', '54332', - '-U', 'postgres' - ], { - env: { ...process.env, PGPASSWORD: 'postgres' }, - timeout: 10000 - }); - + await this.processWrapper.execute( + "createdb", + [testDbName, "-h", "localhost", "-p", "54332", "-U", "postgres"], + { + env: { ...process.env, PGPASSWORD: "postgres" }, + timeout: 10000, + }, + ); + return testDbUrl; } catch (error) { throw new Error(`Failed to create test database: ${error.message}`); } } - + /** * Apply staged migration to test database */ async applyMigration(testDbUrl) { - const migrationFile = path.join(this.currentMigrationDir, 'migration.sql'); - + const migrationFile = path.join(this.currentMigrationDir, "migration.sql"); + try { - this.progress('Applying migration to test database'); - await this.processWrapper.execute('psql', [ - testDbUrl, - '-f', migrationFile - ], { - env: { ...process.env, PGPASSWORD: 'postgres' }, - timeout: 30000 - }); + this.progress("Applying migration to test database"); + await this.processWrapper.execute( + "psql", + [testDbUrl, "-f", migrationFile], + { + env: { ...process.env, PGPASSWORD: "postgres" }, + timeout: 30000, + }, + ); } catch (error) { throw new Error(`Failed to apply migration: ${error.message}`); } } - + /** * Run pgTAP tests if available */ async runPgTapTests(testDbUrl) { // Check if pgTAP is available const hasPgTap = await this.checkPgTapAvailable(testDbUrl); - + if (!hasPgTap) { - this.warn('pgTAP not available, skipping test validation'); + this.warn("pgTAP not available, skipping test validation"); return { passed: 0, failed: 0, total: 0, - message: 'pgTAP not available' + message: "pgTAP not available", }; } - + try { // Run pgTAP tests - this.progress('Running pgTAP test suite'); - + this.progress("Running pgTAP test suite"); + // Check if we have test functions available const testFunctions = await this.getAvailableTestFunctions(testDbUrl); - + if (testFunctions.length === 0) { - this.warn('No test functions found, creating basic validation test'); + this.warn("No test functions found, creating basic validation test"); return await this.runBasicValidationTest(testDbUrl); } - + // Run all 
available test functions let totalPassed = 0; let totalFailed = 0; - + for (const testFunction of testFunctions) { const result = await this.runTestFunction(testDbUrl, testFunction); totalPassed += result.passed; totalFailed += result.failed; } - + return { passed: totalPassed, failed: totalFailed, total: totalPassed + totalFailed, - message: `Ran ${testFunctions.length} test function(s)` + message: `Ran ${testFunctions.length} test function(s)`, }; - } catch (error) { throw new Error(`pgTAP test execution failed: ${error.message}`); } } - + /** * Check if pgTAP extension is available */ async checkPgTapAvailable(testDbUrl) { try { - const result = execSync(`psql "${testDbUrl}" -c "SELECT 1 FROM pg_extension WHERE extname = 'pgtap';"`, { - stdio: 'pipe', - encoding: 'utf8', - env: { ...process.env, PGPASSWORD: 'postgres' } - }); - - return result.includes('(1 row)'); + const result = execSync( + `psql "${testDbUrl}" -c "SELECT 1 FROM pg_extension WHERE extname = 'pgtap';"`, + { + stdio: "pipe", + encoding: "utf8", + env: { ...process.env, PGPASSWORD: "postgres" }, + }, + ); + + return result.includes("(1 row)"); } catch (error) { // Try to install pgTAP extension try { - this.progress('Installing pgTAP extension'); - execSync(`psql "${testDbUrl}" -c "CREATE EXTENSION IF NOT EXISTS pgtap;"`, { - stdio: 'pipe', - env: { ...process.env, PGPASSWORD: 'postgres' } - }); + this.progress("Installing pgTAP extension"); + execSync( + `psql "${testDbUrl}" -c "CREATE EXTENSION IF NOT EXISTS pgtap;"`, + { + stdio: "pipe", + env: { ...process.env, PGPASSWORD: "postgres" }, + }, + ); return true; } catch (installError) { - this.warn('Could not install pgTAP extension'); + this.warn("Could not install pgTAP extension"); return false; } } } - + /** * Get available test functions in test schema */ async getAvailableTestFunctions(testDbUrl) { try { - const result = execSync(`psql "${testDbUrl}" -c "SELECT routine_name FROM information_schema.routines WHERE routine_schema = 'test' AND routine_name LIKE '%test%' ORDER BY routine_name;"`, { - stdio: 'pipe', - encoding: 'utf8', - env: { ...process.env, PGPASSWORD: 'postgres' } - }); - - const lines = result.split('\n').filter(line => - line.trim() && - !line.includes('routine_name') && - !line.includes('------') && - !line.includes('(') && - !line.includes('row') + const result = execSync( + `psql "${testDbUrl}" -c "SELECT routine_name FROM information_schema.routines WHERE routine_schema = 'test' AND routine_name LIKE '%test%' ORDER BY routine_name;"`, + { + stdio: "pipe", + encoding: "utf8", + env: { ...process.env, PGPASSWORD: "postgres" }, + }, ); - - return lines.map(line => line.trim()).filter(name => name.length > 0); + + const lines = result + .split("\n") + .filter( + (line) => + line.trim() && + !line.includes("routine_name") && + !line.includes("------") && + !line.includes("(") && + !line.includes("row"), + ); + + return lines.map((line) => line.trim()).filter((name) => name.length > 0); } catch (error) { - this.warn('Could not query test functions'); + this.warn("Could not query test functions"); return []; } } - + /** * Run a specific test function */ async runTestFunction(testDbUrl, functionName) { try { - const result = execSync(`psql "${testDbUrl}" -c "SELECT * FROM test.${functionName}();"`, { - stdio: 'pipe', - encoding: 'utf8', - env: { ...process.env, PGPASSWORD: 'postgres' } - }); - + const result = execSync( + `psql "${testDbUrl}" -c "SELECT * FROM test.${functionName}();"`, + { + stdio: "pipe", + encoding: "utf8", + env: { ...process.env, 
PGPASSWORD: "postgres" }, + }, + ); + // Parse pgTAP results (simplified parsing) - const lines = result.split('\n'); + const lines = result.split("\n"); let passed = 0; let failed = 0; - + for (const line of lines) { - if (line.includes('ok ')) { + if (line.includes("ok ")) { passed++; - } else if (line.includes('not ok ')) { + } else if (line.includes("not ok ")) { failed++; } } - - this.progress(`Test function ${functionName}: ${passed} passed, ${failed} failed`); - + + this.progress( + `Test function ${functionName}: ${passed} passed, ${failed} failed`, + ); + return { passed, failed }; } catch (error) { this.warn(`Test function ${functionName} failed: ${error.message}`); return { passed: 0, failed: 1 }; } } - + /** * Run basic validation test when no test functions available */ @@ -307,21 +331,21 @@ class MigrateTestCommand extends Command { const checks = [ "SELECT CASE WHEN current_database() IS NOT NULL THEN 'ok 1 - database connection' ELSE 'not ok 1 - database connection' END", "SELECT CASE WHEN count(*) > 0 THEN 'ok 2 - has tables' ELSE 'not ok 2 - has tables' END FROM information_schema.tables WHERE table_schema NOT IN ('information_schema', 'pg_catalog')", - "SELECT CASE WHEN count(*) >= 0 THEN 'ok 3 - schema valid' ELSE 'not ok 3 - schema valid' END FROM information_schema.schemata" + "SELECT CASE WHEN count(*) >= 0 THEN 'ok 3 - schema valid' ELSE 'not ok 3 - schema valid' END FROM information_schema.schemata", ]; - + let passed = 0; let failed = 0; - + for (const check of checks) { try { const result = execSync(`psql "${testDbUrl}" -c "${check};"`, { - stdio: 'pipe', - encoding: 'utf8', - env: { ...process.env, PGPASSWORD: 'postgres' } + stdio: "pipe", + encoding: "utf8", + env: { ...process.env, PGPASSWORD: "postgres" }, }); - - if (result.includes('ok ')) { + + if (result.includes("ok ")) { passed++; } else { failed++; @@ -330,77 +354,77 @@ class MigrateTestCommand extends Command { failed++; } } - + return { passed, failed, total: passed + failed, - message: 'Basic validation tests' + message: "Basic validation tests", }; } catch (error) { throw new Error(`Basic validation test failed: ${error.message}`); } } - + /** * Update metadata with test results */ async updateTestResults(migrationId, testResults) { const metadata = new MigrationMetadata(this.currentMigrationDir); - + const updates = { - status: testResults.failed > 0 ? 'pending' : 'tested', + status: testResults.failed > 0 ? 
"pending" : "tested", testing: { tested_at: new Date().toISOString(), tests_passed: testResults.passed, - tests_failed: testResults.failed - } + tests_failed: testResults.failed, + }, }; - + metadata.update(updates); - this.progress('Updated migration metadata with test results'); + this.progress("Updated migration metadata with test results"); } - + /** * Clean up test database */ async cleanupTestDatabase(testDbUrl) { const dbName = this.getDbName(testDbUrl); - + try { // Drop test database execSync(`dropdb "${dbName}" -h localhost -p 54332 -U postgres`, { - stdio: 'pipe', - env: { ...process.env, PGPASSWORD: 'postgres' } + stdio: "pipe", + env: { ...process.env, PGPASSWORD: "postgres" }, }); } catch (error) { this.warn(`Could not cleanup test database ${dbName}: ${error.message}`); // Don't throw - cleanup failure shouldn't fail the test } } - + /** * Get base database URL from environment or config */ getBaseDbUrl() { // Default to local Supabase instance - return 'postgresql://postgres:postgres@127.0.0.1:54332/postgres'; + return "postgresql://postgres:postgres@127.0.0.1:54332/postgres"; } - + /** * Create test database URL from base URL and test database name */ createTestDbUrl(baseUrl, testDbName) { return baseUrl.replace(/\/[^\/]*$/, `/${testDbName}`); } - + /** * Extract database name from URL */ getDbName(dbUrl) { const match = dbUrl.match(/\/([^\/]+)$/); - return match ? match[1] : 'unknown'; + return match ? match[1] : "unknown"; } } -module.exports = MigrateTestCommand; \ No newline at end of file +module.exports = MigrateTestCommand; diff --git a/src/commands/db/migrate/verify.js b/src/commands/db/migrate/verify.js index 44ba919..05d5864 100644 --- a/src/commands/db/migrate/verify.js +++ b/src/commands/db/migrate/verify.js @@ -2,17 +2,17 @@ * Migration Verify Command */ -const Command = require('../../../lib/Command'); -const fs = require('fs').promises; -const path = require('path'); -const crypto = require('crypto'); +const Command = require("../../../lib/Command"); +const fs = require("fs").promises; +const path = require("path"); +const crypto = require("crypto"); /** * Verify migration integrity including file hashes and metadata validation */ class MigrateVerifyCommand extends Command { - static description = 'Verify migration integrity'; - + static description = "Verify migration integrity"; + constructor(config = null, logger = null, isProd = false) { super(config, logger, isProd); this.requiresProductionConfirmation = false; // Read-only verification @@ -22,69 +22,73 @@ class MigrateVerifyCommand extends Command { * Execute migration verification */ async performExecute(args = {}) { - this.emit('start'); - + this.emit("start"); + try { const fix = args.fix || args.f || false; const verbose = args.verbose || args.v || false; - - this.progress('Starting migration integrity verification...'); - + + this.progress("Starting migration integrity verification..."); + let totalChecks = 0; let passedChecks = 0; let failedChecks = 0; const issues = []; - + // Check file hashes const hashResult = await this.verifyFileHashes(verbose); totalChecks += hashResult.total; passedChecks += hashResult.passed; failedChecks += hashResult.failed; issues.push(...hashResult.issues); - + // Validate metadata const metadataResult = await this.validateMetadata(verbose); totalChecks += metadataResult.total; passedChecks += metadataResult.passed; failedChecks += metadataResult.failed; issues.push(...metadataResult.issues); - + // Check migration dependencies const depResult = await 
this.checkDependencies(verbose); totalChecks += depResult.total; passedChecks += depResult.passed; failedChecks += depResult.failed; issues.push(...depResult.issues); - + // Check SQL syntax const sqlResult = await this.verifySqlSyntax(verbose); totalChecks += sqlResult.total; passedChecks += sqlResult.passed; failedChecks += sqlResult.failed; issues.push(...sqlResult.issues); - + // Fix issues if requested if (fix && issues.length > 0) { await this.fixIssues(issues); } - + // Display results - this.displayVerificationResults(totalChecks, passedChecks, failedChecks, issues); - - this.emit('complete', { - totalChecks, - passedChecks, - failedChecks, - issues: issues.length + this.displayVerificationResults( + totalChecks, + passedChecks, + failedChecks, + issues, + ); + + this.emit("complete", { + totalChecks, + passedChecks, + failedChecks, + issues: issues.length, }); - } catch (error) { - this.error('Migration verification failed', error); - this.emit('failed', { error }); + this.error("Migration verification failed", error); + this.emit("failed", { error }); throw error; } } - + /** * Verify file hashes against stored checksums */ @@ -93,55 +97,64 @@ class MigrateVerifyCommand extends Command { let passed = 0; let failed = 0; const issues = []; - + try { - const migrationsDir = path.resolve('supabase/migrations'); - const checksumFile = path.resolve('supabase/.migration_checksums.json'); - + const migrationsDir = path.resolve("supabase/migrations"); + const checksumFile = path.resolve("supabase/.migration_checksums.json"); + // Check if migrations directory exists - const migrationsExists = await fs.access(migrationsDir).then(() => true).catch(() => false); + const migrationsExists = await fs + .access(migrationsDir) + .then(() => true) + .catch(() => false); if (!migrationsExists) { - issues.push({ type: 'missing_directory', path: migrationsDir }); + issues.push({ type: "missing_directory", path: migrationsDir }); return { total, passed, failed, issues }; } - + // Load stored checksums let storedChecksums = {}; - const checksumExists = await fs.access(checksumFile).then(() => true).catch(() => false); + const checksumExists = await fs + .access(checksumFile) + .then(() => true) + .catch(() => false); if (checksumExists) { - const checksumContent = await fs.readFile(checksumFile, 'utf8'); + const checksumContent = await fs.readFile(checksumFile, "utf8"); storedChecksums = JSON.parse(checksumContent); } - + // Get all migration files const files = await fs.readdir(migrationsDir); - const migrationFiles = files.filter(f => f.endsWith('.sql')); - + const migrationFiles = files.filter((f) => f.endsWith(".sql")); + for (const file of migrationFiles) { total++; const filePath = path.join(migrationsDir, file); - + // Calculate current hash - const content = await fs.readFile(filePath, 'utf8'); - const currentHash = crypto.createHash('sha256').update(content).digest('hex'); - + const content = await fs.readFile(filePath, "utf8"); + const currentHash = crypto + .createHash("sha256") + .update(content) + .digest("hex"); + // Compare with stored hash const storedHash = storedChecksums[file]; - + if (!storedHash) { - issues.push({ - type: 'missing_checksum', - file, - currentHash + issues.push({ + type: "missing_checksum", + file, + currentHash, }); failed++; if (verbose) this.warn(`Missing checksum for: ${file}`); } else if (storedHash !== currentHash) { - issues.push({ - type: 'checksum_mismatch', - file, - storedHash, - currentHash + issues.push({ + type: "checksum_mismatch", + file, + 
storedHash, + currentHash, }); failed++; if (verbose) this.warn(`Checksum mismatch for: ${file}`); @@ -150,14 +163,13 @@ class MigrateVerifyCommand extends Command { if (verbose) this.progress(`Hash verified: ${file}`); } } - } catch (error) { - issues.push({ type: 'hash_verification_error', error: error.message }); + issues.push({ type: "hash_verification_error", error: error.message }); } - + return { total, passed, failed, issues }; } - + /** * Validate migration metadata */ @@ -166,52 +178,59 @@ class MigrateVerifyCommand extends Command { let passed = 0; let failed = 0; const issues = []; - + try { - const historyFile = path.resolve('supabase/.migration_history.json'); - const historyExists = await fs.access(historyFile).then(() => true).catch(() => false); - + const historyFile = path.resolve("supabase/.migration_history.json"); + const historyExists = await fs + .access(historyFile) + .then(() => true) + .catch(() => false); + if (!historyExists) { - issues.push({ type: 'missing_history_file', path: historyFile }); + issues.push({ type: "missing_history_file", path: historyFile }); return { total, passed, failed, issues }; } - - const historyContent = await fs.readFile(historyFile, 'utf8'); + + const historyContent = await fs.readFile(historyFile, "utf8"); let history; - + // Validate JSON structure total++; try { history = JSON.parse(historyContent); passed++; - if (verbose) this.progress('History JSON is valid'); + if (verbose) this.progress("History JSON is valid"); } catch (parseError) { - issues.push({ type: 'invalid_json', file: historyFile, error: parseError.message }); + issues.push({ + type: "invalid_json", + file: historyFile, + error: parseError.message, + }); failed++; return { total, passed, failed, issues }; } - + // Validate each history entry for (const [index, entry] of history.entries()) { total++; - - const requiredFields = ['action', 'timestamp']; - const missingFields = requiredFields.filter(field => !entry[field]); - + + const requiredFields = ["action", "timestamp"]; + const missingFields = requiredFields.filter((field) => !entry[field]); + if (missingFields.length > 0) { - issues.push({ - type: 'missing_required_fields', - entry: index, - missingFields + issues.push({ + type: "missing_required_fields", + entry: index, + missingFields, }); failed++; } else { // Validate timestamp format if (isNaN(new Date(entry.timestamp).getTime())) { - issues.push({ - type: 'invalid_timestamp', - entry: index, - timestamp: entry.timestamp + issues.push({ + type: "invalid_timestamp", + entry: index, + timestamp: entry.timestamp, }); failed++; } else { @@ -220,14 +239,13 @@ class MigrateVerifyCommand extends Command { } } } - } catch (error) { - issues.push({ type: 'metadata_validation_error', error: error.message }); + issues.push({ type: "metadata_validation_error", error: error.message }); } - + return { total, passed, failed, issues }; } - + /** * Check migration dependencies */ @@ -236,48 +254,51 @@ class MigrateVerifyCommand extends Command { let passed = 0; let failed = 0; const issues = []; - + try { - const migrationsDir = path.resolve('supabase/migrations'); - const migrationsExists = await fs.access(migrationsDir).then(() => true).catch(() => false); - + const migrationsDir = path.resolve("supabase/migrations"); + const migrationsExists = await fs + .access(migrationsDir) + .then(() => true) + .catch(() => false); + if (!migrationsExists) { return { total, passed, failed, issues }; } - + const files = await fs.readdir(migrationsDir); - const migrationFiles = 
files.filter(f => f.endsWith('.sql')).sort(); - + const migrationFiles = files.filter((f) => f.endsWith(".sql")).sort(); + for (let i = 0; i < migrationFiles.length; i++) { total++; const file = migrationFiles[i]; const filePath = path.join(migrationsDir, file); - + // Check if migration follows naming convention const timestampMatch = file.match(/^(\d{14})_/); if (!timestampMatch) { - issues.push({ - type: 'invalid_naming_convention', + issues.push({ + type: "invalid_naming_convention", file, - expected: 'YYYYMMDDHHMMSS_description.sql' + expected: "YYYYMMDDHHMMSS_description.sql", }); failed++; continue; } - + // Check chronological order if (i > 0) { const prevFile = migrationFiles[i - 1]; const prevTimestamp = prevFile.match(/^(\d{14})_/)?.[1]; const currentTimestamp = timestampMatch[1]; - + if (currentTimestamp <= prevTimestamp) { - issues.push({ - type: 'chronological_order_violation', + issues.push({ + type: "chronological_order_violation", file, prevFile, currentTimestamp, - prevTimestamp + prevTimestamp, }); failed++; } else { @@ -288,14 +309,13 @@ class MigrateVerifyCommand extends Command { passed++; } } - } catch (error) { - issues.push({ type: 'dependency_check_error', error: error.message }); + issues.push({ type: "dependency_check_error", error: error.message }); } - + return { total, passed, failed, issues }; } - + /** * Verify SQL syntax (basic check) */ @@ -304,26 +324,29 @@ class MigrateVerifyCommand extends Command { let passed = 0; let failed = 0; const issues = []; - + try { - const migrationsDir = path.resolve('supabase/migrations'); - const migrationsExists = await fs.access(migrationsDir).then(() => true).catch(() => false); - + const migrationsDir = path.resolve("supabase/migrations"); + const migrationsExists = await fs + .access(migrationsDir) + .then(() => true) + .catch(() => false); + if (!migrationsExists) { return { total, passed, failed, issues }; } - + const files = await fs.readdir(migrationsDir); - const migrationFiles = files.filter(f => f.endsWith('.sql')); - + const migrationFiles = files.filter((f) => f.endsWith(".sql")); + for (const file of migrationFiles) { total++; const filePath = path.join(migrationsDir, file); - const content = await fs.readFile(filePath, 'utf8'); - + const content = await fs.readFile(filePath, "utf8"); + // Basic SQL syntax checks const syntaxIssues = this.checkBasicSqlSyntax(content, file); - + if (syntaxIssues.length > 0) { issues.push(...syntaxIssues); failed++; @@ -333,131 +356,143 @@ class MigrateVerifyCommand extends Command { if (verbose) this.progress(`SQL syntax OK: ${file}`); } } - } catch (error) { - issues.push({ type: 'sql_syntax_error', error: error.message }); + issues.push({ type: "sql_syntax_error", error: error.message }); } - + return { total, passed, failed, issues }; } - + /** * Basic SQL syntax checking */ checkBasicSqlSyntax(content, filename) { const issues = []; - + // Check for common SQL issues - const lines = content.split('\n'); - + const lines = content.split("\n"); + lines.forEach((line, index) => { const lineNum = index + 1; - + // Check for unterminated statements (basic check) - if (line.trim().length > 0 && - !line.trim().startsWith('--') && - !line.includes(';') && - lineNum === lines.length) { + if ( + line.trim().length > 0 && + !line.trim().startsWith("--") && + !line.includes(";") && + lineNum === lines.length + ) { issues.push({ - type: 'unterminated_statement', + type: "unterminated_statement", file: filename, line: lineNum, - content: line.trim() + content: line.trim(), }); } - + // 
Check for potentially dangerous operations without transactions - const dangerousOps = ['DROP TABLE', 'TRUNCATE', 'DELETE FROM']; + const dangerousOps = ["DROP TABLE", "TRUNCATE", "DELETE FROM"]; const upperLine = line.toUpperCase(); - - dangerousOps.forEach(op => { - if (upperLine.includes(op) && !content.toUpperCase().includes('BEGIN') && !content.toUpperCase().includes('TRANSACTION')) { + + dangerousOps.forEach((op) => { + if ( + upperLine.includes(op) && + !content.toUpperCase().includes("BEGIN") && + !content.toUpperCase().includes("TRANSACTION") + ) { issues.push({ - type: 'dangerous_operation_without_transaction', + type: "dangerous_operation_without_transaction", file: filename, line: lineNum, - operation: op + operation: op, }); } }); }); - + return issues; } - + /** * Fix detected issues */ async fixIssues(issues) { - this.progress('Attempting to fix detected issues...'); - + this.progress("Attempting to fix detected issues..."); + for (const issue of issues) { try { switch (issue.type) { - case 'missing_checksum': + case "missing_checksum": await this.fixMissingChecksum(issue); break; - case 'checksum_mismatch': - this.warn(`Cannot auto-fix checksum mismatch for ${issue.file} - manual review required`); + case "checksum_mismatch": + this.warn( + `Cannot auto-fix checksum mismatch for ${issue.file} - manual review required`, + ); break; default: this.warn(`Cannot auto-fix issue type: ${issue.type}`); } } catch (error) { - this.warn(`Failed to fix issue: ${issue.type}`, { error: error.message }); + this.warn(`Failed to fix issue: ${issue.type}`, { + error: error.message, + }); } } } - + /** * Fix missing checksum by generating it */ async fixMissingChecksum(issue) { - const checksumFile = path.resolve('supabase/.migration_checksums.json'); - + const checksumFile = path.resolve("supabase/.migration_checksums.json"); + let checksums = {}; - const checksumExists = await fs.access(checksumFile).then(() => true).catch(() => false); + const checksumExists = await fs + .access(checksumFile) + .then(() => true) + .catch(() => false); if (checksumExists) { - const content = await fs.readFile(checksumFile, 'utf8'); + const content = await fs.readFile(checksumFile, "utf8"); checksums = JSON.parse(content); } - + checksums[issue.file] = issue.currentHash; - + await fs.writeFile(checksumFile, JSON.stringify(checksums, null, 2)); this.progress(`Generated checksum for: ${issue.file}`); } - + /** * Display verification results */ displayVerificationResults(totalChecks, passedChecks, failedChecks, issues) { - console.log('\n🔍 Migration Verification Results'); - console.log('═══════════════════════════════════\n'); - + console.log("\n🔍 Migration Verification Results"); + console.log("═══════════════════════════════════\n"); + console.log(`Total checks: ${totalChecks}`); console.log(`Passed: ${passedChecks} ✅`); console.log(`Failed: ${failedChecks} ❌`); - console.log(''); - + console.log(""); + if (issues.length > 0) { - console.log('Issues found:'); + console.log("Issues found:"); issues.forEach((issue, index) => { - console.log(`${index + 1}. ${issue.type}: ${issue.file || 'N/A'}`); + console.log(`${index + 1}. 
${issue.type}: ${issue.file || "N/A"}`); if (issue.error) { console.log(` Error: ${issue.error}`); } }); - console.log(''); + console.log(""); } - + if (failedChecks === 0) { - console.log('✅ All verification checks passed!'); + console.log("✅ All verification checks passed!"); } else { console.log(`⚠️ ${failedChecks} checks failed - review issues above`); } - console.log(''); + console.log(""); } } -module.exports = MigrateVerifyCommand; \ No newline at end of file +module.exports = MigrateVerifyCommand; diff --git a/src/commands/functions/DeployCommand.js b/src/commands/functions/DeployCommand.js index a938095..933057e 100644 --- a/src/commands/functions/DeployCommand.js +++ b/src/commands/functions/DeployCommand.js @@ -1,14 +1,14 @@ /** * Edge Functions Deployment Command - * + * * Integrates Supabase Edge Functions deployment with data's event-driven architecture * Provides deployment validation, environment checking, and rollback capabilities */ -const fs = require('fs'); -const path = require('path'); -const { execSync } = require('child_process'); -const Command = require('../../lib/Command'); +const fs = require("fs"); +const path = require("path"); +const { execSync } = require("child_process"); +const Command = require("../../lib/Command"); class DeployCommand extends Command { constructor(functionsPath, logger = null, isProd = false) { @@ -23,7 +23,7 @@ class DeployCommand extends Command { * @param {Object} options - Deployment options */ async performExecute(functionNames = null, options = {}) { - this.progress('🚀 Starting Edge Functions deployment process'); + this.progress("🚀 Starting Edge Functions deployment process"); try { // Validate environment and prerequisites @@ -31,14 +31,14 @@ class DeployCommand extends Command { // Get functions to deploy const functionsToDeploy = await this.resolveFunctionsList(functionNames); - + if (functionsToDeploy.length === 0) { - this.warn('No functions found to deploy'); + this.warn("No functions found to deploy"); return; } this.progress(`📦 Deploying ${functionsToDeploy.length} function(s)`, { - functions: functionsToDeploy + functions: functionsToDeploy, }); // Validate each function before deployment @@ -52,49 +52,51 @@ class DeployCommand extends Command { try { const result = await this.deployFunction(functionName, options); results.push(result); - this.emit('function-deployed', { - function: functionName, + this.emit("function-deployed", { + function: functionName, success: true, - result + result, }); } catch (error) { this.error(`Failed to deploy function: ${functionName}`, error); results.push({ function: functionName, success: false, - error: error.message + error: error.message, }); - this.emit('function-deployed', { - function: functionName, + this.emit("function-deployed", { + function: functionName, success: false, - error: error.message + error: error.message, }); } } // Emit deployment summary - const successful = results.filter(r => r.success); - const failed = results.filter(r => !r.success); + const successful = results.filter((r) => r.success); + const failed = results.filter((r) => !r.success); - this.emit('deployment-complete', { + this.emit("deployment-complete", { total: results.length, successful: successful.length, failed: failed.length, - results + results, }); if (failed.length > 0) { this.warn(`Deployment completed with ${failed.length} failure(s)`, { - failed: failed.map(f => f.function) + failed: failed.map((f) => f.function), }); } else { - this.success(`✅ Successfully deployed ${successful.length} 
function(s)`, { - deployed: successful.map(s => s.function) - }); + this.success( + `✅ Successfully deployed ${successful.length} function(s)`, + { + deployed: successful.map((s) => s.function), + }, + ); } - } catch (error) { - this.error('Functions deployment failed', error); + this.error("Functions deployment failed", error); throw error; } } @@ -103,23 +105,27 @@ class DeployCommand extends Command { * Validate deployment environment and prerequisites */ async validateEnvironment() { - this.progress('🔍 Validating deployment environment'); + this.progress("🔍 Validating deployment environment"); // Check if supabase CLI is available try { - execSync('supabase --version', { stdio: 'pipe' }); + execSync("supabase --version", { stdio: "pipe" }); } catch (error) { - throw new Error('Supabase CLI not found. Please install: npm install -g supabase'); + throw new Error( + "Supabase CLI not found. Please install: npm install -g supabase", + ); } // Check if we're in a Supabase project - if (!fs.existsSync(path.join(process.cwd(), 'supabase', 'config.toml'))) { - throw new Error('Not in a Supabase project root. Run from project root directory.'); + if (!fs.existsSync(path.join(process.cwd(), "supabase", "config.toml"))) { + throw new Error( + "Not in a Supabase project root. Run from project root directory.", + ); } // Check functions directory exists if (!fs.existsSync(this.functionsPath)) { - throw new Error('Functions directory not found: supabase/functions/'); + throw new Error("Functions directory not found: supabase/functions/"); } // Validate environment variables for production @@ -127,20 +133,20 @@ class DeployCommand extends Command { await this.validateProductionSecrets(); } - this.progress('✅ Environment validation complete'); + this.progress("✅ Environment validation complete"); } /** * Validate production secrets are configured */ async validateProductionSecrets() { - this.progress('🔐 Validating production secrets'); + this.progress("🔐 Validating production secrets"); const requiredSecrets = [ - 'STRIPE_PUBLISHABLE_KEY', - 'STRIPE_SECRET_KEY', - 'STRIPE_WEBHOOK_SECRET', - 'SUPABASE_SERVICE_ROLE_KEY' + "STRIPE_PUBLISHABLE_KEY", + "STRIPE_SECRET_KEY", + "STRIPE_WEBHOOK_SECRET", + "SUPABASE_SERVICE_ROLE_KEY", ]; const missingSecrets = []; @@ -148,10 +154,12 @@ class DeployCommand extends Command { for (const secret of requiredSecrets) { try { // Check if secret exists in Supabase - const result = execSync(`supabase secrets list --json`, { stdio: 'pipe' }); + const result = execSync(`supabase secrets list --json`, { + stdio: "pipe", + }); const secrets = JSON.parse(result.toString()); - - if (!secrets.find(s => s.name === secret)) { + + if (!secrets.find((s) => s.name === secret)) { missingSecrets.push(secret); } } catch (error) { @@ -160,10 +168,12 @@ class DeployCommand extends Command { } if (missingSecrets.length > 0) { - throw new Error(`Missing required production secrets: ${missingSecrets.join(', ')}`); + throw new Error( + `Missing required production secrets: ${missingSecrets.join(", ")}`, + ); } - this.progress('✅ Production secrets validated'); + this.progress("✅ Production secrets validated"); } /** @@ -179,20 +189,20 @@ class DeployCommand extends Command { missing.push(name); } } - + if (missing.length > 0) { - throw new Error(`Functions not found: ${missing.join(', ')}`); + throw new Error(`Functions not found: ${missing.join(", ")}`); } - + return functionNames; } // Get all functions in directory const entries = fs.readdirSync(this.functionsPath, { withFileTypes: true 
}); return entries - .filter(entry => entry.isDirectory()) - .map(entry => entry.name) - .filter(name => !name.startsWith('.')); + .filter((entry) => entry.isDirectory()) + .map((entry) => entry.name) + .filter((name) => !name.startsWith(".")); } /** @@ -202,34 +212,37 @@ class DeployCommand extends Command { this.progress(`🔍 Validating function: ${functionName}`); const functionPath = path.join(this.functionsPath, functionName); - + // Check for required files - const indexPath = path.join(functionPath, 'index.ts'); + const indexPath = path.join(functionPath, "index.ts"); if (!fs.existsSync(indexPath)) { throw new Error(`Function ${functionName} missing index.ts file`); } // Basic TypeScript syntax check try { - const content = fs.readFileSync(indexPath, 'utf8'); - + const content = fs.readFileSync(indexPath, "utf8"); + // Check for basic Edge Function structure - if (!content.includes('serve(') && !content.includes('Deno.serve(')) { - this.warn(`Function ${functionName} may not have proper serve() handler`); + if (!content.includes("serve(") && !content.includes("Deno.serve(")) { + this.warn( + `Function ${functionName} may not have proper serve() handler`, + ); } // Check for proper imports - if (!content.includes('import') && !content.includes('require(')) { - this.warn(`Function ${functionName} has no imports - may be incomplete`); + if (!content.includes("import") && !content.includes("require(")) { + this.warn( + `Function ${functionName} has no imports - may be incomplete`, + ); } - } catch (error) { this.warn(`Could not validate ${functionName} syntax: ${error.message}`); } - this.emit('function-validated', { + this.emit("function-validated", { function: functionName, - path: functionPath + path: functionPath, }); } @@ -239,55 +252,56 @@ class DeployCommand extends Command { async deployFunction(functionName, options = {}) { this.progress(`🚀 Deploying function: ${functionName}`); - const deployArgs = ['functions', 'deploy', functionName]; - + const deployArgs = ["functions", "deploy", functionName]; + if (options.noVerifyJwt) { - deployArgs.push('--no-verify-jwt'); + deployArgs.push("--no-verify-jwt"); } if (options.debug) { - deployArgs.push('--debug'); + deployArgs.push("--debug"); } if (this.isProd && !options.skipImportMap) { // Production deployment should use import map - const importMapPath = path.join(this.functionsPath, 'import_map.json'); + const importMapPath = path.join(this.functionsPath, "import_map.json"); if (fs.existsSync(importMapPath)) { - deployArgs.push('--import-map', importMapPath); + deployArgs.push("--import-map", importMapPath); } } try { const startTime = Date.now(); - - this.progress(`Executing: supabase ${deployArgs.join(' ')}`); - - const result = execSync(`supabase ${deployArgs.join(' ')}`, { - stdio: 'pipe', - encoding: 'utf8', - cwd: process.cwd() + + this.progress(`Executing: supabase ${deployArgs.join(" ")}`); + + const result = execSync(`supabase ${deployArgs.join(" ")}`, { + stdio: "pipe", + encoding: "utf8", + cwd: process.cwd(), }); const deployTime = Date.now() - startTime; - this.progress(`✅ Function ${functionName} deployed successfully (${deployTime}ms)`); + this.progress( + `✅ Function ${functionName} deployed successfully (${deployTime}ms)`, + ); return { function: functionName, success: true, deployTime, - output: result + output: result, }; - } catch (error) { this.error(`Failed to deploy ${functionName}`, error); - + return { function: functionName, success: false, error: error.message, stdout: error.stdout?.toString(), - stderr: 
error.stderr?.toString() + stderr: error.stderr?.toString(), }; } } @@ -296,22 +310,21 @@ class DeployCommand extends Command { * Get deployment status for functions */ async getDeploymentStatus() { - this.progress('📊 Getting function deployment status'); + this.progress("📊 Getting function deployment status"); try { - const result = execSync('supabase functions list --json', { - stdio: 'pipe', - encoding: 'utf8' + const result = execSync("supabase functions list --json", { + stdio: "pipe", + encoding: "utf8", }); - + const functions = JSON.parse(result); - - this.emit('deployment-status', { functions }); - - return functions; + this.emit("deployment-status", { functions }); + + return functions; } catch (error) { - this.warn('Could not retrieve function status', { error: error.message }); + this.warn("Could not retrieve function status", { error: error.message }); return []; } } @@ -320,22 +333,22 @@ class DeployCommand extends Command { * Production confirmation override with enhanced messaging */ async confirmProduction() { - this.warn('🚨 PRODUCTION FUNCTIONS DEPLOYMENT', { - environment: 'PRODUCTION', - impact: 'HIGH', - warning: 'This will replace live Edge Functions' + this.warn("🚨 PRODUCTION FUNCTIONS DEPLOYMENT", { + environment: "PRODUCTION", + impact: "HIGH", + warning: "This will replace live Edge Functions", }); const confirmed = await this.confirm( - 'Deploy Edge Functions to PRODUCTION environment? This will affect live users.' + "Deploy Edge Functions to PRODUCTION environment? This will affect live users.", ); if (confirmed) { - this.progress('🔒 Production deployment confirmed'); + this.progress("🔒 Production deployment confirmed"); } return confirmed; } } -module.exports = DeployCommand; \ No newline at end of file +module.exports = DeployCommand; diff --git a/src/commands/functions/StatusCommand.js b/src/commands/functions/StatusCommand.js index 88d4ea5..14d4569 100644 --- a/src/commands/functions/StatusCommand.js +++ b/src/commands/functions/StatusCommand.js @@ -1,13 +1,13 @@ /** * Edge Functions Status Command - * + * * Shows deployment status, health, and metrics for Edge Functions */ -const fs = require('fs'); -const path = require('path'); -const { execSync } = require('child_process'); -const Command = require('../../lib/Command'); +const fs = require("fs"); +const path = require("path"); +const { execSync } = require("child_process"); +const Command = require("../../lib/Command"); class StatusCommand extends Command { constructor(config, logger = null, isProd = false) { @@ -20,38 +20,39 @@ class StatusCommand extends Command { * @param {string[]|null} functionNames - Specific functions to check, or null for all */ async performExecute(functionNames = null) { - this.progress('📊 Checking Edge Functions status'); + this.progress("📊 Checking Edge Functions status"); try { // Check if supabase CLI is available try { - execSync('supabase --version', { stdio: 'pipe' }); + execSync("supabase --version", { stdio: "pipe" }); } catch (error) { - throw new Error('Supabase CLI not found. Please install: npm install -g supabase'); + throw new Error( + "Supabase CLI not found. 
Please install: npm install -g supabase", + ); } // Get local functions const localFunctions = await this.getLocalFunctions(functionNames); - + // Get deployed functions const deployedFunctions = await this.getDeployedFunctions(); - + // Combine status information const statusMap = this.combineStatus(localFunctions, deployedFunctions); - - this.emit('status-retrieved', { + + this.emit("status-retrieved", { local: localFunctions.length, deployed: deployedFunctions.length, - functions: statusMap + functions: statusMap, }); // Display status summary this.displayStatusSummary(statusMap); return statusMap; - } catch (error) { - this.error('Failed to retrieve functions status', error); + this.error("Failed to retrieve functions status", error); throw error; } } @@ -61,28 +62,28 @@ class StatusCommand extends Command { */ async getLocalFunctions(functionNames = null) { const functionsPath = this.outputConfig.functionsDir; - + if (!fs.existsSync(functionsPath)) { return []; } const entries = fs.readdirSync(functionsPath, { withFileTypes: true }); let functions = entries - .filter(entry => entry.isDirectory()) - .map(entry => entry.name) - .filter(name => !name.startsWith('.')); + .filter((entry) => entry.isDirectory()) + .map((entry) => entry.name) + .filter((name) => !name.startsWith(".")); // Filter by specified function names if provided if (functionNames && functionNames.length > 0) { - functions = functions.filter(name => functionNames.includes(name)); + functions = functions.filter((name) => functionNames.includes(name)); } const localFunctions = []; for (const functionName of functions) { const functionPath = path.join(functionsPath, functionName); - const indexPath = path.join(functionPath, 'index.ts'); - + const indexPath = path.join(functionPath, "index.ts"); + let size = 0; let lastModified = null; let hasConfig = false; @@ -94,9 +95,8 @@ class StatusCommand extends Command { lastModified = stats.mtime; } - const denoJsonPath = path.join(functionPath, 'deno.json'); + const denoJsonPath = path.join(functionPath, "deno.json"); hasConfig = fs.existsSync(denoJsonPath); - } catch (error) { this.warn(`Could not read stats for function: ${functionName}`); } @@ -107,7 +107,7 @@ class StatusCommand extends Command { size, lastModified, hasConfig, - hasIndex: fs.existsSync(indexPath) + hasIndex: fs.existsSync(indexPath), }); } @@ -119,27 +119,26 @@ class StatusCommand extends Command { */ async getDeployedFunctions() { try { - this.progress('🌐 Fetching deployed functions from Supabase'); + this.progress("🌐 Fetching deployed functions from Supabase"); - const result = execSync('supabase functions list --json', { - stdio: 'pipe', - encoding: 'utf8' + const result = execSync("supabase functions list --json", { + stdio: "pipe", + encoding: "utf8", }); - + const deployedFunctions = JSON.parse(result); - - return deployedFunctions.map(func => ({ + + return deployedFunctions.map((func) => ({ name: func.name, id: func.id, - status: func.status || 'unknown', + status: func.status || "unknown", createdAt: func.created_at, updatedAt: func.updated_at, - version: func.version + version: func.version, })); - } catch (error) { - this.warn('Could not retrieve deployed functions list', { - error: error.message + this.warn("Could not retrieve deployed functions list", { + error: error.message, }); return []; } @@ -157,23 +156,23 @@ class StatusCommand extends Command { name: local.name, local: local, deployed: null, - status: 'local-only' + status: "local-only", }); } // Add deployed functions for (const deployed 
of deployedFunctions) { const existing = statusMap.get(deployed.name); - + if (existing) { existing.deployed = deployed; - existing.status = 'deployed'; + existing.status = "deployed"; } else { statusMap.set(deployed.name, { name: deployed.name, local: null, deployed: deployed, - status: 'deployed-only' + status: "deployed-only", }); } } @@ -185,22 +184,22 @@ class StatusCommand extends Command { * Display status summary */ displayStatusSummary(statusMap) { - const localOnly = statusMap.filter(f => f.status === 'local-only'); - const deployed = statusMap.filter(f => f.status === 'deployed'); - const deployedOnly = statusMap.filter(f => f.status === 'deployed-only'); + const localOnly = statusMap.filter((f) => f.status === "local-only"); + const deployed = statusMap.filter((f) => f.status === "deployed"); + const deployedOnly = statusMap.filter((f) => f.status === "deployed-only"); this.success(`📈 Functions Status Summary`, { total: statusMap.length, localOnly: localOnly.length, deployed: deployed.length, - deployedOnly: deployedOnly.length + deployedOnly: deployedOnly.length, }); // Emit detailed status for each function for (const func of statusMap) { const statusData = { name: func.name, - status: func.status + status: func.status, }; if (func.local) { @@ -208,7 +207,7 @@ class StatusCommand extends Command { hasIndex: func.local.hasIndex, hasConfig: func.local.hasConfig, size: func.local.size, - lastModified: func.local.lastModified?.toISOString() + lastModified: func.local.lastModified?.toISOString(), }; } @@ -217,26 +216,32 @@ class StatusCommand extends Command { id: func.deployed.id, version: func.deployed.version, createdAt: func.deployed.createdAt, - updatedAt: func.deployed.updatedAt + updatedAt: func.deployed.updatedAt, }; } - this.emit('function-status', statusData); + this.emit("function-status", statusData); } // Warn about potential issues if (localOnly.length > 0) { - this.warn(`${localOnly.length} function(s) exist locally but are not deployed`, { - functions: localOnly.map(f => f.name) - }); + this.warn( + `${localOnly.length} function(s) exist locally but are not deployed`, + { + functions: localOnly.map((f) => f.name), + }, + ); } if (deployedOnly.length > 0) { - this.warn(`${deployedOnly.length} function(s) are deployed but not found locally`, { - functions: deployedOnly.map(f => f.name) - }); + this.warn( + `${deployedOnly.length} function(s) are deployed but not found locally`, + { + functions: deployedOnly.map((f) => f.name), + }, + ); } } } -module.exports = StatusCommand; \ No newline at end of file +module.exports = StatusCommand; diff --git a/src/commands/functions/ValidateCommand.js b/src/commands/functions/ValidateCommand.js index 26461cd..507f706 100644 --- a/src/commands/functions/ValidateCommand.js +++ b/src/commands/functions/ValidateCommand.js @@ -1,14 +1,14 @@ /** * Edge Functions Validation Command - * + * * Validates Edge Functions syntax, structure, and dependencies * without deploying them */ -const fs = require('fs'); -const path = require('path'); -const { execSync } = require('child_process'); -const Command = require('../../lib/Command'); +const fs = require("fs"); +const path = require("path"); +const { execSync } = require("child_process"); +const Command = require("../../lib/Command"); class ValidateCommand extends Command { constructor(config, logger = null, isProd = false) { @@ -21,7 +21,7 @@ class ValidateCommand extends Command { * @param {string[]|null} functionNames - Specific functions to validate, or null for all */ async 
performExecute(functionNames = null) { - this.progress('🔍 Starting Edge Functions validation'); + this.progress("🔍 Starting Edge Functions validation"); try { // Check functions directory exists @@ -31,15 +31,16 @@ class ValidateCommand extends Command { } // Get functions to validate - const functionsToValidate = await this.resolveFunctionsList(functionNames); - + const functionsToValidate = + await this.resolveFunctionsList(functionNames); + if (functionsToValidate.length === 0) { - this.warn('No functions found to validate'); + this.warn("No functions found to validate"); return; } this.progress(`📋 Validating ${functionsToValidate.length} function(s)`, { - functions: functionsToValidate + functions: functionsToValidate, }); // Validate each function @@ -50,33 +51,32 @@ class ValidateCommand extends Command { } // Emit validation summary - const valid = results.filter(r => r.isValid); - const invalid = results.filter(r => !r.isValid); + const valid = results.filter((r) => r.isValid); + const invalid = results.filter((r) => !r.isValid); - this.emit('validation-complete', { + this.emit("validation-complete", { total: results.length, valid: valid.length, invalid: invalid.length, - results + results, }); if (invalid.length > 0) { this.warn(`Validation completed with ${invalid.length} issue(s)`, { - invalid: invalid.map(f => ({ + invalid: invalid.map((f) => ({ function: f.function, - issues: f.issues - })) + issues: f.issues, + })), }); } else { this.success(`✅ All ${valid.length} function(s) passed validation`, { - validated: valid.map(v => v.function) + validated: valid.map((v) => v.function), }); } return results; - } catch (error) { - this.error('Functions validation failed', error); + this.error("Functions validation failed", error); throw error; } } @@ -94,20 +94,22 @@ class ValidateCommand extends Command { missing.push(name); } } - + if (missing.length > 0) { - throw new Error(`Functions not found: ${missing.join(', ')}`); + throw new Error(`Functions not found: ${missing.join(", ")}`); } - + return functionNames; } // Get all functions in directory - const entries = fs.readdirSync(this.outputConfig.functionsDir, { withFileTypes: true }); + const entries = fs.readdirSync(this.outputConfig.functionsDir, { + withFileTypes: true, + }); return entries - .filter(entry => entry.isDirectory()) - .map(entry => entry.name) - .filter(name => !name.startsWith('.')); + .filter((entry) => entry.isDirectory()) + .map((entry) => entry.name) + .filter((name) => !name.startsWith(".")); } /** @@ -116,22 +118,28 @@ class ValidateCommand extends Command { async validateFunction(functionName) { this.progress(`🔍 Validating function: ${functionName}`); - const functionPath = path.join(this.outputConfig.functionsDir, functionName); + const functionPath = path.join( + this.outputConfig.functionsDir, + functionName, + ); const issues = []; let isValid = true; // Check for required files - const indexPath = path.join(functionPath, 'index.ts'); + const indexPath = path.join(functionPath, "index.ts"); if (!fs.existsSync(indexPath)) { - issues.push('Missing index.ts file'); + issues.push("Missing index.ts file"); isValid = false; } else { // Validate file content try { - const content = fs.readFileSync(indexPath, 'utf8'); - const contentIssues = this.validateFunctionContent(functionName, content); + const content = fs.readFileSync(indexPath, "utf8"); + const contentIssues = this.validateFunctionContent( + functionName, + content, + ); issues.push(...contentIssues); - + if (contentIssues.length > 0) { isValid = 
false; } @@ -142,13 +150,13 @@ class ValidateCommand extends Command { } // Check for deno.json configuration - const denoJsonPath = path.join(functionPath, 'deno.json'); + const denoJsonPath = path.join(functionPath, "deno.json"); if (fs.existsSync(denoJsonPath)) { try { - const denoConfig = JSON.parse(fs.readFileSync(denoJsonPath, 'utf8')); + const denoConfig = JSON.parse(fs.readFileSync(denoJsonPath, "utf8")); const denoIssues = this.validateDenoConfig(functionName, denoConfig); issues.push(...denoIssues); - + if (denoIssues.length > 0) { isValid = false; } @@ -159,17 +167,20 @@ class ValidateCommand extends Command { } // Check for common dependencies - const dependencyIssues = this.validateDependencies(functionName, functionPath); + const dependencyIssues = this.validateDependencies( + functionName, + functionPath, + ); issues.push(...dependencyIssues); const result = { function: functionName, path: functionPath, isValid, - issues: issues.length > 0 ? issues : ['No issues found'] + issues: issues.length > 0 ? issues : ["No issues found"], }; - this.emit('function-validated', result); + this.emit("function-validated", result); return result; } @@ -181,20 +192,22 @@ class ValidateCommand extends Command { const issues = []; // Check for basic Edge Function structure - if (!content.includes('serve(') && !content.includes('Deno.serve(')) { - issues.push('Missing serve() handler - Edge Function must use Deno.serve() or serve()'); + if (!content.includes("serve(") && !content.includes("Deno.serve(")) { + issues.push( + "Missing serve() handler - Edge Function must use Deno.serve() or serve()", + ); } // Check for proper CORS handling in public functions - if (functionName.includes('webhook') || functionName.includes('api')) { - if (!content.includes('cors') && !content.includes('Access-Control')) { - issues.push('CORS handling recommended for public endpoints'); + if (functionName.includes("webhook") || functionName.includes("api")) { + if (!content.includes("cors") && !content.includes("Access-Control")) { + issues.push("CORS handling recommended for public endpoints"); } } // Check for error handling - if (!content.includes('try') && !content.includes('catch')) { - issues.push('Consider adding try/catch error handling'); + if (!content.includes("try") && !content.includes("catch")) { + issues.push("Consider adding try/catch error handling"); } // Check for environment variable usage @@ -206,7 +219,9 @@ class ValidateCommand extends Command { } if (envVars.length > 0) { - this.progress(`Function ${functionName} uses environment variables: ${envVars.join(', ')}`); + this.progress( + `Function ${functionName} uses environment variables: ${envVars.join(", ")}`, + ); } // Removed overly aggressive secret detection @@ -225,10 +240,10 @@ class ValidateCommand extends Command { // Check for common Deno config issues if (denoConfig.imports) { const imports = denoConfig.imports; - + // Validate import URLs for (const [key, url] of Object.entries(imports)) { - if (!url.startsWith('https://')) { + if (!url.startsWith("https://")) { issues.push(`Import map entry "${key}" should use HTTPS URL`); } } @@ -237,8 +252,10 @@ class ValidateCommand extends Command { // Check for appropriate permissions if (denoConfig.permissions && denoConfig.permissions.allow) { const allows = denoConfig.permissions.allow; - if (allows.includes('--allow-all')) { - issues.push('Avoid --allow-all permission - specify minimal required permissions'); + if (allows.includes("--allow-all")) { + issues.push( + "Avoid --allow-all 
permission - specify minimal required permissions", + ); } } @@ -252,13 +269,18 @@ class ValidateCommand extends Command { const issues = []; // Check if function uses import_map.json - const importMapPath = path.join(this.outputConfig.functionsDir, 'import_map.json'); + const importMapPath = path.join( + this.outputConfig.functionsDir, + "import_map.json", + ); if (fs.existsSync(importMapPath)) { try { - const importMap = JSON.parse(fs.readFileSync(importMapPath, 'utf8')); - this.progress(`Function ${functionName} has access to import map with ${Object.keys(importMap.imports || {}).length} imports`); + const importMap = JSON.parse(fs.readFileSync(importMapPath, "utf8")); + this.progress( + `Function ${functionName} has access to import map with ${Object.keys(importMap.imports || {}).length} imports`, + ); } catch (error) { - issues.push('import_map.json exists but is invalid JSON'); + issues.push("import_map.json exists but is invalid JSON"); } } @@ -266,4 +288,4 @@ class ValidateCommand extends Command { } } -module.exports = ValidateCommand; \ No newline at end of file +module.exports = ValidateCommand; diff --git a/src/commands/functions/index.js b/src/commands/functions/index.js index 9c30c50..166b050 100644 --- a/src/commands/functions/index.js +++ b/src/commands/functions/index.js @@ -2,12 +2,12 @@ * Functions Commands Index */ -const DeployCommand = require('./DeployCommand'); -const ValidateCommand = require('./ValidateCommand'); -const StatusCommand = require('./StatusCommand'); +const DeployCommand = require("./DeployCommand"); +const ValidateCommand = require("./ValidateCommand"); +const StatusCommand = require("./StatusCommand"); module.exports = { DeployCommand, ValidateCommand, - StatusCommand -}; \ No newline at end of file + StatusCommand, +}; diff --git a/src/commands/test/CacheCommand.js b/src/commands/test/CacheCommand.js index 1ef10cc..c621cdc 100644 --- a/src/commands/test/CacheCommand.js +++ b/src/commands/test/CacheCommand.js @@ -2,42 +2,50 @@ * Test Cache Management Command */ -const TestCommand = require('../../lib/TestCommand'); -const TestCache = require('../../lib/test/TestCache'); -const chalk = require('chalk'); +const TestCommand = require("../../lib/TestCommand"); +const TestCache = require("../../lib/test/TestCache"); +const chalk = require("chalk"); /** * Manage test result cache (clear, stats, invalidate) */ class CacheCommand extends TestCommand { - constructor(databaseUrl, serviceRoleKey = null, testsDir, outputDir, logger = null, isProd = false) { + constructor( + databaseUrl, + serviceRoleKey = null, + testsDir, + outputDir, + logger = null, + isProd = false, + ) { super(databaseUrl, serviceRoleKey, testsDir, outputDir, logger, isProd); - this.testCache = new TestCache('.data-cache/test-results', logger); + this.testCache = new TestCache(".data-cache/test-results", logger); } /** * Execute cache management command */ async performExecute(options = {}) { - this.emit('start', { isProd: this.isProd, options }); - + this.emit("start", { isProd: this.isProd, options }); + try { - const action = options.action || 'stats'; - + const action = options.action || "stats"; + switch (action.toLowerCase()) { - case 'clear': + case "clear": return await this._clearCache(options); - case 'stats': + case "stats": return await this._showStats(options); - case 'invalidate': + case "invalidate": return await this._invalidateCache(options); default: - throw new Error(`Unknown cache action: ${action}. 
Use 'clear', 'stats', or 'invalidate'.`); + throw new Error( + `Unknown cache action: ${action}. Use 'clear', 'stats', or 'invalidate'.`, + ); } - } catch (error) { - this.error('Failed to execute cache command', error); - this.emit('failed', { error }); + this.error("Failed to execute cache command", error); + this.emit("failed", { error }); throw error; } } @@ -47,21 +55,21 @@ class CacheCommand extends TestCommand { * @private */ async _clearCache(options) { - this.progress('Clearing test result cache...'); - + this.progress("Clearing test result cache..."); + const result = await this.testCache.clearCache(); - - console.log(''); // Empty line - console.log(chalk.green.bold('✓ Cache cleared successfully')); + + console.log(""); // Empty line + console.log(chalk.green.bold("✓ Cache cleared successfully")); console.log(chalk.green(` ${result.filesRemoved} cache files removed`)); console.log(chalk.green(` Completed in ${result.duration}ms`)); - - this.emit('complete', { - action: 'clear', + + this.emit("complete", { + action: "clear", filesRemoved: result.filesRemoved, - duration: result.duration + duration: result.duration, }); - + return result; } @@ -70,86 +78,137 @@ class CacheCommand extends TestCommand { * @private */ async _showStats(options) { - this.progress('Gathering cache statistics...'); - + this.progress("Gathering cache statistics..."); + const stats = await this.testCache.getStats(); - - console.log(''); // Empty line - console.log(chalk.cyan.bold('Test Cache Statistics')); - console.log(chalk.cyan('━'.repeat(50))); - + + console.log(""); // Empty line + console.log(chalk.cyan.bold("Test Cache Statistics")); + console.log(chalk.cyan("━".repeat(50))); + // File statistics - console.log(chalk.white.bold('Storage:')); + console.log(chalk.white.bold("Storage:")); console.log(chalk.white(` Directory: ${stats.directory}`)); console.log(chalk.white(` Cache files: ${stats.files.count}`)); - + if (stats.files.count > 0) { - console.log(chalk.white(` Total size: ${this._formatBytes(stats.files.totalSize)}`)); - console.log(chalk.white(` Average file size: ${this._formatBytes(stats.files.averageSize)}`)); - + console.log( + chalk.white( + ` Total size: ${this._formatBytes(stats.files.totalSize)}`, + ), + ); + console.log( + chalk.white( + ` Average file size: ${this._formatBytes(stats.files.averageSize)}`, + ), + ); + if (stats.files.oldest) { - console.log(chalk.white(` Oldest entry: ${stats.files.oldest.age} minutes ago`)); + console.log( + chalk.white(` Oldest entry: ${stats.files.oldest.age} minutes ago`), + ); } if (stats.files.newest) { - console.log(chalk.white(` Newest entry: ${stats.files.newest.age} minutes ago`)); + console.log( + chalk.white(` Newest entry: ${stats.files.newest.age} minutes ago`), + ); } } - - console.log(''); // Empty line - + + console.log(""); // Empty line + // Performance statistics - console.log(chalk.white.bold('Performance:')); + console.log(chalk.white.bold("Performance:")); const hitRate = parseFloat(stats.performance.hitRate); - const hitRateColor = hitRate > 75 ? 'green' : hitRate > 50 ? 'yellow' : 'red'; - console.log(chalk[hitRateColor](` Hit rate: ${stats.performance.hitRate}%`)); - console.log(chalk.white(` Total requests: ${stats.performance.totalRequests}`)); + const hitRateColor = + hitRate > 75 ? "green" : hitRate > 50 ? 
"yellow" : "red"; + console.log( + chalk[hitRateColor](` Hit rate: ${stats.performance.hitRate}%`), + ); + console.log( + chalk.white(` Total requests: ${stats.performance.totalRequests}`), + ); console.log(chalk.green(` Cache hits: ${stats.performance.hits}`)); console.log(chalk.red(` Cache misses: ${stats.performance.misses}`)); - console.log(chalk.yellow(` Cache invalidations: ${stats.performance.invalidations}`)); - + console.log( + chalk.yellow(` Cache invalidations: ${stats.performance.invalidations}`), + ); + if (stats.performance.averageHashTime > 0) { - console.log(chalk.white(` Average hash calculation: ${stats.performance.averageHashTime}ms`)); + console.log( + chalk.white( + ` Average hash calculation: ${stats.performance.averageHashTime}ms`, + ), + ); } if (stats.performance.averageCacheOpTime > 0) { - console.log(chalk.white(` Average cache operation: ${stats.performance.averageCacheOpTime}ms`)); + console.log( + chalk.white( + ` Average cache operation: ${stats.performance.averageCacheOpTime}ms`, + ), + ); } - + // Show recent activity if available if (stats.timings.recentCacheOps.length > 0) { - console.log(''); // Empty line - console.log(chalk.white.bold('Recent Cache Activity:')); - stats.timings.recentCacheOps.forEach(op => { - const opColor = op.operation === 'hit' ? 'green' : 'blue'; + console.log(""); // Empty line + console.log(chalk.white.bold("Recent Cache Activity:")); + stats.timings.recentCacheOps.forEach((op) => { + const opColor = op.operation === "hit" ? "green" : "blue"; const timeAgo = this._formatTimeAgo(new Date(op.timestamp)); - console.log(chalk[opColor](` ${op.operation}: ${op.hash}... (${op.duration}ms, ${timeAgo})`)); + console.log( + chalk[opColor]( + ` ${op.operation}: ${op.hash}... (${op.duration}ms, ${timeAgo})`, + ), + ); }); } - + // Performance recommendations - console.log(''); // Empty line - console.log(chalk.white.bold('Recommendations:')); - + console.log(""); // Empty line + console.log(chalk.white.bold("Recommendations:")); + if (hitRate < 25) { - console.log(chalk.yellow(' • Consider running tests multiple times to build up cache')); + console.log( + chalk.yellow( + " • Consider running tests multiple times to build up cache", + ), + ); } else if (hitRate > 90) { - console.log(chalk.green(' • Excellent cache performance! Tests are running efficiently.')); + console.log( + chalk.green( + " • Excellent cache performance! Tests are running efficiently.", + ), + ); } else if (hitRate > 50) { - console.log(chalk.green(' • Good cache performance. Cache is providing significant speedup.')); + console.log( + chalk.green( + " • Good cache performance. Cache is providing significant speedup.", + ), + ); } - + if (stats.files.count > 1000) { - console.log(chalk.yellow(' • Consider clearing old cache entries to save disk space')); + console.log( + chalk.yellow( + " • Consider clearing old cache entries to save disk space", + ), + ); } - + if (stats.performance.averageHashTime > 100) { - console.log(chalk.yellow(' • Hash calculations are slow. Check for large test files.')); + console.log( + chalk.yellow( + " • Hash calculations are slow. Check for large test files.", + ), + ); } - - this.emit('complete', { - action: 'stats', - stats: stats + + this.emit("complete", { + action: "stats", + stats: stats, }); - + return stats; } @@ -159,29 +218,35 @@ class CacheCommand extends TestCommand { */ async _invalidateCache(options) { const pattern = options.pattern; - + if (!pattern) { - throw new Error('Pattern is required for cache invalidation. 
Use --pattern '); + throw new Error( + "Pattern is required for cache invalidation. Use --pattern ", + ); } - + this.progress(`Invalidating cache entries matching pattern: ${pattern}`); - + const count = await this.testCache.invalidateByPattern(pattern); - - console.log(''); // Empty line + + console.log(""); // Empty line if (count > 0) { console.log(chalk.green.bold(`✓ Invalidated ${count} cache entries`)); console.log(chalk.green(` Pattern: ${pattern}`)); } else { - console.log(chalk.yellow.bold(`No cache entries found matching pattern: ${pattern}`)); + console.log( + chalk.yellow.bold( + `No cache entries found matching pattern: ${pattern}`, + ), + ); } - - this.emit('complete', { - action: 'invalidate', + + this.emit("complete", { + action: "invalidate", pattern: pattern, - invalidatedCount: count + invalidatedCount: count, }); - + return { pattern, invalidatedCount: count }; } @@ -192,13 +257,13 @@ class CacheCommand extends TestCommand { * @private */ _formatBytes(bytes) { - if (bytes === 0) return '0 B'; - + if (bytes === 0) return "0 B"; + const k = 1024; - const sizes = ['B', 'KB', 'MB', 'GB']; + const sizes = ["B", "KB", "MB", "GB"]; const i = Math.floor(Math.log(bytes) / Math.log(k)); - - return parseFloat((bytes / Math.pow(k, i)).toFixed(2)) + ' ' + sizes[i]; + + return parseFloat((bytes / Math.pow(k, i)).toFixed(2)) + " " + sizes[i]; } /** @@ -213,8 +278,8 @@ class CacheCommand extends TestCommand { const diffMins = Math.floor(diffMs / 60000); const diffHours = Math.floor(diffMs / 3600000); const diffDays = Math.floor(diffMs / 86400000); - - if (diffMins < 1) return 'just now'; + + if (diffMins < 1) return "just now"; if (diffMins < 60) return `${diffMins}m ago`; if (diffHours < 24) return `${diffHours}h ago`; return `${diffDays}d ago`; @@ -246,4 +311,4 @@ Examples: } } -module.exports = CacheCommand; \ No newline at end of file +module.exports = CacheCommand; diff --git a/src/commands/test/CompileCommand.js b/src/commands/test/CompileCommand.js index 74a3a2e..c8bf96c 100644 --- a/src/commands/test/CompileCommand.js +++ b/src/commands/test/CompileCommand.js @@ -2,26 +2,23 @@ * Test Compile Command */ -const path = require('path'); -const fs = require('fs').promises; -const BuildCommand = require('../../lib/BuildCommand'); +const path = require("path"); +const fs = require("fs").promises; +const BuildCommand = require("../../lib/BuildCommand"); /** * Compile tests for execution * Uses the existing MigrationCompiler but configured for test directory */ class CompileCommand extends BuildCommand { - constructor( - testsDir, - outputDir, - logger = null, - isProd = false - ) { + constructor(testsDir, outputDir, logger = null, isProd = false) { super(testsDir, outputDir, logger, isProd); - + // Validate paths are provided if (!this.inputDir || !this.outputDir) { - throw new Error('CompileCommand requires test directory and output directory'); + throw new Error( + "CompileCommand requires test directory and output directory", + ); } } @@ -29,33 +26,35 @@ class CompileCommand extends BuildCommand { * Execute test compilation */ async performExecute() { - this.emit('compilation:start', { isProd: this.isProd, type: 'test' }); - + this.emit("compilation:start", { isProd: this.isProd, type: "test" }); + try { - this.progress('Starting test compilation...'); - + this.progress("Starting test compilation..."); + // Validate test directory structure await this.validateTestDirectory(); - + // TODO: Implement native test compilation // The legacy build system has been removed. 
This command needs to be reimplemented // using a native test compiler approach - throw new Error('Test compilation not yet implemented. Legacy build system has been removed.'); - + throw new Error( + "Test compilation not yet implemented. Legacy build system has been removed.", + ); + // Validate pgTAP function signatures await this.validatePgTapFunctions(result.outputFile); - + this.success(`Test compilation completed: ${result.outputFile}`); - this.emit('compilation:complete', { + this.emit("compilation:complete", { result, - type: 'test', - testsCompiled: result.stats.filesProcessed + type: "test", + testsCompiled: result.stats.filesProcessed, }); - + return result; } catch (error) { - this.error('Test compilation failed', error); - this.emit('compilation:failed', { error, type: 'test' }); + this.error("Test compilation failed", error); + this.emit("compilation:failed", { error, type: "test" }); throw error; } } @@ -66,25 +65,25 @@ class CompileCommand extends BuildCommand { async validateTestDirectory() { // Get test directory from OutputConfig const testDir = this.inputDir; - + try { const stat = await fs.stat(testDir); if (!stat.isDirectory()) { throw new Error(`Tests path is not a directory: ${testDir}`); } - + // Use glob to recursively find SQL files - const { glob } = require('glob'); - const pattern = path.join(testDir, '**/*.sql'); + const { glob } = require("glob"); + const pattern = path.join(testDir, "**/*.sql"); const sqlFiles = await glob(pattern); - + if (sqlFiles.length === 0) { throw new Error(`No SQL test files found in: ${testDir}`); } - + this.progress(`Found ${sqlFiles.length} test files in ${testDir}`); } catch (error) { - if (error.code === 'ENOENT') { + if (error.code === "ENOENT") { throw new Error(`Test directory not found: ${testDir}`); } throw error; @@ -99,14 +98,17 @@ class CompileCommand extends BuildCommand { compiler.generateOutputFilename = () => { const timestamp = new Date() .toISOString() - .replace(/[T:]/g, '') - .replace(/\..+/, '') - .replace(/-/g, '') + .replace(/[T:]/g, "") + .replace(/\..+/, "") + .replace(/-/g, "") .slice(0, 14); - - return path.join(compiler.config.outputDir, `${timestamp}_compiled_tests.sql`); + + return path.join( + compiler.config.outputDir, + `${timestamp}_compiled_tests.sql`, + ); }; - + // Override the header to indicate this is a test compilation const originalWriteHeader = compiler.writeHeader.bind(compiler); compiler.writeHeader = async (outputFile) => { @@ -125,13 +127,13 @@ class CompileCommand extends BuildCommand { -- ========================================================================= `; - + await fs.writeFile(outputFile, header); - compiler.stats.linesWritten += header.split('\n').length; - - compiler.emit('header:written', { + compiler.stats.linesWritten += header.split("\n").length; + + compiler.emit("header:written", { outputFile, - lines: header.split('\n').length + lines: header.split("\n").length, }); }; } @@ -141,62 +143,60 @@ class CompileCommand extends BuildCommand { */ async compileTestDirectory(compiler) { compiler.stats.startTime = new Date(); - - compiler.emit('start', { + + compiler.emit("start", { timestamp: compiler.stats.startTime, config: compiler.config, - type: 'test' + type: "test", }); - + // Validate test directory exists const testDir = this.inputDir; await fs.stat(testDir); - + // Ensure output directory exists await fs.mkdir(compiler.config.outputDir, { recursive: true }); - + // Generate output filename const outputFile = compiler.generateOutputFilename(); - + // Write header 
await compiler.writeHeader(outputFile); - + // Get all SQL files in tests directory const files = await fs.readdir(testDir); - const sqlFiles = files - .filter(f => f.endsWith('.sql')) - .sort(); // Sort for consistent ordering (important for test setup) - - this.emit('compilation:progress', { - stage: 'processing_files', - totalFiles: sqlFiles.length + const sqlFiles = files.filter((f) => f.endsWith(".sql")).sort(); // Sort for consistent ordering (important for test setup) + + this.emit("compilation:progress", { + stage: "processing_files", + totalFiles: sqlFiles.length, }); - + // Process each test file for (const sqlFile of sqlFiles) { await this.processTestFile(testDir, sqlFile, outputFile, compiler); } - + // Write footer with test-specific instructions await this.writeTestFooter(outputFile, compiler); - + // Complete compiler.stats.endTime = new Date(); const duration = compiler.stats.endTime - compiler.stats.startTime; - - compiler.emit('complete', { + + compiler.emit("complete", { outputFile, filesProcessed: compiler.stats.filesProcessed, linesWritten: compiler.stats.linesWritten, duration, timestamp: compiler.stats.endTime, - type: 'test' + type: "test", }); - + return { success: true, outputFile, - stats: compiler.stats + stats: compiler.stats, }; } @@ -205,19 +205,19 @@ class CompileCommand extends BuildCommand { */ async processTestFile(testDir, filename, outputFile, compiler) { const filePath = path.join(testDir, filename); - + try { - compiler.emit('file:start', { + compiler.emit("file:start", { file: filename, - path: filePath + path: filePath, }); - + // Read file content - const content = await fs.readFile(filePath, 'utf8'); - + const content = await fs.readFile(filePath, "utf8"); + // Validate pgTAP function structure await this.validateTestFileContent(content, filename); - + // Write file section with test-specific formatting const fileSection = `-- ========================================================================= -- TEST FILE: ${filename} @@ -225,29 +225,28 @@ class CompileCommand extends BuildCommand { ${content} `; - + await fs.appendFile(outputFile, fileSection); - - const linesAdded = fileSection.split('\n').length; + + const linesAdded = fileSection.split("\n").length; compiler.stats.linesWritten += linesAdded; compiler.stats.filesProcessed++; - - compiler.emit('file:complete', { + + compiler.emit("file:complete", { file: filename, lines: linesAdded, - size: content.length + size: content.length, }); - - this.emit('compilation:progress', { - stage: 'file_processed', + + this.emit("compilation:progress", { + stage: "file_processed", file: filename, - processedCount: compiler.stats.filesProcessed + processedCount: compiler.stats.filesProcessed, }); - } catch (error) { - compiler.emit('file:error', { + compiler.emit("file:error", { file: filename, - error: error.message + error: error.message, }); throw error; } @@ -258,17 +257,18 @@ ${content} */ async validateTestFileContent(content, filename) { // Check for required pgTAP function patterns - const lines = content.split('\n'); - + const lines = content.split("\n"); + // Look for test function definitions - const testFunctionPattern = /CREATE\s+OR\s+REPLACE\s+FUNCTION\s+test\.([a-zA-Z0-9_]+)\s*\(\s*\)/i; + const testFunctionPattern = + /CREATE\s+OR\s+REPLACE\s+FUNCTION\s+test\.([a-zA-Z0-9_]+)\s*\(\s*\)/i; const tapPlanPattern = /tap\.plan\s*\(\s*(\d+)\s*\)/i; const tapFinishPattern = /tap\.finish\s*\(\s*\)/i; - + let hasTestFunction = false; let hasTapPlan = false; let hasTapFinish = false; - + for (const 
line of lines) { if (testFunctionPattern.test(line)) { hasTestFunction = true; @@ -280,16 +280,22 @@ ${content} hasTapFinish = true; } } - + // Emit warnings for missing pgTAP patterns (non-fatal) if (!hasTestFunction) { - this.warn(`${filename}: No test functions found - may not be a pgTAP test file`); + this.warn( + `${filename}: No test functions found - may not be a pgTAP test file`, + ); } if (hasTestFunction && !hasTapPlan) { - this.warn(`${filename}: Missing tap.plan() call - tests may not run correctly`); + this.warn( + `${filename}: Missing tap.plan() call - tests may not run correctly`, + ); } if (hasTestFunction && !hasTapFinish) { - this.warn(`${filename}: Missing tap.finish() call - tests may not complete properly`); + this.warn( + `${filename}: Missing tap.finish() call - tests may not complete properly`, + ); } } @@ -314,12 +320,12 @@ ${content} -- -- ========================================================================= `; - + await fs.appendFile(outputFile, footer); - compiler.stats.linesWritten += footer.split('\n').length; - - compiler.emit('footer:written', { - lines: footer.split('\n').length + compiler.stats.linesWritten += footer.split("\n").length; + + compiler.emit("footer:written", { + lines: footer.split("\n").length, }); } @@ -327,42 +333,51 @@ ${content} * Validate pgTAP function signatures in the compiled output */ async validatePgTapFunctions(outputFile) { - this.progress('Validating pgTAP function signatures...'); - + this.progress("Validating pgTAP function signatures..."); + try { - const content = await fs.readFile(outputFile, 'utf8'); - + const content = await fs.readFile(outputFile, "utf8"); + // Look for all test function definitions - const testFunctionPattern = /CREATE\s+OR\s+REPLACE\s+FUNCTION\s+test\.([a-zA-Z0-9_]+)\s*\(\s*\)/gi; + const testFunctionPattern = + /CREATE\s+OR\s+REPLACE\s+FUNCTION\s+test\.([a-zA-Z0-9_]+)\s*\(\s*\)/gi; const functions = []; let match; - + while ((match = testFunctionPattern.exec(content)) !== null) { functions.push(match[1]); } - + if (functions.length === 0) { - this.warn('No pgTAP test functions found in compiled output'); + this.warn("No pgTAP test functions found in compiled output"); } else { - this.success(`Validated ${functions.length} pgTAP test functions: ${functions.join(', ')}`); + this.success( + `Validated ${functions.length} pgTAP test functions: ${functions.join(", ")}`, + ); } - + // Validate that each function has proper pgTAP structure for (const func of functions) { - const funcRegex = new RegExp(`CREATE\\s+OR\\s+REPLACE\\s+FUNCTION\\s+test\\.${func}[\\s\\S]*?\\$\\$;`, 'i'); + const funcRegex = new RegExp( + `CREATE\\s+OR\\s+REPLACE\\s+FUNCTION\\s+test\\.${func}[\\s\\S]*?\\$\\$;`, + "i", + ); const funcMatch = content.match(funcRegex); - + if (funcMatch) { const funcBody = funcMatch[0]; - if (!funcBody.includes('RETURNS SETOF TEXT')) { - this.warn(`Function test.${func} may not return SETOF TEXT - required for pgTAP`); + if (!funcBody.includes("RETURNS SETOF TEXT")) { + this.warn( + `Function test.${func} may not return SETOF TEXT - required for pgTAP`, + ); } - if (!funcBody.includes('tap.plan(')) { - this.warn(`Function test.${func} missing tap.plan() - tests may not execute properly`); + if (!funcBody.includes("tap.plan(")) { + this.warn( + `Function test.${func} missing tap.plan() - tests may not execute properly`, + ); } } } - } catch (error) { this.warn(`Could not validate pgTAP functions: ${error.message}`); } @@ -372,30 +387,30 @@ ${content} * Attach event listeners to the test compiler */ 
attachTestCompilerEvents(compiler) {
-    compiler.on('start', ({ timestamp, type }) => {
-      this.logger.debug({ timestamp, type }, 'Test compilation started');
+    compiler.on("start", ({ timestamp, type }) => {
+      this.logger.debug({ timestamp, type }, "Test compilation started");
     });
-    
-    compiler.on('file:start', ({ file }) => {
+
+    compiler.on("file:start", ({ file }) => {
       this.progress(`Processing test file: ${file}`);
     });
-    
-    compiler.on('file:complete', ({ file, lines }) => {
-      this.logger.debug({ file, lines }, 'Test file processed');
+
+    compiler.on("file:complete", ({ file, lines }) => {
+      this.logger.debug({ file, lines }, "Test file processed");
     });
-    
-    compiler.on('file:error', ({ file, error }) => {
+
+    compiler.on("file:error", ({ file, error }) => {
       this.error(`Error processing test file ${file}`, error);
     });
-    
-    compiler.on('complete', ({ stats, type }) => {
-      this.logger.info({ stats, type }, 'Test compilation complete');
+
+    compiler.on("complete", ({ stats, type }) => {
+      this.logger.info({ stats, type }, "Test compilation complete");
     });
-    
-    compiler.on('error', ({ error }) => {
-      this.error('Test compiler error', error);
+
+    compiler.on("error", ({ error }) => {
+      this.error("Test compiler error", error);
     });
   }
 }
 
-module.exports = CompileCommand;
\ No newline at end of file
+module.exports = CompileCommand;
diff --git a/src/commands/test/CoverageCommand.js b/src/commands/test/CoverageCommand.js
index a9e093d..a75459d 100644
--- a/src/commands/test/CoverageCommand.js
+++ b/src/commands/test/CoverageCommand.js
@@ -2,11 +2,11 @@
  * Test Coverage Command
  */
 
-const TestCommand = require('../../lib/TestCommand');
-const DatabaseUtils = require('../../lib/db-utils');
-const CoverageAnalyzer = require('../../lib/test/CoverageAnalyzer');
-const chalk = require('chalk');
-const Config = require('../../lib/config');
+const TestCommand = require("../../lib/TestCommand");
+const DatabaseUtils = require("../../lib/db-utils");
+const CoverageAnalyzer = require("../../lib/test/CoverageAnalyzer");
+const chalk = require("chalk");
+const Config = require("../../lib/config");
 
 /**
  * Generate test coverage reports
@@ -22,117 +22,154 @@ class CoverageCommand extends TestCommand {
    * Execute test coverage analysis
    */
   async performExecute(options = {}) {
-    this.emit('start', { isProd: this.isProd, options });
-    
+    this.emit("start", { isProd: this.isProd, options });
+
     // Load test configuration
     const testConfig = await this._getTestConfig();
-    
+
     // Parse enforcement options with config defaults
-    const enforce = options.enforce !== undefined ? options.enforce : testConfig.coverage_enforcement;
-    const minCoverage = parseInt(options.minCoverage || testConfig.minimum_coverage || '80', 10);
-    const minRpcCoverage = parseInt(options.minRpcCoverage || testConfig.minimum_coverage || '75', 10);
-    const minRlsCoverage = parseInt(options.minRlsCoverage || '70', 10);
-    
+    const enforce =
+      options.enforce !== undefined
+        ? 
options.enforce + : testConfig.coverage_enforcement; + const minCoverage = parseInt( + options.minCoverage || testConfig.minimum_coverage || "80", + 10, + ); + const minRpcCoverage = parseInt( + options.minRpcCoverage || testConfig.minimum_coverage || "75", + 10, + ); + const minRlsCoverage = parseInt(options.minRlsCoverage || "70", 10); + let client = null; - + try { - this.progress('Connecting to database...'); - + this.progress("Connecting to database..."); + // Connect to the main postgres database (default database name) - client = this.dbUtils.createDatabaseClient('postgres'); + client = this.dbUtils.createDatabaseClient("postgres"); await client.connect(); - - this.progress('Analyzing RPC function coverage...'); - + + this.progress("Analyzing RPC function coverage..."); + // Query RPC coverage - const rpcResult = await client.query('SELECT * FROM test.analyze_rpc_coverage()'); + const rpcResult = await client.query( + "SELECT * FROM test.analyze_rpc_coverage()", + ); const rpcAnalysis = this.analyzer.analyzeRpcCoverage(rpcResult.rows); - - this.progress('Analyzing RLS policy coverage...'); - + + this.progress("Analyzing RLS policy coverage..."); + // Query RLS policy coverage - const policyResult = await client.query('SELECT * FROM test.analyze_policy_coverage()'); - const policyAnalysis = this.analyzer.analyzePolicyCoverage(policyResult.rows); - - this.progress('Generating coverage summary...'); - + const policyResult = await client.query( + "SELECT * FROM test.analyze_policy_coverage()", + ); + const policyAnalysis = this.analyzer.analyzePolicyCoverage( + policyResult.rows, + ); + + this.progress("Generating coverage summary..."); + // Query overall summary let summaryResult = null; try { - const summaryQuery = await client.query('SELECT * FROM test.get_coverage_summary()'); + const summaryQuery = await client.query( + "SELECT * FROM test.get_coverage_summary()", + ); summaryResult = this.analyzer.processCoverageSummary(summaryQuery.rows); } catch (summaryError) { // Summary function might not exist in some migrations - this.warn('Could not retrieve coverage summary - function may not be available'); + this.warn( + "Could not retrieve coverage summary - function may not be available", + ); } - - this.progress('Formatting coverage report...'); - + + this.progress("Formatting coverage report..."); + // Generate formatted report - const report = this.analyzer.formatCoverageReport(rpcAnalysis, policyAnalysis, summaryResult); - + const report = this.analyzer.formatCoverageReport( + rpcAnalysis, + policyAnalysis, + summaryResult, + ); + // Output the report - console.log('\n' + report); - + console.log("\n" + report); + // Generate stats for return value - const stats = this.analyzer.generateCoverageStats(rpcAnalysis, policyAnalysis); - + const stats = this.analyzer.generateCoverageStats( + rpcAnalysis, + policyAnalysis, + ); + // Enforce coverage thresholds if requested if (enforce) { - this.progress('Enforcing coverage thresholds...'); + this.progress("Enforcing coverage thresholds..."); this.enforcementResult = this.enforceCoverageThresholds( - stats, - minCoverage, - minRpcCoverage, - minRlsCoverage + stats, + minCoverage, + minRpcCoverage, + minRlsCoverage, ); - + if (!this.enforcementResult.passed) { // Exit after emitting the event and returning result - this.emit('failed', { error: new Error('Coverage enforcement failed'), thresholds: this.enforcementResult }); + this.emit("failed", { + error: new Error("Coverage enforcement failed"), + thresholds: this.enforcementResult, + }); 
// Note: process.exit will be handled after the function returns } else { - this.success('All coverage thresholds met!'); + this.success("All coverage thresholds met!"); } } - - this.emit('complete', { - rpcAnalysis, - policyAnalysis, + + this.emit("complete", { + rpcAnalysis, + policyAnalysis, summary: summaryResult, stats, - report + report, }); - + // Exit with non-zero code if enforcement failed if (enforce && this.enforcementResult && !this.enforcementResult.passed) { process.exit(1); } - + return { rpc: rpcAnalysis, policies: policyAnalysis, summary: summaryResult, - overall: stats.overall + overall: stats.overall, }; - } catch (error) { // Handle common database connection errors with helpful messages - if (error.code === 'ECONNREFUSED') { - this.error('Database connection refused. Is PostgreSQL running on localhost:54332?'); - this.info('Try starting Supabase with: cd supabase && npm run start'); - } else if (error.code === '3D000') { + if (error.code === "ECONNREFUSED") { + this.error( + "Database connection refused. Is PostgreSQL running on localhost:54332?", + ); + this.info("Try starting Supabase with: cd supabase && npm run start"); + } else if (error.code === "3D000") { this.error('Database "postgres" does not exist.'); - this.info('Make sure you are connected to the correct database.'); - } else if (error.message.includes('test.analyze_rpc_coverage') || error.message.includes('does not exist')) { - this.error('Test coverage functions not found in database.'); - this.info('Run the test coverage migration: data db compile-migration && supabase db reset'); - this.warn('Make sure migration 20250829_050000_test_coverage_analysis.sql has been applied.'); + this.info("Make sure you are connected to the correct database."); + } else if ( + error.message.includes("test.analyze_rpc_coverage") || + error.message.includes("does not exist") + ) { + this.error("Test coverage functions not found in database."); + this.info( + "Run the test coverage migration: data db compile-migration && supabase db reset", + ); + this.warn( + "Make sure migration 20250829_050000_test_coverage_analysis.sql has been applied.", + ); } else { - this.error('Failed to analyze test coverage', error); + this.error("Failed to analyze test coverage", error); } - - this.emit('failed', { error }); + + this.emit("failed", { error }); throw error; } finally { if (client) { @@ -144,7 +181,7 @@ class CoverageCommand extends TestCommand { } } } - + /** * Enforce coverage thresholds * @param {Object} stats - Coverage statistics @@ -156,62 +193,107 @@ class CoverageCommand extends TestCommand { enforceCoverageThresholds(stats, minOverall, minRpc, minRls) { const failures = []; let passed = true; - + // Check overall coverage if (stats.overall && stats.overall.percentage < minOverall) { const message = `Overall coverage ${stats.overall.percentage}% below threshold ${minOverall}%`; this.error(chalk.red(message)); - failures.push({ type: 'overall', actual: stats.overall.percentage, expected: minOverall, message }); + failures.push({ + type: "overall", + actual: stats.overall.percentage, + expected: minOverall, + message, + }); passed = false; } else if (stats.overall) { - this.success(chalk.green(`✓ Overall coverage ${stats.overall.percentage}% meets threshold ${minOverall}%`)); + this.success( + chalk.green( + `✓ Overall coverage ${stats.overall.percentage}% meets threshold ${minOverall}%`, + ), + ); } - + // Check RPC coverage if (stats.rpc && stats.rpc.percentage < minRpc) { const message = `RPC function coverage 
${stats.rpc.percentage}% below threshold ${minRpc}%`; this.error(chalk.red(message)); - failures.push({ type: 'rpc', actual: stats.rpc.percentage, expected: minRpc, message }); + failures.push({ + type: "rpc", + actual: stats.rpc.percentage, + expected: minRpc, + message, + }); passed = false; } else if (stats.rpc) { - this.success(chalk.green(`✓ RPC function coverage ${stats.rpc.percentage}% meets threshold ${minRpc}%`)); + this.success( + chalk.green( + `✓ RPC function coverage ${stats.rpc.percentage}% meets threshold ${minRpc}%`, + ), + ); } - + // Check RLS policy coverage if (stats.policies && stats.policies.percentage < minRls) { const message = `RLS policy coverage ${stats.policies.percentage}% below threshold ${minRls}%`; this.error(chalk.red(message)); - failures.push({ type: 'rls', actual: stats.policies.percentage, expected: minRls, message }); + failures.push({ + type: "rls", + actual: stats.policies.percentage, + expected: minRls, + message, + }); passed = false; } else if (stats.policies) { - this.success(chalk.green(`✓ RLS policy coverage ${stats.policies.percentage}% meets threshold ${minRls}%`)); + this.success( + chalk.green( + `✓ RLS policy coverage ${stats.policies.percentage}% meets threshold ${minRls}%`, + ), + ); } - + // Summary if (passed) { - this.success(chalk.bold.green('🎉 All coverage thresholds met!')); + this.success(chalk.bold.green("🎉 All coverage thresholds met!")); } else { - this.error(chalk.bold.red(`💥 Coverage enforcement failed - ${failures.length} threshold(s) not met`)); - + this.error( + chalk.bold.red( + `💥 Coverage enforcement failed - ${failures.length} threshold(s) not met`, + ), + ); + // Show details of failures - failures.forEach(failure => { - this.error(chalk.red(` • ${failure.type}: ${failure.actual}% < ${failure.expected}%`)); + failures.forEach((failure) => { + this.error( + chalk.red( + ` • ${failure.type}: ${failure.actual}% < ${failure.expected}%`, + ), + ); }); - - this.progress(chalk.yellow('\nTo fix coverage issues:')); - this.progress(chalk.yellow(' 1. Run: ./build/data test coverage (to see detailed coverage report)')); - this.progress(chalk.yellow(' 2. Add missing tests for uncovered RPC functions and RLS policies')); - this.progress(chalk.yellow(' 3. Re-run with --enforce to validate improvements')); + + this.progress(chalk.yellow("\nTo fix coverage issues:")); + this.progress( + chalk.yellow( + " 1. Run: ./build/data test coverage (to see detailed coverage report)", + ), + ); + this.progress( + chalk.yellow( + " 2. Add missing tests for uncovered RPC functions and RLS policies", + ), + ); + this.progress( + chalk.yellow(" 3. 
Re-run with --enforce to validate improvements"), + ); } - + return { passed, failures, thresholds: { overall: minOverall, rpc: minRpc, - rls: minRls - } + rls: minRls, + }, }; } @@ -223,16 +305,16 @@ class CoverageCommand extends TestCommand { if (this.config) { return this.config.getTestConfig(); } - + try { const config = await Config.load(); return config.getTestConfig(); } catch (error) { - this.logger.debug('Could not load config file, using defaults'); + this.logger.debug("Could not load config file, using defaults"); const defaultConfig = new Config(); return defaultConfig.getTestConfig(); } } } -module.exports = CoverageCommand; \ No newline at end of file +module.exports = CoverageCommand; diff --git a/src/commands/test/DevCycleCommand.js b/src/commands/test/DevCycleCommand.js index 4d7f944..1887b88 100644 --- a/src/commands/test/DevCycleCommand.js +++ b/src/commands/test/DevCycleCommand.js @@ -1,15 +1,15 @@ /** * Test Dev-Cycle Command - * + * * Orchestrates the full development cycle: Compile → Reset → Test * Provides rapid feedback for database test development workflow */ -const TestCommand = require('../../lib/TestCommand'); -const CompileCommand = require('./CompileCommand'); -const RunCommand = require('./RunCommand'); -const ResetCommand = require('../db/ResetCommand'); -const Config = require('../../lib/config'); +const TestCommand = require("../../lib/TestCommand"); +const CompileCommand = require("./CompileCommand"); +const RunCommand = require("./RunCommand"); +const ResetCommand = require("../db/ResetCommand"); +const Config = require("../../lib/config"); /** * Development cycle command that orchestrates compile → reset → test workflow @@ -22,13 +22,21 @@ class DevCycleCommand extends TestCommand { outputDir, logger = null, isProd = false, - pathResolver = null + pathResolver = null, ) { - super(databaseUrl, serviceRoleKey, testsDir, outputDir, logger, isProd, pathResolver); - + super( + databaseUrl, + serviceRoleKey, + testsDir, + outputDir, + logger, + isProd, + pathResolver, + ); + // Dev-cycle never requires production confirmation - it's a development tool this.requiresProductionConfirmation = false; - + // Track timing for performance reporting this.timings = {}; } @@ -38,48 +46,50 @@ class DevCycleCommand extends TestCommand { */ async performExecute(options = {}) { const startTime = new Date(); - - this.emit('dev-cycle:start', { - isProd: this.isProd, + + this.emit("dev-cycle:start", { + isProd: this.isProd, testsDir: this.testsDir, outputDir: this.outputDir, - options + options, }); - + try { // Load test configuration to respect settings const testConfig = await this._getTestConfig(); - - this.progress('Starting development cycle: Compile → Reset → Test'); - + + this.progress("Starting development cycle: Compile → Reset → Test"); + // Step 1: Compile tests await this._executeCompileStep(); - - // Step 2: Reset database + + // Step 2: Reset database await this._executeResetStep(); - + // Step 3: Run tests const testResults = await this._executeTestStep(options); - + // Calculate total execution time const totalTime = new Date() - startTime; this.timings.total = totalTime; - + // Report completion this._reportCycleCompletion(testResults, totalTime); - - this.emit('dev-cycle:complete', { + + this.emit("dev-cycle:complete", { results: testResults, timings: this.timings, - success: testResults.failed === 0 + success: testResults.failed === 0, }); - + return testResults; - } catch (error) { const totalTime = new Date() - startTime; - this.error(`Development cycle 
failed after ${this._formatDuration(totalTime)}`, error); - this.emit('dev-cycle:failed', { error, timings: this.timings }); + this.error( + `Development cycle failed after ${this._formatDuration(totalTime)}`, + error, + ); + this.emit("dev-cycle:failed", { error, timings: this.timings }); throw error; } } @@ -90,49 +100,54 @@ class DevCycleCommand extends TestCommand { */ async _executeCompileStep() { const stepStart = new Date(); - this.progress('Step 1/3: Compiling tests...'); - this.emit('dev-cycle:step', { step: 1, name: 'compile', status: 'running' }); - + this.progress("Step 1/3: Compiling tests..."); + this.emit("dev-cycle:step", { + step: 1, + name: "compile", + status: "running", + }); + try { // Create compile command using migration_output resource exclusively const compileCommand = new CompileCommand( this.testsDir, this.outputDir, this.logger, - this.isProd + this.isProd, ); - + // Attach progress listeners - compileCommand.on('compilation:progress', (progress) => { - this.emit('dev-cycle:compile-progress', progress); + compileCommand.on("compilation:progress", (progress) => { + this.emit("dev-cycle:compile-progress", progress); }); - + // Execute compilation const result = await compileCommand.execute(); - + const stepTime = new Date() - stepStart; this.timings.compile = stepTime; - - this.success(`✓ Compilation complete (${this._formatDuration(stepTime)})`); - this.emit('dev-cycle:step', { - step: 1, - name: 'compile', - status: 'complete', + + this.success( + `✓ Compilation complete (${this._formatDuration(stepTime)})`, + ); + this.emit("dev-cycle:step", { + step: 1, + name: "compile", + status: "complete", duration: stepTime, - result + result, }); - + return result; - } catch (error) { const stepTime = new Date() - stepStart; this.timings.compile = stepTime; - this.emit('dev-cycle:step', { - step: 1, - name: 'compile', - status: 'failed', + this.emit("dev-cycle:step", { + step: 1, + name: "compile", + status: "failed", duration: stepTime, - error + error, }); throw new Error(`Compilation failed: ${error.message}`); } @@ -144,9 +159,9 @@ class DevCycleCommand extends TestCommand { */ async _executeResetStep() { const stepStart = new Date(); - this.progress('Step 2/3: Resetting database...'); - this.emit('dev-cycle:step', { step: 2, name: 'reset', status: 'running' }); - + this.progress("Step 2/3: Resetting database..."); + this.emit("dev-cycle:step", { step: 2, name: "reset", status: "running" }); + try { // Create reset command - ResetCommand only takes specific parameters const resetCommand = new ResetCommand( @@ -154,42 +169,43 @@ class DevCycleCommand extends TestCommand { this.serviceRoleKey, null, // anonKey this.logger, - this.isProd + this.isProd, ); - + // The ResetCommand needs access to outputConfig for supabase directory // We'll create a simple OutputConfig for this purpose - const OutputConfig = require('../../lib/OutputConfig'); + const OutputConfig = require("../../lib/OutputConfig"); resetCommand.outputConfig = new OutputConfig(); - + // Attach progress listeners - resetCommand.on('output', (output) => { - this.emit('dev-cycle:reset-output', output); + resetCommand.on("output", (output) => { + this.emit("dev-cycle:reset-output", output); }); - + // Execute reset await resetCommand.execute(); - + const stepTime = new Date() - stepStart; this.timings.reset = stepTime; - - this.success(`✓ Database reset complete (${this._formatDuration(stepTime)})`); - this.emit('dev-cycle:step', { - step: 2, - name: 'reset', - status: 'complete', - duration: stepTime + + 
this.success( + `✓ Database reset complete (${this._formatDuration(stepTime)})`, + ); + this.emit("dev-cycle:step", { + step: 2, + name: "reset", + status: "complete", + duration: stepTime, }); - } catch (error) { const stepTime = new Date() - stepStart; this.timings.reset = stepTime; - this.emit('dev-cycle:step', { - step: 2, - name: 'reset', - status: 'failed', + this.emit("dev-cycle:step", { + step: 2, + name: "reset", + status: "failed", duration: stepTime, - error + error, }); throw new Error(`Database reset failed: ${error.message}`); } @@ -201,9 +217,9 @@ class DevCycleCommand extends TestCommand { */ async _executeTestStep(options = {}) { const stepStart = new Date(); - this.progress('Step 3/3: Running tests...'); - this.emit('dev-cycle:step', { step: 3, name: 'test', status: 'running' }); - + this.progress("Step 3/3: Running tests..."); + this.emit("dev-cycle:step", { step: 3, name: "test", status: "running" }); + try { // Create run command const runCommand = new RunCommand( @@ -212,50 +228,51 @@ class DevCycleCommand extends TestCommand { this.testsDir, this.outputDir, this.logger, - this.isProd + this.isProd, ); - + // Attach progress listeners - runCommand.on('start', (event) => { - this.emit('dev-cycle:test-start', event); + runCommand.on("start", (event) => { + this.emit("dev-cycle:test-start", event); }); - - runCommand.on('complete', (event) => { - this.emit('dev-cycle:test-complete', event); + + runCommand.on("complete", (event) => { + this.emit("dev-cycle:test-complete", event); }); - + // Execute tests with passed options const testResults = await runCommand.execute(options); - + const stepTime = new Date() - stepStart; this.timings.test = stepTime; - + // Success message depends on test results if (testResults.failed === 0) { this.success(`✓ All tests passed (${this._formatDuration(stepTime)})`); } else { - this.warn(`✗ ${testResults.failed}/${testResults.total} tests failed (${this._formatDuration(stepTime)})`); + this.warn( + `✗ ${testResults.failed}/${testResults.total} tests failed (${this._formatDuration(stepTime)})`, + ); } - - this.emit('dev-cycle:step', { - step: 3, - name: 'test', - status: 'complete', + + this.emit("dev-cycle:step", { + step: 3, + name: "test", + status: "complete", duration: stepTime, - results: testResults + results: testResults, }); - + return testResults; - } catch (error) { const stepTime = new Date() - stepStart; this.timings.test = stepTime; - this.emit('dev-cycle:step', { - step: 3, - name: 'test', - status: 'failed', + this.emit("dev-cycle:step", { + step: 3, + name: "test", + status: "failed", duration: stepTime, - error + error, }); throw new Error(`Test execution failed: ${error.message}`); } @@ -266,22 +283,24 @@ class DevCycleCommand extends TestCommand { * @private */ _reportCycleCompletion(testResults, totalTime) { - console.log(''); // Empty line for spacing - console.log('═'.repeat(60)); - console.log('🔄 DEV-CYCLE COMPLETE'); - console.log('═'.repeat(60)); - + console.log(""); // Empty line for spacing + console.log("═".repeat(60)); + console.log("🔄 DEV-CYCLE COMPLETE"); + console.log("═".repeat(60)); + // Step timing breakdown - console.log('\nStep Timings:'); - console.log(` Compile: ${this._formatDuration(this.timings.compile || 0)}`); + console.log("\nStep Timings:"); + console.log( + ` Compile: ${this._formatDuration(this.timings.compile || 0)}`, + ); console.log(` Reset: ${this._formatDuration(this.timings.reset || 0)}`); console.log(` Test: ${this._formatDuration(this.timings.test || 0)}`); console.log(` Total: 
${this._formatDuration(totalTime)}`); - + // Test results summary - console.log('\nTest Results:'); + console.log("\nTest Results:"); if (testResults.total === 0) { - console.log(' No tests executed'); + console.log(" No tests executed"); } else { console.log(` Total: ${testResults.total} tests`); console.log(` Passed: ${testResults.passed}`); @@ -290,17 +309,17 @@ class DevCycleCommand extends TestCommand { console.log(` Skipped: ${testResults.skipped}`); } } - + // Overall status if (testResults.failed === 0 && testResults.total > 0) { - console.log('\n✅ Cycle successful - All tests passed!'); + console.log("\n✅ Cycle successful - All tests passed!"); } else if (testResults.failed > 0) { - console.log('\n❌ Cycle completed with test failures'); + console.log("\n❌ Cycle completed with test failures"); } else { - console.log('\n⚠️ Cycle completed - No tests found'); + console.log("\n⚠️ Cycle completed - No tests found"); } - - console.log('═'.repeat(60)); + + console.log("═".repeat(60)); } /** @@ -337,11 +356,11 @@ class DevCycleCommand extends TestCommand { const config = await Config.load(); return config.getTestConfig(); } catch (error) { - this.logger.debug('Could not load config file, using defaults'); + this.logger.debug("Could not load config file, using defaults"); const defaultConfig = new Config(); return defaultConfig.getTestConfig(); } } } -module.exports = DevCycleCommand; \ No newline at end of file +module.exports = DevCycleCommand; diff --git a/src/commands/test/GenerateCommand.js b/src/commands/test/GenerateCommand.js index 7e45b07..2206b8c 100644 --- a/src/commands/test/GenerateCommand.js +++ b/src/commands/test/GenerateCommand.js @@ -1,26 +1,21 @@ /** * Test Generate Command - * + * * Generate pgTAP test templates for RPC functions and RLS policies. * Creates properly structured test files in the correct directories. 
*/ -const fs = require('fs').promises; -const path = require('path'); -const TestCommand = require('../../lib/TestCommand'); +const fs = require("fs").promises; +const path = require("path"); +const TestCommand = require("../../lib/TestCommand"); /** * Generate pgTAP test templates for RPC functions and RLS policies */ class GenerateCommand extends TestCommand { - constructor( - testsDir, - outputDir, - logger = null, - isProd = false - ) { + constructor(testsDir, outputDir, logger = null, isProd = false) { super(null, null, testsDir, outputDir, logger, isProd); - + // Test generation doesn't require database access this.requiresProductionConfirmation = false; } @@ -33,30 +28,33 @@ class GenerateCommand extends TestCommand { * @returns {Promise} Generation result */ async performExecute(options = {}) { - this.emit('generation:start', { type: options.type, name: options.name }); - + this.emit("generation:start", { type: options.type, name: options.name }); + try { // Validate options this.validateGenerationOptions(options); - + // Determine template type and generate let result; - if (options.type === 'rpc') { + if (options.type === "rpc") { result = await this.generateRpcTest(options.name); - } else if (options.type === 'rls') { + } else if (options.type === "rls") { result = await this.generateRlsTest(options.name); } else { throw new Error(`Unsupported test type: ${options.type}`); } - + this.success(`Test template generated: ${result.outputFile}`); - this.emit('generation:complete', result); - + this.emit("generation:complete", result); + return result; - } catch (error) { - this.error('Test template generation failed', error); - this.emit('generation:failed', { error, type: options.type, name: options.name }); + this.error("Test template generation failed", error); + this.emit("generation:failed", { + error, + type: options.type, + name: options.name, + }); throw error; } } @@ -67,20 +65,22 @@ class GenerateCommand extends TestCommand { */ validateGenerationOptions(options) { if (!options.type) { - throw new Error('Test type is required. Use --rpc or --rls'); + throw new Error("Test type is required. 
Use --rpc or --rls"); } - + if (!options.name) { - throw new Error('Function or table name is required'); + throw new Error("Function or table name is required"); } - - if (!['rpc', 'rls'].includes(options.type)) { + + if (!["rpc", "rls"].includes(options.type)) { throw new Error('Test type must be either "rpc" or "rls"'); } - + // Validate name format if (!/^[a-zA-Z0-9_]+$/.test(options.name)) { - throw new Error('Name must contain only letters, numbers, and underscores'); + throw new Error( + "Name must contain only letters, numbers, and underscores", + ); } } @@ -91,26 +91,26 @@ class GenerateCommand extends TestCommand { */ async generateRpcTest(functionName) { const testDir = await this.getTestsDir(); - const rpcTestDir = path.join(testDir, '002_rpc_tests'); + const rpcTestDir = path.join(testDir, "002_rpc_tests"); const outputFile = path.join(rpcTestDir, `${functionName}.test.sql`); - + // Ensure RPC test directory exists await fs.mkdir(rpcTestDir, { recursive: true }); - + // Generate template content const template = this.generateRpcTemplate(functionName); - + // Write template file - await fs.writeFile(outputFile, template, 'utf8'); - + await fs.writeFile(outputFile, template, "utf8"); + this.progress(`Generated RPC test template: ${outputFile}`); - + return { - type: 'rpc', + type: "rpc", functionName, outputFile, directory: rpcTestDir, - template + template, }; } @@ -121,26 +121,26 @@ class GenerateCommand extends TestCommand { */ async generateRlsTest(tableName) { const testDir = await this.getTestsDir(); - const rlsTestDir = path.join(testDir, '003_rls_tests'); + const rlsTestDir = path.join(testDir, "003_rls_tests"); const outputFile = path.join(rlsTestDir, `${tableName}.test.sql`); - + // Ensure RLS test directory exists await fs.mkdir(rlsTestDir, { recursive: true }); - + // Generate template content const template = this.generateRlsTemplate(tableName); - + // Write template file - await fs.writeFile(outputFile, template, 'utf8'); - + await fs.writeFile(outputFile, template, "utf8"); + this.progress(`Generated RLS test template: ${outputFile}`); - + return { - type: 'rls', + type: "rls", tableName, outputFile, directory: rlsTestDir, - template + template, }; } @@ -151,7 +151,7 @@ class GenerateCommand extends TestCommand { */ generateRpcTemplate(functionName) { const testFunctionName = `run_${functionName}_tests`; - + return `-- ========================================================================= -- RPC FUNCTION TESTS: ${functionName} -- ========================================================================= @@ -257,7 +257,7 @@ COMMENT ON FUNCTION test.${testFunctionName}() IS 'Tests for ${functionName} RPC */ generateRlsTemplate(tableName) { const testFunctionName = `run_${tableName}_rls_tests`; - + return `-- ========================================================================= -- RLS POLICY TESTS: ${tableName} -- ========================================================================= @@ -409,15 +409,15 @@ COMMENT ON FUNCTION test.${testFunctionName}() IS 'Tests for Row Level Security // This would require database access to query pg_proc // For now, return common functions based on existing patterns return [ - 'get_random_pets', - 'get_pet_details', - 'search_adoptable_pets', - 'is_admin', - 'is_bootstrap_mode', - 'complete_bootstrap', - 'is_maintenance_mode', - 'record_donation_payment', - 'update_donation_totals' + "get_random_pets", + "get_pet_details", + "search_adoptable_pets", + "is_admin", + "is_bootstrap_mode", + "complete_bootstrap", + 
"is_maintenance_mode", + "record_donation_payment", + "update_donation_totals", ]; } @@ -427,14 +427,8 @@ COMMENT ON FUNCTION test.${testFunctionName}() IS 'Tests for Row Level Security */ async listAvailableTables() { // Common tables from the schema - return [ - 'pets', - 'applications', - 'donations', - 'profiles', - 'admin_members' - ]; + return ["pets", "applications", "donations", "profiles", "admin_members"]; } } -module.exports = GenerateCommand; \ No newline at end of file +module.exports = GenerateCommand; diff --git a/src/commands/test/GenerateTemplateCommand.js b/src/commands/test/GenerateTemplateCommand.js index 36fecaa..b66bfab 100644 --- a/src/commands/test/GenerateTemplateCommand.js +++ b/src/commands/test/GenerateTemplateCommand.js @@ -1,31 +1,28 @@ /** * Generate Template Command - * + * * Generate pgTAP test templates using TestTemplateGenerator and TestRequirementAnalyzer. * Supports generating from migration files, specific test types, and custom requirements. */ -const fs = require('fs').promises; -const path = require('path'); -const TestCommand = require('../../lib/TestCommand'); -const TestTemplateGenerator = require('../../lib/testing/TestTemplateGenerator'); -const { TestRequirementAnalyzer } = require('../../lib/testing/TestRequirementAnalyzer'); +const fs = require("fs").promises; +const path = require("path"); +const TestCommand = require("../../lib/TestCommand"); +const TestTemplateGenerator = require("../../lib/testing/TestTemplateGenerator"); +const { + TestRequirementAnalyzer, +} = require("../../lib/testing/TestRequirementAnalyzer"); /** * Generate pgTAP test templates with advanced analysis capabilities */ class GenerateTemplateCommand extends TestCommand { - constructor( - testsDir, - outputDir, - logger = null, - isProd = false - ) { + constructor(testsDir, outputDir, logger = null, isProd = false) { super(null, null, testsDir, outputDir, logger, isProd); - + // Template generation doesn't require database access or production confirmation this.requiresProductionConfirmation = false; - + // Initialize generators this.templateGenerator = new TestTemplateGenerator(); this.requirementAnalyzer = new TestRequirementAnalyzer(); @@ -45,70 +42,80 @@ class GenerateTemplateCommand extends TestCommand { * @returns {Promise} Generation result */ async performExecute(options = {}) { - this.emit('template:generation:start', { + this.emit("template:generation:start", { migration: options.migration, - type: options.type, - name: options.name + type: options.type, + name: options.name, }); - + try { // Validate options this.validateGenerationOptions(options); - + // Generate requirements based on input let requirements; if (options.migration) { - requirements = await this.analyzeRequirementsFromMigration(options.migration); - + requirements = await this.analyzeRequirementsFromMigration( + options.migration, + ); + // Filter by type if specified if (options.type) { - requirements = this.filterRequirementsByType(requirements, options.type); + requirements = this.filterRequirementsByType( + requirements, + options.type, + ); } } else { // Generate single requirement from options requirements = [this.createRequirementFromOptions(options)]; } - + if (requirements.length === 0) { - throw new Error('No test requirements found. Check migration file or provide --type and --name options.'); + throw new Error( + "No test requirements found. 
Check migration file or provide --type and --name options.", + ); } - + // Generate templates const result = this.templateGenerator.generateBatch(requirements); - + if (result.errors.length > 0) { - this.warn(`Generated ${result.totalGenerated} templates with ${result.errors.length} errors`); - result.errors.forEach(error => { - this.error(`Error generating template for ${error.requirement?.name}: ${error.error}`); + this.warn( + `Generated ${result.totalGenerated} templates with ${result.errors.length} errors`, + ); + result.errors.forEach((error) => { + this.error( + `Error generating template for ${error.requirement?.name}: ${error.error}`, + ); }); } - + // Output templates await this.outputTemplates(result.templates, options.output); - + const summaryText = this.formatGenerationSummary(result); this.success(`Test template generation completed\n${summaryText}`); - - this.emit('template:generation:complete', { + + this.emit("template:generation:complete", { totalGenerated: result.totalGenerated, summary: result.summary, - errors: result.errors + errors: result.errors, }); - + return { templates: result.templates, summary: result.summary, totalGenerated: result.totalGenerated, - errors: result.errors + errors: result.errors, }; - } catch (error) { - this.error('Test template generation failed', error); - this.emit('template:generation:failed', { - error, + this.error("Test template generation failed", error); + this.emit("template:generation:failed", { + error, migration: options.migration, - type: options.type, - name: options.name + type: options.type, + name: options.name, }); throw error; } @@ -121,27 +128,33 @@ class GenerateTemplateCommand extends TestCommand { validateGenerationOptions(options) { // Either migration file OR type+name must be provided if (!options.migration && (!options.type || !options.name)) { - throw new Error('Either --migration or both --type and --name must be provided'); + throw new Error( + "Either --migration or both --type and --name must be provided", + ); } - + // If migration file specified, check if it exists if (options.migration && !fs.access(options.migration).catch(() => false)) { // We'll validate file existence in analyzeRequirementsFromMigration } - + // Validate test type if specified if (options.type) { - const validTypes = ['rpc', 'rls', 'trigger', 'constraint', 'function']; + const validTypes = ["rpc", "rls", "trigger", "constraint", "function"]; if (!validTypes.includes(options.type)) { - throw new Error(`Invalid test type: ${options.type}. Must be one of: ${validTypes.join(', ')}`); + throw new Error( + `Invalid test type: ${options.type}. 
Must be one of: ${validTypes.join(", ")}`, + ); } } - + // Validate name format if specified if (options.name && !/^[a-zA-Z0-9_]+$/.test(options.name)) { - throw new Error('Name must contain only letters, numbers, and underscores'); + throw new Error( + "Name must contain only letters, numbers, and underscores", + ); } - + // Validate output path if specified if (options.output && !path.isAbsolute(options.output)) { // Convert to absolute path relative to current working directory @@ -158,23 +171,24 @@ class GenerateTemplateCommand extends TestCommand { try { // Check if file exists await fs.access(migrationPath); - + this.progress(`Analyzing migration file: ${migrationPath}`); - + // Read migration file - const migrationContent = await fs.readFile(migrationPath, 'utf8'); - + const migrationContent = await fs.readFile(migrationPath, "utf8"); + // Parse migration content to AST operations - const operations = await this.parseMigrationToOperations(migrationContent); - + const operations = + await this.parseMigrationToOperations(migrationContent); + // Analyze operations to determine test requirements - const analysis = await this.requirementAnalyzer.analyzeOperations(operations); - + const analysis = + await this.requirementAnalyzer.analyzeOperations(operations); + // Convert analysis results to template requirements return this.convertAnalysisToRequirements(analysis.requirements); - } catch (error) { - if (error.code === 'ENOENT') { + if (error.code === "ENOENT") { throw new Error(`Migration file not found: ${migrationPath}`); } throw new Error(`Failed to analyze migration: ${error.message}`); @@ -189,74 +203,88 @@ class GenerateTemplateCommand extends TestCommand { async parseMigrationToOperations(migrationContent) { // Simple SQL parsing for common operations // In a more complete implementation, you might use a proper SQL AST parser - + const operations = []; - const lines = migrationContent.split('\n'); - + const lines = migrationContent.split("\n"); + for (const line of lines) { const trimmed = line.trim().toUpperCase(); - + // Create table operations - if (trimmed.startsWith('CREATE TABLE')) { - const match = line.match(/CREATE TABLE\s+(?:IF NOT EXISTS\s+)?(\w+\.)?(\w+)/i); + if (trimmed.startsWith("CREATE TABLE")) { + const match = line.match( + /CREATE TABLE\s+(?:IF NOT EXISTS\s+)?(\w+\.)?(\w+)/i, + ); if (match) { operations.push({ - type: 'CREATE_TABLE', + type: "CREATE_TABLE", table: match[2], - schema: match[1] ? match[1].replace('.', '') : 'public' + schema: match[1] ? 
match[1].replace(".", "") : "public", }); } } - + // Create function operations - else if (trimmed.startsWith('CREATE OR REPLACE FUNCTION') || trimmed.startsWith('CREATE FUNCTION')) { - const match = line.match(/CREATE (?:OR REPLACE )?FUNCTION\s+(?:(\w+)\.)?(\w+)\s*\(/i); + else if ( + trimmed.startsWith("CREATE OR REPLACE FUNCTION") || + trimmed.startsWith("CREATE FUNCTION") + ) { + const match = line.match( + /CREATE (?:OR REPLACE )?FUNCTION\s+(?:(\w+)\.)?(\w+)\s*\(/i, + ); if (match) { operations.push({ - type: 'CREATE_FUNCTION', + type: "CREATE_FUNCTION", function: match[2], - schema: match[1] || 'public' + schema: match[1] || "public", }); } } - + // RLS enable operations - else if (trimmed.includes('ROW LEVEL SECURITY') || trimmed.includes('ENABLE RLS')) { - const match = line.match(/ALTER TABLE\s+(?:(\w+)\.)?(\w+)\s+ENABLE\s+ROW\s+LEVEL\s+SECURITY/i); + else if ( + trimmed.includes("ROW LEVEL SECURITY") || + trimmed.includes("ENABLE RLS") + ) { + const match = line.match( + /ALTER TABLE\s+(?:(\w+)\.)?(\w+)\s+ENABLE\s+ROW\s+LEVEL\s+SECURITY/i, + ); if (match) { operations.push({ - type: 'ENABLE_RLS', + type: "ENABLE_RLS", table: match[2], - schema: match[1] || 'public' + schema: match[1] || "public", }); } } - + // Trigger operations - else if (trimmed.startsWith('CREATE TRIGGER')) { - const match = line.match(/CREATE TRIGGER\s+(\w+)\s+.*ON\s+(?:(\w+)\.)?(\w+)/i); + else if (trimmed.startsWith("CREATE TRIGGER")) { + const match = line.match( + /CREATE TRIGGER\s+(\w+)\s+.*ON\s+(?:(\w+)\.)?(\w+)/i, + ); if (match) { operations.push({ - type: 'CREATE_TRIGGER', + type: "CREATE_TRIGGER", trigger: match[1], table: match[3], - schema: match[2] || 'public' + schema: match[2] || "public", }); } } - + // Constraint operations - else if (trimmed.includes('ADD CONSTRAINT')) { + else if (trimmed.includes("ADD CONSTRAINT")) { const match = line.match(/ADD CONSTRAINT\s+(\w+)/i); if (match) { operations.push({ - type: 'ADD_CONSTRAINT', - constraint: match[1] + type: "ADD_CONSTRAINT", + constraint: match[1], }); } } } - + return operations; } @@ -266,22 +294,22 @@ class GenerateTemplateCommand extends TestCommand { * @returns {Array} Template requirements */ convertAnalysisToRequirements(analysisRequirements) { - return analysisRequirements.map(req => { + return analysisRequirements.map((req) => { // Map analyzer requirement types to template types const typeMapping = { - 'FUNCTION': 'rpc', - 'RLS': 'rls', - 'TRIGGER': 'trigger', - 'CONSTRAINT': 'constraint', - 'SCHEMA': 'function' + FUNCTION: "rpc", + RLS: "rls", + TRIGGER: "trigger", + CONSTRAINT: "constraint", + SCHEMA: "function", }; - + return { - type: typeMapping[req.type] || 'function', + type: typeMapping[req.type] || "function", name: req.target, - schema: req.metadata?.schema || 'public', + schema: req.metadata?.schema || "public", description: req.description, - metadata: req.metadata + metadata: req.metadata, }; }); } @@ -295,13 +323,15 @@ class GenerateTemplateCommand extends TestCommand { return { type: options.type, name: options.name, - schema: options.schema || 'public', + schema: options.schema || "public", description: options.description, - parameters: options.parameters ? options.parameters.split(',') : undefined, + parameters: options.parameters + ? 
options.parameters.split(",") + : undefined, returnType: options.returnType, metadata: { - generatedFromCli: true - } + generatedFromCli: true, + }, }; } @@ -312,7 +342,7 @@ class GenerateTemplateCommand extends TestCommand { * @returns {Array} Filtered requirements */ filterRequirementsByType(requirements, type) { - return requirements.filter(req => req.type === type); + return requirements.filter((req) => req.type === type); } /** @@ -323,39 +353,53 @@ class GenerateTemplateCommand extends TestCommand { async outputTemplates(templates, outputPath) { if (outputPath) { // Output to file - const combinedContent = templates.map(template => { - return `-- =========================================================================\n` + - `-- Generated Template: ${template.metadata.name} (${template.type})\n` + - `-- File: ${template.filename}\n` + - `-- Directory: ${template.directory}\n` + - `-- Generated: ${template.metadata.generatedAt}\n` + - `-- =========================================================================\n\n` + - template.content; - }).join('\n\n'); - + const combinedContent = templates + .map((template) => { + return ( + `-- =========================================================================\n` + + `-- Generated Template: ${template.metadata.name} (${template.type})\n` + + `-- File: ${template.filename}\n` + + `-- Directory: ${template.directory}\n` + + `-- Generated: ${template.metadata.generatedAt}\n` + + `-- =========================================================================\n\n` + + template.content + ); + }) + .join("\n\n"); + // Ensure output directory exists await fs.mkdir(path.dirname(outputPath), { recursive: true }); - + // Write to file - await fs.writeFile(outputPath, combinedContent, 'utf8'); - + await fs.writeFile(outputPath, combinedContent, "utf8"); + this.progress(`Templates written to: ${outputPath}`); } else { // Output to stdout - templates.forEach(template => { - console.log(`-- Generated Template: ${template.metadata.name} (${template.type})`); - console.log(`-- Suggested path: ${path.join(template.directory, template.filename)}\n`); + templates.forEach((template) => { + console.log( + `-- Generated Template: ${template.metadata.name} (${template.type})`, + ); + console.log( + `-- Suggested path: ${path.join(template.directory, template.filename)}\n`, + ); console.log(template.content); - console.log('\n' + '='.repeat(80) + '\n'); + console.log("\n" + "=".repeat(80) + "\n"); }); } - + // Also suggest individual file creation if (templates.length > 1 && !outputPath) { - this.info('\nTo save individual template files, you can use:'); - templates.forEach(template => { - const fullPath = path.join('tests', template.directory, template.filename); - console.log(` data test generate-template --type ${template.type} --name ${template.metadata.name} --output ${fullPath}`); + this.info("\nTo save individual template files, you can use:"); + templates.forEach((template) => { + const fullPath = path.join( + "tests", + template.directory, + template.filename, + ); + console.log( + ` data test generate-template --type ${template.type} --name ${template.metadata.name} --output ${fullPath}`, + ); }); } } @@ -366,23 +410,21 @@ class GenerateTemplateCommand extends TestCommand { * @returns {string} Formatted summary */ formatGenerationSummary(result) { - const lines = [ - `Total templates generated: ${result.totalGenerated}` - ]; - + const lines = [`Total templates generated: ${result.totalGenerated}`]; + if (Object.keys(result.summary).length > 0) { - 
lines.push('Templates by type:'); + lines.push("Templates by type:"); Object.entries(result.summary).forEach(([type, count]) => { lines.push(` ${type}: ${count}`); }); } - + if (result.errors.length > 0) { lines.push(`Errors encountered: ${result.errors.length}`); } - - return lines.join('\n'); + + return lines.join("\n"); } } -module.exports = GenerateTemplateCommand; \ No newline at end of file +module.exports = GenerateTemplateCommand; diff --git a/src/commands/test/RunCommand.js b/src/commands/test/RunCommand.js index abdcf1f..25b1d38 100644 --- a/src/commands/test/RunCommand.js +++ b/src/commands/test/RunCommand.js @@ -2,35 +2,43 @@ * Test Run Command */ -const { Client } = require('pg'); -const chalk = require('chalk').default || require('chalk'); -const fs = require('fs').promises; -const path = require('path'); -const TestCommand = require('../../lib/TestCommand'); -const ResultParser = require('../../lib/test/ResultParser'); -const { JUnitFormatter, JSONFormatter } = require('../../lib/test/formatters'); -const TestCache = require('../../lib/test/TestCache'); -const Config = require('../../lib/config'); +const { Client } = require("pg"); +const chalk = require("chalk").default || require("chalk"); +const fs = require("fs").promises; +const path = require("path"); +const TestCommand = require("../../lib/TestCommand"); +const ResultParser = require("../../lib/test/ResultParser"); +const { JUnitFormatter, JSONFormatter } = require("../../lib/test/formatters"); +const TestCache = require("../../lib/test/TestCache"); +const Config = require("../../lib/config"); /** * Run compiled tests using pgTAP */ class RunCommand extends TestCommand { - constructor(databaseUrl, serviceRoleKey = null, testsDir, outputDir, logger = null, isProd = false, config = null) { + constructor( + databaseUrl, + serviceRoleKey = null, + testsDir, + outputDir, + logger = null, + isProd = false, + config = null, + ) { super(databaseUrl, serviceRoleKey, testsDir, outputDir, logger, isProd); this.parser = new ResultParser(); this.config = config; - + // Initialize test cache for performance optimization - this.testCache = new TestCache('.data-cache/test-results', logger); - + this.testCache = new TestCache(".data-cache/test-results", logger); + // Performance tracking this.performanceMetrics = { totalExecutionTime: 0, cacheHits: 0, cacheMisses: 0, testsExecuted: 0, - testsFromCache: 0 + testsFromCache: 0, }; } @@ -39,43 +47,43 @@ class RunCommand extends TestCommand { */ async performExecute(options = {}) { const startTime = Date.now(); - this.emit('start', { isProd: this.isProd, options }); - + this.emit("start", { isProd: this.isProd, options }); + // Enable/disable cache based on options const cacheEnabled = options.cache !== false; // Cache enabled by default - + try { // Load and apply test configuration const testConfig = await this._getTestConfig(); options = this._applyTestConfig(options, testConfig); - - this.progress('Connecting to database...'); + + this.progress("Connecting to database..."); const client = await this._createDatabaseClient(); - + // Set query timeout based on config if (testConfig.test_timeout && testConfig.test_timeout > 0) { client.query_timeout = testConfig.test_timeout * 1000; // Convert to milliseconds } - + try { - this.progress('Discovering test functions...'); + this.progress("Discovering test functions..."); const testFunctions = await this._discoverTestFunctions(client); - + if (testFunctions.length === 0) { - this.warn('No test functions found in test schema'); + this.warn("No 
test functions found in test schema"); const emptyResults = { total: 0, passed: 0, failed: 0, skipped: 0, - testFunctions: [] + testFunctions: [], }; - this.emit('complete', { results: emptyResults }); + this.emit("complete", { results: emptyResults }); return emptyResults; } this.success(`Found ${testFunctions.length} test function(s)`); - + // Determine which tests to run const testsToRun = this._filterTestFunctions(testFunctions, options); @@ -87,80 +95,96 @@ class RunCommand extends TestCommand { passed: 0, failed: 0, skipped: 0, - testFunctions: [] + testFunctions: [], }; - this.emit('complete', { results: emptyResults }); + this.emit("complete", { results: emptyResults }); return emptyResults; } this.progress(`Running ${testsToRun.length} test function(s)...`); - + // Determine if parallel execution is enabled (default: true for better performance) const runParallel = options.parallel !== false; const maxConcurrency = options.maxConcurrency || 5; // Limit concurrent database connections - + // Execute tests with caching let allResults = []; - + if (runParallel) { // Parallel execution for better performance const testPromises = testsToRun.map(async (testFunc) => { const funcStartTime = Date.now(); - + // Try cache first if enabled let tapOutput = null; let fromCache = false; - + if (cacheEnabled) { - const hash = await this.testCache.calculateHash(testFunc, this.databaseUrl, options); + const hash = await this.testCache.calculateHash( + testFunc, + this.databaseUrl, + options, + ); const cachedResult = await this.testCache.getCachedResult(hash); - + if (cachedResult && cachedResult.tapOutput) { tapOutput = cachedResult.tapOutput; fromCache = true; this.performanceMetrics.cacheHits++; this.performanceMetrics.testsFromCache++; - this.progress(`${chalk.blue('✓')} ${testFunc} (cached, saved ~${cachedResult.originalDuration || 0}ms)`); + this.progress( + `${chalk.blue("✓")} ${testFunc} (cached, saved ~${cachedResult.originalDuration || 0}ms)`, + ); } else { this.performanceMetrics.cacheMisses++; } } - + // Execute test if not cached if (!tapOutput) { this.progress(`Running ${testFunc}...`); const testStartTime = Date.now(); tapOutput = await this._executeTestFunction(client, testFunc); const testDuration = Date.now() - testStartTime; - + // Cache the result if caching is enabled if (cacheEnabled) { try { - const hash = await this.testCache.calculateHash(testFunc, this.databaseUrl, options); - await this.testCache.storeResult(hash, { - tapOutput: tapOutput, - originalDuration: testDuration - }, { - testFunction: testFunc, - duration: testDuration, - databaseUrl: this.databaseUrl, - options: options - }); + const hash = await this.testCache.calculateHash( + testFunc, + this.databaseUrl, + options, + ); + await this.testCache.storeResult( + hash, + { + tapOutput: tapOutput, + originalDuration: testDuration, + }, + { + testFunction: testFunc, + duration: testDuration, + databaseUrl: this.databaseUrl, + options: options, + }, + ); } catch (cacheError) { - this.warn(`Failed to cache result for ${testFunc}: ${cacheError.message}`); + this.warn( + `Failed to cache result for ${testFunc}: ${cacheError.message}`, + ); } } } - + this.performanceMetrics.testsExecuted++; - return { - function: testFunc, - output: tapOutput, + return { + function: testFunc, + output: tapOutput, fromCache: fromCache, - duration: Date.now() - funcStartTime + duration: Date.now() - funcStartTime, }; }); - + // Process tests in batches to limit concurrent connections for (let i = 0; i < testPromises.length; i += 
maxConcurrency) { const batch = testPromises.slice(i, i + maxConcurrency); @@ -171,65 +195,81 @@ class RunCommand extends TestCommand { // Sequential execution (fallback mode or when explicitly requested) for (const testFunc of testsToRun) { const funcStartTime = Date.now(); - + // Try cache first if enabled let tapOutput = null; let fromCache = false; - + if (cacheEnabled) { - const hash = await this.testCache.calculateHash(testFunc, this.databaseUrl, options); + const hash = await this.testCache.calculateHash( + testFunc, + this.databaseUrl, + options, + ); const cachedResult = await this.testCache.getCachedResult(hash); - + if (cachedResult && cachedResult.tapOutput) { tapOutput = cachedResult.tapOutput; fromCache = true; this.performanceMetrics.cacheHits++; this.performanceMetrics.testsFromCache++; - this.progress(`${chalk.blue('✓')} ${testFunc} (cached, saved ~${cachedResult.originalDuration || 0}ms)`); + this.progress( + `${chalk.blue("✓")} ${testFunc} (cached, saved ~${cachedResult.originalDuration || 0}ms)`, + ); } else { this.performanceMetrics.cacheMisses++; } } - + // Execute test if not cached if (!tapOutput) { this.progress(`Running ${testFunc}...`); const testStartTime = Date.now(); tapOutput = await this._executeTestFunction(client, testFunc); const testDuration = Date.now() - testStartTime; - + // Cache the result if caching is enabled if (cacheEnabled) { try { - const hash = await this.testCache.calculateHash(testFunc, this.databaseUrl, options); - await this.testCache.storeResult(hash, { - tapOutput: tapOutput, - originalDuration: testDuration - }, { - testFunction: testFunc, - duration: testDuration, - databaseUrl: this.databaseUrl, - options: options - }); + const hash = await this.testCache.calculateHash( + testFunc, + this.databaseUrl, + options, + ); + await this.testCache.storeResult( + hash, + { + tapOutput: tapOutput, + originalDuration: testDuration, + }, + { + testFunction: testFunc, + duration: testDuration, + databaseUrl: this.databaseUrl, + options: options, + }, + ); } catch (cacheError) { - this.warn(`Failed to cache result for ${testFunc}: ${cacheError.message}`); + this.warn( + `Failed to cache result for ${testFunc}: ${cacheError.message}`, + ); } } } - + this.performanceMetrics.testsExecuted++; - allResults.push({ - function: testFunc, - output: tapOutput, + allResults.push({ + function: testFunc, + output: tapOutput, fromCache: fromCache, - duration: Date.now() - funcStartTime + duration: Date.now() - funcStartTime, }); } } - + // Parse all results and add performance metadata const combinedResults = this._combineResults(allResults); - + // Add cache performance metrics const totalTime = Date.now() - startTime; combinedResults.performance = { @@ -239,26 +279,31 @@ class RunCommand extends TestCommand { cacheMisses: this.performanceMetrics.cacheMisses, testsExecuted: this.performanceMetrics.testsExecuted, testsFromCache: this.performanceMetrics.testsFromCache, - cacheHitRate: this.performanceMetrics.testsExecuted > 0 - ? (this.performanceMetrics.testsFromCache / this.performanceMetrics.testsExecuted * 100).toFixed(1) - : '0.0', - averageTestTime: this.performanceMetrics.testsExecuted > 0 - ? Math.round(totalTime / this.performanceMetrics.testsExecuted) - : 0 + cacheHitRate: + this.performanceMetrics.testsExecuted > 0 + ? ( + (this.performanceMetrics.testsFromCache / + this.performanceMetrics.testsExecuted) * + 100 + ).toFixed(1) + : "0.0", + averageTestTime: + this.performanceMetrics.testsExecuted > 0 + ? 
Math.round(totalTime / this.performanceMetrics.testsExecuted) + : 0, }; - + // Handle output formatting based on options await this._handleOutputFormat(combinedResults, options); - - this.emit('complete', { results: combinedResults }); + + this.emit("complete", { results: combinedResults }); return combinedResults; - } finally { await client.end(); } } catch (error) { - this.error('Failed to run tests', error); - this.emit('failed', { error }); + this.error("Failed to run tests", error); + this.emit("failed", { error }); throw error; } } @@ -269,13 +314,15 @@ class RunCommand extends TestCommand { */ async _createDatabaseClient() { if (!this.databaseUrl) { - throw new Error(`Database connection string not configured for ${this.isProd ? 'production' : 'local'} environment`); + throw new Error( + `Database connection string not configured for ${this.isProd ? "production" : "local"} environment`, + ); } - + const client = new Client({ - connectionString: this.databaseUrl + connectionString: this.databaseUrl, }); - + await client.connect(); return client; } @@ -294,9 +341,9 @@ class RunCommand extends TestCommand { AND proname LIKE 'run_%_tests' ORDER BY proname `; - + const result = await client.query(query); - return result.rows.map(row => row.proname); + return result.rows.map((row) => row.proname); } /** @@ -305,23 +352,23 @@ class RunCommand extends TestCommand { */ _filterTestFunctions(testFunctions, options) { let filtered = [...testFunctions]; - + // Apply suite filter if (options.suite) { filtered = this._filterBySuite(filtered, options.suite); } - + // Apply pattern filter (legacy support for options.function) const pattern = options.pattern || options.function; if (pattern) { filtered = this._filterByPattern(filtered, pattern); } - + // Apply tag filter if (options.tag) { filtered = this._filterByTag(filtered, options.tag); } - + return filtered; } @@ -330,10 +377,10 @@ class RunCommand extends TestCommand { * @private */ _filterBySuite(testFunctions, suite) { - return testFunctions.filter(func => { + return testFunctions.filter((func) => { // Handle suite names like "admin" -> "run_admin_*" (e.g., "run_admin_delete_pet_tests") // Also handle direct matches like "admin" -> "run_admin_tests" - const regex = new RegExp(`^run_${suite}(_.*)?_tests$`, 'i'); + const regex = new RegExp(`^run_${suite}(_.*)?_tests$`, "i"); return regex.test(func); }); } @@ -345,8 +392,8 @@ class RunCommand extends TestCommand { _filterByPattern(testFunctions, pattern) { // Convert glob patterns to regex patterns const regexPattern = this._globToRegex(pattern); - const regex = new RegExp(regexPattern, 'i'); - return testFunctions.filter(func => regex.test(func)); + const regex = new RegExp(regexPattern, "i"); + return testFunctions.filter((func) => regex.test(func)); } /** @@ -356,10 +403,10 @@ class RunCommand extends TestCommand { _globToRegex(pattern) { // Escape special regex characters except * and ? let regex = pattern - .replace(/[.+^${}()|[\]\\]/g, '\\$&') // Escape regex special chars - .replace(/\*/g, '.*') // Convert * to .* - .replace(/\?/g, '.'); // Convert ? to . - + .replace(/[.+^${}()|[\]\\]/g, "\\$&") // Escape regex special chars + .replace(/\*/g, ".*") // Convert * to .* + .replace(/\?/g, "."); // Convert ? to . 
+ // Anchor the pattern to match the whole string return `^${regex}$`; } @@ -381,21 +428,21 @@ class RunCommand extends TestCommand { */ _getFilterDescription(options) { const filters = []; - + if (options.suite) { filters.push(`suite="${options.suite}"`); } - + const pattern = options.pattern || options.function; if (pattern) { filters.push(`pattern="${pattern}"`); } - + if (options.tag) { filters.push(`tag="${options.tag}"`); } - - return filters.length > 0 ? filters.join(', ') : 'none'; + + return filters.length > 0 ? filters.join(", ") : "none"; } /** @@ -404,11 +451,11 @@ class RunCommand extends TestCommand { */ async _executeTestFunction(client, functionName) { const query = `SELECT * FROM test.${functionName}()`; - + try { const result = await client.query(query); // Join all result rows into TAP output - return result.rows.map(row => Object.values(row)[0]).join('\n'); + return result.rows.map((row) => Object.values(row)[0]).join("\n"); } catch (error) { // Return TAP format error return `not ok 1 ${functionName} failed: ${error.message}`; @@ -429,28 +476,28 @@ class RunCommand extends TestCommand { for (const { function: funcName, output } of allResults) { const funcResults = this.parser.parse(output); - + totalPassed += funcResults.passed; totalFailed += funcResults.failed; totalSkipped += funcResults.skipped; - + // Prefix test descriptions with function name - const prefixedTests = funcResults.tests.map(test => ({ + const prefixedTests = funcResults.tests.map((test) => ({ ...test, description: `${funcName}: ${test.description}`, - function: funcName + function: funcName, })); - + allTests = allTests.concat(prefixedTests); allDiagnostics = allDiagnostics.concat(funcResults.diagnostics); - + testFunctions.push({ name: funcName, passed: funcResults.passed, failed: funcResults.failed, skipped: funcResults.skipped, total: funcResults.total, - success: funcResults.failed === 0 + success: funcResults.failed === 0, }); } @@ -461,7 +508,7 @@ class RunCommand extends TestCommand { skipped: totalSkipped, tests: allTests, diagnostics: allDiagnostics, - testFunctions + testFunctions, }; } @@ -470,17 +517,17 @@ class RunCommand extends TestCommand { * @private */ async _handleOutputFormat(results, options = {}) { - const format = options.format || 'console'; + const format = options.format || "console"; const outputFile = options.output; switch (format.toLowerCase()) { - case 'junit': + case "junit": await this._outputJUnit(results, outputFile); break; - case 'json': + case "json": await this._outputJSON(results, outputFile); break; - case 'console': + case "console": default: this._displayResults(results); break; @@ -494,9 +541,13 @@ class RunCommand extends TestCommand { async _outputJUnit(results, outputFile) { const formatter = new JUnitFormatter(); const xmlOutput = formatter.format(results); - + if (outputFile) { - await this._writeOutputFile(xmlOutput, outputFile, formatter.getFileExtension()); + await this._writeOutputFile( + xmlOutput, + outputFile, + formatter.getFileExtension(), + ); this.success(`JUnit XML results written to: ${outputFile}`); } else { console.log(xmlOutput); @@ -510,9 +561,13 @@ class RunCommand extends TestCommand { async _outputJSON(results, outputFile) { const formatter = new JSONFormatter(); const jsonOutput = formatter.format(results); - + if (outputFile) { - await this._writeOutputFile(jsonOutput, outputFile, formatter.getFileExtension()); + await this._writeOutputFile( + jsonOutput, + outputFile, + formatter.getFileExtension(), + ); this.success(`JSON 
results written to: ${outputFile}`); } else { console.log(jsonOutput); @@ -525,18 +580,18 @@ class RunCommand extends TestCommand { */ async _writeOutputFile(content, filePath, defaultExtension) { let fullPath = filePath; - + // Add default extension if not present if (!path.extname(filePath)) { fullPath = filePath + defaultExtension; } - + // Ensure directory exists const dir = path.dirname(fullPath); await fs.mkdir(dir, { recursive: true }); - + // Write file - await fs.writeFile(fullPath, content, 'utf8'); + await fs.writeFile(fullPath, content, "utf8"); } /** @@ -544,74 +599,99 @@ class RunCommand extends TestCommand { * @private */ _displayResults(results) { - const { total, passed, failed, skipped, tests, diagnostics, testFunctions } = results; - - console.log(''); // Empty line for spacing + const { + total, + passed, + failed, + skipped, + tests, + diagnostics, + testFunctions, + } = results; + + console.log(""); // Empty line for spacing // Summary by function if (testFunctions.length > 1) { - console.log(chalk.bold('Test Functions:')); - testFunctions.forEach(func => { - const symbol = func.success ? chalk.green('✓') : chalk.red('✗'); + console.log(chalk.bold("Test Functions:")); + testFunctions.forEach((func) => { + const symbol = func.success ? chalk.green("✓") : chalk.red("✗"); const summary = `${func.passed}/${func.total} passed`; - const skippedText = func.skipped > 0 ? `, ${func.skipped} skipped` : ''; + const skippedText = func.skipped > 0 ? `, ${func.skipped} skipped` : ""; console.log(` ${symbol} ${func.name}: ${summary}${skippedText}`); }); - console.log(''); // Empty line + console.log(""); // Empty line } // Overall summary if (failed > 0) { console.log(chalk.red.bold(`✗ ${failed}/${total} tests failed`)); } else if (skipped > 0) { - console.log(chalk.yellow.bold(`✓ ${passed}/${total} tests passed (${skipped} skipped)`)); + console.log( + chalk.yellow.bold( + `✓ ${passed}/${total} tests passed (${skipped} skipped)`, + ), + ); } else if (total > 0) { console.log(chalk.green.bold(`✓ All ${passed}/${total} tests passed`)); } else { - console.log(chalk.gray.bold('No tests executed')); + console.log(chalk.gray.bold("No tests executed")); } // Failed test details if (failed > 0) { - console.log(''); // Empty line - console.log(chalk.red.bold('Failed Tests:')); + console.log(""); // Empty line + console.log(chalk.red.bold("Failed Tests:")); tests - .filter(test => test.status === 'fail') - .forEach(test => { + .filter((test) => test.status === "fail") + .forEach((test) => { console.log(chalk.red(` ✗ ${test.description}`)); }); } // Diagnostics if (diagnostics.length > 0) { - console.log(''); // Empty line - console.log(chalk.gray.bold('Diagnostics:')); - diagnostics.forEach(diagnostic => { + console.log(""); // Empty line + console.log(chalk.gray.bold("Diagnostics:")); + diagnostics.forEach((diagnostic) => { console.log(chalk.gray(` ${diagnostic}`)); }); } // Performance metrics if (results.performance) { - console.log(''); // Empty line - console.log(chalk.cyan.bold('Performance:')); - + console.log(""); // Empty line + console.log(chalk.cyan.bold("Performance:")); + const perf = results.performance; console.log(chalk.cyan(` Execution time: ${perf.totalExecutionTime}ms`)); console.log(chalk.cyan(` Average per test: ${perf.averageTestTime}ms`)); - + if (perf.cacheEnabled) { if (perf.testsFromCache > 0) { - console.log(chalk.green(` Cache performance: ${perf.cacheHitRate}% hit rate (${perf.testsFromCache}/${perf.testsExecuted} from cache)`)); - + console.log( + chalk.green( + 
` Cache performance: ${perf.cacheHitRate}% hit rate (${perf.testsFromCache}/${perf.testsExecuted} from cache)`, + ), + ); + // Calculate estimated time saved const avgExecutionTime = perf.averageTestTime; - const estimatedTimeSaved = perf.testsFromCache * avgExecutionTime * 0.8; // Assume 80% time savings + const estimatedTimeSaved = + perf.testsFromCache * avgExecutionTime * 0.8; // Assume 80% time savings if (estimatedTimeSaved > 0) { - console.log(chalk.green(` Estimated time saved: ~${Math.round(estimatedTimeSaved)}ms`)); + console.log( + chalk.green( + ` Estimated time saved: ~${Math.round(estimatedTimeSaved)}ms`, + ), + ); } } else { - console.log(chalk.yellow(` Cache performance: 0% hit rate (building cache...)`)); + console.log( + chalk.yellow( + ` Cache performance: 0% hit rate (building cache...)`, + ), + ); } } else { console.log(chalk.gray(` Cache: disabled`)); @@ -636,12 +716,12 @@ class RunCommand extends TestCommand { if (this.config) { return this.config.getTestConfig(); } - + try { const config = await Config.load(); return config.getTestConfig(); } catch (error) { - this.logger.debug('Could not load config file, using defaults'); + this.logger.debug("Could not load config file, using defaults"); const defaultConfig = new Config(); return defaultConfig.getTestConfig(); } @@ -653,14 +733,18 @@ class RunCommand extends TestCommand { */ _applyTestConfig(options, testConfig) { const mergedOptions = { ...options }; - + // Apply default output format if not specified - if (!mergedOptions.format && testConfig.output_formats && testConfig.output_formats.length > 0) { + if ( + !mergedOptions.format && + testConfig.output_formats && + testConfig.output_formats.length > 0 + ) { mergedOptions.format = testConfig.output_formats[0]; } - + return mergedOptions; } } -module.exports = RunCommand; \ No newline at end of file +module.exports = RunCommand; diff --git a/src/commands/test/ValidateCommand.js b/src/commands/test/ValidateCommand.js index 771aa74..810af13 100644 --- a/src/commands/test/ValidateCommand.js +++ b/src/commands/test/ValidateCommand.js @@ -2,10 +2,10 @@ * Test Validate Command - SQL syntax validation for pgTAP tests */ -const TestCommand = require('../../lib/TestCommand'); -const fs = require('fs').promises; -const path = require('path'); -const crypto = require('crypto'); +const TestCommand = require("../../lib/TestCommand"); +const fs = require("fs").promises; +const path = require("path"); +const crypto = require("crypto"); /** * Validate SQL syntax and pgTAP function usage in test files @@ -18,47 +18,93 @@ class ValidateCommand extends TestCommand { outputDir = null, logger = null, isProd = false, - pathResolver = null + pathResolver = null, ) { - super(databaseUrl, serviceRoleKey, testsDir, outputDir, logger, isProd, pathResolver); - + super( + databaseUrl, + serviceRoleKey, + testsDir, + outputDir, + logger, + isProd, + pathResolver, + ); + // Validation doesn't require database connection this.requiresProductionConfirmation = false; - + // Initialize validation cache this.validationCache = new Map(); - this.cacheDir = path.join(process.cwd(), '.data-cache', 'validation'); - this.cacheFile = path.join(this.cacheDir, 'validation-cache.json'); - + this.cacheDir = path.join(process.cwd(), ".data-cache", "validation"); + this.cacheFile = path.join(this.cacheDir, "validation-cache.json"); + // Valid pgTAP function names this.pgTapFunctions = new Set([ // Basic test functions - 'ok', 'nok', 'pass', 'fail', + "ok", + "nok", + "pass", + "fail", // Comparison functions - 
'is', 'isnt', 'like', 'unlike', 'matches', 'imatches', + "is", + "isnt", + "like", + "unlike", + "matches", + "imatches", // NULL testing - 'is_empty', 'isnt_empty', 'is_null', 'isnt_null', 'is_not_null', + "is_empty", + "isnt_empty", + "is_null", + "isnt_null", + "is_not_null", // Numeric comparison - 'cmp_ok', + "cmp_ok", // Schema testing - 'has_schema', 'hasnt_schema', 'schema_owner_is', - 'has_table', 'hasnt_table', 'has_view', 'hasnt_view', - 'has_function', 'hasnt_function', 'function_returns', - 'has_column', 'hasnt_column', 'col_type_is', 'col_is_null', 'col_not_null', - 'col_has_default', 'col_default_is', + "has_schema", + "hasnt_schema", + "schema_owner_is", + "has_table", + "hasnt_table", + "has_view", + "hasnt_view", + "has_function", + "hasnt_function", + "function_returns", + "has_column", + "hasnt_column", + "col_type_is", + "col_is_null", + "col_not_null", + "col_has_default", + "col_default_is", // Row testing - 'results_eq', 'results_ne', 'set_eq', 'set_ne', 'bag_eq', 'bag_ne', - 'row_eq', 'throws_ok', 'throws_like', 'throws_matching', 'lives_ok', + "results_eq", + "results_ne", + "set_eq", + "set_ne", + "bag_eq", + "bag_ne", + "row_eq", + "throws_ok", + "throws_like", + "throws_matching", + "lives_ok", // Test control - 'plan', 'finish', 'diag', 'skip', 'todo', 'todo_skip' + "plan", + "finish", + "diag", + "skip", + "todo", + "todo_skip", ]); - + this.validationResults = { filesProcessed: 0, syntaxErrors: [], pgTapIssues: [], structureWarnings: [], - hasErrors: false + hasErrors: false, }; } @@ -68,16 +114,18 @@ class ValidateCommand extends TestCommand { async loadCache() { try { await fs.mkdir(this.cacheDir, { recursive: true }); - const cacheData = await fs.readFile(this.cacheFile, 'utf8'); + const cacheData = await fs.readFile(this.cacheFile, "utf8"); const cache = JSON.parse(cacheData); this.validationCache = new Map(Object.entries(cache)); - this.info(`Loaded validation cache with ${this.validationCache.size} entries`); + this.info( + `Loaded validation cache with ${this.validationCache.size} entries`, + ); } catch (error) { // Cache doesn't exist or is invalid, start fresh this.validationCache = new Map(); } } - + /** * Save validation cache to disk */ @@ -90,22 +138,22 @@ class ValidateCommand extends TestCommand { this.warn(`Failed to save validation cache: ${error.message}`); } } - + /** * Calculate hash for a file's content */ async calculateFileHash(filePath) { - const content = await fs.readFile(filePath, 'utf8'); - return crypto.createHash('sha256').update(content).digest('hex'); + const content = await fs.readFile(filePath, "utf8"); + return crypto.createHash("sha256").update(content).digest("hex"); } - + /** * Check if file validation is cached and still valid */ async isCacheValid(filePath) { const fileHash = await this.calculateFileHash(filePath); const cacheKey = `${filePath}:${fileHash}`; - + if (this.validationCache.has(cacheKey)) { const cached = this.validationCache.get(cacheKey); // Cache is valid for 24 hours @@ -114,7 +162,7 @@ class ValidateCommand extends TestCommand { } return false; } - + /** * Get cached validation result */ @@ -122,7 +170,7 @@ class ValidateCommand extends TestCommand { const cacheKey = `${filePath}:${fileHash}`; return this.validationCache.get(cacheKey); } - + /** * Store validation result in cache */ @@ -130,7 +178,7 @@ class ValidateCommand extends TestCommand { const cacheKey = `${filePath}:${fileHash}`; this.validationCache.set(cacheKey, { ...result, - timestamp: Date.now() + timestamp: Date.now(), }); } @@ -138,32 
+186,32 @@ class ValidateCommand extends TestCommand { * Execute test validation */ async performExecute(options = {}) { - this.emit('start', { isProd: this.isProd, options }); - + this.emit("start", { isProd: this.isProd, options }); + try { // Load cache if caching is enabled const cacheEnabled = options.cache !== false; if (cacheEnabled) { await this.loadCache(); } - - this.progress('Scanning test files for validation...'); - - const testFiles = await this.listTestFiles('*.sql'); - + + this.progress("Scanning test files for validation..."); + + const testFiles = await this.listTestFiles("*.sql"); + if (testFiles.length === 0) { - this.warn('No test files found in tests directory'); + this.warn("No test files found in tests directory"); return this.validationResults; } - + this.progress(`Found ${testFiles.length} test files to validate`); - + let cachedCount = 0; let validatedCount = 0; - + // Validate each test file for (const filePath of testFiles) { - if (cacheEnabled && await this.isCacheValid(filePath)) { + if (cacheEnabled && (await this.isCacheValid(filePath))) { const fileHash = await this.calculateFileHash(filePath); const cached = this.getCachedResult(filePath, fileHash); if (cached && !cached.hasErrors) { @@ -172,10 +220,10 @@ class ValidateCommand extends TestCommand { continue; } } - + await this.validateFile(filePath); validatedCount++; - + // Cache the result if no errors if (cacheEnabled && !this.validationResults.hasErrors) { const fileHash = await this.calculateFileHash(filePath); @@ -183,78 +231,82 @@ class ValidateCommand extends TestCommand { hasErrors: false, syntaxErrors: [], pgTapIssues: [], - structureWarnings: [] + structureWarnings: [], }); } } - + // Save cache if caching is enabled if (cacheEnabled) { await this.saveCache(); } - + // Report results this.reportResults(); - + if (cachedCount > 0) { - this.success(`${cachedCount} files validated from cache, ${validatedCount} files validated`); + this.success( + `${cachedCount} files validated from cache, ${validatedCount} files validated`, + ); } - - this.emit('complete', { validation: this.validationResults }); + + this.emit("complete", { validation: this.validationResults }); return this.validationResults; - } catch (error) { - this.error('Failed to validate tests', error); - this.emit('failed', { error }); + this.error("Failed to validate tests", error); + this.emit("failed", { error }); throw error; } } - + /** * Validate a single SQL test file * @param {string} filePath - Path to the test file */ async validateFile(filePath) { try { - const content = await fs.readFile(filePath, 'utf8'); + const content = await fs.readFile(filePath, "utf8"); const fileName = path.basename(filePath); - + this.validationResults.filesProcessed++; - + // Basic SQL syntax validation this.validateSqlSyntax(fileName, content); - + // pgTAP function validation this.validatePgTapUsage(fileName, content); - + // Test function structure validation this.validateTestStructure(fileName, content); - } catch (error) { - this.addSyntaxError(path.basename(filePath), 0, `File read error: ${error.message}`); + this.addSyntaxError( + path.basename(filePath), + 0, + `File read error: ${error.message}`, + ); } } - + /** * Validate basic SQL syntax * @param {string} fileName - Name of the file * @param {string} content - File content */ validateSqlSyntax(fileName, content) { - const lines = content.split('\n'); - + const lines = content.split("\n"); + for (let i = 0; i < lines.length; i++) { const line = lines[i].trim(); const lineNum = i + 1; - + 
// Skip comments and empty lines - if (!line || line.startsWith('--')) continue; - + if (!line || line.startsWith("--")) continue; + // Check for basic syntax errors this.checkBasicSyntax(fileName, lineNum, line); } } - + /** * Check basic SQL syntax patterns * @param {string} fileName - Name of the file @@ -265,62 +317,82 @@ class ValidateCommand extends TestCommand { // Check for unmatched parentheses in single line const openParens = (line.match(/\(/g) || []).length; const closeParens = (line.match(/\)/g) || []).length; - + // Only flag obvious single-line mismatches - if (line.includes('(') && !line.includes('$$') && openParens > closeParens + 1) { - this.addSyntaxError(fileName, lineNum, 'Possible unmatched opening parenthesis'); + if ( + line.includes("(") && + !line.includes("$$") && + openParens > closeParens + 1 + ) { + this.addSyntaxError( + fileName, + lineNum, + "Possible unmatched opening parenthesis", + ); } - + // Check for common typos if (line.match(/\bSELET\b/i)) { - this.addSyntaxError(fileName, lineNum, 'Typo: "SELET" should be "SELECT"'); + this.addSyntaxError( + fileName, + lineNum, + 'Typo: "SELET" should be "SELECT"', + ); } - + if (line.match(/\bFROM\s+FROM\b/i)) { - this.addSyntaxError(fileName, lineNum, 'Duplicate FROM keyword'); + this.addSyntaxError(fileName, lineNum, "Duplicate FROM keyword"); } - + if (line.match(/\bWHERE\s+WHERE\b/i)) { - this.addSyntaxError(fileName, lineNum, 'Duplicate WHERE keyword'); + this.addSyntaxError(fileName, lineNum, "Duplicate WHERE keyword"); } - + // Check for semicolon issues if (line.match(/;;+/)) { - this.addSyntaxError(fileName, lineNum, 'Multiple consecutive semicolons'); + this.addSyntaxError(fileName, lineNum, "Multiple consecutive semicolons"); } } - + /** * Validate pgTAP function usage * @param {string} fileName - Name of the file * @param {string} content - File content */ validatePgTapUsage(fileName, content) { - const lines = content.split('\n'); - + const lines = content.split("\n"); + for (let i = 0; i < lines.length; i++) { const line = lines[i].trim(); const lineNum = i + 1; - + // Skip comments and empty lines - if (!line || line.startsWith('--')) continue; - + if (!line || line.startsWith("--")) continue; + // Find pgTAP function calls const tapMatch = line.match(/\btap\.(\w+)\s*\(/i); if (tapMatch) { const functionName = tapMatch[1].toLowerCase(); if (!this.pgTapFunctions.has(functionName)) { - this.addPgTapIssue(fileName, lineNum, `Unknown pgTAP function: tap.${functionName}`); + this.addPgTapIssue( + fileName, + lineNum, + `Unknown pgTAP function: tap.${functionName}`, + ); } } - + // Check for RETURN NEXT patterns if (line.match(/RETURN\s+NEXT/i) && !line.match(/tap\./i)) { - this.addPgTapIssue(fileName, lineNum, 'RETURN NEXT should typically use tap.* functions'); + this.addPgTapIssue( + fileName, + lineNum, + "RETURN NEXT should typically use tap.* functions", + ); } } } - + /** * Validate test function structure * @param {string} fileName - Name of the file @@ -328,53 +400,82 @@ class ValidateCommand extends TestCommand { */ validateTestStructure(fileName, content) { // Check for test function declarations - const testFunctionRegex = /CREATE\s+(?:OR\s+REPLACE\s+)?FUNCTION\s+test\.(\w+)\s*\(([^)]*)\)\s*RETURNS\s+(\w+(?:\s+\w+)*)/gi; + const testFunctionRegex = + /CREATE\s+(?:OR\s+REPLACE\s+)?FUNCTION\s+test\.(\w+)\s*\(([^)]*)\)\s*RETURNS\s+(\w+(?:\s+\w+)*)/gi; let match; - + let hasTestFunctions = false; - + while ((match = testFunctionRegex.exec(content)) !== null) { const functionName = match[1]; 
const returnType = match[3].toUpperCase(); - + // Skip helper functions (they don't need to be pgTAP test functions) - const isHelperFunction = functionName.startsWith('create_') || - functionName.startsWith('cleanup_') || - functionName.startsWith('set_') || - functionName.includes('_helper') || - functionName.includes('_util'); - + const isHelperFunction = + functionName.startsWith("create_") || + functionName.startsWith("cleanup_") || + functionName.startsWith("set_") || + functionName.includes("_helper") || + functionName.includes("_util"); + if (!isHelperFunction) { hasTestFunctions = true; - + // Check return type for actual test functions - if (!returnType.includes('SETOF TEXT')) { - this.addStructureWarning(fileName, 0, `Function test.${functionName} should return SETOF TEXT for pgTAP compatibility`); + if (!returnType.includes("SETOF TEXT")) { + this.addStructureWarning( + fileName, + 0, + `Function test.${functionName} should return SETOF TEXT for pgTAP compatibility`, + ); } - + // Check function name pattern for actual test functions - if (!functionName.includes('test') && !functionName.startsWith('run_')) { - this.addStructureWarning(fileName, 0, `Function test.${functionName} should include 'test' or start with 'run_' for clarity`); + if ( + !functionName.includes("test") && + !functionName.startsWith("run_") + ) { + this.addStructureWarning( + fileName, + 0, + `Function test.${functionName} should include 'test' or start with 'run_' for clarity`, + ); } } } - + // Check if file has any test functions - if (!hasTestFunctions && fileName.endsWith('.sql') && !fileName.startsWith('00_')) { - this.addStructureWarning(fileName, 0, 'File appears to be a test file but contains no test functions'); + if ( + !hasTestFunctions && + fileName.endsWith(".sql") && + !fileName.startsWith("00_") + ) { + this.addStructureWarning( + fileName, + 0, + "File appears to be a test file but contains no test functions", + ); } - + // Check for plan() call if (hasTestFunctions && !content.match(/tap\.plan\s*\(/i)) { - this.addStructureWarning(fileName, 0, 'Test functions should include tap.plan() to specify expected test count'); + this.addStructureWarning( + fileName, + 0, + "Test functions should include tap.plan() to specify expected test count", + ); } - + // Check for finish() call if (hasTestFunctions && !content.match(/tap\.finish\s*\(\s*\)/i)) { - this.addStructureWarning(fileName, 0, 'Test functions should include tap.finish() at the end'); + this.addStructureWarning( + fileName, + 0, + "Test functions should include tap.finish() at the end", + ); } } - + /** * Add a syntax error to results */ @@ -382,7 +483,7 @@ class ValidateCommand extends TestCommand { this.validationResults.syntaxErrors.push({ fileName, lineNum, message }); this.validationResults.hasErrors = true; } - + /** * Add a pgTAP issue to results */ @@ -390,58 +491,70 @@ class ValidateCommand extends TestCommand { this.validationResults.pgTapIssues.push({ fileName, lineNum, message }); this.validationResults.hasErrors = true; } - + /** * Add a structure warning to results */ addStructureWarning(fileName, lineNum, message) { - this.validationResults.structureWarnings.push({ fileName, lineNum, message }); + this.validationResults.structureWarnings.push({ + fileName, + lineNum, + message, + }); } - + /** * Report validation results */ reportResults() { - const { filesProcessed, syntaxErrors, pgTapIssues, structureWarnings, hasErrors } = this.validationResults; - + const { + filesProcessed, + syntaxErrors, + pgTapIssues, + 
structureWarnings, + hasErrors, + } = this.validationResults; + this.progress(`Processed ${filesProcessed} test files`); - + // Report syntax errors if (syntaxErrors.length > 0) { this.error(`Found ${syntaxErrors.length} syntax errors:`); - syntaxErrors.forEach(error => { + syntaxErrors.forEach((error) => { this.error(` ${error.fileName}:${error.lineNum} - ${error.message}`); }); } - + // Report pgTAP issues if (pgTapIssues.length > 0) { this.error(`Found ${pgTapIssues.length} pgTAP issues:`); - pgTapIssues.forEach(issue => { + pgTapIssues.forEach((issue) => { this.error(` ${issue.fileName}:${issue.lineNum} - ${issue.message}`); }); } - + // Report structure warnings if (structureWarnings.length > 0) { this.warn(`Found ${structureWarnings.length} structure warnings:`); - structureWarnings.forEach(warning => { - this.warn(` ${warning.fileName}:${warning.lineNum} - ${warning.message}`); + structureWarnings.forEach((warning) => { + this.warn( + ` ${warning.fileName}:${warning.lineNum} - ${warning.message}`, + ); }); } - + // Final status if (hasErrors) { - this.error('Validation failed - please fix the errors above'); + this.error("Validation failed - please fix the errors above"); // Set exit code for CLI process.exitCode = 1; } else { - this.success('Validation complete - no errors found'); + this.success("Validation complete - no errors found"); if (structureWarnings.length === 0) { - this.success('All tests follow best practices'); + this.success("All tests follow best practices"); } } } } -module.exports = ValidateCommand; \ No newline at end of file +module.exports = ValidateCommand; diff --git a/src/commands/test/WatchCommand.js b/src/commands/test/WatchCommand.js index 8b880b4..d2c48da 100644 --- a/src/commands/test/WatchCommand.js +++ b/src/commands/test/WatchCommand.js @@ -1,20 +1,20 @@ /** * Test Watch Command - P1.T006 - * + * * Watches test/ directory for changes and automatically: * 1. Compiles tests using TestCompileCommand - * 2. Runs tests using TestRunCommand + * 2. Runs tests using TestRunCommand * 3. Debounces rapid changes * 4. 
Clears console between runs */ -const chokidar = require('chokidar'); -const path = require('path'); -const chalk = require('chalk').default || require('chalk'); -const TestCommand = require('../../lib/TestCommand'); -const CompileCommand = require('./CompileCommand'); -const RunCommand = require('./RunCommand'); -const Config = require('../../lib/config'); +const chokidar = require("chokidar"); +const path = require("path"); +const chalk = require("chalk").default || require("chalk"); +const TestCommand = require("../../lib/TestCommand"); +const CompileCommand = require("./CompileCommand"); +const RunCommand = require("./RunCommand"); +const Config = require("../../lib/config"); /** * Watch for test file changes and auto-run tests @@ -22,22 +22,30 @@ const Config = require('../../lib/config'); class WatchCommand extends TestCommand { constructor( databaseUrl, - serviceRoleKey = null, + serviceRoleKey = null, testsDir, outputDir, logger = null, isProd = false, - pathResolver = null + pathResolver = null, ) { - super(databaseUrl, serviceRoleKey, testsDir, outputDir, logger, isProd, pathResolver); - + super( + databaseUrl, + serviceRoleKey, + testsDir, + outputDir, + logger, + isProd, + pathResolver, + ); + // Watch configuration this.debounceMs = 1000; // Default debounce delay this.isRunning = false; this.pendingTimeout = null; this.watcher = null; this.initialScanComplete = false; - + // Commands for auto-compilation and running this.compileCommand = null; this.runCommand = null; @@ -47,48 +55,50 @@ class WatchCommand extends TestCommand { * Execute test watch mode */ async performExecute(options = {}) { - this.emit('watch:start', { - isProd: this.isProd, + this.emit("watch:start", { + isProd: this.isProd, testsDir: this.testsDir, outputDir: this.outputDir, - options + options, }); - + try { // Load test configuration const testConfig = await this._getTestConfig(); - + // Configure debounce delay from options or config this.debounceMs = options.debounce || testConfig.debounce_delay || 1000; - this.autoCompile = options.autoCompile !== undefined ? options.autoCompile : testConfig.auto_compile; - - this.progress('Starting test watch mode...'); - + this.autoCompile = + options.autoCompile !== undefined + ? 
options.autoCompile + : testConfig.auto_compile; + + this.progress("Starting test watch mode..."); + // Initialize compilation and run commands await this._initializeCommands(options); - + // Ensure test directory exists const watchDir = await this.getTestsDir(); this.success(`Watching for changes in: ${watchDir}`); - + // Setup file watcher await this._setupWatcher(watchDir, options); - + // Run initial test cycle - await this._runTestCycle('Initial run'); - - this.progress(chalk.cyan('\n🔍 Watching for test file changes...')); - this.progress(chalk.gray('Press Ctrl+C to stop watching\n')); - + await this._runTestCycle("Initial run"); + + this.progress(chalk.cyan("\n🔍 Watching for test file changes...")); + this.progress(chalk.gray("Press Ctrl+C to stop watching\n")); + // Keep the process alive and listen for signals await this._waitForInterrupt(); - - this.emit('watch:complete', { message: 'Test watch stopped' }); - return { success: true, message: 'Test watch stopped' }; - + + this.emit("watch:complete", { message: "Test watch stopped" }); + return { success: true, message: "Test watch stopped" }; } catch (error) { - this.error('Failed to start test watcher', error); - this.emit('watch:failed', { error }); + this.error("Failed to start test watcher", error); + this.emit("watch:failed", { error }); throw error; } finally { await this._cleanup(); @@ -100,16 +110,16 @@ class WatchCommand extends TestCommand { * @private */ async _initializeCommands(options) { - this.progress('Initializing test commands...'); - + this.progress("Initializing test commands..."); + // Create compile command instance this.compileCommand = new CompileCommand( this.testsDir, - this.outputDir, + this.outputDir, this.logger, - this.isProd + this.isProd, ); - + // Create run command instance this.runCommand = new RunCommand( this.databaseUrl, @@ -117,9 +127,9 @@ class WatchCommand extends TestCommand { this.testsDir, this.outputDir, this.logger, - this.isProd + this.isProd, ); - + // Forward events from child commands this._forwardCommandEvents(); } @@ -129,35 +139,35 @@ class WatchCommand extends TestCommand { * @private */ async _setupWatcher(watchDir, options) { - const watchPattern = path.join(watchDir, '**/*.sql'); - + const watchPattern = path.join(watchDir, "**/*.sql"); + this.watcher = chokidar.watch(watchPattern, { - ignored: /[\/\\]\./, // ignore dotfiles + ignored: /[\/\\]\./, // ignore dotfiles persistent: true, ignoreInitial: false, followSymlinks: false, - depth: 3 // reasonable depth limit + depth: 3, // reasonable depth limit }); - + // Handle file events this.watcher - .on('ready', () => { + .on("ready", () => { this.initialScanComplete = true; - this.logger.debug('File watcher ready'); + this.logger.debug("File watcher ready"); }) - .on('add', (filePath) => { + .on("add", (filePath) => { if (this.initialScanComplete) { - this._handleFileChange('added', filePath); + this._handleFileChange("added", filePath); } }) - .on('change', (filePath) => { - this._handleFileChange('changed', filePath); + .on("change", (filePath) => { + this._handleFileChange("changed", filePath); }) - .on('unlink', (filePath) => { - this._handleFileChange('removed', filePath); + .on("unlink", (filePath) => { + this._handleFileChange("removed", filePath); }) - .on('error', (error) => { - this.error('File watcher error', error); + .on("error", (error) => { + this.error("File watcher error", error); }); } @@ -167,21 +177,21 @@ class WatchCommand extends TestCommand { */ _handleFileChange(eventType, filePath) { const relativePath = 
path.relative(this.testsDir, filePath); - - this.emit('watch:file_change', { + + this.emit("watch:file_change", { eventType, file: relativePath, fullPath: filePath, - timestamp: new Date().toISOString() + timestamp: new Date().toISOString(), }); - + this.progress(chalk.blue(`📄 ${eventType}: ${relativePath}`)); - + // Clear existing timeout if (this.pendingTimeout) { clearTimeout(this.pendingTimeout); } - + // Debounce the test run this.pendingTimeout = setTimeout(async () => { await this._runTestCycle(`File ${eventType}: ${relativePath}`); @@ -195,72 +205,73 @@ class WatchCommand extends TestCommand { */ async _runTestCycle(trigger) { if (this.isRunning) { - this.logger.debug('Test cycle already running, skipping'); + this.logger.debug("Test cycle already running, skipping"); return; } - + this.isRunning = true; - + try { // Clear console for clean output this._clearConsole(); - - this.emit('watch:cycle_start', { + + this.emit("watch:cycle_start", { trigger, - timestamp: new Date().toISOString() + timestamp: new Date().toISOString(), }); - + const cycleStartTime = Date.now(); this.progress(chalk.yellow(`🔄 ${trigger} - Running test cycle...`)); - + // Step 1: Compile tests (if auto_compile is enabled) let compileResult = null; if (this.autoCompile) { - this.progress('📦 Compiling tests...'); + this.progress("📦 Compiling tests..."); compileResult = await this.compileCommand.performExecute(); - + if (!compileResult.success) { - throw new Error('Test compilation failed'); + throw new Error("Test compilation failed"); } - - this.success(`✓ Compilation complete: ${compileResult.stats.filesProcessed} files`); + + this.success( + `✓ Compilation complete: ${compileResult.stats.filesProcessed} files`, + ); } else { - this.progress('⏭️ Skipping compilation (auto_compile disabled)'); + this.progress("⏭️ Skipping compilation (auto_compile disabled)"); compileResult = { success: true, stats: { filesProcessed: 0 } }; } - - // Step 2: Run tests - this.progress('🧪 Running tests...'); + + // Step 2: Run tests + this.progress("🧪 Running tests..."); const runResult = await this.runCommand.performExecute(); - + const cycleEndTime = Date.now(); const cycleDuration = cycleEndTime - cycleStartTime; - + // Display summary this._displayCycleSummary(runResult, cycleDuration); - - this.emit('watch:cycle_complete', { + + this.emit("watch:cycle_complete", { trigger, compileResult, runResult, duration: cycleDuration, - timestamp: new Date().toISOString() + timestamp: new Date().toISOString(), }); - } catch (error) { - this.error('Test cycle failed', error); - - this.emit('watch:cycle_failed', { + this.error("Test cycle failed", error); + + this.emit("watch:cycle_failed", { trigger, error, - timestamp: new Date().toISOString() + timestamp: new Date().toISOString(), }); } finally { this.isRunning = false; - + // Separator for next cycle - console.log(chalk.gray('─'.repeat(60))); - console.log(chalk.cyan('🔍 Watching for changes...')); + console.log(chalk.gray("─".repeat(60))); + console.log(chalk.cyan("🔍 Watching for changes...")); } } @@ -270,13 +281,13 @@ class WatchCommand extends TestCommand { */ _clearConsole() { // Clear console but preserve some context - process.stdout.write('\x1Bc'); // Clear screen - + process.stdout.write("\x1Bc"); // Clear screen + // Re-display header - console.log(chalk.bold.cyan('⛰️ data Test Watcher')); + console.log(chalk.bold.cyan("⛰️ data Test Watcher")); console.log(chalk.gray(`Watching: ${this.testsDir}`)); console.log(chalk.gray(`Output: ${this.outputDir}`)); - console.log(''); + 
console.log(""); } /** @@ -285,22 +296,26 @@ class WatchCommand extends TestCommand { */ _displayCycleSummary(runResult, duration) { const { total, passed, failed, skipped } = runResult; - - console.log(''); - console.log(chalk.bold('📊 Test Results Summary:')); - + + console.log(""); + console.log(chalk.bold("📊 Test Results Summary:")); + if (failed > 0) { console.log(chalk.red(` ✗ ${failed}/${total} tests failed`)); } else if (skipped > 0) { - console.log(chalk.yellow(` ⚠ ${passed}/${total} tests passed (${skipped} skipped)`)); + console.log( + chalk.yellow( + ` ⚠ ${passed}/${total} tests passed (${skipped} skipped)`, + ), + ); } else if (total > 0) { console.log(chalk.green(` ✓ All ${passed}/${total} tests passed`)); } else { - console.log(chalk.gray(' No tests executed')); + console.log(chalk.gray(" No tests executed")); } - + console.log(chalk.gray(` ⏱ Completed in ${duration}ms`)); - console.log(''); + console.log(""); } /** @@ -309,30 +324,30 @@ class WatchCommand extends TestCommand { */ _forwardCommandEvents() { if (this.compileCommand) { - this.compileCommand.on('compilation:start', (data) => { - this.emit('watch:compilation_start', data); + this.compileCommand.on("compilation:start", (data) => { + this.emit("watch:compilation_start", data); }); - - this.compileCommand.on('compilation:complete', (data) => { - this.emit('watch:compilation_complete', data); + + this.compileCommand.on("compilation:complete", (data) => { + this.emit("watch:compilation_complete", data); }); - - this.compileCommand.on('compilation:failed', (data) => { - this.emit('watch:compilation_failed', data); + + this.compileCommand.on("compilation:failed", (data) => { + this.emit("watch:compilation_failed", data); }); } - + if (this.runCommand) { - this.runCommand.on('start', (data) => { - this.emit('watch:run_start', data); + this.runCommand.on("start", (data) => { + this.emit("watch:run_start", data); }); - - this.runCommand.on('complete', (data) => { - this.emit('watch:run_complete', data); + + this.runCommand.on("complete", (data) => { + this.emit("watch:run_complete", data); }); - - this.runCommand.on('failed', (data) => { - this.emit('watch:run_failed', data); + + this.runCommand.on("failed", (data) => { + this.emit("watch:run_failed", data); }); } } @@ -344,12 +359,12 @@ class WatchCommand extends TestCommand { async _waitForInterrupt() { return new Promise((resolve) => { const handleSignal = () => { - console.log(chalk.yellow('\n⏹ Stopping test watcher...')); + console.log(chalk.yellow("\n⏹ Stopping test watcher...")); resolve(); }; - - process.on('SIGINT', handleSignal); - process.on('SIGTERM', handleSignal); + + process.on("SIGINT", handleSignal); + process.on("SIGTERM", handleSignal); }); } @@ -362,13 +377,13 @@ class WatchCommand extends TestCommand { clearTimeout(this.pendingTimeout); this.pendingTimeout = null; } - + if (this.watcher) { await this.watcher.close(); this.watcher = null; } - - this.logger.debug('Test watcher cleanup complete'); + + this.logger.debug("Test watcher cleanup complete"); } /** @@ -376,8 +391,8 @@ class WatchCommand extends TestCommand { * @param {number} ms - Milliseconds to debounce */ setDebounceDelay(ms) { - if (typeof ms !== 'number' || ms < 0) { - throw new Error('Debounce delay must be a non-negative number'); + if (typeof ms !== "number" || ms < 0) { + throw new Error("Debounce delay must be a non-negative number"); } this.debounceMs = ms; } @@ -393,7 +408,7 @@ class WatchCommand extends TestCommand { debounceMs: this.debounceMs, testsDir: this.testsDir, outputDir: 
this.outputDir, - hasPendingChanges: !!this.pendingTimeout + hasPendingChanges: !!this.pendingTimeout, }; } @@ -406,11 +421,11 @@ class WatchCommand extends TestCommand { const config = await Config.load(); return config.getTestConfig(); } catch (error) { - this.logger.debug('Could not load config file, using defaults'); + this.logger.debug("Could not load config file, using defaults"); const defaultConfig = new Config(); return defaultConfig.getTestConfig(); } } } -module.exports = WatchCommand; \ No newline at end of file +module.exports = WatchCommand; diff --git a/src/commands/test/ci/CICoverageCommand.js b/src/commands/test/ci/CICoverageCommand.js index cce7c7d..4e3c5db 100644 --- a/src/commands/test/ci/CICoverageCommand.js +++ b/src/commands/test/ci/CICoverageCommand.js @@ -1,11 +1,11 @@ /** * CI Coverage Command - CI-optimized test coverage analysis - * + * * Wraps CoverageCommand with machine-friendly output, JSON reports, * and proper exit codes for CI/CD environments. */ -const CoverageCommand = require('../CoverageCommand'); +const CoverageCommand = require("../CoverageCommand"); /** * CI-friendly test coverage analysis with structured output @@ -13,7 +13,7 @@ const CoverageCommand = require('../CoverageCommand'); class CICoverageCommand extends CoverageCommand { constructor(config = null, logger = null, isProd = false) { super(config, logger, isProd); - + // Force CI mode behavior this.ciMode = true; this.suppressProgress = true; @@ -24,34 +24,34 @@ class CICoverageCommand extends CoverageCommand { */ async performExecute(options = {}) { const startTime = Date.now(); - const isCI = process.env.CI !== 'false'; - + const isCI = process.env.CI !== "false"; + // Parse enforcement options with CI-friendly defaults const ciOptions = { enforce: options.enforce || false, - minCoverage: parseInt(options.minCoverage || '80', 10), - minRpcCoverage: parseInt(options.minRpcCoverage || '75', 10), - minRlsCoverage: parseInt(options.minRlsCoverage || '70', 10), - format: options.format || 'json', - output: options.output || (isCI ? 'coverage' : null) + minCoverage: parseInt(options.minCoverage || "80", 10), + minRpcCoverage: parseInt(options.minRpcCoverage || "75", 10), + minRlsCoverage: parseInt(options.minRlsCoverage || "70", 10), + format: options.format || "json", + output: options.output || (isCI ? 
"coverage" : null), }; - + try { // Emit structured start event - this.emitCIEvent('coverage_analysis_started', { + this.emitCIEvent("coverage_analysis_started", { options: ciOptions, - timestamp: new Date().toISOString() + timestamp: new Date().toISOString(), }); - + // Execute coverage analysis using parent class logic const results = await super.performExecute(ciOptions); - + // Calculate execution time const duration = Date.now() - startTime; - + // Generate CI-friendly report const ciReport = this.generateCIReport(results, duration, ciOptions); - + // Output report (structured for CI consumption) if (isCI) { // Machine-readable JSON output for CI @@ -60,53 +60,55 @@ class CICoverageCommand extends CoverageCommand { // Human-readable for local development this.displayCIReport(ciReport); } - + // Write CI artifacts await this.writeCIArtifacts(results, ciReport, ciOptions); - + // Handle enforcement results const success = this.handleEnforcement(ciReport, ciOptions); - + // Emit structured completion event - this.emitCIEvent('coverage_analysis_completed', { + this.emitCIEvent("coverage_analysis_completed", { success, duration, - summary: ciReport.summary + summary: ciReport.summary, }); - + // Set proper exit code const exitCode = success ? 0 : 1; process.exitCode = exitCode; - + return ciReport; - } catch (error) { const duration = Date.now() - startTime; - + // Structured error output const errorReport = { - status: 'error', + status: "error", error: { message: error.message, - type: error.constructor.name + type: error.constructor.name, }, duration, - timestamp: new Date().toISOString() + timestamp: new Date().toISOString(), }; - + if (isCI) { console.error(JSON.stringify(errorReport, null, 2)); } else { console.error(`COVERAGE_ANALYSIS_ERROR: ${error.message}`); } - - this.emitCIEvent('coverage_analysis_failed', { error: error.message, duration }); - + + this.emitCIEvent("coverage_analysis_failed", { + error: error.message, + duration, + }); + process.exitCode = 1; throw error; } } - + /** * Generate CI-friendly coverage report * @param {Object} results - Coverage results from parent class @@ -116,39 +118,48 @@ class CICoverageCommand extends CoverageCommand { */ generateCIReport(results, duration, options) { // Generate stats using parent class analyzer - const stats = this.analyzer.generateCoverageStats(results.rpc, results.policies); - + const stats = this.analyzer.generateCoverageStats( + results.rpc, + results.policies, + ); + // Calculate enforcement status if enabled let enforcement = null; if (options.enforce) { enforcement = this.calculateEnforcement(stats, options); } - + return { - status: enforcement ? (enforcement.passed ? 'passed' : 'failed') : 'analyzed', + status: enforcement + ? enforcement.passed + ? "passed" + : "failed" + : "analyzed", summary: { overall: stats.overall || { total: 0, covered: 0, percentage: 0 }, rpcFunctions: stats.rpc || { total: 0, covered: 0, percentage: 0 }, - rlsPolicies: stats.policies || { total: 0, covered: 0, percentage: 0 } + rlsPolicies: stats.policies || { total: 0, covered: 0, percentage: 0 }, }, details: { uncoveredRpcFunctions: this.extractUncoveredRpc(results.rpc), uncoveredPolicies: this.extractUncoveredPolicies(results.policies), - coverageBreakdown: this.generateBreakdown(results) + coverageBreakdown: this.generateBreakdown(results), }, enforcement: enforcement, execution: { duration, timestamp: new Date().toISOString(), - thresholds: options.enforce ? 
{ - overall: options.minCoverage, - rpcFunctions: options.minRpcCoverage, - rlsPolicies: options.minRlsCoverage - } : null - } + thresholds: options.enforce + ? { + overall: options.minCoverage, + rpcFunctions: options.minRpcCoverage, + rlsPolicies: options.minRlsCoverage, + } + : null, + }, }; } - + /** * Calculate enforcement results * @param {Object} stats - Coverage statistics @@ -158,51 +169,51 @@ class CICoverageCommand extends CoverageCommand { calculateEnforcement(stats, options) { const failures = []; let passed = true; - + // Check overall coverage if (stats.overall && stats.overall.percentage < options.minCoverage) { failures.push({ - type: 'overall', + type: "overall", actual: stats.overall.percentage, expected: options.minCoverage, - message: `Overall coverage ${stats.overall.percentage}% below threshold ${options.minCoverage}%` + message: `Overall coverage ${stats.overall.percentage}% below threshold ${options.minCoverage}%`, }); passed = false; } - + // Check RPC coverage if (stats.rpc && stats.rpc.percentage < options.minRpcCoverage) { failures.push({ - type: 'rpc', + type: "rpc", actual: stats.rpc.percentage, expected: options.minRpcCoverage, - message: `RPC function coverage ${stats.rpc.percentage}% below threshold ${options.minRpcCoverage}%` + message: `RPC function coverage ${stats.rpc.percentage}% below threshold ${options.minRpcCoverage}%`, }); passed = false; } - + // Check RLS policy coverage if (stats.policies && stats.policies.percentage < options.minRlsCoverage) { failures.push({ - type: 'rls', + type: "rls", actual: stats.policies.percentage, expected: options.minRlsCoverage, - message: `RLS policy coverage ${stats.policies.percentage}% below threshold ${options.minRlsCoverage}%` + message: `RLS policy coverage ${stats.policies.percentage}% below threshold ${options.minRlsCoverage}%`, }); passed = false; } - + return { passed, failures, thresholds: { overall: options.minCoverage, rpc: options.minRpcCoverage, - rls: options.minRlsCoverage - } + rls: options.minRlsCoverage, + }, }; } - + /** * Extract uncovered RPC functions * @param {Object} rpcResults - RPC analysis results @@ -210,16 +221,16 @@ class CICoverageCommand extends CoverageCommand { */ extractUncoveredRpc(rpcResults) { if (!rpcResults || !Array.isArray(rpcResults)) return []; - + return rpcResults - .filter(rpc => !rpc.has_tests || rpc.has_tests === false) - .map(rpc => ({ + .filter((rpc) => !rpc.has_tests || rpc.has_tests === false) + .map((rpc) => ({ schema: rpc.schema_name, function: rpc.function_name, - signature: rpc.function_signature || `${rpc.function_name}(...)` + signature: rpc.function_signature || `${rpc.function_name}(...)`, })); } - + /** * Extract uncovered RLS policies * @param {Object} policyResults - Policy analysis results @@ -227,17 +238,17 @@ class CICoverageCommand extends CoverageCommand { */ extractUncoveredPolicies(policyResults) { if (!policyResults || !Array.isArray(policyResults)) return []; - + return policyResults - .filter(policy => !policy.has_tests || policy.has_tests === false) - .map(policy => ({ + .filter((policy) => !policy.has_tests || policy.has_tests === false) + .map((policy) => ({ schema: policy.schema_name, table: policy.table_name, policy: policy.policy_name, - command: policy.command_type || 'unknown' + command: policy.command_type || "unknown", })); } - + /** * Generate detailed coverage breakdown * @param {Object} results - Coverage results @@ -246,12 +257,12 @@ class CICoverageCommand extends CoverageCommand { generateBreakdown(results) { const 
breakdown = { schemas: {}, - tables: {} + tables: {}, }; - + // Process RPC functions by schema if (results.rpc && Array.isArray(results.rpc)) { - results.rpc.forEach(rpc => { + results.rpc.forEach((rpc) => { const schema = rpc.schema_name; if (!breakdown.schemas[schema]) { breakdown.schemas[schema] = { total: 0, covered: 0, functions: [] }; @@ -262,14 +273,14 @@ class CICoverageCommand extends CoverageCommand { } breakdown.schemas[schema].functions.push({ name: rpc.function_name, - covered: rpc.has_tests || false + covered: rpc.has_tests || false, }); }); } - + // Process RLS policies by table if (results.policies && Array.isArray(results.policies)) { - results.policies.forEach(policy => { + results.policies.forEach((policy) => { const table = `${policy.schema_name}.${policy.table_name}`; if (!breakdown.tables[table]) { breakdown.tables[table] = { total: 0, covered: 0, policies: [] }; @@ -281,39 +292,47 @@ class CICoverageCommand extends CoverageCommand { breakdown.tables[table].policies.push({ name: policy.policy_name, command: policy.command_type, - covered: policy.has_tests || false + covered: policy.has_tests || false, }); }); } - + return breakdown; } - + /** * Display CI report in human-readable format (for local development) * @param {Object} report - CI report */ displayCIReport(report) { const { status, summary, enforcement } = report; - + console.log(`\nCOVERAGE_STATUS: ${status.toUpperCase()}`); - console.log(`OVERALL_COVERAGE: ${summary.overall.percentage}% (${summary.overall.covered}/${summary.overall.total})`); - console.log(`RPC_COVERAGE: ${summary.rpcFunctions.percentage}% (${summary.rpcFunctions.covered}/${summary.rpcFunctions.total})`); - console.log(`RLS_COVERAGE: ${summary.rlsPolicies.percentage}% (${summary.rlsPolicies.covered}/${summary.rlsPolicies.total})`); - + console.log( + `OVERALL_COVERAGE: ${summary.overall.percentage}% (${summary.overall.covered}/${summary.overall.total})`, + ); + console.log( + `RPC_COVERAGE: ${summary.rpcFunctions.percentage}% (${summary.rpcFunctions.covered}/${summary.rpcFunctions.total})`, + ); + console.log( + `RLS_COVERAGE: ${summary.rlsPolicies.percentage}% (${summary.rlsPolicies.covered}/${summary.rlsPolicies.total})`, + ); + if (enforcement) { - console.log(`\nENFORCEMENT: ${enforcement.passed ? 'PASSED' : 'FAILED'}`); + console.log(`\nENFORCEMENT: ${enforcement.passed ? 
"PASSED" : "FAILED"}`); if (enforcement.failures.length > 0) { - console.log('ENFORCEMENT_FAILURES:'); - enforcement.failures.forEach(failure => { - console.log(` ${failure.type}: ${failure.actual}% < ${failure.expected}%`); + console.log("ENFORCEMENT_FAILURES:"); + enforcement.failures.forEach((failure) => { + console.log( + ` ${failure.type}: ${failure.actual}% < ${failure.expected}%`, + ); }); } } - + console.log(`\nEXECUTION_TIME: ${report.execution.duration}ms`); } - + /** * Write CI artifacts (JSON reports, coverage files) * @param {Object} results - Full coverage results @@ -324,27 +343,31 @@ class CICoverageCommand extends CoverageCommand { try { if (this.outputDir) { // Write structured coverage report - await this.writeJSONArtifact(report, 'coverage-report.json'); - + await this.writeJSONArtifact(report, "coverage-report.json"); + // Write detailed results for further analysis - await this.writeJSONArtifact(results, 'coverage-details.json'); - + await this.writeJSONArtifact(results, "coverage-details.json"); + // Write enforcement results if enabled if (options.enforce && report.enforcement) { - await this.writeJSONArtifact(report.enforcement, 'coverage-enforcement.json'); + await this.writeJSONArtifact( + report.enforcement, + "coverage-enforcement.json", + ); } - + // Write coverage badges data for README/CI const badges = this.generateBadgeData(report.summary); - await this.writeJSONArtifact(badges, 'coverage-badges.json'); + await this.writeJSONArtifact(badges, "coverage-badges.json"); } - } catch (error) { // Don't fail coverage analysis if we can't write artifacts - console.error(`Warning: Could not write coverage artifacts: ${error.message}`); + console.error( + `Warning: Could not write coverage artifacts: ${error.message}`, + ); } } - + /** * Generate badge data for shields.io or similar services * @param {Object} summary - Coverage summary @@ -352,30 +375,30 @@ class CICoverageCommand extends CoverageCommand { */ generateBadgeData(summary) { const getColor = (percentage) => { - if (percentage >= 80) return 'brightgreen'; - if (percentage >= 60) return 'yellow'; - return 'red'; + if (percentage >= 80) return "brightgreen"; + if (percentage >= 60) return "yellow"; + return "red"; }; - + return { overall: { - label: 'coverage', + label: "coverage", message: `${summary.overall.percentage}%`, - color: getColor(summary.overall.percentage) + color: getColor(summary.overall.percentage), }, rpc: { - label: 'rpc-coverage', + label: "rpc-coverage", message: `${summary.rpcFunctions.percentage}%`, - color: getColor(summary.rpcFunctions.percentage) + color: getColor(summary.rpcFunctions.percentage), }, rls: { - label: 'rls-coverage', + label: "rls-coverage", message: `${summary.rlsPolicies.percentage}%`, - color: getColor(summary.rlsPolicies.percentage) - } + color: getColor(summary.rlsPolicies.percentage), + }, }; } - + /** * Write JSON artifact to output directory * @param {Object} data - Data to write @@ -384,13 +407,13 @@ class CICoverageCommand extends CoverageCommand { async writeJSONArtifact(data, filename) { try { const filePath = await this.getOutputFile(filename); - const fs = require('fs').promises; - await fs.writeFile(filePath, JSON.stringify(data, null, 2), 'utf8'); + const fs = require("fs").promises; + await fs.writeFile(filePath, JSON.stringify(data, null, 2), "utf8"); } catch (error) { throw new Error(`Failed to write ${filename}: ${error.message}`); } } - + /** * Handle enforcement logic and exit codes * @param {Object} report - CI report @@ -401,80 +424,86 @@ 
class CICoverageCommand extends CoverageCommand { if (!options.enforce || !report.enforcement) { return true; // No enforcement requested } - + return report.enforcement.passed; } - + /** * Emit structured CI events * @param {string} eventType - Type of event * @param {Object} data - Event data */ emitCIEvent(eventType, data) { - this.emit('ci:event', { + this.emit("ci:event", { type: eventType, - ...data + ...data, }); } - + /** * Override progress method to suppress output in CI mode */ progress(message) { // Only show progress if explicitly not in CI mode - if (process.env.CI === 'false') { + if (process.env.CI === "false") { super.progress(message); } } - + /** * Override warn method for structured CI output */ warn(message) { - if (process.env.CI !== 'false') { + if (process.env.CI !== "false") { // Structured warning for CI - console.error(JSON.stringify({ - level: 'warning', - message, - timestamp: new Date().toISOString() - })); + console.error( + JSON.stringify({ + level: "warning", + message, + timestamp: new Date().toISOString(), + }), + ); } else { super.warn(message); } } - + /** * Override error method for structured CI output */ error(message, error = null) { - if (process.env.CI !== 'false') { + if (process.env.CI !== "false") { // Structured error for CI - console.error(JSON.stringify({ - level: 'error', - message, - error: error ? error.message : null, - timestamp: new Date().toISOString() - })); + console.error( + JSON.stringify({ + level: "error", + message, + error: error ? error.message : null, + timestamp: new Date().toISOString(), + }), + ); } else { super.error(message, error); } } - + /** * Override success method for structured CI output */ success(message) { - if (process.env.CI !== 'false') { + if (process.env.CI !== "false") { // Structured success for CI - console.log(JSON.stringify({ - level: 'info', - message, - timestamp: new Date().toISOString() - })); + console.log( + JSON.stringify({ + level: "info", + message, + timestamp: new Date().toISOString(), + }), + ); } else { super.success(message); } } } -module.exports = CICoverageCommand; \ No newline at end of file +module.exports = CICoverageCommand; diff --git a/src/commands/test/ci/CIRunCommand.js b/src/commands/test/ci/CIRunCommand.js index f25e6cc..eb7a2e9 100644 --- a/src/commands/test/ci/CIRunCommand.js +++ b/src/commands/test/ci/CIRunCommand.js @@ -1,19 +1,26 @@ /** * CI Run Command - CI-optimized test execution - * + * * Wraps RunCommand with machine-friendly output, JUnit XML generation, * and proper exit codes for CI/CD environments. */ -const RunCommand = require('../RunCommand'); +const RunCommand = require("../RunCommand"); /** * CI-friendly test execution with structured output */ class CIRunCommand extends RunCommand { - constructor(databaseUrl, serviceRoleKey = null, testsDir, outputDir, logger = null, isProd = false) { + constructor( + databaseUrl, + serviceRoleKey = null, + testsDir, + outputDir, + logger = null, + isProd = false, + ) { super(databaseUrl, serviceRoleKey, testsDir, outputDir, logger, isProd); - + // Force CI mode behavior this.ciMode = true; this.suppressProgress = true; @@ -24,32 +31,32 @@ class CIRunCommand extends RunCommand { */ async performExecute(options = {}) { const startTime = Date.now(); - const isCI = process.env.CI !== 'false'; - + const isCI = process.env.CI !== "false"; + // Force machine-readable output by default in CI mode const ciOptions = { ...options, - format: options.format || (isCI ? 'junit' : 'console'), - output: options.output || (isCI ? 
'test-results' : null) + format: options.format || (isCI ? "junit" : "console"), + output: options.output || (isCI ? "test-results" : null), }; - + try { // Emit structured start event - this.emitCIEvent('test_run_started', { + this.emitCIEvent("test_run_started", { testsDir: this.testsDir, options: ciOptions, - timestamp: new Date().toISOString() + timestamp: new Date().toISOString(), }); - + // Execute tests using parent class logic const results = await super.performExecute(ciOptions); - + // Calculate execution time const duration = Date.now() - startTime; - + // Generate CI-friendly summary const ciSummary = this.generateCISummary(results, duration); - + // Output summary for CI if (isCI) { // Always output summary to stdout for CI parsing @@ -58,50 +65,49 @@ class CIRunCommand extends RunCommand { // Human-readable summary for local development this.displayCISummary(ciSummary); } - + // Write additional CI artifacts await this.writeCIArtifacts(results, ciSummary, ciOptions); - + // Emit structured completion event - this.emitCIEvent('test_run_completed', { + this.emitCIEvent("test_run_completed", { success: results.failed === 0, duration, - summary: ciSummary.summary + summary: ciSummary.summary, }); - + // Set proper exit code based on test results const exitCode = this.getExitCode(results); process.exitCode = exitCode; - + return ciSummary; - } catch (error) { const duration = Date.now() - startTime; - + // Structured error output const errorReport = { - status: 'error', + status: "error", error: { message: error.message, - type: error.constructor.name + type: error.constructor.name, }, duration, - timestamp: new Date().toISOString() + timestamp: new Date().toISOString(), }; - + if (isCI) { console.error(JSON.stringify(errorReport, null, 2)); } else { console.error(`TEST_RUN_ERROR: ${error.message}`); } - - this.emitCIEvent('test_run_failed', { error: error.message, duration }); - + + this.emitCIEvent("test_run_failed", { error: error.message, duration }); + process.exitCode = 1; throw error; } } - + /** * Generate CI-friendly test summary * @param {Object} results - Test results from parent class @@ -110,67 +116,67 @@ class CIRunCommand extends RunCommand { */ generateCISummary(results, duration) { const { total, passed, failed, skipped, testFunctions } = results; - + return { - status: failed > 0 ? 'failed' : 'passed', + status: failed > 0 ? "failed" : "passed", summary: { total, passed, failed, skipped, success: failed === 0, - functionCount: testFunctions.length + functionCount: testFunctions.length, }, - testFunctions: testFunctions.map(func => ({ + testFunctions: testFunctions.map((func) => ({ name: func.name, - status: func.success ? 'passed' : 'failed', + status: func.success ? 
"passed" : "failed", total: func.total, passed: func.passed, failed: func.failed, - skipped: func.skipped + skipped: func.skipped, })), failedTests: results.tests - .filter(test => test.status === 'fail') - .map(test => ({ + .filter((test) => test.status === "fail") + .map((test) => ({ description: test.description, function: test.function, - message: test.message || null + message: test.message || null, })), execution: { duration, timestamp: new Date().toISOString(), - testsDirectory: this.testsDir - } + testsDirectory: this.testsDir, + }, }; } - + /** * Display CI summary in human-readable format (for local development) * @param {Object} summary - CI summary */ displayCISummary(summary) { const { status, summary: stats, failedTests, execution } = summary; - + console.log(`\nTEST_RUN_STATUS: ${status.toUpperCase()}`); console.log(`TOTAL_TESTS: ${stats.total}`); console.log(`PASSED: ${stats.passed}`); console.log(`FAILED: ${stats.failed}`); console.log(`SKIPPED: ${stats.skipped}`); console.log(`SUCCESS: ${stats.success}`); - + if (failedTests.length > 0) { - console.log('\nFAILED_TESTS:'); - failedTests.forEach(test => { + console.log("\nFAILED_TESTS:"); + failedTests.forEach((test) => { console.log(` ${test.function}: ${test.description}`); if (test.message) { console.log(` ${test.message}`); } }); } - + console.log(`\nEXECUTION_TIME: ${execution.duration}ms`); } - + /** * Write CI artifacts (JUnit XML, JSON reports, etc.) * @param {Object} results - Full test results @@ -181,23 +187,22 @@ class CIRunCommand extends RunCommand { try { // Always write JSON summary for CI consumption if (this.outputDir) { - await this.writeJSONArtifact(summary, 'test-summary.json'); - + await this.writeJSONArtifact(summary, "test-summary.json"); + // Write detailed results if requested if (options.detailed !== false) { - await this.writeJSONArtifact(results, 'test-results.json'); + await this.writeJSONArtifact(results, "test-results.json"); } } - + // JUnit XML is handled by parent class via format option // JSON format is handled by parent class via format option - } catch (error) { // Don't fail tests if we can't write artifacts console.error(`Warning: Could not write CI artifacts: ${error.message}`); } } - + /** * Write JSON artifact to output directory * @param {Object} data - Data to write @@ -206,95 +211,101 @@ class CIRunCommand extends RunCommand { async writeJSONArtifact(data, filename) { try { const filePath = await this.getOutputFile(filename); - const fs = require('fs').promises; - await fs.writeFile(filePath, JSON.stringify(data, null, 2), 'utf8'); + const fs = require("fs").promises; + await fs.writeFile(filePath, JSON.stringify(data, null, 2), "utf8"); } catch (error) { throw new Error(`Failed to write ${filename}: ${error.message}`); } } - + /** * Emit structured CI events * @param {string} eventType - Type of event * @param {Object} data - Event data */ emitCIEvent(eventType, data) { - this.emit('ci:event', { + this.emit("ci:event", { type: eventType, - ...data + ...data, }); } - + /** * Override _displayResults to suppress console output in CI mode */ _displayResults(results) { // Only display results if explicitly not in CI mode - if (process.env.CI === 'false') { + if (process.env.CI === "false") { super._displayResults(results); } // In CI mode, output is handled by generateCISummary } - + /** * Override progress method to suppress output in CI mode */ progress(message) { // Only show progress if explicitly not in CI mode - if (process.env.CI === 'false') { + if (process.env.CI === 
"false") { super.progress(message); } } - + /** * Override warn method for structured CI output */ warn(message) { - if (process.env.CI !== 'false') { + if (process.env.CI !== "false") { // Structured warning for CI - console.error(JSON.stringify({ - level: 'warning', - message, - timestamp: new Date().toISOString() - })); + console.error( + JSON.stringify({ + level: "warning", + message, + timestamp: new Date().toISOString(), + }), + ); } else { super.warn(message); } } - + /** * Override error method for structured CI output */ error(message, error = null) { - if (process.env.CI !== 'false') { + if (process.env.CI !== "false") { // Structured error for CI - console.error(JSON.stringify({ - level: 'error', - message, - error: error ? error.message : null, - timestamp: new Date().toISOString() - })); + console.error( + JSON.stringify({ + level: "error", + message, + error: error ? error.message : null, + timestamp: new Date().toISOString(), + }), + ); } else { super.error(message, error); } } - + /** * Override success method for structured CI output */ success(message) { - if (process.env.CI !== 'false') { + if (process.env.CI !== "false") { // Structured success for CI - console.log(JSON.stringify({ - level: 'info', - message, - timestamp: new Date().toISOString() - })); + console.log( + JSON.stringify({ + level: "info", + message, + timestamp: new Date().toISOString(), + }), + ); } else { super.success(message); } } - + /** * Get detailed test metrics for CI reporting * @param {Object} results - Test results @@ -304,23 +315,23 @@ class CIRunCommand extends RunCommand { const metrics = { totalExecutionTime: 0, averageTestTime: 0, - testFunctionMetrics: [] + testFunctionMetrics: [], }; - + // Calculate per-function metrics if available if (results.testFunctions) { - results.testFunctions.forEach(func => { + results.testFunctions.forEach((func) => { metrics.testFunctionMetrics.push({ name: func.name, testCount: func.total, passRate: func.total > 0 ? (func.passed / func.total) * 100 : 0, - success: func.success + success: func.success, }); }); } - + return metrics; } } -module.exports = CIRunCommand; \ No newline at end of file +module.exports = CIRunCommand; diff --git a/src/commands/test/ci/CIValidateCommand.js b/src/commands/test/ci/CIValidateCommand.js index 87b0b2c..2fcc68d 100644 --- a/src/commands/test/ci/CIValidateCommand.js +++ b/src/commands/test/ci/CIValidateCommand.js @@ -1,11 +1,11 @@ /** * CI Validate Command - CI-optimized test validation - * + * * Wraps ValidateCommand with machine-friendly output and proper exit codes * for CI/CD environments. 
*/ -const ValidateCommand = require('../ValidateCommand'); +const ValidateCommand = require("../ValidateCommand"); /** * CI-friendly test validation with structured output @@ -18,10 +18,18 @@ class CIValidateCommand extends ValidateCommand { outputDir = null, logger = null, isProd = false, - pathResolver = null + pathResolver = null, ) { - super(databaseUrl, serviceRoleKey, testsDir, outputDir, logger, isProd, pathResolver); - + super( + databaseUrl, + serviceRoleKey, + testsDir, + outputDir, + logger, + isProd, + pathResolver, + ); + // Force CI mode behavior this.ciMode = true; this.suppressProgress = true; @@ -32,26 +40,26 @@ class CIValidateCommand extends ValidateCommand { */ async performExecute(options = {}) { const startTime = Date.now(); - + // Force silent mode unless explicitly disabled - const isCI = process.env.CI !== 'false'; - + const isCI = process.env.CI !== "false"; + try { // Emit structured start event - this.emitCIEvent('validation_started', { + this.emitCIEvent("validation_started", { testsDir: this.testsDir, - timestamp: new Date().toISOString() + timestamp: new Date().toISOString(), }); - + // Execute validation (parent class handles the logic) const results = await super.performExecute(options); - + // Calculate execution time const duration = Date.now() - startTime; - + // Generate CI-friendly report const ciReport = this.generateCIReport(results, duration); - + // Output report (structured for CI consumption) if (isCI) { // Machine-readable JSON output for CI @@ -60,51 +68,50 @@ class CIValidateCommand extends ValidateCommand { // Human-readable for local development this.displayCIReport(ciReport); } - + // Write results to file if outputDir provided if (this.outputDir) { - await this.writeCIResults(ciReport, 'validation-results.json'); + await this.writeCIResults(ciReport, "validation-results.json"); } - + // Emit structured completion event - this.emitCIEvent('validation_completed', { + this.emitCIEvent("validation_completed", { success: !results.hasErrors, duration, - summary: ciReport.summary + summary: ciReport.summary, }); - + // Set proper exit code process.exitCode = results.hasErrors ? 1 : 0; - + return ciReport; - } catch (error) { const duration = Date.now() - startTime; - + // Structured error output const errorReport = { - status: 'error', + status: "error", error: { message: error.message, - type: error.constructor.name + type: error.constructor.name, }, duration, - timestamp: new Date().toISOString() + timestamp: new Date().toISOString(), }; - + if (isCI) { console.error(JSON.stringify(errorReport, null, 2)); } else { console.error(`VALIDATION_ERROR: ${error.message}`); } - - this.emitCIEvent('validation_failed', { error: error.message, duration }); - + + this.emitCIEvent("validation_failed", { error: error.message, duration }); + process.exitCode = 1; throw error; } } - + /** * Generate CI-friendly report * @param {Object} results - Validation results from parent class @@ -112,85 +119,92 @@ class CIValidateCommand extends ValidateCommand { * @returns {Object} Structured CI report */ generateCIReport(results, duration) { - const { filesProcessed, syntaxErrors, pgTapIssues, structureWarnings, hasErrors } = results; - + const { + filesProcessed, + syntaxErrors, + pgTapIssues, + structureWarnings, + hasErrors, + } = results; + return { - status: hasErrors ? 'failed' : 'passed', + status: hasErrors ? 
"failed" : "passed", summary: { filesProcessed, - totalIssues: syntaxErrors.length + pgTapIssues.length + structureWarnings.length, + totalIssues: + syntaxErrors.length + pgTapIssues.length + structureWarnings.length, errors: syntaxErrors.length + pgTapIssues.length, warnings: structureWarnings.length, - hasErrors + hasErrors, }, details: { - syntaxErrors: syntaxErrors.map(error => ({ + syntaxErrors: syntaxErrors.map((error) => ({ file: error.fileName, line: error.lineNum, - severity: 'error', + severity: "error", message: error.message, - category: 'syntax' + category: "syntax", })), - pgTapIssues: pgTapIssues.map(issue => ({ + pgTapIssues: pgTapIssues.map((issue) => ({ file: issue.fileName, line: issue.lineNum, - severity: 'error', + severity: "error", message: issue.message, - category: 'pgtap' + category: "pgtap", })), - structureWarnings: structureWarnings.map(warning => ({ + structureWarnings: structureWarnings.map((warning) => ({ file: warning.fileName, line: warning.lineNum, - severity: 'warning', + severity: "warning", message: warning.message, - category: 'structure' - })) + category: "structure", + })), }, execution: { duration, timestamp: new Date().toISOString(), - testsDirectory: this.testsDir - } + testsDirectory: this.testsDir, + }, }; } - + /** * Display CI report in human-readable format (for local development) * @param {Object} report - CI report */ displayCIReport(report) { const { status, summary, details } = report; - + console.log(`\nVALIDATION_STATUS: ${status.toUpperCase()}`); console.log(`FILES_PROCESSED: ${summary.filesProcessed}`); console.log(`TOTAL_ISSUES: ${summary.totalIssues}`); console.log(`ERRORS: ${summary.errors}`); console.log(`WARNINGS: ${summary.warnings}`); - + if (details.syntaxErrors.length > 0) { - console.log('\nSYNTAX_ERRORS:'); - details.syntaxErrors.forEach(error => { + console.log("\nSYNTAX_ERRORS:"); + details.syntaxErrors.forEach((error) => { console.log(` ${error.file}:${error.line} - ${error.message}`); }); } - + if (details.pgTapIssues.length > 0) { - console.log('\nPGTAP_ISSUES:'); - details.pgTapIssues.forEach(issue => { + console.log("\nPGTAP_ISSUES:"); + details.pgTapIssues.forEach((issue) => { console.log(` ${issue.file}:${issue.line} - ${issue.message}`); }); } - + if (details.structureWarnings.length > 0) { - console.log('\nSTRUCTURE_WARNINGS:'); - details.structureWarnings.forEach(warning => { + console.log("\nSTRUCTURE_WARNINGS:"); + details.structureWarnings.forEach((warning) => { console.log(` ${warning.file}:${warning.line} - ${warning.message}`); }); } - + console.log(`\nEXECUTION_TIME: ${report.execution.duration}ms`); } - + /** * Write CI results to file * @param {Object} report - CI report @@ -199,84 +213,92 @@ class CIValidateCommand extends ValidateCommand { async writeCIResults(report, filename) { try { const filePath = await this.getOutputFile(filename); - const fs = require('fs').promises; - await fs.writeFile(filePath, JSON.stringify(report, null, 2), 'utf8'); + const fs = require("fs").promises; + await fs.writeFile(filePath, JSON.stringify(report, null, 2), "utf8"); } catch (error) { // Don't fail the entire validation if we can't write results - console.error(`Warning: Could not write validation results to file: ${error.message}`); + console.error( + `Warning: Could not write validation results to file: ${error.message}`, + ); } } - + /** * Emit structured CI events * @param {string} eventType - Type of event * @param {Object} data - Event data */ emitCIEvent(eventType, data) { - this.emit('ci:event', { + 
this.emit("ci:event", { type: eventType, - ...data + ...data, }); } - + /** * Override progress method to suppress output in CI mode */ progress(message) { // Only show progress if explicitly not in CI mode - if (process.env.CI === 'false') { + if (process.env.CI === "false") { super.progress(message); } } - + /** * Override warn method for structured CI output */ warn(message) { - if (process.env.CI !== 'false') { + if (process.env.CI !== "false") { // Structured warning for CI - console.error(JSON.stringify({ - level: 'warning', - message, - timestamp: new Date().toISOString() - })); + console.error( + JSON.stringify({ + level: "warning", + message, + timestamp: new Date().toISOString(), + }), + ); } else { super.warn(message); } } - + /** * Override error method for structured CI output */ error(message, error = null) { - if (process.env.CI !== 'false') { + if (process.env.CI !== "false") { // Structured error for CI - console.error(JSON.stringify({ - level: 'error', - message, - error: error ? error.message : null, - timestamp: new Date().toISOString() - })); + console.error( + JSON.stringify({ + level: "error", + message, + error: error ? error.message : null, + timestamp: new Date().toISOString(), + }), + ); } else { super.error(message, error); } } - + /** * Override success method for structured CI output */ success(message) { - if (process.env.CI !== 'false') { + if (process.env.CI !== "false") { // Structured success for CI - console.log(JSON.stringify({ - level: 'info', - message, - timestamp: new Date().toISOString() - })); + console.log( + JSON.stringify({ + level: "info", + message, + timestamp: new Date().toISOString(), + }), + ); } else { super.success(message); } } } -module.exports = CIValidateCommand; \ No newline at end of file +module.exports = CIValidateCommand; diff --git a/src/commands/test/index.js b/src/commands/test/index.js index a8f499b..b45d63d 100644 --- a/src/commands/test/index.js +++ b/src/commands/test/index.js @@ -2,20 +2,20 @@ * Test Commands for data CLI */ -const CompileCommand = require('./CompileCommand'); -const RunCommand = require('./RunCommand'); -const DevCycleCommand = require('./DevCycleCommand'); -const CoverageCommand = require('./CoverageCommand'); -const WatchCommand = require('./WatchCommand'); -const ValidateCommand = require('./ValidateCommand'); -const GenerateCommand = require('./GenerateCommand'); -const GenerateTemplateCommand = require('./GenerateTemplateCommand'); -const CacheCommand = require('./CacheCommand'); +const CompileCommand = require("./CompileCommand"); +const RunCommand = require("./RunCommand"); +const DevCycleCommand = require("./DevCycleCommand"); +const CoverageCommand = require("./CoverageCommand"); +const WatchCommand = require("./WatchCommand"); +const ValidateCommand = require("./ValidateCommand"); +const GenerateCommand = require("./GenerateCommand"); +const GenerateTemplateCommand = require("./GenerateTemplateCommand"); +const CacheCommand = require("./CacheCommand"); // CI Commands for automated testing -const CIValidateCommand = require('./ci/CIValidateCommand'); -const CIRunCommand = require('./ci/CIRunCommand'); -const CICoverageCommand = require('./ci/CICoverageCommand'); +const CIValidateCommand = require("./ci/CIValidateCommand"); +const CIRunCommand = require("./ci/CIRunCommand"); +const CICoverageCommand = require("./ci/CICoverageCommand"); module.exports = { CompileCommand, @@ -30,5 +30,5 @@ module.exports = { // CI Commands CIValidateCommand, CIRunCommand, - CICoverageCommand -}; \ No newline at 
end of file + CICoverageCommand, +}; diff --git a/src/index.js b/src/index.js index b8fed1b..d204494 100644 --- a/src/index.js +++ b/src/index.js @@ -2,9 +2,9 @@ * data CLI Main Entry Point */ -const { Command } = require('commander'); -const { displayLogo } = require('./ui/logo'); -const { version } = require('../package.json'); +const { Command } = require("commander"); +const { displayLogo } = require("./ui/logo"); +const { version } = require("../package.json"); // Note: Commands are loaded dynamically in their respective action handlers @@ -13,47 +13,51 @@ const { version } = require('../package.json'); */ async function cli(argv) { // Check if this is a help request or no arguments (which shows help) - const isHelpRequest = argv.includes('--help') || argv.includes('-h') || argv.length <= 2; - + const isHelpRequest = + argv.includes("--help") || argv.includes("-h") || argv.length <= 2; + // Display logo for interactive sessions and help requests if ((process.stdout.isTTY && !process.env.CI) || isHelpRequest) { await displayLogo(); } - + // Configuration now handled via CLI args and env vars - + // Create main command const program = new Command(); - + // Initialize paths and database credentials in preAction hook let paths = null; let databaseUrl = null; let serviceRoleKey = null; let anonKey = null; let outputConfig = null; - - program.hook('preAction', (thisCommand) => { + + program.hook("preAction", (thisCommand) => { const opts = thisCommand.opts(); - + // Collect path options paths = { // Input paths - sqlDir: opts.sqlDir || process.env.data_SQL_DIR || './sql', - testsDir: opts.testsDir || process.env.data_TESTS_DIR || './tests', - functionsDir: opts.functionsDir || process.env.data_FUNCTIONS_DIR || './functions', + sqlDir: opts.sqlDir || process.env.data_SQL_DIR || "./sql", + testsDir: opts.testsDir || process.env.data_TESTS_DIR || "./tests", + functionsDir: + opts.functionsDir || process.env.data_FUNCTIONS_DIR || "./functions", // Output paths - migrationsDir: opts.migrationsDir || process.env.data_MIGRATIONS_DIR || './migrations', - buildDir: opts.buildDir || process.env.data_BUILD_DIR || './build', - reportsDir: opts.reportsDir || process.env.data_REPORTS_DIR || './reports' + migrationsDir: + opts.migrationsDir || process.env.data_MIGRATIONS_DIR || "./migrations", + buildDir: opts.buildDir || process.env.data_BUILD_DIR || "./build", + reportsDir: + opts.reportsDir || process.env.data_REPORTS_DIR || "./reports", }; - + // Get database credentials from environment databaseUrl = process.env.DATABASE_URL || process.env.data_DATABASE_URL; serviceRoleKey = process.env.data_SERVICE_ROLE_KEY; anonKey = process.env.data_ANON_KEY; - + // Initialize OutputConfig - const OutputConfig = require('./lib/OutputConfig'); + const OutputConfig = require("./lib/OutputConfig"); outputConfig = new OutputConfig( opts.config, null, // cliSupabaseDir @@ -62,57 +66,69 @@ async function cli(argv) { paths.sqlDir, paths.functionsDir, paths.buildDir, - null // cliProjectRoot + null, // cliProjectRoot ); - + // Debug output if requested if (process.env.data_DEBUG_PATHS) { - console.log('data Path Configuration:'); - console.log('Input Paths:', { + console.log("data Path Configuration:"); + console.log("Input Paths:", { sqlDir: paths.sqlDir, testsDir: paths.testsDir, - functionsDir: paths.functionsDir + functionsDir: paths.functionsDir, }); - console.log('Output Paths:', { + console.log("Output Paths:", { migrationsDir: paths.migrationsDir, buildDir: paths.buildDir, - reportsDir: paths.reportsDir + 
reportsDir: paths.reportsDir, }); } }); - + program - .name('data') - .description('⛰️ Advanced Resource Command Hub for PostgreSQL') + .name("data") + .description("⛰️ Advanced Resource Command Hub for PostgreSQL") .version(version) - .option('--prod', 'Target production environment (requires confirmation for write operations)') - .option('--json', 'Output results as JSON') - .option('--no-color', 'Disable colored output') - .option('--config ', 'Path to configuration file (default: .datarc.json)') + .option( + "--prod", + "Target production environment (requires confirmation for write operations)", + ) + .option("--json", "Output results as JSON") + .option("--no-color", "Disable colored output") + .option( + "--config ", + "Path to configuration file (default: .datarc.json)", + ) // Input directories (for reading) - .option('--sql-dir ', 'Directory containing SQL source files') - .option('--tests-dir ', 'Directory containing test files') - .option('--functions-dir ', 'Directory containing function definitions') + .option("--sql-dir ", "Directory containing SQL source files") + .option("--tests-dir ", "Directory containing test files") + .option( + "--functions-dir ", + "Directory containing function definitions", + ) // Output directories (for writing) - .option('--migrations-dir ', 'Directory for migration output') - .option('--build-dir ', 'Directory for build artifacts') - .option('--reports-dir ', 'Directory for test reports and coverage'); - + .option("--migrations-dir ", "Directory for migration output") + .option("--build-dir ", "Directory for build artifacts") + .option("--reports-dir ", "Directory for test reports and coverage"); + // Add init command program - .command('init') - .description('Initialize a new D.A.T.A. project structure') - .option('--path ', 'Path to initialize project (default: current directory)') + .command("init") + .description("Initialize a new D.A.T.A. 
project structure") + .option( + "--path ", + "Path to initialize project (default: current directory)", + ) .action(async (options) => { - const InitCommand = require('./commands/InitCommand'); - const CliReporter = require('./reporters/CliReporter'); - + const InitCommand = require("./commands/InitCommand"); + const CliReporter = require("./reporters/CliReporter"); + const command = new InitCommand({ - path: options.path || process.cwd() + path: options.path || process.cwd(), }); const reporter = new CliReporter(program.opts().json); reporter.attach(command); - + try { await command.execute(); } catch (error) { @@ -123,29 +139,27 @@ async function cli(argv) { }); // Add database commands - const db = program - .command('db') - .description('Database operations'); - - db.command('reset') - .description('Reset the local database') + const db = program.command("db").description("Database operations"); + + db.command("reset") + .description("Reset the local database") .action(async (options) => { const parentOpts = program.opts(); - const ResetCommand = require('./commands/db/ResetCommand'); - const CliReporter = require('./reporters/CliReporter'); - + const ResetCommand = require("./commands/db/ResetCommand"); + const CliReporter = require("./reporters/CliReporter"); + const command = new ResetCommand( databaseUrl, serviceRoleKey, anonKey, null, // logger will be added by CliReporter - parentOpts.prod + parentOpts.prod, ); // ResetCommand needs access to outputConfig for supabase directory command.outputConfig = outputConfig; const reporter = new CliReporter(parentOpts.json); reporter.attach(command); - + try { await command.execute(); } catch (error) { @@ -154,25 +168,25 @@ async function cli(argv) { } } }); - - db.command('query ') - .description('Run an SQL query') - .option('-f, --file', 'Treat input as file path instead of SQL') + + db.command("query ") + .description("Run an SQL query") + .option("-f, --file", "Treat input as file path instead of SQL") .action(async (sql, options) => { const parentOpts = program.opts(); - const { QueryCommand } = require('./commands/db'); - const CliReporter = require('./reporters/CliReporter'); - + const { QueryCommand } = require("./commands/db"); + const CliReporter = require("./reporters/CliReporter"); + const command = new QueryCommand( databaseUrl, serviceRoleKey, anonKey, null, // logger will be added by CliReporter - parentOpts.prod + parentOpts.prod, ); const reporter = new CliReporter(parentOpts.json); reporter.attach(command); - + try { await command.execute(sql, options.file); } catch (error) { @@ -181,35 +195,46 @@ async function cli(argv) { } } }); - - db.command('compile') - .description('Compile SQL sources into migration with optional functions deployment') - .option('--deploy-functions', 'Deploy Edge Functions after successful compilation') - .option('--functions [names...]', 'Specific functions to deploy (comma-separated)') - .option('--skip-import-map', 'Skip using import map in production function deployment') - .option('--debug-functions', 'Enable debug output for function deployment') + + db.command("compile") + .description( + "Compile SQL sources into migration with optional functions deployment", + ) + .option( + "--deploy-functions", + "Deploy Edge Functions after successful compilation", + ) + .option( + "--functions [names...]", + "Specific functions to deploy (comma-separated)", + ) + .option( + "--skip-import-map", + "Skip using import map in production function deployment", + ) + .option("--debug-functions", "Enable 
debug output for function deployment") .action(async (options) => { const parentOpts = program.opts(); - const { CompileCommand } = require('./commands/db'); - const CliReporter = require('./reporters/CliReporter'); - + const { CompileCommand } = require("./commands/db"); + const CliReporter = require("./reporters/CliReporter"); + const command = new CompileCommand( paths.sqlDir, paths.migrationsDir, null, - parentOpts.prod + parentOpts.prod, ); const reporter = new CliReporter(parentOpts.json); reporter.attach(command); - + // Prepare compile options with functions deployment const compileOptions = { deployFunctions: options.deployFunctions, functionsToDeploy: options.functions || null, skipImportMap: options.skipImportMap, - debug: options.debugFunctions + debug: options.debugFunctions, }; - + try { await command.execute(compileOptions); } catch (error) { @@ -218,51 +243,55 @@ async function cli(argv) { } } }); - + // Add migrate subcommands const migrate = db - .command('migrate') - .description('Database migration management'); - - migrate.command('generate') - .description('Generate migration from schema diff') - .option('--name ', 'Migration name (required)') - .option('--skip-compile', 'Skip source compilation step') - .option('--dry-run', 'Show diff without saving migration') - .option('--current-db ', 'Current database URL (defaults to local)') - .option('--desired-db ', 'Desired database URL (defaults to compiled SQL)') + .command("migrate") + .description("Database migration management"); + + migrate + .command("generate") + .description("Generate migration from schema diff") + .option("--name ", "Migration name (required)") + .option("--skip-compile", "Skip source compilation step") + .option("--dry-run", "Show diff without saving migration") + .option("--current-db ", "Current database URL (defaults to local)") + .option( + "--desired-db ", + "Desired database URL (defaults to compiled SQL)", + ) .action(async (options) => { const parentOpts = program.opts(); - const MigrateGenerateCommand = require('./commands/db/migrate/generate'); - const CliReporter = require('./reporters/CliReporter'); - + const MigrateGenerateCommand = require("./commands/db/migrate/generate"); + const CliReporter = require("./reporters/CliReporter"); + const command = new MigrateGenerateCommand( null, // config will use default - null, // logger will be added by CliReporter - parentOpts.prod + null, // logger will be added by CliReporter + parentOpts.prod, ); const reporter = new CliReporter(parentOpts.json); reporter.attach(command); - + try { // Convert commander options to args array for our command const args = []; if (options.name) { - args.push('--name', options.name); + args.push("--name", options.name); } if (options.skipCompile) { - args.push('--skip-compile'); + args.push("--skip-compile"); } if (options.dryRun) { - args.push('--dry-run'); + args.push("--dry-run"); } if (options.currentDb) { - args.push('--current-db', options.currentDb); + args.push("--current-db", options.currentDb); } if (options.desiredDb) { - args.push('--desired-db', options.desiredDb); + args.push("--desired-db", options.desiredDb); } - + await command.execute(args); } catch (error) { if (!parentOpts.json) { @@ -271,23 +300,24 @@ async function cli(argv) { } }); - migrate.command('promote') - .description('Promote tested migration to production') - .option('-m, --migration ', 'Migration to promote', 'current') - .option('--no-git', 'Skip Git staging') + migrate + .command("promote") + .description("Promote tested 
migration to production") + .option("-m, --migration ", "Migration to promote", "current") + .option("--no-git", "Skip Git staging") .action(async (options) => { const parentOpts = program.opts(); - const MigratePromoteCommand = require('./commands/db/migrate/promote'); - const CliReporter = require('./reporters/CliReporter'); - + const MigratePromoteCommand = require("./commands/db/migrate/promote"); + const CliReporter = require("./reporters/CliReporter"); + const command = new MigratePromoteCommand( null, // config will use default null, // logger will be added by CliReporter - parentOpts.prod + parentOpts.prod, ); const reporter = new CliReporter(parentOpts.json); reporter.attach(command); - + try { await command.execute(options); } catch (error) { @@ -298,24 +328,29 @@ async function cli(argv) { }); // Add functions commands const functions = program - .command('functions') - .alias('fn') - .description('Edge Functions deployment and management'); - - functions.command('deploy [functions...]') - .description('Deploy Edge Functions to Supabase') - .option('--no-verify-jwt', 'Skip JWT verification during deployment') - .option('--debug', 'Enable debug output') - .option('--skip-import-map', 'Skip using import map in production') + .command("functions") + .alias("fn") + .description("Edge Functions deployment and management"); + + functions + .command("deploy [functions...]") + .description("Deploy Edge Functions to Supabase") + .option("--no-verify-jwt", "Skip JWT verification during deployment") + .option("--debug", "Enable debug output") + .option("--skip-import-map", "Skip using import map in production") .action(async (functionNames, options) => { const parentOpts = program.opts(); - const { DeployCommand } = require('./commands/functions'); - const CliReporter = require('./reporters/CliReporter'); - - const command = new DeployCommand(paths.functionsDir, null, parentOpts.prod); + const { DeployCommand } = require("./commands/functions"); + const CliReporter = require("./reporters/CliReporter"); + + const command = new DeployCommand( + paths.functionsDir, + null, + parentOpts.prod, + ); const reporter = new CliReporter(parentOpts.json); reporter.attach(command); - + try { await command.execute(functionNames, options); } catch (error) { @@ -325,22 +360,23 @@ async function cli(argv) { } }); - functions.command('validate [functions...]') - .description('Validate Edge Functions without deploying') + functions + .command("validate [functions...]") + .description("Validate Edge Functions without deploying") .action(async (functionNames, options) => { const parentOpts = program.opts(); - const { ValidateCommand } = require('./commands/functions'); - const CliReporter = require('./reporters/CliReporter'); - + const { ValidateCommand } = require("./commands/functions"); + const CliReporter = require("./reporters/CliReporter"); + const command = new ValidateCommand( paths.testsDir, paths.reportsDir, null, - parentOpts.prod + parentOpts.prod, ); const reporter = new CliReporter(parentOpts.json); reporter.attach(command); - + try { await command.execute(functionNames); } catch (error) { @@ -350,17 +386,22 @@ async function cli(argv) { } }); - functions.command('status [functions...]') - .description('Show Edge Functions deployment status') + functions + .command("status [functions...]") + .description("Show Edge Functions deployment status") .action(async (functionNames, options) => { const parentOpts = program.opts(); - const { StatusCommand } = require('./commands/functions'); - const 
CliReporter = require('./reporters/CliReporter'); - - const command = new StatusCommand(paths.functionsDir, null, parentOpts.prod); + const { StatusCommand } = require("./commands/functions"); + const CliReporter = require("./reporters/CliReporter"); + + const command = new StatusCommand( + paths.functionsDir, + null, + parentOpts.prod, + ); const reporter = new CliReporter(parentOpts.json); reporter.attach(command); - + try { await command.execute(functionNames); } catch (error) { @@ -372,25 +413,26 @@ async function cli(argv) { // Add test commands const test = program - .command('test') - .description('Database and application testing'); + .command("test") + .description("Database and application testing"); - test.command('compile') - .description('Compile tests for execution') + test + .command("compile") + .description("Compile tests for execution") .action(async (options) => { const parentOpts = program.opts(); - const { CompileCommand } = require('./commands/test'); - const CliReporter = require('./reporters/CliReporter'); - + const { CompileCommand } = require("./commands/test"); + const CliReporter = require("./reporters/CliReporter"); + const command = new CompileCommand( paths.testsDir, paths.migrationsDir, null, - parentOpts.prod + parentOpts.prod, ); const reporter = new CliReporter(parentOpts.json); reporter.attach(command); - + try { await command.execute(); } catch (error) { @@ -400,34 +442,39 @@ async function cli(argv) { } }); - test.command('run') - .description('Run compiled tests') - .option('--pattern ', 'Pattern to match test function names') - .option('--suite ', 'Run only tests in this suite') - .option('--tag ', 'Run only tests with this tag') - .option('--timeout ', 'Test timeout in milliseconds', '30000') - .option('--verbose', 'Verbose output') - .option('--format ', 'Output format (console, junit, json)', 'console') - .option('--output ', 'Output file for junit/json formats') + test + .command("run") + .description("Run compiled tests") + .option("--pattern ", "Pattern to match test function names") + .option("--suite ", "Run only tests in this suite") + .option("--tag ", "Run only tests with this tag") + .option("--timeout ", "Test timeout in milliseconds", "30000") + .option("--verbose", "Verbose output") + .option( + "--format ", + "Output format (console, junit, json)", + "console", + ) + .option("--output ", "Output file for junit/json formats") .action(async (options) => { const parentOpts = program.opts(); - const { RunCommand } = require('./commands/test'); - const CliReporter = require('./reporters/CliReporter'); - + const { RunCommand } = require("./commands/test"); + const CliReporter = require("./reporters/CliReporter"); + const command = new RunCommand( databaseUrl, serviceRoleKey, paths.testsDir, paths.reportsDir, null, - parentOpts.prod + parentOpts.prod, ); const reporter = new CliReporter(parentOpts.json); reporter.attach(command); - + try { const results = await command.execute(options); - + // Set proper exit code based on test results if (results && command.getExitCode) { const exitCode = command.getExitCode(results); @@ -442,32 +489,37 @@ async function cli(argv) { } }); - test.command('dev-cycle') - .description('Run full development cycle: compile → reset → test') - .option('--pattern ', 'Pattern to match test function names') - .option('--suite ', 'Run only tests in this suite') - .option('--tag ', 'Run only tests with this tag') - .option('--format ', 'Output format (console, junit, json)', 'console') - .option('--output ', 'Output 
file for junit/json formats') + test + .command("dev-cycle") + .description("Run full development cycle: compile → reset → test") + .option("--pattern ", "Pattern to match test function names") + .option("--suite ", "Run only tests in this suite") + .option("--tag ", "Run only tests with this tag") + .option( + "--format ", + "Output format (console, junit, json)", + "console", + ) + .option("--output ", "Output file for junit/json formats") .action(async (options) => { const parentOpts = program.opts(); - const DevCycleCommand = require('./commands/test/DevCycleCommand'); - const CliReporter = require('./reporters/CliReporter'); - + const DevCycleCommand = require("./commands/test/DevCycleCommand"); + const CliReporter = require("./reporters/CliReporter"); + const command = new DevCycleCommand( databaseUrl, serviceRoleKey, paths.testsDir, paths.migrationsDir, null, - parentOpts.prod + parentOpts.prod, ); const reporter = new CliReporter(parentOpts.json); reporter.attach(command); - + try { const results = await command.execute(options); - + // Set proper exit code based on test results if (results && command.getExitCode) { const exitCode = command.getExitCode(results); @@ -482,30 +534,46 @@ async function cli(argv) { } }); - test.command('coverage') - .description('Generate test coverage reports') - .option('--format ', 'Output format (html, json, lcov)', 'html') - .option('--output ', 'Output directory', 'coverage') - .option('--enforce', 'Enforce coverage thresholds (exits with code 1 if below threshold)') - .option('--min-coverage ', 'Minimum overall coverage percentage', '80') - .option('--min-rpc-coverage ', 'Minimum RPC function coverage percentage', '75') - .option('--min-rls-coverage ', 'Minimum RLS policy coverage percentage', '70') + test + .command("coverage") + .description("Generate test coverage reports") + .option("--format ", "Output format (html, json, lcov)", "html") + .option("--output ", "Output directory", "coverage") + .option( + "--enforce", + "Enforce coverage thresholds (exits with code 1 if below threshold)", + ) + .option( + "--min-coverage ", + "Minimum overall coverage percentage", + "80", + ) + .option( + "--min-rpc-coverage ", + "Minimum RPC function coverage percentage", + "75", + ) + .option( + "--min-rls-coverage ", + "Minimum RLS policy coverage percentage", + "70", + ) .action(async (options) => { const parentOpts = program.opts(); - const { CoverageCommand } = require('./commands/test'); - const CliReporter = require('./reporters/CliReporter'); - + const { CoverageCommand } = require("./commands/test"); + const CliReporter = require("./reporters/CliReporter"); + const command = new CoverageCommand( databaseUrl, serviceRoleKey, paths.testsDir, paths.reportsDir, null, - parentOpts.prod + parentOpts.prod, ); const reporter = new CliReporter(parentOpts.json); reporter.attach(command); - + try { await command.execute(options); } catch (error) { @@ -515,26 +583,27 @@ async function cli(argv) { } }); - test.command('watch') - .description('Watch for changes and re-run tests') - .option('--pattern ', 'Pattern to match test files') - .option('--ignore ', 'Pattern to ignore files') + test + .command("watch") + .description("Watch for changes and re-run tests") + .option("--pattern ", "Pattern to match test files") + .option("--ignore ", "Pattern to ignore files") .action(async (options) => { const parentOpts = program.opts(); - const { WatchCommand } = require('./commands/test'); - const CliReporter = require('./reporters/CliReporter'); - + const { WatchCommand 
} = require("./commands/test"); + const CliReporter = require("./reporters/CliReporter"); + const command = new WatchCommand( databaseUrl, serviceRoleKey, paths.testsDir, paths.migrationsDir, null, - parentOpts.prod + parentOpts.prod, ); const reporter = new CliReporter(parentOpts.json); reporter.attach(command); - + try { await command.execute(options); } catch (error) { @@ -544,25 +613,26 @@ async function cli(argv) { } }); - test.command('validate') - .description('Validate test configuration and setup') - .option('--fix', 'Attempt to fix validation issues') + test + .command("validate") + .description("Validate test configuration and setup") + .option("--fix", "Attempt to fix validation issues") .action(async (options) => { const parentOpts = program.opts(); - const { ValidateCommand } = require('./commands/test'); - const CliReporter = require('./reporters/CliReporter'); - + const { ValidateCommand } = require("./commands/test"); + const CliReporter = require("./reporters/CliReporter"); + const command = new ValidateCommand( databaseUrl, serviceRoleKey, paths.testsDir, paths.reportsDir, null, - parentOpts.prod + parentOpts.prod, ); const reporter = new CliReporter(parentOpts.json); reporter.attach(command); - + try { await command.execute(options); } catch (error) { @@ -572,37 +642,42 @@ async function cli(argv) { } }); - test.command('generate') - .description('Generate pgTAP test templates for RPC functions and RLS policies') - .option('--rpc ', 'Generate RPC function test template') - .option('--rls ', 'Generate RLS policy test template') + test + .command("generate") + .description( + "Generate pgTAP test templates for RPC functions and RLS policies", + ) + .option("--rpc ", "Generate RPC function test template") + .option("--rls ", "Generate RLS policy test template") .action(async (options) => { const parentOpts = program.opts(); - const { GenerateCommand } = require('./commands/test'); - const CliReporter = require('./reporters/CliReporter'); - + const { GenerateCommand } = require("./commands/test"); + const CliReporter = require("./reporters/CliReporter"); + // Determine test type and name from options let testType, testName; if (options.rpc) { - testType = 'rpc'; + testType = "rpc"; testName = options.rpc; } else if (options.rls) { - testType = 'rls'; + testType = "rls"; testName = options.rls; } else { - console.error('Error: Must specify either --rpc or --rls '); + console.error( + "Error: Must specify either --rpc or --rls ", + ); process.exit(1); } - + const command = new GenerateCommand( paths.testsDir, paths.reportsDir, null, - parentOpts.prod + parentOpts.prod, ); const reporter = new CliReporter(parentOpts.json); reporter.attach(command); - + try { await command.execute({ type: testType, name: testName }); } catch (error) { @@ -611,31 +686,46 @@ async function cli(argv) { } } }); - - test.command('generate-template') - .description('Generate pgTAP test templates using TestTemplateGenerator and TestRequirementAnalyzer') - .option('--migration ', 'Migration file to analyze for test requirements') - .option('--type ', 'Test type (rpc, rls, trigger, constraint, function)') - .option('--name ', 'Name of entity to generate tests for (required if not using --migration)') - .option('--output ', 'Output file path (default: stdout)') - .option('--schema ', 'Schema name (default: public)') - .option('--parameters ', 'Comma-separated function parameters for RPC tests') - .option('--return-type ', 'Expected return type for functions') - .option('--description ', 'Description for 
the test') + + test + .command("generate-template") + .description( + "Generate pgTAP test templates using TestTemplateGenerator and TestRequirementAnalyzer", + ) + .option( + "--migration ", + "Migration file to analyze for test requirements", + ) + .option( + "--type ", + "Test type (rpc, rls, trigger, constraint, function)", + ) + .option( + "--name ", + "Name of entity to generate tests for (required if not using --migration)", + ) + .option("--output ", "Output file path (default: stdout)") + .option("--schema ", "Schema name (default: public)") + .option( + "--parameters ", + "Comma-separated function parameters for RPC tests", + ) + .option("--return-type ", "Expected return type for functions") + .option("--description ", "Description for the test") .action(async (options) => { const parentOpts = program.opts(); - const { GenerateTemplateCommand } = require('./commands/test'); - const CliReporter = require('./reporters/CliReporter'); - + const { GenerateTemplateCommand } = require("./commands/test"); + const CliReporter = require("./reporters/CliReporter"); + const command = new GenerateTemplateCommand( paths.testsDir, paths.reportsDir, null, - parentOpts.prod + parentOpts.prod, ); const reporter = new CliReporter(parentOpts.json); reporter.attach(command); - + try { await command.execute(options); } catch (error) { @@ -646,25 +736,29 @@ async function cli(argv) { }); // CI Commands - Optimized for continuous integration - test.command('ci-validate') - .description('CI-optimized test validation with machine-readable output') - .option('--output ', 'Output file for validation results (JSON format)') + test + .command("ci-validate") + .description("CI-optimized test validation with machine-readable output") + .option( + "--output ", + "Output file for validation results (JSON format)", + ) .action(async (options) => { const parentOpts = program.opts(); - const { CIValidateCommand } = require('./commands/test'); - const CliReporter = require('./reporters/CliReporter'); - + const { CIValidateCommand } = require("./commands/test"); + const CliReporter = require("./reporters/CliReporter"); + const command = new CIValidateCommand( databaseUrl, serviceRoleKey, paths.testsDir, paths.reportsDir, null, - parentOpts.prod + parentOpts.prod, ); const reporter = new CliReporter(parentOpts.json); reporter.attach(command); - + try { await command.execute(options); } catch (error) { @@ -673,32 +767,33 @@ async function cli(argv) { } }); - test.command('ci-run') - .description('CI-optimized test execution with JUnit/JSON output') - .option('--pattern ', 'Pattern to match test function names') - .option('--suite ', 'Run only tests in this suite') - .option('--format ', 'Output format (junit, json)', 'junit') - .option('--output ', 'Output file for test results') - .option('--detailed', 'Include detailed results in JSON output', true) + test + .command("ci-run") + .description("CI-optimized test execution with JUnit/JSON output") + .option("--pattern ", "Pattern to match test function names") + .option("--suite ", "Run only tests in this suite") + .option("--format ", "Output format (junit, json)", "junit") + .option("--output ", "Output file for test results") + .option("--detailed", "Include detailed results in JSON output", true) .action(async (options) => { const parentOpts = program.opts(); - const { CIRunCommand } = require('./commands/test'); - const CliReporter = require('./reporters/CliReporter'); - + const { CIRunCommand } = require("./commands/test"); + const CliReporter = 
require("./reporters/CliReporter"); + const command = new CIRunCommand( databaseUrl, serviceRoleKey, paths.testsDir, paths.reportsDir, null, - parentOpts.prod + parentOpts.prod, ); const reporter = new CliReporter(parentOpts.json); reporter.attach(command); - + try { const results = await command.execute(options); - + // CI commands handle their own exit codes const exitCode = command.getExitCode(results); process.exit(exitCode); @@ -707,27 +802,40 @@ async function cli(argv) { } }); - test.command('ci-coverage') - .description('CI-optimized coverage analysis with enforcement') - .option('--enforce', 'Enforce coverage thresholds (default: false)', false) - .option('--min-coverage ', 'Minimum overall coverage percentage', '80') - .option('--min-rpc-coverage ', 'Minimum RPC function coverage percentage', '75') - .option('--min-rls-coverage ', 'Minimum RLS policy coverage percentage', '70') - .option('--format ', 'Output format (json)', 'json') - .option('--output ', 'Output file prefix for coverage reports') + test + .command("ci-coverage") + .description("CI-optimized coverage analysis with enforcement") + .option("--enforce", "Enforce coverage thresholds (default: false)", false) + .option( + "--min-coverage ", + "Minimum overall coverage percentage", + "80", + ) + .option( + "--min-rpc-coverage ", + "Minimum RPC function coverage percentage", + "75", + ) + .option( + "--min-rls-coverage ", + "Minimum RLS policy coverage percentage", + "70", + ) + .option("--format ", "Output format (json)", "json") + .option("--output ", "Output file prefix for coverage reports") .action(async (options) => { const parentOpts = program.opts(); - const { CICoverageCommand } = require('./commands/test'); - const CliReporter = require('./reporters/CliReporter'); - + const { CICoverageCommand } = require("./commands/test"); + const CliReporter = require("./reporters/CliReporter"); + const command = new CICoverageCommand( null, // config - uses default null, // logger - added by reporter - parentOpts.prod + parentOpts.prod, ); const reporter = new CliReporter(parentOpts.json); reporter.attach(command); - + try { await command.execute(options); // CI coverage command handles its own exit codes via process.exitCode @@ -741,8 +849,8 @@ async function cli(argv) { // .command('maintenance') // .alias('maint') // .description('Maintenance mode management'); - - // TODO: Add status command when implemented + + // TODO: Add status command when implemented // program // .command('status') // .description('Show comprehensive system status') @@ -750,14 +858,14 @@ async function cli(argv) { // const parentOpts = program.opts(); // await statusCommand.execute({ ...options, ...parentOpts, config }); // }); - + // Parse arguments await program.parseAsync(argv); - + // Show help if no command provided if (argv.length === 2) { program.help(); } } -module.exports = { cli }; \ No newline at end of file +module.exports = { cli }; diff --git a/src/lib/ArchyError/ArchyErrorBase.js b/src/lib/ArchyError/ArchyErrorBase.js index 3220704..ab9ff90 100644 --- a/src/lib/ArchyError/ArchyErrorBase.js +++ b/src/lib/ArchyError/ArchyErrorBase.js @@ -5,65 +5,65 @@ * @extends Error */ class dataErrorBase extends Error { - /** - * Constructor for dataError - * @param {string} message Error message - * @param {number} code Error code - * @param {object} context Contextual information about the error - * @constructor - */ - constructor(message, code, context = {}) { - if (new.target === dataErrorBase) { - throw new TypeError("Cannot construct 
dataErrorBase instances directly"); - } + /** + * Constructor for dataError + * @param {string} message Error message + * @param {number} code Error code + * @param {object} context Contextual information about the error + * @constructor + */ + constructor(message, code, context = {}) { + if (new.target === dataErrorBase) { + throw new TypeError("Cannot construct dataErrorBase instances directly"); + } - if (typeof code !== 'number') { - throw new TypeError("Error code must be a number"); - } + if (typeof code !== "number") { + throw new TypeError("Error code must be a number"); + } - if (typeof message !== 'string' || message.trim() === '') { - throw new TypeError("Error message must be a non-empty string"); - } + if (typeof message !== "string" || message.trim() === "") { + throw new TypeError("Error message must be a non-empty string"); + } - super(message); + super(message); - this.name = this.constructor.name; - this.timestamp = new Date().toISOString(); - this.code = code; - this.context = context; - } + this.name = this.constructor.name; + this.timestamp = new Date().toISOString(); + this.code = code; + this.context = context; + } - /** - * Error code associated with the error - * @returns {number} Error code - */ - getCode() { - return this.code; - } + /** + * Error code associated with the error + * @returns {number} Error code + */ + getCode() { + return this.code; + } - /** - * Contextual information about the error - * @returns {object} Context - */ - getContext() { - return this.context; - } - - /** - * Timestamp when the error was created - * @returns {string} ISO timestamp - */ - getTimestamp() { - return this.timestamp; - } - - /** - * Error message - * @returns {string} Error message - */ - getMessage() { - return this.message; - } -}; + /** + * Contextual information about the error + * @returns {object} Context + */ + getContext() { + return this.context; + } + + /** + * Timestamp when the error was created + * @returns {string} ISO timestamp + */ + getTimestamp() { + return this.timestamp; + } + + /** + * Error message + * @returns {string} Error message + */ + getMessage() { + return this.message; + } +} module.exports = dataErrorBase; diff --git a/src/lib/BuildCommand.js b/src/lib/BuildCommand.js index c9d724e..6253520 100644 --- a/src/lib/BuildCommand.js +++ b/src/lib/BuildCommand.js @@ -1,15 +1,15 @@ -const Command = require('./Command'); -const PathResolver = require('./PathResolver'); +const Command = require("./Command"); +const PathResolver = require("./PathResolver"); const { BuildProgressEvent, BuildStartEvent, BuildCompleteEvent, - BuildFailedEvent -} = require('./events/CommandEvents'); + BuildFailedEvent, +} = require("./events/CommandEvents"); /** * BuildCommand - Base class for compilation/build operations - * + * * Commands that transform or compile files without database interaction. * Provides path resolution and file handling utilities. 
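As a quick illustration of the API this base class exposes, here is a minimal sketch of a hypothetical subclass; the class name, glob pattern, and output filename are invented for the example, and the helpers it calls (`listInputFiles`, `readInputFile`, `writeOutputFile`, and the `emitBuild*` event methods) are the ones defined in the class body below.

```javascript
const path = require("path");
const BuildCommand = require("./BuildCommand");

// Illustrative subclass: concatenate SQL sources into a single output file.
// SqlBundleCommand and "bundle.sql" are placeholder names, not part of this diff.
class SqlBundleCommand extends BuildCommand {
  async performExecute() {
    this.emitBuildStart("bundle");
    try {
      const files = await this.listInputFiles("*.sql");
      const parts = [];
      for (const file of files) {
        this.emitBuildProgress("reading", { file });
        parts.push(await this.readInputFile(path.basename(file)));
      }
      await this.writeOutputFile("bundle.sql", parts.join("\n"));
      this.emitBuildComplete({ files: files.length });
      return { files: files.length };
    } catch (error) {
      this.emitBuildFailed(error);
      throw error;
    }
  }
}

module.exports = SqlBundleCommand;
```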
*/ @@ -27,22 +27,22 @@ class BuildCommand extends Command { outputDir, logger = null, isProd = false, - pathResolver = null + pathResolver = null, ) { // Call parent with minimal config super(null, logger, isProd, null); - + // Store paths this.inputDir = inputDir; this.outputDir = outputDir; - + // Path resolver for ensuring directories exist this.pathResolver = pathResolver || new PathResolver(); - + // Build operations typically don't need production confirmation this.requiresProductionConfirmation = false; } - + /** * Ensure input directory exists and is readable * @returns {Promise} Resolved input directory path @@ -50,7 +50,7 @@ class BuildCommand extends Command { getInputDir() { return this.pathResolver.resolveDirectoryForRead(this.inputDir); } - + /** * Ensure output directory exists and is writable * @returns {Promise} Resolved output directory path @@ -58,39 +58,39 @@ class BuildCommand extends Command { getOutputDir() { return this.pathResolver.resolveDirectoryForWrite(this.outputDir); } - + /** * Get a specific input file path * @param {string} filename - The filename relative to input dir * @returns {Promise} Resolved file path */ async getInputFile(filename) { - const path = require('path'); + const path = require("path"); const dir = await this.getInputDir(); return this.pathResolver.resolveFileForRead(path.join(dir, filename)); } - + /** * Get a specific output file path * @param {string} filename - The filename relative to output dir * @returns {Promise} Resolved file path */ async getOutputFile(filename) { - const path = require('path'); + const path = require("path"); const dir = await this.getOutputDir(); return this.pathResolver.resolveFileForWrite(path.join(dir, filename)); } - + /** * List files in input directory * @param {string} pattern - Glob pattern (optional) * @returns {Promise} List of file paths */ - async listInputFiles(pattern = '*') { - const glob = require('glob'); - const path = require('path'); + async listInputFiles(pattern = "*") { + const glob = require("glob"); + const path = require("path"); const dir = await this.getInputDir(); - + return new Promise((resolve, reject) => { glob(path.join(dir, pattern), (err, files) => { if (err) reject(err); @@ -98,18 +98,18 @@ class BuildCommand extends Command { }); }); } - + /** * Read a file from input directory * @param {string} filename - The filename to read * @returns {Promise} File contents */ async readInputFile(filename) { - const fs = require('fs').promises; + const fs = require("fs").promises; const filePath = await this.getInputFile(filename); - return fs.readFile(filePath, 'utf8'); + return fs.readFile(filePath, "utf8"); } - + /** * Write a file to output directory * @param {string} filename - The filename to write @@ -117,31 +117,41 @@ class BuildCommand extends Command { * @returns {Promise} */ async writeOutputFile(filename, content) { - const fs = require('fs').promises; + const fs = require("fs").promises; const filePath = await this.getOutputFile(filename); - await fs.writeFile(filePath, content, 'utf8'); + await fs.writeFile(filePath, content, "utf8"); } - + /** * Emit build progress events * @param {string} stage - Current build stage * @param {Object} details - Additional event details */ emitBuildProgress(stage, details = {}) { - const event = new BuildProgressEvent(stage, this.inputDir, this.outputDir, details); - this.emit('build:progress', event.toEventData()); + const event = new BuildProgressEvent( + stage, + this.inputDir, + this.outputDir, + details, + ); + 
this.emit("build:progress", event.toEventData()); } - + /** * Emit build start event * @param {string} type - Type of build operation * @param {Object} details - Additional event details */ emitBuildStart(type, details = {}) { - const event = new BuildStartEvent(type, this.inputDir, this.outputDir, details); - this.emit('build:start', event.toEventData()); + const event = new BuildStartEvent( + type, + this.inputDir, + this.outputDir, + details, + ); + this.emit("build:start", event.toEventData()); } - + /** * Emit build complete event * @param {Object} result - Build result details @@ -149,9 +159,9 @@ class BuildCommand extends Command { */ emitBuildComplete(result, details = {}) { const event = new BuildCompleteEvent(result, details); - this.emit('build:complete', event.toEventData()); + this.emit("build:complete", event.toEventData()); } - + /** * Emit build failure event * @param {Error} error - The error that caused the build to fail @@ -159,8 +169,8 @@ class BuildCommand extends Command { */ emitBuildFailed(error, details = {}) { const event = new BuildFailedEvent(error, details); - this.emit('build:failed', event.toEventData()); + this.emit("build:failed", event.toEventData()); } } -module.exports = BuildCommand; \ No newline at end of file +module.exports = BuildCommand; diff --git a/src/lib/ChildProcessWrapper.js b/src/lib/ChildProcessWrapper.js index f83d21e..b608265 100644 --- a/src/lib/ChildProcessWrapper.js +++ b/src/lib/ChildProcessWrapper.js @@ -1,6 +1,6 @@ /** * ChildProcessWrapper - Safe child process management with proper cleanup - * + * * Features: * - Automatic process cleanup on timeout * - Command injection prevention via whitelist validation @@ -9,149 +9,155 @@ * - Safe argument sanitization */ -const { spawn } = require('child_process'); -const EventEmitter = require('events'); +const { spawn } = require("child_process"); +const EventEmitter = require("events"); class ChildProcessWrapper extends EventEmitter { constructor(logger = console) { super(); this.logger = logger; this.activeProcesses = new Map(); - + // Whitelist of allowed commands this.allowedCommands = new Set([ - 'node', - 'npm', - 'npx', - 'pnpm', - 'yarn', - 'supabase', - 'psql', - 'pg_dump', - 'pg_restore', - 'docker', - 'git', - 'deno' + "node", + "npm", + "npx", + "pnpm", + "yarn", + "supabase", + "psql", + "pg_dump", + "pg_restore", + "docker", + "git", + "deno", ]); - + // Setup cleanup on process exit - process.on('exit', () => this.cleanupAll()); - process.on('SIGINT', () => this.cleanupAll()); - process.on('SIGTERM', () => this.cleanupAll()); + process.on("exit", () => this.cleanupAll()); + process.on("SIGINT", () => this.cleanupAll()); + process.on("SIGTERM", () => this.cleanupAll()); } - + /** * Validate command against whitelist */ validateCommand(command) { - const baseCommand = command.split(' ')[0].split('/').pop(); - + const baseCommand = command.split(" ")[0].split("/").pop(); + if (!this.allowedCommands.has(baseCommand)) { - throw new Error(`Command '${baseCommand}' is not in the allowed command whitelist`); + throw new Error( + `Command '${baseCommand}' is not in the allowed command whitelist`, + ); } - + return true; } - + /** * Sanitize arguments to prevent injection */ sanitizeArgs(args) { - return args.map(arg => { + return args.map((arg) => { // Remove dangerous characters that could break out of arguments const sanitized = String(arg) - .replace(/[;&|`$(){}[\]<>]/g, '') // Remove shell metacharacters - .replace(/\n|\r/g, ' '); // Replace newlines with spaces - + 
.replace(/[;&|`$(){}[\]<>]/g, "") // Remove shell metacharacters + .replace(/\n|\r/g, " "); // Replace newlines with spaces + // Warn if sanitization changed the argument if (sanitized !== String(arg)) { this.logger.warn(`Argument sanitized: "${arg}" -> "${sanitized}"`); } - + return sanitized; }); } - + /** * Execute a command with proper timeout and cleanup */ execute(command, args = [], options = {}) { // Validate command this.validateCommand(command); - + // Sanitize arguments const safeArgs = this.sanitizeArgs(args); - + // Default options const execOptions = { timeout: 30000, // 30 seconds default shell: false, // Never use shell to prevent injection ...options, // Force some security options - windowsHide: true + windowsHide: true, }; - + return new Promise((resolve, reject) => { const startTime = Date.now(); - let stdout = ''; - let stderr = ''; + let stdout = ""; + let stderr = ""; let timedOut = false; let timeoutHandle = null; - + // Spawn the process const child = spawn(command, safeArgs, execOptions); const pid = child.pid; - + // Track the process this.activeProcesses.set(pid, { process: child, - command: `${command} ${safeArgs.join(' ')}`, - startTime + command: `${command} ${safeArgs.join(" ")}`, + startTime, }); - + // Setup timeout if (execOptions.timeout > 0) { timeoutHandle = setTimeout(() => { timedOut = true; - this.logger.warn(`Process ${pid} timed out after ${execOptions.timeout}ms`); - this.killProcess(pid, 'SIGTERM'); - + this.logger.warn( + `Process ${pid} timed out after ${execOptions.timeout}ms`, + ); + this.killProcess(pid, "SIGTERM"); + // Give it 5 seconds to die gracefully, then force kill setTimeout(() => { if (this.activeProcesses.has(pid)) { - this.logger.warn(`Process ${pid} didn't respond to SIGTERM, sending SIGKILL`); - this.killProcess(pid, 'SIGKILL'); + this.logger.warn( + `Process ${pid} didn't respond to SIGTERM, sending SIGKILL`, + ); + this.killProcess(pid, "SIGKILL"); } }, 5000); }, execOptions.timeout); } - + // Capture stdout if (child.stdout) { - child.stdout.on('data', (data) => { + child.stdout.on("data", (data) => { stdout += data.toString(); }); } - + // Capture stderr if (child.stderr) { - child.stderr.on('data', (data) => { + child.stderr.on("data", (data) => { stderr += data.toString(); }); } - + // Handle process completion - child.on('close', (code, signal) => { + child.on("close", (code, signal) => { // Clear timeout if (timeoutHandle) { clearTimeout(timeoutHandle); } - + // Remove from active processes this.activeProcesses.delete(pid); - + const duration = Date.now() - startTime; - + if (timedOut) { reject(new Error(`Process timed out after ${execOptions.timeout}ms`)); } else if (code !== 0) { @@ -168,117 +174,119 @@ class ChildProcessWrapper extends EventEmitter { stderr, code, signal, - duration + duration, }); } }); - + // Handle process errors - child.on('error', (error) => { + child.on("error", (error) => { // Clear timeout if (timeoutHandle) { clearTimeout(timeoutHandle); } - + // Remove from active processes this.activeProcesses.delete(pid); - + reject(error); }); }); } - + /** * Execute a command and stream output in real-time */ stream(command, args = [], options = {}) { // Validate command this.validateCommand(command); - + // Sanitize arguments const safeArgs = this.sanitizeArgs(args); - + // Default options const execOptions = { timeout: 0, // No timeout for streaming by default shell: false, - stdio: 'pipe', - ...options + stdio: "pipe", + ...options, }; - + const startTime = Date.now(); const child = spawn(command, 
safeArgs, execOptions); const pid = child.pid; - + // Track the process this.activeProcesses.set(pid, { process: child, - command: `${command} ${safeArgs.join(' ')}`, - startTime + command: `${command} ${safeArgs.join(" ")}`, + startTime, }); - + // Setup timeout if specified let timeoutHandle = null; if (execOptions.timeout > 0) { timeoutHandle = setTimeout(() => { - this.logger.warn(`Streaming process ${pid} timed out after ${execOptions.timeout}ms`); - this.killProcess(pid, 'SIGTERM'); - + this.logger.warn( + `Streaming process ${pid} timed out after ${execOptions.timeout}ms`, + ); + this.killProcess(pid, "SIGTERM"); + setTimeout(() => { if (this.activeProcesses.has(pid)) { - this.killProcess(pid, 'SIGKILL'); + this.killProcess(pid, "SIGKILL"); } }, 5000); }, execOptions.timeout); } - + // Emit events for streaming if (child.stdout) { - child.stdout.on('data', (data) => { - this.emit('stdout', data.toString()); + child.stdout.on("data", (data) => { + this.emit("stdout", data.toString()); }); } - + if (child.stderr) { - child.stderr.on('data', (data) => { - this.emit('stderr', data.toString()); + child.stderr.on("data", (data) => { + this.emit("stderr", data.toString()); }); } - + // Cleanup on completion - child.on('close', (code, signal) => { + child.on("close", (code, signal) => { if (timeoutHandle) { clearTimeout(timeoutHandle); } this.activeProcesses.delete(pid); - this.emit('close', { code, signal, duration: Date.now() - startTime }); + this.emit("close", { code, signal, duration: Date.now() - startTime }); }); - - child.on('error', (error) => { + + child.on("error", (error) => { if (timeoutHandle) { clearTimeout(timeoutHandle); } this.activeProcesses.delete(pid); - this.emit('error', error); + this.emit("error", error); }); - + return child; } - + /** * Kill a specific process */ - killProcess(pid, signal = 'SIGTERM') { + killProcess(pid, signal = "SIGTERM") { const processInfo = this.activeProcesses.get(pid); if (processInfo && processInfo.process) { try { processInfo.process.kill(signal); - + // On Windows, we need to use taskkill for proper cleanup - if (process.platform === 'win32' && signal === 'SIGKILL') { - spawn('taskkill', ['/F', '/T', '/PID', pid.toString()], { + if (process.platform === "win32" && signal === "SIGKILL") { + spawn("taskkill", ["/F", "/T", "/PID", pid.toString()], { detached: true, - stdio: 'ignore' + stdio: "ignore", }); } } catch (error) { @@ -286,25 +294,25 @@ class ChildProcessWrapper extends EventEmitter { } } } - + /** * Clean up all active processes */ cleanupAll() { for (const [pid, info] of this.activeProcesses) { this.logger.warn(`Cleaning up process ${pid}: ${info.command}`); - this.killProcess(pid, 'SIGTERM'); + this.killProcess(pid, "SIGTERM"); } - + // Give them a moment to die gracefully setTimeout(() => { for (const [pid] of this.activeProcesses) { this.logger.warn(`Force killing process ${pid}`); - this.killProcess(pid, 'SIGKILL'); + this.killProcess(pid, "SIGKILL"); } }, 1000); } - + /** * Get list of active processes */ @@ -312,17 +320,17 @@ class ChildProcessWrapper extends EventEmitter { return Array.from(this.activeProcesses.entries()).map(([pid, info]) => ({ pid, command: info.command, - uptime: Date.now() - info.startTime + uptime: Date.now() - info.startTime, })); } - + /** * Add a command to the whitelist */ allowCommand(command) { this.allowedCommands.add(command); } - + /** * Remove a command from the whitelist */ @@ -331,4 +339,4 @@ class ChildProcessWrapper extends EventEmitter { } } -module.exports = ChildProcessWrapper; \ No 
newline at end of file +module.exports = ChildProcessWrapper; diff --git a/src/lib/Command.js b/src/lib/Command.js index 8b7df10..59f43b0 100644 --- a/src/lib/Command.js +++ b/src/lib/Command.js @@ -2,8 +2,8 @@ * Base Command Class for Event-Driven Architecture */ -const { EventEmitter } = require('events'); -const pino = require('pino'); +const { EventEmitter } = require("events"); +const pino = require("pino"); const { ProgressEvent, WarningEvent, @@ -12,30 +12,30 @@ const { StartEvent, CompleteEvent, CancelledEvent, - validateCommandEvent -} = require('./events/CommandEvents.js'); + validateCommandEvent, +} = require("./events/CommandEvents.js"); /** * Base command class that all commands extend from */ class Command extends EventEmitter { constructor( - legacyConfig = null, // Config class instance is OK - it's a typed class + legacyConfig = null, // Config class instance is OK - it's a typed class logger = null, isProd = false, - outputConfig = null // OutputConfig class instance for paths + outputConfig = null, // OutputConfig class instance for paths ) { super(); // Store the Config instance (this is fine - it's a proper class) this.config = legacyConfig; - + // Logging and environment this.isProd = isProd; this.logger = logger || this.createLogger(); - + // Path configuration via dependency injection this.outputConfig = outputConfig; - + // Command behavior flags this.requiresProductionConfirmation = true; // Can be overridden by subclasses } @@ -44,18 +44,20 @@ class Command extends EventEmitter { * Create a default pino logger */ createLogger() { - const isDev = process.env.NODE_ENV !== 'production'; - + const isDev = process.env.NODE_ENV !== "production"; + return pino({ - level: this.config?.get ? this.config.get('logging.level') : 'info', - transport: isDev ? { - target: 'pino-pretty', - options: { - colorize: true, - translateTime: 'HH:MM:ss', - ignore: 'pid,hostname' - } - } : undefined + level: this.config?.get ? this.config.get("logging.level") : "info", + transport: isDev + ? 
{ + target: "pino-pretty", + options: { + colorize: true, + translateTime: "HH:MM:ss", + ignore: "pid,hostname", + }, + } + : undefined, }); } @@ -64,45 +66,50 @@ class Command extends EventEmitter { */ async execute(...args) { // Emit start event - const startEvent = new StartEvent(`Starting ${this.constructor.name}`, { isProd: this.isProd }); - this.emit('start', { + const startEvent = new StartEvent(`Starting ${this.constructor.name}`, { + isProd: this.isProd, + }); + this.emit("start", { message: startEvent.message, data: startEvent.details, timestamp: startEvent.timestamp, type: startEvent.type, - isProd: this.isProd + isProd: this.isProd, }); - + try { // Check for production confirmation if needed if (this.isProd && this.requiresProductionConfirmation) { const confirmed = await this.confirmProduction(); if (!confirmed) { - this.success('Operation cancelled'); - const cancelledEvent = new CancelledEvent('Operation cancelled'); - this.emit('cancelled', { + this.success("Operation cancelled"); + const cancelledEvent = new CancelledEvent("Operation cancelled"); + this.emit("cancelled", { message: cancelledEvent.message, data: cancelledEvent.details, timestamp: cancelledEvent.timestamp, - type: cancelledEvent.type + type: cancelledEvent.type, }); return; } } - + // Call the actual implementation const result = await this.performExecute(...args); - + // Emit completion event - const completeEvent = new CompleteEvent(`${this.constructor.name} completed successfully`, result); - this.emit('complete', { + const completeEvent = new CompleteEvent( + `${this.constructor.name} completed successfully`, + result, + ); + this.emit("complete", { message: completeEvent.message, result: completeEvent.result, data: completeEvent.details, timestamp: completeEvent.timestamp, - type: completeEvent.type + type: completeEvent.type, }); - + return result; } catch (error) { this.error(`${this.constructor.name} failed`, error); @@ -115,20 +122,20 @@ class Command extends EventEmitter { */ // eslint-disable-next-line require-await async performExecute(..._args) { - throw new Error('Command.performExecute() must be implemented by subclass'); + throw new Error("Command.performExecute() must be implemented by subclass"); } /** * Confirm production operation */ async confirmProduction() { - this.warn('Production operation requested!', { - environment: 'PRODUCTION', - command: this.constructor.name + this.warn("Production operation requested!", { + environment: "PRODUCTION", + command: this.constructor.name, }); - + return await this.confirm( - 'Are you sure you want to perform this operation in PRODUCTION?' 
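For orientation, a minimal sketch of a concrete subclass against this base class; the command name and the work it does are invented, and it leans on `performExecute()` being invoked by `execute()` above plus the `progress`/`success` helpers defined further down in this class.

```javascript
const Command = require("./Command");

// Illustrative only: a trivial command that reports progress and succeeds.
class PingCommand extends Command {
  async performExecute() {
    this.progress("Pinging...");
    await new Promise((resolve) => setTimeout(resolve, 100));
    this.success("Pong", { latencyMs: 100 });
    return { ok: true };
  }
}

// Typical wiring: a reporter or test subscribes to the emitted events.
const command = new PingCommand(null, console, false);
command.on("progress", (event) => console.log(event.message));
command.execute().catch((error) => {
  console.error(error);
  process.exitCode = 1;
});
```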
+ "Are you sure you want to perform this operation in PRODUCTION?", ); } @@ -138,12 +145,12 @@ class Command extends EventEmitter { progress(message, data = {}) { const event = new ProgressEvent(message, null, data); // null percentage for indeterminate progress // Emit typed event - maintain existing event object structure for backward compatibility - this.emit('progress', { + this.emit("progress", { message: event.message, data: event.details, timestamp: event.timestamp, type: event.type, - percentage: event.percentage + percentage: event.percentage, }); this.logger.info({ ...data }, message); } @@ -154,11 +161,11 @@ class Command extends EventEmitter { warn(message, data = {}) { const event = new WarningEvent(message, data); // Emit typed event - maintain existing event object structure for backward compatibility - this.emit('warning', { + this.emit("warning", { message: event.message, data: event.details, timestamp: event.timestamp, - type: event.type + type: event.type, }); this.logger.warn({ ...data }, message); } @@ -171,12 +178,12 @@ class Command extends EventEmitter { const code = data.code || error?.code || null; const event = new ErrorEvent(message, error, code, data); // Emit typed event - maintain existing event object structure for backward compatibility - this.emit('error', { + this.emit("error", { message: event.message, error: event.error, data: event.details, timestamp: event.timestamp, - type: event.type + type: event.type, }); this.logger.error({ err: error, ...data }, message); } @@ -187,11 +194,11 @@ class Command extends EventEmitter { success(message, data = {}) { const event = new SuccessEvent(message, data); // Emit typed event - maintain existing event object structure for backward compatibility - this.emit('success', { + this.emit("success", { message: event.message, data: event.details, timestamp: event.timestamp, - type: event.type + type: event.type, }); this.logger.info({ ...data }, message); } @@ -201,7 +208,7 @@ class Command extends EventEmitter { */ prompt(type, options) { return new Promise((resolve) => { - this.emit('prompt', { type, options, resolve }); + this.emit("prompt", { type, options, resolve }); }); } @@ -209,19 +216,19 @@ class Command extends EventEmitter { * Emit a confirmation event and wait for response */ async confirm(message, defaultValue = false) { - return await this.prompt('confirm', { message, default: defaultValue }); + return await this.prompt("confirm", { message, default: defaultValue }); } /** * Emit an input event and wait for response */ async input(message, options = {}) { - return await this.prompt('input', { message, ...options }); + return await this.prompt("input", { message, ...options }); } /** * Validate an event against expected class type - * @param {Object} event - The event object to validate + * @param {Object} event - The event object to validate * @param {Function} expectedClass - Expected event class constructor * @returns {Object} Validation result with success/error properties */ @@ -230,7 +237,10 @@ class Command extends EventEmitter { // If no specific class expected, just check if it has the basic event structure return { success: !!(event && event.type && event.message && event.timestamp), - error: event && event.type && event.message && event.timestamp ? null : 'Invalid event structure' + error: + event && event.type && event.message && event.timestamp + ? 
null + : "Invalid event structure", }; } @@ -251,18 +261,21 @@ class Command extends EventEmitter { emitTypedEvent(eventName, eventData, expectedClass = null) { const validation = this.validateEvent(eventData, expectedClass); if (!validation.success) { - this.logger.warn({ validationError: validation.error }, `Invalid event data for ${eventName}`); + this.logger.warn( + { validationError: validation.error }, + `Invalid event data for ${eventName}`, + ); // Still emit the event for backward compatibility, but log the validation issue } - + // If eventData is a CommandEvent instance, convert it to the expected format - if (eventData && typeof eventData.toJSON === 'function') { + if (eventData && typeof eventData.toJSON === "function") { const jsonData = eventData.toJSON(); this.emit(eventName, { message: jsonData.message, data: jsonData.details || {}, timestamp: new Date(jsonData.timestamp), - type: jsonData.type + type: jsonData.type, }); } else { this.emit(eventName, eventData); diff --git a/src/lib/CommandRouter.js b/src/lib/CommandRouter.js index 2691789..3e2b1da 100644 --- a/src/lib/CommandRouter.js +++ b/src/lib/CommandRouter.js @@ -1,10 +1,10 @@ /** * CommandRouter - Fluent routing system with Zod schema validation - * + * * Example usage: * const router = new CommandRouter(); * const { z } = require('zod'); - * + * * router * .command("migrate") * .subcommand("generate") @@ -21,8 +21,8 @@ * }); */ -const EventEmitter = require('events'); -const { z } = require('zod'); +const EventEmitter = require("events"); +const { z } = require("zod"); class CommandRouter extends EventEmitter { constructor() { @@ -46,7 +46,7 @@ class CommandRouter extends EventEmitter { */ registerRoute(path, config) { this.routes.set(path, config); - this.emit('route:registered', { path, config }); + this.emit("route:registered", { path, config }); } /** @@ -67,13 +67,13 @@ class CommandRouter extends EventEmitter { */ async execute(commandPath, rawArgs = {}) { const route = this.findRoute(commandPath); - + if (!route) { throw new Error(`No handler registered for command: ${commandPath}`); } // Check for help flag first - if (rawArgs['--help'] || rawArgs['-h'] || rawArgs.help) { + if (rawArgs["--help"] || rawArgs["-h"] || rawArgs.help) { this.showHelp(commandPath, route); return { help: true }; } @@ -83,35 +83,39 @@ class CommandRouter extends EventEmitter { path: commandPath, rawArgs, route, - router: this + router: this, }; try { // Run global middleware - await Promise.all(this.globalMiddleware.map(middleware => middleware(context))); + await Promise.all( + this.globalMiddleware.map((middleware) => middleware(context)), + ); // Parse and validate arguments with Zod schema let parsedArgs = rawArgs; if (route.schema) { // Convert CLI args to match schema shape const argsToValidate = this.prepareArgsForSchema(rawArgs, route); - + // Validate with Zod const result = await route.schema.safeParseAsync(argsToValidate); - + if (!result.success) { const errors = result.error.format(); this.showValidationErrors(commandPath, errors, route); - throw new Error('Validation failed'); + throw new Error("Validation failed"); } - + parsedArgs = result.data; } context.args = parsedArgs; // Run route-specific middleware - await Promise.all(route.middleware.map(middleware => middleware(context))); + await Promise.all( + route.middleware.map((middleware) => middleware(context)), + ); // Execute the handler if (!route.handler) { @@ -119,9 +123,8 @@ class CommandRouter extends EventEmitter { } return await route.handler(parsedArgs, 
context); - } catch (error) { - this.emit('error', { path: commandPath, error }); + this.emit("error", { path: commandPath, error }); throw error; } } @@ -133,26 +136,28 @@ class CommandRouter extends EventEmitter { */ prepareArgsForSchema(rawArgs, route) { const prepared = {}; - + for (const [key, value] of Object.entries(rawArgs)) { // Skip special args - if (key === '_' || key === '$0') continue; - + if (key === "_" || key === "$0") continue; + // Convert --kebab-case to camelCase - const propName = key.replace(/^-+/, '').replace(/-([a-z])/g, (g) => g[1].toUpperCase()); - + const propName = key + .replace(/^-+/, "") + .replace(/-([a-z])/g, (g) => g[1].toUpperCase()); + // Handle boolean flags (presence = true) if (value === true || value === undefined) { prepared[propName] = true; - } else if (value === 'true') { + } else if (value === "true") { prepared[propName] = true; - } else if (value === 'false') { + } else if (value === "false") { prepared[propName] = false; } else { prepared[propName] = value; } } - + // Apply any custom mappings from route config if (route.argMappings) { for (const [from, to] of Object.entries(route.argMappings)) { @@ -161,7 +166,7 @@ class CommandRouter extends EventEmitter { } } } - + return prepared; } @@ -170,58 +175,61 @@ class CommandRouter extends EventEmitter { * @private */ showHelp(commandPath, route) { - const parts = commandPath.split('/'); - const commandName = parts.join(' '); - + const parts = commandPath.split("/"); + const commandName = parts.join(" "); + console.log(`\nUsage: data ${commandName} [OPTIONS]\n`); - + if (route.description) { console.log(`${route.description}\n`); } if (route.schema) { - console.log('Options:'); - + console.log("Options:"); + // Extract schema shape for help generation const shape = route.schema._def.shape || route.schema.shape || {}; - + for (const [key, field] of Object.entries(shape)) { - let line = ' '; - + let line = " "; + // Convert camelCase to kebab-case for CLI - const cliName = key.replace(/[A-Z]/g, letter => `-${letter.toLowerCase()}`); + const cliName = key.replace( + /[A-Z]/g, + (letter) => `-${letter.toLowerCase()}`, + ); line += `--${cliName}`; - + // Get type from Zod schema const typeName = this.getZodTypeName(field); - if (typeName !== 'boolean') { + if (typeName !== "boolean") { line += ` <${typeName}>`; } - + // Add description if available const description = field.description || field._def?.description; if (description) { line = line.padEnd(30) + description; } - + // Add constraints const constraints = this.getZodConstraints(field); if (constraints.length > 0) { - line += ` (${constraints.join(', ')})`; + line += ` (${constraints.join(", ")})`; } - + console.log(line); } } if (route.examples && route.examples.length > 0) { - console.log('\nExamples:'); + console.log("\nExamples:"); for (const example of route.examples) { console.log(` ${example}`); } } - console.log(''); + console.log(""); } /** @@ -230,17 +238,20 @@ class CommandRouter extends EventEmitter { */ getZodTypeName(schema) { const def = schema._def; - - if (def.typeName === 'ZodString') return 'string'; - if (def.typeName === 'ZodNumber') return 'number'; - if (def.typeName === 'ZodBoolean') return 'boolean'; - if (def.typeName === 'ZodArray') return 'array'; - if (def.typeName === 'ZodEnum') return 'choice'; - if (def.typeName === 'ZodOptional') return this.getZodTypeName(def.innerType); - if (def.typeName === 'ZodDefault') return this.getZodTypeName(def.innerType); - if (def.typeName === 'ZodNullable') return 
this.getZodTypeName(def.innerType); - - return 'value'; + + if (def.typeName === "ZodString") return "string"; + if (def.typeName === "ZodNumber") return "number"; + if (def.typeName === "ZodBoolean") return "boolean"; + if (def.typeName === "ZodArray") return "array"; + if (def.typeName === "ZodEnum") return "choice"; + if (def.typeName === "ZodOptional") + return this.getZodTypeName(def.innerType); + if (def.typeName === "ZodDefault") + return this.getZodTypeName(def.innerType); + if (def.typeName === "ZodNullable") + return this.getZodTypeName(def.innerType); + + return "value"; } /** @@ -250,47 +261,50 @@ class CommandRouter extends EventEmitter { getZodConstraints(schema) { const constraints = []; const def = schema._def; - + // Check if optional - if (def.typeName === 'ZodOptional') { - constraints.push('optional'); + if (def.typeName === "ZodOptional") { + constraints.push("optional"); return [...constraints, ...this.getZodConstraints(def.innerType)]; } - + // Check for default - if (def.typeName === 'ZodDefault') { + if (def.typeName === "ZodDefault") { const defaultValue = def.defaultValue(); constraints.push(`default: ${JSON.stringify(defaultValue)}`); return [...constraints, ...this.getZodConstraints(def.innerType)]; } - + // String constraints - if (def.typeName === 'ZodString') { + if (def.typeName === "ZodString") { if (def.checks) { for (const check of def.checks) { - if (check.kind === 'min') constraints.push(`min length: ${check.value}`); - if (check.kind === 'max') constraints.push(`max length: ${check.value}`); - if (check.kind === 'regex') constraints.push(`pattern: ${check.regex}`); + if (check.kind === "min") + constraints.push(`min length: ${check.value}`); + if (check.kind === "max") + constraints.push(`max length: ${check.value}`); + if (check.kind === "regex") + constraints.push(`pattern: ${check.regex}`); } } } - + // Number constraints - if (def.typeName === 'ZodNumber') { + if (def.typeName === "ZodNumber") { if (def.checks) { for (const check of def.checks) { - if (check.kind === 'min') constraints.push(`min: ${check.value}`); - if (check.kind === 'max') constraints.push(`max: ${check.value}`); - if (check.kind === 'int') constraints.push('integer'); + if (check.kind === "min") constraints.push(`min: ${check.value}`); + if (check.kind === "max") constraints.push(`max: ${check.value}`); + if (check.kind === "int") constraints.push("integer"); } } } - + // Enum values - if (def.typeName === 'ZodEnum') { - constraints.push(`values: ${def.values.join(', ')}`); + if (def.typeName === "ZodEnum") { + constraints.push(`values: ${def.values.join(", ")}`); } - + return constraints; } @@ -300,18 +314,21 @@ class CommandRouter extends EventEmitter { */ showValidationErrors(commandPath, errors, _route) { console.error(`\nValidation errors for command: ${commandPath}\n`); - + // Remove the _errors property which is just metadata delete errors._errors; - + for (const [field, fieldErrors] of Object.entries(errors)) { if (fieldErrors._errors && fieldErrors._errors.length > 0) { - const cliName = field.replace(/[A-Z]/g, letter => `-${letter.toLowerCase()}`); - console.error(` --${cliName}: ${fieldErrors._errors.join(', ')}`); + const cliName = field.replace( + /[A-Z]/g, + (letter) => `-${letter.toLowerCase()}`, + ); + console.error(` --${cliName}: ${fieldErrors._errors.join(", ")}`); } } - - console.error('\nRun with --help for usage information\n'); + + console.error("\nRun with --help for usage information\n"); } /** @@ -339,8 +356,8 @@ class CommandRouter extends 
EventEmitter { * @private */ matchesPattern(path, pattern) { - if (pattern.includes('*')) { - const regex = new RegExp('^' + pattern.replace(/\*/g, '.*') + '$'); + if (pattern.includes("*")) { + const regex = new RegExp("^" + pattern.replace(/\*/g, ".*") + "$"); return regex.test(path); } return path === pattern; @@ -355,7 +372,7 @@ class CommandRouter extends EventEmitter { hasHandler: !!config.handler, hasSchema: !!config.schema, middleware: config.middleware.length, - description: config.description + description: config.description, })); } } @@ -371,9 +388,9 @@ class CommandBuilder { handler: null, schema: null, middleware: [], - description: '', + description: "", examples: [], - argMappings: {} + argMappings: {}, }; } @@ -444,25 +461,42 @@ class CommandBuilder { */ handler(handler) { // Support both function handlers and class handlers - if (typeof handler === 'function' && handler.prototype && handler.prototype.execute) { + if ( + typeof handler === "function" && + handler.prototype && + handler.prototype.execute + ) { // It's a class - wrap it this.config.handler = async (args, context) => { - const instance = new handler(context.router.config, context.router.logger, args.prod); - + const instance = new handler( + context.router.config, + context.router.logger, + args.prod, + ); + // Forward events from subcommand to router if (instance.on) { - ['start', 'progress', 'warning', 'error', 'success', 'complete', 'failed', 'cancelled', 'prompt'] - .forEach(event => { - instance.on(event, (data) => context.router.emit(event, data)); - }); + [ + "start", + "progress", + "warning", + "error", + "success", + "complete", + "failed", + "cancelled", + "prompt", + ].forEach((event) => { + instance.on(event, (data) => context.router.emit(event, data)); + }); } - + return await instance.execute(args); }; } else { this.config.handler = handler; } - + this.router.registerRoute(this.path, this.config); return this.router; } @@ -481,16 +515,16 @@ class CommandBuilder { CommandRouter.schemas = { // Common CLI argument types port: z.number().int().min(1).max(65535), - + url: z.string().url(), - + email: z.string().email(), - + path: z.string(), - + existingPath: z.string().refine( (val) => { - const fs = require('fs'); + const fs = require("fs"); try { fs.accessSync(val); return true; @@ -498,12 +532,12 @@ CommandRouter.schemas = { return false; } }, - { message: "Path does not exist" } + { message: "Path does not exist" }, ), - + directory: z.string().refine( (val) => { - const fs = require('fs'); + const fs = require("fs"); try { const stats = fs.statSync(val); return stats.isDirectory(); @@ -511,12 +545,12 @@ CommandRouter.schemas = { return false; } }, - { message: "Path must be a directory" } + { message: "Path must be a directory" }, ), - + file: z.string().refine( (val) => { - const fs = require('fs'); + const fs = require("fs"); try { const stats = fs.statSync(val); return stats.isFile(); @@ -524,19 +558,25 @@ CommandRouter.schemas = { return false; } }, - { message: "Path must be a file" } + { message: "Path must be a file" }, ), - + // Common flag combinations verbose: z.boolean().default(false).describe("Enable verbose output"), - + quiet: z.boolean().default(false).describe("Suppress output"), - - force: z.boolean().default(false).describe("Force operation without confirmation"), - - dryRun: z.boolean().default(false).describe("Preview changes without applying them"), - - prod: z.boolean().default(false).describe("Target production environment") + + force: z + .boolean() + .default(false) + 
.describe("Force operation without confirmation"), + + dryRun: z + .boolean() + .default(false) + .describe("Preview changes without applying them"), + + prod: z.boolean().default(false).describe("Target production environment"), }; -module.exports = CommandRouter; \ No newline at end of file +module.exports = CommandRouter; diff --git a/src/lib/DataInputPaths.js b/src/lib/DataInputPaths.js index dd3fbc7..ce597c6 100644 --- a/src/lib/DataInputPaths.js +++ b/src/lib/DataInputPaths.js @@ -1,9 +1,9 @@ -const PathResolver = require('./PathResolver'); -const path = require('path'); +const PathResolver = require("./PathResolver"); +const path = require("path"); /** * dataInputPaths - Manages all input/read sources for data - * + * * This class handles all directories where data reads files from. * It uses PathResolver to ensure directories exist and are readable. * All paths are resolved to absolute paths and cached. @@ -24,23 +24,24 @@ class DataInputPaths { functionsDir = null, schemasDir = null, configDir = null, - pathResolver = null + pathResolver = null, ) { this.pathResolver = pathResolver || new PathResolver(); - + // Store configuration with defaults this._config = { - sqlDir: sqlDir || process.env.data_SQL_DIR || './sql', - testsDir: testsDir || process.env.data_TESTS_DIR || './tests', - functionsDir: functionsDir || process.env.data_FUNCTIONS_DIR || './functions', - schemasDir: schemasDir || process.env.data_SCHEMAS_DIR || './schemas', - configDir: configDir || process.env.data_CONFIG_DIR || '.' + sqlDir: sqlDir || process.env.data_SQL_DIR || "./sql", + testsDir: testsDir || process.env.data_TESTS_DIR || "./tests", + functionsDir: + functionsDir || process.env.data_FUNCTIONS_DIR || "./functions", + schemasDir: schemasDir || process.env.data_SCHEMAS_DIR || "./schemas", + configDir: configDir || process.env.data_CONFIG_DIR || ".", }; - + // Cache for resolved paths this._resolvedPaths = {}; this._resolving = {}; // Prevent duplicate resolution attempts - + // Cache for file listings this._fileCache = {}; } @@ -51,7 +52,7 @@ class DataInputPaths { * @throws {Error} If directory doesn't exist or isn't readable */ getSqlDir() { - return this._resolvePath('sqlDir'); + return this._resolvePath("sqlDir"); } /** @@ -60,7 +61,7 @@ class DataInputPaths { * @throws {Error} If directory doesn't exist or isn't readable */ getTestsDir() { - return this._resolvePath('testsDir'); + return this._resolvePath("testsDir"); } /** @@ -69,7 +70,7 @@ class DataInputPaths { * @throws {Error} If directory doesn't exist or isn't readable */ getFunctionsDir() { - return this._resolvePath('functionsDir'); + return this._resolvePath("functionsDir"); } /** @@ -78,7 +79,7 @@ class DataInputPaths { * @throws {Error} If directory doesn't exist or isn't readable */ getSchemasDir() { - return this._resolvePath('schemasDir'); + return this._resolvePath("schemasDir"); } /** @@ -87,7 +88,7 @@ class DataInputPaths { * @throws {Error} If directory doesn't exist or isn't readable */ getConfigDir() { - return this._resolvePath('configDir'); + return this._resolvePath("configDir"); } /** @@ -143,7 +144,7 @@ class DataInputPaths { if (!Object.prototype.hasOwnProperty.call(this._config, key)) { throw new Error(`Unknown path configuration: ${key}`); } - + try { await this.pathResolver.resolveDirectoryForRead(this._config[key]); return true; @@ -159,15 +160,15 @@ class DataInputPaths { * @returns {Promise} First existing path or null */ async findDirectory(key, candidates) { - const checkPromises = candidates.map(async candidate => 
{ + const checkPromises = candidates.map(async (candidate) => { this._config[key] = candidate; const exists = await this.hasDirectory(key); return exists ? { candidate, exists } : null; }); - + const results = await Promise.allSettled(checkPromises); for (const result of results) { - if (result.status === 'fulfilled' && result.value) { + if (result.status === "fulfilled" && result.value) { this._config[key] = result.value.candidate; return this._resolvePath(key); } @@ -224,19 +225,22 @@ class DataInputPaths { } // Start resolution - this._resolving[key] = this.pathResolver.resolveDirectoryForRead(this._config[key]) - .then(resolved => { + this._resolving[key] = this.pathResolver + .resolveDirectoryForRead(this._config[key]) + .then((resolved) => { this._resolvedPaths[key] = resolved; delete this._resolving[key]; return resolved; }) - .catch(error => { + .catch((error) => { delete this._resolving[key]; - throw new Error(`Failed to resolve input path ${key}: ${error.message}`); + throw new Error( + `Failed to resolve input path ${key}: ${error.message}`, + ); }); return this._resolving[key]; } } -module.exports = DataInputPaths; \ No newline at end of file +module.exports = DataInputPaths; diff --git a/src/lib/DataOutputPaths.js b/src/lib/DataOutputPaths.js index 1e01caa..bad4bfc 100644 --- a/src/lib/DataOutputPaths.js +++ b/src/lib/DataOutputPaths.js @@ -1,9 +1,9 @@ -const PathResolver = require('./PathResolver'); -const path = require('path'); +const PathResolver = require("./PathResolver"); +const path = require("path"); /** * dataOutputPaths - Manages all output/write destinations for data - * + * * This class handles all directories where data writes files. * It uses PathResolver to ensure directories exist and are writable. * All paths are resolved to absolute paths and cached. 
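A short sketch of how the two path classes are typically consumed together; the directory values are illustrative, and the `DataOutputPaths` getters used here are the ones defined just below.

```javascript
const DataInputPaths = require("./DataInputPaths");
const DataOutputPaths = require("./DataOutputPaths");

// Constructor arguments fall back to the data_* environment variables and
// the defaults wired into each constructor when omitted.
async function resolveProjectPaths() {
  const inputs = new DataInputPaths("./sql", "./tests");
  const outputs = new DataOutputPaths("./migrations", "./build");

  // Read-side directories must already exist; this rejects if ./sql is missing.
  const sqlDir = await inputs.getSqlDir();

  // Write-side directories are resolved via PathResolver.resolveDirectoryForWrite.
  const migrationsDir = await outputs.getMigrationsDir();

  return { sqlDir, migrationsDir };
}

resolveProjectPaths().then(console.log).catch(console.error);
```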
@@ -22,18 +22,19 @@ class DataOutputPaths { buildDir = null, reportsDir = null, tempDir = null, - pathResolver = null + pathResolver = null, ) { this.pathResolver = pathResolver || new PathResolver(); - + // Store configuration this._config = { - migrationsDir: migrationsDir || process.env.data_MIGRATIONS_DIR || './migrations', - buildDir: buildDir || process.env.data_BUILD_DIR || './build', - reportsDir: reportsDir || process.env.data_REPORTS_DIR || './reports', - tempDir: tempDir || process.env.data_TEMP_DIR || './tmp' + migrationsDir: + migrationsDir || process.env.data_MIGRATIONS_DIR || "./migrations", + buildDir: buildDir || process.env.data_BUILD_DIR || "./build", + reportsDir: reportsDir || process.env.data_REPORTS_DIR || "./reports", + tempDir: tempDir || process.env.data_TEMP_DIR || "./tmp", }; - + // Cache for resolved paths this._resolvedPaths = {}; this._resolving = {}; // Prevent duplicate resolution attempts @@ -44,7 +45,7 @@ class DataOutputPaths { * @returns {Promise} Absolute path to migrations directory */ getMigrationsDir() { - return this._resolvePath('migrationsDir'); + return this._resolvePath("migrationsDir"); } /** @@ -52,7 +53,7 @@ class DataOutputPaths { * @returns {Promise} Absolute path to build directory */ getBuildDir() { - return this._resolvePath('buildDir'); + return this._resolvePath("buildDir"); } /** @@ -60,7 +61,7 @@ class DataOutputPaths { * @returns {Promise} Absolute path to reports directory */ getReportsDir() { - return this._resolvePath('reportsDir'); + return this._resolvePath("reportsDir"); } /** @@ -68,7 +69,7 @@ class DataOutputPaths { * @returns {Promise} Absolute path to temp directory */ getTempDir() { - return this._resolvePath('tempDir'); + return this._resolvePath("tempDir"); } /** @@ -158,19 +159,22 @@ class DataOutputPaths { } // Start resolution - this._resolving[key] = this.pathResolver.resolveDirectoryForWrite(this._config[key]) - .then(resolved => { + this._resolving[key] = this.pathResolver + .resolveDirectoryForWrite(this._config[key]) + .then((resolved) => { this._resolvedPaths[key] = resolved; delete this._resolving[key]; return resolved; }) - .catch(error => { + .catch((error) => { delete this._resolving[key]; - throw new Error(`Failed to resolve output path ${key}: ${error.message}`); + throw new Error( + `Failed to resolve output path ${key}: ${error.message}`, + ); }); return this._resolving[key]; } } -module.exports = DataOutputPaths; \ No newline at end of file +module.exports = DataOutputPaths; diff --git a/src/lib/DatabaseCommand.js b/src/lib/DatabaseCommand.js index 23b169f..9e5d90a 100644 --- a/src/lib/DatabaseCommand.js +++ b/src/lib/DatabaseCommand.js @@ -1,8 +1,8 @@ -const Command = require('./Command'); +const Command = require("./Command"); /** * DatabaseCommand - Base class for commands that interact with the database - * + * * This class provides database connection handling for commands that need * to execute SQL queries or manage database state. 
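As a hedged sketch of how a subclass sits on top of this base class: the table name and credentials below are placeholders, the `query()` helper it calls is defined in the class body that follows, and the example assumes a pg-style result object with a `rows` array.

```javascript
const DatabaseCommand = require("./DatabaseCommand");

// Illustrative only: count rows in a table via the query() helper below.
class CountUsersCommand extends DatabaseCommand {
  async performExecute() {
    this.progress("Counting rows in users");
    const result = await this.query("SELECT COUNT(*)::int AS count FROM users");
    this.success("Count complete", { count: result.rows[0].count });
    return result.rows[0].count;
  }
}

// databaseUrl is the first constructor argument; the overridden execute()
// below closes the connection in its finally block.
const command = new CountUsersCommand(process.env.DATABASE_URL, null, null, console);
command.execute().catch((error) => {
  console.error(error);
  process.exitCode = 1;
});
```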
*/ @@ -22,35 +22,35 @@ class DatabaseCommand extends Command { anonKey = null, logger = null, isProd = false, - requiresConfirmation = true + requiresConfirmation = true, ) { // Call parent with minimal config super(null, logger, isProd, null); - + // Store database credentials this.databaseUrl = databaseUrl; this.serviceRoleKey = serviceRoleKey; this.anonKey = anonKey; - + // Set confirmation requirement based on params this.requiresProductionConfirmation = isProd && requiresConfirmation; - + // Database connection will be created on demand this.db = null; } - + /** * Get database connection (lazy initialization) * @returns {Object} Database connection */ async getDatabase() { if (!this.db) { - const DatabaseUtils = require('./db-utils'); + const DatabaseUtils = require("./db-utils"); this.db = await DatabaseUtils.createConnection(this.databaseUrl); } return this.db; } - + /** * Execute a SQL query * @param {string} sql - The SQL query to execute @@ -61,18 +61,18 @@ class DatabaseCommand extends Command { const db = await this.getDatabase(); return db.query(sql, params); } - + /** * Execute a SQL file * @param {string} filePath - Path to the SQL file * @returns {Promise} Query result */ async executeFile(filePath) { - const fs = require('fs').promises; - const sql = await fs.readFile(filePath, 'utf8'); + const fs = require("fs").promises; + const sql = await fs.readFile(filePath, "utf8"); return this.query(sql); } - + /** * Clean up database connection */ @@ -82,7 +82,7 @@ class DatabaseCommand extends Command { this.db = null; } } - + /** * Override execute to ensure cleanup */ @@ -95,4 +95,4 @@ class DatabaseCommand extends Command { } } -module.exports = DatabaseCommand; \ No newline at end of file +module.exports = DatabaseCommand; diff --git a/src/lib/DiffEngine.js b/src/lib/DiffEngine.js index b3820ae..8e260b2 100644 --- a/src/lib/DiffEngine.js +++ b/src/lib/DiffEngine.js @@ -1,9 +1,9 @@ -const EventEmitter = require('events'); -const DatabaseUtils = require('./db-utils'); +const EventEmitter = require("events"); +const DatabaseUtils = require("./db-utils"); /** * DiffEngine - Event-driven database schema difference generator - * + * * Emits events: * - 'start': When diff generation begins * - 'progress': During processing with step information @@ -13,15 +13,15 @@ const DatabaseUtils = require('./db-utils'); class DiffEngine extends EventEmitter { constructor(config = {}) { super(); - + // Configuration with defaults this.config = { // Diff generation options includeData: false, - excludeSchemas: ['information_schema', 'pg_catalog'], + excludeSchemas: ["information_schema", "pg_catalog"], includeDropStatements: true, sortOutput: true, - ...config + ...config, }; // Internal state @@ -37,7 +37,7 @@ class DiffEngine extends EventEmitter { /** * Generate schema differences between current and desired database states - * + * * @param {Object} currentDb - Current database connection/state * @param {Object} desiredDb - Desired database connection/state * @param {Object} options - Override options for this diff operation @@ -45,7 +45,9 @@ class DiffEngine extends EventEmitter { */ generateDiff(currentDb, desiredDb, options = {}) { if (this.isRunning) { - throw new Error('DiffEngine is already running. Wait for current operation to complete.'); + throw new Error( + "DiffEngine is already running. 
Wait for current operation to complete.", + ); } this.isRunning = true; @@ -57,36 +59,36 @@ class DiffEngine extends EventEmitter { const mergedOptions = { ...this.config, ...options }; // Emit start event - this.emit('start', { + this.emit("start", { currentDb: this._sanitizeDbInfo(currentDb), desiredDb: this._sanitizeDbInfo(desiredDb), options: mergedOptions, - timestamp: this.startTime + timestamp: this.startTime, }); // Emit initial progress - this.emit('progress', { - step: 'initializing', - message: 'Preparing diff generation', - timestamp: new Date() + this.emit("progress", { + step: "initializing", + message: "Preparing diff generation", + timestamp: new Date(), }); // Validate inputs this._validateDatabaseInputs(currentDb, desiredDb); - this.emit('progress', { - step: 'validation_complete', - message: 'Database inputs validated', - timestamp: new Date() + this.emit("progress", { + step: "validation_complete", + message: "Database inputs validated", + timestamp: new Date(), }); // TODO: Actual diff logic will be implemented in P1.T005 // For now, return a placeholder result - - this.emit('progress', { - step: 'analysis_complete', - message: 'Schema analysis completed', - timestamp: new Date() + + this.emit("progress", { + step: "analysis_complete", + message: "Schema analysis completed", + timestamp: new Date(), }); // Placeholder diff result @@ -98,8 +100,8 @@ class DiffEngine extends EventEmitter { tablesAnalyzed: 0, functionsAnalyzed: 0, generatedAt: new Date(), - generationTimeMs: 0 - } + generationTimeMs: 0, + }, }; this.lastDiff = diffResult; @@ -107,24 +109,23 @@ class DiffEngine extends EventEmitter { diffResult.metadata.generationTimeMs = this.endTime - this.startTime; // Emit completion - this.emit('complete', { + this.emit("complete", { diff: diffResult, duration: diffResult.metadata.generationTimeMs, - timestamp: this.endTime + timestamp: this.endTime, }); return diffResult; - } catch (error) { this.endTime = new Date(); - + // Emit error event - this.emit('error', { + this.emit("error", { error, message: error.message, stack: error.stack, duration: this.endTime - this.startTime, - timestamp: this.endTime + timestamp: this.endTime, }); throw error; @@ -154,15 +155,15 @@ class DiffEngine extends EventEmitter { * @param {string} suffix - Suffix for the database name (optional) * @returns {Promise} Connection string for the created temp database */ - async createTempDatabase(suffix = 'default') { + async createTempDatabase(suffix = "default") { try { // Generate unique database name const dbName = this.dbUtils.generateTempDatabaseName(suffix); - - this.emit('progress', { - step: 'temp_db_creating', + + this.emit("progress", { + step: "temp_db_creating", message: `Creating temporary database: ${dbName}`, - timestamp: new Date() + timestamp: new Date(), }); // Check if database already exists (shouldn't happen with timestamps, but safety first) @@ -173,40 +174,38 @@ class DiffEngine extends EventEmitter { // Create the database const adminClient = this.dbUtils.createAdminClient(); - + try { await adminClient.connect(); - + // Use identifier to prevent SQL injection await adminClient.query(`CREATE DATABASE "${dbName}"`); - + // Track the temp database for cleanup this.tempDatabases.add(dbName); - + const connectionString = this.dbUtils.getConnectionString(dbName); - - this.emit('progress', { - step: 'temp_db_created', + + this.emit("progress", { + step: "temp_db_created", message: `Temporary database created: ${dbName}`, database: dbName, connectionString, - timestamp: new 
Date() + timestamp: new Date(), }); return connectionString; - } finally { await adminClient.end(); } - } catch (error) { - this.emit('error', { + this.emit("error", { error, message: `Failed to create temporary database: ${error.message}`, - operation: 'createTempDatabase', - timestamp: new Date() + operation: "createTempDatabase", + timestamp: new Date(), }); - + throw error; } } @@ -218,68 +217,69 @@ class DiffEngine extends EventEmitter { */ async cleanupTempDatabase(dbName) { try { - this.emit('progress', { - step: 'temp_db_cleanup', + this.emit("progress", { + step: "temp_db_cleanup", message: `Cleaning up temporary database: ${dbName}`, database: dbName, - timestamp: new Date() + timestamp: new Date(), }); // Check if database exists before attempting cleanup const exists = await this.dbUtils.databaseExists(dbName); if (!exists) { - this.emit('progress', { - step: 'temp_db_not_found', + this.emit("progress", { + step: "temp_db_not_found", message: `Database ${dbName} does not exist, skipping cleanup`, database: dbName, - timestamp: new Date() + timestamp: new Date(), }); - + // Remove from tracking set regardless this.tempDatabases.delete(dbName); return true; } const adminClient = this.dbUtils.createAdminClient(); - + try { await adminClient.connect(); - + // Terminate all connections to the database first - await adminClient.query(` + await adminClient.query( + ` SELECT pg_terminate_backend(pid) FROM pg_stat_activity WHERE datname = $1 AND pid <> pg_backend_pid() - `, [dbName]); + `, + [dbName], + ); // Drop the database await adminClient.query(`DROP DATABASE IF EXISTS "${dbName}"`); - + // Remove from tracking set this.tempDatabases.delete(dbName); - - this.emit('progress', { - step: 'temp_db_cleaned', + + this.emit("progress", { + step: "temp_db_cleaned", message: `Temporary database cleaned up: ${dbName}`, database: dbName, - timestamp: new Date() + timestamp: new Date(), }); return true; - } finally { await adminClient.end(); } - } catch (error) { - this.emit('error', { + this.emit("error", { error, message: `Failed to cleanup temporary database ${dbName}: ${error.message}`, - operation: 'cleanupTempDatabase', + operation: "cleanupTempDatabase", database: dbName, - timestamp: new Date() + timestamp: new Date(), }); - + // Don't throw - cleanup should be non-fatal return false; } @@ -292,61 +292,59 @@ class DiffEngine extends EventEmitter { * @returns {Promise} Result of schema application */ async applySchemaToTemp(dbUrl, sqlContent) { - if (!sqlContent || typeof sqlContent !== 'string') { - throw new Error('SQL content must be a non-empty string'); + if (!sqlContent || typeof sqlContent !== "string") { + throw new Error("SQL content must be a non-empty string"); } // Extract database name from URL for logging const dbNameMatch = dbUrl.match(/\/([^/?]+)(?:\?|$)/); - const dbName = dbNameMatch ? dbNameMatch[1] : 'unknown'; + const dbName = dbNameMatch ? 
dbNameMatch[1] : "unknown"; try { - this.emit('progress', { - step: 'schema_applying', + this.emit("progress", { + step: "schema_applying", message: `Applying schema to database: ${dbName}`, database: dbName, sqlLength: sqlContent.length, - timestamp: new Date() + timestamp: new Date(), }); // Parse database URL to get connection parameters const url = new globalThis.URL(dbUrl); const client = this.dbUtils.createDatabaseClient(url.pathname.slice(1)); - + try { await client.connect(); - + // Apply the SQL schema using our utility method const result = await this.dbUtils.executeSql(client, sqlContent); - - this.emit('progress', { - step: 'schema_applied', + + this.emit("progress", { + step: "schema_applied", message: `Schema applied successfully to: ${dbName}`, database: dbName, statementsExecuted: result.statementCount, - timestamp: new Date() + timestamp: new Date(), }); return { success: true, database: dbName, statementsExecuted: result.statementCount, - results: result.results + results: result.results, }; - } finally { await client.end(); } - } catch (error) { - this.emit('error', { + this.emit("error", { error, message: `Failed to apply schema to ${dbName}: ${error.message}`, - operation: 'applySchemaToTemp', + operation: "applySchemaToTemp", database: dbName, - timestamp: new Date() + timestamp: new Date(), }); - + throw error; } } @@ -360,24 +358,24 @@ class DiffEngine extends EventEmitter { attempted: 0, successful: 0, failed: 0, - databases: [] + databases: [], }; - this.emit('progress', { - step: 'cleanup_all_starting', + this.emit("progress", { + step: "cleanup_all_starting", message: `Cleaning up ${this.tempDatabases.size} temporary databases`, count: this.tempDatabases.size, - timestamp: new Date() + timestamp: new Date(), }); // Convert to array to avoid mutation during iteration const databasesToCleanup = Array.from(this.tempDatabases); - + // Process all cleanup operations in parallel const cleanupPromises = databasesToCleanup.map(async (dbName) => { summary.attempted++; summary.databases.push(dbName); - + try { const success = await this.cleanupTempDatabase(dbName); if (success) { @@ -389,14 +387,14 @@ class DiffEngine extends EventEmitter { summary.failed++; } }); - + await Promise.all(cleanupPromises); - this.emit('progress', { - step: 'cleanup_all_complete', + this.emit("progress", { + step: "cleanup_all_complete", message: `Cleanup complete: ${summary.successful}/${summary.attempted} databases cleaned`, summary, - timestamp: new Date() + timestamp: new Date(), }); return summary; @@ -415,12 +413,16 @@ class DiffEngine extends EventEmitter { * @private */ _validateDatabaseInputs(currentDb, desiredDb) { - if (!currentDb || typeof currentDb !== 'object') { - throw new Error('currentDb parameter must be a valid database connection object'); + if (!currentDb || typeof currentDb !== "object") { + throw new Error( + "currentDb parameter must be a valid database connection object", + ); } - - if (!desiredDb || typeof desiredDb !== 'object') { - throw new Error('desiredDb parameter must be a valid database connection object'); + + if (!desiredDb || typeof desiredDb !== "object") { + throw new Error( + "desiredDb parameter must be a valid database connection object", + ); } } @@ -430,14 +432,14 @@ class DiffEngine extends EventEmitter { */ _sanitizeDbInfo(dbInfo) { if (!dbInfo) return null; - + return { - host: dbInfo.host || 'unknown', - port: dbInfo.port || 'unknown', - database: dbInfo.database || 'unknown', + host: dbInfo.host || "unknown", + port: dbInfo.port || "unknown", 
+ database: dbInfo.database || "unknown", // Never include passwords or sensitive connection info }; } } -module.exports = DiffEngine; \ No newline at end of file +module.exports = DiffEngine; diff --git a/src/lib/MigrationMetadata.js b/src/lib/MigrationMetadata.js index 5155f95..1687fea 100644 --- a/src/lib/MigrationMetadata.js +++ b/src/lib/MigrationMetadata.js @@ -1,5 +1,5 @@ -const fs = require('fs'); -const path = require('path'); +const fs = require("fs"); +const path = require("path"); /** * Migration metadata management class @@ -7,15 +7,15 @@ const path = require('path'); */ class MigrationMetadata { constructor(migrationPath) { - if (!migrationPath || typeof migrationPath !== 'string') { - throw new Error('migrationPath is required and must be a string'); + if (!migrationPath || typeof migrationPath !== "string") { + throw new Error("migrationPath is required and must be a string"); } - + this.migrationPath = migrationPath; - this.metadataFile = path.join(migrationPath, 'metadata.json'); + this.metadataFile = path.join(migrationPath, "metadata.json"); this.schema = this._getSchema(); } - + /** * Read metadata from metadata.json file * @returns {Object} Parsed metadata object @@ -24,14 +24,14 @@ class MigrationMetadata { if (!fs.existsSync(this.metadataFile)) { throw new Error(`Metadata file not found: ${this.metadataFile}`); } - + try { - const content = fs.readFileSync(this.metadataFile, 'utf8'); + const content = fs.readFileSync(this.metadataFile, "utf8"); const metadata = JSON.parse(content); - + // Validate the loaded metadata this.validate(metadata); - + return metadata; } catch (error) { if (error instanceof SyntaxError) { @@ -40,131 +40,152 @@ class MigrationMetadata { throw error; } } - + /** * Write metadata to metadata.json file with validation * @param {Object} metadata - Metadata object to write */ write(metadata) { - if (!metadata || typeof metadata !== 'object') { - throw new Error('Metadata must be an object'); + if (!metadata || typeof metadata !== "object") { + throw new Error("Metadata must be an object"); } - + // Validate before writing this.validate(metadata); - + // Ensure migration directory exists if (!fs.existsSync(this.migrationPath)) { fs.mkdirSync(this.migrationPath, { recursive: true }); } - + try { const content = JSON.stringify(metadata, null, 2); - fs.writeFileSync(this.metadataFile, content, 'utf8'); + fs.writeFileSync(this.metadataFile, content, "utf8"); } catch (error) { throw new Error(`Failed to write metadata file: ${error.message}`); } } - + /** * Validate metadata against schema * @param {Object} metadata - Metadata object to validate */ validate(metadata) { - if (!metadata || typeof metadata !== 'object') { - throw new Error('Metadata must be an object'); + if (!metadata || typeof metadata !== "object") { + throw new Error("Metadata must be an object"); } - + const errors = []; - + // Required fields - if (!metadata.id || typeof metadata.id !== 'string') { - errors.push('id is required and must be a string'); + if (!metadata.id || typeof metadata.id !== "string") { + errors.push("id is required and must be a string"); } - - if (!metadata.name || typeof metadata.name !== 'string') { - errors.push('name is required and must be a string'); + + if (!metadata.name || typeof metadata.name !== "string") { + errors.push("name is required and must be a string"); } - - if (!metadata.generated || typeof metadata.generated !== 'string') { - errors.push('generated is required and must be a string'); + + if (!metadata.generated || typeof 
metadata.generated !== "string") { + errors.push("generated is required and must be a string"); } else if (!this._isValidISO8601(metadata.generated)) { - errors.push('generated must be a valid ISO 8601 date string'); + errors.push("generated must be a valid ISO 8601 date string"); } - + // Status validation - const validStatuses = ['pending', 'tested', 'promoted']; + const validStatuses = ["pending", "tested", "promoted"]; if (!metadata.status || !validStatuses.includes(metadata.status)) { - errors.push(`status must be one of: ${validStatuses.join(', ')}`); + errors.push(`status must be one of: ${validStatuses.join(", ")}`); } - + // Testing object validation if (metadata.testing) { - if (typeof metadata.testing !== 'object') { - errors.push('testing must be an object'); + if (typeof metadata.testing !== "object") { + errors.push("testing must be an object"); } else { - if (metadata.testing.tested_at !== null && - (!metadata.testing.tested_at || !this._isValidISO8601(metadata.testing.tested_at))) { - errors.push('testing.tested_at must be null or valid ISO 8601 date string'); + if ( + metadata.testing.tested_at !== null && + (!metadata.testing.tested_at || + !this._isValidISO8601(metadata.testing.tested_at)) + ) { + errors.push( + "testing.tested_at must be null or valid ISO 8601 date string", + ); } - - if (metadata.testing.tests_passed !== undefined && - (!Number.isInteger(metadata.testing.tests_passed) || metadata.testing.tests_passed < 0)) { - errors.push('testing.tests_passed must be a non-negative integer'); + + if ( + metadata.testing.tests_passed !== undefined && + (!Number.isInteger(metadata.testing.tests_passed) || + metadata.testing.tests_passed < 0) + ) { + errors.push("testing.tests_passed must be a non-negative integer"); } - - if (metadata.testing.tests_failed !== undefined && - (!Number.isInteger(metadata.testing.tests_failed) || metadata.testing.tests_failed < 0)) { - errors.push('testing.tests_failed must be a non-negative integer'); + + if ( + metadata.testing.tests_failed !== undefined && + (!Number.isInteger(metadata.testing.tests_failed) || + metadata.testing.tests_failed < 0) + ) { + errors.push("testing.tests_failed must be a non-negative integer"); } } } - + // Promotion object validation if (metadata.promotion) { - if (typeof metadata.promotion !== 'object') { - errors.push('promotion must be an object'); + if (typeof metadata.promotion !== "object") { + errors.push("promotion must be an object"); } else { - if (metadata.promotion.promoted_at !== null && - (!metadata.promotion.promoted_at || !this._isValidISO8601(metadata.promotion.promoted_at))) { - errors.push('promotion.promoted_at must be null or valid ISO 8601 date string'); + if ( + metadata.promotion.promoted_at !== null && + (!metadata.promotion.promoted_at || + !this._isValidISO8601(metadata.promotion.promoted_at)) + ) { + errors.push( + "promotion.promoted_at must be null or valid ISO 8601 date string", + ); } - - if (metadata.promotion.promoted_by !== null && - (!metadata.promotion.promoted_by || typeof metadata.promotion.promoted_by !== 'string')) { - errors.push('promotion.promoted_by must be null or a non-empty string'); + + if ( + metadata.promotion.promoted_by !== null && + (!metadata.promotion.promoted_by || + typeof metadata.promotion.promoted_by !== "string") + ) { + errors.push( + "promotion.promoted_by must be null or a non-empty string", + ); } } } - + if (errors.length > 0) { - throw new Error(`Metadata validation failed:\n${errors.join('\n')}`); + throw new Error(`Metadata validation 
failed:\n${errors.join("\n")}`); } } - + /** * Partially update metadata with new values * @param {Object} updates - Object containing fields to update * @returns {Object} Updated metadata object */ update(updates) { - if (!updates || typeof updates !== 'object') { - throw new Error('Updates must be an object'); + if (!updates || typeof updates !== "object") { + throw new Error("Updates must be an object"); } - + // Read existing metadata const existing = this.read(); - + // Deep merge updates const updated = this._deepMerge(existing, updates); - + // Validate and write updated metadata this.validate(updated); this.write(updated); - + return updated; } - + /** * Create a new metadata object with default values * @param {string} id - Migration ID @@ -172,31 +193,31 @@ class MigrationMetadata { * @returns {Object} New metadata object */ static createDefault(id, name) { - if (!id || typeof id !== 'string') { - throw new Error('id is required and must be a string'); + if (!id || typeof id !== "string") { + throw new Error("id is required and must be a string"); } - - if (!name || typeof name !== 'string') { - throw new Error('name is required and must be a string'); + + if (!name || typeof name !== "string") { + throw new Error("name is required and must be a string"); } - + return { id, name, generated: new Date().toISOString(), - status: 'pending', + status: "pending", testing: { tested_at: null, tests_passed: 0, - tests_failed: 0 + tests_failed: 0, }, promotion: { promoted_at: null, - promoted_by: null - } + promoted_by: null, + }, }; } - + /** * Get the metadata schema definition * @returns {Object} Schema object @@ -204,32 +225,32 @@ class MigrationMetadata { */ _getSchema() { return { - type: 'object', - required: ['id', 'name', 'generated', 'status'], + type: "object", + required: ["id", "name", "generated", "status"], properties: { - id: { type: 'string' }, - name: { type: 'string' }, - generated: { type: 'string', format: 'date-time' }, - status: { enum: ['pending', 'tested', 'promoted'] }, + id: { type: "string" }, + name: { type: "string" }, + generated: { type: "string", format: "date-time" }, + status: { enum: ["pending", "tested", "promoted"] }, testing: { - type: 'object', + type: "object", properties: { - tested_at: { type: ['string', 'null'], format: 'date-time' }, - tests_passed: { type: 'integer', minimum: 0 }, - tests_failed: { type: 'integer', minimum: 0 } - } + tested_at: { type: ["string", "null"], format: "date-time" }, + tests_passed: { type: "integer", minimum: 0 }, + tests_failed: { type: "integer", minimum: 0 }, + }, }, promotion: { - type: 'object', + type: "object", properties: { - promoted_at: { type: ['string', 'null'], format: 'date-time' }, - promoted_by: { type: ['string', 'null'] } - } - } - } + promoted_at: { type: ["string", "null"], format: "date-time" }, + promoted_by: { type: ["string", "null"] }, + }, + }, + }, }; } - + /** * Validate ISO 8601 date string * @param {string} dateString - Date string to validate @@ -238,10 +259,13 @@ class MigrationMetadata { */ _isValidISO8601(dateString) { const date = new Date(dateString); - return date instanceof Date && !isNaN(date.getTime()) && - dateString === date.toISOString(); + return ( + date instanceof Date && + !isNaN(date.getTime()) && + dateString === date.toISOString() + ); } - + /** * Deep merge two objects * @param {Object} target - Target object @@ -251,19 +275,23 @@ class MigrationMetadata { */ _deepMerge(target, source) { const result = { ...target }; - + for (const key in source) { if 
(Object.prototype.hasOwnProperty.call(source, key)) { - if (source[key] && typeof source[key] === 'object' && !Array.isArray(source[key])) { + if ( + source[key] && + typeof source[key] === "object" && + !Array.isArray(source[key]) + ) { result[key] = this._deepMerge(result[key] || {}, source[key]); } else { result[key] = source[key]; } } } - + return result; } } -module.exports = MigrationMetadata; \ No newline at end of file +module.exports = MigrationMetadata; diff --git a/src/lib/OutputConfig.js b/src/lib/OutputConfig.js index 2a77db7..1f33ddb 100644 --- a/src/lib/OutputConfig.js +++ b/src/lib/OutputConfig.js @@ -1,12 +1,12 @@ /** * OutputConfig - Centralized path configuration for data - * + * * A proper class with typed properties for all paths. * Uses dependency injection - no singletons! */ -const path = require('path'); -const fs = require('fs'); +const path = require("path"); +const fs = require("fs"); class OutputConfig { constructor( @@ -17,7 +17,7 @@ class OutputConfig { cliSqlDir = null, cliFunctionsDir = null, cliOutputDir = null, - cliProjectRoot = null + cliProjectRoot = null, ) { // Initialize all path properties as direct class properties this.projectRoot = null; @@ -34,7 +34,7 @@ class OutputConfig { this.tempDir = null; this.logFile = null; this.errorLogFile = null; - + // Build configuration from various sources this._setDefaults(); this._applyAutoDetection(); @@ -50,7 +50,7 @@ class OutputConfig { cliTestsDir, cliSqlDir, cliFunctionsDir, - cliOutputDir + cliOutputDir, ); this._resolveAllPaths(); this._validatePaths(); @@ -58,85 +58,89 @@ class OutputConfig { _setDefaults() { const cwd = process.cwd(); - + this.projectRoot = cwd; - this.supabaseDir = path.join(cwd, 'supabase'); - this.migrationsDir = path.join(cwd, 'supabase', 'migrations'); - this.testsDir = path.join(cwd, 'supabase', 'tests'); - this.sqlDir = path.join(cwd, 'supabase', 'sql'); - this.functionsDir = path.join(cwd, 'supabase', 'functions'); - this.seedDir = path.join(cwd, 'supabase', 'seed'); - this.supabaseConfig = path.join(cwd, 'supabase', 'config.toml'); - this.dataConfig = path.join(cwd, '.datarc.json'); - this.buildDir = path.join(cwd, '.data', 'build'); - this.cacheDir = path.join(cwd, '.data', 'cache'); - this.tempDir = path.join(cwd, '.data', 'temp'); - this.logFile = path.join(cwd, '.data', 'data.log'); - this.errorLogFile = path.join(cwd, '.data', 'error.log'); + this.supabaseDir = path.join(cwd, "supabase"); + this.migrationsDir = path.join(cwd, "supabase", "migrations"); + this.testsDir = path.join(cwd, "supabase", "tests"); + this.sqlDir = path.join(cwd, "supabase", "sql"); + this.functionsDir = path.join(cwd, "supabase", "functions"); + this.seedDir = path.join(cwd, "supabase", "seed"); + this.supabaseConfig = path.join(cwd, "supabase", "config.toml"); + this.dataConfig = path.join(cwd, ".datarc.json"); + this.buildDir = path.join(cwd, ".data", "build"); + this.cacheDir = path.join(cwd, ".data", "cache"); + this.tempDir = path.join(cwd, ".data", "temp"); + this.logFile = path.join(cwd, ".data", "data.log"); + this.errorLogFile = path.join(cwd, ".data", "error.log"); } _applyAutoDetection() { const cwd = process.cwd(); - + // Check if we're inside a supabase directory - if (fs.existsSync(path.join(cwd, 'config.toml'))) { + if (fs.existsSync(path.join(cwd, "config.toml"))) { this.supabaseDir = cwd; this.projectRoot = path.dirname(cwd); this._updateRelativePaths(); return; } - + // Check if we have a supabase subdirectory - if (fs.existsSync(path.join(cwd, 'supabase', 'config.toml'))) { + 
if (fs.existsSync(path.join(cwd, "supabase", "config.toml"))) { this.projectRoot = cwd; - this.supabaseDir = path.join(cwd, 'supabase'); + this.supabaseDir = path.join(cwd, "supabase"); this._updateRelativePaths(); return; } - + // Search up the tree for a project root let searchDir = cwd; let depth = 0; const maxDepth = 5; - + while (depth < maxDepth) { const parentDir = path.dirname(searchDir); if (parentDir === searchDir) break; - - if (fs.existsSync(path.join(parentDir, 'supabase', 'config.toml'))) { + + if (fs.existsSync(path.join(parentDir, "supabase", "config.toml"))) { this.projectRoot = parentDir; - this.supabaseDir = path.join(parentDir, 'supabase'); + this.supabaseDir = path.join(parentDir, "supabase"); this._updateRelativePaths(); return; } - + searchDir = parentDir; depth++; } } _updateRelativePaths() { - this.migrationsDir = path.join(this.supabaseDir, 'migrations'); - this.testsDir = path.join(this.supabaseDir, 'tests'); - this.sqlDir = path.join(this.supabaseDir, 'sql'); - this.functionsDir = path.join(this.supabaseDir, 'functions'); - this.seedDir = path.join(this.supabaseDir, 'seed'); - this.supabaseConfig = path.join(this.supabaseDir, 'config.toml'); - this.dataConfig = path.join(this.projectRoot, '.datarc.json'); - this.buildDir = path.join(this.projectRoot, '.data', 'build'); - this.cacheDir = path.join(this.projectRoot, '.data', 'cache'); - this.tempDir = path.join(this.projectRoot, '.data', 'temp'); - this.logFile = path.join(this.projectRoot, '.data', 'data.log'); - this.errorLogFile = path.join(this.projectRoot, '.data', 'error.log'); + this.migrationsDir = path.join(this.supabaseDir, "migrations"); + this.testsDir = path.join(this.supabaseDir, "tests"); + this.sqlDir = path.join(this.supabaseDir, "sql"); + this.functionsDir = path.join(this.supabaseDir, "functions"); + this.seedDir = path.join(this.supabaseDir, "seed"); + this.supabaseConfig = path.join(this.supabaseDir, "config.toml"); + this.dataConfig = path.join(this.projectRoot, ".datarc.json"); + this.buildDir = path.join(this.projectRoot, ".data", "build"); + this.cacheDir = path.join(this.projectRoot, ".data", "cache"); + this.tempDir = path.join(this.projectRoot, ".data", "temp"); + this.logFile = path.join(this.projectRoot, ".data", "data.log"); + this.errorLogFile = path.join(this.projectRoot, ".data", "error.log"); } _applyEnvironmentVariables() { - if (process.env.data_PROJECT_ROOT) this.projectRoot = process.env.data_PROJECT_ROOT; - if (process.env.data_SUPABASE_DIR) this.supabaseDir = process.env.data_SUPABASE_DIR; - if (process.env.data_MIGRATIONS_DIR) this.migrationsDir = process.env.data_MIGRATIONS_DIR; + if (process.env.data_PROJECT_ROOT) + this.projectRoot = process.env.data_PROJECT_ROOT; + if (process.env.data_SUPABASE_DIR) + this.supabaseDir = process.env.data_SUPABASE_DIR; + if (process.env.data_MIGRATIONS_DIR) + this.migrationsDir = process.env.data_MIGRATIONS_DIR; if (process.env.data_TESTS_DIR) this.testsDir = process.env.data_TESTS_DIR; if (process.env.data_SQL_DIR) this.sqlDir = process.env.data_SQL_DIR; - if (process.env.data_FUNCTIONS_DIR) this.functionsDir = process.env.data_FUNCTIONS_DIR; + if (process.env.data_FUNCTIONS_DIR) + this.functionsDir = process.env.data_FUNCTIONS_DIR; if (process.env.data_BUILD_DIR) this.buildDir = process.env.data_BUILD_DIR; if (process.env.data_CACHE_DIR) this.cacheDir = process.env.data_CACHE_DIR; if (process.env.data_LOG_FILE) this.logFile = process.env.data_LOG_FILE; @@ -144,23 +148,26 @@ class OutputConfig { _loadConfigFile(configPath) { const 
configFile = configPath || this.dataConfig; - + if (!fs.existsSync(configFile)) { return; } - + try { - const config = JSON.parse(fs.readFileSync(configFile, 'utf8')); - + const config = JSON.parse(fs.readFileSync(configFile, "utf8")); + if (config.paths) { Object.assign(this, config.paths); } - + if (config.directories) { Object.assign(this, config.directories); } } catch (error) { - console.warn(`Warning: Could not parse config file ${configFile}:`, error.message); + console.warn( + `Warning: Could not parse config file ${configFile}:`, + error.message, + ); } } @@ -171,7 +178,7 @@ class OutputConfig { testsDir, sqlDir, functionsDir, - outputDir + outputDir, ) { if (projectRoot) this.projectRoot = projectRoot; if (supabaseDir) this.supabaseDir = supabaseDir; @@ -184,14 +191,28 @@ class OutputConfig { _resolveAllPaths() { const pathProps = [ - 'projectRoot', 'supabaseDir', 'migrationsDir', 'testsDir', - 'sqlDir', 'functionsDir', 'seedDir', 'supabaseConfig', - 'dataConfig', 'buildDir', 'cacheDir', 'tempDir', - 'logFile', 'errorLogFile' + "projectRoot", + "supabaseDir", + "migrationsDir", + "testsDir", + "sqlDir", + "functionsDir", + "seedDir", + "supabaseConfig", + "dataConfig", + "buildDir", + "cacheDir", + "tempDir", + "logFile", + "errorLogFile", ]; - + for (const prop of pathProps) { - if (this[prop] && typeof this[prop] === 'string' && !path.isAbsolute(this[prop])) { + if ( + this[prop] && + typeof this[prop] === "string" && + !path.isAbsolute(this[prop]) + ) { this[prop] = path.resolve(this[prop]); } } @@ -202,9 +223,9 @@ class OutputConfig { this.buildDir, this.cacheDir, this.tempDir, - this.migrationsDir + this.migrationsDir, ]; - + for (const dir of createIfMissing) { if (dir && !fs.existsSync(dir)) { try { @@ -227,30 +248,36 @@ class OutputConfig { } debug() { - console.log('\nOutputConfig Paths:'); - console.log('═'.repeat(60)); - + console.log("\nOutputConfig Paths:"); + console.log("═".repeat(60)); + const categories = { - 'Core': ['projectRoot', 'supabaseDir'], - 'Supabase': ['migrationsDir', 'testsDir', 'sqlDir', 'functionsDir', 'seedDir'], - 'Config': ['supabaseConfig', 'dataConfig'], - 'Output': ['buildDir', 'cacheDir', 'tempDir'], - 'Logs': ['logFile', 'errorLogFile'] + Core: ["projectRoot", "supabaseDir"], + Supabase: [ + "migrationsDir", + "testsDir", + "sqlDir", + "functionsDir", + "seedDir", + ], + Config: ["supabaseConfig", "dataConfig"], + Output: ["buildDir", "cacheDir", "tempDir"], + Logs: ["logFile", "errorLogFile"], }; - + for (const [category, props] of Object.entries(categories)) { console.log(`\n${category}:`); for (const prop of props) { const value = this[prop]; const exists = value && fs.existsSync(value); - const mark = exists ? '✓' : '✗'; - const display = this.getRelative(prop) || value || '(not set)'; + const mark = exists ? 
"✓" : "✗"; + const display = this.getRelative(prop) || value || "(not set)"; console.log(` ${mark} ${prop}: ${display}`); } } - - console.log('\n' + '═'.repeat(60) + '\n'); + + console.log("\n" + "═".repeat(60) + "\n"); } } -module.exports = OutputConfig; \ No newline at end of file +module.exports = OutputConfig; diff --git a/src/lib/PathResolver.js b/src/lib/PathResolver.js index 90d886b..90e134b 100644 --- a/src/lib/PathResolver.js +++ b/src/lib/PathResolver.js @@ -1,6 +1,6 @@ -const path = require('path'); -const fs = require('fs'); -const { promisify } = require('util'); +const path = require("path"); +const fs = require("fs"); +const { promisify } = require("util"); /** * A utility class for resolving and ensuring the existence of file and directory paths. @@ -27,7 +27,7 @@ class PathResolver { */ async resolveDirectoryForRead(dirPath) { const absolutePath = path.resolve(dirPath); - + try { await this.fsAccess(absolutePath, this.fs.constants.R_OK); const stats = await this.fsStat(absolutePath); @@ -36,10 +36,12 @@ class PathResolver { } return absolutePath; } catch (error) { - if (error.code === 'ENOENT') { - throw new Error(`Directory does not exist for reading: ${absolutePath}`); + if (error.code === "ENOENT") { + throw new Error( + `Directory does not exist for reading: ${absolutePath}`, + ); } - if (error.code === 'EACCES') { + if (error.code === "EACCES") { throw new Error(`Directory is not readable: ${absolutePath}`); } throw error; @@ -54,19 +56,21 @@ class PathResolver { */ async resolveDirectoryForWrite(dirPath) { const absolutePath = path.resolve(dirPath); - + try { // Try to create the directory (will succeed if it already exists) await this.fsMkdir(absolutePath, { recursive: true }); - + // Verify write access await this.fsAccess(absolutePath, this.fs.constants.W_OK); return absolutePath; } catch (error) { - if (error.code === 'EACCES') { + if (error.code === "EACCES") { throw new Error(`Directory is not writable: ${absolutePath}`); } - throw new Error(`Failed to create/access directory for writing: ${absolutePath} - ${error.message}`); + throw new Error( + `Failed to create/access directory for writing: ${absolutePath} - ${error.message}`, + ); } } @@ -78,7 +82,7 @@ class PathResolver { */ async resolveFileForRead(filePath) { const absolutePath = path.resolve(filePath); - + try { await this.fsAccess(absolutePath, this.fs.constants.R_OK); const stats = await this.fsStat(absolutePath); @@ -87,10 +91,10 @@ class PathResolver { } return absolutePath; } catch (error) { - if (error.code === 'ENOENT') { + if (error.code === "ENOENT") { throw new Error(`File does not exist for reading: ${absolutePath}`); } - if (error.code === 'EACCES') { + if (error.code === "EACCES") { throw new Error(`File is not readable: ${absolutePath}`); } throw error; @@ -106,24 +110,24 @@ class PathResolver { async resolveFileForWrite(filePath) { const absolutePath = path.resolve(filePath); const parentDir = path.dirname(absolutePath); - + // Ensure parent directory exists and is writable await this.resolveDirectoryForWrite(parentDir); - + // Check if file exists and is writable, or if parent dir is writable for new file try { await this.fsAccess(absolutePath, this.fs.constants.W_OK); } catch (error) { - if (error.code === 'ENOENT') { + if (error.code === "ENOENT") { // File doesn't exist, that's OK for writing, just check parent dir // (already checked above) - } else if (error.code === 'EACCES') { + } else if (error.code === "EACCES") { throw new Error(`File exists but is not writable: 
${absolutePath}`); } else { throw error; } } - + return absolutePath; } diff --git a/src/lib/SupabaseCommand.js b/src/lib/SupabaseCommand.js index 1e4b6d4..ab4ea6f 100644 --- a/src/lib/SupabaseCommand.js +++ b/src/lib/SupabaseCommand.js @@ -1,12 +1,12 @@ /** * SupabaseCommand - Base class for commands that use Supabase API - * + * * Replaces raw PostgreSQL connections with Supabase client * Provides automatic connection management and cleanup */ -const Command = require('./Command'); -const { createClient } = require('@supabase/supabase-js'); +const Command = require("./Command"); +const { createClient } = require("@supabase/supabase-js"); class SupabaseCommand extends Command { /** @@ -22,27 +22,31 @@ class SupabaseCommand extends Command { serviceRoleKey = null, logger = null, isProd = false, - requiresConfirmation = true + requiresConfirmation = true, ) { super(null, logger, isProd, null); - + // Get Supabase credentials from params or environment - this.supabaseUrl = supabaseUrl || process.env.SUPABASE_URL || 'http://localhost:54321'; - this.serviceRoleKey = serviceRoleKey || process.env.SUPABASE_SERVICE_ROLE_KEY; + this.supabaseUrl = + supabaseUrl || process.env.SUPABASE_URL || "http://localhost:54321"; + this.serviceRoleKey = + serviceRoleKey || process.env.SUPABASE_SERVICE_ROLE_KEY; this.anonKey = process.env.SUPABASE_ANON_KEY; - + // Validate we have necessary credentials if (!this.serviceRoleKey && !this.anonKey) { - throw new Error('Either SUPABASE_SERVICE_ROLE_KEY or SUPABASE_ANON_KEY is required'); + throw new Error( + "Either SUPABASE_SERVICE_ROLE_KEY or SUPABASE_ANON_KEY is required", + ); } - + // Set confirmation requirement this.requiresProductionConfirmation = isProd && requiresConfirmation; - + // Supabase client will be created on demand this.supabase = null; } - + /** * Get Supabase client (lazy initialization) * @param {boolean} useServiceRole - Use service role key (default: true) @@ -51,30 +55,32 @@ class SupabaseCommand extends Command { getSupabase(useServiceRole = true) { if (!this.supabase) { const key = useServiceRole ? this.serviceRoleKey : this.anonKey; - + if (!key) { - throw new Error(`${useServiceRole ? 'Service role' : 'Anon'} key not configured`); + throw new Error( + `${useServiceRole ? 
"Service role" : "Anon"} key not configured`, + ); } - + this.supabase = createClient(this.supabaseUrl, key, { auth: { persistSession: false, - autoRefreshToken: false + autoRefreshToken: false, }, db: { - schema: 'public' - } + schema: "public", + }, }); - - this.progress('Supabase client initialized'); + + this.progress("Supabase client initialized"); } return this.supabase; } - + /** * Execute arbitrary SQL using Supabase RPC * Requires an exec_sql function in your database: - * + * * CREATE OR REPLACE FUNCTION exec_sql(sql text) * RETURNS json * LANGUAGE plpgsql @@ -94,20 +100,20 @@ class SupabaseCommand extends Command { */ async executeSql(sql) { const supabase = this.getSupabase(true); // Need service role for DDL - - const { data, error } = await supabase.rpc('exec_sql', { sql }); - + + const { data, error } = await supabase.rpc("exec_sql", { sql }); + if (error) { throw new Error(`SQL execution failed: ${error.message}`); } - + if (data && !data.success) { throw new Error(`SQL error: ${data.error}`); } - + return data; } - + /** * Call an RPC function * @param {string} functionName - Name of the RPC function @@ -117,16 +123,16 @@ class SupabaseCommand extends Command { */ async rpc(functionName, params = {}, useServiceRole = false) { const supabase = this.getSupabase(useServiceRole); - + const { data, error } = await supabase.rpc(functionName, params); - + if (error) { throw new Error(`RPC ${functionName} failed: ${error.message}`); } - + return data; } - + /** * Query a table using Supabase client * @param {string} table - Table name @@ -136,7 +142,7 @@ class SupabaseCommand extends Command { const supabase = this.getSupabase(); return supabase.from(table); } - + /** * Clean up Supabase connection */ @@ -145,19 +151,19 @@ class SupabaseCommand extends Command { try { // Sign out if authenticated await this.supabase.auth.signOut(); - + // Remove all realtime channels this.supabase.removeAllChannels(); - - this.progress('Supabase client cleaned up'); + + this.progress("Supabase client cleaned up"); } catch (error) { this.warn(`Cleanup warning: ${error.message}`); } - + this.supabase = null; } } - + /** * Override execute to ensure cleanup */ @@ -168,7 +174,7 @@ class SupabaseCommand extends Command { await this.cleanup(); } } - + /** * Helper to create a temporary schema for testing * @param {string} schemaName - Name for the schema (optional) @@ -176,25 +182,27 @@ class SupabaseCommand extends Command { */ async createTempSchema(schemaName = null) { const name = schemaName || `"@data.temp.${Math.floor(Date.now() / 1000)}"`; - + await this.executeSql(`CREATE SCHEMA IF NOT EXISTS ${name}`); this.success(`Created temporary schema: ${name}`); - + return name; } - + /** * Helper to drop a schema * @param {string} schemaName - Name of schema to drop * @param {boolean} cascade - Use CASCADE (default: true) */ async dropSchema(schemaName, cascade = true) { - const cascadeClause = cascade ? 'CASCADE' : ''; - - await this.executeSql(`DROP SCHEMA IF EXISTS ${schemaName} ${cascadeClause}`); + const cascadeClause = cascade ? 
"CASCADE" : ""; + + await this.executeSql( + `DROP SCHEMA IF EXISTS ${schemaName} ${cascadeClause}`, + ); this.success(`Dropped schema: ${schemaName}`); } - + /** * Check if we have exec_sql function available * @returns {Promise} True if exec_sql exists @@ -202,16 +210,16 @@ class SupabaseCommand extends Command { async hasExecSqlFunction() { try { const supabase = this.getSupabase(true); - const { error } = await supabase.rpc('exec_sql', { - sql: 'SELECT 1' + const { error } = await supabase.rpc("exec_sql", { + sql: "SELECT 1", }); - + return !error; } catch { return false; } } - + /** * Install exec_sql function if needed * This allows arbitrary SQL execution via RPC @@ -220,8 +228,10 @@ class SupabaseCommand extends Command { if (await this.hasExecSqlFunction()) { return; } - - this.warn('exec_sql function not found. You need to add it to your migrations:'); + + this.warn( + "exec_sql function not found. You need to add it to your migrations:", + ); this.warn(` CREATE OR REPLACE FUNCTION exec_sql(sql text) RETURNS json @@ -239,9 +249,9 @@ EXCEPTION END; $$; `); - - throw new Error('exec_sql function required for DDL operations'); + + throw new Error("exec_sql function required for DDL operations"); } } -module.exports = SupabaseCommand; \ No newline at end of file +module.exports = SupabaseCommand; diff --git a/src/lib/SupabaseTestCommand.js b/src/lib/SupabaseTestCommand.js index 4cf5ea8..59313fa 100644 --- a/src/lib/SupabaseTestCommand.js +++ b/src/lib/SupabaseTestCommand.js @@ -1,14 +1,14 @@ /** * SupabaseTestCommand - Base class for test operations using Supabase API - * + * * Replaces TestCommand's raw PostgreSQL with Supabase client */ -const SupabaseCommand = require('./SupabaseCommand'); -const PathResolver = require('./PathResolver'); -const fs = require('fs').promises; -const path = require('path'); -const { glob } = require('glob'); +const SupabaseCommand = require("./SupabaseCommand"); +const PathResolver = require("./PathResolver"); +const fs = require("fs").promises; +const path = require("path"); +const { glob } = require("glob"); class SupabaseTestCommand extends SupabaseCommand { /** @@ -28,47 +28,50 @@ class SupabaseTestCommand extends SupabaseCommand { outputDir, logger = null, isProd = false, - pathResolver = null + pathResolver = null, ) { // Call parent with Supabase config super(supabaseUrl, serviceRoleKey, logger, isProd, false); - + // Initialize path resolver - this.pathResolver = pathResolver || new PathResolver({ - testsDir: testsDir || path.join(process.cwd(), 'supabase', 'test'), - outputDir: outputDir || path.join(process.cwd(), 'supabase', 'test-output') - }); - + this.pathResolver = + pathResolver || + new PathResolver({ + testsDir: testsDir || path.join(process.cwd(), "supabase", "test"), + outputDir: + outputDir || path.join(process.cwd(), "supabase", "test-output"), + }); + // Store resolved paths - this.testsDir = this.pathResolver.resolve('testsDir'); - this.outputDir = this.pathResolver.resolve('outputDir'); - + this.testsDir = this.pathResolver.resolve("testsDir"); + this.outputDir = this.pathResolver.resolve("outputDir"); + // Test configuration this.testConfig = { timeout: 30000, parallel: true, - maxConcurrency: 5 + maxConcurrency: 5, }; } - + /** * List test files in the tests directory * @param {string} pattern - Glob pattern (default: '**\/*.sql') * @returns {Promise} List of test file paths */ async listTestFiles(pattern) { - if (!pattern) pattern = '**/*.sql'; + if (!pattern) pattern = "**/*.sql"; const searchPattern = 
path.join(this.testsDir, pattern); const files = await glob(searchPattern); return files.sort(); } - + /** * Discover test functions in the database * @param {string} schema - Schema to search (default: 'test') * @returns {Promise} List of test function names */ - async discoverTestFunctions(schema = 'test') { + async discoverTestFunctions(schema = "test") { try { const sql = ` SELECT routine_name @@ -77,49 +80,49 @@ class SupabaseTestCommand extends SupabaseCommand { AND routine_name LIKE '%test%' ORDER BY routine_name `; - - const result = await this.executeSql(sql.replace('$1', `'${schema}'`)); - + + const result = await this.executeSql(sql.replace("$1", `'${schema}'`)); + if (!result || !result.data) { return []; } - - return result.data.map(row => row.routine_name); + + return result.data.map((row) => row.routine_name); } catch (error) { this.warn(`Failed to discover test functions: ${error.message}`); return []; } } - + /** * Run a test function and get TAP output * @param {string} functionName - Name of test function * @param {string} schema - Schema containing the function (default: 'test') * @returns {Promise} Test results */ - async runTestFunction(functionName, _schema = 'test') { + async runTestFunction(functionName, _schema = "test") { try { // Validate function name to prevent SQL injection if (!/^[a-zA-Z_][a-zA-Z0-9_]*$/.test(functionName)) { throw new Error(`Invalid function name: ${functionName}`); } - + const result = await this.rpc(functionName, {}, true); - + return { success: true, function: functionName, - output: result + output: result, }; } catch (error) { return { success: false, function: functionName, - error: error.message + error: error.message, }; } } - + /** * Compile test files into a single migration * @param {Object} options - Compilation options @@ -127,73 +130,79 @@ class SupabaseTestCommand extends SupabaseCommand { */ async compileTests(_options = {}) { const testFiles = await this.listTestFiles(); - + if (testFiles.length === 0) { - throw new Error('No test files found'); + throw new Error("No test files found"); } - - let compiledSql = '-- Compiled test migration\n\n'; - + + let compiledSql = "-- Compiled test migration\n\n"; + // Add pgTAP extension - compiledSql += 'CREATE EXTENSION IF NOT EXISTS pgtap;\n\n'; - + compiledSql += "CREATE EXTENSION IF NOT EXISTS pgtap;\n\n"; + // Compile each test file const filePromises = testFiles.map(async (filePath) => { - const content = await fs.readFile(filePath, 'utf8'); + const content = await fs.readFile(filePath, "utf8"); const fileName = path.basename(filePath); - + return `-- Source: ${fileName}\n${content}\n\n`; }); - + const fileContents = await Promise.all(filePromises); - compiledSql += fileContents.join(''); - + compiledSql += fileContents.join(""); + // Write to output - const timestamp = new Date().toISOString().replace(/[:.]/g, '').slice(0, 15); - const outputFile = path.join(this.outputDir, `${timestamp}_compiled_tests.sql`); - + const timestamp = new Date() + .toISOString() + .replace(/[:.]/g, "") + .slice(0, 15); + const outputFile = path.join( + this.outputDir, + `${timestamp}_compiled_tests.sql`, + ); + await fs.mkdir(this.outputDir, { recursive: true }); await fs.writeFile(outputFile, compiledSql); - + return { outputFile, filesCompiled: testFiles.length, - size: compiledSql.length + size: compiledSql.length, }; } - + /** * Create a temporary test schema * @returns {Promise} Schema name */ async createTestSchema() { const schemaName = `"@data.tests.${Math.floor(Date.now() / 1000)}"`; - + 
await this.executeSql(`CREATE SCHEMA IF NOT EXISTS ${schemaName}`); this.success(`Created test schema: ${schemaName}`); - + return schemaName; } - + /** * Apply migration to test schema * @param {string} schemaName - Target schema * @param {string} migrationFile - Path to migration file */ async applyMigrationToSchema(schemaName, migrationFile) { - const migrationSql = await fs.readFile(migrationFile, 'utf8'); - + const migrationSql = await fs.readFile(migrationFile, "utf8"); + // Wrap in schema context const wrappedSql = ` SET search_path TO ${schemaName}, public; ${migrationSql} SET search_path TO public; `; - + await this.executeSql(wrappedSql); this.success(`Applied migration to ${schemaName}`); } - + /** * Clean up test schema * @param {string} schemaName - Schema to drop @@ -201,7 +210,7 @@ class SupabaseTestCommand extends SupabaseCommand { async cleanupTestSchema(schemaName) { await this.dropSchema(schemaName, true); } - + /** * Parse TAP output * @param {Array|string} output - TAP output @@ -211,31 +220,31 @@ class SupabaseTestCommand extends SupabaseCommand { if (!output) { return { total: 0, passed: 0, failed: 0, skipped: 0 }; } - - const lines = Array.isArray(output) ? output : output.split('\n'); + + const lines = Array.isArray(output) ? output : output.split("\n"); let passed = 0; let failed = 0; let skipped = 0; - + for (const line of lines) { - const str = typeof line === 'object' ? JSON.stringify(line) : line; - - if (str.includes('ok ')) { + const str = typeof line === "object" ? JSON.stringify(line) : line; + + if (str.includes("ok ")) { passed++; - } else if (str.includes('not ok ')) { + } else if (str.includes("not ok ")) { failed++; - } else if (str.includes('# SKIP')) { + } else if (str.includes("# SKIP")) { skipped++; } } - + return { total: passed + failed + skipped, passed, failed, - skipped + skipped, }; } } -module.exports = SupabaseTestCommand; \ No newline at end of file +module.exports = SupabaseTestCommand; diff --git a/src/lib/TestCommand.js b/src/lib/TestCommand.js index 28ff0da..ea3becd 100644 --- a/src/lib/TestCommand.js +++ b/src/lib/TestCommand.js @@ -1,9 +1,9 @@ -const DatabaseCommand = require('./DatabaseCommand'); -const PathResolver = require('./PathResolver'); +const DatabaseCommand = require("./DatabaseCommand"); +const PathResolver = require("./PathResolver"); /** * TestCommand - Base class for test operations - * + * * Commands that compile and run tests need both database access * and file system operations. 
*/ @@ -25,22 +25,22 @@ class TestCommand extends DatabaseCommand { outputDir, logger = null, isProd = false, - pathResolver = null + pathResolver = null, ) { // Call parent with database config super(databaseUrl, serviceRoleKey, null, logger, isProd); - + // Store test paths this.testsDir = testsDir; this.outputDir = outputDir; - + // Path resolver for ensuring directories exist this.pathResolver = pathResolver || new PathResolver(); - + // Test operations typically don't need production confirmation this.requiresProductionConfirmation = false; } - + /** * Ensure tests directory exists and is readable * @returns {Promise} Resolved tests directory path @@ -48,7 +48,7 @@ class TestCommand extends DatabaseCommand { async getTestsDir() { return await this.pathResolver.resolveDirectoryForRead(this.testsDir); } - + /** * Ensure output directory exists and is writable * @returns {Promise} Resolved output directory path @@ -56,77 +56,79 @@ class TestCommand extends DatabaseCommand { async getOutputDir() { return await this.pathResolver.resolveDirectoryForWrite(this.outputDir); } - + /** * Get a specific test file path * @param {string} filename - The filename relative to tests dir * @returns {Promise} Resolved file path */ async getTestFile(filename) { - const path = require('path'); + const path = require("path"); const dir = await this.getTestsDir(); return await this.pathResolver.resolveFileForRead(path.join(dir, filename)); } - + /** * Get a specific output file path * @param {string} filename - The filename relative to output dir * @returns {Promise} Resolved file path */ async getOutputFile(filename) { - const path = require('path'); + const path = require("path"); const dir = await this.getOutputDir(); - return await this.pathResolver.resolveFileForWrite(path.join(dir, filename)); + return await this.pathResolver.resolveFileForWrite( + path.join(dir, filename), + ); } - + /** * List test files * @param {string} pattern - Glob pattern (optional) * @returns {Promise} List of test file paths */ - async listTestFiles(pattern = '*.sql') { - const fs = require('fs').promises; - const path = require('path'); + async listTestFiles(pattern = "*.sql") { + const fs = require("fs").promises; + const path = require("path"); const dir = await this.getTestsDir(); - + try { const files = await fs.readdir(dir); return files - .filter(file => { - if (pattern === '*.sql') { - return file.endsWith('.sql'); + .filter((file) => { + if (pattern === "*.sql") { + return file.endsWith(".sql"); } // For now, just support simple *.ext patterns - if (pattern.startsWith('*.')) { + if (pattern.startsWith("*.")) { const ext = pattern.slice(1); // Remove the * return file.endsWith(ext); } return file.includes(pattern); }) - .map(file => path.join(dir, file)); + .map((file) => path.join(dir, file)); } catch (error) { throw new Error(`Failed to list test files in ${dir}: ${error.message}`); } } - + /** * Compile test files into a single migration * @returns {Promise} Compiled SQL content */ async compileTests() { - const fs = require('fs').promises; + const fs = require("fs").promises; const testFiles = await this.listTestFiles(); - + const readPromises = testFiles.map(async (file) => { - const content = await fs.readFile(file, 'utf8'); + const content = await fs.readFile(file, "utf8"); return `-- Test file: ${file}\n${content}`; }); - + const contents = await Promise.all(readPromises); - - return contents.join('\n\n'); + + return contents.join("\n\n"); } - + /** * Run a test query and parse results * @param {string} sql - 
The test SQL to execute @@ -136,7 +138,7 @@ class TestCommand extends DatabaseCommand { const result = await this.query(sql); return this.parseTestResults(result); } - + /** * Parse pgTAP test results * @param {Object} queryResult - Raw query result @@ -149,20 +151,20 @@ class TestCommand extends DatabaseCommand { passed: 0, failed: 0, skipped: 0, - tests: [] + tests: [], }; - + if (queryResult.rows) { - queryResult.rows.forEach(row => { + queryResult.rows.forEach((row) => { // Parse TAP output format const tapLine = row[Object.keys(row)[0]]; - if (typeof tapLine === 'string') { - if (tapLine.startsWith('ok')) { + if (typeof tapLine === "string") { + if (tapLine.startsWith("ok")) { results.passed++; - results.tests.push({ status: 'passed', message: tapLine }); - } else if (tapLine.startsWith('not ok')) { + results.tests.push({ status: "passed", message: tapLine }); + } else if (tapLine.startsWith("not ok")) { results.failed++; - results.tests.push({ status: 'failed', message: tapLine }); + results.tests.push({ status: "failed", message: tapLine }); } else if (tapLine.match(/^1\.\.(\d+)/)) { const match = tapLine.match(/^1\.\.(\d+)/); results.total = parseInt(match[1]); @@ -170,10 +172,10 @@ class TestCommand extends DatabaseCommand { } }); } - + return results; } - + /** * Write test results to file * @param {Object} results - Test results @@ -181,22 +183,22 @@ class TestCommand extends DatabaseCommand { * @param {string} format - Output format (json, junit, etc.) * @returns {Promise} */ - async writeResults(results, filename, format = 'json') { - const fs = require('fs').promises; + async writeResults(results, filename, format = "json") { + const fs = require("fs").promises; const filePath = await this.getOutputFile(filename); - + let content; - if (format === 'json') { + if (format === "json") { content = JSON.stringify(results, null, 2); - } else if (format === 'junit') { + } else if (format === "junit") { content = this.formatAsJUnit(results); } else { content = JSON.stringify(results); } - - await fs.writeFile(filePath, content, 'utf8'); + + await fs.writeFile(filePath, content, "utf8"); } - + /** * Format results as JUnit XML * @param {Object} results - Test results @@ -205,41 +207,43 @@ class TestCommand extends DatabaseCommand { formatAsJUnit(results) { const xml = []; xml.push(''); - xml.push(``); - + xml.push( + ``, + ); + results.tests.forEach((test, i) => { xml.push(` `); - if (test.status === 'failed') { + if (test.status === "failed") { xml.push(` `); } - xml.push(' '); + xml.push(" "); }); - - xml.push(''); - return xml.join('\n'); + + xml.push(""); + return xml.join("\n"); } - + /** * Emit test progress events */ emitTestProgress(stage, details = {}) { - this.emit('test:progress', { + this.emit("test:progress", { stage, timestamp: new Date().toISOString(), testsDir: this.testsDir, - ...details + ...details, }); } - + /** * Emit test results */ emitTestResults(results) { - this.emit('test:results', { + this.emit("test:results", { ...results, - timestamp: new Date().toISOString() + timestamp: new Date().toISOString(), }); } } -module.exports = TestCommand; \ No newline at end of file +module.exports = TestCommand; diff --git a/src/lib/config.js b/src/lib/config.js index 84be898..3019531 100644 --- a/src/lib/config.js +++ b/src/lib/config.js @@ -2,10 +2,13 @@ * Configuration management for data CLI */ -const fs = require('fs').promises; -const path = require('path'); -const os = require('os'); -const { safeParsedataConfig, mergeConfigs } = 
require('./schemas/dataConfigSchema'); +const fs = require("fs").promises; +const path = require("path"); +const os = require("os"); +const { + safeParsedataConfig, + mergeConfigs, +} = require("./schemas/dataConfigSchema"); /** * Configuration class for data CLI @@ -26,37 +29,44 @@ class Config { const config = { environments: { local: { - db: this.envVars.DATABASE_URL || this.envVars.data_DATABASE_URL || 'postgresql://postgres:postgres@127.0.0.1:54332/postgres', - supabase_url: this.envVars.SUPABASE_URL || this.envVars.data_SUPABASE_URL, - supabase_anon_key: this.envVars.SUPABASE_ANON_KEY || this.envVars.data_ANON_KEY, - supabase_service_role_key: this.envVars.SUPABASE_SERVICE_ROLE_KEY || this.envVars.data_SERVICE_ROLE_KEY - } + db: + this.envVars.DATABASE_URL || + this.envVars.data_DATABASE_URL || + "postgresql://postgres:postgres@127.0.0.1:54332/postgres", + supabase_url: + this.envVars.SUPABASE_URL || this.envVars.data_SUPABASE_URL, + supabase_anon_key: + this.envVars.SUPABASE_ANON_KEY || this.envVars.data_ANON_KEY, + supabase_service_role_key: + this.envVars.SUPABASE_SERVICE_ROLE_KEY || + this.envVars.data_SERVICE_ROLE_KEY, + }, }, paths: { - sql_dir: this.envVars.data_SQL_DIR || './sql', - tests_dir: this.envVars.data_TESTS_DIR || './tests', - migrations_dir: this.envVars.data_MIGRATIONS_DIR || './migrations', - functions_dir: this.envVars.data_FUNCTIONS_DIR || './functions', - schemas_dir: this.envVars.data_SCHEMAS_DIR || './schemas' + sql_dir: this.envVars.data_SQL_DIR || "./sql", + tests_dir: this.envVars.data_TESTS_DIR || "./tests", + migrations_dir: this.envVars.data_MIGRATIONS_DIR || "./migrations", + functions_dir: this.envVars.data_FUNCTIONS_DIR || "./functions", + schemas_dir: this.envVars.data_SCHEMAS_DIR || "./schemas", }, test: { minimum_coverage: 80, test_timeout: 300, - output_formats: ['console', 'json'] + output_formats: ["console", "json"], }, safety: { require_prod_flag: true, - require_confirmation: true - } + require_confirmation: true, + }, }; // Add prod environment if variables are present if (this.envVars.PROD_DATABASE_URL || this.envVars.PROD_SUPABASE_URL) { config.environments.prod = { - db: this.envVars.PROD_DATABASE_URL || '', + db: this.envVars.PROD_DATABASE_URL || "", supabase_url: this.envVars.PROD_SUPABASE_URL, supabase_anon_key: this.envVars.PROD_SUPABASE_ANON_KEY, - supabase_service_role_key: this.envVars.PROD_SUPABASE_SERVICE_ROLE_KEY + supabase_service_role_key: this.envVars.PROD_SUPABASE_SERVICE_ROLE_KEY, }; } @@ -70,22 +80,22 @@ class Config { // Determine config file paths to check const paths = [ configPath, - path.join(process.cwd(), '.datarc.json'), - path.join(process.cwd(), '.datarc'), - path.join(os.homedir(), '.datarc.json'), - path.join(os.homedir(), '.datarc') + path.join(process.cwd(), ".datarc.json"), + path.join(process.cwd(), ".datarc"), + path.join(os.homedir(), ".datarc.json"), + path.join(os.homedir(), ".datarc"), ].filter(Boolean); - + // Try to load config from each path const configPromises = paths.map(async (configFile) => { try { - const content = await fs.readFile(configFile, 'utf8'); + const content = await fs.readFile(configFile, "utf8"); const rawConfig = JSON.parse(content); - + // Create new Config with defaults const config = new Config(null, envVars); const defaults = config.getDefaultConfig(); - + // Validate and merge with Zod const parseResult = safeParsedataConfig(rawConfig); if (parseResult.success) { @@ -94,27 +104,27 @@ class Config { } else { // Log validation errors but use what we can 
console.warn(`Configuration validation warnings in ${configFile}:`); - parseResult.error.errors.forEach(err => { - console.warn(` - ${err.path.join('.')}: ${err.message}`); + parseResult.error.errors.forEach((err) => { + console.warn(` - ${err.path.join(".")}: ${err.message}`); }); // Fall back to manual merge for partial configs config.data = config.merge(defaults, rawConfig); } - + return config; } catch { // Continue to next path return null; } }); - + const configs = await Promise.all(configPromises); - const validConfig = configs.find(config => config !== null); - + const validConfig = configs.find((config) => config !== null); + if (validConfig) { return validConfig; } - + // Return default config if no file found return new Config(null, envVars); } @@ -124,15 +134,19 @@ class Config { */ merge(defaults, overrides) { const result = { ...defaults }; - + for (const key in overrides) { - if (typeof overrides[key] === 'object' && !Array.isArray(overrides[key]) && overrides[key] !== null) { + if ( + typeof overrides[key] === "object" && + !Array.isArray(overrides[key]) && + overrides[key] !== null + ) { result[key] = this.merge(defaults[key] || {}, overrides[key]); } else { result[key] = overrides[key]; } } - + return result; } @@ -147,39 +161,41 @@ class Config { * Save configuration to file */ async save(configPath = null) { - const filePath = configPath || path.join(process.cwd(), '.datarc.json'); - + const filePath = configPath || path.join(process.cwd(), ".datarc.json"); + // Validate before saving const parseResult = safeParsedataConfig(this.data); if (!parseResult.success) { - throw new Error(`Cannot save invalid configuration: ${parseResult.error.message}`); + throw new Error( + `Cannot save invalid configuration: ${parseResult.error.message}`, + ); } - + // Add schema reference for IDE support const configWithSchema = { - $schema: './datarc.schema.json', - ...parseResult.data + $schema: "./datarc.schema.json", + ...parseResult.data, }; - + const content = JSON.stringify(configWithSchema, null, 2); - await fs.writeFile(filePath, content, 'utf8'); + await fs.writeFile(filePath, content, "utf8"); } /** * Get a configuration value by path */ get(path) { - const keys = path.split('.'); + const keys = path.split("."); let value = this.data; - + for (const key of keys) { - if (value && typeof value === 'object') { + if (value && typeof value === "object") { value = value[key]; } else { return undefined; } } - + return value; } @@ -187,17 +203,17 @@ class Config { * Set a configuration value by path */ set(path, value) { - const keys = path.split('.'); + const keys = path.split("."); const lastKey = keys.pop(); let target = this.data; - + for (const key of keys) { - if (!target[key] || typeof target[key] !== 'object') { + if (!target[key] || typeof target[key] !== "object") { target[key] = {}; } target = target[key]; } - + target[lastKey] = value; } @@ -205,7 +221,7 @@ class Config { * Get test configuration */ getTestConfig() { - return this.get('test') || {}; + return this.get("test") || {}; } /** @@ -220,4 +236,4 @@ class Config { } } -module.exports = Config; \ No newline at end of file +module.exports = Config; diff --git a/src/lib/db-utils.js b/src/lib/db-utils.js index 9152e01..a4d18f4 100644 --- a/src/lib/db-utils.js +++ b/src/lib/db-utils.js @@ -1,4 +1,4 @@ -const { Client } = require('pg'); +const { Client } = require("pg"); /** * Database utility functions for temp database management @@ -6,11 +6,11 @@ const { Client } = require('pg'); class DatabaseUtils { 
constructor(baseConfig = {}) { this.baseConfig = { - host: 'localhost', + host: "localhost", port: 54332, - user: 'postgres', - password: 'postgres', - ...baseConfig + user: "postgres", + password: "postgres", + ...baseConfig, }; } @@ -21,7 +21,7 @@ class DatabaseUtils { createAdminClient() { return new Client({ ...this.baseConfig, - database: 'postgres' // Connect to default postgres database + database: "postgres", // Connect to default postgres database }); } @@ -33,7 +33,7 @@ class DatabaseUtils { createDatabaseClient(databaseName) { return new Client({ ...this.baseConfig, - database: databaseName + database: databaseName, }); } @@ -42,9 +42,9 @@ class DatabaseUtils { * @param {string} suffix - Optional suffix for the database name * @returns {string} Unique database name */ - generateTempDatabaseName(suffix = 'default') { + generateTempDatabaseName(suffix = "default") { const timestamp = Date.now(); - const cleanSuffix = suffix.replace(/[^a-zA-Z0-9_]/g, '_'); + const cleanSuffix = suffix.replace(/[^a-zA-Z0-9_]/g, "_"); return `temp_migra_${timestamp}_${cleanSuffix}`; } @@ -55,15 +55,15 @@ class DatabaseUtils { */ async databaseExists(databaseName) { const client = this.createAdminClient(); - + try { await client.connect(); - + const result = await client.query( - 'SELECT 1 FROM pg_database WHERE datname = $1', - [databaseName] + "SELECT 1 FROM pg_database WHERE datname = $1", + [databaseName], ); - + return result.rows.length > 0; } finally { await client.end(); @@ -91,16 +91,16 @@ class DatabaseUtils { const results = []; const queryPromises = statements - .filter(statement => statement.trim()) - .map(statement => client.query(statement)); - + .filter((statement) => statement.trim()) + .map((statement) => client.query(statement)); + const queryResults = await Promise.all(queryPromises); results.push(...queryResults); return { success: true, results, - statementCount: queryResults.length + statementCount: queryResults.length, }; } @@ -115,10 +115,10 @@ class DatabaseUtils { // More sophisticated parsing could be added if needed return sql .split(/;\s*\n/) - .map(stmt => stmt.trim()) - .filter(stmt => stmt.length > 0) - .map(stmt => stmt.endsWith(';') ? stmt : stmt + ';'); + .map((stmt) => stmt.trim()) + .filter((stmt) => stmt.length > 0) + .map((stmt) => (stmt.endsWith(";") ? stmt : stmt + ";")); } } -module.exports = DatabaseUtils; \ No newline at end of file +module.exports = DatabaseUtils; diff --git a/src/lib/events/CommandEvents.js b/src/lib/events/CommandEvents.js index 03deee9..e3da7f3 100644 --- a/src/lib/events/CommandEvents.js +++ b/src/lib/events/CommandEvents.js @@ -1,10 +1,10 @@ /** * Command Event System for D.A.T.A. CLI - * + * * This module provides a comprehensive event class hierarchy for the event-driven - * architecture used throughout the D.A.T.A. (Database Automation, Testing, and + * architecture used throughout the D.A.T.A. (Database Automation, Testing, and * Alignment) CLI tool. All events support instanceof checks for runtime type safety. - * + * * @fileoverview Event classes for robust event-driven command architecture * @author Supa Base 12 Engineering Team * @version 1.0.0 @@ -26,17 +26,17 @@ /** * Base class for all command events in the D.A.T.A. system - * + * * Provides the foundational structure for all events emitted by commands. * All events include a timestamp and support structured data through the * details property. 
- * + * * @class */ class CommandEvent { /** * Create a new command event - * + * * @param {string} type - Event type identifier (e.g., 'progress', 'error') * @param {string} message - Human-readable message describing the event * @param {EventDetails} [details={}] - Additional structured data @@ -46,17 +46,17 @@ class CommandEvent { * @type {string} Event type identifier */ this.type = type; - + /** * @type {string} Human-readable message */ this.message = message; - + /** * @type {EventDetails} Additional structured event data */ this.details = details; - + /** * @type {Date} Timestamp when event was created */ @@ -65,7 +65,7 @@ class CommandEvent { /** * Convert event to JSON-serializable object - * + * * @returns {Object} JSON representation of the event */ toJSON() { @@ -73,13 +73,13 @@ class CommandEvent { type: this.type, message: this.message, details: this.details, - timestamp: this.timestamp.toISOString() + timestamp: this.timestamp.toISOString(), }; } /** * Get a string representation of the event - * + * * @returns {string} String representation */ toString() { @@ -89,37 +89,40 @@ class CommandEvent { /** * Progress event for long-running operations - * + * * Used to indicate progress during operations that may take significant time, * such as database migrations, file processing, or compilation tasks. - * + * * @extends CommandEvent */ class ProgressEvent extends CommandEvent { /** * Create a new progress event - * + * * @param {string} message - Progress message describing current operation * @param {number|null} [percentage=null] - Completion percentage (0-100), null if unknown * @param {EventDetails} [details={}] - Additional progress details */ constructor(message, percentage = null, details = {}) { - super('progress', message, details); - + super("progress", message, details); + /** * @type {number|null} Completion percentage (0-100) or null if indeterminate */ this.percentage = percentage; - + // Validate percentage if provided - if (percentage !== null && (typeof percentage !== 'number' || percentage < 0 || percentage > 100)) { - throw new Error('Percentage must be a number between 0 and 100, or null'); + if ( + percentage !== null && + (typeof percentage !== "number" || percentage < 0 || percentage > 100) + ) { + throw new Error("Percentage must be a number between 0 and 100, or null"); } } /** * Create a progress event with percentage - * + * * @param {string} message - Progress message * @param {number} completed - Number of items completed * @param {number} total - Total number of items @@ -131,13 +134,13 @@ class ProgressEvent extends CommandEvent { return new ProgressEvent(message, percentage, { ...details, completed, - total + total, }); } /** * Create an indeterminate progress event - * + * * @param {string} message - Progress message * @param {EventDetails} [details={}] - Additional details * @returns {ProgressEvent} New indeterminate progress event @@ -149,29 +152,29 @@ class ProgressEvent extends CommandEvent { /** * Error event for operation failures - * + * * Represents errors, failures, or exceptions that occur during command execution. * Includes the original error object and optional error categorization. 
- * + * * @extends CommandEvent */ class ErrorEvent extends CommandEvent { /** * Create a new error event - * + * * @param {string} message - Error message describing what went wrong * @param {Error} error - The actual error object that was thrown * @param {string|null} [code=null] - Error code for categorization * @param {EventDetails} [details={}] - Additional error context */ constructor(message, error, code = null, details = {}) { - super('error', message, { ...details, error, code }); - + super("error", message, { ...details, error, code }); + /** * @type {Error} The original error object */ this.error = error; - + /** * @type {string|null} Error code for categorization */ @@ -180,56 +183,56 @@ class ErrorEvent extends CommandEvent { /** * Create an error event from an exception - * + * * @param {Error} error - The error object * @param {string} [context='Operation failed'] - Context message * @param {EventDetails} [details={}] - Additional details * @returns {ErrorEvent} New error event */ - static fromError(error, context = 'Operation failed', details = {}) { + static fromError(error, context = "Operation failed", details = {}) { return new ErrorEvent( `${context}: ${error.message}`, error, error.code || null, - details + details, ); } /** * Get the full error stack trace - * + * * @returns {string} Stack trace string */ getStackTrace() { - return this.error?.stack || 'No stack trace available'; + return this.error?.stack || "No stack trace available"; } } /** * Directory operation event for filesystem operations - * + * * Represents events related to directory processing, creation, scanning, * or other filesystem operations on directories. - * + * * @extends CommandEvent */ class DirectoryEvent extends CommandEvent { /** * Create a new directory event - * + * * @param {string} message - Message describing the directory operation * @param {string} directoryPath - Path to the directory being processed * @param {string} [operation='process'] - Type of operation (process, create, scan, etc.) 
* @param {EventDetails} [details={}] - Additional directory details */ - constructor(message, directoryPath, operation = 'process', details = {}) { - super('directory', message, { ...details, directoryPath, operation }); - + constructor(message, directoryPath, operation = "process", details = {}) { + super("directory", message, { ...details, directoryPath, operation }); + /** * @type {string} Path to the directory */ this.directoryPath = directoryPath; - + /** * @type {string} Type of directory operation */ @@ -238,7 +241,7 @@ class DirectoryEvent extends CommandEvent { /** * Create a directory scanning event - * + * * @param {string} directoryPath - Directory being scanned * @param {number} [fileCount=0] - Number of files found * @param {EventDetails} [details={}] - Additional details @@ -248,14 +251,14 @@ class DirectoryEvent extends CommandEvent { return new DirectoryEvent( `Scanning directory: ${directoryPath}`, directoryPath, - 'scan', - { ...details, fileCount } + "scan", + { ...details, fileCount }, ); } /** * Create a directory creation event - * + * * @param {string} directoryPath - Directory being created * @param {EventDetails} [details={}] - Additional details * @returns {DirectoryEvent} New directory creation event @@ -264,31 +267,31 @@ class DirectoryEvent extends CommandEvent { return new DirectoryEvent( `Creating directory: ${directoryPath}`, directoryPath, - 'create', - details + "create", + details, ); } } /** * Success event for successful operations - * + * * Indicates successful completion of operations, commands, or tasks. * Often the final event emitted by a command. - * + * * @extends CommandEvent */ class SuccessEvent extends CommandEvent { /** * Create a new success event - * + * * @param {string} message - Success message describing what was accomplished * @param {EventDetails} [details={}] - Additional success details * @param {number|null} [duration=null] - Operation duration in milliseconds */ constructor(message, details = {}, duration = null) { - super('success', message, { ...details, duration }); - + super("success", message, { ...details, duration }); + /** * @type {number|null} Duration of the operation in milliseconds */ @@ -297,7 +300,7 @@ class SuccessEvent extends CommandEvent { /** * Create a success event with timing information - * + * * @param {string} message - Success message * @param {Date} startTime - When the operation started * @param {EventDetails} [details={}] - Additional details @@ -310,40 +313,40 @@ class SuccessEvent extends CommandEvent { /** * Get formatted duration string - * + * * @returns {string|null} Formatted duration or null if no duration set */ getFormattedDuration() { if (this.duration === null) return null; - + if (this.duration < 1000) { return `${this.duration}ms`; } - - const seconds = Math.round(this.duration / 1000 * 100) / 100; + + const seconds = Math.round((this.duration / 1000) * 100) / 100; return `${seconds}s`; } } /** * Warning event for non-fatal issues - * + * * Represents warnings, non-critical issues, or situations that require * attention but don't prevent operation completion. 
- * + * * @extends CommandEvent */ class WarningEvent extends CommandEvent { /** * Create a new warning event - * + * * @param {string} message - Warning message * @param {EventDetails} [details={}] - Additional warning details * @param {string|null} [code=null] - Warning code for categorization */ constructor(message, details = {}, code = null) { - super('warning', message, { ...details, code }); - + super("warning", message, { ...details, code }); + /** * @type {string|null} Warning code for categorization */ @@ -353,26 +356,26 @@ class WarningEvent extends CommandEvent { /** * Start event for operation initiation - * + * * Indicates the beginning of a command or operation. Often includes * configuration or context information. - * + * * @extends CommandEvent */ class StartEvent extends CommandEvent { /** * Create a new start event - * + * * @param {string} message - Start message describing what's beginning * @param {EventDetails} [details={}] - Additional start details */ constructor(message, details = {}) { - super('start', message, details); + super("start", message, details); } /** * Create a start event for production operations - * + * * @param {string} message - Start message * @param {EventDetails} [details={}] - Additional details * @returns {StartEvent} New production start event @@ -384,23 +387,23 @@ class StartEvent extends CommandEvent { /** * Status event for system state information - * + * * Represents status checks, health reports, or system state information * that doesn't fit into other event categories. - * + * * @extends CommandEvent */ class StatusEvent extends CommandEvent { /** * Create a new status event - * + * * @param {string} message - Status message * @param {string} status - Status value (healthy, degraded, error, etc.) * @param {EventDetails} [details={}] - Additional status details */ constructor(message, status, details = {}) { - super('status', message, { ...details, status }); - + super("status", message, { ...details, status }); + /** * @type {string} Current status value */ @@ -409,34 +412,34 @@ class StatusEvent extends CommandEvent { /** * Check if status indicates a healthy state - * + * * @returns {boolean} True if status is healthy */ isHealthy() { - const healthyStatuses = ['healthy', 'ok', 'success', 'active', 'running']; + const healthyStatuses = ["healthy", "ok", "success", "active", "running"]; return healthyStatuses.includes(this.status.toLowerCase()); } } /** * Complete event for successful operation completion - * + * * Indicates that an operation has completed successfully with optional result data. * Similar to SuccessEvent but specifically for completion of multi-step operations. - * + * * @extends CommandEvent */ class CompleteEvent extends CommandEvent { /** * Create a new complete event - * + * * @param {string} message - Completion message * @param {*} [result=null] - Operation result data * @param {EventDetails} [details={}] - Additional completion details */ constructor(message, result = null, details = {}) { - super('complete', message, { ...details, result }); - + super("complete", message, { ...details, result }); + /** * @type {*} The result of the completed operation */ @@ -446,22 +449,22 @@ class CompleteEvent extends CommandEvent { /** * Cancelled event for operations that were cancelled - * + * * Indicates that an operation was cancelled by the user or system before completion. 
- * + * * @extends CommandEvent */ class CancelledEvent extends CommandEvent { /** * Create a new cancelled event - * + * * @param {string} [message='Operation cancelled'] - Cancellation message * @param {string|null} [reason=null] - Reason for cancellation * @param {EventDetails} [details={}] - Additional cancellation details */ - constructor(message = 'Operation cancelled', reason = null, details = {}) { - super('cancelled', message, { ...details, reason }); - + constructor(message = "Operation cancelled", reason = null, details = {}) { + super("cancelled", message, { ...details, reason }); + /** * @type {string|null} Reason for the cancellation */ @@ -486,11 +489,11 @@ class BuildProgressEvent extends CommandEvent { * @param {EventDetails} [details={}] - Additional details */ constructor(stage, inputDir, outputDir, details = {}) { - super('build:progress', `Build stage: ${stage}`, { + super("build:progress", `Build stage: ${stage}`, { ...details, stage, inputDir, - outputDir + outputDir, }); this.stage = stage; this.inputDir = inputDir; @@ -504,11 +507,11 @@ class BuildProgressEvent extends CommandEvent { toEventData() { return { timestamp: this.timestamp.toISOString(), - eventType: 'BuildProgressEvent', + eventType: "BuildProgressEvent", stage: this.stage, inputDir: this.inputDir, outputDir: this.outputDir, - ...this.details + ...this.details, }; } } @@ -526,11 +529,11 @@ class BuildStartEvent extends CommandEvent { * @param {EventDetails} [details={}] - Additional details */ constructor(type, inputDir, outputDir, details = {}) { - super('build:start', `Starting ${type} build`, { + super("build:start", `Starting ${type} build`, { ...details, type, inputDir, - outputDir + outputDir, }); this.type = type; this.inputDir = inputDir; @@ -544,11 +547,11 @@ class BuildStartEvent extends CommandEvent { toEventData() { return { timestamp: this.timestamp.toISOString(), - eventType: 'BuildStartEvent', + eventType: "BuildStartEvent", type: this.type, inputDir: this.inputDir, outputDir: this.outputDir, - ...this.details + ...this.details, }; } } @@ -564,9 +567,9 @@ class BuildCompleteEvent extends CommandEvent { * @param {EventDetails} [details={}] - Additional details */ constructor(result, details = {}) { - super('build:complete', 'Build completed successfully', { + super("build:complete", "Build completed successfully", { ...details, - result + result, }); this.result = result; } @@ -578,9 +581,9 @@ class BuildCompleteEvent extends CommandEvent { toEventData() { return { timestamp: this.timestamp.toISOString(), - eventType: 'BuildCompleteEvent', + eventType: "BuildCompleteEvent", result: this.result, - ...this.details + ...this.details, }; } } @@ -596,13 +599,13 @@ class BuildFailedEvent extends CommandEvent { * @param {EventDetails} [details={}] - Additional details */ constructor(error, details = {}) { - super('build:failed', `Build failed: ${error.message}`, { + super("build:failed", `Build failed: ${error.message}`, { ...details, error: { message: error.message, stack: error.stack, - name: error.name - } + name: error.name, + }, }); this.buildError = error; } @@ -614,24 +617,24 @@ class BuildFailedEvent extends CommandEvent { toEventData() { return { timestamp: this.timestamp.toISOString(), - eventType: 'BuildFailedEvent', + eventType: "BuildFailedEvent", error: { message: this.buildError.message, stack: this.buildError.stack, - name: this.buildError.name + name: this.buildError.name, }, - ...this.details + ...this.details, }; } } /** * Utility function to validate event types at runtime - * 
+ * * Provides runtime type checking for events, ensuring they are instances * of the expected event class. This is the runtime equivalent of TypeScript * type checking, using JavaScript's native instanceof operator. - * + * * @param {*} event - The event to validate * @param {Function} expectedClass - The expected event class constructor * @throws {TypeError} If event is not an instance of expectedClass @@ -642,7 +645,7 @@ function validateCommandEvent(event, expectedClass) { const actualType = event?.constructor?.name || typeof event; const expectedType = expectedClass.name; throw new TypeError( - `Invalid event type: expected ${expectedType}, got ${actualType}` + `Invalid event type: expected ${expectedType}, got ${actualType}`, ); } return true; @@ -650,10 +653,10 @@ function validateCommandEvent(event, expectedClass) { /** * Factory function to create typed events with validation - * + * * Creates events using a type string, providing a convenient way to * instantiate events while maintaining type safety through the class system. - * + * * @param {string} type - Event type string * @param {...*} args - Arguments to pass to the event constructor * @returns {CommandEvent} New event instance of the appropriate type @@ -670,15 +673,17 @@ function createCommandEvent(type, ...args) { status: StatusEvent, complete: CompleteEvent, cancelled: CancelledEvent, - 'build:progress': BuildProgressEvent, - 'build:start': BuildStartEvent, - 'build:complete': BuildCompleteEvent, - 'build:failed': BuildFailedEvent + "build:progress": BuildProgressEvent, + "build:start": BuildStartEvent, + "build:complete": BuildCompleteEvent, + "build:failed": BuildFailedEvent, }; const EventClass = eventClasses[type]; if (!EventClass) { - throw new Error(`Unknown event type: ${type}. Available types: ${Object.keys(eventClasses).join(', ')}`); + throw new Error( + `Unknown event type: ${type}. Available types: ${Object.keys(eventClasses).join(", ")}`, + ); } return new EventClass(...args); @@ -688,7 +693,7 @@ function createCommandEvent(type, ...args) { module.exports = { // Base class CommandEvent, - + // Core event classes ProgressEvent, ErrorEvent, @@ -699,14 +704,14 @@ module.exports = { StatusEvent, CompleteEvent, CancelledEvent, - + // Build-specific event classes BuildProgressEvent, BuildStartEvent, BuildCompleteEvent, BuildFailedEvent, - + // Utilities validateCommandEvent, - createCommandEvent -}; \ No newline at end of file + createCommandEvent, +}; diff --git a/src/lib/migration/ASTMigrationEngine.js b/src/lib/migration/ASTMigrationEngine.js index 2ad414b..ed69eb8 100644 --- a/src/lib/migration/ASTMigrationEngine.js +++ b/src/lib/migration/ASTMigrationEngine.js @@ -1,14 +1,14 @@ /** * AST-based Migration Engine for D.A.T.A. 
- * + * * Pure JavaScript PostgreSQL migration generator using AST parsing * No Python dependencies, no temporary databases - * + * * @module ASTMigrationEngine */ -const { parse } = require('pgsql-parser'); -const { EventEmitter } = require('events'); +const { parse } = require("pgsql-parser"); +const { EventEmitter } = require("events"); /** * Represents a single migration operation @@ -35,29 +35,29 @@ const { EventEmitter } = require('events'); class ASTMigrationEngine extends EventEmitter { constructor() { super(); - + // Destructive operation patterns this.DESTRUCTIVE_PATTERNS = [ - 'DROP TABLE', - 'DROP COLUMN', - 'DROP CONSTRAINT', - 'DELETE FROM', - 'TRUNCATE', - 'DROP INDEX', - 'DROP FUNCTION', - 'DROP TRIGGER', - 'DROP POLICY', - 'DROP TYPE', - 'ALTER COLUMN.*DROP DEFAULT', - 'ALTER COLUMN.*DROP NOT NULL' + "DROP TABLE", + "DROP COLUMN", + "DROP CONSTRAINT", + "DELETE FROM", + "TRUNCATE", + "DROP INDEX", + "DROP FUNCTION", + "DROP TRIGGER", + "DROP POLICY", + "DROP TYPE", + "ALTER COLUMN.*DROP DEFAULT", + "ALTER COLUMN.*DROP NOT NULL", ]; - + // Supabase-specific object patterns this.SUPABASE_PATTERNS = { storage: /storage\.(buckets|objects)/i, auth: /auth\.(users|refresh_tokens|audit_log_entries)/i, realtime: /realtime\.(subscription)/i, - rls: /CREATE POLICY|ALTER POLICY|DROP POLICY/i + rls: /CREATE POLICY|ALTER POLICY|DROP POLICY/i, }; } @@ -68,61 +68,75 @@ class ASTMigrationEngine extends EventEmitter { * @returns {Promise} Array of migration operations */ async generateMigration(fromSQL, toSQL) { - this.emit('start', { message: 'Parsing SQL into AST...' }); - + this.emit("start", { message: "Parsing SQL into AST..." }); + try { // Parse both SQL states into AST const fromSchema = await this.parseSchema(fromSQL); const toSchema = await this.parseSchema(toSQL); - - this.emit('progress', { - message: 'Analyzing schema differences...', + + this.emit("progress", { + message: "Analyzing schema differences...", fromObjects: this.countObjects(fromSchema), - toObjects: this.countObjects(toSchema) + toObjects: this.countObjects(toSchema), }); - + // Generate migrations for each object type const migrations = []; - + // Tables (most complex - includes columns, constraints) - migrations.push(...await this.diffTables(fromSchema.tables, toSchema.tables)); - + migrations.push( + ...(await this.diffTables(fromSchema.tables, toSchema.tables)), + ); + // Functions and Triggers - migrations.push(...await this.diffFunctions(fromSchema.functions, toSchema.functions)); - migrations.push(...await this.diffTriggers(fromSchema.triggers, toSchema.triggers)); - + migrations.push( + ...(await this.diffFunctions(fromSchema.functions, toSchema.functions)), + ); + migrations.push( + ...(await this.diffTriggers(fromSchema.triggers, toSchema.triggers)), + ); + // RLS Policies (Supabase critical) - migrations.push(...await this.diffPolicies(fromSchema.policies, toSchema.policies)); - + migrations.push( + ...(await this.diffPolicies(fromSchema.policies, toSchema.policies)), + ); + // Enums and Custom Types - migrations.push(...await this.diffEnums(fromSchema.enums, toSchema.enums)); - + migrations.push( + ...(await this.diffEnums(fromSchema.enums, toSchema.enums)), + ); + // Indexes - migrations.push(...await this.diffIndexes(fromSchema.indexes, toSchema.indexes)); - + migrations.push( + ...(await this.diffIndexes(fromSchema.indexes, toSchema.indexes)), + ); + // Views - migrations.push(...await this.diffViews(fromSchema.views, toSchema.views)); - + migrations.push( + ...(await 
this.diffViews(fromSchema.views, toSchema.views)), + ); + // Detect destructive operations - const destructive = migrations.filter(m => m.type === 'DESTRUCTIVE'); + const destructive = migrations.filter((m) => m.type === "DESTRUCTIVE"); if (destructive.length > 0) { - this.emit('warning', { + this.emit("warning", { message: `${destructive.length} destructive operations detected`, - operations: destructive + operations: destructive, }); } - - this.emit('complete', { - message: 'Migration generation complete', + + this.emit("complete", { + message: "Migration generation complete", totalOperations: migrations.length, - destructiveCount: destructive.length + destructiveCount: destructive.length, }); - + return migrations; } catch (error) { - this.emit('error', { - message: 'Failed to generate migration', - error + this.emit("error", { + message: "Failed to generate migration", + error, }); throw error; } @@ -143,62 +157,72 @@ class ASTMigrationEngine extends EventEmitter { indexes: new Map(), views: new Map(), extensions: new Map(), - grants: new Map() + grants: new Map(), }; - + try { const ast = parse(sql); - + for (const statement of ast) { const stmt = statement.RawStmt?.stmt; if (!stmt) continue; - - switch (stmt.CreateStmt ? 'CreateStmt' : - stmt.AlterTableStmt ? 'AlterTableStmt' : - stmt.CreateFunctionStmt ? 'CreateFunctionStmt' : - stmt.CreateTrigStmt ? 'CreateTrigStmt' : - stmt.CreatePolicyStmt ? 'CreatePolicyStmt' : - stmt.CreateEnumStmt ? 'CreateEnumStmt' : - stmt.IndexStmt ? 'IndexStmt' : - stmt.ViewStmt ? 'ViewStmt' : null) { - - case 'CreateStmt': + + switch ( + stmt.CreateStmt + ? "CreateStmt" + : stmt.AlterTableStmt + ? "AlterTableStmt" + : stmt.CreateFunctionStmt + ? "CreateFunctionStmt" + : stmt.CreateTrigStmt + ? "CreateTrigStmt" + : stmt.CreatePolicyStmt + ? "CreatePolicyStmt" + : stmt.CreateEnumStmt + ? "CreateEnumStmt" + : stmt.IndexStmt + ? "IndexStmt" + : stmt.ViewStmt + ? "ViewStmt" + : null + ) { + case "CreateStmt": this.parseTable(stmt.CreateStmt, schema.tables); break; - - case 'CreateFunctionStmt': + + case "CreateFunctionStmt": this.parseFunction(stmt.CreateFunctionStmt, schema.functions); break; - - case 'CreateTrigStmt': + + case "CreateTrigStmt": this.parseTrigger(stmt.CreateTrigStmt, schema.triggers); break; - - case 'CreatePolicyStmt': + + case "CreatePolicyStmt": this.parsePolicy(stmt.CreatePolicyStmt, schema.policies); break; - - case 'CreateEnumStmt': + + case "CreateEnumStmt": this.parseEnum(stmt.CreateEnumStmt, schema.enums); break; - - case 'IndexStmt': + + case "IndexStmt": this.parseIndex(stmt.IndexStmt, schema.indexes); break; - - case 'ViewStmt': + + case "ViewStmt": this.parseView(stmt.ViewStmt, schema.views); break; } } } catch (error) { // Some SQL might not parse perfectly, log but continue - this.emit('warning', { - message: 'Some SQL statements could not be parsed', - error: error.message + this.emit("warning", { + message: "Some SQL statements could not be parsed", + error: error.message, }); } - + return schema; } @@ -207,31 +231,31 @@ class ASTMigrationEngine extends EventEmitter { */ async diffTables(fromTables, toTables) { const migrations = []; - + // New tables for (const [name, table] of toTables) { if (!fromTables.has(name)) { migrations.push({ - type: 'SAFE', + type: "SAFE", sql: this.reconstructCreateTable(table), - description: `Create new table: ${name}` + description: `Create new table: ${name}`, }); } } - + // Dropped tables (DESTRUCTIVE!) 
for (const [name, table] of fromTables) { if (!toTables.has(name)) { migrations.push({ - type: 'DESTRUCTIVE', + type: "DESTRUCTIVE", sql: `DROP TABLE IF EXISTS ${name} CASCADE`, description: `Drop table: ${name}`, warning: `THIS WILL DELETE ALL DATA IN TABLE ${name}`, - requiresConfirmation: true + requiresConfirmation: true, }); } } - + // Modified tables (column changes) for (const [name, toTable] of toTables) { if (fromTables.has(name)) { @@ -239,7 +263,7 @@ class ASTMigrationEngine extends EventEmitter { migrations.push(...this.diffTableColumns(name, fromTable, toTable)); } } - + return migrations; } @@ -248,85 +272,87 @@ class ASTMigrationEngine extends EventEmitter { */ diffTableColumns(tableName, fromTable, toTable) { const migrations = []; - const fromColumns = new Map(fromTable.columns?.map(c => [c.name, c]) || []); - const toColumns = new Map(toTable.columns?.map(c => [c.name, c]) || []); - + const fromColumns = new Map( + fromTable.columns?.map((c) => [c.name, c]) || [], + ); + const toColumns = new Map(toTable.columns?.map((c) => [c.name, c]) || []); + // Added columns (SAFE) for (const [colName, col] of toColumns) { if (!fromColumns.has(colName)) { migrations.push({ - type: 'SAFE', + type: "SAFE", sql: `ALTER TABLE ${tableName} ADD COLUMN ${this.reconstructColumn(col)}`, - description: `Add column ${tableName}.${colName}` + description: `Add column ${tableName}.${colName}`, }); } } - + // Dropped columns (DESTRUCTIVE!) for (const [colName, col] of fromColumns) { if (!toColumns.has(colName)) { migrations.push({ - type: 'DESTRUCTIVE', + type: "DESTRUCTIVE", sql: `ALTER TABLE ${tableName} DROP COLUMN ${colName}`, description: `Drop column ${tableName}.${colName}`, warning: `THIS WILL DELETE ALL DATA IN COLUMN ${tableName}.${colName}`, - requiresConfirmation: true + requiresConfirmation: true, }); } } - + // Modified columns (check type, nullable, default) for (const [colName, toCol] of toColumns) { if (fromColumns.has(colName)) { const fromCol = fromColumns.get(colName); - + // Type change (potentially DESTRUCTIVE) if (this.columnTypesDiffer(fromCol, toCol)) { migrations.push({ - type: 'WARNING', + type: "WARNING", sql: `ALTER TABLE ${tableName} ALTER COLUMN ${colName} TYPE ${toCol.type}`, description: `Change type of ${tableName}.${colName}`, - warning: 'Type change may result in data loss or errors' + warning: "Type change may result in data loss or errors", }); } - + // Nullable change if (fromCol.nullable !== toCol.nullable) { if (toCol.nullable) { migrations.push({ - type: 'SAFE', + type: "SAFE", sql: `ALTER TABLE ${tableName} ALTER COLUMN ${colName} DROP NOT NULL`, - description: `Make ${tableName}.${colName} nullable` + description: `Make ${tableName}.${colName} nullable`, }); } else { migrations.push({ - type: 'WARNING', + type: "WARNING", sql: `ALTER TABLE ${tableName} ALTER COLUMN ${colName} SET NOT NULL`, description: `Make ${tableName}.${colName} required`, - warning: 'Will fail if column contains NULL values' + warning: "Will fail if column contains NULL values", }); } } - + // Default value change if (this.defaultsDiffer(fromCol.default, toCol.default)) { if (toCol.default) { migrations.push({ - type: 'SAFE', + type: "SAFE", sql: `ALTER TABLE ${tableName} ALTER COLUMN ${colName} SET DEFAULT ${toCol.default}`, - description: `Set default for ${tableName}.${colName}` + description: `Set default for ${tableName}.${colName}`, }); } else { migrations.push({ - type: 'SAFE', + type: "SAFE", sql: `ALTER TABLE ${tableName} ALTER COLUMN ${colName} DROP DEFAULT`, - 
description: `Remove default from ${tableName}.${colName}` + description: `Remove default from ${tableName}.${colName}`, }); } } } } - + return migrations; } @@ -335,45 +361,45 @@ class ASTMigrationEngine extends EventEmitter { */ async diffPolicies(fromPolicies, toPolicies) { const migrations = []; - + // New policies for (const [key, policy] of toPolicies) { if (!fromPolicies.has(key)) { migrations.push({ - type: 'SAFE', + type: "SAFE", sql: this.reconstructPolicy(policy), - description: `Create RLS policy: ${policy.name} on ${policy.table}` + description: `Create RLS policy: ${policy.name} on ${policy.table}`, }); } } - + // Dropped policies for (const [key, policy] of fromPolicies) { if (!toPolicies.has(key)) { migrations.push({ - type: 'WARNING', + type: "WARNING", sql: `DROP POLICY IF EXISTS ${policy.name} ON ${policy.table}`, description: `Drop RLS policy: ${policy.name}`, - warning: 'Removing security policy - ensure this is intentional' + warning: "Removing security policy - ensure this is intentional", }); } } - + // Modified policies (drop and recreate) for (const [key, toPolicy] of toPolicies) { if (fromPolicies.has(key)) { const fromPolicy = fromPolicies.get(key); if (this.policiesDiffer(fromPolicy, toPolicy)) { migrations.push({ - type: 'WARNING', + type: "WARNING", sql: `DROP POLICY IF EXISTS ${fromPolicy.name} ON ${fromPolicy.table};\n${this.reconstructPolicy(toPolicy)}`, description: `Recreate RLS policy: ${toPolicy.name}`, - warning: 'Policy will be briefly removed during migration' + warning: "Policy will be briefly removed during migration", }); } } } - + return migrations; } @@ -382,96 +408,98 @@ class ASTMigrationEngine extends EventEmitter { */ async diffFunctions(fromFunctions, toFunctions) { const migrations = []; - + for (const [signature, toFunc] of toFunctions) { if (!fromFunctions.has(signature)) { // New function migrations.push({ - type: 'SAFE', + type: "SAFE", sql: this.reconstructFunction(toFunc), - description: `Create function: ${signature}` + description: `Create function: ${signature}`, }); } else { // Check if function body changed const fromFunc = fromFunctions.get(signature); if (fromFunc.body !== toFunc.body) { migrations.push({ - type: 'SAFE', + type: "SAFE", sql: `CREATE OR REPLACE FUNCTION ${this.reconstructFunction(toFunc)}`, - description: `Update function: ${signature}` + description: `Update function: ${signature}`, }); } } } - + // Dropped functions for (const [signature, func] of fromFunctions) { if (!toFunctions.has(signature)) { migrations.push({ - type: 'WARNING', + type: "WARNING", sql: `DROP FUNCTION IF EXISTS ${signature} CASCADE`, description: `Drop function: ${signature}`, - warning: 'May break dependent objects' + warning: "May break dependent objects", }); } } - + return migrations; } /** * Helper methods for reconstruction and comparison */ - + reconstructColumn(col) { let sql = `${col.name} ${col.type}`; if (col.default) sql += ` DEFAULT ${col.default}`; - if (!col.nullable) sql += ' NOT NULL'; - if (col.unique) sql += ' UNIQUE'; + if (!col.nullable) sql += " NOT NULL"; + if (col.unique) sql += " UNIQUE"; return sql; } - + reconstructCreateTable(table) { - const columns = table.columns.map(c => this.reconstructColumn(c)); - return `CREATE TABLE ${table.name} (\n ${columns.join(',\n ')}\n)`; + const columns = table.columns.map((c) => this.reconstructColumn(c)); + return `CREATE TABLE ${table.name} (\n ${columns.join(",\n ")}\n)`; } - + reconstructPolicy(policy) { return `CREATE POLICY ${policy.name} ON ${policy.table} - FOR 
${policy.command || 'ALL'} - TO ${policy.role || 'public'} - ${policy.permissive ? 'AS PERMISSIVE' : 'AS RESTRICTIVE'} - ${policy.using ? `USING (${policy.using})` : ''} - ${policy.check ? `WITH CHECK (${policy.check})` : ''}`; + FOR ${policy.command || "ALL"} + TO ${policy.role || "public"} + ${policy.permissive ? "AS PERMISSIVE" : "AS RESTRICTIVE"} + ${policy.using ? `USING (${policy.using})` : ""} + ${policy.check ? `WITH CHECK (${policy.check})` : ""}`; } - + reconstructFunction(func) { - return `${func.name}(${func.arguments || ''}) + return `${func.name}(${func.arguments || ""}) RETURNS ${func.returnType} LANGUAGE ${func.language} - ${func.volatility || ''} + ${func.volatility || ""} AS $$${func.body}$$`; } - + columnTypesDiffer(col1, col2) { // Normalize types for comparison - const normalize = (type) => type?.toLowerCase().replace(/\s+/g, ''); + const normalize = (type) => type?.toLowerCase().replace(/\s+/g, ""); return normalize(col1.type) !== normalize(col2.type); } - + defaultsDiffer(def1, def2) { // Handle various default formats - const normalize = (def) => def?.toString().replace(/['"]/g, '').trim(); + const normalize = (def) => def?.toString().replace(/['"]/g, "").trim(); return normalize(def1) !== normalize(def2); } - + policiesDiffer(pol1, pol2) { - return pol1.using !== pol2.using || - pol1.check !== pol2.check || - pol1.command !== pol2.command || - pol1.role !== pol2.role; + return ( + pol1.using !== pol2.using || + pol1.check !== pol2.check || + pol1.command !== pol2.command || + pol1.role !== pol2.role + ); } - + countObjects(schema) { return { tables: schema.tables.size, @@ -480,45 +508,49 @@ class ASTMigrationEngine extends EventEmitter { triggers: schema.triggers.size, indexes: schema.indexes.size, enums: schema.enums.size, - views: schema.views.size + views: schema.views.size, }; } /** * Parse individual object types from AST */ - + parseTable(stmt, tables) { const tableName = stmt.relation?.relname; if (!tableName) return; - - const columns = stmt.tableElts?.map(elt => { - if (elt.ColumnDef) { - return { - name: elt.ColumnDef.colname, - type: this.extractType(elt.ColumnDef.typeName), - nullable: !elt.ColumnDef.is_not_null, - default: elt.ColumnDef.raw_default, - constraints: elt.ColumnDef.constraints - }; - } - }).filter(Boolean) || []; - + + const columns = + stmt.tableElts + ?.map((elt) => { + if (elt.ColumnDef) { + return { + name: elt.ColumnDef.colname, + type: this.extractType(elt.ColumnDef.typeName), + nullable: !elt.ColumnDef.is_not_null, + default: elt.ColumnDef.raw_default, + constraints: elt.ColumnDef.constraints, + }; + } + }) + .filter(Boolean) || []; + tables.set(tableName, { name: tableName, columns, - raw: stmt + raw: stmt, }); } - + parseFunction(stmt, functions) { const funcName = stmt.funcname?.[0]?.String?.str; if (!funcName) return; - + // Build signature - const args = stmt.parameters?.map(p => `${p.name} ${p.type}`).join(', ') || ''; + const args = + stmt.parameters?.map((p) => `${p.name} ${p.type}`).join(", ") || ""; const signature = `${funcName}(${args})`; - + functions.set(signature, { name: funcName, signature, @@ -526,15 +558,15 @@ class ASTMigrationEngine extends EventEmitter { returnType: this.extractType(stmt.returnType), language: stmt.language, body: stmt.as?.[0] || stmt.sql_body, - raw: stmt + raw: stmt, }); } - + parsePolicy(stmt, policies) { const policyName = stmt.policy_name; const tableName = stmt.table?.relname; if (!policyName || !tableName) return; - + const key = `${tableName}.${policyName}`; policies.set(key, { name: 
policyName, @@ -544,224 +576,229 @@ class ASTMigrationEngine extends EventEmitter { permissive: stmt.permissive, using: stmt.qual, check: stmt.with_check, - raw: stmt + raw: stmt, }); } - + parseEnum(stmt, enums) { const typeName = stmt.typeName?.[0]?.String?.str; if (!typeName) return; - - const values = stmt.vals?.map(v => v.String?.str).filter(Boolean) || []; - + + const values = stmt.vals?.map((v) => v.String?.str).filter(Boolean) || []; + enums.set(typeName, { name: typeName, values, - raw: stmt + raw: stmt, }); } - + parseIndex(stmt, indexes) { const indexName = stmt.idxname; const tableName = stmt.relation?.relname; if (!indexName) return; - + indexes.set(indexName, { name: indexName, table: tableName, unique: stmt.unique, columns: stmt.indexParams, - raw: stmt + raw: stmt, }); } - + parseTrigger(stmt, triggers) { const triggerName = stmt.trigname; const tableName = stmt.relation?.relname; if (!triggerName) return; - + triggers.set(triggerName, { name: triggerName, table: tableName, timing: stmt.timing, events: stmt.events, function: stmt.funcname, - raw: stmt + raw: stmt, }); } - + parseView(stmt, views) { const viewName = stmt.view?.relname; if (!viewName) return; - + views.set(viewName, { name: viewName, query: stmt.query, - raw: stmt + raw: stmt, }); } - + extractType(typeName) { - if (!typeName) return 'unknown'; + if (!typeName) return "unknown"; if (typeName.String) return typeName.String.str; - if (typeName.names) return typeName.names.map(n => n.String?.str).join('.'); - return 'unknown'; + if (typeName.names) + return typeName.names.map((n) => n.String?.str).join("."); + return "unknown"; } /** * Additional diff methods */ - + async diffEnums(fromEnums, toEnums) { const migrations = []; - + // New enums for (const [name, enumDef] of toEnums) { if (!fromEnums.has(name)) { migrations.push({ - type: 'SAFE', - sql: `CREATE TYPE ${name} AS ENUM (${enumDef.values.map(v => `'${v}'`).join(', ')})`, - description: `Create enum type: ${name}` + type: "SAFE", + sql: `CREATE TYPE ${name} AS ENUM (${enumDef.values.map((v) => `'${v}'`).join(", ")})`, + description: `Create enum type: ${name}`, }); } } - + // Modified enums (can only ADD values, not remove) for (const [name, toEnum] of toEnums) { if (fromEnums.has(name)) { const fromEnum = fromEnums.get(name); - const newValues = toEnum.values.filter(v => !fromEnum.values.includes(v)); - + const newValues = toEnum.values.filter( + (v) => !fromEnum.values.includes(v), + ); + for (const value of newValues) { migrations.push({ - type: 'SAFE', + type: "SAFE", sql: `ALTER TYPE ${name} ADD VALUE '${value}'`, - description: `Add value '${value}' to enum ${name}` + description: `Add value '${value}' to enum ${name}`, }); } - + // Check for removed values (PROBLEM!) - const removedValues = fromEnum.values.filter(v => !toEnum.values.includes(v)); + const removedValues = fromEnum.values.filter( + (v) => !toEnum.values.includes(v), + ); if (removedValues.length > 0) { migrations.push({ - type: 'DESTRUCTIVE', - sql: `-- MANUAL INTERVENTION REQUIRED: Cannot remove enum values ${removedValues.join(', ')} from ${name}`, + type: "DESTRUCTIVE", + sql: `-- MANUAL INTERVENTION REQUIRED: Cannot remove enum values ${removedValues.join(", ")} from ${name}`, description: `Cannot remove enum values from ${name}`, warning: `PostgreSQL does not support removing enum values. 
Manual data migration required.`, - requiresConfirmation: true + requiresConfirmation: true, }); } } } - + return migrations; } - + async diffIndexes(fromIndexes, toIndexes) { const migrations = []; - + // New indexes for (const [name, index] of toIndexes) { if (!fromIndexes.has(name)) { migrations.push({ - type: 'SAFE', - sql: `CREATE ${index.unique ? 'UNIQUE ' : ''}INDEX ${name} ON ${index.table} (${index.columns})`, - description: `Create index: ${name}` + type: "SAFE", + sql: `CREATE ${index.unique ? "UNIQUE " : ""}INDEX ${name} ON ${index.table} (${index.columns})`, + description: `Create index: ${name}`, }); } } - + // Dropped indexes for (const [name, index] of fromIndexes) { if (!toIndexes.has(name)) { migrations.push({ - type: 'WARNING', + type: "WARNING", sql: `DROP INDEX IF EXISTS ${name}`, description: `Drop index: ${name}`, - warning: 'May impact query performance' + warning: "May impact query performance", }); } } - + return migrations; } - + async diffTriggers(fromTriggers, toTriggers) { const migrations = []; - + // For triggers, we'll drop and recreate if changed for (const [name, toTrigger] of toTriggers) { if (!fromTriggers.has(name)) { migrations.push({ - type: 'SAFE', + type: "SAFE", sql: this.reconstructTrigger(toTrigger), - description: `Create trigger: ${name}` + description: `Create trigger: ${name}`, }); } else { const fromTrigger = fromTriggers.get(name); if (this.triggersDiffer(fromTrigger, toTrigger)) { migrations.push({ - type: 'WARNING', + type: "WARNING", sql: `DROP TRIGGER IF EXISTS ${name} ON ${fromTrigger.table};\n${this.reconstructTrigger(toTrigger)}`, description: `Recreate trigger: ${name}`, - warning: 'Trigger will be briefly removed during migration' + warning: "Trigger will be briefly removed during migration", }); } } } - + // Dropped triggers for (const [name, trigger] of fromTriggers) { if (!toTriggers.has(name)) { migrations.push({ - type: 'WARNING', + type: "WARNING", sql: `DROP TRIGGER IF EXISTS ${name} ON ${trigger.table}`, - description: `Drop trigger: ${name}` + description: `Drop trigger: ${name}`, }); } } - + return migrations; } - + async diffViews(fromViews, toViews) { const migrations = []; - + // Views are typically dropped and recreated for (const [name, toView] of toViews) { if (!fromViews.has(name)) { migrations.push({ - type: 'SAFE', + type: "SAFE", sql: `CREATE VIEW ${name} AS ${toView.query}`, - description: `Create view: ${name}` + description: `Create view: ${name}`, }); } else { const fromView = fromViews.get(name); if (fromView.query !== toView.query) { migrations.push({ - type: 'SAFE', + type: "SAFE", sql: `CREATE OR REPLACE VIEW ${name} AS ${toView.query}`, - description: `Update view: ${name}` + description: `Update view: ${name}`, }); } } } - + // Dropped views for (const [name, view] of fromViews) { if (!toViews.has(name)) { migrations.push({ - type: 'WARNING', + type: "WARNING", sql: `DROP VIEW IF EXISTS ${name} CASCADE`, description: `Drop view: ${name}`, - warning: 'May break dependent objects' + warning: "May break dependent objects", }); } } - + return migrations; } - + reconstructTrigger(trigger) { return `CREATE TRIGGER ${trigger.name} ${trigger.timing} ${trigger.events} @@ -769,12 +806,14 @@ class ASTMigrationEngine extends EventEmitter { FOR EACH ROW EXECUTE FUNCTION ${trigger.function}()`; } - + triggersDiffer(t1, t2) { - return t1.timing !== t2.timing || - t1.events !== t2.events || - t1.function !== t2.function; + return ( + t1.timing !== t2.timing || + t1.events !== t2.events || + t1.function !== 
t2.function + ); } } -module.exports = ASTMigrationEngine; \ No newline at end of file +module.exports = ASTMigrationEngine; diff --git a/src/lib/migration/DiffEngine.js b/src/lib/migration/DiffEngine.js index b518a9c..f250d78 100644 --- a/src/lib/migration/DiffEngine.js +++ b/src/lib/migration/DiffEngine.js @@ -1,34 +1,37 @@ /** * DiffEngine - Git-based migration diff generator - * + * * Generates incremental migrations by comparing Golden SQL * between git commits/tags (not full database introspection) */ -const { EventEmitter } = require('events'); -const { execSync } = require('child_process'); -const fs = require('fs').promises; -const path = require('path'); +const { EventEmitter } = require("events"); +const { execSync } = require("child_process"); +const fs = require("fs").promises; +const path = require("path"); class DiffEngine extends EventEmitter { constructor(config = {}) { super(); - + this.config = { // Git-related config gitRoot: config.gitRoot || process.cwd(), - sqlDir: config.sqlDir || './sql', - + sqlDir: config.sqlDir || "./sql", + // Diff behavior includeData: config.includeData || false, includeDropStatements: config.includeDropStatements !== false, sortOutput: config.sortOutput !== false, - excludeSchemas: config.excludeSchemas || ['pg_catalog', 'information_schema'], - + excludeSchemas: config.excludeSchemas || [ + "pg_catalog", + "information_schema", + ], + // Custom options preserved - ...config + ...config, }; - + // State management this.isRunning = false; this.lastDiff = null; @@ -41,64 +44,66 @@ class DiffEngine extends EventEmitter { */ async generateDiff(currentDb, desiredDb) { if (this.isRunning) { - throw new Error('Diff generation already running'); + throw new Error("Diff generation already running"); } - + if (!currentDb || !desiredDb) { - const error = new Error('Both current and desired states must be provided'); - this.emit('error', { + const error = new Error( + "Both current and desired states must be provided", + ); + this.emit("error", { error, message: error.message, - timestamp: new Date() + timestamp: new Date(), }); throw error; } - + this.isRunning = true; const startTime = new Date(); - - this.emit('start', { + + this.emit("start", { currentDb, desiredDb, - timestamp: startTime + timestamp: startTime, }); - + try { // Step 1: Initialize - this.emit('progress', { - step: 'initializing', - message: 'Initializing diff engine', - timestamp: new Date() + this.emit("progress", { + step: "initializing", + message: "Initializing diff engine", + timestamp: new Date(), }); - + // Validate git repository await this.validateGitRepository(); - + // Step 2: Get git refs const currentRef = this.resolveGitRef(currentDb); const desiredRef = this.resolveGitRef(desiredDb); - - this.emit('progress', { - step: 'refs_resolved', + + this.emit("progress", { + step: "refs_resolved", message: `Comparing ${currentRef} to ${desiredRef}`, currentRef, desiredRef, - timestamp: new Date() + timestamp: new Date(), }); - + // Step 3: Generate SQL diffs const sqlDiff = await this.generateSqlDiff(currentRef, desiredRef); - + // Step 4: Parse and analyze changes const migration = await this.analyzeDiff(sqlDiff); - + // Step 5: Generate migration SQL const migrationSql = await this.generateMigrationSql(migration); - + // Complete const endTime = new Date(); const duration = endTime - startTime; - + const result = { diff: migrationSql, stats: { @@ -108,26 +113,25 @@ class DiffEngine extends EventEmitter { changes: migration.changes.length, additions: 
migration.additions.length, deletions: migration.deletions.length, - modifications: migration.modifications.length + modifications: migration.modifications.length, }, - timestamp: endTime + timestamp: endTime, }; - + this.lastDiff = result; - - this.emit('complete', { + + this.emit("complete", { diff: result.diff, duration, - timestamp: endTime + timestamp: endTime, }); - + return result; - } catch (error) { - this.emit('error', { + this.emit("error", { error, message: error.message, - timestamp: new Date() + timestamp: new Date(), }); throw error; } finally { @@ -140,12 +144,12 @@ class DiffEngine extends EventEmitter { */ async validateGitRepository() { try { - execSync('git rev-parse --git-dir', { + execSync("git rev-parse --git-dir", { cwd: this.config.gitRoot, - stdio: 'pipe' + stdio: "pipe", }); } catch (error) { - throw new Error('Not in a git repository'); + throw new Error("Not in a git repository"); } } @@ -154,33 +158,36 @@ class DiffEngine extends EventEmitter { */ resolveGitRef(dbConfig) { // Handle different input formats - if (typeof dbConfig === 'string') { + if (typeof dbConfig === "string") { return dbConfig; // Already a git ref } - + if (dbConfig.tag) { return dbConfig.tag; } - + if (dbConfig.branch) { return dbConfig.branch; } - + if (dbConfig.commit) { return dbConfig.commit; } - + // Default to HEAD for current database - if (dbConfig.database === 'current' || dbConfig.host === 'localhost') { - return 'HEAD'; + if (dbConfig.database === "current" || dbConfig.host === "localhost") { + return "HEAD"; } - + // Look for last deployment tag - if (dbConfig.database === 'production' || dbConfig.database === 'test_desired') { + if ( + dbConfig.database === "production" || + dbConfig.database === "test_desired" + ) { return this.getLastDeploymentTag(); } - - return 'HEAD'; + + return "HEAD"; } /** @@ -188,15 +195,20 @@ class DiffEngine extends EventEmitter { */ getLastDeploymentTag() { try { - const tag = execSync('git describe --tags --abbrev=0 --match="data/prod/*"', { - cwd: this.config.gitRoot, - stdio: 'pipe' - }).toString().trim(); - - return tag || 'HEAD'; + const tag = execSync( + 'git describe --tags --abbrev=0 --match="data/prod/*"', + { + cwd: this.config.gitRoot, + stdio: "pipe", + }, + ) + .toString() + .trim(); + + return tag || "HEAD"; } catch (error) { // No tags found, use HEAD - return 'HEAD'; + return "HEAD"; } } @@ -204,12 +216,12 @@ class DiffEngine extends EventEmitter { * Generate SQL diff between two git refs */ async generateSqlDiff(fromRef, toRef) { - this.emit('progress', { - step: 'generating_diff', - message: 'Generating SQL diff from git', - timestamp: new Date() + this.emit("progress", { + step: "generating_diff", + message: "Generating SQL diff from git", + timestamp: new Date(), }); - + try { // Get the diff of SQL files between two refs const diff = execSync( @@ -217,10 +229,10 @@ class DiffEngine extends EventEmitter { { cwd: this.config.gitRoot, maxBuffer: 10 * 1024 * 1024, // 10MB buffer - stdio: 'pipe' - } + stdio: "pipe", + }, ).toString(); - + return diff; } catch (error) { throw new Error(`Failed to generate git diff: ${error.message}`); @@ -231,33 +243,33 @@ class DiffEngine extends EventEmitter { * Analyze the git diff to extract SQL changes */ async analyzeDiff(gitDiff) { - this.emit('progress', { - step: 'analyzing', - message: 'Analyzing SQL changes', - timestamp: new Date() + this.emit("progress", { + step: "analyzing", + message: "Analyzing SQL changes", + timestamp: new Date(), }); - + const migration = { additions: [], 
deletions: [], modifications: [], - changes: [] + changes: [], }; - + if (!gitDiff || gitDiff.trim().length === 0) { return migration; } - + // Parse git diff output - const lines = gitDiff.split('\n'); + const lines = gitDiff.split("\n"); let currentFile = null; let inSqlBlock = false; let sqlBuffer = []; let changeType = null; - + for (const line of lines) { // File header - if (line.startsWith('diff --git')) { + if (line.startsWith("diff --git")) { if (sqlBuffer.length > 0 && currentFile) { this.processSqlBuffer(migration, sqlBuffer, changeType, currentFile); sqlBuffer = []; @@ -266,47 +278,47 @@ class DiffEngine extends EventEmitter { currentFile = match ? match[1] : null; continue; } - + // New file - if (line.startsWith('new file')) { - changeType = 'addition'; + if (line.startsWith("new file")) { + changeType = "addition"; continue; } - + // Deleted file - if (line.startsWith('deleted file')) { - changeType = 'deletion'; + if (line.startsWith("deleted file")) { + changeType = "deletion"; continue; } - + // Modified file - if (line.startsWith('index ')) { - changeType = 'modification'; + if (line.startsWith("index ")) { + changeType = "modification"; continue; } - + // Added lines - if (line.startsWith('+') && !line.startsWith('+++')) { + if (line.startsWith("+") && !line.startsWith("+++")) { sqlBuffer.push({ - type: 'add', - content: line.substring(1) + type: "add", + content: line.substring(1), }); } - + // Removed lines - if (line.startsWith('-') && !line.startsWith('---')) { + if (line.startsWith("-") && !line.startsWith("---")) { sqlBuffer.push({ - type: 'remove', - content: line.substring(1) + type: "remove", + content: line.substring(1), }); } } - + // Process final buffer if (sqlBuffer.length > 0 && currentFile) { this.processSqlBuffer(migration, sqlBuffer, changeType, currentFile); } - + return migration; } @@ -314,21 +326,27 @@ class DiffEngine extends EventEmitter { * Process SQL buffer and categorize changes */ processSqlBuffer(migration, buffer, changeType, file) { - const added = buffer.filter(b => b.type === 'add').map(b => b.content).join('\n'); - const removed = buffer.filter(b => b.type === 'remove').map(b => b.content).join('\n'); - + const added = buffer + .filter((b) => b.type === "add") + .map((b) => b.content) + .join("\n"); + const removed = buffer + .filter((b) => b.type === "remove") + .map((b) => b.content) + .join("\n"); + const change = { file, type: changeType, added, - removed + removed, }; - + migration.changes.push(change); - - if (changeType === 'addition') { + + if (changeType === "addition") { migration.additions.push(change); - } else if (changeType === 'deletion') { + } else if (changeType === "deletion") { migration.deletions.push(change); } else { migration.modifications.push(change); @@ -339,14 +357,14 @@ class DiffEngine extends EventEmitter { * Generate migration SQL from analyzed changes */ async generateMigrationSql(migration) { - this.emit('progress', { - step: 'generating_sql', - message: 'Generating migration SQL', - timestamp: new Date() + this.emit("progress", { + step: "generating_sql", + message: "Generating migration SQL", + timestamp: new Date(), }); - + const sections = []; - + // Header sections.push(`-- ═══════════════════════════════════════════════════════════════════════════ -- INCREMENTAL MIGRATION @@ -355,56 +373,60 @@ class DiffEngine extends EventEmitter { -- ═══════════════════════════════════════════════════════════════════════════ `); - + // Process additions if (migration.additions.length > 0) { - sections.push('-- 
ADDITIONS\n'); + sections.push("-- ADDITIONS\n"); for (const add of migration.additions) { sections.push(`-- File: ${add.file}\n`); - sections.push(add.added + '\n\n'); + sections.push(add.added + "\n\n"); } } - + // Process modifications if (migration.modifications.length > 0) { - sections.push('-- MODIFICATIONS\n'); + sections.push("-- MODIFICATIONS\n"); for (const mod of migration.modifications) { sections.push(`-- File: ${mod.file}\n`); - + // Try to intelligently generate ALTER statements const alterStatements = this.generateAlterStatements(mod); if (alterStatements) { - sections.push(alterStatements + '\n\n'); + sections.push(alterStatements + "\n\n"); } else { // Fallback to showing raw changes if (mod.removed) { - sections.push('-- Removed:\n-- ' + mod.removed.replace(/\n/g, '\n-- ') + '\n'); + sections.push( + "-- Removed:\n-- " + mod.removed.replace(/\n/g, "\n-- ") + "\n", + ); } if (mod.added) { - sections.push('-- Added:\n' + mod.added + '\n\n'); + sections.push("-- Added:\n" + mod.added + "\n\n"); } } } } - + // Process deletions if (migration.deletions.length > 0 && this.config.includeDropStatements) { - sections.push('-- DELETIONS\n'); + sections.push("-- DELETIONS\n"); for (const del of migration.deletions) { sections.push(`-- File: ${del.file}\n`); - sections.push(`-- WARNING: Manual review required for DROP statements\n`); - sections.push(`-- ${del.removed.replace(/\n/g, '\n-- ')}\n\n`); + sections.push( + `-- WARNING: Manual review required for DROP statements\n`, + ); + sections.push(`-- ${del.removed.replace(/\n/g, "\n-- ")}\n\n`); } } - + // Footer sections.push(`-- ═══════════════════════════════════════════════════════════════════════════ -- END OF MIGRATION -- Total changes: ${migration.changes.length} -- ═══════════════════════════════════════════════════════════════════════════ `); - - return sections.join(''); + + return sections.join(""); } /** @@ -414,13 +436,13 @@ class DiffEngine extends EventEmitter { generateAlterStatements(modification) { const added = modification.added; const removed = modification.removed; - + // Look for table modifications - if (added.includes('ALTER TABLE') || removed.includes('CREATE TABLE')) { + if (added.includes("ALTER TABLE") || removed.includes("CREATE TABLE")) { // Already has ALTER statements return added; } - + // Look for column additions const columnMatch = added.match(/^\s+(\w+)\s+(\w+.*),?$/m); if (columnMatch) { @@ -429,7 +451,7 @@ class DiffEngine extends EventEmitter { return `ALTER TABLE ${tableMatch[1]} ADD COLUMN ${columnMatch[1]} ${columnMatch[2]};`; } } - + // For complex changes, return null to use fallback return null; } @@ -449,4 +471,4 @@ class DiffEngine extends EventEmitter { } } -module.exports = DiffEngine; \ No newline at end of file +module.exports = DiffEngine; diff --git a/src/lib/migration/GitDeploymentTracker.js b/src/lib/migration/GitDeploymentTracker.js index 2e8ea69..b906932 100644 --- a/src/lib/migration/GitDeploymentTracker.js +++ b/src/lib/migration/GitDeploymentTracker.js @@ -1,21 +1,21 @@ /** * Git Deployment Tracker for D.A.T.A. - * + * * Manages git-based deployment tracking using tags and commit history. * Provides deployment state management and SQL retrieval from git history. 
- * + * * @module GitDeploymentTracker */ -const { EventEmitter } = require('events'); -const ChildProcessWrapper = require('../ChildProcessWrapper'); -const path = require('path'); -const fs = require('fs').promises; +const { EventEmitter } = require("events"); +const ChildProcessWrapper = require("../ChildProcessWrapper"); +const path = require("path"); +const fs = require("fs").promises; /** * Git deployment tag prefix for D.A.T.A. deployments */ -const DEPLOYMENT_TAG_PREFIX = 'data-deploy-'; +const DEPLOYMENT_TAG_PREFIX = "data-deploy-"; /** * Deployment metadata structure @@ -30,228 +30,255 @@ const DEPLOYMENT_TAG_PREFIX = 'data-deploy-'; class GitDeploymentTracker extends EventEmitter { constructor(options = {}) { super(); - + this.childProcess = options.childProcess || new ChildProcessWrapper(); this.gitDir = options.gitDir || process.cwd(); - this.sqlDir = options.sqlDir || process.env.ARCHY_SQL_DIR || './sql'; + this.sqlDir = options.sqlDir || process.env.ARCHY_SQL_DIR || "./sql"; } - + /** * Check if working tree is clean (no uncommitted changes) * @returns {Promise} True if working tree is clean */ async isWorkingTreeClean() { try { - const result = await this.childProcess.execute('git', ['status', '--porcelain'], { - cwd: this.gitDir, - timeout: 10000 - }); - - return result.stdout.trim() === ''; + const result = await this.childProcess.execute( + "git", + ["status", "--porcelain"], + { + cwd: this.gitDir, + timeout: 10000, + }, + ); + + return result.stdout.trim() === ""; } catch (error) { - this.emit('error', { - message: 'Failed to check git status', - error + this.emit("error", { + message: "Failed to check git status", + error, }); throw new Error(`Git status check failed: ${error.message}`); } } - + /** * Get detailed working tree status * @returns {Promise} Status with modified, untracked, and staged files */ async getWorkingTreeStatus() { try { - const result = await this.childProcess.execute('git', ['status', '--porcelain'], { - cwd: this.gitDir, - timeout: 10000 - }); - - const lines = result.stdout.trim().split('\n').filter(Boolean); + const result = await this.childProcess.execute( + "git", + ["status", "--porcelain"], + { + cwd: this.gitDir, + timeout: 10000, + }, + ); + + const lines = result.stdout.trim().split("\n").filter(Boolean); const status = { modified: [], untracked: [], staged: [], - deleted: [] + deleted: [], }; - + for (const line of lines) { const statusCode = line.substring(0, 2); const file = line.substring(3); - - if (statusCode.includes('M')) status.modified.push(file); - if (statusCode.includes('A')) status.staged.push(file); - if (statusCode.includes('D')) status.deleted.push(file); - if (statusCode.includes('?')) status.untracked.push(file); + + if (statusCode.includes("M")) status.modified.push(file); + if (statusCode.includes("A")) status.staged.push(file); + if (statusCode.includes("D")) status.deleted.push(file); + if (statusCode.includes("?")) status.untracked.push(file); } - + return status; } catch (error) { throw new Error(`Failed to get git status: ${error.message}`); } } - + /** * Get current git branch name * @returns {Promise} Current branch name */ async getCurrentBranch() { try { - const result = await this.childProcess.execute('git', ['rev-parse', '--abbrev-ref', 'HEAD'], { - cwd: this.gitDir, - timeout: 10000 - }); - + const result = await this.childProcess.execute( + "git", + ["rev-parse", "--abbrev-ref", "HEAD"], + { + cwd: this.gitDir, + timeout: 10000, + }, + ); + return result.stdout.trim(); } catch (error) { throw new 
Error(`Failed to get current branch: ${error.message}`); } } - + /** * Get current git commit hash * @returns {Promise} Current commit hash */ async getCurrentCommit() { try { - const result = await this.childProcess.execute('git', ['rev-parse', 'HEAD'], { - cwd: this.gitDir, - timeout: 10000 - }); - + const result = await this.childProcess.execute( + "git", + ["rev-parse", "HEAD"], + { + cwd: this.gitDir, + timeout: 10000, + }, + ); + return result.stdout.trim(); } catch (error) { throw new Error(`Failed to get current commit: ${error.message}`); } } - + /** * Comprehensive git state validation for deployment safety * Ensures: * 1. Working tree is clean (no uncommitted changes) * 2. On main/master branch * 3. Branch is up-to-date with origin (not ahead, not behind) - * + * * @returns {Promise} Validation result with errors and warnings */ async validateDeploymentReadiness() { const errors = []; const warnings = []; - + try { // 1. Check working tree is clean const isClean = await this.isWorkingTreeClean(); if (!isClean) { const status = await this.getWorkingTreeStatus(); - errors.push('Working tree is not clean. Commit or stash changes first.'); - + errors.push( + "Working tree is not clean. Commit or stash changes first.", + ); + if (status.modified.length > 0) { - warnings.push(`Modified files: ${status.modified.join(', ')}`); + warnings.push(`Modified files: ${status.modified.join(", ")}`); } if (status.untracked.length > 0) { - warnings.push(`Untracked files: ${status.untracked.join(', ')}`); + warnings.push(`Untracked files: ${status.untracked.join(", ")}`); } if (status.staged.length > 0) { - warnings.push(`Staged files: ${status.staged.join(', ')}`); + warnings.push(`Staged files: ${status.staged.join(", ")}`); } } - + // 2. Check on main/master branch const currentBranch = await this.getCurrentBranch(); - const allowedBranches = ['main', 'master']; - + const allowedBranches = ["main", "master"]; + if (!allowedBranches.includes(currentBranch)) { - errors.push(`Must be on main or master branch. Currently on: ${currentBranch}`); + errors.push( + `Must be on main or master branch. Currently on: ${currentBranch}`, + ); } - + // 3. 
Check synchronization with origin if (allowedBranches.includes(currentBranch)) { try { // Fetch latest refs from origin (lightweight) - await this.childProcess.execute('git', ['fetch', 'origin', currentBranch], { - cwd: this.gitDir, - timeout: 30000 - }); - + await this.childProcess.execute( + "git", + ["fetch", "origin", currentBranch], + { + cwd: this.gitDir, + timeout: 30000, + }, + ); + // Get local and remote HEAD commits const localHead = await this.getCurrentCommit(); const remoteResult = await this.childProcess.execute( - 'git', - ['rev-parse', `origin/${currentBranch}`], - { cwd: this.gitDir, timeout: 10000 } + "git", + ["rev-parse", `origin/${currentBranch}`], + { cwd: this.gitDir, timeout: 10000 }, ); const remoteHead = remoteResult.stdout.trim(); - + if (localHead !== remoteHead) { // Check if ahead, behind, or diverged const aheadResult = await this.childProcess.execute( - 'git', - ['rev-list', '--count', `origin/${currentBranch}..HEAD`], - { cwd: this.gitDir, timeout: 10000 } + "git", + ["rev-list", "--count", `origin/${currentBranch}..HEAD`], + { cwd: this.gitDir, timeout: 10000 }, ); const aheadCount = parseInt(aheadResult.stdout.trim()); - + const behindResult = await this.childProcess.execute( - 'git', - ['rev-list', '--count', `HEAD..origin/${currentBranch}`], - { cwd: this.gitDir, timeout: 10000 } + "git", + ["rev-list", "--count", `HEAD..origin/${currentBranch}`], + { cwd: this.gitDir, timeout: 10000 }, ); const behindCount = parseInt(behindResult.stdout.trim()); - + if (behindCount > 0 && aheadCount > 0) { errors.push( `Branch has diverged from origin/${currentBranch}. ` + - `${aheadCount} commits ahead, ${behindCount} commits behind. ` + - `Resolve conflicts and synchronize first.` + `${aheadCount} commits ahead, ${behindCount} commits behind. ` + + `Resolve conflicts and synchronize first.`, ); } else if (behindCount > 0) { errors.push( `Branch is ${behindCount} commits behind origin/${currentBranch}. ` + - `Pull latest changes: git pull origin ${currentBranch}` + `Pull latest changes: git pull origin ${currentBranch}`, ); } else if (aheadCount > 0) { errors.push( `Branch is ${aheadCount} commits ahead of origin/${currentBranch}. ` + - `Push your changes first: git push origin ${currentBranch}` + `Push your changes first: git push origin ${currentBranch}`, ); } } } catch (error) { - if (error.message.includes('unknown revision')) { + if (error.message.includes("unknown revision")) { warnings.push( - 'No remote origin configured. This is unusual for production deployments.' + "No remote origin configured. This is unusual for production deployments.", + ); + } else if (error.message.includes("Could not resolve host")) { + errors.push( + "Cannot connect to remote repository. Check network connection.", ); - } else if (error.message.includes('Could not resolve host')) { - errors.push('Cannot connect to remote repository. 
Check network connection.'); } else { - errors.push(`Failed to check remote synchronization: ${error.message}`); + errors.push( + `Failed to check remote synchronization: ${error.message}`, + ); } } } - + return { valid: errors.length === 0, errors, warnings, branch: currentBranch, - clean: isClean + clean: isClean, }; - } catch (error) { - this.emit('error', { - message: 'Git validation failed', - error + this.emit("error", { + message: "Git validation failed", + error, }); - + return { valid: false, errors: [`Git validation failed: ${error.message}`], - warnings + warnings, }; } } - + /** * Get the last deployment tag and its metadata * @returns {Promise} Last deployment tag info or null if none @@ -259,202 +286,223 @@ class GitDeploymentTracker extends EventEmitter { async getLastDeploymentTag() { try { // Get all deployment tags sorted by version (most recent first) - const result = await this.childProcess.execute('git', [ - 'tag', '-l', `${DEPLOYMENT_TAG_PREFIX}*`, - '--sort=-version:refname' - ], { - cwd: this.gitDir, - timeout: 10000 - }); - - const tags = result.stdout.trim().split('\n').filter(Boolean); + const result = await this.childProcess.execute( + "git", + ["tag", "-l", `${DEPLOYMENT_TAG_PREFIX}*`, "--sort=-version:refname"], + { + cwd: this.gitDir, + timeout: 10000, + }, + ); + + const tags = result.stdout.trim().split("\n").filter(Boolean); if (tags.length === 0) { - this.emit('progress', { message: 'No deployment tags found' }); + this.emit("progress", { message: "No deployment tags found" }); return null; } - + const latestTag = tags[0]; - + // Get the commit hash for this tag - const hashResult = await this.childProcess.execute('git', [ - 'rev-list', '-n', '1', latestTag - ], { - cwd: this.gitDir, - timeout: 10000 - }); - + const hashResult = await this.childProcess.execute( + "git", + ["rev-list", "-n", "1", latestTag], + { + cwd: this.gitDir, + timeout: 10000, + }, + ); + const hash = hashResult.stdout.trim(); - + // Get tag metadata (if any) let metadata = {}; try { - const messageResult = await this.childProcess.execute('git', [ - 'tag', '-l', '--format=%(contents)', latestTag - ], { - cwd: this.gitDir, - timeout: 10000 - }); - + const messageResult = await this.childProcess.execute( + "git", + ["tag", "-l", "--format=%(contents)", latestTag], + { + cwd: this.gitDir, + timeout: 10000, + }, + ); + const message = messageResult.stdout.trim(); - if (message && message.startsWith('{')) { + if (message && message.startsWith("{")) { metadata = JSON.parse(message); } } catch { // Ignore metadata parsing errors } - - this.emit('progress', { + + this.emit("progress", { message: `Found last deployment: ${latestTag}`, tag: latestTag, - hash + hash, }); - + return { tag: latestTag, hash, - metadata + metadata, }; - } catch (error) { throw new Error(`Failed to get deployment tags: ${error.message}`); } } - + /** * Get all deployment tags with their metadata * @returns {Promise} Array of deployment tag objects */ async getDeploymentHistory(limit = 10) { try { - const result = await this.childProcess.execute('git', [ - 'tag', '-l', `${DEPLOYMENT_TAG_PREFIX}*`, - '--sort=-version:refname' - ], { - cwd: this.gitDir, - timeout: 15000 - }); - - const tags = result.stdout.trim().split('\n').filter(Boolean).slice(0, limit); + const result = await this.childProcess.execute( + "git", + ["tag", "-l", `${DEPLOYMENT_TAG_PREFIX}*`, "--sort=-version:refname"], + { + cwd: this.gitDir, + timeout: 15000, + }, + ); + + const tags = result.stdout + .trim() + .split("\n") + .filter(Boolean) + 
.slice(0, limit); const history = []; - + for (const tag of tags) { // Get commit hash - const hashResult = await this.childProcess.execute('git', [ - 'rev-list', '-n', '1', tag - ], { - cwd: this.gitDir, - timeout: 10000 - }); - + const hashResult = await this.childProcess.execute( + "git", + ["rev-list", "-n", "1", tag], + { + cwd: this.gitDir, + timeout: 10000, + }, + ); + const hash = hashResult.stdout.trim(); - + // Get commit timestamp - const timestampResult = await this.childProcess.execute('git', [ - 'log', '-1', '--format=%ct', hash - ], { - cwd: this.gitDir, - timeout: 10000 - }); - - const timestamp = new Date(parseInt(timestampResult.stdout.trim()) * 1000); - + const timestampResult = await this.childProcess.execute( + "git", + ["log", "-1", "--format=%ct", hash], + { + cwd: this.gitDir, + timeout: 10000, + }, + ); + + const timestamp = new Date( + parseInt(timestampResult.stdout.trim()) * 1000, + ); + // Get tag metadata let metadata = {}; try { - const messageResult = await this.childProcess.execute('git', [ - 'tag', '-l', '--format=%(contents)', tag - ], { - cwd: this.gitDir, - timeout: 10000 - }); - + const messageResult = await this.childProcess.execute( + "git", + ["tag", "-l", "--format=%(contents)", tag], + { + cwd: this.gitDir, + timeout: 10000, + }, + ); + const message = messageResult.stdout.trim(); - if (message && message.startsWith('{')) { + if (message && message.startsWith("{")) { metadata = JSON.parse(message); } } catch { // Ignore metadata parsing errors } - + history.push({ tag, hash, timestamp, - metadata + metadata, }); } - + return history; - } catch (error) { throw new Error(`Failed to get deployment history: ${error.message}`); } } - + /** * Get SQL content at a specific git commit * @param {string} commitHash - Git commit hash * @returns {Promise} Combined SQL content */ async getSQLAtCommit(commitHash) { - this.emit('progress', { - message: `Retrieving SQL at commit ${commitHash.substring(0, 8)}` + this.emit("progress", { + message: `Retrieving SQL at commit ${commitHash.substring(0, 8)}`, }); - + try { // Get list of SQL files at the commit - const listResult = await this.childProcess.execute('git', [ - 'ls-tree', '-r', '--name-only', commitHash, '--', this.sqlDir - ], { - cwd: this.gitDir, - timeout: 15000 - }); - + const listResult = await this.childProcess.execute( + "git", + ["ls-tree", "-r", "--name-only", commitHash, "--", this.sqlDir], + { + cwd: this.gitDir, + timeout: 15000, + }, + ); + const sqlFiles = listResult.stdout .trim() - .split('\n') - .filter(file => file.endsWith('.sql')) + .split("\n") + .filter((file) => file.endsWith(".sql")) .sort(); - + if (sqlFiles.length === 0) { - this.emit('progress', { message: 'No SQL files found at commit' }); - return ''; + this.emit("progress", { message: "No SQL files found at commit" }); + return ""; } - - let combinedSQL = ''; - + + let combinedSQL = ""; + for (const file of sqlFiles) { try { - const fileResult = await this.childProcess.execute('git', [ - 'show', `${commitHash}:${file}` - ], { - cwd: this.gitDir, - timeout: 30000 - }); - + const fileResult = await this.childProcess.execute( + "git", + ["show", `${commitHash}:${file}`], + { + cwd: this.gitDir, + timeout: 30000, + }, + ); + combinedSQL += `-- File: ${file}\n`; - combinedSQL += fileResult.stdout + '\n\n'; + combinedSQL += fileResult.stdout + "\n\n"; } catch (error) { // File might not exist at this commit, skip it - this.emit('progress', { - message: `Skipping ${file} (not found at commit ${commitHash.substring(0, 8)})` + 
this.emit("progress", { + message: `Skipping ${file} (not found at commit ${commitHash.substring(0, 8)})`, }); } } - - this.emit('progress', { + + this.emit("progress", { message: `Retrieved SQL from ${sqlFiles.length} files`, files: sqlFiles.length, - size: combinedSQL.length + size: combinedSQL.length, }); - + return combinedSQL; - } catch (error) { - throw new Error(`Failed to get SQL at commit ${commitHash}: ${error.message}`); + throw new Error( + `Failed to get SQL at commit ${commitHash}: ${error.message}`, + ); } } - + /** * Create a new deployment tag with metadata * @param {string} tag - Tag name (without prefix) @@ -462,105 +510,122 @@ class GitDeploymentTracker extends EventEmitter { * @returns {Promise} Full tag name created */ async createDeploymentTag(tag, metadata) { - const fullTag = tag.startsWith(DEPLOYMENT_TAG_PREFIX) ? tag : `${DEPLOYMENT_TAG_PREFIX}${tag}`; - - this.emit('progress', { message: `Creating deployment tag: ${fullTag}` }); - + const fullTag = tag.startsWith(DEPLOYMENT_TAG_PREFIX) + ? tag + : `${DEPLOYMENT_TAG_PREFIX}${tag}`; + + this.emit("progress", { message: `Creating deployment tag: ${fullTag}` }); + try { // Create annotated tag with metadata as JSON const metadataJson = JSON.stringify(metadata, null, 2); - - await this.childProcess.execute('git', [ - 'tag', '-a', fullTag, '-m', metadataJson - ], { - cwd: this.gitDir, - timeout: 15000 - }); - - this.emit('progress', { + + await this.childProcess.execute( + "git", + ["tag", "-a", fullTag, "-m", metadataJson], + { + cwd: this.gitDir, + timeout: 15000, + }, + ); + + this.emit("progress", { message: `Created deployment tag: ${fullTag}`, tag: fullTag, - metadata + metadata, }); - + return fullTag; - } catch (error) { throw new Error(`Failed to create deployment tag: ${error.message}`); } } - + /** * Delete a deployment tag * @param {string} tag - Tag name to delete */ async deleteDeploymentTag(tag) { - const fullTag = tag.startsWith(DEPLOYMENT_TAG_PREFIX) ? tag : `${DEPLOYMENT_TAG_PREFIX}${tag}`; - + const fullTag = tag.startsWith(DEPLOYMENT_TAG_PREFIX) + ? 
tag + : `${DEPLOYMENT_TAG_PREFIX}${tag}`; + try { - await this.childProcess.execute('git', ['tag', '-d', fullTag], { + await this.childProcess.execute("git", ["tag", "-d", fullTag], { cwd: this.gitDir, - timeout: 10000 + timeout: 10000, }); - - this.emit('progress', { message: `Deleted tag: ${fullTag}` }); - + + this.emit("progress", { message: `Deleted tag: ${fullTag}` }); } catch (error) { throw new Error(`Failed to delete tag ${fullTag}: ${error.message}`); } } - + /** * Get changes between two commits * @param {string} fromCommit - Starting commit hash * @param {string} toCommit - Ending commit hash (default: HEAD) * @returns {Promise} Changes summary */ - async getChangesBetweenCommits(fromCommit, toCommit = 'HEAD') { + async getChangesBetweenCommits(fromCommit, toCommit = "HEAD") { try { // Get diff stat - const statResult = await this.childProcess.execute('git', [ - 'diff', '--stat', `${fromCommit}..${toCommit}`, '--', this.sqlDir - ], { - cwd: this.gitDir, - timeout: 15000 - }); - + const statResult = await this.childProcess.execute( + "git", + ["diff", "--stat", `${fromCommit}..${toCommit}`, "--", this.sqlDir], + { + cwd: this.gitDir, + timeout: 15000, + }, + ); + // Get list of changed files - const filesResult = await this.childProcess.execute('git', [ - 'diff', '--name-status', `${fromCommit}..${toCommit}`, '--', this.sqlDir - ], { - cwd: this.gitDir, - timeout: 15000 - }); - + const filesResult = await this.childProcess.execute( + "git", + [ + "diff", + "--name-status", + `${fromCommit}..${toCommit}`, + "--", + this.sqlDir, + ], + { + cwd: this.gitDir, + timeout: 15000, + }, + ); + const changes = { summary: statResult.stdout.trim(), files: [], additions: 0, - deletions: 0 + deletions: 0, }; - - const fileLines = filesResult.stdout.trim().split('\n').filter(Boolean); + + const fileLines = filesResult.stdout.trim().split("\n").filter(Boolean); for (const line of fileLines) { - const [status, file] = line.split('\t'); + const [status, file] = line.split("\t"); changes.files.push({ status, file }); } - + // Extract additions and deletions from summary - const summaryMatch = changes.summary.match(/(\d+) insertions?\(\+\), (\d+) deletions?\(-\)/); + const summaryMatch = changes.summary.match( + /(\d+) insertions?\(\+\), (\d+) deletions?\(-\)/, + ); if (summaryMatch) { changes.additions = parseInt(summaryMatch[1]); changes.deletions = parseInt(summaryMatch[2]); } - + return changes; - } catch (error) { - throw new Error(`Failed to get changes between commits: ${error.message}`); + throw new Error( + `Failed to get changes between commits: ${error.message}`, + ); } } - + /** * Check if a tag exists * @param {string} tag - Tag name to check @@ -568,38 +633,43 @@ class GitDeploymentTracker extends EventEmitter { */ async tagExists(tag) { try { - await this.childProcess.execute('git', ['rev-parse', `refs/tags/${tag}`], { - cwd: this.gitDir, - timeout: 10000 - }); + await this.childProcess.execute( + "git", + ["rev-parse", `refs/tags/${tag}`], + { + cwd: this.gitDir, + timeout: 10000, + }, + ); return true; } catch { return false; } } - + /** * Push deployment tags to remote * @param {string} remote - Remote name (default: origin) */ - async pushDeploymentTags(remote = 'origin') { + async pushDeploymentTags(remote = "origin") { try { - await this.childProcess.execute('git', [ - 'push', remote, `refs/tags/${DEPLOYMENT_TAG_PREFIX}*` - ], { - cwd: this.gitDir, - timeout: 30000 - }); - - this.emit('progress', { message: `Pushed deployment tags to ${remote}` }); - + await 
this.childProcess.execute( + "git", + ["push", remote, `refs/tags/${DEPLOYMENT_TAG_PREFIX}*`], + { + cwd: this.gitDir, + timeout: 30000, + }, + ); + + this.emit("progress", { message: `Pushed deployment tags to ${remote}` }); } catch (error) { // Don't fail the deployment if tag push fails - this.emit('progress', { - message: `Failed to push tags to ${remote}: ${error.message}` + this.emit("progress", { + message: `Failed to push tags to ${remote}: ${error.message}`, }); } } } -module.exports = GitDeploymentTracker; \ No newline at end of file +module.exports = GitDeploymentTracker; diff --git a/src/lib/migration/MigrationCompiler.js b/src/lib/migration/MigrationCompiler.js index 3f5a3ea..9ff8f39 100644 --- a/src/lib/migration/MigrationCompiler.js +++ b/src/lib/migration/MigrationCompiler.js @@ -1,21 +1,21 @@ /** * MigrationCompiler - Compiles Golden SQL from numbered directories - * + * * Following Supa Fleet Directive 34.1 section 3 subsection 12: * SQL directories MUST follow strict numerical naming convention * to control compilation order (extensions first, etc.) */ -const { EventEmitter } = require('events'); -const fs = require('fs').promises; -const path = require('path'); -const { glob } = require('glob'); +const { EventEmitter } = require("events"); +const fs = require("fs").promises; +const path = require("path"); +const { glob } = require("glob"); /** * Expected directory structure with internal ordering: * /sql/ * extensions/ -- PostgreSQL extensions (processed first) - * schemas/ -- Schema definitions + * schemas/ -- Schema definitions * types/ -- Custom types and enums * tables/ -- Table definitions * functions/ -- Stored procedures @@ -28,39 +28,39 @@ const { glob } = require('glob'); // Internal processing order - ensures dependencies are resolved const DIRECTORY_ORDER = [ - 'extensions', - 'schemas', - 'types', - 'tables', - 'functions', - 'views', - 'policies', - 'triggers', - 'indexes', - 'data' + "extensions", + "schemas", + "types", + "tables", + "functions", + "views", + "policies", + "triggers", + "indexes", + "data", ]; class MigrationCompiler extends EventEmitter { constructor(config = {}) { super(); - + this.config = { - sqlDir: config.sqlDir || './sql', - outputDir: config.outputDir || './migrations', + sqlDir: config.sqlDir || "./sql", + outputDir: config.outputDir || "./migrations", verbose: config.verbose || false, validateSql: config.validateSql !== false, includeComments: config.includeComments !== false, - timestamp: config.timestamp || new Date() + timestamp: config.timestamp || new Date(), }; - + // Statistics tracking this.stats = { filesProcessed: 0, linesWritten: 0, startTime: null, endTime: null, - directories: [] + directories: [], }; - + // State management this.isRunning = false; this.lastCompilation = null; @@ -71,65 +71,64 @@ class MigrationCompiler extends EventEmitter { */ async compile() { if (this.isRunning) { - throw new Error('Compilation already in progress'); + throw new Error("Compilation already in progress"); } - + this.isRunning = true; this.stats.startTime = new Date(); - - this.emit('start', { + + this.emit("start", { timestamp: this.stats.startTime, - config: this.config + config: this.config, }); - + try { // Validate SQL directory exists await this.validateSqlDirectory(); - + // Ensure output directory exists await fs.mkdir(this.config.outputDir, { recursive: true }); - + // Generate output filename const outputFile = this.generateOutputFilename(); - + // Write header await this.writeHeader(outputFile); - + // Process 
directories in dependency-resolved order const directories = await this.getOrderedDirectories(); - + for (const dir of directories) { await this.processDirectory(dir, outputFile); } - + // Write footer await this.writeFooter(outputFile); - + // Complete this.stats.endTime = new Date(); const duration = this.stats.endTime - this.stats.startTime; - + const result = { success: true, outputFile, stats: this.stats, - duration + duration, }; - + this.lastCompilation = result; - - this.emit('complete', { + + this.emit("complete", { result, - timestamp: this.stats.endTime + timestamp: this.stats.endTime, }); - + return result; - } catch (error) { - this.emit('error', { + this.emit("error", { error, message: error.message, - timestamp: new Date() + timestamp: new Date(), }); throw error; } finally { @@ -147,7 +146,7 @@ class MigrationCompiler extends EventEmitter { throw new Error(`SQL path is not a directory: ${this.config.sqlDir}`); } } catch (error) { - if (error.code === 'ENOENT') { + if (error.code === "ENOENT") { throw new Error(`SQL directory not found: ${this.config.sqlDir}`); } throw error; @@ -158,13 +157,15 @@ class MigrationCompiler extends EventEmitter { * Get directories in dependency-resolved order */ async getOrderedDirectories() { - const entries = await fs.readdir(this.config.sqlDir, { withFileTypes: true }); - + const entries = await fs.readdir(this.config.sqlDir, { + withFileTypes: true, + }); + // Get all directories const availableDirs = entries - .filter(entry => entry.isDirectory()) - .map(entry => entry.name); - + .filter((entry) => entry.isDirectory()) + .map((entry) => entry.name); + // Order directories according to DIRECTORY_ORDER const orderedDirs = []; for (const dirName of DIRECTORY_ORDER) { @@ -172,31 +173,33 @@ class MigrationCompiler extends EventEmitter { orderedDirs.push(dirName); } } - + // Add any directories not in our standard list (for custom directories) - const customDirs = availableDirs.filter(dir => !DIRECTORY_ORDER.includes(dir)); + const customDirs = availableDirs.filter( + (dir) => !DIRECTORY_ORDER.includes(dir), + ); if (customDirs.length > 0) { - this.emit('warning', { - message: `Found non-standard directories: ${customDirs.join(', ')}. These will be processed last.`, - timestamp: new Date() + this.emit("warning", { + message: `Found non-standard directories: ${customDirs.join(", ")}. These will be processed last.`, + timestamp: new Date(), }); orderedDirs.push(...customDirs.sort()); } - + if (orderedDirs.length === 0) { - this.emit('warning', { - message: 'No directories found. Looking for SQL files in root.', - timestamp: new Date() + this.emit("warning", { + message: "No directories found. 
Looking for SQL files in root.", + timestamp: new Date(), }); - return ['']; // Process root directory + return [""]; // Process root directory } - - this.emit('progress', { + + this.emit("progress", { message: `Processing ${orderedDirs.length} directories in order`, directories: orderedDirs, - timestamp: new Date() + timestamp: new Date(), }); - + return orderedDirs; } @@ -205,29 +208,29 @@ class MigrationCompiler extends EventEmitter { */ async processDirectory(dirName, outputFile) { const fullPath = path.join(this.config.sqlDir, dirName); - - this.emit('directory:start', { - directory: dirName || 'root', + + this.emit("directory:start", { + directory: dirName || "root", path: fullPath, - timestamp: new Date() + timestamp: new Date(), }); - + // Find all SQL files in directory - const pattern = path.join(fullPath, '**/*.sql'); + const pattern = path.join(fullPath, "**/*.sql"); const sqlFiles = await glob(pattern); - + if (sqlFiles.length === 0) { - this.emit('directory:skip', { + this.emit("directory:skip", { directory: dirName, - reason: 'No SQL files found', - timestamp: new Date() + reason: "No SQL files found", + timestamp: new Date(), }); return; } - + // Sort files for consistent ordering sqlFiles.sort(); - + // Write directory section header if (dirName) { const sectionHeader = ` @@ -237,20 +240,20 @@ class MigrationCompiler extends EventEmitter { `; await fs.appendFile(outputFile, sectionHeader); - this.stats.linesWritten += sectionHeader.split('\n').length; + this.stats.linesWritten += sectionHeader.split("\n").length; } - + // Process each SQL file for (const sqlFile of sqlFiles) { await this.processFile(sqlFile, outputFile); } - + this.stats.directories.push(dirName); - - this.emit('directory:complete', { + + this.emit("directory:complete", { directory: dirName, filesProcessed: sqlFiles.length, - timestamp: new Date() + timestamp: new Date(), }); } @@ -259,16 +262,16 @@ class MigrationCompiler extends EventEmitter { */ async processFile(filePath, outputFile) { const relativePath = path.relative(this.config.sqlDir, filePath); - - this.emit('file:process', { + + this.emit("file:process", { file: relativePath, - timestamp: new Date() + timestamp: new Date(), }); - + try { - const content = await fs.readFile(filePath, 'utf8'); - const lines = content.split('\n'); - + const content = await fs.readFile(filePath, "utf8"); + const lines = content.split("\n"); + // Write file header comment if (this.config.includeComments) { const fileHeader = `-- ─────────────────────────────────────────────────────────────────────────── @@ -276,30 +279,29 @@ class MigrationCompiler extends EventEmitter { -- ─────────────────────────────────────────────────────────────────────────── `; await fs.appendFile(outputFile, fileHeader); - this.stats.linesWritten += fileHeader.split('\n').length; + this.stats.linesWritten += fileHeader.split("\n").length; } - + // Write file content await fs.appendFile(outputFile, content); - if (!content.endsWith('\n')) { - await fs.appendFile(outputFile, '\n'); + if (!content.endsWith("\n")) { + await fs.appendFile(outputFile, "\n"); } - await fs.appendFile(outputFile, '\n'); // Extra newline between files - + await fs.appendFile(outputFile, "\n"); // Extra newline between files + this.stats.linesWritten += lines.length + 1; this.stats.filesProcessed++; - - this.emit('file:complete', { + + this.emit("file:complete", { file: relativePath, lineCount: lines.length, - timestamp: new Date() + timestamp: new Date(), }); - } catch (error) { - this.emit('file:error', { + 
this.emit("file:error", { file: relativePath, error: error.message, - timestamp: new Date() + timestamp: new Date(), }); throw error; } @@ -311,11 +313,11 @@ class MigrationCompiler extends EventEmitter { generateOutputFilename() { const timestamp = this.config.timestamp .toISOString() - .replace(/[T:]/g, '') - .replace(/\..+/, '') - .replace(/-/g, '') + .replace(/[T:]/g, "") + .replace(/\..+/, "") + .replace(/-/g, "") .slice(0, 14); - + return path.join(this.config.outputDir, `${timestamp}_compiled.sql`); } @@ -338,13 +340,13 @@ class MigrationCompiler extends EventEmitter { -- ═══════════════════════════════════════════════════════════════════════════ `; - + await fs.writeFile(outputFile, header); - this.stats.linesWritten += header.split('\n').length; - - this.emit('header:written', { + this.stats.linesWritten += header.split("\n").length; + + this.emit("header:written", { outputFile, - timestamp: new Date() + timestamp: new Date(), }); } @@ -360,17 +362,17 @@ class MigrationCompiler extends EventEmitter { -- Statistics: -- Files processed: ${this.stats.filesProcessed} -- Lines written: ${this.stats.linesWritten} --- Directories: ${this.stats.directories.join(', ') || 'root'} +-- Directories: ${this.stats.directories.join(", ") || "root"} -- -- "The compilation is complete, Captain." - Lt. Commander Data -- ═══════════════════════════════════════════════════════════════════════════ `; - + await fs.appendFile(outputFile, footer); - this.stats.linesWritten += footer.split('\n').length; - - this.emit('footer:written', { - timestamp: new Date() + this.stats.linesWritten += footer.split("\n").length; + + this.emit("footer:written", { + timestamp: new Date(), }); } @@ -389,4 +391,4 @@ class MigrationCompiler extends EventEmitter { } } -module.exports = MigrationCompiler; \ No newline at end of file +module.exports = MigrationCompiler; diff --git a/src/lib/migration/MigrationOrchestrator.js b/src/lib/migration/MigrationOrchestrator.js index 2007476..2256077 100644 --- a/src/lib/migration/MigrationOrchestrator.js +++ b/src/lib/migration/MigrationOrchestrator.js @@ -1,6 +1,6 @@ /** * Migration Orchestrator for D.A.T.A. (Database Automation, Testing, and Alignment) - * + * * Main entry point that coordinates the entire migration workflow: * 1. Git state validation -> clean working tree required * 2. Test execution -> all tests must pass before migration @@ -10,19 +10,19 @@ * 6. Edge Functions deployment -> deploy functions after DB changes * 7. Git tagging -> tag deployment for future reference * 8. 
Rollback support -> revert to previous deployment if needed - * + * * @module MigrationOrchestrator */ -const { EventEmitter } = require('events'); -const SupabaseCommand = require('../SupabaseCommand'); -const ASTMigrationEngine = require('./ASTMigrationEngine'); -const GitDeploymentTracker = require('./GitDeploymentTracker'); -const SchemaDiffAnalyzer = require('./SchemaDiffAnalyzer'); -const ChildProcessWrapper = require('../ChildProcessWrapper'); -const TestCoverageOrchestrator = require('../testing/TestCoverageOrchestrator'); -const path = require('path'); -const fs = require('fs').promises; +const { EventEmitter } = require("events"); +const SupabaseCommand = require("../SupabaseCommand"); +const ASTMigrationEngine = require("./ASTMigrationEngine"); +const GitDeploymentTracker = require("./GitDeploymentTracker"); +const SchemaDiffAnalyzer = require("./SchemaDiffAnalyzer"); +const ChildProcessWrapper = require("../ChildProcessWrapper"); +const TestCoverageOrchestrator = require("../testing/TestCoverageOrchestrator"); +const path = require("path"); +const fs = require("fs").promises; /** * Orchestrator workflow phases @@ -30,17 +30,17 @@ const fs = require('fs').promises; * @enum {string} */ const PHASES = { - VALIDATION: 'validation', - TESTING: 'testing', - COVERAGE: 'coverage', - ANALYSIS: 'analysis', - PREVIEW: 'preview', - CONFIRMATION: 'confirmation', - MIGRATION: 'migration', - FUNCTIONS: 'functions', - TAGGING: 'tagging', - COMPLETE: 'complete', - ROLLBACK: 'rollback' + VALIDATION: "validation", + TESTING: "testing", + COVERAGE: "coverage", + ANALYSIS: "analysis", + PREVIEW: "preview", + CONFIRMATION: "confirmation", + MIGRATION: "migration", + FUNCTIONS: "functions", + TAGGING: "tagging", + COMPLETE: "complete", + ROLLBACK: "rollback", }; /** @@ -49,10 +49,10 @@ const PHASES = { * @enum {string} */ const OPERATIONS = { - FULL_DEPLOY: 'full_deploy', - SCHEMA_ONLY: 'schema_only', - FUNCTIONS_ONLY: 'functions_only', - ROLLBACK: 'rollback' + FULL_DEPLOY: "full_deploy", + SCHEMA_ONLY: "schema_only", + FUNCTIONS_ONLY: "functions_only", + ROLLBACK: "rollback", }; class MigrationOrchestrator extends SupabaseCommand { @@ -62,28 +62,35 @@ class MigrationOrchestrator extends SupabaseCommand { options.serviceRoleKey, options.logger, options.isProd || false, - options.requiresConfirmation !== false // Default to true + options.requiresConfirmation !== false, // Default to true ); - + // Configuration - this.sqlDir = options.sqlDir || process.env.ARCHY_SQL_DIR || './sql'; - this.migrationsDir = options.migrationsDir || process.env.ARCHY_MIGRATIONS_DIR || './migrations'; - this.functionsDir = options.functionsDir || process.env.ARCHY_FUNCTIONS_DIR || './functions'; - this.testsDir = options.testsDir || process.env.ARCHY_TESTS_DIR || './tests'; - + this.sqlDir = options.sqlDir || process.env.ARCHY_SQL_DIR || "./sql"; + this.migrationsDir = + options.migrationsDir || + process.env.ARCHY_MIGRATIONS_DIR || + "./migrations"; + this.functionsDir = + options.functionsDir || process.env.ARCHY_FUNCTIONS_DIR || "./functions"; + this.testsDir = + options.testsDir || process.env.ARCHY_TESTS_DIR || "./tests"; + // Dependencies (injected for testing) this.astEngine = options.astEngine || new ASTMigrationEngine(); this.gitTracker = options.gitTracker || new GitDeploymentTracker(); this.diffAnalyzer = options.diffAnalyzer || new SchemaDiffAnalyzer(); this.childProcess = options.childProcess || new ChildProcessWrapper(); - this.coverageOrchestrator = options.coverageOrchestrator || new 
TestCoverageOrchestrator({ - testsDir: this.testsDir, - sqlDir: this.sqlDir, - enforcementLevel: options.coverageEnforcementLevel || 'normal', - generateTemplates: options.generateCoverageTemplates || false, - allowBypass: options.allowCoverageBypass || false - }); - + this.coverageOrchestrator = + options.coverageOrchestrator || + new TestCoverageOrchestrator({ + testsDir: this.testsDir, + sqlDir: this.sqlDir, + enforcementLevel: options.coverageEnforcementLevel || "normal", + generateTemplates: options.generateCoverageTemplates || false, + allowBypass: options.allowCoverageBypass || false, + }); + // Workflow options this.skipTests = options.skipTests || false; this.skipCoverage = options.skipCoverage || false; @@ -91,45 +98,75 @@ class MigrationOrchestrator extends SupabaseCommand { this.dryRun = options.dryRun || false; this.forceRollback = options.forceRollback || false; this.coverageBypassReason = options.coverageBypassReason || null; - + // Current operation state this.currentOperation = null; this.currentPhase = null; this.migrationOperations = []; this.rollbackPoint = null; - + // Wire up engine events this.setupEngineEventHandlers(); } - + /** * Setup event forwarding from sub-components */ setupEngineEventHandlers() { // Forward AST engine events - this.astEngine.on('start', (data) => this.progress('AST Engine: ' + data.message, data)); - this.astEngine.on('progress', (data) => this.progress('AST Engine: ' + data.message, data)); - + this.astEngine.on("start", (data) => + this.progress("AST Engine: " + data.message, data), + ); + this.astEngine.on("progress", (data) => + this.progress("AST Engine: " + data.message, data), + ); + // Forward coverage orchestrator events - this.coverageOrchestrator.on('progress', (data) => this.progress('Coverage: ' + data.message, data)); - this.coverageOrchestrator.on('warning', (data) => this.warn('Coverage: ' + data.message, data)); - this.coverageOrchestrator.on('enforcement_failed', (data) => this.error('Coverage enforcement failed', data)); - this.coverageOrchestrator.on('enforcement_bypassed', (data) => this.warn('Coverage enforcement bypassed', data)); - this.astEngine.on('warning', (data) => this.warn('AST Engine: ' + data.message, data)); - this.astEngine.on('error', (data) => this.error('AST Engine: ' + data.message, data.error, data)); - this.astEngine.on('complete', (data) => this.progress('AST Engine: ' + data.message, data)); - + this.coverageOrchestrator.on("progress", (data) => + this.progress("Coverage: " + data.message, data), + ); + this.coverageOrchestrator.on("warning", (data) => + this.warn("Coverage: " + data.message, data), + ); + this.coverageOrchestrator.on("enforcement_failed", (data) => + this.error("Coverage enforcement failed", data), + ); + this.coverageOrchestrator.on("enforcement_bypassed", (data) => + this.warn("Coverage enforcement bypassed", data), + ); + this.astEngine.on("warning", (data) => + this.warn("AST Engine: " + data.message, data), + ); + this.astEngine.on("error", (data) => + this.error("AST Engine: " + data.message, data.error, data), + ); + this.astEngine.on("complete", (data) => + this.progress("AST Engine: " + data.message, data), + ); + // Forward Git tracker events - this.gitTracker.on('progress', (data) => this.progress('Git Tracker: ' + data.message, data)); - this.gitTracker.on('warning', (data) => this.warn('Git Tracker: ' + data.message, data)); - this.gitTracker.on('error', (data) => this.error('Git Tracker: ' + data.message, data.error, data)); - + this.gitTracker.on("progress", (data) 
=> + this.progress("Git Tracker: " + data.message, data), + ); + this.gitTracker.on("warning", (data) => + this.warn("Git Tracker: " + data.message, data), + ); + this.gitTracker.on("error", (data) => + this.error("Git Tracker: " + data.message, data.error, data), + ); + // Forward diff analyzer events - this.diffAnalyzer.on('progress', (data) => this.progress('Diff Analyzer: ' + data.message, data)); - this.diffAnalyzer.on('warning', (data) => this.warn('Diff Analyzer: ' + data.message, data)); - this.diffAnalyzer.on('complete', (data) => this.progress('Diff Analyzer: ' + data.message, data)); + this.diffAnalyzer.on("progress", (data) => + this.progress("Diff Analyzer: " + data.message, data), + ); + this.diffAnalyzer.on("warning", (data) => + this.warn("Diff Analyzer: " + data.message, data), + ); + this.diffAnalyzer.on("complete", (data) => + this.progress("Diff Analyzer: " + data.message, data), + ); } - + /** * Execute full deployment workflow * @param {Object} options - Deployment options @@ -137,113 +174,123 @@ class MigrationOrchestrator extends SupabaseCommand { */ async performExecute(options = {}) { this.currentOperation = OPERATIONS.FULL_DEPLOY; - + try { // Phase 1: Validation await this.executePhase(PHASES.VALIDATION); const gitState = await this.validateGitState(); - + // Phase 2: Testing (unless skipped) if (!this.skipTests) { await this.executePhase(PHASES.TESTING); await this.runTests(); } - + // Phase 3: Analysis await this.executePhase(PHASES.ANALYSIS); const analysisResult = await this.analyzeChanges(gitState); - + // Phase 4: Test Coverage Check (unless skipped) if (!this.skipCoverage && analysisResult.operations.length > 0) { await this.executePhase(PHASES.COVERAGE); - const coverageResult = await this.checkTestCoverage(analysisResult.operations); - + const coverageResult = await this.checkTestCoverage( + analysisResult.operations, + ); + if (!coverageResult.passed) { - this.error('Test coverage requirements not met'); + this.error("Test coverage requirements not met"); throw new Error( `Deployment blocked: Test coverage is ${coverageResult.coveragePercentage}%. ` + - `${coverageResult.gaps.length} test requirements are missing. ` + - `Run 'data test generate-template' to create missing tests.` + `${coverageResult.gaps.length} test requirements are missing. 
` + + `Run 'data test generate-template' to create missing tests.`, ); } } - + if (analysisResult.operations.length === 0) { - this.success('No schema changes detected - nothing to migrate'); + this.success("No schema changes detected - nothing to migrate"); return { success: true, operations: [], deployed: false }; } - + // Phase 4: Preview await this.executePhase(PHASES.PREVIEW); await this.previewChanges(analysisResult); - + // Phase 5: Confirmation if (!this.dryRun) { await this.executePhase(PHASES.CONFIRMATION); const confirmed = await this.confirmDeployment(analysisResult); if (!confirmed) { - this.success('Deployment cancelled by user'); + this.success("Deployment cancelled by user"); return { success: true, cancelled: true }; } } - + // Phase 6: Migration if (!this.dryRun) { await this.executePhase(PHASES.MIGRATION); - const migrationResult = await this.executeMigration(analysisResult.operations); - + const migrationResult = await this.executeMigration( + analysisResult.operations, + ); + // Phase 7: Functions (unless skipped) - if (!this.skipFunctions && await this.hasFunctionsTodeploy()) { + if (!this.skipFunctions && (await this.hasFunctionsTodeploy())) { await this.executePhase(PHASES.FUNCTIONS); await this.deployFunctions(); } - + // Phase 8: Tagging await this.executePhase(PHASES.TAGGING); const tag = await this.tagDeployment(migrationResult); - + await this.executePhase(PHASES.COMPLETE); - this.success('Deployment completed successfully', { + this.success("Deployment completed successfully", { operations: analysisResult.operations.length, tag, - migrationId: migrationResult.id + migrationId: migrationResult.id, }); - + return { success: true, operations: analysisResult.operations, migrationResult, tag, - deployed: true + deployed: true, }; } else { - this.success('Dry run completed - no changes made', { - operations: analysisResult.operations.length + this.success("Dry run completed - no changes made", { + operations: analysisResult.operations.length, }); - return { success: true, dryRun: true, operations: analysisResult.operations }; + return { + success: true, + dryRun: true, + operations: analysisResult.operations, + }; } - } catch (error) { this.error(`Deployment failed in phase ${this.currentPhase}`, error); - + // Attempt rollback if we're past the migration phase - if (this.currentPhase === PHASES.FUNCTIONS || - this.currentPhase === PHASES.TAGGING || - this.currentPhase === PHASES.COMPLETE) { - - this.warn('Attempting automatic rollback due to deployment failure'); + if ( + this.currentPhase === PHASES.FUNCTIONS || + this.currentPhase === PHASES.TAGGING || + this.currentPhase === PHASES.COMPLETE + ) { + this.warn("Attempting automatic rollback due to deployment failure"); try { await this.rollback({ automatic: true }); } catch (rollbackError) { - this.error('Rollback failed', rollbackError); - throw new Error(`Deployment failed and rollback failed: ${error.message}. Manual intervention required.`); + this.error("Rollback failed", rollbackError); + throw new Error( + `Deployment failed and rollback failed: ${error.message}. 
Manual intervention required.`, + ); } } - + throw error; } } - + /** * Execute a specific workflow phase */ @@ -251,421 +298,452 @@ class MigrationOrchestrator extends SupabaseCommand { this.currentPhase = phase; this.progress(`Starting phase: ${phase}`); } - + /** * Validate git working tree is clean and ready for deployment */ async validateGitState() { - this.progress('Validating git repository state...'); - + this.progress("Validating git repository state..."); + const isClean = await this.gitTracker.isWorkingTreeClean(); if (!isClean) { const status = await this.gitTracker.getWorkingTreeStatus(); throw new Error( - 'Git working tree must be clean before deployment. ' + - `Uncommitted changes found: ${status.modified.length} modified, ` + - `${status.untracked.length} untracked files` + "Git working tree must be clean before deployment. " + + `Uncommitted changes found: ${status.modified.length} modified, ` + + `${status.untracked.length} untracked files`, ); } - + const currentBranch = await this.gitTracker.getCurrentBranch(); const lastDeployment = await this.gitTracker.getLastDeploymentTag(); - - this.progress('Git state validated', { + + this.progress("Git state validated", { branch: currentBranch, lastDeployment: lastDeployment?.tag, - lastDeploymentHash: lastDeployment?.hash + lastDeploymentHash: lastDeployment?.hash, }); - + return { branch: currentBranch, lastDeployment, - isClean: true + isClean: true, }; } - + /** * Run all tests to ensure code quality before deployment */ async runTests() { - this.progress('Running test suite...'); - + this.progress("Running test suite..."); + // Run unit tests (Vitest) try { - const testResult = await this.childProcess.execute('npm', ['test'], { + const testResult = await this.childProcess.execute("npm", ["test"], { cwd: process.cwd(), - timeout: 300000 // 5 minutes + timeout: 300000, // 5 minutes }); - - this.progress('Unit tests passed', { exitCode: testResult.exitCode }); + + this.progress("Unit tests passed", { exitCode: testResult.exitCode }); } catch (error) { throw new Error(`Unit tests failed: ${error.message}`); } - + // Run database tests (pgTAP) if tests directory exists try { await fs.access(this.testsDir); - this.progress('Running database tests...'); - + this.progress("Running database tests..."); + // This would call a pgTAP test runner - implementation depends on setup const dbTestResult = await this.runDatabaseTests(); - this.progress('Database tests passed', dbTestResult); + this.progress("Database tests passed", dbTestResult); } catch (error) { - if (error.code !== 'ENOENT') { + if (error.code !== "ENOENT") { throw new Error(`Database tests failed: ${error.message}`); } - this.progress('No database tests found, skipping'); + this.progress("No database tests found, skipping"); } } - + /** * Check test coverage for migration operations * @param {Array} operations - Migration operations to check coverage for * @returns {Promise} Coverage check results */ async checkTestCoverage(operations) { - this.progress('Checking test coverage for migration operations...'); - + this.progress("Checking test coverage for migration operations..."); + try { - const coverageResult = await this.coverageOrchestrator.checkCoverage(operations, { - enforcementLevel: this.isProd ? 'strict' : 'normal', - bypassReason: this.coverageBypassReason, - allowBypass: this.coverageBypassReason !== null - }); - + const coverageResult = await this.coverageOrchestrator.checkCoverage( + operations, + { + enforcementLevel: this.isProd ? 
"strict" : "normal", + bypassReason: this.coverageBypassReason, + allowBypass: this.coverageBypassReason !== null, + }, + ); + if (coverageResult.passed) { - this.success(`Test coverage check passed (${coverageResult.coveragePercentage}% coverage)`); + this.success( + `Test coverage check passed (${coverageResult.coveragePercentage}% coverage)`, + ); } else if (coverageResult.bypassReason) { this.warn(`Test coverage bypassed: ${coverageResult.bypassReason}`); } else { - this.warn(`Test coverage insufficient: ${coverageResult.coveragePercentage}%`); - this.warn(`Missing tests for ${coverageResult.gaps.length} requirements`); - + this.warn( + `Test coverage insufficient: ${coverageResult.coveragePercentage}%`, + ); + this.warn( + `Missing tests for ${coverageResult.gaps.length} requirements`, + ); + // Show top 5 gaps const topGaps = coverageResult.gaps.slice(0, 5); for (const gap of topGaps) { - this.warn(` - ${gap.requirement.type}: ${gap.requirement.name} (${gap.requirement.priority})`); + this.warn( + ` - ${gap.requirement.type}: ${gap.requirement.name} (${gap.requirement.priority})`, + ); } - + if (coverageResult.gaps.length > 5) { this.warn(` ... and ${coverageResult.gaps.length - 5} more`); } } - + return coverageResult; - } catch (error) { - this.error('Test coverage check failed', error); - + this.error("Test coverage check failed", error); + // Always block on technical failures in production if (this.isProd) { throw new Error( `Critical: Test coverage system failure in production. ` + - `Error: ${error.message}. Manual intervention required.` + `Error: ${error.message}. Manual intervention required.`, ); } - + // In non-production, require explicit bypass for technical failures if (!this.coverageBypassReason) { throw new Error( `Test coverage check failed technically: ${error.message}. ` + - `Use --coverage-bypass-reason "" to continue despite this failure.` + `Use --coverage-bypass-reason "" to continue despite this failure.`, ); } - + // Log the bypass with full context this.warn(`Coverage check bypassed due to technical failure`, { error: error.message, bypassReason: this.coverageBypassReason, - stack: error.stack + stack: error.stack, }); - - return { - passed: true, + + return { + passed: true, bypassReason: this.coverageBypassReason, technicalFailure: true, - error: error.message + error: error.message, }; } } - + /** * Analyze schema changes using AST engine and diff analyzer */ async analyzeChanges(gitState) { - this.progress('Analyzing schema changes...'); - + this.progress("Analyzing schema changes..."); + // Get SQL from last deployment and current HEAD const currentSQL = await this.loadCurrentSQL(); - const previousSQL = gitState.lastDeployment + const previousSQL = gitState.lastDeployment ? 
await this.gitTracker.getSQLAtCommit(gitState.lastDeployment.hash) - : ''; - + : ""; + // Generate migration operations using AST engine - const operations = await this.astEngine.generateMigration(previousSQL, currentSQL); - + const operations = await this.astEngine.generateMigration( + previousSQL, + currentSQL, + ); + // Analyze the diff for risk assessment and recommendations const analysis = await this.diffAnalyzer.analyzeMigration(operations, { previousSQL, currentSQL, - isProd: this.isProd + isProd: this.isProd, }); - + this.migrationOperations = operations; - - this.progress('Schema analysis complete', { + + this.progress("Schema analysis complete", { operationsCount: operations.length, riskLevel: analysis.riskLevel, - destructiveOps: operations.filter(op => op.type === 'DESTRUCTIVE').length, - warningOps: operations.filter(op => op.type === 'WARNING').length + destructiveOps: operations.filter((op) => op.type === "DESTRUCTIVE") + .length, + warningOps: operations.filter((op) => op.type === "WARNING").length, }); - + return { operations, analysis, previousSQL, - currentSQL + currentSQL, }; } - + /** * Preview changes to user before deployment */ async previewChanges(analysisResult) { - this.progress('Generating deployment preview...'); - + this.progress("Generating deployment preview..."); + const { operations, analysis } = analysisResult; - + // Emit preview event with structured data - this.emit('preview', { + this.emit("preview", { summary: { totalOperations: operations.length, - safeOperations: operations.filter(op => op.type === 'SAFE').length, - warningOperations: operations.filter(op => op.type === 'WARNING').length, - destructiveOperations: operations.filter(op => op.type === 'DESTRUCTIVE').length, - riskLevel: analysis.riskLevel + safeOperations: operations.filter((op) => op.type === "SAFE").length, + warningOperations: operations.filter((op) => op.type === "WARNING") + .length, + destructiveOperations: operations.filter( + (op) => op.type === "DESTRUCTIVE", + ).length, + riskLevel: analysis.riskLevel, }, - operations: operations.map(op => ({ + operations: operations.map((op) => ({ type: op.type, description: op.description, sql: op.sql, warning: op.warning, - requiresConfirmation: op.requiresConfirmation + requiresConfirmation: op.requiresConfirmation, })), recommendations: analysis.recommendations, - estimatedDuration: analysis.estimatedDuration + estimatedDuration: analysis.estimatedDuration, }); } - + /** * Get user confirmation for deployment */ async confirmDeployment(analysisResult) { const { operations, analysis } = analysisResult; - const destructive = operations.filter(op => op.type === 'DESTRUCTIVE'); - + const destructive = operations.filter((op) => op.type === "DESTRUCTIVE"); + if (destructive.length > 0) { this.warn(`${destructive.length} DESTRUCTIVE operations detected!`); - + for (const op of destructive) { this.warn(`DESTRUCTIVE: ${op.description} - ${op.warning}`); } - + const confirmed = await this.confirm( - 'You are about to perform DESTRUCTIVE operations that may result in DATA LOSS. ' + - 'Are you absolutely sure you want to continue?', - false + "You are about to perform DESTRUCTIVE operations that may result in DATA LOSS. 
" + + "Are you absolutely sure you want to continue?", + false, ); - + if (!confirmed) return false; } - + if (this.isProd) { const prodConfirmed = await this.confirm( `Deploy ${operations.length} operations to PRODUCTION environment?`, - false + false, ); if (!prodConfirmed) return false; } - + return true; } - + /** * Execute the database migration in a transaction */ async executeMigration(operations) { - this.progress('Executing database migration...'); - + this.progress("Executing database migration..."); + // Ensure we have exec_sql function for DDL operations await this.ensureExecSqlFunction(); - + const migrationId = `migration_${Date.now()}`; this.rollbackPoint = await this.createRollbackPoint(); - + try { // Execute operations in transaction - await this.executeSql('BEGIN'); - + await this.executeSql("BEGIN"); + for (let i = 0; i < operations.length; i++) { const operation = operations[i]; - - this.progress(`Executing operation ${i + 1}/${operations.length}: ${operation.description}`); - + + this.progress( + `Executing operation ${i + 1}/${operations.length}: ${operation.description}`, + ); + try { await this.executeSql(operation.sql); } catch (error) { // Rollback transaction and throw - await this.executeSql('ROLLBACK'); - throw new Error(`Operation failed: ${operation.description}. Error: ${error.message}`); + await this.executeSql("ROLLBACK"); + throw new Error( + `Operation failed: ${operation.description}. Error: ${error.message}`, + ); } } - + // Record migration in tracking table await this.recordMigration(migrationId, operations); - + // Commit transaction - await this.executeSql('COMMIT'); - - this.success('Database migration completed successfully', { + await this.executeSql("COMMIT"); + + this.success("Database migration completed successfully", { migrationId, - operations: operations.length + operations: operations.length, }); - + return { id: migrationId, operations: operations.length }; - } catch (error) { - this.error('Migration failed, transaction rolled back', error); + this.error("Migration failed, transaction rolled back", error); throw error; } } - + /** * Deploy Edge Functions after successful database migration */ async deployFunctions() { - this.progress('Deploying Edge Functions...'); - + this.progress("Deploying Edge Functions..."); + try { // Use supabase CLI to deploy functions - const deployResult = await this.childProcess.execute('supabase', [ - 'functions', 'deploy', - '--project-ref', this.getProjectRef() - ], { - cwd: process.cwd(), - timeout: 120000 // 2 minutes - }); - - this.success('Edge Functions deployed successfully', { - output: deployResult.stdout + const deployResult = await this.childProcess.execute( + "supabase", + ["functions", "deploy", "--project-ref", this.getProjectRef()], + { + cwd: process.cwd(), + timeout: 120000, // 2 minutes + }, + ); + + this.success("Edge Functions deployed successfully", { + output: deployResult.stdout, }); - } catch (error) { // Functions deployment failure shouldn't stop the migration // but we should warn and potentially offer rollback - this.warn('Edge Functions deployment failed', { error: error.message }); + this.warn("Edge Functions deployment failed", { error: error.message }); throw new Error(`Functions deployment failed: ${error.message}`); } } - + /** * Tag the successful deployment in git */ async tagDeployment(migrationResult) { const tag = `deploy-${Date.now()}-${migrationResult.id}`; - + this.progress(`Creating deployment tag: ${tag}`); - + await this.gitTracker.createDeploymentTag(tag, { 
migrationId: migrationResult.id, operations: migrationResult.operations, timestamp: new Date().toISOString(), - environment: this.isProd ? 'production' : 'development' + environment: this.isProd ? "production" : "development", }); - + return tag; } - + /** * Rollback to previous deployment */ async rollback(options = {}) { this.currentOperation = OPERATIONS.ROLLBACK; this.currentPhase = PHASES.ROLLBACK; - - this.progress('Initiating rollback...'); - + + this.progress("Initiating rollback..."); + const lastDeployment = await this.gitTracker.getLastDeploymentTag(); if (!lastDeployment) { - throw new Error('No previous deployment found to rollback to'); + throw new Error("No previous deployment found to rollback to"); } - + try { // Get the SQL state at the last deployment - const targetSQL = await this.gitTracker.getSQLAtCommit(lastDeployment.hash); + const targetSQL = await this.gitTracker.getSQLAtCommit( + lastDeployment.hash, + ); const currentSQL = await this.loadCurrentSQL(); - + // Generate reverse migration - const rollbackOps = await this.astEngine.generateMigration(currentSQL, targetSQL); - + const rollbackOps = await this.astEngine.generateMigration( + currentSQL, + targetSQL, + ); + if (!options.automatic) { - this.warn(`Rolling back ${rollbackOps.length} operations to deployment ${lastDeployment.tag}`); - + this.warn( + `Rolling back ${rollbackOps.length} operations to deployment ${lastDeployment.tag}`, + ); + const confirmed = await this.confirm( `Confirm rollback to ${lastDeployment.tag}? This may result in data loss.`, - false + false, ); - + if (!confirmed) { - this.success('Rollback cancelled'); + this.success("Rollback cancelled"); return { success: false, cancelled: true }; } } - + // Execute rollback await this.executeMigration(rollbackOps); - + // Tag the rollback const rollbackTag = `rollback-${Date.now()}-from-${lastDeployment.tag}`; - await this.tagDeployment({ id: 'rollback', operations: rollbackOps.length }); - - this.success('Rollback completed successfully', { + await this.tagDeployment({ + id: "rollback", + operations: rollbackOps.length, + }); + + this.success("Rollback completed successfully", { tag: rollbackTag, - operations: rollbackOps.length + operations: rollbackOps.length, }); - + return { success: true, tag: rollbackTag, operations: rollbackOps }; - } catch (error) { - this.error('Rollback failed', error); - throw new Error(`Rollback failed: ${error.message}. Manual intervention required.`); + this.error("Rollback failed", error); + throw new Error( + `Rollback failed: ${error.message}. 
Manual intervention required.`, + ); } } - + /** * Helper methods */ - + async loadCurrentSQL() { try { const files = await fs.readdir(this.sqlDir); - const sqlFiles = files.filter(f => f.endsWith('.sql')); - - let combinedSQL = ''; + const sqlFiles = files.filter((f) => f.endsWith(".sql")); + + let combinedSQL = ""; for (const file of sqlFiles.sort()) { - const content = await fs.readFile(path.join(this.sqlDir, file), 'utf8'); - combinedSQL += content + '\n\n'; + const content = await fs.readFile(path.join(this.sqlDir, file), "utf8"); + combinedSQL += content + "\n\n"; } - + return combinedSQL; } catch (error) { - if (error.code === 'ENOENT') { - return ''; + if (error.code === "ENOENT") { + return ""; } throw error; } } - + async hasFunctionsToDeploy() { try { await fs.access(this.functionsDir); @@ -675,16 +753,16 @@ class MigrationOrchestrator extends SupabaseCommand { return false; } } - + async createRollbackPoint() { // In a real implementation, this might create a database snapshot // or save the current schema state for quick rollback return { timestamp: new Date().toISOString(), - schema: 'public' // Could capture actual schema state here + schema: "public", // Could capture actual schema state here }; } - + async recordMigration(migrationId, operations) { // Record migration in a tracking table for audit purposes const migrationRecord = { @@ -692,23 +770,23 @@ class MigrationOrchestrator extends SupabaseCommand { timestamp: new Date().toISOString(), operations: operations.length, sql_operations: JSON.stringify(operations), - environment: this.isProd ? 'production' : 'development' + environment: this.isProd ? "production" : "development", }; - + // This would insert into a migrations tracking table // await this.from('_data_migrations').insert(migrationRecord); } - + async runDatabaseTests() { // Placeholder for pgTAP test runner // In real implementation, would execute pgTAP tests return { passed: true, tests: 0 }; } - + getProjectRef() { // Extract project ref from Supabase URL const url = new URL(this.supabaseUrl); - return url.hostname.split('.')[0]; + return url.hostname.split(".")[0]; } } @@ -716,5 +794,5 @@ class MigrationOrchestrator extends SupabaseCommand { module.exports = { MigrationOrchestrator, PHASES, - OPERATIONS -}; \ No newline at end of file + OPERATIONS, +}; diff --git a/src/lib/migration/SchemaDiffAnalyzer.js b/src/lib/migration/SchemaDiffAnalyzer.js index 4943fef..fc18f59 100644 --- a/src/lib/migration/SchemaDiffAnalyzer.js +++ b/src/lib/migration/SchemaDiffAnalyzer.js @@ -1,13 +1,13 @@ /** * Schema Diff Analyzer for D.A.T.A. - * + * * Analyzes migration operations for risk assessment, performance impact, * and provides intelligent recommendations for safer deployments. 
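A minimal usage sketch of the analyzer this file defines, based only on the API visible in this diff (`new SchemaDiffAnalyzer(options)`, the `progress` event, and `analyzeMigration(operations, context)`); the operation objects, the values in the comments, and the require path are illustrative assumptions rather than code from the repository.

```javascript
const {
  SchemaDiffAnalyzer,
} = require("./src/lib/migration/SchemaDiffAnalyzer");

async function preview() {
  const analyzer = new SchemaDiffAnalyzer({ largeTableRows: 500000 });
  analyzer.on("progress", (e) => console.log(e.message));

  // Operation objects mirror the { type, description, sql } shape the analyzer reads.
  const operations = [
    {
      type: "WARNING",
      description: "Change users.email to citext",
      sql: "ALTER TABLE users ALTER COLUMN email TYPE citext;",
    },
    {
      type: "SAFE",
      description: "Index users.email",
      sql: "CREATE INDEX idx_users_email ON users (email);",
    },
  ];

  const analysis = await analyzer.analyzeMigration(operations, {
    isProd: true,
  });
  console.log(analysis.riskLevel); // "HIGH" — the column type change escalates a WARNING op
  for (const rec of analysis.recommendations) {
    console.log(`${rec.priority}: ${rec.message}`); // e.g. CONCURRENT_INDEX advice in production
  }
}

preview().catch(console.error);
```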
- * + * * @module SchemaDiffAnalyzer */ -const { EventEmitter } = require('events'); +const { EventEmitter } = require("events"); /** * Risk levels for migration operations @@ -15,10 +15,10 @@ const { EventEmitter } = require('events'); * @enum {string} */ const RISK_LEVELS = { - LOW: 'LOW', - MEDIUM: 'MEDIUM', - HIGH: 'HIGH', - CRITICAL: 'CRITICAL' + LOW: "LOW", + MEDIUM: "MEDIUM", + HIGH: "HIGH", + CRITICAL: "CRITICAL", }; /** @@ -27,10 +27,10 @@ const RISK_LEVELS = { * @enum {string} */ const PERFORMANCE_IMPACT = { - NONE: 'NONE', - LOW: 'LOW', - MEDIUM: 'MEDIUM', - HIGH: 'HIGH' + NONE: "NONE", + LOW: "LOW", + MEDIUM: "MEDIUM", + HIGH: "HIGH", }; /** @@ -49,45 +49,45 @@ const PERFORMANCE_IMPACT = { class SchemaDiffAnalyzer extends EventEmitter { constructor(options = {}) { super(); - + // Risk assessment thresholds this.thresholds = { largeTable: options.largeTableRows || 1000000, // 1M rows slowQuery: options.slowQueryTime || 30, // 30 seconds indexCreation: options.indexCreationTime || 60, // 1 minute per 100k rows - ...options.thresholds + ...options.thresholds, }; - + // Known high-impact operations this.highRiskPatterns = [ - 'DROP TABLE', - 'DROP COLUMN', - 'TRUNCATE', - 'DELETE FROM', - 'ALTER COLUMN.*TYPE', - 'DROP CONSTRAINT', - 'ALTER TABLE.*ALTER COLUMN.*NOT NULL' + "DROP TABLE", + "DROP COLUMN", + "TRUNCATE", + "DELETE FROM", + "ALTER COLUMN.*TYPE", + "DROP CONSTRAINT", + "ALTER TABLE.*ALTER COLUMN.*NOT NULL", ]; - + // Performance-impacting operations this.performancePatterns = [ - 'CREATE INDEX', - 'CREATE UNIQUE INDEX', - 'ALTER TABLE.*ADD CONSTRAINT', - 'VACUUM', - 'ANALYZE', - 'REINDEX' + "CREATE INDEX", + "CREATE UNIQUE INDEX", + "ALTER TABLE.*ADD CONSTRAINT", + "VACUUM", + "ANALYZE", + "REINDEX", ]; - + // Supabase-specific patterns this.supabasePatterns = { rls: /CREATE POLICY|ALTER POLICY|DROP POLICY/i, auth: /auth\.(users|refresh_tokens|audit_log_entries)/i, storage: /storage\.(buckets|objects)/i, - realtime: /realtime\.(subscription)/i + realtime: /realtime\.(subscription)/i, }; } - + /** * Analyze migration operations for risks and recommendations * @param {Array} operations - Array of migration operations @@ -95,8 +95,8 @@ class SchemaDiffAnalyzer extends EventEmitter { * @returns {Promise} Analysis results */ async analyzeMigration(operations, context = {}) { - this.emit('progress', { message: 'Analyzing migration operations...' }); - + this.emit("progress", { message: "Analyzing migration operations..." 
}); + const analysis = { riskLevel: RISK_LEVELS.LOW, performanceImpact: PERFORMANCE_IMPACT.NONE, @@ -105,57 +105,68 @@ class SchemaDiffAnalyzer extends EventEmitter { warnings: [], statistics: this.calculateStatistics(operations), requiresDowntime: false, - rollbackPlan: [] + rollbackPlan: [], }; - + // Analyze each operation for (const operation of operations) { const opAnalysis = await this.analyzeOperation(operation, context); - + // Update overall risk level - if (this.compareRiskLevels(opAnalysis.riskLevel, analysis.riskLevel) > 0) { + if ( + this.compareRiskLevels(opAnalysis.riskLevel, analysis.riskLevel) > 0 + ) { analysis.riskLevel = opAnalysis.riskLevel; } - + // Update performance impact - if (this.comparePerformanceImpact(opAnalysis.performanceImpact, analysis.performanceImpact) > 0) { + if ( + this.comparePerformanceImpact( + opAnalysis.performanceImpact, + analysis.performanceImpact, + ) > 0 + ) { analysis.performanceImpact = opAnalysis.performanceImpact; } - + // Accumulate duration analysis.estimatedDuration += opAnalysis.estimatedDuration; - + // Collect recommendations and warnings analysis.recommendations.push(...opAnalysis.recommendations); analysis.warnings.push(...opAnalysis.warnings); - + // Check if requires downtime if (opAnalysis.requiresDowntime) { analysis.requiresDowntime = true; } - + // Add to rollback plan if (opAnalysis.rollbackStep) { analysis.rollbackPlan.push(opAnalysis.rollbackStep); } } - + // Generate overall recommendations - analysis.recommendations.push(...this.generateOverallRecommendations(analysis, context)); - + analysis.recommendations.push( + ...this.generateOverallRecommendations(analysis, context), + ); + // Sort recommendations by priority - analysis.recommendations.sort((a, b) => this.comparePriority(a.priority, b.priority)); - - this.emit('complete', { - message: 'Migration analysis complete', + analysis.recommendations.sort((a, b) => + this.comparePriority(a.priority, b.priority), + ); + + this.emit("complete", { + message: "Migration analysis complete", riskLevel: analysis.riskLevel, operations: operations.length, - estimatedDuration: analysis.estimatedDuration + estimatedDuration: analysis.estimatedDuration, }); - + return analysis; } - + /** * Analyze a single migration operation * @param {Object} operation - Migration operation @@ -170,146 +181,151 @@ class SchemaDiffAnalyzer extends EventEmitter { recommendations: [], warnings: [], requiresDowntime: false, - rollbackStep: null + rollbackStep: null, }; - + // Risk-specific analysis - if (operation.type === 'DESTRUCTIVE') { + if (operation.type === "DESTRUCTIVE") { analysis.recommendations.push({ - type: 'BACKUP', - priority: 'HIGH', - message: 'Create full database backup before executing destructive operation', - operation: operation.description + type: "BACKUP", + priority: "HIGH", + message: + "Create full database backup before executing destructive operation", + operation: operation.description, }); - + analysis.warnings.push({ - type: 'DATA_LOSS', + type: "DATA_LOSS", message: `${operation.description} may result in permanent data loss`, - severity: 'CRITICAL' + severity: "CRITICAL", }); - + analysis.rollbackStep = { description: `Manual intervention required to reverse: ${operation.description}`, - manual: true + manual: true, }; } - + // Column type changes - if (this.matchesPattern(operation.sql, 'ALTER COLUMN.*TYPE')) { + if (this.matchesPattern(operation.sql, "ALTER COLUMN.*TYPE")) { analysis.recommendations.push({ - type: 'TYPE_SAFETY', - priority: 'MEDIUM', - message: 
'Verify data compatibility before changing column type', - operation: operation.description + type: "TYPE_SAFETY", + priority: "MEDIUM", + message: "Verify data compatibility before changing column type", + operation: operation.description, }); - + analysis.warnings.push({ - type: 'TYPE_CONVERSION', - message: 'Column type change may fail if existing data is incompatible', - severity: 'WARNING' + type: "TYPE_CONVERSION", + message: "Column type change may fail if existing data is incompatible", + severity: "WARNING", }); } - + // Index creation - if (this.matchesPattern(operation.sql, 'CREATE.*INDEX')) { - const concurrent = operation.sql.includes('CONCURRENTLY'); - + if (this.matchesPattern(operation.sql, "CREATE.*INDEX")) { + const concurrent = operation.sql.includes("CONCURRENTLY"); + if (!concurrent && context.isProd) { analysis.recommendations.push({ - type: 'CONCURRENT_INDEX', - priority: 'HIGH', - message: 'Use CREATE INDEX CONCURRENTLY in production to avoid locks', - operation: operation.description + type: "CONCURRENT_INDEX", + priority: "HIGH", + message: "Use CREATE INDEX CONCURRENTLY in production to avoid locks", + operation: operation.description, }); - + analysis.requiresDowntime = true; } - + analysis.warnings.push({ - type: 'INDEX_CREATION', + type: "INDEX_CREATION", message: `Index creation may take significant time on large tables`, - severity: 'INFO' + severity: "INFO", }); } - + // NOT NULL constraints - if (this.matchesPattern(operation.sql, 'ALTER COLUMN.*SET NOT NULL')) { + if (this.matchesPattern(operation.sql, "ALTER COLUMN.*SET NOT NULL")) { analysis.recommendations.push({ - type: 'NULL_CHECK', - priority: 'HIGH', - message: 'Ensure no NULL values exist before adding NOT NULL constraint', - operation: operation.description + type: "NULL_CHECK", + priority: "HIGH", + message: + "Ensure no NULL values exist before adding NOT NULL constraint", + operation: operation.description, }); - + analysis.warnings.push({ - type: 'CONSTRAINT_FAILURE', - message: 'NOT NULL constraint will fail if NULL values exist', - severity: 'WARNING' + type: "CONSTRAINT_FAILURE", + message: "NOT NULL constraint will fail if NULL values exist", + severity: "WARNING", }); } - + // RLS Policy changes (Supabase-specific) if (this.supabasePatterns.rls.test(operation.sql)) { - if (operation.sql.includes('DROP POLICY')) { + if (operation.sql.includes("DROP POLICY")) { analysis.warnings.push({ - type: 'SECURITY', - message: 'Removing RLS policy may expose data - verify security implications', - severity: 'HIGH' + type: "SECURITY", + message: + "Removing RLS policy may expose data - verify security implications", + severity: "HIGH", }); } - + analysis.recommendations.push({ - type: 'RLS_TESTING', - priority: 'MEDIUM', - message: 'Test RLS policies with different user roles before deployment', - operation: operation.description + type: "RLS_TESTING", + priority: "MEDIUM", + message: + "Test RLS policies with different user roles before deployment", + operation: operation.description, }); } - + // Function changes - if (this.matchesPattern(operation.sql, 'CREATE OR REPLACE FUNCTION')) { + if (this.matchesPattern(operation.sql, "CREATE OR REPLACE FUNCTION")) { analysis.recommendations.push({ - type: 'FUNCTION_TESTING', - priority: 'MEDIUM', - message: 'Test function changes thoroughly, especially if used in triggers', - operation: operation.description + type: "FUNCTION_TESTING", + priority: "MEDIUM", + message: + "Test function changes thoroughly, especially if used in triggers", + operation: 
operation.description, }); } - + return analysis; } - + /** * Assess the risk level of an operation * @param {Object} operation - Migration operation * @returns {string} Risk level */ assessOperationRisk(operation) { - if (operation.type === 'DESTRUCTIVE') { + if (operation.type === "DESTRUCTIVE") { return RISK_LEVELS.CRITICAL; } - - if (operation.type === 'WARNING') { + + if (operation.type === "WARNING") { // Check specific patterns for risk escalation - if (this.matchesPattern(operation.sql, 'ALTER COLUMN.*TYPE')) { + if (this.matchesPattern(operation.sql, "ALTER COLUMN.*TYPE")) { return RISK_LEVELS.HIGH; } - - if (this.matchesPattern(operation.sql, 'DROP POLICY')) { + + if (this.matchesPattern(operation.sql, "DROP POLICY")) { return RISK_LEVELS.HIGH; // Security risk } - + return RISK_LEVELS.MEDIUM; } - + // SAFE operations can still have some risk - if (this.matchesPattern(operation.sql, 'CREATE.*INDEX')) { + if (this.matchesPattern(operation.sql, "CREATE.*INDEX")) { return RISK_LEVELS.LOW; // Performance risk but safe } - + return RISK_LEVELS.LOW; } - + /** * Assess performance impact of operation * @param {Object} operation - Migration operation @@ -318,21 +334,23 @@ class SchemaDiffAnalyzer extends EventEmitter { assessPerformanceImpact(operation) { for (const pattern of this.performancePatterns) { if (this.matchesPattern(operation.sql, pattern)) { - if (pattern.includes('INDEX')) { + if (pattern.includes("INDEX")) { return PERFORMANCE_IMPACT.HIGH; } return PERFORMANCE_IMPACT.MEDIUM; } } - + // Lock-inducing operations - if (this.matchesPattern(operation.sql, 'ALTER TABLE.*ADD COLUMN.*NOT NULL')) { + if ( + this.matchesPattern(operation.sql, "ALTER TABLE.*ADD COLUMN.*NOT NULL") + ) { return PERFORMANCE_IMPACT.MEDIUM; } - + return PERFORMANCE_IMPACT.LOW; } - + /** * Estimate operation duration in minutes * @param {Object} operation - Migration operation @@ -342,12 +360,12 @@ class SchemaDiffAnalyzer extends EventEmitter { estimateDuration(operation, context) { // Base duration let duration = 0.1; // 6 seconds minimum - + // Index creation - estimate based on table size - if (this.matchesPattern(operation.sql, 'CREATE.*INDEX')) { - const concurrent = operation.sql.includes('CONCURRENTLY'); + if (this.matchesPattern(operation.sql, "CREATE.*INDEX")) { + const concurrent = operation.sql.includes("CONCURRENTLY"); duration = concurrent ? 
5 : 2; // Concurrent takes longer but safer - + // If we know table size, adjust estimate if (context.tableStats) { const tableName = this.extractTableName(operation.sql); @@ -357,30 +375,32 @@ class SchemaDiffAnalyzer extends EventEmitter { } } } - + // Column type changes - else if (this.matchesPattern(operation.sql, 'ALTER COLUMN.*TYPE')) { + else if (this.matchesPattern(operation.sql, "ALTER COLUMN.*TYPE")) { duration = 1; // Depends on table size and type conversion } - + // NOT NULL constraints require table scan - else if (this.matchesPattern(operation.sql, 'ALTER COLUMN.*NOT NULL')) { + else if (this.matchesPattern(operation.sql, "ALTER COLUMN.*NOT NULL")) { duration = 0.5; // Table scan required } - + // Function/view changes are usually fast - else if (this.matchesPattern(operation.sql, 'CREATE.*FUNCTION|CREATE.*VIEW')) { + else if ( + this.matchesPattern(operation.sql, "CREATE.*FUNCTION|CREATE.*VIEW") + ) { duration = 0.1; } - + // RLS policies are fast else if (this.supabasePatterns.rls.test(operation.sql)) { duration = 0.1; } - + return Math.round(duration * 10) / 10; // Round to 1 decimal } - + /** * Generate overall recommendations based on analysis * @param {Object} analysis - Current analysis state @@ -389,64 +409,68 @@ class SchemaDiffAnalyzer extends EventEmitter { */ generateOverallRecommendations(analysis, context) { const recommendations = []; - + // High-risk migration recommendations if (analysis.riskLevel === RISK_LEVELS.CRITICAL) { recommendations.push({ - type: 'DEPLOYMENT_STRATEGY', - priority: 'CRITICAL', - message: 'Consider blue-green deployment or maintenance window for critical operations' + type: "DEPLOYMENT_STRATEGY", + priority: "CRITICAL", + message: + "Consider blue-green deployment or maintenance window for critical operations", }); } - + // Performance recommendations if (analysis.performanceImpact === PERFORMANCE_IMPACT.HIGH) { recommendations.push({ - type: 'MAINTENANCE_WINDOW', - priority: 'HIGH', - message: 'Schedule during low-traffic period due to high performance impact' + type: "MAINTENANCE_WINDOW", + priority: "HIGH", + message: + "Schedule during low-traffic period due to high performance impact", }); } - + // Long-running migration recommendations if (analysis.estimatedDuration > 30) { recommendations.push({ - type: 'MONITORING', - priority: 'MEDIUM', - message: 'Monitor migration progress and database performance during execution' + type: "MONITORING", + priority: "MEDIUM", + message: + "Monitor migration progress and database performance during execution", }); } - + // Production-specific recommendations if (context.isProd) { if (analysis.riskLevel !== RISK_LEVELS.LOW) { recommendations.push({ - type: 'STAGING_TEST', - priority: 'HIGH', - message: 'Test migration on staging environment with production-like data' + type: "STAGING_TEST", + priority: "HIGH", + message: + "Test migration on staging environment with production-like data", }); } - + recommendations.push({ - type: 'ROLLBACK_PLAN', - priority: 'MEDIUM', - message: 'Prepare rollback plan and verify rollback procedures' + type: "ROLLBACK_PLAN", + priority: "MEDIUM", + message: "Prepare rollback plan and verify rollback procedures", }); } - + // Multiple destructive operations const destructiveCount = analysis.statistics.destructiveOperations; if (destructiveCount > 1) { recommendations.push({ - type: 'PHASED_DEPLOYMENT', - priority: 'HIGH', - message: `Consider breaking ${destructiveCount} destructive operations into separate deployments` + type: "PHASED_DEPLOYMENT", + priority: 
"HIGH", + message: `Consider breaking ${destructiveCount} destructive operations into separate deployments`, }); } - + return recommendations; } - + /** * Calculate migration statistics * @param {Array} operations - Migration operations @@ -466,58 +490,76 @@ class SchemaDiffAnalyzer extends EventEmitter { droppedIndexes: 0, newFunctions: 0, droppedFunctions: 0, - rlsPolicies: 0 + rlsPolicies: 0, }; - + for (const op of operations) { // Count by risk type - if (op.type === 'SAFE') stats.safeOperations++; - else if (op.type === 'WARNING') stats.warningOperations++; - else if (op.type === 'DESTRUCTIVE') stats.destructiveOperations++; - + if (op.type === "SAFE") stats.safeOperations++; + else if (op.type === "WARNING") stats.warningOperations++; + else if (op.type === "DESTRUCTIVE") stats.destructiveOperations++; + // Count specific operations const sql = op.sql.toUpperCase(); - if (sql.includes('CREATE TABLE')) stats.newTables++; - if (sql.includes('DROP TABLE')) stats.droppedTables++; - if (sql.includes('ADD COLUMN')) stats.newColumns++; - if (sql.includes('DROP COLUMN')) stats.droppedColumns++; - if (sql.includes('CREATE INDEX') || sql.includes('CREATE UNIQUE INDEX')) stats.newIndexes++; - if (sql.includes('DROP INDEX')) stats.droppedIndexes++; - if (sql.includes('CREATE FUNCTION') || sql.includes('CREATE OR REPLACE FUNCTION')) stats.newFunctions++; - if (sql.includes('DROP FUNCTION')) stats.droppedFunctions++; - if (sql.includes('CREATE POLICY') || sql.includes('DROP POLICY')) stats.rlsPolicies++; + if (sql.includes("CREATE TABLE")) stats.newTables++; + if (sql.includes("DROP TABLE")) stats.droppedTables++; + if (sql.includes("ADD COLUMN")) stats.newColumns++; + if (sql.includes("DROP COLUMN")) stats.droppedColumns++; + if (sql.includes("CREATE INDEX") || sql.includes("CREATE UNIQUE INDEX")) + stats.newIndexes++; + if (sql.includes("DROP INDEX")) stats.droppedIndexes++; + if ( + sql.includes("CREATE FUNCTION") || + sql.includes("CREATE OR REPLACE FUNCTION") + ) + stats.newFunctions++; + if (sql.includes("DROP FUNCTION")) stats.droppedFunctions++; + if (sql.includes("CREATE POLICY") || sql.includes("DROP POLICY")) + stats.rlsPolicies++; } - + return stats; } - + /** * Helper methods */ - + matchesPattern(sql, pattern) { - const regex = new RegExp(pattern, 'i'); + const regex = new RegExp(pattern, "i"); return regex.test(sql); } - + extractTableName(sql) { // Simple table name extraction - could be more sophisticated - const match = sql.match(/(?:CREATE INDEX.*ON|ALTER TABLE|DROP TABLE)\s+([^\s(]+)/i); + const match = sql.match( + /(?:CREATE INDEX.*ON|ALTER TABLE|DROP TABLE)\s+([^\s(]+)/i, + ); return match ? 
match[1] : null; } - + compareRiskLevels(level1, level2) { - const levels = [RISK_LEVELS.LOW, RISK_LEVELS.MEDIUM, RISK_LEVELS.HIGH, RISK_LEVELS.CRITICAL]; + const levels = [ + RISK_LEVELS.LOW, + RISK_LEVELS.MEDIUM, + RISK_LEVELS.HIGH, + RISK_LEVELS.CRITICAL, + ]; return levels.indexOf(level1) - levels.indexOf(level2); } - + comparePerformanceImpact(impact1, impact2) { - const impacts = [PERFORMANCE_IMPACT.NONE, PERFORMANCE_IMPACT.LOW, PERFORMANCE_IMPACT.MEDIUM, PERFORMANCE_IMPACT.HIGH]; + const impacts = [ + PERFORMANCE_IMPACT.NONE, + PERFORMANCE_IMPACT.LOW, + PERFORMANCE_IMPACT.MEDIUM, + PERFORMANCE_IMPACT.HIGH, + ]; return impacts.indexOf(impact1) - impacts.indexOf(impact2); } - + comparePriority(priority1, priority2) { - const priorities = ['LOW', 'MEDIUM', 'HIGH', 'CRITICAL']; + const priorities = ["LOW", "MEDIUM", "HIGH", "CRITICAL"]; return priorities.indexOf(priority2) - priorities.indexOf(priority1); // Reverse order (highest first) } } @@ -525,5 +567,5 @@ class SchemaDiffAnalyzer extends EventEmitter { module.exports = { SchemaDiffAnalyzer, RISK_LEVELS, - PERFORMANCE_IMPACT -}; \ No newline at end of file + PERFORMANCE_IMPACT, +}; diff --git a/src/lib/schemas/DataConfigSchema.js b/src/lib/schemas/DataConfigSchema.js index 44e3427..7d30816 100644 --- a/src/lib/schemas/DataConfigSchema.js +++ b/src/lib/schemas/DataConfigSchema.js @@ -1,4 +1,4 @@ -const { z } = require('zod'); +const { z } = require("zod"); /** * Zod schema for data configuration validation @@ -6,85 +6,124 @@ const { z } = require('zod'); */ // Test configuration schema -const TestConfigSchema = z.object({ - minimum_coverage: z.number().min(0).max(100).default(80).optional(), - test_timeout: z.number().min(1).default(300).optional(), - output_formats: z.array( - z.enum(['console', 'junit', 'json', 'tap', 'html']) - ).default(['console']).optional(), - parallel: z.boolean().default(false).optional(), - verbose: z.boolean().default(false).optional() -}).strict().optional(); +const TestConfigSchema = z + .object({ + minimum_coverage: z.number().min(0).max(100).default(80).optional(), + test_timeout: z.number().min(1).default(300).optional(), + output_formats: z + .array(z.enum(["console", "junit", "json", "tap", "html"])) + .default(["console"]) + .optional(), + parallel: z.boolean().default(false).optional(), + verbose: z.boolean().default(false).optional(), + }) + .strict() + .optional(); // Environment configuration schema -const EnvironmentSchema = z.object({ - db: z.string().url().regex(/^postgresql:\/\/.*/, 'Must be a PostgreSQL URL'), - supabase_url: z.string().url().optional(), - supabase_anon_key: z.string().optional(), - supabase_service_role_key: z.string().optional() -}).strict(); +const EnvironmentSchema = z + .object({ + db: z + .string() + .url() + .regex(/^postgresql:\/\/.*/, "Must be a PostgreSQL URL"), + supabase_url: z.string().url().optional(), + supabase_anon_key: z.string().optional(), + supabase_service_role_key: z.string().optional(), + }) + .strict(); // Paths configuration schema -const PathsConfigSchema = z.object({ - sql_dir: z.string().default('./sql').optional(), - tests_dir: z.string().default('./tests').optional(), - migrations_dir: z.string().default('./migrations').optional(), - functions_dir: z.string().default('./functions').optional(), - schemas_dir: z.string().default('./schemas').optional() -}).strict().optional(); +const PathsConfigSchema = z + .object({ + sql_dir: z.string().default("./sql").optional(), + tests_dir: z.string().default("./tests").optional(), + migrations_dir: 
z.string().default("./migrations").optional(), + functions_dir: z.string().default("./functions").optional(), + schemas_dir: z.string().default("./schemas").optional(), + }) + .strict() + .optional(); // Compile configuration schema -const CompileConfigSchema = z.object({ - auto_squash: z.boolean().default(false).optional(), - include_comments: z.boolean().default(true).optional(), - validate_syntax: z.boolean().default(true).optional() -}).strict().optional(); +const CompileConfigSchema = z + .object({ + auto_squash: z.boolean().default(false).optional(), + include_comments: z.boolean().default(true).optional(), + validate_syntax: z.boolean().default(true).optional(), + }) + .strict() + .optional(); // Migration configuration schema -const MigrateConfigSchema = z.object({ - auto_rollback: z.boolean().default(true).optional(), - dry_run: z.boolean().default(false).optional(), - lock_timeout: z.number().min(1).default(10).optional(), - batch_size: z.number().min(1).default(10).optional() -}).strict().optional(); +const MigrateConfigSchema = z + .object({ + auto_rollback: z.boolean().default(true).optional(), + dry_run: z.boolean().default(false).optional(), + lock_timeout: z.number().min(1).default(10).optional(), + batch_size: z.number().min(1).default(10).optional(), + }) + .strict() + .optional(); // Functions configuration schema -const FunctionsConfigSchema = z.object({ - deploy_on_migrate: z.boolean().default(false).optional(), - import_map: z.string().default('./import_map.json').optional(), - verify_jwt: z.boolean().default(true).optional() -}).strict().optional(); +const FunctionsConfigSchema = z + .object({ + deploy_on_migrate: z.boolean().default(false).optional(), + import_map: z.string().default("./import_map.json").optional(), + verify_jwt: z.boolean().default(true).optional(), + }) + .strict() + .optional(); // Safety configuration schema -const SafetyConfigSchema = z.object({ - require_prod_flag: z.boolean().default(true).optional(), - require_confirmation: z.boolean().default(true).optional(), - backup_before_migrate: z.boolean().default(true).optional(), - max_affected_rows: z.number().min(0).default(10000).optional() -}).strict().optional(); +const SafetyConfigSchema = z + .object({ + require_prod_flag: z.boolean().default(true).optional(), + require_confirmation: z.boolean().default(true).optional(), + backup_before_migrate: z.boolean().default(true).optional(), + max_affected_rows: z.number().min(0).default(10000).optional(), + }) + .strict() + .optional(); // Logging configuration schema -const LoggingConfigSchema = z.object({ - level: z.enum(['debug', 'info', 'warn', 'error', 'silent']).default('info').optional(), - format: z.enum(['text', 'json']).default('text').optional(), - timestamps: z.boolean().default(true).optional() -}).strict().optional(); +const LoggingConfigSchema = z + .object({ + level: z + .enum(["debug", "info", "warn", "error", "silent"]) + .default("info") + .optional(), + format: z.enum(["text", "json"]).default("text").optional(), + timestamps: z.boolean().default(true).optional(), + }) + .strict() + .optional(); // Main data configuration schema -const DataConfigSchema = z.object({ - $schema: z.string().optional(), // Allow but don't require the schema reference - test: TestConfigSchema, - environments: z.record( - z.string().regex(/^[a-zA-Z][a-zA-Z0-9_-]*$/, 'Environment name must start with a letter'), - EnvironmentSchema - ).optional(), - paths: PathsConfigSchema, - compile: CompileConfigSchema, - migrate: MigrateConfigSchema, - functions: 
FunctionsConfigSchema, - safety: SafetyConfigSchema, - logging: LoggingConfigSchema -}).strict(); +const DataConfigSchema = z + .object({ + $schema: z.string().optional(), // Allow but don't require the schema reference + test: TestConfigSchema, + environments: z + .record( + z + .string() + .regex( + /^[a-zA-Z][a-zA-Z0-9_-]*$/, + "Environment name must start with a letter", + ), + EnvironmentSchema, + ) + .optional(), + paths: PathsConfigSchema, + compile: CompileConfigSchema, + migrate: MigrateConfigSchema, + functions: FunctionsConfigSchema, + safety: SafetyConfigSchema, + logging: LoggingConfigSchema, + }) + .strict(); /** * Parse and validate data configuration @@ -123,7 +162,7 @@ function mergeConfigs(baseConfig, overrides) { // Parse both configs to ensure they're valid const base = dataConfigSchema.parse(baseConfig || {}); const over = dataConfigSchema.parse(overrides || {}); - + // Deep merge the configurations const merged = { ...base, @@ -135,9 +174,9 @@ function mergeConfigs(baseConfig, overrides) { migrate: { ...base.migrate, ...over.migrate }, functions: { ...base.functions, ...over.functions }, safety: { ...base.safety, ...over.safety }, - logging: { ...base.logging, ...over.logging } + logging: { ...base.logging, ...over.logging }, }; - + // Validate the merged result return dataConfigSchema.parse(merged); } @@ -156,5 +195,5 @@ module.exports = { MigrateConfigSchema, FunctionsConfigSchema, SafetyConfigSchema, - LoggingConfigSchema -}; \ No newline at end of file + LoggingConfigSchema, +}; diff --git a/src/lib/test/CoverageAnalyzer.js b/src/lib/test/CoverageAnalyzer.js index 93761a4..980065a 100644 --- a/src/lib/test/CoverageAnalyzer.js +++ b/src/lib/test/CoverageAnalyzer.js @@ -1,16 +1,16 @@ -const chalk = require('chalk'); +const chalk = require("chalk"); /** * Test Coverage Analyzer - * + * * Processes test coverage data from database queries and formats results * with color coding and statistics. 
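A small sketch of how the stats helper visible later in this file's diff (`generateCoverageStats`) combines with `getColorClass`; the input objects are hypothetical and simply mirror the `{ total, tested, percentage, colorClass }` shape the class itself produces, and the require path is assumed.

```javascript
const CoverageAnalyzer = require("./src/lib/test/CoverageAnalyzer");

const analyzer = new CoverageAnalyzer();

// Hypothetical pre-computed analyses for RPC functions and RLS policies.
const rpcAnalysis = { total: 10, tested: 9, percentage: 90, colorClass: "good" };
const policyAnalysis = { total: 4, tested: 1, percentage: 25, colorClass: "poor" };

const stats = analyzer.generateCoverageStats(rpcAnalysis, policyAnalysis);
console.log(stats.overall.percentage); // 71 -> Math.round((9 + 1) / (10 + 4) * 100)
console.log(analyzer.getColorClass(stats.overall.percentage)); // "medium" (50–80% band)
```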
*/ class CoverageAnalyzer { constructor() { this.coverageThresholds = { - good: 80, // Green: >80% coverage - medium: 50 // Yellow: 50-80%, Red: <50% + good: 80, // Green: >80% coverage + medium: 50, // Yellow: 50-80%, Red: <50% }; } @@ -26,12 +26,12 @@ class CoverageAnalyzer { tested: 0, percentage: 0, items: [], - untested: [] + untested: [], }; } - const tested = rpcResults.filter(item => item.has_test); - const untested = rpcResults.filter(item => !item.has_test); + const tested = rpcResults.filter((item) => item.has_test); + const untested = rpcResults.filter((item) => !item.has_test); const percentage = Math.round((tested.length / rpcResults.length) * 100); // Group by schema @@ -50,7 +50,7 @@ class CoverageAnalyzer { items: rpcResults, untested, bySchema, - colorClass: this.getColorClass(percentage) + colorClass: this.getColorClass(percentage), }; } @@ -66,12 +66,12 @@ class CoverageAnalyzer { tested: 0, percentage: 0, items: [], - untested: [] + untested: [], }; } - const tested = policyResults.filter(item => item.has_test); - const untested = policyResults.filter(item => !item.has_test); + const tested = policyResults.filter((item) => item.has_test); + const untested = policyResults.filter((item) => !item.has_test); const percentage = Math.round((tested.length / policyResults.length) * 100); // Group by table @@ -91,7 +91,7 @@ class CoverageAnalyzer { items: policyResults, untested, byTable, - colorClass: this.getColorClass(percentage) + colorClass: this.getColorClass(percentage), }; } @@ -104,23 +104,23 @@ class CoverageAnalyzer { if (!summaryResults || summaryResults.length === 0) { return { rpc: { total: 0, tested: 0, percentage: 0 }, - policies: { total: 0, tested: 0, percentage: 0 } + policies: { total: 0, tested: 0, percentage: 0 }, }; } const summary = {}; - summaryResults.forEach(item => { - if (item.coverage_type === 'RPC Functions') { + summaryResults.forEach((item) => { + if (item.coverage_type === "RPC Functions") { summary.rpc = { total: item.total_count, tested: item.tested_count, - percentage: parseFloat(item.coverage_percentage) + percentage: parseFloat(item.coverage_percentage), }; - } else if (item.coverage_type === 'RLS Policies') { + } else if (item.coverage_type === "RLS Policies") { summary.policies = { total: item.total_count, tested: item.tested_count, - percentage: parseFloat(item.coverage_percentage) + percentage: parseFloat(item.coverage_percentage), }; } }); @@ -134,9 +134,9 @@ class CoverageAnalyzer { * @returns {string} Color class identifier */ getColorClass(percentage) { - if (percentage >= this.coverageThresholds.good) return 'good'; - if (percentage >= this.coverageThresholds.medium) return 'medium'; - return 'poor'; + if (percentage >= this.coverageThresholds.good) return "good"; + if (percentage >= this.coverageThresholds.medium) return "medium"; + return "poor"; } /** @@ -148,11 +148,11 @@ class CoverageAnalyzer { colorizeByPercentage(text, percentage) { const colorClass = this.getColorClass(percentage); switch (colorClass) { - case 'good': + case "good": return chalk.green(text); - case 'medium': + case "medium": return chalk.yellow(text); - case 'poor': + case "poor": return chalk.red(text); default: return text; @@ -170,117 +170,161 @@ class CoverageAnalyzer { const output = []; // Header - output.push(chalk.bold.blue('=== Test Coverage Report ===\n')); + output.push(chalk.bold.blue("=== Test Coverage Report ===\n")); // Overall Summary if (summary && (summary.rpc || summary.policies)) { - output.push(chalk.bold('📊 Overall Coverage 
Summary:')); - + output.push(chalk.bold("📊 Overall Coverage Summary:")); + if (summary.rpc) { const rpcText = `RPC Functions: ${summary.rpc.percentage}% (${summary.rpc.tested}/${summary.rpc.total})`; - output.push(` ${this.colorizeByPercentage(rpcText, summary.rpc.percentage)}`); + output.push( + ` ${this.colorizeByPercentage(rpcText, summary.rpc.percentage)}`, + ); } - + if (summary.policies) { const policyText = `RLS Policies: ${summary.policies.percentage}% (${summary.policies.tested}/${summary.policies.total})`; - output.push(` ${this.colorizeByPercentage(policyText, summary.policies.percentage)}`); + output.push( + ` ${this.colorizeByPercentage(policyText, summary.policies.percentage)}`, + ); } - - output.push(''); + + output.push(""); } // RPC Function Details if (rpcAnalysis && rpcAnalysis.total > 0) { const rpcTitle = `🔧 RPC Function Coverage: ${rpcAnalysis.percentage}% (${rpcAnalysis.tested}/${rpcAnalysis.total})`; - output.push(chalk.bold(this.colorizeByPercentage(rpcTitle, rpcAnalysis.percentage))); + output.push( + chalk.bold(this.colorizeByPercentage(rpcTitle, rpcAnalysis.percentage)), + ); // Group by schema - Object.keys(rpcAnalysis.bySchema).sort().forEach(schema => { - output.push(chalk.cyan(`\n ${schema} schema:`)); - - rpcAnalysis.bySchema[schema].forEach(func => { - const status = func.has_test ? '✓' : '✗'; - const color = func.has_test ? chalk.green : chalk.red; - const testInfo = func.has_test ? - `(${func.test_count} test${func.test_count !== 1 ? 's' : ''})` : - '(0 tests)'; - - output.push(` ${color(status)} ${func.function_name} ${chalk.gray(testInfo)}`); - - // Show test function names if available - if (func.has_test && func.test_function_names && func.test_function_names.length > 0) { - func.test_function_names.forEach(testName => { - output.push(` ${chalk.gray('↳')} ${chalk.gray(testName)}`); - }); - } + Object.keys(rpcAnalysis.bySchema) + .sort() + .forEach((schema) => { + output.push(chalk.cyan(`\n ${schema} schema:`)); + + rpcAnalysis.bySchema[schema].forEach((func) => { + const status = func.has_test ? "✓" : "✗"; + const color = func.has_test ? chalk.green : chalk.red; + const testInfo = func.has_test + ? `(${func.test_count} test${func.test_count !== 1 ? "s" : ""})` + : "(0 tests)"; + + output.push( + ` ${color(status)} ${func.function_name} ${chalk.gray(testInfo)}`, + ); + + // Show test function names if available + if ( + func.has_test && + func.test_function_names && + func.test_function_names.length > 0 + ) { + func.test_function_names.forEach((testName) => { + output.push(` ${chalk.gray("↳")} ${chalk.gray(testName)}`); + }); + } + }); }); - }); } // RLS Policy Details if (policyAnalysis && policyAnalysis.total > 0) { - output.push(''); + output.push(""); const policyTitle = `🛡️ RLS Policy Coverage: ${policyAnalysis.percentage}% (${policyAnalysis.tested}/${policyAnalysis.total})`; - output.push(chalk.bold(this.colorizeByPercentage(policyTitle, policyAnalysis.percentage))); + output.push( + chalk.bold( + this.colorizeByPercentage(policyTitle, policyAnalysis.percentage), + ), + ); // Group by table - Object.keys(policyAnalysis.byTable).sort().forEach(table => { - output.push(chalk.cyan(`\n ${table}:`)); - - policyAnalysis.byTable[table].forEach(policy => { - const status = policy.has_test ? '✓' : '✗'; - const color = policy.has_test ? chalk.green : chalk.red; - const testInfo = policy.has_test && policy.test_evidence ? - `(${policy.test_evidence.length} test${policy.test_evidence.length !== 1 ? 
's' : ''})` : - '(0 tests)'; - - output.push(` ${color(status)} ${policy.policy_name} [${policy.policy_type}] ${chalk.gray(testInfo)}`); - - // Show test evidence if available - if (policy.has_test && policy.test_evidence && policy.test_evidence.length > 0) { - policy.test_evidence.forEach(testName => { - output.push(` ${chalk.gray('↳')} ${chalk.gray(testName)}`); - }); - } + Object.keys(policyAnalysis.byTable) + .sort() + .forEach((table) => { + output.push(chalk.cyan(`\n ${table}:`)); + + policyAnalysis.byTable[table].forEach((policy) => { + const status = policy.has_test ? "✓" : "✗"; + const color = policy.has_test ? chalk.green : chalk.red; + const testInfo = + policy.has_test && policy.test_evidence + ? `(${policy.test_evidence.length} test${policy.test_evidence.length !== 1 ? "s" : ""})` + : "(0 tests)"; + + output.push( + ` ${color(status)} ${policy.policy_name} [${policy.policy_type}] ${chalk.gray(testInfo)}`, + ); + + // Show test evidence if available + if ( + policy.has_test && + policy.test_evidence && + policy.test_evidence.length > 0 + ) { + policy.test_evidence.forEach((testName) => { + output.push(` ${chalk.gray("↳")} ${chalk.gray(testName)}`); + }); + } + }); }); - }); } // Untested Items Summary const allUntested = []; if (rpcAnalysis && rpcAnalysis.untested.length > 0) { - allUntested.push(...rpcAnalysis.untested.map(item => ({ - type: 'RPC Function', - name: `${item.schema_name}.${item.function_name}`, - schema: item.schema_name - }))); + allUntested.push( + ...rpcAnalysis.untested.map((item) => ({ + type: "RPC Function", + name: `${item.schema_name}.${item.function_name}`, + schema: item.schema_name, + })), + ); } if (policyAnalysis && policyAnalysis.untested.length > 0) { - allUntested.push(...policyAnalysis.untested.map(item => ({ - type: 'RLS Policy', - name: `${item.schema_name}.${item.table_name}.${item.policy_name}`, - schema: item.schema_name - }))); + allUntested.push( + ...policyAnalysis.untested.map((item) => ({ + type: "RLS Policy", + name: `${item.schema_name}.${item.table_name}.${item.policy_name}`, + schema: item.schema_name, + })), + ); } if (allUntested.length > 0) { - output.push('\n' + chalk.bold.red('🚨 Untested Items:')); - allUntested.forEach(item => { - output.push(` ${chalk.red('•')} ${chalk.gray(`[${item.type}]`)} ${item.name}`); + output.push("\n" + chalk.bold.red("🚨 Untested Items:")); + allUntested.forEach((item) => { + output.push( + ` ${chalk.red("•")} ${chalk.gray(`[${item.type}]`)} ${item.name}`, + ); }); } // No coverage found message - if ((!rpcAnalysis || rpcAnalysis.total === 0) && (!policyAnalysis || policyAnalysis.total === 0)) { - output.push(chalk.yellow('⚠️ No RPC functions or RLS policies found for coverage analysis.')); - output.push(chalk.gray(' This could mean:')); - output.push(chalk.gray(' • No functions/policies exist in public, private, or security schemas')); - output.push(chalk.gray(' • Database connection issues')); - output.push(chalk.gray(' • Test schema is not properly configured')); + if ( + (!rpcAnalysis || rpcAnalysis.total === 0) && + (!policyAnalysis || policyAnalysis.total === 0) + ) { + output.push( + chalk.yellow( + "⚠️ No RPC functions or RLS policies found for coverage analysis.", + ), + ); + output.push(chalk.gray(" This could mean:")); + output.push( + chalk.gray( + " • No functions/policies exist in public, private, or security schemas", + ), + ); + output.push(chalk.gray(" • Database connection issues")); + output.push(chalk.gray(" • Test schema is not properly configured")); } - output.push(''); // 
Final newline - return output.join('\n'); + output.push(""); // Final newline + return output.join("\n"); } /** @@ -291,30 +335,36 @@ class CoverageAnalyzer { */ generateCoverageStats(rpcAnalysis, policyAnalysis) { const totalItems = (rpcAnalysis?.total || 0) + (policyAnalysis?.total || 0); - const totalTested = (rpcAnalysis?.tested || 0) + (policyAnalysis?.tested || 0); - const overallPercentage = totalItems > 0 ? Math.round((totalTested / totalItems) * 100) : 0; + const totalTested = + (rpcAnalysis?.tested || 0) + (policyAnalysis?.tested || 0); + const overallPercentage = + totalItems > 0 ? Math.round((totalTested / totalItems) * 100) : 0; return { overall: { total: totalItems, tested: totalTested, percentage: overallPercentage, - colorClass: this.getColorClass(overallPercentage) + colorClass: this.getColorClass(overallPercentage), }, - rpc: rpcAnalysis ? { - total: rpcAnalysis.total, - tested: rpcAnalysis.tested, - percentage: rpcAnalysis.percentage, - colorClass: rpcAnalysis.colorClass - } : null, - policies: policyAnalysis ? { - total: policyAnalysis.total, - tested: policyAnalysis.tested, - percentage: policyAnalysis.percentage, - colorClass: policyAnalysis.colorClass - } : null + rpc: rpcAnalysis + ? { + total: rpcAnalysis.total, + tested: rpcAnalysis.tested, + percentage: rpcAnalysis.percentage, + colorClass: rpcAnalysis.colorClass, + } + : null, + policies: policyAnalysis + ? { + total: policyAnalysis.total, + tested: policyAnalysis.tested, + percentage: policyAnalysis.percentage, + colorClass: policyAnalysis.colorClass, + } + : null, }; } } -module.exports = CoverageAnalyzer; \ No newline at end of file +module.exports = CoverageAnalyzer; diff --git a/src/lib/test/README-TestCache.md b/src/lib/test/README-TestCache.md index df18783..63f7b28 100644 --- a/src/lib/test/README-TestCache.md +++ b/src/lib/test/README-TestCache.md @@ -7,7 +7,7 @@ The TestCache system provides hash-based caching for data test executions, deliv ## Key Features - **Hash-based cache invalidation** - Detects changes in test files, database schema, and dependencies -- **Performance optimization** - Achieves >50% speedup on cached test executions +- **Performance optimization** - Achieves >50% speedup on cached test executions - **File-based storage** - Uses JSON files in `.data-cache/test-results/` directory - **Cache management** - Clear, stats, and pattern-based invalidation commands - **Automatic invalidation** - Cache expires when files or database schema change @@ -51,6 +51,7 @@ node test/test-cache-performance.js ### Hash Calculation The cache hash is calculated from: + - Test function name - Database connection details (without credentials) - Test execution options @@ -67,6 +68,7 @@ The cache hash is calculated from: ``` Each cache file contains: + ```json { "result": { @@ -88,6 +90,7 @@ Each cache file contains: ### Cache Invalidation Cache entries are invalidated when: + - Test file content changes - Database schema changes (detected via migration hash) - Cache entry exceeds maximum age (24 hours) @@ -205,6 +208,7 @@ node test/test-cache-performance.js ``` Expected output: + ``` 🚀 data Test Cache Performance Validation ================================================== @@ -239,4 +243,4 @@ P1.T015 implementation validated and ready for deployment. 
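The hashing recipe described in this README condenses to a few lines. The following is an illustrative sketch of the same idea, not the shipped implementation; the real `TestCache.calculateHash` additionally folds in a schema (migration) hash and a hash of the test file's content, and falls back to a timestamp-based hash on error.

```javascript
const crypto = require("crypto");

// Sketch only: build a stable cache key from the inputs the README lists.
function cacheKey(testFunction, databaseUrl, options = {}) {
  const db = new URL(databaseUrl);
  const parts = [
    `function:${testFunction}`,
    // host/port/path only — credentials never enter the hash
    `db:${db.host}:${db.port}:${db.pathname}`,
    // serialize options with sorted keys so key order cannot change the hash
    `options:${JSON.stringify(options, Object.keys(options).sort())}`,
  ];
  return crypto.createHash("sha256").update(parts.join("|")).digest("hex");
}

console.log(
  cacheKey("run_rls_policy_tests", "postgresql://user:pw@localhost:5432/app", {
    verbose: true,
  }),
);
```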
- **Compression** - Reduce cache file sizes - **Smart invalidation** - More granular dependency tracking - **Cache warming** - Pre-populate cache for common test suites -- **Analytics** - Detailed cache performance analysis and recommendations \ No newline at end of file +- **Analytics** - Detailed cache performance analysis and recommendations diff --git a/src/lib/test/ResultParser.js b/src/lib/test/ResultParser.js index 2c4293a..e0542c4 100644 --- a/src/lib/test/ResultParser.js +++ b/src/lib/test/ResultParser.js @@ -15,7 +15,7 @@ class ResultParser { skipped: 0, tests: [], diagnostics: [], - plan: null + plan: null, }; } @@ -25,27 +25,28 @@ class ResultParser { * @returns {object} Parsed test results */ parse(tapOutput) { - if (!tapOutput || typeof tapOutput !== 'string') { + if (!tapOutput || typeof tapOutput !== "string") { return this.results; } - const lines = tapOutput.split('\n'); - + const lines = tapOutput.split("\n"); + for (let i = 0; i < lines.length; i++) { const line = lines[i].trim(); - - if (line.startsWith('1..')) { + + if (line.startsWith("1..")) { this._parsePlan(line); - } else if (line.startsWith('ok ') || line.startsWith('not ok ')) { + } else if (line.startsWith("ok ") || line.startsWith("not ok ")) { this._parseTest(line); - } else if (line.startsWith('#')) { + } else if (line.startsWith("#")) { this._parseDiagnostic(line); } } // Calculate totals - this.results.total = this.results.passed + this.results.failed + this.results.skipped; - + this.results.total = + this.results.passed + this.results.failed + this.results.skipped; + return this.results; } @@ -67,38 +68,38 @@ class ResultParser { _parseTest(line) { const okMatch = line.match(/^ok (\d+)(.*)/); const notOkMatch = line.match(/^not ok (\d+)(.*)/); - + if (okMatch) { const testNumber = parseInt(okMatch[1], 10); - const description = okMatch[2].replace(/^[^\w]*/, '').trim(); - + const description = okMatch[2].replace(/^[^\w]*/, "").trim(); + // Check for SKIP directive - if (description.includes('# SKIP')) { + if (description.includes("# SKIP")) { this.results.skipped++; this.results.tests.push({ number: testNumber, - status: 'skip', - description: description.replace(/# SKIP.*$/, '').trim(), - directive: 'SKIP', - reason: this._extractSkipReason(description) + status: "skip", + description: description.replace(/# SKIP.*$/, "").trim(), + directive: "SKIP", + reason: this._extractSkipReason(description), }); } else { this.results.passed++; this.results.tests.push({ number: testNumber, - status: 'pass', - description: description + status: "pass", + description: description, }); } } else if (notOkMatch) { const testNumber = parseInt(notOkMatch[1], 10); - const description = notOkMatch[2].replace(/^[^\w]*/, '').trim(); - + const description = notOkMatch[2].replace(/^[^\w]*/, "").trim(); + this.results.failed++; this.results.tests.push({ number: testNumber, - status: 'fail', - description: description + status: "fail", + description: description, }); } } @@ -108,7 +109,7 @@ class ResultParser { * @private */ _parseDiagnostic(line) { - const diagnostic = line.replace(/^#\s*/, ''); + const diagnostic = line.replace(/^#\s*/, ""); this.results.diagnostics.push(diagnostic); } @@ -118,7 +119,7 @@ class ResultParser { */ _extractSkipReason(line) { const match = line.match(/# SKIP (.*)$/); - return match ? match[1].trim() : ''; + return match ? 
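A sketch of the hit/miss flow this class enables, using only methods visible in this diff (`initialize`, `calculateHash`, `getCachedResult`, `storeResult`); `runTest` is a stand-in for whatever actually executes the pgTAP function, and the direct `require` assumes the module exports the class the way its sibling files do.

```javascript
const TestCache = require("./src/lib/test/TestCache");

async function runWithCache(testFunction, databaseUrl, runTest, options = {}) {
  const cache = new TestCache();
  await cache.initialize();

  const hash = await cache.calculateHash(testFunction, databaseUrl, options);

  const cached = await cache.getCachedResult(hash);
  if (cached) return cached; // cache hit: skip execution entirely

  const started = Date.now();
  const result = await runTest(testFunction); // cache miss: run the real test
  await cache.storeResult(hash, result, {
    testFunction,
    duration: Date.now() - started,
    databaseUrl,
    options,
  });
  return result;
}

module.exports = runWithCache;
```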
match[1].trim() : ""; } /** @@ -134,37 +135,39 @@ class ResultParser { if (failed > 0) { lines.push(chalk.red(`✗ ${failed}/${total} tests failed`)); } else if (skipped > 0) { - lines.push(chalk.yellow(`✓ ${passed}/${total} tests passed (${skipped} skipped)`)); + lines.push( + chalk.yellow(`✓ ${passed}/${total} tests passed (${skipped} skipped)`), + ); } else { lines.push(chalk.green(`✓ All ${passed}/${total} tests passed`)); } // Individual test results if (tests.length > 0) { - lines.push(''); - tests.forEach(test => { + lines.push(""); + tests.forEach((test) => { let symbol, color; - + switch (test.status) { - case 'pass': - symbol = '✓'; + case "pass": + symbol = "✓"; color = chalk.green; break; - case 'fail': - symbol = '✗'; + case "fail": + symbol = "✗"; color = chalk.red; break; - case 'skip': - symbol = '○'; + case "skip": + symbol = "○"; color = chalk.yellow; break; default: - symbol = '?'; + symbol = "?"; color = chalk.gray; } let line = color(` ${symbol} ${test.description}`); - if (test.directive === 'SKIP' && test.reason) { + if (test.directive === "SKIP" && test.reason) { line += chalk.gray(` (${test.reason})`); } lines.push(line); @@ -173,14 +176,14 @@ class ResultParser { // Diagnostics (if any) if (diagnostics.length > 0) { - lines.push(''); - lines.push(chalk.gray('Diagnostics:')); - diagnostics.forEach(diagnostic => { + lines.push(""); + lines.push(chalk.gray("Diagnostics:")); + diagnostics.forEach((diagnostic) => { lines.push(chalk.gray(` ${diagnostic}`)); }); } - return lines.join('\n'); + return lines.join("\n"); } /** @@ -200,4 +203,4 @@ class ResultParser { } } -module.exports = ResultParser; \ No newline at end of file +module.exports = ResultParser; diff --git a/src/lib/test/TestCache.js b/src/lib/test/TestCache.js index 7498e49..6f66efb 100644 --- a/src/lib/test/TestCache.js +++ b/src/lib/test/TestCache.js @@ -1,14 +1,14 @@ /** * TestCache - High-performance test result caching system - * + * * Provides hash-based cache invalidation and performance optimization * for data test executions. Achieves >50% performance improvement * on repeat test runs. 
*/ -const fs = require('fs').promises; -const path = require('path'); -const crypto = require('crypto'); +const fs = require("fs").promises; +const path = require("path"); +const crypto = require("crypto"); /** * TestCache manages cached test results for performance optimization @@ -19,20 +19,20 @@ class TestCache { * @param {string} cacheDir - Directory for cache storage (.data-cache/test-results/) * @param {Object} logger - Logger instance (optional) */ - constructor(cacheDir = '.data-cache/test-results', logger = null) { + constructor(cacheDir = ".data-cache/test-results", logger = null) { this.cacheDir = cacheDir; this.logger = logger; this.stats = { hits: 0, misses: 0, invalidations: 0, - totalCacheRequests: 0 + totalCacheRequests: 0, }; - + // Performance tracking this.timings = { cacheOperations: [], - hashCalculations: [] + hashCalculations: [], }; } @@ -43,7 +43,7 @@ class TestCache { async initialize() { try { await fs.mkdir(this.cacheDir, { recursive: true }); - this._log('debug', `Cache directory initialized: ${this.cacheDir}`); + this._log("debug", `Cache directory initialized: ${this.cacheDir}`); } catch (error) { throw new Error(`Failed to initialize cache directory: ${error.message}`); } @@ -58,50 +58,62 @@ class TestCache { */ async calculateHash(testFunction, databaseUrl, options = {}) { const startTime = Date.now(); - + try { const hashInputs = []; - + // Add test function name hashInputs.push(`function:${testFunction}`); - + // Add database connection (without credentials for security) const dbUrl = new URL(databaseUrl); hashInputs.push(`db:${dbUrl.host}:${dbUrl.port}:${dbUrl.pathname}`); - + // Add test execution options (serialized) - const optionsString = JSON.stringify(options, Object.keys(options).sort()); + const optionsString = JSON.stringify( + options, + Object.keys(options).sort(), + ); hashInputs.push(`options:${optionsString}`); - + // Add schema hash (migration state) const schemaHash = await this._calculateSchemaHash(databaseUrl); hashInputs.push(`schema:${schemaHash}`); - + // Add test file content hash if available const testFileHash = await this._calculateTestFileHash(testFunction); if (testFileHash) { hashInputs.push(`testfile:${testFileHash}`); } - + // Create final hash - const combinedInput = hashInputs.join('|'); - const hash = crypto.createHash('sha256').update(combinedInput).digest('hex'); - + const combinedInput = hashInputs.join("|"); + const hash = crypto + .createHash("sha256") + .update(combinedInput) + .digest("hex"); + this.timings.hashCalculations.push({ function: testFunction, duration: Date.now() - startTime, - timestamp: new Date().toISOString() + timestamp: new Date().toISOString(), }); - - this._log('debug', `Hash calculated for ${testFunction}: ${hash.substring(0, 8)}... (${Date.now() - startTime}ms)`); + + this._log( + "debug", + `Hash calculated for ${testFunction}: ${hash.substring(0, 8)}... 
(${Date.now() - startTime}ms)`, + ); return hash; - } catch (error) { - this._log('warn', `Failed to calculate hash for ${testFunction}: ${error.message}`); + this._log( + "warn", + `Failed to calculate hash for ${testFunction}: ${error.message}`, + ); // Return fallback hash based on function name and timestamp - return crypto.createHash('sha256') + return crypto + .createHash("sha256") .update(`${testFunction}:${Date.now()}`) - .digest('hex'); + .digest("hex"); } } @@ -113,56 +125,68 @@ class TestCache { async getCachedResult(hash) { const startTime = Date.now(); this.stats.totalCacheRequests++; - + try { const cacheFile = path.join(this.cacheDir, `${hash}.json`); - + // Check if cache file exists try { await fs.access(cacheFile); } catch { this.stats.misses++; - this._log('debug', `Cache miss: ${hash.substring(0, 8)}...`); + this._log("debug", `Cache miss: ${hash.substring(0, 8)}...`); return null; } - + // Read and parse cache file - const cacheContent = await fs.readFile(cacheFile, 'utf8'); + const cacheContent = await fs.readFile(cacheFile, "utf8"); const cachedData = JSON.parse(cacheContent); - + // Validate cache structure if (!this._validateCacheStructure(cachedData)) { - this._log('warn', `Invalid cache structure for ${hash.substring(0, 8)}..., removing`); + this._log( + "warn", + `Invalid cache structure for ${hash.substring(0, 8)}..., removing`, + ); await this._removeCacheFile(cacheFile); this.stats.misses++; return null; } - + // Check if cache is still fresh (default: 24 hours) const maxAge = 24 * 60 * 60 * 1000; // 24 hours in milliseconds - const age = Date.now() - new Date(cachedData.metadata.timestamp).getTime(); - + const age = + Date.now() - new Date(cachedData.metadata.timestamp).getTime(); + if (age > maxAge) { - this._log('debug', `Cache expired for ${hash.substring(0, 8)}... (age: ${Math.round(age / 1000 / 60)}min)`); + this._log( + "debug", + `Cache expired for ${hash.substring(0, 8)}... (age: ${Math.round(age / 1000 / 60)}min)`, + ); await this._removeCacheFile(cacheFile); this.stats.misses++; return null; } - + // Cache hit! this.stats.hits++; this.timings.cacheOperations.push({ - operation: 'hit', + operation: "hit", hash: hash.substring(0, 8), duration: Date.now() - startTime, - timestamp: new Date().toISOString() + timestamp: new Date().toISOString(), }); - - this._log('info', `Cache hit: ${cachedData.metadata.testFunction} (saved ${cachedData.metadata.originalDuration}ms)`); + + this._log( + "info", + `Cache hit: ${cachedData.metadata.testFunction} (saved ${cachedData.metadata.originalDuration}ms)`, + ); return cachedData.result; - } catch (error) { - this._log('error', `Cache read error for ${hash.substring(0, 8)}...: ${error.message}`); + this._log( + "error", + `Cache read error for ${hash.substring(0, 8)}...: ${error.message}`, + ); this.stats.misses++; return null; } @@ -177,37 +201,44 @@ class TestCache { */ async storeResult(hash, result, metadata = {}) { const startTime = Date.now(); - + try { await this.initialize(); - + const cacheData = { result: result, metadata: { hash: hash, timestamp: new Date().toISOString(), - testFunction: metadata.testFunction || 'unknown', + testFunction: metadata.testFunction || "unknown", originalDuration: metadata.duration || 0, - databaseUrl: metadata.databaseUrl ? this._sanitizeUrl(metadata.databaseUrl) : null, + databaseUrl: metadata.databaseUrl + ? 
this._sanitizeUrl(metadata.databaseUrl) + : null, options: metadata.options || {}, - dataVersion: require('../../../package.json').version - } + dataVersion: require("../../../package.json").version, + }, }; - + const cacheFile = path.join(this.cacheDir, `${hash}.json`); - await fs.writeFile(cacheFile, JSON.stringify(cacheData, null, 2), 'utf8'); - + await fs.writeFile(cacheFile, JSON.stringify(cacheData, null, 2), "utf8"); + this.timings.cacheOperations.push({ - operation: 'store', + operation: "store", hash: hash.substring(0, 8), duration: Date.now() - startTime, - timestamp: new Date().toISOString() + timestamp: new Date().toISOString(), }); - - this._log('debug', `Cached result for ${metadata.testFunction || hash.substring(0, 8)}: ${cacheFile}`); - + + this._log( + "debug", + `Cached result for ${metadata.testFunction || hash.substring(0, 8)}: ${cacheFile}`, + ); } catch (error) { - this._log('error', `Failed to store cache for ${hash.substring(0, 8)}...: ${error.message}`); + this._log( + "error", + `Failed to store cache for ${hash.substring(0, 8)}...: ${error.message}`, + ); throw error; } } @@ -218,43 +249,45 @@ class TestCache { */ async clearCache() { const startTime = Date.now(); - + try { const files = await fs.readdir(this.cacheDir); - const jsonFiles = files.filter(f => f.endsWith('.json')); - + const jsonFiles = files.filter((f) => f.endsWith(".json")); + let removedCount = 0; for (const file of jsonFiles) { const filePath = path.join(this.cacheDir, file); await fs.unlink(filePath); removedCount++; } - + // Reset stats this.stats = { hits: 0, misses: 0, invalidations: 0, - totalCacheRequests: 0 + totalCacheRequests: 0, }; - + const duration = Date.now() - startTime; - this._log('info', `Cache cleared: ${removedCount} files removed in ${duration}ms`); - + this._log( + "info", + `Cache cleared: ${removedCount} files removed in ${duration}ms`, + ); + return { filesRemoved: removedCount, duration: duration, - timestamp: new Date().toISOString() + timestamp: new Date().toISOString(), }; - } catch (error) { - if (error.code === 'ENOENT') { + if (error.code === "ENOENT") { // Cache directory doesn't exist, nothing to clear - this._log('debug', 'Cache directory does not exist, nothing to clear'); + this._log("debug", "Cache directory does not exist, nothing to clear"); return { filesRemoved: 0, duration: Date.now() - startTime, - timestamp: new Date().toISOString() + timestamp: new Date().toISOString(), }; } throw error; @@ -268,18 +301,18 @@ class TestCache { async getStats() { try { const files = await fs.readdir(this.cacheDir); - const jsonFiles = files.filter(f => f.endsWith('.json')); - + const jsonFiles = files.filter((f) => f.endsWith(".json")); + // Calculate cache file sizes let totalSize = 0; let oldestFile = null; let newestFile = null; - + for (const file of jsonFiles) { const filePath = path.join(this.cacheDir, file); const stat = await fs.stat(filePath); totalSize += stat.size; - + if (!oldestFile || stat.mtime < oldestFile.mtime) { oldestFile = { name: file, mtime: stat.mtime }; } @@ -287,34 +320,52 @@ class TestCache { newestFile = { name: file, mtime: stat.mtime }; } } - + // Calculate hit rate - const hitRate = this.stats.totalCacheRequests > 0 - ? (this.stats.hits / this.stats.totalCacheRequests * 100).toFixed(2) - : '0.00'; - + const hitRate = + this.stats.totalCacheRequests > 0 + ? ((this.stats.hits / this.stats.totalCacheRequests) * 100).toFixed(2) + : "0.00"; + // Performance metrics - const avgHashTime = this.timings.hashCalculations.length > 0 - ? 
this.timings.hashCalculations.reduce((sum, t) => sum + t.duration, 0) / this.timings.hashCalculations.length - : 0; - - const avgCacheOpTime = this.timings.cacheOperations.length > 0 - ? this.timings.cacheOperations.reduce((sum, t) => sum + t.duration, 0) / this.timings.cacheOperations.length - : 0; - + const avgHashTime = + this.timings.hashCalculations.length > 0 + ? this.timings.hashCalculations.reduce( + (sum, t) => sum + t.duration, + 0, + ) / this.timings.hashCalculations.length + : 0; + + const avgCacheOpTime = + this.timings.cacheOperations.length > 0 + ? this.timings.cacheOperations.reduce( + (sum, t) => sum + t.duration, + 0, + ) / this.timings.cacheOperations.length + : 0; + return { files: { count: jsonFiles.length, totalSize: totalSize, - averageSize: jsonFiles.length > 0 ? Math.round(totalSize / jsonFiles.length) : 0, - oldest: oldestFile ? { - file: oldestFile.name, - age: Math.round((Date.now() - oldestFile.mtime.getTime()) / 1000 / 60) // minutes - } : null, - newest: newestFile ? { - file: newestFile.name, - age: Math.round((Date.now() - newestFile.mtime.getTime()) / 1000 / 60) // minutes - } : null + averageSize: + jsonFiles.length > 0 ? Math.round(totalSize / jsonFiles.length) : 0, + oldest: oldestFile + ? { + file: oldestFile.name, + age: Math.round( + (Date.now() - oldestFile.mtime.getTime()) / 1000 / 60, + ), // minutes + } + : null, + newest: newestFile + ? { + file: newestFile.name, + age: Math.round( + (Date.now() - newestFile.mtime.getTime()) / 1000 / 60, + ), // minutes + } + : null, }, performance: { hitRate: hitRate, @@ -323,22 +374,35 @@ class TestCache { invalidations: this.stats.invalidations, totalRequests: this.stats.totalCacheRequests, averageHashTime: Math.round(avgHashTime * 100) / 100, // ms - averageCacheOpTime: Math.round(avgCacheOpTime * 100) / 100 // ms + averageCacheOpTime: Math.round(avgCacheOpTime * 100) / 100, // ms }, timings: { recentHashes: this.timings.hashCalculations.slice(-5), - recentCacheOps: this.timings.cacheOperations.slice(-10) + recentCacheOps: this.timings.cacheOperations.slice(-10), }, - directory: this.cacheDir + directory: this.cacheDir, }; - } catch (error) { - if (error.code === 'ENOENT') { + if (error.code === "ENOENT") { return { - files: { count: 0, totalSize: 0, averageSize: 0, oldest: null, newest: null }, - performance: { hitRate: '0.00', hits: 0, misses: 0, invalidations: 0, totalRequests: 0, averageHashTime: 0, averageCacheOpTime: 0 }, + files: { + count: 0, + totalSize: 0, + averageSize: 0, + oldest: null, + newest: null, + }, + performance: { + hitRate: "0.00", + hits: 0, + misses: 0, + invalidations: 0, + totalRequests: 0, + averageHashTime: 0, + averageCacheOpTime: 0, + }, timings: { recentHashes: [], recentCacheOps: [] }, - directory: this.cacheDir + directory: this.cacheDir, }; } throw error; @@ -353,21 +417,21 @@ class TestCache { async invalidateByPattern(pattern) { try { const files = await fs.readdir(this.cacheDir); - const jsonFiles = files.filter(f => f.endsWith('.json')); - + const jsonFiles = files.filter((f) => f.endsWith(".json")); + let invalidatedCount = 0; - + for (const file of jsonFiles) { const filePath = path.join(this.cacheDir, file); - + try { - const content = await fs.readFile(filePath, 'utf8'); + const content = await fs.readFile(filePath, "utf8"); const data = JSON.parse(content); - + // Check if pattern matches test function or hash - const testFunction = data.metadata?.testFunction || ''; - const hash = data.metadata?.hash || ''; - + const testFunction = data.metadata?.testFunction || 
""; + const hash = data.metadata?.hash || ""; + if (testFunction.includes(pattern) || hash.includes(pattern)) { await fs.unlink(filePath); invalidatedCount++; @@ -375,15 +439,17 @@ class TestCache { } } catch (err) { // Skip files that can't be read or parsed - this._log('warn', `Skipping invalid cache file: ${file}`); + this._log("warn", `Skipping invalid cache file: ${file}`); } } - - this._log('info', `Invalidated ${invalidatedCount} cache entries matching pattern: ${pattern}`); + + this._log( + "info", + `Invalidated ${invalidatedCount} cache entries matching pattern: ${pattern}`, + ); return invalidatedCount; - } catch (error) { - if (error.code === 'ENOENT') { + if (error.code === "ENOENT") { return 0; // No cache directory, nothing to invalidate } throw error; @@ -403,29 +469,30 @@ class TestCache { // For now, use a simple timestamp-based approach // In a real implementation, we would query migration history // or calculate hash of database schema objects - const migrationDir = path.resolve(process.cwd(), '../../migrations'); - + const migrationDir = path.resolve(process.cwd(), "../../migrations"); + try { const files = await fs.readdir(migrationDir); - const migrationFiles = files.filter(f => f.endsWith('.sql')).sort(); - + const migrationFiles = files.filter((f) => f.endsWith(".sql")).sort(); + if (migrationFiles.length === 0) { - return 'no-migrations'; + return "no-migrations"; } - + // Use the latest migration file as schema state indicator const latestMigration = migrationFiles[migrationFiles.length - 1]; - return crypto.createHash('md5').update(latestMigration).digest('hex'); - + return crypto.createHash("md5").update(latestMigration).digest("hex"); } catch { // If we can't read migrations, use current timestamp rounded to hour // This provides reasonable cache invalidation for schema changes const hourlyTimestamp = Math.floor(Date.now() / (1000 * 60 * 60)); - return crypto.createHash('md5').update(hourlyTimestamp.toString()).digest('hex'); + return crypto + .createHash("md5") + .update(hourlyTimestamp.toString()) + .digest("hex"); } - } catch { - return 'unknown-schema'; + return "unknown-schema"; } } @@ -439,23 +506,26 @@ class TestCache { try { // Look for test files in common locations const testDirs = [ - path.resolve(process.cwd(), '../../tests'), - path.resolve(process.cwd(), '../../test') + path.resolve(process.cwd(), "../../tests"), + path.resolve(process.cwd(), "../../test"), ]; - + for (const testDir of testDirs) { try { const files = await fs.readdir(testDir); - + // Find files that might contain this test function for (const file of files) { - if (file.endsWith('.sql') && ( - file.includes(testFunction.replace('run_', '').replace('_tests', '')) || - testFunction.includes(file.replace('.sql', '')) - )) { + if ( + file.endsWith(".sql") && + (file.includes( + testFunction.replace("run_", "").replace("_tests", ""), + ) || + testFunction.includes(file.replace(".sql", ""))) + ) { const filePath = path.join(testDir, file); - const content = await fs.readFile(filePath, 'utf8'); - return crypto.createHash('md5').update(content).digest('hex'); + const content = await fs.readFile(filePath, "utf8"); + return crypto.createHash("md5").update(content).digest("hex"); } } } catch { @@ -463,7 +533,7 @@ class TestCache { continue; } } - + return null; } catch { return null; @@ -477,13 +547,15 @@ class TestCache { * @private */ _validateCacheStructure(data) { - return data && - typeof data === 'object' && - data.result && - data.metadata && - typeof data.metadata === 'object' && - 
data.metadata.timestamp && - data.metadata.hash; + return ( + data && + typeof data === "object" && + data.result && + data.metadata && + typeof data.metadata === "object" && + data.metadata.timestamp && + data.metadata.hash + ); } /** @@ -496,7 +568,10 @@ class TestCache { try { await fs.unlink(filePath); } catch (error) { - this._log('warn', `Failed to remove cache file ${filePath}: ${error.message}`); + this._log( + "warn", + `Failed to remove cache file ${filePath}: ${error.message}`, + ); } } @@ -511,7 +586,7 @@ class TestCache { const parsed = new URL(url); return `${parsed.protocol}//${parsed.host}${parsed.pathname}`; } catch { - return 'invalid-url'; + return "invalid-url"; } } @@ -522,12 +597,12 @@ class TestCache { * @private */ _log(level, message) { - if (this.logger && typeof this.logger[level] === 'function') { + if (this.logger && typeof this.logger[level] === "function") { this.logger[level](`[TestCache] ${message}`); - } else if (level === 'error' || level === 'warn') { + } else if (level === "error" || level === "warn") { console.error(`[TestCache] ${level.toUpperCase()}: ${message}`); } } } -module.exports = TestCache; \ No newline at end of file +module.exports = TestCache; diff --git a/src/lib/test/formatters/JSONFormatter.js b/src/lib/test/formatters/JSONFormatter.js index 6bf9fb3..baad07a 100644 --- a/src/lib/test/formatters/JSONFormatter.js +++ b/src/lib/test/formatters/JSONFormatter.js @@ -17,44 +17,53 @@ class JSONFormatter { * @returns {string} JSON formatted output */ format(results) { - const { total, passed, failed, skipped, tests, diagnostics, testFunctions } = results; + const { + total, + passed, + failed, + skipped, + tests, + diagnostics, + testFunctions, + } = results; const endTime = Date.now(); const duration = endTime - this.startTime; // Keep in milliseconds for JSON const jsonResult = { metadata: { - format: 'json', - version: '1.0', + format: "json", + version: "1.0", timestamp: new Date().toISOString(), - duration: duration + duration: duration, }, stats: { total: total, passed: passed, failed: failed, skipped: skipped, - success: failed === 0 + success: failed === 0, }, - testFunctions: testFunctions.map(func => ({ + testFunctions: testFunctions.map((func) => ({ name: func.name, total: func.total, passed: func.passed, failed: func.failed, skipped: func.skipped, success: func.success, - passRate: func.total > 0 ? (func.passed / func.total * 100).toFixed(1) : 0 + passRate: + func.total > 0 ? ((func.passed / func.total) * 100).toFixed(1) : 0, })), tests: tests.map((test, index) => { const testResult = { id: index + 1, name: test.description, status: test.status, - function: test.function || 'unknown' + function: test.function || "unknown", }; // Add additional properties for specific test types - if (test.status === 'skip') { - testResult.reason = test.reason || 'No reason provided'; + if (test.status === "skip") { + testResult.reason = test.reason || "No reason provided"; testResult.directive = test.directive; } @@ -66,30 +75,30 @@ class JSONFormatter { }), diagnostics: diagnostics || [], summary: { - passRate: total > 0 ? (passed / total * 100).toFixed(1) : 0, - failRate: total > 0 ? (failed / total * 100).toFixed(1) : 0, - skipRate: total > 0 ? (skipped / total * 100).toFixed(1) : 0, + passRate: total > 0 ? ((passed / total) * 100).toFixed(1) : 0, + failRate: total > 0 ? ((failed / total) * 100).toFixed(1) : 0, + skipRate: total > 0 ? 
((skipped / total) * 100).toFixed(1) : 0,
        overallSuccess: failed === 0,
        executionTime: {
          total: duration,
          average: total > 0 ? (duration / total).toFixed(2) : 0,
-          unit: 'milliseconds'
-        }
-      }
+          unit: "milliseconds",
+        },
+      },
    };

    // Add function-level breakdown if multiple functions
    if (testFunctions && testFunctions.length > 1) {
      jsonResult.functionBreakdown = testFunctions.reduce((breakdown, func) => {
        breakdown[func.name] = {
-          tests: tests.filter(test => test.function === func.name),
+          tests: tests.filter((test) => test.function === func.name),
          stats: {
            total: func.total,
            passed: func.passed,
            failed: func.failed,
            skipped: func.skipped,
-            success: func.success
-          }
+            success: func.success,
+          },
        };
        return breakdown;
      }, {});
@@ -103,7 +112,7 @@ class JSONFormatter {
   * @returns {string} File extension
   */
  getFileExtension() {
-    return '.json';
+    return ".json";
  }

  /**
@@ -111,8 +120,8 @@ class JSONFormatter {
   * @returns {string} MIME type
   */
  getMimeType() {
-    return 'application/json';
+    return "application/json";
  }
}

-module.exports = JSONFormatter;
\ No newline at end of file
+module.exports = JSONFormatter;
diff --git a/src/lib/test/formatters/JUnitFormatter.js b/src/lib/test/formatters/JUnitFormatter.js
index 20ec788..828b9a1 100644
--- a/src/lib/test/formatters/JUnitFormatter.js
+++ b/src/lib/test/formatters/JUnitFormatter.js
@@ -23,45 +23,55 @@ class JUnitFormatter {
    const xml = [];
    xml.push('');
-    xml.push('');
+    xml.push("");
+
    // Create one testsuite containing all tests
-    xml.push(`  `);
+    xml.push(
+      `  `,
+    );
+
    // Add individual test cases
-    tests.forEach(test => {
+    tests.forEach((test) => {
      const testName = this._escapeXml(test.description);
      const testTime = this._calculateTestTime(test, duration, total);
-
-      if (test.status === 'fail') {
-        xml.push(`    `);
+
+      if (test.status === "fail") {
+        xml.push(
+          `    `,
+        );
        xml.push(`      `);
-        xml.push('    ');
-      } else if (test.status === 'skip') {
-        xml.push(`    `);
-        const reason = test.reason ? this._escapeXml(test.reason) : 'Skipped';
+        xml.push("    ");
+      } else if (test.status === "skip") {
+        xml.push(
+          `    `,
+        );
+        const reason = test.reason ? this._escapeXml(test.reason) : "Skipped";
        xml.push(`      `);
-        xml.push('    ');
+        xml.push("    ");
      } else {
-        xml.push(`    `);
+        xml.push(
+          `    `,
+        );
      }
    });
-
+
    // Add system-out with function-level summary
    if (testFunctions && testFunctions.length > 0) {
-      xml.push(' {
-        const status = func.success ? 'PASSED' : 'FAILED';
-        xml.push(`${func.name}: ${func.passed}/${func.total} passed (${status})`);
+      xml.push(" {
+        const status = func.success ? "PASSED" : "FAILED";
+        xml.push(
+          `${func.name}: ${func.passed}/${func.total} passed (${status})`,
+        );
      });
-    xml.push(']]>');
+    xml.push("]]>");
    }
-
-    xml.push('  ');
-    xml.push('');
-
-    return xml.join('\n');
+
+    xml.push("  ");
+    xml.push("");
+
+    return xml.join("\n");
  }

  /**
@@ -69,13 +79,13 @@ class JUnitFormatter {
   * @private
   */
  _escapeXml(str) {
-    if (!str) return '';
+    if (!str) return "";
    return str
-      .replace(/&/g, '&amp;')
-      .replace(/</g, '&lt;')
-      .replace(/>/g, '&gt;')
-      .replace(/"/g, '&quot;')
-      .replace(/'/g, '&#39;');
+      .replace(/&/g, "&amp;")
+      .replace(/</g, "&lt;")
+      .replace(/>/g, "&gt;")
+      .replace(/"/g, "&quot;")
+      .replace(/'/g, "&#39;");
  }

  /**
@@ -92,7 +102,7 @@ class JUnitFormatter {
   * @returns {string} File extension
   */
  getFileExtension() {
-    return '.xml';
+    return ".xml";
  }

  /**
@@ -100,8 +110,8 @@ class JUnitFormatter {
   * @returns {string} MIME type
   */
  getMimeType() {
-    return 'application/xml';
+    return "application/xml";
  }
}

-module.exports = JUnitFormatter;
\ No newline at end of file
+module.exports = JUnitFormatter;
diff --git a/src/lib/test/formatters/index.js b/src/lib/test/formatters/index.js
index acfa321..44eec2f 100644
--- a/src/lib/test/formatters/index.js
+++ b/src/lib/test/formatters/index.js
@@ -3,10 +3,10 @@
 * Export all available formatters for test output
 */

-const JUnitFormatter = require('./JUnitFormatter');
-const JSONFormatter = require('./JSONFormatter');
+const JUnitFormatter = require("./JUnitFormatter");
+const JSONFormatter = require("./JSONFormatter");

module.exports = {
  JUnitFormatter,
-  JSONFormatter
-};
\ No newline at end of file
+  JSONFormatter,
+};
diff --git a/src/lib/testing/BatchProcessor.js b/src/lib/testing/BatchProcessor.js
index db31fe0..fb51384 100644
--- a/src/lib/testing/BatchProcessor.js
+++ b/src/lib/testing/BatchProcessor.js
@@ -1,14 +1,14 @@
/**
 * Memory-aware batch processor for D.A.T.A. CLI
-
+ *
 * Processes large datasets in batches with memory monitoring
 * and cleanup to prevent OOM errors.
-
+ *
 * @class BatchProcessor
 * @author D.A.T.A. Engineering Team
 */
-import MemoryMonitor from './MemoryMonitor.js';
+import MemoryMonitor from "./MemoryMonitor.js";

class BatchProcessor {
  /**
@@ -24,10 +24,10 @@ class BatchProcessor {
    this.options = {
      batchSize: options.batchSize || 100,
      maxMemoryMB: options.maxMemoryMB || 500,
-      enableGC: options.enableGC || true,
-      ...options
+      enableGC: options.enableGC ?? true,
+      ...options,
    };
-
+
    this.processedBatches = 0;
    this.totalItems = 0;
  }
@@ -44,31 +44,40 @@ class BatchProcessor {
    for (let i = 0; i < batches.length; i++) {
      const batch = batches[i];
-
+
      // Check memory before processing batch
      const memBefore = MemoryMonitor.getMemoryUsage();
-
-      if (MemoryMonitor.shouldTriggerCleanup(memBefore.heapUsed, this.options.maxMemoryMB)) {
+
+      if (
+        MemoryMonitor.shouldTriggerCleanup(
+          memBefore.heapUsed,
+          this.options.maxMemoryMB,
+        )
+      ) {
+        // We must run cleanup BEFORE proceeding to the next await to avoid OOM.
+        // eslint-disable-next-line no-await-in-loop
        await this.performCleanup();
      }

-      // Process batch
+      // Process batch SEQUENTIALLY to keep memory bounded (intentional).
+ // eslint-disable-next-line no-await-in-loop const batchResults = await processor(batch, i); results = results.concat(batchResults); - + this.processedBatches++; this.totalItems += batch.length; // Emit progress - this.scanner.emit('progress', { - type: 'batch_processed', + this.scanner.emit("progress", { + type: "batch_processed", batch: i + 1, totalBatches: batches.length, itemsProcessed: this.totalItems, - memoryUsage: MemoryMonitor.getMemoryUsage() + memoryUsage: MemoryMonitor.getMemoryUsage(), }); - // Yield to event loop + // Yield to event loop to keep UI/other tasks responsive. + // eslint-disable-next-line no-await-in-loop await this.yieldToEventLoop(); } @@ -90,9 +99,9 @@ class BatchProcessor { /** * Perform memory cleanup operations - * @returns {Promise} + * (Synchronous; callers can still `await` this safely.) */ - async performCleanup() { + performCleanup() { // Clear temporary references if (this.scanner.weakRefs) { this.scanner.weakRefs = new WeakMap(); @@ -112,25 +121,26 @@ class BatchProcessor { this.scanner.memoryState.currentUsageMB = usage.heapUsed; this.scanner.memoryState.maxUsageMB = Math.max( this.scanner.memoryState.maxUsageMB, - usage.heapUsed + usage.heapUsed, ); this.scanner.memoryState.lastCleanup = Date.now(); } // Emit cleanup event - this.scanner.emit('cleanup', { - type: 'memory_cleanup', + this.scanner.emit("cleanup", { + type: "memory_cleanup", memoryUsage: MemoryMonitor.getMemoryUsage(), - gcPerformed: true + gcPerformed: true, }); } /** * Yield control to event loop - * @returns {Promise} + * Return a Promise; no need for `async`. */ - async yieldToEventLoop() { - return new Promise(resolve => setImmediate(resolve)); + yieldToEventLoop() { + // Avoid setImmediate no-undef; setTimeout(0) is fine here. + return new Promise((resolve) => setTimeout(resolve, 0)); } /** @@ -151,9 +161,9 @@ class BatchProcessor { totalItems: this.totalItems, batchSize: this.options.batchSize, maxMemoryMB: this.options.maxMemoryMB, - enableGC: this.options.enableGC + enableGC: this.options.enableGC, }; } } -export default BatchProcessor; \ No newline at end of file +export default BatchProcessor; diff --git a/src/lib/testing/CoverageEnforcer.js b/src/lib/testing/CoverageEnforcer.js index 7cdbfee..30b8a50 100644 --- a/src/lib/testing/CoverageEnforcer.js +++ b/src/lib/testing/CoverageEnforcer.js @@ -1,13 +1,14 @@ /** * Coverage Enforcer for D.A.T.A. - * - * Main enforcement engine that compares required vs actual coverage and + * + * Main enforcement engine that compares required vs actual coverage and * enforces coverage policies to prevent deployment of untested database changes. 
- * + * * @module CoverageEnforcer */ -const { EventEmitter } = require('events'); +const { EventEmitter } = require("events"); +const { ValidationError } = require("./errors"); /** * Coverage enforcement levels @@ -15,9 +16,9 @@ const { EventEmitter } = require('events'); * @enum {string} */ const ENFORCEMENT_LEVELS = { - STRICT: 'STRICT', // Block any missing coverage - NORMAL: 'NORMAL', // Block critical missing coverage - LENIENT: 'LENIENT' // Warn but allow deployment + STRICT: "STRICT", // Block any missing coverage + NORMAL: "NORMAL", // Block critical missing coverage + LENIENT: "LENIENT", // Warn but allow deployment }; /** @@ -26,10 +27,10 @@ const ENFORCEMENT_LEVELS = { * @enum {string} */ const GAP_SEVERITY = { - CRITICAL: 'CRITICAL', // Destructive operations without tests - HIGH: 'HIGH', // New tables/functions without tests - MEDIUM: 'MEDIUM', // Column/index changes without tests - LOW: 'LOW' // Minor changes without tests + CRITICAL: "CRITICAL", // Destructive operations without tests + HIGH: "HIGH", // New tables/functions without tests + MEDIUM: "MEDIUM", // Column/index changes without tests + LOW: "LOW", // Minor changes without tests }; /** @@ -91,7 +92,7 @@ const GAP_SEVERITY = { class CoverageEnforcer extends EventEmitter { constructor(options = {}) { super(); - + // Default enforcement configuration this.config = { level: options.level || ENFORCEMENT_LEVELS.NORMAL, @@ -100,41 +101,44 @@ class CoverageEnforcer extends EventEmitter { tables: options.tableThreshold || 90, functions: options.functionThreshold || 85, policies: options.policyThreshold || 90, - ...options.thresholds + ...options.thresholds, }, - ignoredSchemas: options.ignoredSchemas || ['information_schema', 'pg_catalog'], + ignoredSchemas: options.ignoredSchemas || [ + "information_schema", + "pg_catalog", + ], ignoredTables: options.ignoredTables || [], allowBypass: options.allowBypass !== false, bypassReason: options.bypassReason || null, - ...options + ...options, }; - + // Severity mapping for different operations this.operationSeverity = { - 'DROP_TABLE': GAP_SEVERITY.CRITICAL, - 'DROP_COLUMN': GAP_SEVERITY.CRITICAL, - 'TRUNCATE_TABLE': GAP_SEVERITY.CRITICAL, - 'CREATE_TABLE': GAP_SEVERITY.HIGH, - 'CREATE_FUNCTION': GAP_SEVERITY.HIGH, - 'ALTER_TABLE': GAP_SEVERITY.MEDIUM, - 'ALTER_COLUMN': GAP_SEVERITY.MEDIUM, - 'CREATE_INDEX': GAP_SEVERITY.MEDIUM, - 'CREATE_POLICY': GAP_SEVERITY.HIGH, - 'DROP_POLICY': GAP_SEVERITY.CRITICAL, - 'DEFAULT': GAP_SEVERITY.LOW + DROP_TABLE: GAP_SEVERITY.CRITICAL, + DROP_COLUMN: GAP_SEVERITY.CRITICAL, + TRUNCATE_TABLE: GAP_SEVERITY.CRITICAL, + CREATE_TABLE: GAP_SEVERITY.HIGH, + CREATE_FUNCTION: GAP_SEVERITY.HIGH, + ALTER_TABLE: GAP_SEVERITY.MEDIUM, + ALTER_COLUMN: GAP_SEVERITY.MEDIUM, + CREATE_INDEX: GAP_SEVERITY.MEDIUM, + CREATE_POLICY: GAP_SEVERITY.HIGH, + DROP_POLICY: GAP_SEVERITY.CRITICAL, + DEFAULT: GAP_SEVERITY.LOW, }; - + // Test suggestions by object type this.testSuggestions = { - table: ['has_table', 'table_privs_are', 'tables_are'], - column: ['has_column', 'col_type_is', 'col_is_null', 'col_not_null'], - function: ['has_function', 'function_returns', 'function_lang_is'], - index: ['has_index', 'index_is_unique', 'index_is_primary'], - policy: ['policy_exists', 'policy_cmd_is', 'policy_role_is'], - trigger: ['has_trigger', 'trigger_is'] + table: ["has_table", "table_privs_are", "tables_are"], + column: ["has_column", "col_type_is", "col_is_null", "col_not_null"], + function: ["has_function", "function_returns", "function_lang_is"], + index: ["has_index", 
"index_is_unique", "index_is_primary"], + policy: ["policy_exists", "policy_cmd_is", "policy_role_is"], + trigger: ["has_trigger", "trigger_is"], }; } - + /** * Main enforcement method - compares requirements vs coverage * @param {Array} requirements - Test requirements from analyzer @@ -143,71 +147,88 @@ class CoverageEnforcer extends EventEmitter { * @returns {Promise} Enforcement report */ async enforce(requirements, coverage, options = {}) { - this.emit('progress', { message: 'Starting coverage enforcement analysis...' }); - + this.emit("progress", { + message: "Starting coverage enforcement analysis...", + }); + // Merge options with config const config = { ...this.config, ...options }; - + // Filter ignored items const filteredRequirements = this.filterRequirements(requirements, config); const filteredCoverage = this.filterCoverage(coverage, config); - - this.emit('progress', { - message: `Analyzing ${filteredRequirements.length} requirements against ${filteredCoverage.length} coverage items` + + this.emit("progress", { + message: `Analyzing ${filteredRequirements.length} requirements against ${filteredCoverage.length} coverage items`, }); - + // Compare coverage - const comparison = await this.compareCoverage(filteredRequirements, filteredCoverage); - + const comparison = await this.compareCoverage( + filteredRequirements, + filteredCoverage, + ); + // Analyze gaps const gaps = this.analyzeGaps(comparison.unmetRequirements, config); - + // Determine if deployment should be blocked const shouldBlock = this.shouldBlock(gaps, config); - + // Generate recommendations - const recommendations = this.generateRecommendations(gaps, comparison, config); - + const recommendations = this.generateRecommendations( + gaps, + comparison, + config, + ); + // Calculate statistics - const statistics = this.calculateStatistics(filteredRequirements, filteredCoverage, gaps); - + const statistics = this.calculateStatistics( + filteredRequirements, + filteredCoverage, + gaps, + ); + const report = { enforcementLevel: config.level, totalRequirements: filteredRequirements.length, metRequirements: comparison.metRequirements.length, - coveragePercentage: Math.round((comparison.metRequirements.length / filteredRequirements.length) * 100) || 0, + coveragePercentage: + Math.round( + (comparison.metRequirements.length / filteredRequirements.length) * + 100, + ) || 0, gaps: gaps, shouldBlock: shouldBlock, recommendations: recommendations, statistics: statistics, - bypassReason: config.bypassReason || null + bypassReason: config.bypassReason || null, }; - + // Emit appropriate events if (shouldBlock) { - this.emit('enforcement_failed', { - message: 'Coverage enforcement failed - deployment blocked', + this.emit("enforcement_failed", { + message: "Coverage enforcement failed - deployment blocked", gaps: gaps.length, - coverage: report.coveragePercentage + coverage: report.coveragePercentage, }); } else { - this.emit('enforcement_passed', { - message: 'Coverage enforcement passed', + this.emit("enforcement_passed", { + message: "Coverage enforcement passed", coverage: report.coveragePercentage, - gaps: gaps.length + gaps: gaps.length, }); } - - this.emit('complete', { - message: 'Coverage enforcement analysis complete', + + this.emit("complete", { + message: "Coverage enforcement analysis complete", shouldBlock: shouldBlock, gaps: gaps.length, - coverage: report.coveragePercentage + coverage: report.coveragePercentage, }); - + return report; } - + /** * Generate normalized coverage key for consistent lookups * @param 
{Object} item - Item with schema, name, and type @@ -216,28 +237,30 @@ class CoverageEnforcer extends EventEmitter { */ _generateCoverageKey(item) { // Normalize schema (default to 'public' per PostgreSQL convention) - const schema = (item.schema || 'public').toLowerCase().trim(); - + const schema = (item.schema || "public").toLowerCase().trim(); + // Normalize name and type - const name = (item.name || '').toLowerCase().trim(); - const type = (item.type || '').toLowerCase().trim(); - + const name = (item.name || "").toLowerCase().trim(); + const type = (item.type || "").toLowerCase().trim(); + // Validate components - if (!name) { - throw new Error(`Invalid coverage item: missing name property`); - } - if (!type) { - throw new Error(`Invalid coverage item: missing type property`); - } - + if (!item.name) + throw new ValidationError("Invalid coverage item: missing name", { + item, + }); + if (!item.type) + throw new ValidationError("Invalid coverage item: missing type", { + item, + }); + // Use separator that won't appear in PostgreSQL identifiers - const separator = '::'; - + const separator = "::"; + // Escape any separator sequences in the components (shouldn't happen in valid identifiers) - const escapedSchema = schema.replace(/::/g, '\\:\\:'); - const escapedName = name.replace(/::/g, '\\:\\:'); - const escapedType = type.replace(/::/g, '\\:\\:'); - + const escapedSchema = schema.replace(/::/g, "\\:\\:"); + const escapedName = name.replace(/::/g, "\\:\\:"); + const escapedType = type.replace(/::/g, "\\:\\:"); + return `${escapedSchema}${separator}${escapedName}${separator}${escapedType}`; } @@ -248,11 +271,13 @@ class CoverageEnforcer extends EventEmitter { * @returns {Object} Comparison results */ compareCoverage(requirements, coverage) { - this.emit('progress', { message: 'Comparing requirements against actual coverage...' 
}); - + this.emit("progress", { + message: "Comparing requirements against actual coverage...", + }); + // Build coverage lookup for efficient matching const coverageLookup = new Map(); - coverage.forEach(item => { + coverage.forEach((item) => { try { const key = this._generateCoverageKey(item); if (!coverageLookup.has(key)) { @@ -260,53 +285,53 @@ class CoverageEnforcer extends EventEmitter { } coverageLookup.get(key).push(item); } catch (error) { - this.emit('warning', { + this.emit("warning", { message: `Skipping invalid coverage item: ${error.message}`, - item: item + item: item, }); } }); - + const metRequirements = []; const unmetRequirements = []; - + for (const requirement of requirements) { try { const key = this._generateCoverageKey(requirement); const matchingCoverage = coverageLookup.get(key) || []; - + if (this.isRequirementMet(requirement, matchingCoverage)) { metRequirements.push({ requirement: requirement, - coverage: matchingCoverage + coverage: matchingCoverage, }); } else { unmetRequirements.push({ requirement: requirement, - coverage: matchingCoverage + coverage: matchingCoverage, }); } } catch (error) { - this.emit('warning', { + this.emit("warning", { message: `Error processing requirement: ${error.message}`, - requirement: requirement + requirement: requirement, }); // Treat as unmet if we can't process it unmetRequirements.push({ requirement: requirement, coverage: [], - error: error.message + error: error.message, }); } } - + return { metRequirements: metRequirements, unmetRequirements: unmetRequirements, - coverageLookup: coverageLookup + coverageLookup: coverageLookup, }; } - + /** * Check if a requirement is met by available coverage * @param {TestRequirement} requirement - The requirement to check @@ -317,26 +342,26 @@ class CoverageEnforcer extends EventEmitter { if (!coverage || coverage.length === 0) { return false; } - + // For basic requirements, any coverage is sufficient if (!requirement.requiredTests || requirement.requiredTests.length === 0) { return true; } - + // Check for specific required tests const availableTests = new Set(); - coverage.forEach(item => { + coverage.forEach((item) => { if (item.tests) { - item.tests.forEach(test => availableTests.add(test)); + item.tests.forEach((test) => availableTests.add(test)); } }); - + // All required tests must be present - return requirement.requiredTests.every(requiredTest => - availableTests.has(requiredTest) + return requirement.requiredTests.every((requiredTest) => + availableTests.has(requiredTest), ); } - + /** * Analyze coverage gaps for severity and blocking status * @param {Array} unmetRequirements - Requirements without coverage @@ -344,33 +369,33 @@ class CoverageEnforcer extends EventEmitter { * @returns {Array} Coverage gaps with analysis */ analyzeGaps(unmetRequirements, config) { - this.emit('progress', { message: 'Analyzing coverage gaps...' }); - + this.emit("progress", { message: "Analyzing coverage gaps..." 
}); + const gaps = []; - + for (const unmet of unmetRequirements) { const requirement = unmet.requirement; const severity = this.assessGapSeverity(requirement); const isBlocking = this.isGapBlocking(severity, config); - + const gap = { requirement: requirement, severity: severity, message: this.generateGapMessage(requirement), suggestions: this.generateTestSuggestions(requirement), isBlocking: isBlocking, - availableCoverage: unmet.coverage || [] + availableCoverage: unmet.coverage || [], }; - + gaps.push(gap); } - + // Sort gaps by severity (most severe first) gaps.sort((a, b) => this.compareSeverity(b.severity, a.severity)); - + return gaps; } - + /** * Assess the severity of a coverage gap * @param {TestRequirement} requirement - The requirement @@ -382,11 +407,11 @@ class CoverageEnforcer extends EventEmitter { if (operationSeverity) { return operationSeverity; } - + // Fall back to requirement severity or default return requirement.severity || this.operationSeverity.DEFAULT; } - + /** * Determine if a gap should block deployment * @param {string} severity - Gap severity @@ -397,18 +422,20 @@ class CoverageEnforcer extends EventEmitter { switch (config.level) { case ENFORCEMENT_LEVELS.STRICT: return true; // Block all gaps - + case ENFORCEMENT_LEVELS.NORMAL: - return severity === GAP_SEVERITY.CRITICAL || severity === GAP_SEVERITY.HIGH; - + return ( + severity === GAP_SEVERITY.CRITICAL || severity === GAP_SEVERITY.HIGH + ); + case ENFORCEMENT_LEVELS.LENIENT: return severity === GAP_SEVERITY.CRITICAL; - + default: return false; } } - + /** * Determine if deployment should be blocked based on gaps * @param {Array} gaps - Coverage gaps @@ -418,18 +445,18 @@ class CoverageEnforcer extends EventEmitter { shouldBlock(gaps, config) { // Check for bypass if (config.allowBypass && config.bypassReason) { - this.emit('bypass_used', { - message: 'Coverage enforcement bypassed', + this.emit("bypass_used", { + message: "Coverage enforcement bypassed", reason: config.bypassReason, - gaps: gaps.length + gaps: gaps.length, }); return false; } - + // Check if any gaps are blocking - return gaps.some(gap => gap.isBlocking); + return gaps.some((gap) => gap.isBlocking); } - + /** * Generate coverage gap report * @param {Array} gaps - Coverage gaps @@ -437,43 +464,50 @@ class CoverageEnforcer extends EventEmitter { */ generateReport(gaps) { if (!gaps || gaps.length === 0) { - return '✅ No coverage gaps found - all requirements satisfied!'; + return "✅ No coverage gaps found - all requirements satisfied!"; } - + const lines = []; lines.push(`📊 Coverage Gap Report (${gaps.length} gaps found)\n`); - + // Group by severity const bySeverity = gaps.reduce((acc, gap) => { if (!acc[gap.severity]) acc[gap.severity] = []; acc[gap.severity].push(gap); return acc; }, {}); - + // Report each severity level - for (const severity of [GAP_SEVERITY.CRITICAL, GAP_SEVERITY.HIGH, GAP_SEVERITY.MEDIUM, GAP_SEVERITY.LOW]) { + for (const severity of [ + GAP_SEVERITY.CRITICAL, + GAP_SEVERITY.HIGH, + GAP_SEVERITY.MEDIUM, + GAP_SEVERITY.LOW, + ]) { const severityGaps = bySeverity[severity]; if (!severityGaps || severityGaps.length === 0) continue; - + const icon = this.getSeverityIcon(severity); - const blockingCount = severityGaps.filter(g => g.isBlocking).length; - - lines.push(`${icon} ${severity} (${severityGaps.length} gaps${blockingCount > 0 ? `, ${blockingCount} blocking` : ''})`); - - severityGaps.forEach(gap => { - const blocking = gap.isBlocking ? 
' 🚫' : ''; + const blockingCount = severityGaps.filter((g) => g.isBlocking).length; + + lines.push( + `${icon} ${severity} (${severityGaps.length} gaps${blockingCount > 0 ? `, ${blockingCount} blocking` : ""})`, + ); + + severityGaps.forEach((gap) => { + const blocking = gap.isBlocking ? " 🚫" : ""; lines.push(` • ${gap.message}${blocking}`); - + if (gap.suggestions && gap.suggestions.length > 0) { - lines.push(` Suggested tests: ${gap.suggestions.join(', ')}`); + lines.push(` Suggested tests: ${gap.suggestions.join(", ")}`); } }); - lines.push(''); + lines.push(""); } - - return lines.join('\n'); + + return lines.join("\n"); } - + /** * Generate human-readable message for a coverage gap * @param {TestRequirement} requirement - The requirement @@ -481,26 +515,26 @@ class CoverageEnforcer extends EventEmitter { */ generateGapMessage(requirement) { const objectDesc = `${requirement.schema}.${requirement.name}`; - const operation = requirement.operation?.toLowerCase() || 'change'; - + const operation = requirement.operation?.toLowerCase() || "change"; + switch (requirement.type) { - case 'table': + case "table": return `Table ${objectDesc} (${operation}) lacks test coverage`; - case 'column': + case "column": return `Column ${objectDesc} (${operation}) lacks test coverage`; - case 'function': + case "function": return `Function ${objectDesc} (${operation}) lacks test coverage`; - case 'policy': + case "policy": return `RLS Policy ${objectDesc} (${operation}) lacks test coverage`; - case 'index': + case "index": return `Index ${objectDesc} (${operation}) lacks test coverage`; - case 'trigger': + case "trigger": return `Trigger ${objectDesc} (${operation}) lacks test coverage`; default: return `${requirement.type} ${objectDesc} (${operation}) lacks test coverage`; } } - + /** * Generate test suggestions for a requirement * @param {TestRequirement} requirement - The requirement @@ -508,19 +542,19 @@ class CoverageEnforcer extends EventEmitter { */ generateTestSuggestions(requirement) { const suggestions = this.testSuggestions[requirement.type] || []; - + // Add operation-specific suggestions - if (requirement.operation === 'CREATE') { - if (requirement.type === 'table') { - suggestions.unshift('has_table'); - } else if (requirement.type === 'function') { - suggestions.unshift('has_function'); + if (requirement.operation === "CREATE") { + if (requirement.type === "table") { + suggestions.unshift("has_table"); + } else if (requirement.type === "function") { + suggestions.unshift("has_function"); } } - + return [...new Set(suggestions)]; // Remove duplicates } - + /** * Generate recommendations for improving coverage * @param {Array} gaps - Coverage gaps @@ -530,64 +564,79 @@ class CoverageEnforcer extends EventEmitter { */ generateRecommendations(gaps, comparison, config) { const recommendations = []; - + // Critical gaps recommendation - const criticalGaps = gaps.filter(g => g.severity === GAP_SEVERITY.CRITICAL); + const criticalGaps = gaps.filter( + (g) => g.severity === GAP_SEVERITY.CRITICAL, + ); if (criticalGaps.length > 0) { recommendations.push({ - type: 'CRITICAL_COVERAGE', - priority: 'CRITICAL', + type: "CRITICAL_COVERAGE", + priority: "CRITICAL", message: `${criticalGaps.length} critical operations lack test coverage - add tests before deployment`, - gaps: criticalGaps.length + gaps: criticalGaps.length, }); } - + // High-priority gaps - const highGaps = gaps.filter(g => g.severity === GAP_SEVERITY.HIGH); + const highGaps = gaps.filter((g) => g.severity === GAP_SEVERITY.HIGH); if 
(highGaps.length > 0) { recommendations.push({ - type: 'HIGH_PRIORITY_COVERAGE', - priority: 'HIGH', + type: "HIGH_PRIORITY_COVERAGE", + priority: "HIGH", message: `${highGaps.length} high-priority changes lack test coverage`, - gaps: highGaps.length + gaps: highGaps.length, }); } - + // Coverage threshold recommendations - const coveragePercentage = Math.round((comparison.metRequirements.length / (comparison.metRequirements.length + comparison.unmetRequirements.length)) * 100) || 0; + const coveragePercentage = + Math.round( + (comparison.metRequirements.length / + (comparison.metRequirements.length + + comparison.unmetRequirements.length)) * + 100, + ) || 0; if (coveragePercentage < config.thresholds.overall) { recommendations.push({ - type: 'COVERAGE_THRESHOLD', - priority: 'MEDIUM', + type: "COVERAGE_THRESHOLD", + priority: "MEDIUM", message: `Overall coverage (${coveragePercentage}%) is below threshold (${config.thresholds.overall}%)`, current: coveragePercentage, - required: config.thresholds.overall + required: config.thresholds.overall, }); } - + // Test organization recommendations if (gaps.length > 10) { recommendations.push({ - type: 'TEST_ORGANIZATION', - priority: 'MEDIUM', - message: 'Consider organizing tests by schema or module for better maintainability', - gaps: gaps.length + type: "TEST_ORGANIZATION", + priority: "MEDIUM", + message: + "Consider organizing tests by schema or module for better maintainability", + gaps: gaps.length, }); } - + // Enforcement level recommendations - if (config.level === ENFORCEMENT_LEVELS.LENIENT && criticalGaps.length > 0) { + if ( + config.level === ENFORCEMENT_LEVELS.LENIENT && + criticalGaps.length > 0 + ) { recommendations.push({ - type: 'ENFORCEMENT_LEVEL', - priority: 'MEDIUM', - message: 'Consider using NORMAL or STRICT enforcement for better coverage', - currentLevel: config.level + type: "ENFORCEMENT_LEVEL", + priority: "MEDIUM", + message: + "Consider using NORMAL or STRICT enforcement for better coverage", + currentLevel: config.level, }); } - - return recommendations.sort((a, b) => this.comparePriority(a.priority, b.priority)); + + return recommendations.sort((a, b) => + this.comparePriority(a.priority, b.priority), + ); } - + /** * Calculate detailed coverage statistics * @param {Array} requirements - All requirements @@ -599,36 +648,40 @@ class CoverageEnforcer extends EventEmitter { const stats = { requirements: { total: requirements.length, - byType: this.groupBy(requirements, 'type'), - bySeverity: this.groupBy(requirements, 'severity'), - byOperation: this.groupBy(requirements, 'operation') + byType: this.groupBy(requirements, "type"), + bySeverity: this.groupBy(requirements, "severity"), + byOperation: this.groupBy(requirements, "operation"), }, coverage: { total: coverage.length, - byType: this.groupBy(coverage, 'type'), - bySchema: this.groupBy(coverage, 'schema') + byType: this.groupBy(coverage, "type"), + bySchema: this.groupBy(coverage, "schema"), }, gaps: { total: gaps.length, - bySeverity: this.groupBy(gaps, 'severity'), - blocking: gaps.filter(g => g.isBlocking).length + bySeverity: this.groupBy(gaps, "severity"), + blocking: gaps.filter((g) => g.isBlocking).length, }, percentages: { - overall: Math.round(((requirements.length - gaps.length) / requirements.length) * 100) || 0, - byType: {} - } + overall: + Math.round( + ((requirements.length - gaps.length) / requirements.length) * 100, + ) || 0, + byType: {}, + }, }; - + // Calculate coverage percentages by type - 
Object.keys(stats.requirements.byType).forEach(type => { + Object.keys(stats.requirements.byType).forEach((type) => { const totalByType = stats.requirements.byType[type]; - const gapsByType = gaps.filter(g => g.requirement.type === type).length; - stats.percentages.byType[type] = Math.round(((totalByType - gapsByType) / totalByType) * 100) || 0; + const gapsByType = gaps.filter((g) => g.requirement.type === type).length; + stats.percentages.byType[type] = + Math.round(((totalByType - gapsByType) / totalByType) * 100) || 0; }); - + return stats; } - + /** * Filter requirements based on configuration * @param {Array} requirements - Requirements to filter @@ -636,21 +689,21 @@ class CoverageEnforcer extends EventEmitter { * @returns {Array} Filtered requirements */ filterRequirements(requirements, config) { - return requirements.filter(req => { + return requirements.filter((req) => { // Filter ignored schemas if (config.ignoredSchemas.includes(req.schema)) { return false; } - + // Filter ignored tables - if (req.type === 'table' && config.ignoredTables.includes(req.name)) { + if (req.type === "table" && config.ignoredTables.includes(req.name)) { return false; } - + return true; }); } - + /** * Filter coverage based on configuration * @param {Array} coverage - Coverage to filter @@ -658,45 +711,55 @@ class CoverageEnforcer extends EventEmitter { * @returns {Array} Filtered coverage */ filterCoverage(coverage, config) { - return coverage.filter(cov => { + return coverage.filter((cov) => { // Filter ignored schemas if (config.ignoredSchemas.includes(cov.schema)) { return false; } - + return true; }); } - + /** * Helper methods */ - + groupBy(array, property) { return array.reduce((acc, item) => { - const key = item[property] || 'unknown'; + const key = item[property] || "unknown"; acc[key] = (acc[key] || 0) + 1; return acc; }, {}); } - + getSeverityIcon(severity) { switch (severity) { - case GAP_SEVERITY.CRITICAL: return '🔴'; - case GAP_SEVERITY.HIGH: return '🟠'; - case GAP_SEVERITY.MEDIUM: return '🟡'; - case GAP_SEVERITY.LOW: return '🟢'; - default: return '⚪'; + case GAP_SEVERITY.CRITICAL: + return "🔴"; + case GAP_SEVERITY.HIGH: + return "🟠"; + case GAP_SEVERITY.MEDIUM: + return "🟡"; + case GAP_SEVERITY.LOW: + return "🟢"; + default: + return "⚪"; } } - + compareSeverity(severity1, severity2) { - const levels = [GAP_SEVERITY.LOW, GAP_SEVERITY.MEDIUM, GAP_SEVERITY.HIGH, GAP_SEVERITY.CRITICAL]; + const levels = [ + GAP_SEVERITY.LOW, + GAP_SEVERITY.MEDIUM, + GAP_SEVERITY.HIGH, + GAP_SEVERITY.CRITICAL, + ]; return levels.indexOf(severity1) - levels.indexOf(severity2); } - + comparePriority(priority1, priority2) { - const priorities = ['LOW', 'MEDIUM', 'HIGH', 'CRITICAL']; + const priorities = ["LOW", "MEDIUM", "HIGH", "CRITICAL"]; return priorities.indexOf(priority2) - priorities.indexOf(priority1); // Reverse order (highest first) } } @@ -704,5 +767,5 @@ class CoverageEnforcer extends EventEmitter { module.exports = { CoverageEnforcer, ENFORCEMENT_LEVELS, - GAP_SEVERITY -}; \ No newline at end of file + GAP_SEVERITY, +}; diff --git a/src/lib/testing/CoverageVisualizer.js b/src/lib/testing/CoverageVisualizer.js index 791b536..090293b 100644 --- a/src/lib/testing/CoverageVisualizer.js +++ b/src/lib/testing/CoverageVisualizer.js @@ -1,11 +1,11 @@ /** * CoverageVisualizer - CLI visualization for test coverage status - * + * * Creates ASCII-based visualizations with Star Trek LCARS-style theming * for terminal output of coverage data, gaps, and progress indicators. 
*/ -const chalk = require('chalk'); +const chalk = require("chalk"); /** * @typedef {Object} CoverageData @@ -38,49 +38,49 @@ class CoverageVisualizer { // LCARS color scheme this.colors = { // Primary LCARS colors - orange: chalk.rgb(255, 153, 0), // LCARS Orange - blue: chalk.rgb(153, 204, 255), // LCARS Light Blue - purple: chalk.rgb(204, 153, 255), // LCARS Purple - red: chalk.rgb(255, 102, 102), // LCARS Red - + orange: chalk.rgb(255, 153, 0), // LCARS Orange + blue: chalk.rgb(153, 204, 255), // LCARS Light Blue + purple: chalk.rgb(204, 153, 255), // LCARS Purple + red: chalk.rgb(255, 102, 102), // LCARS Red + // Coverage status colors covered: chalk.green, uncovered: chalk.red, warning: chalk.yellow, - + // UI elements - frame: chalk.rgb(0, 153, 255), // Frame blue - accent: chalk.rgb(255, 204, 0), // Accent yellow + frame: chalk.rgb(0, 153, 255), // Frame blue + accent: chalk.rgb(255, 204, 0), // Accent yellow text: chalk.white, - dim: chalk.gray + dim: chalk.gray, }; - + // LCARS-style box drawing characters this.chars = { - horizontal: '═', - vertical: '║', - topLeft: '╔', - topRight: '╗', - bottomLeft: '╚', - bottomRight: '╝', - cross: '╬', - teeDown: '╦', - teeUp: '╩', - teeLeft: '╣', - teeRight: '╠', - + horizontal: "═", + vertical: "║", + topLeft: "╔", + topRight: "╗", + bottomLeft: "╚", + bottomRight: "╝", + cross: "╬", + teeDown: "╦", + teeUp: "╩", + teeLeft: "╣", + teeRight: "╠", + // Progress bar characters - filled: '█', - empty: '░', - partial: '▓', - + filled: "█", + empty: "░", + partial: "▓", + // Matrix characters - covered: '●', - uncovered: '○', - partial: '◐' + covered: "●", + uncovered: "○", + partial: "◐", }; } - + /** * Display comprehensive coverage status * @param {CoverageData} coverage - Coverage data @@ -90,108 +90,158 @@ class CoverageVisualizer { this._displayHeader(); this._displayOverallStatus(coverage); this._displayCategoryBreakdown(coverage.categories); - + if (gaps && gaps.length > 0) { this._displayGaps(gaps); } - + this._displaySummary(coverage, gaps); this._displayFooter(); } - + /** * Create and display a coverage matrix visualization * @param {MatrixData} data - Matrix data structure */ formatMatrix(data) { - console.log(this.colors.frame('\n╔══ COVERAGE MATRIX ══════════════════════════════════════╗')); - + console.log( + this.colors.frame( + "\n╔══ COVERAGE MATRIX ══════════════════════════════════════╗", + ), + ); + if (!data.rows || !data.columns || !data.matrix) { - console.log(this.colors.red(' Invalid matrix data provided')); - console.log(this.colors.frame('╚═════════════════════════════════════════════════════════╝\n')); + console.log(this.colors.red(" Invalid matrix data provided")); + console.log( + this.colors.frame( + "╚═════════════════════════════════════════════════════════╝\n", + ), + ); return; } - + // Calculate column widths - const maxRowNameLength = Math.max(...data.rows.map(r => r.length), 8); - const colWidth = Math.max(3, Math.max(...data.columns.map(c => c.length))); - + const maxRowNameLength = Math.max(...data.rows.map((r) => r.length), 8); + const colWidth = Math.max( + 3, + Math.max(...data.columns.map((c) => c.length)), + ); + // Header row with column names - const headerSpacing = ' '.repeat(maxRowNameLength + 2); - const headerRow = headerSpacing + data.columns - .map(col => this.colors.blue(col.padEnd(colWidth))) - .join(' '); - console.log('║ ' + headerRow + ' ║'); - + const headerSpacing = " ".repeat(maxRowNameLength + 2); + const headerRow = + headerSpacing + + data.columns + .map((col) => 
this.colors.blue(col.padEnd(colWidth))) + .join(" "); + console.log("║ " + headerRow + " ║"); + // Separator line - const separatorLine = '║ ' + '─'.repeat(maxRowNameLength) + '─┼─' + - data.columns.map(() => '─'.repeat(colWidth)).join('─┼─') + ' ║'; + const separatorLine = + "║ " + + "─".repeat(maxRowNameLength) + + "─┼─" + + data.columns.map(() => "─".repeat(colWidth)).join("─┼─") + + " ║"; console.log(this.colors.frame(separatorLine)); - + // Data rows data.matrix.forEach((row, rowIndex) => { const rowName = data.rows[rowIndex].padEnd(maxRowNameLength); - const cells = row.map((covered, colIndex) => { - const char = covered ? this.chars.covered : this.chars.uncovered; - const color = covered ? this.colors.covered : this.colors.uncovered; - return color(char.padEnd(colWidth)); - }).join(' '); - - console.log('║ ' + this.colors.text(rowName) + ' │ ' + cells + ' ║'); + const cells = row + .map((covered, colIndex) => { + const char = covered ? this.chars.covered : this.chars.uncovered; + const color = covered ? this.colors.covered : this.colors.uncovered; + return color(char.padEnd(colWidth)); + }) + .join(" "); + + console.log("║ " + this.colors.text(rowName) + " │ " + cells + " ║"); }); - + // Legend - console.log(this.colors.frame('╠═══════════════════════════════════════════════════════════╣')); - console.log('║ ' + this.colors.covered(this.chars.covered) + ' Covered ' + - this.colors.uncovered(this.chars.uncovered) + ' Not Covered' + - ' '.repeat(39) + ' ║'); - console.log(this.colors.frame('╚═════════════════════════════════════════════════════════════╝\n')); + console.log( + this.colors.frame( + "╠═══════════════════════════════════════════════════════════╣", + ), + ); + console.log( + "║ " + + this.colors.covered(this.chars.covered) + + " Covered " + + this.colors.uncovered(this.chars.uncovered) + + " Not Covered" + + " ".repeat(39) + + " ║", + ); + console.log( + this.colors.frame( + "╚═════════════════════════════════════════════════════════════╝\n", + ), + ); } - + /** * Display progress indicator during analysis * @param {number} current - Current progress * @param {number} total - Total items to process * @param {string} [operation] - Description of current operation */ - showProgress(current, total, operation = 'Analyzing') { + showProgress(current, total, operation = "Analyzing") { const percentage = Math.round((current / total) * 100); const barWidth = 30; const filledWidth = Math.round((current / total) * barWidth); - + // Create progress bar const filled = this.chars.filled.repeat(filledWidth); const empty = this.chars.empty.repeat(barWidth - filledWidth); const bar = this.colors.blue(filled) + this.colors.dim(empty); - + // Progress line with LCARS styling - const progressLine = - this.colors.orange('█ ') + - this.colors.text(operation) + ': [' + bar + '] ' + - this.colors.accent(`${percentage}%`) + + const progressLine = + this.colors.orange("█ ") + + this.colors.text(operation) + + ": [" + + bar + + "] " + + this.colors.accent(`${percentage}%`) + this.colors.dim(` (${current}/${total})`); - + // Use carriage return to overwrite previous line - process.stdout.write('\r' + progressLine + ' '.repeat(10)); - + process.stdout.write("\r" + progressLine + " ".repeat(10)); + // New line when complete if (current === total) { - console.log(''); + console.log(""); } } - + /** * Display LCARS-style header * @private */ _displayHeader() { - console.log(this.colors.frame('\n╔══════════════════════════════════════════════════════════╗')); - console.log('║ ' + 
this.colors.orange('█████') + ' ' + - this.colors.text('DATABASE COVERAGE ANALYSIS') + ' ' + - this.colors.orange('█████') + ' ║'); - console.log(this.colors.frame('╠══════════════════════════════════════════════════════════╣')); + console.log( + this.colors.frame( + "\n╔══════════════════════════════════════════════════════════╗", + ), + ); + console.log( + "║ " + + this.colors.orange("█████") + + " " + + this.colors.text("DATABASE COVERAGE ANALYSIS") + + " " + + this.colors.orange("█████") + + " ║", + ); + console.log( + this.colors.frame( + "╠══════════════════════════════════════════════════════════╣", + ), + ); } - + /** * Display overall coverage status with progress bar * @private @@ -200,33 +250,46 @@ class CoverageVisualizer { const percentage = Math.round(coverage.percentage); const barWidth = 40; const filledWidth = Math.round((percentage / 100) * barWidth); - + // Color based on coverage level let statusColor = this.colors.covered; - let statusText = 'OPTIMAL'; - + let statusText = "OPTIMAL"; + if (percentage < 50) { statusColor = this.colors.red; - statusText = 'CRITICAL'; + statusText = "CRITICAL"; } else if (percentage < 75) { statusColor = this.colors.warning; - statusText = 'WARNING'; + statusText = "WARNING"; } else if (percentage < 90) { statusColor = this.colors.blue; - statusText = 'ACCEPTABLE'; + statusText = "ACCEPTABLE"; } - + // Create visual progress bar const filled = this.chars.filled.repeat(filledWidth); const empty = this.chars.empty.repeat(barWidth - filledWidth); const bar = statusColor(filled) + this.colors.dim(empty); - - console.log('║ Overall Coverage: [' + bar + '] ' + - statusColor(`${percentage}%`) + ' ' + statusColor(statusText) + ' ║'); - console.log('║ ' + this.colors.dim(`Items: ${coverage.covered}/${coverage.total} covered`) + - ' '.repeat(35) + ' ║'); + + console.log( + "║ Overall Coverage: [" + + bar + + "] " + + statusColor(`${percentage}%`) + + " " + + statusColor(statusText) + + " ║", + ); + console.log( + "║ " + + this.colors.dim( + `Items: ${coverage.covered}/${coverage.total} covered`, + ) + + " ".repeat(35) + + " ║", + ); } - + /** * Display coverage breakdown by category * @private @@ -235,44 +298,72 @@ class CoverageVisualizer { if (!categories || Object.keys(categories).length === 0) { return; } - - console.log(this.colors.frame('╠══════════════════════════════════════════════════════════╣')); - console.log('║ ' + this.colors.blue('COVERAGE BY CATEGORY') + - ' '.repeat(37) + ' ║'); - console.log(this.colors.frame('╠══════════════════════════════════════════════════════════╣')); - + + console.log( + this.colors.frame( + "╠══════════════════════════════════════════════════════════╣", + ), + ); + console.log( + "║ " + this.colors.blue("COVERAGE BY CATEGORY") + " ".repeat(37) + " ║", + ); + console.log( + this.colors.frame( + "╠══════════════════════════════════════════════════════════╣", + ), + ); + Object.entries(categories).forEach(([category, percentage]) => { const barWidth = 20; const filledWidth = Math.round((percentage / 100) * barWidth); - + // Color based on percentage - const color = percentage >= 90 ? this.colors.covered : - percentage >= 75 ? this.colors.warning : - this.colors.uncovered; - + const color = + percentage >= 90 + ? this.colors.covered + : percentage >= 75 + ? 
this.colors.warning + : this.colors.uncovered; + const filled = this.chars.filled.repeat(filledWidth); const empty = this.chars.empty.repeat(barWidth - filledWidth); const bar = color(filled) + this.colors.dim(empty); - + const categoryName = category.padEnd(12); const percentageText = `${Math.round(percentage)}%`.padStart(4); - - console.log('║ ' + this.colors.text(categoryName) + - ' [' + bar + '] ' + - color(percentageText) + ' '.repeat(19) + ' ║'); + + console.log( + "║ " + + this.colors.text(categoryName) + + " [" + + bar + + "] " + + color(percentageText) + + " ".repeat(19) + + " ║", + ); }); } - + /** * Display coverage gaps with highlighting * @private */ _displayGaps(gaps) { - console.log(this.colors.frame('╠══════════════════════════════════════════════════════════╣')); - console.log('║ ' + this.colors.red('COVERAGE GAPS DETECTED') + - ' '.repeat(35) + ' ║'); - console.log(this.colors.frame('╠══════════════════════════════════════════════════════════╣')); - + console.log( + this.colors.frame( + "╠══════════════════════════════════════════════════════════╣", + ), + ); + console.log( + "║ " + this.colors.red("COVERAGE GAPS DETECTED") + " ".repeat(35) + " ║", + ); + console.log( + this.colors.frame( + "╠══════════════════════════════════════════════════════════╣", + ), + ); + // Group gaps by category const groupedGaps = gaps.reduce((acc, gap) => { if (!acc[gap.category]) { @@ -281,68 +372,96 @@ class CoverageVisualizer { acc[gap.category].push(gap); return acc; }, {}); - + Object.entries(groupedGaps).forEach(([category, categoryGaps]) => { - console.log('║ ' + this.colors.warning(`${category.toUpperCase()}:`) + - ' '.repeat(55 - category.length) + ' ║'); - - categoryGaps.slice(0, 5).forEach(gap => { // Limit to first 5 per category - const indicator = this.colors.red('●'); - const name = gap.name.length > 40 ? gap.name.substring(0, 37) + '...' : gap.name; - const reason = gap.reason ? ` (${gap.reason})` : ''; + console.log( + "║ " + + this.colors.warning(`${category.toUpperCase()}:`) + + " ".repeat(55 - category.length) + + " ║", + ); + + categoryGaps.slice(0, 5).forEach((gap) => { + // Limit to first 5 per category + const indicator = this.colors.red("●"); + const name = + gap.name.length > 40 ? gap.name.substring(0, 37) + "..." : gap.name; + const reason = gap.reason ? ` (${gap.reason})` : ""; const maxReasonLength = Math.max(0, 54 - name.length - reason.length); - const truncatedReason = reason.length > maxReasonLength ? - reason.substring(0, maxReasonLength - 3) + '...' : reason; - - console.log('║ ' + indicator + ' ' + - this.colors.text(name) + - this.colors.dim(truncatedReason) + - ' '.repeat(Math.max(0, 54 - name.length - truncatedReason.length)) + ' ║'); + const truncatedReason = + reason.length > maxReasonLength + ? reason.substring(0, maxReasonLength - 3) + "..." + : reason; + + console.log( + "║ " + + indicator + + " " + + this.colors.text(name) + + this.colors.dim(truncatedReason) + + " ".repeat(Math.max(0, 54 - name.length - truncatedReason.length)) + + " ║", + ); }); - + if (categoryGaps.length > 5) { - console.log('║ ' + this.colors.dim(`... and ${categoryGaps.length - 5} more`) + - ' '.repeat(45) + ' ║'); + console.log( + "║ " + + this.colors.dim(`... 
and ${categoryGaps.length - 5} more`) + + " ".repeat(45) + + " ║", + ); } }); } - + /** * Display summary and recommendations * @private */ _displaySummary(coverage, gaps) { - console.log(this.colors.frame('╠══════════════════════════════════════════════════════════╣')); - console.log('║ ' + this.colors.blue('ANALYSIS SUMMARY') + - ' '.repeat(41) + ' ║'); - console.log(this.colors.frame('╠══════════════════════════════════════════════════════════╣')); - + console.log( + this.colors.frame( + "╠══════════════════════════════════════════════════════════╣", + ), + ); + console.log( + "║ " + this.colors.blue("ANALYSIS SUMMARY") + " ".repeat(41) + " ║", + ); + console.log( + this.colors.frame( + "╠══════════════════════════════════════════════════════════╣", + ), + ); + // Status assessment const percentage = Math.round(coverage.percentage); - let recommendation = ''; + let recommendation = ""; let priorityColor = this.colors.text; - + if (percentage >= 90) { - recommendation = 'Coverage is excellent. Maintain current test standards.'; + recommendation = + "Coverage is excellent. Maintain current test standards."; priorityColor = this.colors.covered; } else if (percentage >= 75) { - recommendation = 'Good coverage. Consider adding tests for critical gaps.'; + recommendation = + "Good coverage. Consider adding tests for critical gaps."; priorityColor = this.colors.blue; } else if (percentage >= 50) { - recommendation = 'Moderate coverage. Focus on high-priority areas first.'; + recommendation = "Moderate coverage. Focus on high-priority areas first."; priorityColor = this.colors.warning; } else { - recommendation = 'Low coverage detected. Immediate attention required.'; + recommendation = "Low coverage detected. Immediate attention required."; priorityColor = this.colors.red; } - + // Split long recommendations into multiple lines const maxLineLength = 55; - const words = recommendation.split(' '); + const words = recommendation.split(" "); const lines = []; - let currentLine = ''; - - words.forEach(word => { + let currentLine = ""; + + words.forEach((word) => { if ((currentLine + word).length <= maxLineLength) { currentLine = currentLine ? `${currentLine} ${word}` : word; } else { @@ -351,26 +470,38 @@ class CoverageVisualizer { } }); if (currentLine) lines.push(currentLine); - - lines.forEach(line => { - console.log('║ ' + priorityColor(line) + - ' '.repeat(Math.max(0, 57 - line.length)) + ' ║'); + + lines.forEach((line) => { + console.log( + "║ " + + priorityColor(line) + + " ".repeat(Math.max(0, 57 - line.length)) + + " ║", + ); }); - + if (gaps && gaps.length > 0) { - console.log('║ ' + this.colors.dim(`Priority: Address ${gaps.length} identified gaps`) + - ' '.repeat(25) + ' ║'); + console.log( + "║ " + + this.colors.dim(`Priority: Address ${gaps.length} identified gaps`) + + " ".repeat(25) + + " ║", + ); } } - + /** * Display LCARS-style footer * @private */ _displayFooter() { - console.log(this.colors.frame('╚══════════════════════════════════════════════════════════╝')); - console.log(''); + console.log( + this.colors.frame( + "╚══════════════════════════════════════════════════════════╝", + ), + ); + console.log(""); } } -module.exports = CoverageVisualizer; \ No newline at end of file +module.exports = CoverageVisualizer; diff --git a/src/lib/testing/MemoryMonitor.js b/src/lib/testing/MemoryMonitor.js index 588efd5..822ef1f 100644 --- a/src/lib/testing/MemoryMonitor.js +++ b/src/lib/testing/MemoryMonitor.js @@ -1,9 +1,9 @@ /** * Memory monitoring utilities for D.A.T.A. 
CLI - * + * * Provides static methods for monitoring and managing memory usage * to prevent OOM errors when processing large datasets. - * + * * @class MemoryMonitor * @author D.A.T.A. Engineering Team */ @@ -45,7 +45,7 @@ class MemoryMonitor { * @static */ static shouldTriggerCleanup(currentMB, maxMB) { - return currentMB > (maxMB * 0.8); // Trigger at 80% of max + return currentMB > maxMB * 0.8; // Trigger at 80% of max } /** @@ -54,9 +54,11 @@ class MemoryMonitor { * @param {Object} logger - Logger instance (optional) * @static */ - static logMemoryStats(label = 'Memory', logger = console) { + static logMemoryStats(label = "Memory", logger = console) { const stats = MemoryMonitor.getMemoryUsage(); - logger.log(`[${label}] Heap: ${stats.heapUsed}/${stats.heapTotal} MB, RSS: ${stats.rss} MB`); + logger.log( + `[${label}] Heap: ${stats.heapUsed}/${stats.heapTotal} MB, RSS: ${stats.rss} MB`, + ); } /** @@ -72,4 +74,4 @@ class MemoryMonitor { } } -export default MemoryMonitor; \ No newline at end of file +export default MemoryMonitor; diff --git a/src/lib/testing/README-TestPatternLibrary.md b/src/lib/testing/README-TestPatternLibrary.md index f7225f9..92f8b92 100644 --- a/src/lib/testing/README-TestPatternLibrary.md +++ b/src/lib/testing/README-TestPatternLibrary.md @@ -30,9 +30,9 @@ TestTemplateGenerator ```javascript const generator = new TestTemplateGenerator(); const requirement = { - type: 'table', - name: 'users', - schema: 'public' + type: "table", + name: "users", + schema: "public", }; const template = generator.generateTemplate(requirement); ``` @@ -42,24 +42,26 @@ const template = generator.generateTemplate(requirement); ```javascript const generator = new TestTemplateGenerator(); const requirement = { - type: 'rls', - name: 'users', - schema: 'public', + type: "rls", + name: "users", + schema: "public", metadata: { policies: [ - { name: 'users_select_own', commands: ['SELECT'] }, - { name: 'users_update_own', commands: ['UPDATE'] } - ] - } + { name: "users_select_own", commands: ["SELECT"] }, + { name: "users_update_own", commands: ["UPDATE"] }, + ], + }, }; // Generate enhanced template with additional patterns const enhancedTemplate = generator.generateEnhancedTemplate( - requirement, - ['privilege_escalation_test'] // Additional patterns beyond recommended ones + requirement, + ["privilege_escalation_test"], // Additional patterns beyond recommended ones ); -console.log(`Enhanced template uses ${enhancedTemplate.metadata.patternsUsed.length} patterns`); +console.log( + `Enhanced template uses ${enhancedTemplate.metadata.patternsUsed.length} patterns`, +); ``` ### Direct Pattern Access @@ -68,15 +70,15 @@ console.log(`Enhanced template uses ${enhancedTemplate.metadata.patternsUsed.len const generator = new TestTemplateGenerator(); // Get recommended patterns for a test type -const rlsPatterns = generator.getRecommendedPatterns('rls'); +const rlsPatterns = generator.getRecommendedPatterns("rls"); console.log(`RLS tests use ${rlsPatterns.length} patterns`); // Get patterns by category -const securityPatterns = generator.getPatternsByCategory('security_testing'); +const securityPatterns = generator.getPatternsByCategory("security_testing"); // Render a specific pattern with custom variables -const variables = { schema: 'public', tableName: 'posts' }; -const rendered = generator.renderPattern('table_exists_basic', variables); +const variables = { schema: "public", tableName: "posts" }; +const rendered = generator.renderPattern("table_exists_basic", variables); ``` ### 
Documentation Generation @@ -85,7 +87,7 @@ const rendered = generator.renderPattern('table_exists_basic', variables); const generator = new TestTemplateGenerator(); // Generate best practices for a test type -const bestPractices = generator.generateBestPracticesDoc('rls'); +const bestPractices = generator.generateBestPracticesDoc("rls"); // Generate complete pattern library documentation const libraryDoc = generator.generatePatternLibraryDoc(); @@ -97,26 +99,31 @@ const examples = generator.generateUsageExamples(); ## Available Patterns ### Data Validation Patterns + - **table_exists_basic** - Basic table existence validation - **column_structure_validation** - Comprehensive column structure validation - **foreign_key_relationship** - Foreign key relationship validation with referential integrity - **constraint_validation** - CHECK constraint validation with boundary testing ### Security Testing Patterns + - **rls_enablement_check** - Row Level Security enablement validation - **policy_existence_check** - RLS policy existence and configuration validation - **multi_user_access_test** - Multi-user access pattern testing data isolation - **privilege_escalation_test** - Test that privilege escalation is properly prevented ### Performance Testing Patterns + - **index_usage_verification** - Verify that indexes are used by query plans - **function_performance_test** - Function execution time and resource usage validation ### Error Handling Patterns + - **constraint_violation_handling** - Test proper constraint violation error handling - **function_exception_handling** - Test function exception handling and error recovery ### Multi-User Scenario Patterns + - **concurrent_modification_test** - Test concurrent data modification scenarios - **data_isolation_verification** - Verify data isolation between different user contexts @@ -124,20 +131,21 @@ const examples = generator.generateUsageExamples(); ### Test Types and Recommended Patterns -| Test Type | Recommended Patterns | -|-------------|---------------------| -| **RPC** | 5 patterns including multi-user access and privilege escalation tests | -| **RLS** | 5 patterns focusing on security and data isolation | -| **Trigger** | 4 patterns including error handling and concurrent modification | -| **Constraint** | 3 patterns focusing on validation and error handling | -| **Function** | 3 patterns including performance and exception handling | -| **Table** | 5 patterns covering structure, constraints, and indexes | -| **Column** | 3 patterns focusing on structure validation | -| **Index** | 2 patterns covering usage verification and performance | +| Test Type | Recommended Patterns | +| -------------- | --------------------------------------------------------------------- | +| **RPC** | 5 patterns including multi-user access and privilege escalation tests | +| **RLS** | 5 patterns focusing on security and data isolation | +| **Trigger** | 4 patterns including error handling and concurrent modification | +| **Constraint** | 3 patterns focusing on validation and error handling | +| **Function** | 3 patterns including performance and exception handling | +| **Table** | 5 patterns covering structure, constraints, and indexes | +| **Column** | 3 patterns focusing on structure validation | +| **Index** | 2 patterns covering usage verification and performance | ## Pattern Structure Each pattern includes: + - **Name** - Unique identifier - **Category** - One of the 5 main categories - **Description** - Human-readable description @@ -152,17 +160,20 @@ Each 
pattern includes: ## Best Practices ### When to Use Enhanced Templates + - **Complex Requirements**: Use `generateEnhancedTemplate()` for requirements with rich metadata - **Security-Critical Tests**: Always use enhanced templates for RLS and security tests - **Performance Tests**: Use enhanced templates for function and index tests - **Multi-User Scenarios**: Use enhanced templates when testing user isolation ### When to Use Basic Templates + - **Simple Existence Tests**: Use `generateTemplate()` for basic table/column existence - **Quick Prototyping**: Use basic templates for initial test scaffolding - **Legacy Compatibility**: Use basic templates to maintain existing test structure ### Pattern Selection Guidelines + 1. **Start with Recommended Patterns**: Use `getRecommendedPatterns(testType)` first 2. **Add Category-Specific Patterns**: Use `getPatternsByCategory()` for specialized needs 3. **Consider Difficulty Level**: Match pattern difficulty to your test complexity needs @@ -179,12 +190,14 @@ The TestPatternLibrary is designed to be fully backward compatible: ## Extension and Customization ### Adding New Patterns + 1. Add pattern definition in `TestPatternLibrary.initializePatterns()` 2. Update category assignments 3. Add to test type mappings in `initializeTestTypeMapping()` 4. Update documentation and examples ### Creating Custom Categories + 1. Add category in `initializePatternCategories()` 2. Define category metadata and best practices 3. Assign patterns to the new category @@ -200,6 +213,7 @@ The TestPatternLibrary is designed to be fully backward compatible: ## Future Enhancements Planned improvements include: + - **Dynamic Pattern Loading**: Load patterns from external configuration - **Pattern Composition**: Combine multiple patterns into composite templates - **AI-Assisted Pattern Generation**: Generate patterns based on schema analysis @@ -209,11 +223,13 @@ Planned improvements include: ## Debugging and Troubleshooting ### Common Issues + - **Missing Variables**: Ensure all pattern placeholders have corresponding variables - **Pattern Conflicts**: Check for overlapping patterns in enhanced generation - **Template Formatting**: Verify pgTAP structure formatting is correct ### Debug Methods + ```javascript const generator = new TestTemplateGenerator(); @@ -222,19 +238,20 @@ console.log(generator.getAvailablePatterns()); // Verify pattern rendering try { - const rendered = generator.renderPattern('pattern_name', variables); + const rendered = generator.renderPattern("pattern_name", variables); } catch (error) { - console.log('Pattern rendering failed:', error.message); + console.log("Pattern rendering failed:", error.message); } // Inspect enhanced template metadata const template = generator.generateEnhancedTemplate(requirement); -console.log('Patterns used:', template.metadata.patternsUsed); +console.log("Patterns used:", template.metadata.patternsUsed); ``` ## Contributing When contributing new patterns: + 1. Follow existing pattern structure and naming conventions 2. Include comprehensive documentation and examples 3. Test pattern rendering with various variable combinations @@ -243,4 +260,4 @@ When contributing new patterns: --- -For more information, see the complete pattern library documentation generated by `generatePatternLibraryDoc()`. \ No newline at end of file +For more information, see the complete pattern library documentation generated by `generatePatternLibraryDoc()`. 
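For reference, the `${placeholder}` substitution that `renderPattern()` performs is a plain per-key string replacement, which is why the "Missing Variables" issue in the Debugging section surfaces as literal `${...}` text in the generated SQL. The sketch below mirrors that behavior in isolation; the pattern object shape follows the entries in `TestPatternLibrary.js`, while the helper name and the unfilled-placeholder warning are illustrative additions, not the library's API:

```javascript
// Minimal sketch of renderPattern()-style placeholder substitution.
// Pattern shape mirrors TestPatternLibrary entries; renderSketch() and the
// unfilled-placeholder warning are illustrative only.
const pattern = {
  name: "table_exists_basic",
  sqlTemplate: [
    "-- Test: Table exists",
    "RETURN NEXT tap.has_table(",
    "  '${schema}',",
    "  '${tableName}',",
    "  'Table ${tableName} exists in ${schema} schema'",
    ");",
  ].join("\n"),
  placeholders: ["schema", "tableName"],
};

function renderSketch(pattern, variables) {
  let rendered = pattern.sqlTemplate;
  for (const [key, value] of Object.entries(variables)) {
    // Same substitution the library uses: replace every ${key} occurrence.
    rendered = rendered.replace(new RegExp(`\\$\\{${key}\\}`, "g"), value);
  }
  // Placeholders left unfilled remain verbatim in the output, which is the
  // usual cause of "Missing Variables" problems in generated tests.
  const unfilled = pattern.placeholders.filter((p) =>
    rendered.includes(`\${${p}}`),
  );
  if (unfilled.length > 0) {
    console.warn("Unfilled placeholders:", unfilled.join(", "));
  }
  return rendered;
}

console.log(renderSketch(pattern, { schema: "public", tableName: "users" }));
```

In the library itself, requesting an unknown pattern name raises a `ValidationError` rather than returning partially rendered SQL (see `renderPattern()` in `TestPatternLibrary.js`).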
diff --git a/src/lib/testing/StreamingCoverageDatabase.js b/src/lib/testing/StreamingCoverageDatabase.js index f8e20d6..d9aa3e1 100644 --- a/src/lib/testing/StreamingCoverageDatabase.js +++ b/src/lib/testing/StreamingCoverageDatabase.js @@ -1,9 +1,9 @@ /** * Streaming coverage database implementation for D.A.T.A. CLI - * + * * Provides memory-efficient storage for large coverage datasets with * overflow protection and optional compression. - * + * * @class StreamingCoverageDatabase * @author D.A.T.A. Engineering Team */ @@ -20,9 +20,9 @@ class StreamingCoverageDatabase { maxObjectsPerType: options.maxObjectsPerType || 10000, enableCompression: options.enableCompression || true, batchSize: options.batchSize || 100, - ...options + ...options, }; - + this.objectCounts = new Map(); this.compressed = new Map(); this.overflow = new Set(); // Track overflowed object types @@ -38,7 +38,7 @@ class StreamingCoverageDatabase { */ addObject(objectType, objectName, data) { const count = this.objectCounts.get(objectType) || 0; - + if (count >= this.options.maxObjectsPerType) { this.overflow.add(objectType); return false; // Reject to prevent memory overflow @@ -49,14 +49,17 @@ class StreamingCoverageDatabase { this.data.set(objectType, new Map()); } this.data.get(objectType).set(objectName, data); - + this.objectCounts.set(objectType, count + 1); - + // Auto-compress if enabled and threshold reached - if (this.options.enableCompression && count > this.options.maxObjectsPerType * 0.7) { + if ( + this.options.enableCompression && + count > this.options.maxObjectsPerType * 0.7 + ) { this.compress(objectType); } - + return true; } @@ -88,11 +91,11 @@ class StreamingCoverageDatabase { */ compress(objectType) { if (this.compressed.has(objectType)) return; - + // TODO: Implement actual compression logic // For now, just mark as compressed this.compressed.set(objectType, Date.now()); - + // In production, you might: // 1. Convert Map to more compact structure // 2. Remove redundant data @@ -105,11 +108,14 @@ class StreamingCoverageDatabase { */ getStats() { return { - totalObjects: Array.from(this.objectCounts.values()).reduce((a, b) => a + b, 0), + totalObjects: Array.from(this.objectCounts.values()).reduce( + (a, b) => a + b, + 0, + ), objectsByType: Object.fromEntries(this.objectCounts), compressedTypes: Array.from(this.compressed.keys()), overflowedTypes: Array.from(this.overflow), - memoryUsage: this._estimateMemoryUsage() + memoryUsage: this._estimateMemoryUsage(), }; } @@ -150,4 +156,4 @@ class StreamingCoverageDatabase { } } -export default StreamingCoverageDatabase; \ No newline at end of file +export default StreamingCoverageDatabase; diff --git a/src/lib/testing/TestCoverageOrchestrator.js b/src/lib/testing/TestCoverageOrchestrator.js index d911319..05451f5 100644 --- a/src/lib/testing/TestCoverageOrchestrator.js +++ b/src/lib/testing/TestCoverageOrchestrator.js @@ -1,20 +1,27 @@ /** * Test Coverage Orchestrator for D.A.T.A. - * + * * Integrates test coverage enforcement into the migration workflow. * Coordinates between TestRequirementAnalyzer, pgTAPTestScanner, and CoverageEnforcer * to ensure adequate test coverage before allowing database deployments. 
- * + * * @module TestCoverageOrchestrator */ -const { EventEmitter } = require('events'); -const TestRequirementAnalyzer = require('./TestRequirementAnalyzer'); -const pgTAPTestScanner = require('./pgTAPTestScanner'); -const CoverageEnforcer = require('./CoverageEnforcer'); -const TestTemplateGenerator = require('./TestTemplateGenerator'); -const path = require('path'); -const fs = require('fs').promises; +const { EventEmitter } = require("events"); +const TestRequirementAnalyzer = require("./TestRequirementAnalyzer"); +const pgTAPTestScanner = require("./pgTAPTestScanner"); +const CoverageEnforcer = require("./CoverageEnforcer"); +const TestTemplateGenerator = require("./TestTemplateGenerator"); +const path = require("path"); +const fs = require("fs").promises; + +const { + TestCoverageError, + ValidationError, + CoverageEnforcementError, + ParsingError, +} = require("./errors"); /** * @typedef {Object} CoverageCheckResult @@ -24,6 +31,7 @@ const fs = require('fs').promises; * @property {Array} suggestions - Test suggestions for gaps * @property {boolean} shouldBlock - Whether deployment should be blocked * @property {string} [bypassReason] - Reason if coverage was bypassed + * @property {Array} [templates] - Generated templates for gaps */ /** @@ -34,7 +42,7 @@ const fs = require('fs').promises; * @property {boolean} [generateTemplates] - Whether to generate test templates for gaps * @property {Object} [thresholds] - Coverage thresholds by object type * @property {boolean} [allowBypass] - Whether to allow coverage bypass - * @property {Function} [logger] - Logger function + * @property {Console|{info:Function,warn:Function,error:Function}} [logger] - Logger */ class TestCoverageOrchestrator extends EventEmitter { @@ -43,83 +51,100 @@ class TestCoverageOrchestrator extends EventEmitter { */ constructor(options = {}) { super(); - + // Configuration - this.testsDir = options.testsDir || './tests'; - this.sqlDir = options.sqlDir || './sql'; - this.enforcementLevel = options.enforcementLevel || 'normal'; + this.testsDir = options.testsDir || "./tests"; + this.sqlDir = options.sqlDir || "./sql"; + this.enforcementLevel = options.enforcementLevel || "normal"; this.generateTemplates = options.generateTemplates || false; this.thresholds = options.thresholds || {}; this.allowBypass = options.allowBypass || false; - this.logger = options.logger || console.log; - + this.logger = options.logger || console; + // Initialize components this.analyzer = new TestRequirementAnalyzer(); this.scanner = new pgTAPTestScanner(); this.enforcer = new CoverageEnforcer(); this.generator = new TestTemplateGenerator(); - + // Wire up event forwarding this.setupEventHandlers(); } - + /** * Setup event forwarding from sub-components */ setupEventHandlers() { // Forward analyzer events - this.analyzer.on('progress', (data) => - this.emit('progress', { component: 'analyzer', ...data })); - this.analyzer.on('warning', (data) => - this.emit('warning', { component: 'analyzer', ...data })); - + this.analyzer.on("progress", (data) => + this.emit("progress", { component: "analyzer", ...data }), + ); + this.analyzer.on("warning", (data) => + this.emit("warning", { component: "analyzer", ...data }), + ); + // Forward scanner events - this.scanner.on('progress', (data) => - this.emit('progress', { component: 'scanner', ...data })); - this.scanner.on('file', (data) => - this.emit('progress', { component: 'scanner', message: `Scanning ${data.file}` })); - + this.scanner.on("progress", (data) => + this.emit("progress", { component: 
"scanner", ...data }), + ); + this.scanner.on("file", (data) => + this.emit("progress", { + component: "scanner", + message: `Scanning ${data.file}`, + }), + ); + // Forward enforcer events - this.enforcer.on('progress', (data) => - this.emit('progress', { component: 'enforcer', ...data })); - this.enforcer.on('enforcement_failed', (data) => - this.emit('enforcement_failed', data)); - this.enforcer.on('enforcement_bypassed', (data) => - this.emit('enforcement_bypassed', data)); + this.enforcer.on("progress", (data) => + this.emit("progress", { component: "enforcer", ...data }), + ); + this.enforcer.on("enforcement_failed", (data) => + this.emit("enforcement_failed", data), + ); + this.enforcer.on("enforcement_bypassed", (data) => + this.emit("enforcement_bypassed", data), + ); } - + /** * Check test coverage for migration operations * @param {Array} operations - Migration operations from AST analysis * @param {Object} options - Check options * @returns {Promise} Coverage check results + * @throws {ValidationError|CoverageEnforcementError|ParsingError|TestCoverageError} */ async checkCoverage(operations, options = {}) { - this.emit('start', { - message: 'Starting test coverage analysis', - operations: operations.length + this.emit("start", { + message: "Starting test coverage analysis", + operations: operations.length, }); - + try { // Step 1: Analyze operations to determine test requirements - this.emit('progress', { message: 'Analyzing test requirements...' }); + this.emit("progress", { message: "Analyzing test requirements..." }); const requirements = await this.analyzeRequirements(operations); - + // Step 2: Scan existing tests for coverage - this.emit('progress', { message: 'Scanning existing test coverage...' }); + this.emit("progress", { message: "Scanning existing test coverage..." }); const coverage = await this.scanTestCoverage(); - + // Step 3: Enforce coverage requirements - this.emit('progress', { message: 'Enforcing coverage requirements...' }); - const enforcement = await this.enforceCoverage(requirements, coverage, options); - + this.emit("progress", { message: "Enforcing coverage requirements..." }); + const enforcement = await this.enforceCoverage( + requirements, + coverage, + options, + ); + // Step 4: Generate templates if requested and there are gaps if (this.generateTemplates && enforcement.gaps.length > 0) { - this.emit('progress', { message: 'Generating test templates for gaps...' 
}); + this.emit("progress", { + message: "Generating test templates for gaps...", + }); const templates = await this.generateTestTemplates(enforcement.gaps); enforcement.templates = templates; } - + // Step 5: Determine final result const result = { passed: !enforcement.shouldBlock, @@ -128,99 +153,104 @@ class TestCoverageOrchestrator extends EventEmitter { suggestions: enforcement.suggestions, shouldBlock: enforcement.shouldBlock, bypassReason: enforcement.bypassReason, - templates: enforcement.templates || [] + templates: enforcement.templates || [], }; - - this.emit('complete', { - message: 'Test coverage analysis complete', + + this.emit("complete", { + message: "Test coverage analysis complete", passed: result.passed, - coverage: result.coveragePercentage + coverage: result.coveragePercentage, }); - + return result; - - } catch (error) { - this.emit('error', { - message: 'Test coverage analysis failed', - error: error.message - }); - throw error; + } catch (err) { + // Emit structured error info; rethrow without wrapping to preserve type + if (err instanceof TestCoverageError) { + this.emit("error", { + message: "Test coverage analysis failed", + name: err.name, + code: err.code, + details: err.details, + }); + } else { + this.emit("error", { + message: "Test coverage analysis failed", + name: err?.name || "Error", + error: err?.message, + }); + } + throw err; } } - + /** * Analyze migration operations to determine test requirements * @param {Array} operations - Migration operations * @returns {Promise} Test requirements analysis + * @throws {ValidationError} */ async analyzeRequirements(operations) { - // Use TestRequirementAnalyzer to determine what tests are needed const analysis = await this.analyzer.analyzeOperations(operations, { includeDataTests: true, includeConstraintTests: true, - includePerformanceTests: this.enforcementLevel === 'strict', - includeSecurityTests: true + includePerformanceTests: this.enforcementLevel === "strict", + includeSecurityTests: true, }); - - this.emit('progress', { + + this.emit("progress", { message: `Identified ${analysis.requirements.length} test requirements`, critical: analysis.summary.criticalCount, - high: analysis.summary.highCount + high: analysis.summary.highCount, }); - + return analysis; } - + /** * Scan existing tests for coverage * @returns {Promise} Coverage scan results + * @throws {ParsingError|ValidationError} */ async scanTestCoverage() { - // Check if tests directory exists try { await fs.access(this.testsDir); - } catch (error) { - this.emit('warning', { - message: 'Tests directory not found', - path: this.testsDir + } catch { + // No tests directory: warn and proceed with empty coverage (not an error) + this.emit("warning", { + message: "Tests directory not found", + path: this.testsDir, }); return { coverage: [], statistics: {} }; } - - // Scan all test files + await this.scanner.scanDirectory(this.testsDir); - - // Build coverage database const database = this.scanner.buildCoverageDatabase(); - - // Get coverage statistics const stats = this.scanner.getCoverageStatistics(); - - this.emit('progress', { + + this.emit("progress", { message: `Found ${stats.totalFiles} test files with ${stats.totalAssertions} assertions`, tables: stats.tablesWithTests, functions: stats.functionsWithTests, - policies: stats.policiesWithTests + policies: stats.policiesWithTests, }); - + return { coverage: database.objects, - statistics: stats + statistics: stats, }; } - + /** * Enforce coverage requirements * @param {Object} requirements - Test 
requirements analysis * @param {Object} coverage - Current test coverage * @param {Object} options - Enforcement options * @returns {Promise} Enforcement results + * @throws {CoverageEnforcementError|ValidationError} */ async enforceCoverage(requirements, coverage, options = {}) { - // Convert coverage database to format expected by enforcer const coverageArray = this.convertCoverageToArray(coverage.coverage); - - // Run enforcement + const enforcement = await this.enforcer.enforce( requirements.requirements, coverageArray, @@ -228,13 +258,16 @@ class TestCoverageOrchestrator extends EventEmitter { enforcementLevel: options.enforcementLevel || this.enforcementLevel, thresholds: options.thresholds || this.thresholds, bypassReason: options.bypassReason, - allowBypass: options.allowBypass !== undefined ? options.allowBypass : this.allowBypass - } + allowBypass: + options.allowBypass !== undefined + ? options.allowBypass + : this.allowBypass, + }, ); - + return enforcement; } - + /** * Generate test templates for coverage gaps * @param {Array} gaps - Coverage gaps @@ -242,30 +275,40 @@ class TestCoverageOrchestrator extends EventEmitter { */ async generateTestTemplates(gaps) { const templates = []; - + for (const gap of gaps) { try { const template = this.generator.generateTemplate(gap.requirement); templates.push({ requirement: gap.requirement, template: template, - path: this.getTemplateOutputPath(gap.requirement) + path: this.getTemplateOutputPath(gap.requirement), }); } catch (error) { - this.emit('warning', { - message: `Failed to generate template for ${gap.requirement.name}`, - error: error.message - }); + // Preserve typed error details in warnings for better debugging / logs + if (error instanceof TestCoverageError) { + this.emit("warning", { + message: `Failed to generate template for ${gap.requirement.name}`, + name: error.name, + code: error.code, + details: error.details, + }); + } else { + this.emit("warning", { + message: `Failed to generate template for ${gap.requirement.name}`, + error: error.message, + }); + } } } - - this.emit('progress', { - message: `Generated ${templates.length} test templates` + + this.emit("progress", { + message: `Generated ${templates.length} test templates`, }); - + return templates; } - + /** * Convert coverage database to array format * @param {Map} coverageMap - Coverage database map @@ -273,42 +316,43 @@ class TestCoverageOrchestrator extends EventEmitter { */ convertCoverageToArray(coverageMap) { const coverageArray = []; - + for (const [type, objects] of Object.entries(coverageMap)) { for (const [name, data] of Object.entries(objects)) { coverageArray.push({ - type: type, - name: name, - schema: data.schema || 'public', + type, + name, + schema: data.schema || "public", assertions: data.assertions || [], - files: data.files || [] + files: data.files || [], }); } } - + return coverageArray; } - + /** * Get output path for test template * @param {Object} requirement - Test requirement * @returns {string} Template output path */ getTemplateOutputPath(requirement) { - const typeDir = { - 'rpc': '002_rpc_tests', - 'rls': '003_rls_tests', - 'trigger': '004_trigger_tests', - 'constraint': '005_constraint_tests', - 'function': '006_function_tests', - 'table': '001_table_tests', - 'column': '007_column_tests', - 'index': '008_index_tests' - }[requirement.type] || '999_other_tests'; - + const typeDir = + { + rpc: "002_rpc_tests", + rls: "003_rls_tests", + trigger: "004_trigger_tests", + constraint: "005_constraint_tests", + function: 
"006_function_tests", + table: "001_table_tests", + column: "007_column_tests", + index: "008_index_tests", + }[requirement.type] || "999_other_tests"; + return path.join(this.testsDir, typeDir, `${requirement.name}.test.sql`); } - + /** * Write test templates to disk * @param {Array} templates - Generated templates @@ -317,18 +361,15 @@ class TestCoverageOrchestrator extends EventEmitter { async writeTemplates(templates) { for (const { template, path: templatePath } of templates) { const dir = path.dirname(templatePath); - - // Ensure directory exists + await fs.mkdir(dir, { recursive: true }); - - // Write template - await fs.writeFile(templatePath, template, 'utf8'); - - this.emit('progress', { - message: `Wrote template to ${templatePath}` + await fs.writeFile(templatePath, template, "utf8"); + + this.emit("progress", { + message: `Wrote template to ${templatePath}`, }); } } } -module.exports = TestCoverageOrchestrator; \ No newline at end of file +module.exports = TestCoverageOrchestrator; diff --git a/src/lib/testing/TestPatternLibrary.js b/src/lib/testing/TestPatternLibrary.js index 3b005b0..c29ae18 100644 --- a/src/lib/testing/TestPatternLibrary.js +++ b/src/lib/testing/TestPatternLibrary.js @@ -1,15 +1,15 @@ /** * Test Pattern Library - * + * * Comprehensive library of reusable test patterns for pgTAP test generation. * Provides common patterns for each test type with best practices and examples. * Used by TestTemplateGenerator to ensure consistency across all generated tests. - * + * * @fileoverview Test Pattern Library for pgTAP test generation * @author D.A.T.A. Engineering Team * @version 1.0.0 */ - +const { ValidationError } = require("./errors"); // using your barrel export /** * @typedef {Object} TestPattern * @property {string} name - Pattern name identifier @@ -27,7 +27,7 @@ /** * @typedef {Object} PatternCategory * @property {string} name - Category name - * @property {string} description - Category description + * @property {string} description - Category description * @property {Array} patterns - Patterns in this category * @property {Array} commonUseCases - Common use cases for this category * @property {Array} bestPractices - Category-level best practices @@ -40,13 +40,13 @@ class TestPatternLibrary { * @private */ this.categories = this.initializePatternCategories(); - + /** - * @type {Map} + * @type {Map} * @private */ this.patterns = this.initializePatterns(); - + /** * @type {Object>} * @private @@ -80,7 +80,7 @@ class TestPatternLibrary { */ getRecommendedPatterns(testType) { const patternNames = this.testTypePatterns[testType] || []; - return patternNames.map(name => this.patterns.get(name)).filter(Boolean); + return patternNames.map((name) => this.patterns.get(name)).filter(Boolean); } /** @@ -92,14 +92,14 @@ class TestPatternLibrary { renderPattern(patternName, variables = {}) { const pattern = this.getPattern(patternName); if (!pattern) { - throw new Error(`Pattern not found: ${patternName}`); + throw new ValidationError(`Pattern not found: ${patternName}`); } let rendered = pattern.sqlTemplate; - + // Replace placeholders with variables for (const [key, value] of Object.entries(variables)) { - const placeholder = new RegExp(`\\$\\{${key}\\}`, 'g'); + const placeholder = new RegExp(`\\$\\{${key}\\}`, "g"); rendered = rendered.replace(placeholder, value); } @@ -120,8 +120,9 @@ class TestPatternLibrary { * @returns {Array} Patterns at the specified difficulty */ getPatternsByDifficulty(difficulty) { - return Array.from(this.patterns.values()) - .filter(pattern 
=> pattern.difficulty === difficulty); + return Array.from(this.patterns.values()).filter( + (pattern) => pattern.difficulty === difficulty, + ); } /** @@ -132,94 +133,99 @@ class TestPatternLibrary { initializePatternCategories() { const categories = new Map(); - categories.set('data_validation', { - name: 'data_validation', - description: 'Patterns for validating data integrity, constraints, and business rules', + categories.set("data_validation", { + name: "data_validation", + description: + "Patterns for validating data integrity, constraints, and business rules", patterns: [], // Will be populated by initializePatterns commonUseCases: [ - 'Validating column constraints (NOT NULL, CHECK, UNIQUE)', - 'Testing foreign key relationships', - 'Verifying data type constraints', - 'Validating business rule enforcement' + "Validating column constraints (NOT NULL, CHECK, UNIQUE)", + "Testing foreign key relationships", + "Verifying data type constraints", + "Validating business rule enforcement", ], bestPractices: [ - 'Always test both valid and invalid data scenarios', - 'Include boundary value testing', - 'Test constraint cascading behavior', - 'Verify error messages are appropriate' - ] + "Always test both valid and invalid data scenarios", + "Include boundary value testing", + "Test constraint cascading behavior", + "Verify error messages are appropriate", + ], }); - categories.set('security_testing', { - name: 'security_testing', - description: 'Patterns for testing security features, RLS policies, and access controls', + categories.set("security_testing", { + name: "security_testing", + description: + "Patterns for testing security features, RLS policies, and access controls", patterns: [], commonUseCases: [ - 'Testing Row Level Security policies', - 'Verifying role-based access controls', - 'Testing data isolation between users', - 'Validating privilege escalation prevention' + "Testing Row Level Security policies", + "Verifying role-based access controls", + "Testing data isolation between users", + "Validating privilege escalation prevention", ], bestPractices: [ - 'Test with multiple user roles and contexts', - 'Verify policy expressions are not bypassable', - 'Test edge cases and boundary conditions', - 'Include SQL injection resistance testing' - ] + "Test with multiple user roles and contexts", + "Verify policy expressions are not bypassable", + "Test edge cases and boundary conditions", + "Include SQL injection resistance testing", + ], }); - categories.set('performance_testing', { - name: 'performance_testing', - description: 'Patterns for testing performance characteristics and query efficiency', + categories.set("performance_testing", { + name: "performance_testing", + description: + "Patterns for testing performance characteristics and query efficiency", patterns: [], commonUseCases: [ - 'Testing index usage and effectiveness', - 'Validating query performance under load', - 'Testing function execution time', - 'Verifying materialized view refresh performance' + "Testing index usage and effectiveness", + "Validating query performance under load", + "Testing function execution time", + "Verifying materialized view refresh performance", ], bestPractices: [ - 'Use realistic data volumes for testing', - 'Test performance regression scenarios', - 'Monitor resource usage during tests', - 'Include both cold and warm cache scenarios' - ] + "Use realistic data volumes for testing", + "Test performance regression scenarios", + "Monitor resource usage during tests", + "Include both cold 
and warm cache scenarios", + ], }); - categories.set('error_handling', { - name: 'error_handling', - description: 'Patterns for testing error conditions and exception handling', + categories.set("error_handling", { + name: "error_handling", + description: + "Patterns for testing error conditions and exception handling", patterns: [], commonUseCases: [ - 'Testing constraint violation handling', - 'Validating custom exception throwing', - 'Testing transaction rollback scenarios', - 'Verifying graceful degradation' + "Testing constraint violation handling", + "Validating custom exception throwing", + "Testing transaction rollback scenarios", + "Verifying graceful degradation", ], bestPractices: [ - 'Test all expected error conditions', - 'Verify error codes and messages', - 'Test error handling under concurrent access', - 'Ensure proper cleanup after errors' - ] + "Test all expected error conditions", + "Verify error codes and messages", + "Test error handling under concurrent access", + "Ensure proper cleanup after errors", + ], }); - categories.set('multi_user_scenarios', { - name: 'multi_user_scenarios', - description: 'Patterns for testing concurrent access and multi-user interactions', + categories.set("multi_user_scenarios", { + name: "multi_user_scenarios", + description: + "Patterns for testing concurrent access and multi-user interactions", patterns: [], commonUseCases: [ - 'Testing concurrent data modifications', - 'Validating lock contention handling', - 'Testing user isolation in multi-tenant scenarios', - 'Verifying audit trail accuracy with concurrent users' + "Testing concurrent data modifications", + "Validating lock contention handling", + "Testing user isolation in multi-tenant scenarios", + "Verifying audit trail accuracy with concurrent users", ], bestPractices: [ - 'Use realistic concurrency scenarios', - 'Test both read and write conflicts', - 'Verify data consistency after concurrent operations', - 'Include deadlock detection and resolution testing' - ] + "Use realistic concurrency scenarios", + "Test both read and write conflicts", + "Verify data consistency after concurrent operations", + "Include deadlock detection and resolution testing", + ], }); return categories; @@ -236,39 +242,40 @@ class TestPatternLibrary { // =========================================================================== // DATA VALIDATION PATTERNS // =========================================================================== - - patterns.set('table_exists_basic', { - name: 'table_exists_basic', - category: 'data_validation', - description: 'Basic table existence validation', + + patterns.set("table_exists_basic", { + name: "table_exists_basic", + category: "data_validation", + description: "Basic table existence validation", sqlTemplate: `-- Test: Table exists RETURN NEXT tap.has_table( '\${schema}', '\${tableName}', 'Table \${tableName} exists in \${schema} schema' );`, - placeholders: ['schema', 'tableName'], + placeholders: ["schema", "tableName"], metadata: { - testType: 'existence', - complexity: 'low', - executionTime: 'fast' + testType: "existence", + complexity: "low", + executionTime: "fast", }, bestPractices: [ - 'Always test table existence before testing structure', - 'Include schema name for clarity', - 'Use descriptive test messages' + "Always test table existence before testing structure", + "Include schema name for clarity", + "Use descriptive test messages", ], examples: [ - "renderPattern('table_exists_basic', { schema: 'public', tableName: 'users' })" + 
"renderPattern('table_exists_basic', { schema: 'public', tableName: 'users' })", ], - difficulty: 'basic', - dependencies: [] + difficulty: "basic", + dependencies: [], }); - patterns.set('column_structure_validation', { - name: 'column_structure_validation', - category: 'data_validation', - description: 'Comprehensive column structure validation including type, constraints, and defaults', + patterns.set("column_structure_validation", { + name: "column_structure_validation", + category: "data_validation", + description: + "Comprehensive column structure validation including type, constraints, and defaults", sqlTemplate: `-- Column: \${columnName} RETURN NEXT tap.has_column( '\${schema}', @@ -286,32 +293,48 @@ RETURN NEXT tap.has_column( \${primaryKeyTest} \${foreignKeyTest}`, - placeholders: ['schema', 'tableName', 'columnName', 'dataTypeTest', 'notNullTest', 'defaultValueTest', 'primaryKeyTest', 'foreignKeyTest'], + placeholders: [ + "schema", + "tableName", + "columnName", + "dataTypeTest", + "notNullTest", + "defaultValueTest", + "primaryKeyTest", + "foreignKeyTest", + ], metadata: { - testType: 'structure', - complexity: 'medium', - executionTime: 'medium', - conditionalSections: ['dataTypeTest', 'notNullTest', 'defaultValueTest', 'primaryKeyTest', 'foreignKeyTest'] + testType: "structure", + complexity: "medium", + executionTime: "medium", + conditionalSections: [ + "dataTypeTest", + "notNullTest", + "defaultValueTest", + "primaryKeyTest", + "foreignKeyTest", + ], }, bestPractices: [ - 'Test column existence before testing properties', - 'Include both positive and negative test cases', - 'Test all relevant column properties', - 'Use consistent naming patterns' + "Test column existence before testing properties", + "Include both positive and negative test cases", + "Test all relevant column properties", + "Use consistent naming patterns", ], examples: [ - 'Test user_id column with all properties', - 'Test email column with unique constraint', - 'Test timestamp columns with defaults' + "Test user_id column with all properties", + "Test email column with unique constraint", + "Test timestamp columns with defaults", ], - difficulty: 'intermediate', - dependencies: ['column_exists_basic'] + difficulty: "intermediate", + dependencies: ["column_exists_basic"], }); - patterns.set('foreign_key_relationship', { - name: 'foreign_key_relationship', - category: 'data_validation', - description: 'Foreign key relationship validation with referential integrity testing', + patterns.set("foreign_key_relationship", { + name: "foreign_key_relationship", + category: "data_validation", + description: + "Foreign key relationship validation with referential integrity testing", sqlTemplate: `-- Test: Foreign key relationship RETURN NEXT tap.fk_ok( '\${sourceSchema}', @@ -332,32 +355,41 @@ RETURN NEXT tap.throws_ok( -- Test: Cascade behavior (if applicable) \${cascadeTest}`, - placeholders: ['sourceSchema', 'sourceTable', 'sourceColumn', 'targetSchema', 'targetTable', 'targetColumn', 'invalidValue', 'cascadeTest'], + placeholders: [ + "sourceSchema", + "sourceTable", + "sourceColumn", + "targetSchema", + "targetTable", + "targetColumn", + "invalidValue", + "cascadeTest", + ], metadata: { - testType: 'integrity', - complexity: 'medium', - executionTime: 'medium', - requiresTestData: true + testType: "integrity", + complexity: "medium", + executionTime: "medium", + requiresTestData: true, }, bestPractices: [ - 'Test both valid and invalid foreign key values', - 'Include cascade behavior testing', - 'Test 
referential integrity under concurrent access', - 'Verify constraint error messages' + "Test both valid and invalid foreign key values", + "Include cascade behavior testing", + "Test referential integrity under concurrent access", + "Verify constraint error messages", ], examples: [ - 'Test user_id references users.id', - 'Test order references with cascade delete', - 'Test nullable foreign keys' + "Test user_id references users.id", + "Test order references with cascade delete", + "Test nullable foreign keys", ], - difficulty: 'intermediate', - dependencies: ['test_data_setup'] + difficulty: "intermediate", + dependencies: ["test_data_setup"], }); - patterns.set('constraint_validation', { - name: 'constraint_validation', - category: 'data_validation', - description: 'CHECK constraint validation with boundary testing', + patterns.set("constraint_validation", { + name: "constraint_validation", + category: "data_validation", + description: "CHECK constraint validation with boundary testing", sqlTemplate: `-- Test: Constraint exists RETURN NEXT tap.has_check( '\${schema}', @@ -384,36 +416,44 @@ RETURN NEXT tap.throws_ok( -- Test: Boundary conditions \${boundaryTests}`, - placeholders: ['schema', 'tableName', 'constraintName', 'testColumns', 'validValues', 'invalidValues', 'boundaryTests'], + placeholders: [ + "schema", + "tableName", + "constraintName", + "testColumns", + "validValues", + "invalidValues", + "boundaryTests", + ], metadata: { - testType: 'validation', - complexity: 'medium', - executionTime: 'medium', - requiresTestData: true + testType: "validation", + complexity: "medium", + executionTime: "medium", + requiresTestData: true, }, bestPractices: [ - 'Test boundary values and edge cases', - 'Include both acceptance and rejection tests', - 'Test multiple invalid scenarios', - 'Verify proper error codes' + "Test boundary values and edge cases", + "Include both acceptance and rejection tests", + "Test multiple invalid scenarios", + "Verify proper error codes", ], examples: [ - 'Age constraint: age >= 0 AND age <= 150', - 'Email format constraint validation', - 'Status enum constraint testing' + "Age constraint: age >= 0 AND age <= 150", + "Email format constraint validation", + "Status enum constraint testing", ], - difficulty: 'intermediate', - dependencies: ['test_data_setup'] + difficulty: "intermediate", + dependencies: ["test_data_setup"], }); // =========================================================================== // SECURITY TESTING PATTERNS // =========================================================================== - patterns.set('rls_enablement_check', { - name: 'rls_enablement_check', - category: 'security_testing', - description: 'Row Level Security enablement validation', + patterns.set("rls_enablement_check", { + name: "rls_enablement_check", + category: "security_testing", + description: "Row Level Security enablement validation", sqlTemplate: `-- Test: RLS is enabled on table RETURN NEXT tap.ok( (SELECT relrowsecurity FROM pg_class @@ -432,29 +472,29 @@ RETURN NEXT tap.ok( AND relnamespace = '\${schema}'::regnamespace), 'RLS enforcement is properly configured on \${schema}.\${tableName}' );`, - placeholders: ['schema', 'tableName'], + placeholders: ["schema", "tableName"], metadata: { - testType: 'security_config', - complexity: 'low', - executionTime: 'fast' + testType: "security_config", + complexity: "low", + executionTime: "fast", }, bestPractices: [ - 'Always verify RLS is both enabled and enforced', - 'Test RLS configuration before testing policies', - 
'Include both positive and negative checks' + "Always verify RLS is both enabled and enforced", + "Test RLS configuration before testing policies", + "Include both positive and negative checks", ], examples: [ - 'Check RLS on users table', - 'Verify RLS enforcement on sensitive data tables' + "Check RLS on users table", + "Verify RLS enforcement on sensitive data tables", ], - difficulty: 'basic', - dependencies: [] + difficulty: "basic", + dependencies: [], }); - patterns.set('policy_existence_check', { - name: 'policy_existence_check', - category: 'security_testing', - description: 'RLS policy existence and configuration validation', + patterns.set("policy_existence_check", { + name: "policy_existence_check", + category: "security_testing", + description: "RLS policy existence and configuration validation", sqlTemplate: `-- Test: Policy '\${policyName}' exists RETURN NEXT tap.ok( (SELECT COUNT(*) > 0 FROM pg_policies @@ -476,29 +516,36 @@ RETURN NEXT tap.ok( -- Test: Policy has correct roles (if specified) \${roleTest}`, - placeholders: ['schema', 'tableName', 'policyName', 'commandType', 'roleTest'], + placeholders: [ + "schema", + "tableName", + "policyName", + "commandType", + "roleTest", + ], metadata: { - testType: 'security_config', - complexity: 'medium', - executionTime: 'fast' + testType: "security_config", + complexity: "medium", + executionTime: "fast", }, bestPractices: [ - 'Test policy existence before testing behavior', - 'Verify policy applies to intended SQL commands', - 'Check role assignments if policies are role-specific' + "Test policy existence before testing behavior", + "Verify policy applies to intended SQL commands", + "Check role assignments if policies are role-specific", ], examples: [ - 'Check user_select_own_data policy exists', - 'Verify admin policies apply to all commands' + "Check user_select_own_data policy exists", + "Verify admin policies apply to all commands", ], - difficulty: 'intermediate', - dependencies: ['rls_enablement_check'] + difficulty: "intermediate", + dependencies: ["rls_enablement_check"], }); - patterns.set('multi_user_access_test', { - name: 'multi_user_access_test', - category: 'security_testing', - description: 'Multi-user access pattern testing data isolation and permissions', + patterns.set("multi_user_access_test", { + name: "multi_user_access_test", + category: "security_testing", + description: + "Multi-user access pattern testing data isolation and permissions", sqlTemplate: `-- Setup: Create test users and data v_user1_id := test.create_test_user('user1@test.com'); v_user2_id := test.create_test_user('user2@test.com'); @@ -534,33 +581,43 @@ RETURN NEXT tap.is( PERFORM test.set_auth_context(v_admin_id, 'authenticated'); \${adminAccessTest}`, - placeholders: ['schema', 'tableName', 'createUser1Data', 'userFilter', 'adminAccessTest'], + placeholders: [ + "schema", + "tableName", + "createUser1Data", + "userFilter", + "adminAccessTest", + ], metadata: { - testType: 'security_isolation', - complexity: 'high', - executionTime: 'medium', + testType: "security_isolation", + complexity: "high", + executionTime: "medium", requiresTestUsers: true, - requiresTestData: true + requiresTestData: true, }, bestPractices: [ - 'Create fresh test users for each test run', - 'Test both positive and negative access scenarios', - 'Include admin override testing where applicable', - 'Clean up test data after tests complete' + "Create fresh test users for each test run", + "Test both positive and negative access scenarios", + "Include admin override 
testing where applicable", + "Clean up test data after tests complete", ], examples: [ - 'Test user can only see their own posts', - 'Test admin can see all user data', - 'Test service role bypasses RLS' + "Test user can only see their own posts", + "Test admin can see all user data", + "Test service role bypasses RLS", + ], + difficulty: "advanced", + dependencies: [ + "test_user_setup", + "rls_enablement_check", + "policy_existence_check", ], - difficulty: 'advanced', - dependencies: ['test_user_setup', 'rls_enablement_check', 'policy_existence_check'] }); - patterns.set('privilege_escalation_test', { - name: 'privilege_escalation_test', - category: 'security_testing', - description: 'Test that privilege escalation is properly prevented', + patterns.set("privilege_escalation_test", { + name: "privilege_escalation_test", + category: "security_testing", + description: "Test that privilege escalation is properly prevented", sqlTemplate: `-- Test: Regular user cannot escalate privileges PERFORM test.set_auth_context(v_user_id, 'authenticated'); @@ -583,36 +640,42 @@ RETURN NEXT tap.throws_ok( -- Test: User cannot access restricted schemas \${restrictedSchemaTest}`, - placeholders: ['privilegeEscalationAttempt', 'expectedErrorCode', 'escalationType', 'functionBypassTest', 'restrictedSchemaTest'], + placeholders: [ + "privilegeEscalationAttempt", + "expectedErrorCode", + "escalationType", + "functionBypassTest", + "restrictedSchemaTest", + ], metadata: { - testType: 'security_hardening', - complexity: 'high', - executionTime: 'medium', - requiresTestUsers: true + testType: "security_hardening", + complexity: "high", + executionTime: "medium", + requiresTestUsers: true, }, bestPractices: [ - 'Test common privilege escalation vectors', - 'Verify proper error codes are returned', - 'Include tests for function-based bypasses', - 'Test access to system catalogs' + "Test common privilege escalation vectors", + "Verify proper error codes are returned", + "Include tests for function-based bypasses", + "Test access to system catalogs", ], examples: [ - 'Test user cannot SET ROLE to admin', - 'Test user cannot create SECURITY DEFINER functions', - 'Test user cannot access information_schema' + "Test user cannot SET ROLE to admin", + "Test user cannot create SECURITY DEFINER functions", + "Test user cannot access information_schema", ], - difficulty: 'advanced', - dependencies: ['test_user_setup'] + difficulty: "advanced", + dependencies: ["test_user_setup"], }); // =========================================================================== // PERFORMANCE TESTING PATTERNS // =========================================================================== - patterns.set('index_usage_verification', { - name: 'index_usage_verification', - category: 'performance_testing', - description: 'Verify that indexes are used by query plans', + patterns.set("index_usage_verification", { + name: "index_usage_verification", + category: "performance_testing", + description: "Verify that indexes are used by query plans", sqlTemplate: `-- Test: Index exists and has correct structure RETURN NEXT tap.has_index( '\${schema}', @@ -629,32 +692,39 @@ RETURN NEXT tap.has_index( -- Performance baseline test \${performanceTest}`, - placeholders: ['schema', 'tableName', 'indexName', 'queryPlanTest', 'selectivityTest', 'performanceTest'], + placeholders: [ + "schema", + "tableName", + "indexName", + "queryPlanTest", + "selectivityTest", + "performanceTest", + ], metadata: { - testType: 'performance_validation', - complexity: 'high', - 
executionTime: 'slow', - requiresTestData: true + testType: "performance_validation", + complexity: "high", + executionTime: "slow", + requiresTestData: true, }, bestPractices: [ - 'Test with realistic data volumes', - 'Include both selective and non-selective queries', - 'Test index usage under different query patterns', - 'Monitor index maintenance overhead' + "Test with realistic data volumes", + "Include both selective and non-selective queries", + "Test index usage under different query patterns", + "Monitor index maintenance overhead", ], examples: [ - 'Test email index is used for login queries', - 'Test composite index covers complex WHERE clauses', - 'Test partial index effectiveness' + "Test email index is used for login queries", + "Test composite index covers complex WHERE clauses", + "Test partial index effectiveness", ], - difficulty: 'advanced', - dependencies: ['performance_test_data'] + difficulty: "advanced", + dependencies: ["performance_test_data"], }); - patterns.set('function_performance_test', { - name: 'function_performance_test', - category: 'performance_testing', - description: 'Function execution time and resource usage validation', + patterns.set("function_performance_test", { + name: "function_performance_test", + category: "performance_testing", + description: "Function execution time and resource usage validation", sqlTemplate: `-- Test: Function executes within time limit DECLARE v_start_time timestamp; @@ -680,36 +750,42 @@ END; -- Test: Function resource usage is reasonable \${resourceUsageTest}`, - placeholders: ['functionCall', 'maxExecutionTime', 'functionName', 'concurrencyTest', 'resourceUsageTest'], + placeholders: [ + "functionCall", + "maxExecutionTime", + "functionName", + "concurrencyTest", + "resourceUsageTest", + ], metadata: { - testType: 'performance_validation', - complexity: 'high', - executionTime: 'slow', - requiresTestData: true + testType: "performance_validation", + complexity: "high", + executionTime: "slow", + requiresTestData: true, }, bestPractices: [ - 'Set realistic performance expectations', - 'Test with various input sizes', - 'Include concurrent execution testing', - 'Monitor memory and CPU usage' + "Set realistic performance expectations", + "Test with various input sizes", + "Include concurrent execution testing", + "Monitor memory and CPU usage", ], examples: [ - 'Test report generation completes within 30 seconds', - 'Test batch processing handles 10,000 records', - 'Test concurrent user function calls' + "Test report generation completes within 30 seconds", + "Test batch processing handles 10,000 records", + "Test concurrent user function calls", ], - difficulty: 'advanced', - dependencies: ['performance_test_data'] + difficulty: "advanced", + dependencies: ["performance_test_data"], }); // =========================================================================== // ERROR HANDLING PATTERNS // =========================================================================== - patterns.set('constraint_violation_handling', { - name: 'constraint_violation_handling', - category: 'error_handling', - description: 'Test proper constraint violation error handling', + patterns.set("constraint_violation_handling", { + name: "constraint_violation_handling", + category: "error_handling", + description: "Test proper constraint violation error handling", sqlTemplate: `-- Test: NOT NULL constraint violation RETURN NEXT tap.throws_ok( 'INSERT INTO \${schema}.\${tableName} (\${columns}) VALUES (\${nullValues})', @@ -737,32 +813,41 @@ RETURN 
NEXT tap.throws_ok( '23503', 'Foreign key constraint violation properly detected: \${constraintName}' );`, - placeholders: ['schema', 'tableName', 'columns', 'nullValues', 'constraintName', 'duplicateInsert', 'invalidValues', 'orphanValues'], + placeholders: [ + "schema", + "tableName", + "columns", + "nullValues", + "constraintName", + "duplicateInsert", + "invalidValues", + "orphanValues", + ], metadata: { - testType: 'error_validation', - complexity: 'medium', - executionTime: 'medium', - requiresTestData: true + testType: "error_validation", + complexity: "medium", + executionTime: "medium", + requiresTestData: true, }, bestPractices: [ - 'Test all constraint types individually', - 'Verify correct error codes are returned', - 'Test constraint violation messages', - 'Include cascade constraint testing' + "Test all constraint types individually", + "Verify correct error codes are returned", + "Test constraint violation messages", + "Include cascade constraint testing", ], examples: [ - 'Test email uniqueness constraint', - 'Test age check constraint violation', - 'Test foreign key reference violations' + "Test email uniqueness constraint", + "Test age check constraint violation", + "Test foreign key reference violations", ], - difficulty: 'intermediate', - dependencies: ['test_data_setup'] + difficulty: "intermediate", + dependencies: ["test_data_setup"], }); - patterns.set('function_exception_handling', { - name: 'function_exception_handling', - category: 'error_handling', - description: 'Test function exception handling and error recovery', + patterns.set("function_exception_handling", { + name: "function_exception_handling", + category: "error_handling", + description: "Test function exception handling and error recovery", sqlTemplate: `-- Test: Function handles invalid input gracefully RETURN NEXT tap.lives_ok( 'SELECT \${schema}.\${functionName}(\${invalidInput})', @@ -797,36 +882,46 @@ EXCEPTION WHEN OTHERS THEN RETURN NEXT tap.fail('Function \${functionName} should maintain transaction integrity on error'); END;`, - placeholders: ['schema', 'functionName', 'invalidInput', 'errorInput', 'expectedErrorCode', 'errorMessage', 'transactionSetup', 'transactionStateCheck'], + placeholders: [ + "schema", + "functionName", + "invalidInput", + "errorInput", + "expectedErrorCode", + "errorMessage", + "transactionSetup", + "transactionStateCheck", + ], metadata: { - testType: 'error_validation', - complexity: 'high', - executionTime: 'medium', - requiresTestData: true + testType: "error_validation", + complexity: "high", + executionTime: "medium", + requiresTestData: true, }, bestPractices: [ - 'Test both recoverable and non-recoverable errors', - 'Verify transaction integrity is maintained', - 'Test error propagation in nested calls', - 'Include proper cleanup after errors' + "Test both recoverable and non-recoverable errors", + "Verify transaction integrity is maintained", + "Test error propagation in nested calls", + "Include proper cleanup after errors", ], examples: [ - 'Test division by zero handling', - 'Test invalid JSON parsing errors', - 'Test authorization failures in functions' + "Test division by zero handling", + "Test invalid JSON parsing errors", + "Test authorization failures in functions", ], - difficulty: 'advanced', - dependencies: ['test_data_setup', 'transaction_test_helpers'] + difficulty: "advanced", + dependencies: ["test_data_setup", "transaction_test_helpers"], }); // =========================================================================== // MULTI-USER SCENARIO 
PATTERNS // =========================================================================== - patterns.set('concurrent_modification_test', { - name: 'concurrent_modification_test', - category: 'multi_user_scenarios', - description: 'Test concurrent data modification scenarios and conflict resolution', + patterns.set("concurrent_modification_test", { + name: "concurrent_modification_test", + category: "multi_user_scenarios", + description: + "Test concurrent data modification scenarios and conflict resolution", sqlTemplate: `-- Test: Concurrent updates with optimistic locking DECLARE v_initial_version integer; @@ -870,32 +965,32 @@ END; -- Test: Concurrent insert conflict resolution \${concurrentInsertTest}`, - placeholders: ['schema', 'tableName', 'testId', 'concurrentInsertTest'], + placeholders: ["schema", "tableName", "testId", "concurrentInsertTest"], metadata: { - testType: 'concurrency_validation', - complexity: 'high', - executionTime: 'slow', - requiresTestData: true + testType: "concurrency_validation", + complexity: "high", + executionTime: "slow", + requiresTestData: true, }, bestPractices: [ - 'Use realistic concurrency scenarios', - 'Test both optimistic and pessimistic locking', - 'Verify data consistency after conflicts', - 'Include deadlock detection testing' + "Use realistic concurrency scenarios", + "Test both optimistic and pessimistic locking", + "Verify data consistency after conflicts", + "Include deadlock detection testing", ], examples: [ - 'Test shopping cart concurrent updates', - 'Test inventory concurrent modifications', - 'Test user profile concurrent edits' + "Test shopping cart concurrent updates", + "Test inventory concurrent modifications", + "Test user profile concurrent edits", ], - difficulty: 'advanced', - dependencies: ['test_data_setup', 'concurrency_test_helpers'] + difficulty: "advanced", + dependencies: ["test_data_setup", "concurrency_test_helpers"], }); - patterns.set('data_isolation_verification', { - name: 'data_isolation_verification', - category: 'multi_user_scenarios', - description: 'Verify data isolation between different user contexts', + patterns.set("data_isolation_verification", { + name: "data_isolation_verification", + category: "multi_user_scenarios", + description: "Verify data isolation between different user contexts", sqlTemplate: `-- Setup: Create isolated test data for multiple users DECLARE v_user1_data_count integer; @@ -942,65 +1037,73 @@ BEGIN 'No cross-user data leakage detected' ); END;`, - placeholders: ['createUser1Data', 'createUser2Data', 'schema', 'tableName', 'expectedUser1Count', 'expectedUser2Count', 'user2Filter'], + placeholders: [ + "createUser1Data", + "createUser2Data", + "schema", + "tableName", + "expectedUser1Count", + "expectedUser2Count", + "user2Filter", + ], metadata: { - testType: 'isolation_validation', - complexity: 'high', - executionTime: 'medium', + testType: "isolation_validation", + complexity: "high", + executionTime: "medium", requiresTestUsers: true, - requiresTestData: true + requiresTestData: true, }, bestPractices: [ - 'Test isolation with realistic data volumes', - 'Verify both read and write isolation', - 'Include edge cases for data boundaries', - 'Test isolation under concurrent access' + "Test isolation with realistic data volumes", + "Verify both read and write isolation", + "Include edge cases for data boundaries", + "Test isolation under concurrent access", ], examples: [ - 'Test user posts are isolated from each other', - 'Test tenant data isolation in SaaS applications', - 'Test 
role-based data filtering' + "Test user posts are isolated from each other", + "Test tenant data isolation in SaaS applications", + "Test role-based data filtering", ], - difficulty: 'advanced', - dependencies: ['test_user_setup', 'multi_user_data_setup'] + difficulty: "advanced", + dependencies: ["test_user_setup", "multi_user_data_setup"], }); // Add patterns to their respective categories const dataValidationPatterns = [ - patterns.get('table_exists_basic'), - patterns.get('column_structure_validation'), - patterns.get('foreign_key_relationship'), - patterns.get('constraint_validation') + patterns.get("table_exists_basic"), + patterns.get("column_structure_validation"), + patterns.get("foreign_key_relationship"), + patterns.get("constraint_validation"), ]; const securityPatterns = [ - patterns.get('rls_enablement_check'), - patterns.get('policy_existence_check'), - patterns.get('multi_user_access_test'), - patterns.get('privilege_escalation_test') + patterns.get("rls_enablement_check"), + patterns.get("policy_existence_check"), + patterns.get("multi_user_access_test"), + patterns.get("privilege_escalation_test"), ]; const performancePatterns = [ - patterns.get('index_usage_verification'), - patterns.get('function_performance_test') + patterns.get("index_usage_verification"), + patterns.get("function_performance_test"), ]; const errorHandlingPatterns = [ - patterns.get('constraint_violation_handling'), - patterns.get('function_exception_handling') + patterns.get("constraint_violation_handling"), + patterns.get("function_exception_handling"), ]; const multiUserPatterns = [ - patterns.get('concurrent_modification_test'), - patterns.get('data_isolation_verification') + patterns.get("concurrent_modification_test"), + patterns.get("data_isolation_verification"), ]; // Update category references - this.categories.get('data_validation').patterns = dataValidationPatterns; - this.categories.get('security_testing').patterns = securityPatterns; - this.categories.get('performance_testing').patterns = performancePatterns; - this.categories.get('error_handling').patterns = errorHandlingPatterns; - this.categories.get('multi_user_scenarios').patterns = multiUserPatterns; + this.categories.get("data_validation").patterns = dataValidationPatterns; + this.categories.get("security_testing").patterns = securityPatterns; + this.categories.get("performance_testing").patterns = performancePatterns; + this.categories.get("error_handling").patterns = errorHandlingPatterns; + this.categories.get("multi_user_scenarios").patterns = multiUserPatterns; return patterns; } @@ -1014,65 +1117,62 @@ END;`, return { // RPC function tests rpc: [ - 'table_exists_basic', - 'function_exception_handling', - 'function_performance_test', - 'multi_user_access_test', - 'privilege_escalation_test' + "table_exists_basic", + "function_exception_handling", + "function_performance_test", + "multi_user_access_test", + "privilege_escalation_test", ], - // Row Level Security tests + // Row Level Security tests rls: [ - 'rls_enablement_check', - 'policy_existence_check', - 'multi_user_access_test', - 'data_isolation_verification', - 'privilege_escalation_test' + "rls_enablement_check", + "policy_existence_check", + "multi_user_access_test", + "data_isolation_verification", + "privilege_escalation_test", ], // Trigger tests trigger: [ - 'table_exists_basic', - 'constraint_violation_handling', - 'concurrent_modification_test', - 'function_exception_handling' + "table_exists_basic", + "constraint_violation_handling", + 
"concurrent_modification_test", + "function_exception_handling", ], // Constraint tests constraint: [ - 'constraint_validation', - 'constraint_violation_handling', - 'foreign_key_relationship' + "constraint_validation", + "constraint_violation_handling", + "foreign_key_relationship", ], // Function tests (database functions) function: [ - 'function_exception_handling', - 'function_performance_test', - 'constraint_violation_handling' + "function_exception_handling", + "function_performance_test", + "constraint_violation_handling", ], // Table tests table: [ - 'table_exists_basic', - 'column_structure_validation', - 'constraint_validation', - 'foreign_key_relationship', - 'index_usage_verification' + "table_exists_basic", + "column_structure_validation", + "constraint_validation", + "foreign_key_relationship", + "index_usage_verification", ], // Column tests column: [ - 'column_structure_validation', - 'constraint_validation', - 'constraint_violation_handling' + "column_structure_validation", + "constraint_validation", + "constraint_violation_handling", ], // Index tests - index: [ - 'index_usage_verification', - 'function_performance_test' - ] + index: ["index_usage_verification", "function_performance_test"], }; } @@ -1084,11 +1184,11 @@ END;`, getBestPractices(testType) { const patterns = this.getRecommendedPatterns(testType); const practices = new Set(); - - patterns.forEach(pattern => { - pattern.bestPractices.forEach(practice => practices.add(practice)); + + patterns.forEach((pattern) => { + pattern.bestPractices.forEach((practice) => practices.add(practice)); }); - + return Array.from(practices); } @@ -1100,13 +1200,13 @@ END;`, getUsageExamples(testType) { const patterns = this.getRecommendedPatterns(testType); const examples = []; - - patterns.forEach(pattern => { + + patterns.forEach((pattern) => { if (pattern.examples && pattern.examples.length > 0) { examples.push(...pattern.examples); } }); - + return examples; } @@ -1122,30 +1222,30 @@ END;`, // Document each category for (const [categoryName, category] of this.categories) { - doc += `## ${category.name.toUpperCase().replace('_', ' ')}\n\n`; + doc += `## ${category.name.toUpperCase().replace("_", " ")}\n\n`; doc += `${category.description}\n\n`; - + doc += `### Common Use Cases\n`; - category.commonUseCases.forEach(useCase => { + category.commonUseCases.forEach((useCase) => { doc += `- ${useCase}\n`; }); doc += `\n`; doc += `### Best Practices\n`; - category.bestPractices.forEach(practice => { + category.bestPractices.forEach((practice) => { doc += `- ${practice}\n`; }); doc += `\n`; doc += `### Available Patterns\n`; - category.patterns.forEach(pattern => { + category.patterns.forEach((pattern) => { doc += `#### ${pattern.name}\n`; doc += `**Difficulty:** ${pattern.difficulty}\n`; doc += `**Description:** ${pattern.description}\n\n`; - + if (pattern.examples && pattern.examples.length > 0) { doc += `**Examples:**\n`; - pattern.examples.forEach(example => { + pattern.examples.forEach((example) => { doc += `- ${example}\n`; }); doc += `\n`; @@ -1158,4 +1258,4 @@ END;`, } } -module.exports = TestPatternLibrary; \ No newline at end of file +module.exports = TestPatternLibrary; diff --git a/src/lib/testing/TestRequirementAnalyzer.js b/src/lib/testing/TestRequirementAnalyzer.js index 84a835b..3fc4151 100644 --- a/src/lib/testing/TestRequirementAnalyzer.js +++ b/src/lib/testing/TestRequirementAnalyzer.js @@ -1,31 +1,31 @@ /** * Test Requirement Analyzer for D.A.T.A. 
- * + * * Analyzes AST migration operations and determines what pgTAP tests are required * to ensure database schema changes are properly validated. Maps schema operations * to specific test requirements for comprehensive coverage. - * + * * @module TestRequirementAnalyzer */ -const { EventEmitter } = require('events'); - +const { EventEmitter } = require("events"); +const { ValidationError } = require("../errors"); /** * Test requirement types * @readonly * @enum {string} */ const TEST_TYPES = { - SCHEMA: 'SCHEMA', // Table structure tests - DATA: 'DATA', // Data integrity tests - CONSTRAINT: 'CONSTRAINT', // Constraint validation tests - INDEX: 'INDEX', // Index existence and performance tests - FUNCTION: 'FUNCTION', // Function behavior tests - TRIGGER: 'TRIGGER', // Trigger functionality tests - RLS: 'RLS', // Row Level Security tests - VIEW: 'VIEW', // View definition tests - ENUM: 'ENUM', // Enum type tests - PERMISSION: 'PERMISSION' // Permission and security tests + SCHEMA: "SCHEMA", // Table structure tests + DATA: "DATA", // Data integrity tests + CONSTRAINT: "CONSTRAINT", // Constraint validation tests + INDEX: "INDEX", // Index existence and performance tests + FUNCTION: "FUNCTION", // Function behavior tests + TRIGGER: "TRIGGER", // Trigger functionality tests + RLS: "RLS", // Row Level Security tests + VIEW: "VIEW", // View definition tests + ENUM: "ENUM", // Enum type tests + PERMISSION: "PERMISSION", // Permission and security tests }; /** @@ -34,10 +34,10 @@ const TEST_TYPES = { * @enum {string} */ const TEST_PRIORITIES = { - CRITICAL: 'CRITICAL', // Must have - blocks deployment - HIGH: 'HIGH', // Should have - important coverage - MEDIUM: 'MEDIUM', // Nice to have - good practice - LOW: 'LOW' // Optional - comprehensive coverage + CRITICAL: "CRITICAL", // Must have - blocks deployment + HIGH: "HIGH", // Should have - important coverage + MEDIUM: "MEDIUM", // Nice to have - good practice + LOW: "LOW", // Optional - comprehensive coverage }; /** @@ -67,7 +67,7 @@ const TEST_PRIORITIES = { class TestRequirementAnalyzer extends EventEmitter { constructor(options = {}) { super(); - + // Configuration options this.options = { // Test coverage requirements @@ -75,41 +75,44 @@ class TestRequirementAnalyzer extends EventEmitter { requireConstraintTests: options.requireConstraintTests !== false, requirePerformanceTests: options.requirePerformanceTests || false, requireSecurityTests: options.requireSecurityTests !== false, - + // Risk-based test priorities - destructiveOperationPriority: options.destructiveOperationPriority || TEST_PRIORITIES.CRITICAL, - warningOperationPriority: options.warningOperationPriority || TEST_PRIORITIES.HIGH, - safeOperationPriority: options.safeOperationPriority || TEST_PRIORITIES.MEDIUM, - + destructiveOperationPriority: + options.destructiveOperationPriority || TEST_PRIORITIES.CRITICAL, + warningOperationPriority: + options.warningOperationPriority || TEST_PRIORITIES.HIGH, + safeOperationPriority: + options.safeOperationPriority || TEST_PRIORITIES.MEDIUM, + // Test complexity thresholds maxTestCasesPerRequirement: options.maxTestCasesPerRequirement || 10, estimatedEffortPerTest: options.estimatedEffortPerTest || 0.5, // hours - - ...options + + ...options, }; // Operation type to test type mappings this.operationTestMappings = this._initializeTestMappings(); - + // Risk patterns that require additional testing this.highRiskPatterns = [ /DROP TABLE/i, - /DROP COLUMN/i, + /DROP COLUMN/i, /ALTER.*TYPE/i, /DROP CONSTRAINT/i, /TRUNCATE/i, - /DELETE FROM/i 
+ /DELETE FROM/i, ]; - + // Security-sensitive patterns this.securityPatterns = [ /CREATE POLICY|DROP POLICY|ALTER POLICY/i, /GRANT|REVOKE/i, /auth\.|storage\.|realtime\./i, - /security_definer|security_invoker/i + /security_definer|security_invoker/i, ]; } - + /** * Analyze migration operations for test requirements * @param {Array} operations - Array of migration operations @@ -117,70 +120,83 @@ class TestRequirementAnalyzer extends EventEmitter { * @returns {Promise} Analysis results with test requirements */ async analyzeOperations(operations, context = {}) { - this.emit('progress', { - message: 'Analyzing operations for test requirements...', - operations: operations.length + this.emit("progress", { + message: "Analyzing operations for test requirements...", + operations: operations.length, }); - + const analysis = { requirements: [], summary: { totalRequirements: 0, byType: {}, byPriority: {}, - operationsAnalyzed: operations.length + operationsAnalyzed: operations.length, }, suggestions: [], estimatedEffort: 0, - riskAreas: [] + riskAreas: [], }; - + // Analyze each operation for (let i = 0; i < operations.length; i++) { const operation = operations[i]; - - this.emit('progress', { + + this.emit("progress", { message: `Analyzing operation ${i + 1}/${operations.length}...`, - operation: operation.description || operation.sql?.substring(0, 50) + operation: operation.description || operation.sql?.substring(0, 50), }); - - const requirements = await this.determineTestRequirements(operation, context); - + + const requirements = await this.determineTestRequirements( + operation, + context, + ); + // Add requirements to analysis analysis.requirements.push(...requirements); - + // Update statistics for (const req of requirements) { - analysis.summary.byType[req.type] = (analysis.summary.byType[req.type] || 0) + 1; - analysis.summary.byPriority[req.priority] = (analysis.summary.byPriority[req.priority] || 0) + 1; + analysis.summary.byType[req.type] = + (analysis.summary.byType[req.type] || 0) + 1; + analysis.summary.byPriority[req.priority] = + (analysis.summary.byPriority[req.priority] || 0) + 1; analysis.estimatedEffort += this._estimateTestEffort(req); } - + // Check for risk areas if (this._isHighRiskOperation(operation)) { - analysis.riskAreas.push(operation.description || this._extractOperationDescription(operation)); + analysis.riskAreas.push( + operation.description || this._extractOperationDescription(operation), + ); } } - + // Update total requirements analysis.summary.totalRequirements = analysis.requirements.length; - + // Generate high-level suggestions - analysis.suggestions = this._generateTestingSuggestions(analysis, operations, context); - + analysis.suggestions = this._generateTestingSuggestions( + analysis, + operations, + context, + ); + // Sort requirements by priority - analysis.requirements.sort((a, b) => this._comparePriority(a.priority, b.priority)); - - this.emit('complete', { - message: 'Test requirement analysis complete', + analysis.requirements.sort((a, b) => + this._comparePriority(a.priority, b.priority), + ); + + this.emit("complete", { + message: "Test requirement analysis complete", totalRequirements: analysis.summary.totalRequirements, estimatedEffort: Math.round(analysis.estimatedEffort * 10) / 10, - riskAreas: analysis.riskAreas.length + riskAreas: analysis.riskAreas.length, }); - + return analysis; } - + /** * Validate operation structure before processing * @param {Object} operation - Operation to validate @@ -189,47 +205,57 @@ class 
TestRequirementAnalyzer extends EventEmitter { */ _validateOperation(operation) { // Check operation is an object - if (!operation || typeof operation !== 'object') { - throw new Error('Invalid operation: must be a non-null object'); + if (!operation || typeof operation !== "object") { + throw new ValidationError("Invalid operation: must be a non-null object"); } - + // Check required properties - if (!operation.sql || typeof operation.sql !== 'string') { - throw new Error(`Invalid operation: missing or invalid 'sql' property (got ${typeof operation.sql})`); + if (!operation.sql || typeof operation.sql !== "string") { + throw new ValidationError( + `Invalid operation: missing or invalid 'sql' property (got ${typeof operation.sql})`, + ); } - - if (!operation.type || typeof operation.type !== 'string') { - throw new Error(`Invalid operation: missing or invalid 'type' property (got ${typeof operation.type})`); + + if (!operation.type || typeof operation.type !== "string") { + throw new ValidationError( + `Invalid operation: missing or invalid 'type' property (got ${typeof operation.type})`, + ); } - + // Validate operation type is known - const validTypes = ['SAFE', 'WARNING', 'DESTRUCTIVE']; + const validTypes = ["SAFE", "WARNING", "DESTRUCTIVE"]; if (!validTypes.includes(operation.type)) { - this.emit('warning', { + this.emit("warning", { message: `Unknown operation type: ${operation.type}`, operation: operation, - validTypes: validTypes + validTypes: validTypes, }); } - + // Validate optional properties if present - if (operation.description && typeof operation.description !== 'string') { - throw new Error(`Invalid operation: 'description' must be a string (got ${typeof operation.description})`); + if (operation.description && typeof operation.description !== "string") { + throw new ValidationError( + `Invalid operation: 'description' must be a string (got ${typeof operation.description})`, + ); } - - if (operation.warning && typeof operation.warning !== 'string') { - throw new Error(`Invalid operation: 'warning' must be a string (got ${typeof operation.warning})`); + + if (operation.warning && typeof operation.warning !== "string") { + throw new ValidationError( + `Invalid operation: 'warning' must be a string (got ${typeof operation.warning})`, + ); } - + // Check for malformed SQL (basic validation) if (operation.sql.length === 0) { - throw new Error('Invalid operation: SQL cannot be empty'); + throw new ValidationError("Invalid operation: SQL cannot be empty"); } - + if (operation.sql.length > 100000) { - throw new Error('Invalid operation: SQL exceeds maximum length (100KB)'); + throw new ValidationError( + "Invalid operation: SQL exceeds maximum length (100KB)", + ); } - + return true; } @@ -244,114 +270,160 @@ class TestRequirementAnalyzer extends EventEmitter { try { this._validateOperation(operation); } catch (error) { - this.emit('error', { - message: 'Operation validation failed', + this.emit("error", { + message: "Operation validation failed", error: error.message, - operation: operation + operation: operation, }); throw error; } - + const requirements = []; - + // Base priority based on operation type const basePriority = this._getBasePriority(operation); - + // Extract operation details const operationType = this._categorizeOperation(operation); const target = this._extractTargetObject(operation); - + // Generate requirements based on operation type switch (operationType) { - case 'CREATE_TABLE': - requirements.push(...this._generateTableCreationTests(operation, target, 
basePriority)); + case "CREATE_TABLE": + requirements.push( + ...this._generateTableCreationTests(operation, target, basePriority), + ); break; - - case 'DROP_TABLE': - requirements.push(...this._generateTableDropTests(operation, target, basePriority)); + + case "DROP_TABLE": + requirements.push( + ...this._generateTableDropTests(operation, target, basePriority), + ); break; - - case 'ALTER_TABLE': - requirements.push(...this._generateTableAlterationTests(operation, target, basePriority)); + + case "ALTER_TABLE": + requirements.push( + ...this._generateTableAlterationTests( + operation, + target, + basePriority, + ), + ); break; - - case 'CREATE_INDEX': - requirements.push(...this._generateIndexTests(operation, target, basePriority)); + + case "CREATE_INDEX": + requirements.push( + ...this._generateIndexTests(operation, target, basePriority), + ); break; - - case 'CREATE_FUNCTION': - requirements.push(...this._generateFunctionTests(operation, target, basePriority)); + + case "CREATE_FUNCTION": + requirements.push( + ...this._generateFunctionTests(operation, target, basePriority), + ); break; - - case 'CREATE_POLICY': - requirements.push(...this._generateCreatePolicyTests(operation, target, basePriority)); + + case "CREATE_POLICY": + requirements.push( + ...this._generateCreatePolicyTests(operation, target, basePriority), + ); break; - - case 'ALTER_POLICY': - requirements.push(...this._generateAlterPolicyTests(operation, target, basePriority)); + + case "ALTER_POLICY": + requirements.push( + ...this._generateAlterPolicyTests(operation, target, basePriority), + ); break; - - case 'DROP_POLICY': - requirements.push(...this._generateDropPolicyTests(operation, target, basePriority)); + + case "DROP_POLICY": + requirements.push( + ...this._generateDropPolicyTests(operation, target, basePriority), + ); break; - - case 'ENABLE_RLS': - requirements.push(...this._generateEnableRLSTests(operation, target, basePriority)); + + case "ENABLE_RLS": + requirements.push( + ...this._generateEnableRLSTests(operation, target, basePriority), + ); break; - - case 'DISABLE_RLS': - requirements.push(...this._generateDisableRLSTests(operation, target, basePriority)); + + case "DISABLE_RLS": + requirements.push( + ...this._generateDisableRLSTests(operation, target, basePriority), + ); break; - - case 'CREATE_VIEW': - requirements.push(...this._generateViewTests(operation, target, basePriority)); + + case "CREATE_VIEW": + requirements.push( + ...this._generateViewTests(operation, target, basePriority), + ); break; - - case 'CREATE_ENUM': - requirements.push(...this._generateEnumTests(operation, target, basePriority)); + + case "CREATE_ENUM": + requirements.push( + ...this._generateEnumTests(operation, target, basePriority), + ); break; - - case 'CREATE_TRIGGER': - requirements.push(...this._generateTriggerTests(operation, target, basePriority)); + + case "CREATE_TRIGGER": + requirements.push( + ...this._generateTriggerTests(operation, target, basePriority), + ); break; - - case 'ALTER_TRIGGER': - requirements.push(...this._generateTriggerTests(operation, target, basePriority)); + + case "ALTER_TRIGGER": + requirements.push( + ...this._generateTriggerTests(operation, target, basePriority), + ); break; - - case 'DROP_TRIGGER': - requirements.push(...this._generateTriggerTests(operation, target, basePriority)); + + case "DROP_TRIGGER": + requirements.push( + ...this._generateTriggerTests(operation, target, basePriority), + ); break; - - case 'CREATE_EVENT_TRIGGER': - 
requirements.push(...this._generateTriggerTests(operation, target, basePriority)); + + case "CREATE_EVENT_TRIGGER": + requirements.push( + ...this._generateTriggerTests(operation, target, basePriority), + ); break; - + default: // Generic tests for unclassified operations - requirements.push(...this._generateGenericTests(operation, target, basePriority)); + requirements.push( + ...this._generateGenericTests(operation, target, basePriority), + ); } - + // Add security tests for sensitive operations if (this._requiresSecurityTests(operation)) { - requirements.push(...this._generateSecurityTests(operation, target, basePriority)); + requirements.push( + ...this._generateSecurityTests(operation, target, basePriority), + ); } - + // Add performance tests for performance-impacting operations - if (this.options.requirePerformanceTests && this._requiresPerformanceTests(operation)) { - requirements.push(...this._generatePerformanceTests(operation, target, basePriority)); + if ( + this.options.requirePerformanceTests && + this._requiresPerformanceTests(operation) + ) { + requirements.push( + ...this._generatePerformanceTests(operation, target, basePriority), + ); } - + // Enhance requirements with metadata for (const req of requirements) { req.operation = operation; req.reason = req.reason || this._generateTestReason(req, operation); - req.metadata = req.metadata || this._generateTestMetadata(req, operation, context); + req.metadata = + req.metadata || this._generateTestMetadata(req, operation, context); } - + return requirements; } - + /** * Initialize operation to test type mappings * @private @@ -359,35 +431,39 @@ class TestRequirementAnalyzer extends EventEmitter { */ _initializeTestMappings() { return { - 'CREATE TABLE': [TEST_TYPES.SCHEMA, TEST_TYPES.CONSTRAINT], - 'DROP TABLE': [TEST_TYPES.SCHEMA, TEST_TYPES.DATA], - 'ALTER TABLE': [TEST_TYPES.SCHEMA, TEST_TYPES.CONSTRAINT, TEST_TYPES.DATA], - 'CREATE INDEX': [TEST_TYPES.INDEX, TEST_TYPES.SCHEMA], - 'DROP INDEX': [TEST_TYPES.INDEX], - 'CREATE FUNCTION': [TEST_TYPES.FUNCTION], - 'DROP FUNCTION': [TEST_TYPES.FUNCTION], - 'CREATE POLICY': [TEST_TYPES.RLS, TEST_TYPES.PERMISSION], - 'ALTER POLICY': [TEST_TYPES.RLS, TEST_TYPES.PERMISSION], - 'DROP POLICY': [TEST_TYPES.RLS, TEST_TYPES.PERMISSION], - 'ENABLE RLS': [TEST_TYPES.RLS, TEST_TYPES.PERMISSION], - 'DISABLE RLS': [TEST_TYPES.RLS, TEST_TYPES.PERMISSION], - 'CREATE VIEW': [TEST_TYPES.VIEW, TEST_TYPES.SCHEMA], - 'DROP VIEW': [TEST_TYPES.VIEW], - 'CREATE TYPE': [TEST_TYPES.ENUM, TEST_TYPES.SCHEMA], - 'CREATE TRIGGER': [TEST_TYPES.TRIGGER, TEST_TYPES.FUNCTION], - 'DROP TRIGGER': [TEST_TYPES.TRIGGER] + "CREATE TABLE": [TEST_TYPES.SCHEMA, TEST_TYPES.CONSTRAINT], + "DROP TABLE": [TEST_TYPES.SCHEMA, TEST_TYPES.DATA], + "ALTER TABLE": [ + TEST_TYPES.SCHEMA, + TEST_TYPES.CONSTRAINT, + TEST_TYPES.DATA, + ], + "CREATE INDEX": [TEST_TYPES.INDEX, TEST_TYPES.SCHEMA], + "DROP INDEX": [TEST_TYPES.INDEX], + "CREATE FUNCTION": [TEST_TYPES.FUNCTION], + "DROP FUNCTION": [TEST_TYPES.FUNCTION], + "CREATE POLICY": [TEST_TYPES.RLS, TEST_TYPES.PERMISSION], + "ALTER POLICY": [TEST_TYPES.RLS, TEST_TYPES.PERMISSION], + "DROP POLICY": [TEST_TYPES.RLS, TEST_TYPES.PERMISSION], + "ENABLE RLS": [TEST_TYPES.RLS, TEST_TYPES.PERMISSION], + "DISABLE RLS": [TEST_TYPES.RLS, TEST_TYPES.PERMISSION], + "CREATE VIEW": [TEST_TYPES.VIEW, TEST_TYPES.SCHEMA], + "DROP VIEW": [TEST_TYPES.VIEW], + "CREATE TYPE": [TEST_TYPES.ENUM, TEST_TYPES.SCHEMA], + "CREATE TRIGGER": [TEST_TYPES.TRIGGER, TEST_TYPES.FUNCTION], + "DROP TRIGGER": 
[TEST_TYPES.TRIGGER], }; } - + /** * Generate test requirements for table creation * @private */ _generateTableCreationTests(operation, target, priority) { const requirements = []; - const sql = operation.sql || ''; + const sql = operation.sql || ""; const tableStructure = this._parseTableStructure(sql); - + // Basic table existence test requirements.push({ type: TEST_TYPES.SCHEMA, @@ -396,13 +472,19 @@ class TestRequirementAnalyzer extends EventEmitter { target, testCases: [ `has_table('${target}')`, - `columns_are('${target}', ARRAY[${tableStructure.columns.map(c => `'${c.name}'`).join(', ')}])`, - ...tableStructure.columns.map(col => `col_type_is('${target}', '${col.name}', '${col.type}')`), - ...tableStructure.columns.filter(col => col.notNull).map(col => `col_not_null('${target}', '${col.name}')`), - ...tableStructure.columns.filter(col => col.hasDefault).map(col => `col_has_default('${target}', '${col.name}')`) - ] + `columns_are('${target}', ARRAY[${tableStructure.columns.map((c) => `'${c.name}'`).join(", ")}])`, + ...tableStructure.columns.map( + (col) => `col_type_is('${target}', '${col.name}', '${col.type}')`, + ), + ...tableStructure.columns + .filter((col) => col.notNull) + .map((col) => `col_not_null('${target}', '${col.name}')`), + ...tableStructure.columns + .filter((col) => col.hasDefault) + .map((col) => `col_has_default('${target}', '${col.name}')`), + ], }); - + // Primary key tests if (tableStructure.primaryKeys.length > 0) { requirements.push({ @@ -412,11 +494,13 @@ class TestRequirementAnalyzer extends EventEmitter { target, testCases: [ `has_pk('${target}')`, - ...tableStructure.primaryKeys.map(pk => `col_is_pk('${target}', '${pk}')`) - ] + ...tableStructure.primaryKeys.map( + (pk) => `col_is_pk('${target}', '${pk}')`, + ), + ], }); } - + // Foreign key tests if (tableStructure.foreignKeys.length > 0) { requirements.push({ @@ -425,37 +509,42 @@ class TestRequirementAnalyzer extends EventEmitter { description: `Verify foreign key constraints on table ${target}`, target, testCases: [ - ...tableStructure.foreignKeys.map(fk => `has_fk('${target}', '${fk.column}')`), - ...tableStructure.foreignKeys.map(fk => `fk_ok('${target}', '${fk.column}', '${fk.referencedTable}', '${fk.referencedColumn}')`) - ] + ...tableStructure.foreignKeys.map( + (fk) => `has_fk('${target}', '${fk.column}')`, + ), + ...tableStructure.foreignKeys.map( + (fk) => + `fk_ok('${target}', '${fk.column}', '${fk.referencedTable}', '${fk.referencedColumn}')`, + ), + ], }); } - + // Constraint tests (check constraints, unique constraints) if (this.options.requireConstraintTests) { const constraintTests = []; - + // Check constraints - tableStructure.checkConstraints.forEach(constraint => { + tableStructure.checkConstraints.forEach((constraint) => { constraintTests.push(`has_check('${target}', '${constraint.name}')`); }); - + // Unique constraints - tableStructure.uniqueConstraints.forEach(constraint => { + tableStructure.uniqueConstraints.forEach((constraint) => { constraintTests.push(`has_unique('${target}', '${constraint.name}')`); }); - + if (constraintTests.length > 0) { requirements.push({ type: TEST_TYPES.CONSTRAINT, priority, description: `Verify additional constraints on table ${target}`, target, - testCases: constraintTests + testCases: constraintTests, }); } } - + // Index tests for inline indexes if (tableStructure.indexes.length > 0) { requirements.push({ @@ -464,95 +553,104 @@ class TestRequirementAnalyzer extends EventEmitter { description: `Verify indexes created for table ${target}`, target, 
testCases: [ - ...tableStructure.indexes.map(idx => `has_index('${target}', '${idx.name}', ARRAY[${idx.columns.map(c => `'${c}'`).join(', ')}])`) - ] + ...tableStructure.indexes.map( + (idx) => + `has_index('${target}', '${idx.name}', ARRAY[${idx.columns.map((c) => `'${c}'`).join(", ")}])`, + ), + ], }); } - + return requirements; } - + /** * Generate test requirements for table drops * @private */ _generateTableDropTests(operation, target, priority) { - const sql = operation.sql || ''; - const isCascade = sql.toUpperCase().includes('CASCADE'); - + const sql = operation.sql || ""; + const isCascade = sql.toUpperCase().includes("CASCADE"); + const testCases = [ `hasnt_table('${target}')`, - `-- Verify table no longer exists in schema` + `-- Verify table no longer exists in schema`, ]; - + if (isCascade) { testCases.push( `-- Verify dependent objects were also dropped (CASCADE)`, `-- Check that foreign key references are cleaned up`, `-- Ensure dependent views were dropped`, - `-- Verify dependent functions/triggers were dropped` + `-- Verify dependent functions/triggers were dropped`, ); } else { testCases.push( `-- Verify no dependent objects were affected (RESTRICT)`, `-- Check that foreign key references are handled properly`, - `-- Ensure operation failed if dependencies existed` + `-- Ensure operation failed if dependencies existed`, ); } - - return [{ - type: TEST_TYPES.SCHEMA, - priority: TEST_PRIORITIES.CRITICAL, // Always critical for destructive ops - description: `Verify table ${target} is properly dropped${isCascade ? ' with CASCADE' : ''}`, - target, - testCases - }]; + + return [ + { + type: TEST_TYPES.SCHEMA, + priority: TEST_PRIORITIES.CRITICAL, // Always critical for destructive ops + description: `Verify table ${target} is properly dropped${isCascade ? 
" with CASCADE" : ""}`, + target, + testCases, + }, + ]; } - + /** * Generate test requirements for table alterations * @private */ _generateTableAlterationTests(operation, target, priority) { const requirements = []; - const sql = operation.sql || ''; + const sql = operation.sql || ""; const alterations = this._parseTableAlterations(sql, target); - + // Handle ADD COLUMN operations - alterations.addedColumns.forEach(column => { + alterations.addedColumns.forEach((column) => { const testCases = [ `has_column('${target}', '${column.name}')`, - `col_type_is('${target}', '${column.name}', '${column.type}')` + `col_type_is('${target}', '${column.name}', '${column.type}')`, ]; - + if (column.notNull) { testCases.push(`col_not_null('${target}', '${column.name}')`); } - + if (column.hasDefault) { testCases.push(`col_has_default('${target}', '${column.name}')`); if (column.defaultValue) { - testCases.push(`col_default_is('${target}', '${column.name}', ${column.defaultValue})`); + testCases.push( + `col_default_is('${target}', '${column.name}', ${column.defaultValue})`, + ); } } - + // Add foreign key test if it's a reference column if (column.foreignKey) { testCases.push(`has_fk('${target}', '${column.name}')`); - testCases.push(`fk_ok('${target}', '${column.name}', '${column.foreignKey.referencedTable}', '${column.foreignKey.referencedColumn}')`); + testCases.push( + `fk_ok('${target}', '${column.name}', '${column.foreignKey.referencedTable}', '${column.foreignKey.referencedColumn}')`, + ); } - + requirements.push({ type: TEST_TYPES.SCHEMA, priority, description: `Verify new column '${column.name}' added to ${target}`, target, - testCases + testCases, }); }); - + // Handle DROP COLUMN operations - alterations.droppedColumns.forEach(columnName => { + alterations.droppedColumns.forEach((columnName) => { requirements.push({ type: TEST_TYPES.SCHEMA, priority: TEST_PRIORITIES.CRITICAL, @@ -561,35 +659,35 @@ class TestRequirementAnalyzer extends EventEmitter { testCases: [ `hasnt_column('${target}', '${columnName}')`, `-- Verify remaining columns are intact`, - `-- Check that dependent objects were handled properly` - ] + `-- Check that dependent objects were handled properly`, + ], }); }); - + // Handle ALTER COLUMN TYPE operations - alterations.alteredColumns.forEach(column => { + alterations.alteredColumns.forEach((column) => { const testCases = [ - `col_type_is('${target}', '${column.name}', '${column.newType}')` + `col_type_is('${target}', '${column.name}', '${column.newType}')`, ]; - + // Add data integrity tests for type changes if (column.oldType !== column.newType) { testCases.push( `-- Test data conversion from ${column.oldType} to ${column.newType}`, `-- Verify no data loss occurred during type conversion`, - `-- Test edge cases for type conversion` + `-- Test edge cases for type conversion`, ); } - + requirements.push({ type: TEST_TYPES.DATA, priority: TEST_PRIORITIES.HIGH, description: `Verify column '${column.name}' type change in ${target}`, target, - testCases + testCases, }); }); - + // Handle RENAME TABLE operations if (alterations.renamedTo) { requirements.push({ @@ -601,13 +699,13 @@ class TestRequirementAnalyzer extends EventEmitter { `has_table('${alterations.renamedTo}')`, `hasnt_table('${target}')`, `-- Verify all dependent objects reference new table name`, - `-- Check that foreign key references are updated` - ] + `-- Check that foreign key references are updated`, + ], }); } - + // Handle RENAME COLUMN operations - alterations.renamedColumns.forEach(rename => { + 
alterations.renamedColumns.forEach((rename) => { requirements.push({ type: TEST_TYPES.SCHEMA, priority: TEST_PRIORITIES.HIGH, @@ -617,45 +715,47 @@ class TestRequirementAnalyzer extends EventEmitter { `has_column('${target}', '${rename.newName}')`, `hasnt_column('${target}', '${rename.oldName}')`, `col_type_is('${target}', '${rename.newName}', '${rename.type}')`, - `-- Verify column maintains all constraints and properties` - ] + `-- Verify column maintains all constraints and properties`, + ], }); }); - + // Handle ADD CONSTRAINT operations - alterations.addedConstraints.forEach(constraint => { + alterations.addedConstraints.forEach((constraint) => { const testCases = []; - + switch (constraint.type) { - case 'PRIMARY KEY': + case "PRIMARY KEY": testCases.push(`has_pk('${target}')`); - constraint.columns.forEach(col => { + constraint.columns.forEach((col) => { testCases.push(`col_is_pk('${target}', '${col}')`); }); break; - case 'FOREIGN KEY': + case "FOREIGN KEY": testCases.push(`has_fk('${target}', '${constraint.column}')`); - testCases.push(`fk_ok('${target}', '${constraint.column}', '${constraint.referencedTable}', '${constraint.referencedColumn}')`); + testCases.push( + `fk_ok('${target}', '${constraint.column}', '${constraint.referencedTable}', '${constraint.referencedColumn}')`, + ); break; - case 'UNIQUE': + case "UNIQUE": testCases.push(`has_unique('${target}', '${constraint.name}')`); break; - case 'CHECK': + case "CHECK": testCases.push(`has_check('${target}', '${constraint.name}')`); break; } - + requirements.push({ type: TEST_TYPES.CONSTRAINT, priority, description: `Verify ${constraint.type} constraint added to ${target}`, target, - testCases + testCases, }); }); - + // Handle DROP CONSTRAINT operations - alterations.droppedConstraints.forEach(constraint => { + alterations.droppedConstraints.forEach((constraint) => { requirements.push({ type: TEST_TYPES.CONSTRAINT, priority: TEST_PRIORITIES.CRITICAL, @@ -664,42 +764,55 @@ class TestRequirementAnalyzer extends EventEmitter { testCases: [ `-- Verify constraint ${constraint.name} no longer exists`, `-- Check that dependent functionality still works`, - `-- Test that constraint is truly removed` - ] + `-- Test that constraint is truly removed`, + ], }); }); - + return requirements; } - + /** * Generate test requirements for indexes * @private */ _generateIndexTests(operation, target, priority) { const requirements = []; - const sql = operation.sql?.toUpperCase() || ''; - + const sql = operation.sql?.toUpperCase() || ""; + // Determine index operation type const operationType = this._categorizeIndexOperation(sql); - + switch (operationType) { - case 'CREATE_INDEX': - requirements.push(...this._generateCreateIndexTests(operation, target, priority, sql)); + case "CREATE_INDEX": + requirements.push( + ...this._generateCreateIndexTests(operation, target, priority, sql), + ); break; - - case 'CREATE_UNIQUE_INDEX': - requirements.push(...this._generateCreateUniqueIndexTests(operation, target, priority, sql)); + + case "CREATE_UNIQUE_INDEX": + requirements.push( + ...this._generateCreateUniqueIndexTests( + operation, + target, + priority, + sql, + ), + ); break; - - case 'DROP_INDEX': - requirements.push(...this._generateDropIndexTests(operation, target, priority, sql)); + + case "DROP_INDEX": + requirements.push( + ...this._generateDropIndexTests(operation, target, priority, sql), + ); break; - - case 'ALTER_INDEX': - requirements.push(...this._generateAlterIndexTests(operation, target, priority, sql)); + + case "ALTER_INDEX": + 
requirements.push( + ...this._generateAlterIndexTests(operation, target, priority, sql), + ); break; - + default: // Fallback for generic index operations requirements.push({ @@ -708,20 +821,27 @@ class TestRequirementAnalyzer extends EventEmitter { description: `Verify index ${target} operation`, target, testCases: [ - 'has_index()', - 'Verify index operation completed successfully' - ] + "has_index()", + "Verify index operation completed successfully", + ], }); } - + // Add performance tests for large table considerations if (this._requiresIndexPerformanceTests(sql)) { - requirements.push(...this._generateIndexPerformanceTests(operation, target, priority, sql)); + requirements.push( + ...this._generateIndexPerformanceTests( + operation, + target, + priority, + sql, + ), + ); } - + return requirements; } - + /** * Categorize index operation type * @private @@ -729,13 +849,13 @@ class TestRequirementAnalyzer extends EventEmitter { * @returns {string} Operation category */ _categorizeIndexOperation(sql) { - if (sql.includes('CREATE UNIQUE INDEX')) return 'CREATE_UNIQUE_INDEX'; - if (sql.includes('CREATE INDEX')) return 'CREATE_INDEX'; - if (sql.includes('DROP INDEX')) return 'DROP_INDEX'; - if (sql.includes('ALTER INDEX')) return 'ALTER_INDEX'; - return 'UNKNOWN_INDEX'; + if (sql.includes("CREATE UNIQUE INDEX")) return "CREATE_UNIQUE_INDEX"; + if (sql.includes("CREATE INDEX")) return "CREATE_INDEX"; + if (sql.includes("DROP INDEX")) return "DROP_INDEX"; + if (sql.includes("ALTER INDEX")) return "ALTER_INDEX"; + return "UNKNOWN_INDEX"; } - + /** * Generate test requirements for CREATE INDEX operations * @private @@ -743,7 +863,7 @@ class TestRequirementAnalyzer extends EventEmitter { _generateCreateIndexTests(operation, target, priority, sql) { const requirements = []; const indexDetails = this._parseIndexDetails(sql); - + // Basic index existence tests requirements.push({ type: TEST_TYPES.INDEX, @@ -751,18 +871,18 @@ class TestRequirementAnalyzer extends EventEmitter { description: `Verify index ${target} exists with correct structure`, target, testCases: [ - 'has_index()', - 'index_is_on() for column verification', - 'index_is_type() for index type verification' + "has_index()", + "index_is_on() for column verification", + "index_is_type() for index type verification", ], metadata: { indexType: indexDetails.type, columns: indexDetails.columns, tableName: indexDetails.tableName, - isPartial: indexDetails.isPartial - } + isPartial: indexDetails.isPartial, + }, }); - + // Column-specific tests if (indexDetails.columns && indexDetails.columns.length > 0) { requirements.push({ @@ -770,14 +890,17 @@ class TestRequirementAnalyzer extends EventEmitter { priority, description: `Verify index ${target} column mappings`, target, - testCases: indexDetails.columns.map(col => `index_is_on('${indexDetails.tableName}', '${target}', '${col}')`), + testCases: indexDetails.columns.map( + (col) => + `index_is_on('${indexDetails.tableName}', '${target}', '${col}')`, + ), metadata: { columns: indexDetails.columns, - tableName: indexDetails.tableName - } + tableName: indexDetails.tableName, + }, }); } - + // Partial index condition tests if (indexDetails.isPartial && indexDetails.whereClause) { requirements.push({ @@ -786,17 +909,17 @@ class TestRequirementAnalyzer extends EventEmitter { description: `Verify partial index ${target} condition`, target, testCases: [ - 'Test index is used only when condition is met', - 'Test index is not used when condition is not met', - 'Verify partial index filter condition 
accuracy' + "Test index is used only when condition is met", + "Test index is not used when condition is not met", + "Verify partial index filter condition accuracy", ], metadata: { whereClause: indexDetails.whereClause, - isPartial: true - } + isPartial: true, + }, }); } - + // Index type-specific tests if (indexDetails.type) { requirements.push({ @@ -805,17 +928,17 @@ class TestRequirementAnalyzer extends EventEmitter { description: `Verify index ${target} type is ${indexDetails.type}`, target, testCases: [ - `index_is_type('${indexDetails.tableName}', '${target}', '${indexDetails.type}')` + `index_is_type('${indexDetails.tableName}', '${target}', '${indexDetails.type}')`, ], metadata: { - indexType: indexDetails.type - } + indexType: indexDetails.type, + }, }); } - + return requirements; } - + /** * Generate test requirements for CREATE UNIQUE INDEX operations * @private @@ -823,10 +946,12 @@ class TestRequirementAnalyzer extends EventEmitter { _generateCreateUniqueIndexTests(operation, target, priority, sql) { const requirements = []; const indexDetails = this._parseIndexDetails(sql); - + // Include all regular index tests - requirements.push(...this._generateCreateIndexTests(operation, target, priority, sql)); - + requirements.push( + ...this._generateCreateIndexTests(operation, target, priority, sql), + ); + // Unique constraint validation tests requirements.push({ type: TEST_TYPES.CONSTRAINT, @@ -834,18 +959,18 @@ class TestRequirementAnalyzer extends EventEmitter { description: `Verify unique constraint enforcement for index ${target}`, target, testCases: [ - 'has_unique()', - 'Test unique constraint allows distinct values', - 'Test unique constraint rejects duplicate values', - 'Test NULL value handling in unique constraint' + "has_unique()", + "Test unique constraint allows distinct values", + "Test unique constraint rejects duplicate values", + "Test NULL value handling in unique constraint", ], metadata: { isUnique: true, columns: indexDetails.columns, - tableName: indexDetails.tableName - } + tableName: indexDetails.tableName, + }, }); - + // Unique constraint violation tests if (indexDetails.columns && indexDetails.columns.length > 0) { requirements.push({ @@ -854,22 +979,22 @@ class TestRequirementAnalyzer extends EventEmitter { description: `Test unique constraint violations for ${target}`, target, testCases: [ - 'Test INSERT with duplicate values fails', - 'Test UPDATE creating duplicates fails', - 'Test constraint error messages are appropriate', - 'Test partial unique constraints (if applicable)' + "Test INSERT with duplicate values fails", + "Test UPDATE creating duplicates fails", + "Test constraint error messages are appropriate", + "Test partial unique constraints (if applicable)", ], metadata: { - constraintType: 'unique', + constraintType: "unique", columns: indexDetails.columns, - violationTests: true - } + violationTests: true, + }, }); } - + return requirements; } - + /** * Generate test requirements for DROP INDEX operations * @private @@ -877,7 +1002,7 @@ class TestRequirementAnalyzer extends EventEmitter { _generateDropIndexTests(operation, target, priority, sql) { const requirements = []; const indexDetails = this._parseIndexDetails(sql); - + // Index removal verification requirements.push({ type: TEST_TYPES.INDEX, @@ -885,35 +1010,35 @@ class TestRequirementAnalyzer extends EventEmitter { description: `Verify index ${target} is properly removed`, target, testCases: [ - 'hasnt_index()', - 'Verify dependent queries still function', - 'Check for performance 
impact after removal' + "hasnt_index()", + "Verify dependent queries still function", + "Check for performance impact after removal", ], metadata: { - operation: 'drop', - tableName: indexDetails.tableName - } + operation: "drop", + tableName: indexDetails.tableName, + }, }); - + // If it was a unique index, verify unique constraint is also removed - if (sql.includes('UNIQUE')) { + if (sql.includes("UNIQUE")) { requirements.push({ type: TEST_TYPES.CONSTRAINT, priority: TEST_PRIORITIES.HIGH, description: `Verify unique constraint removed with index ${target}`, target, testCases: [ - 'hasnt_unique()', - 'Test duplicate values are now allowed', - 'Verify constraint-dependent code still works' + "hasnt_unique()", + "Test duplicate values are now allowed", + "Verify constraint-dependent code still works", ], metadata: { wasUnique: true, - constraintRemoved: true - } + constraintRemoved: true, + }, }); } - + // Performance impact tests requirements.push({ type: TEST_TYPES.INDEX, @@ -921,19 +1046,19 @@ class TestRequirementAnalyzer extends EventEmitter { description: `Verify performance impact of removing index ${target}`, target, testCases: [ - 'Measure query performance after index removal', - 'Identify queries that may be affected', - 'Verify no critical performance regressions' + "Measure query performance after index removal", + "Identify queries that may be affected", + "Verify no critical performance regressions", ], metadata: { performanceTest: true, - expectedImpact: 'degradation' - } + expectedImpact: "degradation", + }, }); - + return requirements; } - + /** * Generate test requirements for ALTER INDEX operations * @private @@ -941,7 +1066,7 @@ class TestRequirementAnalyzer extends EventEmitter { _generateAlterIndexTests(operation, target, priority, sql) { const requirements = []; const indexDetails = this._parseIndexDetails(sql); - + // Index alteration verification requirements.push({ type: TEST_TYPES.INDEX, @@ -949,18 +1074,18 @@ class TestRequirementAnalyzer extends EventEmitter { description: `Verify index ${target} alteration applied correctly`, target, testCases: [ - 'has_index()', - 'Verify index properties after alteration', - 'Test index functionality after changes' + "has_index()", + "Verify index properties after alteration", + "Test index functionality after changes", ], metadata: { - operation: 'alter', - tableName: indexDetails.tableName - } + operation: "alter", + tableName: indexDetails.tableName, + }, }); - + // Rename operations - if (sql.includes('RENAME TO')) { + if (sql.includes("RENAME TO")) { const newName = this._extractNewIndexName(sql); requirements.push({ type: TEST_TYPES.SCHEMA, @@ -970,37 +1095,37 @@ class TestRequirementAnalyzer extends EventEmitter { testCases: [ `has_index('${indexDetails.tableName}', '${newName}')`, `hasnt_index('${indexDetails.tableName}', '${target}')`, - 'Verify dependent objects reference new name' + "Verify dependent objects reference new name", ], metadata: { oldName: target, newName: newName, - operation: 'rename' - } + operation: "rename", + }, }); } - + // Tablespace or other property changes - if (sql.includes('SET TABLESPACE') || sql.includes('SET (')) { + if (sql.includes("SET TABLESPACE") || sql.includes("SET (")) { requirements.push({ type: TEST_TYPES.INDEX, priority: TEST_PRIORITIES.MEDIUM, description: `Verify index ${target} property changes`, target, testCases: [ - 'Verify tablespace assignment (if applicable)', - 'Test index parameters are updated', - 'Verify index performance characteristics' + "Verify tablespace 
assignment (if applicable)", + "Test index parameters are updated", + "Verify index performance characteristics", ], metadata: { - propertyChange: true - } + propertyChange: true, + }, }); } - + return requirements; } - + /** * Generate performance test requirements for indexes * @private @@ -1008,26 +1133,26 @@ class TestRequirementAnalyzer extends EventEmitter { _generateIndexPerformanceTests(operation, target, priority, sql) { const requirements = []; const indexDetails = this._parseIndexDetails(sql); - + requirements.push({ type: TEST_TYPES.INDEX, priority: TEST_PRIORITIES.MEDIUM, description: `Performance testing for index ${target}`, target, testCases: [ - 'Measure query execution plans before/after', - 'Test index selectivity and effectiveness', - 'Benchmark with realistic data volumes', - 'Verify index is being utilized by optimizer' + "Measure query execution plans before/after", + "Test index selectivity and effectiveness", + "Benchmark with realistic data volumes", + "Verify index is being utilized by optimizer", ], metadata: { performanceTest: true, tableName: indexDetails.tableName, - suggestedDataVolume: 'large', - measureMetrics: ['execution_time', 'index_usage', 'selectivity'] - } + suggestedDataVolume: "large", + measureMetrics: ["execution_time", "index_usage", "selectivity"], + }, }); - + // Large table considerations if (this._isLargeTableOperation(indexDetails.tableName)) { requirements.push({ @@ -1036,21 +1161,25 @@ class TestRequirementAnalyzer extends EventEmitter { description: `Large table performance considerations for ${target}`, target, testCases: [ - 'Test index creation time on large dataset', - 'Verify maintenance overhead is acceptable', - 'Test concurrent access during index operations', - 'Monitor storage space impact' + "Test index creation time on large dataset", + "Verify maintenance overhead is acceptable", + "Test concurrent access during index operations", + "Monitor storage space impact", ], metadata: { largeTable: true, - performanceConsiderations: ['creation_time', 'maintenance_overhead', 'storage_impact'] - } + performanceConsiderations: [ + "creation_time", + "maintenance_overhead", + "storage_impact", + ], + }, }); } - + return requirements; } - + /** * Parse index details from SQL statement * @private @@ -1064,45 +1193,45 @@ class TestRequirementAnalyzer extends EventEmitter { tableName: null, isPartial: false, whereClause: null, - isUnique: false + isUnique: false, }; - + // Extract table name let match = sql.match(/ON\s+([^\s(]+)/i); if (match) { details.tableName = match[1]; } - + // Extract columns match = sql.match(/\(([^)]+)\)/); if (match) { details.columns = match[1] - .split(',') - .map(col => col.trim()) - .map(col => col.replace(/["'`]/g, '')); // Remove quotes + .split(",") + .map((col) => col.trim()) + .map((col) => col.replace(/["'`]/g, "")); // Remove quotes } - + // Check for index type match = sql.match(/USING\s+(\w+)/i); if (match) { details.type = match[1].toLowerCase(); } else { - details.type = 'btree'; // Default PostgreSQL index type + details.type = "btree"; // Default PostgreSQL index type } - + // Check if unique - details.isUnique = sql.includes('UNIQUE'); - + details.isUnique = sql.includes("UNIQUE"); + // Check for partial index (WHERE clause) match = sql.match(/WHERE\s+(.+?)(?:$|\s*;)/i); if (match) { details.isPartial = true; details.whereClause = match[1].trim(); } - + return details; } - + /** * Extract new index name from ALTER INDEX RENAME statement * @private @@ -1111,9 +1240,9 @@ class 
TestRequirementAnalyzer extends EventEmitter { */ _extractNewIndexName(sql) { const match = sql.match(/RENAME TO\s+([^\s;]+)/i); - return match ? match[1] : 'unknown_name'; + return match ? match[1] : "unknown_name"; } - + /** * Check if index operation requires performance tests * @private @@ -1125,12 +1254,14 @@ class TestRequirementAnalyzer extends EventEmitter { // - Indexes on likely large tables // - Complex expressions or functions in indexes // - Partial indexes with complex conditions - return sql.includes('CREATE INDEX') && - (sql.includes('WHERE') || // Partial index - sql.includes('(') && sql.includes('||') || // Expression index - this.options.requirePerformanceTests); + return ( + sql.includes("CREATE INDEX") && + (sql.includes("WHERE") || // Partial index + (sql.includes("(") && sql.includes("||")) || // Expression index + this.options.requirePerformanceTests) + ); } - + /** * Determine if operation is on a large table (heuristic) * @private @@ -1139,7 +1270,7 @@ class TestRequirementAnalyzer extends EventEmitter { */ _isLargeTableOperation(tableName) { if (!tableName) return false; - + // Heuristic: tables with certain naming patterns are likely large const largeTablePatterns = [ /events?$/i, @@ -1149,79 +1280,112 @@ class TestRequirementAnalyzer extends EventEmitter { /analytics?$/i, /audit/i, /history$/i, - /metrics?$/i + /metrics?$/i, ]; - - return largeTablePatterns.some(pattern => pattern.test(tableName)); + + return largeTablePatterns.some((pattern) => pattern.test(tableName)); } - + /** * Generate test requirements for functions * @private */ _generateFunctionTests(operation, target, priority) { - const sql = (operation.sql || '').toUpperCase(); + const sql = (operation.sql || "").toUpperCase(); const requirements = []; - + // Determine specific function operation type const functionOperation = this._categorizeFunction(operation); - + // Extract function metadata const functionMetadata = this._extractFunctionMetadata(operation); - + // Base function existence and structure tests switch (functionOperation) { - case 'CREATE_FUNCTION': - case 'CREATE_OR_REPLACE_FUNCTION': - requirements.push(...this._generateFunctionCreationTests(operation, target, priority, functionMetadata)); + case "CREATE_FUNCTION": + case "CREATE_OR_REPLACE_FUNCTION": + requirements.push( + ...this._generateFunctionCreationTests( + operation, + target, + priority, + functionMetadata, + ), + ); break; - - case 'DROP_FUNCTION': - requirements.push(...this._generateFunctionDropTests(operation, target, priority)); + + case "DROP_FUNCTION": + requirements.push( + ...this._generateFunctionDropTests(operation, target, priority), + ); break; - - case 'ALTER_FUNCTION': - requirements.push(...this._generateFunctionAlterationTests(operation, target, priority, functionMetadata)); + + case "ALTER_FUNCTION": + requirements.push( + ...this._generateFunctionAlterationTests( + operation, + target, + priority, + functionMetadata, + ), + ); break; - + default: - requirements.push(...this._generateGenericFunctionTests(operation, target, priority)); + requirements.push( + ...this._generateGenericFunctionTests(operation, target, priority), + ); } - + // Add Supabase RPC-specific tests if applicable if (this._isSupabaseRpcFunction(operation, functionMetadata)) { - requirements.push(...this._generateSupabaseRpcTests(operation, target, priority, functionMetadata)); + requirements.push( + ...this._generateSupabaseRpcTests( + operation, + target, + priority, + functionMetadata, + ), + ); } - + // Add security tests for 
security definer functions if (functionMetadata.securityDefiner) { - requirements.push(...this._generateFunctionSecurityTests(operation, target, priority, functionMetadata)); + requirements.push( + ...this._generateFunctionSecurityTests( + operation, + target, + priority, + functionMetadata, + ), + ); } - + return requirements; } - + /** * Categorize function operations for specific test mapping * @private */ _categorizeFunction(operation) { - const sql = (operation.sql || '').toUpperCase(); - - if (sql.includes('CREATE OR REPLACE FUNCTION')) return 'CREATE_OR_REPLACE_FUNCTION'; - if (sql.includes('CREATE FUNCTION')) return 'CREATE_FUNCTION'; - if (sql.includes('DROP FUNCTION')) return 'DROP_FUNCTION'; - if (sql.includes('ALTER FUNCTION')) return 'ALTER_FUNCTION'; - - return 'UNKNOWN_FUNCTION'; + const sql = (operation.sql || "").toUpperCase(); + + if (sql.includes("CREATE OR REPLACE FUNCTION")) + return "CREATE_OR_REPLACE_FUNCTION"; + if (sql.includes("CREATE FUNCTION")) return "CREATE_FUNCTION"; + if (sql.includes("DROP FUNCTION")) return "DROP_FUNCTION"; + if (sql.includes("ALTER FUNCTION")) return "ALTER_FUNCTION"; + + return "UNKNOWN_FUNCTION"; } - + /** * Extract function metadata from SQL operation * @private */ _extractFunctionMetadata(operation) { - const sql = operation.sql || ''; + const sql = operation.sql || ""; const metadata = { parameterTypes: [], returnType: null, @@ -1229,50 +1393,52 @@ class TestRequirementAnalyzer extends EventEmitter { securityDefiner: false, isVolatile: null, language: null, - hasParameters: false + hasParameters: false, }; - + // Extract parameter types const paramMatch = sql.match(/\((.*?)\)\s*RETURNS/i); if (paramMatch && paramMatch[1].trim()) { metadata.hasParameters = true; // Basic parameter extraction - can be enhanced - metadata.parameterTypes = paramMatch[1].split(',').map(p => p.trim().split(' ').pop()); + metadata.parameterTypes = paramMatch[1] + .split(",") + .map((p) => p.trim().split(" ").pop()); } - + // Extract return type const returnMatch = sql.match(/RETURNS\s+([^\s]+)/i); if (returnMatch) { metadata.returnType = returnMatch[1]; } - + // Check for security definer metadata.securityDefiner = /SECURITY\s+DEFINER/i.test(sql); - + // Check volatility if (/VOLATILE/i.test(sql)) metadata.isVolatile = true; else if (/STABLE/i.test(sql)) metadata.isVolatile = false; else if (/IMMUTABLE/i.test(sql)) metadata.isVolatile = false; - + // Extract language const langMatch = sql.match(/LANGUAGE\s+(\w+)/i); if (langMatch) { metadata.language = langMatch[1].toLowerCase(); } - + // Check if it's likely an RPC function (exposed via API) metadata.isRpcFunction = this._isLikelyRpcFunction(sql, metadata); - + return metadata; } - + /** * Generate tests for function creation (CREATE FUNCTION or CREATE OR REPLACE) * @private */ _generateFunctionCreationTests(operation, target, priority, metadata) { const requirements = []; - + // Core function existence and structure requirements.push({ type: TEST_TYPES.FUNCTION, @@ -1280,154 +1446,172 @@ class TestRequirementAnalyzer extends EventEmitter { description: `Verify function ${target} exists with correct structure`, target, testCases: [ - 'has_function() - function exists', - 'function_returns() - return type validation', - ...(metadata.hasParameters ? ['function_args() - parameter validation'] : []), - ...(metadata.language ? [`function_lang_is() - language is ${metadata.language}`] : []), - ...(metadata.securityDefiner ? ['is_definer() - security definer check'] : []), - ...(metadata.isVolatile !== null ? 
[`volatility_is() - volatility check`] : []) + "has_function() - function exists", + "function_returns() - return type validation", + ...(metadata.hasParameters + ? ["function_args() - parameter validation"] + : []), + ...(metadata.language + ? [`function_lang_is() - language is ${metadata.language}`] + : []), + ...(metadata.securityDefiner + ? ["is_definer() - security definer check"] + : []), + ...(metadata.isVolatile !== null + ? [`volatility_is() - volatility check`] + : []), ], metadata: { functionMetadata: metadata, - testType: 'structure' - } + testType: "structure", + }, }); - + // Behavioral testing requirements.push({ type: TEST_TYPES.FUNCTION, - priority: priority === TEST_PRIORITIES.CRITICAL ? TEST_PRIORITIES.HIGH : TEST_PRIORITIES.MEDIUM, + priority: + priority === TEST_PRIORITIES.CRITICAL + ? TEST_PRIORITIES.HIGH + : TEST_PRIORITIES.MEDIUM, description: `Verify function ${target} behavior and logic`, target, testCases: [ - 'Test with valid input parameters', - 'Test return value correctness', - 'Test error handling for invalid inputs', - 'Test edge cases and boundary conditions', - ...(metadata.returnType === 'SETOF' || metadata.returnType?.includes('[]') ? ['Test result set completeness'] : []) + "Test with valid input parameters", + "Test return value correctness", + "Test error handling for invalid inputs", + "Test edge cases and boundary conditions", + ...(metadata.returnType === "SETOF" || + metadata.returnType?.includes("[]") + ? ["Test result set completeness"] + : []), ], metadata: { functionMetadata: metadata, - testType: 'behavior' - } + testType: "behavior", + }, }); - + return requirements; } - + /** * Generate tests for function drops * @private */ _generateFunctionDropTests(operation, target, priority) { - return [{ - type: TEST_TYPES.FUNCTION, - priority: TEST_PRIORITIES.CRITICAL, - description: `Verify function ${target} is properly dropped`, - target, - testCases: [ - 'hasnt_function() - function no longer exists', - 'Verify dependent objects are handled', - 'Check cascade behavior if applicable', - 'Verify no orphaned permissions remain' - ], - metadata: { - testType: 'removal' - } - }]; + return [ + { + type: TEST_TYPES.FUNCTION, + priority: TEST_PRIORITIES.CRITICAL, + description: `Verify function ${target} is properly dropped`, + target, + testCases: [ + "hasnt_function() - function no longer exists", + "Verify dependent objects are handled", + "Check cascade behavior if applicable", + "Verify no orphaned permissions remain", + ], + metadata: { + testType: "removal", + }, + }, + ]; } - + /** * Generate tests for function alterations * @private */ _generateFunctionAlterationTests(operation, target, priority, metadata) { - const sql = (operation.sql || '').toUpperCase(); + const sql = (operation.sql || "").toUpperCase(); const requirements = []; - - if (sql.includes('RENAME TO')) { + + if (sql.includes("RENAME TO")) { requirements.push({ type: TEST_TYPES.FUNCTION, priority: TEST_PRIORITIES.HIGH, description: `Verify function rename from ${target}`, target, testCases: [ - 'hasnt_function() - old function name gone', - 'has_function() - new function name exists', - 'Verify function signature unchanged', - 'Test function behavior unchanged' + "hasnt_function() - old function name gone", + "has_function() - new function name exists", + "Verify function signature unchanged", + "Test function behavior unchanged", ], metadata: { - testType: 'rename' - } + testType: "rename", + }, }); } - - if (sql.includes('OWNER TO')) { + + if (sql.includes("OWNER TO")) { 
requirements.push({ type: TEST_TYPES.FUNCTION, priority: TEST_PRIORITIES.MEDIUM, description: `Verify function ${target} ownership change`, target, testCases: [ - 'Verify new owner has correct permissions', - 'Test function accessibility from different roles', - 'Verify function behavior unchanged' + "Verify new owner has correct permissions", + "Test function accessibility from different roles", + "Verify function behavior unchanged", ], metadata: { - testType: 'ownership' - } + testType: "ownership", + }, }); } - - if (sql.includes('SET') || sql.includes('RESET')) { + + if (sql.includes("SET") || sql.includes("RESET")) { requirements.push({ type: TEST_TYPES.FUNCTION, priority: TEST_PRIORITIES.MEDIUM, description: `Verify function ${target} configuration changes`, target, testCases: [ - 'Test function behavior with new settings', - 'Verify configuration parameters applied', - 'Test performance impact of changes' + "Test function behavior with new settings", + "Verify configuration parameters applied", + "Test performance impact of changes", ], metadata: { - testType: 'configuration' - } + testType: "configuration", + }, }); } - + return requirements; } - + /** * Generate generic function tests for unknown operations * @private */ _generateGenericFunctionTests(operation, target, priority) { - return [{ - type: TEST_TYPES.FUNCTION, - priority: TEST_PRIORITIES.LOW, - description: `Verify function ${target} after operation`, - target, - testCases: [ - 'has_function() - function exists', - 'Test basic function execution', - 'Verify no unexpected side effects' - ], - metadata: { - testType: 'generic' - } - }]; + return [ + { + type: TEST_TYPES.FUNCTION, + priority: TEST_PRIORITIES.LOW, + description: `Verify function ${target} after operation`, + target, + testCases: [ + "has_function() - function exists", + "Test basic function execution", + "Verify no unexpected side effects", + ], + metadata: { + testType: "generic", + }, + }, + ]; } - + /** * Generate Supabase RPC-specific tests * @private */ _generateSupabaseRpcTests(operation, target, priority, metadata) { const requirements = []; - + // API accessibility tests requirements.push({ type: TEST_TYPES.FUNCTION, @@ -1435,19 +1619,19 @@ class TestRequirementAnalyzer extends EventEmitter { description: `Verify Supabase RPC function ${target} API access`, target, testCases: [ - 'Test function call with authenticated user', - 'Test function call with anonymous user', - 'Test function parameter validation via RPC', - 'Test return structure matches expected format', - 'Test function accessibility via PostgREST' + "Test function call with authenticated user", + "Test function call with anonymous user", + "Test function parameter validation via RPC", + "Test return structure matches expected format", + "Test function accessibility via PostgREST", ], metadata: { functionMetadata: metadata, - testType: 'supabase_rpc', - requiresSupabaseClient: true - } + testType: "supabase_rpc", + requiresSupabaseClient: true, + }, }); - + // Security and permissions for RPC requirements.push({ type: TEST_TYPES.PERMISSION, @@ -1455,19 +1639,19 @@ class TestRequirementAnalyzer extends EventEmitter { description: `Verify Supabase RPC function ${target} security`, target, testCases: [ - 'Test RLS enforcement in function calls', - 'Test unauthorized access scenarios', - 'Test parameter injection protection', - 'Verify rate limiting (if applicable)', - 'Test with different user contexts' + "Test RLS enforcement in function calls", + "Test unauthorized access scenarios", + 
"Test parameter injection protection", + "Verify rate limiting (if applicable)", + "Test with different user contexts", ], metadata: { functionMetadata: metadata, - testType: 'supabase_security', - requiresSupabaseClient: true - } + testType: "supabase_security", + requiresSupabaseClient: true, + }, }); - + // Error handling for RPC requirements.push({ type: TEST_TYPES.FUNCTION, @@ -1475,48 +1659,50 @@ class TestRequirementAnalyzer extends EventEmitter { description: `Verify Supabase RPC function ${target} error handling`, target, testCases: [ - 'Test error response format via RPC', - 'Test invalid parameter handling', - 'Test timeout scenarios', - 'Test connection error handling', - 'Verify error messages are user-friendly' + "Test error response format via RPC", + "Test invalid parameter handling", + "Test timeout scenarios", + "Test connection error handling", + "Verify error messages are user-friendly", ], metadata: { functionMetadata: metadata, - testType: 'supabase_errors', - requiresSupabaseClient: true - } + testType: "supabase_errors", + requiresSupabaseClient: true, + }, }); - + return requirements; } - + /** * Generate security tests for security definer functions * @private */ _generateFunctionSecurityTests(operation, target, priority, metadata) { - return [{ - type: TEST_TYPES.PERMISSION, - priority: TEST_PRIORITIES.CRITICAL, - description: `Verify security definer function ${target} security`, - target, - testCases: [ - 'is_definer() - verify security definer setting', - 'Test function executes with definer privileges', - 'Test privilege escalation protection', - 'Verify input parameter sanitization', - 'Test SQL injection protection', - 'Test with different invoker roles' - ], - metadata: { - functionMetadata: metadata, - testType: 'security_definer', - securityCritical: true - } - }]; + return [ + { + type: TEST_TYPES.PERMISSION, + priority: TEST_PRIORITIES.CRITICAL, + description: `Verify security definer function ${target} security`, + target, + testCases: [ + "is_definer() - verify security definer setting", + "Test function executes with definer privileges", + "Test privilege escalation protection", + "Verify input parameter sanitization", + "Test SQL injection protection", + "Test with different invoker roles", + ], + metadata: { + functionMetadata: metadata, + testType: "security_definer", + securityCritical: true, + }, + }, + ]; } - + /** * Determine if function is likely RPC-accessible based on characteristics * @private @@ -1530,63 +1716,90 @@ class TestRequirementAnalyzer extends EventEmitter { /delete_.*\(/i, /search_.*\(/i, /list_.*\(/i, - /find_.*\(/i + /find_.*\(/i, ]; - - return apiPatterns.some(pattern => pattern.test(sql)) || - metadata.language === 'plpgsql' || - metadata.returnType?.toLowerCase().includes('json'); + + return ( + apiPatterns.some((pattern) => pattern.test(sql)) || + metadata.language === "plpgsql" || + metadata.returnType?.toLowerCase().includes("json") + ); } - + /** * Check if operation creates a Supabase RPC function * @private */ _isSupabaseRpcFunction(operation, metadata) { if (!metadata) return false; - - const sql = operation.sql || ''; - + + const sql = operation.sql || ""; + // Skip system schema functions - if (sql.includes('auth.') || sql.includes('storage.') || sql.includes('realtime.') || sql.includes('supabase_functions.')) { + if ( + sql.includes("auth.") || + sql.includes("storage.") || + sql.includes("realtime.") || + sql.includes("supabase_functions.") + ) { return false; } - + // Functions in public schema are typically 
RPC-accessible - if (sql.includes('public.') || (!sql.includes('.') && !sql.includes('CREATE FUNCTION auth.') && !sql.includes('CREATE FUNCTION storage.'))) { + if ( + sql.includes("public.") || + (!sql.includes(".") && + !sql.includes("CREATE FUNCTION auth.") && + !sql.includes("CREATE FUNCTION storage.")) + ) { return true; } - + // Functions with simple parameter types are more likely to be RPC - if (metadata.parameterTypes.length === 0 || - metadata.parameterTypes.every(type => ['text', 'integer', 'boolean', 'json', 'jsonb', 'uuid'].includes(type.toLowerCase()))) { + if ( + metadata.parameterTypes.length === 0 || + metadata.parameterTypes.every((type) => + ["text", "integer", "boolean", "json", "jsonb", "uuid"].includes( + type.toLowerCase(), + ), + ) + ) { return true; } - + // Functions returning JSON or simple types - if (metadata.returnType && ['json', 'jsonb', 'text', 'integer', 'boolean', 'uuid'].includes(metadata.returnType.toLowerCase())) { + if ( + metadata.returnType && + ["json", "jsonb", "text", "integer", "boolean", "uuid"].includes( + metadata.returnType.toLowerCase(), + ) + ) { return true; } - + // Functions in public schema or without schema qualifier are likely RPC - return metadata.isRpcFunction || - sql.includes('public.') || - (!sql.includes('.') && !sql.includes('pg_') && !sql.includes('information_schema')); + return ( + metadata.isRpcFunction || + sql.includes("public.") || + (!sql.includes(".") && + !sql.includes("pg_") && + !sql.includes("information_schema")) + ); } - + /** * Generate test requirements for CREATE POLICY operations * @private */ _generateCreatePolicyTests(operation, policyName, priority) { const requirements = []; - const sql = operation.sql || ''; + const sql = operation.sql || ""; const tableMatch = sql.match(/ON\s+([^\s(]+)/i); - const tableName = tableMatch ? tableMatch[1] : 'unknown_table'; - + const tableName = tableMatch ? 
tableMatch[1] : "unknown_table"; + // Extract policy details const policyDetails = this._extractPolicyDetails(sql); - + // Core policy existence test requirements.push({ type: TEST_TYPES.RLS, @@ -1594,9 +1807,9 @@ class TestRequirementAnalyzer extends EventEmitter { description: `Verify policy ${policyName} exists and is properly configured`, target: policyName, testCases: [ - 'policy_exists() - verify policy was created', - 'policy_cmd_is() - verify policy applies to correct commands', - 'policy_roles_are() - verify policy applies to correct roles' + "policy_exists() - verify policy was created", + "policy_cmd_is() - verify policy applies to correct commands", + "policy_roles_are() - verify policy applies to correct roles", ], metadata: { tableName, @@ -1604,28 +1817,26 @@ class TestRequirementAnalyzer extends EventEmitter { commands: policyDetails.commands, roles: policyDetails.roles, isPermissive: policyDetails.isPermissive, - testType: 'policy_creation' - } + testType: "policy_creation", + }, }); - + // RLS enablement test requirements.push({ type: TEST_TYPES.RLS, priority: TEST_PRIORITIES.CRITICAL, description: `Verify RLS is enabled on table ${tableName}`, target: tableName, - testCases: [ - 'is_rls_enabled() - ensure RLS is active on the table' - ], + testCases: ["is_rls_enabled() - ensure RLS is active on the table"], metadata: { tableName, - testType: 'rls_enablement', - reason: `Policy ${policyName} requires RLS to be enabled` - } + testType: "rls_enablement", + reason: `Policy ${policyName} requires RLS to be enabled`, + }, }); - + // Security boundary tests for different user roles - const userRoles = ['anon', 'authenticated', 'service_role']; + const userRoles = ["anon", "authenticated", "service_role"]; for (const role of userRoles) { requirements.push({ type: TEST_TYPES.PERMISSION, @@ -1634,36 +1845,39 @@ class TestRequirementAnalyzer extends EventEmitter { target: `${policyName}_${role}`, testCases: [ `results_eq() - test data visibility as ${role}`, - 'Test authorized operations are allowed', - 'Test unauthorized operations are blocked', - 'Test edge cases and boundary conditions' + "Test authorized operations are allowed", + "Test unauthorized operations are blocked", + "Test edge cases and boundary conditions", ], metadata: { tableName, policyName, testRole: role, commands: policyDetails.commands, - testType: 'security_boundary', - testScenarios: this._generateSecurityTestScenarios(policyDetails, role) - } + testType: "security_boundary", + testScenarios: this._generateSecurityTestScenarios( + policyDetails, + role, + ), + }, }); } - + return requirements; } - + /** * Generate test requirements for ALTER POLICY operations * @private */ _generateAlterPolicyTests(operation, policyName, priority) { const requirements = []; - const sql = operation.sql || ''; + const sql = operation.sql || ""; const tableMatch = sql.match(/ON\s+([^\s(]+)/i); - const tableName = tableMatch ? tableMatch[1] : 'unknown_table'; - + const tableName = tableMatch ? 
tableMatch[1] : "unknown_table"; + const policyDetails = this._extractPolicyDetails(sql); - + // Policy validation after alteration requirements.push({ type: TEST_TYPES.RLS, @@ -1671,21 +1885,24 @@ class TestRequirementAnalyzer extends EventEmitter { description: `Verify altered policy ${policyName} configuration`, target: policyName, testCases: [ - 'policy_exists() - verify policy still exists after alteration', - 'policy_cmd_is() - verify updated command restrictions', - 'policy_roles_are() - verify updated role assignments' + "policy_exists() - verify policy still exists after alteration", + "policy_cmd_is() - verify updated command restrictions", + "policy_roles_are() - verify updated role assignments", ], metadata: { tableName, policyName, commands: policyDetails.commands, roles: policyDetails.roles, - testType: 'policy_alteration' - } + testType: "policy_alteration", + }, }); - + // Re-test security boundaries with updated policy - const userRoles = policyDetails.roles.length > 0 ? policyDetails.roles : ['anon', 'authenticated']; + const userRoles = + policyDetails.roles.length > 0 + ? policyDetails.roles + : ["anon", "authenticated"]; for (const role of userRoles) { requirements.push({ type: TEST_TYPES.PERMISSION, @@ -1694,32 +1911,35 @@ class TestRequirementAnalyzer extends EventEmitter { target: `${policyName}_altered_${role}`, testCases: [ `results_eq() - verify updated policy behavior for ${role}`, - 'Test that policy changes work as expected', - 'Verify no unintended access granted or denied' + "Test that policy changes work as expected", + "Verify no unintended access granted or denied", ], metadata: { tableName, policyName, testRole: role, - testType: 'altered_security_boundary', - testScenarios: this._generateSecurityTestScenarios(policyDetails, role) - } + testType: "altered_security_boundary", + testScenarios: this._generateSecurityTestScenarios( + policyDetails, + role, + ), + }, }); } - + return requirements; } - + /** * Generate test requirements for DROP POLICY operations * @private */ _generateDropPolicyTests(operation, policyName, priority) { const requirements = []; - const sql = operation.sql || ''; + const sql = operation.sql || ""; const tableMatch = sql.match(/ON\s+([^\s(]+)/i); - const tableName = tableMatch ? tableMatch[1] : 'unknown_table'; - + const tableName = tableMatch ? 
tableMatch[1] : "unknown_table"; + // Policy absence test requirements.push({ type: TEST_TYPES.RLS, @@ -1727,16 +1947,16 @@ class TestRequirementAnalyzer extends EventEmitter { description: `Verify policy ${policyName} is properly removed`, target: policyName, testCases: [ - 'policy_exists() should return false', - 'Verify policy no longer appears in policy list' + "policy_exists() should return false", + "Verify policy no longer appears in policy list", ], metadata: { tableName, policyName, - testType: 'policy_removal' - } + testType: "policy_removal", + }, }); - + // Test security implications of policy removal requirements.push({ type: TEST_TYPES.PERMISSION, @@ -1744,29 +1964,29 @@ class TestRequirementAnalyzer extends EventEmitter { description: `Test security implications of removing policy ${policyName}`, target: `${tableName}_post_drop`, testCases: [ - 'results_eq() - verify expected access changes after policy drop', - 'Test that removal doesn\'t unexpectedly grant access', - 'Verify other policies still function correctly', - 'Test with different user roles' + "results_eq() - verify expected access changes after policy drop", + "Test that removal doesn't unexpectedly grant access", + "Verify other policies still function correctly", + "Test with different user roles", ], metadata: { tableName, policyName, - testType: 'post_drop_security', - reason: 'Dropping policies can unexpectedly grant broader access' - } + testType: "post_drop_security", + reason: "Dropping policies can unexpectedly grant broader access", + }, }); - + return requirements; } - + /** * Generate test requirements for ENABLE ROW LEVEL SECURITY operations * @private */ _generateEnableRLSTests(operation, tableName, priority) { const requirements = []; - + // RLS enablement test requirements.push({ type: TEST_TYPES.RLS, @@ -1774,15 +1994,15 @@ class TestRequirementAnalyzer extends EventEmitter { description: `Verify RLS is enabled on table ${tableName}`, target: tableName, testCases: [ - 'is_rls_enabled() - verify RLS is active', - 'Test that access is properly restricted when RLS is enabled' + "is_rls_enabled() - verify RLS is active", + "Test that access is properly restricted when RLS is enabled", ], metadata: { tableName, - testType: 'rls_enablement' - } + testType: "rls_enablement", + }, }); - + // Security impact test - RLS should restrict access by default requirements.push({ type: TEST_TYPES.PERMISSION, @@ -1790,47 +2010,45 @@ class TestRequirementAnalyzer extends EventEmitter { description: `Test security impact of enabling RLS on ${tableName}`, target: `${tableName}_rls_enabled`, testCases: [ - 'results_eq() - verify restricted access with no policies', - 'Test with anon role (should have no access by default)', - 'Test with authenticated role', - 'Verify service_role bypasses RLS' + "results_eq() - verify restricted access with no policies", + "Test with anon role (should have no access by default)", + "Test with authenticated role", + "Verify service_role bypasses RLS", ], metadata: { tableName, - testType: 'rls_security_impact', + testType: "rls_security_impact", testScenarios: [ - { role: 'anon', operation: 'SELECT', shouldAllow: false }, - { role: 'authenticated', operation: 'SELECT', shouldAllow: false }, - { role: 'service_role', operation: 'SELECT', shouldAllow: true } - ] - } + { role: "anon", operation: "SELECT", shouldAllow: false }, + { role: "authenticated", operation: "SELECT", shouldAllow: false }, + { role: "service_role", operation: "SELECT", shouldAllow: true }, + ], + }, }); - + return 
requirements; } - + /** * Generate test requirements for DISABLE ROW LEVEL SECURITY operations * @private */ _generateDisableRLSTests(operation, tableName, priority) { const requirements = []; - + // RLS disablement test requirements.push({ type: TEST_TYPES.RLS, priority: TEST_PRIORITIES.CRITICAL, description: `Verify RLS is disabled on table ${tableName}`, target: tableName, - testCases: [ - 'is_rls_enabled() - verify RLS is inactive' - ], + testCases: ["is_rls_enabled() - verify RLS is inactive"], metadata: { tableName, - testType: 'rls_disablement' - } + testType: "rls_disablement", + }, }); - + // Security impact test - disabling RLS might grant broader access requirements.push({ type: TEST_TYPES.PERMISSION, @@ -1838,20 +2056,20 @@ class TestRequirementAnalyzer extends EventEmitter { description: `Test security impact of disabling RLS on ${tableName}`, target: `${tableName}_rls_disabled`, testCases: [ - 'results_eq() - verify access patterns after RLS disabled', - 'Test with different user roles', - 'Verify no unintended data exposure' + "results_eq() - verify access patterns after RLS disabled", + "Test with different user roles", + "Verify no unintended data exposure", ], metadata: { tableName, - testType: 'rls_disable_security_impact', - reason: 'Disabling RLS can unexpectedly grant broader access' - } + testType: "rls_disable_security_impact", + reason: "Disabling RLS can unexpectedly grant broader access", + }, }); - + return requirements; } - + /** * Extract policy details from SQL statement * @private @@ -1862,108 +2080,118 @@ class TestRequirementAnalyzer extends EventEmitter { roles: [], isPermissive: true, expression: null, - checkExpression: null + checkExpression: null, }; - + // Extract commands (FOR SELECT, INSERT, UPDATE, DELETE, ALL) const commandMatch = sql.match(/FOR\s+(SELECT|INSERT|UPDATE|DELETE|ALL)/i); if (commandMatch) { - if (commandMatch[1].toUpperCase() === 'ALL') { - details.commands = ['SELECT', 'INSERT', 'UPDATE', 'DELETE']; + if (commandMatch[1].toUpperCase() === "ALL") { + details.commands = ["SELECT", "INSERT", "UPDATE", "DELETE"]; } else { details.commands = [commandMatch[1].toUpperCase()]; } } else { // Default is ALL commands - details.commands = ['SELECT', 'INSERT', 'UPDATE', 'DELETE']; + details.commands = ["SELECT", "INSERT", "UPDATE", "DELETE"]; } - + // Extract roles (TO role1, role2, ...) 
- const rolesMatch = sql.match(/TO\s+((?:\w+(?:\s*,\s*\w+)*))\s+(?:USING|WITH|$)/i); + const rolesMatch = sql.match( + /TO\s+((?:\w+(?:\s*,\s*\w+)*))\s+(?:USING|WITH|$)/i, + ); if (rolesMatch) { - details.roles = rolesMatch[1].split(',').map(role => role.trim()); + details.roles = rolesMatch[1].split(",").map((role) => role.trim()); } - + // Check if restrictive policy - details.isPermissive = !sql.toUpperCase().includes('AS RESTRICTIVE'); - + details.isPermissive = !sql.toUpperCase().includes("AS RESTRICTIVE"); + // Extract USING expression (handle nested parentheses) const usingStart = sql.search(/USING\s*\(/i); if (usingStart !== -1) { let parenCount = 0; - let startIdx = sql.indexOf('(', usingStart); + let startIdx = sql.indexOf("(", usingStart); let endIdx = startIdx; - + for (let i = startIdx; i < sql.length; i++) { - if (sql[i] === '(') parenCount++; - if (sql[i] === ')') parenCount--; + if (sql[i] === "(") parenCount++; + if (sql[i] === ")") parenCount--; if (parenCount === 0) { endIdx = i; break; } } - + if (parenCount === 0) { details.expression = sql.substring(startIdx + 1, endIdx); } } - - // Extract WITH CHECK expression + + // Extract WITH CHECK expression const checkMatch = sql.match(/WITH\s+CHECK\s*\(([^)]+)\)/i); if (checkMatch) { details.checkExpression = checkMatch[1]; } - + return details; } - + /** * Generate security test scenarios for a policy and role combination * @private */ _generateSecurityTestScenarios(policyDetails, role) { const scenarios = []; - + for (const command of policyDetails.commands) { // Basic allowed scenario scenarios.push({ role, operation: command, - shouldAllow: this._shouldPolicyAllowOperation(policyDetails, role, command), - context: { description: `Test ${command} operation for ${role}` } + shouldAllow: this._shouldPolicyAllowOperation( + policyDetails, + role, + command, + ), + context: { description: `Test ${command} operation for ${role}` }, }); - + // Edge case scenarios - if (command === 'SELECT') { + if (command === "SELECT") { scenarios.push({ role, - operation: 'SELECT with WHERE clause', - shouldAllow: this._shouldPolicyAllowOperation(policyDetails, role, command), - context: { description: `Test filtered SELECT for ${role}` } + operation: "SELECT with WHERE clause", + shouldAllow: this._shouldPolicyAllowOperation( + policyDetails, + role, + command, + ), + context: { description: `Test filtered SELECT for ${role}` }, }); } - - if (command === 'INSERT') { + + if (command === "INSERT") { scenarios.push({ role, - operation: 'INSERT with invalid data', + operation: "INSERT with invalid data", shouldAllow: false, - context: { description: `Test INSERT validation for ${role}` } + context: { description: `Test INSERT validation for ${role}` }, }); } } - + // Test policy bypass scenarios scenarios.push({ - role: 'service_role', - operation: 'bypass_test', + role: "service_role", + operation: "bypass_test", shouldAllow: true, - context: { description: 'Verify service_role bypasses RLS policies' } + context: { description: "Verify service_role bypasses RLS policies" }, }); - + return scenarios; } - + /** * Determine if a policy should allow an operation for a role * @private @@ -1971,104 +2199,110 @@ class TestRequirementAnalyzer extends EventEmitter { _shouldPolicyAllowOperation(policyDetails, role, command) { // This is a simplified heuristic - in practice, this would depend on // the specific policy expression and database state - - if (role === 'service_role') { + + if (role === "service_role") { return true; // service_role bypasses 
RLS } - + if (policyDetails.roles.length > 0 && !policyDetails.roles.includes(role)) { return false; // Role not in policy } - + if (!policyDetails.commands.includes(command)) { return false; // Command not covered by policy } - + // Default assumption - policy allows the operation // In practice, this would need to evaluate the USING expression return true; } - + /** * Generate test requirements for views * @private */ _generateViewTests(operation, target, priority) { - return [{ - type: TEST_TYPES.VIEW, - priority, - description: `Verify view ${target} definition and data`, - target, - testCases: [ - 'has_view()', - 'Verify view returns expected columns', - 'Test view data accuracy', - 'Verify view permissions' - ] - }]; + return [ + { + type: TEST_TYPES.VIEW, + priority, + description: `Verify view ${target} definition and data`, + target, + testCases: [ + "has_view()", + "Verify view returns expected columns", + "Test view data accuracy", + "Verify view permissions", + ], + }, + ]; } - + /** * Generate test requirements for enum types * @private */ _generateEnumTests(operation, target, priority) { - return [{ - type: TEST_TYPES.ENUM, - priority, - description: `Verify enum type ${target} values`, - target, - testCases: [ - 'has_type()', - 'Test all enum values are valid', - 'Test invalid values are rejected', - 'Verify enum usage in tables' - ] - }]; + return [ + { + type: TEST_TYPES.ENUM, + priority, + description: `Verify enum type ${target} values`, + target, + testCases: [ + "has_type()", + "Test all enum values are valid", + "Test invalid values are rejected", + "Verify enum usage in tables", + ], + }, + ]; } - + /** * Generate test requirements for triggers * @private */ _generateTriggerTests(operation, target, priority) { - const sql = (operation.sql || '').toUpperCase(); - + const sql = (operation.sql || "").toUpperCase(); + // Route to specific trigger test methods based on operation type - if (sql.includes('CREATE EVENT TRIGGER')) { + if (sql.includes("CREATE EVENT TRIGGER")) { return this._generateEventTriggerTests(operation, target, priority); } - - if (sql.includes('CREATE TRIGGER')) { + + if (sql.includes("CREATE TRIGGER")) { return this._generateTriggerCreationTests(operation, target, priority); } - - if (sql.includes('ALTER TRIGGER')) { + + if (sql.includes("ALTER TRIGGER")) { return this._generateTriggerAlterationTests(operation, target, priority); } - - if (sql.includes('DROP TRIGGER')) { + + if (sql.includes("DROP TRIGGER")) { return this._generateTriggerDropTests(operation, target, priority); } - + // Fallback for unknown trigger operations - return [{ - type: TEST_TYPES.TRIGGER, - priority, - description: `Verify trigger ${target} functionality`, - target, - testCases: [ - 'has_trigger() - trigger exists', - 'trigger_is() - verify trigger properties', - 'Test trigger fires on correct events', - 'Test trigger function execution', - 'Verify trigger timing (BEFORE/AFTER)', - 'Test trigger with different data scenarios' - ], - metadata: { - testType: 'functionality' - } - }]; + return [ + { + type: TEST_TYPES.TRIGGER, + priority, + description: `Verify trigger ${target} functionality`, + target, + testCases: [ + "has_trigger() - trigger exists", + "trigger_is() - verify trigger properties", + "Test trigger fires on correct events", + "Test trigger function execution", + "Verify trigger timing (BEFORE/AFTER)", + "Test trigger with different data scenarios", + ], + metadata: { + testType: "functionality", + }, + }, + ]; } /** @@ -2076,79 +2310,87 @@ class 
TestRequirementAnalyzer extends EventEmitter { * @private */ _generateGenericTests(operation, target, priority) { - return [{ - type: TEST_TYPES.SCHEMA, - priority: TEST_PRIORITIES.LOW, - description: `Verify operation executed successfully: ${operation.description || 'Unknown operation'}`, - target: target || 'Unknown', - testCases: [ - 'Verify operation completed without errors', - 'Check database state consistency' - ] - }]; + return [ + { + type: TEST_TYPES.SCHEMA, + priority: TEST_PRIORITIES.LOW, + description: `Verify operation executed successfully: ${operation.description || "Unknown operation"}`, + target: target || "Unknown", + testCases: [ + "Verify operation completed without errors", + "Check database state consistency", + ], + }, + ]; } - + /** * Generate security test requirements * @private */ _generateSecurityTests(operation, target, priority) { - return [{ - type: TEST_TYPES.PERMISSION, - priority: TEST_PRIORITIES.CRITICAL, - description: `Verify security implications of ${target} changes`, - target, - testCases: [ - 'Test access control enforcement', - 'Verify unauthorized access is blocked', - 'Test with different user roles', - 'Audit security policy changes' - ] - }]; + return [ + { + type: TEST_TYPES.PERMISSION, + priority: TEST_PRIORITIES.CRITICAL, + description: `Verify security implications of ${target} changes`, + target, + testCases: [ + "Test access control enforcement", + "Verify unauthorized access is blocked", + "Test with different user roles", + "Audit security policy changes", + ], + }, + ]; } - + /** * Generate performance test requirements * @private */ _generatePerformanceTests(operation, target, priority) { - return [{ - type: TEST_TYPES.INDEX, - priority: TEST_PRIORITIES.MEDIUM, - description: `Verify performance impact of ${target} changes`, - target, - testCases: [ - 'Measure query performance before/after', - 'Verify indexes are utilized', - 'Check for performance regressions', - 'Test with realistic data volumes' - ] - }]; + return [ + { + type: TEST_TYPES.INDEX, + priority: TEST_PRIORITIES.MEDIUM, + description: `Verify performance impact of ${target} changes`, + target, + testCases: [ + "Measure query performance before/after", + "Verify indexes are utilized", + "Check for performance regressions", + "Test with realistic data volumes", + ], + }, + ]; } - + /** * Generate test requirements for column addition * @private */ _generateColumnAdditionTests(operation, tableName, columnName, priority) { const requirements = []; - const sql = operation.sql || ''; - + const sql = operation.sql || ""; + // Extract column metadata from SQL const columnMetadata = this._parseColumnConstraints(sql, columnName); - + requirements.push({ type: TEST_TYPES.SCHEMA, priority, description: `Verify column ${columnName} added to ${tableName}`, target: `${tableName}.${columnName}`, testCases: [ - 'has_column()', - 'col_type_is()', - ...(columnMetadata.notNull ? ['col_not_null()'] : ['col_is_null()']), - ...(columnMetadata.hasDefault ? ['col_has_default()', 'col_default_is()'] : []), - ...(columnMetadata.isUnique ? ['col_is_unique()'] : []), - ...(columnMetadata.isForeignKey ? ['has_fk()', 'fk_ok()'] : []) + "has_column()", + "col_type_is()", + ...(columnMetadata.notNull ? ["col_not_null()"] : ["col_is_null()"]), + ...(columnMetadata.hasDefault + ? ["col_has_default()", "col_default_is()"] + : []), + ...(columnMetadata.isUnique ? ["col_is_unique()"] : []), + ...(columnMetadata.isForeignKey ? 
["has_fk()", "fk_ok()"] : []), ], metadata: { columnType: columnMetadata.type, @@ -2158,10 +2400,10 @@ class TestRequirementAnalyzer extends EventEmitter { isUnique: columnMetadata.isUnique, isForeignKey: columnMetadata.isForeignKey, referencedTable: columnMetadata.referencedTable, - referencedColumn: columnMetadata.referencedColumn - } + referencedColumn: columnMetadata.referencedColumn, + }, }); - + // Add constraint tests if applicable if (columnMetadata.hasCheckConstraint) { requirements.push({ @@ -2169,70 +2411,69 @@ class TestRequirementAnalyzer extends EventEmitter { priority, description: `Verify check constraint on ${tableName}.${columnName}`, target: `${tableName}.${columnName}`, - testCases: [ - 'has_check()', - 'check_test()' - ], + testCases: ["has_check()", "check_test()"], metadata: { - checkExpression: columnMetadata.checkExpression - } + checkExpression: columnMetadata.checkExpression, + }, }); } - + return requirements; } - + /** * Generate test requirements for column drops * @private */ _generateColumnDropTests(operation, tableName, columnName, priority) { - return [{ - type: TEST_TYPES.SCHEMA, - priority: TEST_PRIORITIES.CRITICAL, // Always critical for destructive ops - description: `Verify column ${columnName} dropped from ${tableName}`, - target: `${tableName}.${columnName}`, - testCases: [ - 'hasnt_column()', - 'Verify dependent constraints are handled', - 'Verify dependent indexes are handled', - 'Check data integrity after column drop' - ], - metadata: { - destructive: true, - requiresDataValidation: true - } - }]; + return [ + { + type: TEST_TYPES.SCHEMA, + priority: TEST_PRIORITIES.CRITICAL, // Always critical for destructive ops + description: `Verify column ${columnName} dropped from ${tableName}`, + target: `${tableName}.${columnName}`, + testCases: [ + "hasnt_column()", + "Verify dependent constraints are handled", + "Verify dependent indexes are handled", + "Check data integrity after column drop", + ], + metadata: { + destructive: true, + requiresDataValidation: true, + }, + }, + ]; } - + /** * Generate test requirements for column type changes * @private */ _generateColumnTypeChangeTests(operation, tableName, columnName, priority) { const requirements = []; - const sql = operation.sql || ''; - + const sql = operation.sql || ""; + // Extract old and new types from SQL const typeChangeInfo = this._parseTypeChange(sql, columnName); - + requirements.push({ type: TEST_TYPES.SCHEMA, priority: TEST_PRIORITIES.HIGH, description: `Verify column ${columnName} type change in ${tableName}`, target: `${tableName}.${columnName}`, testCases: [ - 'col_type_is()', - 'Verify existing data compatibility', - 'Test data conversion accuracy' + "col_type_is()", + "Verify existing data compatibility", + "Test data conversion accuracy", ], metadata: { oldType: typeChangeInfo.oldType, newType: typeChangeInfo.newType, - requiresDataMigration: typeChangeInfo.requiresConversion - } + requiresDataMigration: typeChangeInfo.requiresConversion, + }, }); - + // Add data migration tests for incompatible type changes if (typeChangeInfo.requiresConversion) { requirements.push({ @@ -2241,140 +2482,148 @@ class TestRequirementAnalyzer extends EventEmitter { description: `Verify data migration for ${tableName}.${columnName} type change`, target: `${tableName}.${columnName}`, testCases: [ - 'Test data conversion edge cases', - 'Verify no data loss during conversion', - 'Test boundary values', - 'Validate converted data accuracy' + "Test data conversion edge cases", + "Verify no data loss during 
conversion", + "Test boundary values", + "Validate converted data accuracy", ], metadata: { conversionRequired: true, - dataValidationCritical: true - } + dataValidationCritical: true, + }, }); } - + return requirements; } - + /** * Generate test requirements for setting column NOT NULL * @private */ _generateColumnNotNullTests(operation, tableName, columnName, priority) { - return [{ - type: TEST_TYPES.CONSTRAINT, - priority: TEST_PRIORITIES.HIGH, - description: `Verify column ${columnName} NOT NULL constraint in ${tableName}`, - target: `${tableName}.${columnName}`, - testCases: [ - 'col_not_null()', - 'Test null insertion rejection', - 'Verify existing data has no nulls', - 'Test constraint enforcement' - ], - metadata: { - constraintType: 'NOT NULL', - requiresDataValidation: true - } - }]; + return [ + { + type: TEST_TYPES.CONSTRAINT, + priority: TEST_PRIORITIES.HIGH, + description: `Verify column ${columnName} NOT NULL constraint in ${tableName}`, + target: `${tableName}.${columnName}`, + testCases: [ + "col_not_null()", + "Test null insertion rejection", + "Verify existing data has no nulls", + "Test constraint enforcement", + ], + metadata: { + constraintType: "NOT NULL", + requiresDataValidation: true, + }, + }, + ]; } - + /** * Generate test requirements for dropping NOT NULL constraint * @private */ _generateColumnNullableTests(operation, tableName, columnName, priority) { - return [{ - type: TEST_TYPES.CONSTRAINT, - priority, - description: `Verify column ${columnName} nullable constraint removed in ${tableName}`, - target: `${tableName}.${columnName}`, - testCases: [ - 'col_is_null() - column allows nulls', - 'Test null insertion acceptance', - 'Verify constraint properly removed' - ], - metadata: { - constraintType: 'NULLABLE', - constraintRemoved: true - } - }]; + return [ + { + type: TEST_TYPES.CONSTRAINT, + priority, + description: `Verify column ${columnName} nullable constraint removed in ${tableName}`, + target: `${tableName}.${columnName}`, + testCases: [ + "col_is_null() - column allows nulls", + "Test null insertion acceptance", + "Verify constraint properly removed", + ], + metadata: { + constraintType: "NULLABLE", + constraintRemoved: true, + }, + }, + ]; } - + /** * Generate test requirements for setting column default * @private */ _generateColumnSetDefaultTests(operation, tableName, columnName, priority) { - const sql = operation.sql || ''; + const sql = operation.sql || ""; const defaultValue = this._extractDefaultValue(sql, columnName); - - return [{ - type: TEST_TYPES.CONSTRAINT, - priority, - description: `Verify column ${columnName} default value set in ${tableName}`, - target: `${tableName}.${columnName}`, - testCases: [ - 'col_has_default()', - 'col_default_is()', - 'Test default value application on insert', - 'Verify default value type compatibility' - ], - metadata: { - defaultValue, - requiresInsertTest: true - } - }]; + + return [ + { + type: TEST_TYPES.CONSTRAINT, + priority, + description: `Verify column ${columnName} default value set in ${tableName}`, + target: `${tableName}.${columnName}`, + testCases: [ + "col_has_default()", + "col_default_is()", + "Test default value application on insert", + "Verify default value type compatibility", + ], + metadata: { + defaultValue, + requiresInsertTest: true, + }, + }, + ]; } - + /** * Generate test requirements for dropping column default * @private */ _generateColumnDropDefaultTests(operation, tableName, columnName, priority) { - return [{ - type: TEST_TYPES.CONSTRAINT, - priority, - description: 
`Verify column ${columnName} default value removed in ${tableName}`, - target: `${tableName}.${columnName}`, - testCases: [ - 'col_hasnt_default()', - 'Test explicit value requirement on insert', - 'Verify default properly removed' - ], - metadata: { - defaultRemoved: true, - requiresInsertTest: true - } - }]; + return [ + { + type: TEST_TYPES.CONSTRAINT, + priority, + description: `Verify column ${columnName} default value removed in ${tableName}`, + target: `${tableName}.${columnName}`, + testCases: [ + "col_hasnt_default()", + "Test explicit value requirement on insert", + "Verify default properly removed", + ], + metadata: { + defaultRemoved: true, + requiresInsertTest: true, + }, + }, + ]; } - + /** * Generate test requirements for constraint additions * @private */ _generateConstraintTests(operation, tableName, constraintName, priority) { const requirements = []; - const sql = operation.sql || ''; + const sql = operation.sql || ""; const constraintType = this._identifyConstraintType(sql); - + switch (constraintType) { - case 'PRIMARY_KEY': + case "PRIMARY_KEY": requirements.push({ type: TEST_TYPES.CONSTRAINT, priority: TEST_PRIORITIES.CRITICAL, description: `Verify primary key constraint ${constraintName} on ${tableName}`, target: `${tableName}.${constraintName}`, testCases: [ - 'has_pk()', - 'Test uniqueness enforcement', - 'Test null rejection', - 'Verify constraint naming' - ] + "has_pk()", + "Test uniqueness enforcement", + "Test null rejection", + "Verify constraint naming", + ], }); break; - - case 'FOREIGN_KEY': + + case "FOREIGN_KEY": const fkInfo = this._parseForeignKeyConstraint(sql); requirements.push({ type: TEST_TYPES.CONSTRAINT, @@ -2382,36 +2631,36 @@ class TestRequirementAnalyzer extends EventEmitter { description: `Verify foreign key constraint ${constraintName} on ${tableName}`, target: `${tableName}.${constraintName}`, testCases: [ - 'has_fk()', - 'fk_ok()', - 'Test referential integrity', - 'Test cascade behavior if applicable' + "has_fk()", + "fk_ok()", + "Test referential integrity", + "Test cascade behavior if applicable", ], metadata: { referencedTable: fkInfo.referencedTable, referencedColumn: fkInfo.referencedColumn, onDelete: fkInfo.onDelete, - onUpdate: fkInfo.onUpdate - } + onUpdate: fkInfo.onUpdate, + }, }); break; - - case 'UNIQUE': + + case "UNIQUE": requirements.push({ type: TEST_TYPES.CONSTRAINT, priority: TEST_PRIORITIES.HIGH, description: `Verify unique constraint ${constraintName} on ${tableName}`, target: `${tableName}.${constraintName}`, testCases: [ - 'has_unique()', - 'Test uniqueness enforcement', - 'Test duplicate rejection', - 'Verify constraint scope' - ] + "has_unique()", + "Test uniqueness enforcement", + "Test duplicate rejection", + "Verify constraint scope", + ], }); break; - - case 'CHECK': + + case "CHECK": const checkExpression = this._extractCheckExpression(sql); requirements.push({ type: TEST_TYPES.CONSTRAINT, @@ -2419,17 +2668,17 @@ class TestRequirementAnalyzer extends EventEmitter { description: `Verify check constraint ${constraintName} on ${tableName}`, target: `${tableName}.${constraintName}`, testCases: [ - 'has_check()', - 'check_test()', - 'Test constraint violation rejection', - 'Test valid values acceptance' + "has_check()", + "check_test()", + "Test constraint violation rejection", + "Test valid values acceptance", ], metadata: { - checkExpression - } + checkExpression, + }, }); break; - + default: requirements.push({ type: TEST_TYPES.CONSTRAINT, @@ -2437,175 +2686,189 @@ class TestRequirementAnalyzer extends 
EventEmitter { description: `Verify constraint ${constraintName} on ${tableName}`, target: `${tableName}.${constraintName}`, testCases: [ - 'Verify constraint existence', - 'Test constraint enforcement' - ] + "Verify constraint existence", + "Test constraint enforcement", + ], }); } - + return requirements; } - /** * Generate comprehensive column validation test * This ensures all aspects of a column are properly tested after critical changes * @private */ - _generateComprehensiveColumnValidation(operation, tableName, columnName, priority) { + _generateComprehensiveColumnValidation( + operation, + tableName, + columnName, + priority, + ) { return { type: TEST_TYPES.SCHEMA, priority: TEST_PRIORITIES.HIGH, description: `Comprehensive validation for ${tableName}.${columnName}`, target: `${tableName}.${columnName}`, testCases: [ - 'has_column() - verify column exists', - 'col_type_is() - verify correct data type', - 'col_not_null() or col_is_null() - verify nullability', - 'col_has_default() or col_hasnt_default() - verify default state', - 'Verify dependent objects (indexes, constraints)', - 'Test data integrity and constraints', - 'Validate column in table schema' + "has_column() - verify column exists", + "col_type_is() - verify correct data type", + "col_not_null() or col_is_null() - verify nullability", + "col_has_default() or col_hasnt_default() - verify default state", + "Verify dependent objects (indexes, constraints)", + "Test data integrity and constraints", + "Validate column in table schema", ], metadata: { comprehensive: true, validatesAllAspects: true, criticalOperation: true, - requiresFullValidation: true - } + requiresFullValidation: true, + }, }; } /** * Helper methods for column operation parsing */ - + _extractColumnName(sql, operation) { const patterns = { - 'ADD COLUMN': /ADD\s+COLUMN\s+([^\s(]+)/i, - 'DROP COLUMN': /DROP\s+COLUMN\s+([^\s,;]+)/i, - 'ALTER COLUMN': /ALTER\s+COLUMN\s+([^\s]+)/i + "ADD COLUMN": /ADD\s+COLUMN\s+([^\s(]+)/i, + "DROP COLUMN": /DROP\s+COLUMN\s+([^\s,;]+)/i, + "ALTER COLUMN": /ALTER\s+COLUMN\s+([^\s]+)/i, }; - + const pattern = patterns[operation]; - if (!pattern) return 'unknown_column'; - + if (!pattern) return "unknown_column"; + const match = sql.match(pattern); - return match ? match[1] : 'unknown_column'; + return match ? match[1] : "unknown_column"; } - + _extractConstraintName(sql, operation) { const pattern = /ADD\s+CONSTRAINT\s+([^\s]+)/i; const match = sql.match(pattern); - return match ? match[1] : 'unknown_constraint'; + return match ? 
match[1] : "unknown_constraint"; } - + _parseColumnConstraints(sql, columnName) { // Extract column definition from SQL - everything after the column name - const columnDefPattern = new RegExp(`${columnName}\\s+(.+?)(?:,|$)`, 'i'); + const columnDefPattern = new RegExp(`${columnName}\\s+(.+?)(?:,|$)`, "i"); const match = sql.match(columnDefPattern); - + if (!match) { return { - type: 'unknown', + type: "unknown", notNull: false, hasDefault: false, isUnique: false, isForeignKey: false, - hasCheckConstraint: false + hasCheckConstraint: false, }; } - + const definition = match[1].toUpperCase(); - + return { type: this._extractDataType(definition), - notNull: definition.includes('NOT NULL'), - hasDefault: definition.includes('DEFAULT'), + notNull: definition.includes("NOT NULL"), + hasDefault: definition.includes("DEFAULT"), defaultValue: this._extractDefaultFromDefinition(definition), - isUnique: definition.includes('UNIQUE'), - isForeignKey: definition.includes('REFERENCES'), - hasCheckConstraint: definition.includes('CHECK'), + isUnique: definition.includes("UNIQUE"), + isForeignKey: definition.includes("REFERENCES"), + hasCheckConstraint: definition.includes("CHECK"), checkExpression: this._extractCheckFromDefinition(definition), referencedTable: this._extractReferencedTable(definition), - referencedColumn: this._extractReferencedColumn(definition) + referencedColumn: this._extractReferencedColumn(definition), }; } - + _parseTypeChange(sql, columnName) { // This is simplified - in production you'd want more sophisticated parsing - const typePattern = new RegExp(`ALTER\\s+COLUMN\\s+${columnName}\\s+(?:SET\\s+DATA\\s+)?TYPE\\s+([^\\s,;]+)`, 'i'); + const typePattern = new RegExp( + `ALTER\\s+COLUMN\\s+${columnName}\\s+(?:SET\\s+DATA\\s+)?TYPE\\s+([^\\s,;]+)`, + "i", + ); const match = sql.match(typePattern); - + return { - oldType: 'unknown', // Would need schema introspection - newType: match ? match[1] : 'unknown', - requiresConversion: true // Conservative assumption + oldType: "unknown", // Would need schema introspection + newType: match ? match[1] : "unknown", + requiresConversion: true, // Conservative assumption }; } - + _extractDefaultValue(sql, columnName) { - const defaultPattern = new RegExp(`ALTER\\s+COLUMN\\s+${columnName}\\s+SET\\s+DEFAULT\\s+([^;,\\s]+(?:\\s*'[^']*')?[^;,]*)`, 'i'); + const defaultPattern = new RegExp( + `ALTER\\s+COLUMN\\s+${columnName}\\s+SET\\s+DEFAULT\\s+([^;,\\s]+(?:\\s*'[^']*')?[^;,]*)`, + "i", + ); const match = sql.match(defaultPattern); return match ? 
match[1].trim() : null; } - + _identifyConstraintType(sql) { const upperSql = sql.toUpperCase(); - if (upperSql.includes('PRIMARY KEY')) return 'PRIMARY_KEY'; - if (upperSql.includes('FOREIGN KEY') || upperSql.includes('REFERENCES')) return 'FOREIGN_KEY'; - if (upperSql.includes('UNIQUE')) return 'UNIQUE'; - if (upperSql.includes('CHECK')) return 'CHECK'; - return 'UNKNOWN'; + if (upperSql.includes("PRIMARY KEY")) return "PRIMARY_KEY"; + if (upperSql.includes("FOREIGN KEY") || upperSql.includes("REFERENCES")) + return "FOREIGN_KEY"; + if (upperSql.includes("UNIQUE")) return "UNIQUE"; + if (upperSql.includes("CHECK")) return "CHECK"; + return "UNKNOWN"; } - + _parseForeignKeyConstraint(sql) { - const referencesPattern = /REFERENCES\s+([^\s(]+)(?:\s*\(\s*([^)]+)\s*\))?/i; - const onDeletePattern = /ON\s+DELETE\s+(CASCADE|RESTRICT|SET\s+NULL|SET\s+DEFAULT|NO\s+ACTION)/i; - const onUpdatePattern = /ON\s+UPDATE\s+(CASCADE|RESTRICT|SET\s+NULL|SET\s+DEFAULT|NO\s+ACTION)/i; - + const referencesPattern = + /REFERENCES\s+([^\s(]+)(?:\s*\(\s*([^)]+)\s*\))?/i; + const onDeletePattern = + /ON\s+DELETE\s+(CASCADE|RESTRICT|SET\s+NULL|SET\s+DEFAULT|NO\s+ACTION)/i; + const onUpdatePattern = + /ON\s+UPDATE\s+(CASCADE|RESTRICT|SET\s+NULL|SET\s+DEFAULT|NO\s+ACTION)/i; + const referencesMatch = sql.match(referencesPattern); const onDeleteMatch = sql.match(onDeletePattern); const onUpdateMatch = sql.match(onUpdatePattern); - + return { referencedTable: referencesMatch ? referencesMatch[1] : null, referencedColumn: referencesMatch ? referencesMatch[2] : null, onDelete: onDeleteMatch ? onDeleteMatch[1] : null, - onUpdate: onUpdateMatch ? onUpdateMatch[1] : null + onUpdate: onUpdateMatch ? onUpdateMatch[1] : null, }; } - + _extractCheckExpression(sql) { const checkPattern = /CHECK\s*\(\s*([^)]+)\s*\)/i; const match = sql.match(checkPattern); return match ? match[1] : null; } - + _extractDataType(definition) { // Extract the data type including size/precision in parentheses const typeMatch = definition.match(/^([^\s]+(?:\([^)]+\))?)/); - return typeMatch ? typeMatch[1] : 'unknown'; + return typeMatch ? typeMatch[1] : "unknown"; } - + _extractDefaultFromDefinition(definition) { const defaultPattern = /DEFAULT\s+('[^']*'|[^\s]+)/i; const match = definition.match(defaultPattern); return match ? match[1].trim() : null; } - + _extractCheckFromDefinition(definition) { const checkPattern = /CHECK\s*\(\s*([^)]+)\s*\)/i; const match = definition.match(checkPattern); return match ? match[1] : null; } - + _extractReferencedTable(definition) { const referencesPattern = /REFERENCES\s+([^\s(]+)/i; const match = definition.match(referencesPattern); return match ? 
match[1] : null; } - + _extractReferencedColumn(definition) { const referencesPattern = /REFERENCES\s+[^\s(]+\s*\(\s*([^)]+)\s*\)/i; const match = definition.match(referencesPattern); @@ -2615,196 +2878,221 @@ class TestRequirementAnalyzer extends EventEmitter { /** * Helper methods */ - + _getBasePriority(operation) { switch (operation.type) { - case 'DESTRUCTIVE': + case "DESTRUCTIVE": return this.options.destructiveOperationPriority; - case 'WARNING': + case "WARNING": return this.options.warningOperationPriority; default: return this.options.safeOperationPriority; } } - + _categorizeOperation(operation) { - const sql = (operation.sql || '').toUpperCase().trim(); - - if (sql.startsWith('CREATE TABLE')) return 'CREATE_TABLE'; - if (sql.startsWith('DROP TABLE')) return 'DROP_TABLE'; - if (sql.startsWith('ALTER TABLE')) { - if (sql.includes('ENABLE ROW LEVEL SECURITY')) return 'ENABLE_RLS'; - if (sql.includes('DISABLE ROW LEVEL SECURITY')) return 'DISABLE_RLS'; - return 'ALTER_TABLE'; + const sql = (operation.sql || "").toUpperCase().trim(); + + if (sql.startsWith("CREATE TABLE")) return "CREATE_TABLE"; + if (sql.startsWith("DROP TABLE")) return "DROP_TABLE"; + if (sql.startsWith("ALTER TABLE")) { + if (sql.includes("ENABLE ROW LEVEL SECURITY")) return "ENABLE_RLS"; + if (sql.includes("DISABLE ROW LEVEL SECURITY")) return "DISABLE_RLS"; + return "ALTER_TABLE"; } - + // Index operations - all use CREATE_INDEX for main switching, specialized handling in _generateIndexTests - if (sql.includes('CREATE UNIQUE INDEX')) return 'CREATE_INDEX'; - if (sql.includes('CREATE INDEX')) return 'CREATE_INDEX'; - if (sql.includes('DROP INDEX')) return 'CREATE_INDEX'; - if (sql.includes('ALTER INDEX')) return 'CREATE_INDEX'; - + if (sql.includes("CREATE UNIQUE INDEX")) return "CREATE_INDEX"; + if (sql.includes("CREATE INDEX")) return "CREATE_INDEX"; + if (sql.includes("DROP INDEX")) return "CREATE_INDEX"; + if (sql.includes("ALTER INDEX")) return "CREATE_INDEX"; + // Function operations - all use CREATE_FUNCTION for main switching - if (sql.includes('CREATE OR REPLACE FUNCTION')) return 'CREATE_FUNCTION'; - if (sql.includes('CREATE FUNCTION')) return 'CREATE_FUNCTION'; - if (sql.includes('DROP FUNCTION')) return 'CREATE_FUNCTION'; - if (sql.includes('ALTER FUNCTION')) return 'CREATE_FUNCTION'; + if (sql.includes("CREATE OR REPLACE FUNCTION")) return "CREATE_FUNCTION"; + if (sql.includes("CREATE FUNCTION")) return "CREATE_FUNCTION"; + if (sql.includes("DROP FUNCTION")) return "CREATE_FUNCTION"; + if (sql.includes("ALTER FUNCTION")) return "CREATE_FUNCTION"; // Policy operations - if (sql.includes('CREATE POLICY')) return 'CREATE_POLICY'; - if (sql.includes('ALTER POLICY')) return 'ALTER_POLICY'; - if (sql.includes('DROP POLICY')) return 'DROP_POLICY'; - if (sql.includes('CREATE VIEW')) return 'CREATE_VIEW'; - if (sql.includes('CREATE TYPE')) return 'CREATE_ENUM'; - + if (sql.includes("CREATE POLICY")) return "CREATE_POLICY"; + if (sql.includes("ALTER POLICY")) return "ALTER_POLICY"; + if (sql.includes("DROP POLICY")) return "DROP_POLICY"; + if (sql.includes("CREATE VIEW")) return "CREATE_VIEW"; + if (sql.includes("CREATE TYPE")) return "CREATE_ENUM"; + // Trigger operations (check EVENT TRIGGER before TRIGGER to avoid false matches) - if (sql.includes('CREATE EVENT TRIGGER')) return 'CREATE_EVENT_TRIGGER'; - if (sql.includes('CREATE TRIGGER')) return 'CREATE_TRIGGER'; - if (sql.includes('ALTER TRIGGER')) return 'ALTER_TRIGGER'; - if (sql.includes('DROP TRIGGER')) return 'DROP_TRIGGER'; - - return 'UNKNOWN'; + 
if (sql.includes("CREATE EVENT TRIGGER")) return "CREATE_EVENT_TRIGGER"; + if (sql.includes("CREATE TRIGGER")) return "CREATE_TRIGGER"; + if (sql.includes("ALTER TRIGGER")) return "ALTER_TRIGGER"; + if (sql.includes("DROP TRIGGER")) return "DROP_TRIGGER"; + + return "UNKNOWN"; } - + _extractTargetObject(operation) { - const sql = operation.sql || ''; - + const sql = operation.sql || ""; + // Extract table name - let match = sql.match(/(?:CREATE TABLE|DROP TABLE|ALTER TABLE)\s+([^\s(]+)/i); + let match = sql.match( + /(?:CREATE TABLE|DROP TABLE|ALTER TABLE)\s+([^\s(]+)/i, + ); if (match) return match[1]; - + // Extract index name (handles CREATE, DROP, ALTER INDEX) - match = sql.match(/(?:CREATE(?:\s+UNIQUE)?\s+INDEX|DROP\s+INDEX|ALTER\s+INDEX)\s+([^\s]+)/i); + match = sql.match( + /(?:CREATE(?:\s+UNIQUE)?\s+INDEX|DROP\s+INDEX|ALTER\s+INDEX)\s+([^\s]+)/i, + ); if (match) return match[1]; - + // Extract function name (handles CREATE, CREATE OR REPLACE, DROP, ALTER) // Handle DROP FUNCTION IF EXISTS specially - if (sql.includes('DROP FUNCTION IF EXISTS')) { + if (sql.includes("DROP FUNCTION IF EXISTS")) { match = sql.match(/DROP\s+FUNCTION\s+IF\s+EXISTS\s+([^\s(]+)/i); if (match) return match[1]; } - match = sql.match(/(?:CREATE(?:\s+OR\s+REPLACE)?|DROP|ALTER)\s+FUNCTION\s+([^\s(]+)/i); + match = sql.match( + /(?:CREATE(?:\s+OR\s+REPLACE)?|DROP|ALTER)\s+FUNCTION\s+([^\s(]+)/i, + ); if (match) return match[1]; - + // Extract policy name for CREATE, ALTER, DROP POLICY match = sql.match(/(?:CREATE|ALTER|DROP)\s+POLICY\s+([^\s]+)/i); if (match) return match[1]; - + // Extract view name match = sql.match(/CREATE VIEW\s+([^\s]+)/i); if (match) return match[1]; - + // Extract type name match = sql.match(/CREATE TYPE\s+([^\s]+)/i); if (match) return match[1]; - + // Extract trigger name for CREATE, ALTER, DROP TRIGGER match = sql.match(/(?:CREATE|ALTER|DROP)\s+TRIGGER\s+([^\s]+)/i); if (match) return match[1]; - + // Extract event trigger name match = sql.match(/CREATE\s+EVENT\s+TRIGGER\s+([^\s]+)/i); if (match) return match[1]; - - return 'unknown'; + + return "unknown"; } - + _isHighRiskOperation(operation) { - const sql = operation.sql || ''; - return this.highRiskPatterns.some(pattern => pattern.test(sql)) || - operation.type === 'DESTRUCTIVE'; + const sql = operation.sql || ""; + return ( + this.highRiskPatterns.some((pattern) => pattern.test(sql)) || + operation.type === "DESTRUCTIVE" + ); } - + _requiresSecurityTests(operation) { if (!this.options.requireSecurityTests) return false; - - const sql = operation.sql || ''; - return this.securityPatterns.some(pattern => pattern.test(sql)); + + const sql = operation.sql || ""; + return this.securityPatterns.some((pattern) => pattern.test(sql)); } - + _requiresPerformanceTests(operation) { - const sql = operation.sql || ''; + const sql = operation.sql || ""; return /CREATE.*INDEX|ALTER TABLE.*ADD|VACUUM|ANALYZE/i.test(sql); } - + _estimateTestEffort(requirement) { const baseEffort = this.options.estimatedEffortPerTest; const complexityMultiplier = Math.min(requirement.testCases.length / 3, 3); - + return baseEffort * complexityMultiplier; } - + _generateTestReason(requirement, operation) { switch (requirement.type) { case TEST_TYPES.SCHEMA: - return 'Ensure schema changes are applied correctly'; + return "Ensure schema changes are applied correctly"; case TEST_TYPES.DATA: - return 'Verify data integrity after migration'; + return "Verify data integrity after migration"; case TEST_TYPES.CONSTRAINT: - return 'Validate constraint enforcement'; + return 
"Validate constraint enforcement"; case TEST_TYPES.RLS: case TEST_TYPES.PERMISSION: - return 'Critical security validation required'; + return "Critical security validation required"; case TEST_TYPES.FUNCTION: - return 'Ensure function behavior meets requirements'; + return "Ensure function behavior meets requirements"; default: - return 'Validate operation completed successfully'; + return "Validate operation completed successfully"; } } - + _generateTestMetadata(requirement, operation, context) { return { operationType: operation.type, operationSQL: operation.sql, analysisContext: { - environment: context.environment || 'unknown', - timestamp: new Date().toISOString() + environment: context.environment || "unknown", + timestamp: new Date().toISOString(), }, - estimatedEffort: this._estimateTestEffort(requirement) + estimatedEffort: this._estimateTestEffort(requirement), }; } - + _extractOperationDescription(operation) { - return operation.description || - operation.sql?.substring(0, 100) + '...' || - 'Unknown operation'; + return ( + operation.description || + operation.sql?.substring(0, 100) + "..." || + "Unknown operation" + ); } - + _generateTestingSuggestions(analysis, operations, context) { const suggestions = []; - + // High-level coverage suggestions if (analysis.summary.totalRequirements === 0) { - suggestions.push('No test requirements identified - consider reviewing migration complexity'); + suggestions.push( + "No test requirements identified - consider reviewing migration complexity", + ); } else { - suggestions.push(`${analysis.summary.totalRequirements} test requirements identified`); + suggestions.push( + `${analysis.summary.totalRequirements} test requirements identified`, + ); } - + // Priority-based suggestions - const criticalTests = analysis.summary.byPriority[TEST_PRIORITIES.CRITICAL] || 0; + const criticalTests = + analysis.summary.byPriority[TEST_PRIORITIES.CRITICAL] || 0; if (criticalTests > 0) { - suggestions.push(`${criticalTests} critical tests required - these must pass before deployment`); + suggestions.push( + `${criticalTests} critical tests required - these must pass before deployment`, + ); } - + // Risk area suggestions if (analysis.riskAreas.length > 0) { - suggestions.push(`${analysis.riskAreas.length} high-risk operations require extra testing attention`); + suggestions.push( + `${analysis.riskAreas.length} high-risk operations require extra testing attention`, + ); } - + // Effort estimation if (analysis.estimatedEffort > 8) { - suggestions.push('Consider parallelizing test implementation due to high effort estimate'); + suggestions.push( + "Consider parallelizing test implementation due to high effort estimate", + ); } - + // Security focus - const securityTests = analysis.summary.byType[TEST_TYPES.RLS] || 0 + - analysis.summary.byType[TEST_TYPES.PERMISSION] || 0; + const securityTests = + analysis.summary.byType[TEST_TYPES.RLS] || + 0 + analysis.summary.byType[TEST_TYPES.PERMISSION] || + 0; if (securityTests > 0) { - suggestions.push('Security-related changes detected - prioritize RLS and permission tests'); + suggestions.push( + "Security-related changes detected - prioritize RLS and permission tests", + ); } - + return suggestions; } - + /** * Generate column test requirements based on operation type * @param {Object} operation - Migration operation @@ -2814,30 +3102,85 @@ class TestRequirementAnalyzer extends EventEmitter { * @param {string} priority - Test priority * @returns {Array} Array of test requirements */ - 
generateColumnTestRequirements(operation, tableName, columnName, operationType, priority) { + generateColumnTestRequirements( + operation, + tableName, + columnName, + operationType, + priority, + ) { const requirements = []; - + switch (operationType) { - case 'ADD_COLUMN': - requirements.push(...this._generateColumnAdditionTests(operation, tableName, columnName, priority)); + case "ADD_COLUMN": + requirements.push( + ...this._generateColumnAdditionTests( + operation, + tableName, + columnName, + priority, + ), + ); break; - case 'DROP_COLUMN': - requirements.push(...this._generateColumnDropTests(operation, tableName, columnName, priority)); + case "DROP_COLUMN": + requirements.push( + ...this._generateColumnDropTests( + operation, + tableName, + columnName, + priority, + ), + ); break; - case 'ALTER_TYPE': - requirements.push(...this._generateColumnTypeChangeTests(operation, tableName, columnName, priority)); + case "ALTER_TYPE": + requirements.push( + ...this._generateColumnTypeChangeTests( + operation, + tableName, + columnName, + priority, + ), + ); break; - case 'SET_NOT_NULL': - requirements.push(...this._generateColumnNotNullTests(operation, tableName, columnName, priority)); + case "SET_NOT_NULL": + requirements.push( + ...this._generateColumnNotNullTests( + operation, + tableName, + columnName, + priority, + ), + ); break; - case 'DROP_NOT_NULL': - requirements.push(...this._generateColumnNullableTests(operation, tableName, columnName, priority)); + case "DROP_NOT_NULL": + requirements.push( + ...this._generateColumnNullableTests( + operation, + tableName, + columnName, + priority, + ), + ); break; - case 'SET_DEFAULT': - requirements.push(...this._generateColumnSetDefaultTests(operation, tableName, columnName, priority)); + case "SET_DEFAULT": + requirements.push( + ...this._generateColumnSetDefaultTests( + operation, + tableName, + columnName, + priority, + ), + ); break; - case 'DROP_DEFAULT': - requirements.push(...this._generateColumnDropDefaultTests(operation, tableName, columnName, priority)); + case "DROP_DEFAULT": + requirements.push( + ...this._generateColumnDropDefaultTests( + operation, + tableName, + columnName, + priority, + ), + ); break; default: // Generic column operation test @@ -2847,49 +3190,48 @@ class TestRequirementAnalyzer extends EventEmitter { description: `Verify column ${columnName} operation in ${tableName}`, target: `${tableName}.${columnName}`, testCases: [ - 'has_column()', - `-- Verify column operation completed successfully` + "has_column()", + `-- Verify column operation completed successfully`, ], metadata: { operationType, tableName, - columnName - } + columnName, + }, }); } - + return requirements; } - + /** * Generate column addition test requirements * @private */ _generateColumnAdditionTests(operation, tableName, columnName, priority) { const columnMeta = this._parseColumnDefinition(operation.sql, columnName); - const testCases = [ - 'has_column()', - 'col_type_is()', - ]; - + const testCases = ["has_column()", "col_type_is()"]; + if (columnMeta && columnMeta.notNull) { - testCases.push('col_not_null()'); + testCases.push("col_not_null()"); } - + if (columnMeta && columnMeta.hasDefault) { - testCases.push('col_has_default()'); + testCases.push("col_has_default()"); } - - return [{ - type: TEST_TYPES.SCHEMA, - priority, - description: `Verify column ${columnName} added to ${tableName}`, - target: `${tableName}.${columnName}`, - testCases, - metadata: columnMeta - }]; + + return [ + { + type: TEST_TYPES.SCHEMA, + priority, + description: 
`Verify column ${columnName} added to ${tableName}`, + target: `${tableName}.${columnName}`, + testCases, + metadata: columnMeta, + }, + ]; } - + /** * Generate column drop test requirements * @private @@ -2901,8 +3243,8 @@ class TestRequirementAnalyzer extends EventEmitter { priority: TEST_PRIORITIES.CRITICAL, description: `Verify column ${columnName} dropped from ${tableName}`, target: `${tableName}.${columnName}`, - testCases: ['hasnt_column()'], - metadata: { destructive: true } + testCases: ["hasnt_column()"], + metadata: { destructive: true }, }, { type: TEST_TYPES.DATA, @@ -2910,13 +3252,13 @@ class TestRequirementAnalyzer extends EventEmitter { description: `Comprehensive validation after ${columnName} drop from ${tableName}`, target: tableName, testCases: [ - '-- Verify table structure integrity', - '-- Check remaining columns are intact' - ] - } + "-- Verify table structure integrity", + "-- Check remaining columns are intact", + ], + }, ]; } - + /** * Generate column type change test requirements * @private @@ -2928,31 +3270,25 @@ class TestRequirementAnalyzer extends EventEmitter { priority, description: `Verify ${columnName} type change in ${tableName}`, target: `${tableName}.${columnName}`, - testCases: ['col_type_is()'] + testCases: ["col_type_is()"], }, { type: TEST_TYPES.DATA, priority: TEST_PRIORITIES.CRITICAL, description: `Verify data migration for ${columnName} in ${tableName}`, target: `${tableName}.${columnName}`, - testCases: [ - '-- Test data conversion', - '-- Verify no data loss' - ] + testCases: ["-- Test data conversion", "-- Verify no data loss"], }, { type: TEST_TYPES.DATA, priority: TEST_PRIORITIES.HIGH, description: `Comprehensive validation after ${columnName} type change`, target: tableName, - testCases: [ - '-- Check data integrity', - '-- Test edge cases' - ] - } + testCases: ["-- Check data integrity", "-- Test edge cases"], + }, ]; } - + /** * Generate NOT NULL constraint test requirements * @private @@ -2964,8 +3300,8 @@ class TestRequirementAnalyzer extends EventEmitter { priority, description: `Verify NOT NULL constraint on ${columnName} in ${tableName}`, target: `${tableName}.${columnName}`, - testCases: ['col_not_null()'], - metadata: { constraintType: 'NOT NULL' } + testCases: ["col_not_null()"], + metadata: { constraintType: "NOT NULL" }, }, { type: TEST_TYPES.DATA, @@ -2973,58 +3309,64 @@ class TestRequirementAnalyzer extends EventEmitter { description: `Comprehensive validation after NOT NULL constraint`, target: tableName, testCases: [ - '-- Verify existing data compatibility', - '-- Test INSERT operations require value' - ] - } + "-- Verify existing data compatibility", + "-- Test INSERT operations require value", + ], + }, ]; } - + /** * Generate nullable constraint test requirements * @private */ _generateColumnNullableTests(operation, tableName, columnName, priority) { - return [{ - type: TEST_TYPES.CONSTRAINT, - priority, - description: `Verify nullable constraint removed from ${columnName} in ${tableName}`, - target: `${tableName}.${columnName}`, - testCases: ['col_is_null() - column allows nulls'], - metadata: { constraintRemoved: true } - }]; + return [ + { + type: TEST_TYPES.CONSTRAINT, + priority, + description: `Verify nullable constraint removed from ${columnName} in ${tableName}`, + target: `${tableName}.${columnName}`, + testCases: ["col_is_null() - column allows nulls"], + metadata: { constraintRemoved: true }, + }, + ]; } - + /** * Generate SET DEFAULT test requirements * @private */ _generateColumnSetDefaultTests(operation, 
tableName, columnName, priority) { - return [{ - type: TEST_TYPES.CONSTRAINT, - priority, - description: `Verify default value set for ${columnName} in ${tableName}`, - target: `${tableName}.${columnName}`, - testCases: ['col_has_default()', 'col_default_is()'], - metadata: { requiresInsertTest: true } - }]; + return [ + { + type: TEST_TYPES.CONSTRAINT, + priority, + description: `Verify default value set for ${columnName} in ${tableName}`, + target: `${tableName}.${columnName}`, + testCases: ["col_has_default()", "col_default_is()"], + metadata: { requiresInsertTest: true }, + }, + ]; } - + /** * Generate DROP DEFAULT test requirements * @private */ _generateColumnDropDefaultTests(operation, tableName, columnName, priority) { - return [{ - type: TEST_TYPES.CONSTRAINT, - priority, - description: `Verify default value removed from ${columnName} in ${tableName}`, - target: `${tableName}.${columnName}`, - testCases: ['col_hasnt_default()'], - metadata: { defaultRemoved: true } - }]; + return [ + { + type: TEST_TYPES.CONSTRAINT, + priority, + description: `Verify default value removed from ${columnName} in ${tableName}`, + target: `${tableName}.${columnName}`, + testCases: ["col_hasnt_default()"], + metadata: { defaultRemoved: true }, + }, + ]; } - + /** * Extract column name from SQL operation * @private @@ -3035,22 +3377,24 @@ class TestRequirementAnalyzer extends EventEmitter { _extractColumnName(sql, operation) { const upperSql = sql.toUpperCase(); const operationUpper = operation.toUpperCase(); - + const operationIndex = upperSql.indexOf(operationUpper); if (operationIndex === -1) { - return 'unknown'; + return "unknown"; } - - const afterOperation = sql.substring(operationIndex + operation.length).trim(); + + const afterOperation = sql + .substring(operationIndex + operation.length) + .trim(); const parts = afterOperation.split(/\s+/); - + if (parts.length > 0) { - return parts[0].replace(/[";,]/g, '').replace(/"/g, ''); + return parts[0].replace(/[";,]/g, "").replace(/"/g, ""); } - - return 'unknown'; + + return "unknown"; } - + /** * Extract constraint name from SQL operation * @private @@ -3059,10 +3403,12 @@ class TestRequirementAnalyzer extends EventEmitter { * @returns {string} Constraint name */ _extractConstraintName(sql, operation) { - const constraintMatch = sql.match(new RegExp(`${operation}\\s+([^\\s]+)`, 'i')); - return constraintMatch ? constraintMatch[1].replace(/"/g, '') : 'unknown'; + const constraintMatch = sql.match( + new RegExp(`${operation}\\s+([^\\s]+)`, "i"), + ); + return constraintMatch ? 
constraintMatch[1].replace(/"/g, "") : "unknown"; } - + /** * Identify constraint type from SQL * @private @@ -3071,25 +3417,25 @@ class TestRequirementAnalyzer extends EventEmitter { */ _identifyConstraintType(sql) { const upperSql = sql.toUpperCase(); - - if (upperSql.includes('PRIMARY KEY')) { - return 'PRIMARY_KEY'; - } else if (upperSql.includes('FOREIGN KEY')) { - return 'FOREIGN_KEY'; - } else if (upperSql.includes('UNIQUE')) { - return 'UNIQUE'; - } else if (upperSql.includes('CHECK')) { - return 'CHECK'; + + if (upperSql.includes("PRIMARY KEY")) { + return "PRIMARY_KEY"; + } else if (upperSql.includes("FOREIGN KEY")) { + return "FOREIGN_KEY"; + } else if (upperSql.includes("UNIQUE")) { + return "UNIQUE"; + } else if (upperSql.includes("CHECK")) { + return "CHECK"; } - - return 'UNKNOWN'; + + return "UNKNOWN"; } - + /** * Parse column constraints from SQL (alias for _parseColumnDefinition for test compatibility) * @private * @param {string} sql - SQL statement - * @param {string} columnName - Column name + * @param {string} columnName - Column name * @returns {Object|null} Parsed column information */ _parseColumnConstraints(sql, columnName) { @@ -3109,94 +3455,116 @@ class TestRequirementAnalyzer extends EventEmitter { foreignKeys: [], checkConstraints: [], uniqueConstraints: [], - indexes: [] + indexes: [], }; - + try { // Extract table definition inside parentheses - const tableDefMatch = sql.match(/CREATE TABLE\s+[^\s(]+\s*\(([\s\S]*?)\)(?:\s*;|\s*$)/i); + const tableDefMatch = sql.match( + /CREATE TABLE\s+[^\s(]+\s*\(([\s\S]*?)\)(?:\s*;|\s*$)/i, + ); if (!tableDefMatch) { return structure; } - + const tableDef = tableDefMatch[1]; - + // Parse column definitions and constraints const items = this._splitTableItems(tableDef); - + for (const item of items) { const cleanItem = item.trim(); - - if (cleanItem.toUpperCase().startsWith('PRIMARY KEY')) { + + if (cleanItem.toUpperCase().startsWith("PRIMARY KEY")) { // Parse primary key constraint const pkMatch = cleanItem.match(/PRIMARY KEY\s*\(\s*([^)]+)\s*\)/i); if (pkMatch) { - structure.primaryKeys = pkMatch[1].split(',').map(col => col.trim().replace(/"/g, '')); + structure.primaryKeys = pkMatch[1] + .split(",") + .map((col) => col.trim().replace(/"/g, "")); } - } else if (cleanItem.toUpperCase().startsWith('FOREIGN KEY')) { + } else if (cleanItem.toUpperCase().startsWith("FOREIGN KEY")) { // Parse foreign key constraint - const fkMatch = cleanItem.match(/FOREIGN KEY\s*\(\s*([^)]+)\s*\)\s*REFERENCES\s+([^\s(]+)\s*\(\s*([^)]+)\s*\)/i); + const fkMatch = cleanItem.match( + /FOREIGN KEY\s*\(\s*([^)]+)\s*\)\s*REFERENCES\s+([^\s(]+)\s*\(\s*([^)]+)\s*\)/i, + ); if (fkMatch) { structure.foreignKeys.push({ - column: fkMatch[1].trim().replace(/"/g, ''), - referencedTable: fkMatch[2].trim().replace(/"/g, ''), - referencedColumn: fkMatch[3].trim().replace(/"/g, '') + column: fkMatch[1].trim().replace(/"/g, ""), + referencedTable: fkMatch[2].trim().replace(/"/g, ""), + referencedColumn: fkMatch[3].trim().replace(/"/g, ""), }); } - } else if (cleanItem.toUpperCase().startsWith('UNIQUE')) { + } else if (cleanItem.toUpperCase().startsWith("UNIQUE")) { // Parse unique constraint - const uniqueMatch = cleanItem.match(/UNIQUE\s*(?:\(\s*([^)]+)\s*\))?/i); + const uniqueMatch = cleanItem.match( + /UNIQUE\s*(?:\(\s*([^)]+)\s*\))?/i, + ); if (uniqueMatch) { structure.uniqueConstraints.push({ - name: `unique_${uniqueMatch[1] || 'constraint'}`, - columns: uniqueMatch[1] ? 
uniqueMatch[1].split(',').map(c => c.trim()) : [] + name: `unique_${uniqueMatch[1] || "constraint"}`, + columns: uniqueMatch[1] + ? uniqueMatch[1].split(",").map((c) => c.trim()) + : [], }); } - } else if (cleanItem.toUpperCase().startsWith('CHECK')) { + } else if (cleanItem.toUpperCase().startsWith("CHECK")) { // Parse check constraint const checkMatch = cleanItem.match(/CHECK\s*\(([^)]+)\)/i); if (checkMatch) { structure.checkConstraints.push({ name: `check_constraint_${Date.now()}`, - expression: checkMatch[1] + expression: checkMatch[1], }); } - } else if (cleanItem.toUpperCase().includes('CONSTRAINT')) { + } else if (cleanItem.toUpperCase().includes("CONSTRAINT")) { // Parse named constraints - const constraintMatch = cleanItem.match(/CONSTRAINT\s+([^\s]+)\s+(.*)/i); + const constraintMatch = cleanItem.match( + /CONSTRAINT\s+([^\s]+)\s+(.*)/i, + ); if (constraintMatch) { const constraintName = constraintMatch[1]; const constraintDef = constraintMatch[2]; - - if (constraintDef.toUpperCase().startsWith('PRIMARY KEY')) { - const pkMatch = constraintDef.match(/PRIMARY KEY\s*\(\s*([^)]+)\s*\)/i); + + if (constraintDef.toUpperCase().startsWith("PRIMARY KEY")) { + const pkMatch = constraintDef.match( + /PRIMARY KEY\s*\(\s*([^)]+)\s*\)/i, + ); if (pkMatch) { - structure.primaryKeys = pkMatch[1].split(',').map(col => col.trim().replace(/"/g, '')); + structure.primaryKeys = pkMatch[1] + .split(",") + .map((col) => col.trim().replace(/"/g, "")); } - } else if (constraintDef.toUpperCase().startsWith('FOREIGN KEY')) { - const fkMatch = constraintDef.match(/FOREIGN KEY\s*\(\s*([^)]+)\s*\)\s*REFERENCES\s+([^\s(]+)\s*\(\s*([^)]+)\s*\)/i); + } else if (constraintDef.toUpperCase().startsWith("FOREIGN KEY")) { + const fkMatch = constraintDef.match( + /FOREIGN KEY\s*\(\s*([^)]+)\s*\)\s*REFERENCES\s+([^\s(]+)\s*\(\s*([^)]+)\s*\)/i, + ); if (fkMatch) { structure.foreignKeys.push({ name: constraintName, - column: fkMatch[1].trim().replace(/"/g, ''), - referencedTable: fkMatch[2].trim().replace(/"/g, ''), - referencedColumn: fkMatch[3].trim().replace(/"/g, '') + column: fkMatch[1].trim().replace(/"/g, ""), + referencedTable: fkMatch[2].trim().replace(/"/g, ""), + referencedColumn: fkMatch[3].trim().replace(/"/g, ""), }); } - } else if (constraintDef.toUpperCase().startsWith('UNIQUE')) { - const uniqueMatch = constraintDef.match(/UNIQUE\s*\(\s*([^)]+)\s*\)/i); + } else if (constraintDef.toUpperCase().startsWith("UNIQUE")) { + const uniqueMatch = constraintDef.match( + /UNIQUE\s*\(\s*([^)]+)\s*\)/i, + ); if (uniqueMatch) { structure.uniqueConstraints.push({ name: constraintName, - columns: uniqueMatch[1].split(',').map(c => c.trim().replace(/"/g, '')) + columns: uniqueMatch[1] + .split(",") + .map((c) => c.trim().replace(/"/g, "")), }); } - } else if (constraintDef.toUpperCase().startsWith('CHECK')) { + } else if (constraintDef.toUpperCase().startsWith("CHECK")) { const checkMatch = constraintDef.match(/CHECK\s*\(([^)]+)\)/i); if (checkMatch) { structure.checkConstraints.push({ name: constraintName, - expression: checkMatch[1] + expression: checkMatch[1], }); } } @@ -3209,15 +3577,14 @@ class TestRequirementAnalyzer extends EventEmitter { } } } - } catch (error) { // If parsing fails, return basic structure - console.warn('Failed to parse table structure:', error.message); + console.warn("Failed to parse table structure:", error.message); } - + return structure; } - + /** * Parse table alterations from ALTER TABLE SQL * @private @@ -3233,104 +3600,115 @@ class TestRequirementAnalyzer extends EventEmitter { 
renamedColumns: [], renamedTo: null, addedConstraints: [], - droppedConstraints: [] + droppedConstraints: [], }; - + try { const upperSql = sql.toUpperCase(); - + // Handle ADD COLUMN - const addColumnRegex = /ADD\s+(?:COLUMN\s+)?([^\s,;]+)\s+([^,;]*?)(?=\s*(?:,|;|$|ADD|DROP|ALTER))/gi; + const addColumnRegex = + /ADD\s+(?:COLUMN\s+)?([^\s,;]+)\s+([^,;]*?)(?=\s*(?:,|;|$|ADD|DROP|ALTER))/gi; let addMatch; while ((addMatch = addColumnRegex.exec(upperSql)) !== null) { - const columnName = addMatch[1].replace(/"/g, ''); + const columnName = addMatch[1].replace(/"/g, ""); const columnDef = addMatch[2].trim(); - alterations.addedColumns.push(this._parseColumnDefinition(`${columnName} ${columnDef}`)); + alterations.addedColumns.push( + this._parseColumnDefinition(`${columnName} ${columnDef}`), + ); } - + // Handle DROP COLUMN const dropColumnRegex = /DROP\s+(?:COLUMN\s+)?([^\s,;]+)/gi; let dropMatch; while ((dropMatch = dropColumnRegex.exec(upperSql)) !== null) { - alterations.droppedColumns.push(dropMatch[1].replace(/"/g, '')); + alterations.droppedColumns.push(dropMatch[1].replace(/"/g, "")); } - + // Handle ALTER COLUMN TYPE - const alterTypeRegex = /ALTER\s+(?:COLUMN\s+)?([^\s]+)\s+(?:SET\s+DATA\s+)?TYPE\s+([^\s,;]+)/gi; + const alterTypeRegex = + /ALTER\s+(?:COLUMN\s+)?([^\s]+)\s+(?:SET\s+DATA\s+)?TYPE\s+([^\s,;]+)/gi; let alterTypeMatch; while ((alterTypeMatch = alterTypeRegex.exec(upperSql)) !== null) { alterations.alteredColumns.push({ - name: alterTypeMatch[1].replace(/"/g, ''), + name: alterTypeMatch[1].replace(/"/g, ""), newType: alterTypeMatch[2], - oldType: 'unknown' // Would need additional context to determine old type + oldType: "unknown", // Would need additional context to determine old type }); } - + // Handle RENAME TABLE const renameTableMatch = upperSql.match(/RENAME\s+TO\s+([^\s;]+)/i); if (renameTableMatch) { - alterations.renamedTo = renameTableMatch[1].replace(/"/g, ''); + alterations.renamedTo = renameTableMatch[1].replace(/"/g, ""); } - + // Handle RENAME COLUMN - const renameColumnRegex = /RENAME\s+(?:COLUMN\s+)?([^\s]+)\s+TO\s+([^\s,;]+)/gi; + const renameColumnRegex = + /RENAME\s+(?:COLUMN\s+)?([^\s]+)\s+TO\s+([^\s,;]+)/gi; let renameColMatch; while ((renameColMatch = renameColumnRegex.exec(upperSql)) !== null) { alterations.renamedColumns.push({ - oldName: renameColMatch[1].replace(/"/g, ''), - newName: renameColMatch[2].replace(/"/g, ''), - type: 'unknown' // Would need additional context to determine type + oldName: renameColMatch[1].replace(/"/g, ""), + newName: renameColMatch[2].replace(/"/g, ""), + type: "unknown", // Would need additional context to determine type }); } - + // Handle ADD CONSTRAINT - const addConstraintRegex = /ADD\s+(?:CONSTRAINT\s+([^\s]+)\s+)?(PRIMARY\s+KEY|FOREIGN\s+KEY|UNIQUE|CHECK)\s*([^,;]*?)(?=\s*(?:,|;|$|ADD|DROP|ALTER))/gi; + const addConstraintRegex = + /ADD\s+(?:CONSTRAINT\s+([^\s]+)\s+)?(PRIMARY\s+KEY|FOREIGN\s+KEY|UNIQUE|CHECK)\s*([^,;]*?)(?=\s*(?:,|;|$|ADD|DROP|ALTER))/gi; let constraintMatch; while ((constraintMatch = addConstraintRegex.exec(upperSql)) !== null) { const constraintName = constraintMatch[1] || `auto_${Date.now()}`; - const constraintType = constraintMatch[2].replace(/\s+/g, ' '); + const constraintType = constraintMatch[2].replace(/\s+/g, " "); const constraintDef = constraintMatch[3].trim(); - + const constraint = { - name: constraintName.replace(/"/g, ''), + name: constraintName.replace(/"/g, ""), type: constraintType, - definition: constraintDef + definition: constraintDef, }; - + // Parse specific constraint 
details - if (constraintType.includes('FOREIGN KEY')) { - const fkMatch = constraintDef.match(/\(\s*([^)]+)\s*\)\s*REFERENCES\s+([^\s(]+)\s*\(\s*([^)]+)\s*\)/i); + if (constraintType.includes("FOREIGN KEY")) { + const fkMatch = constraintDef.match( + /\(\s*([^)]+)\s*\)\s*REFERENCES\s+([^\s(]+)\s*\(\s*([^)]+)\s*\)/i, + ); if (fkMatch) { - constraint.column = fkMatch[1].trim().replace(/"/g, ''); - constraint.referencedTable = fkMatch[2].trim().replace(/"/g, ''); - constraint.referencedColumn = fkMatch[3].trim().replace(/"/g, ''); + constraint.column = fkMatch[1].trim().replace(/"/g, ""); + constraint.referencedTable = fkMatch[2].trim().replace(/"/g, ""); + constraint.referencedColumn = fkMatch[3].trim().replace(/"/g, ""); } - } else if (constraintType.includes('PRIMARY KEY')) { + } else if (constraintType.includes("PRIMARY KEY")) { const pkMatch = constraintDef.match(/\(\s*([^)]+)\s*\)/i); if (pkMatch) { - constraint.columns = pkMatch[1].split(',').map(col => col.trim().replace(/"/g, '')); + constraint.columns = pkMatch[1] + .split(",") + .map((col) => col.trim().replace(/"/g, "")); } } - + alterations.addedConstraints.push(constraint); } - + // Handle DROP CONSTRAINT const dropConstraintRegex = /DROP\s+CONSTRAINT\s+([^\s,;]+)/gi; let dropConstraintMatch; - while ((dropConstraintMatch = dropConstraintRegex.exec(upperSql)) !== null) { + while ( + (dropConstraintMatch = dropConstraintRegex.exec(upperSql)) !== null + ) { alterations.droppedConstraints.push({ - name: dropConstraintMatch[1].replace(/"/g, '') + name: dropConstraintMatch[1].replace(/"/g, ""), }); } - } catch (error) { - console.warn('Failed to parse table alterations:', error.message); + console.warn("Failed to parse table alterations:", error.message); } - + return alterations; } - + /** * Parse individual column definition * @private @@ -3341,44 +3719,52 @@ class TestRequirementAnalyzer extends EventEmitter { if (!columnDef || !columnDef.trim()) { return null; } - + try { const parts = columnDef.trim().split(/\s+/); if (parts.length < 2) { return null; } - + // Handle different SQL formats let nameIndex = null; let typeIndex = 1; - + if (columnName) { // If column name is provided separately, find it in the SQL and get the type after it const upperSql = columnDef.toUpperCase(); const upperColumnName = columnName.toUpperCase(); const columnIndex = upperSql.indexOf(upperColumnName); - + if (columnIndex !== -1) { // Find the position of the column name in the parts array const beforeColumn = columnDef.substring(0, columnIndex); - const beforeParts = beforeColumn.trim() ? beforeColumn.trim().split(/\s+/) : []; + const beforeParts = beforeColumn.trim() + ? 
beforeColumn.trim().split(/\s+/) + : []; nameIndex = beforeParts.length; typeIndex = nameIndex + 1; } else { // Column name not found in SQL, try to infer position - if (parts[0].toUpperCase() === 'ADD' && parts[1].toUpperCase() === 'COLUMN') { + if ( + parts[0].toUpperCase() === "ADD" && + parts[1].toUpperCase() === "COLUMN" + ) { nameIndex = 2; typeIndex = 3; - } else if (parts[0].toUpperCase() === 'ADD') { + } else if (parts[0].toUpperCase() === "ADD") { nameIndex = 1; typeIndex = 2; } } - } else if (parts[0].toUpperCase() === 'ADD' && parts[1].toUpperCase() === 'COLUMN') { + } else if ( + parts[0].toUpperCase() === "ADD" && + parts[1].toUpperCase() === "COLUMN" + ) { // Handle "ADD COLUMN name type" format nameIndex = 2; typeIndex = 3; - } else if (parts[0].toUpperCase() === 'ADD') { + } else if (parts[0].toUpperCase() === "ADD") { // Handle "ADD name type" format nameIndex = 1; typeIndex = 2; @@ -3387,48 +3773,62 @@ class TestRequirementAnalyzer extends EventEmitter { nameIndex = 0; typeIndex = 1; } - + const column = { - name: columnName || (nameIndex !== null && nameIndex < parts.length ? parts[nameIndex].replace(/"/g, '') : 'unknown'), + name: + columnName || + (nameIndex !== null && nameIndex < parts.length + ? parts[nameIndex].replace(/"/g, "") + : "unknown"), type: this._parseColumnType(parts, typeIndex), notNull: false, hasDefault: false, defaultValue: null, foreignKey: null, - isUnique: false + isUnique: false, }; - + const defString = columnDef.toUpperCase(); - + // Check for NOT NULL - column.notNull = defString.includes('NOT NULL'); - + column.notNull = defString.includes("NOT NULL"); + // Check for UNIQUE - column.isUnique = defString.includes('UNIQUE'); - + column.isUnique = defString.includes("UNIQUE"); + // Check for DEFAULT - more comprehensive pattern, preserve original case - const defaultMatch = columnDef.match(/DEFAULT\s+('(?:[^'\\]|\\.)*'|"(?:[^"\\]|\\.)*"|\d+\.?\d*|[a-zA-Z_][a-zA-Z0-9_]*(?:\([^)]*\))?)/i); + const defaultMatch = columnDef.match( + /DEFAULT\s+('(?:[^'\\]|\\.)*'|"(?:[^"\\]|\\.)*"|\d+\.?\d*|[a-zA-Z_][a-zA-Z0-9_]*(?:\([^)]*\))?)/i, + ); if (defaultMatch) { column.hasDefault = true; column.defaultValue = defaultMatch[1]; } - + // Check for inline foreign key reference - const referencesMatch = defString.match(/REFERENCES\s+([^\s(]+)(?:\s*\(\s*([^)]+)\s*\))?/i); + const referencesMatch = defString.match( + /REFERENCES\s+([^\s(]+)(?:\s*\(\s*([^)]+)\s*\))?/i, + ); if (referencesMatch) { column.foreignKey = { - referencedTable: referencesMatch[1].replace(/"/g, ''), - referencedColumn: referencesMatch[2] ? referencesMatch[2].replace(/"/g, '') : 'id' + referencedTable: referencesMatch[1].replace(/"/g, ""), + referencedColumn: referencesMatch[2] + ? 
referencesMatch[2].replace(/"/g, "") + : "id", }; } - + return column; } catch (error) { - console.warn('Failed to parse column definition:', columnDef, error.message); + console.warn( + "Failed to parse column definition:", + columnDef, + error.message, + ); return null; } } - + /** * Parse column type including size specifications * @private @@ -3438,27 +3838,30 @@ class TestRequirementAnalyzer extends EventEmitter { */ _parseColumnType(parts, typeIndex) { if (!parts || typeIndex >= parts.length) { - return 'UNKNOWN'; + return "UNKNOWN"; } - + let type = parts[typeIndex].toUpperCase(); - + // Check if next part contains size specification - if (typeIndex + 1 < parts.length && parts[typeIndex + 1].match(/^\(\d+(?:,\d+)?\)$/)) { + if ( + typeIndex + 1 < parts.length && + parts[typeIndex + 1].match(/^\(\d+(?:,\d+)?\)$/) + ) { type += parts[typeIndex + 1]; - } else if (type.includes('(')) { + } else if (type.includes("(")) { // Type already includes size specification // Check if it spans multiple parts due to spacing let i = typeIndex + 1; - while (i < parts.length && !type.includes(')')) { + while (i < parts.length && !type.includes(")")) { type += parts[i]; i++; } } - + return type; } - + /** * Split table items (columns and constraints) while respecting parentheses * @private @@ -3467,17 +3870,17 @@ class TestRequirementAnalyzer extends EventEmitter { */ _splitTableItems(tableDef) { const items = []; - let current = ''; + let current = ""; let parenDepth = 0; let inQuotes = false; let quoteChar = null; - + for (let i = 0; i < tableDef.length; i++) { const char = tableDef[i]; const prevChar = i > 0 ? tableDef[i - 1] : null; - + // Handle quotes - if ((char === '"' || char === "'") && prevChar !== '\\') { + if ((char === '"' || char === "'") && prevChar !== "\\") { if (!inQuotes) { inQuotes = true; quoteChar = char; @@ -3486,36 +3889,41 @@ class TestRequirementAnalyzer extends EventEmitter { quoteChar = null; } } - + if (!inQuotes) { // Track parentheses depth - if (char === '(') { + if (char === "(") { parenDepth++; - } else if (char === ')') { + } else if (char === ")") { parenDepth--; - } else if (char === ',' && parenDepth === 0) { + } else if (char === "," && parenDepth === 0) { // Split on comma only at top level if (current.trim()) { items.push(current.trim()); } - current = ''; + current = ""; continue; } } - + current += char; } - + // Add the last item if (current.trim()) { items.push(current.trim()); } - + return items; } _comparePriority(priority1, priority2) { - const priorities = [TEST_PRIORITIES.LOW, TEST_PRIORITIES.MEDIUM, TEST_PRIORITIES.HIGH, TEST_PRIORITIES.CRITICAL]; + const priorities = [ + TEST_PRIORITIES.LOW, + TEST_PRIORITIES.MEDIUM, + TEST_PRIORITIES.HIGH, + TEST_PRIORITIES.CRITICAL, + ]; return priorities.indexOf(priority2) - priorities.indexOf(priority1); // Reverse order (highest first) } @@ -3529,7 +3937,7 @@ class TestRequirementAnalyzer extends EventEmitter { */ _generateTriggerCreationTests(operation, target, priority) { const requirements = []; - const sql = operation.sql || ''; + const sql = operation.sql || ""; const triggerDetails = this._parseTriggerDetails(sql); // Basic trigger existence test @@ -3539,11 +3947,11 @@ class TestRequirementAnalyzer extends EventEmitter { description: `Verify trigger ${target} exists with correct properties`, target, testCases: [ - 'has_trigger() - trigger exists', - 'trigger_is() - trigger function validation', - 'is_trigger_on() - verify correct table', - 'trigger_fires_on() - verify trigger events', - 'trigger_is_for() 
- verify trigger level (ROW/STATEMENT)' + "has_trigger() - trigger exists", + "trigger_is() - trigger function validation", + "is_trigger_on() - verify correct table", + "trigger_fires_on() - verify trigger events", + "trigger_is_for() - verify trigger level (ROW/STATEMENT)", ], metadata: { tableName: triggerDetails.tableName, @@ -3551,8 +3959,8 @@ class TestRequirementAnalyzer extends EventEmitter { timing: triggerDetails.timing, events: triggerDetails.events, level: triggerDetails.level, - condition: triggerDetails.condition - } + condition: triggerDetails.condition, + }, }); // Trigger function validation test @@ -3563,17 +3971,17 @@ class TestRequirementAnalyzer extends EventEmitter { description: `Verify trigger function ${triggerDetails.functionName} behavior`, target: triggerDetails.functionName, testCases: [ - 'has_function() - function exists', - 'function_returns() - returns TRIGGER type', - 'Test function handles TG_OP correctly', - 'Test function handles OLD/NEW records', - 'Verify function error handling' + "has_function() - function exists", + "function_returns() - returns TRIGGER type", + "Test function handles TG_OP correctly", + "Test function handles OLD/NEW records", + "Verify function error handling", ], metadata: { isTriggerFunction: true, associatedTrigger: target, - returnType: 'trigger' - } + returnType: "trigger", + }, }); } @@ -3584,17 +3992,17 @@ class TestRequirementAnalyzer extends EventEmitter { description: `Test trigger ${target} firing conditions and behavior`, target, testCases: [ - 'Test trigger fires on correct operations', - 'Test trigger timing (BEFORE/AFTER/INSTEAD OF)', - 'Test data modifications by trigger', - 'Test trigger with different data scenarios', - 'Test cascade effects of trigger actions' + "Test trigger fires on correct operations", + "Test trigger timing (BEFORE/AFTER/INSTEAD OF)", + "Test data modifications by trigger", + "Test trigger with different data scenarios", + "Test cascade effects of trigger actions", ], metadata: { behaviorTests: this._generateTriggerBehaviorTests(triggerDetails), requiresDataSetup: true, - testComplexity: 'high' - } + testComplexity: "high", + }, }); // Constraint trigger specific tests @@ -3605,36 +4013,39 @@ class TestRequirementAnalyzer extends EventEmitter { description: `Test constraint trigger ${target} enforcement`, target, testCases: [ - 'Test constraint enforcement behavior', - 'Test deferred constraint checking', - 'Test constraint violation handling', - 'Test transaction rollback on constraint failure' + "Test constraint enforcement behavior", + "Test deferred constraint checking", + "Test constraint violation handling", + "Test transaction rollback on constraint failure", ], metadata: { isConstraintTrigger: true, deferrable: triggerDetails.deferrable, - initiallyDeferred: triggerDetails.initiallyDeferred - } + initiallyDeferred: triggerDetails.initiallyDeferred, + }, }); } // Performance tests for potentially expensive triggers - if (this.options.requirePerformanceTests && this._isTriggerPerformanceSensitive(triggerDetails)) { + if ( + this.options.requirePerformanceTests && + this._isTriggerPerformanceSensitive(triggerDetails) + ) { requirements.push({ type: TEST_TYPES.INDEX, priority: TEST_PRIORITIES.MEDIUM, description: `Test performance impact of trigger ${target}`, target, testCases: [ - 'Measure operation performance with/without trigger', - 'Test trigger performance with large data sets', - 'Verify trigger doesn\'t create deadlocks', - 'Test concurrent operation performance' + "Measure 
operation performance with/without trigger", + "Test trigger performance with large data sets", + "Verify trigger doesn't create deadlocks", + "Test concurrent operation performance", ], metadata: { performanceSensitive: true, - requiresBenchmarking: true - } + requiresBenchmarking: true, + }, }); } @@ -3651,7 +4062,7 @@ class TestRequirementAnalyzer extends EventEmitter { */ _generateTriggerAlterationTests(operation, target, priority) { const requirements = []; - const sql = operation.sql || ''; + const sql = operation.sql || ""; // Basic trigger property verification requirements.push({ @@ -3660,36 +4071,39 @@ class TestRequirementAnalyzer extends EventEmitter { description: `Verify trigger ${target} alterations applied correctly`, target, testCases: [ - 'has_trigger() - trigger still exists', - 'trigger_is() - verify updated properties', - 'Test altered trigger behavior', - 'Verify backward compatibility where applicable' + "has_trigger() - trigger still exists", + "trigger_is() - verify updated properties", + "Test altered trigger behavior", + "Verify backward compatibility where applicable", ], metadata: { alterationType: this._parseAlterationType(sql), - requiresRegression: true - } + requiresRegression: true, + }, }); // If enabling/disabling trigger - if (sql.toUpperCase().includes('ENABLE') || sql.toUpperCase().includes('DISABLE')) { - const isEnabled = sql.toUpperCase().includes('ENABLE'); + if ( + sql.toUpperCase().includes("ENABLE") || + sql.toUpperCase().includes("DISABLE") + ) { + const isEnabled = sql.toUpperCase().includes("ENABLE"); requirements.push({ type: TEST_TYPES.DATA, priority: TEST_PRIORITIES.HIGH, - description: `Test trigger ${target} ${isEnabled ? 'enabled' : 'disabled'} state`, + description: `Test trigger ${target} ${isEnabled ? "enabled" : "disabled"} state`, target, testCases: [ - isEnabled ? - 'Test trigger fires after being enabled' : - 'Test trigger does not fire when disabled', - 'Verify state change is persistent', - 'Test operations that should/should not trigger' + isEnabled + ? "Test trigger fires after being enabled" + : "Test trigger does not fire when disabled", + "Verify state change is persistent", + "Test operations that should/should not trigger", ], metadata: { - stateChange: isEnabled ? 'enabled' : 'disabled', - requiresStateTesting: true - } + stateChange: isEnabled ? 
"enabled" : "disabled", + requiresStateTesting: true, + }, }); } @@ -3714,15 +4128,15 @@ class TestRequirementAnalyzer extends EventEmitter { description: `Verify trigger ${target} is properly dropped`, target, testCases: [ - 'hasnt_trigger() - trigger no longer exists', - 'Test operations no longer fire the trigger', - 'Verify dependent objects are handled correctly', - 'Test that trigger function still exists (if shared)' + "hasnt_trigger() - trigger no longer exists", + "Test operations no longer fire the trigger", + "Verify dependent objects are handled correctly", + "Test that trigger function still exists (if shared)", ], metadata: { destructiveOperation: true, - requiresCleanupVerification: true - } + requiresCleanupVerification: true, + }, }); // Behavioral verification that trigger is no longer active @@ -3732,15 +4146,15 @@ class TestRequirementAnalyzer extends EventEmitter { description: `Test that operations are not affected by dropped trigger ${target}`, target, testCases: [ - 'Test INSERT operations without trigger effects', - 'Test UPDATE operations without trigger effects', - 'Test DELETE operations without trigger effects', - 'Verify performance improvement (if applicable)' + "Test INSERT operations without trigger effects", + "Test UPDATE operations without trigger effects", + "Test DELETE operations without trigger effects", + "Verify performance improvement (if applicable)", ], metadata: { behaviorVerification: true, - operationsTested: ['INSERT', 'UPDATE', 'DELETE'] - } + operationsTested: ["INSERT", "UPDATE", "DELETE"], + }, }); return requirements; @@ -3756,7 +4170,7 @@ class TestRequirementAnalyzer extends EventEmitter { */ _generateEventTriggerTests(operation, target, priority) { const requirements = []; - const sql = operation.sql || ''; + const sql = operation.sql || ""; const eventDetails = this._parseEventTriggerDetails(sql); // Event trigger existence and properties @@ -3766,18 +4180,18 @@ class TestRequirementAnalyzer extends EventEmitter { description: `Verify event trigger ${target} exists and fires correctly`, target, testCases: [ - 'has_trigger() - event trigger exists', - 'Test event trigger fires on DDL commands', - 'Test event trigger function receives correct event data', - 'Verify event trigger timing (before/after)', - 'Test event trigger filter conditions' + "has_trigger() - event trigger exists", + "Test event trigger fires on DDL commands", + "Test event trigger function receives correct event data", + "Verify event trigger timing (before/after)", + "Test event trigger filter conditions", ], metadata: { isEventTrigger: true, events: eventDetails.events, filterConditions: eventDetails.filterConditions, - functionName: eventDetails.functionName - } + functionName: eventDetails.functionName, + }, }); // Event trigger function tests @@ -3788,17 +4202,17 @@ class TestRequirementAnalyzer extends EventEmitter { description: `Test event trigger function ${eventDetails.functionName}`, target: eventDetails.functionName, testCases: [ - 'has_function() - function exists', - 'function_returns() - returns event_trigger type', - 'Test function handles TG_EVENT correctly', - 'Test function accesses pg_event_trigger_ddl_commands()', - 'Verify function error handling doesn\'t block DDL' + "has_function() - function exists", + "function_returns() - returns event_trigger type", + "Test function handles TG_EVENT correctly", + "Test function accesses pg_event_trigger_ddl_commands()", + "Verify function error handling doesn't block DDL", ], metadata: { 
isEventTriggerFunction: true, associatedEventTrigger: target, - returnType: 'event_trigger' - } + returnType: "event_trigger", + }, }); } @@ -3809,16 +4223,19 @@ class TestRequirementAnalyzer extends EventEmitter { description: `Test DDL operations with event trigger ${target}`, target, testCases: [ - 'Test CREATE operations trigger the event', - 'Test ALTER operations trigger the event', - 'Test DROP operations trigger the event', - 'Test event trigger doesn\'t break normal DDL', - 'Test event trigger handles DDL failures gracefully' + "Test CREATE operations trigger the event", + "Test ALTER operations trigger the event", + "Test DROP operations trigger the event", + "Test event trigger doesn't break normal DDL", + "Test event trigger handles DDL failures gracefully", ], metadata: { - ddlOperationsTested: eventDetails.events || ['ddl_command_start', 'ddl_command_end'], - requiresDDLTesting: true - } + ddlOperationsTested: eventDetails.events || [ + "ddl_command_start", + "ddl_command_end", + ], + requiresDDLTesting: true, + }, }); return requirements; @@ -3832,39 +4249,48 @@ class TestRequirementAnalyzer extends EventEmitter { */ _parseTriggerDetails(sql) { const details = {}; - + // Extract table name const tableMatch = sql.match(/ON\s+([^\s]+)/i); details.tableName = tableMatch ? tableMatch[1] : null; - + // Extract function name - const functionMatch = sql.match(/EXECUTE\s+(?:PROCEDURE\s+|FUNCTION\s+)?([^\s(]+)/i); + const functionMatch = sql.match( + /EXECUTE\s+(?:PROCEDURE\s+|FUNCTION\s+)?([^\s(]+)/i, + ); details.functionName = functionMatch ? functionMatch[1] : null; - + // Extract timing (BEFORE, AFTER, INSTEAD OF) - if (sql.toUpperCase().includes('BEFORE')) details.timing = ['BEFORE']; - else if (sql.toUpperCase().includes('AFTER')) details.timing = ['AFTER']; - else if (sql.toUpperCase().includes('INSTEAD OF')) details.timing = ['INSTEAD OF']; - + if (sql.toUpperCase().includes("BEFORE")) details.timing = ["BEFORE"]; + else if (sql.toUpperCase().includes("AFTER")) details.timing = ["AFTER"]; + else if (sql.toUpperCase().includes("INSTEAD OF")) + details.timing = ["INSTEAD OF"]; + // Extract events details.events = []; - if (sql.toUpperCase().includes('INSERT')) details.events.push('INSERT'); - if (sql.toUpperCase().includes('UPDATE')) details.events.push('UPDATE'); - if (sql.toUpperCase().includes('DELETE')) details.events.push('DELETE'); - if (sql.toUpperCase().includes('TRUNCATE')) details.events.push('TRUNCATE'); - + if (sql.toUpperCase().includes("INSERT")) details.events.push("INSERT"); + if (sql.toUpperCase().includes("UPDATE")) details.events.push("UPDATE"); + if (sql.toUpperCase().includes("DELETE")) details.events.push("DELETE"); + if (sql.toUpperCase().includes("TRUNCATE")) details.events.push("TRUNCATE"); + // Extract level - details.level = sql.toUpperCase().includes('FOR EACH ROW') ? 'ROW' : 'STATEMENT'; - + details.level = sql.toUpperCase().includes("FOR EACH ROW") + ? "ROW" + : "STATEMENT"; + // Extract condition const conditionMatch = sql.match(/WHEN\s*\(([^)]+)\)/i); details.condition = conditionMatch ? 
conditionMatch[1] : null; - + // Check if constraint trigger - details.isConstraintTrigger = sql.toUpperCase().includes('CONSTRAINT TRIGGER'); - details.deferrable = sql.toUpperCase().includes('DEFERRABLE'); - details.initiallyDeferred = sql.toUpperCase().includes('INITIALLY DEFERRED'); - + details.isConstraintTrigger = sql + .toUpperCase() + .includes("CONSTRAINT TRIGGER"); + details.deferrable = sql.toUpperCase().includes("DEFERRABLE"); + details.initiallyDeferred = sql + .toUpperCase() + .includes("INITIALLY DEFERRED"); + return details; } @@ -3876,25 +4302,29 @@ class TestRequirementAnalyzer extends EventEmitter { */ _parseEventTriggerDetails(sql) { const details = {}; - + // Extract function name - const functionMatch = sql.match(/EXECUTE\s+(?:PROCEDURE\s+|FUNCTION\s+)?([^\s(]+)/i); + const functionMatch = sql.match( + /EXECUTE\s+(?:PROCEDURE\s+|FUNCTION\s+)?([^\s(]+)/i, + ); details.functionName = functionMatch ? functionMatch[1] : null; - + // Extract events const eventMatch = sql.match(/ON\s+([^\s]+)/i); if (eventMatch) { details.events = [eventMatch[1].toLowerCase()]; } else { - details.events = ['ddl_command_start']; + details.events = ["ddl_command_start"]; } - + // Extract filter conditions const filterMatch = sql.match(/WHEN\s+TAG\s+IN\s*\(([^)]+)\)/i); if (filterMatch) { - details.filterConditions = filterMatch[1].split(',').map(tag => tag.trim().replace(/'/g, '')); + details.filterConditions = filterMatch[1] + .split(",") + .map((tag) => tag.trim().replace(/'/g, "")); } - + return details; } @@ -3906,48 +4336,49 @@ class TestRequirementAnalyzer extends EventEmitter { */ _generateTriggerBehaviorTests(triggerDetails) { const scenarios = []; - + // Generate scenarios based on events - (triggerDetails.events || []).forEach(event => { + (triggerDetails.events || []).forEach((event) => { scenarios.push({ scenario: `Test ${event} operation fires trigger`, operation: event, - expectedResult: 'Trigger function executes and modifies data as expected' + expectedResult: + "Trigger function executes and modifies data as expected", }); - + if (triggerDetails.condition) { scenarios.push({ scenario: `Test ${event} with condition evaluation`, operation: event, - expectedResult: `Trigger fires only when condition (${triggerDetails.condition}) is true` + expectedResult: `Trigger fires only when condition (${triggerDetails.condition}) is true`, }); } }); - + // Add timing-specific scenarios - if (triggerDetails.timing && triggerDetails.timing.includes('BEFORE')) { + if (triggerDetails.timing && triggerDetails.timing.includes("BEFORE")) { scenarios.push({ - scenario: 'Test BEFORE trigger can prevent operation', - operation: 'INSERT/UPDATE/DELETE', - expectedResult: 'Operation is prevented when trigger returns NULL' + scenario: "Test BEFORE trigger can prevent operation", + operation: "INSERT/UPDATE/DELETE", + expectedResult: "Operation is prevented when trigger returns NULL", }); } - + // Add level-specific scenarios - if (triggerDetails.level === 'ROW') { + if (triggerDetails.level === "ROW") { scenarios.push({ - scenario: 'Test trigger fires once per affected row', - operation: 'Multi-row operation', - expectedResult: 'Trigger executes once for each row affected' + scenario: "Test trigger fires once per affected row", + operation: "Multi-row operation", + expectedResult: "Trigger executes once for each row affected", }); - } else if (triggerDetails.level === 'STATEMENT') { + } else if (triggerDetails.level === "STATEMENT") { scenarios.push({ - scenario: 'Test trigger fires once per statement', 
- operation: 'Multi-row operation', - expectedResult: 'Trigger executes once regardless of rows affected' + scenario: "Test trigger fires once per statement", + operation: "Multi-row operation", + expectedResult: "Trigger executes once regardless of rows affected", }); } - + return scenarios; } @@ -3959,10 +4390,10 @@ class TestRequirementAnalyzer extends EventEmitter { */ _parseAlterationType(sql) { const upperSql = sql.toUpperCase(); - if (upperSql.includes('ENABLE')) return 'ENABLE'; - if (upperSql.includes('DISABLE')) return 'DISABLE'; - if (upperSql.includes('RENAME')) return 'RENAME'; - return 'MODIFY'; + if (upperSql.includes("ENABLE")) return "ENABLE"; + if (upperSql.includes("DISABLE")) return "DISABLE"; + if (upperSql.includes("RENAME")) return "RENAME"; + return "MODIFY"; } /** @@ -3973,20 +4404,24 @@ class TestRequirementAnalyzer extends EventEmitter { */ _isTriggerPerformanceSensitive(triggerDetails) { // Row-level triggers on high-frequency operations are performance sensitive - if (triggerDetails.level === 'ROW' && - triggerDetails.events && - (triggerDetails.events.includes('INSERT') || - triggerDetails.events.includes('UPDATE'))) { + if ( + triggerDetails.level === "ROW" && + triggerDetails.events && + (triggerDetails.events.includes("INSERT") || + triggerDetails.events.includes("UPDATE")) + ) { return true; } - + // Complex trigger functions may be performance sensitive - if (triggerDetails.functionName && - (triggerDetails.functionName.includes('complex') || - triggerDetails.functionName.includes('heavy'))) { + if ( + triggerDetails.functionName && + (triggerDetails.functionName.includes("complex") || + triggerDetails.functionName.includes("heavy")) + ) { return true; } - + return false; } @@ -3994,7 +4429,7 @@ class TestRequirementAnalyzer extends EventEmitter { * Aggregate test requirements from multiple operations * Combines requirements by target object, merges duplicates intelligently, * resolves priority conflicts, and generates summary statistics - * + * * @param {Array>} requirementsList - Array of requirement arrays from multiple operations * @returns {Object} Aggregated requirements with deduplication and statistics */ @@ -4008,25 +4443,27 @@ class TestRequirementAnalyzer extends EventEmitter { duplicatesRemoved: 0, priorityDistribution: {}, typeDistribution: {}, - targetCoverage: {} + targetCoverage: {}, }, relatedObjects: new Map(), - cascadingChanges: [] + cascadingChanges: [], }; } - this.emit('progress', { message: 'Aggregating test requirements from multiple operations...' 
}); + this.emit("progress", { + message: "Aggregating test requirements from multiple operations...", + }); // Flatten all requirements into a single array const allRequirements = requirementsList.flat(); const totalOriginalCount = allRequirements.length; - + // Track aggregation state const aggregationState = { targetGroups: new Map(), relatedObjects: new Map(), cascadingChanges: [], - duplicatesRemoved: 0 + duplicatesRemoved: 0, }; // Group requirements by target object @@ -4042,25 +4479,26 @@ class TestRequirementAnalyzer extends EventEmitter { this._resolveConflictsAndMergeRelated(aggregationState); // Extract final aggregated requirements - const aggregatedRequirements = this._extractAggregatedRequirements(aggregationState); + const aggregatedRequirements = + this._extractAggregatedRequirements(aggregationState); // Generate summary statistics const summary = this._generateAggregationSummary( - aggregatedRequirements, + aggregatedRequirements, requirementsList.length, totalOriginalCount, - aggregationState.duplicatesRemoved + aggregationState.duplicatesRemoved, ); - this.emit('progress', { - message: `Aggregation complete: ${totalOriginalCount} → ${aggregatedRequirements.length} requirements` + this.emit("progress", { + message: `Aggregation complete: ${totalOriginalCount} → ${aggregatedRequirements.length} requirements`, }); return { requirements: aggregatedRequirements, summary, relatedObjects: aggregationState.relatedObjects, - cascadingChanges: aggregationState.cascadingChanges + cascadingChanges: aggregationState.cascadingChanges, }; } @@ -4070,13 +4508,13 @@ class TestRequirementAnalyzer extends EventEmitter { */ _groupRequirementsByTarget(allRequirements, aggregationState) { for (const requirement of allRequirements) { - const target = requirement.target || 'unknown'; + const target = requirement.target || "unknown"; const targetKey = `${target}:${requirement.type}`; - + if (!aggregationState.targetGroups.has(targetKey)) { aggregationState.targetGroups.set(targetKey, []); } - + aggregationState.targetGroups.get(targetKey).push(requirement); // Track related objects (tables + indexes + policies) @@ -4098,12 +4536,12 @@ class TestRequirementAnalyzer extends EventEmitter { type: requirement.type, dependencies: new Set(), dependents: new Set(), - operations: new Set() + operations: new Set(), }); } const objectInfo = aggregationState.relatedObjects.get(target); - + // Track operations affecting this object if (requirement.metadata?.operationType) { objectInfo.operations.add(requirement.metadata.operationType); @@ -4112,24 +4550,48 @@ class TestRequirementAnalyzer extends EventEmitter { // Identify relationships based on requirement metadata if (requirement.metadata) { // Index -> Table relationship - if (requirement.type === TEST_TYPES.INDEX && requirement.metadata.tableName) { + if ( + requirement.type === TEST_TYPES.INDEX && + requirement.metadata.tableName + ) { objectInfo.dependencies.add(requirement.metadata.tableName); - this._ensureRelatedObject(requirement.metadata.tableName, 'TABLE', aggregationState); - aggregationState.relatedObjects.get(requirement.metadata.tableName).dependents.add(target); + this._ensureRelatedObject( + requirement.metadata.tableName, + "TABLE", + aggregationState, + ); + aggregationState.relatedObjects + .get(requirement.metadata.tableName) + .dependents.add(target); } // Foreign Key -> Referenced Table relationship if (requirement.metadata.referencedTable) { objectInfo.dependencies.add(requirement.metadata.referencedTable); - 
this._ensureRelatedObject(requirement.metadata.referencedTable, 'TABLE', aggregationState); - aggregationState.relatedObjects.get(requirement.metadata.referencedTable).dependents.add(target); + this._ensureRelatedObject( + requirement.metadata.referencedTable, + "TABLE", + aggregationState, + ); + aggregationState.relatedObjects + .get(requirement.metadata.referencedTable) + .dependents.add(target); } // Policy -> Table relationship - if (requirement.type === TEST_TYPES.RLS && requirement.metadata.tableName) { + if ( + requirement.type === TEST_TYPES.RLS && + requirement.metadata.tableName + ) { objectInfo.dependencies.add(requirement.metadata.tableName); - this._ensureRelatedObject(requirement.metadata.tableName, 'TABLE', aggregationState); - aggregationState.relatedObjects.get(requirement.metadata.tableName).dependents.add(target); + this._ensureRelatedObject( + requirement.metadata.tableName, + "TABLE", + aggregationState, + ); + aggregationState.relatedObjects + .get(requirement.metadata.tableName) + .dependents.add(target); } } } @@ -4144,7 +4606,7 @@ class TestRequirementAnalyzer extends EventEmitter { type: objectType, dependencies: new Set(), dependents: new Set(), - operations: new Set() + operations: new Set(), }); } } @@ -4158,7 +4620,8 @@ class TestRequirementAnalyzer extends EventEmitter { if (requirements.length <= 1) continue; // Group by description similarity for intelligent merging - const descriptionGroups = this._groupByDescriptionSimilarity(requirements); + const descriptionGroups = + this._groupByDescriptionSimilarity(requirements); const mergedRequirements = []; for (const group of descriptionGroups) { @@ -4218,10 +4681,15 @@ class TestRequirementAnalyzer extends EventEmitter { // Similar descriptions (basic keyword matching) const desc1Keywords = this._extractDescriptionKeywords(req1.description); const desc2Keywords = this._extractDescriptionKeywords(req2.description); - const commonKeywords = desc1Keywords.filter(k => desc2Keywords.includes(k)); - + const commonKeywords = desc1Keywords.filter((k) => + desc2Keywords.includes(k), + ); + // At least 50% keyword overlap - return commonKeywords.length >= Math.max(desc1Keywords.length, desc2Keywords.length) * 0.5; + return ( + commonKeywords.length >= + Math.max(desc1Keywords.length, desc2Keywords.length) * 0.5 + ); } /** @@ -4229,9 +4697,22 @@ class TestRequirementAnalyzer extends EventEmitter { * @private */ _extractDescriptionKeywords(description) { - return description.toLowerCase() + return description + .toLowerCase() .split(/\s+/) - .filter(word => word.length > 3 && !['verify', 'test', 'check', 'with', 'that', 'this', 'table'].includes(word)); + .filter( + (word) => + word.length > 3 && + ![ + "verify", + "test", + "check", + "with", + "that", + "this", + "table", + ].includes(word), + ); } /** @@ -4240,23 +4721,25 @@ class TestRequirementAnalyzer extends EventEmitter { */ _mergeRequirementGroup(group) { const base = group[0]; - + // Take highest priority - const priority = this._getHighestPriority(group.map(r => r.priority)); - + const priority = this._getHighestPriority(group.map((r) => r.priority)); + // Merge test cases (deduplicate) const allTestCases = new Set(); - group.forEach(req => { + group.forEach((req) => { if (req.testCases) { - req.testCases.forEach(testCase => allTestCases.add(testCase)); + req.testCases.forEach((testCase) => allTestCases.add(testCase)); } }); // Merge metadata - const mergedMetadata = this._mergeMetadata(group.map(r => r.metadata).filter(Boolean)); + const mergedMetadata = 
this._mergeMetadata( + group.map((r) => r.metadata).filter(Boolean), + ); // Combine operations - const operations = group.map(r => r.operation).filter(Boolean); + const operations = group.map((r) => r.operation).filter(Boolean); return { type: base.type, @@ -4267,10 +4750,10 @@ class TestRequirementAnalyzer extends EventEmitter { metadata: { ...mergedMetadata, mergedFrom: group.length, - originalDescriptions: group.map(r => r.description) + originalDescriptions: group.map((r) => r.description), }, operations, - reason: this._generateMergedReason(group) + reason: this._generateMergedReason(group), }; } @@ -4279,8 +4762,13 @@ class TestRequirementAnalyzer extends EventEmitter { * @private */ _getHighestPriority(priorities) { - const priorityOrder = [TEST_PRIORITIES.LOW, TEST_PRIORITIES.MEDIUM, TEST_PRIORITIES.HIGH, TEST_PRIORITIES.CRITICAL]; - + const priorityOrder = [ + TEST_PRIORITIES.LOW, + TEST_PRIORITIES.MEDIUM, + TEST_PRIORITIES.HIGH, + TEST_PRIORITIES.CRITICAL, + ]; + return priorities.reduce((highest, current) => { const currentIndex = priorityOrder.indexOf(current); const highestIndex = priorityOrder.indexOf(highest); @@ -4294,9 +4782,9 @@ class TestRequirementAnalyzer extends EventEmitter { */ _mergeMetadata(metadataArray) { if (metadataArray.length === 0) return {}; - + const merged = {}; - + for (const metadata of metadataArray) { for (const [key, value] of Object.entries(metadata)) { if (merged[key] === undefined) { @@ -4306,13 +4794,13 @@ class TestRequirementAnalyzer extends EventEmitter { merged[key] = [...new Set([...merged[key], ...value])]; } else if (merged[key] !== value) { // Handle conflicts by creating arrays - merged[key] = Array.isArray(merged[key]) + merged[key] = Array.isArray(merged[key]) ? [...new Set([...merged[key], value])] : [...new Set([merged[key], value])]; } } } - + return merged; } @@ -4322,10 +4810,10 @@ class TestRequirementAnalyzer extends EventEmitter { */ _generateMergedDescription(group) { if (group.length === 1) return group[0].description; - + const target = group[0].target; const type = group[0].type.toLowerCase(); - + return `Comprehensive ${type} validation for ${target} (merged from ${group.length} requirements)`; } @@ -4334,13 +4822,13 @@ class TestRequirementAnalyzer extends EventEmitter { * @private */ _generateMergedReason(group) { - const reasons = group.map(r => r.reason).filter(Boolean); + const reasons = group.map((r) => r.reason).filter(Boolean); if (reasons.length === 0) return undefined; - + const uniqueReasons = [...new Set(reasons)]; - return uniqueReasons.length === 1 - ? uniqueReasons[0] - : `Multiple requirements: ${uniqueReasons.join('; ')}`; + return uniqueReasons.length === 1 + ? 
uniqueReasons[0] + : `Multiple requirements: ${uniqueReasons.join("; ")}`; } /** @@ -4350,17 +4838,17 @@ class TestRequirementAnalyzer extends EventEmitter { _identifyCascadingChanges(aggregationState) { for (const [objectName, objectInfo] of aggregationState.relatedObjects) { // Look for operations that might cascade - const cascadingOps = ['DROP', 'RENAME', 'ALTER']; - + const cascadingOps = ["DROP", "RENAME", "ALTER"]; + for (const operation of objectInfo.operations) { - if (cascadingOps.some(op => operation.toUpperCase().includes(op))) { + if (cascadingOps.some((op) => operation.toUpperCase().includes(op))) { // Check if this affects dependent objects for (const dependent of objectInfo.dependents) { aggregationState.cascadingChanges.push({ source: objectName, target: dependent, operation, - impact: this._assessCascadingImpact(operation, objectInfo.type) + impact: this._assessCascadingImpact(operation, objectInfo.type), }); } } @@ -4374,16 +4862,16 @@ class TestRequirementAnalyzer extends EventEmitter { */ _assessCascadingImpact(operation, objectType) { const upperOp = operation.toUpperCase(); - - if (upperOp.includes('DROP')) { - return objectType === 'TABLE' ? 'HIGH' : 'MEDIUM'; - } else if (upperOp.includes('RENAME')) { - return 'MEDIUM'; - } else if (upperOp.includes('ALTER')) { - return 'LOW'; + + if (upperOp.includes("DROP")) { + return objectType === "TABLE" ? "HIGH" : "MEDIUM"; + } else if (upperOp.includes("RENAME")) { + return "MEDIUM"; + } else if (upperOp.includes("ALTER")) { + return "LOW"; } - - return 'LOW'; + + return "LOW"; } /** @@ -4393,10 +4881,18 @@ class TestRequirementAnalyzer extends EventEmitter { _resolveConflictsAndMergeRelated(aggregationState) { // Elevate priorities for objects with cascading changes for (const cascade of aggregationState.cascadingChanges) { - if (cascade.impact === 'HIGH') { - this._elevatePriorityForTarget(cascade.target, TEST_PRIORITIES.HIGH, aggregationState); - } else if (cascade.impact === 'MEDIUM') { - this._elevatePriorityForTarget(cascade.target, TEST_PRIORITIES.MEDIUM, aggregationState); + if (cascade.impact === "HIGH") { + this._elevatePriorityForTarget( + cascade.target, + TEST_PRIORITIES.HIGH, + aggregationState, + ); + } else if (cascade.impact === "MEDIUM") { + this._elevatePriorityForTarget( + cascade.target, + TEST_PRIORITIES.MEDIUM, + aggregationState, + ); } } } @@ -4409,14 +4905,24 @@ class TestRequirementAnalyzer extends EventEmitter { for (const [targetKey, requirements] of aggregationState.targetGroups) { if (targetKey.startsWith(`${target}:`)) { for (const req of requirements) { - const currentPriorityIndex = [TEST_PRIORITIES.LOW, TEST_PRIORITIES.MEDIUM, TEST_PRIORITIES.HIGH, TEST_PRIORITIES.CRITICAL].indexOf(req.priority); - const minPriorityIndex = [TEST_PRIORITIES.LOW, TEST_PRIORITIES.MEDIUM, TEST_PRIORITIES.HIGH, TEST_PRIORITIES.CRITICAL].indexOf(minPriority); - + const currentPriorityIndex = [ + TEST_PRIORITIES.LOW, + TEST_PRIORITIES.MEDIUM, + TEST_PRIORITIES.HIGH, + TEST_PRIORITIES.CRITICAL, + ].indexOf(req.priority); + const minPriorityIndex = [ + TEST_PRIORITIES.LOW, + TEST_PRIORITIES.MEDIUM, + TEST_PRIORITIES.HIGH, + TEST_PRIORITIES.CRITICAL, + ].indexOf(minPriority); + if (currentPriorityIndex < minPriorityIndex) { req.priority = minPriority; req.metadata = req.metadata || {}; req.metadata.priorityElevated = true; - req.metadata.elevationReason = 'Cascading change impact'; + req.metadata.elevationReason = "Cascading change impact"; } } } @@ -4429,17 +4935,20 @@ class TestRequirementAnalyzer extends 
EventEmitter { */ _extractAggregatedRequirements(aggregationState) { const requirements = []; - - for (const [_targetKey, targetRequirements] of aggregationState.targetGroups) { + + for (const [ + _targetKey, + targetRequirements, + ] of aggregationState.targetGroups) { requirements.push(...targetRequirements); } - + // Sort by priority (highest first), then by target return requirements.sort((a, b) => { const priorityComparison = this._comparePriority(a.priority, b.priority); if (priorityComparison !== 0) return priorityComparison; - - return (a.target || '').localeCompare(b.target || ''); + + return (a.target || "").localeCompare(b.target || ""); }); } @@ -4447,18 +4956,24 @@ class TestRequirementAnalyzer extends EventEmitter { * Generate summary statistics for aggregation * @private */ - _generateAggregationSummary(aggregatedRequirements, operationCount, originalCount, duplicatesRemoved) { + _generateAggregationSummary( + aggregatedRequirements, + operationCount, + originalCount, + duplicatesRemoved, + ) { const priorityDistribution = {}; const typeDistribution = {}; const targetCoverage = {}; - + for (const req of aggregatedRequirements) { // Priority distribution - priorityDistribution[req.priority] = (priorityDistribution[req.priority] || 0) + 1; - + priorityDistribution[req.priority] = + (priorityDistribution[req.priority] || 0) + 1; + // Type distribution typeDistribution[req.type] = (typeDistribution[req.type] || 0) + 1; - + // Target coverage if (req.target) { targetCoverage[req.target] = (targetCoverage[req.target] || 0) + 1; @@ -4470,15 +4985,25 @@ class TestRequirementAnalyzer extends EventEmitter { totalOperations: operationCount, originalRequirements: originalCount, duplicatesRemoved, - deduplicationRate: originalCount > 0 ? ((duplicatesRemoved / originalCount) * 100).toFixed(1) : 0, + deduplicationRate: + originalCount > 0 + ? ((duplicatesRemoved / originalCount) * 100).toFixed(1) + : 0, priorityDistribution, typeDistribution, targetCoverage, - estimatedEffort: aggregatedRequirements.reduce((sum, req) => sum + this._estimateTestEffort(req), 0), - criticalRequirements: aggregatedRequirements.filter(r => r.priority === TEST_PRIORITIES.CRITICAL).length, - highPriorityRequirements: aggregatedRequirements.filter(r => r.priority === TEST_PRIORITIES.HIGH).length, + estimatedEffort: aggregatedRequirements.reduce( + (sum, req) => sum + this._estimateTestEffort(req), + 0, + ), + criticalRequirements: aggregatedRequirements.filter( + (r) => r.priority === TEST_PRIORITIES.CRITICAL, + ).length, + highPriorityRequirements: aggregatedRequirements.filter( + (r) => r.priority === TEST_PRIORITIES.HIGH, + ).length, coverageAreas: Object.keys(typeDistribution).length, - uniqueTargets: Object.keys(targetCoverage).length + uniqueTargets: Object.keys(targetCoverage).length, }; } } @@ -4486,5 +5011,5 @@ class TestRequirementAnalyzer extends EventEmitter { module.exports = { TestRequirementAnalyzer, TEST_TYPES, - TEST_PRIORITIES -}; \ No newline at end of file + TEST_PRIORITIES, +}; diff --git a/src/lib/testing/TestRequirementSchema.js b/src/lib/testing/TestRequirementSchema.js index 9c84725..1ba9476 100644 --- a/src/lib/testing/TestRequirementSchema.js +++ b/src/lib/testing/TestRequirementSchema.js @@ -1,10 +1,10 @@ /** * Test Requirement Schema - JSDoc Type Definitions - * + * * This file defines comprehensive type schemas for test coverage enforcement * in the D.A.T.A. project. These types are used throughout the test coverage * analysis and enforcement system to ensure pgTAP test completeness. 
- * + * * @fileoverview JSDoc type definitions for test requirements and coverage analysis */ @@ -312,21 +312,24 @@ // Export all types for use in other modules module.exports = { // Type validation helpers - these are runtime functions, not types - + /** * Validate a test requirement object * @param {any} requirement - Object to validate * @returns {boolean} True if valid TestRequirement */ isValidTestRequirement(requirement) { - return requirement && - typeof requirement === 'object' && - typeof requirement.id === 'string' && - typeof requirement.objectType === 'string' && - typeof requirement.targetName === 'string' && - Array.isArray(requirement.requiredAssertions) && - typeof requirement.priority === 'string' && - requirement.metadata && typeof requirement.metadata === 'object'; + return ( + requirement && + typeof requirement === "object" && + typeof requirement.id === "string" && + typeof requirement.objectType === "string" && + typeof requirement.targetName === "string" && + Array.isArray(requirement.requiredAssertions) && + typeof requirement.priority === "string" && + requirement.metadata && + typeof requirement.metadata === "object" + ); }, /** @@ -335,14 +338,16 @@ module.exports = { * @returns {boolean} True if valid CoverageGap */ isValidCoverageGap(gap) { - return gap && - typeof gap === 'object' && - typeof gap.requirementId === 'string' && - typeof gap.objectType === 'string' && - typeof gap.targetName === 'string' && - Array.isArray(gap.missingAssertions) && - typeof gap.priority === 'string' && - typeof gap.severityScore === 'number'; + return ( + gap && + typeof gap === "object" && + typeof gap.requirementId === "string" && + typeof gap.objectType === "string" && + typeof gap.targetName === "string" && + Array.isArray(gap.missingAssertions) && + typeof gap.priority === "string" && + typeof gap.severityScore === "number" + ); }, /** @@ -353,24 +358,26 @@ module.exports = { createTestRequirement(requirement) { const now = new Date(); return { - id: requirement.id || `req_${Date.now()}_${Math.random().toString(36).substr(2, 9)}`, - objectType: requirement.objectType || 'table', - targetName: requirement.targetName || '', + id: + requirement.id || + `req_${Date.now()}_${Math.random().toString(36).substr(2, 9)}`, + objectType: requirement.objectType || "table", + targetName: requirement.targetName || "", requiredAssertions: requirement.requiredAssertions || [], - priority: requirement.priority || 'medium', - schema: requirement.schema || 'public', + priority: requirement.priority || "medium", + schema: requirement.schema || "public", optional: requirement.optional || false, metadata: { - description: '', + description: "", tags: [], - source: 'manual', + source: "manual", estimatedEffort: 5, dependencies: [], autoGenerated: false, createdAt: now, - version: '1.0.0', - ...requirement.metadata - } + version: "1.0.0", + ...requirement.metadata, + }, }; }, @@ -384,28 +391,45 @@ module.exports = { critical: 40, high: 30, medium: 20, - low: 10 + low: 10, }; - + const baseScore = priorityWeights[gap.priority] || 10; - const missingAssertionsWeight = Math.min(gap.missingAssertions.length * 5, 30); - const typeWeight = gap.objectType === 'function' || gap.objectType === 'policy' ? 20 : 10; + const missingAssertionsWeight = Math.min( + gap.missingAssertions.length * 5, + 30, + ); + const typeWeight = + gap.objectType === "function" || gap.objectType === "policy" ? 20 : 10; const criticalPathBonus = gap.metadata?.isCriticalPath ? 
10 : 0; - - return Math.min(baseScore + missingAssertionsWeight + typeWeight + criticalPathBonus, 100); + + return Math.min( + baseScore + missingAssertionsWeight + typeWeight + criticalPathBonus, + 100, + ); }, // Constants for common patterns - COMMON_TABLE_ASSERTIONS: ['has_table'], - COMMON_COLUMN_ASSERTIONS: ['has_column', 'col_type_is'], - COMMON_FUNCTION_ASSERTIONS: ['has_function', 'function_returns'], - COMMON_INDEX_ASSERTIONS: ['has_index'], - COMMON_POLICY_ASSERTIONS: ['policy_roles_are', 'policy_cmd_is'], - COMMON_TRIGGER_ASSERTIONS: ['has_trigger', 'trigger_is'], - - DEFAULT_PRIORITY: 'medium', - DEFAULT_SCHEMA: 'public', - - PRIORITY_LEVELS: ['critical', 'high', 'medium', 'low'], - OBJECT_TYPES: ['table', 'column', 'function', 'index', 'policy', 'trigger', 'schema', 'view', 'constraint'] -}; \ No newline at end of file + COMMON_TABLE_ASSERTIONS: ["has_table"], + COMMON_COLUMN_ASSERTIONS: ["has_column", "col_type_is"], + COMMON_FUNCTION_ASSERTIONS: ["has_function", "function_returns"], + COMMON_INDEX_ASSERTIONS: ["has_index"], + COMMON_POLICY_ASSERTIONS: ["policy_roles_are", "policy_cmd_is"], + COMMON_TRIGGER_ASSERTIONS: ["has_trigger", "trigger_is"], + + DEFAULT_PRIORITY: "medium", + DEFAULT_SCHEMA: "public", + + PRIORITY_LEVELS: ["critical", "high", "medium", "low"], + OBJECT_TYPES: [ + "table", + "column", + "function", + "index", + "policy", + "trigger", + "schema", + "view", + "constraint", + ], +}; diff --git a/src/lib/testing/TestTemplateGenerator.js b/src/lib/testing/TestTemplateGenerator.js index 767e787..1b90652 100644 --- a/src/lib/testing/TestTemplateGenerator.js +++ b/src/lib/testing/TestTemplateGenerator.js @@ -1,13 +1,13 @@ /** * TestTemplateGenerator with Pattern Library - * + * * Generates pgTAP test templates for missing coverage based on requirements. * Creates properly structured and formatted test SQL files following project conventions. * Includes a comprehensive pattern library for consistent test generation. 
*/
-const TestPatternLibrary = require('./TestPatternLibrary');
-
+const TestPatternLibrary = require("./TestPatternLibrary");
+const { ValidationError, ParsingError } = require("../errors/index");
/**
* @typedef {Object} TestRequirement
* @property {string} type - Type of test ('rpc' | 'rls' | 'trigger' | 'constraint' | 'function' | 'table' | 'column' | 'index')
@@ -95,7 +95,7 @@ class TestTemplateGenerator {
function: this.generateFunctionTemplate.bind(this),
table: this.generateTableTemplate.bind(this),
column: this.generateColumnTemplate.bind(this),
- index: this.generateIndexTemplate.bind(this)
+ index: this.generateIndexTemplate.bind(this),
};
/**
@@ -103,14 +103,14 @@ class TestTemplateGenerator {
* @private
*/
this.testDirectories = {
- rpc: '002_rpc_tests',
- rls: '003_rls_tests',
- trigger: '004_trigger_tests',
- constraint: '005_constraint_tests',
- function: '006_function_tests',
- table: '001_table_tests',
- column: '007_column_tests',
- index: '008_index_tests'
+ rpc: "002_rpc_tests",
+ rls: "003_rls_tests",
+ trigger: "004_trigger_tests",
+ constraint: "005_constraint_tests",
+ function: "006_function_tests",
+ table: "001_table_tests",
+ column: "007_column_tests",
+ index: "008_index_tests",
};
/**
@@ -121,7 +121,7 @@ class TestTemplateGenerator {
planCount: 6,
setupRequired: true,
includeSecurity: true,
- includeValidation: true
+ includeValidation: true,
};
}
@@ -136,7 +136,7 @@ class TestTemplateGenerator {
const generator = this.templateGenerators[requirement.type];
if (!generator) {
- throw new Error(`Unsupported test type: ${requirement.type}`);
+ throw new ValidationError(`Unsupported test type: ${requirement.type}`);
}
const content = generator(requirement);
@@ -150,10 +150,12 @@ class TestTemplateGenerator {
type: requirement.type,
metadata: {
name: requirement.name,
- schema: requirement.schema || 'public',
+ schema: requirement.schema || "public",
generatedAt: new Date().toISOString(),
- description: requirement.description || `Test for ${requirement.type}: ${requirement.name}`
- }
+ description:
+ requirement.description ||
+ `Test for ${requirement.type}: ${requirement.name}`,
+ },
};
}
@@ -164,7 +166,7 @@ class TestTemplateGenerator {
*/
generateBatch(requirements) {
if (!Array.isArray(requirements)) {
- throw new Error('Requirements must be an array');
+ throw new ValidationError("Requirements must be an array");
}
const templates = [];
@@ -181,12 +183,11 @@ class TestTemplateGenerator {
summary[requirement.type] = 0;
}
summary[requirement.type]++;
-
} catch (error) {
errors.push({
index,
requirement,
- error: error.message
+ error: error.message,
});
}
});
@@ -195,7 +196,7 @@ class TestTemplateGenerator {
templates,
totalGenerated: templates.length,
errors,
- summary
+ summary,
};
}
@@ -245,29 +246,29 @@ class TestTemplateGenerator {
generateEnhancedTemplate(requirement, additionalPatterns = []) {
const errors = [];
const warnings = [];
-
+
// Create checkpoint for rollback
const checkpoint = {
requirement: JSON.parse(JSON.stringify(requirement)),
- timestamp: Date.now()
+ timestamp: Date.now(),
};
-
+
try {
// Start with base template
const baseTemplate = this.generateTemplate(requirement);
if (!baseTemplate || !baseTemplate.content) {
- throw new Error('Failed to generate base template');
+ throw new ParsingError("Failed to generate base template");
}
-
+
// Get recommended patterns for this test type
const recommendedPatterns = this.getRecommendedPatterns(requirement.type);
const allPatterns = [...recommendedPatterns];
-
+
// Add any additional
patterns requested with error handling for (const patternName of additionalPatterns) { try { const pattern = this.getPattern(patternName); - if (pattern && !allPatterns.find(p => p.name === patternName)) { + if (pattern && !allPatterns.find((p) => p.name === patternName)) { allPatterns.push(pattern); } else if (!pattern) { warnings.push(`Pattern '${patternName}' not found in library`); @@ -280,23 +281,29 @@ class TestTemplateGenerator { // Extract variables from requirement for pattern rendering const variables = this.extractPatternVariables(requirement); - + // Generate enhanced content by incorporating relevant patterns let enhancedContent = baseTemplate.content; - + try { // Add pattern-based enhancements with error recovery - const patternEnhancements = this.generatePatternEnhancements(requirement, allPatterns, variables); + const patternEnhancements = this.generatePatternEnhancements( + requirement, + allPatterns, + variables, + ); if (patternEnhancements.trim()) { - enhancedContent += '\n\n-- =========================================================================\n'; - enhancedContent += '-- ENHANCED PATTERNS FROM LIBRARY\n'; - enhancedContent += '-- =========================================================================\n\n'; + enhancedContent += + "\n\n-- =========================================================================\n"; + enhancedContent += "-- ENHANCED PATTERNS FROM LIBRARY\n"; + enhancedContent += + "-- =========================================================================\n\n"; enhancedContent += patternEnhancements; } } catch (patternError) { - errors.push({ - phase: 'pattern_enhancement', - error: patternError.message + errors.push({ + phase: "pattern_enhancement", + error: patternError.message, }); // Continue with base template content if pattern enhancement fails enhancedContent = baseTemplate.content; @@ -308,46 +315,51 @@ class TestTemplateGenerator { content: this.formatTest(enhancedContent), metadata: { ...baseTemplate.metadata, - patternsUsed: allPatterns.map(p => p.name), - enhancementLevel: 'advanced', - generationMethod: 'pattern-enhanced', + patternsUsed: allPatterns.map((p) => p.name), + enhancementLevel: "advanced", + generationMethod: "pattern-enhanced", errors: errors.length > 0 ? errors : undefined, - warnings: warnings.length > 0 ? warnings : undefined - } + warnings: warnings.length > 0 ? 
warnings : undefined, + }, }; // Validate the generated template before returning if (!this._validateTemplate(enhancedTemplate)) { - throw new Error('Generated template failed validation'); + throw new ParsingError("Generated template failed validation"); } return enhancedTemplate; - } catch (enhancementError) { // Rollback to basic template if enhancement completely fails - console.warn(`Enhancement failed for ${requirement.type} test '${requirement.name}': ${enhancementError.message}`); - console.warn('Falling back to basic template generation'); - + console.warn( + `Enhancement failed for ${requirement.type} test '${requirement.name}': ${enhancementError.message}`, + ); + console.warn("Falling back to basic template generation"); + try { const basicTemplate = this.generateTemplate(checkpoint.requirement); - + // Validate basic template before returning if (!this._validateTemplate(basicTemplate)) { - throw new Error('Basic template fallback also failed validation'); + throw new ParsingError( + "Basic template fallback also failed validation", + ); } return { ...basicTemplate, metadata: { ...basicTemplate.metadata, - enhancementLevel: 'basic', - generationMethod: 'fallback', + enhancementLevel: "basic", + generationMethod: "fallback", enhancementErrors: [enhancementError.message], - fallbackReason: 'Enhancement failed - using basic template' - } + fallbackReason: "Enhancement failed - using basic template", + }, }; } catch (fallbackError) { - throw new Error(`Both enhanced and basic template generation failed: Enhancement: ${enhancementError.message}, Fallback: ${fallbackError.message}`); + throw new ParsingError( + `Both enhanced and basic template generation failed: Enhancement: ${enhancementError.message}, Fallback: ${fallbackError.message}`, + ); } } } @@ -361,56 +373,72 @@ class TestTemplateGenerator { _validateTemplate(template) { try { // Check basic template structure - if (!template || typeof template !== 'object') { - console.error('Template validation failed: Template must be an object'); + if (!template || typeof template !== "object") { + console.error("Template validation failed: Template must be an object"); return false; } // Check required properties - if (!template.content || typeof template.content !== 'string') { - console.error('Template validation failed: Template must have content string'); + if (!template.content || typeof template.content !== "string") { + console.error( + "Template validation failed: Template must have content string", + ); return false; } - if (!template.metadata || typeof template.metadata !== 'object') { - console.error('Template validation failed: Template must have metadata object'); + if (!template.metadata || typeof template.metadata !== "object") { + console.error( + "Template validation failed: Template must have metadata object", + ); return false; } const content = template.content; // Validate pgTAP structure - must have BEGIN (with or without semicolon for functions) - if (!content.includes('BEGIN') && !content.includes('begin')) { - console.error('Template validation failed: Missing pgTAP BEGIN statement'); + if (!content.includes("BEGIN") && !content.includes("begin")) { + console.error( + "Template validation failed: Missing pgTAP BEGIN statement", + ); return false; } // Check for pgTAP plan statement (could be SELECT plan() or RETURN NEXT tap.plan()) - const hasPlan = content.includes('SELECT plan(') || - content.includes('select plan(') || - content.includes('tap.plan(') || - content.includes('TAP.PLAN('); - + const hasPlan = + 
content.includes("SELECT plan(") || + content.includes("select plan(") || + content.includes("tap.plan(") || + content.includes("TAP.PLAN("); + if (!hasPlan) { - console.error('Template validation failed: Missing pgTAP plan() statement'); + console.error( + "Template validation failed: Missing pgTAP plan() statement", + ); return false; } // Check for proper pgTAP function endings (PostgreSQL functions use END; and tap.finish()) - const hasEnd = content.includes('END;') || content.includes('end;'); - const hasRollback = content.includes('ROLLBACK;') || content.includes('rollback;'); - const hasCommit = content.includes('COMMIT;') || content.includes('commit;'); - + const hasEnd = content.includes("END;") || content.includes("end;"); + const hasRollback = + content.includes("ROLLBACK;") || content.includes("rollback;"); + const hasCommit = + content.includes("COMMIT;") || content.includes("commit;"); + if (!hasEnd && !hasRollback && !hasCommit) { - console.error('Template validation failed: Missing proper ending statement (END, ROLLBACK, or COMMIT)'); + console.error( + "Template validation failed: Missing proper ending statement (END, ROLLBACK, or COMMIT)", + ); return false; } // Validate that content has at least one actual test function call - const testFunctionPattern = /(tap\.|^|\s)(ok|is|isnt|like|unlike|pass|fail|throws_ok|lives_ok|cmp_ok|is_empty|isnt_empty|has_table|has_column|has_function|has_view|has_trigger|has_index)\s*\(/i; - + const testFunctionPattern = + /(tap\.|^|\s)(ok|is|isnt|like|unlike|pass|fail|throws_ok|lives_ok|cmp_ok|is_empty|isnt_empty|has_table|has_column|has_function|has_view|has_trigger|has_index)\s*\(/i; + if (!testFunctionPattern.test(content)) { - console.error('Template validation failed: No pgTAP test functions found in content'); + console.error( + "Template validation failed: No pgTAP test functions found in content", + ); return false; } @@ -420,41 +448,51 @@ class TestTemplateGenerator { /;\s*DELETE\s+FROM\s+(?!.*WHERE)/i, /;\s*UPDATE\s+.*SET\s+.*(?!WHERE)/i, /UNION\s+SELECT/i, - /--\s*'[^']*'[^;]*;/ // SQL comments with quotes followed by statements (more specific injection pattern) + /--\s*'[^']*'[^;]*;/, // SQL comments with quotes followed by statements (more specific injection pattern) ]; for (const pattern of suspiciousPatterns) { if (pattern.test(content)) { - console.error(`Template validation failed: Suspicious SQL pattern detected: ${pattern}`); + console.error( + `Template validation failed: Suspicious SQL pattern detected: ${pattern}`, + ); return false; } } // Validate metadata structure const metadata = template.metadata; - + // Check for name (required in all templates) - if (!metadata.name || typeof metadata.name !== 'string') { - console.error('Template validation failed: Metadata missing name'); + if (!metadata.name || typeof metadata.name !== "string") { + console.error("Template validation failed: Metadata missing name"); return false; } - // Check for schema (required in all templates) - if (!metadata.schema || typeof metadata.schema !== 'string') { - console.error('Template validation failed: Metadata missing schema'); + // Check for schema (required in all templates) + if (!metadata.schema || typeof metadata.schema !== "string") { + console.error("Template validation failed: Metadata missing schema"); return false; } // Check for reasonable plan count - if (metadata.planCount && (typeof metadata.planCount !== 'number' || metadata.planCount < 1 || metadata.planCount > 1000)) { - console.error('Template validation failed: Invalid 
planCount in metadata'); + if ( + metadata.planCount && + (typeof metadata.planCount !== "number" || + metadata.planCount < 1 || + metadata.planCount > 1000) + ) { + console.error( + "Template validation failed: Invalid planCount in metadata", + ); return false; } return true; - } catch (validationError) { - console.error(`Template validation failed with exception: ${validationError.message}`); + console.error( + `Template validation failed with exception: ${validationError.message}`, + ); return false; } } @@ -467,35 +505,35 @@ class TestTemplateGenerator { generateBestPracticesDoc(testType) { const practices = this.patternLibrary.getBestPractices(testType); const examples = this.patternLibrary.getUsageExamples(testType); - + let doc = `-- =========================================================================\n`; doc += `-- BEST PRACTICES FOR ${testType.toUpperCase()} TESTS\n`; doc += `-- =========================================================================\n\n`; - + if (practices.length > 0) { doc += `-- Best Practices:\n`; - practices.forEach(practice => { + practices.forEach((practice) => { doc += `-- • ${practice}\n`; }); doc += `\n`; } - + if (examples.length > 0) { doc += `-- Usage Examples:\n`; - examples.forEach(example => { + examples.forEach((example) => { doc += `-- • ${example}\n`; }); doc += `\n`; } - + const recommendedPatterns = this.getRecommendedPatterns(testType); if (recommendedPatterns.length > 0) { doc += `-- Recommended Patterns:\n`; - recommendedPatterns.forEach(pattern => { + recommendedPatterns.forEach((pattern) => { doc += `-- • ${pattern.name}: ${pattern.description}\n`; }); } - + return doc; } @@ -514,18 +552,18 @@ class TestTemplateGenerator { generateUsageExamples() { return { basicUsage: { - description: 'Basic template generation (existing functionality)', + description: "Basic template generation (existing functionality)", code: `const generator = new TestTemplateGenerator(); const requirement = { type: 'table', name: 'users', schema: 'public' }; -const template = generator.generateTemplate(requirement);` +const template = generator.generateTemplate(requirement);`, }, - + enhancedUsage: { - description: 'Enhanced template generation with patterns', + description: "Enhanced template generation with patterns", code: `const generator = new TestTemplateGenerator(); const requirement = { type: 'rls', @@ -541,11 +579,11 @@ const requirement = { const enhancedTemplate = generator.generateEnhancedTemplate( requirement, ['privilege_escalation_test'] // Additional patterns -);` +);`, }, - + patternAccess: { - description: 'Direct pattern access and customization', + description: "Direct pattern access and customization", code: `const generator = new TestTemplateGenerator(); // Get recommended patterns for a test type @@ -556,22 +594,22 @@ const securityPatterns = generator.getPatternsByCategory('security_testing'); // Render a specific pattern const variables = { schema: 'public', tableName: 'posts' }; -const rendered = generator.renderPattern('table_exists_basic', variables);` +const rendered = generator.renderPattern('table_exists_basic', variables);`, }, - + documentationGeneration: { - description: 'Generate documentation and best practices', + description: "Generate documentation and best practices", code: `const generator = new TestTemplateGenerator(); // Generate best practices for a test type const bestPractices = generator.generateBestPracticesDoc('rls'); // Generate complete pattern library documentation -const libraryDoc = 
generator.generatePatternLibraryDoc();` +const libraryDoc = generator.generatePatternLibraryDoc();`, }, - + batchGeneration: { - description: 'Batch generation with pattern enhancement', + description: "Batch generation with pattern enhancement", code: `const generator = new TestTemplateGenerator(); const requirements = [ { type: 'table', name: 'users', schema: 'public' }, @@ -585,8 +623,8 @@ const enhancedTemplates = requirements.map(req => ); // Or use batch generation (basic templates) -const batchResult = generator.generateBatch(requirements);` - } +const batchResult = generator.generateBatch(requirements);`, + }, }; } @@ -596,21 +634,21 @@ const batchResult = generator.generateBatch(requirements);` * @returns {string} Formatted test SQL */ formatTest(template) { - if (!template || typeof template !== 'string') { - throw new Error('Template content must be a non-empty string'); + if (!template || typeof template !== "string") { + throw new ValidationError("Template content must be a non-empty string"); } // Remove excessive blank lines and normalize line endings let formatted = template - .replace(/\r\n/g, '\n') // Normalize line endings - .replace(/\n{3,}/g, '\n\n') // Reduce multiple blank lines to max 2 - .trim(); // Remove leading/trailing whitespace + .replace(/\r\n/g, "\n") // Normalize line endings + .replace(/\n{3,}/g, "\n\n") // Reduce multiple blank lines to max 2 + .trim(); // Remove leading/trailing whitespace // Ensure proper pgTAP structure formatting formatted = this.formatPgTapStructure(formatted); // Add final newline - return formatted + '\n'; + return formatted + "\n"; } /** @@ -620,25 +658,27 @@ const batchResult = generator.generateBatch(requirements);` * @private */ validateRequirement(requirement) { - if (!requirement || typeof requirement !== 'object') { - throw new Error('Requirement must be an object'); + if (!requirement || typeof requirement !== "object") { + throw new ValidationError("Requirement must be an object"); } - if (!requirement.type || typeof requirement.type !== 'string') { - throw new Error('Requirement must have a valid type'); + if (!requirement.type || typeof requirement.type !== "string") { + throw new ValidationError("Requirement must have a valid type"); } - if (!requirement.name || typeof requirement.name !== 'string') { - throw new Error('Requirement must have a valid name'); + if (!requirement.name || typeof requirement.name !== "string") { + throw new ValidationError("Requirement must have a valid name"); } if (!this.templateGenerators[requirement.type]) { - throw new Error(`Unsupported test type: ${requirement.type}`); + throw new ValidationError(`Unsupported test type: ${requirement.type}`); } // Validate name format if (!/^[a-zA-Z0-9_]+$/.test(requirement.name)) { - throw new Error('Name must contain only letters, numbers, and underscores'); + throw new ValidationError( + "Name must contain only letters, numbers, and underscores", + ); } } @@ -649,7 +689,9 @@ const batchResult = generator.generateBatch(requirements);` * @private */ generateFilename(requirement) { - const sanitizedName = requirement.name.toLowerCase().replace(/[^a-z0-9_]/g, '_'); + const sanitizedName = requirement.name + .toLowerCase() + .replace(/[^a-z0-9_]/g, "_"); return `${sanitizedName}.test.sql`; } @@ -660,7 +702,7 @@ const batchResult = generator.generateBatch(requirements);` * @private */ getTestDirectory(testType) { - return this.testDirectories[testType] || '999_custom_tests'; + return this.testDirectories[testType] || "999_custom_tests"; } /** @@ -670,16 
+712,17 @@ const batchResult = generator.generateBatch(requirements);` * @private */ generateRpcTemplate(requirement) { - const schema = requirement.schema || 'public'; + const schema = requirement.schema || "public"; const functionName = requirement.name; const testFunctionName = `run_${functionName}_tests`; - const planCount = this.calculatePlanCount(requirement, 'rpc'); - + const planCount = this.calculatePlanCount(requirement, "rpc"); + // Build parameter placeholders if parameters are specified - const hasParams = requirement.parameters && requirement.parameters.length > 0; - const paramPlaceholder = hasParams ? - `(${requirement.parameters.map(() => 'TODO: param').join(', ')})` : - '()'; + const hasParams = + requirement.parameters && requirement.parameters.length > 0; + const paramPlaceholder = hasParams + ? `(${requirement.parameters.map(() => "TODO: param").join(", ")})` + : "()"; return `-- ========================================================================= -- RPC FUNCTION TESTS: ${functionName} @@ -690,7 +733,7 @@ const batchResult = generator.generateBatch(requirements);` -- 3. Function validates invalid inputs properly -- 4. Function respects security and authorization -- 5. Function returns expected data structure -${requirement.description ? `-- \n-- Description: ${requirement.description}` : ''} +${requirement.description ? `-- \n-- Description: ${requirement.description}` : ""} CREATE OR REPLACE FUNCTION test.${testFunctionName}() RETURNS SETOF TEXT @@ -701,7 +744,7 @@ AS $$ DECLARE v_admin_id uuid; v_user_id uuid; - v_result ${requirement.returnType || 'jsonb'}; + v_result ${requirement.returnType || "jsonb"}; v_test_data record; BEGIN -- Plan our tests (adjust count as needed) @@ -722,7 +765,7 @@ BEGIN RETURN NEXT tap.has_function( '${schema}', '${functionName}', - ${hasParams ? `ARRAY[${requirement.parameters.map(p => `'${p}'`).join(', ')}]` : 'ARRAY[]::text[]'}, + ${hasParams ? `ARRAY[${requirement.parameters.map((p) => `'${p}'`).join(", ")}]` : "ARRAY[]::text[]"}, 'Function ${functionName} has correct signature' ); @@ -770,21 +813,25 @@ COMMENT ON FUNCTION test.${testFunctionName}() IS 'Tests for ${functionName} RPC * @private */ generateRlsTemplate(requirement) { - const schema = requirement.schema || 'public'; + const schema = requirement.schema || "public"; const tableName = requirement.name; const testFunctionName = `run_${tableName}_rls_tests`; - const planCount = this.calculatePlanCount(requirement, 'rls'); - + const planCount = this.calculatePlanCount(requirement, "rls"); + // Extract policy metadata if available const policies = requirement.metadata?.policies || []; const testScenarios = requirement.metadata?.testScenarios || []; - + // Generate core RLS tests let rlsTests = this.generateRlsEnablementTests(schema, tableName); rlsTests += this.generatePolicyExistenceTests(schema, tableName, policies); rlsTests += this.generatePolicyCommandTests(schema, tableName, policies); rlsTests += this.generatePolicyRolesTests(schema, tableName, policies); - rlsTests += this.generateMultiUserAccessTests(schema, tableName, testScenarios); + rlsTests += this.generateMultiUserAccessTests( + schema, + tableName, + testScenarios, + ); rlsTests += this.generateSecurityEdgeCaseTests(schema, tableName); return `-- ========================================================================= @@ -797,7 +844,7 @@ COMMENT ON FUNCTION test.${testFunctionName}() IS 'Tests for ${functionName} RPC -- 4. Policy role verification (policy_roles_are) -- 5. Multi-user access scenarios -- 6. 
Security edge cases and bypass scenarios -${requirement.description ? `-- \n-- Description: ${requirement.description}` : ''} +${requirement.description ? `-- \n-- Description: ${requirement.description}` : ""} CREATE OR REPLACE FUNCTION test.${testFunctionName}() RETURNS SETOF TEXT @@ -844,10 +891,10 @@ COMMENT ON FUNCTION test.${testFunctionName}() IS 'Comprehensive RLS policy test * @private */ generateTriggerTemplate(requirement) { - const schema = requirement.schema || 'public'; + const schema = requirement.schema || "public"; const triggerName = requirement.name; const testFunctionName = `run_${triggerName}_tests`; - const planCount = this.calculatePlanCount(requirement, 'trigger'); + const planCount = this.calculatePlanCount(requirement, "trigger"); return `-- ========================================================================= -- TRIGGER TESTS: ${triggerName} @@ -857,7 +904,7 @@ COMMENT ON FUNCTION test.${testFunctionName}() IS 'Comprehensive RLS policy test -- 2. Trigger fires on expected operations -- 3. Trigger performs expected data modifications -- 4. Trigger handles edge cases correctly -${requirement.description ? `-- \n-- Description: ${requirement.description}` : ''} +${requirement.description ? `-- \n-- Description: ${requirement.description}` : ""} CREATE OR REPLACE FUNCTION test.${testFunctionName}() RETURNS SETOF TEXT @@ -916,10 +963,10 @@ COMMENT ON FUNCTION test.${testFunctionName}() IS 'Tests for ${triggerName} trig * @private */ generateConstraintTemplate(requirement) { - const schema = requirement.schema || 'public'; + const schema = requirement.schema || "public"; const constraintName = requirement.name; const testFunctionName = `run_${constraintName}_tests`; - const planCount = this.calculatePlanCount(requirement, 'constraint'); + const planCount = this.calculatePlanCount(requirement, "constraint"); return `-- ========================================================================= -- CONSTRAINT TESTS: ${constraintName} @@ -929,7 +976,7 @@ COMMENT ON FUNCTION test.${testFunctionName}() IS 'Tests for ${triggerName} trig -- 2. Constraint allows valid data -- 3. Constraint rejects invalid data -- 4. Constraint behavior is consistent -${requirement.description ? `-- \n-- Description: ${requirement.description}` : ''} +${requirement.description ? 
`-- \n-- Description: ${requirement.description}` : ""} CREATE OR REPLACE FUNCTION test.${testFunctionName}() RETURNS SETOF TEXT @@ -978,29 +1025,29 @@ COMMENT ON FUNCTION test.${testFunctionName}() IS 'Tests for ${constraintName} c * @private */ generateFunctionTemplate(requirement) { - const schema = requirement.schema || 'public'; + const schema = requirement.schema || "public"; const functionName = requirement.name; const testFunctionName = `run_${functionName}_function_tests`; - + // Extract metadata with defaults const metadata = requirement.metadata || {}; const parameterTypes = metadata.parameterTypes || []; - const returnType = metadata.returnType || 'text'; - const language = metadata.language || 'plpgsql'; + const returnType = metadata.returnType || "text"; + const language = metadata.language || "plpgsql"; const _isRpcFunction = metadata.isRpcFunction || false; const requiresSecurityTesting = metadata.requiresSecurityTesting || false; const testCases = metadata.testCases || []; const isVolatile = metadata.isVolatile || false; - + // Calculate plan count based on test complexity let planCount = this.calculateFunctionPlanCount(requirement, metadata); - + // Build parameter signature for testing const hasParams = parameterTypes.length > 0; - const parameterSignature = hasParams ? - `ARRAY[${parameterTypes.map(type => `'${type}'`).join(', ')}]` : - 'ARRAY[]::text[]'; - + const parameterSignature = hasParams + ? `ARRAY[${parameterTypes.map((type) => `'${type}'`).join(", ")}]` + : "ARRAY[]::text[]"; + // Generate sample test parameters based on types const sampleParams = this.generateSampleParameters(parameterTypes); const invalidParams = this.generateInvalidParameters(parameterTypes); @@ -1017,8 +1064,8 @@ COMMENT ON FUNCTION test.${testFunctionName}() IS 'Tests for ${constraintName} c -- 6. Behavioral tests with sample inputs -- 7. Error condition handling -- 8. Authorization scenarios (if applicable) -${requirement.description ? `-- \n-- Description: ${requirement.description}` : ''} -${metadata.tags ? `-- Tags: ${metadata.tags.join(', ')}` : ''} +${requirement.description ? `-- \n-- Description: ${requirement.description}` : ""} +${metadata.tags ? `-- Tags: ${metadata.tags.join(", ")}` : ""} CREATE OR REPLACE FUNCTION test.${testFunctionName}() RETURNS SETOF TEXT @@ -1036,10 +1083,14 @@ BEGIN -- Plan our tests (adjust count as needed based on metadata) RETURN NEXT tap.plan(${planCount}); - ${requiresSecurityTesting ? `-- Setup: Create test users for security testing + ${ + requiresSecurityTesting + ? `-- Setup: Create test users for security testing v_admin_id := test.create_test_admin(); v_user_id := test.create_test_user(); - ` : ''} + ` + : "" + } -- =================================================================== -- BASIC FUNCTION EXISTENCE AND SIGNATURE TESTS -- =================================================================== @@ -1051,55 +1102,67 @@ BEGIN 'Function ${functionName} exists' ); - ${hasParams ? `-- Test 2: Function has correct parameter signature + ${ + hasParams + ? 
`-- Test 2: Function has correct parameter signature RETURN NEXT tap.has_function( '${schema}', '${functionName}', ${parameterSignature}, - 'Function ${functionName} has correct parameter types: ${parameterTypes.join(', ')}' + 'Function ${functionName} has correct parameter types: ${parameterTypes.join(", ")}' ); - ` : `-- Test 2: Function has no parameters + ` + : `-- Test 2: Function has no parameters RETURN NEXT tap.has_function( '${schema}', '${functionName}', ARRAY[]::text[], 'Function ${functionName} takes no parameters' ); - `} + ` + } -- Test 3: Function returns correct type RETURN NEXT tap.function_returns( '${schema}', '${functionName}', - ${hasParams ? parameterSignature + ',' : ''} + ${hasParams ? parameterSignature + "," : ""} '${returnType}', 'Function ${functionName} returns ${returnType}' ); - ${language !== 'sql' ? `-- Test 4: Function uses correct language + ${ + language !== "sql" + ? `-- Test 4: Function uses correct language RETURN NEXT tap.function_lang_is( '${schema}', '${functionName}', - ${hasParams ? parameterSignature + ',' : ''} + ${hasParams ? parameterSignature + "," : ""} '${language}', 'Function ${functionName} is written in ${language}' ); - ` : ''} + ` + : "" + } - ${metadata.securityDefiner ? `-- Test 5: Function is security definer + ${ + metadata.securityDefiner + ? `-- Test 5: Function is security definer RETURN NEXT tap.is_definer( '${schema}', '${functionName}', - ${hasParams ? parameterSignature + ',' : ''} + ${hasParams ? parameterSignature + "," : ""} 'Function ${functionName} is security definer' ); - ` : `-- Test 5: Function is NOT security definer (security invoker) + ` + : `-- Test 5: Function is NOT security definer (security invoker) RETURN NEXT tap.isnt_definer( '${schema}', '${functionName}', - ${hasParams ? parameterSignature + ',' : ''} + ${hasParams ? parameterSignature + "," : ""} 'Function ${functionName} is security invoker' ); - `} + ` + } -- =================================================================== -- BEHAVIORAL TESTS WITH SAMPLE INPUTS @@ -1120,32 +1183,46 @@ BEGIN ); END; - ${testCases.length > 0 ? testCases.map((testCase, index) => ` + ${ + testCases.length > 0 + ? testCases + .map( + (testCase, index) => ` -- Test ${7 + index}: Custom test case - ${testCase.description || `Test case ${index + 1}`} BEGIN ${testCase.input ? `SELECT ${schema}.${functionName}(${testCase.input}) INTO v_result;` : `SELECT ${schema}.${functionName}() INTO v_result;`} - ${testCase.expectedOutput !== undefined ? `RETURN NEXT tap.is( + ${ + testCase.expectedOutput !== undefined + ? `RETURN NEXT tap.is( v_result, - ${typeof testCase.expectedOutput === 'string' ? `'${testCase.expectedOutput}'` : testCase.expectedOutput}::${returnType}, + ${typeof testCase.expectedOutput === "string" ? 
`'${testCase.expectedOutput}'` : testCase.expectedOutput}::${returnType}, 'Function ${functionName} returns expected result: ${testCase.description || `test case ${index + 1}`}' - );` : `RETURN NEXT tap.ok( + );` + : `RETURN NEXT tap.ok( v_result IS NOT NULL, 'Function ${functionName} executes successfully: ${testCase.description || `test case ${index + 1}`}' - );`} + );` + } EXCEPTION WHEN OTHERS THEN RETURN NEXT tap.fail( 'Function ${functionName} test case failed: ${testCase.description || `test case ${index + 1}`} - ' || SQLERRM ); END; - `).join('') : ''} + `, + ) + .join("") + : "" + } -- =================================================================== -- ERROR CONDITION AND VALIDATION TESTS -- =================================================================== - ${hasParams ? `-- Test: Function handles invalid input appropriately + ${ + hasParams + ? `-- Test: Function handles invalid input appropriately BEGIN v_error_caught := false; BEGIN @@ -1161,9 +1238,13 @@ BEGIN 'Function ${functionName} handles invalid input appropriately (either raises exception or returns null)' ); END; - ` : ''} + ` + : "" + } - ${requiresSecurityTesting ? `-- =================================================================== + ${ + requiresSecurityTesting + ? `-- =================================================================== -- AUTHORIZATION AND SECURITY TESTS -- =================================================================== @@ -1204,16 +1285,21 @@ BEGIN 'Function ${functionName} should work with admin context: ' || SQLERRM ); END; - ` : ''} + ` + : "" + } - ${isVolatile ? `-- =================================================================== + ${ + isVolatile + ? `-- =================================================================== -- SIDE EFFECTS AND STATE TESTS (for volatile functions) -- =================================================================== -- Test: Function maintains data consistency -- TODO: Add specific tests for function side effects RETURN NEXT tap.pass('TODO: Test function side effects and data consistency'); - ` : `-- =================================================================== + ` + : `-- =================================================================== -- IMMUTABILITY TESTS (for stable/immutable functions) -- =================================================================== @@ -1222,9 +1308,13 @@ BEGIN v_result1 ${returnType}; v_result2 ${returnType}; BEGIN - ${sampleParams ? `SELECT ${schema}.${functionName}(${sampleParams}) INTO v_result1; - SELECT ${schema}.${functionName}(${sampleParams}) INTO v_result2;` : `SELECT ${schema}.${functionName}() INTO v_result1; - SELECT ${schema}.${functionName}() INTO v_result2;`} + ${ + sampleParams + ? 
`SELECT ${schema}.${functionName}(${sampleParams}) INTO v_result1; + SELECT ${schema}.${functionName}(${sampleParams}) INTO v_result2;` + : `SELECT ${schema}.${functionName}() INTO v_result1; + SELECT ${schema}.${functionName}() INTO v_result2;` + } RETURN NEXT tap.is( v_result1, @@ -1237,7 +1327,8 @@ BEGIN 'Function ${functionName} consistency test failed: ' || SQLERRM ); END; - `} + ` + } -- =================================================================== -- PERFORMANCE AND RESOURCE TESTS (optional) @@ -1308,9 +1399,9 @@ COMMENT ON FUNCTION test.${testFunctionName}() IS 'Comprehensive tests for ${fun -- ===================================================== `; - + if (policies && policies.length > 0) { - policies.forEach(policy => { + policies.forEach((policy) => { tests += ` -- Test: Policy '${policy.name}' exists RETURN NEXT tap.ok( (SELECT COUNT(*) > 0 FROM pg_policies @@ -1335,7 +1426,7 @@ COMMENT ON FUNCTION test.${testFunctionName}() IS 'Comprehensive tests for ${fun `; } - + return tests; } @@ -1353,11 +1444,11 @@ COMMENT ON FUNCTION test.${testFunctionName}() IS 'Comprehensive tests for ${fun -- ===================================================== `; - + if (policies && policies.length > 0) { - policies.forEach(policy => { + policies.forEach((policy) => { if (policy.commands && policy.commands.length > 0) { - policy.commands.forEach(cmd => { + policy.commands.forEach((cmd) => { tests += ` -- Test: Policy '${policy.name}' applies to ${cmd} command RETURN NEXT tap.ok( (SELECT COUNT(*) > 0 FROM pg_policies @@ -1384,7 +1475,7 @@ COMMENT ON FUNCTION test.${testFunctionName}() IS 'Comprehensive tests for ${fun `; } - + return tests; } @@ -1402,19 +1493,19 @@ COMMENT ON FUNCTION test.${testFunctionName}() IS 'Comprehensive tests for ${fun -- ===================================================== `; - + if (policies && policies.length > 0) { - policies.forEach(policy => { + policies.forEach((policy) => { if (policy.roles && policy.roles.length > 0) { - const _roleList = policy.roles.map(role => `'${role}'`).join(', '); + const _roleList = policy.roles.map((role) => `'${role}'`).join(", "); tests += ` -- Test: Policy '${policy.name}' applies to correct roles RETURN NEXT tap.set_eq( $$SELECT unnest(roles) FROM pg_policies WHERE schemaname = '${schema}' AND tablename = '${tableName}' AND policyname = '${policy.name}'$$, - $$VALUES (${policy.roles.map(role => `'${role}'`).join('), (')})$$, - 'Policy "${policy.name}" applies to correct roles: ${policy.roles.join(', ')}' + $$VALUES (${policy.roles.map((role) => `'${role}'`).join("), (")})$$, + 'Policy "${policy.name}" applies to correct roles: ${policy.roles.join(", ")}' ); `; @@ -1432,7 +1523,7 @@ COMMENT ON FUNCTION test.${testFunctionName}() IS 'Comprehensive tests for ${fun `; } - + return tests; } @@ -1450,7 +1541,7 @@ COMMENT ON FUNCTION test.${testFunctionName}() IS 'Comprehensive tests for ${fun -- ===================================================== `; - + // Anonymous user tests tests += ` -- Test: Anonymous access PERFORM test.set_auth_context(NULL, 'anon'); @@ -1461,7 +1552,7 @@ COMMENT ON FUNCTION test.${testFunctionName}() IS 'Comprehensive tests for ${fun ); `; - + // Authenticated user tests tests += ` -- Test: Authenticated user can access own data PERFORM test.set_auth_context(v_user1_id, 'authenticated'); @@ -1472,7 +1563,7 @@ COMMENT ON FUNCTION test.${testFunctionName}() IS 'Comprehensive tests for ${fun RETURN NEXT tap.pass('TODO: Test authenticated user can access own data in ${tableName}'); `; - + // Cross-user 
access restriction tests tests += ` -- Test: Users cannot access other users' data PERFORM test.set_auth_context(v_user2_id, 'authenticated'); @@ -1481,7 +1572,7 @@ COMMENT ON FUNCTION test.${testFunctionName}() IS 'Comprehensive tests for ${fun RETURN NEXT tap.pass('TODO: Test user cannot access other users data in ${tableName}'); `; - + // Admin access tests tests += ` -- Test: Admin users have elevated access PERFORM test.set_auth_context(v_admin_id, 'authenticated'); @@ -1489,21 +1580,21 @@ COMMENT ON FUNCTION test.${testFunctionName}() IS 'Comprehensive tests for ${fun RETURN NEXT tap.pass('TODO: Test admin user has appropriate access to ${tableName}'); `; - + if (testScenarios && testScenarios.length > 0) { testScenarios.forEach((scenario, index) => { - tests += ` -- Custom Test Scenario ${index + 1}: ${scenario.description || 'Custom scenario'} + tests += ` -- Custom Test Scenario ${index + 1}: ${scenario.description || "Custom scenario"} PERFORM test.set_auth_context( - ${scenario.userId ? `'${scenario.userId}'::uuid` : 'NULL'}, - '${scenario.role || 'authenticated'}' + ${scenario.userId ? `'${scenario.userId}'::uuid` : "NULL"}, + '${scenario.role || "authenticated"}' ); - RETURN NEXT tap.pass('TODO: Implement custom test scenario: ${scenario.description || 'custom scenario'}'); + RETURN NEXT tap.pass('TODO: Implement custom test scenario: ${scenario.description || "custom scenario"}'); `; }); } - + return tests; } @@ -1558,30 +1649,43 @@ COMMENT ON FUNCTION test.${testFunctionName}() IS 'Comprehensive tests for ${fun * @private */ generateTableTemplate(requirement) { - const schema = requirement.schema || 'public'; + const schema = requirement.schema || "public"; const tableName = requirement.targetName || requirement.name; const testFunctionName = `run_${tableName}_table_tests`; - const planCount = this.calculatePlanCount(requirement, 'table'); - + const planCount = this.calculatePlanCount(requirement, "table"); + // Extract metadata for comprehensive testing const metadata = requirement.metadata || {}; const columns = metadata.columns || []; const expectedConstraints = metadata.expectedConstraints || []; const requiresRowLevelSecurity = metadata.requiresRowLevelSecurity || false; const indexes = metadata.indexes || []; - + // Generate column test assertions - const columnTests = this.generateColumnTestAssertions(schema, tableName, columns); - - // Generate constraint test assertions - const constraintTests = this.generateConstraintTestAssertions(schema, tableName, expectedConstraints); - + const columnTests = this.generateColumnTestAssertions( + schema, + tableName, + columns, + ); + + // Generate constraint test assertions + const constraintTests = this.generateConstraintTestAssertions( + schema, + tableName, + expectedConstraints, + ); + // Generate index test assertions - const indexTests = this.generateIndexTestAssertions(schema, tableName, indexes); - + const indexTests = this.generateIndexTestAssertions( + schema, + tableName, + indexes, + ); + // Generate RLS test assertions if required - const rlsTests = requiresRowLevelSecurity ? - this.generateRlsTestAssertions(schema, tableName) : ''; + const rlsTests = requiresRowLevelSecurity + ? this.generateRlsTestAssertions(schema, tableName) + : ""; return `-- ========================================================================= -- TABLE TESTS: ${tableName} @@ -1593,7 +1697,7 @@ COMMENT ON FUNCTION test.${testFunctionName}() IS 'Comprehensive tests for ${fun -- 4. 
Check constraints and unique constraints work as expected -- 5. Table ownership and privileges are set appropriately -- 6. Row Level Security is configured if required -${requirement.description ? `-- \n-- Description: ${requirement.description}` : ''} +${requirement.description ? `-- \n-- Description: ${requirement.description}` : ""} CREATE OR REPLACE FUNCTION test.${testFunctionName}() RETURNS SETOF TEXT @@ -1701,20 +1805,20 @@ COMMENT ON FUNCTION test.${testFunctionName}() IS 'Comprehensive tests for ${tab * @private */ generateIndexTemplate(requirement) { - const schema = requirement.schema || 'public'; + const schema = requirement.schema || "public"; const indexName = requirement.name; - const tableName = requirement.tableName || 'TODO_TABLE_NAME'; + const tableName = requirement.tableName || "TODO_TABLE_NAME"; const testFunctionName = `run_${indexName}_index_tests`; - const planCount = this.calculatePlanCount(requirement, 'index'); - + const planCount = this.calculatePlanCount(requirement, "index"); + const isUnique = requirement.isUnique || false; - const indexType = requirement.indexType || 'btree'; + const indexType = requirement.indexType || "btree"; const isPartial = requirement.isPartial || false; - const indexedColumns = requirement.indexedColumns || ['TODO_COLUMN']; - const whereClause = requirement.whereClause || ''; - + const indexedColumns = requirement.indexedColumns || ["TODO_COLUMN"]; + const whereClause = requirement.whereClause || ""; + // Build column array string for pgTAP - const columnsArrayStr = indexedColumns.map(col => `'${col}'`).join(', '); + const columnsArrayStr = indexedColumns.map((col) => `'${col}'`).join(", "); return `-- ========================================================================= -- INDEX TESTS: ${indexName} @@ -1725,7 +1829,7 @@ COMMENT ON FUNCTION test.${testFunctionName}() IS 'Comprehensive tests for ${tab -- 3. Index type is correct (${indexType}) -- 4. Index uniqueness constraint works as expected -- 5. Index performance characteristics are appropriate -${requirement.description ? `-- \\n-- Description: ${requirement.description}` : ''} +${requirement.description ? `-- \\n-- Description: ${requirement.description}` : ""} CREATE OR REPLACE FUNCTION test.${testFunctionName}() RETURNS SETOF TEXT @@ -1767,7 +1871,9 @@ BEGIN 'Index ${indexName} is of type ${indexType}' ); -${isUnique ? ` -- Test 4: Index enforces uniqueness +${ + isUnique + ? ` -- Test 4: Index enforces uniqueness RETURN NEXT tap.index_is_unique( '${schema}', '${tableName}', @@ -1778,13 +1884,13 @@ ${isUnique ? 
` -- Test 4: Index enforces uniqueness -- Test 5: Unique constraint validation - duplicate insertion should fail BEGIN -- Insert a test record first - INSERT INTO ${schema}.${tableName} (${indexedColumns.join(', ')}) - VALUES (${'null, '.repeat(indexedColumns.length).slice(0, -2)}) -- TODO: Add appropriate test values + INSERT INTO ${schema}.${tableName} (${indexedColumns.join(", ")}) + VALUES (${"null, ".repeat(indexedColumns.length).slice(0, -2)}) -- TODO: Add appropriate test values RETURNING id INTO v_test_record_id; -- Try to insert duplicate - should fail RETURN NEXT tap.throws_ok( - 'INSERT INTO ${schema}.${tableName} (${indexedColumns.join(', ')}) VALUES (${'null, '.repeat(indexedColumns.length).slice(0, -2)})', -- TODO: Same test values + 'INSERT INTO ${schema}.${tableName} (${indexedColumns.join(", ")}) VALUES (${"null, ".repeat(indexedColumns.length).slice(0, -2)})', -- TODO: Same test values '23505', -- Unique violation error code 'Duplicate insertion properly rejected by unique index ${indexName}' ); @@ -1794,21 +1900,27 @@ ${isUnique ? ` -- Test 4: Index enforces uniqueness -- If setup fails, mark as TODO RETURN NEXT tap.pass('TODO: Set up unique constraint validation test'); END; -` : ` -- Test 4: Non-unique index allows duplicates (if applicable) +` + : ` -- Test 4: Non-unique index allows duplicates (if applicable) -- TODO: Add test for non-unique index behavior if relevant RETURN NEXT tap.pass('TODO: Add non-unique index behavior test if applicable'); -- Test 5: Index performance characteristics -- TODO: Add performance validation tests (comments about expected usage patterns) RETURN NEXT tap.pass('TODO: Add performance validation tests'); -`} -${isPartial ? ` -- Test 6: Partial index WHERE clause validation +` +} +${ + isPartial + ? ` -- Test 6: Partial index WHERE clause validation -- TODO: Verify partial index WHERE clause: ${whereClause} RETURN NEXT tap.pass('TODO: Test partial index WHERE clause behavior'); -` : ` -- Test 6: Full index coverage (not partial) +` + : ` -- Test 6: Full index coverage (not partial) -- TODO: Verify index covers all table rows (no WHERE clause) RETURN NEXT tap.pass('TODO: Verify full index coverage'); -`} +` +} -- Test 7: Index usage in query plans (performance validation) -- NOTE: This is a comment-based test for manual verification -- Query patterns that should use this index: @@ -1831,9 +1943,9 @@ COMMENT ON FUNCTION test.${testFunctionName}() IS 'Tests for ${indexName} index -- Performance Notes: -- Index: ${indexName} -- Type: ${indexType} --- Columns: ${indexedColumns.join(', ')} --- Unique: ${isUnique ? 'Yes' : 'No'} -${isPartial ? `-- Partial: Yes (WHERE ${whereClause})` : '-- Partial: No'} +-- Columns: ${indexedColumns.join(", ")} +-- Unique: ${isUnique ? "Yes" : "No"} +${isPartial ? `-- Partial: Yes (WHERE ${whereClause})` : "-- Partial: No"} -- Expected usage patterns: -- TODO: Document expected query patterns that benefit from this index -- TODO: Document any specific performance requirements or SLA targets`; @@ -1865,7 +1977,7 @@ ${isPartial ? `-- Partial: Yes (WHERE ${whereClause})` : '-- Partial: No'} columns.forEach((column, _index) => { const columnName = column.targetName || column.name; const metadata = column.metadata || {}; - + assertions += ` -- Column: ${columnName} RETURN NEXT tap.has_column( @@ -1888,8 +2000,10 @@ ${isPartial ? `-- Partial: Yes (WHERE ${whereClause})` : '-- Partial: No'} } if (metadata.expectedNotNull !== undefined) { - const assertion = metadata.expectedNotNull ? 
'col_not_null' : 'col_is_null'; - const description = metadata.expectedNotNull ? 'NOT NULL' : 'nullable'; + const assertion = metadata.expectedNotNull + ? "col_not_null" + : "col_is_null"; + const description = metadata.expectedNotNull ? "NOT NULL" : "nullable"; assertions += ` RETURN NEXT tap.${assertion}( @@ -1987,7 +2101,7 @@ ${isPartial ? `-- Partial: Yes (WHERE ${whereClause})` : '-- Partial: No'} -- ========================================================================= `; - expectedConstraints.forEach(constraintName => { + expectedConstraints.forEach((constraintName) => { assertions += ` -- Constraint: ${constraintName} RETURN NEXT tap.has_check( @@ -2005,7 +2119,7 @@ ${isPartial ? `-- Partial: Yes (WHERE ${whereClause})` : '-- Partial: No'} } /** - * Generate index test assertions + * Generate index test assertions * @param {string} schema - Schema name * @param {string} tableName - Table name * @param {IndexTestRequirement[]} indexes - Index requirements @@ -2031,10 +2145,10 @@ ${isPartial ? `-- Partial: Yes (WHERE ${whereClause})` : '-- Partial: No'} -- ========================================================================= `; - indexes.forEach(index => { + indexes.forEach((index) => { const indexName = index.targetName || index.name; const metadata = index.metadata || {}; - + assertions += ` -- Index: ${indexName} RETURN NEXT tap.has_index( @@ -2112,14 +2226,16 @@ ${isPartial ? `-- Partial: Yes (WHERE ${whereClause})` : '-- Partial: No'} * @private */ generateColumnTemplate(requirement) { - const schema = requirement.schema || 'public'; + const schema = requirement.schema || "public"; const tableName = requirement.tableName || requirement.metadata?.tableName; const columnName = requirement.name; const testFunctionName = `run_${tableName}_${columnName}_column_tests`; - const planCount = this.calculatePlanCount(requirement, 'column'); + const planCount = this.calculatePlanCount(requirement, "column"); if (!tableName) { - throw new Error('Column test requirement must specify tableName'); + throw new ValidationError( + "Column test requirement must specify tableName", + ); } // Build test assertions based on column metadata @@ -2134,7 +2250,7 @@ ${isPartial ? `-- Partial: Yes (WHERE ${whereClause})` : '-- Partial: No'} -- 3. Primary key and foreign key relationships work correctly -- 4. Data integrity is maintained during operations -- 5. Type conversions work as expected -${requirement.description ? `-- \\n-- Description: ${requirement.description}` : ''} +${requirement.description ? 
`-- \\n-- Description: ${requirement.description}` : ""} CREATE OR REPLACE FUNCTION test.${testFunctionName}() RETURNS SETOF TEXT @@ -2144,7 +2260,7 @@ SET search_path = test, public, security AS $$ DECLARE v_test_id uuid; - v_sample_value ${requirement.expectedType || 'text'}; + v_sample_value ${requirement.expectedType || "text"}; BEGIN -- Plan our tests (adjust count as needed) RETURN NEXT tap.plan(${planCount}); @@ -2176,7 +2292,7 @@ COMMENT ON FUNCTION test.${testFunctionName}() IS 'Tests for ${columnName} colum * @private */ buildColumnAssertions(requirement) { - const schema = requirement.schema || 'public'; + const schema = requirement.schema || "public"; const tableName = requirement.tableName || requirement.metadata?.tableName; const columnName = requirement.name; const assertions = []; @@ -2222,11 +2338,12 @@ COMMENT ON FUNCTION test.${testFunctionName}() IS 'Tests for ${columnName} colum '${columnName}', 'Column ${columnName} has a default value' );`); - + if (requirement.expectedDefaultValue !== undefined) { - const defaultValue = typeof requirement.expectedDefaultValue === 'string' - ? `'${requirement.expectedDefaultValue}'` - : requirement.expectedDefaultValue; + const defaultValue = + typeof requirement.expectedDefaultValue === "string" + ? `'${requirement.expectedDefaultValue}'` + : requirement.expectedDefaultValue; assertions.push(` -- Test ${testNumber++}: Column has correct default value RETURN NEXT tap.col_default_is( '${schema}', @@ -2250,7 +2367,11 @@ COMMENT ON FUNCTION test.${testFunctionName}() IS 'Tests for ${columnName} colum } // Test 6: Foreign key - if (requirement.isForeignKey === true && requirement.referencedTable && requirement.referencedColumn) { + if ( + requirement.isForeignKey === true && + requirement.referencedTable && + requirement.referencedColumn + ) { assertions.push(` -- Test ${testNumber++}: Foreign key relationship RETURN NEXT tap.fk_ok( '${schema}', '${tableName}', '${columnName}', @@ -2268,7 +2389,10 @@ COMMENT ON FUNCTION test.${testFunctionName}() IS 'Tests for ${columnName} colum );`); // Test 8: Type conversion test (if applicable) - if (requirement.expectedType && this.isNumericType(requirement.expectedType)) { + if ( + requirement.expectedType && + this.isNumericType(requirement.expectedType) + ) { assertions.push(` -- Test ${testNumber++}: Type conversion test RETURN NEXT tap.lives_ok( 'SELECT ${columnName}::${requirement.expectedType} FROM ${schema}.${tableName} LIMIT 1', @@ -2286,7 +2410,7 @@ COMMENT ON FUNCTION test.${testFunctionName}() IS 'Tests for ${columnName} colum );`); } - return assertions.join('\\n\\n'); + return assertions.join("\\n\\n"); } /** @@ -2296,10 +2420,25 @@ COMMENT ON FUNCTION test.${testFunctionName}() IS 'Tests for ${columnName} colum * @private */ isNumericType(dataType) { - const numericTypes = ['integer', 'int', 'int4', 'bigint', 'int8', 'smallint', 'int2', - 'decimal', 'numeric', 'real', 'float4', 'double precision', 'float8', - 'serial', 'bigserial', 'smallserial']; - return numericTypes.some(type => dataType.toLowerCase().includes(type)); + const numericTypes = [ + "integer", + "int", + "int4", + "bigint", + "int8", + "smallint", + "int2", + "decimal", + "numeric", + "real", + "float4", + "double precision", + "float8", + "serial", + "bigserial", + "smallserial", + ]; + return numericTypes.some((type) => dataType.toLowerCase().includes(type)); } /** @@ -2318,7 +2457,7 @@ COMMENT ON FUNCTION test.${testFunctionName}() IS 'Tests for ${columnName} colum function: 4, table: 12, column: 5, - index: 8 + 
index: 8, }; let baseCount = baseCounts[testType] || 4; @@ -2329,15 +2468,15 @@ COMMENT ON FUNCTION test.${testFunctionName}() IS 'Tests for ${columnName} colum } // For RLS tests, adjust based on policies and test scenarios - if (testType === 'rls' && requirement.metadata) { + if (testType === "rls" && requirement.metadata) { const metadata = requirement.metadata; - + // Add tests for each specific policy if (metadata.policies && metadata.policies.length > 0) { baseCount += metadata.policies.length * 2; // 2 tests per policy (existence + commands) - + // Additional tests for policies with role restrictions - metadata.policies.forEach(policy => { + metadata.policies.forEach((policy) => { if (policy.roles && policy.roles.length > 0) { baseCount += 1; // Policy role test } @@ -2346,7 +2485,7 @@ COMMENT ON FUNCTION test.${testFunctionName}() IS 'Tests for ${columnName} colum } }); } - + // Add tests for custom test scenarios if (metadata.testScenarios && metadata.testScenarios.length > 0) { baseCount += metadata.testScenarios.length; // Custom scenario tests @@ -2358,24 +2497,27 @@ COMMENT ON FUNCTION test.${testFunctionName}() IS 'Tests for ${columnName} colum } // For table tests, adjust based on metadata - if (testType === 'table' && requirement.metadata) { + if (testType === "table" && requirement.metadata) { const metadata = requirement.metadata; - + // Add tests for each column if (metadata.columns && metadata.columns.length > 0) { baseCount += metadata.columns.length * 2; // 2 tests per column minimum } - + // Add tests for constraints - if (metadata.expectedConstraints && metadata.expectedConstraints.length > 0) { + if ( + metadata.expectedConstraints && + metadata.expectedConstraints.length > 0 + ) { baseCount += metadata.expectedConstraints.length * 2; } - + // Add tests for indexes if (metadata.indexes && metadata.indexes.length > 0) { baseCount += metadata.indexes.length * 2; } - + // Add tests for RLS if required if (metadata.requiresRowLevelSecurity) { baseCount += 3; @@ -2383,7 +2525,7 @@ COMMENT ON FUNCTION test.${testFunctionName}() IS 'Tests for ${columnName} colum } // Column-specific adjustments - if (testType === 'column') { + if (testType === "column") { if (requirement.expectedType) baseCount += 1; if (requirement.expectedNotNull !== undefined) baseCount += 1; if (requirement.expectedHasDefault) baseCount += 1; @@ -2395,7 +2537,7 @@ COMMENT ON FUNCTION test.${testFunctionName}() IS 'Tests for ${columnName} colum } // Index-specific adjustments - if (testType === 'index') { + if (testType === "index") { if (requirement.isUnique) { baseCount += 2; // Additional uniqueness constraint tests } @@ -2405,7 +2547,7 @@ COMMENT ON FUNCTION test.${testFunctionName}() IS 'Tests for ${columnName} colum if (requirement.indexedColumns && requirement.indexedColumns.length > 3) { baseCount += 1; // More complex multi-column indexes } - if (requirement.indexType && requirement.indexType !== 'btree') { + if (requirement.indexType && requirement.indexType !== "btree") { baseCount += 1; // Non-standard index types need more validation } } @@ -2459,19 +2601,19 @@ COMMENT ON FUNCTION test.${testFunctionName}() IS 'Tests for ${columnName} colum return null; } - const sampleValues = parameterTypes.map(type => { + const sampleValues = parameterTypes.map((type) => { const lowerType = type.toLowerCase(); - + // Handle array types - if (lowerType.includes('[]')) { - const baseType = lowerType.replace('[]', ''); + if (lowerType.includes("[]")) { + const baseType = lowerType.replace("[]", ""); return 
this.getSampleArrayValue(baseType); } - + return this.getSampleValue(lowerType); }); - return sampleValues.join(', '); + return sampleValues.join(", "); } /** @@ -2485,12 +2627,12 @@ COMMENT ON FUNCTION test.${testFunctionName}() IS 'Tests for ${columnName} colum return null; } - const invalidValues = parameterTypes.map(type => { + const invalidValues = parameterTypes.map((type) => { const lowerType = type.toLowerCase(); return this.getInvalidValue(lowerType); }); - return invalidValues.join(', '); + return invalidValues.join(", "); } /** @@ -2502,65 +2644,65 @@ COMMENT ON FUNCTION test.${testFunctionName}() IS 'Tests for ${columnName} colum getSampleValue(type) { const typeMap = { // Integer types - 'integer': '42', - 'int': '42', - 'int4': '42', - 'bigint': '123456789', - 'int8': '123456789', - 'smallint': '123', - 'int2': '123', + integer: "42", + int: "42", + int4: "42", + bigint: "123456789", + int8: "123456789", + smallint: "123", + int2: "123", // Decimal types - 'decimal': '123.45', - 'numeric': '123.45', - 'real': '123.45', - 'float4': '123.45', - 'double precision': '123.45', - 'float8': '123.45', + decimal: "123.45", + numeric: "123.45", + real: "123.45", + float4: "123.45", + "double precision": "123.45", + float8: "123.45", // String types - 'text': "'sample text'", - 'varchar': "'sample varchar'", - 'character varying': "'sample varchar'", - 'char': "'S'", - 'character': "'S'", + text: "'sample text'", + varchar: "'sample varchar'", + "character varying": "'sample varchar'", + char: "'S'", + character: "'S'", // Boolean - 'boolean': 'true', - 'bool': 'true', + boolean: "true", + bool: "true", // Date/Time - 'date': "'2024-01-01'", - 'time': "'12:00:00'", - 'timestamp': "'2024-01-01 12:00:00'", - 'timestamptz': "'2024-01-01 12:00:00+00'", - 'interval': "'1 hour'", + date: "'2024-01-01'", + time: "'12:00:00'", + timestamp: "'2024-01-01 12:00:00'", + timestamptz: "'2024-01-01 12:00:00+00'", + interval: "'1 hour'", // UUID - 'uuid': "'00000000-0000-0000-0000-000000000001'::uuid", + uuid: "'00000000-0000-0000-0000-000000000001'::uuid", // JSON - 'json': "'{\"key\": \"value\"}'::json", - 'jsonb': "'{\"key\": \"value\"}'::jsonb", + json: '\'{"key": "value"}\'::json', + jsonb: '\'{"key": "value"}\'::jsonb', // Binary - 'bytea': "'\\x414243'", + bytea: "'\\x414243'", // Network types - 'inet': "'192.168.1.1'", - 'cidr': "'192.168.1.0/24'", - 'macaddr': "'08:00:2b:01:02:03'", + inet: "'192.168.1.1'", + cidr: "'192.168.1.0/24'", + macaddr: "'08:00:2b:01:02:03'", // Geometric types (simplified) - 'point': "'(1,2)'", - 'polygon': "'((0,0),(1,1),(1,0))'", - 'circle': "'<(0,0),1>'", + point: "'(1,2)'", + polygon: "'((0,0),(1,1),(1,0))'", + circle: "'<(0,0),1>'", // Default fallback - 'default': "'sample_value'" + default: "'sample_value'", }; - return typeMap[type] || typeMap['default']; + return typeMap[type] || typeMap["default"]; } /** @@ -2571,18 +2713,22 @@ COMMENT ON FUNCTION test.${testFunctionName}() IS 'Tests for ${columnName} colum */ getSampleArrayValue(baseType) { const sampleValue = this.getSampleValue(baseType); - + // For simple types, create an array - if (baseType.includes('int') || baseType.includes('numeric') || baseType.includes('decimal')) { - return 'ARRAY[1, 2, 3]'; - } else if (baseType === 'text' || baseType.includes('varchar')) { + if ( + baseType.includes("int") || + baseType.includes("numeric") || + baseType.includes("decimal") + ) { + return "ARRAY[1, 2, 3]"; + } else if (baseType === "text" || baseType.includes("varchar")) { return "ARRAY['item1', 'item2', 
'item3']"; - } else if (baseType === 'boolean') { - return 'ARRAY[true, false]'; - } else if (baseType === 'uuid') { + } else if (baseType === "boolean") { + return "ARRAY[true, false]"; + } else if (baseType === "uuid") { return "ARRAY['00000000-0000-0000-0000-000000000001'::uuid, '00000000-0000-0000-0000-000000000002'::uuid]"; } - + return `ARRAY[${sampleValue}, ${sampleValue}]`; } @@ -2595,43 +2741,43 @@ COMMENT ON FUNCTION test.${testFunctionName}() IS 'Tests for ${columnName} colum getInvalidValue(type) { const invalidMap = { // Integer types - use string that can't be converted - 'integer': "'not_a_number'", - 'int': "'not_a_number'", - 'int4': "'not_a_number'", - 'bigint': "'not_a_number'", - 'int8': "'not_a_number'", - 'smallint': "'not_a_number'", + integer: "'not_a_number'", + int: "'not_a_number'", + int4: "'not_a_number'", + bigint: "'not_a_number'", + int8: "'not_a_number'", + smallint: "'not_a_number'", // For numeric types, use invalid string - 'decimal': "'invalid_decimal'", - 'numeric': "'invalid_numeric'", - 'real': "'invalid_real'", + decimal: "'invalid_decimal'", + numeric: "'invalid_numeric'", + real: "'invalid_real'", // For dates, use invalid format - 'date': "'invalid-date'", - 'timestamp': "'invalid-timestamp'", - 'timestamptz': "'invalid-timestamp'", + date: "'invalid-date'", + timestamp: "'invalid-timestamp'", + timestamptz: "'invalid-timestamp'", // For UUID, use invalid format - 'uuid': "'invalid-uuid-format'", + uuid: "'invalid-uuid-format'", // For JSON, use invalid syntax - 'json': "'invalid json syntax{'", - 'jsonb': "'invalid json syntax{'", + json: "'invalid json syntax{'", + jsonb: "'invalid json syntax{'", // For boolean, use invalid string - 'boolean': "'maybe'", - 'bool': "'maybe'", + boolean: "'maybe'", + bool: "'maybe'", // For network types, use invalid formats - 'inet': "'invalid.ip.address'", - 'cidr': "'invalid/cidr'", + inet: "'invalid.ip.address'", + cidr: "'invalid/cidr'", // Default: null (which might be invalid for NOT NULL columns) - 'default': 'NULL' + default: "NULL", }; - return invalidMap[type] || invalidMap['default']; + return invalidMap[type] || invalidMap["default"]; } /** @@ -2643,10 +2789,10 @@ COMMENT ON FUNCTION test.${testFunctionName}() IS 'Tests for ${columnName} colum formatPgTapStructure(content) { // Ensure consistent indentation for pgTAP functions return content - .replace(/^(\s*RETURN NEXT tap\.)/gm, ' $1') // Standardize pgTAP function indentation - .replace(/^(\s*--)/gm, '$1') // Keep comment indentation as-is - .replace(/^(\s*PERFORM)/gm, ' $1') // Standardize PERFORM indentation - .replace(/^(\s*SELECT)/gm, ' $1'); // Standardize SELECT indentation + .replace(/^(\s*RETURN NEXT tap\.)/gm, " $1") // Standardize pgTAP function indentation + .replace(/^(\s*--)/gm, "$1") // Keep comment indentation as-is + .replace(/^(\s*PERFORM)/gm, " $1") // Standardize PERFORM indentation + .replace(/^(\s*SELECT)/gm, " $1"); // Standardize SELECT indentation } /** @@ -2657,50 +2803,50 @@ COMMENT ON FUNCTION test.${testFunctionName}() IS 'Tests for ${columnName} colum */ extractPatternVariables(requirement) { const variables = { - schema: requirement.schema || 'public', + schema: requirement.schema || "public", tableName: requirement.name, columnName: requirement.name, functionName: requirement.name, indexName: requirement.name, constraintName: requirement.name, - policyName: requirement.name + policyName: requirement.name, }; // Add metadata-based variables if (requirement.metadata) { const metadata = requirement.metadata; - + // 
Table-specific variables if (metadata.tableName) { variables.tableName = metadata.tableName; } - - // Column-specific variables + + // Column-specific variables if (metadata.expectedType) { variables.dataType = metadata.expectedType; } - + // Function-specific variables if (metadata.parameterTypes) { variables.parameterTypes = metadata.parameterTypes; } - + if (metadata.returnType) { variables.returnType = metadata.returnType; } - + // Index-specific variables if (metadata.indexedColumns) { variables.indexedColumns = metadata.indexedColumns; } - + // RLS-specific variables if (metadata.policies) { variables.policies = metadata.policies; } - + // Test data variables - variables.testId = 'test-id-' + Math.random().toString(36).substr(2, 9); + variables.testId = "test-id-" + Math.random().toString(36).substr(2, 9); variables.validValues = this.generateSampleTestData(requirement); variables.invalidValues = this.generateInvalidTestData(requirement); } @@ -2717,28 +2863,30 @@ COMMENT ON FUNCTION test.${testFunctionName}() IS 'Tests for ${columnName} colum * @private */ generatePatternEnhancements(requirement, patterns, variables) { - let enhancements = ''; - - patterns.forEach(pattern => { + let enhancements = ""; + + patterns.forEach((pattern) => { try { // Skip patterns that are already covered by the base template if (this.isPatternCoveredByBase(pattern, requirement)) { return; } - + // Render pattern with variables - const renderedPattern = this.patternLibrary.renderPattern(pattern.name, variables); - + const renderedPattern = this.patternLibrary.renderPattern( + pattern.name, + variables, + ); + enhancements += `-- Pattern: ${pattern.name} (${pattern.category})\n`; enhancements += `-- ${pattern.description}\n`; - enhancements += renderedPattern + '\n\n'; - + enhancements += renderedPattern + "\n\n"; } catch (error) { // Log pattern rendering errors but don't fail the whole generation enhancements += `-- Pattern ${pattern.name} could not be rendered: ${error.message}\n\n`; } }); - + return enhancements; } @@ -2751,22 +2899,25 @@ COMMENT ON FUNCTION test.${testFunctionName}() IS 'Tests for ${columnName} colum */ isPatternCoveredByBase(pattern, requirement) { // Basic existence patterns are usually covered by base templates - const basicPatterns = ['table_exists_basic', 'column_exists_basic']; - + const basicPatterns = ["table_exists_basic", "column_exists_basic"]; + if (basicPatterns.includes(pattern.name)) { return true; } - + // For table tests, column structure validation is already covered - if (requirement.type === 'table' && pattern.name === 'column_structure_validation') { + if ( + requirement.type === "table" && + pattern.name === "column_structure_validation" + ) { return true; } - + // For RLS tests, basic RLS checks are covered - if (requirement.type === 'rls' && pattern.name === 'rls_enablement_check') { + if (requirement.type === "rls" && pattern.name === "rls_enablement_check") { return true; } - + return false; } @@ -2778,15 +2929,15 @@ COMMENT ON FUNCTION test.${testFunctionName}() IS 'Tests for ${columnName} colum */ generateSampleTestData(requirement) { const metadata = requirement.metadata || {}; - - if (requirement.type === 'column' && metadata.expectedType) { + + if (requirement.type === "column" && metadata.expectedType) { return this.getSampleValue(metadata.expectedType.toLowerCase()); } - - if (requirement.type === 'table') { - return 'DEFAULT VALUES'; + + if (requirement.type === "table") { + return "DEFAULT VALUES"; } - + return "'sample_value'"; } @@ -2798,13 
+2949,13 @@ COMMENT ON FUNCTION test.${testFunctionName}() IS 'Tests for ${columnName} colum */ generateInvalidTestData(requirement) { const metadata = requirement.metadata || {}; - - if (requirement.type === 'column' && metadata.expectedType) { + + if (requirement.type === "column" && metadata.expectedType) { return this.getInvalidValue(metadata.expectedType.toLowerCase()); } - - return 'NULL'; + + return "NULL"; } } -module.exports = TestTemplateGenerator; \ No newline at end of file +module.exports = TestTemplateGenerator; diff --git a/src/lib/testing/errors/TestCoverageError.js b/src/lib/testing/errors/TestCoverageError.js new file mode 100644 index 0000000..1feda88 --- /dev/null +++ b/src/lib/testing/errors/TestCoverageError.js @@ -0,0 +1,47 @@ +// src/lib/testing/errors/TestCoverageErrors.js +class TestCoverageError extends Error { + constructor(message, code, details = {}) { + super(message); + this.name = "TestCoverageError"; + this.code = code; + this.details = details; + this.timestamp = new Date().toISOString(); + } + toJSON() { + return { + name: this.name, + message: this.message, + code: this.code, + details: this.details, + timestamp: this.timestamp, + }; + } +} + +class ValidationError extends TestCoverageError { + constructor(message, details) { + super(message, "VALIDATION_ERROR", details); + this.name = "ValidationError"; + } +} + +class CoverageEnforcementError extends TestCoverageError { + constructor(message, gaps, percentage) { + super(message, "COVERAGE_ENFORCEMENT", { gaps, percentage }); + this.name = "CoverageEnforcementError"; + } +} + +class ParsingError extends TestCoverageError { + constructor(message, file, line) { + super(message, "PARSING_ERROR", { file, line }); + this.name = "ParsingError"; + } +} + +module.exports = { + TestCoverageError, + ValidationError, + CoverageEnforcementError, + ParsingError, +}; diff --git a/src/lib/testing/errors/index.js b/src/lib/testing/errors/index.js new file mode 100644 index 0000000..cf5ee9c --- /dev/null +++ b/src/lib/testing/errors/index.js @@ -0,0 +1 @@ +module.exports = require("./TestCoverageError"); diff --git a/src/lib/testing/errors/index.mjs b/src/lib/testing/errors/index.mjs new file mode 100644 index 0000000..14b9229 --- /dev/null +++ b/src/lib/testing/errors/index.mjs @@ -0,0 +1,11 @@ +// ESM wrapper that re-exports the SAME CJS instances +import { createRequire } from "module"; +const require = createRequire(import.meta.url); +const cjs = require("./index.js"); +export const { + TestCoverageError, + ValidationError, + CoverageEnforcementError, + ParsingError, +} = cjs; +export default cjs; diff --git a/src/lib/testing/handleTestingError.js b/src/lib/testing/handleTestingError.js new file mode 100644 index 0000000..3875042 --- /dev/null +++ b/src/lib/testing/handleTestingError.js @@ -0,0 +1,32 @@ +const { + TestCoverageError, + ValidationError, + CoverageEnforcementError, + ParsingError, +} = require("./errors"); + +function handleTestingError(err, logger = console) { + if (err instanceof ValidationError) { + logger.warn(err.toJSON()); + process.exitCode = 2; + return; + } + if (err instanceof CoverageEnforcementError) { + logger.error(err.toJSON()); + process.exitCode = 3; + return; + } + if (err instanceof ParsingError) { + logger.error(err.toJSON()); + process.exitCode = 4; + return; + } + if (err instanceof TestCoverageError) { + logger.error(err.toJSON()); + process.exitCode = 1; + return; + } + logger.error({ name: err?.name, message: err?.message, err }); + process.exitCode = 1; +} +module.exports = { 
handleTestingError }; diff --git a/src/lib/testing/pgTAPTestScanner.js b/src/lib/testing/pgTAPTestScanner.mjs similarity index 70% rename from src/lib/testing/pgTAPTestScanner.js rename to src/lib/testing/pgTAPTestScanner.mjs index 8582d87..8785871 100644 --- a/src/lib/testing/pgTAPTestScanner.js +++ b/src/lib/testing/pgTAPTestScanner.mjs @@ -1,29 +1,32 @@ /** * pgTAP Test Scanner for D.A.T.A. CLI - * + * * This module provides functionality to scan pgTAP test files and extract test coverage * information. It identifies pgTAP assertions, builds coverage maps, and tracks what * database objects and functionality are being tested. - * + * * @fileoverview pgTAP test file scanner for coverage analysis * @author D.A.T.A. Engineering Team * @version 1.0.0 */ -import { EventEmitter } from 'events'; -import fs from 'fs/promises'; -import path from 'path'; -import { minimatch } from 'minimatch'; +import { EventEmitter } from "events"; +import fs from "fs/promises"; +import path from "path"; +import { minimatch } from "minimatch"; import { ProgressEvent, DirectoryEvent, ErrorEvent, SuccessEvent, - WarningEvent -} from '../events/CommandEvents.js'; -import MemoryMonitor from './MemoryMonitor.js'; -import StreamingCoverageDatabase from './StreamingCoverageDatabase.js'; -import BatchProcessor from './BatchProcessor.js'; + // WarningEvent, +} from "../events/CommandEvents.js"; +import MemoryMonitor from "./MemoryMonitor.js"; +import StreamingCoverageDatabase from "./StreamingCoverageDatabase.js"; +import BatchProcessor from "./BatchProcessor.js"; +import { ValidationError, ParsingError } from "./errors/index.js"; +/* eslint-env node */ +const { process, console, setInterval, clearInterval } = globalThis; /** * @typedef {Object} TestAssertion @@ -60,7 +63,7 @@ import BatchProcessor from './BatchProcessor.js'; * @property {string} filePath - Absolute path to the test file * @property {string} fileName - Name of the test file * @property {TestAssertion[]} assertions - Array of pgTAP assertions found - * @property {number} planCount - Expected number of tests from SELECT plan() + * @property {number} planCount - Expected number of tests from SELECT plan() * @property {string[]} dependencies - Any dependencies or includes found * @property {Object} metadata - Additional file metadata */ @@ -79,17 +82,17 @@ import BatchProcessor from './BatchProcessor.js'; /** * pgTAP Test Scanner Class - * + * * Scans directories of pgTAP test files (.sql) and extracts test coverage information. * Identifies pgTAP assertions, builds coverage maps, and provides insights into what * database objects are being tested. 
- * + * * @extends EventEmitter */ class pgTAPTestScanner extends EventEmitter { /** * Create a new pgTAP test scanner - * + * * @param {Object} [options={}] - Scanner configuration options * @param {boolean} [options.includeCommented=false] - Include commented-out tests * @param {string[]} [options.fileExtensions=['.sql']] - File extensions to scan @@ -102,15 +105,15 @@ class pgTAPTestScanner extends EventEmitter { */ constructor(options = {}) { super(); - + /** * @type {Object} Scanner configuration */ this.options = { includeCommented: false, - fileExtensions: ['.sql'], - includePatterns: ['**/*'], - excludePatterns: ['**/node_modules/**', '**/.git/**', '**/.*'], + fileExtensions: [".sql"], + includePatterns: ["**/*"], + excludePatterns: ["**/node_modules/**", "**/.git/**", "**/.*"], ignorePatterns: [], // Legacy RegExp patterns validatePlans: true, followSymlinks: false, @@ -123,14 +126,14 @@ class pgTAPTestScanner extends EventEmitter { maxObjectsPerType: 10000, // Maximum objects per coverage type enableGC: true, // Enable garbage collection hints enableCompression: false, // Enable data compression (experimental) - ...options + ...options, }; - + /** * @type {TestFile[]} Array of scanned test files */ this.testFiles = []; - + /** * @type {CoverageMap} Coverage analysis results */ @@ -142,9 +145,9 @@ class pgTAPTestScanner extends EventEmitter { policies: {}, indexes: {}, triggers: {}, - filesByTarget: {} + filesByTarget: {}, }; - + /** * @type {Object} Memory management state */ @@ -155,293 +158,472 @@ class pgTAPTestScanner extends EventEmitter { gcCount: 0, batchesProcessed: 0, objectsProcessed: 0, - streamingMode: false + streamingMode: false, }; - + /** * @type {WeakMap} Weak references for cleanup */ this.weakRefs = new WeakMap(); - + /** * @type {AbortController} For cancelling operations + * + * Use global AbortController (Node 16+) so ESLint doesn’t flag no-undef in ESM. */ - this.abortController = new AbortController(); - + this.abortController = new globalThis.AbortController(); + /** * @type {StreamingCoverageDatabase} Memory-aware coverage database */ this.streamingDB = null; - + /** * @type {BatchProcessor} Batch processing utility */ this.batchProcessor = null; - + /** * @type {Map} pgTAP assertion patterns */ this.assertionPatterns = this._initializeAssertionPatterns(); - + /** * @type {RegExp} Pattern for SELECT plan() statements */ this.planPattern = /SELECT\s+plan\s*\(\s*(\d+)\s*\)\s*;?/gi; - + /** * @type {RegExp} Pattern for SQL comments */ this.commentPattern = /--.*$/gm; - + /** * @type {number} Total files processed */ this.filesProcessed = 0; - + /** * @type {number} Total assertions found */ this.totalAssertions = 0; - + // Initialize memory monitoring this._initializeMemoryMonitoring(); } - + /** * Initialize pgTAP assertion patterns - * + * * @returns {Map} Map of assertion types to regex patterns * @private */ _initializeAssertionPatterns() { const patterns = new Map(); - + // Schema testing - patterns.set('has_schema', /SELECT\s+has_schema\s*\(\s*['"`]([^'"`]+)['"`]\s*\)/gi); - patterns.set('hasnt_schema', /SELECT\s+hasnt_schema\s*\(\s*['"`]([^'"`]+)['"`]\s*\)/gi); - + patterns.set( + "has_schema", + /SELECT\s+has_schema\s*\(\s*['"`]([^'"`]+)['"`]\s*\)/gi, + ); + patterns.set( + "hasnt_schema", + /SELECT\s+hasnt_schema\s*\(\s*['"`]([^'"`]+)['"`]\s*\)/gi, + ); + // Table testing - Enhanced patterns to handle more variations // Pattern for SELECT has_table(...) 
with optional schema, table name, and description - patterns.set('has_table_select', /SELECT\s+has_table\s*\(\s*(?:['"`]([^'"`]+)['"`]\s*,\s*)?['"`]([^'"`]+)['"`](?:\s*,\s*['"`]([^'"`]*)['"`])?\s*\)/gi); - patterns.set('hasnt_table_select', /SELECT\s+hasnt_table\s*\(\s*(?:['"`]([^'"`]+)['"`]\s*,\s*)?['"`]([^'"`]+)['"`](?:\s*,\s*['"`]([^'"`]*)['"`])?\s*\)/gi); - + patterns.set( + "has_table_select", + /SELECT\s+has_table\s*\(\s*(?:['"`]([^'"`]+)['"`]\s*,\s*)?['"`]([^'"`]+)['"`](?:\s*,\s*['"`]([^'"`]*)['"`])?\s*\)/gi, + ); + patterns.set( + "hasnt_table_select", + /SELECT\s+hasnt_table\s*\(\s*(?:['"`]([^'"`]+)['"`]\s*,\s*)?['"`]([^'"`]+)['"`](?:\s*,\s*['"`]([^'"`]*)['"`])?\s*\)/gi, + ); + // Pattern for ok(has_table(...), 'description') format - patterns.set('has_table_ok', /ok\s*\(\s*has_table\s*\(\s*(?:['"`]([^'"`]+)['"`]\s*,\s*)?['"`]([^'"`]+)['"`](?:\s*,\s*['"`]([^'"`]*)['"`])?\s*\)\s*(?:,\s*['"`]([^'"`]*)['"`])?\s*\)/gi); - patterns.set('hasnt_table_ok', /ok\s*\(\s*hasnt_table\s*\(\s*(?:['"`]([^'"`]+)['"`]\s*,\s*)?['"`]([^'"`]+)['"`](?:\s*,\s*['"`]([^'"`]*)['"`])?\s*\)\s*(?:,\s*['"`]([^'"`]*)['"`])?\s*\)/gi); - + patterns.set( + "has_table_ok", + /ok\s*\(\s*has_table\s*\(\s*(?:['"`]([^'"`]+)['"`]\s*,\s*)?['"`]([^'"`]+)['"`](?:\s*,\s*['"`]([^'"`]*)['"`])?\s*\)\s*(?:,\s*['"`]([^'"`]*)['"`])?\s*\)/gi, + ); + patterns.set( + "hasnt_table_ok", + /ok\s*\(\s*hasnt_table\s*\(\s*(?:['"`]([^'"`]+)['"`]\s*,\s*)?['"`]([^'"`]+)['"`](?:\s*,\s*['"`]([^'"`]*)['"`])?\s*\)\s*(?:,\s*['"`]([^'"`]*)['"`])?\s*\)/gi, + ); + // Table privilege testing // table_privs_are('table', 'role', ARRAY['privs']) or table_privs_are('schema', 'table', 'role', ARRAY['privs']) - patterns.set('table_privs_are', /SELECT\s+table_privs_are\s*\(\s*(?:['"`]([^'"`]+)['"`]\s*,\s*)?['"`]([^'"`]+)['"`]\s*,\s*['"`]([^'"`]+)['"`]\s*,\s*ARRAY\s*\[\s*(.*?)\s*\](?:\s*,\s*['"`]([^'"`]*)['"`])?\s*\)/gi); - + patterns.set( + "table_privs_are", + /SELECT\s+table_privs_are\s*\(\s*(?:['"`]([^'"`]+)['"`]\s*,\s*)?['"`]([^'"`]+)['"`]\s*,\s*['"`]([^'"`]+)['"`]\s*,\s*ARRAY\s*\[\s*(.*?)\s*\](?:\s*,\s*['"`]([^'"`]*)['"`])?\s*\)/gi, + ); + // Table ownership testing // table_owner_is('table', 'owner') or table_owner_is('schema', 'table', 'owner') - patterns.set('table_owner_is', /SELECT\s+table_owner_is\s*\(\s*(?:['"`]([^'"`]+)['"`]\s*,\s*)?['"`]([^'"`]+)['"`]\s*,\s*['"`]([^'"`]+)['"`](?:\s*,\s*['"`]([^'"`]*)['"`])?\s*\)/gi); - + patterns.set( + "table_owner_is", + /SELECT\s+table_owner_is\s*\(\s*(?:['"`]([^'"`]+)['"`]\s*,\s*)?['"`]([^'"`]+)['"`]\s*,\s*['"`]([^'"`]+)['"`](?:\s*,\s*['"`]([^'"`]*)['"`])?\s*\)/gi, + ); + // Table enumeration testing // tables_are('schema', ARRAY['table1', 'table2']) or tables_are(ARRAY['table1', 'table2']) - patterns.set('tables_are', /SELECT\s+tables_are\s*\(\s*(?:['"`]([^'"`]+)['"`]\s*,\s*)?ARRAY\s*\[\s*(.*?)\s*\](?:\s*,\s*['"`]([^'"`]*)['"`])?\s*\)/gi); - + patterns.set( + "tables_are", + /SELECT\s+tables_are\s*\(\s*(?:['"`]([^'"`]+)['"`]\s*,\s*)?ARRAY\s*\[\s*(.*?)\s*\](?:\s*,\s*['"`]([^'"`]*)['"`])?\s*\)/gi, + ); + // Column testing - patterns.set('has_column', /SELECT\s+has_column\s*\(\s*(?:['"`]([^'"`]+)['"`]\s*,\s*)?['"`]([^'"`]+)['"`]\s*,\s*['"`]([^'"`]+)['"`]\s*\)/gi); - patterns.set('hasnt_column', /SELECT\s+hasnt_column\s*\(\s*(?:['"`]([^'"`]+)['"`]\s*,\s*)?['"`]([^'"`]+)['"`]\s*,\s*['"`]([^'"`]+)['"`]\s*\)/gi); - patterns.set('col_type_is', /SELECT\s+col_type_is\s*\(\s*(?:['"`]([^'"`]+)['"`]\s*,\s*)?['"`]([^'"`]+)['"`]\s*,\s*['"`]([^'"`]+)['"`]\s*,\s*['"`]([^'"`]+)['"`]\s*\)/gi); - patterns.set('col_not_null', 
/SELECT\s+col_not_null\s*\(\s*(?:['"`]([^'"`]+)['"`]\s*,\s*)?['"`]([^'"`]+)['"`]\s*,\s*['"`]([^'"`]+)['"`]\s*\)/gi); - patterns.set('col_is_null', /SELECT\s+col_is_null\s*\(\s*(?:['"`]([^'"`]+)['"`]\s*,\s*)?['"`]([^'"`]+)['"`]\s*,\s*['"`]([^'"`]+)['"`]\s*\)/gi); - patterns.set('col_has_default', /SELECT\s+col_has_default\s*\(\s*(?:['"`]([^'"`]+)['"`]\s*,\s*)?['"`]([^'"`]+)['"`]\s*,\s*['"`]([^'"`]+)['"`]\s*\)/gi); - patterns.set('col_hasnt_default', /SELECT\s+col_hasnt_default\s*\(\s*(?:['"`]([^'"`]+)['"`]\s*,\s*)?['"`]([^'"`]+)['"`]\s*,\s*['"`]([^'"`]+)['"`]\s*\)/gi); - patterns.set('col_default_is', /SELECT\s+col_default_is\s*\(\s*(?:['"`]([^'"`]+)['"`]\s*,\s*)?['"`]([^'"`]+)['"`]\s*,\s*['"`]([^'"`]+)['"`]\s*,\s*((?:[^'"`(),]|['"`][^'"`]*['"`]|\([^)]*\))+)\s*\)/gi); - patterns.set('col_is_pk', /SELECT\s+col_is_pk\s*\(\s*(?:['"`]([^'"`]+)['"`]\s*,\s*)?['"`]([^'"`]+)['"`]\s*,\s*['"`]([^'"`]+)['"`]\s*\)/gi); - patterns.set('col_isnt_pk', /SELECT\s+col_isnt_pk\s*\(\s*(?:['"`]([^'"`]+)['"`]\s*,\s*)?['"`]([^'"`]+)['"`]\s*,\s*['"`]([^'"`]+)['"`]\s*\)/gi); - + patterns.set( + "has_column", + /SELECT\s+has_column\s*\(\s*(?:['"`]([^'"`]+)['"`]\s*,\s*)?['"`]([^'"`]+)['"`]\s*,\s*['"`]([^'"`]+)['"`]\s*\)/gi, + ); + patterns.set( + "hasnt_column", + /SELECT\s+hasnt_column\s*\(\s*(?:['"`]([^'"`]+)['"`]\s*,\s*)?['"`]([^'"`]+)['"`]\s*,\s*['"`]([^'"`]+)['"`]\s*\)/gi, + ); + patterns.set( + "col_type_is", + /SELECT\s+col_type_is\s*\(\s*(?:['"`]([^'"`]+)['"`]\s*,\s*)?['"`]([^'"`]+)['"`]\s*,\s*['"`]([^'"`]+)['"`]\s*,\s*['"`]([^'"`]+)['"`]\s*\)/gi, + ); + patterns.set( + "col_not_null", + /SELECT\s+col_not_null\s*\(\s*(?:['"`]([^'"`]+)['"`]\s*,\s*)?['"`]([^'"`]+)['"`]\s*,\s*['"`]([^'"`]+)['"`]\s*\)/gi, + ); + patterns.set( + "col_is_null", + /SELECT\s+col_is_null\s*\(\s*(?:['"`]([^'"`]+)['"`]\s*,\s*)?['"`]([^'"`]+)['"`]\s*,\s*['"`]([^'"`]+)['"`]\s*\)/gi, + ); + patterns.set( + "col_has_default", + /SELECT\s+col_has_default\s*\(\s*(?:['"`]([^'"`]+)['"`]\s*,\s*)?['"`]([^'"`]+)['"`]\s*,\s*['"`]([^'"`]+)['"`]\s*\)/gi, + ); + patterns.set( + "col_hasnt_default", + /SELECT\s+col_hasnt_default\s*\(\s*(?:['"`]([^'"`]+)['"`]\s*,\s*)?['"`]([^'"`]+)['"`]\s*,\s*['"`]([^'"`]+)['"`]\s*\)/gi, + ); + patterns.set( + "col_default_is", + /SELECT\s+col_default_is\s*\(\s*(?:['"`]([^'"`]+)['"`]\s*,\s*)?['"`]([^'"`]+)['"`]\s*,\s*['"`]([^'"`]+)['"`]\s*,\s*((?:[^'"`(),]|['"`][^'"`]*['"`]|\([^)]*\))+)\s*\)/gi, + ); + patterns.set( + "col_is_pk", + /SELECT\s+col_is_pk\s*\(\s*(?:['"`]([^'"`]+)['"`]\s*,\s*)?['"`]([^'"`]+)['"`]\s*,\s*['"`]([^'"`]+)['"`]\s*\)/gi, + ); + patterns.set( + "col_isnt_pk", + /SELECT\s+col_isnt_pk\s*\(\s*(?:['"`]([^'"`]+)['"`]\s*,\s*)?['"`]([^'"`]+)['"`]\s*,\s*['"`]([^'"`]+)['"`]\s*\)/gi, + ); + // Primary key testing - patterns.set('has_pk', /SELECT\s+has_pk\s*\(\s*(?:['"`]([^'"`]+)['"`]\s*,\s*)?['"`]([^'"`]+)['"`]\s*\)/gi); - patterns.set('hasnt_pk', /SELECT\s+hasnt_pk\s*\(\s*(?:['"`]([^'"`]+)['"`]\s*,\s*)?['"`]([^'"`]+)['"`]\s*\)/gi); - + patterns.set( + "has_pk", + /SELECT\s+has_pk\s*\(\s*(?:['"`]([^'"`]+)['"`]\s*,\s*)?['"`]([^'"`]+)['"`]\s*\)/gi, + ); + patterns.set( + "hasnt_pk", + /SELECT\s+hasnt_pk\s*\(\s*(?:['"`]([^'"`]+)['"`]\s*,\s*)?['"`]([^'"`]+)['"`]\s*\)/gi, + ); + // Foreign key testing - patterns.set('has_fk', /SELECT\s+has_fk\s*\(\s*(?:['"`]([^'"`]+)['"`]\s*,\s*)?['"`]([^'"`]+)['"`]\s*,\s*['"`]([^'"`]+)['"`](?:\s*,\s*['"`]([^'"`]+)['"`])?\s*\)/gi); - patterns.set('hasnt_fk', 
/SELECT\s+hasnt_fk\s*\(\s*(?:['"`]([^'"`]+)['"`]\s*,\s*)?['"`]([^'"`]+)['"`]\s*,\s*['"`]([^'"`]+)['"`](?:\s*,\s*['"`]([^'"`]+)['"`])?\s*\)/gi); - + patterns.set( + "has_fk", + /SELECT\s+has_fk\s*\(\s*(?:['"`]([^'"`]+)['"`]\s*,\s*)?['"`]([^'"`]+)['"`]\s*,\s*['"`]([^'"`]+)['"`](?:\s*,\s*['"`]([^'"`]+)['"`])?\s*\)/gi, + ); + patterns.set( + "hasnt_fk", + /SELECT\s+hasnt_fk\s*\(\s*(?:['"`]([^'"`]+)['"`]\s*,\s*)?['"`]([^'"`]+)['"`]\s*,\s*['"`]([^'"`]+)['"`](?:\s*,\s*['"`]([^'"`]+)['"`])?\s*\)/gi, + ); + // Index testing - Enhanced patterns for comprehensive index coverage // has_index('table', 'index_name') or has_index('schema', 'table', 'index_name') - patterns.set('has_index', /SELECT\s+has_index\s*\(\s*['"`]([^'"`]+)['"`](?:\s*,\s*['"`]([^'"`]+)['"`])?(?:\s*,\s*['"`]([^'"`]+)['"`])?\s*\)/gi); - patterns.set('hasnt_index', /SELECT\s+hasnt_index\s*\(\s*['"`]([^'"`]+)['"`](?:\s*,\s*['"`]([^'"`]+)['"`])?(?:\s*,\s*['"`]([^'"`]+)['"`])?\s*\)/gi); - + patterns.set( + "has_index", + /SELECT\s+has_index\s*\(\s*['"`]([^'"`]+)['"`](?:\s*,\s*['"`]([^'"`]+)['"`])?(?:\s*,\s*['"`]([^'"`]+)['"`])?\s*\)/gi, + ); + patterns.set( + "hasnt_index", + /SELECT\s+hasnt_index\s*\(\s*['"`]([^'"`]+)['"`](?:\s*,\s*['"`]([^'"`]+)['"`])?(?:\s*,\s*['"`]([^'"`]+)['"`])?\s*\)/gi, + ); + // index_is_on('table', 'index', ARRAY['column']) - tests what columns an index covers - patterns.set('index_is_on', /SELECT\s+index_is_on\s*\(\s*(?:['"`]([^'"`]+)['"`]\s*,\s*)?['"`]([^'"`]+)['"`]\s*,\s*['"`]([^'"`]+)['"`]\s*,\s*ARRAY\s*\[\s*(.*?)\s*\]\s*\)/gi); - + patterns.set( + "index_is_on", + /SELECT\s+index_is_on\s*\(\s*(?:['"`]([^'"`]+)['"`]\s*,\s*)?['"`]([^'"`]+)['"`]\s*,\s*['"`]([^'"`]+)['"`]\s*,\s*ARRAY\s*\[\s*(.*?)\s*\]\s*\)/gi, + ); + // index_is_type('table', 'index', 'type') - tests index type (btree, gin, etc.) 
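    // Reviewer note (illustrative, not part of the patch): has_index/hasnt_index accept
    // either two or three quoted arguments, so the pattern above captures an optional
    // leading schema. For a hypothetical test file containing:
    //   SELECT has_index('public', 'users', 'users_email_idx');
    //   SELECT has_index('users', 'users_email_idx');
    // the filtered capture groups are ['public', 'users', 'users_email_idx'] and
    // ['users', 'users_email_idx'] respectively; _determineTarget() later decides from
    // the argument count whether the first value is a schema or a table name.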
- patterns.set('index_is_type', /SELECT\s+index_is_type\s*\(\s*(?:['"`]([^'"`]+)['"`]\s*,\s*)?['"`]([^'"`]+)['"`]\s*,\s*['"`]([^'"`]+)['"`]\s*,\s*['"`]([^'"`]+)['"`]\s*\)/gi); - + patterns.set( + "index_is_type", + /SELECT\s+index_is_type\s*\(\s*(?:['"`]([^'"`]+)['"`]\s*,\s*)?['"`]([^'"`]+)['"`]\s*,\s*['"`]([^'"`]+)['"`]\s*,\s*['"`]([^'"`]+)['"`]\s*\)/gi, + ); + // has_unique('table', 'constraint_name') - tests unique constraints - patterns.set('has_unique', /SELECT\s+has_unique\s*\(\s*(?:['"`]([^'"`]+)['"`]\s*,\s*)?['"`]([^'"`]+)['"`](?:\s*,\s*['"`]([^'"`]+)['"`])?\s*\)/gi); - patterns.set('hasnt_unique', /SELECT\s+hasnt_unique\s*\(\s*(?:['"`]([^'"`]+)['"`]\s*,\s*)?['"`]([^'"`]+)['"`](?:\s*,\s*['"`]([^'"`]+)['"`])?\s*\)/gi); - + patterns.set( + "has_unique", + /SELECT\s+has_unique\s*\(\s*(?:['"`]([^'"`]+)['"`]\s*,\s*)?['"`]([^'"`]+)['"`](?:\s*,\s*['"`]([^'"`]+)['"`])?\s*\)/gi, + ); + patterns.set( + "hasnt_unique", + /SELECT\s+hasnt_unique\s*\(\s*(?:['"`]([^'"`]+)['"`]\s*,\s*)?['"`]([^'"`]+)['"`](?:\s*,\s*['"`]([^'"`]+)['"`])?\s*\)/gi, + ); + // index_is_primary('table', 'index') - tests if index is primary key - patterns.set('index_is_primary', /SELECT\s+index_is_primary\s*\(\s*(?:['"`]([^'"`]+)['"`]\s*,\s*)?['"`]([^'"`]+)['"`](?:\s*,\s*['"`]([^'"`]+)['"`])?\s*\)/gi); - + patterns.set( + "index_is_primary", + /SELECT\s+index_is_primary\s*\(\s*(?:['"`]([^'"`]+)['"`]\s*,\s*)?['"`]([^'"`]+)['"`](?:\s*,\s*['"`]([^'"`]+)['"`])?\s*\)/gi, + ); + // Function testing - patterns.set('has_function', /SELECT\s+has_function\s*\(\s*(?:['"`]([^'"`]+)['"`]\s*,\s*)?['"`]([^'"`]+)['"`](?:\s*,\s*ARRAY\[(.*?)\])?\s*(?:\s*,\s*['"`]([^'"`]*)['"`])?\s*\)/gi); - patterns.set('hasnt_function', /SELECT\s+hasnt_function\s*\(\s*(?:['"`]([^'"`]+)['"`]\s*,\s*)?['"`]([^'"`]+)['"`](?:\s*,\s*ARRAY\[(.*?)\])?\s*(?:\s*,\s*['"`]([^'"`]*)['"`])?\s*\)/gi); - patterns.set('function_returns', /SELECT\s+function_returns\s*\(\s*(?:['"`]([^'"`]+)['"`]\s*,\s*)?['"`]([^'"`]+)['"`](?:\s*,\s*ARRAY\[(.*?)\])?\s*,\s*['"`]([^'"`]+)['"`](?:\s*,\s*['"`]([^'"`]*)['"`])?\s*\)/gi); - patterns.set('function_lang_is', /SELECT\s+function_lang_is\s*\(\s*(?:['"`]([^'"`]+)['"`]\s*,\s*)?['"`]([^'"`]+)['"`](?:\s*,\s*ARRAY\[(.*?)\])?\s*,\s*['"`]([^'"`]+)['"`](?:\s*,\s*['"`]([^'"`]*)['"`])?\s*\)/gi); - patterns.set('is_definer', /SELECT\s+is_definer\s*\(\s*(?:['"`]([^'"`]+)['"`]\s*,\s*)?['"`]([^'"`]+)['"`](?:\s*,\s*ARRAY\[(.*?)\])?\s*(?:\s*,\s*['"`]([^'"`]*)['"`])?\s*\)/gi); - patterns.set('isnt_definer', /SELECT\s+isnt_definer\s*\(\s*(?:['"`]([^'"`]+)['"`]\s*,\s*)?['"`]([^'"`]+)['"`](?:\s*,\s*ARRAY\[(.*?)\])?\s*(?:\s*,\s*['"`]([^'"`]*)['"`])?\s*\)/gi); - patterns.set('volatility_is', /SELECT\s+volatility_is\s*\(\s*(?:['"`]([^'"`]+)['"`]\s*,\s*)?['"`]([^'"`]+)['"`](?:\s*,\s*ARRAY\[(.*?)\])?\s*,\s*['"`]([^'"`]+)['"`](?:\s*,\s*['"`]([^'"`]*)['"`])?\s*\)/gi); - patterns.set('function_privs_are', /SELECT\s+function_privs_are\s*\(\s*(?:['"`]([^'"`]+)['"`]\s*,\s*)?['"`]([^'"`]+)['"`](?:\s*,\s*ARRAY\[(.*?)\])?\s*,\s*['"`]([^'"`]+)['"`]\s*,\s*ARRAY\[(.*?)\](?:\s*,\s*['"`]([^'"`]*)['"`])?\s*\)/gi); - + patterns.set( + "has_function", + /SELECT\s+has_function\s*\(\s*(?:['"`]([^'"`]+)['"`]\s*,\s*)?['"`]([^'"`]+)['"`](?:\s*,\s*ARRAY\[(.*?)\])?\s*(?:\s*,\s*['"`]([^'"`]*)['"`])?\s*\)/gi, + ); + patterns.set( + "hasnt_function", + /SELECT\s+hasnt_function\s*\(\s*(?:['"`]([^'"`]+)['"`]\s*,\s*)?['"`]([^'"`]+)['"`](?:\s*,\s*ARRAY\[(.*?)\])?\s*(?:\s*,\s*['"`]([^'"`]*)['"`])?\s*\)/gi, + ); + patterns.set( + "function_returns", + 
/SELECT\s+function_returns\s*\(\s*(?:['"`]([^'"`]+)['"`]\s*,\s*)?['"`]([^'"`]+)['"`](?:\s*,\s*ARRAY\[(.*?)\])?\s*,\s*['"`]([^'"`]+)['"`](?:\s*,\s*['"`]([^'"`]*)['"`])?\s*\)/gi, + ); + patterns.set( + "function_lang_is", + /SELECT\s+function_lang_is\s*\(\s*(?:['"`]([^'"`]+)['"`]\s*,\s*)?['"`]([^'"`]+)['"`](?:\s*,\s*ARRAY\[(.*?)\])?\s*,\s*['"`]([^'"`]+)['"`](?:\s*,\s*['"`]([^'"`]*)['"`])?\s*\)/gi, + ); + patterns.set( + "is_definer", + /SELECT\s+is_definer\s*\(\s*(?:['"`]([^'"`]+)['"`]\s*,\s*)?['"`]([^'"`]+)['"`](?:\s*,\s*ARRAY\[(.*?)\])?\s*(?:\s*,\s*['"`]([^'"`]*)['"`])?\s*\)/gi, + ); + patterns.set( + "isnt_definer", + /SELECT\s+isnt_definer\s*\(\s*(?:['"`]([^'"`]+)['"`]\s*,\s*)?['"`]([^'"`]+)['"`](?:\s*,\s*ARRAY\[(.*?)\])?\s*(?:\s*,\s*['"`]([^'"`]*)['"`])?\s*\)/gi, + ); + patterns.set( + "volatility_is", + /SELECT\s+volatility_is\s*\(\s*(?:['"`]([^'"`]+)['"`]\s*,\s*)?['"`]([^'"`]+)['"`](?:\s*,\s*ARRAY\[(.*?)\])?\s*,\s*['"`]([^'"`]+)['"`](?:\s*,\s*['"`]([^'"`]*)['"`])?\s*\)/gi, + ); + patterns.set( + "function_privs_are", + /SELECT\s+function_privs_are\s*\(\s*(?:['"`]([^'"`]+)['"`]\s*,\s*)?['"`]([^'"`]+)['"`](?:\s*,\s*ARRAY\[(.*?)\])?\s*,\s*['"`]([^'"`]+)['"`]\s*,\s*ARRAY\[(.*?)\](?:\s*,\s*['"`]([^'"`]*)['"`])?\s*\)/gi, + ); + // View testing - patterns.set('has_view', /SELECT\s+has_view\s*\(\s*(?:['"`]([^'"`]+)['"`]\s*,\s*)?['"`]([^'"`]+)['"`]\s*\)/gi); - patterns.set('hasnt_view', /SELECT\s+hasnt_view\s*\(\s*(?:['"`]([^'"`]+)['"`]\s*,\s*)?['"`]([^'"`]+)['"`]\s*\)/gi); - + patterns.set( + "has_view", + /SELECT\s+has_view\s*\(\s*(?:['"`]([^'"`]+)['"`]\s*,\s*)?['"`]([^'"`]+)['"`]\s*\)/gi, + ); + patterns.set( + "hasnt_view", + /SELECT\s+hasnt_view\s*\(\s*(?:['"`]([^'"`]+)['"`]\s*,\s*)?['"`]([^'"`]+)['"`]\s*\)/gi, + ); + // Type testing - patterns.set('has_type', /SELECT\s+has_type\s*\(\s*(?:['"`]([^'"`]+)['"`]\s*,\s*)?['"`]([^'"`]+)['"`]\s*\)/gi); - patterns.set('hasnt_type', /SELECT\s+hasnt_type\s*\(\s*(?:['"`]([^'"`]+)['"`]\s*,\s*)?['"`]([^'"`]+)['"`]\s*\)/gi); - + patterns.set( + "has_type", + /SELECT\s+has_type\s*\(\s*(?:['"`]([^'"`]+)['"`]\s*,\s*)?['"`]([^'"`]+)['"`]\s*\)/gi, + ); + patterns.set( + "hasnt_type", + /SELECT\s+hasnt_type\s*\(\s*(?:['"`]([^'"`]+)['"`]\s*,\s*)?['"`]([^'"`]+)['"`]\s*\)/gi, + ); + // Result testing - patterns.set('results_eq', /SELECT\s+results_eq\s*\(\s*['"`]([^'"`]+)['"`]\s*,\s*['"`]([^'"`]+)['"`](?:\s*,\s*['"`]([^'"`]+)['"`])?\s*\)/gi); - patterns.set('results_ne', /SELECT\s+results_ne\s*\(\s*['"`]([^'"`]+)['"`]\s*,\s*['"`]([^'"`]+)['"`](?:\s*,\s*['"`]([^'"`]+)['"`])?\s*\)/gi); - + patterns.set( + "results_eq", + /SELECT\s+results_eq\s*\(\s*['"`]([^'"`]+)['"`]\s*,\s*['"`]([^'"`]+)['"`](?:\s*,\s*['"`]([^'"`]+)['"`])?\s*\)/gi, + ); + patterns.set( + "results_ne", + /SELECT\s+results_ne\s*\(\s*['"`]([^'"`]+)['"`]\s*,\s*['"`]([^'"`]+)['"`](?:\s*,\s*['"`]([^'"`]+)['"`])?\s*\)/gi, + ); + // RLS (Row Level Security) policy testing - patterns.set('is_rls_enabled', /SELECT\s+is_rls_enabled\s*\(\s*(?:['"`]([^'"`]+)['"`]\s*,\s*)?['"`]([^'"`]+)['"`]\s*\)/gi); - patterns.set('policy_exists', /SELECT\s+policy_exists\s*\(\s*(?:['"`]([^'"`]+)['"`]\s*,\s*)?['"`]([^'"`]+)['"`]\s*,\s*['"`]([^'"`]+)['"`]\s*\)/gi); - patterns.set('policy_cmd_is', /SELECT\s+policy_cmd_is\s*\(\s*(?:['"`]([^'"`]+)['"`]\s*,\s*)?['"`]([^'"`]+)['"`]\s*,\s*['"`]([^'"`]+)['"`]\s*,\s*['"`]([^'"`]+)['"`]\s*\)/gi); - patterns.set('policy_roles_are', /SELECT\s+policy_roles_are\s*\(\s*(?:['"`]([^'"`]+)['"`]\s*,\s*)?['"`]([^'"`]+)['"`]\s*,\s*['"`]([^'"`]+)['"`]\s*,\s*ARRAY\[(.*?)\]\s*\)/gi); - 
patterns.set('policies_are', /SELECT\s+policies_are\s*\(\s*(?:['"`]([^'"`]+)['"`]\s*,\s*)?['"`]([^'"`]+)['"`]\s*,\s*ARRAY\[(.*?)\](?:\s*,\s*['"`]([^'"`]+)['"`])?\s*\)/gi); - + patterns.set( + "is_rls_enabled", + /SELECT\s+is_rls_enabled\s*\(\s*(?:['"`]([^'"`]+)['"`]\s*,\s*)?['"`]([^'"`]+)['"`]\s*\)/gi, + ); + patterns.set( + "policy_exists", + /SELECT\s+policy_exists\s*\(\s*(?:['"`]([^'"`]+)['"`]\s*,\s*)?['"`]([^'"`]+)['"`]\s*,\s*['"`]([^'"`]+)['"`]\s*\)/gi, + ); + patterns.set( + "policy_cmd_is", + /SELECT\s+policy_cmd_is\s*\(\s*(?:['"`]([^'"`]+)['"`]\s*,\s*)?['"`]([^'"`]+)['"`]\s*,\s*['"`]([^'"`]+)['"`]\s*,\s*['"`]([^'"`]+)['"`]\s*\)/gi, + ); + patterns.set( + "policy_roles_are", + /SELECT\s+policy_roles_are\s*\(\s*(?:['"`]([^'"`]+)['"`]\s*,\s*)?['"`]([^'"`]+)['"`]\s*,\s*['"`]([^'"`]+)['"`]\s*,\s*ARRAY\[(.*?)\]\s*\)/gi, + ); + patterns.set( + "policies_are", + /SELECT\s+policies_are\s*\(\s*(?:['"`]([^'"`]+)['"`]\s*,\s*)?['"`]([^'"`]+)['"`]\s*,\s*ARRAY\[(.*?)\](?:\s*,\s*['"`]([^'"`]+)['"`])?\s*\)/gi, + ); + // Trigger testing // has_trigger('table', 'trigger_name') or has_trigger('schema', 'table', 'trigger_name') // Also supports optional description: has_trigger('table', 'trigger', 'description') - patterns.set('has_trigger', /SELECT\s+has_trigger\s*\(\s*['"`]([^'"`]+)['"`]\s*,\s*['"`]([^'"`]+)['"`](?:\s*,\s*['"`]([^'"`]+)['"`])?(?:\s*,\s*['"`]([^'"`]*)['"`])?\s*\)/gi); - patterns.set('hasnt_trigger', /SELECT\s+hasnt_trigger\s*\(\s*['"`]([^'"`]+)['"`]\s*,\s*['"`]([^'"`]+)['"`](?:\s*,\s*['"`]([^'"`]+)['"`])?(?:\s*,\s*['"`]([^'"`]*)['"`])?\s*\)/gi); - + patterns.set( + "has_trigger", + /SELECT\s+has_trigger\s*\(\s*['"`]([^'"`]+)['"`]\s*,\s*['"`]([^'"`]+)['"`](?:\s*,\s*['"`]([^'"`]+)['"`])?(?:\s*,\s*['"`]([^'"`]*)['"`])?\s*\)/gi, + ); + patterns.set( + "hasnt_trigger", + /SELECT\s+hasnt_trigger\s*\(\s*['"`]([^'"`]+)['"`]\s*,\s*['"`]([^'"`]+)['"`](?:\s*,\s*['"`]([^'"`]+)['"`])?(?:\s*,\s*['"`]([^'"`]*)['"`])?\s*\)/gi, + ); + // trigger_is('table', 'trigger', 'function') or trigger_is('schema', 'table', 'trigger', 'func_schema', 'function') - patterns.set('trigger_is', /SELECT\s+trigger_is\s*\(\s*(?:['"`]([^'"`]+)['"`]\s*,\s*)?['"`]([^'"`]+)['"`]\s*,\s*['"`]([^'"`]+)['"`]\s*,\s*(?:['"`]([^'"`]+)['"`]\s*,\s*)?['"`]([^'"`]+)['"`](?:\s*,\s*['"`]([^'"`]*)['"`])?\s*\)/gi); - + patterns.set( + "trigger_is", + /SELECT\s+trigger_is\s*\(\s*(?:['"`]([^'"`]+)['"`]\s*,\s*)?['"`]([^'"`]+)['"`]\s*,\s*['"`]([^'"`]+)['"`]\s*,\s*(?:['"`]([^'"`]+)['"`]\s*,\s*)?['"`]([^'"`]+)['"`](?:\s*,\s*['"`]([^'"`]*)['"`])?\s*\)/gi, + ); + // is_trigger_on('table', 'trigger', 'events') - tests trigger events (INSERT, UPDATE, DELETE) - patterns.set('is_trigger_on', /SELECT\s+is_trigger_on\s*\(\s*(?:['"`]([^'"`]+)['"`]\s*,\s*)?['"`]([^'"`]+)['"`]\s*,\s*['"`]([^'"`]+)['"`]\s*,\s*['"`]([^'"`]+)['"`](?:\s*,\s*['"`]([^'"`]*)['"`])?\s*\)/gi); - + patterns.set( + "is_trigger_on", + /SELECT\s+is_trigger_on\s*\(\s*(?:['"`]([^'"`]+)['"`]\s*,\s*)?['"`]([^'"`]+)['"`]\s*,\s*['"`]([^'"`]+)['"`]\s*,\s*['"`]([^'"`]+)['"`](?:\s*,\s*['"`]([^'"`]*)['"`])?\s*\)/gi, + ); + // trigger_fires_on('table', 'trigger', 'timing') - tests trigger timing (BEFORE, AFTER, INSTEAD OF) - patterns.set('trigger_fires_on', /SELECT\s+trigger_fires_on\s*\(\s*(?:['"`]([^'"`]+)['"`]\s*,\s*)?['"`]([^'"`]+)['"`]\s*,\s*['"`]([^'"`]+)['"`]\s*,\s*['"`]([^'"`]+)['"`](?:\s*,\s*['"`]([^'"`]*)['"`])?\s*\)/gi); - + patterns.set( + "trigger_fires_on", + 
/SELECT\s+trigger_fires_on\s*\(\s*(?:['"`]([^'"`]+)['"`]\s*,\s*)?['"`]([^'"`]+)['"`]\s*,\s*['"`]([^'"`]+)['"`]\s*,\s*['"`]([^'"`]+)['"`](?:\s*,\s*['"`]([^'"`]*)['"`])?\s*\)/gi, + ); + // trigger_is_for('table', 'trigger', 'level') - tests trigger level (ROW, STATEMENT) - patterns.set('trigger_is_for', /SELECT\s+trigger_is_for\s*\(\s*(?:['"`]([^'"`]+)['"`]\s*,\s*)?['"`]([^'"`]+)['"`]\s*,\s*['"`]([^'"`]+)['"`]\s*,\s*['"`]([^'"`]+)['"`](?:\s*,\s*['"`]([^'"`]*)['"`])?\s*\)/gi); - + patterns.set( + "trigger_is_for", + /SELECT\s+trigger_is_for\s*\(\s*(?:['"`]([^'"`]+)['"`]\s*,\s*)?['"`]([^'"`]+)['"`]\s*,\s*['"`]([^'"`]+)['"`]\s*,\s*['"`]([^'"`]+)['"`](?:\s*,\s*['"`]([^'"`]*)['"`])?\s*\)/gi, + ); + // triggers_are('table', ARRAY['trigger_names']) - tests all triggers on a table - patterns.set('triggers_are', /SELECT\s+triggers_are\s*\(\s*(?:['"`]([^'"`]+)['"`]\s*,\s*)?['"`]([^'"`]+)['"`]\s*,\s*ARRAY\s*\[\s*(.*?)\s*\](?:\s*,\s*['"`]([^'"`]*)['"`])?\s*\)/gi); - + patterns.set( + "triggers_are", + /SELECT\s+triggers_are\s*\(\s*(?:['"`]([^'"`]+)['"`]\s*,\s*)?['"`]([^'"`]+)['"`]\s*,\s*ARRAY\s*\[\s*(.*?)\s*\](?:\s*,\s*['"`]([^'"`]*)['"`])?\s*\)/gi, + ); + return patterns; } - + /** * Scan a directory for pgTAP test files - * + * * @param {string} testsDir - Directory to scan for test files * @returns {Promise} Array of parsed test files * @throws {Error} If directory doesn't exist or is not accessible */ async scanDirectory(testsDir) { const startTime = Date.now(); - + try { // Emit directory scanning event const dirEvent = DirectoryEvent.scan(testsDir); - this.emit('directory', { + this.emit("directory", { message: dirEvent.message, data: dirEvent.details, timestamp: dirEvent.timestamp, - type: dirEvent.type + type: dirEvent.type, }); - + // Check if directory exists const stat = await fs.stat(testsDir); if (!stat.isDirectory()) { - throw new Error(`Path is not a directory: ${testsDir}`); + throw new ParsingError(`Path is not a directory: ${testsDir}`); } - + // Find all test files const testFiles = await this._findTestFiles(testsDir); - + if (testFiles.length === 0) { - this.emit('warning', { - message: 'No test files found', + this.emit("warning", { + message: "No test files found", data: { testsDir, extensions: this.options.fileExtensions }, timestamp: new Date(), - type: 'warning' + type: "warning", }); return []; } - + // Process each test file this.testFiles = []; this.filesProcessed = 0; this.totalAssertions = 0; - + for (let i = 0; i < testFiles.length; i++) { const filePath = testFiles[i]; - - this.emit('progress', { + + this.emit("progress", { message: `Parsing test file: ${path.basename(filePath)}`, - data: { + data: { filePath, filesProcessed: i, - totalFiles: testFiles.length + totalFiles: testFiles.length, }, timestamp: new Date(), - type: 'progress', - percentage: Math.round((i / testFiles.length) * 100) + type: "progress", + percentage: Math.round((i / testFiles.length) * 100), }); - + try { const testFile = await this.parseTestFile(filePath); this.testFiles.push(testFile); this.totalAssertions += testFile.assertions.length; } catch (error) { - const errorEvent = ErrorEvent.fromError(error, `Failed to parse test file: ${filePath}`); - this.emit('error', { + const errorEvent = ErrorEvent.fromError( + error, + `Failed to parse test file: ${filePath}`, + ); + this.emit("error", { message: errorEvent.message, error: errorEvent.error, data: { filePath, ...errorEvent.details }, timestamp: errorEvent.timestamp, - type: errorEvent.type + type: errorEvent.type, }); } } - + // Build coverage map 
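      // Reviewer note (illustrative, not part of the patch): the coverage map built below
      // groups every parsed assertion by database object. A hypothetical table entry
      // (all names are examples only):
      //
      //   coverageMap.tables['public.users'] = {
      //     assertionTypes: ['has_table_select', 'table_owner_is'],
      //     schema: 'public',
      //     tableName: 'users',
      //     descriptions: ['users table should exist'],
      //     testFiles: ['001_users_test.sql']
      //   };
      //   coverageMap.filesByTarget['public.users'] = [ /* parsed test file objects */ ];
      //
      // Non-table categories (schemas, columns, functions, policies, indexes, triggers)
      // are initialized as plain arrays per target rather than this richer record.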
this._buildCoverageMap(); - + const duration = Date.now() - startTime; const successEvent = new SuccessEvent( `Scanned ${this.filesProcessed} test files and found ${this.totalAssertions} assertions`, - { testsDir, filesProcessed: this.filesProcessed, totalAssertions: this.totalAssertions }, - duration + { + testsDir, + filesProcessed: this.filesProcessed, + totalAssertions: this.totalAssertions, + }, + duration, ); - - this.emit('success', { + + this.emit("success", { message: successEvent.message, data: successEvent.details, timestamp: successEvent.timestamp, - type: successEvent.type + type: successEvent.type, }); - + return this.testFiles; - } catch (error) { - const errorEvent = ErrorEvent.fromError(error, `Failed to scan tests directory: ${testsDir}`); - this.emit('error', { + const errorEvent = ErrorEvent.fromError( + error, + `Failed to scan tests directory: ${testsDir}`, + ); + this.emit("error", { message: errorEvent.message, error: errorEvent.error, data: errorEvent.details, timestamp: errorEvent.timestamp, - type: errorEvent.type + type: errorEvent.type, }); throw error; } } - + /** * Parse an individual test file - * + * * @param {string} filePath - Path to the test file to parse * @returns {Promise} Parsed test file information * @throws {Error} If file cannot be read or parsed @@ -449,33 +631,37 @@ class pgTAPTestScanner extends EventEmitter { async parseTestFile(filePath) { try { // Read file content - const content = await fs.readFile(filePath, 'utf8'); - + const content = await fs.readFile(filePath, "utf8"); + // Extract test plan const planCount = this._extractPlan(content); - + // Extract assertions const assertions = this.extractAssertions(content); - + // Extract dependencies (basic implementation) const dependencies = this._extractDependencies(content); - + // Validate plan count if enabled - if (this.options.validatePlans && planCount !== null && planCount !== assertions.length) { - this.emit('warning', { + if ( + this.options.validatePlans && + planCount !== null && + planCount !== assertions.length + ) { + this.emit("warning", { message: `Plan count mismatch in ${path.basename(filePath)}`, - data: { - filePath, - plannedTests: planCount, - foundAssertions: assertions.length + data: { + filePath, + plannedTests: planCount, + foundAssertions: assertions.length, }, timestamp: new Date(), - type: 'warning' + type: "warning", }); } - + this.filesProcessed++; - + const testFile = { filePath, fileName: path.basename(filePath), @@ -484,100 +670,119 @@ class pgTAPTestScanner extends EventEmitter { dependencies, metadata: { size: content.length, - lines: content.split('\n').length, - parsed: new Date() - } + lines: content.split("\n").length, + parsed: new Date(), + }, }; - + return testFile; - } catch (error) { - throw new Error(`Failed to parse test file ${filePath}: ${error.message}`); + throw new ParsingError( + `Failed to parse test file ${filePath}: ${error.message}`, + ); } } - + /** * Extract pgTAP assertions from SQL content - * + * * @param {string} sql - SQL content to analyze * @returns {TestAssertion[]} Array of extracted assertions */ extractAssertions(sql) { const assertions = []; - + // Remove comments if not including commented tests let processedSql = sql; if (!this.options.includeCommented) { - processedSql = sql.replace(this.commentPattern, ''); + processedSql = sql.replace(this.commentPattern, ""); } - + // Split into lines for line number tracking - const lines = processedSql.split('\n'); - + // const lines = processedSql.split("\n"); // Search for each 
assertion pattern for (const [assertionType, pattern] of this.assertionPatterns) { let match; - + // Reset regex state pattern.lastIndex = 0; - + while ((match = pattern.exec(processedSql)) !== null) { // Find line number const beforeMatch = processedSql.substring(0, match.index); - const lineNumber = beforeMatch.split('\n').length; - + const lineNumber = beforeMatch.split("\n").length; + // Extract parameters (filter out undefined captures) - const parameters = match.slice(1).filter(param => param !== undefined); - + const parameters = match + .slice(1) + .filter((param) => param !== undefined); + // Clean parameters for specific assertion types this._cleanParameters(assertionType, parameters); - + // Determine target based on assertion type const target = this._determineTarget(assertionType, parameters); - + const assertion = { type: assertionType, target, parameters, lineNumber, - rawSql: match[0].trim() + rawSql: match[0].trim(), }; - + // Add function metadata for function-related assertions - if (assertionType.includes('function') || assertionType.includes('definer') || assertionType === 'volatility_is') { - assertion.functionMetadata = this._extractFunctionMetadata(assertionType, parameters); + if ( + assertionType.includes("function") || + assertionType.includes("definer") || + assertionType === "volatility_is" + ) { + assertion.functionMetadata = this._extractFunctionMetadata( + assertionType, + parameters, + ); } - + // Add table metadata for table-related assertions - if (assertionType.includes('table')) { - const metadata = this._extractAssertionMetadata(assertionType, parameters); + if (assertionType.includes("table")) { + const metadata = this._extractAssertionMetadata( + assertionType, + parameters, + ); Object.assign(assertion, metadata); } - + // Add RLS policy metadata for policy-related assertions - if (assertionType.includes('policy') || assertionType.includes('policies') || assertionType === 'is_rls_enabled') { - assertion.policyMetadata = this._extractPolicyMetadata(assertionType, parameters); + if ( + assertionType.includes("policy") || + assertionType.includes("policies") || + assertionType === "is_rls_enabled" + ) { + assertion.policyMetadata = this._extractPolicyMetadata( + assertionType, + parameters, + ); } - + assertions.push(assertion); } } - + return assertions.sort((a, b) => a.lineNumber - b.lineNumber); } - + /** * Get the current coverage map - * + * * @returns {CoverageMap} Coverage analysis results */ getCoverageMap() { return { ...this.coverageMap }; } - + /** * Get statistics about the scanned tests - * + * * @returns {Object} Test statistics */ getStatistics() { @@ -585,10 +790,10 @@ class pgTAPTestScanner extends EventEmitter { filesScanned: this.filesProcessed, totalAssertions: this.totalAssertions, assertionTypes: this._getAssertionTypeStats(), - coverageStats: this._getCoverageStats() + coverageStats: this._getCoverageStats(), }; } - + /** * Reset scanner state */ @@ -602,17 +807,17 @@ class pgTAPTestScanner extends EventEmitter { policies: {}, indexes: {}, triggers: {}, - filesByTarget: {} + filesByTarget: {}, }; this.filesProcessed = 0; this.totalAssertions = 0; } - + // Private methods - + /** * Find all test files in directory recursively - * + * * @param {string} dir - Directory to search * @param {number} [depth=0] - Current recursion depth * @returns {Promise} Array of test file paths @@ -621,29 +826,29 @@ class pgTAPTestScanner extends EventEmitter { async _findTestFiles(dir, depth = 0) { const files = []; const startTime = Date.now(); - + 
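    // Reviewer note (illustrative, not part of the patch): discovery below is depth-first
    // and deterministic -- directory entries are sorted with directories before files,
    // recursion is capped by options.maxDepth, hidden entries are skipped unless an
    // include pattern explicitly matches them, and symlinks are only followed when
    // options.followSymlinks is set. For a hypothetical layout:
    //
    //   tests/
    //     001_schema.test.sql
    //     rls/
    //       002_policies.test.sql
    //     fixtures/          <- assumed to be excluded via options.excludePatterns
    //
    // scanning would yield ['tests/001_schema.test.sql', 'tests/rls/002_policies.test.sql'],
    // assuming '.sql' is listed in options.fileExtensions and the include patterns match.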
// Check depth limit if (depth > this.options.maxDepth) { - this.emit('warning', { + this.emit("warning", { message: `Maximum depth ${this.options.maxDepth} reached, skipping: ${dir}`, data: { dir, depth, maxDepth: this.options.maxDepth }, timestamp: new Date(), - type: 'warning' + type: "warning", }); return files; } - + try { // Emit progress for directory scanning - this.emit('progress', { + this.emit("progress", { message: `Scanning directory: ${path.relative(process.cwd(), dir)}`, data: { dir, depth }, timestamp: new Date(), - type: 'progress' + type: "progress", }); - + const entries = await fs.readdir(dir, { withFileTypes: true }); - + // Process entries in sorted order for consistency const sortedEntries = entries.sort((a, b) => { // Directories first, then files, alphabetically within each group @@ -651,48 +856,58 @@ class pgTAPTestScanner extends EventEmitter { if (!a.isDirectory() && b.isDirectory()) return 1; return a.name.localeCompare(b.name); }); - + for (const entry of sortedEntries) { const fullPath = path.join(dir, entry.name); // Use relative path from the original tests directory being scanned const testsRootDir = arguments.length > 1 ? arguments[2] : dir; // Pass root as 3rd param in recursion const relativePath = path.relative(testsRootDir, fullPath); - + try { // Skip hidden files and directories unless explicitly included - if (entry.name.startsWith('.') && !this._shouldIncludeHidden(relativePath)) { + if ( + entry.name.startsWith(".") && + !this._shouldIncludeHidden(relativePath) + ) { continue; } - + // Check exclude patterns first (more efficient) if (this._isExcluded(relativePath)) { continue; } - + if (entry.isDirectory()) { // Recursively search subdirectories, passing the root directory - const subFiles = await this._findTestFiles(fullPath, depth + 1, testsRootDir); + const subFiles = await this._findTestFiles( + fullPath, + depth + 1, + testsRootDir, + ); files.push(...subFiles); - } else if (entry.isFile()) { // Check if file should be included if (await this._shouldIncludeFile(fullPath, relativePath)) { files.push(fullPath); } - } else if (entry.isSymbolicLink() && this.options.followSymlinks) { // Handle symbolic links if enabled - await this._handleSymlink(fullPath, relativePath, files, depth, testsRootDir); + await this._handleSymlink( + fullPath, + relativePath, + files, + depth, + testsRootDir, + ); } - } catch (error) { // Handle permission errors gracefully - if (error.code === 'EACCES' || error.code === 'EPERM') { - this.emit('warning', { + if (error.code === "EACCES" || error.code === "EPERM") { + this.emit("warning", { message: `Permission denied accessing: ${relativePath}`, data: { path: fullPath, error: error.code }, timestamp: new Date(), - type: 'warning' + type: "warning", }); } else { // Re-throw unexpected errors @@ -700,35 +915,36 @@ class pgTAPTestScanner extends EventEmitter { } } } - + // Emit progress for completed directory const duration = Date.now() - startTime; - this.emit('progress', { + this.emit("progress", { message: `Completed scanning: ${path.relative(process.cwd(), dir)} (${files.length} files, ${duration}ms)`, data: { dir, filesFound: files.length, duration, depth }, timestamp: new Date(), - type: 'progress' + type: "progress", }); - } catch (error) { - if (error.code === 'EACCES' || error.code === 'EPERM') { - this.emit('warning', { + if (error.code === "EACCES" || error.code === "EPERM") { + this.emit("warning", { message: `Permission denied reading directory: ${dir}`, data: { dir, error: error.code }, timestamp: new 
Date(), - type: 'warning' + type: "warning", }); } else { - throw new Error(`Failed to read directory ${dir}: ${error.message}`); + throw new ParsingError( + `Failed to read directory ${dir}: ${error.message}`, + ); } } - + return files.sort(); // Ensure consistent ordering } - + /** * Check if a file should be included based on patterns and extensions - * + * * @param {string} fullPath - Full file path * @param {string} relativePath - Relative file path * @returns {Promise} True if file should be included @@ -740,55 +956,57 @@ class pgTAPTestScanner extends EventEmitter { if (!this.options.fileExtensions.includes(ext)) { return false; } - + // Check include patterns using minimatch for consistency - const matchesInclude = this.options.includePatterns.some(pattern => - minimatch(relativePath, pattern, { dot: true }) + const matchesInclude = this.options.includePatterns.some((pattern) => + minimatch(relativePath, pattern, { dot: true }), ); - + if (!matchesInclude) { return false; } - + // Check legacy RegExp patterns - if (this.options.ignorePatterns.some(pattern => pattern.test(fullPath))) { + if (this.options.ignorePatterns.some((pattern) => pattern.test(fullPath))) { return false; } - + return true; } - + /** * Check if a path should be excluded based on exclude patterns - * + * * @param {string} relativePath - Relative path to check * @returns {boolean} True if path should be excluded * @private */ _isExcluded(relativePath) { - return this.options.excludePatterns.some(pattern => - minimatch(relativePath, pattern, { dot: true }) + return this.options.excludePatterns.some((pattern) => + minimatch(relativePath, pattern, { dot: true }), ); } - + /** * Check if hidden files should be included for this specific path - * + * * @param {string} relativePath - Relative path to check * @returns {boolean} True if hidden file should be included * @private */ _shouldIncludeHidden(relativePath) { // Check if any include pattern explicitly matches this hidden path - return this.options.includePatterns.some(pattern => { + return this.options.includePatterns.some((pattern) => { // Only include hidden files if they're explicitly matched by an include pattern - return pattern.includes('.') && this._matchesPattern(relativePath, pattern); + return ( + pattern.includes(".") && this._matchesPattern(relativePath, pattern) + ); }); } - + /** * Handle symbolic links during file discovery - * + * * @param {string} fullPath - Full path to the symlink * @param {string} relativePath - Relative path to the symlink * @param {string[]} files - Array to collect file paths @@ -801,55 +1019,57 @@ class pgTAPTestScanner extends EventEmitter { try { const realPath = await fs.realpath(fullPath); const stat = await fs.stat(realPath); - + // Prevent infinite loops by checking if we've seen this real path before // This is a simple check - a more robust solution would track visited inodes const realpathRelative = path.relative(testsRootDir, realPath); - + if (stat.isDirectory()) { // Recursively process symlinked directory - this.emit('progress', { + this.emit("progress", { message: `Following symlink to directory: ${realpathRelative}`, data: { symlinkPath: relativePath, targetPath: realpathRelative }, timestamp: new Date(), - type: 'progress' + type: "progress", }); - - const subFiles = await this._findTestFiles(realPath, depth + 1, testsRootDir); + + const subFiles = await this._findTestFiles( + realPath, + depth + 1, + testsRootDir, + ); files.push(...subFiles); - } else if (stat.isFile()) { // Process symlinked file if 
(await this._shouldIncludeFile(realPath, realpathRelative)) { files.push(realPath); // Use the real path, not the symlink path } } - } catch (error) { - if (error.code === 'ENOENT') { - this.emit('warning', { + if (error.code === "ENOENT") { + this.emit("warning", { message: `Broken symlink: ${relativePath}`, data: { symlinkPath: fullPath }, timestamp: new Date(), - type: 'warning' + type: "warning", }); - } else if (error.code === 'EACCES' || error.code === 'EPERM') { - this.emit('warning', { + } else if (error.code === "EACCES" || error.code === "EPERM") { + this.emit("warning", { message: `Permission denied following symlink: ${relativePath}`, data: { symlinkPath: fullPath, error: error.code }, timestamp: new Date(), - type: 'warning' + type: "warning", }); } else { throw error; } } } - + /** * Simple glob pattern matching without external dependencies * Supports basic patterns like *, **, and literal strings - * + * * @param {string} filePath - File path to test * @param {string} pattern - Glob pattern * @returns {boolean} True if path matches pattern @@ -857,65 +1077,69 @@ class pgTAPTestScanner extends EventEmitter { */ _matchesPattern(filePath, pattern) { // Normalize paths to use forward slashes - const normalizedPath = filePath.replace(/\\/g, '/'); - const normalizedPattern = pattern.replace(/\\/g, '/'); - + const normalizedPath = filePath.replace(/\\/g, "/"); + const normalizedPattern = pattern.replace(/\\/g, "/"); + // Handle exact matches if (normalizedPattern === normalizedPath) { return true; } - + // Handle universal wildcard patterns - if (normalizedPattern === '**/*' || normalizedPattern === '**') { + if (normalizedPattern === "**/*" || normalizedPattern === "**") { return true; } - + // Convert glob pattern to regex with special handling for leading ** let regexPattern = normalizedPattern; - + // Handle leading ** patterns specially - if (regexPattern.startsWith('**/')) { + if (regexPattern.startsWith("**/")) { regexPattern = regexPattern.substring(3); // Remove leading **/ // Add optional prefix matcher - either nothing or any path with / - regexPattern = '(?:.*/)?' + regexPattern; + regexPattern = "(?:.*/)?" + regexPattern; } - + // Handle glob patterns BEFORE escaping special regex chars regexPattern = regexPattern - .replace(/\*\*/g, '__DOUBLESTAR__') // Temporarily mark ** - .replace(/\*/g, '__SINGLESTAR__') // Temporarily mark * - .replace(/\?/g, '__QUESTION__'); // Temporarily mark ? - + .replace(/\*\*/g, "__DOUBLESTAR__") // Temporarily mark ** + .replace(/\*/g, "__SINGLESTAR__") // Temporarily mark * + .replace(/\?/g, "__QUESTION__"); // Temporarily mark ? + // Now escape special regex characters - regexPattern = regexPattern.replace(/[.+^${}()|[\]\\]/g, '\\$&'); - + regexPattern = regexPattern.replace(/[.+^${}()|[\]\\]/g, "\\$&"); + // Convert back to regex patterns regexPattern = regexPattern - .replace(/__LEADINGMATCH__/g, '') // Remove the leading match marker - .replace(/__DOUBLESTAR__/g, '.*') // ** matches any chars including / - .replace(/__SINGLESTAR__/g, '[^/]*') // * matches any chars except / - .replace(/__QUESTION__/g, '[^/]'); // ? matches single char except / - + .replace(/__LEADINGMATCH__/g, "") // Remove the leading match marker + .replace(/__DOUBLESTAR__/g, ".*") // ** matches any chars including / + .replace(/__SINGLESTAR__/g, "[^/]*") // * matches any chars except / + .replace(/__QUESTION__/g, "[^/]"); // ? 
matches single char except / + try { - const regex = new RegExp('^' + regexPattern + '$'); + const regex = new RegExp("^" + regexPattern + "$"); const result = regex.test(normalizedPath); - + // Debug logging (enable when needed) if (process.env.DEBUG_PATTERNS) { - console.log(`Pattern: '${normalizedPattern}' => Regex: '^${regexPattern}$', Path: '${normalizedPath}', Result: ${result}`); + console.log( + `Pattern: '${normalizedPattern}' => Regex: '^${regexPattern}$', Path: '${normalizedPath}', Result: ${result}`, + ); } - + return result; } catch (error) { // If regex is invalid, fall back to simple string matching - console.warn(`Invalid regex pattern generated from ${normalizedPattern}: ${error.message}`); + console.warn( + `Invalid regex pattern generated from ${normalizedPattern}: ${error.message}`, + ); return normalizedPattern === normalizedPath; } } /** * Extract test plan count from SQL - * + * * @param {string} sql - SQL content * @returns {number|null} Plan count or null if not found * @private @@ -924,31 +1148,31 @@ class pgTAPTestScanner extends EventEmitter { const match = this.planPattern.exec(sql); return match ? parseInt(match[1], 10) : null; } - + /** * Extract dependencies from SQL content - * + * * @param {string} sql - SQL content * @returns {string[]} Array of dependencies found * @private */ _extractDependencies(sql) { const dependencies = []; - + // Look for common dependency patterns const includePattern = /\\i\s+['"`]([^'"`]+)['"`]/gi; let match; - + while ((match = includePattern.exec(sql)) !== null) { dependencies.push(match[1]); } - + return dependencies; } - + /** * Determine target object from assertion parameters - * + * * @param {string} assertionType - Type of assertion * @param {string[]} parameters - Assertion parameters * @returns {string} Target object identifier @@ -956,16 +1180,16 @@ class pgTAPTestScanner extends EventEmitter { */ _determineTarget(assertionType, parameters) { // Default logic - can be extended for specific assertion types - if (parameters.length === 0) return ''; - + if (parameters.length === 0) return ""; + // For schema assertions, first parameter is schema name - if (assertionType.includes('schema')) { + if (assertionType.includes("schema")) { return parameters[0]; } - + // For table assertions - Enhanced logic for new patterns - if (assertionType.includes('table')) { - if (assertionType.includes('_select')) { + if (assertionType.includes("table")) { + if (assertionType.includes("_select")) { // SELECT has_table(...) 
patterns // Parameters can be: [table] or [schema, table] or [table, description] or [schema, table, description] if (parameters.length === 1) { @@ -974,7 +1198,11 @@ class pgTAPTestScanner extends EventEmitter { } else if (parameters.length === 2) { // Two parameters: could be [schema, table] or [table, description] // Heuristic: if second param looks like a description (long text or empty), treat first as table - if (parameters[1].length === 0 || parameters[1].length > 30 || parameters[1].includes(' ')) { + if ( + parameters[1].length === 0 || + parameters[1].length > 30 || + parameters[1].includes(" ") + ) { // Likely [table, description] (including empty description) return `public.${parameters[0]}`; } else { @@ -985,7 +1213,7 @@ class pgTAPTestScanner extends EventEmitter { // Three parameters: [schema, table, description] return `${parameters[0]}.${parameters[1]}`; } - } else if (assertionType.includes('_ok')) { + } else if (assertionType.includes("_ok")) { // ok(has_table(...), 'description') patterns // Parameters captured: [table] or [schema, table] plus optional outer description if (parameters.length === 1) { @@ -994,7 +1222,11 @@ class pgTAPTestScanner extends EventEmitter { } else if (parameters.length === 2) { // Two parameters: could be [table, outer_desc] or [schema, table] // Check if second param looks like description - if (parameters[1].length === 0 || parameters[1].length > 30 || parameters[1].includes(' ')) { + if ( + parameters[1].length === 0 || + parameters[1].length > 30 || + parameters[1].includes(" ") + ) { // Likely [table, outer_description] (including empty description) return `public.${parameters[0]}`; } else { @@ -1004,7 +1236,12 @@ class pgTAPTestScanner extends EventEmitter { } else if (parameters.length >= 3) { // Three or more parameters: [schema, table, ...] or [table, inner_desc, outer_desc] // Check if first two look like schema.table pattern - if (parameters[0].length < 20 && parameters[1].length < 20 && !parameters[0].includes(' ') && !parameters[1].includes(' ')) { + if ( + parameters[0].length < 20 && + parameters[1].length < 20 && + !parameters[0].includes(" ") && + !parameters[1].includes(" ") + ) { // Likely [schema, table, ...] 
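            // Reviewer note (illustrative, not part of the patch): this length/whitespace
            // check is only a heuristic, since pgTAP overloads these signatures. For a
            // hypothetical assertion
            //   SELECT ok(has_table('auth', 'sessions'), 'sessions table should exist');
            // the filtered captures are ['auth', 'sessions', 'sessions table should exist'];
            // both leading values are short and space-free, so the branch below resolves
            // the target to 'auth.sessions'.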
return `${parameters[0]}.${parameters[1]}`; } else { @@ -1012,7 +1249,7 @@ class pgTAPTestScanner extends EventEmitter { return `public.${parameters[0]}`; } } - } else if (assertionType === 'table_privs_are') { + } else if (assertionType === "table_privs_are") { // table_privs_are('table', 'role', ARRAY['privs']) or table_privs_are('schema', 'table', 'role', ARRAY['privs']) if (parameters.length >= 4) { // Schema, table, role, privileges pattern @@ -1021,7 +1258,7 @@ class pgTAPTestScanner extends EventEmitter { // Table, role, privileges pattern (assume public schema) return `public.${parameters[0]}`; } - } else if (assertionType === 'table_owner_is') { + } else if (assertionType === "table_owner_is") { // table_owner_is('table', 'owner') or table_owner_is('schema', 'table', 'owner') if (parameters.length >= 3) { // Schema, table, owner pattern @@ -1030,25 +1267,30 @@ class pgTAPTestScanner extends EventEmitter { // Table, owner pattern (assume public schema) return `public.${parameters[0]}`; } - } else if (assertionType === 'tables_are') { + } else if (assertionType === "tables_are") { // tables_are('schema', ARRAY['table1', 'table2']) or tables_are(ARRAY['table1', 'table2']) if (parameters.length >= 2) { // Schema and table list pattern - return schema as the target return parameters[0]; } else if (parameters.length === 1) { // Just table list pattern - assume public schema - return 'public'; + return "public"; } } else { // Legacy table patterns - return parameters.length > 1 ? `${parameters[0]}.${parameters[1]}` : `public.${parameters[0]}`; + return parameters.length > 1 + ? `${parameters[0]}.${parameters[1]}` + : `public.${parameters[0]}`; } } - + // For column assertions, handle different patterns - if (assertionType.includes('column') || assertionType.startsWith('col_')) { + if (assertionType.includes("column") || assertionType.startsWith("col_")) { // col_type_is, col_default_is have schema, table, column, type/value - if (assertionType === 'col_type_is' || assertionType === 'col_default_is') { + if ( + assertionType === "col_type_is" || + assertionType === "col_default_is" + ) { if (parameters.length >= 4) { // schema.table.column pattern return `${parameters[0]}.${parameters[1]}.${parameters[2]}`; @@ -1068,17 +1310,24 @@ class pgTAPTestScanner extends EventEmitter { } } } - + // For function assertions - handle specific function testing patterns - if (assertionType.includes('function') || assertionType.includes('definer') || assertionType === 'volatility_is') { + if ( + assertionType.includes("function") || + assertionType.includes("definer") || + assertionType === "volatility_is" + ) { // Extract function name and schema for different assertion patterns - + // Handle has_function, hasnt_function patterns: // has_function('function_name') - // has_function('schema', 'function_name') + // has_function('schema', 'function_name') // has_function('function_name', ARRAY['type1', 'type2']) // has_function('schema', 'function_name', ARRAY['type1', 'type2']) - if (assertionType === 'has_function' || assertionType === 'hasnt_function') { + if ( + assertionType === "has_function" || + assertionType === "hasnt_function" + ) { if (parameters.length >= 2 && !parameters[1].includes("'")) { // Schema and function name provided return `${parameters[0]}.${parameters[1]}`; @@ -1087,13 +1336,13 @@ class pgTAPTestScanner extends EventEmitter { return parameters[0]; } } - + // Handle function_returns patterns: // function_returns('function_name', 'return_type') // function_returns('schema', 
'function_name', 'return_type') // function_returns('function_name', ARRAY['type1', 'type2'], 'return_type') // function_returns('schema', 'function_name', ARRAY['type1', 'type2'], 'return_type') - else if (assertionType === 'function_returns') { + else if (assertionType === "function_returns") { // Check for patterns based on parameter structure if (parameters.length >= 4) { // Four parameters: schema, function, args, return_type @@ -1106,7 +1355,7 @@ class pgTAPTestScanner extends EventEmitter { // function, args, return_type pattern return parameters[0]; } else { - // schema, function, return_type pattern + // schema, function, return_type pattern return `${parameters[0]}.${parameters[1]}`; } } else if (parameters.length === 2) { @@ -1114,12 +1363,12 @@ class pgTAPTestScanner extends EventEmitter { return parameters[0]; } } - + // Handle function_lang_is patterns: // function_lang_is('function_name', 'language') // function_lang_is('schema', 'function_name', 'language') // function_lang_is('function_name', ARRAY['type1'], 'language') - else if (assertionType === 'function_lang_is') { + else if (assertionType === "function_lang_is") { if (parameters.length >= 3 && !parameters[2].includes("'")) { // Schema, function, language pattern return `${parameters[0]}.${parameters[1]}`; @@ -1128,12 +1377,15 @@ class pgTAPTestScanner extends EventEmitter { return parameters[0]; } } - + // Handle is_definer, isnt_definer patterns: // is_definer('function_name') // is_definer('schema', 'function_name') // is_definer('function_name', ARRAY['type1', 'type2']) - else if (assertionType === 'is_definer' || assertionType === 'isnt_definer') { + else if ( + assertionType === "is_definer" || + assertionType === "isnt_definer" + ) { if (parameters.length >= 2 && !parameters[1].includes("'")) { // Schema and function name provided return `${parameters[0]}.${parameters[1]}`; @@ -1142,12 +1394,12 @@ class pgTAPTestScanner extends EventEmitter { return parameters[0]; } } - + // Handle volatility_is patterns: // volatility_is('function_name', 'volatility') // volatility_is('schema', 'function_name', 'volatility') // volatility_is('function_name', ARRAY['type1'], 'volatility') - else if (assertionType === 'volatility_is') { + else if (assertionType === "volatility_is") { if (parameters.length >= 3 && !parameters[2].includes("'")) { // Schema, function, volatility pattern return `${parameters[0]}.${parameters[1]}`; @@ -1156,11 +1408,11 @@ class pgTAPTestScanner extends EventEmitter { return parameters[0]; } } - + // Handle function_privs_are patterns: // function_privs_are('schema', 'function', ARRAY['type1'], 'role', ARRAY['privs']) // function_privs_are('function', ARRAY['type1'], 'role', ARRAY['privs']) - else if (assertionType === 'function_privs_are') { + else if (assertionType === "function_privs_are") { if (parameters.length >= 4 && !parameters[3].includes("'")) { // Schema, function, args, role, privs pattern return `${parameters[0]}.${parameters[1]}`; @@ -1169,17 +1421,19 @@ class pgTAPTestScanner extends EventEmitter { return parameters[0]; } } - + // Fallback for any other function assertions else { - return parameters.length > 1 ? `${parameters[0]}.${parameters[1]}` : parameters[0]; + return parameters.length > 1 + ? 
`${parameters[0]}.${parameters[1]}` + : parameters[0]; } } - + // For index-related assertions - if (assertionType.includes('index') || assertionType.includes('unique')) { + if (assertionType.includes("index") || assertionType.includes("unique")) { // Handle different index assertion patterns - if (assertionType === 'has_index' || assertionType === 'hasnt_index') { + if (assertionType === "has_index" || assertionType === "hasnt_index") { // has_index('table', 'index') or has_index('schema', 'table', 'index') if (parameters.length === 3) { // Schema, table, index @@ -1188,7 +1442,7 @@ class pgTAPTestScanner extends EventEmitter { // Table, index return `${parameters[0]}.${parameters[1]}`; } - } else if (assertionType === 'index_is_on') { + } else if (assertionType === "index_is_on") { // index_is_on('table', 'index', ARRAY['columns']) or index_is_on('schema', 'table', 'index', ARRAY['columns']) if (parameters.length >= 4) { // Schema, table, index, columns @@ -1197,7 +1451,7 @@ class pgTAPTestScanner extends EventEmitter { // Table, index, columns return `${parameters[0]}.${parameters[1]}`; } - } else if (assertionType === 'index_is_type') { + } else if (assertionType === "index_is_type") { // index_is_type('table', 'index', 'type') or index_is_type('schema', 'table', 'index', 'type') if (parameters.length === 4) { // Schema, table, index, type @@ -1206,7 +1460,10 @@ class pgTAPTestScanner extends EventEmitter { // Table, index, type return `${parameters[0]}.${parameters[1]}`; } - } else if (assertionType === 'has_unique' || assertionType === 'hasnt_unique') { + } else if ( + assertionType === "has_unique" || + assertionType === "hasnt_unique" + ) { // has_unique('table', 'constraint') or has_unique('schema', 'table', 'constraint') if (parameters.length === 3) { // Schema, table, constraint @@ -1215,7 +1472,7 @@ class pgTAPTestScanner extends EventEmitter { // Table, constraint return `${parameters[0]}.${parameters[1]}`; } - } else if (assertionType === 'index_is_primary') { + } else if (assertionType === "index_is_primary") { // index_is_primary('table', 'index') or index_is_primary('schema', 'table', 'index') if (parameters.length === 3) { // Schema, table, index @@ -1226,38 +1483,44 @@ class pgTAPTestScanner extends EventEmitter { } } } - + // For RLS policy assertions - if (assertionType.includes('policy') || assertionType.includes('policies') || assertionType === 'is_rls_enabled') { - if (assertionType === 'is_rls_enabled') { + if ( + assertionType.includes("policy") || + assertionType.includes("policies") || + assertionType === "is_rls_enabled" + ) { + if (assertionType === "is_rls_enabled") { // is_rls_enabled('table') or is_rls_enabled('schema', 'table') - return parameters.length > 1 ? `${parameters[0]}.${parameters[1]}` : parameters[0]; - } else if (assertionType === 'policy_exists') { + return parameters.length > 1 + ? 
`${parameters[0]}.${parameters[1]}` + : parameters[0]; + } else if (assertionType === "policy_exists") { // policy_exists('table', 'policy_name') or policy_exists('schema', 'table', 'policy_name') if (parameters.length >= 3) { return `${parameters[0]}.${parameters[1]}.${parameters[2]}`; } else if (parameters.length === 2) { return `${parameters[0]}.${parameters[1]}`; } - } else if (assertionType === 'policy_cmd_is') { + } else if (assertionType === "policy_cmd_is") { // policy_cmd_is('table', 'policy', 'SELECT') or policy_cmd_is('schema', 'table', 'policy', 'SELECT') if (parameters.length >= 4) { return `${parameters[0]}.${parameters[1]}.${parameters[2]}`; } else if (parameters.length >= 3) { return `${parameters[0]}.${parameters[1]}`; } - } else if (assertionType === 'policy_roles_are') { + } else if (assertionType === "policy_roles_are") { // policy_roles_are('table', 'policy', ARRAY['role']) or policy_roles_are('schema', 'table', 'policy', ARRAY['role']) if (parameters.length >= 4) { return `${parameters[0]}.${parameters[1]}.${parameters[2]}`; } else if (parameters.length >= 3) { return `${parameters[0]}.${parameters[1]}`; } - } else if (assertionType === 'policies_are') { + } else if (assertionType === "policies_are") { // policies_are('table', ARRAY['policy1', 'policy2']) or policies_are('schema', 'table', ARRAY['policy1', 'policy2']) // The ARRAY[...] parameter is captured as a single parameter, so: // ['users', "'policy1', 'policy2'"] has length 2 -> target should be 'users' - // ['public', 'users', "'policy1', 'policy2'"] has length 3 -> target should be 'public.users' + // ['public', 'users', "'policy1', 'policy2'"] has length 3 -> target should be 'public.users' // ['public', 'users', "'policy1', 'policy2'", 'description'] has length 4 -> target should be 'public.users' if (parameters.length >= 4) { return `${parameters[0]}.${parameters[1]}`; @@ -1268,10 +1531,13 @@ class pgTAPTestScanner extends EventEmitter { } } } - + // For trigger assertions - if (assertionType.includes('trigger')) { - if (assertionType === 'has_trigger' || assertionType === 'hasnt_trigger') { + if (assertionType.includes("trigger")) { + if ( + assertionType === "has_trigger" || + assertionType === "hasnt_trigger" + ) { // has_trigger('table', 'trigger_name') or has_trigger('schema', 'table', 'trigger_name') // Also: has_trigger('table', 'trigger', 'description') or has_trigger('schema', 'table', 'trigger', 'description') if (parameters.length === 2) { @@ -1280,8 +1546,12 @@ class pgTAPTestScanner extends EventEmitter { } else if (parameters.length === 3) { // Could be: schema, table, trigger OR table, trigger, description // Heuristic: if 3rd param looks like a description (contains spaces, is very long, or contains descriptive words), treat as table, trigger, description - if (parameters[2].length > 50 || parameters[2].includes(' ') || - (parameters[2].toLowerCase().includes('trigger') && parameters[2].length > 20)) { + if ( + parameters[2].length > 50 || + parameters[2].includes(" ") || + (parameters[2].toLowerCase().includes("trigger") && + parameters[2].length > 20) + ) { // Table, trigger, description return `public.${parameters[0]}.${parameters[1]}`; } else { @@ -1292,7 +1562,7 @@ class pgTAPTestScanner extends EventEmitter { // Schema, table, trigger, description return `${parameters[0]}.${parameters[1]}.${parameters[2]}`; } - } else if (assertionType === 'trigger_is') { + } else if (assertionType === "trigger_is") { // trigger_is('table', 'trigger', 'function') or trigger_is('schema', 'table', 
'trigger', 'func_schema', 'function') if (parameters.length >= 5) { // Schema, table, trigger, func_schema, function @@ -1307,7 +1577,11 @@ class pgTAPTestScanner extends EventEmitter { return `${parameters[0]}.${parameters[1]}.${parameters[2]}`; } } - } else if (assertionType === 'is_trigger_on' || assertionType === 'trigger_fires_on' || assertionType === 'trigger_is_for') { + } else if ( + assertionType === "is_trigger_on" || + assertionType === "trigger_fires_on" || + assertionType === "trigger_is_for" + ) { // is_trigger_on('table', 'trigger', 'events') or is_trigger_on('schema', 'table', 'trigger', 'events') // trigger_fires_on('table', 'trigger', 'timing') or trigger_fires_on('schema', 'table', 'trigger', 'timing') // trigger_is_for('table', 'trigger', 'level') or trigger_is_for('schema', 'table', 'trigger', 'level') @@ -1318,7 +1592,7 @@ class pgTAPTestScanner extends EventEmitter { // Table, trigger, property return `public.${parameters[0]}.${parameters[1]}`; } - } else if (assertionType === 'triggers_are') { + } else if (assertionType === "triggers_are") { // triggers_are('table', ARRAY['trigger_names']) or triggers_are('schema', 'table', ARRAY['trigger_names']) if (parameters.length >= 3) { // Schema, table, trigger_array @@ -1329,38 +1603,46 @@ class pgTAPTestScanner extends EventEmitter { } } } - + // Default: join non-empty parameters - return parameters.filter(p => p).join('.'); + return parameters.filter((p) => p).join("."); } - + /** * Clean parameters for specific assertion types - * + * * @param {string} assertionType - Type of assertion * @param {string[]} parameters - Parameters array to clean in-place * @private */ _cleanParameters(assertionType, parameters) { // Clean quotes from default values in col_default_is - if (assertionType === 'col_default_is' && parameters.length > 0) { + if (assertionType === "col_default_is" && parameters.length > 0) { const lastIndex = parameters.length - 1; let value = parameters[lastIndex]; - + // Remove surrounding quotes if present, but preserve inner content - if (value && typeof value === 'string') { + if (value && typeof value === "string") { value = value.trim(); - + // Handle single quotes if (value.startsWith("'") && value.endsWith("'") && value.length > 1) { parameters[lastIndex] = value.slice(1, -1); } // Handle double quotes - else if (value.startsWith('"') && value.endsWith('"') && value.length > 1) { + else if ( + value.startsWith('"') && + value.endsWith('"') && + value.length > 1 + ) { parameters[lastIndex] = value.slice(1, -1); } // Handle backticks - else if (value.startsWith("`") && value.endsWith("`") && value.length > 1) { + else if ( + value.startsWith("`") && + value.endsWith("`") && + value.length > 1 + ) { parameters[lastIndex] = value.slice(1, -1); } } @@ -1369,7 +1651,7 @@ class pgTAPTestScanner extends EventEmitter { /** * Extract additional metadata from assertion parameters - * + * * @param {string} assertionType - Type of assertion * @param {string[]} parameters - Assertion parameters * @returns {Object} Additional metadata for the assertion @@ -1377,20 +1659,24 @@ class pgTAPTestScanner extends EventEmitter { */ _extractAssertionMetadata(assertionType, parameters) { const metadata = {}; - + // Extract metadata for table assertions - if (assertionType.includes('table')) { - if (assertionType.includes('_select')) { + if (assertionType.includes("table")) { + if (assertionType.includes("_select")) { // SELECT has_table(...) 
patterns if (parameters.length === 1) { // [table] - metadata.schema = 'public'; + metadata.schema = "public"; metadata.tableName = parameters[0]; } else if (parameters.length === 2) { // [schema, table] or [table, description] - if (parameters[1].length === 0 || parameters[1].length > 30 || parameters[1].includes(' ')) { + if ( + parameters[1].length === 0 || + parameters[1].length > 30 || + parameters[1].includes(" ") + ) { // [table, description] (including empty description) - metadata.schema = 'public'; + metadata.schema = "public"; metadata.tableName = parameters[0]; if (parameters[1].length > 0) { metadata.description = parameters[1]; @@ -1406,17 +1692,21 @@ class pgTAPTestScanner extends EventEmitter { metadata.tableName = parameters[1]; metadata.description = parameters[2]; } - } else if (assertionType.includes('_ok')) { + } else if (assertionType.includes("_ok")) { // ok(has_table(...), 'outer_description') patterns if (parameters.length === 1) { // [table] - metadata.schema = 'public'; + metadata.schema = "public"; metadata.tableName = parameters[0]; } else if (parameters.length === 2) { // [table, outer_desc] or [schema, table] - if (parameters[1].length === 0 || parameters[1].length > 30 || parameters[1].includes(' ')) { + if ( + parameters[1].length === 0 || + parameters[1].length > 30 || + parameters[1].includes(" ") + ) { // [table, outer_description] (including empty description) - metadata.schema = 'public'; + metadata.schema = "public"; metadata.tableName = parameters[0]; if (parameters[1].length > 0) { metadata.description = parameters[1]; @@ -1428,19 +1718,24 @@ class pgTAPTestScanner extends EventEmitter { } } else if (parameters.length >= 3) { // [schema, table, outer_desc] or [table, inner_desc, outer_desc] - if (parameters[0].length < 20 && parameters[1].length < 20 && !parameters[0].includes(' ') && !parameters[1].includes(' ')) { + if ( + parameters[0].length < 20 && + parameters[1].length < 20 && + !parameters[0].includes(" ") && + !parameters[1].includes(" ") + ) { // [schema, table, outer_desc] metadata.schema = parameters[0]; metadata.tableName = parameters[1]; metadata.description = parameters[parameters.length - 1]; // Last param is outer description } else { // [table, inner_desc, outer_desc] - metadata.schema = 'public'; + metadata.schema = "public"; metadata.tableName = parameters[0]; metadata.description = parameters[parameters.length - 1]; // Last param is outer description } } - } else if (assertionType === 'table_privs_are') { + } else if (assertionType === "table_privs_are") { // table_privs_are('table', 'role', ARRAY['privs']) or table_privs_are('schema', 'table', 'role', ARRAY['privs']) if (parameters.length >= 4) { // Schema, table, role, privileges pattern @@ -1455,7 +1750,7 @@ class pgTAPTestScanner extends EventEmitter { } } else if (parameters.length >= 3) { // Table, role, privileges pattern (assume public schema) - metadata.schema = 'public'; + metadata.schema = "public"; metadata.tableName = parameters[0]; metadata.role = parameters[1]; metadata.privileges = this._parseArrayParameter(parameters[2]); @@ -1463,7 +1758,7 @@ class pgTAPTestScanner extends EventEmitter { metadata.description = parameters[3]; } } - } else if (assertionType === 'table_owner_is') { + } else if (assertionType === "table_owner_is") { // table_owner_is('table', 'owner') or table_owner_is('schema', 'table', 'owner') if (parameters.length >= 3) { // Schema, table, owner pattern @@ -1475,14 +1770,14 @@ class pgTAPTestScanner extends EventEmitter { } } else if 
(parameters.length >= 2) { // Table, owner pattern (assume public schema) - metadata.schema = 'public'; + metadata.schema = "public"; metadata.tableName = parameters[0]; metadata.owner = parameters[1]; if (parameters.length >= 3) { metadata.description = parameters[2]; } } - } else if (assertionType === 'tables_are') { + } else if (assertionType === "tables_are") { // tables_are('schema', ARRAY['table1', 'table2']) or tables_are(ARRAY['table1', 'table2']) if (parameters.length >= 2) { // Schema and table list pattern @@ -1493,18 +1788,18 @@ class pgTAPTestScanner extends EventEmitter { } } else if (parameters.length === 1) { // Just table list pattern - assume public schema - metadata.schema = 'public'; + metadata.schema = "public"; metadata.tableList = this._parseArrayParameter(parameters[0]); } } } - + return metadata; } - + /** * Build coverage map from parsed test files - * + * * @private */ _buildCoverageMap() { @@ -1517,45 +1812,53 @@ class pgTAPTestScanner extends EventEmitter { policies: {}, indexes: {}, triggers: {}, - filesByTarget: {} + filesByTarget: {}, }; - + for (const testFile of this.testFiles) { for (const assertion of testFile.assertions) { const { type, target } = assertion; - + // Categorize by assertion type - if (type.includes('schema')) { - this._addToCoverageMap('schemas', target, type, testFile); - } else if (type.includes('table')) { - this._addToCoverageMap('tables', target, type, testFile, assertion); - } else if (type.includes('column') || type.startsWith('col_')) { - this._addToCoverageMap('columns', target, type, testFile); - } else if (type.includes('function') || type.includes('definer') || type === 'volatility_is') { - this._addToCoverageMap('functions', target, type, testFile); - } else if (type.includes('policy') || type.includes('policies') || type === 'is_rls_enabled') { - this._addToCoverageMap('policies', target, type, testFile); - } else if (type.includes('index') || type.includes('unique')) { - this._addToCoverageMap('indexes', target, type, testFile); - } else if (type.includes('trigger')) { - this._addToCoverageMap('triggers', target, type, testFile); + if (type.includes("schema")) { + this._addToCoverageMap("schemas", target, type, testFile); + } else if (type.includes("table")) { + this._addToCoverageMap("tables", target, type, testFile, assertion); + } else if (type.includes("column") || type.startsWith("col_")) { + this._addToCoverageMap("columns", target, type, testFile); + } else if ( + type.includes("function") || + type.includes("definer") || + type === "volatility_is" + ) { + this._addToCoverageMap("functions", target, type, testFile); + } else if ( + type.includes("policy") || + type.includes("policies") || + type === "is_rls_enabled" + ) { + this._addToCoverageMap("policies", target, type, testFile); + } else if (type.includes("index") || type.includes("unique")) { + this._addToCoverageMap("indexes", target, type, testFile); + } else if (type.includes("trigger")) { + this._addToCoverageMap("triggers", target, type, testFile); } - + // Track files by target if (!this.coverageMap.filesByTarget[target]) { this.coverageMap.filesByTarget[target] = []; } - + if (!this.coverageMap.filesByTarget[target].includes(testFile)) { this.coverageMap.filesByTarget[target].push(testFile); } } } } - + /** * Add entry to coverage map - * + * * @param {string} category - Coverage category * @param {string} target - Target object * @param {string} assertionType - Type of assertion @@ -1563,30 +1866,39 @@ class pgTAPTestScanner extends EventEmitter { * 
@param {Object} [assertion] - Optional full assertion object for enhanced table tracking * @private */ - _addToCoverageMap(category, target, assertionType, testFile, assertion = null) { + _addToCoverageMap( + category, + target, + assertionType, + testFile, + assertion = null, + ) { if (!this.coverageMap[category][target]) { - if (category === 'tables' && assertion) { + if (category === "tables" && assertion) { // Enhanced table coverage tracking this.coverageMap[category][target] = { assertionTypes: [], - schema: assertion.schema || 'public', - tableName: assertion.tableName || target.split('.').pop(), + schema: assertion.schema || "public", + tableName: assertion.tableName || target.split(".").pop(), descriptions: [], - testFiles: [] + testFiles: [], }; } else { // Standard coverage tracking for other categories this.coverageMap[category][target] = []; } } - - if (category === 'tables' && assertion) { + + if (category === "tables" && assertion) { // Enhanced table coverage handling const tableInfo = this.coverageMap[category][target]; if (!tableInfo.assertionTypes.includes(assertionType)) { tableInfo.assertionTypes.push(assertionType); } - if (assertion.description && !tableInfo.descriptions.includes(assertion.description)) { + if ( + assertion.description && + !tableInfo.descriptions.includes(assertion.description) + ) { tableInfo.descriptions.push(assertion.description); } if (!tableInfo.testFiles.includes(testFile.fileName)) { @@ -1600,38 +1912,41 @@ class pgTAPTestScanner extends EventEmitter { } } } - + /** * Get assertion type statistics - * + * * @returns {Object.} Count by assertion type * @private */ _getAssertionTypeStats() { const stats = {}; - + for (const testFile of this.testFiles) { for (const assertion of testFile.assertions) { stats[assertion.type] = (stats[assertion.type] || 0) + 1; } } - + return stats; } - + /** * Get coverage statistics - * + * * @returns {Object} Coverage statistics * @private */ _getCoverageStats() { // Calculate enhanced table statistics const tableStats = Object.values(this.coverageMap.tables); - const tablesWithDescriptions = tableStats.filter(table => - typeof table === 'object' && table.descriptions && table.descriptions.length > 0 + const tablesWithDescriptions = tableStats.filter( + (table) => + typeof table === "object" && + table.descriptions && + table.descriptions.length > 0, ).length; - + return { schemasWithTests: Object.keys(this.coverageMap.schemas).length, tablesWithTests: Object.keys(this.coverageMap.tables).length, @@ -1641,13 +1956,13 @@ class pgTAPTestScanner extends EventEmitter { policiesWithTests: Object.keys(this.coverageMap.policies || {}).length, indexesWithTests: Object.keys(this.coverageMap.indexes || {}).length, triggersWithTests: Object.keys(this.coverageMap.triggers || {}).length, - uniqueTargets: Object.keys(this.coverageMap.filesByTarget).length + uniqueTargets: Object.keys(this.coverageMap.filesByTarget).length, }; } - + /** * Parse ARRAY['item1', 'item2'] parameter into array of strings - * + * * @param {string} arrayStr - Array parameter string like "'item1', 'item2'" * @returns {string[]} Array of parsed items * @private @@ -1656,12 +1971,12 @@ class pgTAPTestScanner extends EventEmitter { if (!arrayStr || !arrayStr.includes("'")) return []; // Handle both ARRAY['item1', 'item2'] and just 'item1', 'item2' formats const matches = arrayStr.match(/'([^']*)'/g); - return matches ? matches.map(m => m.slice(1, -1)) : []; + return matches ? 
matches.map((m) => m.slice(1, -1)) : []; } /** * Extract function metadata from assertion parameters - * + * * @param {string} assertionType - Type of assertion * @param {string[]} parameters - Assertion parameters * @returns {Object} Function metadata object @@ -1669,51 +1984,62 @@ class pgTAPTestScanner extends EventEmitter { */ _extractFunctionMetadata(assertionType, parameters) { const metadata = {}; - + // Helper function to determine if a parameter is likely a schema vs function name - const isLikelySchema = (param, nextParam) => { + const _isLikelySchema = (param, nextParam) => { if (!nextParam) return false; // Common schema names - const commonSchemas = ['public', 'auth', 'storage', 'extensions', 'pg_catalog', 'information_schema']; - return commonSchemas.includes(param.toLowerCase()) || param.includes('_schema') || param.includes('_db'); + const commonSchemas = [ + "public", + "auth", + "storage", + "extensions", + "pg_catalog", + "information_schema", + ]; + return ( + commonSchemas.includes(param.toLowerCase()) || + param.includes("_schema") || + param.includes("_db") + ); }; - + if (parameters.length === 0) return metadata; - + switch (assertionType) { - case 'has_function': - case 'hasnt_function': + case "has_function": + case "hasnt_function": // Patterns: // has_function('function_name') - // has_function('schema', 'function_name') + // has_function('schema', 'function_name') // has_function('function_name', ARRAY['type1', 'type2']) // has_function('schema', 'function_name', ARRAY['type1', 'type2']) - + if (parameters.length >= 2 && !parameters[1].includes("'")) { // Schema and function name provided (parameters[1] doesn't contain quotes) metadata.schema = parameters[0]; metadata.name = parameters[1]; - + if (parameters.length >= 3) { metadata.parameters = this._parseArrayParameter(parameters[2]); } } else { // Only function name provided, or function name with parameters metadata.name = parameters[0]; - + if (parameters.length >= 2) { metadata.parameters = this._parseArrayParameter(parameters[1]); } } break; - - case 'function_returns': + + case "function_returns": // Patterns: // function_returns('function_name', 'return_type') // function_returns('schema', 'function_name', 'return_type') // function_returns('function_name', ARRAY['type1', 'type2'], 'return_type') // function_returns('schema', 'function_name', ARRAY['type1', 'type2'], 'return_type') - + if (parameters.length >= 4) { // Four parameters: schema, function, args, return_type metadata.schema = parameters[0]; @@ -1730,7 +2056,7 @@ class pgTAPTestScanner extends EventEmitter { metadata.parameters = this._parseArrayParameter(parameters[1]); metadata.returnType = parameters[2]; } else { - // schema, function, return_type pattern + // schema, function, return_type pattern metadata.schema = parameters[0]; metadata.name = parameters[1]; metadata.returnType = parameters[2]; @@ -1741,21 +2067,31 @@ class pgTAPTestScanner extends EventEmitter { metadata.returnType = parameters[1]; } break; - - case 'function_lang_is': + + case "function_lang_is": // Patterns similar to function_returns but last param is language - if (parameters.length >= 3 && !parameters[1].startsWith('ARRAY') && !parameters[2].startsWith('ARRAY')) { + if ( + parameters.length >= 3 && + !parameters[1].startsWith("ARRAY") && + !parameters[2].startsWith("ARRAY") + ) { // Schema, function, language pattern metadata.schema = parameters[0]; metadata.name = parameters[1]; metadata.language = parameters[2]; - } else if (parameters.length >= 4 && 
parameters[2].startsWith('ARRAY')) { + } else if ( + parameters.length >= 4 && + parameters[2].startsWith("ARRAY") + ) { // Schema, function, args, language pattern metadata.schema = parameters[0]; metadata.name = parameters[1]; metadata.parameters = this._parseArrayParameter(parameters[2]); metadata.language = parameters[3]; - } else if (parameters.length >= 3 && parameters[1].startsWith('ARRAY')) { + } else if ( + parameters.length >= 3 && + parameters[1].startsWith("ARRAY") + ) { // Function, args, language pattern metadata.name = parameters[0]; metadata.parameters = this._parseArrayParameter(parameters[1]); @@ -1766,42 +2102,56 @@ class pgTAPTestScanner extends EventEmitter { metadata.language = parameters[1]; } break; - - case 'is_definer': - case 'isnt_definer': + + case "is_definer": + case "isnt_definer": // Similar patterns to has_function - if (parameters.length >= 2 && !parameters[1].includes("'") && !parameters[1].startsWith('ARRAY')) { + if ( + parameters.length >= 2 && + !parameters[1].includes("'") && + !parameters[1].startsWith("ARRAY") + ) { metadata.schema = parameters[0]; metadata.name = parameters[1]; - - if (parameters.length >= 3 && parameters[2].startsWith('ARRAY')) { + + if (parameters.length >= 3 && parameters[2].startsWith("ARRAY")) { metadata.parameters = this._parseArrayParameter(parameters[2]); } } else { metadata.name = parameters[0]; - - if (parameters.length >= 2 && parameters[1].startsWith('ARRAY')) { + + if (parameters.length >= 2 && parameters[1].startsWith("ARRAY")) { metadata.parameters = this._parseArrayParameter(parameters[1]); } } - - metadata.isSecurityDefiner = assertionType === 'is_definer'; + + metadata.isSecurityDefiner = assertionType === "is_definer"; break; - - case 'volatility_is': + + case "volatility_is": // Similar patterns to function_lang_is but last param is volatility - if (parameters.length >= 3 && !parameters[1].startsWith('ARRAY') && !parameters[2].startsWith('ARRAY')) { + if ( + parameters.length >= 3 && + !parameters[1].startsWith("ARRAY") && + !parameters[2].startsWith("ARRAY") + ) { // Schema, function, volatility pattern metadata.schema = parameters[0]; metadata.name = parameters[1]; metadata.volatility = parameters[2]; - } else if (parameters.length >= 4 && parameters[2].startsWith('ARRAY')) { + } else if ( + parameters.length >= 4 && + parameters[2].startsWith("ARRAY") + ) { // Schema, function, args, volatility pattern metadata.schema = parameters[0]; metadata.name = parameters[1]; metadata.parameters = this._parseArrayParameter(parameters[2]); metadata.volatility = parameters[3]; - } else if (parameters.length >= 3 && parameters[1].startsWith('ARRAY')) { + } else if ( + parameters.length >= 3 && + parameters[1].startsWith("ARRAY") + ) { // Function, args, volatility pattern metadata.name = parameters[0]; metadata.parameters = this._parseArrayParameter(parameters[1]); @@ -1812,26 +2162,26 @@ class pgTAPTestScanner extends EventEmitter { metadata.volatility = parameters[1]; } break; - - case 'function_privs_are': + + case "function_privs_are": // Patterns: // function_privs_are('schema', 'function', ARRAY['type1'], 'role', ARRAY['privs']) // function_privs_are('function', ARRAY['type1'], 'role', ARRAY['privs']) // function_privs_are('schema', 'function', 'role', ARRAY['privs']) // function_privs_are('function', 'role', ARRAY['privs']) - + if (parameters.length >= 5) { // Full pattern with schema, function, args, role, privs metadata.schema = parameters[0]; metadata.name = parameters[1]; - if 
(parameters[2].startsWith('ARRAY')) { + if (parameters[2].startsWith("ARRAY")) { metadata.parameters = this._parseArrayParameter(parameters[2]); metadata.role = parameters[3]; metadata.privileges = this._parseArrayParameter(parameters[4]); } } else if (parameters.length >= 4) { // Could be: schema, function, role, privs OR function, args, role, privs - if (parameters[1].startsWith('ARRAY')) { + if (parameters[1].startsWith("ARRAY")) { // Function, args, role, privs metadata.name = parameters[0]; metadata.parameters = this._parseArrayParameter(parameters[1]); @@ -1852,13 +2202,13 @@ class pgTAPTestScanner extends EventEmitter { } break; } - + return metadata; } /** * Extract RLS policy metadata from assertion parameters - * + * * @param {string} assertionType - Type of assertion * @param {string[]} parameters - Assertion parameters * @returns {Object} Policy metadata object @@ -1866,43 +2216,43 @@ class pgTAPTestScanner extends EventEmitter { */ _extractPolicyMetadata(assertionType, parameters) { const metadata = {}; - + // Helper function to parse array parameters like ARRAY['role1', 'role2'] or ARRAY['policy1', 'policy2'] const parseArrayParameter = (arrayStr) => { if (!arrayStr || !arrayStr.includes("'")) return []; // Extract quoted items from array string const matches = arrayStr.match(/'([^']*)'/g); - return matches ? matches.map(m => m.slice(1, -1)) : []; + return matches ? matches.map((m) => m.slice(1, -1)) : []; }; - + if (parameters.length === 0) return metadata; - + switch (assertionType) { - case 'is_rls_enabled': + case "is_rls_enabled": // is_rls_enabled('table') or is_rls_enabled('schema', 'table') if (parameters.length >= 2) { metadata.schema = parameters[0]; metadata.tableName = parameters[1]; } else { - metadata.schema = 'public'; // Default schema + metadata.schema = "public"; // Default schema metadata.tableName = parameters[0]; } break; - - case 'policy_exists': + + case "policy_exists": // policy_exists('table', 'policy_name') or policy_exists('schema', 'table', 'policy_name') if (parameters.length >= 3) { metadata.schema = parameters[0]; metadata.tableName = parameters[1]; metadata.policyName = parameters[2]; } else if (parameters.length === 2) { - metadata.schema = 'public'; + metadata.schema = "public"; metadata.tableName = parameters[0]; metadata.policyName = parameters[1]; } break; - - case 'policy_cmd_is': + + case "policy_cmd_is": // policy_cmd_is('table', 'policy', 'SELECT') or policy_cmd_is('schema', 'table', 'policy', 'SELECT') if (parameters.length >= 4) { metadata.schema = parameters[0]; @@ -1910,14 +2260,14 @@ class pgTAPTestScanner extends EventEmitter { metadata.policyName = parameters[2]; metadata.command = parameters[3]; } else if (parameters.length >= 3) { - metadata.schema = 'public'; + metadata.schema = "public"; metadata.tableName = parameters[0]; metadata.policyName = parameters[1]; metadata.command = parameters[2]; } break; - - case 'policy_roles_are': + + case "policy_roles_are": // policy_roles_are('table', 'policy', ARRAY['role']) or policy_roles_are('schema', 'table', 'policy', ARRAY['role']) if (parameters.length >= 4) { metadata.schema = parameters[0]; @@ -1925,14 +2275,14 @@ class pgTAPTestScanner extends EventEmitter { metadata.policyName = parameters[2]; metadata.roles = parseArrayParameter(parameters[3]); } else if (parameters.length >= 3) { - metadata.schema = 'public'; + metadata.schema = "public"; metadata.tableName = parameters[0]; metadata.policyName = parameters[1]; metadata.roles = parseArrayParameter(parameters[2]); } break; - - 
case 'policies_are': + + case "policies_are": // policies_are('table', ARRAY['policy1', 'policy2']) or policies_are('schema', 'table', ARRAY['policy1', 'policy2']) if (parameters.length >= 3) { metadata.schema = parameters[0]; @@ -1942,7 +2292,7 @@ class pgTAPTestScanner extends EventEmitter { metadata.description = parameters[3]; } } else if (parameters.length >= 2) { - metadata.schema = 'public'; + metadata.schema = "public"; metadata.tableName = parameters[0]; metadata.policies = parseArrayParameter(parameters[1]); if (parameters.length >= 3) { @@ -1951,29 +2301,33 @@ class pgTAPTestScanner extends EventEmitter { } break; } - + return metadata; } /** * Build a comprehensive coverage database from all scanned test files - * + * * This method processes all test files and builds an enhanced coverage database * that indexes coverage by object type and name, tracks assertion counts, * and enables efficient querying for coverage analysis. - * + * * @returns {Object} Enhanced coverage database * @public */ async buildCoverageDatabase() { - this.emit('progress', new ProgressEvent('Building coverage database with memory management...')); - + this.emit( + "progress", + new ProgressEvent("Building coverage database with memory management..."), + ); + // Check if we should use streaming mode based on file count and memory const initialMemory = MemoryMonitor.getMemoryUsage(); - const shouldStream = this.options.enableStreaming && - (this.testFiles.length > this.options.batchSize || - initialMemory.heapUsed > (this.options.maxMemoryMB * 0.5)); - + const shouldStream = + this.options.enableStreaming && + (this.testFiles.length > this.options.batchSize || + initialMemory.heapUsed > this.options.maxMemoryMB * 0.5); + if (shouldStream) { return await this._buildCoverageDatabaseBatched(); } else { @@ -1987,15 +2341,20 @@ class pgTAPTestScanner extends EventEmitter { */ _buildCoverageDatabaseStandard() { const database = this._createEmptyDatabase(); - + // Process files with periodic memory checks for (let i = 0; i < this.testFiles.length; i++) { const testFile = this.testFiles[i]; - + // Check memory every 10 files if (i % 10 === 0) { const memUsage = MemoryMonitor.getMemoryUsage(); - if (MemoryMonitor.shouldTriggerCleanup(memUsage.heapUsed, this.options.maxMemoryMB)) { + if ( + MemoryMonitor.shouldTriggerCleanup( + memUsage.heapUsed, + this.options.maxMemoryMB, + ) + ) { this._performMemoryCleanup(); } } @@ -2005,13 +2364,16 @@ class pgTAPTestScanner extends EventEmitter { this._identifyCoverageGaps(database); this.coverageDatabase = database; - - this.emit('success', new SuccessEvent('Coverage database built successfully', { - totalObjects: this._getTotalIndexedObjects(database), - totalAssertions: database.assertionCounts.total, - coverage: this._calculateOverallCoverage(database), - memoryStats: this.getMemoryStats() - })); + + this.emit( + "success", + new SuccessEvent("Coverage database built successfully", { + totalObjects: this._getTotalIndexedObjects(database), + totalAssertions: database.assertionCounts.total, + coverage: this._calculateOverallCoverage(database), + memoryStats: this.getMemoryStats(), + }), + ); return database; } @@ -2023,7 +2385,7 @@ class pgTAPTestScanner extends EventEmitter { async _buildCoverageDatabaseBatched() { this.memoryState.streamingMode = true; const database = this._createEmptyDatabase(); - + // Use BatchProcessor for memory-managed processing await this.batchProcessor.processBatches( this.testFiles, @@ -2031,36 +2393,41 @@ class pgTAPTestScanner extends 
EventEmitter { // Check if streaming DB should limit objects if (this.streamingDB) { for (const testFile of batch) { - if (!this.streamingDB.addObject('files', testFile.filePath, testFile)) { - this.emit('warning', { - type: 'memory_limit', - message: `File processing limit reached at batch ${batchIndex}` + if ( + !this.streamingDB.addObject("files", testFile.filePath, testFile) + ) { + this.emit("warning", { + type: "memory_limit", + message: `File processing limit reached at batch ${batchIndex}`, }); break; } } } - + // Process batch files for (const testFile of batch) { this._processFileForDatabase(testFile, database); } - + this.memoryState.batchesProcessed++; - - return batch.map(f => f.filePath); - } + + return batch.map((f) => f.filePath); + }, ); this._identifyCoverageGaps(database); this.coverageDatabase = database; - - this.emit('success', new SuccessEvent('Batched coverage database built successfully', { - totalObjects: this._getTotalIndexedObjects(database), - totalAssertions: database.assertionCounts.total, - coverage: this._calculateOverallCoverage(database), - memoryStats: this.getMemoryStats() - })); + + this.emit( + "success", + new SuccessEvent("Batched coverage database built successfully", { + totalObjects: this._getTotalIndexedObjects(database), + totalAssertions: database.assertionCounts.total, + coverage: this._calculateOverallCoverage(database), + memoryStats: this.getMemoryStats(), + }), + ); return database; } @@ -2078,23 +2445,23 @@ class pgTAPTestScanner extends EventEmitter { schemas: new Map(), policies: new Map(), indexes: new Map(), - triggers: new Map() + triggers: new Map(), }, assertionCounts: { total: 0, byType: new Map(), - byObject: new Map() + byObject: new Map(), }, gaps: { uncoveredObjects: new Set(), - partialCoverage: new Set() + partialCoverage: new Set(), }, fileIndex: new Map(), buildTimestamp: new Date().toISOString(), totalFiles: this.testFiles.length, totalAssertions: this.totalAssertions, memoryManaged: true, - streamingMode: this.memoryState.streamingMode + streamingMode: this.memoryState.streamingMode, }; } @@ -2106,9 +2473,9 @@ class pgTAPTestScanner extends EventEmitter { // Skip if we've hit object limits const fileCount = database.fileIndex.size; if (fileCount > this.options.maxObjectsPerType) { - this.emit('warning', { - type: 'file_limit', - message: `Skipping file ${testFile.fileName} - reached file limit` + this.emit("warning", { + type: "file_limit", + message: `Skipping file ${testFile.fileName} - reached file limit`, }); return; } @@ -2119,16 +2486,17 @@ class pgTAPTestScanner extends EventEmitter { assertions: testFile.assertions.length, planCount: testFile.planCount, dependencies: testFile.dependencies, - metadata: testFile.metadata + metadata: testFile.metadata, }); // Process each assertion in the file for (const assertion of testFile.assertions) { database.assertionCounts.total++; this.memoryState.objectsProcessed++; - + // Track assertion types - const typeCount = database.assertionCounts.byType.get(assertion.type) || 0; + const typeCount = + database.assertionCounts.byType.get(assertion.type) || 0; database.assertionCounts.byType.set(assertion.type, typeCount + 1); // Process based on assertion type and extract object information @@ -2138,7 +2506,7 @@ class pgTAPTestScanner extends EventEmitter { /** * Get coverage information for a specific database object - * + * * @param {string} objectType - Type of object (table, column, function, etc.) 
* @param {string} objectName - Name/identifier of the object * @returns {Object|null} Coverage information for the object @@ -2151,40 +2519,46 @@ class pgTAPTestScanner extends EventEmitter { const normalizedType = objectType.toLowerCase(); const objectMap = this.coverageDatabase.objects[normalizedType]; - + if (!objectMap || !objectMap.has(objectName)) { return null; } const objectCoverage = objectMap.get(objectName); - + // Calculate coverage percentage for this object - const totalPossibleAssertions = this._estimateMaxAssertions(normalizedType, objectName); - const coveragePercentage = totalPossibleAssertions > 0 - ? Math.round((objectCoverage.assertions.length / totalPossibleAssertions) * 100) - : 100; + const totalPossibleAssertions = this._estimateMaxAssertions( + normalizedType, + objectName, + ); + const coveragePercentage = + totalPossibleAssertions > 0 + ? Math.round( + (objectCoverage.assertions.length / totalPossibleAssertions) * 100, + ) + : 100; return { objectType: normalizedType, objectName, assertionCount: objectCoverage.assertions.length, assertionTypes: [...objectCoverage.assertionTypes], - testFiles: [...objectCoverage.testFiles].map(f => f.fileName), + testFiles: [...objectCoverage.testFiles].map((f) => f.fileName), metadata: objectCoverage.metadata, coveragePercentage, lastTested: objectCoverage.lastTested, - assertions: objectCoverage.assertions.map(a => ({ + assertions: objectCoverage.assertions.map((a) => ({ type: a.type, testFile: a.testFile.fileName, lineNumber: a.lineNumber, - description: a.description - })) + description: a.description, + })), }; } /** * Get comprehensive coverage statistics with percentages and analysis - * + * * @returns {Object} Detailed coverage statistics * @public */ @@ -2208,7 +2582,7 @@ class pgTAPTestScanner extends EventEmitter { functions: db.objects.functions.size, policies: db.objects.policies.size, indexes: db.objects.indexes.size, - triggers: db.objects.triggers.size + triggers: db.objects.triggers.size, }, // Assertion type distribution @@ -2216,21 +2590,22 @@ class pgTAPTestScanner extends EventEmitter { // Coverage percentages by object type coveragePercentages: { - schemas: this._calculateTypesCoverage('schemas'), - tables: this._calculateTypesCoverage('tables'), - columns: this._calculateTypesCoverage('columns'), - functions: this._calculateTypesCoverage('functions'), - policies: this._calculateTypesCoverage('policies'), - indexes: this._calculateTypesCoverage('indexes'), - triggers: this._calculateTypesCoverage('triggers') + schemas: this._calculateTypesCoverage("schemas"), + tables: this._calculateTypesCoverage("tables"), + columns: this._calculateTypesCoverage("columns"), + functions: this._calculateTypesCoverage("functions"), + policies: this._calculateTypesCoverage("policies"), + indexes: this._calculateTypesCoverage("indexes"), + triggers: this._calculateTypesCoverage("triggers"), }, // Coverage quality metrics quality: { - averageAssertionsPerObject: db.totalAssertions / this._getTotalIndexedObjects(db), + averageAssertionsPerObject: + db.totalAssertions / this._getTotalIndexedObjects(db), filesWithHighCoverage: this._countHighCoverageFiles(), objectsWithMultipleTests: this._countMultiTestedObjects(), - assertionDiversity: db.assertionCounts.byType.size + assertionDiversity: db.assertionCounts.byType.size, }, // Coverage gaps analysis @@ -2238,11 +2613,11 @@ class pgTAPTestScanner extends EventEmitter { totalUncovered: db.gaps.uncoveredObjects.size, partialCoverage: db.gaps.partialCoverage.size, uncoveredByType: 
this._categorizeUncoveredObjects(), - recommendedTests: this._generateTestRecommendations() + recommendedTests: this._generateTestRecommendations(), }, // Top tested objects - topTested: this._getTopTestedObjects(10) + topTested: this._getTopTestedObjects(10), }; return stats; @@ -2250,7 +2625,7 @@ class pgTAPTestScanner extends EventEmitter { /** * Export coverage report in various formats - * + * * @param {Object} [options={}] - Export options * @param {string} [options.format='json'] - Export format (json, csv, html, markdown) * @param {boolean} [options.includeGaps=true] - Include coverage gaps in report @@ -2261,10 +2636,10 @@ class pgTAPTestScanner extends EventEmitter { */ exportCoverageReport(options = {}) { const { - format = 'json', + format = "json", includeGaps = true, includeDetails = false, - objectTypes = null + objectTypes = null, } = options; if (!this.coverageDatabase) { @@ -2275,17 +2650,17 @@ class pgTAPTestScanner extends EventEmitter { const report = { metadata: { generatedAt: new Date().toISOString(), - scanner: 'pgTAPTestScanner', - version: '1.0.0' + scanner: "pgTAPTestScanner", + version: "1.0.0", }, summary: { totalFiles: stats.totalFiles, totalAssertions: stats.totalAssertions, objectCounts: stats.objectCounts, - overallCoverage: this._calculateOverallCoverage(this.coverageDatabase) + overallCoverage: this._calculateOverallCoverage(this.coverageDatabase), }, coverage: this._buildCoverageReport(objectTypes, includeDetails), - statistics: stats + statistics: stats, }; if (includeGaps) { @@ -2294,20 +2669,20 @@ class pgTAPTestScanner extends EventEmitter { // Format the report based on requested format switch (format.toLowerCase()) { - case 'json': + case "json": return JSON.stringify(report, null, 2); - - case 'csv': + + case "csv": return this._formatReportAsCsv(report); - - case 'html': + + case "html": return this._formatReportAsHtml(report); - - case 'markdown': + + case "markdown": return this._formatReportAsMarkdown(report); - + default: - throw new Error(`Unsupported export format: ${format}`); + throw new ValidationError(`Unsupported export format: ${format}`); } } @@ -2315,7 +2690,7 @@ class pgTAPTestScanner extends EventEmitter { /** * Index a single assertion in the coverage database - * + * * @param {Object} database - Coverage database being built * @param {TestAssertion} assertion - Assertion to index * @param {TestFile} testFile - Test file containing the assertion @@ -2323,14 +2698,14 @@ class pgTAPTestScanner extends EventEmitter { */ _indexAssertionInDatabase(database, assertion, testFile) { const { type, target } = assertion; - + // Determine object type and name from assertion const objectInfo = this._parseObjectFromTarget(type, target); if (!objectInfo) return; const { objectType, objectName } = objectInfo; const objectMap = database.objects[objectType]; - + if (!objectMap) return; // Get or create object entry @@ -2340,12 +2715,12 @@ class pgTAPTestScanner extends EventEmitter { assertionTypes: new Set(), testFiles: new Set(), metadata: {}, - lastTested: null + lastTested: null, }); } const objectEntry = objectMap.get(objectName); - + // Add assertion to object entry objectEntry.assertions.push({ type, @@ -2353,19 +2728,25 @@ class pgTAPTestScanner extends EventEmitter { testFile, lineNumber: assertion.lineNumber, description: assertion.description, - parameters: assertion.parameters + parameters: assertion.parameters, }); - + objectEntry.assertionTypes.add(type); objectEntry.testFiles.add(testFile); objectEntry.lastTested = new 
Date().toISOString(); - + // Store additional metadata based on assertion type if (assertion.functionMetadata) { - objectEntry.metadata.function = { ...objectEntry.metadata.function, ...assertion.functionMetadata }; + objectEntry.metadata.function = { + ...objectEntry.metadata.function, + ...assertion.functionMetadata, + }; } if (assertion.policyMetadata) { - objectEntry.metadata.policy = { ...objectEntry.metadata.policy, ...assertion.policyMetadata }; + objectEntry.metadata.policy = { + ...objectEntry.metadata.policy, + ...assertion.policyMetadata, + }; } // Track assertion count by object @@ -2376,7 +2757,7 @@ class pgTAPTestScanner extends EventEmitter { /** * Parse object type and name from assertion target - * + * * @param {string} assertionType - Type of assertion * @param {string} target - Target string from assertion * @returns {Object|null} Object type and name @@ -2387,56 +2768,64 @@ class pgTAPTestScanner extends EventEmitter { // Determine object type based on assertion type let objectType; - if (assertionType.includes('table') || assertionType.includes('rls')) { - objectType = 'tables'; - } else if (assertionType.includes('column') || assertionType.includes('col_')) { - objectType = 'columns'; - } else if (assertionType.includes('function')) { - objectType = 'functions'; - } else if (assertionType.includes('schema')) { - objectType = 'schemas'; - } else if (assertionType.includes('policy') || assertionType.includes('policies')) { - objectType = 'policies'; - } else if (assertionType.includes('index')) { - objectType = 'indexes'; - } else if (assertionType.includes('trigger')) { - objectType = 'triggers'; + if (assertionType.includes("table") || assertionType.includes("rls")) { + objectType = "tables"; + } else if ( + assertionType.includes("column") || + assertionType.includes("col_") + ) { + objectType = "columns"; + } else if (assertionType.includes("function")) { + objectType = "functions"; + } else if (assertionType.includes("schema")) { + objectType = "schemas"; + } else if ( + assertionType.includes("policy") || + assertionType.includes("policies") + ) { + objectType = "policies"; + } else if (assertionType.includes("index")) { + objectType = "indexes"; + } else if (assertionType.includes("trigger")) { + objectType = "triggers"; } else { // Try to infer from target format - const parts = target.split('.'); + const parts = target.split("."); if (parts.length === 1) { - objectType = 'schemas'; // Likely a schema-level test + objectType = "schemas"; // Likely a schema-level test } else if (parts.length === 2) { - objectType = 'tables'; // Likely schema.table + objectType = "tables"; // Likely schema.table } else { - objectType = 'columns'; // Likely schema.table.column + objectType = "columns"; // Likely schema.table.column } } return { objectType, - objectName: target + objectName: target, }; } /** * Identify coverage gaps in the database - * + * * @param {Object} database - Coverage database * @private */ _identifyCoverageGaps(database) { - // This is a simplified implementation - in practice, you'd want to + // This is a simplified implementation - in practice, you'd want to // compare against actual database schema to find truly uncovered objects - + for (const [objectType, objectMap] of Object.entries(database.objects)) { for (const [objectName, objectData] of objectMap.entries()) { const assertionCount = objectData.assertions.length; - + // Consider objects with very few assertions as having coverage gaps if (assertionCount === 0) { 
database.gaps.uncoveredObjects.add(`${objectType}:${objectName}`); - } else if (assertionCount < this._getMinimumAssertionThreshold(objectType)) { + } else if ( + assertionCount < this._getMinimumAssertionThreshold(objectType) + ) { database.gaps.partialCoverage.add(`${objectType}:${objectName}`); } } @@ -2445,7 +2834,7 @@ class pgTAPTestScanner extends EventEmitter { /** * Calculate overall coverage percentage - * + * * @param {Object} database - Coverage database * @returns {number} Coverage percentage * @private @@ -2453,72 +2842,74 @@ class pgTAPTestScanner extends EventEmitter { _calculateOverallCoverage(database) { const totalObjects = this._getTotalIndexedObjects(database); const uncoveredObjects = database.gaps.uncoveredObjects.size; - + if (totalObjects === 0) return 100; - + const coveredObjects = totalObjects - uncoveredObjects; return Math.round((coveredObjects / totalObjects) * 100); } /** * Get total number of indexed objects across all types - * + * * @param {Object} database - Coverage database * @returns {number} Total object count * @private */ _getTotalIndexedObjects(database) { - return Object.values(database.objects) - .reduce((total, objectMap) => total + objectMap.size, 0); + return Object.values(database.objects).reduce( + (total, objectMap) => total + objectMap.size, + 0, + ); } /** * Calculate coverage percentage for a specific object type - * + * * @param {string} objectType - Type of object * @returns {number} Coverage percentage * @private */ _calculateTypesCoverage(objectType) { if (!this.coverageDatabase) return 0; - + const objectMap = this.coverageDatabase.objects[objectType]; if (!objectMap || objectMap.size === 0) return 0; - + let coveredCount = 0; for (const [, objectData] of objectMap.entries()) { if (objectData.assertions.length > 0) { coveredCount++; } } - + return Math.round((coveredCount / objectMap.size) * 100); } /** * Estimate maximum possible assertions for an object type - * + * * @param {string} objectType - Type of object * @param {string} objectName - Name of object * @returns {number} Estimated maximum assertions * @private */ - _estimateMaxAssertions(objectType, objectName) { + _estimateMaxAssertions(objectType, _objectName) { // These are rough estimates - could be enhanced with actual schema introspection switch (objectType) { - case 'tables': + case "tables": return 8; // has_table, table_privs, columns, constraints, etc. - case 'columns': + case "columns": return 4; // has_column, col_type_is, col_default_is, col_not_null - case 'functions': + case "functions": return 6; // has_function, function_returns, function_lang, etc. - case 'indexes': + case "indexes": return 3; // has_index, index_is_unique, index_is_primary - case 'triggers': + case "triggers": return 4; // has_trigger, trigger_is, etc. - case 'policies': + case "policies": return 3; // policy_is, policy_cmd, etc. 
- case 'schemas': + case "schemas": return 2; // has_schema, schema_owner default: return 3; @@ -2527,18 +2918,18 @@ class pgTAPTestScanner extends EventEmitter { /** * Get minimum assertion threshold for object type - * + * * @param {string} objectType - Type of object * @returns {number} Minimum assertion threshold * @private */ _getMinimumAssertionThreshold(objectType) { switch (objectType) { - case 'tables': + case "tables": return 2; // At minimum should test existence and basic properties - case 'functions': + case "functions": return 2; // Should test existence and return type - case 'columns': + case "columns": return 1; // At minimum test type default: return 1; @@ -2547,123 +2938,129 @@ class pgTAPTestScanner extends EventEmitter { /** * Count files with high coverage (multiple assertions per object) - * + * * @returns {number} Count of high coverage files * @private */ _countHighCoverageFiles() { let highCoverageCount = 0; - + for (const testFile of this.testFiles) { - if (testFile.assertions.length >= 5) { // Arbitrary threshold + if (testFile.assertions.length >= 5) { + // Arbitrary threshold highCoverageCount++; } } - + return highCoverageCount; } /** * Count objects that have multiple test types - * + * * @returns {number} Count of multi-tested objects * @private */ _countMultiTestedObjects() { if (!this.coverageDatabase) return 0; - + let multiTestedCount = 0; - + for (const objectMap of Object.values(this.coverageDatabase.objects)) { for (const [, objectData] of objectMap.entries()) { - if (objectData.assertionTypes.size >= 3) { // Multiple assertion types + if (objectData.assertionTypes.size >= 3) { + // Multiple assertion types multiTestedCount++; } } } - + return multiTestedCount; } /** * Categorize uncovered objects by type - * + * * @returns {Object} Uncovered objects by category * @private */ _categorizeUncoveredObjects() { if (!this.coverageDatabase) return {}; - + const categorized = {}; - + for (const objectRef of this.coverageDatabase.gaps.uncoveredObjects) { - const [objectType] = objectRef.split(':'); + const [objectType] = objectRef.split(":"); if (!categorized[objectType]) { categorized[objectType] = []; } categorized[objectType].push(objectRef); } - + return categorized; } /** * Generate test recommendations based on coverage gaps - * + * * @returns {string[]} Array of test recommendations * @private */ _generateTestRecommendations() { const recommendations = []; - + if (!this.coverageDatabase) return recommendations; - + // Analyze gaps and suggest specific tests for (const objectRef of this.coverageDatabase.gaps.uncoveredObjects) { - const [objectType, objectName] = objectRef.split(':', 2); - + const [objectType, objectName] = objectRef.split(":", 2); + switch (objectType) { - case 'tables': + case "tables": recommendations.push(`Add has_table test for ${objectName}`); break; - case 'functions': + case "functions": recommendations.push(`Add has_function test for ${objectName}`); break; - case 'columns': + case "columns": recommendations.push(`Add column type test for ${objectName}`); break; default: - recommendations.push(`Add test coverage for ${objectType}: ${objectName}`); + recommendations.push( + `Add test coverage for ${objectType}: ${objectName}`, + ); } } - + return recommendations.slice(0, 20); // Limit recommendations } /** * Get top tested objects - * + * * @param {number} limit - Maximum number to return * @returns {Array} Array of top tested objects * @private */ _getTopTestedObjects(limit = 10) { if (!this.coverageDatabase) return []; - + 
const objectStats = []; - - for (const [objectType, objectMap] of Object.entries(this.coverageDatabase.objects)) { + + for (const [objectType, objectMap] of Object.entries( + this.coverageDatabase.objects, + )) { for (const [objectName, objectData] of objectMap.entries()) { objectStats.push({ objectType, objectName, assertionCount: objectData.assertions.length, assertionTypes: objectData.assertionTypes.size, - testFiles: objectData.testFiles.size + testFiles: objectData.testFiles.size, }); } } - + return objectStats .sort((a, b) => b.assertionCount - a.assertionCount) .slice(0, limit); @@ -2671,7 +3068,7 @@ class pgTAPTestScanner extends EventEmitter { /** * Build detailed coverage report data - * + * * @param {string[]} objectTypes - Object types to include * @param {boolean} includeDetails - Include detailed assertion info * @returns {Object} Coverage report data @@ -2679,75 +3076,84 @@ class pgTAPTestScanner extends EventEmitter { */ _buildCoverageReport(objectTypes, includeDetails) { const report = {}; - + if (!this.coverageDatabase) return report; - - const typesToInclude = objectTypes || Object.keys(this.coverageDatabase.objects); - + + const typesToInclude = + objectTypes || Object.keys(this.coverageDatabase.objects); + for (const objectType of typesToInclude) { const objectMap = this.coverageDatabase.objects[objectType]; if (!objectMap) continue; - + report[objectType] = {}; - + for (const [objectName, objectData] of objectMap.entries()) { const objectReport = { assertionCount: objectData.assertions.length, assertionTypes: [...objectData.assertionTypes], testFileCount: objectData.testFiles.size, - lastTested: objectData.lastTested + lastTested: objectData.lastTested, }; - + if (includeDetails) { - objectReport.assertions = objectData.assertions.map(a => ({ + objectReport.assertions = objectData.assertions.map((a) => ({ type: a.type, testFile: a.testFile.fileName, lineNumber: a.lineNumber, - description: a.description + description: a.description, })); } - + report[objectType][objectName] = objectReport; } } - + return report; } /** * Format report as CSV - * + * * @param {Object} report - Report data * @returns {string} CSV formatted report * @private */ _formatReportAsCsv(report) { - const lines = ['Object Type,Object Name,Assertion Count,Assertion Types,Test Files,Coverage %']; - + const lines = [ + "Object Type,Object Name,Assertion Count,Assertion Types,Test Files,Coverage %", + ]; + for (const [objectType, objects] of Object.entries(report.coverage)) { for (const [objectName, data] of Object.entries(objects)) { - const maxAssertions = this._estimateMaxAssertions(objectType, objectName); - const coverage = maxAssertions > 0 - ? Math.round((data.assertionCount / maxAssertions) * 100) - : 100; - - lines.push([ + const maxAssertions = this._estimateMaxAssertions( objectType, objectName, - data.assertionCount, - data.assertionTypes.length, - data.testFileCount, - coverage - ].join(',')); + ); + const coverage = + maxAssertions > 0 + ? 
Math.round((data.assertionCount / maxAssertions) * 100)
+          : 100;
+
+        lines.push(
+          [
+            objectType,
+            objectName,
+            data.assertionCount,
+            data.assertionTypes.length,
+            data.testFileCount,
+            coverage,
+          ].join(","),
+        );
      }
    }
-
-    return lines.join('\n');
+
+    return lines.join("\n");
  }

  /**
   * Format report as HTML
-   *
+   *
   * @param {Object} report - Report data
   * @returns {string} HTML formatted report
   * @private
@@ -2797,68 +3203,84 @@ class pgTAPTestScanner extends EventEmitter {
  /**
   * Format report as Markdown
-   *
+   *
   * @param {Object} report - Report data
   * @returns {string} Markdown formatted report
   * @private
   */
  _formatReportAsMarkdown(report) {
    const lines = [
-      '# pgTAP Test Coverage Report',
-      '',
+      "# pgTAP Test Coverage Report",
+      "",
      `Generated: ${report.metadata.generatedAt}`,
-      '',
-      '## Summary',
-      '',
+      "",
+      "## Summary",
+      "",
      `- **Total Files:** ${report.summary.totalFiles}`,
      `- **Total Assertions:** ${report.summary.totalAssertions}`,
      `- **Overall Coverage:** ${report.summary.overallCoverage}%`,
-      '',
-      '## Coverage by Object Type',
-      ''
+      "",
+      "## Coverage by Object Type",
+      "",
    ];
-
+
    for (const [objectType, objects] of Object.entries(report.coverage)) {
-      lines.push(`### ${objectType.charAt(0).toUpperCase() + objectType.slice(1)}`);
-      lines.push('');
-      lines.push('| Object Name | Assertions | Types | Files | Coverage |');
-      lines.push('|-------------|------------|-------|-------|----------|');
-
+      lines.push(
+        `### ${objectType.charAt(0).toUpperCase() + objectType.slice(1)}`,
+      );
+      lines.push("");
+      lines.push("| Object Name | Assertions | Types | Files | Coverage |");
+      lines.push("|-------------|------------|-------|-------|----------|");
+
      for (const [objectName, data] of Object.entries(objects)) {
-        const maxAssertions = this._estimateMaxAssertions(objectType, objectName);
-        const coverage = maxAssertions > 0
-          ? Math.round((data.assertionCount / maxAssertions) * 100)
-          : 100;
-
-        lines.push(`| ${objectName} | ${data.assertionCount} | ${data.assertionTypes.length} | ${data.testFileCount} | ${coverage}% |`);
+        const maxAssertions = this._estimateMaxAssertions(
+          objectType,
+          objectName,
+        );
+        const coverage =
+          maxAssertions > 0
+            ? Math.round((data.assertionCount / maxAssertions) * 100)
+            : 100;
+
+        lines.push(
+          `| ${objectName} | ${data.assertionCount} | ${data.assertionTypes.length} | ${data.testFileCount} | ${coverage}% |`,
+        );
      }
-
-      lines.push('');
+
+      lines.push("");
    }
-
-    return lines.join('\n');
+
+    return lines.join("\n");
  }

  /**
   * Generate HTML table rows for coverage report
-   *
+   *
   * @param {Object} coverage - Coverage data
   * @returns {string} HTML table rows
   * @private
   */
  _generateHtmlTableRows(coverage) {
    const rows = [];
-
+
    for (const [objectType, objects] of Object.entries(coverage)) {
      for (const [objectName, data] of Object.entries(objects)) {
-        const maxAssertions = this._estimateMaxAssertions(objectType, objectName);
-        const coverage = maxAssertions > 0
-          ? Math.round((data.assertionCount / maxAssertions) * 100)
-          : 100;
-
-        const coverageClass = coverage >= 80 ? 'high-coverage' :
-          coverage >= 50 ? 'medium-coverage' : 'low-coverage';
-
+        const maxAssertions = this._estimateMaxAssertions(
+          objectType,
+          objectName,
+        );
+        const coverage =
+          maxAssertions > 0
+            ? Math.round((data.assertionCount / maxAssertions) * 100)
+            : 100;
+
+        const coverageClass =
+          coverage >= 80
+            ? "high-coverage"
+            : coverage >= 50
+              ?
"medium-coverage" + : "low-coverage"; + rows.push(` ${objectType} ${objectName} @@ -2869,12 +3291,12 @@ class pgTAPTestScanner extends EventEmitter { `); } } - - return rows.join('\n'); + + return rows.join("\n"); } - + // Memory Management Methods - + /** * Initialize memory monitoring and management * @private @@ -2883,18 +3305,18 @@ class pgTAPTestScanner extends EventEmitter { // Initialize streaming database and batch processor this.streamingDB = new StreamingCoverageDatabase(this.options); this.batchProcessor = new BatchProcessor(this, this.options); - + // Set up periodic memory monitoring if (this.options.cleanupInterval > 0) { this.memoryMonitoringInterval = setInterval(() => { this._checkMemoryUsage(); }, this.options.cleanupInterval); } - + // Listen for process events - process.once('exit', () => this._cleanup()); - process.once('SIGINT', () => this._cleanup()); - process.once('SIGTERM', () => this._cleanup()); + process.once("exit", () => this._cleanup()); + process.once("SIGINT", () => this._cleanup()); + process.once("SIGTERM", () => this._cleanup()); } /** @@ -2904,22 +3326,29 @@ class pgTAPTestScanner extends EventEmitter { _checkMemoryUsage() { const usage = MemoryMonitor.getMemoryUsage(); this.memoryState.currentUsageMB = usage.heapUsed; - this.memoryState.maxUsageMB = Math.max(this.memoryState.maxUsageMB, usage.heapUsed); + this.memoryState.maxUsageMB = Math.max( + this.memoryState.maxUsageMB, + usage.heapUsed, + ); - if (MemoryMonitor.shouldTriggerCleanup(usage.heapUsed, this.options.maxMemoryMB)) { + if ( + MemoryMonitor.shouldTriggerCleanup( + usage.heapUsed, + this.options.maxMemoryMB, + ) + ) { this._performMemoryCleanup(); } // Emit memory status - this.emit('memory_status', { + this.emit("memory_status", { current: usage.heapUsed, max: this.memoryState.maxUsageMB, threshold: this.options.maxMemoryMB * 0.8, - streamingMode: this.memoryState.streamingMode + streamingMode: this.memoryState.streamingMode, }); } - /** * Perform memory cleanup operations * @private @@ -2928,9 +3357,9 @@ class pgTAPTestScanner extends EventEmitter { // Switch to streaming mode if not already if (!this.memoryState.streamingMode && this.options.enableStreaming) { this.memoryState.streamingMode = true; - this.emit('warning', { - type: 'memory_threshold', - message: 'Switching to streaming mode due to high memory usage' + this.emit("warning", { + type: "memory_threshold", + message: "Switching to streaming mode due to high memory usage", }); } @@ -2949,11 +3378,11 @@ class pgTAPTestScanner extends EventEmitter { } this.memoryState.lastCleanup = Date.now(); - - this.emit('cleanup', { - type: 'memory_cleanup', + + this.emit("cleanup", { + type: "memory_cleanup", memoryUsage: MemoryMonitor.getMemoryUsage(), - gcPerformed: this.options.enableGC && MemoryMonitor.forceGC() + gcPerformed: this.options.enableGC && MemoryMonitor.forceGC(), }); } @@ -2963,32 +3392,33 @@ class pgTAPTestScanner extends EventEmitter { */ _limitObjectAccumulation() { // Limit coverage map sizes - Object.keys(this.coverageMap).forEach(type => { - if (type === 'filesByTarget') return; - + Object.keys(this.coverageMap).forEach((type) => { + if (type === "filesByTarget") return; + const objects = this.coverageMap[type]; const objectKeys = Object.keys(objects); - + if (objectKeys.length > this.options.maxObjectsPerType) { // Keep only the most recent objects - const toKeep = objectKeys.slice(-Math.floor(this.options.maxObjectsPerType * 0.8)); + const toKeep = objectKeys.slice( + -Math.floor(this.options.maxObjectsPerType * 
0.8), + ); const newObjects = {}; - - toKeep.forEach(key => { + + toKeep.forEach((key) => { newObjects[key] = objects[key]; }); - + this.coverageMap[type] = newObjects; - - this.emit('warning', { - type: 'object_limit', - message: `Limited ${type} objects to ${toKeep.length} items` + + this.emit("warning", { + type: "object_limit", + message: `Limited ${type} objects to ${toKeep.length} items`, }); } }); } - /** * Cleanup resources * @private @@ -2998,12 +3428,12 @@ class pgTAPTestScanner extends EventEmitter { clearInterval(this.memoryMonitoringInterval); this.memoryMonitoringInterval = null; } - + if (this.abortController) { this.abortController.abort(); } - this.emit('cleanup', { type: 'shutdown' }); + this.emit("cleanup", { type: "shutdown" }); } /** @@ -3020,10 +3450,10 @@ class pgTAPTestScanner extends EventEmitter { maxMemoryMB: this.options.maxMemoryMB, batchSize: this.options.batchSize, enableStreaming: this.options.enableStreaming, - enableGC: this.options.enableGC - } + enableGC: this.options.enableGC, + }, }; } } -export default pgTAPTestScanner; \ No newline at end of file +export default pgTAPTestScanner; diff --git a/src/reporters/CliReporter.js b/src/reporters/CliReporter.js index f3aa8dd..390fa78 100644 --- a/src/reporters/CliReporter.js +++ b/src/reporters/CliReporter.js @@ -2,12 +2,9 @@ * CLI Reporter for Command Events */ -const chalk = require('chalk'); -const inquirer = require('inquirer'); -const { - CommandEvent, - ErrorEvent -} = require('../lib/events/CommandEvents'); +const chalk = require("chalk"); +const inquirer = require("inquirer"); +const { CommandEvent, ErrorEvent } = require("../lib/events/CommandEvents"); /** * Reporter that listens to command events and displays CLI output @@ -22,7 +19,7 @@ class CliReporter { */ attach(command) { // Progress events - command.on('progress', (eventData) => { + command.on("progress", (eventData) => { if (!this.silent) { const message = this._extractMessage(eventData); if (message) { @@ -32,31 +29,31 @@ class CliReporter { }); // Warning events - command.on('warning', (eventData) => { + command.on("warning", (eventData) => { if (!this.silent) { const message = this._extractMessage(eventData); const data = this._extractData(eventData); - + if (message) { console.log(chalk.yellow.bold(`\n⚠️ WARNING: ${message}\n`)); - + if (data && data.actions) { - console.log(chalk.yellow('This will:')); - data.actions.forEach(action => { + console.log(chalk.yellow("This will:")); + data.actions.forEach((action) => { console.log(chalk.yellow(` • ${action}`)); }); - console.log(chalk.yellow('\nThis action cannot be undone!\n')); + console.log(chalk.yellow("\nThis action cannot be undone!\n")); } } } }); // Error events - command.on('error', (eventData) => { + command.on("error", (eventData) => { if (!this.silent) { const message = this._extractMessage(eventData); const error = this._extractError(eventData); - + if (message) { console.error(chalk.red(`✗ ${message}`)); } @@ -67,7 +64,7 @@ class CliReporter { }); // Success events - command.on('success', (eventData) => { + command.on("success", (eventData) => { if (!this.silent) { const message = this._extractMessage(eventData); if (message) { @@ -77,7 +74,7 @@ class CliReporter { }); // Prompt events - command.on('prompt', async ({ type, options, resolve }) => { + command.on("prompt", async ({ type, options, resolve }) => { if (this.silent) { // In silent mode, use defaults resolve(options.default || false); @@ -85,25 +82,25 @@ class CliReporter { } try { - if (type === 'confirm') { + if 
(type === "confirm") { const { result } = await inquirer.prompt([ { - type: 'confirm', - name: 'result', + type: "confirm", + name: "result", message: options.message, - default: options.default || false - } + default: options.default || false, + }, ]); resolve(result); - } else if (type === 'input') { + } else if (type === "input") { const { result } = await inquirer.prompt([ { - type: 'input', - name: 'result', + type: "input", + name: "result", message: options.message, validate: options.validate, - default: options.default - } + default: options.default, + }, ]); resolve(result); } else { @@ -115,28 +112,30 @@ class CliReporter { }); // Command-specific events - command.on('start', (eventData) => { + command.on("start", (eventData) => { if (!this.silent) { const isProd = this._extractIsProd(eventData); if (isProd) { - console.log(chalk.red.bold('\n🚨 PRODUCTION MODE 🚨\n')); + console.log(chalk.red.bold("\n🚨 PRODUCTION MODE 🚨\n")); } } }); - command.on('cancelled', () => { + command.on("cancelled", () => { if (!this.silent) { - console.log(chalk.gray('\nOperation cancelled by user\n')); + console.log(chalk.gray("\nOperation cancelled by user\n")); } }); - command.on('complete', () => { + command.on("complete", () => { if (!this.silent) { - console.log(chalk.green.bold('\n✨ Operation completed successfully!\n')); + console.log( + chalk.green.bold("\n✨ Operation completed successfully!\n"), + ); } }); - command.on('output', (eventData) => { + command.on("output", (eventData) => { if (!this.silent && process.env.VERBOSE) { const stdout = this._extractStdout(eventData); if (stdout) { @@ -155,17 +154,17 @@ class CliReporter { if (eventData instanceof CommandEvent) { return eventData.message; } - + // Handle legacy event objects - if (eventData && typeof eventData === 'object') { + if (eventData && typeof eventData === "object") { return eventData.message; } - + // Handle simple string messages - if (typeof eventData === 'string') { + if (typeof eventData === "string") { return eventData; } - + return null; } @@ -177,15 +176,20 @@ class CliReporter { // Handle typed CommandEvent instances if (eventData instanceof CommandEvent) { // Return all properties except the standard ones - const { eventType: _eventType, timestamp: _timestamp, message: _message, ...data } = eventData; + const { + eventType: _eventType, + timestamp: _timestamp, + message: _message, + ...data + } = eventData; return Object.keys(data).length > 0 ? 
data : null; } - + // Handle legacy event objects - if (eventData && typeof eventData === 'object') { + if (eventData && typeof eventData === "object") { return eventData.data || eventData; } - + return null; } @@ -198,12 +202,12 @@ class CliReporter { if (eventData instanceof ErrorEvent) { return eventData.error; } - + // Handle legacy event objects - if (eventData && typeof eventData === 'object') { + if (eventData && typeof eventData === "object") { return eventData.error; } - + return null; } @@ -216,12 +220,12 @@ class CliReporter { if (eventData instanceof CommandEvent) { return eventData.isProd || false; } - + // Handle legacy event objects - if (eventData && typeof eventData === 'object') { + if (eventData && typeof eventData === "object") { return eventData.isProd || false; } - + return false; } @@ -234,14 +238,14 @@ class CliReporter { if (eventData instanceof CommandEvent) { return eventData.stdout; } - + // Handle legacy event objects - if (eventData && typeof eventData === 'object') { + if (eventData && typeof eventData === "object") { return eventData.stdout; } - + return null; } } -module.exports = CliReporter; \ No newline at end of file +module.exports = CliReporter; diff --git a/src/ui/logo.js b/src/ui/logo.js index 0c52261..5d2e1e4 100644 --- a/src/ui/logo.js +++ b/src/ui/logo.js @@ -3,7 +3,7 @@ */ // Import oh-my-logo through the CommonJS bridge -const ohMyLogoBridge = require('./oh-my-logo-bridge.cjs'); +const ohMyLogoBridge = require("./oh-my-logo-bridge.cjs"); /** * Display data logo with mountain theme @@ -12,50 +12,71 @@ async function displayLogo() { try { // Wait for the bridge to resolve the ES module const { renderFilled } = await ohMyLogoBridge; - + // All available oh-my-logo palettes const allPalettes = [ - 'grad-blue', 'sunset', 'dawn', 'nebula', 'mono', 'ocean', - 'fire', 'forest', 'gold', 'purple', 'mint', 'coral', 'matrix' + "grad-blue", + "sunset", + "dawn", + "nebula", + "mono", + "ocean", + "fire", + "forest", + "gold", + "purple", + "mint", + "coral", + "matrix", ]; - + // All available block fonts for filled mode const allFonts = [ - '3d', 'block', 'chrome', 'grid', 'huge', 'pallet', - 'shade', 'simple', 'simple3d', 'simpleBlock', 'slick', 'tiny' + "3d", + "block", + "chrome", + "grid", + "huge", + "pallet", + "shade", + "simple", + "simple3d", + "simpleBlock", + "slick", + "tiny", ]; - + // Pick random palette AND random font - MAXIMUM CHAOS! 🎲 - const randomPalette = allPalettes[Math.floor(Math.random() * allPalettes.length)]; + const randomPalette = + allPalettes[Math.floor(Math.random() * allPalettes.length)]; const randomFont = allFonts[Math.floor(Math.random() * allFonts.length)]; - - await renderFilled('Supa', { + + await renderFilled("Supa", { palette: randomPalette, - font: randomFont // RANDOM FONT EVERY TIME! WHEEEEE! 🎉 + font: randomFont, // RANDOM FONT EVERY TIME! WHEEEEE! 🎉 }); - await renderFilled('DATA', { + await renderFilled("DATA", { palette: randomPalette, - font: randomFont // RANDOM FONT EVERY TIME! WHEEEEE! 🎉 + font: randomFont, // RANDOM FONT EVERY TIME! WHEEEEE! 🎉 }); - } catch { // Fallback: Simple console log if logo rendering fails - console.log('D • A • T • A'); + console.log("D • A • T • A"); } console.log("🖖 I am DATA:"); console.log("Database Automation, Testing, and Alignment."); - console.log("🤖 I am an Android. No, not the phone.") + console.log("🤖 I am an Android. 
No, not the phone."); console.log("═══════════════════════════\n"); console.log(""); console.log("Computer, display the help menu."); console.log(""); - console.log("Displaying help menu.") + console.log("Displaying help menu."); console.log(""); console.log("═══════════════════════════"); console.log(""); } module.exports = { - displayLogo + displayLogo, }; diff --git a/src/ui/oh-my-logo-bridge.cjs b/src/ui/oh-my-logo-bridge.cjs index 088230d..19f420b 100644 --- a/src/ui/oh-my-logo-bridge.cjs +++ b/src/ui/oh-my-logo-bridge.cjs @@ -1,5 +1,5 @@ // Bridge file to import ES module oh-my-logo from CommonJS module.exports = (async () => { - const { render, renderFilled, PALETTES } = await import('oh-my-logo'); + const { render, renderFilled, PALETTES } = await import("oh-my-logo"); return { render, renderFilled, PALETTES }; -})(); \ No newline at end of file +})(); diff --git a/test/CliReporter.test.js b/test/CliReporter.test.js index 8d083f2..50f7f44 100644 --- a/test/CliReporter.test.js +++ b/test/CliReporter.test.js @@ -2,200 +2,206 @@ * CLI Reporter Tests */ -import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest'; -import { createRequire } from 'module'; -import { EventEmitter } from 'events'; +import { describe, it, expect, vi, beforeEach, afterEach } from "vitest"; +import { createRequire } from "module"; +import { EventEmitter } from "events"; const require = createRequire(import.meta.url); -const CliReporter = require('../src/reporters/CliReporter'); -const { CommandEvent, ProgressEvent, ErrorEvent, SuccessEvent, WarningEvent } = require('../src/lib/events/CommandEvents'); - -describe('CliReporter', () => { +const CliReporter = require("../src/reporters/CliReporter"); +const { + CommandEvent, + ProgressEvent, + ErrorEvent, + SuccessEvent, + WarningEvent, +} = require("../src/lib/events/CommandEvents"); + +describe("CliReporter", () => { let reporter; let mockCommand; let consoleLogSpy; let consoleErrorSpy; - + beforeEach(() => { reporter = new CliReporter(false); // Not silent mockCommand = new EventEmitter(); - + // Spy on console methods - consoleLogSpy = vi.spyOn(console, 'log').mockImplementation(() => {}); - consoleErrorSpy = vi.spyOn(console, 'error').mockImplementation(() => {}); - + consoleLogSpy = vi.spyOn(console, "log").mockImplementation(() => {}); + consoleErrorSpy = vi.spyOn(console, "error").mockImplementation(() => {}); + reporter.attach(mockCommand); }); - + afterEach(() => { consoleLogSpy.mockRestore(); consoleErrorSpy.mockRestore(); }); - describe('Legacy Event Handling', () => { - it('should handle legacy progress events', () => { - mockCommand.emit('progress', { message: 'Legacy progress' }); - + describe("Legacy Event Handling", () => { + it("should handle legacy progress events", () => { + mockCommand.emit("progress", { message: "Legacy progress" }); + expect(consoleLogSpy).toHaveBeenCalledWith( - expect.stringContaining('🔄 Legacy progress') + expect.stringContaining("🔄 Legacy progress"), ); }); - it('should handle legacy warning events', () => { - mockCommand.emit('warning', { - message: 'Legacy warning', - data: { actions: ['Action 1', 'Action 2'] } + it("should handle legacy warning events", () => { + mockCommand.emit("warning", { + message: "Legacy warning", + data: { actions: ["Action 1", "Action 2"] }, }); - + expect(consoleLogSpy).toHaveBeenCalledWith( - expect.stringContaining('⚠️ WARNING: Legacy warning') + expect.stringContaining("⚠️ WARNING: Legacy warning"), ); }); - it('should handle legacy error events', () => { - const testError = 
new Error('Test error'); - mockCommand.emit('error', { - message: 'Legacy error', - error: testError + it("should handle legacy error events", () => { + const testError = new Error("Test error"); + mockCommand.emit("error", { + message: "Legacy error", + error: testError, }); - + expect(consoleErrorSpy).toHaveBeenCalledWith( - expect.stringContaining('✗ Legacy error') + expect.stringContaining("✗ Legacy error"), ); expect(consoleErrorSpy).toHaveBeenCalledWith( - expect.stringContaining('Test error') + expect.stringContaining("Test error"), ); }); - it('should handle legacy success events', () => { - mockCommand.emit('success', { message: 'Legacy success' }); - + it("should handle legacy success events", () => { + mockCommand.emit("success", { message: "Legacy success" }); + expect(consoleLogSpy).toHaveBeenCalledWith( - expect.stringContaining('✓ Legacy success') + expect.stringContaining("✓ Legacy success"), ); }); - it('should handle legacy start events with isProd', () => { - mockCommand.emit('start', { isProd: true }); - + it("should handle legacy start events with isProd", () => { + mockCommand.emit("start", { isProd: true }); + expect(consoleLogSpy).toHaveBeenCalledWith( - expect.stringContaining('🚨 PRODUCTION MODE 🚨') + expect.stringContaining("🚨 PRODUCTION MODE 🚨"), ); }); }); - describe('Typed Event Handling', () => { - it('should handle typed progress events', () => { - const progressEvent = new ProgressEvent('Typed progress'); - mockCommand.emit('progress', progressEvent); - + describe("Typed Event Handling", () => { + it("should handle typed progress events", () => { + const progressEvent = new ProgressEvent("Typed progress"); + mockCommand.emit("progress", progressEvent); + expect(consoleLogSpy).toHaveBeenCalledWith( - expect.stringContaining('🔄 Typed progress') + expect.stringContaining("🔄 Typed progress"), ); }); - it('should handle typed warning events', () => { - const warningEvent = new WarningEvent('Typed warning', { - actions: ['Action 1', 'Action 2'] + it("should handle typed warning events", () => { + const warningEvent = new WarningEvent("Typed warning", { + actions: ["Action 1", "Action 2"], }); - mockCommand.emit('warning', warningEvent); - + mockCommand.emit("warning", warningEvent); + expect(consoleLogSpy).toHaveBeenCalledWith( - expect.stringContaining('⚠️ WARNING: Typed warning') + expect.stringContaining("⚠️ WARNING: Typed warning"), ); }); - it('should handle typed error events', () => { - const testError = new Error('Typed error'); - const errorEvent = new ErrorEvent('Typed error message', testError); - mockCommand.emit('error', errorEvent); - + it("should handle typed error events", () => { + const testError = new Error("Typed error"); + const errorEvent = new ErrorEvent("Typed error message", testError); + mockCommand.emit("error", errorEvent); + expect(consoleErrorSpy).toHaveBeenCalledWith( - expect.stringContaining('✗ Typed error message') + expect.stringContaining("✗ Typed error message"), ); expect(consoleErrorSpy).toHaveBeenCalledWith( - expect.stringContaining('Typed error') + expect.stringContaining("Typed error"), ); }); - it('should handle typed success events', () => { - const successEvent = new SuccessEvent('Typed success'); - mockCommand.emit('success', successEvent); - + it("should handle typed success events", () => { + const successEvent = new SuccessEvent("Typed success"); + mockCommand.emit("success", successEvent); + expect(consoleLogSpy).toHaveBeenCalledWith( - expect.stringContaining('✓ Typed success') + expect.stringContaining("✓ Typed 
success"), ); }); - it('should handle typed start events with isProd', () => { + it("should handle typed start events with isProd", () => { // Create a CommandEvent with isProd property const startEvent = new CommandEvent(); startEvent.isProd = true; - mockCommand.emit('start', startEvent); - + mockCommand.emit("start", startEvent); + expect(consoleLogSpy).toHaveBeenCalledWith( - expect.stringContaining('🚨 PRODUCTION MODE 🚨') + expect.stringContaining("🚨 PRODUCTION MODE 🚨"), ); }); }); - describe('Undefined Value Handling', () => { - it('should handle undefined message gracefully', () => { - mockCommand.emit('progress', { message: undefined }); - + describe("Undefined Value Handling", () => { + it("should handle undefined message gracefully", () => { + mockCommand.emit("progress", { message: undefined }); + // Should not log anything for undefined message expect(consoleLogSpy).not.toHaveBeenCalled(); }); - it('should handle null event data gracefully', () => { - mockCommand.emit('progress', null); - + it("should handle null event data gracefully", () => { + mockCommand.emit("progress", null); + // Should not log anything for null data expect(consoleLogSpy).not.toHaveBeenCalled(); }); - it('should handle missing error object gracefully', () => { - mockCommand.emit('error', { message: 'Error without error object' }); - + it("should handle missing error object gracefully", () => { + mockCommand.emit("error", { message: "Error without error object" }); + expect(consoleErrorSpy).toHaveBeenCalledWith( - expect.stringContaining('✗ Error without error object') + expect.stringContaining("✗ Error without error object"), ); // Should not try to log undefined error message expect(consoleErrorSpy).toHaveBeenCalledTimes(1); }); }); - describe('Silent Mode', () => { - it('should not log in silent mode', () => { + describe("Silent Mode", () => { + it("should not log in silent mode", () => { const silentReporter = new CliReporter(true); const silentCommand = new EventEmitter(); silentReporter.attach(silentCommand); - - silentCommand.emit('progress', { message: 'Silent progress' }); - silentCommand.emit('success', { message: 'Silent success' }); - silentCommand.emit('error', { message: 'Silent error' }); - + + silentCommand.emit("progress", { message: "Silent progress" }); + silentCommand.emit("success", { message: "Silent success" }); + silentCommand.emit("error", { message: "Silent error" }); + expect(consoleLogSpy).not.toHaveBeenCalled(); expect(consoleErrorSpy).not.toHaveBeenCalled(); }); }); - describe('Mixed Event Types', () => { - it('should handle both legacy and typed events in the same session', () => { + describe("Mixed Event Types", () => { + it("should handle both legacy and typed events in the same session", () => { // Legacy event - mockCommand.emit('progress', { message: 'Legacy progress' }); - + mockCommand.emit("progress", { message: "Legacy progress" }); + // Typed event - const typedEvent = new ProgressEvent('Typed progress'); - mockCommand.emit('progress', typedEvent); - + const typedEvent = new ProgressEvent("Typed progress"); + mockCommand.emit("progress", typedEvent); + expect(consoleLogSpy).toHaveBeenCalledWith( - expect.stringContaining('🔄 Legacy progress') + expect.stringContaining("🔄 Legacy progress"), ); expect(consoleLogSpy).toHaveBeenCalledWith( - expect.stringContaining('🔄 Typed progress') + expect.stringContaining("🔄 Typed progress"), ); expect(consoleLogSpy).toHaveBeenCalledTimes(2); }); }); -}); \ No newline at end of file +}); diff --git a/test/Command.integration.test.js 
b/test/Command.integration.test.js index 6ce07a2..0150410 100644 --- a/test/Command.integration.test.js +++ b/test/Command.integration.test.js @@ -1,5 +1,5 @@ -import { describe, it, expect, vi, beforeEach } from 'vitest'; -import Command from '../src/lib/Command.js'; +import { describe, it, expect, vi, beforeEach } from "vitest"; +import Command from "../src/lib/Command.js"; import { ProgressEvent, WarningEvent, @@ -7,222 +7,242 @@ import { SuccessEvent, StartEvent, CompleteEvent, - CancelledEvent -} from '../src/lib/events/CommandEvents.js'; + CancelledEvent, +} from "../src/lib/events/CommandEvents.js"; -describe('Command Integration Tests', () => { +describe("Command Integration Tests", () => { let command; beforeEach(() => { command = new Command(); // Override performExecute for testing - command.performExecute = vi.fn().mockResolvedValue('test-result'); + command.performExecute = vi.fn().mockResolvedValue("test-result"); command.requiresProductionConfirmation = false; // Skip confirmations for tests }); - describe('Typed Events', () => { - it('should emit typed progress events with correct structure', async () => { + describe("Typed Events", () => { + it("should emit typed progress events with correct structure", async () => { const progressSpy = vi.fn(); - command.on('progress', progressSpy); - - command.progress('Test progress', { step: 1 }); - + command.on("progress", progressSpy); + + command.progress("Test progress", { step: 1 }); + expect(progressSpy).toHaveBeenCalledTimes(1); const emittedEvent = progressSpy.mock.calls[0][0]; - - expect(emittedEvent).toHaveProperty('type', 'progress'); - expect(emittedEvent).toHaveProperty('message', 'Test progress'); - expect(emittedEvent).toHaveProperty('data', { step: 1 }); - expect(emittedEvent).toHaveProperty('timestamp'); + + expect(emittedEvent).toHaveProperty("type", "progress"); + expect(emittedEvent).toHaveProperty("message", "Test progress"); + expect(emittedEvent).toHaveProperty("data", { step: 1 }); + expect(emittedEvent).toHaveProperty("timestamp"); expect(emittedEvent.timestamp).toBeInstanceOf(Date); - expect(emittedEvent).toHaveProperty('percentage', null); // null for indeterminate + expect(emittedEvent).toHaveProperty("percentage", null); // null for indeterminate }); - it('should emit typed warning events with correct structure', async () => { + it("should emit typed warning events with correct structure", async () => { const warnSpy = vi.fn(); - command.on('warning', warnSpy); - - command.warn('Test warning', { severity: 'low' }); - + command.on("warning", warnSpy); + + command.warn("Test warning", { severity: "low" }); + expect(warnSpy).toHaveBeenCalledTimes(1); const emittedEvent = warnSpy.mock.calls[0][0]; - - expect(emittedEvent).toHaveProperty('type', 'warning'); - expect(emittedEvent).toHaveProperty('message', 'Test warning'); - expect(emittedEvent.data).toMatchObject({ severity: 'low' }); // May have additional properties like code: null - expect(emittedEvent).toHaveProperty('timestamp'); + + expect(emittedEvent).toHaveProperty("type", "warning"); + expect(emittedEvent).toHaveProperty("message", "Test warning"); + expect(emittedEvent.data).toMatchObject({ severity: "low" }); // May have additional properties like code: null + expect(emittedEvent).toHaveProperty("timestamp"); expect(emittedEvent.timestamp).toBeInstanceOf(Date); }); - it('should emit typed error events with correct structure', async () => { + it("should emit typed error events with correct structure", async () => { const errorSpy = vi.fn(); - 
command.on('error', errorSpy); - - const testError = new Error('Test error'); - command.error('Test error message', testError, { code: 'E001' }); - + command.on("error", errorSpy); + + const testError = new Error("Test error"); + command.error("Test error message", testError, { code: "E001" }); + expect(errorSpy).toHaveBeenCalledTimes(1); const emittedEvent = errorSpy.mock.calls[0][0]; - - expect(emittedEvent).toHaveProperty('type', 'error'); - expect(emittedEvent).toHaveProperty('message', 'Test error message'); - expect(emittedEvent).toHaveProperty('error', testError); - expect(emittedEvent).toHaveProperty('data'); - expect(emittedEvent.data).toMatchObject({ code: 'E001' }); - expect(emittedEvent).toHaveProperty('timestamp'); + + expect(emittedEvent).toHaveProperty("type", "error"); + expect(emittedEvent).toHaveProperty("message", "Test error message"); + expect(emittedEvent).toHaveProperty("error", testError); + expect(emittedEvent).toHaveProperty("data"); + expect(emittedEvent.data).toMatchObject({ code: "E001" }); + expect(emittedEvent).toHaveProperty("timestamp"); expect(emittedEvent.timestamp).toBeInstanceOf(Date); }); - it('should emit typed success events with correct structure', async () => { + it("should emit typed success events with correct structure", async () => { const successSpy = vi.fn(); - command.on('success', successSpy); - - command.success('Test success', { result: 'OK' }); - + command.on("success", successSpy); + + command.success("Test success", { result: "OK" }); + expect(successSpy).toHaveBeenCalledTimes(1); const emittedEvent = successSpy.mock.calls[0][0]; - - expect(emittedEvent).toHaveProperty('type', 'success'); - expect(emittedEvent).toHaveProperty('message', 'Test success'); - expect(emittedEvent.data).toMatchObject({ result: 'OK' }); // May have additional properties like duration: null - expect(emittedEvent).toHaveProperty('timestamp'); + + expect(emittedEvent).toHaveProperty("type", "success"); + expect(emittedEvent).toHaveProperty("message", "Test success"); + expect(emittedEvent.data).toMatchObject({ result: "OK" }); // May have additional properties like duration: null + expect(emittedEvent).toHaveProperty("timestamp"); expect(emittedEvent.timestamp).toBeInstanceOf(Date); }); }); - describe('Command Execution Events', () => { - it('should emit start and complete events during execution', async () => { + describe("Command Execution Events", () => { + it("should emit start and complete events during execution", async () => { const startSpy = vi.fn(); const completeSpy = vi.fn(); - - command.on('start', startSpy); - command.on('complete', completeSpy); - + + command.on("start", startSpy); + command.on("complete", completeSpy); + const result = await command.execute(); - + expect(startSpy).toHaveBeenCalledTimes(1); expect(completeSpy).toHaveBeenCalledTimes(1); - + const startEvent = startSpy.mock.calls[0][0]; - expect(startEvent).toHaveProperty('type', 'start'); - expect(startEvent.message).toContain('Command'); - expect(startEvent).toHaveProperty('isProd', false); - + expect(startEvent).toHaveProperty("type", "start"); + expect(startEvent.message).toContain("Command"); + expect(startEvent).toHaveProperty("isProd", false); + const completeEvent = completeSpy.mock.calls[0][0]; - expect(completeEvent).toHaveProperty('type', 'complete'); - expect(completeEvent.message).toContain('completed successfully'); - expect(completeEvent).toHaveProperty('result', 'test-result'); - - expect(result).toBe('test-result'); + expect(completeEvent).toHaveProperty("type", 
"complete"); + expect(completeEvent.message).toContain("completed successfully"); + expect(completeEvent).toHaveProperty("result", "test-result"); + + expect(result).toBe("test-result"); }); - it('should emit error events when execution fails', async () => { + it("should emit error events when execution fails", async () => { const startSpy = vi.fn(); const errorSpy = vi.fn(); const completeSpy = vi.fn(); - - command.on('start', startSpy); - command.on('error', errorSpy); - command.on('complete', completeSpy); - - const testError = new Error('Execution failed'); + + command.on("start", startSpy); + command.on("error", errorSpy); + command.on("complete", completeSpy); + + const testError = new Error("Execution failed"); command.performExecute = vi.fn().mockRejectedValue(testError); - - await expect(command.execute()).rejects.toThrow('Execution failed'); - + + await expect(command.execute()).rejects.toThrow("Execution failed"); + expect(startSpy).toHaveBeenCalledTimes(1); expect(errorSpy).toHaveBeenCalledTimes(1); expect(completeSpy).not.toHaveBeenCalled(); // Should not emit complete on error - + const errorEvent = errorSpy.mock.calls[0][0]; - expect(errorEvent).toHaveProperty('type', 'error'); - expect(errorEvent.message).toContain('failed'); - expect(errorEvent).toHaveProperty('error', testError); + expect(errorEvent).toHaveProperty("type", "error"); + expect(errorEvent.message).toContain("failed"); + expect(errorEvent).toHaveProperty("error", testError); }); }); - describe('Event Validation', () => { - it('should validate events correctly with basic structure check', () => { - const validEvent = { type: 'progress', message: 'Test', timestamp: new Date(), data: {} }; - const invalidEvent = { type: 'invalid-type' }; // Missing required fields - + describe("Event Validation", () => { + it("should validate events correctly with basic structure check", () => { + const validEvent = { + type: "progress", + message: "Test", + timestamp: new Date(), + data: {}, + }; + const invalidEvent = { type: "invalid-type" }; // Missing required fields + const validResult = command.validateEvent(validEvent); const invalidResult = command.validateEvent(invalidEvent); - + expect(validResult.success).toBe(true); expect(invalidResult.success).toBe(false); }); - it('should validate events against specific class types', () => { - const progressEventInstance = new ProgressEvent('Test progress', null, {}); - const errorEventInstance = new ErrorEvent('Test error', new Error(), null, {}); - - const validProgressResult = command.validateEvent(progressEventInstance, ProgressEvent); - const invalidResult = command.validateEvent(errorEventInstance, ProgressEvent); - + it("should validate events against specific class types", () => { + const progressEventInstance = new ProgressEvent( + "Test progress", + null, + {}, + ); + const errorEventInstance = new ErrorEvent( + "Test error", + new Error(), + null, + {}, + ); + + const validProgressResult = command.validateEvent( + progressEventInstance, + ProgressEvent, + ); + const invalidResult = command.validateEvent( + errorEventInstance, + ProgressEvent, + ); + expect(validProgressResult.success).toBe(true); expect(invalidResult.success).toBe(false); }); - it('should emit typed events with validation via emitTypedEvent', () => { + it("should emit typed events with validation via emitTypedEvent", () => { const testSpy = vi.fn(); - command.on('test-event', testSpy); - - const validEvent = new ProgressEvent('Test progress', null, {}); - - command.emitTypedEvent('test-event', 
validEvent, ProgressEvent); - + command.on("test-event", testSpy); + + const validEvent = new ProgressEvent("Test progress", null, {}); + + command.emitTypedEvent("test-event", validEvent, ProgressEvent); + expect(testSpy).toHaveBeenCalledTimes(1); const emittedEvent = testSpy.mock.calls[0][0]; - expect(emittedEvent).toHaveProperty('message', 'Test progress'); - expect(emittedEvent).toHaveProperty('type', 'progress'); - expect(emittedEvent).toHaveProperty('data'); - expect(emittedEvent).toHaveProperty('timestamp'); + expect(emittedEvent).toHaveProperty("message", "Test progress"); + expect(emittedEvent).toHaveProperty("type", "progress"); + expect(emittedEvent).toHaveProperty("data"); + expect(emittedEvent).toHaveProperty("timestamp"); }); }); - describe('Backward Compatibility', () => { - it('should maintain the same event structure for existing listeners', () => { + describe("Backward Compatibility", () => { + it("should maintain the same event structure for existing listeners", () => { // This test ensures that existing code listening for events will still work const legacyListenerSpy = vi.fn(); - + // Simulate how existing code might listen for events - command.on('progress', (eventData) => { + command.on("progress", (eventData) => { legacyListenerSpy({ message: eventData.message, data: eventData.data, - hasTimestamp: 'timestamp' in eventData, - hasType: 'type' in eventData + hasTimestamp: "timestamp" in eventData, + hasType: "type" in eventData, }); }); - - command.progress('Legacy test', { oldField: 'value' }); - + + command.progress("Legacy test", { oldField: "value" }); + expect(legacyListenerSpy).toHaveBeenCalledWith({ - message: 'Legacy test', - data: { oldField: 'value' }, + message: "Legacy test", + data: { oldField: "value" }, hasTimestamp: true, - hasType: true + hasType: true, }); }); - it('should maintain existing event object properties', () => { + it("should maintain existing event object properties", () => { const eventSpy = vi.fn(); - command.on('success', eventSpy); - - command.success('Test message', { custom: 'data' }); - + command.on("success", eventSpy); + + command.success("Test message", { custom: "data" }); + expect(eventSpy).toHaveBeenCalledTimes(1); const event = eventSpy.mock.calls[0][0]; - + // Check all expected properties are present - expect(event).toHaveProperty('message', 'Test message'); - expect(event.data).toMatchObject({ custom: 'data' }); // May have additional properties - expect(event).toHaveProperty('timestamp'); - expect(event).toHaveProperty('type', 'success'); - + expect(event).toHaveProperty("message", "Test message"); + expect(event.data).toMatchObject({ custom: "data" }); // May have additional properties + expect(event).toHaveProperty("timestamp"); + expect(event).toHaveProperty("type", "success"); + // Ensure timestamp is a Date object (not string) expect(event.timestamp).toBeInstanceOf(Date); }); }); -}); \ No newline at end of file +}); diff --git a/test/CommandRouter.test.js b/test/CommandRouter.test.js index d0c489a..07fcfc3 100644 --- a/test/CommandRouter.test.js +++ b/test/CommandRouter.test.js @@ -2,307 +2,310 @@ * Tests for CommandRouter */ -import { describe, it, expect, beforeEach, vi } from 'vitest'; -import CommandRouter from '../src/lib/CommandRouter.js'; -import { z } from 'zod'; +import { describe, it, expect, beforeEach, vi } from "vitest"; +import CommandRouter from "../src/lib/CommandRouter.js"; +import { z } from "zod"; -describe('CommandRouter', () => { +describe("CommandRouter", () => { let router; beforeEach(() => { 
router = new CommandRouter(); }); - describe('Basic Routing', () => { - it('should register and execute a simple command', async () => { - const handler = vi.fn(async (args) => ({ result: 'success', args })); - - router - .command('test') - .handler(handler); + describe("Basic Routing", () => { + it("should register and execute a simple command", async () => { + const handler = vi.fn(async (args) => ({ result: "success", args })); + + router.command("test").handler(handler); + + const result = await router.execute("test", { foo: "bar" }); - const result = await router.execute('test', { foo: 'bar' }); - expect(handler).toHaveBeenCalledWith( - { foo: 'bar' }, + { foo: "bar" }, expect.objectContaining({ - path: 'test', - rawArgs: { foo: 'bar' } - }) + path: "test", + rawArgs: { foo: "bar" }, + }), ); - expect(result).toEqual({ result: 'success', args: { foo: 'bar' } }); + expect(result).toEqual({ result: "success", args: { foo: "bar" } }); }); - it('should handle subcommands', async () => { - const handler = vi.fn(async () => 'subcommand executed'); - - router - .command('parent') - .subcommand('child') - .handler(handler); + it("should handle subcommands", async () => { + const handler = vi.fn(async () => "subcommand executed"); + + router.command("parent").subcommand("child").handler(handler); + + const result = await router.execute("parent/child", {}); - const result = await router.execute('parent/child', {}); - expect(handler).toHaveBeenCalled(); - expect(result).toBe('subcommand executed'); + expect(result).toBe("subcommand executed"); }); - it('should throw error for unregistered commands', async () => { - await expect(router.execute('nonexistent', {})) - .rejects.toThrow('No handler registered for command: nonexistent'); + it("should throw error for unregistered commands", async () => { + await expect(router.execute("nonexistent", {})).rejects.toThrow( + "No handler registered for command: nonexistent", + ); }); }); - describe('Zod Schema Validation', () => { - it('should validate arguments with Zod schema', async () => { + describe("Zod Schema Validation", () => { + it("should validate arguments with Zod schema", async () => { const handler = vi.fn(async (args) => args); - + router - .command('validate') - .schema(z.object({ - name: z.string(), - age: z.number().min(0).max(120), - email: z.string().email().optional() - })) + .command("validate") + .schema( + z.object({ + name: z.string(), + age: z.number().min(0).max(120), + email: z.string().email().optional(), + }), + ) .handler(handler); - const result = await router.execute('validate', { - name: 'John', + const result = await router.execute("validate", { + name: "John", age: 30, - email: 'john@example.com' + email: "john@example.com", }); expect(handler).toHaveBeenCalledWith( { - name: 'John', + name: "John", age: 30, - email: 'john@example.com' + email: "john@example.com", }, - expect.any(Object) + expect.any(Object), ); expect(result).toEqual({ - name: 'John', + name: "John", age: 30, - email: 'john@example.com' + email: "john@example.com", }); }); - it('should apply default values from schema', async () => { + it("should apply default values from schema", async () => { const handler = vi.fn(async (args) => args); - + router - .command('defaults') - .schema(z.object({ - verbose: z.boolean().default(false), - output: z.string().default('console'), - limit: z.number().default(10) - })) + .command("defaults") + .schema( + z.object({ + verbose: z.boolean().default(false), + output: z.string().default("console"), + limit: 
z.number().default(10), + }), + ) .handler(handler); - const result = await router.execute('defaults', {}); + const result = await router.execute("defaults", {}); expect(result).toEqual({ verbose: false, - output: 'console', - limit: 10 + output: "console", + limit: 10, }); }); - it('should reject invalid arguments', async () => { + it("should reject invalid arguments", async () => { router - .command('strict') - .schema(z.object({ - count: z.number().int().positive() - })) - .handler(async () => 'should not reach'); - - await expect(router.execute('strict', { count: 'not-a-number' })) - .rejects.toThrow('Validation failed'); - - await expect(router.execute('strict', { count: -5 })) - .rejects.toThrow('Validation failed'); + .command("strict") + .schema( + z.object({ + count: z.number().int().positive(), + }), + ) + .handler(async () => "should not reach"); + + await expect( + router.execute("strict", { count: "not-a-number" }), + ).rejects.toThrow("Validation failed"); + + await expect(router.execute("strict", { count: -5 })).rejects.toThrow( + "Validation failed", + ); }); - it('should handle enum schemas', async () => { + it("should handle enum schemas", async () => { const handler = vi.fn(async (args) => args); - + router - .command('format') - .schema(z.object({ - type: z.enum(['json', 'yaml', 'xml']).default('json') - })) + .command("format") + .schema( + z.object({ + type: z.enum(["json", "yaml", "xml"]).default("json"), + }), + ) .handler(handler); - const result = await router.execute('format', { type: 'yaml' }); - expect(result.type).toBe('yaml'); + const result = await router.execute("format", { type: "yaml" }); + expect(result.type).toBe("yaml"); - const defaultResult = await router.execute('format', {}); - expect(defaultResult.type).toBe('json'); + const defaultResult = await router.execute("format", {}); + expect(defaultResult.type).toBe("json"); }); }); - describe('CLI Argument Conversion', () => { - it('should convert kebab-case CLI args to camelCase', async () => { + describe("CLI Argument Conversion", () => { + it("should convert kebab-case CLI args to camelCase", async () => { const handler = vi.fn(async (args) => args); - + router - .command('convert') - .schema(z.object({ - firstName: z.string(), - lastName: z.string(), - phoneNumber: z.string().optional() - })) + .command("convert") + .schema( + z.object({ + firstName: z.string(), + lastName: z.string(), + phoneNumber: z.string().optional(), + }), + ) .handler(handler); - const result = await router.execute('convert', { - 'first-name': 'John', - 'last-name': 'Doe', - 'phone-number': '555-1234' + const result = await router.execute("convert", { + "first-name": "John", + "last-name": "Doe", + "phone-number": "555-1234", }); expect(result).toEqual({ - firstName: 'John', - lastName: 'Doe', - phoneNumber: '555-1234' + firstName: "John", + lastName: "Doe", + phoneNumber: "555-1234", }); }); - it('should handle boolean flags correctly', async () => { + it("should handle boolean flags correctly", async () => { const handler = vi.fn(async (args) => args); - + router - .command('flags') - .schema(z.object({ - verbose: z.boolean().default(false), - quiet: z.boolean().default(false), - force: z.boolean().default(false) - })) + .command("flags") + .schema( + z.object({ + verbose: z.boolean().default(false), + quiet: z.boolean().default(false), + force: z.boolean().default(false), + }), + ) .handler(handler); - const result = await router.execute('flags', { - '--verbose': true, - '--quiet': 'false', - '--force': undefined + const 
result = await router.execute("flags", { + "--verbose": true, + "--quiet": "false", + "--force": undefined, }); expect(result).toEqual({ verbose: true, quiet: false, - force: false + force: false, }); }); }); - describe('Help Generation', () => { - it('should return help flag when --help is passed', async () => { + describe("Help Generation", () => { + it("should return help flag when --help is passed", async () => { const handler = vi.fn(); - + router - .command('helpful') - .description('A helpful command') - .schema(z.object({ - input: z.string().describe('Input file path'), - output: z.string().describe('Output file path') - })) + .command("helpful") + .description("A helpful command") + .schema( + z.object({ + input: z.string().describe("Input file path"), + output: z.string().describe("Output file path"), + }), + ) .handler(handler); - const result = await router.execute('helpful', { '--help': true }); - + const result = await router.execute("helpful", { "--help": true }); + expect(result).toEqual({ help: true }); expect(handler).not.toHaveBeenCalled(); }); }); - describe('Middleware', () => { - it('should execute global middleware', async () => { + describe("Middleware", () => { + it("should execute global middleware", async () => { const middleware = vi.fn(async (context) => { context.args.middlewareRan = true; }); - + const handler = vi.fn(async (args) => args); - + router.use(middleware); - - router - .command('middleware-test') - .handler(handler); - const result = await router.execute('middleware-test', { foo: 'bar' }); - + router.command("middleware-test").handler(handler); + + const result = await router.execute("middleware-test", { foo: "bar" }); + expect(middleware).toHaveBeenCalled(); expect(result.middlewareRan).toBe(true); }); - it('should execute route-specific middleware', async () => { + it("should execute route-specific middleware", async () => { const routeMiddleware = vi.fn(async (context) => { context.args.routeSpecific = true; }); - + const handler = vi.fn(async (args) => args); - - router - .command('route-middleware') - .use(routeMiddleware) - .handler(handler); - const result = await router.execute('route-middleware', {}); - + router.command("route-middleware").use(routeMiddleware).handler(handler); + + const result = await router.execute("route-middleware", {}); + expect(routeMiddleware).toHaveBeenCalled(); expect(result.routeSpecific).toBe(true); }); }); - describe('Pattern Matching', () => { - it('should match wildcard patterns', async () => { - const handler = vi.fn(async () => 'wildcard matched'); - - router - .command('api/*') - .handler(handler); + describe("Pattern Matching", () => { + it("should match wildcard patterns", async () => { + const handler = vi.fn(async () => "wildcard matched"); + + router.command("api/*").handler(handler); - const result1 = await router.execute('api/users', {}); - const result2 = await router.execute('api/posts/123', {}); - - expect(result1).toBe('wildcard matched'); - expect(result2).toBe('wildcard matched'); + const result1 = await router.execute("api/users", {}); + const result2 = await router.execute("api/posts/123", {}); + + expect(result1).toBe("wildcard matched"); + expect(result2).toBe("wildcard matched"); expect(handler).toHaveBeenCalledTimes(2); }); }); - describe('Class Handlers', () => { - it('should support class-based handlers', async () => { + describe("Class Handlers", () => { + it("should support class-based handlers", async () => { class TestCommand { constructor(config, logger, isProd) { this.config = 
config; this.logger = logger; this.isProd = isProd; } - + async execute(args) { return { executed: true, args, - hasConfig: !!this.config + hasConfig: !!this.config, }; } } - - router.config = { test: 'config' }; + + router.config = { test: "config" }; router.logger = console; - - router - .command('class-handler') - .handler(TestCommand); - const result = await router.execute('class-handler', { prod: true }); - + router.command("class-handler").handler(TestCommand); + + const result = await router.execute("class-handler", { prod: true }); + expect(result.executed).toBe(true); expect(result.hasConfig).toBe(true); }); }); - describe('Common Schemas', () => { - it('should provide common schema utilities', () => { + describe("Common Schemas", () => { + it("should provide common schema utilities", () => { expect(CommandRouter.schemas.verbose).toBeDefined(); expect(CommandRouter.schemas.quiet).toBeDefined(); expect(CommandRouter.schemas.force).toBeDefined(); @@ -311,71 +314,70 @@ describe('CommandRouter', () => { expect(CommandRouter.schemas.port).toBeDefined(); }); - it('should validate port numbers correctly', () => { + it("should validate port numbers correctly", () => { const portSchema = CommandRouter.schemas.port; - + expect(portSchema.safeParse(3000).success).toBe(true); expect(portSchema.safeParse(80).success).toBe(true); expect(portSchema.safeParse(0).success).toBe(false); expect(portSchema.safeParse(70000).success).toBe(false); - expect(portSchema.safeParse('not-a-port').success).toBe(false); + expect(portSchema.safeParse("not-a-port").success).toBe(false); }); }); - describe('Error Handling', () => { - it('should emit error events on failure', async () => { + describe("Error Handling", () => { + it("should emit error events on failure", async () => { const errorHandler = vi.fn(); - router.on('error', errorHandler); - - router - .command('failing') - .handler(async () => { - throw new Error('Command failed'); - }); - - await expect(router.execute('failing', {})) - .rejects.toThrow('Command failed'); - + router.on("error", errorHandler); + + router.command("failing").handler(async () => { + throw new Error("Command failed"); + }); + + await expect(router.execute("failing", {})).rejects.toThrow( + "Command failed", + ); + expect(errorHandler).toHaveBeenCalledWith( expect.objectContaining({ - path: 'failing', - error: expect.any(Error) - }) + path: "failing", + error: expect.any(Error), + }), ); }); }); - describe('Route Information', () => { - it('should return registered routes', () => { + describe("Route Information", () => { + it("should return registered routes", () => { router - .command('first') - .description('First command') + .command("first") + .description("First command") .schema(z.object({ test: z.string() })) .handler(async () => {}); - + router - .command('second') - .subcommand('sub') - .description('Second command') + .command("second") + .subcommand("sub") + .description("Second command") .handler(async () => {}); const routes = router.getRoutes(); - + expect(routes).toHaveLength(2); expect(routes[0]).toEqual({ - path: 'first', + path: "first", hasHandler: true, hasSchema: true, middleware: 0, - description: 'First command' + description: "First command", }); expect(routes[1]).toEqual({ - path: 'second/sub', + path: "second/sub", hasHandler: true, hasSchema: false, middleware: 0, - description: 'Second command' + description: "Second command", }); }); }); -}); \ No newline at end of file +}); diff --git a/test/MigrateCommand.test.js b/test/MigrateCommand.test.js index 
c182ee6..2e1c36e 100644 --- a/test/MigrateCommand.test.js +++ b/test/MigrateCommand.test.js @@ -2,9 +2,9 @@ * Tests for MigrateCommand with CommandRouter */ -import { describe, it, expect, beforeEach, afterEach, vi } from 'vitest'; +import { describe, it, expect, beforeEach, afterEach, vi } from "vitest"; -describe('MigrateCommand', () => { +describe("MigrateCommand", () => { let MigrateCommand; let command; let consoleLogSpy; @@ -13,13 +13,13 @@ describe('MigrateCommand', () => { beforeEach(async () => { // Reset modules to ensure clean mocks vi.resetModules(); - + // Mock console to prevent output during tests - consoleLogSpy = vi.spyOn(console, 'log').mockImplementation(() => {}); - consoleErrorSpy = vi.spyOn(console, 'error').mockImplementation(() => {}); - + consoleLogSpy = vi.spyOn(console, "log").mockImplementation(() => {}); + consoleErrorSpy = vi.spyOn(console, "error").mockImplementation(() => {}); + // Mock all subcommand modules before importing MigrateCommand - vi.doMock('../src/commands/db/migrate/generate.js', () => { + vi.doMock("../src/commands/db/migrate/generate.js", () => { return { default: class MockGenerateCommand { constructor() { @@ -28,11 +28,11 @@ describe('MigrateCommand', () => { async execute(args) { return { generated: true, args }; } - } + }, }; }); - vi.doMock('../src/commands/db/migrate/test-v2.js', () => { + vi.doMock("../src/commands/db/migrate/test-v2.js", () => { return { default: class MockTestCommand { constructor() { @@ -41,24 +41,24 @@ describe('MigrateCommand', () => { async execute(args) { return { tested: true, args }; } - } + }, }; }); - vi.doMock('../src/commands/db/migrate/status.js', () => { + vi.doMock("../src/commands/db/migrate/status.js", () => { return { default: class MockStatusCommand { constructor() { this.on = vi.fn(); } async execute(args) { - return { status: 'ready', args }; + return { status: "ready", args }; } - } + }, }; }); - vi.doMock('../src/commands/db/migrate/squash.js', () => { + vi.doMock("../src/commands/db/migrate/squash.js", () => { return { default: class MockSquashCommand { constructor() { @@ -67,12 +67,12 @@ describe('MigrateCommand', () => { async execute(args) { return { squashed: true, args }; } - } + }, }; }); // Mock other subcommands - const otherCommands = ['promote', 'rollback', 'clean', 'history', 'verify']; + const otherCommands = ["promote", "rollback", "clean", "history", "verify"]; for (const cmd of otherCommands) { vi.doMock(`../src/commands/db/migrate/${cmd}.js`, () => { return { @@ -83,13 +83,13 @@ describe('MigrateCommand', () => { async execute(args) { return { command: cmd, executed: true, args }; } - } + }, }; }); } // Now import MigrateCommand after all mocks are set up - const module = await import('../src/commands/db/MigrateCommand.js'); + const module = await import("../src/commands/db/MigrateCommand.js"); MigrateCommand = module.default; command = new MigrateCommand(); }); @@ -100,96 +100,98 @@ describe('MigrateCommand', () => { vi.clearAllMocks(); }); - describe('Router Integration', () => { - it('should initialize router with all subcommands', () => { + describe("Router Integration", () => { + it("should initialize router with all subcommands", () => { const routes = command.router.getRoutes(); - const subcommands = routes.map(r => r.path.split('/')[1]); - - expect(subcommands).toContain('generate'); - expect(subcommands).toContain('test'); - expect(subcommands).toContain('status'); - expect(subcommands).toContain('promote'); - expect(subcommands).toContain('rollback'); - 
expect(subcommands).toContain('clean'); - expect(subcommands).toContain('history'); - expect(subcommands).toContain('verify'); - expect(subcommands).toContain('squash'); + const subcommands = routes.map((r) => r.path.split("/")[1]); + + expect(subcommands).toContain("generate"); + expect(subcommands).toContain("test"); + expect(subcommands).toContain("status"); + expect(subcommands).toContain("promote"); + expect(subcommands).toContain("rollback"); + expect(subcommands).toContain("clean"); + expect(subcommands).toContain("history"); + expect(subcommands).toContain("verify"); + expect(subcommands).toContain("squash"); }); - it('should have schemas for all subcommands', () => { + it("should have schemas for all subcommands", () => { const routes = command.router.getRoutes(); - - routes.forEach(route => { + + routes.forEach((route) => { expect(route.hasSchema).toBe(true); expect(route.description).toBeTruthy(); }); }); }); - describe('Command Execution', () => { - it('should route to generate subcommand', async () => { + describe("Command Execution", () => { + it("should route to generate subcommand", async () => { const result = await command.execute({ - _: ['generate'], - name: 'test-migration' + _: ["generate"], + name: "test-migration", }); expect(result).toEqual({ generated: true, args: expect.objectContaining({ - name: 'test-migration' - }) + name: "test-migration", + }), }); }); - it('should route to status subcommand', async () => { + it("should route to status subcommand", async () => { const result = await command.execute({ - _: ['status'], - detailed: true + _: ["status"], + detailed: true, }); expect(result).toEqual({ - status: 'ready', + status: "ready", args: expect.objectContaining({ - detailed: true - }) + detailed: true, + }), }); }); }); - describe('Help System', () => { - it('should show general help when no subcommand provided', async () => { + describe("Help System", () => { + it("should show general help when no subcommand provided", async () => { await command.execute({}); - + expect(consoleLogSpy).toHaveBeenCalledWith( - expect.stringContaining('Usage: data db migrate ') + expect.stringContaining("Usage: data db migrate "), ); expect(consoleLogSpy).toHaveBeenCalledWith( - expect.stringContaining('Commands:') + expect.stringContaining("Commands:"), ); }); - it('should show subcommand help with --help flag', async () => { + it("should show subcommand help with --help flag", async () => { const result = await command.execute({ - _: ['generate'], - '--help': true + _: ["generate"], + "--help": true, }); expect(result).toEqual({ help: true }); expect(consoleLogSpy).toHaveBeenCalledWith( - expect.stringContaining('Usage: data migrate generate') + expect.stringContaining("Usage: data migrate generate"), ); }); }); - describe('Error Handling', () => { - it('should handle unknown subcommands gracefully', async () => { - await expect(command.execute({ - _: ['unknown-command'] - })).rejects.toThrow(); + describe("Error Handling", () => { + it("should handle unknown subcommands gracefully", async () => { + await expect( + command.execute({ + _: ["unknown-command"], + }), + ).rejects.toThrow(); expect(consoleErrorSpy).toHaveBeenCalledWith( - expect.stringContaining('Unknown migration command: unknown-command') + expect.stringContaining("Unknown migration command: unknown-command"), ); }); }); -}); \ No newline at end of file +}); diff --git a/test/TestRequirementAnalyzer.column.test.js b/test/TestRequirementAnalyzer.column.test.js index 995d897..c5159f2 100644 --- 
a/test/TestRequirementAnalyzer.column.test.js +++ b/test/TestRequirementAnalyzer.column.test.js @@ -3,199 +3,203 @@ * Validates the column-specific test requirement generation */ -import { describe, it, expect, beforeEach } from 'vitest'; -import { TestRequirementAnalyzer, TEST_TYPES, TEST_PRIORITIES } from '../src/lib/testing/TestRequirementAnalyzer.js'; - -describe('TestRequirementAnalyzer - Column Test Mapping', () => { +import { describe, it, expect, beforeEach } from "vitest"; +import { + TestRequirementAnalyzer, + TEST_TYPES, + TEST_PRIORITIES, +} from "../src/lib/testing/TestRequirementAnalyzer.js"; + +describe("TestRequirementAnalyzer - Column Test Mapping", () => { let analyzer; beforeEach(() => { analyzer = new TestRequirementAnalyzer(); }); - describe('generateColumnTestRequirements', () => { - it('should generate test requirements for ADD_COLUMN operation', () => { + describe("generateColumnTestRequirements", () => { + it("should generate test requirements for ADD_COLUMN operation", () => { const operation = { sql: "ALTER TABLE users ADD COLUMN email VARCHAR(255) NOT NULL DEFAULT 'user@example.com';", - type: 'ALTER_TABLE' + type: "ALTER_TABLE", }; const requirements = analyzer.generateColumnTestRequirements( operation, - 'users', - 'email', - 'ADD_COLUMN', - TEST_PRIORITIES.HIGH + "users", + "email", + "ADD_COLUMN", + TEST_PRIORITIES.HIGH, ); expect(requirements).toHaveLength(1); - + const req = requirements[0]; expect(req.type).toBe(TEST_TYPES.SCHEMA); expect(req.priority).toBe(TEST_PRIORITIES.HIGH); - expect(req.description).toContain('email added to users'); - expect(req.target).toBe('users.email'); - expect(req.testCases).toContain('has_column()'); - expect(req.testCases).toContain('col_type_is()'); - expect(req.testCases).toContain('col_not_null()'); + expect(req.description).toContain("email added to users"); + expect(req.target).toBe("users.email"); + expect(req.testCases).toContain("has_column()"); + expect(req.testCases).toContain("col_type_is()"); + expect(req.testCases).toContain("col_not_null()"); }); - it('should generate test requirements for DROP_COLUMN operation', () => { + it("should generate test requirements for DROP_COLUMN operation", () => { const operation = { sql: "ALTER TABLE users DROP COLUMN old_field;", - type: 'ALTER_TABLE' + type: "ALTER_TABLE", }; const requirements = analyzer.generateColumnTestRequirements( operation, - 'users', - 'old_field', - 'DROP_COLUMN', - TEST_PRIORITIES.HIGH + "users", + "old_field", + "DROP_COLUMN", + TEST_PRIORITIES.HIGH, ); expect(requirements).toHaveLength(2); // Drop test + comprehensive validation - + const dropReq = requirements[0]; expect(dropReq.type).toBe(TEST_TYPES.SCHEMA); expect(dropReq.priority).toBe(TEST_PRIORITIES.CRITICAL); - expect(dropReq.description).toContain('old_field dropped from users'); - expect(dropReq.testCases).toContain('hasnt_column()'); + expect(dropReq.description).toContain("old_field dropped from users"); + expect(dropReq.testCases).toContain("hasnt_column()"); expect(dropReq.metadata.destructive).toBe(true); }); - it('should generate test requirements for ALTER_TYPE operation', () => { + it("should generate test requirements for ALTER_TYPE operation", () => { const operation = { sql: "ALTER TABLE users ALTER COLUMN age TYPE INTEGER;", - type: 'ALTER_TABLE' + type: "ALTER_TABLE", }; const requirements = analyzer.generateColumnTestRequirements( operation, - 'users', - 'age', - 'ALTER_TYPE', - TEST_PRIORITIES.HIGH + "users", + "age", + "ALTER_TYPE", + TEST_PRIORITIES.HIGH, ); 
expect(requirements).toHaveLength(3); // Type change + data migration + comprehensive validation const typeReq = requirements[0]; expect(typeReq.type).toBe(TEST_TYPES.SCHEMA); - expect(typeReq.description).toContain('age type change in users'); - expect(typeReq.testCases).toContain('col_type_is()'); + expect(typeReq.description).toContain("age type change in users"); + expect(typeReq.testCases).toContain("col_type_is()"); const migrationReq = requirements[1]; expect(migrationReq.type).toBe(TEST_TYPES.DATA); expect(migrationReq.priority).toBe(TEST_PRIORITIES.CRITICAL); - expect(migrationReq.description).toContain('data migration'); + expect(migrationReq.description).toContain("data migration"); }); - it('should generate test requirements for SET_NOT_NULL operation', () => { + it("should generate test requirements for SET_NOT_NULL operation", () => { const operation = { sql: "ALTER TABLE users ALTER COLUMN name SET NOT NULL;", - type: 'ALTER_TABLE' + type: "ALTER_TABLE", }; const requirements = analyzer.generateColumnTestRequirements( operation, - 'users', - 'name', - 'SET_NOT_NULL', - TEST_PRIORITIES.HIGH + "users", + "name", + "SET_NOT_NULL", + TEST_PRIORITIES.HIGH, ); expect(requirements).toHaveLength(2); // NOT NULL test + comprehensive validation const nullReq = requirements[0]; expect(nullReq.type).toBe(TEST_TYPES.CONSTRAINT); - expect(nullReq.description).toContain('NOT NULL constraint'); - expect(nullReq.testCases).toContain('col_not_null()'); - expect(nullReq.metadata.constraintType).toBe('NOT NULL'); + expect(nullReq.description).toContain("NOT NULL constraint"); + expect(nullReq.testCases).toContain("col_not_null()"); + expect(nullReq.metadata.constraintType).toBe("NOT NULL"); }); - it('should generate test requirements for DROP_NOT_NULL operation', () => { + it("should generate test requirements for DROP_NOT_NULL operation", () => { const operation = { sql: "ALTER TABLE users ALTER COLUMN description DROP NOT NULL;", - type: 'ALTER_TABLE' + type: "ALTER_TABLE", }; const requirements = analyzer.generateColumnTestRequirements( operation, - 'users', - 'description', - 'DROP_NOT_NULL', - TEST_PRIORITIES.MEDIUM + "users", + "description", + "DROP_NOT_NULL", + TEST_PRIORITIES.MEDIUM, ); expect(requirements).toHaveLength(1); const req = requirements[0]; expect(req.type).toBe(TEST_TYPES.CONSTRAINT); - expect(req.description).toContain('nullable constraint removed'); - expect(req.testCases).toContain('col_is_null() - column allows nulls'); + expect(req.description).toContain("nullable constraint removed"); + expect(req.testCases).toContain("col_is_null() - column allows nulls"); expect(req.metadata.constraintRemoved).toBe(true); }); - it('should generate test requirements for SET_DEFAULT operation', () => { + it("should generate test requirements for SET_DEFAULT operation", () => { const operation = { sql: "ALTER TABLE users ALTER COLUMN status SET DEFAULT 'active';", - type: 'ALTER_TABLE' + type: "ALTER_TABLE", }; const requirements = analyzer.generateColumnTestRequirements( operation, - 'users', - 'status', - 'SET_DEFAULT', - TEST_PRIORITIES.MEDIUM + "users", + "status", + "SET_DEFAULT", + TEST_PRIORITIES.MEDIUM, ); expect(requirements).toHaveLength(1); const req = requirements[0]; expect(req.type).toBe(TEST_TYPES.CONSTRAINT); - expect(req.description).toContain('default value set'); - expect(req.testCases).toContain('col_has_default()'); - expect(req.testCases).toContain('col_default_is()'); + expect(req.description).toContain("default value set"); + 
expect(req.testCases).toContain("col_has_default()"); + expect(req.testCases).toContain("col_default_is()"); expect(req.metadata.requiresInsertTest).toBe(true); }); - it('should generate test requirements for DROP_DEFAULT operation', () => { + it("should generate test requirements for DROP_DEFAULT operation", () => { const operation = { sql: "ALTER TABLE users ALTER COLUMN status DROP DEFAULT;", - type: 'ALTER_TABLE' + type: "ALTER_TABLE", }; const requirements = analyzer.generateColumnTestRequirements( operation, - 'users', - 'status', - 'DROP_DEFAULT', - TEST_PRIORITIES.MEDIUM + "users", + "status", + "DROP_DEFAULT", + TEST_PRIORITIES.MEDIUM, ); expect(requirements).toHaveLength(1); const req = requirements[0]; expect(req.type).toBe(TEST_TYPES.CONSTRAINT); - expect(req.description).toContain('default value removed'); - expect(req.testCases).toContain('col_hasnt_default()'); + expect(req.description).toContain("default value removed"); + expect(req.testCases).toContain("col_hasnt_default()"); expect(req.metadata.defaultRemoved).toBe(true); }); - it('should handle unknown column operations gracefully', () => { + it("should handle unknown column operations gracefully", () => { const operation = { sql: "ALTER TABLE users ALTER COLUMN some_field SOME_UNKNOWN_OP;", - type: 'ALTER_TABLE' + type: "ALTER_TABLE", }; const requirements = analyzer.generateColumnTestRequirements( operation, - 'users', - 'some_field', - 'UNKNOWN_OP', - TEST_PRIORITIES.LOW + "users", + "some_field", + "UNKNOWN_OP", + TEST_PRIORITIES.LOW, ); expect(requirements).toHaveLength(1); @@ -203,24 +207,24 @@ describe('TestRequirementAnalyzer - Column Test Mapping', () => { const req = requirements[0]; expect(req.type).toBe(TEST_TYPES.SCHEMA); expect(req.priority).toBe(TEST_PRIORITIES.MEDIUM); - expect(req.description).toContain('column some_field operation'); - expect(req.testCases).toContain('has_column()'); - expect(req.metadata.operationType).toBe('UNKNOWN_OP'); + expect(req.description).toContain("column some_field operation"); + expect(req.testCases).toContain("has_column()"); + expect(req.metadata.operationType).toBe("UNKNOWN_OP"); }); }); - describe('_generateConstraintTests', () => { - it('should generate primary key constraint tests', () => { + describe("_generateConstraintTests", () => { + it("should generate primary key constraint tests", () => { const operation = { sql: "ALTER TABLE users ADD CONSTRAINT pk_users PRIMARY KEY (id);", - type: 'ALTER_TABLE' + type: "ALTER_TABLE", }; const requirements = analyzer._generateConstraintTests( operation, - 'users', - 'pk_users', - TEST_PRIORITIES.HIGH + "users", + "pk_users", + TEST_PRIORITIES.HIGH, ); expect(requirements).toHaveLength(1); @@ -228,21 +232,21 @@ describe('TestRequirementAnalyzer - Column Test Mapping', () => { const req = requirements[0]; expect(req.type).toBe(TEST_TYPES.CONSTRAINT); expect(req.priority).toBe(TEST_PRIORITIES.CRITICAL); - expect(req.description).toContain('primary key constraint'); - expect(req.testCases).toContain('has_pk()'); + expect(req.description).toContain("primary key constraint"); + expect(req.testCases).toContain("has_pk()"); }); - it('should generate foreign key constraint tests', () => { + it("should generate foreign key constraint tests", () => { const operation = { sql: "ALTER TABLE posts ADD CONSTRAINT fk_posts_user FOREIGN KEY (user_id) REFERENCES users(id) ON DELETE CASCADE;", - type: 'ALTER_TABLE' + type: "ALTER_TABLE", }; const requirements = analyzer._generateConstraintTests( operation, - 'posts', - 'fk_posts_user', - 
TEST_PRIORITIES.HIGH + "posts", + "fk_posts_user", + TEST_PRIORITIES.HIGH, ); expect(requirements).toHaveLength(1); @@ -250,60 +254,89 @@ describe('TestRequirementAnalyzer - Column Test Mapping', () => { const req = requirements[0]; expect(req.type).toBe(TEST_TYPES.CONSTRAINT); expect(req.priority).toBe(TEST_PRIORITIES.HIGH); - expect(req.description).toContain('foreign key constraint'); - expect(req.testCases).toContain('has_fk()'); - expect(req.testCases).toContain('fk_ok()'); - expect(req.metadata.referencedTable).toBe('users'); - expect(req.metadata.onDelete).toBe('CASCADE'); + expect(req.description).toContain("foreign key constraint"); + expect(req.testCases).toContain("has_fk()"); + expect(req.testCases).toContain("fk_ok()"); + expect(req.metadata.referencedTable).toBe("users"); + expect(req.metadata.onDelete).toBe("CASCADE"); }); - it('should generate check constraint tests', () => { + it("should generate check constraint tests", () => { const operation = { sql: "ALTER TABLE users ADD CONSTRAINT chk_age CHECK (age >= 18);", - type: 'ALTER_TABLE' + type: "ALTER_TABLE", }; const requirements = analyzer._generateConstraintTests( operation, - 'users', - 'chk_age', - TEST_PRIORITIES.HIGH + "users", + "chk_age", + TEST_PRIORITIES.HIGH, ); expect(requirements).toHaveLength(1); const req = requirements[0]; expect(req.type).toBe(TEST_TYPES.CONSTRAINT); - expect(req.description).toContain('check constraint'); - expect(req.testCases).toContain('has_check()'); - expect(req.testCases).toContain('check_test()'); - expect(req.metadata.checkExpression).toBe('age >= 18'); + expect(req.description).toContain("check constraint"); + expect(req.testCases).toContain("has_check()"); + expect(req.testCases).toContain("check_test()"); + expect(req.metadata.checkExpression).toBe("age >= 18"); }); }); - describe('Column parsing helpers', () => { - it('should extract column names correctly', () => { - expect(analyzer._extractColumnName("ADD COLUMN email VARCHAR(255)", "ADD COLUMN")).toBe("email"); - expect(analyzer._extractColumnName("DROP COLUMN old_field", "DROP COLUMN")).toBe("old_field"); - expect(analyzer._extractColumnName("ALTER COLUMN name TYPE TEXT", "ALTER COLUMN")).toBe("name"); + describe("Column parsing helpers", () => { + it("should extract column names correctly", () => { + expect( + analyzer._extractColumnName( + "ADD COLUMN email VARCHAR(255)", + "ADD COLUMN", + ), + ).toBe("email"); + expect( + analyzer._extractColumnName("DROP COLUMN old_field", "DROP COLUMN"), + ).toBe("old_field"); + expect( + analyzer._extractColumnName( + "ALTER COLUMN name TYPE TEXT", + "ALTER COLUMN", + ), + ).toBe("name"); }); - it('should parse column definitions correctly', () => { - const sql = "ADD COLUMN email VARCHAR(255) NOT NULL DEFAULT 'user@example.com' UNIQUE"; + it("should parse column definitions correctly", () => { + const sql = + "ADD COLUMN email VARCHAR(255) NOT NULL DEFAULT 'user@example.com' UNIQUE"; const metadata = analyzer._parseColumnConstraints(sql, "email"); - expect(metadata.type).toBe('VARCHAR(255)'); + expect(metadata.type).toBe("VARCHAR(255)"); expect(metadata.notNull).toBe(true); expect(metadata.hasDefault).toBe(true); expect(metadata.isUnique).toBe(true); expect(metadata.defaultValue).toBe("'user@example.com'"); }); - it('should identify constraint types correctly', () => { - expect(analyzer._identifyConstraintType("ADD CONSTRAINT pk_test PRIMARY KEY (id)")).toBe('PRIMARY_KEY'); - expect(analyzer._identifyConstraintType("ADD CONSTRAINT fk_test FOREIGN KEY (user_id) REFERENCES 
users(id)")).toBe('FOREIGN_KEY'); - expect(analyzer._identifyConstraintType("ADD CONSTRAINT uk_test UNIQUE (email)")).toBe('UNIQUE'); - expect(analyzer._identifyConstraintType("ADD CONSTRAINT chk_test CHECK (age > 0)")).toBe('CHECK'); + it("should identify constraint types correctly", () => { + expect( + analyzer._identifyConstraintType( + "ADD CONSTRAINT pk_test PRIMARY KEY (id)", + ), + ).toBe("PRIMARY_KEY"); + expect( + analyzer._identifyConstraintType( + "ADD CONSTRAINT fk_test FOREIGN KEY (user_id) REFERENCES users(id)", + ), + ).toBe("FOREIGN_KEY"); + expect( + analyzer._identifyConstraintType( + "ADD CONSTRAINT uk_test UNIQUE (email)", + ), + ).toBe("UNIQUE"); + expect( + analyzer._identifyConstraintType( + "ADD CONSTRAINT chk_test CHECK (age > 0)", + ), + ).toBe("CHECK"); }); }); -}); \ No newline at end of file +}); diff --git a/test/TestRequirementAnalyzer.rls.test.js b/test/TestRequirementAnalyzer.rls.test.js index 7bb4b94..35032d1 100644 --- a/test/TestRequirementAnalyzer.rls.test.js +++ b/test/TestRequirementAnalyzer.rls.test.js @@ -1,24 +1,28 @@ /** * Unit tests for RLS policy test mapping in TestRequirementAnalyzer - * + * * Tests the specific RLS functionality implemented for task T007 */ -import { describe, it, expect, beforeEach } from 'vitest'; -import { TestRequirementAnalyzer, TEST_TYPES, TEST_PRIORITIES } from '../src/lib/testing/TestRequirementAnalyzer.js'; +import { describe, it, expect, beforeEach } from "vitest"; +import { + TestRequirementAnalyzer, + TEST_TYPES, + TEST_PRIORITIES, +} from "../src/lib/testing/TestRequirementAnalyzer.js"; -describe('TestRequirementAnalyzer - RLS Policy Mapping', () => { +describe("TestRequirementAnalyzer - RLS Policy Mapping", () => { let analyzer; beforeEach(() => { analyzer = new TestRequirementAnalyzer(); }); - describe('CREATE POLICY mapping', () => { - it('should generate comprehensive test requirements for CREATE POLICY', async () => { + describe("CREATE POLICY mapping", () => { + it("should generate comprehensive test requirements for CREATE POLICY", async () => { const operation = { - sql: 'CREATE POLICY user_select_policy ON users FOR SELECT TO authenticated USING (auth.uid() = id)', - type: 'SAFE' + sql: "CREATE POLICY user_select_policy ON users FOR SELECT TO authenticated USING (auth.uid() = id)", + type: "SAFE", }; const requirements = await analyzer.determineTestRequirements(operation); @@ -27,50 +31,58 @@ describe('TestRequirementAnalyzer - RLS Policy Mapping', () => { expect(requirements.length).toBeGreaterThan(3); // Should include RLS and PERMISSION test types - const testTypes = requirements.map(req => req.type); + const testTypes = requirements.map((req) => req.type); expect(testTypes).toContain(TEST_TYPES.RLS); expect(testTypes).toContain(TEST_TYPES.PERMISSION); // All requirements should be CRITICAL priority for security - const priorities = requirements.map(req => req.priority); - priorities.forEach(priority => { + const priorities = requirements.map((req) => req.priority); + priorities.forEach((priority) => { expect(priority).toBe(TEST_PRIORITIES.CRITICAL); }); // Should test multiple user roles - const userRoleTests = requirements.filter(req => - req.description.includes('role anon') || - req.description.includes('role authenticated') || - req.description.includes('role service_role') + const userRoleTests = requirements.filter( + (req) => + req.description.includes("role anon") || + req.description.includes("role authenticated") || + req.description.includes("role service_role"), ); 
expect(userRoleTests.length).toBe(3); }); - it('should extract policy details correctly', () => { - const sql = 'CREATE POLICY test_policy ON users FOR SELECT TO authenticated, anon USING (auth.uid() = id) WITH CHECK (status = \'active\')'; + it("should extract policy details correctly", () => { + const sql = + "CREATE POLICY test_policy ON users FOR SELECT TO authenticated, anon USING (auth.uid() = id) WITH CHECK (status = 'active')"; const details = analyzer._extractPolicyDetails(sql); - expect(details.commands).toEqual(['SELECT']); - expect(details.roles).toEqual(['authenticated', 'anon']); + expect(details.commands).toEqual(["SELECT"]); + expect(details.roles).toEqual(["authenticated", "anon"]); expect(details.isPermissive).toBe(true); - expect(details.expression).toBe('auth.uid() = id'); - expect(details.checkExpression).toBe('status = \'active\''); + expect(details.expression).toBe("auth.uid() = id"); + expect(details.checkExpression).toBe("status = 'active'"); }); - it('should handle FOR ALL commands', () => { - const sql = 'CREATE POLICY admin_policy ON users FOR ALL TO admin USING (true)'; + it("should handle FOR ALL commands", () => { + const sql = + "CREATE POLICY admin_policy ON users FOR ALL TO admin USING (true)"; const details = analyzer._extractPolicyDetails(sql); - expect(details.commands).toEqual(['SELECT', 'INSERT', 'UPDATE', 'DELETE']); - expect(details.roles).toEqual(['admin']); + expect(details.commands).toEqual([ + "SELECT", + "INSERT", + "UPDATE", + "DELETE", + ]); + expect(details.roles).toEqual(["admin"]); }); }); - describe('ALTER POLICY mapping', () => { - it('should generate test requirements for ALTER POLICY', async () => { + describe("ALTER POLICY mapping", () => { + it("should generate test requirements for ALTER POLICY", async () => { const operation = { - sql: 'ALTER POLICY user_policy ON users TO authenticated, admin USING (auth.uid() = id OR auth.role() = \'admin\')', - type: 'WARNING' + sql: "ALTER POLICY user_policy ON users TO authenticated, admin USING (auth.uid() = id OR auth.role() = 'admin')", + type: "WARNING", }; const requirements = await analyzer.determineTestRequirements(operation); @@ -78,18 +90,18 @@ describe('TestRequirementAnalyzer - RLS Policy Mapping', () => { expect(requirements.length).toBeGreaterThan(2); // Should include altered security boundary tests - const alteredTests = requirements.filter(req => - req.metadata?.testType === 'altered_security_boundary' + const alteredTests = requirements.filter( + (req) => req.metadata?.testType === "altered_security_boundary", ); expect(alteredTests.length).toBeGreaterThan(0); }); }); - describe('DROP POLICY mapping', () => { - it('should generate test requirements for DROP POLICY', async () => { + describe("DROP POLICY mapping", () => { + it("should generate test requirements for DROP POLICY", async () => { const operation = { - sql: 'DROP POLICY user_policy ON users', - type: 'DESTRUCTIVE' + sql: "DROP POLICY user_policy ON users", + type: "DESTRUCTIVE", }; const requirements = await analyzer.determineTestRequirements(operation); @@ -97,24 +109,24 @@ describe('TestRequirementAnalyzer - RLS Policy Mapping', () => { expect(requirements.length).toBeGreaterThan(1); // Should include policy removal tests - const removalTests = requirements.filter(req => - req.metadata?.testType === 'policy_removal' + const removalTests = requirements.filter( + (req) => req.metadata?.testType === "policy_removal", ); expect(removalTests.length).toBe(1); // Should include post-drop security tests - const 
postDropTests = requirements.filter(req => - req.metadata?.testType === 'post_drop_security' + const postDropTests = requirements.filter( + (req) => req.metadata?.testType === "post_drop_security", ); expect(postDropTests.length).toBe(1); }); }); - describe('ENABLE RLS mapping', () => { - it('should generate test requirements for ENABLE ROW LEVEL SECURITY', async () => { + describe("ENABLE RLS mapping", () => { + it("should generate test requirements for ENABLE ROW LEVEL SECURITY", async () => { const operation = { - sql: 'ALTER TABLE users ENABLE ROW LEVEL SECURITY', - type: 'WARNING' + sql: "ALTER TABLE users ENABLE ROW LEVEL SECURITY", + type: "WARNING", }; const requirements = await analyzer.determineTestRequirements(operation); @@ -122,24 +134,24 @@ describe('TestRequirementAnalyzer - RLS Policy Mapping', () => { expect(requirements.length).toBeGreaterThan(1); // Should include is_rls_enabled test - const rlsEnabledTests = requirements.filter(req => - req.testCases.some(tc => tc.includes('is_rls_enabled')) + const rlsEnabledTests = requirements.filter((req) => + req.testCases.some((tc) => tc.includes("is_rls_enabled")), ); expect(rlsEnabledTests.length).toBe(1); // Should test security impact - const securityTests = requirements.filter(req => - req.metadata?.testType === 'rls_security_impact' + const securityTests = requirements.filter( + (req) => req.metadata?.testType === "rls_security_impact", ); expect(securityTests.length).toBe(1); }); }); - describe('DISABLE RLS mapping', () => { - it('should generate test requirements for DISABLE ROW LEVEL SECURITY', async () => { + describe("DISABLE RLS mapping", () => { + it("should generate test requirements for DISABLE ROW LEVEL SECURITY", async () => { const operation = { - sql: 'ALTER TABLE posts DISABLE ROW LEVEL SECURITY', - type: 'WARNING' + sql: "ALTER TABLE posts DISABLE ROW LEVEL SECURITY", + type: "WARNING", }; const requirements = await analyzer.determineTestRequirements(operation); @@ -147,56 +159,71 @@ describe('TestRequirementAnalyzer - RLS Policy Mapping', () => { expect(requirements.length).toBe(2); // Should include RLS disablement test - const disablementTests = requirements.filter(req => - req.metadata?.testType === 'rls_disablement' + const disablementTests = requirements.filter( + (req) => req.metadata?.testType === "rls_disablement", ); expect(disablementTests.length).toBe(1); // Should test security impact with HIGH priority (potential security risk) - const securityTests = requirements.filter(req => - req.metadata?.testType === 'rls_disable_security_impact' + const securityTests = requirements.filter( + (req) => req.metadata?.testType === "rls_disable_security_impact", ); expect(securityTests.length).toBe(1); expect(securityTests[0].priority).toBe(TEST_PRIORITIES.HIGH); }); }); - describe('Security test scenarios', () => { - it('should generate appropriate security test scenarios', () => { + describe("Security test scenarios", () => { + it("should generate appropriate security test scenarios", () => { const policyDetails = { - commands: ['SELECT', 'INSERT'], - roles: ['authenticated'], + commands: ["SELECT", "INSERT"], + roles: ["authenticated"], isPermissive: true, - expression: 'auth.uid() = user_id' + expression: "auth.uid() = user_id", }; - const scenarios = analyzer._generateSecurityTestScenarios(policyDetails, 'authenticated'); + const scenarios = analyzer._generateSecurityTestScenarios( + policyDetails, + "authenticated", + ); expect(scenarios.length).toBeGreaterThan(2); // Should include basic operation tests 
- const selectScenario = scenarios.find(s => s.operation === 'SELECT'); + const selectScenario = scenarios.find((s) => s.operation === "SELECT"); expect(selectScenario).toBeDefined(); - expect(selectScenario.role).toBe('authenticated'); + expect(selectScenario.role).toBe("authenticated"); - const insertScenario = scenarios.find(s => s.operation === 'INSERT'); + const insertScenario = scenarios.find((s) => s.operation === "INSERT"); expect(insertScenario).toBeDefined(); // Should include service_role bypass test - const bypassScenario = scenarios.find(s => s.role === 'service_role'); + const bypassScenario = scenarios.find((s) => s.role === "service_role"); expect(bypassScenario).toBeDefined(); expect(bypassScenario.shouldAllow).toBe(true); }); }); - describe('Operation categorization', () => { - it('should correctly categorize RLS operations', () => { + describe("Operation categorization", () => { + it("should correctly categorize RLS operations", () => { const testCases = [ - { sql: 'CREATE POLICY test ON users FOR SELECT', expected: 'CREATE_POLICY' }, - { sql: 'ALTER POLICY test ON users TO admin', expected: 'ALTER_POLICY' }, - { sql: 'DROP POLICY test ON users', expected: 'DROP_POLICY' }, - { sql: 'ALTER TABLE users ENABLE ROW LEVEL SECURITY', expected: 'ENABLE_RLS' }, - { sql: 'ALTER TABLE users DISABLE ROW LEVEL SECURITY', expected: 'DISABLE_RLS' } + { + sql: "CREATE POLICY test ON users FOR SELECT", + expected: "CREATE_POLICY", + }, + { + sql: "ALTER POLICY test ON users TO admin", + expected: "ALTER_POLICY", + }, + { sql: "DROP POLICY test ON users", expected: "DROP_POLICY" }, + { + sql: "ALTER TABLE users ENABLE ROW LEVEL SECURITY", + expected: "ENABLE_RLS", + }, + { + sql: "ALTER TABLE users DISABLE ROW LEVEL SECURITY", + expected: "DISABLE_RLS", + }, ]; for (const testCase of testCases) { @@ -207,12 +234,15 @@ describe('TestRequirementAnalyzer - RLS Policy Mapping', () => { }); }); - describe('Target object extraction', () => { - it('should correctly extract policy names', () => { + describe("Target object extraction", () => { + it("should correctly extract policy names", () => { const testCases = [ - { sql: 'CREATE POLICY user_select_policy ON users', expected: 'user_select_policy' }, - { sql: 'ALTER POLICY admin_policy ON posts', expected: 'admin_policy' }, - { sql: 'DROP POLICY old_policy ON comments', expected: 'old_policy' } + { + sql: "CREATE POLICY user_select_policy ON users", + expected: "user_select_policy", + }, + { sql: "ALTER POLICY admin_policy ON posts", expected: "admin_policy" }, + { sql: "DROP POLICY old_policy ON comments", expected: "old_policy" }, ]; for (const testCase of testCases) { @@ -222,4 +252,4 @@ describe('TestRequirementAnalyzer - RLS Policy Mapping', () => { } }); }); -}); \ No newline at end of file +}); diff --git a/test/TestTemplateGenerator.table.test.js b/test/TestTemplateGenerator.table.test.js index b90e5f3..85bf502 100644 --- a/test/TestTemplateGenerator.table.test.js +++ b/test/TestTemplateGenerator.table.test.js @@ -1,205 +1,213 @@ -import { describe, it, expect, beforeEach } from 'vitest'; -import TestTemplateGenerator from '../src/lib/testing/TestTemplateGenerator.js'; +import { describe, it, expect, beforeEach } from "vitest"; +import TestTemplateGenerator from "../src/lib/testing/TestTemplateGenerator.js"; -describe('TestTemplateGenerator - Table Tests', () => { +describe("TestTemplateGenerator - Table Tests", () => { let generator; beforeEach(() => { generator = new TestTemplateGenerator(); }); - describe('generateTableTemplate', () 
=> { - it('should generate basic table template with minimal configuration', () => { + describe("generateTableTemplate", () => { + it("should generate basic table template with minimal configuration", () => { const requirement = { - type: 'table', - name: 'users', - schema: 'public' + type: "table", + name: "users", + schema: "public", }; const template = generator.generateTemplate(requirement); expect(template).toBeDefined(); - expect(template.type).toBe('table'); - expect(template.filename).toBe('users.test.sql'); - expect(template.directory).toBe('001_table_tests'); - expect(template.content).toContain('CREATE OR REPLACE FUNCTION test.run_users_table_tests()'); - expect(template.content).toContain('has_table'); - expect(template.content).toContain('hasnt_view'); + expect(template.type).toBe("table"); + expect(template.filename).toBe("users.test.sql"); + expect(template.directory).toBe("001_table_tests"); + expect(template.content).toContain( + "CREATE OR REPLACE FUNCTION test.run_users_table_tests()", + ); + expect(template.content).toContain("has_table"); + expect(template.content).toContain("hasnt_view"); }); - it('should generate comprehensive table template with full metadata', () => { + it("should generate comprehensive table template with full metadata", () => { const requirement = { - type: 'table', - name: 'users', - schema: 'public', - description: 'User management table', + type: "table", + name: "users", + schema: "public", + description: "User management table", metadata: { columns: [ { - targetName: 'id', + targetName: "id", metadata: { - expectedType: 'uuid', + expectedType: "uuid", expectedNotNull: true, isPrimaryKey: true, expectedHasDefault: true, - expectedDefaultValue: 'gen_random_uuid()' - } + expectedDefaultValue: "gen_random_uuid()", + }, }, { - targetName: 'email', + targetName: "email", metadata: { - expectedType: 'text', + expectedType: "text", expectedNotNull: true, - isUnique: true - } - } + isUnique: true, + }, + }, ], indexes: [ { - targetName: 'users_pkey', + targetName: "users_pkey", metadata: { - indexedColumns: ['id'], + indexedColumns: ["id"], isUnique: true, - isPrimaryKey: true - } - } + isPrimaryKey: true, + }, + }, ], - expectedConstraints: ['users_email_check'], - requiresRowLevelSecurity: true - } + expectedConstraints: ["users_email_check"], + requiresRowLevelSecurity: true, + }, }; const template = generator.generateTemplate(requirement); // Basic template structure - expect(template.content).toContain('run_users_table_tests'); - expect(template.content).toContain('User management table'); + expect(template.content).toContain("run_users_table_tests"); + expect(template.content).toContain("User management table"); // Column tests - expect(template.content).toContain('has_column'); - expect(template.content).toContain('col_type_is'); - expect(template.content).toContain('col_not_null'); - expect(template.content).toContain('col_has_default'); - expect(template.content).toContain('col_default_is'); - expect(template.content).toContain('col_is_pk'); - expect(template.content).toContain('has_unique'); + expect(template.content).toContain("has_column"); + expect(template.content).toContain("col_type_is"); + expect(template.content).toContain("col_not_null"); + expect(template.content).toContain("col_has_default"); + expect(template.content).toContain("col_default_is"); + expect(template.content).toContain("col_is_pk"); + expect(template.content).toContain("has_unique"); // Index tests - expect(template.content).toContain('has_index'); - 
expect(template.content).toContain('index_is_unique'); - expect(template.content).toContain('has_pk'); + expect(template.content).toContain("has_index"); + expect(template.content).toContain("index_is_unique"); + expect(template.content).toContain("has_pk"); // Constraint tests - expect(template.content).toContain('has_check'); - expect(template.content).toContain('users_email_check'); + expect(template.content).toContain("has_check"); + expect(template.content).toContain("users_email_check"); // RLS tests - expect(template.content).toContain('rowsecurity = true'); - expect(template.content).toContain('pg_policies'); + expect(template.content).toContain("rowsecurity = true"); + expect(template.content).toContain("pg_policies"); // Privilege tests - expect(template.content).toContain('has_table_privilege'); - expect(template.content).toContain('service_role'); - expect(template.content).toContain('anon'); + expect(template.content).toContain("has_table_privilege"); + expect(template.content).toContain("service_role"); + expect(template.content).toContain("anon"); }); - it('should handle foreign key relationships', () => { + it("should handle foreign key relationships", () => { const requirement = { - type: 'table', - name: 'posts', + type: "table", + name: "posts", metadata: { columns: [ { - targetName: 'user_id', + targetName: "user_id", metadata: { - expectedType: 'uuid', + expectedType: "uuid", isForeignKey: true, - referencedTable: 'users', - referencedColumn: 'id' - } - } - ] - } + referencedTable: "users", + referencedColumn: "id", + }, + }, + ], + }, }; const template = generator.generateTemplate(requirement); - expect(template.content).toContain('fk_ok'); - expect(template.content).toContain('users'); - expect(template.content).toContain('user_id'); - expect(template.content).toContain('Foreign key user_id references users.id'); + expect(template.content).toContain("fk_ok"); + expect(template.content).toContain("users"); + expect(template.content).toContain("user_id"); + expect(template.content).toContain( + "Foreign key user_id references users.id", + ); }); - it('should calculate appropriate plan count for complex tables', () => { + it("should calculate appropriate plan count for complex tables", () => { const requirement = { - type: 'table', - name: 'complex_table', + type: "table", + name: "complex_table", metadata: { columns: [ - { targetName: 'id', metadata: {} }, - { targetName: 'name', metadata: {} }, - { targetName: 'email', metadata: {} } + { targetName: "id", metadata: {} }, + { targetName: "name", metadata: {} }, + { targetName: "email", metadata: {} }, ], indexes: [ - { targetName: 'idx1', metadata: {} }, - { targetName: 'idx2', metadata: {} } + { targetName: "idx1", metadata: {} }, + { targetName: "idx2", metadata: {} }, ], - expectedConstraints: ['constraint1', 'constraint2'], - requiresRowLevelSecurity: true - } + expectedConstraints: ["constraint1", "constraint2"], + requiresRowLevelSecurity: true, + }, }; const template = generator.generateTemplate(requirement); - + // Base count (12) + columns (3*2=6) + constraints (2*2=4) + indexes (2*2=4) + RLS (3) = 29 - expect(template.content).toContain('tap.plan(29)'); + expect(template.content).toContain("tap.plan(29)"); }); - it('should handle tables without optional metadata gracefully', () => { + it("should handle tables without optional metadata gracefully", () => { const requirement = { - type: 'table', - name: 'simple_table' + type: "table", + name: "simple_table", }; const template = 
generator.generateTemplate(requirement); - expect(template.content).toContain('run_simple_table_table_tests'); - expect(template.content).toContain('TODO: Add specific column tests'); - expect(template.content).toContain('TODO: Add constraint validation tests'); - expect(template.content).toContain('has_pk'); // Should still check for primary key - expect(template.content).not.toContain('ROW LEVEL SECURITY TESTS'); + expect(template.content).toContain("run_simple_table_table_tests"); + expect(template.content).toContain("TODO: Add specific column tests"); + expect(template.content).toContain( + "TODO: Add constraint validation tests", + ); + expect(template.content).toContain("has_pk"); // Should still check for primary key + expect(template.content).not.toContain("ROW LEVEL SECURITY TESTS"); }); - it('should generate proper SQL formatting', () => { + it("should generate proper SQL formatting", () => { const requirement = { - type: 'table', - name: 'test_table' + type: "table", + name: "test_table", }; const template = generator.generateTemplate(requirement); // Check proper pgTAP structure - expect(template.content).toContain('LANGUAGE plpgsql'); - expect(template.content).toContain('SECURITY DEFINER'); - expect(template.content).toContain('SET search_path = test, public, security'); - expect(template.content).toContain('RETURN NEXT tap.plan('); - expect(template.content).toContain('RETURN NEXT tap.finish();'); - expect(template.content).toContain('GRANT EXECUTE ON FUNCTION'); - expect(template.content).toContain('COMMENT ON FUNCTION'); + expect(template.content).toContain("LANGUAGE plpgsql"); + expect(template.content).toContain("SECURITY DEFINER"); + expect(template.content).toContain( + "SET search_path = test, public, security", + ); + expect(template.content).toContain("RETURN NEXT tap.plan("); + expect(template.content).toContain("RETURN NEXT tap.finish();"); + expect(template.content).toContain("GRANT EXECUTE ON FUNCTION"); + expect(template.content).toContain("COMMENT ON FUNCTION"); }); }); - describe('generateBatch', () => { - it('should handle batch table template generation', () => { + describe("generateBatch", () => { + it("should handle batch table template generation", () => { const requirements = [ { - type: 'table', - name: 'users' + type: "table", + name: "users", }, { - type: 'table', - name: 'posts' - } + type: "table", + name: "posts", + }, ]; const result = generator.generateBatch(requirements); @@ -208,28 +216,30 @@ describe('TestTemplateGenerator - Table Tests', () => { expect(result.errors).toHaveLength(0); expect(result.summary.table).toBe(2); expect(result.templates).toHaveLength(2); - expect(result.templates[0].filename).toBe('users.test.sql'); - expect(result.templates[1].filename).toBe('posts.test.sql'); + expect(result.templates[0].filename).toBe("users.test.sql"); + expect(result.templates[1].filename).toBe("posts.test.sql"); }); }); - describe('validation', () => { - it('should validate table requirements correctly', () => { + describe("validation", () => { + it("should validate table requirements correctly", () => { const validRequirement = { - type: 'table', - name: 'valid_table' + type: "table", + name: "valid_table", }; expect(() => generator.generateTemplate(validRequirement)).not.toThrow(); }); - it('should reject invalid table names', () => { + it("should reject invalid table names", () => { const invalidRequirement = { - type: 'table', - name: 'invalid-table-name' + type: "table", + name: "invalid-table-name", }; - expect(() => 
generator.generateTemplate(invalidRequirement)).toThrow('Name must contain only letters, numbers, and underscores'); + expect(() => generator.generateTemplate(invalidRequirement)).toThrow( + "Name must contain only letters, numbers, and underscores", + ); }); }); -}); \ No newline at end of file +}); diff --git a/test/config.validation.test.js b/test/config.validation.test.js index d8426d1..8c62ec5 100644 --- a/test/config.validation.test.js +++ b/test/config.validation.test.js @@ -1,269 +1,269 @@ -const { describe, it, expect } = require('vitest'); -const { +const { describe, it, expect } = require("vitest"); +const { DataConfigSchema, parsedataConfig, safeParsedataConfig, - mergeConfigs -} = require('../src/lib/schemas/DataConfigSchema'); + mergeConfigs, +} = require("../src/lib/schemas/DataConfigSchema"); -describe('dataConfigSchema', () => { - describe('parsedataConfig', () => { - it('should accept valid minimal config', () => { +describe("dataConfigSchema", () => { + describe("parsedataConfig", () => { + it("should accept valid minimal config", () => { const config = { environments: { local: { - db: 'postgresql://localhost/test' - } - } + db: "postgresql://localhost/test", + }, + }, }; - + expect(() => parsedataConfig(config)).not.toThrow(); }); - it('should accept empty config with defaults', () => { + it("should accept empty config with defaults", () => { const config = {}; expect(() => parsedataConfig(config)).not.toThrow(); }); - it('should reject invalid database URL', () => { + it("should reject invalid database URL", () => { const config = { environments: { local: { - db: 'not-a-postgresql-url' - } - } + db: "not-a-postgresql-url", + }, + }, }; - + expect(() => parsedataConfig(config)).toThrow(); }); - it('should reject invalid environment names', () => { + it("should reject invalid environment names", () => { const config = { environments: { - '123invalid': { - db: 'postgresql://localhost/test' - } - } + "123invalid": { + db: "postgresql://localhost/test", + }, + }, }; - + expect(() => parsedataConfig(config)).toThrow(); }); - it('should accept complete valid config', () => { + it("should accept complete valid config", () => { const config = { test: { minimum_coverage: 90, test_timeout: 600, - output_formats: ['console', 'json', 'junit'], + output_formats: ["console", "json", "junit"], parallel: true, - verbose: true + verbose: true, }, environments: { local: { - db: 'postgresql://localhost/test', - supabase_url: 'https://example.supabase.co', - supabase_anon_key: 'test-key', - supabase_service_role_key: 'service-key' + db: "postgresql://localhost/test", + supabase_url: "https://example.supabase.co", + supabase_anon_key: "test-key", + supabase_service_role_key: "service-key", }, staging: { - db: 'postgresql://staging/test' - } + db: "postgresql://staging/test", + }, }, paths: { - sql_dir: './custom/sql', - tests_dir: './custom/tests', - migrations_dir: './custom/migrations', - functions_dir: './custom/functions', - schemas_dir: './custom/schemas' + sql_dir: "./custom/sql", + tests_dir: "./custom/tests", + migrations_dir: "./custom/migrations", + functions_dir: "./custom/functions", + schemas_dir: "./custom/schemas", }, compile: { auto_squash: true, include_comments: false, - validate_syntax: true + validate_syntax: true, }, migrate: { auto_rollback: false, dry_run: true, lock_timeout: 30, - batch_size: 5 + batch_size: 5, }, functions: { deploy_on_migrate: true, - import_map: './custom/import_map.json', - verify_jwt: false + import_map: "./custom/import_map.json", + verify_jwt: 
false, }, safety: { require_prod_flag: false, require_confirmation: false, backup_before_migrate: true, - max_affected_rows: 50000 + max_affected_rows: 50000, }, logging: { - level: 'debug', - format: 'json', - timestamps: false - } + level: "debug", + format: "json", + timestamps: false, + }, }; - + const parsed = parsedataConfig(config); expect(parsed).toMatchObject(config); }); - it('should allow $schema property', () => { + it("should allow $schema property", () => { const config = { - $schema: './datarc.schema.json', + $schema: "./datarc.schema.json", environments: { local: { - db: 'postgresql://localhost/test' - } - } + db: "postgresql://localhost/test", + }, + }, }; - + expect(() => parsedataConfig(config)).not.toThrow(); }); - it('should reject unknown properties', () => { + it("should reject unknown properties", () => { const config = { - unknownProperty: 'value', + unknownProperty: "value", environments: { local: { - db: 'postgresql://localhost/test' - } - } + db: "postgresql://localhost/test", + }, + }, }; - + expect(() => parsedataConfig(config)).toThrow(); }); - it('should validate coverage range', () => { + it("should validate coverage range", () => { const invalidConfig = { test: { - minimum_coverage: 150 - } + minimum_coverage: 150, + }, }; - + expect(() => parsedataConfig(invalidConfig)).toThrow(); - + const validConfig = { test: { - minimum_coverage: 85 - } + minimum_coverage: 85, + }, }; - + expect(() => parsedataConfig(validConfig)).not.toThrow(); }); - it('should validate output formats', () => { + it("should validate output formats", () => { const invalidConfig = { test: { - output_formats: ['console', 'invalid-format'] - } + output_formats: ["console", "invalid-format"], + }, }; - + expect(() => parsedataConfig(invalidConfig)).toThrow(); - + const validConfig = { test: { - output_formats: ['console', 'json', 'junit', 'tap', 'html'] - } + output_formats: ["console", "json", "junit", "tap", "html"], + }, }; - + expect(() => parsedataConfig(validConfig)).not.toThrow(); }); - it('should validate logging levels', () => { + it("should validate logging levels", () => { const invalidConfig = { logging: { - level: 'invalid-level' - } + level: "invalid-level", + }, }; - + expect(() => parsedataConfig(invalidConfig)).toThrow(); - + const validConfig = { logging: { - level: 'debug' - } + level: "debug", + }, }; - + expect(() => parsedataConfig(validConfig)).not.toThrow(); }); }); - describe('safeParsedataConfig', () => { - it('should return success for valid config', () => { + describe("safeParsedataConfig", () => { + it("should return success for valid config", () => { const config = { environments: { local: { - db: 'postgresql://localhost/test' - } - } + db: "postgresql://localhost/test", + }, + }, }; - + const result = safeParsedataConfig(config); expect(result.success).toBe(true); expect(result.data).toMatchObject(config); }); - it('should return error for invalid config', () => { + it("should return error for invalid config", () => { const config = { environments: { local: { - db: 'invalid-url' - } - } + db: "invalid-url", + }, + }, }; - + const result = safeParsedataConfig(config); expect(result.success).toBe(false); expect(result.error).toBeDefined(); expect(result.error.errors).toHaveLength(1); }); - it('should provide detailed error information', () => { + it("should provide detailed error information", () => { const config = { test: { - minimum_coverage: 'not-a-number', - test_timeout: -5 + minimum_coverage: "not-a-number", + test_timeout: -5, }, environments: { - '': 
{ - db: 'not-a-url' - } - } + "": { + db: "not-a-url", + }, + }, }; - + const result = safeParsedataConfig(config); expect(result.success).toBe(false); expect(result.error.errors.length).toBeGreaterThan(1); }); }); - describe('mergeConfigs', () => { - it('should merge base and override configs', () => { + describe("mergeConfigs", () => { + it("should merge base and override configs", () => { const base = { test: { minimum_coverage: 80, - test_timeout: 300 + test_timeout: 300, }, environments: { local: { - db: 'postgresql://localhost/base' - } - } + db: "postgresql://localhost/base", + }, + }, }; - + const override = { test: { - minimum_coverage: 90 + minimum_coverage: 90, }, environments: { prod: { - db: 'postgresql://prod/db' - } - } + db: "postgresql://prod/db", + }, + }, }; - + const merged = mergeConfigs(base, override); expect(merged.test.minimum_coverage).toBe(90); expect(merged.test.test_timeout).toBe(300); @@ -271,44 +271,44 @@ describe('dataConfigSchema', () => { expect(merged.environments.prod).toBeDefined(); }); - it('should validate merged result', () => { + it("should validate merged result", () => { const base = { test: { - minimum_coverage: 80 - } + minimum_coverage: 80, + }, }; - + const override = { test: { - minimum_coverage: 200 // Invalid - } + minimum_coverage: 200, // Invalid + }, }; - + expect(() => mergeConfigs(base, override)).toThrow(); }); - it('should handle empty configs', () => { + it("should handle empty configs", () => { const base = {}; const override = {}; - + expect(() => mergeConfigs(base, override)).not.toThrow(); }); - it('should override arrays completely', () => { + it("should override arrays completely", () => { const base = { test: { - output_formats: ['console', 'json'] - } + output_formats: ["console", "json"], + }, }; - + const override = { test: { - output_formats: ['junit'] - } + output_formats: ["junit"], + }, }; - + const merged = mergeConfigs(base, override); - expect(merged.test.output_formats).toEqual(['junit']); + expect(merged.test.output_formats).toEqual(["junit"]); }); }); -}); \ No newline at end of file +}); diff --git a/test/fixtures/example-project/.datarc.json b/test/fixtures/example-project/.datarc.json index 0a034c1..482778f 100644 --- a/test/fixtures/example-project/.datarc.json +++ b/test/fixtures/example-project/.datarc.json @@ -10,4 +10,4 @@ "db": "postgresql://postgres:postgres@localhost:54322/postgres" } } -} \ No newline at end of file +} diff --git a/test/fixtures/example-project/functions/hello/index.ts b/test/fixtures/example-project/functions/hello/index.ts index 7d2145d..fefdb85 100644 --- a/test/fixtures/example-project/functions/hello/index.ts +++ b/test/fixtures/example-project/functions/hello/index.ts @@ -1,9 +1,9 @@ // Example Edge Function -import { serve } from "https://deno.land/std@0.168.0/http/server.ts" +import { serve } from "https://deno.land/std@0.168.0/http/server.ts"; serve(async (req) => { return new Response( JSON.stringify({ message: "Hello from Edge Function!" 
}), { headers: { "Content-Type": "application/json" } }, - ) -}) \ No newline at end of file + ); +}); diff --git a/test/formatters.test.js b/test/formatters.test.js index fc9cead..64b2d29 100644 --- a/test/formatters.test.js +++ b/test/formatters.test.js @@ -2,10 +2,13 @@ * Test the new formatters (JUnit XML and JSON) */ -import { describe, it, expect } from 'vitest'; -import { JUnitFormatter, JSONFormatter } from '../src/lib/test/formatters/index.js'; +import { describe, it, expect } from "vitest"; +import { + JUnitFormatter, + JSONFormatter, +} from "../src/lib/test/formatters/index.js"; -describe('Test Result Formatters', () => { +describe("Test Result Formatters", () => { const mockResults = { total: 3, passed: 2, @@ -13,102 +16,106 @@ describe('Test Result Formatters', () => { skipped: 0, tests: [ { - description: 'test_get_pets: Should return available pets', - status: 'pass', - function: 'run_pet_tests', - number: 1 + description: "test_get_pets: Should return available pets", + status: "pass", + function: "run_pet_tests", + number: 1, }, { - description: 'test_create_application: Should create application', - status: 'fail', - function: 'run_application_tests', - number: 2 + description: "test_create_application: Should create application", + status: "fail", + function: "run_application_tests", + number: 2, }, { - description: 'test_donation: Should process donation', - status: 'pass', - function: 'run_payment_tests', - number: 3 - } + description: "test_donation: Should process donation", + status: "pass", + function: "run_payment_tests", + number: 3, + }, ], - diagnostics: ['Database connection successful'], + diagnostics: ["Database connection successful"], testFunctions: [ { - name: 'run_pet_tests', + name: "run_pet_tests", passed: 1, failed: 0, skipped: 0, total: 1, - success: true + success: true, }, { - name: 'run_application_tests', + name: "run_application_tests", passed: 0, failed: 1, skipped: 0, total: 1, - success: false + success: false, }, { - name: 'run_payment_tests', + name: "run_payment_tests", passed: 1, failed: 0, skipped: 0, total: 1, - success: true - } - ] + success: true, + }, + ], }; - describe('JUnitFormatter', () => { - it('should format results as valid JUnit XML', () => { + describe("JUnitFormatter", () => { + it("should format results as valid JUnit XML", () => { const formatter = new JUnitFormatter(); const xml = formatter.format(mockResults); - + expect(xml).toContain(''); - expect(xml).toContain(''); - expect(xml).toContain('"); + expect(xml).toContain( + ''); - expect(xml).toContain(''); + expect(xml).toContain(""); }); - it('should escape XML special characters', () => { + it("should escape XML special characters", () => { const resultsWithSpecialChars = { ...mockResults, tests: [ { description: 'test with & "characters"', - status: 'pass', - function: 'test_func', - number: 1 - } + status: "pass", + function: "test_func", + number: 1, + }, ], total: 1, passed: 1, failed: 0, - testFunctions: [] + testFunctions: [], }; - + const formatter = new JUnitFormatter(); const xml = formatter.format(resultsWithSpecialChars); - - expect(xml).toContain('<special> & "characters"'); + + expect(xml).toContain("<special> & "characters""); }); - it('should return .xml file extension', () => { + it("should return .xml file extension", () => { const formatter = new JUnitFormatter(); - expect(formatter.getFileExtension()).toBe('.xml'); + expect(formatter.getFileExtension()).toBe(".xml"); }); }); - describe('JSONFormatter', () => { - it('should format results as 
valid JSON', () => { + describe("JSONFormatter", () => { + it("should format results as valid JSON", () => { const formatter = new JSONFormatter(); const jsonString = formatter.format(mockResults); - + const parsed = JSON.parse(jsonString); - + expect(parsed.stats.total).toBe(3); expect(parsed.stats.passed).toBe(2); expect(parsed.stats.failed).toBe(1); @@ -117,31 +124,31 @@ describe('Test Result Formatters', () => { expect(parsed.testFunctions).toHaveLength(3); }); - it('should include metadata', () => { + it("should include metadata", () => { const formatter = new JSONFormatter(); const jsonString = formatter.format(mockResults); const parsed = JSON.parse(jsonString); - + expect(parsed.metadata).toBeDefined(); - expect(parsed.metadata.format).toBe('json'); - expect(parsed.metadata.version).toBe('1.0'); + expect(parsed.metadata.format).toBe("json"); + expect(parsed.metadata.version).toBe("1.0"); expect(parsed.metadata.timestamp).toBeDefined(); expect(parsed.metadata.duration).toBeDefined(); }); - it('should calculate pass rates', () => { + it("should calculate pass rates", () => { const formatter = new JSONFormatter(); const jsonString = formatter.format(mockResults); const parsed = JSON.parse(jsonString); - - expect(parsed.summary.passRate).toBe('66.7'); // 2/3 * 100 - expect(parsed.summary.failRate).toBe('33.3'); // 1/3 * 100 - expect(parsed.summary.skipRate).toBe('0.0'); // 0/3 * 100 + + expect(parsed.summary.passRate).toBe("66.7"); // 2/3 * 100 + expect(parsed.summary.failRate).toBe("33.3"); // 1/3 * 100 + expect(parsed.summary.skipRate).toBe("0.0"); // 0/3 * 100 }); - it('should return .json file extension', () => { + it("should return .json file extension", () => { const formatter = new JSONFormatter(); - expect(formatter.getFileExtension()).toBe('.json'); + expect(formatter.getFileExtension()).toBe(".json"); }); }); -}); \ No newline at end of file +}); diff --git a/test/function-parsing.test.js b/test/function-parsing.test.js index cb6abbc..ea96417 100644 --- a/test/function-parsing.test.js +++ b/test/function-parsing.test.js @@ -1,220 +1,230 @@ /** * Test function assertion parsing in pgTAPTestScanner */ -import { describe, it, expect, beforeEach } from 'vitest'; -import pgTAPTestScanner from '../src/lib/testing/pgTAPTestScanner.js'; +import { describe, it, expect, beforeEach } from "vitest"; +import pgTAPTestScanner from "../src/lib/testing/pgTAPTestScanner.js"; -describe('pgTAPTestScanner Function Assertion Parsing', () => { +describe("pgTAPTestScanner Function Assertion Parsing", () => { let scanner; beforeEach(() => { scanner = new pgTAPTestScanner({ - validatePlans: false + validatePlans: false, }); }); - describe('has_function assertion parsing', () => { - it('should parse has_function with just function name', () => { + describe("has_function assertion parsing", () => { + it("should parse has_function with just function name", () => { const sql = "SELECT has_function('user_count');"; const assertions = scanner.extractAssertions(sql); - + expect(assertions).toHaveLength(1); - expect(assertions[0].type).toBe('has_function'); - expect(assertions[0].target).toBe('user_count'); + expect(assertions[0].type).toBe("has_function"); + expect(assertions[0].target).toBe("user_count"); expect(assertions[0].functionMetadata).toEqual({ - name: 'user_count' + name: "user_count", }); }); - it('should parse has_function with schema and function name', () => { + it("should parse has_function with schema and function name", () => { const sql = "SELECT has_function('public', 'user_count');"; 
const assertions = scanner.extractAssertions(sql); - + expect(assertions).toHaveLength(1); - expect(assertions[0].type).toBe('has_function'); - expect(assertions[0].target).toBe('public.user_count'); + expect(assertions[0].type).toBe("has_function"); + expect(assertions[0].target).toBe("public.user_count"); expect(assertions[0].functionMetadata).toEqual({ - schema: 'public', - name: 'user_count' + schema: "public", + name: "user_count", }); }); - it('should parse has_function with parameters', () => { - const sql = "SELECT has_function('user_count', ARRAY['integer', 'text']);"; + it("should parse has_function with parameters", () => { + const sql = + "SELECT has_function('user_count', ARRAY['integer', 'text']);"; const assertions = scanner.extractAssertions(sql); - + expect(assertions).toHaveLength(1); - expect(assertions[0].type).toBe('has_function'); - expect(assertions[0].target).toBe('user_count'); + expect(assertions[0].type).toBe("has_function"); + expect(assertions[0].target).toBe("user_count"); expect(assertions[0].functionMetadata).toEqual({ - name: 'user_count', - parameters: ['integer', 'text'] + name: "user_count", + parameters: ["integer", "text"], }); }); - it('should parse has_function with schema, function name and parameters', () => { - const sql = "SELECT has_function('public', 'user_count', ARRAY['integer', 'text']);"; + it("should parse has_function with schema, function name and parameters", () => { + const sql = + "SELECT has_function('public', 'user_count', ARRAY['integer', 'text']);"; const assertions = scanner.extractAssertions(sql); - + expect(assertions).toHaveLength(1); - expect(assertions[0].type).toBe('has_function'); - expect(assertions[0].target).toBe('public.user_count'); + expect(assertions[0].type).toBe("has_function"); + expect(assertions[0].target).toBe("public.user_count"); expect(assertions[0].functionMetadata).toEqual({ - schema: 'public', - name: 'user_count', - parameters: ['integer', 'text'] + schema: "public", + name: "user_count", + parameters: ["integer", "text"], }); }); }); - describe('function_returns assertion parsing', () => { - it('should parse function_returns with function name and return type', () => { + describe("function_returns assertion parsing", () => { + it("should parse function_returns with function name and return type", () => { const sql = "SELECT function_returns('user_count', 'integer');"; const assertions = scanner.extractAssertions(sql); - + expect(assertions).toHaveLength(1); - expect(assertions[0].type).toBe('function_returns'); - expect(assertions[0].target).toBe('user_count'); + expect(assertions[0].type).toBe("function_returns"); + expect(assertions[0].target).toBe("user_count"); expect(assertions[0].functionMetadata).toEqual({ - name: 'user_count', - returnType: 'integer' + name: "user_count", + returnType: "integer", }); }); - it('should parse function_returns with schema, function name and return type', () => { + it("should parse function_returns with schema, function name and return type", () => { const sql = "SELECT function_returns('public', 'user_count', 'integer');"; const assertions = scanner.extractAssertions(sql); - + expect(assertions).toHaveLength(1); - expect(assertions[0].type).toBe('function_returns'); - expect(assertions[0].target).toBe('public.user_count'); + expect(assertions[0].type).toBe("function_returns"); + expect(assertions[0].target).toBe("public.user_count"); expect(assertions[0].functionMetadata).toEqual({ - schema: 'public', - name: 'user_count', - returnType: 'integer' + schema: 
"public", + name: "user_count", + returnType: "integer", }); }); - it('should parse function_returns with parameters', () => { - const sql = "SELECT function_returns('user_count', ARRAY['text', 'integer'], 'boolean');"; + it("should parse function_returns with parameters", () => { + const sql = + "SELECT function_returns('user_count', ARRAY['text', 'integer'], 'boolean');"; const assertions = scanner.extractAssertions(sql); - + expect(assertions).toHaveLength(1); - expect(assertions[0].type).toBe('function_returns'); - expect(assertions[0].target).toBe('user_count'); + expect(assertions[0].type).toBe("function_returns"); + expect(assertions[0].target).toBe("user_count"); expect(assertions[0].functionMetadata).toEqual({ - name: 'user_count', - parameters: ['text', 'integer'], - returnType: 'boolean' + name: "user_count", + parameters: ["text", "integer"], + returnType: "boolean", }); }); }); - describe('function_lang_is assertion parsing', () => { - it('should parse function_lang_is', () => { + describe("function_lang_is assertion parsing", () => { + it("should parse function_lang_is", () => { const sql = "SELECT function_lang_is('user_count', 'plpgsql');"; const assertions = scanner.extractAssertions(sql); - + expect(assertions).toHaveLength(1); - expect(assertions[0].type).toBe('function_lang_is'); - expect(assertions[0].target).toBe('user_count'); + expect(assertions[0].type).toBe("function_lang_is"); + expect(assertions[0].target).toBe("user_count"); expect(assertions[0].functionMetadata).toEqual({ - name: 'user_count', - language: 'plpgsql' + name: "user_count", + language: "plpgsql", }); }); }); - describe('is_definer assertion parsing', () => { - it('should parse is_definer', () => { + describe("is_definer assertion parsing", () => { + it("should parse is_definer", () => { const sql = "SELECT is_definer('secure_function');"; const assertions = scanner.extractAssertions(sql); - + expect(assertions).toHaveLength(1); - expect(assertions[0].type).toBe('is_definer'); - expect(assertions[0].target).toBe('secure_function'); + expect(assertions[0].type).toBe("is_definer"); + expect(assertions[0].target).toBe("secure_function"); expect(assertions[0].functionMetadata).toEqual({ - name: 'secure_function', - isSecurityDefiner: true + name: "secure_function", + isSecurityDefiner: true, }); }); - it('should parse isnt_definer', () => { + it("should parse isnt_definer", () => { const sql = "SELECT isnt_definer('normal_function');"; const assertions = scanner.extractAssertions(sql); - + expect(assertions).toHaveLength(1); - expect(assertions[0].type).toBe('isnt_definer'); - expect(assertions[0].target).toBe('normal_function'); + expect(assertions[0].type).toBe("isnt_definer"); + expect(assertions[0].target).toBe("normal_function"); expect(assertions[0].functionMetadata).toEqual({ - name: 'normal_function', - isSecurityDefiner: false + name: "normal_function", + isSecurityDefiner: false, }); }); }); - describe('volatility_is assertion parsing', () => { - it('should parse volatility_is', () => { + describe("volatility_is assertion parsing", () => { + it("should parse volatility_is", () => { const sql = "SELECT volatility_is('pure_function', 'immutable');"; const assertions = scanner.extractAssertions(sql); - + expect(assertions).toHaveLength(1); - expect(assertions[0].type).toBe('volatility_is'); - expect(assertions[0].target).toBe('pure_function'); + expect(assertions[0].type).toBe("volatility_is"); + expect(assertions[0].target).toBe("pure_function"); expect(assertions[0].functionMetadata).toEqual({ 
- name: 'pure_function', - volatility: 'immutable' + name: "pure_function", + volatility: "immutable", }); }); }); - describe('function_privs_are assertion parsing', () => { - it('should parse function_privs_are with basic pattern', () => { - const sql = "SELECT function_privs_are('calc_func', 'app_user', ARRAY['EXECUTE']);"; + describe("function_privs_are assertion parsing", () => { + it("should parse function_privs_are with basic pattern", () => { + const sql = + "SELECT function_privs_are('calc_func', 'app_user', ARRAY['EXECUTE']);"; const assertions = scanner.extractAssertions(sql); - + expect(assertions).toHaveLength(1); - expect(assertions[0].type).toBe('function_privs_are'); - expect(assertions[0].target).toBe('calc_func'); + expect(assertions[0].type).toBe("function_privs_are"); + expect(assertions[0].target).toBe("calc_func"); expect(assertions[0].functionMetadata).toEqual({ - name: 'calc_func', - role: 'app_user', - privileges: ['EXECUTE'] + name: "calc_func", + role: "app_user", + privileges: ["EXECUTE"], }); }); }); - describe('function coverage tracking', () => { - it('should track function coverage in coverage map', () => { + describe("function coverage tracking", () => { + it("should track function coverage in coverage map", () => { const sql = ` SELECT has_function('public', 'user_count'); SELECT function_returns('public', 'user_count', 'integer'); SELECT function_lang_is('public', 'user_count', 'sql'); SELECT is_definer('public', 'admin_func'); `; - + const assertions = scanner.extractAssertions(sql); expect(assertions).toHaveLength(4); - + // Mock test file structure for coverage map building - scanner.testFiles = [{ - filePath: '/test/functions.sql', - fileName: 'functions.sql', - assertions: assertions, - planCount: 4, - dependencies: [], - metadata: {} - }]; - + scanner.testFiles = [ + { + filePath: "/test/functions.sql", + fileName: "functions.sql", + assertions: assertions, + planCount: 4, + dependencies: [], + metadata: {}, + }, + ]; + scanner._buildCoverageMap(); - + const coverage = scanner.getCoverageMap(); - expect(coverage.functions).toHaveProperty('public.user_count'); - expect(coverage.functions).toHaveProperty('public.admin_func'); - expect(coverage.functions['public.user_count']).toContain('has_function'); - expect(coverage.functions['public.user_count']).toContain('function_returns'); - expect(coverage.functions['public.user_count']).toContain('function_lang_is'); - expect(coverage.functions['public.admin_func']).toContain('is_definer'); + expect(coverage.functions).toHaveProperty("public.user_count"); + expect(coverage.functions).toHaveProperty("public.admin_func"); + expect(coverage.functions["public.user_count"]).toContain("has_function"); + expect(coverage.functions["public.user_count"]).toContain( + "function_returns", + ); + expect(coverage.functions["public.user_count"]).toContain( + "function_lang_is", + ); + expect(coverage.functions["public.admin_func"]).toContain("is_definer"); }); }); -}); \ No newline at end of file +}); diff --git a/test/integration/coverage-enforcement.test.js b/test/integration/coverage-enforcement.test.js index 85068dc..d003270 100644 --- a/test/integration/coverage-enforcement.test.js +++ b/test/integration/coverage-enforcement.test.js @@ -1,54 +1,54 @@ /** * End-to-End Integration Tests for Test Coverage Enforcement System - * + * * These tests verify the complete workflow from migration analysis through * coverage checking to deployment blocking, ensuring all components work * together correctly. 
*/ -const { describe, it, expect, beforeEach, afterEach } = require('vitest'); -const MigrationOrchestrator = require('../../src/lib/migration/MigrationOrchestrator'); -const TestCoverageOrchestrator = require('../../src/lib/testing/TestCoverageOrchestrator'); -const TestRequirementAnalyzer = require('../../src/lib/testing/TestRequirementAnalyzer'); -const pgTAPTestScanner = require('../../src/lib/testing/pgTAPTestScanner'); -const TestTemplateGenerator = require('../../src/lib/testing/TestTemplateGenerator'); -const fs = require('fs').promises; -const path = require('path'); -const os = require('os'); - -describe('Test Coverage Enforcement - End-to-End Integration', () => { +const { describe, it, expect, beforeEach, afterEach } = require("vitest"); +const MigrationOrchestrator = require("../../src/lib/migration/MigrationOrchestrator"); +const TestCoverageOrchestrator = require("../../src/lib/testing/TestCoverageOrchestrator"); +const TestRequirementAnalyzer = require("../../src/lib/testing/TestRequirementAnalyzer"); +const pgTAPTestScanner = require("../../src/lib/testing/pgTAPTestScanner"); +const TestTemplateGenerator = require("../../src/lib/testing/TestTemplateGenerator"); +const fs = require("fs").promises; +const path = require("path"); +const os = require("os"); + +describe("Test Coverage Enforcement - End-to-End Integration", () => { let tempDir; let orchestrator; let coverageOrchestrator; - + beforeEach(async () => { // Create temp directory structure for testing tempDir = path.join(os.tmpdir(), `data-test-${Date.now()}`); await fs.mkdir(tempDir, { recursive: true }); - await fs.mkdir(path.join(tempDir, 'sql'), { recursive: true }); - await fs.mkdir(path.join(tempDir, 'tests'), { recursive: true }); - await fs.mkdir(path.join(tempDir, 'migrations'), { recursive: true }); - + await fs.mkdir(path.join(tempDir, "sql"), { recursive: true }); + await fs.mkdir(path.join(tempDir, "tests"), { recursive: true }); + await fs.mkdir(path.join(tempDir, "migrations"), { recursive: true }); + // Initialize orchestrators with test configuration orchestrator = new MigrationOrchestrator({ - sqlDir: path.join(tempDir, 'sql'), - testsDir: path.join(tempDir, 'tests'), - migrationsDir: path.join(tempDir, 'migrations'), + sqlDir: path.join(tempDir, "sql"), + testsDir: path.join(tempDir, "tests"), + migrationsDir: path.join(tempDir, "migrations"), skipTests: true, // Skip unit tests, focus on coverage skipCoverage: false, - coverageEnforcementLevel: 'strict', + coverageEnforcementLevel: "strict", coverageBypassReason: null, - isProd: false + isProd: false, }); - + coverageOrchestrator = new TestCoverageOrchestrator({ - testsDir: path.join(tempDir, 'tests'), - sqlDir: path.join(tempDir, 'sql'), - enforcementLevel: 'strict', - generateTemplates: true + testsDir: path.join(tempDir, "tests"), + sqlDir: path.join(tempDir, "sql"), + enforcementLevel: "strict", + generateTemplates: true, }); }); - + afterEach(async () => { // Cleanup temp directory try { @@ -57,12 +57,12 @@ describe('Test Coverage Enforcement - End-to-End Integration', () => { // Ignore cleanup errors } }); - - describe('Deployment Blocking with Critical Issues Fixed', () => { - it('should block deployment when coverage is insufficient', async () => { + + describe("Deployment Blocking with Critical Issues Fixed", () => { + it("should block deployment when coverage is insufficient", async () => { // Create SQL changes without tests await fs.writeFile( - path.join(tempDir, 'sql', 'tables.sql'), + path.join(tempDir, "sql", "tables.sql"), ` CREATE 
TABLE users ( id UUID PRIMARY KEY DEFAULT gen_random_uuid(), @@ -77,12 +77,12 @@ describe('Test Coverage Enforcement - End-to-End Integration', () => { CREATE POLICY users_select_own ON users FOR SELECT USING (auth.uid() = id); - ` + `, ); - + // Create insufficient test coverage (missing RLS and index tests) await fs.writeFile( - path.join(tempDir, 'tests', 'users.test.sql'), + path.join(tempDir, "tests", "users.test.sql"), ` BEGIN; SELECT plan(3); @@ -93,55 +93,56 @@ describe('Test Coverage Enforcement - End-to-End Integration', () => { SELECT finish(); ROLLBACK; - ` + `, ); - + // Mock the migration operations that would come from AST analysis const operations = [ { - sql: 'CREATE TABLE users...', - type: 'SAFE', - description: 'Create users table' + sql: "CREATE TABLE users...", + type: "SAFE", + description: "Create users table", }, { - sql: 'CREATE INDEX idx_users_email...', - type: 'SAFE', - description: 'Create email index' + sql: "CREATE INDEX idx_users_email...", + type: "SAFE", + description: "Create email index", }, { - sql: 'CREATE POLICY users_select_own...', - type: 'WARNING', - description: 'Create RLS policy' - } + sql: "CREATE POLICY users_select_own...", + type: "WARNING", + description: "Create RLS policy", + }, ]; - + // Test that coverage check fails const result = await coverageOrchestrator.checkCoverage(operations); - + expect(result.passed).toBe(false); expect(result.gaps).toHaveLength(3); // Missing index, RLS, and policy tests expect(result.coveragePercentage).toBeLessThan(50); - + // Verify deployment would be blocked - await expect(orchestrator.checkTestCoverage(operations)) - .rejects.toThrow(/Test coverage requirements not met/); + await expect(orchestrator.checkTestCoverage(operations)).rejects.toThrow( + /Test coverage requirements not met/, + ); }); - - it('should allow deployment when coverage is sufficient', async () => { + + it("should allow deployment when coverage is sufficient", async () => { // Create SQL changes await fs.writeFile( - path.join(tempDir, 'sql', 'simple.sql'), + path.join(tempDir, "sql", "simple.sql"), ` CREATE TABLE posts ( id SERIAL PRIMARY KEY, title TEXT NOT NULL ); - ` + `, ); - + // Create comprehensive test coverage await fs.writeFile( - path.join(tempDir, 'tests', 'posts.test.sql'), + path.join(tempDir, "tests", "posts.test.sql"), ` BEGIN; SELECT plan(5); @@ -154,108 +155,112 @@ describe('Test Coverage Enforcement - End-to-End Integration', () => { SELECT finish(); ROLLBACK; - ` + `, ); - + const operations = [ { - sql: 'CREATE TABLE posts (id SERIAL PRIMARY KEY, title TEXT NOT NULL);', - type: 'SAFE', - description: 'Create posts table' - } + sql: "CREATE TABLE posts (id SERIAL PRIMARY KEY, title TEXT NOT NULL);", + type: "SAFE", + description: "Create posts table", + }, ]; - + const result = await coverageOrchestrator.checkCoverage(operations); - + expect(result.passed).toBe(true); expect(result.gaps).toHaveLength(0); expect(result.coveragePercentage).toBeGreaterThan(80); }); }); - - describe('Coverage Bypass Mechanism with Fix', () => { - it('should require explicit bypass reason for technical failures', async () => { + + describe("Coverage Bypass Mechanism with Fix", () => { + it("should require explicit bypass reason for technical failures", async () => { // Simulate a technical failure in coverage checking const brokenOrchestrator = new MigrationOrchestrator({ - sqlDir: path.join(tempDir, 'sql'), - testsDir: '/nonexistent/path', // This will cause scanner to fail - coverageEnforcementLevel: 'strict', + sqlDir: 
path.join(tempDir, "sql"), + testsDir: "/nonexistent/path", // This will cause scanner to fail + coverageEnforcementLevel: "strict", coverageBypassReason: null, // No bypass reason - isProd: false + isProd: false, }); - - const operations = [{ sql: 'CREATE TABLE test();', type: 'SAFE' }]; - + + const operations = [{ sql: "CREATE TABLE test();", type: "SAFE" }]; + // After our fix, this should throw instead of silently passing - await expect(brokenOrchestrator.checkTestCoverage(operations)) - .rejects.toThrow(/Use --coverage-bypass-reason/); + await expect( + brokenOrchestrator.checkTestCoverage(operations), + ).rejects.toThrow(/Use --coverage-bypass-reason/); }); - - it('should allow bypass with explicit reason', async () => { + + it("should allow bypass with explicit reason", async () => { const bypassOrchestrator = new MigrationOrchestrator({ - sqlDir: path.join(tempDir, 'sql'), - testsDir: '/nonexistent/path', - coverageEnforcementLevel: 'strict', - coverageBypassReason: 'Emergency hotfix #123', - isProd: false + sqlDir: path.join(tempDir, "sql"), + testsDir: "/nonexistent/path", + coverageEnforcementLevel: "strict", + coverageBypassReason: "Emergency hotfix #123", + isProd: false, }); - - const operations = [{ sql: 'CREATE TABLE test();', type: 'SAFE' }]; - + + const operations = [{ sql: "CREATE TABLE test();", type: "SAFE" }]; + const result = await bypassOrchestrator.checkTestCoverage(operations); - + expect(result.passed).toBe(true); - expect(result.bypassReason).toBe('Emergency hotfix #123'); + expect(result.bypassReason).toBe("Emergency hotfix #123"); expect(result.technicalFailure).toBe(true); }); }); - - describe('AST Operation Validation with Fix', () => { - it('should validate operation structure before processing', async () => { + + describe("AST Operation Validation with Fix", () => { + it("should validate operation structure before processing", async () => { const analyzer = new TestRequirementAnalyzer(); - + // Test with invalid operation (missing sql property) - const invalidOp = { type: 'SAFE' }; - - await expect(analyzer.determineTestRequirements(invalidOp)) - .rejects.toThrow(/missing or invalid 'sql' property/); - + const invalidOp = { type: "SAFE" }; + + await expect( + analyzer.determineTestRequirements(invalidOp), + ).rejects.toThrow(/missing or invalid 'sql' property/); + // Test with valid operation const validOp = { - sql: 'CREATE TABLE users (id INT);', - type: 'SAFE', - description: 'Create users table' + sql: "CREATE TABLE users (id INT);", + type: "SAFE", + description: "Create users table", }; - + const requirements = await analyzer.determineTestRequirements(validOp); expect(requirements).toBeInstanceOf(Array); }); - - it('should handle malformed SQL gracefully', async () => { + + it("should handle malformed SQL gracefully", async () => { const analyzer = new TestRequirementAnalyzer(); - + // Test with empty SQL - const emptyOp = { sql: '', type: 'SAFE' }; - await expect(analyzer.determineTestRequirements(emptyOp)) - .rejects.toThrow(/SQL cannot be empty/); - + const emptyOp = { sql: "", type: "SAFE" }; + await expect(analyzer.determineTestRequirements(emptyOp)).rejects.toThrow( + /SQL cannot be empty/, + ); + // Test with oversized SQL - const hugeOp = { - sql: 'A'.repeat(100001), - type: 'SAFE' + const hugeOp = { + sql: "A".repeat(100001), + type: "SAFE", }; - await expect(analyzer.determineTestRequirements(hugeOp)) - .rejects.toThrow(/SQL exceeds maximum length/); + await expect(analyzer.determineTestRequirements(hugeOp)).rejects.toThrow( + /SQL exceeds 
maximum length/, + ); }); }); - - describe('Memory Management with Fix', () => { - it('should handle large test suites without OOM', async () => { + + describe("Memory Management with Fix", () => { + it("should handle large test suites without OOM", async () => { // Create many test files const testCount = 100; for (let i = 0; i < testCount; i++) { await fs.writeFile( - path.join(tempDir, 'tests', `test_${i}.sql`), + path.join(tempDir, "tests", `test_${i}.sql`), ` BEGIN; SELECT plan(3); @@ -264,128 +269,130 @@ describe('Test Coverage Enforcement - End-to-End Integration', () => { SELECT has_column('table_${i}', 'name'); SELECT finish(); ROLLBACK; - ` + `, ); } - + // Scanner with memory limits const scanner = new pgTAPTestScanner({ maxMemoryMB: 50, // Low memory limit to test management enableStreaming: true, - batchSize: 10 + batchSize: 10, }); - + let memoryWarningEmitted = false; - scanner.on('memory_warning', () => { + scanner.on("memory_warning", () => { memoryWarningEmitted = true; }); - + // Should complete without throwing OOM const database = await scanner.buildCoverageDatabase(); - + expect(database).toBeDefined(); expect(scanner.statistics.filesScanned).toBe(testCount); - + // Verify memory management kicked in if needed if (memoryWarningEmitted) { expect(scanner.statistics.memoryWarnings).toBeGreaterThan(0); } }); }); - - describe('Coverage Key Generation with Fix', () => { - it('should handle edge cases in coverage keys', async () => { - const enforcer = require('../../src/lib/testing/CoverageEnforcer'); + + describe("Coverage Key Generation with Fix", () => { + it("should handle edge cases in coverage keys", async () => { + const enforcer = require("../../src/lib/testing/CoverageEnforcer"); const instance = new enforcer(); - + // Test null schema normalization - const item1 = { schema: null, name: 'users', type: 'table' }; - const item2 = { schema: 'public', name: 'users', type: 'table' }; - + const item1 = { schema: null, name: "users", type: "table" }; + const item2 = { schema: "public", name: "users", type: "table" }; + const key1 = instance._generateCoverageKey(item1); const key2 = instance._generateCoverageKey(item2); - + expect(key1).toBe(key2); // Should normalize to same key - expect(key1).toBe('public::users::table'); - + expect(key1).toBe("public::users::table"); + // Test case normalization - const item3 = { schema: 'PUBLIC', name: 'USERS', type: 'TABLE' }; + const item3 = { schema: "PUBLIC", name: "USERS", type: "TABLE" }; const key3 = instance._generateCoverageKey(item3); - + expect(key3).toBe(key1); // Should normalize case - + // Test special character handling - const item4 = { schema: 'public', name: 'user::posts', type: 'table' }; + const item4 = { schema: "public", name: "user::posts", type: "table" }; const key4 = instance._generateCoverageKey(item4); - - expect(key4).toContain('\\:\\:'); // Should escape separator + + expect(key4).toContain("\\:\\:"); // Should escape separator }); }); - - describe('Pattern Error Recovery with Fix', () => { - it('should recover from pattern rendering failures', async () => { + + describe("Pattern Error Recovery with Fix", () => { + it("should recover from pattern rendering failures", async () => { const generator = new TestTemplateGenerator(); - + // Create requirement that might cause pattern issues const requirement = { - type: 'rls', - name: 'users', - schema: 'public', - description: 'Test RLS policies' + type: "rls", + name: "users", + schema: "public", + description: "Test RLS policies", }; - + // Try to use 
non-existent pattern - const result = generator.generateEnhancedTemplate( - requirement, - ['non_existent_pattern', 'another_bad_pattern'] - ); - + const result = generator.generateEnhancedTemplate(requirement, [ + "non_existent_pattern", + "another_bad_pattern", + ]); + // Should still return valid template expect(result).toBeDefined(); - expect(result.content).toContain('CREATE OR REPLACE FUNCTION'); - expect(result.metadata.warnings).toContain("Pattern 'non_existent_pattern' not found"); - + expect(result.content).toContain("CREATE OR REPLACE FUNCTION"); + expect(result.metadata.warnings).toContain( + "Pattern 'non_existent_pattern' not found", + ); + // Verify template is valid const validation = generator._validateTemplate(result.content); expect(validation.valid).toBe(true); }); - - it('should validate templates before returning', async () => { + + it("should validate templates before returning", async () => { const generator = new TestTemplateGenerator(); - + // Test validation catches missing structure - const invalidTemplate = 'SELECT 1;'; // Not a valid pgTAP test + const invalidTemplate = "SELECT 1;"; // Not a valid pgTAP test const validation = generator._validateTemplate(invalidTemplate); - + expect(validation.valid).toBe(false); - expect(validation.errors).toContain('Missing function declaration'); - expect(validation.errors).toContain('Missing test plan declaration'); + expect(validation.errors).toContain("Missing function declaration"); + expect(validation.errors).toContain("Missing test plan declaration"); }); }); - - describe('Template Generation for Coverage Gaps', () => { - it('should generate templates for missing coverage', async () => { + + describe("Template Generation for Coverage Gaps", () => { + it("should generate templates for missing coverage", async () => { // Analyze operations const operations = [ { - sql: 'CREATE TABLE products (id INT PRIMARY KEY, name TEXT);', - type: 'SAFE', - description: 'Create products table' + sql: "CREATE TABLE products (id INT PRIMARY KEY, name TEXT);", + type: "SAFE", + description: "Create products table", }, { - sql: 'CREATE FUNCTION get_product(p_id INT) RETURNS TEXT AS $$ SELECT name FROM products WHERE id = p_id $$ LANGUAGE SQL;', - type: 'SAFE', - description: 'Create get_product function' - } + sql: "CREATE FUNCTION get_product(p_id INT) RETURNS TEXT AS $$ SELECT name FROM products WHERE id = p_id $$ LANGUAGE SQL;", + type: "SAFE", + description: "Create get_product function", + }, ]; - + // No existing tests const result = await coverageOrchestrator.checkCoverage(operations); - + expect(result.passed).toBe(false); expect(result.templates).toBeDefined(); expect(result.templates.length).toBeGreaterThan(0); - + // Verify templates are valid const generator = new TestTemplateGenerator(); for (const template of result.templates) { @@ -394,12 +401,12 @@ describe('Test Coverage Enforcement - End-to-End Integration', () => { } }); }); - - describe('Full Workflow Integration', () => { - it('should handle complete deployment workflow with coverage', async () => { + + describe("Full Workflow Integration", () => { + it("should handle complete deployment workflow with coverage", async () => { // Setup SQL and tests await fs.writeFile( - path.join(tempDir, 'sql', 'schema.sql'), + path.join(tempDir, "sql", "schema.sql"), ` CREATE TABLE articles ( id UUID PRIMARY KEY DEFAULT gen_random_uuid(), @@ -412,11 +419,11 @@ describe('Test Coverage Enforcement - End-to-End Integration', () => { CREATE INDEX idx_articles_author ON 
articles(author_id); CREATE INDEX idx_articles_published ON articles(published) WHERE published = true; - ` + `, ); - + await fs.writeFile( - path.join(tempDir, 'tests', 'articles.test.sql'), + path.join(tempDir, "tests", "articles.test.sql"), ` BEGIN; SELECT plan(10); @@ -439,38 +446,38 @@ describe('Test Coverage Enforcement - End-to-End Integration', () => { SELECT finish(); ROLLBACK; - ` + `, ); - + // Create operations const operations = [ { - sql: 'CREATE TABLE articles...', - type: 'SAFE', - description: 'Create articles table' + sql: "CREATE TABLE articles...", + type: "SAFE", + description: "Create articles table", }, { - sql: 'CREATE INDEX idx_articles_author...', - type: 'SAFE', - description: 'Create author index' + sql: "CREATE INDEX idx_articles_author...", + type: "SAFE", + description: "Create author index", }, { - sql: 'CREATE INDEX idx_articles_published...', - type: 'SAFE', - description: 'Create published partial index' - } + sql: "CREATE INDEX idx_articles_published...", + type: "SAFE", + description: "Create published partial index", + }, ]; - + // Run full coverage check const coverageResult = await orchestrator.checkTestCoverage(operations); - + expect(coverageResult.passed).toBe(true); expect(coverageResult.coveragePercentage).toBeGreaterThan(70); - + // Verify all components worked together expect(orchestrator.coverageOrchestrator).toBeDefined(); expect(coverageResult.gaps).toBeDefined(); expect(coverageResult.suggestions).toBeDefined(); }); }); -}); \ No newline at end of file +}); diff --git a/test/integration/full-workflow.test.js b/test/integration/full-workflow.test.js index 4cd75c1..046ade9 100644 --- a/test/integration/full-workflow.test.js +++ b/test/integration/full-workflow.test.js @@ -1,289 +1,314 @@ /** * Full Workflow Integration Test for D.A.T.A. - * + * * This test exercises the complete migration workflow from SQL source files * through migration generation, testing, and deployment with diff tracking. */ -const { describe, it, expect, beforeEach, afterEach } = require('vitest'); -const fs = require('fs').promises; -const path = require('path'); -const { exec } = require('child_process'); -const { promisify } = require('util'); +const { describe, it, expect, beforeEach, afterEach } = require("vitest"); +const fs = require("fs").promises; +const path = require("path"); +const { exec } = require("child_process"); +const { promisify } = require("util"); const execAsync = promisify(exec); -describe('D.A.T.A. Full Migration Workflow', () => { - const tempDir = '/tmp/test-001'; - const fixtureDir = path.join(process.cwd(), 'test/fixtures/example-project'); - const dataCmd = path.join(process.cwd(), 'bin/data.js'); - +describe("D.A.T.A. 
Full Migration Workflow", () => { + const tempDir = "/tmp/test-001"; + const fixtureDir = path.join(process.cwd(), "test/fixtures/example-project"); + const dataCmd = path.join(process.cwd(), "bin/data.js"); + beforeEach(async () => { // Step 1: Create temporary directory await fs.rm(tempDir, { recursive: true, force: true }).catch(() => {}); await fs.mkdir(tempDir, { recursive: true }); - console.log('✅ Created temporary directory:', tempDir); - + console.log("✅ Created temporary directory:", tempDir); + // Step 2: Copy fixture project to temporary directory await execAsync(`cp -r ${fixtureDir}/* ${tempDir}/`); - console.log('✅ Copied fixture project to:', tempDir); - + console.log("✅ Copied fixture project to:", tempDir); + // Verify structure const files = await fs.readdir(tempDir); - expect(files).toContain('sql'); - expect(files).toContain('tests'); - expect(files).toContain('.datarc.json'); + expect(files).toContain("sql"); + expect(files).toContain("tests"); + expect(files).toContain(".datarc.json"); }); - + afterEach(async () => { // Cleanup await fs.rm(tempDir, { recursive: true, force: true }).catch(() => {}); }); - - describe('Complete Migration Workflow', () => { - it('should handle full migration lifecycle from SQL to deployment', async () => { + + describe("Complete Migration Workflow", () => { + it("should handle full migration lifecycle from SQL to deployment", async () => { // Step 3: Change to temporary directory and build base migration process.chdir(tempDir); - console.log('📂 Working directory:', process.cwd()); - + console.log("📂 Working directory:", process.cwd()); + // List SQL files to understand what we're working with - const sqlFiles = await fs.readdir(path.join(tempDir, 'sql')); - console.log('📄 SQL files found:', sqlFiles); - + const sqlFiles = await fs.readdir(path.join(tempDir, "sql")); + console.log("📄 SQL files found:", sqlFiles); + // Step 4: Build the base migration from SQL files - console.log('\n🔨 Building base migration from SQL files...'); - + console.log("\n🔨 Building base migration from SQL files..."); + try { const { stdout: compileOutput } = await execAsync( - `node ${dataCmd} db compile --sql-dir ./sql --migrations-dir ./migrations` + `node ${dataCmd} db compile --sql-dir ./sql --migrations-dir ./migrations`, ); - console.log('Compile output:', compileOutput); + console.log("Compile output:", compileOutput); } catch (error) { - console.error('Compile failed:', error.message); + console.error("Compile failed:", error.message); // Continue to see what was generated } - + // Check if migration was created - const migrations = await fs.readdir(path.join(tempDir, 'migrations')).catch(() => []); - console.log('📦 Migrations created:', migrations); - + const migrations = await fs + .readdir(path.join(tempDir, "migrations")) + .catch(() => []); + console.log("📦 Migrations created:", migrations); + if (migrations.length > 0) { // Read the migration to verify it looks correct - const migrationDir = path.join(tempDir, 'migrations', migrations[0]); + const migrationDir = path.join(tempDir, "migrations", migrations[0]); const migrationFiles = await fs.readdir(migrationDir); - console.log('📄 Migration files:', migrationFiles); - - if (migrationFiles.includes('up.sql')) { - const upSql = await fs.readFile(path.join(migrationDir, 'up.sql'), 'utf8'); - console.log('\n📜 Migration UP script preview:'); - console.log(upSql.substring(0, 500) + '...'); - + console.log("📄 Migration files:", migrationFiles); + + if (migrationFiles.includes("up.sql")) { + const upSql = 
await fs.readFile( + path.join(migrationDir, "up.sql"), + "utf8", + ); + console.log("\n📜 Migration UP script preview:"); + console.log(upSql.substring(0, 500) + "..."); + // Verify migration contains expected content expect(upSql).toBeTruthy(); expect(upSql.length).toBeGreaterThan(0); } - + // Step 5: Run tests against the migration - console.log('\n🧪 Running tests against migration...'); - + console.log("\n🧪 Running tests against migration..."); + try { const { stdout: testOutput } = await execAsync( - `node ${dataCmd} db migrate test --migration ${migrations[0]}` + `node ${dataCmd} db migrate test --migration ${migrations[0]}`, ); - console.log('Test output:', testOutput); - + console.log("Test output:", testOutput); + // Check if tests passed - const testsPassed = !testOutput.includes('FAILED') && - (testOutput.includes('PASS') || testOutput.includes('success')); - + const testsPassed = + !testOutput.includes("FAILED") && + (testOutput.includes("PASS") || testOutput.includes("success")); + if (testsPassed) { - console.log('✅ Tests passed!'); - + console.log("✅ Tests passed!"); + // Step 6: Graduate/promote the migration as "ready" - console.log('\n🎓 Promoting migration as ready...'); - + console.log("\n🎓 Promoting migration as ready..."); + try { const { stdout: promoteOutput } = await execAsync( - `node ${dataCmd} db migrate promote --migration ${migrations[0]}` + `node ${dataCmd} db migrate promote --migration ${migrations[0]}`, ); - console.log('Promote output:', promoteOutput); + console.log("Promote output:", promoteOutput); } catch (error) { - console.log('Promote status:', error.message); + console.log("Promote status:", error.message); } - + // Step 7: Deploy the ready migration (dry run for test) - console.log('\n🚀 Deploying migration (dry run)...'); - + console.log("\n🚀 Deploying migration (dry run)..."); + try { const { stdout: deployOutput } = await execAsync( - `node ${dataCmd} db migrate --dry-run` + `node ${dataCmd} db migrate --dry-run`, ); - console.log('Deploy output:', deployOutput); - + console.log("Deploy output:", deployOutput); + // Sanity check deployment looks correct - expect(deployOutput).not.toContain('ERROR'); + expect(deployOutput).not.toContain("ERROR"); } catch (error) { - console.log('Deploy dry run status:', error.message); + console.log("Deploy dry run status:", error.message); } } else { - console.log('⚠️ Tests did not pass, skipping promotion'); + console.log("⚠️ Tests did not pass, skipping promotion"); } } catch (error) { - console.error('Test execution failed:', error.message); + console.error("Test execution failed:", error.message); } } - + // Step 8: Modify one of the SQL files - console.log('\n✏️ Modifying SQL file to test diff generation...'); - - const sqlFilePath = path.join(tempDir, 'sql', sqlFiles[0]); - const originalSql = await fs.readFile(sqlFilePath, 'utf8'); - + console.log("\n✏️ Modifying SQL file to test diff generation..."); + + const sqlFilePath = path.join(tempDir, "sql", sqlFiles[0]); + const originalSql = await fs.readFile(sqlFilePath, "utf8"); + // Add a new column or table to the SQL - const modifiedSql = originalSql + '\n\n-- Test modification\nALTER TABLE users ADD COLUMN test_field TEXT;'; + const modifiedSql = + originalSql + + "\n\n-- Test modification\nALTER TABLE users ADD COLUMN test_field TEXT;"; await fs.writeFile(sqlFilePath, modifiedSql); - console.log('📝 Modified:', sqlFiles[0]); - + console.log("📝 Modified:", sqlFiles[0]); + // Step 9: Compile SQL again to see if it generates a diff - console.log('\n🔨 
Compiling again after modification...'); - + console.log("\n🔨 Compiling again after modification..."); + try { const { stdout: diffCompileOutput } = await execAsync( - `node ${dataCmd} db compile --sql-dir ./sql --migrations-dir ./migrations` + `node ${dataCmd} db compile --sql-dir ./sql --migrations-dir ./migrations`, ); - console.log('Diff compile output:', diffCompileOutput); - + console.log("Diff compile output:", diffCompileOutput); + // Check what was generated - const newMigrations = await fs.readdir(path.join(tempDir, 'migrations')).catch(() => []); - console.log('📦 Migrations after modification:', newMigrations); - + const newMigrations = await fs + .readdir(path.join(tempDir, "migrations")) + .catch(() => []); + console.log("📦 Migrations after modification:", newMigrations); + if (newMigrations.length > migrations.length) { - console.log('✅ Generated incremental migration (diff)'); - + console.log("✅ Generated incremental migration (diff)"); + // Read the new migration to verify it's a diff, not full rebuild - const newMigrationDir = path.join(tempDir, 'migrations', newMigrations[newMigrations.length - 1]); - const newUpSql = await fs.readFile(path.join(newMigrationDir, 'up.sql'), 'utf8'); - console.log('\n📜 Diff migration preview:'); + const newMigrationDir = path.join( + tempDir, + "migrations", + newMigrations[newMigrations.length - 1], + ); + const newUpSql = await fs.readFile( + path.join(newMigrationDir, "up.sql"), + "utf8", + ); + console.log("\n📜 Diff migration preview:"); console.log(newUpSql); - + // Verify it's an incremental change, not full schema - expect(newUpSql).toContain('ALTER TABLE'); - expect(newUpSql).toContain('test_field'); + expect(newUpSql).toContain("ALTER TABLE"); + expect(newUpSql).toContain("test_field"); expect(newUpSql.length).toBeLessThan(originalSql.length); // Should be shorter than full schema - - console.log('✅ Confirmed: Generated short-path incremental migration'); + + console.log( + "✅ Confirmed: Generated short-path incremental migration", + ); } else { - console.log('⚠️ No new migration generated'); + console.log("⚠️ No new migration generated"); } } catch (error) { - console.error('Diff compile failed:', error.message); + console.error("Diff compile failed:", error.message); } - + // Verify the complete workflow expect(migrations.length).toBeGreaterThanOrEqual(0); - console.log('\n✅ Full workflow test completed'); - + console.log("\n✅ Full workflow test completed"); }, 60000); // 60 second timeout for full workflow }); - - describe('Test Coverage Enforcement in Workflow', () => { - it('should enforce test coverage during migration workflow', async () => { + + describe("Test Coverage Enforcement in Workflow", () => { + it("should enforce test coverage during migration workflow", async () => { process.chdir(tempDir); - + // Remove some tests to trigger coverage enforcement - const testFiles = await fs.readdir(path.join(tempDir, 'tests')); + const testFiles = await fs.readdir(path.join(tempDir, "tests")); if (testFiles.length > 0) { // Delete a test file to create coverage gap - await fs.unlink(path.join(tempDir, 'tests', testFiles[0])); - console.log('🗑️ Removed test file to create coverage gap:', testFiles[0]); + await fs.unlink(path.join(tempDir, "tests", testFiles[0])); + console.log( + "🗑️ Removed test file to create coverage gap:", + testFiles[0], + ); } - + // Try to compile and deploy - should fail coverage check - console.log('\n🔨 Attempting migration with insufficient coverage...'); - + console.log("\n🔨 Attempting migration with 
insufficient coverage..."); + try { const { stdout, stderr } = await execAsync( `node ${dataCmd} db migrate --no-skip-coverage`, - { timeout: 30000 } + { timeout: 30000 }, ); - + // Should not reach here if coverage enforcement works - console.log('Output:', stdout); - console.log('Stderr:', stderr); - + console.log("Output:", stdout); + console.log("Stderr:", stderr); + // Check if coverage was enforced - const coverageEnforced = - stdout.includes('coverage') || - stderr.includes('coverage') || - stdout.includes('test requirements') || - stderr.includes('test requirements'); - + const coverageEnforced = + stdout.includes("coverage") || + stderr.includes("coverage") || + stdout.includes("test requirements") || + stderr.includes("test requirements"); + if (coverageEnforced) { - console.log('✅ Coverage enforcement triggered'); + console.log("✅ Coverage enforcement triggered"); } else { - console.log('⚠️ Coverage enforcement may not have triggered'); + console.log("⚠️ Coverage enforcement may not have triggered"); } } catch (error) { // Expected to fail due to coverage requirements - console.log('✅ Migration blocked due to coverage requirements'); + console.log("✅ Migration blocked due to coverage requirements"); expect(error.message).toMatch(/coverage|test/i); } - + // Now add bypass reason and try again - console.log('\n🔨 Attempting with coverage bypass...'); - + console.log("\n🔨 Attempting with coverage bypass..."); + try { const { stdout } = await execAsync( `node ${dataCmd} db migrate --coverage-bypass-reason "Integration test bypass" --dry-run`, - { timeout: 30000 } + { timeout: 30000 }, ); - - console.log('Bypass output:', stdout); - expect(stdout).toContain('bypass'); - console.log('✅ Coverage bypass worked with explicit reason'); + + console.log("Bypass output:", stdout); + expect(stdout).toContain("bypass"); + console.log("✅ Coverage bypass worked with explicit reason"); } catch (error) { - console.log('Bypass attempt result:', error.message); + console.log("Bypass attempt result:", error.message); } }, 60000); }); - - describe('Git Integration', () => { - it('should track migrations with git tags', async () => { + + describe("Git Integration", () => { + it("should track migrations with git tags", async () => { process.chdir(tempDir); - + // Initialize git repo for testing - await execAsync('git init'); + await execAsync("git init"); await execAsync('git config user.email "test@example.com"'); await execAsync('git config user.name "Test User"'); - await execAsync('git add .'); + await execAsync("git add ."); await execAsync('git commit -m "Initial commit"'); - - console.log('📝 Git repository initialized'); - + + console.log("📝 Git repository initialized"); + // Create and deploy a migration try { - await execAsync(`node ${dataCmd} db compile --sql-dir ./sql --migrations-dir ./migrations`); - const migrations = await fs.readdir(path.join(tempDir, 'migrations')).catch(() => []); - + await execAsync( + `node ${dataCmd} db compile --sql-dir ./sql --migrations-dir ./migrations`, + ); + const migrations = await fs + .readdir(path.join(tempDir, "migrations")) + .catch(() => []); + if (migrations.length > 0) { // Deploy with git tagging const { stdout } = await execAsync( `node ${dataCmd} db migrate --tag --dry-run`, - { timeout: 30000 } + { timeout: 30000 }, ); - - console.log('Deploy with tagging:', stdout); - + + console.log("Deploy with tagging:", stdout); + // Check git tags - const { stdout: tags } = await execAsync('git tag -l'); - console.log('Git tags created:', tags); - - if 
(tags.includes('deployment-')) { - console.log('✅ Deployment tracked with git tag'); + const { stdout: tags } = await execAsync("git tag -l"); + console.log("Git tags created:", tags); + + if (tags.includes("deployment-")) { + console.log("✅ Deployment tracked with git tag"); } } } catch (error) { - console.log('Git integration test result:', error.message); + console.log("Git integration test result:", error.message); } }, 60000); }); -}); \ No newline at end of file +}); diff --git a/test/manual-scripts/README.md b/test/manual-scripts/README.md index 277fe9d..c3aa2f6 100644 --- a/test/manual-scripts/README.md +++ b/test/manual-scripts/README.md @@ -21,8 +21,9 @@ node test/manual-scripts/test-memory-management.js ## Status These scripts are kept for reference and debugging purposes but are NOT part of the automated test suite. They may be useful for: + - Manual debugging of specific features - Quick verification of functionality - Reference for how certain features were tested during development -Consider converting these to proper Vitest tests if they prove valuable for ongoing development. \ No newline at end of file +Consider converting these to proper Vitest tests if they prove valuable for ongoing development. diff --git a/test/manual-scripts/simple-test.js b/test/manual-scripts/simple-test.js index 100f827..6a3a802 100644 --- a/test/manual-scripts/simple-test.js +++ b/test/manual-scripts/simple-test.js @@ -1,23 +1,25 @@ -const path = require('path'); +const path = require("path"); // Test basic functionality try { - console.log('Loading scanner...'); - const pgTAPTestScanner = require('./src/lib/testing/pgTAPTestScanner.js'); - - console.log('Creating scanner instance...'); + console.log("Loading scanner..."); + const pgTAPTestScanner = require("./src/lib/testing/pgTAPTestScanner.js"); + + console.log("Creating scanner instance..."); const scanner = new pgTAPTestScanner({ validatePlans: false }); - - console.log('Testing has_function with parameters...'); + + console.log("Testing has_function with parameters..."); const sql = "SELECT has_function('user_count', ARRAY['integer', 'text']);"; const assertions = scanner.extractAssertions(sql); - - console.log('Raw parameters:', assertions[0].parameters); - console.log('Function metadata:', JSON.stringify(assertions[0].functionMetadata, null, 2)); - - console.log('✅ Test completed successfully'); - + + console.log("Raw parameters:", assertions[0].parameters); + console.log( + "Function metadata:", + JSON.stringify(assertions[0].functionMetadata, null, 2), + ); + + console.log("✅ Test completed successfully"); } catch (error) { - console.error('❌ Error:', error.message); - console.error('Stack:', error.stack); -} \ No newline at end of file + console.error("❌ Error:", error.message); + console.error("Stack:", error.stack); +} diff --git a/test/manual-scripts/test-function-parsing.js b/test/manual-scripts/test-function-parsing.js index 9e985e3..693c957 100755 --- a/test/manual-scripts/test-function-parsing.js +++ b/test/manual-scripts/test-function-parsing.js @@ -3,7 +3,7 @@ * Simple test script to verify function parsing works correctly */ -const pgTAPTestScanner = require('./src/lib/testing/pgTAPTestScanner.js'); +const pgTAPTestScanner = require("./src/lib/testing/pgTAPTestScanner.js"); function runTest(name, testFn) { try { @@ -17,123 +17,271 @@ function runTest(name, testFn) { function assertEquals(actual, expected, message) { if (JSON.stringify(actual) !== JSON.stringify(expected)) { - throw new Error(`${message}\n Expected: 
${JSON.stringify(expected)}\n Actual: ${JSON.stringify(actual)}`); + throw new Error( + `${message}\n Expected: ${JSON.stringify(expected)}\n Actual: ${JSON.stringify(actual)}`, + ); } } const scanner = new pgTAPTestScanner({ validatePlans: false }); // Test basic has_function parsing -runTest('has_function with function name only', () => { +runTest("has_function with function name only", () => { const sql = "SELECT has_function('user_count');"; const assertions = scanner.extractAssertions(sql); - - assertEquals(assertions.length, 1, 'Should have 1 assertion'); - assertEquals(assertions[0].type, 'has_function', 'Should be has_function type'); - assertEquals(assertions[0].target, 'user_count', 'Should have correct target'); - assertEquals(assertions[0].functionMetadata.name, 'user_count', 'Should extract function name'); + + assertEquals(assertions.length, 1, "Should have 1 assertion"); + assertEquals( + assertions[0].type, + "has_function", + "Should be has_function type", + ); + assertEquals( + assertions[0].target, + "user_count", + "Should have correct target", + ); + assertEquals( + assertions[0].functionMetadata.name, + "user_count", + "Should extract function name", + ); }); // Test has_function with schema -runTest('has_function with schema and function name', () => { +runTest("has_function with schema and function name", () => { const sql = "SELECT has_function('public', 'user_count');"; const assertions = scanner.extractAssertions(sql); - - assertEquals(assertions.length, 1, 'Should have 1 assertion'); - assertEquals(assertions[0].type, 'has_function', 'Should be has_function type'); - assertEquals(assertions[0].target, 'public.user_count', 'Should have correct target'); - assertEquals(assertions[0].functionMetadata.schema, 'public', 'Should extract schema'); - assertEquals(assertions[0].functionMetadata.name, 'user_count', 'Should extract function name'); + + assertEquals(assertions.length, 1, "Should have 1 assertion"); + assertEquals( + assertions[0].type, + "has_function", + "Should be has_function type", + ); + assertEquals( + assertions[0].target, + "public.user_count", + "Should have correct target", + ); + assertEquals( + assertions[0].functionMetadata.schema, + "public", + "Should extract schema", + ); + assertEquals( + assertions[0].functionMetadata.name, + "user_count", + "Should extract function name", + ); }); // Test has_function with parameters -runTest('has_function with parameters', () => { +runTest("has_function with parameters", () => { const sql = "SELECT has_function('user_count', ARRAY['integer', 'text']);"; const assertions = scanner.extractAssertions(sql); - - console.log('DEBUG - Parameters:', assertions[0].parameters); - console.log('DEBUG - Function metadata:', assertions[0].functionMetadata); - - assertEquals(assertions.length, 1, 'Should have 1 assertion'); - assertEquals(assertions[0].type, 'has_function', 'Should be has_function type'); - assertEquals(assertions[0].target, 'user_count', 'Should have correct target'); - assertEquals(assertions[0].functionMetadata.name, 'user_count', 'Should extract function name'); - assertEquals(assertions[0].functionMetadata.parameters, ['integer', 'text'], 'Should extract parameters'); + + console.log("DEBUG - Parameters:", assertions[0].parameters); + console.log("DEBUG - Function metadata:", assertions[0].functionMetadata); + + assertEquals(assertions.length, 1, "Should have 1 assertion"); + assertEquals( + assertions[0].type, + "has_function", + "Should be has_function type", + ); + assertEquals( + 
assertions[0].target, + "user_count", + "Should have correct target", + ); + assertEquals( + assertions[0].functionMetadata.name, + "user_count", + "Should extract function name", + ); + assertEquals( + assertions[0].functionMetadata.parameters, + ["integer", "text"], + "Should extract parameters", + ); }); // Test function_returns -runTest('function_returns parsing', () => { +runTest("function_returns parsing", () => { const sql = "SELECT function_returns('user_count', 'integer');"; const assertions = scanner.extractAssertions(sql); - - assertEquals(assertions.length, 1, 'Should have 1 assertion'); - assertEquals(assertions[0].type, 'function_returns', 'Should be function_returns type'); - assertEquals(assertions[0].target, 'user_count', 'Should have correct target'); - assertEquals(assertions[0].functionMetadata.name, 'user_count', 'Should extract function name'); - assertEquals(assertions[0].functionMetadata.returnType, 'integer', 'Should extract return type'); + + assertEquals(assertions.length, 1, "Should have 1 assertion"); + assertEquals( + assertions[0].type, + "function_returns", + "Should be function_returns type", + ); + assertEquals( + assertions[0].target, + "user_count", + "Should have correct target", + ); + assertEquals( + assertions[0].functionMetadata.name, + "user_count", + "Should extract function name", + ); + assertEquals( + assertions[0].functionMetadata.returnType, + "integer", + "Should extract return type", + ); }); // Test function_lang_is -runTest('function_lang_is parsing', () => { +runTest("function_lang_is parsing", () => { const sql = "SELECT function_lang_is('user_count', 'plpgsql');"; const assertions = scanner.extractAssertions(sql); - - assertEquals(assertions.length, 1, 'Should have 1 assertion'); - assertEquals(assertions[0].type, 'function_lang_is', 'Should be function_lang_is type'); - assertEquals(assertions[0].target, 'user_count', 'Should have correct target'); - assertEquals(assertions[0].functionMetadata.name, 'user_count', 'Should extract function name'); - assertEquals(assertions[0].functionMetadata.language, 'plpgsql', 'Should extract language'); + + assertEquals(assertions.length, 1, "Should have 1 assertion"); + assertEquals( + assertions[0].type, + "function_lang_is", + "Should be function_lang_is type", + ); + assertEquals( + assertions[0].target, + "user_count", + "Should have correct target", + ); + assertEquals( + assertions[0].functionMetadata.name, + "user_count", + "Should extract function name", + ); + assertEquals( + assertions[0].functionMetadata.language, + "plpgsql", + "Should extract language", + ); }); // Test is_definer -runTest('is_definer parsing', () => { +runTest("is_definer parsing", () => { const sql = "SELECT is_definer('secure_function');"; const assertions = scanner.extractAssertions(sql); - - assertEquals(assertions.length, 1, 'Should have 1 assertion'); - assertEquals(assertions[0].type, 'is_definer', 'Should be is_definer type'); - assertEquals(assertions[0].target, 'secure_function', 'Should have correct target'); - assertEquals(assertions[0].functionMetadata.name, 'secure_function', 'Should extract function name'); - assertEquals(assertions[0].functionMetadata.isSecurityDefiner, true, 'Should mark as security definer'); + + assertEquals(assertions.length, 1, "Should have 1 assertion"); + assertEquals(assertions[0].type, "is_definer", "Should be is_definer type"); + assertEquals( + assertions[0].target, + "secure_function", + "Should have correct target", + ); + assertEquals( + 
assertions[0].functionMetadata.name, + "secure_function", + "Should extract function name", + ); + assertEquals( + assertions[0].functionMetadata.isSecurityDefiner, + true, + "Should mark as security definer", + ); }); // Test volatility_is -runTest('volatility_is parsing', () => { +runTest("volatility_is parsing", () => { const sql = "SELECT volatility_is('pure_function', 'immutable');"; const assertions = scanner.extractAssertions(sql); - - assertEquals(assertions.length, 1, 'Should have 1 assertion'); - assertEquals(assertions[0].type, 'volatility_is', 'Should be volatility_is type'); - assertEquals(assertions[0].target, 'pure_function', 'Should have correct target'); - assertEquals(assertions[0].functionMetadata.name, 'pure_function', 'Should extract function name'); - assertEquals(assertions[0].functionMetadata.volatility, 'immutable', 'Should extract volatility'); + + assertEquals(assertions.length, 1, "Should have 1 assertion"); + assertEquals( + assertions[0].type, + "volatility_is", + "Should be volatility_is type", + ); + assertEquals( + assertions[0].target, + "pure_function", + "Should have correct target", + ); + assertEquals( + assertions[0].functionMetadata.name, + "pure_function", + "Should extract function name", + ); + assertEquals( + assertions[0].functionMetadata.volatility, + "immutable", + "Should extract volatility", + ); }); // Test function_privs_are -runTest('function_privs_are parsing', () => { - const sql = "SELECT function_privs_are('calc_func', 'app_user', ARRAY['EXECUTE']);"; +runTest("function_privs_are parsing", () => { + const sql = + "SELECT function_privs_are('calc_func', 'app_user', ARRAY['EXECUTE']);"; const assertions = scanner.extractAssertions(sql); - - assertEquals(assertions.length, 1, 'Should have 1 assertion'); - assertEquals(assertions[0].type, 'function_privs_are', 'Should be function_privs_are type'); - assertEquals(assertions[0].target, 'calc_func', 'Should have correct target'); - assertEquals(assertions[0].functionMetadata.name, 'calc_func', 'Should extract function name'); - assertEquals(assertions[0].functionMetadata.role, 'app_user', 'Should extract role'); - assertEquals(assertions[0].functionMetadata.privileges, ['EXECUTE'], 'Should extract privileges'); + + assertEquals(assertions.length, 1, "Should have 1 assertion"); + assertEquals( + assertions[0].type, + "function_privs_are", + "Should be function_privs_are type", + ); + assertEquals(assertions[0].target, "calc_func", "Should have correct target"); + assertEquals( + assertions[0].functionMetadata.name, + "calc_func", + "Should extract function name", + ); + assertEquals( + assertions[0].functionMetadata.role, + "app_user", + "Should extract role", + ); + assertEquals( + assertions[0].functionMetadata.privileges, + ["EXECUTE"], + "Should extract privileges", + ); }); // Test complex function example -runTest('complex function parsing', () => { - const sql = "SELECT function_returns('public', 'complex_func', ARRAY['text', 'integer'], 'boolean');"; +runTest("complex function parsing", () => { + const sql = + "SELECT function_returns('public', 'complex_func', ARRAY['text', 'integer'], 'boolean');"; const assertions = scanner.extractAssertions(sql); - - assertEquals(assertions.length, 1, 'Should have 1 assertion'); - assertEquals(assertions[0].type, 'function_returns', 'Should be function_returns type'); - assertEquals(assertions[0].target, 'public.complex_func', 'Should have correct target'); - assertEquals(assertions[0].functionMetadata.schema, 'public', 'Should extract schema'); - 
assertEquals(assertions[0].functionMetadata.name, 'complex_func', 'Should extract function name'); - assertEquals(assertions[0].functionMetadata.parameters, ['text', 'integer'], 'Should extract parameters'); - assertEquals(assertions[0].functionMetadata.returnType, 'boolean', 'Should extract return type'); + + assertEquals(assertions.length, 1, "Should have 1 assertion"); + assertEquals( + assertions[0].type, + "function_returns", + "Should be function_returns type", + ); + assertEquals( + assertions[0].target, + "public.complex_func", + "Should have correct target", + ); + assertEquals( + assertions[0].functionMetadata.schema, + "public", + "Should extract schema", + ); + assertEquals( + assertions[0].functionMetadata.name, + "complex_func", + "Should extract function name", + ); + assertEquals( + assertions[0].functionMetadata.parameters, + ["text", "integer"], + "Should extract parameters", + ); + assertEquals( + assertions[0].functionMetadata.returnType, + "boolean", + "Should extract return type", + ); }); -console.log('\n🎯 Function parsing tests completed!'); \ No newline at end of file +console.log("\n🎯 Function parsing tests completed!"); diff --git a/test/manual-scripts/test-memory-management.js b/test/manual-scripts/test-memory-management.js index 8509493..162f222 100644 --- a/test/manual-scripts/test-memory-management.js +++ b/test/manual-scripts/test-memory-management.js @@ -4,15 +4,15 @@ * Test script to verify memory management functionality in pgTAPTestScanner */ -import pgTAPTestScanner from './src/lib/testing/pgTAPTestScanner.js'; -import path from 'path'; -import { fileURLToPath } from 'url'; +import pgTAPTestScanner from "./src/lib/testing/pgTAPTestScanner.js"; +import path from "path"; +import { fileURLToPath } from "url"; const __dirname = path.dirname(fileURLToPath(import.meta.url)); async function testMemoryManagement() { - console.log('🧪 Testing pgTAPTestScanner Memory Management'); - console.log('===============================================\n'); + console.log("🧪 Testing pgTAPTestScanner Memory Management"); + console.log("===============================================\n"); // Create scanner with memory limits const scanner = new pgTAPTestScanner({ @@ -21,92 +21,101 @@ async function testMemoryManagement() { enableStreaming: true, cleanupInterval: 500, // Frequent cleanup for testing enableGC: true, - maxObjectsPerType: 100 + maxObjectsPerType: 100, }); // Listen for memory events - scanner.on('memory_status', (data) => { - console.log(`📊 Memory Status: ${data.current}MB (max: ${data.max}MB, streaming: ${data.streamingMode})`); + scanner.on("memory_status", (data) => { + console.log( + `📊 Memory Status: ${data.current}MB (max: ${data.max}MB, streaming: ${data.streamingMode})`, + ); }); - scanner.on('cleanup', (data) => { + scanner.on("cleanup", (data) => { console.log(`🧹 Memory Cleanup: ${data.type}`); }); - scanner.on('warning', (data) => { + scanner.on("warning", (data) => { console.log(`⚠️ Warning: ${data.message}`); }); - scanner.on('progress', (data) => { - if (data.type === 'batch_progress') { - console.log(`⚡ Progress: Batch ${data.batch}/${data.totalBatches} (${data.filesProcessed}/${data.totalFiles} files)`); + scanner.on("progress", (data) => { + if (data.type === "batch_progress") { + console.log( + `⚡ Progress: Batch ${data.batch}/${data.totalBatches} (${data.filesProcessed}/${data.totalFiles} files)`, + ); } }); try { // Test memory stats - console.log('Initial memory stats:'); + console.log("Initial memory stats:"); 
console.log(JSON.stringify(scanner.getMemoryStats(), null, 2)); - + // Simulate scanning some test files (you can point this to actual test directory) - const testDir = path.join(__dirname, 'test/fixtures'); // Adjust path as needed - + const testDir = path.join(__dirname, "test/fixtures"); // Adjust path as needed + console.log(`\nScanning directory: ${testDir}`); - + if (await scanner.scanDirectory(testDir).catch(() => null)) { // Build coverage database with memory management - console.log('\nBuilding coverage database...'); + console.log("\nBuilding coverage database..."); const database = await scanner.buildCoverageDatabase(); - - console.log('\nFinal memory stats:'); + + console.log("\nFinal memory stats:"); console.log(JSON.stringify(scanner.getMemoryStats(), null, 2)); - - console.log('\nDatabase summary:'); - console.log(`- Total objects indexed: ${scanner._getTotalIndexedObjects(database)}`); + + console.log("\nDatabase summary:"); + console.log( + `- Total objects indexed: ${scanner._getTotalIndexedObjects(database)}`, + ); console.log(`- Total assertions: ${database.assertionCounts.total}`); console.log(`- Memory managed: ${database.memoryManaged}`); console.log(`- Streaming mode: ${database.streamingMode}`); } else { // Test with mock data if no test directory exists - console.log('No test directory found, testing with mock data...'); - + console.log("No test directory found, testing with mock data..."); + // Create some mock test files for (let i = 0; i < 150; i++) { scanner.testFiles.push({ filePath: `/mock/test_${i}.sql`, fileName: `test_${i}.sql`, - assertions: Array(10).fill(null).map((_, j) => ({ - type: 'has_table', - target: `table_${i}_${j}`, - parameters: [`table_${i}_${j}`], - lineNumber: j + 1, - rawSql: `SELECT has_table('table_${i}_${j}');` - })), + assertions: Array(10) + .fill(null) + .map((_, j) => ({ + type: "has_table", + target: `table_${i}_${j}`, + parameters: [`table_${i}_${j}`], + lineNumber: j + 1, + rawSql: `SELECT has_table('table_${i}_${j}');`, + })), planCount: 1, dependencies: [], - metadata: {} + metadata: {}, }); } - + scanner.totalAssertions = 150 * 10; - - console.log('Building coverage database with 150 mock files...'); + + console.log("Building coverage database with 150 mock files..."); const database = await scanner.buildCoverageDatabase(); - - console.log('\nFinal memory stats:'); + + console.log("\nFinal memory stats:"); console.log(JSON.stringify(scanner.getMemoryStats(), null, 2)); - - console.log('\nDatabase summary:'); - console.log(`- Total objects indexed: ${scanner._getTotalIndexedObjects(database)}`); + + console.log("\nDatabase summary:"); + console.log( + `- Total objects indexed: ${scanner._getTotalIndexedObjects(database)}`, + ); console.log(`- Total assertions: ${database.assertionCounts.total}`); console.log(`- Memory managed: ${database.memoryManaged}`); console.log(`- Streaming mode: ${database.streamingMode}`); } - console.log('\n✅ Memory management test completed successfully!'); - + console.log("\n✅ Memory management test completed successfully!"); } catch (error) { - console.error('❌ Test failed:', error.message); + console.error("❌ Test failed:", error.message); console.error(error.stack); } @@ -115,4 +124,4 @@ async function testMemoryManagement() { } // Run the test -testMemoryManagement().catch(console.error); \ No newline at end of file +testMemoryManagement().catch(console.error); diff --git a/test/manual-scripts/test_trigger_final.js b/test/manual-scripts/test_trigger_final.js index f416196..9da5dd9 100644 --- 
a/test/manual-scripts/test_trigger_final.js +++ b/test/manual-scripts/test_trigger_final.js @@ -1,55 +1,55 @@ #!/usr/bin/env node -const pgTAPTestScanner = require('./src/lib/testing/pgTAPTestScanner.js'); +const pgTAPTestScanner = require("./src/lib/testing/pgTAPTestScanner.js"); -console.log('✅ Testing trigger assertion parsing functionality...\n'); +console.log("✅ Testing trigger assertion parsing functionality...\n"); const scanner = new pgTAPTestScanner(); // Test different trigger assertion patterns const testCases = [ { - name: 'has_trigger with table and trigger', + name: "has_trigger with table and trigger", sql: "SELECT has_trigger('users', 'update_timestamp_trigger');", - expectedTarget: 'public.users.update_timestamp_trigger', - expectedType: 'has_trigger' + expectedTarget: "public.users.update_timestamp_trigger", + expectedType: "has_trigger", }, { - name: 'has_trigger with schema, table, and trigger', + name: "has_trigger with schema, table, and trigger", sql: "SELECT has_trigger('public', 'posts', 'audit_trigger');", - expectedTarget: 'public.posts.audit_trigger', - expectedType: 'has_trigger' + expectedTarget: "public.posts.audit_trigger", + expectedType: "has_trigger", }, { - name: 'trigger_is with function validation', + name: "trigger_is with function validation", sql: "SELECT trigger_is('users', 'update_trigger', 'set_timestamp');", - expectedTarget: 'public.users.update_trigger', - expectedType: 'trigger_is' + expectedTarget: "public.users.update_trigger", + expectedType: "trigger_is", }, { - name: 'is_trigger_on with events', + name: "is_trigger_on with events", sql: "SELECT is_trigger_on('posts', 'audit_trigger', 'UPDATE');", - expectedTarget: 'public.posts.audit_trigger', - expectedType: 'is_trigger_on' + expectedTarget: "public.posts.audit_trigger", + expectedType: "is_trigger_on", }, { - name: 'trigger_fires_on with timing', + name: "trigger_fires_on with timing", sql: "SELECT trigger_fires_on('users', 'update_trigger', 'BEFORE');", - expectedTarget: 'public.users.update_trigger', - expectedType: 'trigger_fires_on' + expectedTarget: "public.users.update_trigger", + expectedType: "trigger_fires_on", }, { - name: 'trigger_is_for with level', + name: "trigger_is_for with level", sql: "SELECT trigger_is_for('users', 'update_trigger', 'ROW');", - expectedTarget: 'public.users.update_trigger', - expectedType: 'trigger_is_for' + expectedTarget: "public.users.update_trigger", + expectedType: "trigger_is_for", }, { - name: 'triggers_are with array', + name: "triggers_are with array", sql: "SELECT triggers_are('users', ARRAY['trigger1', 'trigger2']);", - expectedTarget: 'public.users', - expectedType: 'triggers_are' - } + expectedTarget: "public.users", + expectedType: "triggers_are", + }, ]; let passed = 0; @@ -58,18 +58,23 @@ let failed = 0; testCases.forEach((testCase, index) => { try { const assertions = scanner.extractAssertions(testCase.sql); - + if (assertions.length === 1) { const assertion = assertions[0]; - - if (assertion.type === testCase.expectedType && assertion.target === testCase.expectedTarget) { + + if ( + assertion.type === testCase.expectedType && + assertion.target === testCase.expectedTarget + ) { console.log(`✅ Test ${index + 1}: ${testCase.name} - PASSED`); console.log(` Target: ${assertion.target}`); console.log(` Type: ${assertion.type}`); passed++; } else { console.log(`❌ Test ${index + 1}: ${testCase.name} - FAILED`); - console.log(` Expected: ${testCase.expectedType} -> ${testCase.expectedTarget}`); + console.log( + ` Expected: 
${testCase.expectedType} -> ${testCase.expectedTarget}`, + ); console.log(` Got: ${assertion.type} -> ${assertion.target}`); failed++; } @@ -78,9 +83,11 @@ testCases.forEach((testCase, index) => { console.log(` Expected 1 assertion, got ${assertions.length}`); failed++; } - console.log(''); + console.log(""); } catch (error) { - console.log(`❌ Test ${index + 1}: ${testCase.name} - ERROR: ${error.message}`); + console.log( + `❌ Test ${index + 1}: ${testCase.name} - ERROR: ${error.message}`, + ); failed++; } }); @@ -88,34 +95,44 @@ testCases.forEach((testCase, index) => { console.log(`\n📊 Test Results: ${passed} passed, ${failed} failed`); // Test coverage map integration -console.log('\n🗺️ Testing coverage map integration...'); -const combinedSql = testCases.map(tc => tc.sql).join('\n'); +console.log("\n🗺️ Testing coverage map integration..."); +const combinedSql = testCases.map((tc) => tc.sql).join("\n"); const allAssertions = scanner.extractAssertions(combinedSql); -scanner.testFiles = [{ - filePath: '/test/triggers.sql', - fileName: 'triggers.sql', - assertions: allAssertions, - planCount: allAssertions.length, - dependencies: [], - metadata: { size: combinedSql.length, lines: combinedSql.split('\n').length, parsed: new Date() } -}]; +scanner.testFiles = [ + { + filePath: "/test/triggers.sql", + fileName: "triggers.sql", + assertions: allAssertions, + planCount: allAssertions.length, + dependencies: [], + metadata: { + size: combinedSql.length, + lines: combinedSql.split("\n").length, + parsed: new Date(), + }, + }, +]; scanner._buildCoverageMap(); const coverageMap = scanner.getCoverageMap(); -console.log(`Found ${Object.keys(coverageMap.triggers || {}).length} triggers in coverage map:`); -Object.keys(coverageMap.triggers || {}).forEach(trigger => { +console.log( + `Found ${Object.keys(coverageMap.triggers || {}).length} triggers in coverage map:`, +); +Object.keys(coverageMap.triggers || {}).forEach((trigger) => { const tests = coverageMap.triggers[trigger]; - console.log(` - ${trigger}: [${tests.join(', ')}]`); + console.log(` - ${trigger}: [${tests.join(", ")}]`); }); const stats = scanner.getStatistics(); -console.log(`\nCoverage stats: ${stats.coverageStats.triggersWithTests} triggers with tests`); +console.log( + `\nCoverage stats: ${stats.coverageStats.triggersWithTests} triggers with tests`, +); if (failed === 0) { - console.log('\n🎉 All trigger assertion parsing tests passed!'); + console.log("\n🎉 All trigger assertion parsing tests passed!"); } else { console.log(`\n⚠️ ${failed} tests failed. 
Please review the implementation.`); process.exit(1); -} \ No newline at end of file +} diff --git a/test/pgTAPTestScanner.column.test.js b/test/pgTAPTestScanner.column.test.js index 4efc1fc..908074b 100644 --- a/test/pgTAPTestScanner.column.test.js +++ b/test/pgTAPTestScanner.column.test.js @@ -1,253 +1,290 @@ /** * pgTAPTestScanner Column Assertion Parsing Tests - * + * * Tests the column assertion parsing capabilities of pgTAPTestScanner */ -import { describe, it, expect, beforeEach } from 'vitest'; -import pgTAPTestScanner from '../src/lib/testing/pgTAPTestScanner.js'; +import { describe, it, expect, beforeEach } from "vitest"; +import pgTAPTestScanner from "../src/lib/testing/pgTAPTestScanner.js"; -describe('pgTAPTestScanner Column Assertion Parsing', () => { +describe("pgTAPTestScanner Column Assertion Parsing", () => { let scanner; beforeEach(() => { scanner = new pgTAPTestScanner(); }); - describe('has_column assertion pattern matching', () => { - it('should parse has_column with table and column', () => { + describe("has_column assertion pattern matching", () => { + it("should parse has_column with table and column", () => { const sql = "SELECT has_column('users', 'email');"; const assertions = scanner.extractAssertions(sql); - + expect(assertions).toHaveLength(1); - expect(assertions[0].type).toBe('has_column'); - expect(assertions[0].target).toBe('users.email'); - expect(assertions[0].parameters).toEqual(['users', 'email']); + expect(assertions[0].type).toBe("has_column"); + expect(assertions[0].target).toBe("users.email"); + expect(assertions[0].parameters).toEqual(["users", "email"]); }); - it('should parse has_column with schema, table, and column', () => { + it("should parse has_column with schema, table, and column", () => { const sql = "SELECT has_column('public', 'users', 'email');"; const assertions = scanner.extractAssertions(sql); - + expect(assertions).toHaveLength(1); - expect(assertions[0].type).toBe('has_column'); - expect(assertions[0].target).toBe('public.users.email'); - expect(assertions[0].parameters).toEqual(['public', 'users', 'email']); + expect(assertions[0].type).toBe("has_column"); + expect(assertions[0].target).toBe("public.users.email"); + expect(assertions[0].parameters).toEqual(["public", "users", "email"]); }); - it('should parse hasnt_column with table and column', () => { + it("should parse hasnt_column with table and column", () => { const sql = "SELECT hasnt_column('users', 'old_field');"; const assertions = scanner.extractAssertions(sql); - + expect(assertions).toHaveLength(1); - expect(assertions[0].type).toBe('hasnt_column'); - expect(assertions[0].target).toBe('users.old_field'); - expect(assertions[0].parameters).toEqual(['users', 'old_field']); + expect(assertions[0].type).toBe("hasnt_column"); + expect(assertions[0].target).toBe("users.old_field"); + expect(assertions[0].parameters).toEqual(["users", "old_field"]); }); - it('should parse hasnt_column with schema, table, and column', () => { - const sql = "SELECT hasnt_column('private', 'sessions', 'deprecated_field');"; + it("should parse hasnt_column with schema, table, and column", () => { + const sql = + "SELECT hasnt_column('private', 'sessions', 'deprecated_field');"; const assertions = scanner.extractAssertions(sql); - + expect(assertions).toHaveLength(1); - expect(assertions[0].type).toBe('hasnt_column'); - expect(assertions[0].target).toBe('private.sessions.deprecated_field'); - expect(assertions[0].parameters).toEqual(['private', 'sessions', 'deprecated_field']); + 
expect(assertions[0].type).toBe("hasnt_column"); + expect(assertions[0].target).toBe("private.sessions.deprecated_field"); + expect(assertions[0].parameters).toEqual([ + "private", + "sessions", + "deprecated_field", + ]); }); }); - describe('col_type_is assertion pattern matching', () => { - it('should parse col_type_is with table, column, and type', () => { + describe("col_type_is assertion pattern matching", () => { + it("should parse col_type_is with table, column, and type", () => { const sql = "SELECT col_type_is('users', 'email', 'character varying');"; const assertions = scanner.extractAssertions(sql); - + expect(assertions).toHaveLength(1); - expect(assertions[0].type).toBe('col_type_is'); - expect(assertions[0].target).toBe('users.email'); - expect(assertions[0].parameters).toEqual(['users', 'email', 'character varying']); + expect(assertions[0].type).toBe("col_type_is"); + expect(assertions[0].target).toBe("users.email"); + expect(assertions[0].parameters).toEqual([ + "users", + "email", + "character varying", + ]); }); - it('should parse col_type_is with schema, table, column, and type', () => { + it("should parse col_type_is with schema, table, column, and type", () => { const sql = "SELECT col_type_is('public', 'users', 'age', 'integer');"; const assertions = scanner.extractAssertions(sql); - + expect(assertions).toHaveLength(1); - expect(assertions[0].type).toBe('col_type_is'); - expect(assertions[0].target).toBe('public.users.age'); - expect(assertions[0].parameters).toEqual(['public', 'users', 'age', 'integer']); + expect(assertions[0].type).toBe("col_type_is"); + expect(assertions[0].target).toBe("public.users.age"); + expect(assertions[0].parameters).toEqual([ + "public", + "users", + "age", + "integer", + ]); }); }); - describe('col_not_null and col_is_null assertion pattern matching', () => { - it('should parse col_not_null with table and column', () => { + describe("col_not_null and col_is_null assertion pattern matching", () => { + it("should parse col_not_null with table and column", () => { const sql = "SELECT col_not_null('users', 'email');"; const assertions = scanner.extractAssertions(sql); - + expect(assertions).toHaveLength(1); - expect(assertions[0].type).toBe('col_not_null'); - expect(assertions[0].target).toBe('users.email'); - expect(assertions[0].parameters).toEqual(['users', 'email']); + expect(assertions[0].type).toBe("col_not_null"); + expect(assertions[0].target).toBe("users.email"); + expect(assertions[0].parameters).toEqual(["users", "email"]); }); - it('should parse col_not_null with schema, table, and column', () => { + it("should parse col_not_null with schema, table, and column", () => { const sql = "SELECT col_not_null('public', 'users', 'email');"; const assertions = scanner.extractAssertions(sql); - + expect(assertions).toHaveLength(1); - expect(assertions[0].type).toBe('col_not_null'); - expect(assertions[0].target).toBe('public.users.email'); - expect(assertions[0].parameters).toEqual(['public', 'users', 'email']); + expect(assertions[0].type).toBe("col_not_null"); + expect(assertions[0].target).toBe("public.users.email"); + expect(assertions[0].parameters).toEqual(["public", "users", "email"]); }); - it('should parse col_is_null with table and column', () => { + it("should parse col_is_null with table and column", () => { const sql = "SELECT col_is_null('users', 'description');"; const assertions = scanner.extractAssertions(sql); - + expect(assertions).toHaveLength(1); - expect(assertions[0].type).toBe('col_is_null'); - 
expect(assertions[0].target).toBe('users.description'); - expect(assertions[0].parameters).toEqual(['users', 'description']); + expect(assertions[0].type).toBe("col_is_null"); + expect(assertions[0].target).toBe("users.description"); + expect(assertions[0].parameters).toEqual(["users", "description"]); }); - it('should parse col_is_null with schema, table, and column', () => { + it("should parse col_is_null with schema, table, and column", () => { const sql = "SELECT col_is_null('public', 'users', 'description');"; const assertions = scanner.extractAssertions(sql); - + expect(assertions).toHaveLength(1); - expect(assertions[0].type).toBe('col_is_null'); - expect(assertions[0].target).toBe('public.users.description'); - expect(assertions[0].parameters).toEqual(['public', 'users', 'description']); + expect(assertions[0].type).toBe("col_is_null"); + expect(assertions[0].target).toBe("public.users.description"); + expect(assertions[0].parameters).toEqual([ + "public", + "users", + "description", + ]); }); }); - describe('col_has_default and col_hasnt_default assertion pattern matching', () => { - it('should parse col_has_default with table and column', () => { + describe("col_has_default and col_hasnt_default assertion pattern matching", () => { + it("should parse col_has_default with table and column", () => { const sql = "SELECT col_has_default('users', 'status');"; const assertions = scanner.extractAssertions(sql); - + expect(assertions).toHaveLength(1); - expect(assertions[0].type).toBe('col_has_default'); - expect(assertions[0].target).toBe('users.status'); - expect(assertions[0].parameters).toEqual(['users', 'status']); + expect(assertions[0].type).toBe("col_has_default"); + expect(assertions[0].target).toBe("users.status"); + expect(assertions[0].parameters).toEqual(["users", "status"]); }); - it('should parse col_has_default with schema, table, and column', () => { + it("should parse col_has_default with schema, table, and column", () => { const sql = "SELECT col_has_default('public', 'users', 'status');"; const assertions = scanner.extractAssertions(sql); - + expect(assertions).toHaveLength(1); - expect(assertions[0].type).toBe('col_has_default'); - expect(assertions[0].target).toBe('public.users.status'); - expect(assertions[0].parameters).toEqual(['public', 'users', 'status']); + expect(assertions[0].type).toBe("col_has_default"); + expect(assertions[0].target).toBe("public.users.status"); + expect(assertions[0].parameters).toEqual(["public", "users", "status"]); }); - it('should parse col_hasnt_default with table and column', () => { + it("should parse col_hasnt_default with table and column", () => { const sql = "SELECT col_hasnt_default('users', 'temp_field');"; const assertions = scanner.extractAssertions(sql); - + expect(assertions).toHaveLength(1); - expect(assertions[0].type).toBe('col_hasnt_default'); - expect(assertions[0].target).toBe('users.temp_field'); - expect(assertions[0].parameters).toEqual(['users', 'temp_field']); + expect(assertions[0].type).toBe("col_hasnt_default"); + expect(assertions[0].target).toBe("users.temp_field"); + expect(assertions[0].parameters).toEqual(["users", "temp_field"]); }); - it('should parse col_hasnt_default with schema, table, and column', () => { + it("should parse col_hasnt_default with schema, table, and column", () => { const sql = "SELECT col_hasnt_default('public', 'users', 'temp_field');"; const assertions = scanner.extractAssertions(sql); - + expect(assertions).toHaveLength(1); - 
expect(assertions[0].type).toBe('col_hasnt_default'); - expect(assertions[0].target).toBe('public.users.temp_field'); - expect(assertions[0].parameters).toEqual(['public', 'users', 'temp_field']); + expect(assertions[0].type).toBe("col_hasnt_default"); + expect(assertions[0].target).toBe("public.users.temp_field"); + expect(assertions[0].parameters).toEqual([ + "public", + "users", + "temp_field", + ]); }); }); - describe('col_default_is assertion pattern matching', () => { - it('should parse col_default_is with table, column, and default value', () => { + describe("col_default_is assertion pattern matching", () => { + it("should parse col_default_is with table, column, and default value", () => { const sql = "SELECT col_default_is('users', 'status', 'active');"; const assertions = scanner.extractAssertions(sql); - + expect(assertions).toHaveLength(1); - expect(assertions[0].type).toBe('col_default_is'); - expect(assertions[0].target).toBe('users.status'); - expect(assertions[0].parameters).toEqual(['users', 'status', 'active']); + expect(assertions[0].type).toBe("col_default_is"); + expect(assertions[0].target).toBe("users.status"); + expect(assertions[0].parameters).toEqual(["users", "status", "active"]); }); - it('should parse col_default_is with schema, table, column, and default value', () => { - const sql = "SELECT col_default_is('public', 'users', 'status', 'pending');"; + it("should parse col_default_is with schema, table, column, and default value", () => { + const sql = + "SELECT col_default_is('public', 'users', 'status', 'pending');"; const assertions = scanner.extractAssertions(sql); - + expect(assertions).toHaveLength(1); - expect(assertions[0].type).toBe('col_default_is'); - expect(assertions[0].target).toBe('public.users.status'); - expect(assertions[0].parameters).toEqual(['public', 'users', 'status', 'pending']); + expect(assertions[0].type).toBe("col_default_is"); + expect(assertions[0].target).toBe("public.users.status"); + expect(assertions[0].parameters).toEqual([ + "public", + "users", + "status", + "pending", + ]); }); - it('should parse col_default_is with quoted string default values', () => { - const sql = "SELECT col_default_is('users', 'email', ''user@example.com''::text);"; + it("should parse col_default_is with quoted string default values", () => { + const sql = + "SELECT col_default_is('users', 'email', ''user@example.com''::text);"; const assertions = scanner.extractAssertions(sql); - + expect(assertions).toHaveLength(1); - expect(assertions[0].type).toBe('col_default_is'); - expect(assertions[0].target).toBe('users.email'); - expect(assertions[0].parameters).toEqual(['users', 'email', "''user@example.com''::text"]); + expect(assertions[0].type).toBe("col_default_is"); + expect(assertions[0].target).toBe("users.email"); + expect(assertions[0].parameters).toEqual([ + "users", + "email", + "''user@example.com''::text", + ]); }); - it('should parse col_default_is with function default values', () => { + it("should parse col_default_is with function default values", () => { const sql = "SELECT col_default_is('users', 'created_at', 'now()');"; const assertions = scanner.extractAssertions(sql); - + expect(assertions).toHaveLength(1); - expect(assertions[0].type).toBe('col_default_is'); - expect(assertions[0].target).toBe('users.created_at'); - expect(assertions[0].parameters).toEqual(['users', 'created_at', 'now()']); + expect(assertions[0].type).toBe("col_default_is"); + expect(assertions[0].target).toBe("users.created_at"); + 
expect(assertions[0].parameters).toEqual([ + "users", + "created_at", + "now()", + ]); }); }); - describe('col_is_pk and col_isnt_pk assertion pattern matching', () => { - it('should parse col_is_pk with table and column', () => { + describe("col_is_pk and col_isnt_pk assertion pattern matching", () => { + it("should parse col_is_pk with table and column", () => { const sql = "SELECT col_is_pk('users', 'id');"; const assertions = scanner.extractAssertions(sql); - + expect(assertions).toHaveLength(1); - expect(assertions[0].type).toBe('col_is_pk'); - expect(assertions[0].target).toBe('users.id'); - expect(assertions[0].parameters).toEqual(['users', 'id']); + expect(assertions[0].type).toBe("col_is_pk"); + expect(assertions[0].target).toBe("users.id"); + expect(assertions[0].parameters).toEqual(["users", "id"]); }); - it('should parse col_is_pk with schema, table, and column', () => { + it("should parse col_is_pk with schema, table, and column", () => { const sql = "SELECT col_is_pk('public', 'users', 'id');"; const assertions = scanner.extractAssertions(sql); - + expect(assertions).toHaveLength(1); - expect(assertions[0].type).toBe('col_is_pk'); - expect(assertions[0].target).toBe('public.users.id'); - expect(assertions[0].parameters).toEqual(['public', 'users', 'id']); + expect(assertions[0].type).toBe("col_is_pk"); + expect(assertions[0].target).toBe("public.users.id"); + expect(assertions[0].parameters).toEqual(["public", "users", "id"]); }); - it('should parse col_isnt_pk with table and column', () => { + it("should parse col_isnt_pk with table and column", () => { const sql = "SELECT col_isnt_pk('users', 'email');"; const assertions = scanner.extractAssertions(sql); - + expect(assertions).toHaveLength(1); - expect(assertions[0].type).toBe('col_isnt_pk'); - expect(assertions[0].target).toBe('users.email'); - expect(assertions[0].parameters).toEqual(['users', 'email']); + expect(assertions[0].type).toBe("col_isnt_pk"); + expect(assertions[0].target).toBe("users.email"); + expect(assertions[0].parameters).toEqual(["users", "email"]); }); - it('should parse col_isnt_pk with schema, table, and column', () => { + it("should parse col_isnt_pk with schema, table, and column", () => { const sql = "SELECT col_isnt_pk('public', 'users', 'email');"; const assertions = scanner.extractAssertions(sql); - + expect(assertions).toHaveLength(1); - expect(assertions[0].type).toBe('col_isnt_pk'); - expect(assertions[0].target).toBe('public.users.email'); - expect(assertions[0].parameters).toEqual(['public', 'users', 'email']); + expect(assertions[0].type).toBe("col_isnt_pk"); + expect(assertions[0].target).toBe("public.users.email"); + expect(assertions[0].parameters).toEqual(["public", "users", "email"]); }); }); - describe('Column coverage tracking', () => { - it('should track columns in coverage map', () => { + describe("Column coverage tracking", () => { + it("should track columns in coverage map", () => { const sql = ` SELECT has_column('users', 'email'); SELECT col_type_is('users', 'email', 'character varying'); @@ -255,61 +292,65 @@ describe('pgTAPTestScanner Column Assertion Parsing', () => { SELECT col_has_default('users', 'status'); SELECT col_is_pk('users', 'id'); `; - + const assertions = scanner.extractAssertions(sql); - + // Manually build coverage map for testing - scanner.testFiles = [{ - filePath: '/test/column_test.sql', - fileName: 'column_test.sql', - assertions, - planCount: 5, - dependencies: [], - metadata: {} - }]; - + scanner.testFiles = [ + { + filePath: "/test/column_test.sql", + 
fileName: "column_test.sql", + assertions, + planCount: 5, + dependencies: [], + metadata: {}, + }, + ]; + scanner._buildCoverageMap(); const coverageMap = scanner.getCoverageMap(); - + expect(coverageMap.columns).toBeDefined(); - expect(Object.keys(coverageMap.columns)).toContain('users.email'); - expect(Object.keys(coverageMap.columns)).toContain('users.status'); - expect(Object.keys(coverageMap.columns)).toContain('users.id'); - - expect(coverageMap.columns['users.email']).toContain('has_column'); - expect(coverageMap.columns['users.email']).toContain('col_type_is'); - expect(coverageMap.columns['users.email']).toContain('col_not_null'); - expect(coverageMap.columns['users.status']).toContain('col_has_default'); - expect(coverageMap.columns['users.id']).toContain('col_is_pk'); - }); - - it('should include columns in statistics', () => { + expect(Object.keys(coverageMap.columns)).toContain("users.email"); + expect(Object.keys(coverageMap.columns)).toContain("users.status"); + expect(Object.keys(coverageMap.columns)).toContain("users.id"); + + expect(coverageMap.columns["users.email"]).toContain("has_column"); + expect(coverageMap.columns["users.email"]).toContain("col_type_is"); + expect(coverageMap.columns["users.email"]).toContain("col_not_null"); + expect(coverageMap.columns["users.status"]).toContain("col_has_default"); + expect(coverageMap.columns["users.id"]).toContain("col_is_pk"); + }); + + it("should include columns in statistics", () => { const sql = ` SELECT has_column('users', 'email'); SELECT has_column('posts', 'title'); SELECT col_is_pk('orders', 'id'); `; - - const assertions = scanner.extractAssertions(sql); - - scanner.testFiles = [{ - filePath: '/test/column_test.sql', - fileName: 'column_test.sql', - assertions, - planCount: 3, - dependencies: [], - metadata: {} - }]; - + + const assertions = scanner.extractAssertions(sql); + + scanner.testFiles = [ + { + filePath: "/test/column_test.sql", + fileName: "column_test.sql", + assertions, + planCount: 3, + dependencies: [], + metadata: {}, + }, + ]; + scanner._buildCoverageMap(); const stats = scanner.getStatistics(); - + expect(stats.coverageStats.columnsWithTests).toBe(3); // 'users.email', 'posts.title', 'orders.id' }); }); - describe('Complex column test scenarios', () => { - it('should handle mixed column and other assertions', () => { + describe("Complex column test scenarios", () => { + it("should handle mixed column and other assertions", () => { const sql = ` SELECT plan(6); SELECT has_table('users'); @@ -319,23 +360,23 @@ describe('pgTAPTestScanner Column Assertion Parsing', () => { SELECT has_index('users', 'idx_users_email'); SELECT col_is_pk('users', 'id'); `; - + const assertions = scanner.extractAssertions(sql); - + expect(assertions).toHaveLength(6); // Excludes the plan statement - - const columnAssertions = assertions.filter(a => - a.type.includes('column') || a.type.startsWith('col_') + + const columnAssertions = assertions.filter( + (a) => a.type.includes("column") || a.type.startsWith("col_"), ); expect(columnAssertions).toHaveLength(4); - - const otherAssertions = assertions.filter(a => - !a.type.includes('column') && !a.type.startsWith('col_') + + const otherAssertions = assertions.filter( + (a) => !a.type.includes("column") && !a.type.startsWith("col_"), ); expect(otherAssertions).toHaveLength(2); }); - it('should handle whitespace and formatting variations', () => { + it("should handle whitespace and formatting variations", () => { const sql = ` SELECT has_column( 'users', 'email' ); SELECT 
col_type_is( @@ -346,20 +387,20 @@ describe('pgTAPTestScanner Column Assertion Parsing', () => { ); SELECT col_not_null('users','email'); `; - + const assertions = scanner.extractAssertions(sql); - + expect(assertions).toHaveLength(3); - expect(assertions[0].type).toBe('has_column'); - expect(assertions[1].type).toBe('col_type_is'); - expect(assertions[2].type).toBe('col_not_null'); - - expect(assertions[0].target).toBe('users.email'); - expect(assertions[1].target).toBe('public.users.description'); - expect(assertions[2].target).toBe('users.email'); + expect(assertions[0].type).toBe("has_column"); + expect(assertions[1].type).toBe("col_type_is"); + expect(assertions[2].type).toBe("col_not_null"); + + expect(assertions[0].target).toBe("users.email"); + expect(assertions[1].target).toBe("public.users.description"); + expect(assertions[2].target).toBe("users.email"); }); - it('should handle all column assertion types in one test', () => { + it("should handle all column assertion types in one test", () => { const sql = ` SELECT has_column('users', 'email'); SELECT hasnt_column('users', 'old_field'); @@ -372,80 +413,85 @@ describe('pgTAPTestScanner Column Assertion Parsing', () => { SELECT col_is_pk('users', 'id'); SELECT col_isnt_pk('users', 'email'); `; - + const assertions = scanner.extractAssertions(sql); - + expect(assertions).toHaveLength(10); - - const assertionTypes = assertions.map(a => a.type); - expect(assertionTypes).toContain('has_column'); - expect(assertionTypes).toContain('hasnt_column'); - expect(assertionTypes).toContain('col_type_is'); - expect(assertionTypes).toContain('col_not_null'); - expect(assertionTypes).toContain('col_is_null'); - expect(assertionTypes).toContain('col_has_default'); - expect(assertionTypes).toContain('col_hasnt_default'); - expect(assertionTypes).toContain('col_default_is'); - expect(assertionTypes).toContain('col_is_pk'); - expect(assertionTypes).toContain('col_isnt_pk'); - + + const assertionTypes = assertions.map((a) => a.type); + expect(assertionTypes).toContain("has_column"); + expect(assertionTypes).toContain("hasnt_column"); + expect(assertionTypes).toContain("col_type_is"); + expect(assertionTypes).toContain("col_not_null"); + expect(assertionTypes).toContain("col_is_null"); + expect(assertionTypes).toContain("col_has_default"); + expect(assertionTypes).toContain("col_hasnt_default"); + expect(assertionTypes).toContain("col_default_is"); + expect(assertionTypes).toContain("col_is_pk"); + expect(assertionTypes).toContain("col_isnt_pk"); + // All should be categorized as column assertions - const columnAssertions = assertions.filter(a => - a.type.includes('column') || a.type.startsWith('col_') + const columnAssertions = assertions.filter( + (a) => a.type.includes("column") || a.type.startsWith("col_"), ); expect(columnAssertions).toHaveLength(10); }); }); - describe('Edge cases and special scenarios', () => { - it('should handle quoted column names', () => { + describe("Edge cases and special scenarios", () => { + it("should handle quoted column names", () => { const sql = 'SELECT has_column("users", "user-email");'; const assertions = scanner.extractAssertions(sql); - + expect(assertions).toHaveLength(1); - expect(assertions[0].type).toBe('has_column'); - expect(assertions[0].target).toBe('users.user-email'); - expect(assertions[0].parameters).toEqual(['users', 'user-email']); + expect(assertions[0].type).toBe("has_column"); + expect(assertions[0].target).toBe("users.user-email"); + expect(assertions[0].parameters).toEqual(["users", 
"user-email"]); }); - it('should handle backtick quoted column names', () => { - const sql = 'SELECT has_column(`users`, `user_email`);'; + it("should handle backtick quoted column names", () => { + const sql = "SELECT has_column(`users`, `user_email`);"; const assertions = scanner.extractAssertions(sql); - + expect(assertions).toHaveLength(1); - expect(assertions[0].type).toBe('has_column'); - expect(assertions[0].target).toBe('users.user_email'); - expect(assertions[0].parameters).toEqual(['users', 'user_email']); + expect(assertions[0].type).toBe("has_column"); + expect(assertions[0].target).toBe("users.user_email"); + expect(assertions[0].parameters).toEqual(["users", "user_email"]); }); - it('should handle complex default values in col_default_is', () => { - const sql = "SELECT col_default_is('users', 'settings', '{\"theme\": \"dark\", \"notifications\": true}');"; + it("should handle complex default values in col_default_is", () => { + const sql = + "SELECT col_default_is('users', 'settings', '{\"theme\": \"dark\", \"notifications\": true}');"; const assertions = scanner.extractAssertions(sql); - + expect(assertions).toHaveLength(1); - expect(assertions[0].type).toBe('col_default_is'); - expect(assertions[0].target).toBe('users.settings'); - expect(assertions[0].parameters).toEqual(['users', 'settings', '{"theme": "dark", "notifications": true}']); + expect(assertions[0].type).toBe("col_default_is"); + expect(assertions[0].target).toBe("users.settings"); + expect(assertions[0].parameters).toEqual([ + "users", + "settings", + '{"theme": "dark", "notifications": true}', + ]); }); - it('should handle numeric default values', () => { + it("should handle numeric default values", () => { const sql = "SELECT col_default_is('users', 'score', 0);"; const assertions = scanner.extractAssertions(sql); - + expect(assertions).toHaveLength(1); - expect(assertions[0].type).toBe('col_default_is'); - expect(assertions[0].target).toBe('users.score'); - expect(assertions[0].parameters).toEqual(['users', 'score', '0']); + expect(assertions[0].type).toBe("col_default_is"); + expect(assertions[0].target).toBe("users.score"); + expect(assertions[0].parameters).toEqual(["users", "score", "0"]); }); - it('should handle boolean default values', () => { + it("should handle boolean default values", () => { const sql = "SELECT col_default_is('users', 'is_active', true);"; const assertions = scanner.extractAssertions(sql); - + expect(assertions).toHaveLength(1); - expect(assertions[0].type).toBe('col_default_is'); - expect(assertions[0].target).toBe('users.is_active'); - expect(assertions[0].parameters).toEqual(['users', 'is_active', 'true']); + expect(assertions[0].type).toBe("col_default_is"); + expect(assertions[0].target).toBe("users.is_active"); + expect(assertions[0].parameters).toEqual(["users", "is_active", "true"]); }); }); -}); \ No newline at end of file +}); diff --git a/test/pgTAPTestScanner.fileDiscovery.test.js b/test/pgTAPTestScanner.fileDiscovery.test.js index 4a6397e..2a8bbd2 100644 --- a/test/pgTAPTestScanner.fileDiscovery.test.js +++ b/test/pgTAPTestScanner.fileDiscovery.test.js @@ -1,22 +1,22 @@ /** * pgTAPTestScanner File Discovery Tests - * + * * Tests the file discovery capabilities of pgTAPTestScanner */ -import { describe, it, expect, beforeEach, afterEach } from 'vitest'; -import { mkdtemp, writeFile, mkdir, rm } from 'fs/promises'; -import { join } from 'path'; -import { tmpdir } from 'os'; -import pgTAPTestScanner from '../src/lib/testing/pgTAPTestScanner.js'; +import { describe, it, 
expect, beforeEach, afterEach } from "vitest"; +import { mkdtemp, writeFile, mkdir, rm } from "fs/promises"; +import { join } from "path"; +import { tmpdir } from "os"; +import pgTAPTestScanner from "../src/lib/testing/pgTAPTestScanner.js"; -describe('pgTAPTestScanner File Discovery', () => { +describe("pgTAPTestScanner File Discovery", () => { let tempDir; let scanner; beforeEach(async () => { // Create temporary directory for test files - tempDir = await mkdtemp(join(tmpdir(), 'pgtap-test-')); + tempDir = await mkdtemp(join(tmpdir(), "pgtap-test-")); scanner = new pgTAPTestScanner(); }); @@ -27,260 +27,321 @@ describe('pgTAPTestScanner File Discovery', () => { } }); - describe('Basic file discovery', () => { - it('should find SQL test files in directory', async () => { + describe("Basic file discovery", () => { + it("should find SQL test files in directory", async () => { // Create test files - await writeFile(join(tempDir, 'test1.sql'), 'SELECT has_table(\'users\');'); - await writeFile(join(tempDir, 'test2.sql'), 'SELECT has_column(\'users\', \'id\');'); - await writeFile(join(tempDir, 'nottest.txt'), 'This is not a SQL file'); + await writeFile(join(tempDir, "test1.sql"), "SELECT has_table('users');"); + await writeFile( + join(tempDir, "test2.sql"), + "SELECT has_column('users', 'id');", + ); + await writeFile(join(tempDir, "nottest.txt"), "This is not a SQL file"); const testFiles = await scanner.scanDirectory(tempDir); expect(testFiles).toHaveLength(2); - expect(testFiles.map(f => f.fileName)).toContain('test1.sql'); - expect(testFiles.map(f => f.fileName)).toContain('test2.sql'); - expect(testFiles.map(f => f.fileName)).not.toContain('nottest.txt'); + expect(testFiles.map((f) => f.fileName)).toContain("test1.sql"); + expect(testFiles.map((f) => f.fileName)).toContain("test2.sql"); + expect(testFiles.map((f) => f.fileName)).not.toContain("nottest.txt"); }); - it('should find test files recursively', async () => { + it("should find test files recursively", async () => { // Create nested directory structure - const subDir = join(tempDir, 'subdirectory'); + const subDir = join(tempDir, "subdirectory"); await mkdir(subDir); - - await writeFile(join(tempDir, 'root.sql'), 'SELECT has_table(\'root\');'); - await writeFile(join(subDir, 'nested.sql'), 'SELECT has_table(\'nested\');'); + + await writeFile(join(tempDir, "root.sql"), "SELECT has_table('root');"); + await writeFile( + join(subDir, "nested.sql"), + "SELECT has_table('nested');", + ); const testFiles = await scanner.scanDirectory(tempDir); expect(testFiles).toHaveLength(2); - expect(testFiles.map(f => f.fileName)).toContain('root.sql'); - expect(testFiles.map(f => f.fileName)).toContain('nested.sql'); + expect(testFiles.map((f) => f.fileName)).toContain("root.sql"); + expect(testFiles.map((f) => f.fileName)).toContain("nested.sql"); }); - it('should handle empty directory', async () => { + it("should handle empty directory", async () => { const testFiles = await scanner.scanDirectory(tempDir); expect(testFiles).toHaveLength(0); }); - it('should handle directory with no SQL files', async () => { - await writeFile(join(tempDir, 'readme.txt'), 'No SQL files here'); - await writeFile(join(tempDir, 'data.json'), '{}'); + it("should handle directory with no SQL files", async () => { + await writeFile(join(tempDir, "readme.txt"), "No SQL files here"); + await writeFile(join(tempDir, "data.json"), "{}"); const testFiles = await scanner.scanDirectory(tempDir); expect(testFiles).toHaveLength(0); }); }); - describe('File extension 
configuration', () => { - it('should support custom file extensions', async () => { + describe("File extension configuration", () => { + it("should support custom file extensions", async () => { const customScanner = new pgTAPTestScanner({ - fileExtensions: ['.sql', '.test.sql', '.pgtap'] + fileExtensions: [".sql", ".test.sql", ".pgtap"], }); - await writeFile(join(tempDir, 'test1.sql'), 'SELECT has_table(\'test1\');'); - await writeFile(join(tempDir, 'test2.test.sql'), 'SELECT has_table(\'test2\');'); - await writeFile(join(tempDir, 'test3.pgtap'), 'SELECT has_table(\'test3\');'); - await writeFile(join(tempDir, 'test4.txt'), 'SELECT has_table(\'test4\');'); + await writeFile(join(tempDir, "test1.sql"), "SELECT has_table('test1');"); + await writeFile( + join(tempDir, "test2.test.sql"), + "SELECT has_table('test2');", + ); + await writeFile( + join(tempDir, "test3.pgtap"), + "SELECT has_table('test3');", + ); + await writeFile(join(tempDir, "test4.txt"), "SELECT has_table('test4');"); const testFiles = await customScanner.scanDirectory(tempDir); expect(testFiles).toHaveLength(3); - expect(testFiles.map(f => f.fileName).sort()).toEqual(['test1.sql', 'test2.test.sql', 'test3.pgtap']); + expect(testFiles.map((f) => f.fileName).sort()).toEqual([ + "test1.sql", + "test2.test.sql", + "test3.pgtap", + ]); }); - it('should filter files by extension correctly', async () => { - await writeFile(join(tempDir, 'test.sql'), 'SELECT has_table(\'users\');'); - await writeFile(join(tempDir, 'test.sql.backup'), 'SELECT has_table(\'backup\');'); - await writeFile(join(tempDir, 'test.txt'), 'Not a SQL file'); + it("should filter files by extension correctly", async () => { + await writeFile(join(tempDir, "test.sql"), "SELECT has_table('users');"); + await writeFile( + join(tempDir, "test.sql.backup"), + "SELECT has_table('backup');", + ); + await writeFile(join(tempDir, "test.txt"), "Not a SQL file"); const testFiles = await scanner.scanDirectory(tempDir); expect(testFiles).toHaveLength(1); - expect(testFiles[0].fileName).toBe('test.sql'); + expect(testFiles[0].fileName).toBe("test.sql"); }); }); - describe('Include/exclude patterns', () => { - it('should respect exclude patterns', async () => { + describe("Include/exclude patterns", () => { + it("should respect exclude patterns", async () => { const customScanner = new pgTAPTestScanner({ - excludePatterns: ['**/*.temp.sql', '**/backup/**'] + excludePatterns: ["**/*.temp.sql", "**/backup/**"], }); - const backupDir = join(tempDir, 'backup'); + const backupDir = join(tempDir, "backup"); await mkdir(backupDir); - await writeFile(join(tempDir, 'test1.sql'), 'SELECT has_table(\'test1\');'); - await writeFile(join(tempDir, 'test2.temp.sql'), 'SELECT has_table(\'test2\');'); - await writeFile(join(backupDir, 'old.sql'), 'SELECT has_table(\'old\');'); + await writeFile(join(tempDir, "test1.sql"), "SELECT has_table('test1');"); + await writeFile( + join(tempDir, "test2.temp.sql"), + "SELECT has_table('test2');", + ); + await writeFile(join(backupDir, "old.sql"), "SELECT has_table('old');"); const testFiles = await customScanner.scanDirectory(tempDir); expect(testFiles).toHaveLength(1); - expect(testFiles[0].fileName).toBe('test1.sql'); + expect(testFiles[0].fileName).toBe("test1.sql"); }); - it('should respect include patterns', async () => { + it("should respect include patterns", async () => { const customScanner = new pgTAPTestScanner({ - includePatterns: ['**/unit-*.sql'] + includePatterns: ["**/unit-*.sql"], }); - await writeFile(join(tempDir, 
'unit-test1.sql'), 'SELECT has_table(\'test1\');'); - await writeFile(join(tempDir, 'integration-test.sql'), 'SELECT has_table(\'test2\');'); - await writeFile(join(tempDir, 'unit-test2.sql'), 'SELECT has_table(\'test3\');'); + await writeFile( + join(tempDir, "unit-test1.sql"), + "SELECT has_table('test1');", + ); + await writeFile( + join(tempDir, "integration-test.sql"), + "SELECT has_table('test2');", + ); + await writeFile( + join(tempDir, "unit-test2.sql"), + "SELECT has_table('test3');", + ); const testFiles = await customScanner.scanDirectory(tempDir); expect(testFiles).toHaveLength(2); - expect(testFiles.map(f => f.fileName).sort()).toEqual(['unit-test1.sql', 'unit-test2.sql']); + expect(testFiles.map((f) => f.fileName).sort()).toEqual([ + "unit-test1.sql", + "unit-test2.sql", + ]); }); }); - describe('Depth limiting', () => { - it('should respect maximum depth setting', async () => { + describe("Depth limiting", () => { + it("should respect maximum depth setting", async () => { const customScanner = new pgTAPTestScanner({ - maxDepth: 1 + maxDepth: 1, }); // Create nested structure beyond max depth - const level1 = join(tempDir, 'level1'); - const level2 = join(level1, 'level2'); - const level3 = join(level2, 'level3'); - + const level1 = join(tempDir, "level1"); + const level2 = join(level1, "level2"); + const level3 = join(level2, "level3"); + await mkdir(level1); await mkdir(level2, { recursive: true }); await mkdir(level3, { recursive: true }); - await writeFile(join(tempDir, 'root.sql'), 'SELECT has_table(\'root\');'); - await writeFile(join(level1, 'level1.sql'), 'SELECT has_table(\'level1\');'); - await writeFile(join(level2, 'level2.sql'), 'SELECT has_table(\'level2\');'); - await writeFile(join(level3, 'level3.sql'), 'SELECT has_table(\'level3\');'); + await writeFile(join(tempDir, "root.sql"), "SELECT has_table('root');"); + await writeFile( + join(level1, "level1.sql"), + "SELECT has_table('level1');", + ); + await writeFile( + join(level2, "level2.sql"), + "SELECT has_table('level2');", + ); + await writeFile( + join(level3, "level3.sql"), + "SELECT has_table('level3');", + ); const testFiles = await customScanner.scanDirectory(tempDir); expect(testFiles).toHaveLength(2); // root and level1 only - expect(testFiles.map(f => f.fileName).sort()).toEqual(['level1.sql', 'root.sql']); + expect(testFiles.map((f) => f.fileName).sort()).toEqual([ + "level1.sql", + "root.sql", + ]); }); }); - describe('Error handling', () => { - it('should throw error for non-existent directory', async () => { - const nonExistentDir = join(tempDir, 'does-not-exist'); - - await expect(scanner.scanDirectory(nonExistentDir)) - .rejects - .toThrow('ENOENT'); + describe("Error handling", () => { + it("should throw error for non-existent directory", async () => { + const nonExistentDir = join(tempDir, "does-not-exist"); + + await expect(scanner.scanDirectory(nonExistentDir)).rejects.toThrow( + "ENOENT", + ); }); - it('should throw error for file instead of directory', async () => { - const testFile = join(tempDir, 'test.sql'); - await writeFile(testFile, 'SELECT has_table(\'users\');'); - - await expect(scanner.scanDirectory(testFile)) - .rejects - .toThrow('Path is not a directory'); + it("should throw error for file instead of directory", async () => { + const testFile = join(tempDir, "test.sql"); + await writeFile(testFile, "SELECT has_table('users');"); + + await expect(scanner.scanDirectory(testFile)).rejects.toThrow( + "Path is not a directory", + ); }); }); - describe('Event emission', () => { 
- it('should emit progress events during scanning', async () => { + describe("Event emission", () => { + it("should emit progress events during scanning", async () => { const events = []; - - scanner.on('progress', (event) => { + + scanner.on("progress", (event) => { events.push(event); }); - scanner.on('directory', (event) => { + scanner.on("directory", (event) => { events.push(event); }); - await writeFile(join(tempDir, 'test1.sql'), 'SELECT has_table(\'test1\');'); - await writeFile(join(tempDir, 'test2.sql'), 'SELECT has_table(\'test2\');'); + await writeFile(join(tempDir, "test1.sql"), "SELECT has_table('test1');"); + await writeFile(join(tempDir, "test2.sql"), "SELECT has_table('test2');"); await scanner.scanDirectory(tempDir); expect(events.length).toBeGreaterThan(0); - expect(events.some(e => e.type === 'progress')).toBe(true); - expect(events.some(e => e.type === 'directory')).toBe(true); + expect(events.some((e) => e.type === "progress")).toBe(true); + expect(events.some((e) => e.type === "directory")).toBe(true); }); - it('should emit success event on completion', async () => { + it("should emit success event on completion", async () => { let successEvent = null; - - scanner.on('success', (event) => { + + scanner.on("success", (event) => { successEvent = event; }); - await writeFile(join(tempDir, 'test.sql'), 'SELECT has_table(\'users\');'); + await writeFile(join(tempDir, "test.sql"), "SELECT has_table('users');"); await scanner.scanDirectory(tempDir); expect(successEvent).not.toBeNull(); - expect(successEvent.type).toBe('success'); - expect(successEvent.message).toContain('Scanned'); + expect(successEvent.type).toBe("success"); + expect(successEvent.message).toContain("Scanned"); }); - it('should emit warning for empty directory', async () => { + it("should emit warning for empty directory", async () => { let warningEvent = null; - - scanner.on('warning', (event) => { + + scanner.on("warning", (event) => { warningEvent = event; }); await scanner.scanDirectory(tempDir); expect(warningEvent).not.toBeNull(); - expect(warningEvent.type).toBe('warning'); - expect(warningEvent.message).toContain('No test files found'); + expect(warningEvent.type).toBe("warning"); + expect(warningEvent.message).toContain("No test files found"); }); }); - describe('Integration with parseTestFile', () => { - it('should integrate discovered files with parsing', async () => { - await writeFile(join(tempDir, 'test1.sql'), ` + describe("Integration with parseTestFile", () => { + it("should integrate discovered files with parsing", async () => { + await writeFile( + join(tempDir, "test1.sql"), + ` SELECT plan(2); SELECT has_table('users'); SELECT has_column('users', 'id'); - `); - - await writeFile(join(tempDir, 'test2.sql'), ` + `, + ); + + await writeFile( + join(tempDir, "test2.sql"), + ` SELECT plan(1); SELECT has_function('get_user'); - `); + `, + ); const testFiles = await scanner.scanDirectory(tempDir); expect(testFiles).toHaveLength(2); - + // Check that files were parsed correctly expect(testFiles[0].assertions).toBeDefined(); expect(testFiles[1].assertions).toBeDefined(); - - const totalAssertions = testFiles.reduce((sum, file) => sum + file.assertions.length, 0); + + const totalAssertions = testFiles.reduce( + (sum, file) => sum + file.assertions.length, + 0, + ); expect(totalAssertions).toBe(3); // 2 from test1 + 1 from test2 - + // Check that coverage map was built const coverageMap = scanner.getCoverageMap(); - expect(Object.keys(coverageMap.tables)).toContain('public.users'); - 
expect(Object.keys(coverageMap.functions)).toContain('get_user'); + expect(Object.keys(coverageMap.tables)).toContain("public.users"); + expect(Object.keys(coverageMap.functions)).toContain("get_user"); }); - it('should handle files with parsing errors gracefully', async () => { + it("should handle files with parsing errors gracefully", async () => { // Create a valid file - await writeFile(join(tempDir, 'valid.sql'), 'SELECT has_table(\'users\');'); - + await writeFile(join(tempDir, "valid.sql"), "SELECT has_table('users');"); + // Create an invalid file that will cause fs.readFile to fail (permission denied) - await writeFile(join(tempDir, 'invalid.sql'), 'SELECT has_table(\'test\');'); + await writeFile( + join(tempDir, "invalid.sql"), + "SELECT has_table('test');", + ); // Make the file unreadable to cause a parsing error - const fs = await import('fs/promises'); + const fs = await import("fs/promises"); try { - await fs.chmod(join(tempDir, 'invalid.sql'), 0o000); // No permissions + await fs.chmod(join(tempDir, "invalid.sql"), 0o000); // No permissions } catch (error) { // If chmod fails (e.g., on some file systems), create a different error // Write binary data that would cause encoding issues - await writeFile(join(tempDir, 'invalid.sql'), Buffer.from([0xFF, 0xFE, 0x00, 0x01])); + await writeFile( + join(tempDir, "invalid.sql"), + Buffer.from([0xff, 0xfe, 0x00, 0x01]), + ); } let errorEvents = []; - scanner.on('error', (event) => { + scanner.on("error", (event) => { errorEvents.push(event); }); @@ -288,29 +349,29 @@ describe('pgTAPTestScanner File Discovery', () => { // Should return at least the valid file, possibly both if the invalid one doesn't error expect(testFiles.length).toBeGreaterThanOrEqual(1); - expect(testFiles.map(f => f.fileName)).toContain('valid.sql'); - + expect(testFiles.map((f) => f.fileName)).toContain("valid.sql"); + // For this test, we'll just check that either we got an error event OR the scanner handled it gracefully // The exact behavior may vary by system expect(true).toBe(true); // This test mainly ensures the scanner doesn't crash }); }); - describe('Performance and scalability', () => { - it('should handle many files efficiently', async () => { + describe("Performance and scalability", () => { + it("should handle many files efficiently", async () => { // Create a reasonable number of test files const fileCount = 20; const promises = []; - + for (let i = 0; i < fileCount; i++) { promises.push( writeFile( join(tempDir, `test${i}.sql`), - `SELECT has_table('table${i}');` - ) + `SELECT has_table('table${i}');`, + ), ); } - + await Promise.all(promises); const startTime = Date.now(); @@ -319,11 +380,14 @@ describe('pgTAPTestScanner File Discovery', () => { expect(testFiles).toHaveLength(fileCount); expect(duration).toBeLessThan(5000); // Should complete within 5 seconds - + // Check that all files were processed - const fileNames = testFiles.map(f => f.fileName).sort(); - const expectedNames = Array.from({ length: fileCount }, (_, i) => `test${i}.sql`).sort(); + const fileNames = testFiles.map((f) => f.fileName).sort(); + const expectedNames = Array.from( + { length: fileCount }, + (_, i) => `test${i}.sql`, + ).sort(); expect(fileNames).toEqual(expectedNames); }); }); -}); \ No newline at end of file +}); diff --git a/test/pgTAPTestScanner.index.test.js b/test/pgTAPTestScanner.index.test.js index 20ef012..e4abc9c 100644 --- a/test/pgTAPTestScanner.index.test.js +++ b/test/pgTAPTestScanner.index.test.js @@ -1,230 +1,295 @@ /** * pgTAPTestScanner Index 
Assertion Parsing Tests - * + * * Tests the index assertion parsing capabilities of pgTAPTestScanner */ -import { describe, it, expect, beforeEach } from 'vitest'; -import pgTAPTestScanner from '../src/lib/testing/pgTAPTestScanner.js'; +import { describe, it, expect, beforeEach } from "vitest"; +import pgTAPTestScanner from "../src/lib/testing/pgTAPTestScanner.js"; -describe('pgTAPTestScanner Index Assertion Parsing', () => { +describe("pgTAPTestScanner Index Assertion Parsing", () => { let scanner; beforeEach(() => { scanner = new pgTAPTestScanner(); }); - describe('Basic index assertion pattern matching', () => { - it('should parse has_index with table and index', () => { + describe("Basic index assertion pattern matching", () => { + it("should parse has_index with table and index", () => { const sql = "SELECT has_index('users', 'idx_users_email');"; const assertions = scanner.extractAssertions(sql); - + expect(assertions).toHaveLength(1); - expect(assertions[0].type).toBe('has_index'); - expect(assertions[0].target).toBe('users.idx_users_email'); - expect(assertions[0].parameters).toEqual(['users', 'idx_users_email']); + expect(assertions[0].type).toBe("has_index"); + expect(assertions[0].target).toBe("users.idx_users_email"); + expect(assertions[0].parameters).toEqual(["users", "idx_users_email"]); }); - it('should parse has_index with schema, table, and index', () => { + it("should parse has_index with schema, table, and index", () => { const sql = "SELECT has_index('public', 'users', 'idx_users_email');"; const assertions = scanner.extractAssertions(sql); - + expect(assertions).toHaveLength(1); - expect(assertions[0].type).toBe('has_index'); - expect(assertions[0].target).toBe('public.users.idx_users_email'); - expect(assertions[0].parameters).toEqual(['public', 'users', 'idx_users_email']); + expect(assertions[0].type).toBe("has_index"); + expect(assertions[0].target).toBe("public.users.idx_users_email"); + expect(assertions[0].parameters).toEqual([ + "public", + "users", + "idx_users_email", + ]); }); - it('should parse hasnt_index with table and index', () => { + it("should parse hasnt_index with table and index", () => { const sql = "SELECT hasnt_index('temp_table', 'non_existent_idx');"; const assertions = scanner.extractAssertions(sql); - + expect(assertions).toHaveLength(1); - expect(assertions[0].type).toBe('hasnt_index'); - expect(assertions[0].target).toBe('temp_table.non_existent_idx'); - expect(assertions[0].parameters).toEqual(['temp_table', 'non_existent_idx']); + expect(assertions[0].type).toBe("hasnt_index"); + expect(assertions[0].target).toBe("temp_table.non_existent_idx"); + expect(assertions[0].parameters).toEqual([ + "temp_table", + "non_existent_idx", + ]); }); - it('should parse hasnt_index with schema, table, and index', () => { - const sql = "SELECT hasnt_index('private', 'sessions', 'idx_sessions_old');"; + it("should parse hasnt_index with schema, table, and index", () => { + const sql = + "SELECT hasnt_index('private', 'sessions', 'idx_sessions_old');"; const assertions = scanner.extractAssertions(sql); - + expect(assertions).toHaveLength(1); - expect(assertions[0].type).toBe('hasnt_index'); - expect(assertions[0].target).toBe('private.sessions.idx_sessions_old'); - expect(assertions[0].parameters).toEqual(['private', 'sessions', 'idx_sessions_old']); + expect(assertions[0].type).toBe("hasnt_index"); + expect(assertions[0].target).toBe("private.sessions.idx_sessions_old"); + expect(assertions[0].parameters).toEqual([ + "private", + "sessions", + 
"idx_sessions_old", + ]); }); }); - describe('index_is_on assertion pattern matching', () => { - it('should parse index_is_on with table, index, and single column', () => { - const sql = "SELECT index_is_on('users', 'idx_users_email', ARRAY['email']);"; + describe("index_is_on assertion pattern matching", () => { + it("should parse index_is_on with table, index, and single column", () => { + const sql = + "SELECT index_is_on('users', 'idx_users_email', ARRAY['email']);"; const assertions = scanner.extractAssertions(sql); - + expect(assertions).toHaveLength(1); - expect(assertions[0].type).toBe('index_is_on'); - expect(assertions[0].target).toBe('users.idx_users_email'); - expect(assertions[0].parameters).toEqual(['users', 'idx_users_email', "'email'"]); + expect(assertions[0].type).toBe("index_is_on"); + expect(assertions[0].target).toBe("users.idx_users_email"); + expect(assertions[0].parameters).toEqual([ + "users", + "idx_users_email", + "'email'", + ]); }); - it('should parse index_is_on with table, index, and multiple columns', () => { - const sql = "SELECT index_is_on('orders', 'idx_orders_status_date', ARRAY['status', 'created_at']);"; + it("should parse index_is_on with table, index, and multiple columns", () => { + const sql = + "SELECT index_is_on('orders', 'idx_orders_status_date', ARRAY['status', 'created_at']);"; const assertions = scanner.extractAssertions(sql); - + expect(assertions).toHaveLength(1); - expect(assertions[0].type).toBe('index_is_on'); - expect(assertions[0].target).toBe('orders.idx_orders_status_date'); - expect(assertions[0].parameters).toEqual(['orders', 'idx_orders_status_date', "'status', 'created_at'"]); + expect(assertions[0].type).toBe("index_is_on"); + expect(assertions[0].target).toBe("orders.idx_orders_status_date"); + expect(assertions[0].parameters).toEqual([ + "orders", + "idx_orders_status_date", + "'status', 'created_at'", + ]); }); - it('should parse index_is_on with schema, table, index, and columns', () => { - const sql = "SELECT index_is_on('public', 'orders', 'idx_orders_status_date', ARRAY['status', 'created_at']);"; + it("should parse index_is_on with schema, table, index, and columns", () => { + const sql = + "SELECT index_is_on('public', 'orders', 'idx_orders_status_date', ARRAY['status', 'created_at']);"; const assertions = scanner.extractAssertions(sql); - + expect(assertions).toHaveLength(1); - expect(assertions[0].type).toBe('index_is_on'); - expect(assertions[0].target).toBe('public.orders.idx_orders_status_date'); - expect(assertions[0].parameters).toEqual(['public', 'orders', 'idx_orders_status_date', "'status', 'created_at'"]); + expect(assertions[0].type).toBe("index_is_on"); + expect(assertions[0].target).toBe("public.orders.idx_orders_status_date"); + expect(assertions[0].parameters).toEqual([ + "public", + "orders", + "idx_orders_status_date", + "'status', 'created_at'", + ]); }); }); - describe('index_is_type assertion pattern matching', () => { - it('should parse index_is_type with table, index, and type', () => { + describe("index_is_type assertion pattern matching", () => { + it("should parse index_is_type with table, index, and type", () => { const sql = "SELECT index_is_type('users', 'idx_users_email', 'btree');"; const assertions = scanner.extractAssertions(sql); - + expect(assertions).toHaveLength(1); - expect(assertions[0].type).toBe('index_is_type'); - expect(assertions[0].target).toBe('users.idx_users_email'); - expect(assertions[0].parameters).toEqual(['users', 'idx_users_email', 'btree']); + 
expect(assertions[0].type).toBe("index_is_type"); + expect(assertions[0].target).toBe("users.idx_users_email"); + expect(assertions[0].parameters).toEqual([ + "users", + "idx_users_email", + "btree", + ]); }); - it('should parse index_is_type with schema, table, index, and type', () => { - const sql = "SELECT index_is_type('public', 'posts', 'idx_posts_content', 'gin');"; + it("should parse index_is_type with schema, table, index, and type", () => { + const sql = + "SELECT index_is_type('public', 'posts', 'idx_posts_content', 'gin');"; const assertions = scanner.extractAssertions(sql); - + expect(assertions).toHaveLength(1); - expect(assertions[0].type).toBe('index_is_type'); - expect(assertions[0].target).toBe('public.posts.idx_posts_content'); - expect(assertions[0].parameters).toEqual(['public', 'posts', 'idx_posts_content', 'gin']); + expect(assertions[0].type).toBe("index_is_type"); + expect(assertions[0].target).toBe("public.posts.idx_posts_content"); + expect(assertions[0].parameters).toEqual([ + "public", + "posts", + "idx_posts_content", + "gin", + ]); }); }); - describe('unique constraint assertion pattern matching', () => { - it('should parse has_unique with table and constraint', () => { + describe("unique constraint assertion pattern matching", () => { + it("should parse has_unique with table and constraint", () => { const sql = "SELECT has_unique('users', 'uq_users_email');"; const assertions = scanner.extractAssertions(sql); - + expect(assertions).toHaveLength(1); - expect(assertions[0].type).toBe('has_unique'); - expect(assertions[0].target).toBe('users.uq_users_email'); - expect(assertions[0].parameters).toEqual(['users', 'uq_users_email']); + expect(assertions[0].type).toBe("has_unique"); + expect(assertions[0].target).toBe("users.uq_users_email"); + expect(assertions[0].parameters).toEqual(["users", "uq_users_email"]); }); - it('should parse has_unique with schema, table, and constraint', () => { + it("should parse has_unique with schema, table, and constraint", () => { const sql = "SELECT has_unique('public', 'products', 'uq_products_sku');"; const assertions = scanner.extractAssertions(sql); - + expect(assertions).toHaveLength(1); - expect(assertions[0].type).toBe('has_unique'); - expect(assertions[0].target).toBe('public.products.uq_products_sku'); - expect(assertions[0].parameters).toEqual(['public', 'products', 'uq_products_sku']); + expect(assertions[0].type).toBe("has_unique"); + expect(assertions[0].target).toBe("public.products.uq_products_sku"); + expect(assertions[0].parameters).toEqual([ + "public", + "products", + "uq_products_sku", + ]); }); - it('should parse hasnt_unique with table and constraint', () => { + it("should parse hasnt_unique with table and constraint", () => { const sql = "SELECT hasnt_unique('temp_table', 'old_constraint');"; const assertions = scanner.extractAssertions(sql); - + expect(assertions).toHaveLength(1); - expect(assertions[0].type).toBe('hasnt_unique'); - expect(assertions[0].target).toBe('temp_table.old_constraint'); - expect(assertions[0].parameters).toEqual(['temp_table', 'old_constraint']); + expect(assertions[0].type).toBe("hasnt_unique"); + expect(assertions[0].target).toBe("temp_table.old_constraint"); + expect(assertions[0].parameters).toEqual([ + "temp_table", + "old_constraint", + ]); }); }); - describe('index_is_primary assertion pattern matching', () => { - it('should parse index_is_primary with table and index', () => { + describe("index_is_primary assertion pattern matching", () => { + it("should parse 
index_is_primary with table and index", () => { const sql = "SELECT index_is_primary('users', 'idx_users_pkey');"; const assertions = scanner.extractAssertions(sql); - + expect(assertions).toHaveLength(1); - expect(assertions[0].type).toBe('index_is_primary'); - expect(assertions[0].target).toBe('users.idx_users_pkey'); - expect(assertions[0].parameters).toEqual(['users', 'idx_users_pkey']); + expect(assertions[0].type).toBe("index_is_primary"); + expect(assertions[0].target).toBe("users.idx_users_pkey"); + expect(assertions[0].parameters).toEqual(["users", "idx_users_pkey"]); }); - it('should parse index_is_primary with schema, table, and index', () => { + it("should parse index_is_primary with schema, table, and index", () => { const sql = "SELECT index_is_primary('public', 'users', 'users_pkey');"; const assertions = scanner.extractAssertions(sql); - + expect(assertions).toHaveLength(1); - expect(assertions[0].type).toBe('index_is_primary'); - expect(assertions[0].target).toBe('public.users.users_pkey'); - expect(assertions[0].parameters).toEqual(['public', 'users', 'users_pkey']); + expect(assertions[0].type).toBe("index_is_primary"); + expect(assertions[0].target).toBe("public.users.users_pkey"); + expect(assertions[0].parameters).toEqual([ + "public", + "users", + "users_pkey", + ]); }); }); - describe('Index coverage tracking', () => { - it('should track indexes in coverage map', () => { + describe("Index coverage tracking", () => { + it("should track indexes in coverage map", () => { const sql = ` SELECT has_index('users', 'idx_users_email'); SELECT index_is_on('users', 'idx_users_email', ARRAY['email']); SELECT index_is_type('users', 'idx_users_email', 'btree'); SELECT has_unique('products', 'uq_products_sku'); `; - + const assertions = scanner.extractAssertions(sql); - + // Manually build coverage map for testing - scanner.testFiles = [{ - filePath: '/test/index_test.sql', - fileName: 'index_test.sql', - assertions, - planCount: 4, - dependencies: [], - metadata: {} - }]; - + scanner.testFiles = [ + { + filePath: "/test/index_test.sql", + fileName: "index_test.sql", + assertions, + planCount: 4, + dependencies: [], + metadata: {}, + }, + ]; + scanner._buildCoverageMap(); const coverageMap = scanner.getCoverageMap(); - + expect(coverageMap.indexes).toBeDefined(); - expect(Object.keys(coverageMap.indexes)).toContain('users.idx_users_email'); - expect(Object.keys(coverageMap.indexes)).toContain('products.uq_products_sku'); - - expect(coverageMap.indexes['users.idx_users_email']).toContain('has_index'); - expect(coverageMap.indexes['users.idx_users_email']).toContain('index_is_on'); - expect(coverageMap.indexes['users.idx_users_email']).toContain('index_is_type'); - expect(coverageMap.indexes['products.uq_products_sku']).toContain('has_unique'); + expect(Object.keys(coverageMap.indexes)).toContain( + "users.idx_users_email", + ); + expect(Object.keys(coverageMap.indexes)).toContain( + "products.uq_products_sku", + ); + + expect(coverageMap.indexes["users.idx_users_email"]).toContain( + "has_index", + ); + expect(coverageMap.indexes["users.idx_users_email"]).toContain( + "index_is_on", + ); + expect(coverageMap.indexes["users.idx_users_email"]).toContain( + "index_is_type", + ); + expect(coverageMap.indexes["products.uq_products_sku"]).toContain( + "has_unique", + ); }); - it('should include indexes in statistics', () => { + it("should include indexes in statistics", () => { const sql = ` SELECT has_index('users', 'idx_users_email'); SELECT has_unique('products', 'uq_products_sku'); 
SELECT index_is_primary('orders', 'orders_pkey'); `; - + const assertions = scanner.extractAssertions(sql); - - scanner.testFiles = [{ - filePath: '/test/index_test.sql', - fileName: 'index_test.sql', - assertions, - planCount: 3, - dependencies: [], - metadata: {} - }]; - + + scanner.testFiles = [ + { + filePath: "/test/index_test.sql", + fileName: "index_test.sql", + assertions, + planCount: 3, + dependencies: [], + metadata: {}, + }, + ]; + scanner._buildCoverageMap(); const stats = scanner.getStatistics(); - + expect(stats.coverageStats.indexesWithTests).toBe(3); // 'users.idx_users_email', 'products.uq_products_sku', 'orders.orders_pkey' }); }); - describe('Complex index test scenarios', () => { - it('should handle mixed index and other assertions', () => { + describe("Complex index test scenarios", () => { + it("should handle mixed index and other assertions", () => { const sql = ` SELECT plan(5); SELECT has_table('users'); @@ -233,23 +298,23 @@ describe('pgTAPTestScanner Index Assertion Parsing', () => { SELECT index_is_on('users', 'idx_users_email', ARRAY['email']); SELECT index_is_type('users', 'idx_users_email', 'btree'); `; - + const assertions = scanner.extractAssertions(sql); - + expect(assertions).toHaveLength(5); // Excludes the plan statement - - const indexAssertions = assertions.filter(a => - a.type.includes('index') || a.type.includes('unique') + + const indexAssertions = assertions.filter( + (a) => a.type.includes("index") || a.type.includes("unique"), ); expect(indexAssertions).toHaveLength(3); - - const tableColumnAssertions = assertions.filter(a => - a.type.includes('table') || a.type.includes('column') + + const tableColumnAssertions = assertions.filter( + (a) => a.type.includes("table") || a.type.includes("column"), ); expect(tableColumnAssertions).toHaveLength(2); }); - it('should handle whitespace and formatting variations', () => { + it("should handle whitespace and formatting variations", () => { const sql = ` SELECT has_index( 'users', 'idx_users_email' ); SELECT index_is_on( @@ -260,20 +325,20 @@ describe('pgTAPTestScanner Index Assertion Parsing', () => { ); SELECT index_is_type('users','idx_users_email','btree'); `; - + const assertions = scanner.extractAssertions(sql); - + expect(assertions).toHaveLength(3); - expect(assertions[0].type).toBe('has_index'); - expect(assertions[1].type).toBe('index_is_on'); - expect(assertions[2].type).toBe('index_is_type'); - - expect(assertions[0].target).toBe('users.idx_users_email'); - expect(assertions[1].target).toBe('public.orders.idx_orders_composite'); - expect(assertions[2].target).toBe('users.idx_users_email'); + expect(assertions[0].type).toBe("has_index"); + expect(assertions[1].type).toBe("index_is_on"); + expect(assertions[2].type).toBe("index_is_type"); + + expect(assertions[0].target).toBe("users.idx_users_email"); + expect(assertions[1].target).toBe("public.orders.idx_orders_composite"); + expect(assertions[2].target).toBe("users.idx_users_email"); }); - it('should handle all index assertion types in one test', () => { + it("should handle all index assertion types in one test", () => { const sql = ` SELECT has_index('users', 'idx_users_email'); SELECT hasnt_index('users', 'old_index'); @@ -283,25 +348,25 @@ describe('pgTAPTestScanner Index Assertion Parsing', () => { SELECT hasnt_unique('users', 'old_unique'); SELECT index_is_primary('users', 'users_pkey'); `; - + const assertions = scanner.extractAssertions(sql); - + expect(assertions).toHaveLength(7); - - const assertionTypes = assertions.map(a => a.type); 
- expect(assertionTypes).toContain('has_index'); - expect(assertionTypes).toContain('hasnt_index'); - expect(assertionTypes).toContain('index_is_on'); - expect(assertionTypes).toContain('index_is_type'); - expect(assertionTypes).toContain('has_unique'); - expect(assertionTypes).toContain('hasnt_unique'); - expect(assertionTypes).toContain('index_is_primary'); - + + const assertionTypes = assertions.map((a) => a.type); + expect(assertionTypes).toContain("has_index"); + expect(assertionTypes).toContain("hasnt_index"); + expect(assertionTypes).toContain("index_is_on"); + expect(assertionTypes).toContain("index_is_type"); + expect(assertionTypes).toContain("has_unique"); + expect(assertionTypes).toContain("hasnt_unique"); + expect(assertionTypes).toContain("index_is_primary"); + // All should be categorized as index assertions - const indexAssertions = assertions.filter(a => - a.type.includes('index') || a.type.includes('unique') + const indexAssertions = assertions.filter( + (a) => a.type.includes("index") || a.type.includes("unique"), ); expect(indexAssertions).toHaveLength(7); }); }); -}); \ No newline at end of file +}); diff --git a/test/pgTAPTestScanner.rls.test.js b/test/pgTAPTestScanner.rls.test.js index 022c558..0f71e23 100644 --- a/test/pgTAPTestScanner.rls.test.js +++ b/test/pgTAPTestScanner.rls.test.js @@ -1,113 +1,128 @@ /** * pgTAPTestScanner RLS Policy Assertion Parsing Tests - * + * * Tests the RLS (Row Level Security) policy assertion parsing capabilities of pgTAPTestScanner */ -import { describe, it, expect, beforeEach } from 'vitest'; -import pgTAPTestScanner from '../src/lib/testing/pgTAPTestScanner.js'; +import { describe, it, expect, beforeEach } from "vitest"; +import pgTAPTestScanner from "../src/lib/testing/pgTAPTestScanner.js"; -describe('pgTAPTestScanner RLS Policy Assertion Parsing', () => { +describe("pgTAPTestScanner RLS Policy Assertion Parsing", () => { let scanner; beforeEach(() => { scanner = new pgTAPTestScanner(); }); - describe('is_rls_enabled assertion parsing', () => { - it('should parse is_rls_enabled with table only', () => { + describe("is_rls_enabled assertion parsing", () => { + it("should parse is_rls_enabled with table only", () => { const sql = "SELECT is_rls_enabled('users');"; const assertions = scanner.extractAssertions(sql); - + expect(assertions).toHaveLength(1); - expect(assertions[0].type).toBe('is_rls_enabled'); - expect(assertions[0].target).toBe('users'); - expect(assertions[0].parameters).toEqual(['users']); + expect(assertions[0].type).toBe("is_rls_enabled"); + expect(assertions[0].target).toBe("users"); + expect(assertions[0].parameters).toEqual(["users"]); }); - it('should parse is_rls_enabled with schema and table', () => { + it("should parse is_rls_enabled with schema and table", () => { const sql = "SELECT is_rls_enabled('public', 'profiles');"; const assertions = scanner.extractAssertions(sql); - + expect(assertions).toHaveLength(1); - expect(assertions[0].type).toBe('is_rls_enabled'); - expect(assertions[0].target).toBe('public.profiles'); - expect(assertions[0].parameters).toEqual(['public', 'profiles']); + expect(assertions[0].type).toBe("is_rls_enabled"); + expect(assertions[0].target).toBe("public.profiles"); + expect(assertions[0].parameters).toEqual(["public", "profiles"]); }); - it('should parse multiple is_rls_enabled assertions', () => { + it("should parse multiple is_rls_enabled assertions", () => { const sql = ` SELECT is_rls_enabled('users'); SELECT is_rls_enabled('auth', 'sessions'); SELECT is_rls_enabled('public', 
'posts'); `; const assertions = scanner.extractAssertions(sql); - + expect(assertions).toHaveLength(3); - expect(assertions[0].target).toBe('users'); - expect(assertions[1].target).toBe('auth.sessions'); - expect(assertions[2].target).toBe('public.posts'); + expect(assertions[0].target).toBe("users"); + expect(assertions[1].target).toBe("auth.sessions"); + expect(assertions[2].target).toBe("public.posts"); }); }); - describe('policy_exists assertion parsing', () => { - it('should parse policy_exists with table and policy name', () => { + describe("policy_exists assertion parsing", () => { + it("should parse policy_exists with table and policy name", () => { const sql = "SELECT policy_exists('users', 'user_select_policy');"; const assertions = scanner.extractAssertions(sql); - + expect(assertions).toHaveLength(1); - expect(assertions[0].type).toBe('policy_exists'); - expect(assertions[0].target).toBe('users.user_select_policy'); - expect(assertions[0].parameters).toEqual(['users', 'user_select_policy']); + expect(assertions[0].type).toBe("policy_exists"); + expect(assertions[0].target).toBe("users.user_select_policy"); + expect(assertions[0].parameters).toEqual(["users", "user_select_policy"]); }); - it('should parse policy_exists with schema, table, and policy name', () => { - const sql = "SELECT policy_exists('public', 'users', 'user_insert_policy');"; + it("should parse policy_exists with schema, table, and policy name", () => { + const sql = + "SELECT policy_exists('public', 'users', 'user_insert_policy');"; const assertions = scanner.extractAssertions(sql); - + expect(assertions).toHaveLength(1); - expect(assertions[0].type).toBe('policy_exists'); - expect(assertions[0].target).toBe('public.users.user_insert_policy'); - expect(assertions[0].parameters).toEqual(['public', 'users', 'user_insert_policy']); + expect(assertions[0].type).toBe("policy_exists"); + expect(assertions[0].target).toBe("public.users.user_insert_policy"); + expect(assertions[0].parameters).toEqual([ + "public", + "users", + "user_insert_policy", + ]); }); - it('should handle various quote styles for policy_exists', () => { + it("should handle various quote styles for policy_exists", () => { const sql = ` SELECT policy_exists("users", "user_policy"); SELECT policy_exists(\`auth\`, \`sessions\`, \`session_policy\`); SELECT policy_exists('posts', 'author_policy'); `; const assertions = scanner.extractAssertions(sql); - + expect(assertions).toHaveLength(3); - expect(assertions[0].target).toBe('users.user_policy'); - expect(assertions[1].target).toBe('auth.sessions.session_policy'); - expect(assertions[2].target).toBe('posts.author_policy'); + expect(assertions[0].target).toBe("users.user_policy"); + expect(assertions[1].target).toBe("auth.sessions.session_policy"); + expect(assertions[2].target).toBe("posts.author_policy"); }); }); - describe('policy_cmd_is assertion parsing', () => { - it('should parse policy_cmd_is with table, policy, and command', () => { + describe("policy_cmd_is assertion parsing", () => { + it("should parse policy_cmd_is with table, policy, and command", () => { const sql = "SELECT policy_cmd_is('users', 'user_policy', 'SELECT');"; const assertions = scanner.extractAssertions(sql); - + expect(assertions).toHaveLength(1); - expect(assertions[0].type).toBe('policy_cmd_is'); - expect(assertions[0].target).toBe('users.user_policy'); - expect(assertions[0].parameters).toEqual(['users', 'user_policy', 'SELECT']); + expect(assertions[0].type).toBe("policy_cmd_is"); + 
expect(assertions[0].target).toBe("users.user_policy"); + expect(assertions[0].parameters).toEqual([ + "users", + "user_policy", + "SELECT", + ]); }); - it('should parse policy_cmd_is with schema, table, policy, and command', () => { - const sql = "SELECT policy_cmd_is('public', 'users', 'user_insert_policy', 'INSERT');"; + it("should parse policy_cmd_is with schema, table, policy, and command", () => { + const sql = + "SELECT policy_cmd_is('public', 'users', 'user_insert_policy', 'INSERT');"; const assertions = scanner.extractAssertions(sql); - + expect(assertions).toHaveLength(1); - expect(assertions[0].type).toBe('policy_cmd_is'); - expect(assertions[0].target).toBe('public.users.user_insert_policy'); - expect(assertions[0].parameters).toEqual(['public', 'users', 'user_insert_policy', 'INSERT']); + expect(assertions[0].type).toBe("policy_cmd_is"); + expect(assertions[0].target).toBe("public.users.user_insert_policy"); + expect(assertions[0].parameters).toEqual([ + "public", + "users", + "user_insert_policy", + "INSERT", + ]); }); - it('should parse different SQL commands', () => { + it("should parse different SQL commands", () => { const sql = ` SELECT policy_cmd_is('posts', 'select_policy', 'SELECT'); SELECT policy_cmd_is('posts', 'insert_policy', 'INSERT'); @@ -116,96 +131,127 @@ describe('pgTAPTestScanner RLS Policy Assertion Parsing', () => { SELECT policy_cmd_is('posts', 'all_policy', 'ALL'); `; const assertions = scanner.extractAssertions(sql); - + expect(assertions).toHaveLength(5); - expect(assertions[0].parameters[2]).toBe('SELECT'); - expect(assertions[1].parameters[2]).toBe('INSERT'); - expect(assertions[2].parameters[2]).toBe('UPDATE'); - expect(assertions[3].parameters[2]).toBe('DELETE'); - expect(assertions[4].parameters[2]).toBe('ALL'); + expect(assertions[0].parameters[2]).toBe("SELECT"); + expect(assertions[1].parameters[2]).toBe("INSERT"); + expect(assertions[2].parameters[2]).toBe("UPDATE"); + expect(assertions[3].parameters[2]).toBe("DELETE"); + expect(assertions[4].parameters[2]).toBe("ALL"); }); }); - describe('policy_roles_are assertion parsing', () => { - it('should parse policy_roles_are with table, policy, and role array', () => { - const sql = "SELECT policy_roles_are('users', 'user_policy', ARRAY['authenticated']);"; + describe("policy_roles_are assertion parsing", () => { + it("should parse policy_roles_are with table, policy, and role array", () => { + const sql = + "SELECT policy_roles_are('users', 'user_policy', ARRAY['authenticated']);"; const assertions = scanner.extractAssertions(sql); - + expect(assertions).toHaveLength(1); - expect(assertions[0].type).toBe('policy_roles_are'); - expect(assertions[0].target).toBe('users.user_policy'); - expect(assertions[0].parameters).toEqual(['users', 'user_policy', "'authenticated'"]); + expect(assertions[0].type).toBe("policy_roles_are"); + expect(assertions[0].target).toBe("users.user_policy"); + expect(assertions[0].parameters).toEqual([ + "users", + "user_policy", + "'authenticated'", + ]); }); - it('should parse policy_roles_are with schema, table, policy, and role array', () => { - const sql = "SELECT policy_roles_are('public', 'users', 'admin_policy', ARRAY['admin', 'moderator']);"; + it("should parse policy_roles_are with schema, table, policy, and role array", () => { + const sql = + "SELECT policy_roles_are('public', 'users', 'admin_policy', ARRAY['admin', 'moderator']);"; const assertions = scanner.extractAssertions(sql); - + expect(assertions).toHaveLength(1); - 
expect(assertions[0].type).toBe('policy_roles_are'); - expect(assertions[0].target).toBe('public.users.admin_policy'); - expect(assertions[0].parameters).toEqual(['public', 'users', 'admin_policy', "'admin', 'moderator'"]); + expect(assertions[0].type).toBe("policy_roles_are"); + expect(assertions[0].target).toBe("public.users.admin_policy"); + expect(assertions[0].parameters).toEqual([ + "public", + "users", + "admin_policy", + "'admin', 'moderator'", + ]); }); - it('should parse multiple role arrays', () => { + it("should parse multiple role arrays", () => { const sql = ` SELECT policy_roles_are('posts', 'author_policy', ARRAY['author']); SELECT policy_roles_are('posts', 'editor_policy', ARRAY['editor', 'admin']); SELECT policy_roles_are('comments', 'public_policy', ARRAY['public', 'authenticated', 'anon']); `; const assertions = scanner.extractAssertions(sql); - + expect(assertions).toHaveLength(3); expect(assertions[0].parameters[2]).toBe("'author'"); expect(assertions[1].parameters[2]).toBe("'editor', 'admin'"); - expect(assertions[2].parameters[2]).toBe("'public', 'authenticated', 'anon'"); + expect(assertions[2].parameters[2]).toBe( + "'public', 'authenticated', 'anon'", + ); }); }); - describe('policies_are assertion parsing', () => { - it('should parse policies_are with table and policy array', () => { - const sql = "SELECT policies_are('users', ARRAY['select_policy', 'insert_policy']);"; + describe("policies_are assertion parsing", () => { + it("should parse policies_are with table and policy array", () => { + const sql = + "SELECT policies_are('users', ARRAY['select_policy', 'insert_policy']);"; const assertions = scanner.extractAssertions(sql); - + expect(assertions).toHaveLength(1); - expect(assertions[0].type).toBe('policies_are'); - expect(assertions[0].target).toBe('users'); - expect(assertions[0].parameters).toEqual(['users', "'select_policy', 'insert_policy'"]); + expect(assertions[0].type).toBe("policies_are"); + expect(assertions[0].target).toBe("users"); + expect(assertions[0].parameters).toEqual([ + "users", + "'select_policy', 'insert_policy'", + ]); }); - it('should parse policies_are with schema, table, and policy array', () => { - const sql = "SELECT policies_are('public', 'users', ARRAY['user_select', 'user_insert', 'user_update']);"; + it("should parse policies_are with schema, table, and policy array", () => { + const sql = + "SELECT policies_are('public', 'users', ARRAY['user_select', 'user_insert', 'user_update']);"; const assertions = scanner.extractAssertions(sql); - + expect(assertions).toHaveLength(1); - expect(assertions[0].type).toBe('policies_are'); - expect(assertions[0].target).toBe('public.users'); - expect(assertions[0].parameters).toEqual(['public', 'users', "'user_select', 'user_insert', 'user_update'"]); + expect(assertions[0].type).toBe("policies_are"); + expect(assertions[0].target).toBe("public.users"); + expect(assertions[0].parameters).toEqual([ + "public", + "users", + "'user_select', 'user_insert', 'user_update'", + ]); }); - it('should parse policies_are with optional description', () => { - const sql = "SELECT policies_are('public', 'users', ARRAY['select_policy', 'insert_policy'], 'All user policies');"; + it("should parse policies_are with optional description", () => { + const sql = + "SELECT policies_are('public', 'users', ARRAY['select_policy', 'insert_policy'], 'All user policies');"; const assertions = scanner.extractAssertions(sql); - + expect(assertions).toHaveLength(1); - expect(assertions[0].type).toBe('policies_are'); - 
expect(assertions[0].target).toBe('public.users'); - expect(assertions[0].parameters).toEqual(['public', 'users', "'select_policy', 'insert_policy'", 'All user policies']); + expect(assertions[0].type).toBe("policies_are"); + expect(assertions[0].target).toBe("public.users"); + expect(assertions[0].parameters).toEqual([ + "public", + "users", + "'select_policy', 'insert_policy'", + "All user policies", + ]); }); - it('should parse single policy in array', () => { + it("should parse single policy in array", () => { const sql = "SELECT policies_are('posts', ARRAY['author_only_policy']);"; const assertions = scanner.extractAssertions(sql); - + expect(assertions).toHaveLength(1); - expect(assertions[0].type).toBe('policies_are'); - expect(assertions[0].target).toBe('posts'); - expect(assertions[0].parameters).toEqual(['posts', "'author_only_policy'"]); + expect(assertions[0].type).toBe("policies_are"); + expect(assertions[0].target).toBe("posts"); + expect(assertions[0].parameters).toEqual([ + "posts", + "'author_only_policy'", + ]); }); }); - describe('Mixed RLS assertion parsing', () => { - it('should parse multiple different RLS assertions in one SQL block', () => { + describe("Mixed RLS assertion parsing", () => { + it("should parse multiple different RLS assertions in one SQL block", () => { const sql = ` -- Check if RLS is enabled SELECT is_rls_enabled('public', 'users'); @@ -227,42 +273,46 @@ describe('pgTAPTestScanner RLS Policy Assertion Parsing', () => { SELECT policies_are('public', 'posts', ARRAY['author_policy', 'admin_policy'], 'Post policies'); `; const assertions = scanner.extractAssertions(sql); - + expect(assertions).toHaveLength(9); - expect(assertions.map(a => a.type)).toEqual([ - 'is_rls_enabled', - 'policy_exists', - 'policy_exists', - 'policy_cmd_is', - 'policy_cmd_is', - 'policy_roles_are', - 'policy_roles_are', - 'policies_are', - 'policies_are' + expect(assertions.map((a) => a.type)).toEqual([ + "is_rls_enabled", + "policy_exists", + "policy_exists", + "policy_cmd_is", + "policy_cmd_is", + "policy_roles_are", + "policy_roles_are", + "policies_are", + "policies_are", ]); }); - it('should handle commented out RLS assertions based on includeCommented option', () => { + it("should handle commented out RLS assertions based on includeCommented option", () => { const sql = ` SELECT is_rls_enabled('users'); -- SELECT policy_exists('users', 'disabled_policy'); /* SELECT policy_cmd_is('users', 'commented_policy', 'SELECT'); */ SELECT policies_are('users', ARRAY['active_policy']); `; - + const assertions = scanner.extractAssertions(sql); // Note: The /* */ multiline comment might not be filtered out by the simple comment pattern expect(assertions.length).toBeGreaterThanOrEqual(2); // At least uncommented assertions - + // Test with includeCommented = true - const scannerWithComments = new pgTAPTestScanner({ includeCommented: true }); + const scannerWithComments = new pgTAPTestScanner({ + includeCommented: true, + }); const assertionsWithComments = scannerWithComments.extractAssertions(sql); - expect(assertionsWithComments.length).toBeGreaterThanOrEqual(assertions.length); // Should include at least as many + expect(assertionsWithComments.length).toBeGreaterThanOrEqual( + assertions.length, + ); // Should include at least as many }); }); - describe('RLS coverage map integration', () => { - it('should build policy coverage map correctly', () => { + describe("RLS coverage map integration", () => { + it("should build policy coverage map correctly", () => { const sql = ` SELECT 
is_rls_enabled('users'); SELECT policy_exists('users', 'user_policy'); @@ -270,80 +320,95 @@ describe('pgTAPTestScanner RLS Policy Assertion Parsing', () => { SELECT policy_roles_are('public', 'posts', 'admin_policy', ARRAY['admin']); SELECT policies_are('comments', ARRAY['public_policy', 'auth_policy']); `; - + const assertions = scanner.extractAssertions(sql); - + // Simulate building coverage map - scanner.testFiles = [{ - filePath: '/test/rls.sql', - fileName: 'rls.sql', - assertions, - planCount: assertions.length, - dependencies: [], - metadata: {} - }]; - + scanner.testFiles = [ + { + filePath: "/test/rls.sql", + fileName: "rls.sql", + assertions, + planCount: assertions.length, + dependencies: [], + metadata: {}, + }, + ]; + scanner._buildCoverageMap(); const coverageMap = scanner.getCoverageMap(); - + // Check that policies are properly tracked expect(coverageMap.policies).toBeDefined(); expect(Object.keys(coverageMap.policies)).toHaveLength(4); - - expect(coverageMap.policies['users']).toContain('is_rls_enabled'); - expect(coverageMap.policies['users.user_policy']).toContain('policy_exists'); - expect(coverageMap.policies['users.user_policy']).toContain('policy_cmd_is'); - expect(coverageMap.policies['public.posts.admin_policy']).toContain('policy_roles_are'); - expect(coverageMap.policies['comments']).toContain('policies_are'); + + expect(coverageMap.policies["users"]).toContain("is_rls_enabled"); + expect(coverageMap.policies["users.user_policy"]).toContain( + "policy_exists", + ); + expect(coverageMap.policies["users.user_policy"]).toContain( + "policy_cmd_is", + ); + expect(coverageMap.policies["public.posts.admin_policy"]).toContain( + "policy_roles_are", + ); + expect(coverageMap.policies["comments"]).toContain("policies_are"); }); - it('should track files by policy target', () => { + it("should track files by policy target", () => { const sql = ` SELECT is_rls_enabled('users'); SELECT policy_exists('posts', 'author_policy'); `; - + const assertions = scanner.extractAssertions(sql); - - scanner.testFiles = [{ - filePath: '/test/user_rls.sql', - fileName: 'user_rls.sql', - assertions: [assertions[0]], - planCount: 1, - dependencies: [], - metadata: {} - }, { - filePath: '/test/post_rls.sql', - fileName: 'post_rls.sql', - assertions: [assertions[1]], - planCount: 1, - dependencies: [], - metadata: {} - }]; - + + scanner.testFiles = [ + { + filePath: "/test/user_rls.sql", + fileName: "user_rls.sql", + assertions: [assertions[0]], + planCount: 1, + dependencies: [], + metadata: {}, + }, + { + filePath: "/test/post_rls.sql", + fileName: "post_rls.sql", + assertions: [assertions[1]], + planCount: 1, + dependencies: [], + metadata: {}, + }, + ]; + scanner._buildCoverageMap(); const coverageMap = scanner.getCoverageMap(); - - expect(coverageMap.filesByTarget['users']).toHaveLength(1); - expect(coverageMap.filesByTarget['users'][0].fileName).toBe('user_rls.sql'); - expect(coverageMap.filesByTarget['posts.author_policy']).toHaveLength(1); - expect(coverageMap.filesByTarget['posts.author_policy'][0].fileName).toBe('post_rls.sql'); + + expect(coverageMap.filesByTarget["users"]).toHaveLength(1); + expect(coverageMap.filesByTarget["users"][0].fileName).toBe( + "user_rls.sql", + ); + expect(coverageMap.filesByTarget["posts.author_policy"]).toHaveLength(1); + expect(coverageMap.filesByTarget["posts.author_policy"][0].fileName).toBe( + "post_rls.sql", + ); }); }); - describe('Edge cases and error handling', () => { - it('should handle malformed RLS assertions gracefully', () => { + 
describe("Edge cases and error handling", () => { + it("should handle malformed RLS assertions gracefully", () => { const sql = ` SELECT is_rls_enabled(); -- missing table SELECT policy_exists('users'); -- missing policy name SELECT policy_cmd_is('users', 'policy'); -- missing command `; - + // Should not throw errors, but may not match patterns expect(() => scanner.extractAssertions(sql)).not.toThrow(); }); - it('should handle different whitespace and formatting', () => { + it("should handle different whitespace and formatting", () => { const sql = ` SELECT is_rls_enabled ( 'users' ) ; SELECT @@ -354,28 +419,30 @@ describe('pgTAPTestScanner RLS Policy Assertion Parsing', () => { ); SELECT policy_cmd_is('posts','author_policy','SELECT'); `; - + const assertions = scanner.extractAssertions(sql); expect(assertions).toHaveLength(3); - expect(assertions[0].type).toBe('is_rls_enabled'); - expect(assertions[1].type).toBe('policy_exists'); - expect(assertions[2].type).toBe('policy_cmd_is'); + expect(assertions[0].type).toBe("is_rls_enabled"); + expect(assertions[1].type).toBe("policy_exists"); + expect(assertions[2].type).toBe("policy_cmd_is"); }); - it('should preserve original SQL in rawSql property', () => { + it("should preserve original SQL in rawSql property", () => { const sql = "SELECT policy_exists('users', 'user_policy');"; const assertions = scanner.extractAssertions(sql); - - expect(assertions[0].rawSql).toBe("SELECT policy_exists('users', 'user_policy')"); + + expect(assertions[0].rawSql).toBe( + "SELECT policy_exists('users', 'user_policy')", + ); }); - it('should track line numbers correctly', () => { + it("should track line numbers correctly", () => { const sql = `-- Line 1 SELECT is_rls_enabled('users'); -- Line 2 SELECT policy_exists('posts', 'author_policy'); -- Line 4 `; - + const assertions = scanner.extractAssertions(sql); expect(assertions).toHaveLength(2); expect(assertions[0].lineNumber).toBe(2); @@ -383,123 +450,123 @@ describe('pgTAPTestScanner RLS Policy Assertion Parsing', () => { }); }); - describe('RLS policy metadata extraction', () => { - it('should extract policy metadata for is_rls_enabled assertions', () => { + describe("RLS policy metadata extraction", () => { + it("should extract policy metadata for is_rls_enabled assertions", () => { const sql = ` SELECT is_rls_enabled('users'); SELECT is_rls_enabled('public', 'profiles'); `; const assertions = scanner.extractAssertions(sql); - + expect(assertions).toHaveLength(2); - + expect(assertions[0].policyMetadata).toEqual({ - schema: 'public', - tableName: 'users' + schema: "public", + tableName: "users", }); - + expect(assertions[1].policyMetadata).toEqual({ - schema: 'public', - tableName: 'profiles' + schema: "public", + tableName: "profiles", }); }); - it('should extract policy metadata for policy_exists assertions', () => { + it("should extract policy metadata for policy_exists assertions", () => { const sql = ` SELECT policy_exists('users', 'user_select_policy'); SELECT policy_exists('auth', 'sessions', 'session_policy'); `; const assertions = scanner.extractAssertions(sql); - + expect(assertions).toHaveLength(2); - + expect(assertions[0].policyMetadata).toEqual({ - schema: 'public', - tableName: 'users', - policyName: 'user_select_policy' + schema: "public", + tableName: "users", + policyName: "user_select_policy", }); - + expect(assertions[1].policyMetadata).toEqual({ - schema: 'auth', - tableName: 'sessions', - policyName: 'session_policy' + schema: "auth", + tableName: "sessions", + policyName: 
"session_policy", }); }); - it('should extract policy metadata for policy_cmd_is assertions', () => { + it("should extract policy metadata for policy_cmd_is assertions", () => { const sql = ` SELECT policy_cmd_is('posts', 'author_policy', 'SELECT'); SELECT policy_cmd_is('public', 'comments', 'moderator_policy', 'DELETE'); `; const assertions = scanner.extractAssertions(sql); - + expect(assertions).toHaveLength(2); - + expect(assertions[0].policyMetadata).toEqual({ - schema: 'public', - tableName: 'posts', - policyName: 'author_policy', - command: 'SELECT' + schema: "public", + tableName: "posts", + policyName: "author_policy", + command: "SELECT", }); - + expect(assertions[1].policyMetadata).toEqual({ - schema: 'public', - tableName: 'comments', - policyName: 'moderator_policy', - command: 'DELETE' + schema: "public", + tableName: "comments", + policyName: "moderator_policy", + command: "DELETE", }); }); - it('should extract policy metadata for policy_roles_are assertions', () => { + it("should extract policy metadata for policy_roles_are assertions", () => { const sql = ` SELECT policy_roles_are('users', 'user_policy', ARRAY['authenticated']); SELECT policy_roles_are('public', 'posts', 'admin_policy', ARRAY['admin', 'moderator']); `; const assertions = scanner.extractAssertions(sql); - + expect(assertions).toHaveLength(2); - + expect(assertions[0].policyMetadata).toEqual({ - schema: 'public', - tableName: 'users', - policyName: 'user_policy', - roles: ['authenticated'] + schema: "public", + tableName: "users", + policyName: "user_policy", + roles: ["authenticated"], }); - + expect(assertions[1].policyMetadata).toEqual({ - schema: 'public', - tableName: 'posts', - policyName: 'admin_policy', - roles: ['admin', 'moderator'] + schema: "public", + tableName: "posts", + policyName: "admin_policy", + roles: ["admin", "moderator"], }); }); - it('should extract policy metadata for policies_are assertions', () => { + it("should extract policy metadata for policies_are assertions", () => { const sql = ` SELECT policies_are('users', ARRAY['select_policy', 'insert_policy']); SELECT policies_are('public', 'posts', ARRAY['author_policy', 'admin_policy'], 'Post access policies'); `; const assertions = scanner.extractAssertions(sql); - + expect(assertions).toHaveLength(2); - + expect(assertions[0].policyMetadata).toEqual({ - schema: 'public', - tableName: 'users', - policies: ['select_policy', 'insert_policy'] + schema: "public", + tableName: "users", + policies: ["select_policy", "insert_policy"], }); - + expect(assertions[1].policyMetadata).toEqual({ - schema: 'public', - tableName: 'posts', - policies: ['author_policy', 'admin_policy'], - description: 'Post access policies' + schema: "public", + tableName: "posts", + policies: ["author_policy", "admin_policy"], + description: "Post access policies", }); }); }); - describe('Statistics and reporting', () => { - it('should include RLS assertions in statistics', () => { + describe("Statistics and reporting", () => { + it("should include RLS assertions in statistics", () => { const sql = ` SELECT is_rls_enabled('users'); SELECT policy_exists('users', 'policy1'); @@ -507,30 +574,32 @@ describe('pgTAPTestScanner RLS Policy Assertion Parsing', () => { SELECT policy_cmd_is('users', 'policy1', 'SELECT'); SELECT policies_are('comments', ARRAY['policy1']); `; - + const assertions = scanner.extractAssertions(sql); - scanner.testFiles = [{ - filePath: '/test/rls.sql', - fileName: 'rls.sql', - assertions, - planCount: assertions.length, - dependencies: [], - 
metadata: {} - }]; - + scanner.testFiles = [ + { + filePath: "/test/rls.sql", + fileName: "rls.sql", + assertions, + planCount: assertions.length, + dependencies: [], + metadata: {}, + }, + ]; + // Need to set totalAssertions manually or via processing scanner.totalAssertions = assertions.length; scanner.filesProcessed = 1; - + scanner._buildCoverageMap(); const stats = scanner.getStatistics(); - + expect(stats.totalAssertions).toBe(5); - expect(stats.assertionTypes['is_rls_enabled']).toBe(1); - expect(stats.assertionTypes['policy_exists']).toBe(2); - expect(stats.assertionTypes['policy_cmd_is']).toBe(1); - expect(stats.assertionTypes['policies_are']).toBe(1); + expect(stats.assertionTypes["is_rls_enabled"]).toBe(1); + expect(stats.assertionTypes["policy_exists"]).toBe(2); + expect(stats.assertionTypes["policy_cmd_is"]).toBe(1); + expect(stats.assertionTypes["policies_are"]).toBe(1); expect(stats.coverageStats.policiesWithTests).toBe(4); // users, users.policy1, posts.policy2, comments }); }); -}); \ No newline at end of file +}); diff --git a/test/pgTAPTestScanner.trigger.test.js b/test/pgTAPTestScanner.trigger.test.js index c27de27..27bedff 100644 --- a/test/pgTAPTestScanner.trigger.test.js +++ b/test/pgTAPTestScanner.trigger.test.js @@ -3,205 +3,270 @@ * @fileoverview Test coverage for trigger-related pgTAP assertions */ -import { describe, it, expect, beforeEach } from 'vitest'; -import pgTAPTestScanner from '../src/lib/testing/pgTAPTestScanner.js'; +import { describe, it, expect, beforeEach } from "vitest"; +import pgTAPTestScanner from "../src/lib/testing/pgTAPTestScanner.js"; -describe('pgTAPTestScanner Trigger Assertion Parsing', () => { +describe("pgTAPTestScanner Trigger Assertion Parsing", () => { let scanner; - + beforeEach(() => { scanner = new pgTAPTestScanner(); }); - describe('has_trigger assertion parsing', () => { - it('should parse has_trigger with table and trigger name', () => { + describe("has_trigger assertion parsing", () => { + it("should parse has_trigger with table and trigger name", () => { const sql = "SELECT has_trigger('users', 'update_timestamp_trigger');"; const assertions = scanner.extractAssertions(sql); - + expect(assertions).toHaveLength(1); - expect(assertions[0].type).toBe('has_trigger'); - expect(assertions[0].target).toBe('public.users.update_timestamp_trigger'); - expect(assertions[0].parameters).toEqual(['users', 'update_timestamp_trigger']); + expect(assertions[0].type).toBe("has_trigger"); + expect(assertions[0].target).toBe( + "public.users.update_timestamp_trigger", + ); + expect(assertions[0].parameters).toEqual([ + "users", + "update_timestamp_trigger", + ]); }); - it('should parse has_trigger with schema, table, and trigger name', () => { + it("should parse has_trigger with schema, table, and trigger name", () => { const sql = "SELECT has_trigger('public', 'posts', 'audit_trigger');"; const assertions = scanner.extractAssertions(sql); - + expect(assertions).toHaveLength(1); - expect(assertions[0].type).toBe('has_trigger'); - expect(assertions[0].target).toBe('public.posts.audit_trigger'); - expect(assertions[0].parameters).toEqual(['public', 'posts', 'audit_trigger']); + expect(assertions[0].type).toBe("has_trigger"); + expect(assertions[0].target).toBe("public.posts.audit_trigger"); + expect(assertions[0].parameters).toEqual([ + "public", + "posts", + "audit_trigger", + ]); }); }); - describe('hasnt_trigger assertion parsing', () => { - it('should parse hasnt_trigger with table and trigger name', () => { + describe("hasnt_trigger assertion 
parsing", () => { + it("should parse hasnt_trigger with table and trigger name", () => { const sql = "SELECT hasnt_trigger('temp_table', 'old_trigger');"; const assertions = scanner.extractAssertions(sql); - + expect(assertions).toHaveLength(1); - expect(assertions[0].type).toBe('hasnt_trigger'); - expect(assertions[0].target).toBe('public.temp_table.old_trigger'); - expect(assertions[0].parameters).toEqual(['temp_table', 'old_trigger']); + expect(assertions[0].type).toBe("hasnt_trigger"); + expect(assertions[0].target).toBe("public.temp_table.old_trigger"); + expect(assertions[0].parameters).toEqual(["temp_table", "old_trigger"]); }); }); - describe('trigger_is assertion parsing', () => { - it('should parse trigger_is with table, trigger, and function', () => { - const sql = "SELECT trigger_is('users', 'update_trigger', 'set_timestamp');"; + describe("trigger_is assertion parsing", () => { + it("should parse trigger_is with table, trigger, and function", () => { + const sql = + "SELECT trigger_is('users', 'update_trigger', 'set_timestamp');"; const assertions = scanner.extractAssertions(sql); - + expect(assertions).toHaveLength(1); - expect(assertions[0].type).toBe('trigger_is'); - expect(assertions[0].target).toBe('public.users.update_trigger'); - expect(assertions[0].parameters).toEqual(['users', 'update_trigger', 'set_timestamp']); + expect(assertions[0].type).toBe("trigger_is"); + expect(assertions[0].target).toBe("public.users.update_trigger"); + expect(assertions[0].parameters).toEqual([ + "users", + "update_trigger", + "set_timestamp", + ]); }); - it('should parse trigger_is with schema, table, trigger, func_schema, and function', () => { - const sql = "SELECT trigger_is('public', 'posts', 'audit_trigger', 'audit', 'log_changes');"; + it("should parse trigger_is with schema, table, trigger, func_schema, and function", () => { + const sql = + "SELECT trigger_is('public', 'posts', 'audit_trigger', 'audit', 'log_changes');"; const assertions = scanner.extractAssertions(sql); - + expect(assertions).toHaveLength(1); - expect(assertions[0].type).toBe('trigger_is'); - expect(assertions[0].target).toBe('public.posts.audit_trigger'); - expect(assertions[0].parameters).toEqual(['public', 'posts', 'audit_trigger', 'audit', 'log_changes']); + expect(assertions[0].type).toBe("trigger_is"); + expect(assertions[0].target).toBe("public.posts.audit_trigger"); + expect(assertions[0].parameters).toEqual([ + "public", + "posts", + "audit_trigger", + "audit", + "log_changes", + ]); }); }); - describe('is_trigger_on assertion parsing', () => { - it('should parse is_trigger_on with table, trigger, and events', () => { + describe("is_trigger_on assertion parsing", () => { + it("should parse is_trigger_on with table, trigger, and events", () => { const sql = "SELECT is_trigger_on('posts', 'audit_trigger', 'UPDATE');"; const assertions = scanner.extractAssertions(sql); - + expect(assertions).toHaveLength(1); - expect(assertions[0].type).toBe('is_trigger_on'); - expect(assertions[0].target).toBe('public.posts.audit_trigger'); - expect(assertions[0].parameters).toEqual(['posts', 'audit_trigger', 'UPDATE']); + expect(assertions[0].type).toBe("is_trigger_on"); + expect(assertions[0].target).toBe("public.posts.audit_trigger"); + expect(assertions[0].parameters).toEqual([ + "posts", + "audit_trigger", + "UPDATE", + ]); }); - it('should parse is_trigger_on with schema, table, trigger, and events', () => { - const sql = "SELECT is_trigger_on('public', 'users', 'validation_trigger', 'INSERT');"; + it("should parse 
is_trigger_on with schema, table, trigger, and events", () => { + const sql = + "SELECT is_trigger_on('public', 'users', 'validation_trigger', 'INSERT');"; const assertions = scanner.extractAssertions(sql); - + expect(assertions).toHaveLength(1); - expect(assertions[0].type).toBe('is_trigger_on'); - expect(assertions[0].target).toBe('public.users.validation_trigger'); - expect(assertions[0].parameters).toEqual(['public', 'users', 'validation_trigger', 'INSERT']); + expect(assertions[0].type).toBe("is_trigger_on"); + expect(assertions[0].target).toBe("public.users.validation_trigger"); + expect(assertions[0].parameters).toEqual([ + "public", + "users", + "validation_trigger", + "INSERT", + ]); }); }); - describe('trigger_fires_on assertion parsing', () => { - it('should parse trigger_fires_on with timing', () => { - const sql = "SELECT trigger_fires_on('users', 'update_trigger', 'BEFORE');"; + describe("trigger_fires_on assertion parsing", () => { + it("should parse trigger_fires_on with timing", () => { + const sql = + "SELECT trigger_fires_on('users', 'update_trigger', 'BEFORE');"; const assertions = scanner.extractAssertions(sql); - + expect(assertions).toHaveLength(1); - expect(assertions[0].type).toBe('trigger_fires_on'); - expect(assertions[0].target).toBe('public.users.update_trigger'); - expect(assertions[0].parameters).toEqual(['users', 'update_trigger', 'BEFORE']); + expect(assertions[0].type).toBe("trigger_fires_on"); + expect(assertions[0].target).toBe("public.users.update_trigger"); + expect(assertions[0].parameters).toEqual([ + "users", + "update_trigger", + "BEFORE", + ]); }); }); - describe('trigger_is_for assertion parsing', () => { - it('should parse trigger_is_for with level', () => { + describe("trigger_is_for assertion parsing", () => { + it("should parse trigger_is_for with level", () => { const sql = "SELECT trigger_is_for('users', 'update_trigger', 'ROW');"; const assertions = scanner.extractAssertions(sql); - + expect(assertions).toHaveLength(1); - expect(assertions[0].type).toBe('trigger_is_for'); - expect(assertions[0].target).toBe('public.users.update_trigger'); - expect(assertions[0].parameters).toEqual(['users', 'update_trigger', 'ROW']); + expect(assertions[0].type).toBe("trigger_is_for"); + expect(assertions[0].target).toBe("public.users.update_trigger"); + expect(assertions[0].parameters).toEqual([ + "users", + "update_trigger", + "ROW", + ]); }); }); - describe('triggers_are assertion parsing', () => { - it('should parse triggers_are with table and trigger array', () => { - const sql = "SELECT triggers_are('users', ARRAY['update_trigger', 'validation_trigger']);"; + describe("triggers_are assertion parsing", () => { + it("should parse triggers_are with table and trigger array", () => { + const sql = + "SELECT triggers_are('users', ARRAY['update_trigger', 'validation_trigger']);"; const assertions = scanner.extractAssertions(sql); - + expect(assertions).toHaveLength(1); - expect(assertions[0].type).toBe('triggers_are'); - expect(assertions[0].target).toBe('public.users'); - expect(assertions[0].parameters).toEqual(['users', "'update_trigger', 'validation_trigger'"]); + expect(assertions[0].type).toBe("triggers_are"); + expect(assertions[0].target).toBe("public.users"); + expect(assertions[0].parameters).toEqual([ + "users", + "'update_trigger', 'validation_trigger'", + ]); }); - it('should parse triggers_are with schema, table, and trigger array', () => { - const sql = "SELECT triggers_are('public', 'posts', ARRAY['audit_trigger', 'notify_trigger']);"; + 
it("should parse triggers_are with schema, table, and trigger array", () => { + const sql = + "SELECT triggers_are('public', 'posts', ARRAY['audit_trigger', 'notify_trigger']);"; const assertions = scanner.extractAssertions(sql); - + expect(assertions).toHaveLength(1); - expect(assertions[0].type).toBe('triggers_are'); - expect(assertions[0].target).toBe('public.posts'); - expect(assertions[0].parameters).toEqual(['public', 'posts', "'audit_trigger', 'notify_trigger'"]); + expect(assertions[0].type).toBe("triggers_are"); + expect(assertions[0].target).toBe("public.posts"); + expect(assertions[0].parameters).toEqual([ + "public", + "posts", + "'audit_trigger', 'notify_trigger'", + ]); }); }); - describe('trigger coverage tracking', () => { - it('should track trigger coverage in coverage map', () => { + describe("trigger coverage tracking", () => { + it("should track trigger coverage in coverage map", () => { const sql = ` SELECT has_trigger('users', 'update_trigger'); SELECT trigger_is('users', 'update_trigger', 'set_timestamp'); SELECT trigger_fires_on('users', 'update_trigger', 'BEFORE'); SELECT is_trigger_on('posts', 'audit_trigger', 'INSERT'); `; - + const assertions = scanner.extractAssertions(sql); - + // Mock test file structure - scanner.testFiles = [{ - filePath: '/test/triggers.sql', - fileName: 'triggers.sql', - assertions, - planCount: assertions.length, - dependencies: [], - metadata: { size: sql.length, lines: sql.split('\n').length, parsed: new Date() } - }]; - + scanner.testFiles = [ + { + filePath: "/test/triggers.sql", + fileName: "triggers.sql", + assertions, + planCount: assertions.length, + dependencies: [], + metadata: { + size: sql.length, + lines: sql.split("\n").length, + parsed: new Date(), + }, + }, + ]; + scanner._buildCoverageMap(); const coverageMap = scanner.getCoverageMap(); - + expect(coverageMap.triggers).toBeDefined(); - expect(Object.keys(coverageMap.triggers)).toContain('public.users.update_trigger'); - expect(Object.keys(coverageMap.triggers)).toContain('public.posts.audit_trigger'); - - expect(coverageMap.triggers['public.users.update_trigger']).toEqual([ - 'has_trigger', - 'trigger_is', - 'trigger_fires_on' + expect(Object.keys(coverageMap.triggers)).toContain( + "public.users.update_trigger", + ); + expect(Object.keys(coverageMap.triggers)).toContain( + "public.posts.audit_trigger", + ); + + expect(coverageMap.triggers["public.users.update_trigger"]).toEqual([ + "has_trigger", + "trigger_is", + "trigger_fires_on", ]); - - expect(coverageMap.triggers['public.posts.audit_trigger']).toEqual([ - 'is_trigger_on' + + expect(coverageMap.triggers["public.posts.audit_trigger"]).toEqual([ + "is_trigger_on", ]); }); - it('should include trigger statistics in coverage stats', () => { + it("should include trigger statistics in coverage stats", () => { const sql = ` SELECT has_trigger('users', 'update_trigger'); SELECT has_trigger('posts', 'audit_trigger'); `; - + const assertions = scanner.extractAssertions(sql); - - scanner.testFiles = [{ - filePath: '/test/triggers.sql', - fileName: 'triggers.sql', - assertions, - planCount: assertions.length, - dependencies: [], - metadata: { size: sql.length, lines: sql.split('\n').length, parsed: new Date() } - }]; - + + scanner.testFiles = [ + { + filePath: "/test/triggers.sql", + fileName: "triggers.sql", + assertions, + planCount: assertions.length, + dependencies: [], + metadata: { + size: sql.length, + lines: sql.split("\n").length, + parsed: new Date(), + }, + }, + ]; + scanner._buildCoverageMap(); const stats = 
scanner.getStatistics(); - + expect(stats.coverageStats.triggersWithTests).toBe(2); }); }); - describe('complex trigger assertion patterns', () => { - it('should handle multiple trigger assertions in one file', () => { + describe("complex trigger assertion patterns", () => { + it("should handle multiple trigger assertions in one file", () => { const sql = ` -- Test trigger existence and properties SELECT has_trigger('users', 'update_timestamp_trigger', 'Update timestamp trigger exists'); @@ -214,25 +279,27 @@ describe('pgTAPTestScanner Trigger Assertion Parsing', () => { SELECT has_trigger('posts', 'audit_trigger'); SELECT trigger_is('posts', 'audit_trigger', 'audit_changes'); `; - + const assertions = scanner.extractAssertions(sql); - + expect(assertions).toHaveLength(7); - + // Verify all assertions are properly categorized - const triggerAssertions = assertions.filter(a => a.type.includes('trigger')); + const triggerAssertions = assertions.filter((a) => + a.type.includes("trigger"), + ); expect(triggerAssertions).toHaveLength(7); - + // Verify target extraction works correctly - const updateTriggerAssertions = assertions.filter(a => - a.target === 'public.users.update_timestamp_trigger' + const updateTriggerAssertions = assertions.filter( + (a) => a.target === "public.users.update_timestamp_trigger", ); expect(updateTriggerAssertions).toHaveLength(5); - - const auditTriggerAssertions = assertions.filter(a => - a.target === 'public.posts.audit_trigger' + + const auditTriggerAssertions = assertions.filter( + (a) => a.target === "public.posts.audit_trigger", ); expect(auditTriggerAssertions).toHaveLength(2); }); }); -}); \ No newline at end of file +}); diff --git a/test/setup.js b/test/setup.js index d67e142..33860ea 100644 --- a/test/setup.js +++ b/test/setup.js @@ -1,4 +1,4 @@ -import { afterAll, afterEach } from 'vitest'; +import { afterAll, afterEach } from "vitest"; // Track all connections globally const globalConnections = new Set(); @@ -8,20 +8,20 @@ afterEach(async () => { // Close any connections created during the test for (const connection of globalConnections) { try { - if (connection && typeof connection.end === 'function') { + if (connection && typeof connection.end === "function") { await connection.end(); } - if (connection && typeof connection.close === 'function') { + if (connection && typeof connection.close === "function") { await connection.close(); } } catch (error) { - console.warn('Failed to close connection in afterEach:', error.message); + console.warn("Failed to close connection in afterEach:", error.message); } } globalConnections.clear(); - + // Clear any remaining timers - if (typeof global.clearAllTimers === 'function') { + if (typeof global.clearAllTimers === "function") { global.clearAllTimers(); } }); @@ -33,27 +33,26 @@ afterAll(async () => { if (global.dbConnection) { await global.dbConnection.end(); } - + // Close any Supabase clients if (global.supabaseClient) { global.supabaseClient = null; } - + // Close any remaining connections for (const connection of globalConnections) { try { - if (connection && typeof connection.end === 'function') { + if (connection && typeof connection.end === "function") { await connection.end(); } } catch (error) { - console.warn('Failed to close connection in afterAll:', error.message); + console.warn("Failed to close connection in afterAll:", error.message); } } - } finally { // Force exit after longer timeout to prevent hanging setTimeout(() => { - console.warn('Force exiting due to hanging resources'); + 
console.warn("Force exiting due to hanging resources"); process.exit(0); }, 2000).unref(); // Use unref() to not keep process alive } @@ -63,4 +62,4 @@ afterAll(async () => { export function trackConnection(connection) { globalConnections.add(connection); return connection; -} \ No newline at end of file +} diff --git a/test/test-cache-performance.js b/test/test-cache-performance.js index 22b52fc..c8b6910 100644 --- a/test/test-cache-performance.js +++ b/test/test-cache-performance.js @@ -2,15 +2,15 @@ /** * Test Cache Performance Validation Script - * + * * Validates that the TestCache provides >50% performance improvement * on repeat test runs as required by P1.T015 */ -const path = require('path'); -const { performance } = require('perf_hooks'); -const RunCommand = require('../src/commands/test/RunCommand'); -const CacheCommand = require('../src/commands/test/CacheCommand'); +const path = require("path"); +const { performance } = require("perf_hooks"); +const RunCommand = require("../src/commands/test/RunCommand"); +const CacheCommand = require("../src/commands/test/CacheCommand"); /** * Performance validation test suite @@ -22,7 +22,7 @@ class CachePerformanceValidator { secondRun: null, improvement: null, cacheStats: null, - passed: false + passed: false, }; } @@ -30,47 +30,46 @@ class CachePerformanceValidator { * Run performance validation */ async validate() { - console.log('🚀 data Test Cache Performance Validation'); - console.log('=' .repeat(50)); - + console.log("🚀 data Test Cache Performance Validation"); + console.log("=".repeat(50)); + try { // Setup test environment await this.setup(); - + // Clear existing cache to ensure clean test - console.log('\n1. Clearing existing cache...'); + console.log("\n1. Clearing existing cache..."); await this.clearCache(); - + // First run (cache miss - baseline) - console.log('\n2. Running first test execution (building cache)...'); - const firstRunTime = await this.runTests('First run (cache miss)'); + console.log("\n2. Running first test execution (building cache)..."); + const firstRunTime = await this.runTests("First run (cache miss)"); this.results.firstRun = { executionTime: firstRunTime, - cacheHits: 0 + cacheHits: 0, }; - + // Second run (cache hit - optimized) - console.log('\n3. Running second test execution (using cache)...'); - const secondRunTime = await this.runTests('Second run (cache hit)'); + console.log("\n3. Running second test execution (using cache)..."); + const secondRunTime = await this.runTests("Second run (cache hit)"); this.results.secondRun = { executionTime: secondRunTime, - cacheHits: 1 // Assuming at least one cache hit + cacheHits: 1, // Assuming at least one cache hit }; - + // Calculate performance improvement - console.log('\n4. Analyzing performance improvement...'); + console.log("\n4. Analyzing performance improvement..."); await this.analyzePerformance(); - + // Get cache statistics - console.log('\n5. Gathering cache statistics...'); + console.log("\n5. Gathering cache statistics..."); await this.getCacheStats(); - + // Display results - console.log('\n6. Performance Validation Results:'); + console.log("\n6. 
Performance Validation Results:"); this.displayResults(); - + return this.results.passed; - } catch (error) { console.error(`❌ Validation failed: ${error.message}`); console.error(error.stack); @@ -83,26 +82,28 @@ class CachePerformanceValidator { */ async setup() { // Mock database URL for testing - this.databaseUrl = process.env.DATABASE_URL || 'postgresql://postgres:postgres@localhost:54332/postgres'; - + this.databaseUrl = + process.env.DATABASE_URL || + "postgresql://postgres:postgres@localhost:54332/postgres"; + // Create run command instance this.runCommand = new RunCommand( this.databaseUrl, null, // serviceRoleKey - './tests', // testsDir - './test-results', // outputDir + "./tests", // testsDir + "./test-results", // outputDir console, // logger - false // isProd + false, // isProd ); - + // Create cache command instance this.cacheCommand = new CacheCommand( this.databaseUrl, null, - './tests', - './test-results', + "./tests", + "./test-results", console, - false + false, ); } @@ -111,10 +112,10 @@ class CachePerformanceValidator { */ async clearCache() { try { - await this.cacheCommand.performExecute({ action: 'clear' }); - console.log(' ✓ Cache cleared successfully'); + await this.cacheCommand.performExecute({ action: "clear" }); + console.log(" ✓ Cache cleared successfully"); } catch (error) { - console.log(' ℹ No existing cache to clear'); + console.log(" ℹ No existing cache to clear"); } } @@ -125,26 +126,27 @@ class CachePerformanceValidator { */ async runTests(label) { console.log(` Running ${label}...`); - + const startTime = performance.now(); - + try { // Mock test execution for performance measurement // In a real scenario, this would run actual pgTAP tests const results = await this.simulateTestExecution(); - + const endTime = performance.now(); const executionTime = Math.round(endTime - startTime); - + console.log(` ✓ ${label} completed in ${executionTime}ms`); - + return executionTime; - } catch (error) { const endTime = performance.now(); const executionTime = Math.round(endTime - startTime); - - console.log(` ⚠ ${label} completed with warnings in ${executionTime}ms`); + + console.log( + ` ⚠ ${label} completed with warnings in ${executionTime}ms`, + ); return executionTime; } } @@ -155,11 +157,11 @@ class CachePerformanceValidator { * @returns {Promise} Mock test results */ async simulateTestExecution() { - const TestCache = require('../src/lib/test/TestCache'); - const cache = new TestCache('.data-cache/test-results'); - + const TestCache = require("../src/lib/test/TestCache"); + const cache = new TestCache(".data-cache/test-results"); + // Mock test function - const testFunction = 'run_admin_delete_pet_tests'; + const testFunction = "run_admin_delete_pet_tests"; const mockTapOutput = `1..5 ok 1 - Test admin can delete pet ok 2 - Test orphaned applications are cleaned up @@ -169,15 +171,19 @@ ok 5 - Test cascade deletions work correctly`; try { // Calculate hash for this test - const hash = await cache.calculateHash(testFunction, this.databaseUrl, {}); - + const hash = await cache.calculateHash( + testFunction, + this.databaseUrl, + {}, + ); + // Check if we have cached results const cachedResult = await cache.getCachedResult(hash); - + if (cachedResult && cachedResult.tapOutput) { // Cache hit - much faster execution - await new Promise(resolve => setTimeout(resolve, 25)); // Fast cache retrieval - + await new Promise((resolve) => setTimeout(resolve, 25)); // Fast cache retrieval + return { total: 5, passed: 5, @@ -192,25 +198,29 @@ ok 5 - Test cascade deletions 
work correctly`; cacheMisses: 0, testsExecuted: 1, testsFromCache: 1, - cacheHitRate: '100.0' - } + cacheHitRate: "100.0", + }, }; } else { // Cache miss - slower execution (simulate database operations) const executionTime = 150 + Math.random() * 100; // 150-250ms - await new Promise(resolve => setTimeout(resolve, executionTime)); - + await new Promise((resolve) => setTimeout(resolve, executionTime)); + // Store result in cache - await cache.storeResult(hash, { - tapOutput: mockTapOutput, - originalDuration: executionTime - }, { - testFunction: testFunction, - duration: executionTime, - databaseUrl: this.databaseUrl, - options: {} - }); - + await cache.storeResult( + hash, + { + tapOutput: mockTapOutput, + originalDuration: executionTime, + }, + { + testFunction: testFunction, + duration: executionTime, + databaseUrl: this.databaseUrl, + options: {}, + }, + ); + return { total: 5, passed: 5, @@ -225,16 +235,16 @@ ok 5 - Test cascade deletions work correctly`; cacheMisses: 1, testsExecuted: 1, testsFromCache: 0, - cacheHitRate: '0.0' - } + cacheHitRate: "0.0", + }, }; } } catch (error) { console.warn(`Cache simulation error: ${error.message}`); // Fallback to normal execution timing const executionTime = 200 + Math.random() * 50; - await new Promise(resolve => setTimeout(resolve, executionTime)); - + await new Promise((resolve) => setTimeout(resolve, executionTime)); + return { total: 5, passed: 5, @@ -249,8 +259,8 @@ ok 5 - Test cascade deletions work correctly`; cacheMisses: 1, testsExecuted: 1, testsFromCache: 0, - cacheHitRate: '0.0' - } + cacheHitRate: "0.0", + }, }; } } @@ -261,18 +271,18 @@ ok 5 - Test cascade deletions work correctly`; async analyzePerformance() { const firstTime = this.results.firstRun.executionTime; const secondTime = this.results.secondRun.executionTime; - + const improvement = ((firstTime - secondTime) / firstTime) * 100; this.results.improvement = Math.max(0, improvement); // Ensure non-negative - + // Check if improvement meets requirement (>50%) this.results.passed = this.results.improvement > 50; - + console.log(` First run: ${firstTime}ms`); console.log(` Second run: ${secondTime}ms`); console.log(` Improvement: ${this.results.improvement.toFixed(1)}%`); console.log(` Requirement: >50% improvement`); - console.log(` Status: ${this.results.passed ? '✓ PASSED' : '❌ FAILED'}`); + console.log(` Status: ${this.results.passed ? "✓ PASSED" : "❌ FAILED"}`); } /** @@ -280,7 +290,9 @@ ok 5 - Test cascade deletions work correctly`; */ async getCacheStats() { try { - this.results.cacheStats = await this.cacheCommand.performExecute({ action: 'stats' }); + this.results.cacheStats = await this.cacheCommand.performExecute({ + action: "stats", + }); console.log(` ✓ Cache statistics gathered`); } catch (error) { console.log(` ⚠ Could not gather cache stats: ${error.message}`); @@ -291,51 +303,67 @@ ok 5 - Test cascade deletions work correctly`; * Display validation results */ displayResults() { - console.log('\n📊 Performance Validation Summary:'); - console.log('-'.repeat(40)); - + console.log("\n📊 Performance Validation Summary:"); + console.log("-".repeat(40)); + // Test results - console.log(`Test Status: ${this.results.passed ? '✅ PASSED' : '❌ FAILED'}`); - console.log(`Performance Improvement: ${this.results.improvement.toFixed(1)}%`); + console.log( + `Test Status: ${this.results.passed ? 
"✅ PASSED" : "❌ FAILED"}`, + ); + console.log( + `Performance Improvement: ${this.results.improvement.toFixed(1)}%`, + ); console.log(`Required Improvement: >50%`); - + // Timing breakdown - console.log('\nTiming Breakdown:'); - console.log(` First run (no cache): ${this.results.firstRun.executionTime}ms`); - console.log(` Second run (with cache): ${this.results.secondRun.executionTime}ms`); - console.log(` Time saved: ${this.results.firstRun.executionTime - this.results.secondRun.executionTime}ms`); - + console.log("\nTiming Breakdown:"); + console.log( + ` First run (no cache): ${this.results.firstRun.executionTime}ms`, + ); + console.log( + ` Second run (with cache): ${this.results.secondRun.executionTime}ms`, + ); + console.log( + ` Time saved: ${this.results.firstRun.executionTime - this.results.secondRun.executionTime}ms`, + ); + // Cache effectiveness if (this.results.cacheStats && this.results.cacheStats.stats) { const stats = this.results.cacheStats.stats; - console.log('\nCache Statistics:'); + console.log("\nCache Statistics:"); console.log(` Cache files: ${stats.files.count}`); console.log(` Hit rate: ${stats.performance.hitRate}%`); console.log(` Cache hits: ${stats.performance.hits}`); console.log(` Cache misses: ${stats.performance.misses}`); } - + // Recommendations - console.log('\nRecommendations:'); + console.log("\nRecommendations:"); if (this.results.passed) { - console.log(' ✓ Cache is performing excellently'); - console.log(' ✓ Test execution time reduced significantly'); - console.log(' ✓ Ready for production use'); + console.log(" ✓ Cache is performing excellently"); + console.log(" ✓ Test execution time reduced significantly"); + console.log(" ✓ Ready for production use"); } else { - console.log(' ⚠ Cache performance may need tuning'); - console.log(' ⚠ Consider optimizing hash calculation'); - console.log(' ⚠ Verify cache invalidation logic'); + console.log(" ⚠ Cache performance may need tuning"); + console.log(" ⚠ Consider optimizing hash calculation"); + console.log(" ⚠ Verify cache invalidation logic"); } - + // Overall status - console.log(`\n🎯 Overall Status: ${this.results.passed ? 'VALIDATION PASSED' : 'VALIDATION FAILED'}`); - + console.log( + `\n🎯 Overall Status: ${this.results.passed ? "VALIDATION PASSED" : "VALIDATION FAILED"}`, + ); + if (this.results.passed) { - console.log('\n🎉 TestCache successfully provides >50% performance improvement!'); - console.log('P1.T015 implementation validated and ready for deployment.'); + console.log( + "\n🎉 TestCache successfully provides >50% performance improvement!", + ); + console.log("P1.T015 implementation validated and ready for deployment."); } else { - console.log('\n🔧 TestCache needs optimization to meet >50% improvement requirement.'); - console.log('Please review cache implementation and test again.'); + console.log( + "\n🔧 TestCache needs optimization to meet >50% improvement requirement.", + ); + console.log("Please review cache implementation and test again."); } } } @@ -346,16 +374,16 @@ ok 5 - Test cascade deletions work correctly`; async function main() { const validator = new CachePerformanceValidator(); const passed = await validator.validate(); - + process.exit(passed ? 
0 : 1); } // Run if called directly if (require.main === module) { - main().catch(error => { - console.error('Fatal error:', error); + main().catch((error) => { + console.error("Fatal error:", error); process.exit(1); }); } -module.exports = CachePerformanceValidator; \ No newline at end of file +module.exports = CachePerformanceValidator; diff --git a/test/test-diff-engine.js b/test/test-diff-engine.js index 7e63387..b144288 100644 --- a/test/test-diff-engine.js +++ b/test/test-diff-engine.js @@ -1,53 +1,86 @@ -const test = require('node:test'); -const assert = require('node:assert'); -const DiffEngine = require('../build/lib/DiffEngine'); +const test = require("node:test"); +const assert = require("node:assert"); +const DiffEngine = require("../build/lib/DiffEngine"); -test('DiffEngine - Class Structure and Instantiation', async (t) => { - await t.test('should instantiate DiffEngine successfully', () => { +test("DiffEngine - Class Structure and Instantiation", async (t) => { + await t.test("should instantiate DiffEngine successfully", () => { const engine = new DiffEngine(); - assert(engine instanceof DiffEngine, 'Should be instance of DiffEngine'); - assert(typeof engine.generateDiff === 'function', 'Should have generateDiff method'); - assert(typeof engine.getLastDiff === 'function', 'Should have getLastDiff method'); - assert(typeof engine.isGenerating === 'function', 'Should have isGenerating method'); + assert(engine instanceof DiffEngine, "Should be instance of DiffEngine"); + assert( + typeof engine.generateDiff === "function", + "Should have generateDiff method", + ); + assert( + typeof engine.getLastDiff === "function", + "Should have getLastDiff method", + ); + assert( + typeof engine.isGenerating === "function", + "Should have isGenerating method", + ); }); - await t.test('should accept configuration in constructor', () => { + await t.test("should accept configuration in constructor", () => { const config = { includeData: true, - excludeSchemas: ['test_schema'], - customOption: 'test' + excludeSchemas: ["test_schema"], + customOption: "test", }; const engine = new DiffEngine(config); - - assert(engine.config.includeData === true, 'Should accept includeData config'); - assert(Array.isArray(engine.config.excludeSchemas), 'Should have excludeSchemas array'); - assert(engine.config.excludeSchemas.includes('test_schema'), 'Should include custom schema'); - assert(engine.config.customOption === 'test', 'Should accept custom options'); + + assert( + engine.config.includeData === true, + "Should accept includeData config", + ); + assert( + Array.isArray(engine.config.excludeSchemas), + "Should have excludeSchemas array", + ); + assert( + engine.config.excludeSchemas.includes("test_schema"), + "Should include custom schema", + ); + assert( + engine.config.customOption === "test", + "Should accept custom options", + ); }); - await t.test('should have default configuration values', () => { + await t.test("should have default configuration values", () => { const engine = new DiffEngine(); - - assert(engine.config.includeData === false, 'Default includeData should be false'); - assert(Array.isArray(engine.config.excludeSchemas), 'Should have default excludeSchemas'); - assert(engine.config.includeDropStatements === true, 'Default includeDropStatements should be true'); - assert(engine.config.sortOutput === true, 'Default sortOutput should be true'); + + assert( + engine.config.includeData === false, + "Default includeData should be false", + ); + assert( + 
Array.isArray(engine.config.excludeSchemas), + "Should have default excludeSchemas", + ); + assert( + engine.config.includeDropStatements === true, + "Default includeDropStatements should be true", + ); + assert( + engine.config.sortOutput === true, + "Default sortOutput should be true", + ); }); }); -test('DiffEngine - EventEmitter Functionality', async (t) => { - await t.test('should emit start event when generateDiff begins', async () => { +test("DiffEngine - EventEmitter Functionality", async (t) => { + await t.test("should emit start event when generateDiff begins", async () => { const engine = new DiffEngine(); let startEventReceived = false; let startEventData = null; - engine.on('start', (data) => { + engine.on("start", (data) => { startEventReceived = true; startEventData = data; }); - const mockCurrentDb = { host: 'localhost', database: 'test_current' }; - const mockDesiredDb = { host: 'localhost', database: 'test_desired' }; + const mockCurrentDb = { host: "localhost", database: "test_current" }; + const mockDesiredDb = { host: "localhost", database: "test_desired" }; try { await engine.generateDiff(mockCurrentDb, mockDesiredDb); @@ -55,110 +88,152 @@ test('DiffEngine - EventEmitter Functionality', async (t) => { // Expected to complete without actual diff logic } - assert(startEventReceived, 'Start event should be emitted'); - assert(startEventData !== null, 'Start event should include data'); - assert(startEventData.currentDb !== null, 'Start event should include currentDb info'); - assert(startEventData.desiredDb !== null, 'Start event should include desiredDb info'); - assert(startEventData.timestamp instanceof Date, 'Start event should include timestamp'); + assert(startEventReceived, "Start event should be emitted"); + assert(startEventData !== null, "Start event should include data"); + assert( + startEventData.currentDb !== null, + "Start event should include currentDb info", + ); + assert( + startEventData.desiredDb !== null, + "Start event should include desiredDb info", + ); + assert( + startEventData.timestamp instanceof Date, + "Start event should include timestamp", + ); }); - await t.test('should emit progress events during processing', async () => { + await t.test("should emit progress events during processing", async () => { const engine = new DiffEngine(); const progressEvents = []; - engine.on('progress', (data) => { + engine.on("progress", (data) => { progressEvents.push(data); }); - const mockCurrentDb = { host: 'localhost', database: 'test_current' }; - const mockDesiredDb = { host: 'localhost', database: 'test_desired' }; + const mockCurrentDb = { host: "localhost", database: "test_current" }; + const mockDesiredDb = { host: "localhost", database: "test_desired" }; await engine.generateDiff(mockCurrentDb, mockDesiredDb); - assert(progressEvents.length > 0, 'Should emit at least one progress event'); - - const initEvent = progressEvents.find(e => e.step === 'initializing'); - assert(initEvent !== undefined, 'Should emit initializing progress event'); - assert(typeof initEvent.message === 'string', 'Progress event should include message'); - assert(initEvent.timestamp instanceof Date, 'Progress event should include timestamp'); + assert( + progressEvents.length > 0, + "Should emit at least one progress event", + ); + + const initEvent = progressEvents.find((e) => e.step === "initializing"); + assert(initEvent !== undefined, "Should emit initializing progress event"); + assert( + typeof initEvent.message === "string", + "Progress event should include 
message", + ); + assert( + initEvent.timestamp instanceof Date, + "Progress event should include timestamp", + ); }); - await t.test('should emit complete event when generateDiff finishes', async () => { - const engine = new DiffEngine(); - let completeEventReceived = false; - let completeEventData = null; + await t.test( + "should emit complete event when generateDiff finishes", + async () => { + const engine = new DiffEngine(); + let completeEventReceived = false; + let completeEventData = null; - engine.on('complete', (data) => { - completeEventReceived = true; - completeEventData = data; - }); + engine.on("complete", (data) => { + completeEventReceived = true; + completeEventData = data; + }); - const mockCurrentDb = { host: 'localhost', database: 'test_current' }; - const mockDesiredDb = { host: 'localhost', database: 'test_desired' }; + const mockCurrentDb = { host: "localhost", database: "test_current" }; + const mockDesiredDb = { host: "localhost", database: "test_desired" }; - await engine.generateDiff(mockCurrentDb, mockDesiredDb); + await engine.generateDiff(mockCurrentDb, mockDesiredDb); - assert(completeEventReceived, 'Complete event should be emitted'); - assert(completeEventData.diff !== null, 'Complete event should include diff result'); - assert(typeof completeEventData.duration === 'number', 'Complete event should include duration'); - assert(completeEventData.timestamp instanceof Date, 'Complete event should include timestamp'); - }); + assert(completeEventReceived, "Complete event should be emitted"); + assert( + completeEventData.diff !== null, + "Complete event should include diff result", + ); + assert( + typeof completeEventData.duration === "number", + "Complete event should include duration", + ); + assert( + completeEventData.timestamp instanceof Date, + "Complete event should include timestamp", + ); + }, + ); - await t.test('should emit error event on validation failures', async () => { + await t.test("should emit error event on validation failures", async () => { const engine = new DiffEngine(); let errorEventReceived = false; let errorEventData = null; - engine.on('error', (data) => { + engine.on("error", (data) => { errorEventReceived = true; errorEventData = data; }); try { await engine.generateDiff(null, null); - assert.fail('Should have thrown an error'); + assert.fail("Should have thrown an error"); } catch (error) { // Expected error } - assert(errorEventReceived, 'Error event should be emitted'); - assert(errorEventData.error instanceof Error, 'Error event should include error object'); - assert(typeof errorEventData.message === 'string', 'Error event should include message'); - assert(errorEventData.timestamp instanceof Date, 'Error event should include timestamp'); + assert(errorEventReceived, "Error event should be emitted"); + assert( + errorEventData.error instanceof Error, + "Error event should include error object", + ); + assert( + typeof errorEventData.message === "string", + "Error event should include message", + ); + assert( + errorEventData.timestamp instanceof Date, + "Error event should include timestamp", + ); }); }); -test('DiffEngine - State Management', async (t) => { - await t.test('should track running state correctly', async () => { +test("DiffEngine - State Management", async (t) => { + await t.test("should track running state correctly", async () => { const engine = new DiffEngine(); - - assert(engine.isGenerating() === false, 'Should not be running initially'); - - const mockCurrentDb = { host: 'localhost', database: 
'test_current' }; - const mockDesiredDb = { host: 'localhost', database: 'test_desired' }; + + assert(engine.isGenerating() === false, "Should not be running initially"); + + const mockCurrentDb = { host: "localhost", database: "test_current" }; + const mockDesiredDb = { host: "localhost", database: "test_desired" }; const diffPromise = engine.generateDiff(mockCurrentDb, mockDesiredDb); - + // Note: Due to async nature, we can't reliably test isRunning === true // in the middle of execution, but we can test the final state - + await diffPromise; - assert(engine.isGenerating() === false, 'Should not be running after completion'); + assert( + engine.isGenerating() === false, + "Should not be running after completion", + ); }); - await t.test('should prevent concurrent diff generation', async () => { + await t.test("should prevent concurrent diff generation", async () => { const engine = new DiffEngine(); - - const mockCurrentDb = { host: 'localhost', database: 'test_current' }; - const mockDesiredDb = { host: 'localhost', database: 'test_desired' }; + + const mockCurrentDb = { host: "localhost", database: "test_current" }; + const mockDesiredDb = { host: "localhost", database: "test_desired" }; // Manually set isRunning to simulate a running diff engine.isRunning = true; - + let secondDiffError = null; try { await engine.generateDiff(mockCurrentDb, mockDesiredDb); - assert.fail('Should have thrown an error for concurrent execution'); + assert.fail("Should have thrown an error for concurrent execution"); } catch (error) { secondDiffError = error; } @@ -167,21 +242,28 @@ test('DiffEngine - State Management', async (t) => { engine.isRunning = false; // Verify the concurrent execution was prevented - assert(secondDiffError !== null, 'Should have caught an error'); - assert(secondDiffError.message.includes('already running'), 'Should indicate engine is already running'); + assert(secondDiffError !== null, "Should have caught an error"); + assert( + secondDiffError.message.includes("already running"), + "Should indicate engine is already running", + ); }); - await t.test('should store and return last diff result', async () => { + await t.test("should store and return last diff result", async () => { const engine = new DiffEngine(); - - assert(engine.getLastDiff() === null, 'Should return null initially'); - - const mockCurrentDb = { host: 'localhost', database: 'test_current' }; - const mockDesiredDb = { host: 'localhost', database: 'test_desired' }; + + assert(engine.getLastDiff() === null, "Should return null initially"); + + const mockCurrentDb = { host: "localhost", database: "test_current" }; + const mockDesiredDb = { host: "localhost", database: "test_desired" }; const result = await engine.generateDiff(mockCurrentDb, mockDesiredDb); - - assert(engine.getLastDiff() !== null, 'Should store last diff result'); - assert.deepStrictEqual(engine.getLastDiff(), result, 'Should return the same result object'); + + assert(engine.getLastDiff() !== null, "Should store last diff result"); + assert.deepStrictEqual( + engine.getLastDiff(), + result, + "Should return the same result object", + ); }); -}); \ No newline at end of file +}); diff --git a/test/test-migration-metadata.js b/test/test-migration-metadata.js index 354c916..5347892 100644 --- a/test/test-migration-metadata.js +++ b/test/test-migration-metadata.js @@ -1,19 +1,19 @@ #!/usr/bin/env node -const fs = require('fs'); -const path = require('path'); -const MigrationMetadata = require('../src/lib/MigrationMetadata'); +const fs = require("fs"); 
+const path = require("path"); +const MigrationMetadata = require("../src/lib/MigrationMetadata"); /** * Test suite for MigrationMetadata class */ class MigrationMetadataTests { constructor() { - this.testDir = path.join(__dirname, 'test-migrations'); + this.testDir = path.join(__dirname, "test-migrations"); this.passCount = 0; this.failCount = 0; } - + setup() { // Clean up any existing test directory if (fs.existsSync(this.testDir)) { @@ -21,13 +21,13 @@ class MigrationMetadataTests { } fs.mkdirSync(this.testDir, { recursive: true }); } - + cleanup() { if (fs.existsSync(this.testDir)) { fs.rmSync(this.testDir, { recursive: true }); } } - + assert(condition, message) { if (condition) { console.log(`✅ PASS: ${message}`); @@ -37,7 +37,7 @@ class MigrationMetadataTests { this.failCount++; } } - + assertThrows(fn, expectedMessage, testMessage) { try { fn(); @@ -53,314 +53,376 @@ class MigrationMetadataTests { } } } - + testConstructor() { - console.log('\n🧪 Testing constructor...'); - + console.log("\n🧪 Testing constructor..."); + // Valid constructor - const migrationPath = path.join(this.testDir, 'migration1'); + const migrationPath = path.join(this.testDir, "migration1"); const metadata = new MigrationMetadata(migrationPath); - this.assert(metadata.migrationPath === migrationPath, 'Constructor sets migration path'); - + this.assert( + metadata.migrationPath === migrationPath, + "Constructor sets migration path", + ); + // Invalid constructors this.assertThrows( () => new MigrationMetadata(), - 'migrationPath is required', - 'Constructor requires migrationPath' + "migrationPath is required", + "Constructor requires migrationPath", ); - + this.assertThrows( () => new MigrationMetadata(123), - 'must be a string', - 'Constructor validates string type' + "must be a string", + "Constructor validates string type", ); } - + testCreateDefault() { - console.log('\n🧪 Testing createDefault...'); - - const id = '20250828_123456'; - const name = 'test_migration'; + console.log("\n🧪 Testing createDefault..."); + + const id = "20250828_123456"; + const name = "test_migration"; const metadata = MigrationMetadata.createDefault(id, name); - - this.assert(metadata.id === id, 'createDefault sets ID'); - this.assert(metadata.name === name, 'createDefault sets name'); - this.assert(metadata.status === 'pending', 'createDefault sets pending status'); - this.assert(metadata.testing.tested_at === null, 'createDefault initializes testing'); - this.assert(metadata.promotion.promoted_at === null, 'createDefault initializes promotion'); - this.assert(typeof metadata.generated === 'string', 'createDefault sets generated timestamp'); - + + this.assert(metadata.id === id, "createDefault sets ID"); + this.assert(metadata.name === name, "createDefault sets name"); + this.assert( + metadata.status === "pending", + "createDefault sets pending status", + ); + this.assert( + metadata.testing.tested_at === null, + "createDefault initializes testing", + ); + this.assert( + metadata.promotion.promoted_at === null, + "createDefault initializes promotion", + ); + this.assert( + typeof metadata.generated === "string", + "createDefault sets generated timestamp", + ); + // Test validation of created metadata - const migrationPath = path.join(this.testDir, 'migration2'); + const migrationPath = path.join(this.testDir, "migration2"); const metadataManager = new MigrationMetadata(migrationPath); metadataManager.validate(metadata); // Should not throw - this.assert(true, 'createDefault produces valid metadata'); - + this.assert(true, 
"createDefault produces valid metadata"); + // Invalid parameters this.assertThrows( () => MigrationMetadata.createDefault(), - 'id is required', - 'createDefault requires id' + "id is required", + "createDefault requires id", ); - + this.assertThrows( - () => MigrationMetadata.createDefault('test', 123), - 'name is required and must be a string', - 'createDefault validates name type' + () => MigrationMetadata.createDefault("test", 123), + "name is required and must be a string", + "createDefault validates name type", ); } - + testValidation() { - console.log('\n🧪 Testing validation...'); - - const migrationPath = path.join(this.testDir, 'migration3'); + console.log("\n🧪 Testing validation..."); + + const migrationPath = path.join(this.testDir, "migration3"); const metadata = new MigrationMetadata(migrationPath); - + // Valid metadata const validData = { - id: '20250828_123456', - name: 'test_migration', - generated: '2025-08-28T12:34:56.000Z', - status: 'pending', + id: "20250828_123456", + name: "test_migration", + generated: "2025-08-28T12:34:56.000Z", + status: "pending", testing: { tested_at: null, tests_passed: 0, - tests_failed: 0 + tests_failed: 0, }, promotion: { promoted_at: null, - promoted_by: null - } + promoted_by: null, + }, }; - + metadata.validate(validData); // Should not throw - this.assert(true, 'Valid metadata passes validation'); - + this.assert(true, "Valid metadata passes validation"); + // Test required fields this.assertThrows( () => metadata.validate({}), - 'id is required', - 'Validation catches missing id' + "id is required", + "Validation catches missing id", ); - + this.assertThrows( - () => metadata.validate({ id: '123' }), - 'name is required', - 'Validation catches missing name' + () => metadata.validate({ id: "123" }), + "name is required", + "Validation catches missing name", ); - + this.assertThrows( - () => metadata.validate({ id: '123', name: 'test' }), - 'generated is required', - 'Validation catches missing generated' + () => metadata.validate({ id: "123", name: "test" }), + "generated is required", + "Validation catches missing generated", ); - + // Test status validation this.assertThrows( - () => metadata.validate({ - id: '123', - name: 'test', - generated: '2025-08-28T12:34:56.000Z', - status: 'invalid' - }), - 'status must be one of', - 'Validation catches invalid status' - ); - + () => + metadata.validate({ + id: "123", + name: "test", + generated: "2025-08-28T12:34:56.000Z", + status: "invalid", + }), + "status must be one of", + "Validation catches invalid status", + ); + // Test date format validation this.assertThrows( - () => metadata.validate({ - id: '123', - name: 'test', - generated: 'invalid-date', - status: 'pending' - }), - 'generated must be a valid ISO 8601', - 'Validation catches invalid date format' - ); - + () => + metadata.validate({ + id: "123", + name: "test", + generated: "invalid-date", + status: "pending", + }), + "generated must be a valid ISO 8601", + "Validation catches invalid date format", + ); + // Test testing object validation this.assertThrows( - () => metadata.validate({ - ...validData, - testing: { - tested_at: 'invalid-date' - } - }), - 'testing.tested_at must be null or valid ISO 8601', - 'Validation catches invalid testing.tested_at' + () => + metadata.validate({ + ...validData, + testing: { + tested_at: "invalid-date", + }, + }), + "testing.tested_at must be null or valid ISO 8601", + "Validation catches invalid testing.tested_at", ); - + this.assertThrows( - () => metadata.validate({ - ...validData, - 
testing: { - tests_passed: -1 - } - }), - 'testing.tests_passed must be a non-negative integer', - 'Validation catches negative tests_passed' + () => + metadata.validate({ + ...validData, + testing: { + tests_passed: -1, + }, + }), + "testing.tests_passed must be a non-negative integer", + "Validation catches negative tests_passed", ); } - + testReadWrite() { - console.log('\n🧪 Testing read/write operations...'); - - const migrationPath = path.join(this.testDir, 'migration4'); + console.log("\n🧪 Testing read/write operations..."); + + const migrationPath = path.join(this.testDir, "migration4"); const metadata = new MigrationMetadata(migrationPath); - - const testData = MigrationMetadata.createDefault('20250828_123456', 'test_migration'); - + + const testData = MigrationMetadata.createDefault( + "20250828_123456", + "test_migration", + ); + // Test write metadata.write(testData); - this.assert(fs.existsSync(metadata.metadataFile), 'Write creates metadata file'); - + this.assert( + fs.existsSync(metadata.metadataFile), + "Write creates metadata file", + ); + // Test read const readData = metadata.read(); - this.assert(readData.id === testData.id, 'Read returns correct id'); - this.assert(readData.name === testData.name, 'Read returns correct name'); - this.assert(readData.status === testData.status, 'Read returns correct status'); - + this.assert(readData.id === testData.id, "Read returns correct id"); + this.assert(readData.name === testData.name, "Read returns correct name"); + this.assert( + readData.status === testData.status, + "Read returns correct status", + ); + // Test reading non-existent file - const nonExistentPath = path.join(this.testDir, 'nonexistent'); + const nonExistentPath = path.join(this.testDir, "nonexistent"); const nonExistentMetadata = new MigrationMetadata(nonExistentPath); this.assertThrows( () => nonExistentMetadata.read(), - 'Metadata file not found', - 'Read throws on missing file' + "Metadata file not found", + "Read throws on missing file", ); - + // Test reading invalid JSON - const invalidJsonPath = path.join(this.testDir, 'invalid-json'); + const invalidJsonPath = path.join(this.testDir, "invalid-json"); fs.mkdirSync(invalidJsonPath, { recursive: true }); - fs.writeFileSync(path.join(invalidJsonPath, 'metadata.json'), '{ invalid json }'); - + fs.writeFileSync( + path.join(invalidJsonPath, "metadata.json"), + "{ invalid json }", + ); + const invalidJsonMetadata = new MigrationMetadata(invalidJsonPath); this.assertThrows( () => invalidJsonMetadata.read(), - 'Invalid JSON in metadata file', - 'Read throws on invalid JSON' + "Invalid JSON in metadata file", + "Read throws on invalid JSON", ); } - + testUpdate() { - console.log('\n🧪 Testing update operations...'); - - const migrationPath = path.join(this.testDir, 'migration5'); + console.log("\n🧪 Testing update operations..."); + + const migrationPath = path.join(this.testDir, "migration5"); const metadata = new MigrationMetadata(migrationPath); - + // Create initial metadata - const initial = MigrationMetadata.createDefault('20250828_123456', 'test_migration'); + const initial = MigrationMetadata.createDefault( + "20250828_123456", + "test_migration", + ); metadata.write(initial); - + // Test simple update - const updated = metadata.update({ status: 'tested' }); - this.assert(updated.status === 'tested', 'Update changes status'); - this.assert(updated.id === initial.id, 'Update preserves other fields'); - + const updated = metadata.update({ status: "tested" }); + this.assert(updated.status === "tested", "Update 
changes status"); + this.assert(updated.id === initial.id, "Update preserves other fields"); + // Test nested update const nestedUpdate = metadata.update({ testing: { - tested_at: '2025-08-28T13:00:00.000Z', - tests_passed: 5 - } + tested_at: "2025-08-28T13:00:00.000Z", + tests_passed: 5, + }, }); - - this.assert(nestedUpdate.testing.tested_at === '2025-08-28T13:00:00.000Z', 'Update handles nested objects'); - this.assert(nestedUpdate.testing.tests_failed === 0, 'Update preserves nested fields'); - + + this.assert( + nestedUpdate.testing.tested_at === "2025-08-28T13:00:00.000Z", + "Update handles nested objects", + ); + this.assert( + nestedUpdate.testing.tests_failed === 0, + "Update preserves nested fields", + ); + // Test invalid update this.assertThrows( - () => metadata.update({ status: 'invalid' }), - 'status must be one of', - 'Update validates changes' + () => metadata.update({ status: "invalid" }), + "status must be one of", + "Update validates changes", ); - + this.assertThrows( () => metadata.update(), - 'Updates must be an object', - 'Update requires object parameter' + "Updates must be an object", + "Update requires object parameter", ); } - + testLifecycle() { - console.log('\n🧪 Testing full lifecycle...'); - - const migrationPath = path.join(this.testDir, 'migration6'); + console.log("\n🧪 Testing full lifecycle..."); + + const migrationPath = path.join(this.testDir, "migration6"); const metadata = new MigrationMetadata(migrationPath); - + // 1. Create new migration - const initial = MigrationMetadata.createDefault('20250828_140000', 'user_authentication'); + const initial = MigrationMetadata.createDefault( + "20250828_140000", + "user_authentication", + ); metadata.write(initial); - + // 2. Update to tested status metadata.update({ - status: 'tested', + status: "tested", testing: { - tested_at: '2025-08-28T14:30:00.000Z', + tested_at: "2025-08-28T14:30:00.000Z", tests_passed: 12, - tests_failed: 0 - } + tests_failed: 0, + }, }); - + // 3. 
Promote to production const final = metadata.update({ - status: 'promoted', + status: "promoted", promotion: { - promoted_at: '2025-08-28T15:00:00.000Z', - promoted_by: 'admin@example.com' - } + promoted_at: "2025-08-28T15:00:00.000Z", + promoted_by: "admin@example.com", + }, }); - - this.assert(final.status === 'promoted', 'Lifecycle reaches promoted status'); - this.assert(final.testing.tests_passed === 12, 'Lifecycle preserves test results'); - this.assert(final.promotion.promoted_by === 'admin@example.com', 'Lifecycle tracks promotion'); - + + this.assert( + final.status === "promoted", + "Lifecycle reaches promoted status", + ); + this.assert( + final.testing.tests_passed === 12, + "Lifecycle preserves test results", + ); + this.assert( + final.promotion.promoted_by === "admin@example.com", + "Lifecycle tracks promotion", + ); + // Verify file persistence const reread = metadata.read(); - this.assert(reread.status === 'promoted', 'Lifecycle changes persist to disk'); + this.assert( + reread.status === "promoted", + "Lifecycle changes persist to disk", + ); } - + /** * Validate ISO 8601 date format - * @param {string} dateString + * @param {string} dateString * @returns {boolean} * @private */ _isValidISO8601(dateString) { const date = new Date(dateString); - return date instanceof Date && !isNaN(date.getTime()) && - dateString === date.toISOString(); + return ( + date instanceof Date && + !isNaN(date.getTime()) && + dateString === date.toISOString() + ); } - + /** * Deep merge helper for nested object updates - * @param {Object} target - * @param {Object} source + * @param {Object} target + * @param {Object} source * @returns {Object} * @private */ _deepMerge(target, source) { const result = { ...target }; - + for (const key in source) { if (source.hasOwnProperty(key)) { - if (source[key] && typeof source[key] === 'object' && !Array.isArray(source[key])) { + if ( + source[key] && + typeof source[key] === "object" && + !Array.isArray(source[key]) + ) { result[key] = this._deepMerge(result[key] || {}, source[key]); } else { result[key] = source[key]; } } } - + return result; } - + run() { - console.log('🚀 Running MigrationMetadata tests...\n'); - + console.log("🚀 Running MigrationMetadata tests...\n"); + this.setup(); - + try { this.testConstructor(); this.testCreateDefault(); @@ -368,14 +430,16 @@ class MigrationMetadataTests { this.testReadWrite(); this.testUpdate(); this.testLifecycle(); - - console.log(`\n📊 Test Results: ${this.passCount} passed, ${this.failCount} failed`); - + + console.log( + `\n📊 Test Results: ${this.passCount} passed, ${this.failCount} failed`, + ); + if (this.failCount === 0) { - console.log('🎉 All tests passed!'); + console.log("🎉 All tests passed!"); process.exit(0); } else { - console.log('💥 Some tests failed!'); + console.log("💥 Some tests failed!"); process.exit(1); } } finally { @@ -390,4 +454,4 @@ if (require.main === module) { tests.run(); } -module.exports = MigrationMetadataTests; \ No newline at end of file +module.exports = MigrationMetadataTests; diff --git a/test/test-temp-db-management.js b/test/test-temp-db-management.js index 941f99e..6d3f129 100644 --- a/test/test-temp-db-management.js +++ b/test/test-temp-db-management.js @@ -1,6 +1,6 @@ /** * Test for temp database management functionality in DiffEngine - * + * * This test verifies: * - createTempDatabase creates unique temp DB * - cleanupTempDatabase drops temp DB @@ -8,30 +8,30 @@ * - Resource tracking prevents orphans */ -const DiffEngine = require('../src/lib/DiffEngine'); +const 
DiffEngine = require("../src/lib/DiffEngine");
 
 async function runTempDbTests() {
-  console.log('🧪 Testing Temp Database Management...\n');
+  console.log("🧪 Testing Temp Database Management...\n");
 
   const diffEngine = new DiffEngine();
 
   // Listen to events for debugging
-  diffEngine.on('progress', (event) => {
+  diffEngine.on("progress", (event) => {
     console.log(`📊 ${event.step}: ${event.message}`);
   });
 
-  diffEngine.on('error', (event) => {
+  diffEngine.on("error", (event) => {
     console.error(`❌ ERROR: ${event.message}`);
   });
 
   try {
     // Test 1: Create temp database
-    console.log('\n🔨 Test 1: Creating temporary database...');
-    const dbUrl = await diffEngine.createTempDatabase('test');
+    console.log("\n🔨 Test 1: Creating temporary database...");
+    const dbUrl = await diffEngine.createTempDatabase("test");
     console.log(`✅ Created temp database: ${dbUrl}`);
 
     // Test 2: Apply schema to temp database
-    console.log('\n📝 Test 2: Applying schema to temporary database...');
+    console.log("\n📝 Test 2: Applying schema to temporary database...");
     const testSchema = `
       CREATE TABLE test_table (
         id SERIAL PRIMARY KEY,
@@ -43,55 +43,58 @@ async function runTempDbTests() {
     `;
 
     const applyResult = await diffEngine.applySchemaToTemp(dbUrl, testSchema);
-    console.log(`✅ Schema applied successfully. Statements executed: ${applyResult.statementsExecuted}`);
+    console.log(
+      `✅ Schema applied successfully. Statements executed: ${applyResult.statementsExecuted}`,
+    );
 
     // Test 3: Check tracking
-    console.log('\n📋 Test 3: Checking temp database tracking...');
+    console.log("\n📋 Test 3: Checking temp database tracking...");
     const trackedDbs = diffEngine.getTrackedTempDatabases();
     console.log(`✅ Tracked databases: ${trackedDbs.length}`);
-    console.log(`   - ${trackedDbs.join(', ')}`);
+    console.log(`   - ${trackedDbs.join(", ")}`);
 
     // Test 4: Create another temp database
-    console.log('\n🔨 Test 4: Creating second temporary database...');
-    const dbUrl2 = await diffEngine.createTempDatabase('test2');
+    console.log("\n🔨 Test 4: Creating second temporary database...");
+    const dbUrl2 = await diffEngine.createTempDatabase("test2");
     console.log(`✅ Created second temp database: ${dbUrl2}`);
 
     // Test 5: Cleanup specific database
-    console.log('\n🧹 Test 5: Cleaning up first temporary database...');
-    const dbName = dbUrl.split('/').pop();
+    console.log("\n🧹 Test 5: Cleaning up first temporary database...");
+    const dbName = dbUrl.split("/").pop();
     const cleanupSuccess = await diffEngine.cleanupTempDatabase(dbName);
     console.log(`✅ Cleanup successful: ${cleanupSuccess}`);
 
     // Test 6: Cleanup all remaining databases
-    console.log('\n🧹 Test 6: Cleaning up all remaining databases...');
+    console.log("\n🧹 Test 6: Cleaning up all remaining databases...");
     const cleanupSummary = await diffEngine.cleanupAllTempDatabases();
     console.log(`✅ Cleanup summary:`, cleanupSummary);
 
-    console.log('\n🎉 All temp database management tests passed!\n');
-
+    console.log("\n🎉 All temp database management tests passed!\n");
   } catch (error) {
-    console.error('\n💥 Test failed:', error.message);
-    console.error('Stack trace:', error.stack);
-
+    console.error("\n💥 Test failed:", error.message);
+    console.error("Stack trace:", error.stack);
+
     // Attempt cleanup even if tests fail
     try {
-      console.log('\n🧹 Attempting emergency cleanup...');
+      console.log("\n🧹 Attempting emergency cleanup...");
       await diffEngine.cleanupAllTempDatabases();
     } catch (cleanupError) {
-      console.error('❌ Emergency cleanup failed:', cleanupError.message);
+      console.error("❌ Emergency cleanup failed:", cleanupError.message);
     }
   }
 }
 
 // Run tests if this file is executed directly
 if (require.main === module) {
-  runTempDbTests().then(() => {
-    console.log('✅ Test execution complete');
-    process.exit(0);
-  }).catch((error) => {
-    console.error('❌ Test execution failed:', error);
-    process.exit(1);
-  });
+  runTempDbTests()
+    .then(() => {
+      console.log("✅ Test execution complete");
+      process.exit(0);
+    })
+    .catch((error) => {
+      console.error("❌ Test execution failed:", error);
+      process.exit(1);
+    });
 }
 
-module.exports = { runTempDbTests };
\ No newline at end of file
+module.exports = { runTempDbTests };
diff --git a/vitest.config.js b/vitest.config.js
index ca4eafb..fcb73a0 100644
--- a/vitest.config.js
+++ b/vitest.config.js
@@ -1,22 +1,22 @@
-import { defineConfig } from 'vitest/config';
+import { defineConfig } from "vitest/config";
 
 export default defineConfig({
   test: {
     globals: true,
-    environment: 'node',
+    environment: "node",
     testTimeout: 30000, // Increased from 10000
     hookTimeout: 15000, // Increased from 10000
     teardownTimeout: 5000, // Increased from 1000
     isolate: true,
-    pool: 'forks',
+    pool: "forks",
     poolOptions: {
       forks: {
         singleFork: true,
         isolate: true,
-        execArgv: ['--expose-gc']
-      }
+        execArgv: ["--expose-gc"],
+      },
     },
-    setupFiles: ['./test/setup.js'],
-    forceExit: true // Force exit after tests complete
-  }
-});
\ No newline at end of file
+    setupFiles: ["./test/setup.js"],
+    forceExit: true, // Force exit after tests complete
+  },
+});