diff --git a/aztec-up/bin/aztec b/aztec-up/bin/aztec index 1032d34d61f5..2ce4cb492a7a 100755 --- a/aztec-up/bin/aztec +++ b/aztec-up/bin/aztec @@ -562,6 +562,12 @@ EOF exec $(dirname $0)/.aztec-run aztec \ node --no-warnings /usr/src/yarn-project/aztec/dest/bin/index.js "$@" ;; + migrate-ha-db) + # DB migration command, inject the database URL + export ENV_VARS_TO_INJECT="DATABASE_URL" + exec $(dirname $0)/.aztec-run aztec \ + node --no-warnings /usr/src/yarn-project/aztec/dest/bin/index.js "$@" + ;; *) export ENV_VARS_TO_INJECT="SECRET_KEY" exec $(dirname $0)/.aztec-run aztec \ diff --git a/yarn-project/aztec/package.json b/yarn-project/aztec/package.json index f6051dd13144..cbb8ce2ccc17 100644 --- a/yarn-project/aztec/package.json +++ b/yarn-project/aztec/package.json @@ -59,6 +59,7 @@ "@aztec/telemetry-client": "workspace:^", "@aztec/test-wallet": "workspace:^", "@aztec/txe": "workspace:^", + "@aztec/validator-ha-signer": "workspace:^", "@aztec/world-state": "workspace:^", "@types/chalk": "^2.2.0", "abitype": "^0.8.11", diff --git a/yarn-project/aztec/src/bin/index.ts b/yarn-project/aztec/src/bin/index.ts index c431d36d2f04..d06b298d9add 100644 --- a/yarn-project/aztec/src/bin/index.ts +++ b/yarn-project/aztec/src/bin/index.ts @@ -14,6 +14,7 @@ import { createConsoleLogger, createLogger } from '@aztec/foundation/log'; import { Command } from 'commander'; +import { injectMigrateCommand } from '../cli/cmds/migrate_ha_db.js'; import { injectAztecCommands } from '../cli/index.js'; import { getCliVersion } from '../cli/release_version.js'; @@ -55,6 +56,7 @@ async function main() { program = injectAztecNodeCommands(program, userLog, debugLogger); program = injectMiscCommands(program, userLog); program = injectValidatorKeysCommands(program, userLog); + program = injectMigrateCommand(program, userLog); await program.parseAsync(process.argv); } diff --git a/yarn-project/aztec/src/cli/cmds/migrate_ha_db.ts b/yarn-project/aztec/src/cli/cmds/migrate_ha_db.ts new file mode 100644 index 000000000000..3fd598bfb787 --- /dev/null +++ b/yarn-project/aztec/src/cli/cmds/migrate_ha_db.ts @@ -0,0 +1,43 @@ +import { runMigrations } from '@aztec/validator-ha-signer/migrations'; + +import type { Command } from 'commander'; + +export function injectMigrateCommand(program: Command, log: (msg: string) => void): Command { + const migrateCommand = program.command('migrate-ha-db').description('Run validator-ha-signer database migrations'); + + migrateCommand + .command('up') + .description('Apply pending migrations') + .requiredOption('--database-url ', 'PostgreSQL connection string', process.env.DATABASE_URL) + .option('--verbose', 'Enable verbose output', false) + .action(async options => { + const migrations = await runMigrations(options.databaseUrl, { + direction: 'up', + verbose: options.verbose, + }); + if (migrations.length > 0) { + log(`Applied migrations: ${migrations.join(', ')}`); + } else { + log('No migrations to apply - schema is up to date'); + } + }); + + migrateCommand + .command('down') + .description('Rollback the last migration') + .requiredOption('--database-url ', 'PostgreSQL connection string', process.env.DATABASE_URL) + .option('--verbose', 'Enable verbose output', false) + .action(async options => { + const migrations = await runMigrations(options.databaseUrl, { + direction: 'down', + verbose: options.verbose, + }); + if (migrations.length > 0) { + log(`Rolled back migrations: ${migrations.join(', ')}`); + } else { + log('No migrations to rollback'); + } + }); + + return program; +} diff 
--git a/yarn-project/aztec/tsconfig.json b/yarn-project/aztec/tsconfig.json index 5851e55a48aa..6a765d106a74 100644 --- a/yarn-project/aztec/tsconfig.json +++ b/yarn-project/aztec/tsconfig.json @@ -96,6 +96,9 @@ { "path": "../txe" }, + { + "path": "../validator-ha-signer" + }, { "path": "../world-state" } diff --git a/yarn-project/foundation/src/config/env_var.ts b/yarn-project/foundation/src/config/env_var.ts index 9f113c183dab..88c79069fed5 100644 --- a/yarn-project/foundation/src/config/env_var.ts +++ b/yarn-project/foundation/src/config/env_var.ts @@ -314,4 +314,15 @@ export type EnvVar = | 'FISHERMAN_MODE' | 'MAX_ALLOWED_ETH_CLIENT_DRIFT_SECONDS' | 'LEGACY_BLS_CLI' - | 'DEBUG_FORCE_TX_PROOF_VERIFICATION'; + | 'DEBUG_FORCE_TX_PROOF_VERIFICATION' + | 'SLASHING_PROTECTION_NODE_ID' + | 'SLASHING_PROTECTION_POLLING_INTERVAL_MS' + | 'SLASHING_PROTECTION_SIGNING_TIMEOUT_MS' + | 'SLASHING_PROTECTION_ENABLED' + | 'SLASHING_PROTECTION_MAX_STUCK_DUTIES_AGE_MS' + | 'VALIDATOR_HA_DATABASE_URL' + | 'VALIDATOR_HA_RUN_MIGRATIONS' + | 'VALIDATOR_HA_POOL_MAX' + | 'VALIDATOR_HA_POOL_MIN' + | 'VALIDATOR_HA_POOL_IDLE_TIMEOUT_MS' + | 'VALIDATOR_HA_POOL_CONNECTION_TIMEOUT_MS'; diff --git a/yarn-project/package.json b/yarn-project/package.json index 79b90feea1f9..28984d288ee5 100644 --- a/yarn-project/package.json +++ b/yarn-project/package.json @@ -61,6 +61,7 @@ "txe", "test-wallet", "validator-client", + "validator-ha-signer", "wallet-sdk", "world-state" ], diff --git a/yarn-project/tsconfig.json b/yarn-project/tsconfig.json index 4d5e3ce6acb0..ff81b33d59d8 100644 --- a/yarn-project/tsconfig.json +++ b/yarn-project/tsconfig.json @@ -29,6 +29,7 @@ { "path": "aztec.js/tsconfig.json" }, { "path": "aztec-node/tsconfig.json" }, { "path": "validator-client/tsconfig.json" }, + { "path": "validator-ha-signer/tsconfig.json" }, { "path": "bb-prover/tsconfig.json" }, { "path": "bot/tsconfig.json" }, { "path": "constants/tsconfig.json" }, diff --git a/yarn-project/validator-ha-signer/MIGRATIONS.md b/yarn-project/validator-ha-signer/MIGRATIONS.md new file mode 100644 index 000000000000..ef9c7cee1cee --- /dev/null +++ b/yarn-project/validator-ha-signer/MIGRATIONS.md @@ -0,0 +1,186 @@ +# Database Migrations Guide + +This package uses [node-pg-migrate](https://github.com/salsita/node-pg-migrate) for managing database schema changes. + +## Quick Reference + +```bash +# Run pending migrations +aztec migrate-ha-db up --database-url postgresql://... + +# Rollback last migration +aztec migrate-ha-db down --database-url postgresql://... 
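+
+# Both subcommands accept --verbose for detailed output; if --database-url is
+# omitted, the DATABASE_URL environment variable is used as the connection string
+aztec migrate-ha-db up --verbose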
+``` + +## Migration Files + +Migrations are located in the `migrations/` directory and are named with timestamps: + +``` +migrations/ + └── 1_initial-schema.ts +``` + +## Creating New Migrations + +When you need to modify the database schema: + +```bash +# Generate a new migration file +npx node-pg-migrate create add-new-field + +# This creates: migrations/[timestamp]_add-new-field.ts +``` + +Edit the generated file: + +```typescript +import type { MigrationBuilder } from 'node-pg-migrate'; + +export async function up(pgm: MigrationBuilder): Promise { + // Add your schema changes here + pgm.addColumn('validator_duties', { + new_field: { type: 'text', notNull: false }, + }); +} + +export async function down(pgm: MigrationBuilder): Promise { + // Reverse the changes + pgm.dropColumn('validator_duties', 'new_field'); +} +``` + +## Production Deployment + +### Option 1: Kubernetes Init Container + +```yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: validator +spec: + template: + spec: + initContainers: + - name: db-migrate + image: aztecprotocol/aztec: + command: ['node', '--no-warnings', '/usr/src/yarn-project/aztec/dest/bin/index.js', 'migrate-ha-db', 'up'] + env: + - name: DATABASE_URL + valueFrom: + secretKeyRef: + name: db-credentials + key: connection-string + containers: + - name: validator + image: aztecprotocol/aztec: + # ... validator config +``` + +### Option 2: Separate Migration Job + +```yaml +apiVersion: batch/v1 +kind: Job +metadata: + name: validator-migrate-v1 +spec: + template: + spec: + containers: + - name: migrate + image: aztecprotocol/aztec: + command: ['node', '--no-warnings', '/usr/src/yarn-project/aztec/dest/bin/index.js', 'migrate-ha-db', 'up'] + env: + - name: DATABASE_URL + valueFrom: + secretKeyRef: + name: db-credentials + key: connection-string + restartPolicy: Never +``` + +### Option 3: CI/CD Pipeline + +```yaml +# GitHub Actions example +- name: Run Database Migrations + run: | + docker run --rm \ + -e DATABASE_URL=${{ secrets.DATABASE_URL }} \ + aztecprotocol/aztec: \ + migrate-ha-db up +``` + +## High Availability Considerations + +The migrations use idempotent SQL operations (`IF NOT EXISTS`, `ON CONFLICT`, etc.), making them safe to run concurrently from multiple nodes. However, for cleaner logs and faster deployments, we recommend: + +1. **Run migrations once** from an init container or migration job +2. **Then start** multiple validator nodes + +If multiple nodes run migrations simultaneously, they will all succeed, but you'll see redundant log output. + +## Development Workflow + +```bash +# 1. Create migration +npx node-pg-migrate create my-feature + +# 2. Edit migrations/[timestamp]_my-feature.ts + +# 3. Test migration locally +aztec migrate-ha-db up --database-url postgresql://localhost:5432/validator_dev + +# 4. Test rollback +aztec migrate-ha-db down --database-url postgresql://localhost:5432/validator_dev + +# 5. Re-apply +aztec migrate-ha-db up --database-url postgresql://localhost:5432/validator_dev + +# 6. Run tests +yarn test +``` + +## Troubleshooting + +### Migration Failed Midway + +If a migration fails partway through: + +```bash +# The failed migration will be marked as running +# Fix the issue and re-run +aztec migrate-ha-db up --database-url postgresql://... 
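+
+# node-pg-migrate records applied migrations in the pgmigrations table, so
+# re-running `up` only applies migrations that are not yet recorded there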
+``` + +### Reset Development Database + +```bash +# Drop all migrations +while aztec migrate-ha-db down --database-url postgresql://localhost:5432/validator_dev; do :; done + +# Or drop the database entirely +psql -c "DROP DATABASE validator_dev;" +psql -c "CREATE DATABASE validator_dev;" + +# Re-run migrations +aztec migrate-ha-db up --database-url postgresql://localhost:5432/validator_dev +``` + +### Check Applied Migrations + +```bash +# Query the migrations table +psql $DATABASE_URL -c "SELECT * FROM pgmigrations ORDER BY id;" +``` + +## Migration Best Practices + +1. **Always provide `down()` migrations** for rollback capability +2. **Test migrations on a copy of production data** before deploying +3. **Make migrations backward compatible** when possible +4. **Avoid data migrations in schema migrations** - use separate data migration scripts +5. **Keep migrations small and focused** - one logical change per migration +6. **Never modify committed migrations** - create a new migration instead diff --git a/yarn-project/validator-ha-signer/README.md b/yarn-project/validator-ha-signer/README.md new file mode 100644 index 000000000000..f2ff6aee3d96 --- /dev/null +++ b/yarn-project/validator-ha-signer/README.md @@ -0,0 +1,182 @@ +# Validator HA Signer + +Distributed locking and slashing protection for Aztec validators running in high-availability configurations. + +## Features + +- **Distributed Locking**: Prevents multiple validator nodes from signing the same duty +- **Slashing Protection**: Blocks attempts to sign conflicting data for the same slot +- **Automatic Retry**: Failed signing attempts are cleared, allowing other nodes to retry +- **PostgreSQL Backend**: Shared database for coordination across nodes + +## Quick Start + +### Option 1: Automatic Migrations (Simplest) + +```typescript +import { createHASigner } from '@aztec/validator-ha-signer/factory'; + +// Migrations run automatically on startup +const { signer, db } = await createHASigner({ + databaseUrl: process.env.DATABASE_URL, + enabled: true, + nodeId: 'validator-node-1', + pollingIntervalMs: 100, + signingTimeoutMs: 3000, +}); + +// Start background cleanup tasks +signer.start(); + +// Sign with protection +const signature = await signer.signWithProtection( + validatorAddress, + messageHash, + { slot: 100n, blockNumber: 50n, dutyType: 'BLOCK_PROPOSAL' }, + async root => localSigner.signMessage(root), +); + +// Cleanup on shutdown +await signer.stop(); +await db.close(); +``` + +### Option 2: Manual Migrations (Recommended for Production) + +```bash +# 1. Run migrations separately (once per deployment) +aztec migrate-ha-db up --database-url postgresql://user:pass@host:port/db +``` + +```typescript +// 2. Create signer (migrations already applied) +import { createHASigner } from '@aztec/validator-ha-signer/factory'; + +const { signer, db } = await createHASigner({ + databaseUrl: process.env.DATABASE_URL, + enabled: true, + nodeId: 'validator-node-1', + pollingIntervalMs: 100, + signingTimeoutMs: 3000, +}); + +// Start background cleanup tasks +signer.start(); + +// On shutdown +await signer.stop(); +await db.close(); +``` + +### Advanced: Custom Connection Pool + +If you need custom pool configuration (e.g., max connections, idle timeout) or want to share a connection pool across multiple components: + +> **Note**: You still need to run migrations separately before using this approach. +> See [Option 2](#option-2-manual-migrations-recommended-for-production) above. 
+ +```typescript +import { PostgresSlashingProtectionDatabase } from '@aztec/validator-ha-signer/db'; +import { ValidatorHASigner } from '@aztec/validator-ha-signer/validator-ha-signer'; + +import { Pool } from 'pg'; + +// Custom pool configuration +const pool = new Pool({ + connectionString: process.env.DATABASE_URL, + max: 20, // Maximum connections + idleTimeoutMillis: 30000, +}); +const db = new PostgresSlashingProtectionDatabase(pool); +await db.initialize(); + +const signer = new ValidatorHASigner(db, { + enabled: true, + nodeId: 'validator-node-1', + pollingIntervalMs: 100, + signingTimeoutMs: 3000, + maxStuckDutiesAgeMs: 72000, +}); + +// Start background cleanup tasks +signer.start(); + +// On shutdown +await signer.stop(); +await pool.end(); // You manage the pool lifecycle +``` + +## Configuration + +Set via environment variables or config object: + +- `VALIDATOR_HA_DATABASE_URL`: PostgreSQL connection string (e.g., `postgresql://user:pass@host:port/db`) +- `SLASHING_PROTECTION_ENABLED`: Whether slashing protection is enabled (default: true) +- `SLASHING_PROTECTION_NODE_ID`: Unique identifier for this validator node +- `SLASHING_PROTECTION_POLLING_INTERVAL_MS`: How often to check duty status (default: 100) +- `SLASHING_PROTECTION_SIGNING_TIMEOUT_MS`: Max wait for in-progress signing (default: 3000) +- `SLASHING_PROTECTION_MAX_STUCK_DUTIES_AGE_MS`: Max age of stuck duties before cleanup (default: 72000) + +## Database Migrations + +This package uses `node-pg-migrate` for database schema management. + +### Migration Commands + +```bash +# Run pending migrations +aztec migrate-ha-db up --database-url postgresql://... + +# Rollback last migration +aztec migrate-ha-db down --database-url postgresql://... +``` + +### Creating New Migrations + +```bash +# Generate a new migration file +npx node-pg-migrate create my-migration-name +``` + +### Production Deployment + +Run migrations before starting your application: + +```yaml +# Kubernetes example +apiVersion: batch/v1 +kind: Job +metadata: + name: validator-db-migrate +spec: + template: + spec: + containers: + - name: migrate + image: aztecprotocol/aztec: + command: ['node', '--no-warnings', '/usr/src/yarn-project/aztec/dest/bin/index.js', 'migrate-ha-db', 'up'] + env: + - name: DATABASE_URL + valueFrom: + secretKeyRef: + name: db-secret + key: url + restartPolicy: OnFailure +``` + +## How It Works + +When multiple validator nodes attempt to sign: + +1. First node acquires lock and signs +2. Other nodes receive `DutyAlreadySignedError` (expected) +3. If different data detected: `SlashingProtectionError` (likely for block builder signing) +4. 
Failed attempts are auto-cleaned, allowing retry + +## Development + +```bash +yarn build # Build package +yarn test # Run tests +yarn clean # Clean build artifacts +``` diff --git a/yarn-project/validator-ha-signer/eslint.config.js b/yarn-project/validator-ha-signer/eslint.config.js new file mode 100644 index 000000000000..0331d0552f62 --- /dev/null +++ b/yarn-project/validator-ha-signer/eslint.config.js @@ -0,0 +1,3 @@ +import config from '@aztec/foundation/eslint'; + +export default config; diff --git a/yarn-project/validator-ha-signer/package.json b/yarn-project/validator-ha-signer/package.json new file mode 100644 index 000000000000..cf2fa4c10c4c --- /dev/null +++ b/yarn-project/validator-ha-signer/package.json @@ -0,0 +1,105 @@ +{ + "name": "@aztec/validator-ha-signer", + "version": "0.1.0", + "type": "module", + "exports": { + "./config": "./dest/config.js", + "./db": "./dest/db/index.js", + "./errors": "./dest/errors.js", + "./factory": "./dest/factory.js", + "./migrations": "./dest/migrations.js", + "./slashing-protection-service": "./dest/slashing_protection_service.js", + "./types": "./dest/types.js", + "./validator-ha-signer": "./dest/validator_ha_signer.js" + }, + "typedocOptions": { + "entryPoints": [ + "./src/config.ts", + "./src/db/index.ts", + "./src/errors.ts", + "./src/factory.ts", + "./src/migrations.ts", + "./src/slashing_protection_service.ts", + "./src/types.ts", + "./src/validator_ha_signer.ts" + ], + "name": "Validator High-Availability Signer", + "tsconfig": "./tsconfig.json" + }, + "scripts": { + "build": "yarn clean && ../scripts/tsc.sh", + "build:dev": "../scripts/tsc.sh --watch", + "clean": "rm -rf ./dest .tsbuildinfo", + "test": "NODE_NO_WARNINGS=1 node --experimental-vm-modules ../node_modules/.bin/jest --passWithNoTests --maxWorkers=${JEST_MAX_WORKERS:-8}" + }, + "inherits": [ + "../package.common.json" + ], + "jest": { + "moduleNameMapper": { + "^(\\.{1,2}/.*)\\.[cm]?js$": "$1" + }, + "testRegex": "./src/.*\\.test\\.(js|mjs|ts)$", + "rootDir": "./src", + "transform": { + "^.+\\.tsx?$": [ + "@swc/jest", + { + "jsc": { + "parser": { + "syntax": "typescript", + "decorators": true + }, + "transform": { + "decoratorVersion": "2022-03" + } + } + } + ] + }, + "extensionsToTreatAsEsm": [ + ".ts" + ], + "reporters": [ + "default" + ], + "testTimeout": 120000, + "setupFiles": [ + "../../foundation/src/jest/setup.mjs" + ], + "testEnvironment": "../../foundation/src/jest/env.mjs", + "setupFilesAfterEnv": [ + "../../foundation/src/jest/setupAfterEnv.mjs" + ] + }, + "dependencies": { + "@aztec/foundation": "workspace:^", + "@aztec/node-keystore": "workspace:^", + "node-pg-migrate": "^8.0.4", + "pg": "^8.11.3", + "tslib": "^2.4.0" + }, + "devDependencies": { + "@electric-sql/pglite": "^0.2.17", + "@jest/globals": "^30.0.0", + "@middle-management/pglite-pg-adapter": "^0.0.3", + "@types/jest": "^30.0.0", + "@types/node": "^22.15.17", + "@types/node-pg-migrate": "^2.3.1", + "@types/pg": "^8.10.9", + "@typescript/native-preview": "7.0.0-dev.20251126.1", + "jest": "^30.0.0", + "jest-mock-extended": "^4.0.0", + "ts-node": "^10.9.1", + "typescript": "^5.3.3" + }, + "types": "./dest/types.d.ts", + "files": [ + "dest", + "src", + "!*.test.*" + ], + "engines": { + "node": ">=20.10" + } +} diff --git a/yarn-project/validator-ha-signer/src/config.ts b/yarn-project/validator-ha-signer/src/config.ts new file mode 100644 index 000000000000..1a1719773db0 --- /dev/null +++ b/yarn-project/validator-ha-signer/src/config.ts @@ -0,0 +1,116 @@ +import { + type ConfigMappingsType, + 
booleanConfigHelper, + getConfigFromMappings, + getDefaultConfig, + numberConfigHelper, +} from '@aztec/foundation/config'; + +/** + * Configuration for the slashing protection service + */ +export interface SlashingProtectionConfig { + /** Whether slashing protection is enabled */ + enabled: boolean; + /** Unique identifier for this node */ + nodeId: string; + /** How long to wait between polls when a duty is being signed (ms) */ + pollingIntervalMs: number; + /** Maximum time to wait for a duty being signed to complete (ms) */ + signingTimeoutMs: number; + /** Maximum age of a stuck duty in ms */ + maxStuckDutiesAgeMs: number; +} + +export const slashingProtectionConfigMappings: ConfigMappingsType = { + enabled: { + env: 'SLASHING_PROTECTION_ENABLED', + description: 'Whether slashing protection is enabled', + ...booleanConfigHelper(true), + }, + nodeId: { + env: 'SLASHING_PROTECTION_NODE_ID', + description: 'The unique identifier for this node', + defaultValue: '', + }, + pollingIntervalMs: { + env: 'SLASHING_PROTECTION_POLLING_INTERVAL_MS', + description: 'The number of ms to wait between polls when a duty is being signed', + ...numberConfigHelper(100), + }, + signingTimeoutMs: { + env: 'SLASHING_PROTECTION_SIGNING_TIMEOUT_MS', + description: 'The maximum time to wait for a duty being signed to complete', + ...numberConfigHelper(3_000), + }, + maxStuckDutiesAgeMs: { + env: 'SLASHING_PROTECTION_MAX_STUCK_DUTIES_AGE_MS', + description: 'The maximum age of a stuck duty in ms', + // hard-coding at current 2 slot duration. This should be set by the validator on init + ...numberConfigHelper(72_000), + }, +}; + +export const defaultSlashingProtectionConfig: SlashingProtectionConfig = getDefaultConfig( + slashingProtectionConfigMappings, +); + +/** + * Configuration for creating an HA signer with PostgreSQL backend + */ +export interface CreateHASignerConfig extends SlashingProtectionConfig { + /** + * PostgreSQL connection string + * Format: postgresql://user:password@host:port/database + */ + databaseUrl: string; + /** + * PostgreSQL connection pool configuration + */ + /** Maximum number of clients in the pool (default: 10) */ + poolMaxCount?: number; + /** Minimum number of clients in the pool (default: 0) */ + poolMinCount?: number; + /** Idle timeout in milliseconds (default: 10000) */ + poolIdleTimeoutMs?: number; + /** Connection timeout in milliseconds (default: 0, no timeout) */ + poolConnectionTimeoutMs?: number; +} + +export const createHASignerConfigMappings: ConfigMappingsType = { + ...slashingProtectionConfigMappings, + databaseUrl: { + env: 'VALIDATOR_HA_DATABASE_URL', + description: + 'PostgreSQL connection string for validator HA signer (format: postgresql://user:password@host:port/database)', + }, + poolMaxCount: { + env: 'VALIDATOR_HA_POOL_MAX', + description: 'Maximum number of clients in the pool', + ...numberConfigHelper(10), + }, + poolMinCount: { + env: 'VALIDATOR_HA_POOL_MIN', + description: 'Minimum number of clients in the pool', + ...numberConfigHelper(0), + }, + poolIdleTimeoutMs: { + env: 'VALIDATOR_HA_POOL_IDLE_TIMEOUT_MS', + description: 'Idle timeout in milliseconds', + ...numberConfigHelper(10_000), + }, + poolConnectionTimeoutMs: { + env: 'VALIDATOR_HA_POOL_CONNECTION_TIMEOUT_MS', + description: 'Connection timeout in milliseconds (0 means no timeout)', + ...numberConfigHelper(0), + }, +}; + +/** + * Returns the validator HA signer configuration from environment variables. + * Note: If an environment variable is not set, the default value is used. 
+ * @returns The validator HA signer configuration. + */ +export function getConfigEnvVars(): CreateHASignerConfig { + return getConfigFromMappings(createHASignerConfigMappings); +} diff --git a/yarn-project/validator-ha-signer/src/db/index.ts b/yarn-project/validator-ha-signer/src/db/index.ts new file mode 100644 index 000000000000..ea026be796cb --- /dev/null +++ b/yarn-project/validator-ha-signer/src/db/index.ts @@ -0,0 +1,3 @@ +export * from './types.js'; +export * from './schema.js'; +export * from './postgres.js'; diff --git a/yarn-project/validator-ha-signer/src/db/migrations/1_initial-schema.ts b/yarn-project/validator-ha-signer/src/db/migrations/1_initial-schema.ts new file mode 100644 index 000000000000..b3579e146ece --- /dev/null +++ b/yarn-project/validator-ha-signer/src/db/migrations/1_initial-schema.ts @@ -0,0 +1,26 @@ +/** + * Initial schema for validator HA slashing protection + * + * This migration imports SQL from the schema.ts file to ensure a single source of truth. + */ +import type { MigrationBuilder } from 'node-pg-migrate'; + +import { DROP_SCHEMA_VERSION_TABLE, DROP_VALIDATOR_DUTIES_TABLE, SCHEMA_SETUP, SCHEMA_VERSION } from '../schema.js'; + +export function up(pgm: MigrationBuilder): void { + for (const statement of SCHEMA_SETUP) { + pgm.sql(statement); + } + + // Insert initial schema version + pgm.sql(` + INSERT INTO schema_version (version) + VALUES (${SCHEMA_VERSION}) + ON CONFLICT (version) DO NOTHING; + `); +} + +export function down(pgm: MigrationBuilder): void { + pgm.sql(DROP_VALIDATOR_DUTIES_TABLE); + pgm.sql(DROP_SCHEMA_VERSION_TABLE); +} diff --git a/yarn-project/validator-ha-signer/src/db/postgres.test.ts b/yarn-project/validator-ha-signer/src/db/postgres.test.ts new file mode 100644 index 000000000000..d8ff9f6a86f4 --- /dev/null +++ b/yarn-project/validator-ha-signer/src/db/postgres.test.ts @@ -0,0 +1,574 @@ +import { Buffer32 } from '@aztec/foundation/buffer'; +import { EthAddress } from '@aztec/foundation/eth-address'; + +import { PGlite } from '@electric-sql/pglite'; +import { afterEach, beforeEach, describe, expect, it, jest } from '@jest/globals'; +import { Pool } from '@middle-management/pglite-pg-adapter'; + +import { PostgresSlashingProtectionDatabase } from './postgres.js'; +import { + DELETE_DUTY, + INSERT_OR_GET_DUTY, + INSERT_SCHEMA_VERSION, + SCHEMA_SETUP, + SCHEMA_VERSION, + UPDATE_DUTY_SIGNED, +} from './schema.js'; +import { setupTestSchema } from './test_helper.js'; +import { type DutyRow, DutyStatus, DutyType, type InsertOrGetRow } from './types.js'; + +/** + * Integration tests for PostgreSQL queries using PGlite. 
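+ * PGlite runs an in-process Postgres, so these tests exercise the real SQL
+ * statements without requiring an external database server.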
+ */ +describe('PostgreSQL Queries', () => { + let db: PGlite; + + const VALIDATOR_ADDRESS = EthAddress.random().toString(); + const SLOT = 100n; + const BLOCK_NUMBER = 50n; + const DUTY_TYPE = DutyType.BLOCK_PROPOSAL; + const MESSAGE_HASH = Buffer32.random().toString(); + const NODE_ID = 'node-1'; + const LOCK_TOKEN = 'test-lock-token-12345'; + const SIGNATURE = '0xsignature'; + + beforeEach(async () => { + db = new PGlite(); + + await setupTestSchema(db); + }); + + afterEach(async () => { + await db.close(); + }); + + describe('INSERT_OR_GET_DUTY', () => { + it('should insert new record and return is_new=true', async () => { + const result = await db.query(INSERT_OR_GET_DUTY, [ + VALIDATOR_ADDRESS.toString(), + SLOT.toString(), + BLOCK_NUMBER.toString(), + DUTY_TYPE, + MESSAGE_HASH, + NODE_ID, + LOCK_TOKEN, + ]); + + expect(result.rows.length).toBe(1); + const row = result.rows[0]; + expect(row.is_new).toBe(true); + expect(row.status).toBe(DutyStatus.SIGNING); + expect(row.validator_address).toBe(VALIDATOR_ADDRESS.toString()); + expect(BigInt(row.slot)).toBe(SLOT); + expect(row.node_id).toBe(NODE_ID); + expect(row.lock_token).toBe(LOCK_TOKEN); + }); + + it('should return existing record with is_new=false on duplicate', async () => { + // First insert + await db.query(INSERT_OR_GET_DUTY, [ + VALIDATOR_ADDRESS.toString(), + SLOT.toString(), + BLOCK_NUMBER.toString(), + DUTY_TYPE, + MESSAGE_HASH, + NODE_ID, + LOCK_TOKEN, + ]); + + // Second insert attempt with different node + const result = await db.query(INSERT_OR_GET_DUTY, [ + VALIDATOR_ADDRESS.toString(), + SLOT.toString(), + BLOCK_NUMBER.toString(), + DUTY_TYPE, + MESSAGE_HASH, + 'node-2', // Different node trying to acquire + 'different-token', + ]); + + expect(result.rows.length).toBe(1); + const row = result.rows[0]; + expect(row.is_new).toBe(false); + expect(row.node_id).toBe(NODE_ID); // Original node still owns it + }); + + it('should not expose lock_token for existing records', async () => { + // node acquires the lock + const insertResult = await db.query(INSERT_OR_GET_DUTY, [ + VALIDATOR_ADDRESS.toString(), + SLOT.toString(), + BLOCK_NUMBER.toString(), + DUTY_TYPE, + MESSAGE_HASH, + NODE_ID, + LOCK_TOKEN, + ]); + expect(insertResult.rows[0].is_new).toBe(true); + expect(insertResult.rows[0].lock_token).toBe(LOCK_TOKEN); + + // Second insert attempt - should not get the original lock_token + const conflictResult = await db.query(INSERT_OR_GET_DUTY, [ + VALIDATOR_ADDRESS.toString(), + SLOT.toString(), + BLOCK_NUMBER.toString(), + DUTY_TYPE, + MESSAGE_HASH, + 'competing-node', + 'competing-token', + ]); + expect(conflictResult.rows[0].is_new).toBe(false); + expect(conflictResult.rows[0].lock_token).toBe(''); // Empty string, not the original token + }); + + it('should allow different duty types for same slot', async () => { + // Insert BLOCK_PROPOSAL + const result1 = await db.query(INSERT_OR_GET_DUTY, [ + VALIDATOR_ADDRESS.toString(), + SLOT.toString(), + BLOCK_NUMBER.toString(), + DutyType.BLOCK_PROPOSAL, + MESSAGE_HASH, + NODE_ID, + LOCK_TOKEN, + ]); + + // Insert ATTESTATION for same slot + const result2 = await db.query(INSERT_OR_GET_DUTY, [ + VALIDATOR_ADDRESS.toString(), + SLOT.toString(), + BLOCK_NUMBER.toString(), + DutyType.ATTESTATION, + MESSAGE_HASH, + NODE_ID, + 'token-2', + ]); + + expect(result1.rows[0].is_new).toBe(true); + expect(result2.rows[0].is_new).toBe(true); + }); + + it('should allow same duty type for different slots', async () => { + const result1 = await db.query(INSERT_OR_GET_DUTY, [ + 
VALIDATOR_ADDRESS.toString(), + '100', + BLOCK_NUMBER.toString(), + DUTY_TYPE, + MESSAGE_HASH, + NODE_ID, + 'token-1', + ]); + + const result2 = await db.query(INSERT_OR_GET_DUTY, [ + VALIDATOR_ADDRESS.toString(), + '101', + BLOCK_NUMBER.toString(), + DUTY_TYPE, + MESSAGE_HASH, + NODE_ID, + 'token-2', + ]); + + expect(result1.rows[0].is_new).toBe(true); + expect(result2.rows[0].is_new).toBe(true); + }); + }); + + describe('UPDATE_DUTY_SIGNED', () => { + it('should update status to signed and set signature with correct token', async () => { + // Insert a duty first + await db.query(INSERT_OR_GET_DUTY, [ + VALIDATOR_ADDRESS.toString(), + SLOT.toString(), + BLOCK_NUMBER.toString(), + DUTY_TYPE, + MESSAGE_HASH, + NODE_ID, + LOCK_TOKEN, + ]); + + // Update to signed with correct token + const updateResult = await db.query(UPDATE_DUTY_SIGNED, [ + SIGNATURE, + VALIDATOR_ADDRESS.toString(), + SLOT.toString(), + DUTY_TYPE, + LOCK_TOKEN, + ]); + + expect(updateResult.affectedRows).toBe(1); + + // Verify the update + const selectResult = await db.query( + `SELECT status, signature, completed_at FROM validator_duties + WHERE validator_address = $1 AND slot = $2 AND duty_type = $3`, + [VALIDATOR_ADDRESS.toString(), SLOT.toString(), DUTY_TYPE], + ); + + const row = selectResult.rows[0]; + expect(row.status).toBe(DutyStatus.SIGNED); + expect(row.signature).toBe(SIGNATURE); + expect(row.completed_at).toBeTruthy(); + }); + + it('should not update with wrong token', async () => { + await db.query(INSERT_OR_GET_DUTY, [ + VALIDATOR_ADDRESS.toString(), + SLOT.toString(), + BLOCK_NUMBER.toString(), + DUTY_TYPE, + MESSAGE_HASH, + NODE_ID, + LOCK_TOKEN, + ]); + + // Try to update with wrong token + const updateResult = await db.query(UPDATE_DUTY_SIGNED, [ + SIGNATURE, + VALIDATOR_ADDRESS.toString(), + SLOT.toString(), + DUTY_TYPE, + 'wrong-token', + ]); + + expect(updateResult.affectedRows).toBe(0); + + // Verify still in signing state + const selectResult = await db.query( + `SELECT status FROM validator_duties WHERE validator_address = $1 AND slot = $2`, + [VALIDATOR_ADDRESS.toString(), SLOT.toString()], + ); + expect(selectResult.rows[0].status).toBe(DutyStatus.SIGNING); + }); + + it('should not update if status is not signing', async () => { + // Insert and mark as signed + await db.query(INSERT_OR_GET_DUTY, [ + VALIDATOR_ADDRESS.toString(), + SLOT.toString(), + BLOCK_NUMBER.toString(), + DUTY_TYPE, + MESSAGE_HASH, + NODE_ID, + LOCK_TOKEN, + ]); + await db.query(UPDATE_DUTY_SIGNED, [ + SIGNATURE, + VALIDATOR_ADDRESS.toString(), + SLOT.toString(), + DUTY_TYPE, + LOCK_TOKEN, + ]); + + // Try to update again with correct token + const result = await db.query(UPDATE_DUTY_SIGNED, [ + 'new-signature', + VALIDATOR_ADDRESS.toString(), + SLOT.toString(), + DUTY_TYPE, + LOCK_TOKEN, + ]); + + expect(result.affectedRows).toBe(0); + + // Verify signature unchanged + const selectResult = await db.query( + `SELECT signature FROM validator_duties WHERE validator_address = $1 AND slot = $2`, + [VALIDATOR_ADDRESS.toString(), SLOT.toString()], + ); + expect(selectResult.rows[0].signature).toBe(SIGNATURE); + }); + }); + + describe('DELETE_DUTY', () => { + it('should delete a signing duty with correct token', async () => { + await db.query(INSERT_OR_GET_DUTY, [ + VALIDATOR_ADDRESS.toString(), + SLOT.toString(), + BLOCK_NUMBER.toString(), + DUTY_TYPE, + MESSAGE_HASH, + NODE_ID, + LOCK_TOKEN, + ]); + + const deleteResult = await db.query(DELETE_DUTY, [ + VALIDATOR_ADDRESS.toString(), + SLOT.toString(), + DUTY_TYPE, + LOCK_TOKEN, + 
]); + + expect(deleteResult.affectedRows).toBe(1); + + // Verify deleted + const selectResult = await db.query(`SELECT * FROM validator_duties WHERE validator_address = $1 AND slot = $2`, [ + VALIDATOR_ADDRESS.toString(), + SLOT.toString(), + ]); + expect(selectResult.rows.length).toBe(0); + }); + + it('should not delete with wrong token', async () => { + await db.query(INSERT_OR_GET_DUTY, [ + VALIDATOR_ADDRESS.toString(), + SLOT.toString(), + BLOCK_NUMBER.toString(), + DUTY_TYPE, + MESSAGE_HASH, + NODE_ID, + LOCK_TOKEN, + ]); + + const deleteResult = await db.query(DELETE_DUTY, [ + VALIDATOR_ADDRESS.toString(), + SLOT.toString(), + DUTY_TYPE, + 'wrong-token', + ]); + + expect(deleteResult.affectedRows).toBe(0); + + // Verify still exists + const selectResult = await db.query(`SELECT * FROM validator_duties WHERE validator_address = $1 AND slot = $2`, [ + VALIDATOR_ADDRESS.toString(), + SLOT.toString(), + ]); + expect(selectResult.rows.length).toBe(1); + }); + + it('should not delete a signed duty', async () => { + await db.query(INSERT_OR_GET_DUTY, [ + VALIDATOR_ADDRESS.toString(), + SLOT.toString(), + BLOCK_NUMBER.toString(), + DUTY_TYPE, + MESSAGE_HASH, + NODE_ID, + LOCK_TOKEN, + ]); + await db.query(UPDATE_DUTY_SIGNED, [ + SIGNATURE, + VALIDATOR_ADDRESS.toString(), + SLOT.toString(), + DUTY_TYPE, + LOCK_TOKEN, + ]); + + // Even with correct token, can't delete a signed duty + const deleteResult = await db.query(DELETE_DUTY, [ + VALIDATOR_ADDRESS.toString(), + SLOT.toString(), + DUTY_TYPE, + LOCK_TOKEN, + ]); + + expect(deleteResult.affectedRows).toBe(0); + + // Verify still exists + const selectResult = await db.query(`SELECT * FROM validator_duties WHERE validator_address = $1 AND slot = $2`, [ + VALIDATOR_ADDRESS.toString(), + SLOT.toString(), + ]); + expect(selectResult.rows.length).toBe(1); + }); + }); + + describe('constraints', () => { + it('should enforce primary key constraint (validator_address, slot, duty_type)', async () => { + await db.query(INSERT_OR_GET_DUTY, [ + VALIDATOR_ADDRESS.toString(), + SLOT.toString(), + BLOCK_NUMBER.toString(), + DUTY_TYPE, + MESSAGE_HASH, + NODE_ID, + LOCK_TOKEN, + ]); + + // Direct insert should fail due to primary key constraint + await expect( + db.query( + `INSERT INTO validator_duties (validator_address, slot, block_number, duty_type, status, message_hash, node_id, lock_token) + VALUES ($1, $2, $3, $4, 'signing', $5, $6, $7)`, + [ + VALIDATOR_ADDRESS.toString(), + SLOT.toString(), + BLOCK_NUMBER.toString(), + DUTY_TYPE, + MESSAGE_HASH, + 'node-2', + 'token-2', + ], + ), + ).rejects.toThrow(); + }); + + it('should enforce duty_type check constraint', async () => { + await expect( + db.query( + `INSERT INTO validator_duties (validator_address, slot, block_number, duty_type, status, message_hash, node_id, lock_token) + VALUES ($1, $2, $3, 'INVALID_TYPE', 'signing', $4, $5, $6)`, + [VALIDATOR_ADDRESS.toString(), SLOT.toString(), BLOCK_NUMBER.toString(), MESSAGE_HASH, NODE_ID, LOCK_TOKEN], + ), + ).rejects.toThrow(); + }); + + it('should enforce status check constraint', async () => { + await expect( + db.query( + `INSERT INTO validator_duties (validator_address, slot, block_number, duty_type, status, message_hash, node_id, lock_token) + VALUES ($1, $2, $3, $4, 'invalid_status', $5, $6, $7)`, + [ + VALIDATOR_ADDRESS.toString(), + SLOT.toString(), + BLOCK_NUMBER.toString(), + DUTY_TYPE, + MESSAGE_HASH, + NODE_ID, + LOCK_TOKEN, + ], + ), + ).rejects.toThrow(); + }); + }); +}); + +describe('PostgresSlashingProtectionDatabase', () => { + let 
pglite: PGlite; + // pool needs to be 'any' due to some low-level discrepancies + // between pg's Pool & the adapter's implementation + let pool: any; + + beforeEach(() => { + pglite = new PGlite(); + pool = new Pool({ pglite }); + }); + + afterEach(async () => { + await pool.end(); + }); + + describe('initialize', () => { + it('should succeed when schema_version table exists with correct version', async () => { + // Set up schema with correct version + for (const statement of SCHEMA_SETUP) { + await pglite.query(statement); + } + await pglite.query(INSERT_SCHEMA_VERSION, [SCHEMA_VERSION]); + + const db = new PostgresSlashingProtectionDatabase(pool); + + await expect(db.initialize()).resolves.not.toThrow(); + }); + + it('should throw when schema_version table does not exist', async () => { + const db = new PostgresSlashingProtectionDatabase(pool); + + await expect(db.initialize()).rejects.toThrow( + 'Database schema not initialized. Please run migrations first: aztec migrate up --database-url ', + ); + }); + + it('should throw when schema_version table is empty', async () => { + // Create schema_version table but don't insert any version + await pglite.query(` + CREATE TABLE schema_version ( + version INTEGER PRIMARY KEY, + applied_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP + ) + `); + + const db = new PostgresSlashingProtectionDatabase(pool); + + await expect(db.initialize()).rejects.toThrow( + 'Database schema not initialized. Please run migrations first: aztec migrate up --database-url ', + ); + }); + + it('should throw when schema version is lower than expected', async () => { + // Set up schema with outdated version + for (const statement of SCHEMA_SETUP) { + await pglite.query(statement); + } + await pglite.query(INSERT_SCHEMA_VERSION, [SCHEMA_VERSION - 1]); + + const db = new PostgresSlashingProtectionDatabase(pool); + + await expect(db.initialize()).rejects.toThrow( + `Database schema version ${SCHEMA_VERSION - 1} is outdated (expected ${SCHEMA_VERSION}). Please run migrations: aztec migrate up --database-url `, + ); + }); + + it('should throw when schema version is higher than expected', async () => { + // Set up schema with newer version + for (const statement of SCHEMA_SETUP) { + await pglite.query(statement); + } + await pglite.query(INSERT_SCHEMA_VERSION, [SCHEMA_VERSION + 1]); + + const db = new PostgresSlashingProtectionDatabase(pool); + + await expect(db.initialize()).rejects.toThrow( + `Database schema version ${SCHEMA_VERSION + 1} is newer than expected (${SCHEMA_VERSION}). 
Please update your application.`, + ); + }); + + it('should allow closing the database connection', async () => { + const db = new PostgresSlashingProtectionDatabase(pool); + + const endSpy = jest.spyOn(pool, 'end'); + await db.close(); + expect(endSpy).toHaveBeenCalled(); + }); + }); + + describe('bigint handling', () => { + const VALIDATOR_ADDRESS = EthAddress.random(); + const SLOT = 100n; + const BLOCK_NUMBER = 50n; + const MESSAGE_HASH = Buffer32.random().toString(); + const NODE_ID = 'node-1'; + + beforeEach(async () => { + for (const statement of SCHEMA_SETUP) { + await pglite.query(statement); + } + await pglite.query(INSERT_SCHEMA_VERSION, [SCHEMA_VERSION]); + }); + + it('should handle large slot numbers correctly', async () => { + const largeSlot = 9007199254740991n; // Max safe integer + + const db = new PostgresSlashingProtectionDatabase(pool); + const result = await db.tryInsertOrGetExisting({ + validatorAddress: VALIDATOR_ADDRESS, + slot: largeSlot, + blockNumber: BLOCK_NUMBER, + dutyType: DutyType.BLOCK_PROPOSAL, + messageHash: MESSAGE_HASH, + nodeId: NODE_ID, + }); + + expect(result.isNew).toBe(true); + expect(result.record.slot).toBe(largeSlot); + }); + + it('should handle large block numbers correctly', async () => { + const largeBlockNumber = 9007199254740991n; + + const db = new PostgresSlashingProtectionDatabase(pool); + const result = await db.tryInsertOrGetExisting({ + validatorAddress: VALIDATOR_ADDRESS, + slot: SLOT, + blockNumber: largeBlockNumber, + dutyType: DutyType.BLOCK_PROPOSAL, + messageHash: MESSAGE_HASH, + nodeId: NODE_ID, + }); + + expect(result.isNew).toBe(true); + expect(result.record.blockNumber).toBe(largeBlockNumber); + }); + }); +}); diff --git a/yarn-project/validator-ha-signer/src/db/postgres.ts b/yarn-project/validator-ha-signer/src/db/postgres.ts new file mode 100644 index 000000000000..41ad04b672be --- /dev/null +++ b/yarn-project/validator-ha-signer/src/db/postgres.ts @@ -0,0 +1,202 @@ +/** + * PostgreSQL implementation of SlashingProtectionDatabase + */ +import { randomBytes } from '@aztec/foundation/crypto/random'; +import { EthAddress } from '@aztec/foundation/eth-address'; +import { type Logger, createLogger } from '@aztec/foundation/log'; + +import type { Pool, QueryResult } from 'pg'; + +import type { SlashingProtectionDatabase, TryInsertOrGetResult } from '../types.js'; +import { + CLEANUP_OWN_STUCK_DUTIES, + DELETE_DUTY, + INSERT_OR_GET_DUTY, + SCHEMA_VERSION, + UPDATE_DUTY_SIGNED, +} from './schema.js'; +import type { CheckAndRecordParams, DutyRow, DutyType, InsertOrGetRow, ValidatorDutyRecord } from './types.js'; + +/** + * PostgreSQL implementation of the slashing protection database + */ +export class PostgresSlashingProtectionDatabase implements SlashingProtectionDatabase { + private readonly log: Logger; + + constructor(private readonly pool: Pool) { + this.log = createLogger('slashing-protection:postgres'); + } + + /** + * Verify that database migrations have been run and schema version matches. + * Should be called once at startup. + * + * @throws Error if migrations haven't been run or schema version is outdated + */ + async initialize(): Promise { + let dbVersion: number; + + try { + const result = await this.pool.query<{ version: number }>( + `SELECT version FROM schema_version ORDER BY version DESC LIMIT 1`, + ); + + if (result.rows.length === 0) { + throw new Error('No version found'); + } + + dbVersion = result.rows[0].version; + } catch { + throw new Error( + 'Database schema not initialized. 
Please run migrations first: aztec migrate up --database-url ', + ); + } + + if (dbVersion < SCHEMA_VERSION) { + throw new Error( + `Database schema version ${dbVersion} is outdated (expected ${SCHEMA_VERSION}). Please run migrations: aztec migrate up --database-url `, + ); + } + + if (dbVersion > SCHEMA_VERSION) { + throw new Error( + `Database schema version ${dbVersion} is newer than expected (${SCHEMA_VERSION}). Please update your application.`, + ); + } + + this.log.info('Database schema verified', { version: dbVersion }); + } + + /** + * Atomically try to insert a new duty record, or get the existing one if present. + * + * @returns { isNew: true, record } if we successfully inserted and acquired the lock + * @returns { isNew: false, record } if a record already exists. lock_token is empty if the record already exists. + */ + async tryInsertOrGetExisting(params: CheckAndRecordParams): Promise { + // create a token for ownership verification + const lockToken = randomBytes(16).toString('hex'); + + const result: QueryResult = await this.pool.query(INSERT_OR_GET_DUTY, [ + params.validatorAddress.toString(), + params.slot.toString(), + params.blockNumber.toString(), + params.dutyType, + params.messageHash, + params.nodeId, + lockToken, + ]); + + if (result.rows.length === 0) { + // This shouldn't happen - the query always returns either the inserted or existing row + throw new Error('INSERT_OR_GET_DUTY returned no rows'); + } + + const row = result.rows[0]; + return { + isNew: row.is_new, + record: this.rowToRecord(row), + }; + } + + /** + * Update a duty to 'signed' status with the signature. + * Only succeeds if the lockToken matches (caller must be the one who created the duty). + * + * @returns true if the update succeeded, false if token didn't match or duty not found + */ + async updateDutySigned( + validatorAddress: EthAddress, + slot: bigint, + dutyType: DutyType, + signature: string, + lockToken: string, + ): Promise { + const result = await this.pool.query(UPDATE_DUTY_SIGNED, [ + signature, + validatorAddress.toString(), + slot.toString(), + dutyType, + lockToken, + ]); + + if (result.rowCount === 0) { + this.log.warn('Failed to update duty to signed status: invalid token or duty not found', { + validatorAddress: validatorAddress.toString(), + slot: slot.toString(), + dutyType, + }); + return false; + } + return true; + } + + /** + * Delete a duty record. + * Only succeeds if the lockToken matches (caller must be the one who created the duty). + * Used when signing fails to allow another node/attempt to retry. + * + * @returns true if the delete succeeded, false if token didn't match or duty not found + */ + async deleteDuty( + validatorAddress: EthAddress, + slot: bigint, + dutyType: DutyType, + lockToken: string, + ): Promise { + const result = await this.pool.query(DELETE_DUTY, [ + validatorAddress.toString(), + slot.toString(), + dutyType, + lockToken, + ]); + + if (result.rowCount === 0) { + this.log.warn('Failed to delete duty: invalid token or duty not found', { + validatorAddress: validatorAddress.toString(), + slot: slot.toString(), + dutyType, + }); + return false; + } + return true; + } + + /** + * Convert a database row to a ValidatorDutyRecord + */ + private rowToRecord(row: DutyRow): ValidatorDutyRecord { + return { + validatorAddress: EthAddress.fromString(row.validator_address), + slot: BigInt(row.slot), + blockNumber: BigInt(row.block_number), + dutyType: row.duty_type, + status: row.status, + messageHash: row.message_hash, + signature: row.signature ?? 
undefined, + nodeId: row.node_id, + lockToken: row.lock_token, + startedAt: row.started_at, + completedAt: row.completed_at ?? undefined, + errorMessage: row.error_message ?? undefined, + }; + } + + /** + * Close the database connection pool + */ + async close(): Promise { + await this.pool.end(); + this.log.info('Database connection pool closed'); + } + + /** + * Cleanup own stuck duties + * @returns the number of duties cleaned up + */ + async cleanupOwnStuckDuties(nodeId: string, maxAgeMs: number): Promise { + const cutoff = new Date(Date.now() - maxAgeMs); + const result = await this.pool.query(CLEANUP_OWN_STUCK_DUTIES, [nodeId, cutoff]); + return result.rowCount ?? 0; + } +} diff --git a/yarn-project/validator-ha-signer/src/db/schema.ts b/yarn-project/validator-ha-signer/src/db/schema.ts new file mode 100644 index 000000000000..5bd132c7ec88 --- /dev/null +++ b/yarn-project/validator-ha-signer/src/db/schema.ts @@ -0,0 +1,236 @@ +/** + * SQL schema for the validator_duties table + * + * This table is used for distributed locking and slashing protection across multiple validator nodes. + * The PRIMARY KEY constraint ensures that only one node can acquire the lock for a given validator, + * slot, and duty type combination. + */ + +/** + * Current schema version + */ +export const SCHEMA_VERSION = 1; + +/** + * SQL to create the validator_duties table + */ +export const CREATE_VALIDATOR_DUTIES_TABLE = ` +CREATE TABLE IF NOT EXISTS validator_duties ( + validator_address VARCHAR(42) NOT NULL, + slot BIGINT NOT NULL, + block_number BIGINT NOT NULL, + duty_type VARCHAR(30) NOT NULL CHECK (duty_type IN ('BLOCK_PROPOSAL', 'ATTESTATION', 'ATTESTATIONS_AND_SIGNERS')), + status VARCHAR(20) NOT NULL CHECK (status IN ('signing', 'signed', 'failed')), + message_hash VARCHAR(66) NOT NULL, + signature VARCHAR(132), + node_id VARCHAR(255) NOT NULL, + lock_token VARCHAR(64) NOT NULL, + started_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, + completed_at TIMESTAMP, + error_message TEXT, + + PRIMARY KEY (validator_address, slot, duty_type), + CHECK (completed_at IS NULL OR completed_at >= started_at) +); +`; + +/** + * SQL to create index on status and started_at for cleanup queries + */ +export const CREATE_STATUS_INDEX = ` +CREATE INDEX IF NOT EXISTS idx_validator_duties_status +ON validator_duties(status, started_at); +`; + +/** + * SQL to create index for querying duties by node + */ +export const CREATE_NODE_INDEX = ` +CREATE INDEX IF NOT EXISTS idx_validator_duties_node +ON validator_duties(node_id, started_at); +`; + +/** + * SQL to create the schema_version table for tracking migrations + */ +export const CREATE_SCHEMA_VERSION_TABLE = ` +CREATE TABLE IF NOT EXISTS schema_version ( + version INTEGER PRIMARY KEY, + applied_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP +); +`; + +/** + * SQL to initialize schema version + */ +export const INSERT_SCHEMA_VERSION = ` +INSERT INTO schema_version (version) +VALUES ($1) +ON CONFLICT (version) DO NOTHING; +`; + +/** + * Complete schema setup - all statements in order + */ +export const SCHEMA_SETUP = [ + CREATE_SCHEMA_VERSION_TABLE, + CREATE_VALIDATOR_DUTIES_TABLE, + CREATE_STATUS_INDEX, + CREATE_NODE_INDEX, +] as const; + +/** + * Query to get current schema version + */ +export const GET_SCHEMA_VERSION = ` +SELECT version FROM schema_version ORDER BY version DESC LIMIT 1; +`; + +/** + * Atomic insert-or-get query. + * Tries to insert a new duty record. If a record already exists (conflict), + * returns the existing record instead. 
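+ * For an existing record, the lock_token column is returned as an empty string,
+ * so competing nodes never see the token held by the original owner.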
+ * + * Returns the record with an `is_new` flag indicating whether we inserted or got existing. + */ +export const INSERT_OR_GET_DUTY = ` +WITH inserted AS ( + INSERT INTO validator_duties ( + validator_address, + slot, + block_number, + duty_type, + status, + message_hash, + node_id, + lock_token, + started_at + ) VALUES ($1, $2, $3, $4, 'signing', $5, $6, $7, CURRENT_TIMESTAMP) + ON CONFLICT (validator_address, slot, duty_type) DO NOTHING + RETURNING + validator_address, + slot, + block_number, + duty_type, + status, + message_hash, + signature, + node_id, + lock_token, + started_at, + completed_at, + error_message, + TRUE as is_new +) +SELECT * FROM inserted +UNION ALL +SELECT + validator_address, + slot, + block_number, + duty_type, + status, + message_hash, + signature, + node_id, + '' as lock_token, + started_at, + completed_at, + error_message, + FALSE as is_new +FROM validator_duties +WHERE validator_address = $1 + AND slot = $2 + AND duty_type = $4 + AND NOT EXISTS (SELECT 1 FROM inserted); +`; + +/** + * Query to update a duty to 'signed' status + */ +export const UPDATE_DUTY_SIGNED = ` +UPDATE validator_duties +SET status = 'signed', + signature = $1, + completed_at = CURRENT_TIMESTAMP +WHERE validator_address = $2 + AND slot = $3 + AND duty_type = $4 + AND status = 'signing' + AND lock_token = $5; +`; + +/** + * Query to delete a duty + * Only deletes if the lockToken matches + */ +export const DELETE_DUTY = ` +DELETE FROM validator_duties +WHERE validator_address = $1 + AND slot = $2 + AND duty_type = $3 + AND status = 'signing' + AND lock_token = $4; +`; + +/** + * Query to clean up old signed duties (for maintenance) + * Removes signed duties older than a specified timestamp + */ +export const CLEANUP_OLD_SIGNED_DUTIES = ` +DELETE FROM validator_duties +WHERE status = 'signed' + AND completed_at < $1; +`; + +/** + * Query to clean up old duties (for maintenance) + * Removes duties older than a specified timestamp + */ +export const CLEANUP_OLD_DUTIES = ` +DELETE FROM validator_duties +WHERE status IN ('signing', 'signed', 'failed') + AND started_at < $1; +`; + +/** + * Query to cleanup own stuck duties + * Removes duties in 'signing' status for a specific node that are older than maxAgeMs + */ +export const CLEANUP_OWN_STUCK_DUTIES = ` +DELETE FROM validator_duties +WHERE node_id = $1 + AND status = 'signing' + AND started_at < $2; +`; + +/** + * SQL to drop the validator_duties table + */ +export const DROP_VALIDATOR_DUTIES_TABLE = `DROP TABLE IF EXISTS validator_duties;`; + +/** + * SQL to drop the schema_version table + */ +export const DROP_SCHEMA_VERSION_TABLE = `DROP TABLE IF EXISTS schema_version;`; + +/** + * Query to get stuck duties (for monitoring/alerting) + * Returns duties in 'signing' status that have been stuck for too long + */ +export const GET_STUCK_DUTIES = ` +SELECT + validator_address, + slot, + block_number, + duty_type, + status, + message_hash, + node_id, + started_at, + EXTRACT(EPOCH FROM (CURRENT_TIMESTAMP - started_at)) as age_seconds +FROM validator_duties +WHERE status = 'signing' + AND started_at < $1 +ORDER BY started_at ASC; +`; diff --git a/yarn-project/validator-ha-signer/src/db/test_helper.ts b/yarn-project/validator-ha-signer/src/db/test_helper.ts new file mode 100644 index 000000000000..97d32658a0da --- /dev/null +++ b/yarn-project/validator-ha-signer/src/db/test_helper.ts @@ -0,0 +1,17 @@ +/** + * Test helpers for database setup + */ +import type { PGlite } from '@electric-sql/pglite'; + +import { INSERT_SCHEMA_VERSION, SCHEMA_SETUP, 
SCHEMA_VERSION } from './schema.js'; + +/** + * Set up the database schema for testing. + * This runs the same SQL that migrations would apply. + */ +export async function setupTestSchema(pglite: PGlite): Promise { + for (const statement of SCHEMA_SETUP) { + await pglite.query(statement); + } + await pglite.query(INSERT_SCHEMA_VERSION, [SCHEMA_VERSION]); +} diff --git a/yarn-project/validator-ha-signer/src/db/types.ts b/yarn-project/validator-ha-signer/src/db/types.ts new file mode 100644 index 000000000000..af56e6d782bc --- /dev/null +++ b/yarn-project/validator-ha-signer/src/db/types.ts @@ -0,0 +1,117 @@ +import type { EthAddress } from '@aztec/foundation/eth-address'; +import type { Signature } from '@aztec/foundation/eth-signature'; + +/** + * Row type from PostgreSQL query + */ +export interface DutyRow { + validator_address: string; + slot: string; + block_number: string; + duty_type: DutyType; + status: DutyStatus; + message_hash: string; + signature: string | null; + node_id: string; + lock_token: string; + started_at: Date; + completed_at: Date | null; + error_message: string | null; +} + +/** + * Row type from INSERT_OR_GET_DUTY query (includes is_new flag) + */ +export interface InsertOrGetRow extends DutyRow { + is_new: boolean; +} + +/** + * Type of validator duty being performed + */ +export enum DutyType { + BLOCK_PROPOSAL = 'BLOCK_PROPOSAL', + ATTESTATION = 'ATTESTATION', + ATTESTATIONS_AND_SIGNERS = 'ATTESTATIONS_AND_SIGNERS', +} + +/** + * Status of a duty in the database + */ +export enum DutyStatus { + SIGNING = 'signing', + SIGNED = 'signed', +} + +/** + * Record of a validator duty in the database + */ +export interface ValidatorDutyRecord { + /** Ethereum address of the validator */ + validatorAddress: EthAddress; + /** Slot number for this duty */ + slot: bigint; + /** Block number for this duty */ + blockNumber: bigint; + /** Type of duty being performed */ + dutyType: DutyType; + /** Current status of the duty */ + status: DutyStatus; + /** The signing root (hash) for this duty */ + messageHash: string; + /** The signature (populated after successful signing) */ + signature?: string; + /** Unique identifier for the node that acquired the lock */ + nodeId: string; + /** Secret token for verifying ownership of the duty lock */ + lockToken: string; + /** When the duty signing was started */ + startedAt: Date; + /** When the duty signing was completed (success or failure) */ + completedAt?: Date; + /** Error message if status is 'failed' */ + errorMessage?: string; +} + +/** + * Minimal info needed to identify a unique duty + */ +export interface DutyIdentifier { + validatorAddress: EthAddress; + slot: bigint; + dutyType: DutyType; +} + +/** + * Parameters for checking and recording a new duty + */ +export interface CheckAndRecordParams { + validatorAddress: EthAddress; + slot: bigint; + blockNumber: bigint; + dutyType: DutyType; + messageHash: string; + nodeId: string; +} + +/** + * Parameters for recording a successful signing + */ +export interface RecordSuccessParams { + validatorAddress: EthAddress; + slot: bigint; + dutyType: DutyType; + signature: Signature; + nodeId: string; + lockToken: string; +} + +/** + * Parameters for deleting a duty + */ +export interface DeleteDutyParams { + validatorAddress: EthAddress; + slot: bigint; + dutyType: DutyType; + lockToken: string; +} diff --git a/yarn-project/validator-ha-signer/src/errors.test.ts b/yarn-project/validator-ha-signer/src/errors.test.ts new file mode 100644 index 000000000000..fb40e1017771 --- /dev/null +++ 
b/yarn-project/validator-ha-signer/src/errors.test.ts @@ -0,0 +1,90 @@ +import { describe, expect, it } from '@jest/globals'; + +import { DutyAlreadySignedError, SlashingProtectionError } from './errors.js'; +import { DutyType } from './types.js'; + +describe('DutyAlreadySignedError', () => { + it('should create error with correct properties', () => { + const slot = 100n; + const dutyType = DutyType.BLOCK_PROPOSAL; + const signedByNode = 'node-1'; + + const error = new DutyAlreadySignedError(slot, dutyType, signedByNode); + + expect(error).toBeInstanceOf(Error); + expect(error.name).toBe('DutyAlreadySignedError'); + expect(error.slot).toBe(slot); + expect(error.dutyType).toBe(dutyType); + expect(error.signedByNode).toBe(signedByNode); + expect(error.message).toBe('Duty BLOCK_PROPOSAL for slot 100 already signed by node node-1'); + }); + + it('should work with ATTESTATION duty type', () => { + const error = new DutyAlreadySignedError(200n, DutyType.ATTESTATION, 'node-2'); + expect(error.message).toBe('Duty ATTESTATION for slot 200 already signed by node node-2'); + }); + + it('should work with ATTESTATIONS_AND_SIGNERS duty type', () => { + const error = new DutyAlreadySignedError(300n, DutyType.ATTESTATIONS_AND_SIGNERS, 'node-3'); + expect(error.message).toBe('Duty ATTESTATIONS_AND_SIGNERS for slot 300 already signed by node node-3'); + }); + + it('should work with large slot numbers', () => { + const largeSlot = 9007199254740991n; + const error = new DutyAlreadySignedError(largeSlot, DutyType.BLOCK_PROPOSAL, 'node-large'); + expect(error.slot).toBe(largeSlot); + expect(error.message).toContain(largeSlot.toString()); + }); +}); + +describe('SlashingProtectionError', () => { + const existingRoot = '0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef'; + const attemptedRoot = '0xabcdef1234567890abcdef1234567890abcdef1234567890abcdef1234567890'; + + it('should create error with correct properties', () => { + const slot = 100n; + const dutyType = DutyType.BLOCK_PROPOSAL; + + const error = new SlashingProtectionError(slot, dutyType, existingRoot, attemptedRoot); + + expect(error).toBeInstanceOf(Error); + expect(error.name).toBe('SlashingProtectionError'); + expect(error.slot).toBe(slot); + expect(error.dutyType).toBe(dutyType); + expect(error.existingMessageHash).toBe(existingRoot); + expect(error.attemptedMessageHash).toBe(attemptedRoot); + }); + + it('should include truncated signing roots in message', () => { + const error = new SlashingProtectionError(100n, DutyType.BLOCK_PROPOSAL, existingRoot, attemptedRoot); + + expect(error.message).toContain('Slashing protection'); + expect(error.message).toContain(DutyType.BLOCK_PROPOSAL.toString()); + expect(error.message).toContain('slot 100'); + expect(error.message).toContain('already signed with different data'); + // Should contain first 10 characters of each root + expect(error.message).toContain(existingRoot.slice(0, 10)); + expect(error.message).toContain(attemptedRoot.slice(0, 10)); + }); + + it('should work with ATTESTATION duty type', () => { + const error = new SlashingProtectionError(200n, DutyType.ATTESTATION, existingRoot, attemptedRoot); + expect(error.message).toContain(DutyType.ATTESTATION.toString()); + expect(error.message).toContain('slot 200'); + }); + + it('should work with ATTESTATIONS_AND_SIGNERS duty type', () => { + const error = new SlashingProtectionError(300n, DutyType.ATTESTATIONS_AND_SIGNERS, existingRoot, attemptedRoot); + expect(error.message).toContain(DutyType.ATTESTATIONS_AND_SIGNERS.toString()); + 
expect(error.message).toContain('slot 300'); + }); + + it('should preserve full signing roots in properties', () => { + const error = new SlashingProtectionError(100n, DutyType.BLOCK_PROPOSAL, existingRoot, attemptedRoot); + expect(error.existingMessageHash).toBe(existingRoot); + expect(error.attemptedMessageHash).toBe(attemptedRoot); + // Full roots should be in properties, not just truncated in message + expect(error.existingMessageHash.length).toBe(existingRoot.length); + expect(error.attemptedMessageHash.length).toBe(attemptedRoot.length); + }); +}); diff --git a/yarn-project/validator-ha-signer/src/errors.ts b/yarn-project/validator-ha-signer/src/errors.ts new file mode 100644 index 000000000000..9ea1f35e0c2e --- /dev/null +++ b/yarn-project/validator-ha-signer/src/errors.ts @@ -0,0 +1,42 @@ +/** + * Custom errors for the validator HA signer + */ +import type { DutyType } from './db/types.js'; + +/** + * Thrown when a duty has already been signed (by any node). + * This is expected behavior in an HA setup - all nodes try to sign, + * the first one wins, and subsequent attempts get this error. + */ +export class DutyAlreadySignedError extends Error { + constructor( + public readonly slot: bigint, + public readonly dutyType: DutyType, + public readonly signedByNode: string, + ) { + super(`Duty ${dutyType} for slot ${slot} already signed by node ${signedByNode}`); + this.name = 'DutyAlreadySignedError'; + } +} + +/** + * Thrown when attempting to sign data that conflicts with an already-signed duty. + * This means the same validator tried to sign DIFFERENT data for the same slot. + * + * This is expected in HA setups where nodes may build different blocks + * (e.g., different transaction ordering) - the protection prevents double-signing. + */ +export class SlashingProtectionError extends Error { + constructor( + public readonly slot: bigint, + public readonly dutyType: DutyType, + public readonly existingMessageHash: string, + public readonly attemptedMessageHash: string, + ) { + super( + `Slashing protection: ${dutyType} for slot ${slot} was already signed with different data. ` + + `Existing: ${existingMessageHash.slice(0, 10)}..., Attempted: ${attemptedMessageHash.slice(0, 10)}...`, + ); + this.name = 'SlashingProtectionError'; + } +} diff --git a/yarn-project/validator-ha-signer/src/factory.ts b/yarn-project/validator-ha-signer/src/factory.ts new file mode 100644 index 000000000000..1b1810a0d6ae --- /dev/null +++ b/yarn-project/validator-ha-signer/src/factory.ts @@ -0,0 +1,87 @@ +/** + * Factory functions for creating validator HA signers + */ +import { Pool } from 'pg'; + +import type { CreateHASignerConfig } from './config.js'; +import { PostgresSlashingProtectionDatabase } from './db/postgres.js'; +import type { CreateHASignerDeps, SlashingProtectionDatabase } from './types.js'; +import { ValidatorHASigner } from './validator_ha_signer.js'; + +/** + * Create a validator HA signer with PostgreSQL backend + * + * After creating the signer, call `signer.start()` to begin background + * cleanup tasks. Call `signer.stop()` during graceful shutdown. + * + * Example with manual migrations (recommended for production): + * ```bash + * # Run migrations separately + * yarn migrate:up + * ``` + * + * ```typescript + * const { signer, db } = await createHASigner({ + * databaseUrl: process.env.DATABASE_URL, + * enabled: true, + * nodeId: 'validator-node-1', + * pollingIntervalMs: 100, + * signingTimeoutMs: 3000, + * }); + * signer.start(); // Start background cleanup + * + * // ... 
use signer ... + * + * await signer.stop(); // On shutdown + * ``` + * + * Example with automatic migrations (simpler for dev/testing): + * ```typescript + * const { signer, db } = await createHASigner({ + * databaseUrl: process.env.DATABASE_URL, + * enabled: true, + * nodeId: 'validator-node-1', + * runMigrations: true, // Auto-run migrations on startup + * }); + * signer.start(); + * ``` + * + * @param config - Configuration for the HA signer + * @param deps - Optional dependencies (e.g., for testing) + * @returns An object containing the signer and database instances + */ +export async function createHASigner( + config: CreateHASignerConfig, + deps?: CreateHASignerDeps, +): Promise<{ + signer: ValidatorHASigner; + db: SlashingProtectionDatabase; +}> { + const { databaseUrl, poolMaxCount, poolMinCount, poolIdleTimeoutMs, poolConnectionTimeoutMs, ...signerConfig } = + config; + + // Create connection pool (or use provided pool) + let pool: Pool; + if (!deps?.pool) { + pool = new Pool({ + connectionString: databaseUrl, + max: poolMaxCount ?? 10, + min: poolMinCount ?? 0, + idleTimeoutMillis: poolIdleTimeoutMs ?? 10_000, + connectionTimeoutMillis: poolConnectionTimeoutMs ?? 0, + }); + } else { + pool = deps.pool; + } + + // Create database instance + const db = new PostgresSlashingProtectionDatabase(pool); + + // Verify database schema is initialized and version matches + await db.initialize(); + + // Create signer + const signer = new ValidatorHASigner(db, { ...signerConfig, databaseUrl }); + + return { signer, db }; +} diff --git a/yarn-project/validator-ha-signer/src/migrations.ts b/yarn-project/validator-ha-signer/src/migrations.ts new file mode 100644 index 000000000000..fda3a95efc5e --- /dev/null +++ b/yarn-project/validator-ha-signer/src/migrations.ts @@ -0,0 +1,59 @@ +/** + * Programmatic migration runner + */ +import { createLogger } from '@aztec/foundation/log'; + +import { runner } from 'node-pg-migrate'; +import { dirname, join } from 'path'; +import { fileURLToPath } from 'url'; + +const __filename = fileURLToPath(import.meta.url); +const __dirname = dirname(__filename); + +export interface RunMigrationsOptions { + /** Migration direction ('up' to apply, 'down' to rollback). Defaults to 'up'. */ + direction?: 'up' | 'down'; + /** Enable verbose output. Defaults to false. */ + verbose?: boolean; +} + +/** + * Run database migrations programmatically + * + * @param databaseUrl - PostgreSQL connection string + * @param options - Migration options (direction, verbose) + * @returns Array of applied migration names + */ +export async function runMigrations(databaseUrl: string, options: RunMigrationsOptions = {}): Promise { + const direction = options.direction ?? 'up'; + const verbose = options.verbose ?? false; + + const log = createLogger('validator-ha-signer:migrations'); + + try { + log.info(`Running migrations ${direction}...`); + + const appliedMigrations = await runner({ + databaseUrl, + dir: join(__dirname, 'db', 'migrations'), + direction, + migrationsTable: 'pgmigrations', + count: direction === 'down' ? 1 : Infinity, + verbose, + log: msg => (verbose ? 
log.info(msg) : log.debug(msg)), + }); + + if (appliedMigrations.length === 0) { + log.info('No migrations to apply - schema is up to date'); + } else { + log.info(`Applied ${appliedMigrations.length} migration(s)`, { + migrations: appliedMigrations.map(m => m.name), + }); + } + + return appliedMigrations.map(m => m.name); + } catch (error: any) { + log.error('Migration failed', error); + throw error; + } +} diff --git a/yarn-project/validator-ha-signer/src/slashing_protection_service.test.ts b/yarn-project/validator-ha-signer/src/slashing_protection_service.test.ts new file mode 100644 index 000000000000..eb514dca0258 --- /dev/null +++ b/yarn-project/validator-ha-signer/src/slashing_protection_service.test.ts @@ -0,0 +1,517 @@ +import { Buffer32 } from '@aztec/foundation/buffer'; +import { EthAddress } from '@aztec/foundation/eth-address'; +import { sleep } from '@aztec/foundation/sleep'; + +import { PGlite } from '@electric-sql/pglite'; +import { Pool } from '@middle-management/pglite-pg-adapter'; + +import { PostgresSlashingProtectionDatabase } from './db/postgres.js'; +import { setupTestSchema } from './db/test_helper.js'; +import { DutyAlreadySignedError, SlashingProtectionError } from './errors.js'; +import { SlashingProtectionService } from './slashing_protection_service.js'; +import { type CheckAndRecordParams, DutyStatus, DutyType, type SlashingProtectionConfig } from './types.js'; + +// Test data constants +const VALIDATOR_ADDRESS = EthAddress.random(); +const SLOT = 100n; +const BLOCK_NUMBER = 50n; +const DUTY_TYPE: DutyType = DutyType.BLOCK_PROPOSAL; +const MESSAGE_HASH = Buffer32.random().toString(); +const MESSAGE_HASH_2 = Buffer32.random().toString(); +const NODE_ID = 'node-1'; +const NODE_ID_2 = 'node-2'; +const SIGNATURE = '0xsignature'; + +describe('SlashingProtectionService', () => { + let pglite: PGlite; + let pool: Pool; + let db: PostgresSlashingProtectionDatabase; + let service: SlashingProtectionService; + let config: SlashingProtectionConfig; + + beforeEach(async () => { + pglite = new PGlite(); + pool = new Pool({ pglite }); + + await setupTestSchema(pglite); + db = new PostgresSlashingProtectionDatabase(pool as any); + await db.initialize(); + + config = { + enabled: true, + nodeId: NODE_ID, + pollingIntervalMs: 50, + signingTimeoutMs: 1000, + maxStuckDutiesAgeMs: 60_000, + }; + service = new SlashingProtectionService(db, config); + }); + + afterEach(async () => { + await service.stop(); + await pool.end(); + }); + + describe('checkAndRecord', () => { + it('should acquire lock on first attempt', async () => { + const params: CheckAndRecordParams = { + validatorAddress: VALIDATOR_ADDRESS, + slot: SLOT, + blockNumber: BLOCK_NUMBER, + dutyType: DUTY_TYPE, + messageHash: MESSAGE_HASH, + nodeId: NODE_ID, + }; + + await service.checkAndRecord(params); + + // Verify via tryInsertOrGetExisting - should return the existing record + const result = await db.tryInsertOrGetExisting(params); + expect(result.isNew).toBe(false); // Already exists + expect(result.record.status).toBe(DutyStatus.SIGNING); + expect(result.record.nodeId).toBe(NODE_ID); + expect(result.record.messageHash).toBe(MESSAGE_HASH); + }); + + it('should throw DutyAlreadySignedError when duty already signed with same data', async () => { + const params: CheckAndRecordParams = { + validatorAddress: VALIDATOR_ADDRESS, + slot: SLOT, + blockNumber: BLOCK_NUMBER, + dutyType: DUTY_TYPE, + messageHash: MESSAGE_HASH, + nodeId: NODE_ID, + }; + + // First node signs + const lockToken = await 
service.checkAndRecord(params); + await service.recordSuccess({ + validatorAddress: VALIDATOR_ADDRESS, + slot: SLOT, + dutyType: DUTY_TYPE, + signature: { toString: () => SIGNATURE } as any, + nodeId: NODE_ID, + lockToken, + }); + + // Second node tries to sign same data + const params2 = { ...params, nodeId: NODE_ID_2 }; + await expect(service.checkAndRecord(params2)).rejects.toThrow(DutyAlreadySignedError); + }); + + it('should throw SlashingProtectionError when duty already signed with different data', async () => { + const params: CheckAndRecordParams = { + validatorAddress: VALIDATOR_ADDRESS, + slot: SLOT, + blockNumber: BLOCK_NUMBER, + dutyType: DUTY_TYPE, + messageHash: MESSAGE_HASH, + nodeId: NODE_ID, + }; + + // First node signs + const lockToken = await service.checkAndRecord(params); + await service.recordSuccess({ + validatorAddress: VALIDATOR_ADDRESS, + slot: SLOT, + dutyType: DUTY_TYPE, + signature: { toString: () => SIGNATURE } as any, + nodeId: NODE_ID, + lockToken, + }); + + // Second node tries to sign different data + const params2 = { ...params, messageHash: MESSAGE_HASH_2, nodeId: NODE_ID_2 }; + await expect(service.checkAndRecord(params2)).rejects.toThrow(SlashingProtectionError); + }); + + it('should allow retry after deleted duty', async () => { + const params: CheckAndRecordParams = { + validatorAddress: VALIDATOR_ADDRESS, + slot: SLOT, + blockNumber: BLOCK_NUMBER, + dutyType: DUTY_TYPE, + messageHash: MESSAGE_HASH, + nodeId: NODE_ID, + }; + + // First node acquires lock then deletes (simulating failure) + const lockToken = await service.checkAndRecord(params); + await service.deleteDuty({ + validatorAddress: VALIDATOR_ADDRESS, + slot: SLOT, + dutyType: DUTY_TYPE, + lockToken, + }); + + // Second node should be able to retry + const params2 = { ...params, nodeId: NODE_ID_2 }; + await service.checkAndRecord(params2); + + const result = await db.tryInsertOrGetExisting(params2); + expect(result.isNew).toBe(false); + expect(result.record.status).toBe(DutyStatus.SIGNING); + expect(result.record.nodeId).toBe(NODE_ID_2); + }); + + it('should wait and throw when another node is signing same data', async () => { + const params: CheckAndRecordParams = { + validatorAddress: VALIDATOR_ADDRESS, + slot: SLOT, + blockNumber: BLOCK_NUMBER, + dutyType: DUTY_TYPE, + messageHash: MESSAGE_HASH, + nodeId: NODE_ID, + }; + + // First node acquires lock + const lockToken = await service.checkAndRecord(params); + + // Second node tries to acquire lock + const params2 = { ...params, nodeId: NODE_ID_2 }; + const promise = service.checkAndRecord(params2); + + // Complete first node's signing after a short delay + await sleep(100); + await service.recordSuccess({ + validatorAddress: VALIDATOR_ADDRESS, + slot: SLOT, + dutyType: DUTY_TYPE, + signature: { toString: () => SIGNATURE } as any, + nodeId: NODE_ID, + lockToken, + }); + + // Second node should get DutyAlreadySignedError + await expect(promise).rejects.toThrow(DutyAlreadySignedError); + }); + + it('should wait and throw when another node is signing different data', async () => { + const params: CheckAndRecordParams = { + validatorAddress: VALIDATOR_ADDRESS, + slot: SLOT, + blockNumber: BLOCK_NUMBER, + dutyType: DUTY_TYPE, + messageHash: MESSAGE_HASH, + nodeId: NODE_ID, + }; + + // First node acquires lock + const lockToken = await service.checkAndRecord(params); + + // Second node tries to acquire lock with different data + const params2 = { ...params, messageHash: MESSAGE_HASH_2, nodeId: NODE_ID_2 }; + const promise = 
service.checkAndRecord(params2); + + // Complete first node's signing after a short delay + await sleep(100); + await service.recordSuccess({ + validatorAddress: VALIDATOR_ADDRESS, + slot: SLOT, + dutyType: DUTY_TYPE, + signature: { toString: () => SIGNATURE } as any, + nodeId: NODE_ID, + lockToken, + }); + + // Second node should get SlashingProtectionError + await expect(promise).rejects.toThrow(SlashingProtectionError); + }); + + it('should acquire lock after other node deletes duty on failure', async () => { + const params: CheckAndRecordParams = { + validatorAddress: VALIDATOR_ADDRESS, + slot: SLOT, + blockNumber: BLOCK_NUMBER, + dutyType: DUTY_TYPE, + messageHash: MESSAGE_HASH, + nodeId: NODE_ID, + }; + + // First node acquires lock + const lockToken = await service.checkAndRecord(params); + + // First node deletes on failure + await service.deleteDuty({ + validatorAddress: VALIDATOR_ADDRESS, + slot: SLOT, + dutyType: DUTY_TYPE, + lockToken, + }); + + // Second node should be able to acquire the lock (retry) + const params2 = { ...params, nodeId: NODE_ID_2 }; + await service.checkAndRecord(params2); + + // Verify second node acquired the lock + const result = await db.tryInsertOrGetExisting(params2); + expect(result.isNew).toBe(false); + expect(result.record.status).toBe(DutyStatus.SIGNING); + expect(result.record.nodeId).toBe(NODE_ID_2); + }); + + it('should timeout if signing takes too long', async () => { + const shortTimeoutConfig = { ...config, signingTimeoutMs: 200 }; + const serviceWithShortTimeout = new SlashingProtectionService(db, shortTimeoutConfig); + + try { + const params: CheckAndRecordParams = { + validatorAddress: VALIDATOR_ADDRESS, + slot: SLOT, + blockNumber: BLOCK_NUMBER, + dutyType: DUTY_TYPE, + messageHash: MESSAGE_HASH, + nodeId: NODE_ID, + }; + + // First node acquires lock but never completes + await serviceWithShortTimeout.checkAndRecord(params); + + // Second node tries to acquire lock + const params2 = { ...params, nodeId: NODE_ID_2 }; + await expect(serviceWithShortTimeout.checkAndRecord(params2)).rejects.toThrow(DutyAlreadySignedError); + } finally { + await serviceWithShortTimeout.stop(); + } + }); + }); + + describe('recordSuccess', () => { + it('should update duty to signed status', async () => { + const params: CheckAndRecordParams = { + validatorAddress: VALIDATOR_ADDRESS, + slot: SLOT, + blockNumber: BLOCK_NUMBER, + dutyType: DUTY_TYPE, + messageHash: MESSAGE_HASH, + nodeId: NODE_ID, + }; + + const lockToken = await service.checkAndRecord(params); + const success = await service.recordSuccess({ + validatorAddress: VALIDATOR_ADDRESS, + slot: SLOT, + dutyType: DUTY_TYPE, + signature: { toString: () => SIGNATURE } as any, + nodeId: NODE_ID, + lockToken, + }); + + expect(success).toBe(true); + const result = await db.tryInsertOrGetExisting(params); + expect(result.isNew).toBe(false); + expect(result.record.status).toBe(DutyStatus.SIGNED); + expect(result.record.signature).toBe(SIGNATURE); + expect(result.record.completedAt).toBeDefined(); + }); + + it('should fail to update with wrong lockToken', async () => { + const params: CheckAndRecordParams = { + validatorAddress: VALIDATOR_ADDRESS, + slot: SLOT, + blockNumber: BLOCK_NUMBER, + dutyType: DUTY_TYPE, + messageHash: MESSAGE_HASH, + nodeId: NODE_ID, + }; + + await service.checkAndRecord(params); + const success = await service.recordSuccess({ + validatorAddress: VALIDATOR_ADDRESS, + slot: SLOT, + dutyType: DUTY_TYPE, + signature: { toString: () => SIGNATURE } as any, + nodeId: NODE_ID, + lockToken: 
'wrong-token', + }); + + expect(success).toBe(false); + // Duty should still be in signing state + const result = await db.tryInsertOrGetExisting(params); + expect(result.record.status).toBe(DutyStatus.SIGNING); + }); + }); + + describe('deleteDuty', () => { + it('should delete duty with correct lockToken', async () => { + const params: CheckAndRecordParams = { + validatorAddress: VALIDATOR_ADDRESS, + slot: SLOT, + blockNumber: BLOCK_NUMBER, + dutyType: DUTY_TYPE, + messageHash: MESSAGE_HASH, + nodeId: NODE_ID, + }; + + const lockToken = await service.checkAndRecord(params); + const success = await service.deleteDuty({ + validatorAddress: VALIDATOR_ADDRESS, + slot: SLOT, + dutyType: DUTY_TYPE, + lockToken, + }); + + expect(success).toBe(true); + // Duty should be gone - new insert should succeed + const result = await db.tryInsertOrGetExisting(params); + expect(result.isNew).toBe(true); + }); + + it('should fail to delete with wrong lockToken', async () => { + const params: CheckAndRecordParams = { + validatorAddress: VALIDATOR_ADDRESS, + slot: SLOT, + blockNumber: BLOCK_NUMBER, + dutyType: DUTY_TYPE, + messageHash: MESSAGE_HASH, + nodeId: NODE_ID, + }; + + await service.checkAndRecord(params); + const success = await service.deleteDuty({ + validatorAddress: VALIDATOR_ADDRESS, + slot: SLOT, + dutyType: DUTY_TYPE, + lockToken: 'wrong-token', + }); + + expect(success).toBe(false); + // Duty should still be in signing state + const result = await db.tryInsertOrGetExisting(params); + expect(result.record.status).toBe(DutyStatus.SIGNING); + }); + }); + + describe('concurrent operations', () => { + it('should handle multiple nodes competing for the same duty', async () => { + const params1: CheckAndRecordParams = { + validatorAddress: VALIDATOR_ADDRESS, + slot: SLOT, + blockNumber: BLOCK_NUMBER, + dutyType: DUTY_TYPE, + messageHash: MESSAGE_HASH, + nodeId: 'node-1', + }; + const params2 = { ...params1, nodeId: 'node-2' }; + const params3 = { ...params1, nodeId: 'node-3' }; + + // All three nodes try to acquire lock simultaneously + const promises = [params1, params2, params3].map(params => + service.checkAndRecord(params).then(lockToken => ({ nodeId: params.nodeId, lockToken })), + ); + + // Whichever resolves first is the actual winner + const winner = await Promise.race(promises); + await service.recordSuccess({ + validatorAddress: VALIDATOR_ADDRESS, + slot: SLOT, + dutyType: DUTY_TYPE, + signature: { toString: () => SIGNATURE } as any, + nodeId: winner.nodeId, + lockToken: winner.lockToken, + }); + + // Wait for all promises to complete + const results = await Promise.allSettled(promises); + + // Exactly one should succeed + const successes = results.filter(r => r.status === 'fulfilled').length; + const failures = results.filter(r => r.status === 'rejected').length; + + expect(successes).toBe(1); + expect(failures).toBe(2); + + // Both failures should be DutyAlreadySignedError + results.forEach(result => { + if (result.status === 'rejected') { + expect(result.reason).toBeInstanceOf(DutyAlreadySignedError); + } + }); + }); + + it('should handle multiple different duties concurrently', async () => { + const promises = []; + + for (let i = 0; i < 5; i++) { + const params: CheckAndRecordParams = { + validatorAddress: VALIDATOR_ADDRESS, + slot: BigInt(100 + i), + blockNumber: BigInt(50 + i), + dutyType: DUTY_TYPE, + messageHash: MESSAGE_HASH, + nodeId: NODE_ID, + }; + promises.push(service.checkAndRecord(params)); + } + + await Promise.all(promises); + + // Verify all duties were created + for 
(let i = 0; i < 5; i++) { + const result = await db.tryInsertOrGetExisting({ + validatorAddress: VALIDATOR_ADDRESS, + slot: BigInt(100 + i), + blockNumber: BigInt(50 + i), + dutyType: DUTY_TYPE, + messageHash: MESSAGE_HASH, + nodeId: NODE_ID, + }); + expect(result.isNew).toBe(false); + expect(result.record.status).toBe(DutyStatus.SIGNING); + expect(result.record.nodeId).toBe(NODE_ID); + } + }); + }); + + describe('nodeId', () => { + it('should return the configured node ID', () => { + expect(service.nodeId).toBe(NODE_ID); + }); + }); + + describe('lifecycle', () => { + it('should start and stop without error', async () => { + service.start(); + await service.stop(); + }); + + it('should cleanup stuck duties on start', async () => { + // Create a stuck duty by directly inserting (simulating a crash) + const params: CheckAndRecordParams = { + validatorAddress: VALIDATOR_ADDRESS, + slot: SLOT, + blockNumber: BLOCK_NUMBER, + dutyType: DUTY_TYPE, + messageHash: MESSAGE_HASH, + nodeId: NODE_ID, + }; + + // Insert a duty that will be "stuck" + await service.checkAndRecord(params); + + // Verify duty exists and is in signing state + let result = await db.tryInsertOrGetExisting(params); + expect(result.isNew).toBe(false); + expect(result.record.status).toBe(DutyStatus.SIGNING); + + // Create a new service with a very short maxStuckDutiesAgeMs + const shortAgeConfig = { ...config, maxStuckDutiesAgeMs: 1 }; + const newService = new SlashingProtectionService(db, shortAgeConfig); + + // Wait a bit for the duty to become "old" + await sleep(10); + + // Start the new service - this should trigger immediate cleanup + newService.start(); + + // Give cleanup time to run + await sleep(100); + + await newService.stop(); + + // Now the duty should be deleted, so we can insert again + result = await db.tryInsertOrGetExisting(params); + expect(result.isNew).toBe(true); + }); + }); +}); diff --git a/yarn-project/validator-ha-signer/src/slashing_protection_service.ts b/yarn-project/validator-ha-signer/src/slashing_protection_service.ts new file mode 100644 index 000000000000..a60e1cd4dab9 --- /dev/null +++ b/yarn-project/validator-ha-signer/src/slashing_protection_service.ts @@ -0,0 +1,216 @@ +/** + * Slashing Protection Service + * + * Provides distributed locking and slashing protection for validator duties. + * Uses an external database to coordinate across multiple validator nodes. + */ +import { type Logger, createLogger } from '@aztec/foundation/log'; +import { RunningPromise } from '@aztec/foundation/promise'; +import { sleep } from '@aztec/foundation/sleep'; + +import { type CheckAndRecordParams, type DeleteDutyParams, DutyStatus, type RecordSuccessParams } from './db/types.js'; +import { DutyAlreadySignedError, SlashingProtectionError } from './errors.js'; +import type { SlashingProtectionConfig, SlashingProtectionDatabase } from './types.js'; + +/** + * Slashing Protection Service + * + * This service ensures that a validator only signs one block/attestation per slot, + * even when running multiple redundant nodes (HA setup). + * + * All nodes in the HA setup try to sign - the first one wins, others get + * DutyAlreadySignedError (normal) or SlashingProtectionError (if different data). + * + * Flow: + * 1. checkAndRecord() - Atomically try to acquire lock via tryInsertOrGetExisting + * 2. Caller performs the signing operation + * 3. 
recordSuccess() - Update to 'signed' status with signature + * OR deleteDuty() - Delete the record to allow retry + */ +export class SlashingProtectionService { + private readonly log: Logger; + private readonly pollingIntervalMs: number; + private readonly signingTimeoutMs: number; + + private cleanupRunningPromise: RunningPromise; + + constructor( + private readonly db: SlashingProtectionDatabase, + private readonly config: SlashingProtectionConfig, + ) { + this.log = createLogger('slashing-protection'); + this.pollingIntervalMs = config.pollingIntervalMs; + this.signingTimeoutMs = config.signingTimeoutMs; + + this.cleanupRunningPromise = new RunningPromise( + this.cleanupStuckDuties.bind(this), + this.log, + this.config.maxStuckDutiesAgeMs, + ); + } + + /** + * Check if a duty can be performed and acquire the lock if so. + * + * This method uses an atomic insert-or-get operation. + * It will: + * 1. Try to insert a new record with 'signing' status + * 2. If insert succeeds, we acquired the lock - return the lockToken + * 3. If a record exists, handle based on status: + * - SIGNED: Throw appropriate error (already signed or slashing protection) + * - FAILED: Delete the failed record + * - SIGNING: Wait and poll until status changes, then handle result + * + * @returns The lockToken that must be used for recordSuccess/deleteDuty + * @throws DutyAlreadySignedError if the duty was already completed + * @throws SlashingProtectionError if attempting to sign different data for same slot/duty + */ + async checkAndRecord(params: CheckAndRecordParams): Promise { + const { validatorAddress, slot, dutyType, messageHash, nodeId } = params; + const startTime = Date.now(); + + this.log.debug(`Checking duty: ${dutyType} for slot ${slot}`, { + validatorAddress: validatorAddress.toString(), + nodeId, + }); + + while (true) { + // insert if not present, get existing if present + const { isNew, record } = await this.db.tryInsertOrGetExisting(params); + + if (isNew) { + // We successfully acquired the lock + this.log.info(`Acquired lock for duty ${dutyType} at slot ${slot}`, { + validatorAddress: validatorAddress.toString(), + nodeId, + }); + return record.lockToken; + } + + // Record already exists - handle based on status + if (record.status === DutyStatus.SIGNED) { + // Duty was already signed - check if same or different data + if (record.messageHash !== messageHash) { + this.log.verbose(`Slashing protection triggered for duty ${dutyType} at slot ${slot}`, { + validatorAddress: validatorAddress.toString(), + existingMessageHash: record.messageHash, + attemptedMessageHash: messageHash, + existingNodeId: record.nodeId, + attemptingNodeId: nodeId, + }); + throw new SlashingProtectionError(slot, dutyType, record.messageHash, messageHash); + } + throw new DutyAlreadySignedError(slot, dutyType, record.nodeId); + } else if (record.status === DutyStatus.SIGNING) { + // Another node is currently signing - check for timeout + if (Date.now() - startTime > this.signingTimeoutMs) { + this.log.warn(`Timeout waiting for signing to complete for duty ${dutyType} at slot ${slot}`, { + validatorAddress: validatorAddress.toString(), + timeoutMs: this.signingTimeoutMs, + signingNodeId: record.nodeId, + }); + throw new DutyAlreadySignedError(slot, dutyType, 'unknown (timeout)'); + } + + // Wait and poll + this.log.debug(`Waiting for signing to complete for duty ${dutyType} at slot ${slot}`, { + validatorAddress: validatorAddress.toString(), + signingNodeId: record.nodeId, + }); + await sleep(this.pollingIntervalMs); + // 
Loop continues - next iteration will check status again + } else { + throw new Error(`Unknown duty status: ${record.status}`); + } + } + } + + /** + * Record a successful signing operation. + * Updates the duty status to 'signed' and stores the signature. + * Only succeeds if the lockToken matches (caller must be the one who created the duty). + * + * @returns true if the update succeeded, false if token didn't match + */ + async recordSuccess(params: RecordSuccessParams): Promise { + const { validatorAddress, slot, dutyType, signature, nodeId, lockToken } = params; + + const success = await this.db.updateDutySigned(validatorAddress, slot, dutyType, signature.toString(), lockToken); + + if (success) { + this.log.info(`Recorded successful signing for duty ${dutyType} at slot ${slot}`, { + validatorAddress: validatorAddress.toString(), + nodeId, + }); + } else { + this.log.warn(`Failed to record successful signing for duty ${dutyType} at slot ${slot}: invalid token`, { + validatorAddress: validatorAddress.toString(), + nodeId, + }); + } + + return success; + } + + /** + * Delete a duty record after a failed signing operation. + * Removes the record to allow another node/attempt to retry. + * Only succeeds if the lockToken matches (caller must be the one who created the duty). + * + * @returns true if the delete succeeded, false if token didn't match + */ + async deleteDuty(params: DeleteDutyParams): Promise { + const { validatorAddress, slot, dutyType, lockToken } = params; + + const success = await this.db.deleteDuty(validatorAddress, slot, dutyType, lockToken); + + if (success) { + this.log.info(`Deleted duty ${dutyType} at slot ${slot} to allow retry`, { + validatorAddress: validatorAddress.toString(), + }); + } else { + this.log.warn(`Failed to delete duty ${dutyType} at slot ${slot}: invalid token`, { + validatorAddress: validatorAddress.toString(), + }); + } + + return success; + } + + /** + * Get the node ID for this service + */ + get nodeId(): string { + return this.config.nodeId; + } + + /** + * Start running tasks. + * Cleanup runs immediately on start to recover from any previous crashes. + */ + start() { + this.cleanupRunningPromise.start(); + this.log.info('Slashing protection service started', { nodeId: this.config.nodeId }); + } + + /** + * Stop the background cleanup task. 
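To make the intended call sequence of `checkAndRecord`, `recordSuccess` and `deleteDuty` concrete, here is a rough caller-side sketch. It is illustrative only (`db`, `config`, `validatorAddress`, `messageHash` and `doSign` are assumed to be provided by the surrounding validator code); in practice `ValidatorHASigner.signWithProtection`, further down in this diff, wraps exactly this sequence.

```typescript
import { SlashingProtectionService } from './slashing_protection_service.js';
import { DutyType } from './types.js';

// Sketch only: `db`, `config`, `validatorAddress`, `messageHash` and `doSign` come from the caller.
const service = new SlashingProtectionService(db, config);
service.start();

// 1. Acquire the distributed lock (throws DutyAlreadySignedError / SlashingProtectionError if not possible).
const lockToken = await service.checkAndRecord({
  validatorAddress,
  slot: 100n,
  blockNumber: 50n,
  dutyType: DutyType.BLOCK_PROPOSAL,
  messageHash,
  nodeId: config.nodeId,
});

try {
  // 2. Perform the actual signing.
  const signature = await doSign(messageHash);
  // 3a. Mark the duty as signed; only succeeds with the lockToken we were handed.
  await service.recordSuccess({
    validatorAddress,
    slot: 100n,
    dutyType: DutyType.BLOCK_PROPOSAL,
    signature,
    nodeId: config.nodeId,
    lockToken,
  });
} catch (err) {
  // 3b. Signing failed: drop the record so another node (or a later retry) can take over.
  await service.deleteDuty({ validatorAddress, slot: 100n, dutyType: DutyType.BLOCK_PROPOSAL, lockToken });
  throw err;
}
```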
+ */ + async stop() { + await this.cleanupRunningPromise.stop(); + this.log.info('Slashing protection service stopped', { nodeId: this.config.nodeId }); + } + + /** + * Cleanup own stuck duties + */ + private async cleanupStuckDuties() { + const numDuties = await this.db.cleanupOwnStuckDuties(this.config.nodeId, this.config.maxStuckDutiesAgeMs); + if (numDuties > 0) { + this.log.info(`Cleaned up ${numDuties} stuck duties`, { + nodeId: this.config.nodeId, + maxStuckDutiesAgeMs: this.config.maxStuckDutiesAgeMs, + }); + } + } +} diff --git a/yarn-project/validator-ha-signer/src/types.ts b/yarn-project/validator-ha-signer/src/types.ts new file mode 100644 index 000000000000..5d2e4c4a0262 --- /dev/null +++ b/yarn-project/validator-ha-signer/src/types.ts @@ -0,0 +1,105 @@ +import type { EthAddress } from '@aztec/foundation/eth-address'; + +import type { Pool } from 'pg'; + +import type { CreateHASignerConfig, SlashingProtectionConfig } from './config.js'; +import type { + CheckAndRecordParams, + DeleteDutyParams, + DutyIdentifier, + DutyType, + RecordSuccessParams, + ValidatorDutyRecord, +} from './db/types.js'; + +export type { + CheckAndRecordParams, + CreateHASignerConfig, + DeleteDutyParams, + DutyIdentifier, + RecordSuccessParams, + SlashingProtectionConfig, + ValidatorDutyRecord, +}; +export { DutyStatus, DutyType } from './db/types.js'; + +/** + * Result of tryInsertOrGetExisting operation + */ +export interface TryInsertOrGetResult { + /** True if we inserted a new record, false if we got an existing record */ + isNew: boolean; + /** The record (either newly inserted or existing) */ + record: ValidatorDutyRecord; +} + +/** + * deps for creating an HA signer + */ +export interface CreateHASignerDeps { + /** + * Optional PostgreSQL connection pool + * If provided, databaseUrl and poolConfig are ignored + */ + pool?: Pool; +} + +/** + * Context required for slashing protection during signing operations + */ +export interface SigningContext { + /** Slot number for this duty */ + slot: bigint; + /** Block number for this duty */ + blockNumber: bigint; + /** Type of duty being performed */ + dutyType: DutyType; +} + +/** + * Database interface for slashing protection operations + * This abstraction allows for different database implementations (PostgreSQL, SQLite, etc.) + * + * The interface is designed around 3 core operations: + * 1. tryInsertOrGetExisting - Atomically insert or get existing record (eliminates race conditions) + * 2. updateDutySigned - Update to signed status on success + * 3. deleteDuty - Delete a duty record on failure + */ +export interface SlashingProtectionDatabase { + /** + * Atomically try to insert a new duty record, or get the existing one if present. + * + * @returns { isNew: true, record } if we successfully inserted and acquired the lock + * @returns { isNew: false, record } if a record already exists (caller should handle based on status) + */ + tryInsertOrGetExisting(params: CheckAndRecordParams): Promise; + + /** + * Update a duty to 'signed' status with the signature. + * Only succeeds if the lockToken matches (caller must be the one who created the duty). + * + * @returns true if the update succeeded, false if token didn't match or duty not found + */ + updateDutySigned( + validatorAddress: EthAddress, + slot: bigint, + dutyType: DutyType, + signature: string, + lockToken: string, + ): Promise; + + /** + * Delete a duty record. + * Only succeeds if the lockToken matches (caller must be the one who created the duty). 
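Because `SlashingProtectionDatabase` is described as an abstraction over multiple backends, a minimal in-memory sketch of the contract may help clarify it. The class below is hypothetical and not part of this patch (which ships only the PostgreSQL-backed `PostgresSlashingProtectionDatabase`); it ignores persistence and real cross-process concurrency.

```typescript
import { randomUUID } from 'crypto';

import type { EthAddress } from '@aztec/foundation/eth-address';

import { type CheckAndRecordParams, DutyStatus, type DutyType, type ValidatorDutyRecord } from './db/types.js';
import type { SlashingProtectionDatabase, TryInsertOrGetResult } from './types.js';

/** Purely illustrative in-memory backend; not suitable for HA coordination across processes. */
export class InMemorySlashingProtectionDatabase implements SlashingProtectionDatabase {
  private readonly duties = new Map<string, ValidatorDutyRecord>();

  private key(validatorAddress: EthAddress, slot: bigint, dutyType: DutyType): string {
    return `${validatorAddress.toString()}:${slot}:${dutyType}`;
  }

  public tryInsertOrGetExisting(params: CheckAndRecordParams): Promise<TryInsertOrGetResult> {
    const key = this.key(params.validatorAddress, params.slot, params.dutyType);
    const existing = this.duties.get(key);
    if (existing) {
      return Promise.resolve({ isNew: false, record: existing });
    }
    const record: ValidatorDutyRecord = {
      ...params,
      status: DutyStatus.SIGNING,
      lockToken: randomUUID(),
      startedAt: new Date(),
    };
    this.duties.set(key, record);
    return Promise.resolve({ isNew: true, record });
  }

  public updateDutySigned(
    validatorAddress: EthAddress,
    slot: bigint,
    dutyType: DutyType,
    signature: string,
    lockToken: string,
  ): Promise<boolean> {
    const record = this.duties.get(this.key(validatorAddress, slot, dutyType));
    if (!record || record.lockToken !== lockToken) {
      return Promise.resolve(false);
    }
    record.status = DutyStatus.SIGNED;
    record.signature = signature;
    record.completedAt = new Date();
    return Promise.resolve(true);
  }

  public deleteDuty(
    validatorAddress: EthAddress,
    slot: bigint,
    dutyType: DutyType,
    lockToken: string,
  ): Promise<boolean> {
    const key = this.key(validatorAddress, slot, dutyType);
    const record = this.duties.get(key);
    if (!record || record.lockToken !== lockToken) {
      return Promise.resolve(false);
    }
    this.duties.delete(key);
    return Promise.resolve(true);
  }

  public cleanupOwnStuckDuties(nodeId: string, maxAgeMs: number): Promise<number> {
    let removed = 0;
    const cutoff = Date.now() - maxAgeMs;
    for (const [key, record] of this.duties) {
      if (record.nodeId === nodeId && record.status === DutyStatus.SIGNING && record.startedAt.getTime() < cutoff) {
        this.duties.delete(key);
        removed++;
      }
    }
    return Promise.resolve(removed);
  }
}
```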
+ * Used when signing fails to allow another node/attempt to retry. + * + * @returns true if the delete succeeded, false if token didn't match or duty not found + */ + deleteDuty(validatorAddress: EthAddress, slot: bigint, dutyType: DutyType, lockToken: string): Promise; + + /** + * Cleanup own stuck duties + * @returns the number of duties cleaned up + */ + cleanupOwnStuckDuties(nodeId: string, maxAgeMs: number): Promise; +} diff --git a/yarn-project/validator-ha-signer/src/validator_ha_signer.test.ts b/yarn-project/validator-ha-signer/src/validator_ha_signer.test.ts new file mode 100644 index 000000000000..a2bf8797caf4 --- /dev/null +++ b/yarn-project/validator-ha-signer/src/validator_ha_signer.test.ts @@ -0,0 +1,450 @@ +import { Buffer32 } from '@aztec/foundation/buffer'; +import { EthAddress } from '@aztec/foundation/eth-address'; +import type { Signature } from '@aztec/foundation/eth-signature'; +import { sleep } from '@aztec/foundation/sleep'; + +import { PGlite } from '@electric-sql/pglite'; +import { afterEach, beforeEach, describe, expect, it, jest } from '@jest/globals'; +import { Pool } from '@middle-management/pglite-pg-adapter'; + +import { type CreateHASignerConfig, defaultSlashingProtectionConfig } from './config.js'; +import { PostgresSlashingProtectionDatabase } from './db/postgres.js'; +import { setupTestSchema } from './db/test_helper.js'; +import { DutyStatus, DutyType } from './db/types.js'; +import { DutyAlreadySignedError, SlashingProtectionError } from './errors.js'; +import { ValidatorHASigner } from './validator_ha_signer.js'; + +// Test data constants +const VALIDATOR_ADDRESS = EthAddress.random(); +const MESSAGE_HASH = Buffer32.random(); +const MESSAGE_HASH_2 = Buffer32.random(); +const NODE_ID = 'test-node-1'; +const SIGNATURE_STRING = '0xsignature123'; + +// Mock signature +const mockSignature = { + toString: () => SIGNATURE_STRING, +} as unknown as Signature; + +describe('ValidatorHASigner', () => { + let pglite: PGlite; + let pool: Pool; + let db: PostgresSlashingProtectionDatabase; + let config: CreateHASignerConfig; + + beforeEach(async () => { + pglite = new PGlite(); + pool = new Pool({ pglite }); + + await setupTestSchema(pglite); + db = new PostgresSlashingProtectionDatabase(pool as any); + await db.initialize(); + + config = { + enabled: true, + nodeId: NODE_ID, + pollingIntervalMs: 50, + signingTimeoutMs: 1000, + maxStuckDutiesAgeMs: 60_000, + databaseUrl: 'postgresql://user:pass@localhost:5432/testdb', + }; + }); + + afterEach(async () => { + await pool.end(); + }); + + describe('initialization', () => { + it('should initialize with slashing protection enabled', () => { + const signer = new ValidatorHASigner(db, config); + expect(signer.isEnabled).toBe(true); + expect(signer.nodeId).toBe(NODE_ID); + }); + + it('should not initialize when nodeId is not explicitly set', () => { + const defaultConfig = { ...defaultSlashingProtectionConfig }; + expect( + () => + new ValidatorHASigner(db, { ...defaultConfig, databaseUrl: 'postgresql://user:pass@localhost:5432/testdb' }), + ).toThrow('NODE_ID is required for high-availability setups'); + }); + + it('should not initialize when enabled is false', () => { + const disabledConfig = { ...config, enabled: false }; + expect(() => new ValidatorHASigner(db, disabledConfig)).toThrow('Validator HA Signer is not enabled in config'); + }); + }); + + describe('lifecycle', () => { + it('should start and stop without error when enabled', async () => { + const signer = new ValidatorHASigner(db, config); + signer.start(); + 
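For completeness, a hedged sketch of how a host process might wire the `start()`/`stop()` lifecycle exercised by this test into graceful shutdown. It is illustrative only; `config` is assumed to be assembled elsewhere by the host node.

```typescript
import { createHASigner } from './factory.js';

// Sketch only: `config` is assumed to be a valid CreateHASignerConfig built by the host node.
const { signer } = await createHASigner(config);
signer.start();

// Stop the background cleanup task during graceful shutdown.
const shutdown = async () => {
  await signer.stop();
  process.exit(0);
};
process.once('SIGINT', () => void shutdown());
process.once('SIGTERM', () => void shutdown());
```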
await signer.stop(); + }); + }); + + describe('signWithProtection - enabled', () => { + let signer: ValidatorHASigner; + let signFn: jest.Mock<(messageHash: Buffer32) => Promise>; + + beforeEach(() => { + signer = new ValidatorHASigner(db, config); + signer.start(); + signFn = jest.fn<(messageHash: Buffer32) => Promise>(); + signFn.mockResolvedValue(mockSignature); + }); + + afterEach(async () => { + await signer.stop(); + }); + + it('should sign successfully on first attempt', async () => { + const result = await signer.signWithProtection( + VALIDATOR_ADDRESS, + MESSAGE_HASH, + { + slot: 100n, + blockNumber: 50n, + dutyType: DutyType.BLOCK_PROPOSAL, + }, + signFn, + ); + + expect(result).toBe(mockSignature); + expect(signFn).toHaveBeenCalledWith(MESSAGE_HASH); + expect(signFn).toHaveBeenCalledTimes(1); + + // Verify duty was recorded + const dutyResult = await db.tryInsertOrGetExisting({ + validatorAddress: VALIDATOR_ADDRESS, + slot: 100n, + blockNumber: 50n, + dutyType: DutyType.BLOCK_PROPOSAL, + messageHash: MESSAGE_HASH.toString(), + nodeId: NODE_ID, + }); + expect(dutyResult.isNew).toBe(false); + expect(dutyResult.record.status).toBe(DutyStatus.SIGNED); + expect(dutyResult.record.signature).toBe(SIGNATURE_STRING); + }); + + it('should delete duty when signing function throws', async () => { + const error = new Error('Signing failed'); + signFn.mockRejectedValue(error); + + await expect( + signer.signWithProtection( + VALIDATOR_ADDRESS, + MESSAGE_HASH, + { + slot: 100n, + blockNumber: 50n, + dutyType: DutyType.BLOCK_PROPOSAL, + }, + signFn, + ), + ).rejects.toThrow('Signing failed'); + + // Verify duty was deleted + const dutyResult = await db.tryInsertOrGetExisting({ + validatorAddress: VALIDATOR_ADDRESS, + slot: 100n, + blockNumber: 50n, + dutyType: DutyType.BLOCK_PROPOSAL, + messageHash: MESSAGE_HASH.toString(), + nodeId: NODE_ID, + }); + expect(dutyResult.isNew).toBe(true); + }); + + it('should throw DutyAlreadySignedError when duty already signed', async () => { + // First signing + await signer.signWithProtection( + VALIDATOR_ADDRESS, + MESSAGE_HASH, + { + slot: 100n, + blockNumber: 50n, + dutyType: DutyType.BLOCK_PROPOSAL, + }, + signFn, + ); + + // Second attempt with same data + await expect( + signer.signWithProtection( + VALIDATOR_ADDRESS, + MESSAGE_HASH, + { + slot: 100n, + blockNumber: 50n, + dutyType: DutyType.BLOCK_PROPOSAL, + }, + signFn, + ), + ).rejects.toThrow(DutyAlreadySignedError); + + // Sign function should only be called once + expect(signFn).toHaveBeenCalledTimes(1); + }); + + it('should throw SlashingProtectionError when signing different data for same slot', async () => { + // First signing + await signer.signWithProtection( + VALIDATOR_ADDRESS, + MESSAGE_HASH, + { + slot: 100n, + blockNumber: 50n, + dutyType: DutyType.BLOCK_PROPOSAL, + }, + signFn, + ); + + // Second attempt with different data + await expect( + signer.signWithProtection( + VALIDATOR_ADDRESS, + MESSAGE_HASH_2, + { + slot: 100n, + blockNumber: 50n, + dutyType: DutyType.BLOCK_PROPOSAL, + }, + signFn, + ), + ).rejects.toThrow(SlashingProtectionError); + + // Sign function should only be called once + expect(signFn).toHaveBeenCalledTimes(1); + }); + + it('should allow signing different duty types for same slot', async () => { + const messageHash = Buffer32.random(); + // Sign block proposal + await signer.signWithProtection( + VALIDATOR_ADDRESS, + MESSAGE_HASH, + { + slot: 100n, + blockNumber: 50n, + dutyType: DutyType.BLOCK_PROPOSAL, + }, + signFn, + ); + + // Sign attestation for same slot + 
await signer.signWithProtection( + VALIDATOR_ADDRESS, + messageHash, + { + slot: 100n, + blockNumber: 50n, + dutyType: DutyType.ATTESTATION, + }, + signFn, + ); + + expect(signFn).toHaveBeenCalledTimes(2); + + // Verify both duties exist + const blockDutyResult = await db.tryInsertOrGetExisting({ + validatorAddress: VALIDATOR_ADDRESS, + slot: 100n, + blockNumber: 50n, + dutyType: DutyType.BLOCK_PROPOSAL, + messageHash: MESSAGE_HASH.toString(), + nodeId: NODE_ID, + }); + const attestationDutyResult = await db.tryInsertOrGetExisting({ + validatorAddress: VALIDATOR_ADDRESS, + slot: 100n, + blockNumber: 50n, + dutyType: DutyType.ATTESTATION, + messageHash: messageHash.toString(), + nodeId: NODE_ID, + }); + expect(blockDutyResult.isNew).toBe(false); + expect(attestationDutyResult.isNew).toBe(false); + expect(attestationDutyResult.record.messageHash).toBe(messageHash.toString()); + }); + + it('should allow signing different slots', async () => { + // Sign slot 100 + await signer.signWithProtection( + VALIDATOR_ADDRESS, + MESSAGE_HASH, + { + slot: 100n, + blockNumber: 50n, + dutyType: DutyType.BLOCK_PROPOSAL, + }, + signFn, + ); + + // Sign slot 101 + await signer.signWithProtection( + VALIDATOR_ADDRESS, + MESSAGE_HASH, + { + slot: 101n, + blockNumber: 51n, + dutyType: DutyType.BLOCK_PROPOSAL, + }, + signFn, + ); + + expect(signFn).toHaveBeenCalledTimes(2); + }); + + it('should handle all duty types', async () => { + const dutyTypes: DutyType[] = [DutyType.BLOCK_PROPOSAL, DutyType.ATTESTATION, DutyType.ATTESTATIONS_AND_SIGNERS]; + for (const dutyType of dutyTypes) { + await signer.signWithProtection( + VALIDATOR_ADDRESS, + MESSAGE_HASH, + { slot: 100n, blockNumber: 50n, dutyType }, + signFn, + ); + } + expect(signFn).toHaveBeenCalledTimes(dutyTypes.length); + }); + + it('should handle multiple validator addresses', async () => { + const validator1 = EthAddress.fromString('0xf39fd6e51aad88f6f4ce6ab8827279cfffb92266'); + const validator2 = EthAddress.fromString('0x70997970c51812dc3a010c7d01b50e0d17dc79c8'); + + // Same slot but different validators should both succeed + await signer.signWithProtection( + validator1, + MESSAGE_HASH, + { + slot: 100n, + blockNumber: 50n, + dutyType: DutyType.BLOCK_PROPOSAL, + }, + signFn, + ); + + await signer.signWithProtection( + validator2, + MESSAGE_HASH, + { + slot: 100n, + blockNumber: 50n, + dutyType: DutyType.BLOCK_PROPOSAL, + }, + signFn, + ); + + expect(signFn).toHaveBeenCalledTimes(2); + }); + + it('should handle concurrent signing attempts - first succeeds', async () => { + const localSignFn = jest.fn<(messageHash: Buffer32) => Promise>(); + + // First call sleeps for 200ms then succeeds + localSignFn.mockImplementationOnce(async () => { + await sleep(200); + return mockSignature; + }); + + // Start first signing (don't await) + const firstSign = signer.signWithProtection( + VALIDATOR_ADDRESS, + MESSAGE_HASH, + { + slot: 100n, + blockNumber: 50n, + dutyType: DutyType.BLOCK_PROPOSAL, + }, + localSignFn, + ); + + // Wait a bit to ensure first signing has started + await sleep(50); + + // Start second signing while first is in progress + const secondSign = signer.signWithProtection( + VALIDATOR_ADDRESS, + MESSAGE_HASH, + { + slot: 100n, + blockNumber: 50n, + dutyType: DutyType.BLOCK_PROPOSAL, + }, + localSignFn, + ); + + // First should succeed + await expect(firstSign).resolves.toBe(mockSignature); + + // Second should throw DutyAlreadySignedError after waiting + await expect(secondSign).rejects.toThrow(DutyAlreadySignedError); + + // Only first signer 
should have called the signing function + expect(localSignFn).toHaveBeenCalledTimes(1); + }); + + it('should handle concurrent signing attempts - first fails, second succeeds', async () => { + const localSignFn = jest.fn<(messageHash: Buffer32) => Promise>(); + + // First call sleeps for 200ms then fails + localSignFn.mockImplementationOnce(async () => { + await sleep(200); + throw new Error('Signing failed'); + }); + + // Second call succeeds + localSignFn.mockImplementationOnce(() => Promise.resolve(mockSignature)); + + // Start first signing (don't await) + const firstSign = signer.signWithProtection( + VALIDATOR_ADDRESS, + MESSAGE_HASH, + { + slot: 100n, + blockNumber: 50n, + dutyType: DutyType.BLOCK_PROPOSAL, + }, + localSignFn, + ); + + // Wait a bit to ensure first signing has started + await sleep(50); + + // Start second signing while first is in progress + const secondSign = signer.signWithProtection( + VALIDATOR_ADDRESS, + MESSAGE_HASH, + { + slot: 100n, + blockNumber: 50n, + dutyType: DutyType.BLOCK_PROPOSAL, + }, + localSignFn, + ); + + // First should fail + await expect(firstSign).rejects.toThrow('Signing failed'); + + // Second should succeed after waiting for first to fail + await expect(secondSign).resolves.toBe(mockSignature); + + // Both signers should have called the signing function + expect(localSignFn).toHaveBeenCalledTimes(2); + + // Verify the duty is marked as signed by the second signer + const dutyResult = await db.tryInsertOrGetExisting({ + validatorAddress: VALIDATOR_ADDRESS, + slot: 100n, + blockNumber: 50n, + dutyType: DutyType.BLOCK_PROPOSAL, + messageHash: MESSAGE_HASH.toString(), + nodeId: NODE_ID, + }); + expect(dutyResult.isNew).toBe(false); + expect(dutyResult.record.status).toBe(DutyStatus.SIGNED); + }); + }); +}); diff --git a/yarn-project/validator-ha-signer/src/validator_ha_signer.ts b/yarn-project/validator-ha-signer/src/validator_ha_signer.ts new file mode 100644 index 000000000000..c3633d8d0e20 --- /dev/null +++ b/yarn-project/validator-ha-signer/src/validator_ha_signer.ts @@ -0,0 +1,164 @@ +/** + * Validator High Availability Signer + * + * Wraps signing operations with distributed locking and slashing protection. + * This ensures that even with multiple validator nodes running, only one + * node will sign for a given duty (slot + duty type). + */ +import type { Buffer32 } from '@aztec/foundation/buffer'; +import type { EthAddress } from '@aztec/foundation/eth-address'; +import type { Signature } from '@aztec/foundation/eth-signature'; +import { type Logger, createLogger } from '@aztec/foundation/log'; + +import type { CreateHASignerConfig } from './config.js'; +import { SlashingProtectionService } from './slashing_protection_service.js'; +import type { SigningContext, SlashingProtectionDatabase } from './types.js'; + +/** + * Validator High Availability Signer + * + * Provides signing capabilities with distributed locking for validators + * in a high-availability setup. 
+ * + * Usage: + * ``` + * const signer = new ValidatorHASigner(db, config); + * + * // Sign with slashing protection + * const signature = await signer.signWithProtection( + * validatorAddress, + * messageHash, + * { slot: 100n, blockNumber: 50n, dutyType: 'BLOCK_PROPOSAL' }, + * async (root) => localSigner.signMessage(root), + * ); + * ``` + */ +export class ValidatorHASigner { + private readonly log: Logger; + private readonly slashingProtection: SlashingProtectionService | undefined; + + constructor( + db: SlashingProtectionDatabase, + private readonly config: CreateHASignerConfig, + ) { + this.log = createLogger('validator-ha-signer'); + + if (!config.enabled) { + // this shouldn't happen, the validator should use different signer for non-HA setups + throw new Error('Validator HA Signer is not enabled in config'); + } + + if (!config.nodeId || config.nodeId === '') { + throw new Error('NODE_ID is required for high-availability setups'); + } + this.slashingProtection = new SlashingProtectionService(db, config); + this.log.info('Validator HA Signer initialized with slashing protection', { + nodeId: config.nodeId, + }); + } + + /** + * Sign a message with slashing protection. + * + * This method: + * 1. Acquires a distributed lock for (validator, slot, dutyType) + * 2. Calls the provided signing function + * 3. Records the result (success or failure) + * + * @param validatorAddress - The validator's Ethereum address + * @param messageHash - The hash to be signed + * @param context - The signing context (slot, block number, duty type) + * @param signFn - Function that performs the actual signing + * @returns The signature + * + * @throws DutyAlreadySignedError if the duty was already signed (expected in HA) + * @throws SlashingProtectionError if attempting to sign different data for same slot (expected in HA) + */ + async signWithProtection( + validatorAddress: EthAddress, + messageHash: Buffer32, + context: SigningContext, + signFn: (messageHash: Buffer32) => Promise, + ): Promise { + // If slashing protection is disabled, just sign directly + if (!this.slashingProtection) { + this.log.info('Signing without slashing protection enabled', { + validatorAddress: validatorAddress.toString(), + nodeId: this.config.nodeId, + dutyType: context.dutyType, + slot: context.slot, + blockNumber: context.blockNumber, + }); + return await signFn(messageHash); + } + + const { slot, blockNumber, dutyType } = context; + + // Acquire lock and get the token for ownership verification + const lockToken = await this.slashingProtection.checkAndRecord({ + validatorAddress, + slot, + blockNumber, + dutyType, + messageHash: messageHash.toString(), + nodeId: this.config.nodeId, + }); + + // Perform signing + let signature: Signature; + try { + signature = await signFn(messageHash); + } catch (error: any) { + // Delete duty to allow retry (only succeeds if we own the lock) + await this.slashingProtection.deleteDuty({ + validatorAddress, + slot, + dutyType, + lockToken, + }); + throw error; + } + + // Record success (only succeeds if we own the lock) + await this.slashingProtection.recordSuccess({ + validatorAddress, + slot, + dutyType, + signature, + nodeId: this.config.nodeId, + lockToken, + }); + + return signature; + } + + /** + * Check if slashing protection is enabled + */ + get isEnabled(): boolean { + return this.slashingProtection !== undefined; + } + + /** + * Get the node ID for this signer + */ + get nodeId(): string { + return this.config.nodeId; + } + + /** + * Start the HA signer background tasks 
(cleanup of stuck duties). + * Should be called after construction and before signing operations. + */ + start() { + this.slashingProtection?.start(); + } + + /** + * Stop the HA signer background tasks. + * Should be called during graceful shutdown. + */ + async stop() { + await this.slashingProtection?.stop(); + } +} diff --git a/yarn-project/validator-ha-signer/tsconfig.json b/yarn-project/validator-ha-signer/tsconfig.json new file mode 100644 index 000000000000..eb63cc029db2 --- /dev/null +++ b/yarn-project/validator-ha-signer/tsconfig.json @@ -0,0 +1,17 @@ +{ + "extends": "..", + "compilerOptions": { + "outDir": "dest", + "rootDir": "src", + "tsBuildInfoFile": ".tsbuildinfo" + }, + "references": [ + { + "path": "../foundation" + }, + { + "path": "../node-keystore" + } + ], + "include": ["src"] +} diff --git a/yarn-project/yarn.lock b/yarn-project/yarn.lock index f3b122d0763e..5e0b0fab478b 100644 --- a/yarn-project/yarn.lock +++ b/yarn-project/yarn.lock @@ -931,6 +931,7 @@ __metadata: "@aztec/telemetry-client": "workspace:^" "@aztec/test-wallet": "workspace:^" "@aztec/txe": "workspace:^" + "@aztec/validator-ha-signer": "workspace:^" "@aztec/world-state": "workspace:^" "@jest/globals": "npm:^30.0.0" "@types/chalk": "npm:^2.2.0" @@ -2287,6 +2288,30 @@ __metadata: languageName: unknown linkType: soft +"@aztec/validator-ha-signer@workspace:^, @aztec/validator-ha-signer@workspace:validator-ha-signer": + version: 0.0.0-use.local + resolution: "@aztec/validator-ha-signer@workspace:validator-ha-signer" + dependencies: + "@aztec/foundation": "workspace:^" + "@aztec/node-keystore": "workspace:^" + "@electric-sql/pglite": "npm:^0.2.17" + "@jest/globals": "npm:^30.0.0" + "@middle-management/pglite-pg-adapter": "npm:^0.0.3" + "@types/jest": "npm:^30.0.0" + "@types/node": "npm:^22.15.17" + "@types/node-pg-migrate": "npm:^2.3.1" + "@types/pg": "npm:^8.10.9" + "@typescript/native-preview": "npm:7.0.0-dev.20251126.1" + jest: "npm:^30.0.0" + jest-mock-extended: "npm:^4.0.0" + node-pg-migrate: "npm:^8.0.4" + pg: "npm:^8.11.3" + ts-node: "npm:^10.9.1" + tslib: "npm:^2.4.0" + typescript: "npm:^5.3.3" + languageName: unknown + linkType: soft + "@aztec/wallet-sdk@workspace:^, @aztec/wallet-sdk@workspace:wallet-sdk": version: 0.0.0-use.local resolution: "@aztec/wallet-sdk@workspace:wallet-sdk" @@ -2893,6 +2918,13 @@ __metadata: languageName: node linkType: hard +"@electric-sql/pglite@npm:^0.2.17": + version: 0.2.17 + resolution: "@electric-sql/pglite@npm:0.2.17" + checksum: 10/cb04d18614c9a3d985fa63f5ca889104eab55b15a9aa569df264e335ea559f2279ee64c1284aa68d5b92ff516dd37ae553299cf55c226a3e541d6c46416c7160 + languageName: node + linkType: hard + "@emnapi/core@npm:^1.4.3": version: 1.4.3 resolution: "@emnapi/core@npm:1.4.3" @@ -4868,6 +4900,15 @@ __metadata: languageName: node linkType: hard +"@middle-management/pglite-pg-adapter@npm:^0.0.3": + version: 0.0.3 + resolution: "@middle-management/pglite-pg-adapter@npm:0.0.3" + peerDependencies: + "@electric-sql/pglite": ^0.3.0 + checksum: 10/5526fdbc3cad9d30c868a576c67d7763877338f99df10b3cf478416d4b83c6e65b8f5e2984bebb9fe4e87b998ec011efff900374797b677868a25eb9392709da + languageName: node + linkType: hard + "@modelcontextprotocol/sdk@npm:^1.8.0": version: 1.11.2 resolution: "@modelcontextprotocol/sdk@npm:1.11.2" @@ -7869,6 +7910,13 @@ __metadata: languageName: node linkType: hard +"@types/node-pg-migrate@npm:^2.3.1": + version: 2.3.1 + resolution: "@types/node-pg-migrate@npm:2.3.1" + checksum: 
10/015cd2f9c7925864a363b7d043b14bd12cccd5ee58509cf613ebbdc0e56de8adda34ab9cd90e76eabec314e378c1061d7be3a2c9f1cbf36cb78d33d95c96d81a + languageName: node + linkType: hard + "@types/node@npm:*, @types/node@npm:>=13.7.0, @types/node@npm:^22.1.0, @types/node@npm:^22.15.17": version: 22.15.17 resolution: "@types/node@npm:22.15.17" @@ -7899,6 +7947,17 @@ __metadata: languageName: node linkType: hard +"@types/pg@npm:^8.10.9": + version: 8.16.0 + resolution: "@types/pg@npm:8.16.0" + dependencies: + "@types/node": "npm:*" + pg-protocol: "npm:*" + pg-types: "npm:^2.2.0" + checksum: 10/c03346fbe87728a237f30a3d0a436b86ede88e1dc471782bf679a4d74d67ee2a96f953e7c04d73841d21b9db43a5bf2ccdf2cd4c75450ea57efd947049809b3a + languageName: node + linkType: hard + "@types/qs@npm:*": version: 6.9.15 resolution: "@types/qs@npm:6.9.15" @@ -13669,7 +13728,7 @@ __metadata: languageName: node linkType: hard -"foreground-child@npm:^3.1.0": +"foreground-child@npm:^3.1.0, foreground-child@npm:^3.3.1": version: 3.3.1 resolution: "foreground-child@npm:3.3.1" dependencies: @@ -14106,6 +14165,22 @@ __metadata: languageName: node linkType: hard +"glob@npm:~11.1.0": + version: 11.1.0 + resolution: "glob@npm:11.1.0" + dependencies: + foreground-child: "npm:^3.3.1" + jackspeak: "npm:^4.1.1" + minimatch: "npm:^10.1.1" + minipass: "npm:^7.1.2" + package-json-from-dist: "npm:^1.0.0" + path-scurry: "npm:^2.0.0" + bin: + glob: dist/esm/bin.mjs + checksum: 10/da4501819633daff8822c007bb3f93d5c4d2cbc7b15a8e886660f4497dd251a1fb4f53a85fba1e760b31704eff7164aeb2c7a82db10f9f2c362d12c02fe52cf3 + languageName: node + linkType: hard + "globals@npm:^11.1.0": version: 11.12.0 resolution: "globals@npm:11.12.0" @@ -15772,6 +15847,15 @@ __metadata: languageName: node linkType: hard +"jackspeak@npm:^4.1.1": + version: 4.1.1 + resolution: "jackspeak@npm:4.1.1" + dependencies: + "@isaacs/cliui": "npm:^8.0.2" + checksum: 10/ffceb270ec286841f48413bfb4a50b188662dfd599378ce142b6540f3f0a66821dc9dcb1e9ebc55c6c3b24dc2226c96e5819ba9bd7a241bd29031b61911718c7 + languageName: node + linkType: hard + "jake@npm:^10.8.5": version: 10.9.2 resolution: "jake@npm:10.9.2" @@ -18005,6 +18089,24 @@ __metadata: languageName: node linkType: hard +"node-pg-migrate@npm:^8.0.4": + version: 8.0.4 + resolution: "node-pg-migrate@npm:8.0.4" + dependencies: + glob: "npm:~11.1.0" + yargs: "npm:~17.7.0" + peerDependencies: + "@types/pg": ">=6.0.0 <9.0.0" + pg: ">=4.3.0 <9.0.0" + peerDependenciesMeta: + "@types/pg": + optional: true + bin: + node-pg-migrate: bin/node-pg-migrate.js + checksum: 10/55525ea4bea9fa9cf4f26352e1c8d95bddb1186ae3f40a363863f4389e23317e855d79aba3dab61c54c90fafa7892ea61f8871b996f10193968e1827dbff27f4 + languageName: node + linkType: hard + "node-releases@npm:^2.0.18": version: 2.0.18 resolution: "node-releases@npm:2.0.18" @@ -18557,6 +18659,13 @@ __metadata: languageName: node linkType: hard +"package-json-from-dist@npm:^1.0.0": + version: 1.0.1 + resolution: "package-json-from-dist@npm:1.0.1" + checksum: 10/58ee9538f2f762988433da00e26acc788036914d57c71c246bf0be1b60cdbd77dd60b6a3e1a30465f0b248aeb80079e0b34cb6050b1dfa18c06953bb1cbc7602 + languageName: node + linkType: hard + "pako@npm:^2.1.0": version: 2.1.0 resolution: "pako@npm:2.1.0" @@ -18818,6 +18927,87 @@ __metadata: languageName: node linkType: hard +"pg-cloudflare@npm:^1.2.7": + version: 1.2.7 + resolution: "pg-cloudflare@npm:1.2.7" + checksum: 10/3d171407cbce36436c461200666ba6bd884bfe98016972760a797cec850199b8024a40055d80322c5fc02909e1533a144e9b108a99f7d7e21d0c42612f9821fb + languageName: node + linkType: 
hard + +"pg-connection-string@npm:^2.9.1": + version: 2.9.1 + resolution: "pg-connection-string@npm:2.9.1" + checksum: 10/40e9e9cd752121e72bff18d83e6c7ecda9056426815a84294de018569a319293c924704c8b7f0604fdc588835c7927647dea4f3c87a014e715bcbb17d794e9f0 + languageName: node + linkType: hard + +"pg-int8@npm:1.0.1": + version: 1.0.1 + resolution: "pg-int8@npm:1.0.1" + checksum: 10/a1e3a05a69005ddb73e5f324b6b4e689868a447c5fa280b44cd4d04e6916a344ac289e0b8d2695d66e8e89a7fba023affb9e0e94778770ada5df43f003d664c9 + languageName: node + linkType: hard + +"pg-pool@npm:^3.10.1": + version: 3.10.1 + resolution: "pg-pool@npm:3.10.1" + peerDependencies: + pg: ">=8.0" + checksum: 10/b389a714be59ebe53ec412cbff513191cc0b7a203faa5d26416b6a038cafdfe30fbf1a5936b77bb76109c49bd7c4a116870a5a46a45796b1b34c96f016d7fbe2 + languageName: node + linkType: hard + +"pg-protocol@npm:*, pg-protocol@npm:^1.10.3": + version: 1.10.3 + resolution: "pg-protocol@npm:1.10.3" + checksum: 10/31da85319084c03f403efee7accce9786964df82a7feb60e6bd77b71f1e622c74a2a644a2bc434389d0ab92e5abdeedea69ebdb53b1897d9f01d2a1f51a8a2fe + languageName: node + linkType: hard + +"pg-types@npm:2.2.0, pg-types@npm:^2.2.0": + version: 2.2.0 + resolution: "pg-types@npm:2.2.0" + dependencies: + pg-int8: "npm:1.0.1" + postgres-array: "npm:~2.0.0" + postgres-bytea: "npm:~1.0.0" + postgres-date: "npm:~1.0.4" + postgres-interval: "npm:^1.1.0" + checksum: 10/87a84d4baa91378d3a3da6076c69685eb905d1087bf73525ae1ba84b291b9dd8738c6716b333d8eac6cec91bf087237adc3e9281727365e9cbab0d9d072778b1 + languageName: node + linkType: hard + +"pg@npm:^8.11.3": + version: 8.16.3 + resolution: "pg@npm:8.16.3" + dependencies: + pg-cloudflare: "npm:^1.2.7" + pg-connection-string: "npm:^2.9.1" + pg-pool: "npm:^3.10.1" + pg-protocol: "npm:^1.10.3" + pg-types: "npm:2.2.0" + pgpass: "npm:1.0.5" + peerDependencies: + pg-native: ">=3.0.1" + dependenciesMeta: + pg-cloudflare: + optional: true + peerDependenciesMeta: + pg-native: + optional: true + checksum: 10/6a2885a3f581d6c6dddddf5a4bb2790ee84f402ed7d73ece8b6bc102c58c17e4c5f17894c241633aa2f1d4fedd8f2401a80a9a02ef18bb57d05cbbfd8a53ca4d + languageName: node + linkType: hard + +"pgpass@npm:1.0.5": + version: 1.0.5 + resolution: "pgpass@npm:1.0.5" + dependencies: + split2: "npm:^4.1.0" + checksum: 10/0a6f3bf76e36bdb3c20a7e8033140c732767bba7e81f845f7489fc3123a2bd6e3b8e704f08cba86b117435414b5d2422e20ba9d5f2efb6f0c75c9efca73e8e87 + languageName: node + linkType: hard + "picocolors@npm:^1.0.0, picocolors@npm:^1.1.0, picocolors@npm:^1.1.1": version: 1.1.1 resolution: "picocolors@npm:1.1.1" @@ -18987,6 +19177,36 @@ __metadata: languageName: node linkType: hard +"postgres-array@npm:~2.0.0": + version: 2.0.0 + resolution: "postgres-array@npm:2.0.0" + checksum: 10/aff99e79714d1271fe942fec4ffa2007b755e7e7dc3d2feecae3f1ceecb86fd3637c8138037fc3d9e7ec369231eeb136843c0b25927bf1ce295245a40ef849b4 + languageName: node + linkType: hard + +"postgres-bytea@npm:~1.0.0": + version: 1.0.0 + resolution: "postgres-bytea@npm:1.0.0" + checksum: 10/d844ae4ca7a941b70e45cac1261a73ee8ed39d72d3d74ab1d645248185a1b7f0ac91a3c63d6159441020f4e1f7fe64689ac56536a307b31cef361e5187335090 + languageName: node + linkType: hard + +"postgres-date@npm:~1.0.4": + version: 1.0.7 + resolution: "postgres-date@npm:1.0.7" + checksum: 10/571ef45bec4551bb5d608c31b79987d7a895141f7d6c7b82e936a52d23d97474c770c6143e5cf8936c1cdc8b0dfd95e79f8136bf56a90164182a60f242c19f2b + languageName: node + linkType: hard + +"postgres-interval@npm:^1.1.0": + version: 1.2.0 + resolution: "postgres-interval@npm:1.2.0" 
+ dependencies: + xtend: "npm:^4.0.0" + checksum: 10/746b71f93805ae33b03528e429dc624706d1f9b20ee81bf743263efb6a0cd79ae02a642a8a480dbc0f09547b4315ab7df6ce5ec0be77ed700bac42730f5c76b2 + languageName: node + linkType: hard + "prelude-ls@npm:^1.2.1": version: 1.2.1 resolution: "prelude-ls@npm:1.2.1" @@ -20781,7 +21001,7 @@ __metadata: languageName: node linkType: hard -"split2@npm:^4.0.0": +"split2@npm:^4.0.0, split2@npm:^4.1.0": version: 4.2.0 resolution: "split2@npm:4.2.0" checksum: 10/09bbefc11bcf03f044584c9764cd31a252d8e52cea29130950b26161287c11f519807c5e54bd9e5804c713b79c02cefe6a98f4688630993386be353e03f534ab @@ -23056,7 +23276,7 @@ __metadata: languageName: node linkType: hard -"yargs@npm:17.7.2, yargs@npm:^17.3.1, yargs@npm:^17.7.2": +"yargs@npm:17.7.2, yargs@npm:^17.3.1, yargs@npm:^17.7.2, yargs@npm:~17.7.0": version: 17.7.2 resolution: "yargs@npm:17.7.2" dependencies:
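Editor's note on the signer lifecycle shown earlier in this diff: the class exposes `start()` and `stop()` with the contract that `start()` runs after construction and before any signing, and `stop()` runs during graceful shutdown. A minimal sketch of wiring that contract into a Node process follows; the interface and function names are assumptions for illustration and are not part of this diff.

```ts
// Assumed shape: only the start()/stop() contract comes from the class in this diff.
interface HaSignerLifecycle {
  start(): void;
  stop(): Promise<void>;
}

export function attachSignerLifecycle(signer: HaSignerLifecycle, log: (msg: string) => void): void {
  // Kick off background tasks (e.g. cleanup of stuck duties) before signing begins.
  signer.start();

  const shutdown = async (signal: string) => {
    log(`${signal} received, stopping HA signer background tasks`);
    await signer.stop();
  };
  process.once('SIGTERM', () => void shutdown('SIGTERM'));
  process.once('SIGINT', () => void shutdown('SIGINT'));
}
```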
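Editor's note on the lockfile entries above: they pull in `node-pg-migrate` and `pg` as runtime dependencies of the new `@aztec/validator-ha-signer` package. For orientation only, a node-pg-migrate migration file exports `up`/`down` functions that receive a `MigrationBuilder`; the sketch below uses that real API, but the table and column names are invented for illustration and do not reflect the package's actual schema.

```ts
import type { ColumnDefinitions, MigrationBuilder } from 'node-pg-migrate';

export const shorthands: ColumnDefinitions | undefined = undefined;

export async function up(pgm: MigrationBuilder): Promise<void> {
  // Hypothetical table; the real schema lives in the package's migrations directory.
  pgm.createTable('signed_duties', {
    id: 'id', // built-in shorthand: serial primary key
    node_id: { type: 'text', notNull: true },
    slot: { type: 'bigint', notNull: true },
    created_at: { type: 'timestamptz', notNull: true, default: pgm.func('current_timestamp') },
  });
  pgm.addConstraint('signed_duties', 'signed_duties_node_slot_unique', { unique: ['node_id', 'slot'] });
}

export async function down(pgm: MigrationBuilder): Promise<void> {
  pgm.dropTable('signed_duties');
}
```

node-pg-migrate records applied migrations in a `pgmigrations` table by default, which is how the `up`/`down` commands know what is pending versus what can be rolled back.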