Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
19 commits
Select commit Hold shift + click to select a range
fe26d6a
feat: add schema isolation infrastructure for parallel Jest workers
idoshamun Jan 9, 2026
15be260
feat: enable parallel Jest workers with schema isolation
idoshamun Jan 9, 2026
c5be61b
fix: complete schema isolation with FK constraints and trigger fixes
idoshamun Jan 10, 2026
c5d1b0c
feat: exclude seed data tables from deletion in tests
idoshamun Jan 10, 2026
692be83
fix: create proper sequences in worker schemas for schema isolation
idoshamun Jan 10, 2026
b9bb1ef
fix: explicitly qualify table references in materialized view definitions
idoshamun Jan 12, 2026
0ab7416
fix: lint issue
idoshamun Jan 12, 2026
e8c284f
feat: migrate schema isolation to use migrations instead of schema copy
idoshamun Jan 13, 2026
0ced0b3
fix: lint formatting
idoshamun Jan 13, 2026
1eb78e5
fix: add 30s timeout to beforeEach hook for CI
idoshamun Jan 13, 2026
c2eb2a4
fix: reduce connection pool sizes to prevent OOM on CI
idoshamun Jan 13, 2026
f863730
refactor: move schema creation to globalSetup for better memory efficiency
idoshamun Jan 13, 2026
f237ec0
fix: add global testTimeout of 30s for CI stability
idoshamun Jan 13, 2026
8ef7bc8
perf: optimize cleanDatabase with single TRUNCATE statement
idoshamun Jan 13, 2026
911bc5d
refactor: revert clean database logic
idoshamun Jan 13, 2026
80cd735
fix: add jest.setTimeout(30000) for reliable global timeout
idoshamun Jan 13, 2026
4f88f04
ci: use xlarge resource and remove parallelism to fix OOM
idoshamun Jan 13, 2026
23fe501
fix: clear rate limit key before each rate limiting test
idoshamun Jan 13, 2026
20dd9b8
ci: add sequential test step for Redis pub/sub tests
idoshamun Jan 13, 2026
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
15 changes: 11 additions & 4 deletions .circleci/config.yml
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,7 @@ jobs:
paths:
- build
test:
resource_class: large
resource_class: xlarge
docker:
- image: cimg/node:22.16
- image: postgres:18-alpine
Expand All @@ -35,7 +35,7 @@ jobs:
-c full_page_writes=off
-c max_connections=200
- image: redis/redis-stack:7.2.0-v13
parallelism: 6
parallelism: 1
steps:
- common/setup-node
- common/wait-for:
Expand All @@ -52,12 +52,19 @@ jobs:
name: Test
command: |
TEST=$(./node_modules/.bin/jest --listTests)
echo $TEST | circleci tests run --command="xargs ./node_modules/.bin/jest --testEnvironment=node --ci --runInBand --reporters=default --reporters=jest-junit --" --split-by=timings
echo $TEST | circleci tests run --command="xargs ./node_modules/.bin/jest --testEnvironment=node --ci --maxWorkers=4 --reporters=default --reporters=jest-junit --" --split-by=timings
environment:
NODE_OPTIONS: --max-old-space-size=6144 # 75% of 8GB which is the memory of large resource class
NODE_OPTIONS: --max-old-space-size=12288 # 75% of 16GB which is the memory of xlarge resource class
JEST_JUNIT_OUTPUT_DIR: ./test-results
JEST_JUNIT_ADD_FILE_ATTRIBUTE: "true"
JEST_JUNIT_FILE_PATH_PREFIX: "/home/circleci/project/"
ENABLE_SCHEMA_ISOLATION: "true"
- run:
name: Test (sequential - for Redis pub/sub tests)
command: |
./node_modules/.bin/jest __tests__/workers/newNotificationV2RealTime.ts --testEnvironment=node --runInBand --ci
environment:
NODE_OPTIONS: --max-old-space-size=12288
- store_test_results:
path: ./test-results
- store_artifacts:
Expand Down
12 changes: 8 additions & 4 deletions __tests__/boot.ts
Original file line number Diff line number Diff line change
Expand Up @@ -218,9 +218,12 @@ beforeEach(async () => {
await con.getRepository(User).save(usersFixture[0]);
await con.getRepository(Source).save(sourcesFixture);
await con.getRepository(Post).save(postsFixture);
await ioRedisPool.execute((client) => client.flushall());

await deleteKeysByPattern('njord:cores_balance:*');
// Delete only keys used by boot tests, not flushall (which affects other workers)
await Promise.all([
deleteKeysByPattern('boot:*'),
deleteKeysByPattern('exp:*'),
deleteKeysByPattern('njord:cores_balance:*'),
]);

const mockTransport = createMockNjordTransport();
jest
Expand Down Expand Up @@ -303,7 +306,8 @@ describe('anonymous boot', () => {
.set('User-Agent', TEST_UA)
.expect(200);
expect(first.body.user.firstVisit).toBeTruthy();
await ioRedisPool.execute((client) => client.flushall());
// Clear boot-related keys to simulate data loss, avoiding flushall which affects other workers
await deleteKeysByPattern('boot:*');
const second = await request(app.server)
.get(BASE_PATH)
.set('User-Agent', TEST_UA)
Expand Down
8 changes: 7 additions & 1 deletion __tests__/comments.ts
Original file line number Diff line number Diff line change
Expand Up @@ -1090,6 +1090,12 @@ describe('mutation commentOnPost', () => {
describe('rate limiting', () => {
const redisKey = `${rateLimiterName}:1:createComment`;
const variables = { postId: 'p1', content: 'comment' };

beforeEach(async () => {
// Clear rate limit key before each test to ensure isolation
await deleteKeysByPattern(redisKey);
});

it('store rate limiting state in redis', async () => {
loggedUser = '1';

Expand Down Expand Up @@ -1123,7 +1129,7 @@ describe('mutation commentOnPost', () => {
// Check expiry, to not cause it to be flaky, we check if it is within 10 seconds
expect(await getRedisObjectExpiry(redisKey)).toBeLessThanOrEqual(3600);
expect(await getRedisObjectExpiry(redisKey)).toBeGreaterThanOrEqual(3590);
}, 10_000);
}, 60_000); // 60s - runs 500 mutations sequentially
});

describe('vordr', () => {
Expand Down
188 changes: 188 additions & 0 deletions __tests__/globalSetup.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,188 @@
import { DataSource, QueryRunner } from 'typeorm';

/**
* Replace hardcoded 'public.' schema references with the target schema.
*/
/**
 * Rewrite hardcoded `public.` schema references in a SQL statement so they
 * target the given worker schema instead. Returns the input untouched when
 * the target IS the public schema.
 *
 * DROP INDEX statements are handled first and specially: the schema
 * qualifier is stripped (indexes live in the table's schema) and
 * IF EXISTS is forced on so re-runs are idempotent.
 */
const replaceSchemaReferences = (sql: string, targetSchema: string): string => {
  if (targetSchema === 'public') return sql;

  // Normalise DROP INDEX before the generic substitutions run.
  const withDropsRewritten = sql.replace(
    /DROP INDEX\s+(?:IF EXISTS\s+)?(?:"public"\.|public\.)?("[^"]+"|[\w]+)/gi,
    (_, indexName) => `DROP INDEX IF EXISTS ${indexName}`,
  );

  // Each pattern covers one way a public-schema reference can be spelled;
  // they are applied in order, matching the original rewrite pipeline.
  const substitutions: ReadonlyArray<[RegExp, string]> = [
    [/\bpublic\."(\w+)"/gi, `"${targetSchema}"."$1"`],
    [/\bpublic\.(\w+)(?=[\s,;())]|$)/gi, `"${targetSchema}"."$1"`],
    [/"public"\."(\w+)"/gi, `"${targetSchema}"."$1"`],
    [/\bON\s+public\./gi, `ON "${targetSchema}".`],
  ];

  return substitutions.reduce(
    (current, [pattern, replacement]) => current.replace(pattern, replacement),
    withDropsRewritten,
  );
};

/**
* Wrap a QueryRunner to intercept and transform SQL queries.
*/
/**
 * Monkey-patch a QueryRunner so every SQL statement it executes is first
 * rewritten to target the worker schema. Mutates and returns the same
 * runner instance.
 */
const wrapQueryRunner = (
  queryRunner: QueryRunner,
  targetSchema: string,
): QueryRunner => {
  // Capture a bound reference to the untouched implementation before patching.
  const delegate = queryRunner.query.bind(queryRunner);

  queryRunner.query = async (
    sql: string,
    parameters?: unknown[],
  ): Promise<unknown> =>
    delegate(replaceSchemaReferences(sql, targetSchema), parameters);

  return queryRunner;
};

/**
* Create and run migrations for a single worker schema.
*/
/**
 * Create and run migrations for a single worker schema.
 *
 * Opens a dedicated connection scoped to the given schema, bootstraps the
 * TypeORM bookkeeping tables (`migrations`, `typeorm_metadata`), then applies
 * every pending migration in timestamp order through a query runner that
 * rewrites hardcoded `public.` references to the worker schema.
 *
 * @param schema - Name of the worker schema (e.g. `test_worker_1`).
 * @throws Re-throws any migration failure after releasing the query runner
 *         and closing the connection.
 */
const createWorkerSchema = async (schema: string): Promise<void> => {
  const workerDataSource = new DataSource({
    type: 'postgres',
    host: process.env.TYPEORM_HOST || 'localhost',
    // Respect TYPEORM_PORT when set, consistent with the other env-driven
    // options; default matches the standard Postgres port.
    port: parseInt(process.env.TYPEORM_PORT || '5432', 10),
    username: process.env.TYPEORM_USERNAME || 'postgres',
    password: process.env.TYPEORM_PASSWORD || '12345',
    database:
      process.env.TYPEORM_DATABASE ||
      (process.env.NODE_ENV === 'test' ? 'api_test' : 'api'),
    schema,
    extra: {
      max: 2,
      // Fall back to public for objects not copied into the worker schema.
      options: `-c search_path=${schema},public`,
    },
    entities: ['src/entity/**/*.{js,ts}'],
    migrations: ['src/migration/**/*.{js,ts}'],
    migrationsTableName: 'migrations',
    logging: false,
  });

  await workerDataSource.initialize();

  try {
    const queryRunner = workerDataSource.createQueryRunner();
    await queryRunner.connect();
    wrapQueryRunner(queryRunner, schema);

    try {
      // Create migrations table
      await queryRunner.query(`
        CREATE TABLE IF NOT EXISTS "${schema}"."migrations" (
          "id" SERIAL PRIMARY KEY,
          "timestamp" bigint NOT NULL,
          "name" varchar NOT NULL
        )
      `);

      // Create typeorm_metadata table (used by TypeORM for views etc.)
      await queryRunner.query(`
        CREATE TABLE IF NOT EXISTS "${schema}"."typeorm_metadata" (
          "type" varchar NOT NULL,
          "database" varchar,
          "schema" varchar,
          "table" varchar,
          "name" varchar,
          "value" text
        )
      `);

      // Sort migrations by the 13-digit timestamp suffix TypeORM appends to
      // class names; entries without one sort first (timestamp 0).
      const allMigrations = [...workerDataSource.migrations].sort((a, b) => {
        const getTimestamp = (migration: {
          name?: string;
          constructor: { name: string };
        }): number => {
          const name = migration.name || migration.constructor.name;
          const match = name.match(/(\d{13})$/);
          return match ? parseInt(match[1], 10) : 0;
        };
        return getTimestamp(a) - getTimestamp(b);
      });

      for (const migration of allMigrations) {
        const migrationName = migration.name || migration.constructor.name;

        // Skip migrations already recorded for this schema (idempotent re-runs).
        const alreadyRun = await queryRunner.query(
          `SELECT * FROM "${schema}"."migrations" WHERE "name" = $1`,
          [migrationName],
        );

        if (alreadyRun.length === 0) {
          await migration.up(queryRunner);

          const timestampMatch = migrationName.match(/(\d{13})$/);
          const timestamp = timestampMatch
            ? parseInt(timestampMatch[1], 10)
            : Date.now();

          await queryRunner.query(
            `INSERT INTO "${schema}"."migrations" ("timestamp", "name") VALUES ($1, $2)`,
            [timestamp, migrationName],
          );
        }
      }
    } finally {
      await queryRunner.release();
    }
  } finally {
    // Always close the connection, even when a migration throws — otherwise
    // the pool leaks and globalSetup can hang or exhaust max_connections.
    await workerDataSource.destroy();
  }
};

/**
* Jest global setup - runs once before all workers start.
* Creates worker schemas for parallel test isolation.
*/
/**
 * Jest global setup - runs once before all workers start.
 * Creates worker schemas for parallel test isolation: drops and recreates
 * one schema per worker, then runs migrations into each sequentially
 * (sequential on purpose, to avoid memory spikes).
 *
 * No-op unless ENABLE_SCHEMA_ISOLATION=true.
 */
export default async function globalSetup(): Promise<void> {
  // Only run when schema isolation is enabled
  if (process.env.ENABLE_SCHEMA_ISOLATION !== 'true') {
    return;
  }

  // Guard against a malformed JEST_MAX_WORKERS (NaN / non-positive) silently
  // creating zero schemas; fall back to the documented default of 4.
  const parsedWorkers = parseInt(process.env.JEST_MAX_WORKERS || '4', 10);
  const maxWorkers =
    Number.isNaN(parsedWorkers) || parsedWorkers < 1 ? 4 : parsedWorkers;
  console.log(
    `\nCreating ${maxWorkers} worker schemas for parallel testing...`,
  );

  // First, create all schemas
  const dataSource = new DataSource({
    type: 'postgres',
    host: process.env.TYPEORM_HOST || 'localhost',
    // Respect TYPEORM_PORT when set, consistent with the other env-driven
    // options; default matches the standard Postgres port.
    port: parseInt(process.env.TYPEORM_PORT || '5432', 10),
    username: process.env.TYPEORM_USERNAME || 'postgres',
    password: process.env.TYPEORM_PASSWORD || '12345',
    database:
      process.env.TYPEORM_DATABASE ||
      (process.env.NODE_ENV === 'test' ? 'api_test' : 'api'),
    schema: 'public',
    extra: { max: 1 },
  });

  await dataSource.initialize();

  try {
    for (let i = 1; i <= maxWorkers; i++) {
      const schema = `test_worker_${i}`;
      await dataSource.query(`DROP SCHEMA IF EXISTS "${schema}" CASCADE`);
      await dataSource.query(`CREATE SCHEMA "${schema}"`);
    }
  } finally {
    // Always close the bootstrap connection, even if schema creation fails,
    // so a setup error doesn't leak a pooled connection.
    await dataSource.destroy();
  }

  // Run migrations for each schema sequentially to avoid memory spikes
  for (let i = 1; i <= maxWorkers; i++) {
    const schema = `test_worker_${i}`;
    console.log(`Running migrations for ${schema}...`);
    await createWorkerSchema(schema);
  }

  console.log('All worker schemas ready!\n');
}
55 changes: 49 additions & 6 deletions __tests__/setup.ts
Original file line number Diff line number Diff line change
@@ -1,9 +1,13 @@
import * as matchers from 'jest-extended';
import '../src/config';
import createOrGetConnection from '../src/db';
import { testSchema } from '../src/data-source';
import { remoteConfig } from '../src/remoteConfig';
import { loadAuthKeys } from '../src/auth';

// Set global timeout for all tests and hooks (more reliable than jest.config.js testTimeout)
jest.setTimeout(30000);

expect.extend(matchers);

global.structuredClone = (v) => JSON.parse(JSON.stringify(v));
Expand Down Expand Up @@ -57,21 +61,54 @@ jest.mock('../src/remoteConfig', () => ({
},
}));

// Tables populated by migrations that tests rely on but never modify, so
// they are exempt from the between-test wipe. Everything else is cleared
// so each test builds its own fixtures and auto-increment IDs start at 1.
const SEED_DATA_TABLES = new Set([
  'migrations', // TypeORM's record of applied migrations — must survive
  'checkpoint', // System checkpoints, tests don't create/modify
]);

const cleanDatabase = async (): Promise<void> => {
await remoteConfig.init();

const con = await createOrGetConnection();
for (const entity of con.entityMetadatas) {
const repository = con.getRepository(entity.name);
if (repository.metadata.tableType === 'view') continue;
await repository.query(`DELETE
FROM "${entity.tableName}";`);

// Skip seed data tables - they're populated once and tests expect them to exist
if (SEED_DATA_TABLES.has(entity.tableName)) continue;

await repository.query(`DELETE FROM "${entity.tableName}";`);

for (const column of entity.primaryColumns) {
if (column.generationStrategy === 'increment') {
await repository.query(
`ALTER SEQUENCE ${entity.tableName}_${column.databaseName}_seq RESTART WITH 1`,
);
// Reset sequences/identity columns for auto-increment primary keys
// Must use schema-qualified table name for schema isolation to work
try {
// First try pg_get_serial_sequence (works for SERIAL columns)
// Schema-qualify the table name for proper resolution in worker schemas
const schemaQualifiedTable = `${testSchema}.${entity.tableName}`;
const seqResult = await repository.query(
`SELECT pg_get_serial_sequence($1, $2) as seq_name`,
[schemaQualifiedTable, column.databaseName],
);
if (seqResult[0]?.seq_name) {
await repository.query(
`ALTER SEQUENCE ${seqResult[0].seq_name} RESTART WITH 1`,
);
} else {
// If no sequence found, try resetting IDENTITY column directly
// This handles GENERATED AS IDENTITY columns
await repository.query(
`ALTER TABLE "${testSchema}"."${entity.tableName}" ALTER COLUMN "${column.databaseName}" RESTART WITH 1`,
);
}
} catch {
// Sequence/identity might not exist or not be resettable, ignore
}
}
}
}
Expand All @@ -82,8 +119,14 @@ jest.mock('file-type', () => ({
fileTypeFromBuffer: () => fileTypeFromBuffer(),
}));

beforeAll(async () => {
// Schema creation is now handled by globalSetup.ts
// This beforeAll just ensures the connection is ready
await createOrGetConnection();
}, 30000);

beforeEach(async () => {
loadAuthKeys();

await cleanDatabase();
});
}, 90000); // 90s - cleanDatabase iterates through many entities
Loading
Loading