diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml index 079b8abd..806d8019 100644 --- a/.github/workflows/benchmark.yml +++ b/.github/workflows/benchmark.yml @@ -17,6 +17,15 @@ on: - 'LICENSE' - '.gitignore' workflow_dispatch: + inputs: + mode: + description: 'Do you want to run the full or essential benchmark suite?' + default: 'full' + required: false + type: choice + options: + - 'full' + - 'essential' concurrency: cancel-in-progress: true @@ -66,6 +75,8 @@ jobs: run: pnpm build - name: Run benchmarks + env: + BENCHMARK_MODE: ${{ github.event.inputs.mode || 'essential' }} run: pnpm bench - name: Upload benchmark results as artifact @@ -136,14 +147,13 @@ jobs: comment += '_No benchmark files found_\n'; } else { for (const file of results.files) { - const filename = file.filepath.split('/').pop(); - comment += `### ${filename}\n\n`; - if (!file.groups || file.groups.length === 0) { - comment += '_No benchmark groups found_\n\n'; continue; } + const filename = file.filepath.split('/').pop(); + comment += `### ${filename}\n\n`; + for (const group of file.groups) { // Extract test name from fullName (remove file path prefix) const testName = group.fullName.replace(/^[^>]+>\s*/, ''); diff --git a/.gitignore b/.gitignore index 8bbeb374..9acd7791 100644 --- a/.gitignore +++ b/.gitignore @@ -1,4 +1,5 @@ benchmark-results.json +benchmark/data build /coverage deps diff --git a/benchmark/data/README.md b/benchmark/data/README.md new file mode 100644 index 00000000..9723b1f2 --- /dev/null +++ b/benchmark/data/README.md @@ -0,0 +1 @@ +This directory contains benchmark database files for RocksDB-JS. It is important to run benchmarks on a real filesystem (not a tempfs). These files should be deleted after benchmarking. 
\ No newline at end of file diff --git a/benchmark/get-sync.bench.ts b/benchmark/get-sync.bench.ts index 644e2ad1..a1f44c48 100644 --- a/benchmark/get-sync.bench.ts +++ b/benchmark/get-sync.bench.ts @@ -10,7 +10,7 @@ import { const SMALL_DATASET = 100; describe('getSync()', () => { - describe.only('random keys - small key size (100 records)', () => { + describe('random keys - small key size (100 records)', () => { function setup(ctx) { ctx.data = generateRandomKeys(SMALL_DATASET); for (const key of ctx.data) { @@ -19,6 +19,7 @@ describe('getSync()', () => { } benchmark('rocksdb', { + mode: 'essential', setup, bench({ data, db }) { for (const key of data) { @@ -28,6 +29,7 @@ describe('getSync()', () => { }); benchmark('lmdb', { + mode: 'essential', setup, bench({ data, db }) { for (const key of data) { @@ -46,6 +48,7 @@ describe('getSync()', () => { } benchmark('rocksdb', { + mode: 'essential', setup, bench({ data, db }) { for (const key of data) { @@ -55,6 +58,7 @@ describe('getSync()', () => { }); benchmark('lmdb', { + mode: 'essential', setup, bench({ data, db }) { for (const key of data) { diff --git a/benchmark/ranges.bench.ts b/benchmark/ranges.bench.ts index 5ecfa5d5..dff2d8ab 100644 --- a/benchmark/ranges.bench.ts +++ b/benchmark/ranges.bench.ts @@ -23,6 +23,7 @@ function setupRangeTestData(ctx: any, datasetSize: number) { describe('getRange()', () => { describe('small range (100 records, 50 range)', () => { benchmark('rocksdb', { + mode: 'essential', setup(ctx) { setupRangeTestData(ctx, SMALL_DATASET); }, @@ -32,6 +33,7 @@ describe('getRange()', () => { }); benchmark('lmdb', { + mode: 'essential', setup(ctx) { setupRangeTestData(ctx, SMALL_DATASET); }, diff --git a/benchmark/realistic-load.bench.ts b/benchmark/realistic-load.bench.ts new file mode 100644 index 00000000..b6c42038 --- /dev/null +++ b/benchmark/realistic-load.bench.ts @@ -0,0 +1,73 @@ +import { + concurrent, + workerDescribe as describe, + workerBenchmark as benchmark, + type BenchmarkContext, 
+ type LMDBDatabase +} from './setup.js'; +import type { RocksDatabase } from '../dist/index.mjs'; + +const DELETE_RATIO = 0.2; +const NUM_KEYS = 5_000; +describe('Realistic write load with workers', () => { + const aaaa = Buffer.alloc(1500, 'a'); + const ITERATIONS = 100; + describe('write variable records with transaction log', () => { + benchmark('rocksdb', concurrent({ + mode: 'essential', + numWorkers: 4, + concurrencyMaximum: 32, + dbOptions: { disableWAL: true }, + setup(ctx: BenchmarkContext) { + const db = ctx.db; + const log = db.useLog('0'); + ctx.log = log; + }, + async bench({ db, log }) { + for (let i = 0; i < ITERATIONS; i++) { + await db.transaction((txn) => { + const key = Math.floor(Math.random() * NUM_KEYS).toString(); + if (Math.random() < DELETE_RATIO) { + log.addEntry(aaaa.subarray(0, 30), txn.id); + db.removeSync(key, { transaction: txn }); + } else { + const data = aaaa.subarray(0, Math.random() * 1500); + log.addEntry(data, txn.id); + db.putSync(key, data, { transaction: txn }); + } + }).catch((error) => { + if (error.code !== 'ERR_BUSY') { + console.error('Error occurred during transaction:', error); + } + }) + }; + }, + })); + + benchmark('lmdb', concurrent({ + mode: 'essential', + numWorkers: 4, + concurrencyMaximum: 32, + async setup(ctx: BenchmarkContext) { + let start = Date.now(); + ctx.index = start; + ctx.lastTime = Date.now(); + }, + async bench(ctx: BenchmarkContext) { + const { db } = ctx; + for (let i = 0; i < ITERATIONS; i++) { + let auditTime = ctx.lastTime = Math.max(ctx.lastTime + 0.001, Date.now()); + const key = Math.floor(Math.random() * NUM_KEYS).toString(); + if (Math.random() < DELETE_RATIO) { + db.put('audit' + auditTime, aaaa.subarray(0, 30)); + await db.remove(key); + } else { + const data = aaaa.subarray(0, Math.random() * 1500); + db.put('audit' + auditTime, data); + await db.put(key, data); + } + } + }, + })); + }); +}); diff --git a/benchmark/setup.ts b/benchmark/setup.ts index 5859486c..1657a998 100644 --- 
a/benchmark/setup.ts +++ b/benchmark/setup.ts @@ -1,5 +1,4 @@ import { RocksDatabase, RocksDatabaseOptions } from '../dist/index.mjs'; -import { tmpdir } from 'node:os'; import { join } from 'node:path'; import * as lmdb from 'lmdb'; import { randomBytes } from 'node:crypto'; @@ -25,7 +24,8 @@ type BenchmarkOptions = { name?: string, setup?: (ctx: BenchmarkContext) => void | Promise, timeout?: number, - teardown?: (ctx: BenchmarkContext) => void | Promise + teardown?: (ctx: BenchmarkContext) => void | Promise, + mode?: 'essential' | 'full' }; export function benchmark(type: 'rocksdb', options: BenchmarkOptions): void; @@ -35,12 +35,14 @@ export function benchmark(type: string, options: any): void { throw new Error(`Unsupported benchmark type: ${type}`); } - if ((process.env.ROCKSDB_ONLY && type !== 'rocksdb') || (process.env.LMDB_ONLY && type !== 'lmdb')) { + if ((process.env.ROCKSDB_ONLY && type !== 'rocksdb') || (process.env.LMDB_ONLY && type !== 'lmdb') || + (process.env.BENCHMARK_MODE === 'essential' && options.mode !== 'essential')) { return; } const { bench, setup, teardown, dbOptions, name } = options; - const dbPath = join(tmpdir(), `rocksdb-benchmark-${randomBytes(8).toString('hex')}`); + // it is important to run benchmarks on a real filesystem (not a tempfs) + const dbPath = join('benchmark', 'data', `rocksdb-benchmark-${randomBytes(8).toString('hex')}`); let ctx: BenchmarkContext; vitestBench(name || type, () => { @@ -65,7 +67,7 @@ export function benchmark(type: string, options: any): void { if (type === 'rocksdb') { ctx = { db: RocksDatabase.open(dbPath, dbOptions), mode }; } else { - ctx = { db: lmdb.open({ dbPath, compression: true, ...dbOptions }), mode }; + ctx = { db: lmdb.open({ path: dbPath, compression: true, ...dbOptions }), mode }; } } if (typeof setup === 'function') { @@ -291,7 +293,8 @@ export function workerBenchmark(type: string, options: any): void { throw new Error(`Unsupported benchmark type: ${type}`); } - if 
((process.env.ROCKSDB_ONLY && type !== 'rocksdb') || (process.env.LMDB_ONLY && type !== 'lmdb')) { + if ((process.env.ROCKSDB_ONLY && type !== 'rocksdb') || (process.env.LMDB_ONLY && type !== 'lmdb') || + (process.env.BENCHMARK_MODE === 'essential' && options.mode !== 'essential')) { return; } @@ -335,7 +338,7 @@ export function workerBenchmark(type: string, options: any): void { throws: true, async setup(_task, mode) { if (mode === 'run') return; - const path = join(tmpdir(), `rocksdb-benchmark-${randomBytes(8).toString('hex')}`); + const path = join('benchmark', 'data', `rocksdb-benchmark-${randomBytes(8).toString('hex')}`); // launch all workers and wait for them to initialize await Promise.all(Array.from({ length: numWorkers }, (_, i) => { @@ -378,6 +381,7 @@ export function workerBenchmark(type: string, options: any): void { }); })); }, + time: 2000, async teardown(_task, mode) { if (mode === 'warmup') return; // tell all workers to teardown and wait @@ -442,7 +446,7 @@ export async function workerInit() { if (type === 'rocksdb') { ctx = { db: RocksDatabase.open(path, dbOptions) }; } else { - ctx = { db: lmdb.open({ path, compression: true, ...dbOptions }) }; + ctx = { db: lmdb.open({ path, ...dbOptions }) }; } if (typeof setup === 'function') { await setup(ctx); diff --git a/benchmark/transaction-log.bench.ts b/benchmark/transaction-log.bench.ts index c2587348..9ec30c16 100644 --- a/benchmark/transaction-log.bench.ts +++ b/benchmark/transaction-log.bench.ts @@ -35,6 +35,7 @@ describe('Transaction log', () => { }); describe('read 100 iterators while write log with 100 byte records', () => { benchmark('rocksdb', concurrent({ + mode: 'essential', async setup(ctx: BenchmarkContext) { const db = ctx.db; const log = db.useLog('0'); @@ -56,6 +57,7 @@ describe('Transaction log', () => { })); benchmark('lmdb', concurrent({ + mode: 'essential', async setup(ctx: BenchmarkContext) { let start = Date.now(); ctx.index = start; @@ -75,6 +77,7 @@ describe('Transaction log', 
() => { }); describe('read one entry from random position from log with 1000 100 byte records', () => { benchmark('rocksdb', { + mode: 'essential', async setup(ctx: BenchmarkContext) { const db = ctx.db; const log = db.useLog('0'); @@ -97,6 +100,7 @@ describe('Transaction log', () => { }); benchmark('lmdb', { + mode: 'essential', async setup(ctx: BenchmarkContext) { let start = Date.now(); const value = Buffer.alloc(100, 'a'); diff --git a/benchmark/worker-put-sync.bench.ts b/benchmark/worker-put-sync.bench.ts index db2835db..cbcac7f4 100644 --- a/benchmark/worker-put-sync.bench.ts +++ b/benchmark/worker-put-sync.bench.ts @@ -59,6 +59,7 @@ describe('putSync()', () => { describe('random keys - small key size (100 records, 10 workers)', () => { benchmark('rocksdb', { + mode: 'essential', numWorkers: 10, setup(ctx) { ctx.data = generateRandomKeys(SMALL_DATASET); @@ -71,6 +72,7 @@ describe('putSync()', () => { }); benchmark('lmdb', { + mode: 'essential', numWorkers: 10, setup(ctx) { ctx.data = generateRandomKeys(SMALL_DATASET); diff --git a/benchmark/worker-transaction-log.bench.ts b/benchmark/worker-transaction-log.bench.ts index 3b2bb344..273d0d15 100644 --- a/benchmark/worker-transaction-log.bench.ts +++ b/benchmark/worker-transaction-log.bench.ts @@ -12,6 +12,7 @@ describe('Transaction log with workers', () => { describe('write log with 100 byte records', () => { benchmark('rocksdb', concurrent({ + mode: 'essential', numWorkers: 4, async setup(ctx: BenchmarkContext) { const db = ctx.db; @@ -26,6 +27,7 @@ describe('Transaction log with workers', () => { })); benchmark('lmdb', concurrent({ + mode: 'essential', numWorkers: 4, async setup(ctx: BenchmarkContext) { let start = Date.now(); diff --git a/package.json b/package.json index b34a3aed..6244294b 100644 --- a/package.json +++ b/package.json @@ -15,7 +15,7 @@ "type": "module", "gypfile": true, "scripts": { - "bench": "cross-env CI=1 vitest bench --outputJson benchmark-results.json", + "bench": "cross-env CI=1 
vitest bench --passWithNoTests --outputJson benchmark-results.json", "bench:bun": "cross-env CI=1 bun --bun bench", "bench:deno": "cross-env CI=1 deno run --allow-all --sloppy-imports ./node_modules/vitest/vitest.mjs bench", "build": "pnpm build:bundle && pnpm rebuild", diff --git a/src/database.ts b/src/database.ts index 54950a7a..e55a9892 100644 --- a/src/database.ts +++ b/src/database.ts @@ -507,7 +507,11 @@ export class RocksDatabase extends DBI { if (err instanceof Error && 'code' in err && err.code === 'ERR_ALREADY_ABORTED') { return undefined as T; } - txn.abort(); + try { + txn.abort(); + } catch { + // ignore if abort fails + } throw err; } }