db-compactor.js
const fs = require('node:fs');
const { pipeline } = require('node:stream/promises');
const EventEmitter = require('node:events');
const crypto = require('node:crypto');
const path = require('node:path');
const DBReader = require('./streams/db-read-stream');
const CompactionStream = require('./streams/compaction-stream');

/**
 * The Compactor performs compaction of the database log by removing stale
 * keys. It listens for and records update events, then removes out-of-date
 * keys during downtime.
 */
class Compactor extends EventEmitter {
	constructor( opts ) {
		super();
		// The updateLog is a map from the document id to the number of updates
		// that the document has received since the last compaction. This is how
		// many occurrences of a given id can be removed during compaction.
		this.updateLog = new Map();
		this.lock = opts.lock;
		this.filesDir = opts.filesDir;
		this.fileName = opts.fileName;
		this.compactionInterval = opts.compactionInterval || 30e3;
		this.running = false;
		this.dataDelimiter = opts.dataDelimiter;
		this.documentDelimiter = opts.documentDelimiter;
	}

	/**
	 * Perform compaction on the database file by removing keys that were
	 * recorded in the updateLog.
	 */
	async compact() {
		const release = await this.lock.acquire();
		const filePath = path.resolve(
			__dirname,
			`${ this.filesDir }/${ this.fileName }`
		);
		const newFilePath = path.resolve(
			__dirname,
			`${ this.filesDir }/${ crypto.randomUUID() }`
		);
		// Streams are single-use: a pipeline ends its streams when it
		// finishes, so a fresh DBReader is created for every compaction pass
		// rather than being reused across passes.
		const dbReader = new DBReader(
			this.dataDelimiter,
			this.documentDelimiter
		);
		const compactionStream = new CompactionStream({
			updateLog: this.updateLog,
			dataDelimiter: this.dataDelimiter,
			documentDelimiter: this.documentDelimiter
		});
		try {
			await pipeline(
				fs.createReadStream( filePath ),
				dbReader,
				compactionStream,
				fs.createWriteStream( newFilePath )
			);
		} finally {
			// Always release the lock, even if the pipeline fails, so other
			// writers are not blocked forever.
			release();
		}
		this.emit( 'compacted', { newFilePath } );
	}

	/**
	 * Run compaction on a repeating cycle, re-arming the timer after each
	 * pass until the compactor is stopped.
	 */
	async compactRecursive() {
		await this.compact();
		// Add jitter so that compaction cycles do not converge across
		// multiple processes.
		const jitter = Math.random() * 5000;
		setTimeout( () => {
			if ( this.running ) {
				this.compactRecursive();
			} else {
				this.emit( 'stopped' );
			}
		}, this.compactionInterval + jitter );
	}

	/**
	 * Add an update to the updateLog for use during the next compaction cycle.
	 */
	recordUpdate( update ) {
		const currentCount = this.updateLog.get( update.id ) || 0;
		this.updateLog.set( update.id, currentCount + 1 );
	}

	/**
	 * Start the database compactor.
	 */
	start() {
		this.running = true;
		this.emit( 'started' );
		this.compactRecursive();
	}

	/**
	 * Stop the Compactor. The 'stopped' event is emitted once the current
	 * cycle's timer fires and observes that running is false.
	 */
	stop() {
		this.running = false;
	}
}

module.exports = Compactor;
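
// Example usage (a sketch, not part of this module): it assumes a `lock`
// whose `acquire()` resolves to a release function, and a database layer
// that emits 'update' events. The delimiter values and the
// `swapDatabaseFile` helper below are hypothetical.
//
// const Compactor = require( './db-compactor' );
// const compactor = new Compactor( {
// 	lock,                        // mutex shared with the database writer
// 	filesDir: 'data',
// 	fileName: 'db.log',
// 	compactionInterval: 60e3,    // compact roughly once a minute
// 	dataDelimiter: '\x1f',
// 	documentDelimiter: '\x1e'
// } );
// compactor.on( 'compacted', ( { newFilePath } ) => {
// 	// The caller decides how to promote the compacted file.
// 	swapDatabaseFile( newFilePath );
// } );
// db.on( 'update', ( update ) => compactor.recordUpdate( update ) );
// compactor.start();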