-
-
Notifications
You must be signed in to change notification settings - Fork 58
Expand file tree
/
Copy pathapi.js
More file actions
366 lines (349 loc) · 12.5 KB
/
api.js
File metadata and controls
366 lines (349 loc) · 12.5 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
import * as Y from 'yjs'
import * as redis from 'redis'
import * as map from 'lib0/map'
import * as decoding from 'lib0/decoding'
import * as awarenessProtocol from 'y-protocols/awareness'
import * as array from 'lib0/array'
import * as random from 'lib0/random'
import * as number from 'lib0/number'
import * as promise from 'lib0/promise'
import * as math from 'lib0/math'
import * as protocol from './protocol.js'
import * as env from 'lib0/environment'
import * as logging from 'lib0/logging'
// Module-scoped loggers: worker-side events vs. general api events.
const logWorker = logging.createModuleLogger('@y/redis/api/worker')
const logApi = logging.createModuleLogger('@y/redis/api')
// Redis connection url read from configuration. `ensureConf` presumably
// throws when the `redis` conf value is missing — verify against lib0 docs.
export const redisUrl = env.ensureConf('redis')
/**
 * Compare two redis stream ids of the form `"<ms>-<seq>"`. The sequence part
 * is optional and defaults to `"0"` (e.g. when a plain timestamp is used as
 * an id).
 *
 * @param {string} a
 * @param {string} b
 * @return {boolean} iff a < b
 */
export const isSmallerRedisId = (a, b) => {
  const [a1, a2 = '0'] = a.split('-')
  const [b1, b2 = '0'] = b.split('-')
  // Use the stdlib parser directly (with explicit radix) instead of the lib0
  // wrapper — identical behavior, but the function is self-contained.
  const a1n = Number.parseInt(a1, 10)
  const b1n = Number.parseInt(b1, 10)
  // Order by timestamp first; only compare sequence numbers on a tie.
  return a1n < b1n || (a1n === b1n && Number.parseInt(a2, 10) < Number.parseInt(b2, 10))
}
/**
 * Group the raw messages of an XREAD streams reply by room and docid.
 *
 * @param {import('@redis/client/dist/lib/commands/generic-transformers.js').StreamsMessagesReply} streamReply
 * @param {string} prefix
 * @return {Map<string, Map<string, { lastId: string, messages: Array<Uint8Array> }>>}
 *   room → docid → collected payloads plus the id of the newest stream entry
 */
const extractMessagesFromStreamReply = (streamReply, prefix) => {
  /**
   * @type {Map<string, Map<string, { lastId: string, messages: Array<Uint8Array> }>>}
   */
  const messages = new Map()
  for (const docStreamReply of streamReply ?? []) {
    const { room, docid } = decodeRedisRoomStreamName(docStreamReply.name.toString(), prefix)
    let roomMap = messages.get(room)
    if (roomMap === undefined) {
      roomMap = new Map()
      messages.set(room, roomMap)
    }
    let docMessages = roomMap.get(docid)
    if (docMessages === undefined) {
      docMessages = {
        lastId: docStreamReply.messages[docStreamReply.messages.length - 1].id,
        messages: /** @type {Array<Uint8Array>} */ ([])
      }
      roomMap.set(docid, docMessages)
    }
    for (const entry of docStreamReply.messages) {
      // entries without an "m" field carry no payload — skip them
      if (entry.message.m != null) {
        docMessages.messages.push(/** @type {Uint8Array} */ (entry.message.m))
      }
    }
  }
  return messages
}
/**
 * Build the redis stream key for a room/doc pair. Room and docid are
 * uri-encoded so the ':'-separated key stays unambiguous.
 *
 * @param {string} room
 * @param {string} docid
 * @param {string} prefix
 * @return {string} `<prefix>:room:<room>:<docid>` with room/docid encoded
 */
export const computeRedisRoomStreamName = (room, docid, prefix) =>
  [prefix, 'room', encodeURIComponent(room), encodeURIComponent(docid)].join(':')
/**
 * Inverse of `computeRedisRoomStreamName`: parse a redis stream key back into
 * its room and docid (uri-decoded).
 *
 * @param {string} rediskey
 * @param {string} expectedPrefix
 * @return {{ room: string, docid: string }}
 * @throws {Error} when the key does not match the expected shape/prefix
 */
const decodeRedisRoomStreamName = (rediskey, expectedPrefix) => {
  const parts = /^(.*):room:(.*):(.*)$/.exec(rediskey)
  if (parts === null || parts[1] !== expectedPrefix) {
    throw new Error(`Malformed stream name! prefix="${parts?.[1]}" expectedPrefix="${expectedPrefix}", rediskey="${rediskey}"`)
  }
  const [, , encodedRoom, encodedDocid] = parts
  return { room: decodeURIComponent(encodedRoom), docid: decodeURIComponent(encodedDocid) }
}
/**
 * Connect a new `Api` client and make sure the worker consumer group exists
 * on the worker stream.
 *
 * @param {import('./storage.js').AbstractStorage} store
 * @param {string} redisPrefix
 * @return {Promise<Api>}
 */
export const createApiClient = async (store, redisPrefix) => {
  const client = new Api(store, redisPrefix)
  await client.redis.connect()
  try {
    await client.redis.xGroupCreate(client.redisWorkerStreamName, client.redisWorkerGroupName, '0', { MKSTREAM: true })
  } catch (e) {
    // Best-effort: group creation fails when the group already exists
    // (presumably a BUSYGROUP error — verify); that case is fine to ignore.
  }
  return client
}
/**
 * Client for the y-redis backend. Publishes yjs messages to per-room redis
 * streams, reconstructs docs from stream + storage, and processes the
 * compaction worker queue.
 */
export class Api {
  /**
   * @param {import('./storage.js').AbstractStorage} store - persistent storage for compacted docs
   * @param {string} prefix - redis key prefix; all stream names derive from it
   */
  constructor (store, prefix) {
    this.store = store
    this.prefix = prefix
    // Unique consumer name within the worker consumer group (one per process).
    this.consumername = random.uuidv4()
    /**
     * After this timeout, a worker will pick up a task and clean up a stream.
     */
    this.redisTaskDebounce = number.parseInt(env.getConf('redis-task-debounce') || '10000') // default: 10 seconds
    /**
     * Minimum lifetime of y* update messages in redis streams.
     */
    this.redisMinMessageLifetime = number.parseInt(env.getConf('redis-min-message-lifetime') || '60000') // default: 1 minute
    // The worker stream and its consumer group intentionally share one name.
    this.redisWorkerStreamName = this.prefix + ':worker'
    this.redisWorkerGroupName = this.prefix + ':worker'
    // Set by destroy(); polled by the Worker loop to know when to stop.
    this._destroyed = false
    this.redis = redis.createClient({
      url: redisUrl,
      // scripting: https://github.com/redis/node-redis/#lua-scripts
      scripts: {
        // Atomically append a message to a room stream. If the stream did not
        // exist yet, also register a compaction task on the worker stream and
        // immediately XREADGROUP it as consumer "pending", so a real worker
        // only claims it via XAUTOCLAIM after redisTaskDebounce.
        addMessage: redis.defineScript({
          NUMBER_OF_KEYS: 1,
          SCRIPT: `
            if redis.call("EXISTS", KEYS[1]) == 0 then
              redis.call("XADD", "${this.redisWorkerStreamName}", "*", "compact", KEYS[1])
              redis.call("XREADGROUP", "GROUP", "${this.redisWorkerGroupName}", "pending", "STREAMS", "${this.redisWorkerStreamName}", ">")
            end
            redis.call("XADD", KEYS[1], "*", "m", ARGV[1])
          `,
          /**
           * @param {string} key
           * @param {Buffer} message
           */
          transformArguments (key, message) {
            return [key, message]
          },
          /**
           * @param {null} x
           */
          transformReply (x) {
            return x
          }
        }),
        // Delete a stream key only if it is (still) empty — avoids a
        // client-side check-then-delete race.
        xDelIfEmpty: redis.defineScript({
          NUMBER_OF_KEYS: 1,
          SCRIPT: `
            if redis.call("XLEN", KEYS[1]) == 0 then
              redis.call("DEL", KEYS[1])
            end
          `,
          /**
           * @param {string} key
           */
          transformArguments (key) {
            return [key]
          },
          /**
           * @param {null} x
           */
          transformReply (x) {
            return x
          }
        })
      }
    })
  }

  /**
   * Block (up to 1s) on the given streams and return newly arrived messages,
   * merged per stream.
   *
   * @param {Array<{key:string,id:string}>} streams streamname-clock pairs
   * @return {Promise<Array<{ stream: string, messages: Array<Uint8Array>, lastId: string }>>}
   */
  async getMessages (streams) {
    if (streams.length === 0) {
      // Nothing to subscribe to — wait briefly so callers don't busy-loop.
      await promise.wait(50)
      return []
    }
    const reads = await this.redis.xRead(
      redis.commandOptions({ returnBuffers: true }),
      streams,
      { BLOCK: 1000, COUNT: 1000 }
    )
    /**
     * @type {Array<{ stream: string, messages: Array<Uint8Array>, lastId: string }>}
     */
    const res = []
    reads?.forEach(stream => {
      res.push({
        stream: stream.name.toString(),
        // Merge the raw y* payloads into a minimal set of messages.
        messages: protocol.mergeMessages(stream.messages.map(message => message.message.m).filter(m => m != null)),
        lastId: array.last(stream.messages).id.toString()
      })
    })
    return res
  }

  /**
   * Publish a y* protocol message to the room's redis stream.
   * NOTE: may mutate `m` in place (rewrites sync-step-2 into a sync-update).
   *
   * @param {string} room
   * @param {string} docid
   * @param {Buffer} m
   */
  addMessage (room, docid, m) {
    // handle sync step 2 like a normal update message
    if (m[0] === protocol.messageSync && m[1] === protocol.messageSyncStep2) {
      if (m.byteLength < 4) {
        // message does not contain any content, don't distribute
        return promise.resolve()
      }
      m[1] = protocol.messageSyncUpdate
    }
    return this.redis.addMessage(computeRedisRoomStreamName(room, docid, this.prefix), m)
  }

  /**
   * Read the persisted state vector for a doc from storage.
   *
   * @param {string} room
   * @param {string} docid
   */
  async getStateVector (room, docid = '/') {
    return this.store.retrieveStateVector(room, docid)
  }

  /**
   * Reconstruct the current doc + awareness state: load the persisted doc
   * from storage, then apply all pending messages from the redis stream.
   *
   * @param {string} room
   * @param {string} docid
   */
  async getDoc (room, docid) {
    logApi(`getDoc(${room}, ${docid})`)
    const ms = extractMessagesFromStreamReply(await this.redis.xRead(redis.commandOptions({ returnBuffers: true }), { key: computeRedisRoomStreamName(room, docid, this.prefix), id: '0' }), this.prefix)
    logApi(`getDoc(${room}, ${docid}) - retrieved messages`)
    const docMessages = ms.get(room)?.get(docid) || null
    const docstate = await this.store.retrieveDoc(room, docid)
    logApi(`getDoc(${room}, ${docid}) - retrieved doc`)
    const ydoc = new Y.Doc()
    const awareness = new awarenessProtocol.Awareness(ydoc)
    awareness.setLocalState(null) // we don't want to propagate awareness state
    if (docstate) { Y.applyUpdateV2(ydoc, docstate.doc) }
    let docChanged = false
    ydoc.once('afterTransaction', tr => {
      // docChanged iff the stream messages below actually modified the doc.
      docChanged = tr.changed.size > 0
      // NOTE(review): the doc is destroyed here although it is still returned
      // to the caller (and persisted by consumeWorkerQueue) — verify this is
      // intended and that reads on a destroyed Y.Doc remain valid.
      ydoc.destroy()
    })
    ydoc.transact(() => {
      docMessages?.messages.forEach(m => {
        const decoder = decoding.createDecoder(m)
        switch (decoding.readVarUint(decoder)) {
          case 0: { // sync message
            if (decoding.readVarUint(decoder) === 2) { // update message
              Y.applyUpdate(ydoc, decoding.readVarUint8Array(decoder))
            }
            break
          }
          case 1: { // awareness message
            awarenessProtocol.applyAwarenessUpdate(awareness, decoding.readVarUint8Array(decoder), null)
            break
          }
        }
      })
    })
    return { ydoc, awareness, redisLastId: docMessages?.lastId.toString() || '0', storeReferences: docstate?.references || null, docChanged }
  }

  /**
   * Claim and process compaction tasks from the worker queue: persist changed
   * docs to storage and trim the corresponding redis streams.
   *
   * @param {WorkerOpts} opts
   */
  async consumeWorkerQueue ({ tryClaimCount = 5, updateCallback = async () => {} }) {
    /**
     * @type {Array<{stream: string, id: string}>}
     */
    const tasks = []
    // Claim tasks whose previous consumer has been idle > redisTaskDebounce.
    const reclaimedTasks = await this.redis.xAutoClaim(this.redisWorkerStreamName, this.redisWorkerGroupName, this.consumername, this.redisTaskDebounce, '0', { COUNT: tryClaimCount })
    reclaimedTasks.messages.forEach(m => {
      const stream = m?.message.compact
      stream && tasks.push({ stream, id: m?.id })
    })
    if (tasks.length === 0) {
      logWorker('No tasks available, pausing..', { tasks })
      await promise.wait(1000)
      return []
    }
    logWorker('Accepted tasks ', { tasks })
    await promise.all(tasks.map(async task => {
      const streamlen = await this.redis.xLen(task.stream)
      if (streamlen === 0) {
        // Room stream is empty — drop both the stream key (if still empty)
        // and the now-pointless recurring task.
        await this.redis.multi()
          .xDelIfEmpty(task.stream)
          .xDel(this.redisWorkerStreamName, task.id)
          .exec()
        logWorker('Stream still empty, removing recurring task from queue ', { stream: task.stream })
      } else {
        const { room, docid } = decodeRedisRoomStreamName(task.stream, this.prefix)
        // @todo, make sure that awareness by this.getDoc is eventually destroyed, or doesn't
        // register a timeout anymore
        logWorker('requesting doc from store')
        const { ydoc, storeReferences, redisLastId, docChanged } = await this.getDoc(room, docid)
        logWorker('retrieved doc from store. redisLastId=' + redisLastId, ' storeRefs=' + JSON.stringify(storeReferences))
        // Newest ms-timestamp fully incorporated: max of last stream entry
        // read and the task entry itself.
        const lastId = math.max(number.parseInt(redisLastId.split('-')[0]), number.parseInt(task.id.split('-')[0]))
        if (docChanged) {
          try {
            logWorker('doc changed, calling update callback')
            await updateCallback(room, ydoc)
          } catch (e) {
            // Callback failures must not prevent persisting the doc.
            console.error(e)
          }
          logWorker('persisting doc')
          await this.store.persistDoc(room, docid, ydoc)
        }
        await promise.all([
          storeReferences && docChanged ? this.store.deleteReferences(room, docid, storeReferences) : promise.resolve(),
          // if `redisTaskDebounce` is small, or if updateCallback takes too long, then we might
          // add a task twice to this list.
          // @todo either use a different datastructure or make sure that task doesn't exist yet
          // before adding it to the worker queue
          // This issue is not critical, as no data will be lost if this happens.
          this.redis.multi()
            .xTrim(task.stream, 'MINID', lastId - this.redisMinMessageLifetime)
            .xAdd(this.redisWorkerStreamName, '*', { compact: task.stream })
            .xReadGroup(this.redisWorkerGroupName, 'pending', { key: this.redisWorkerStreamName, id: '>' }, { COUNT: 50 }) // immediately claim this entry, will be picked up by worker after timeout
            .xDel(this.redisWorkerStreamName, task.id)
            .exec()
        ])
        logWorker('Compacted stream ', { stream: task.stream, taskId: task.id, newLastId: lastId - this.redisMinMessageLifetime })
      }
    }))
    return tasks
  }

  /**
   * Stop any Worker loop using this client and close the redis connection.
   * Errors while quitting are ignored (connection may already be gone).
   */
  async destroy () {
    this._destroyed = true
    try {
      await this.redis.quit()
    } catch (e) {}
  }
}
/**
 * @typedef {Object} WorkerOpts
 * @property {(room: string, ydoc: Y.Doc) => Promise<void>} [WorkerOpts.updateCallback]
 * @property {number} [WorkerOpts.tryClaimCount]
 */

/**
 * Create a connected api client and start a compaction worker on top of it.
 *
 * @param {import('./storage.js').AbstractStorage} store
 * @param {string} redisPrefix
 * @param {WorkerOpts} opts
 * @return {Promise<Worker>}
 */
export const createWorker = async (store, redisPrefix, opts) => {
  const client = await createApiClient(store, redisPrefix)
  return new Worker(client, opts)
}
/**
 * Background process that repeatedly consumes the compaction worker queue of
 * its api client until the client is destroyed.
 */
export class Worker {
  /**
   * @param {Api} client
   * @param {WorkerOpts} opts
   */
  constructor (client, opts) {
    this.client = client
    logWorker('Created worker process ', { id: client.consumername, prefix: client.prefix, minMessageLifetime: client.redisMinMessageLifetime })
    const run = async () => {
      while (!client._destroyed) {
        try {
          await client.consumeWorkerQueue(opts)
        } catch (e) {
          // Keep the loop alive — a single failed iteration is not fatal.
          console.error(e)
        }
      }
      logWorker('Ended worker process ', { id: client.consumername })
    }
    // Deliberately not awaited: the polling loop runs for the lifetime of
    // the client and stops once client.destroy() flips _destroyed.
    run()
  }
}