
Commit 7d1cd98

Skip large rows (#224)
* mongodb 6.14.1
* Skip large rows, rather than erroring hard.
* Add changeset.
1 parent 04e6fcc commit 7d1cd98

6 files changed: +28, -12 lines

.changeset/fast-flowers-scream.md

Lines changed: 7 additions & 0 deletions
@@ -0,0 +1,7 @@
+---
+'@powersync/service-module-mongodb-storage': patch
+'@powersync/lib-service-mongodb': patch
+'@powersync/service-image': patch
+---
+
+Skip large rows, rather than causing hard replication errors

libs/lib-mongodb/package.json

Lines changed: 1 addition & 1 deletion
@@ -30,7 +30,7 @@
   "dependencies": {
     "@powersync/lib-services-framework": "workspace:*",
     "bson": "^6.10.3",
-    "mongodb": "^6.13.0",
+    "mongodb": "^6.14.1",
     "ts-codec": "^1.3.0",
     "uri-js": "^4.4.1"
   },

modules/module-mongodb-storage/src/storage/implementation/MongoBucketBatch.ts

Lines changed: 1 addition & 1 deletion
@@ -24,7 +24,7 @@ import { idPrefixFilter } from './util.js';
 /**
  * 15MB
  */
-const MAX_ROW_SIZE = 15 * 1024 * 1024;
+export const MAX_ROW_SIZE = 15 * 1024 * 1024;
 
 // Currently, we can only have a single flush() at a time, since it locks the op_id sequence.
 // While the MongoDB transaction retry mechanism handles this okay, using an in-process Mutex
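For context (not part of the diff): MongoDB caps a single BSON document at 16 MB, so a 15 MB row limit presumably leaves headroom for the surrounding document's own fields. With the constant now exported, the size guard used in the PersistedBatch change below can be expressed roughly as this minimal sketch (the helper name is hypothetical):

// Sketch only: a hypothetical guard built on the now-exported constant,
// mirroring the `recordData.length > MAX_ROW_SIZE` check added below.
import { MAX_ROW_SIZE } from './MongoBucketBatch.js';

/** Returns true if the JSON-serialized row is too large to persist. */
export function rowTooLarge(recordData: string): boolean {
  // The comparison is on JSON string length, not BSON document size.
  return recordData.length > MAX_ROW_SIZE;
}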

modules/module-mongodb-storage/src/storage/implementation/PersistedBatch.ts

Lines changed: 11 additions & 2 deletions
@@ -5,7 +5,7 @@ import * as bson from 'bson';
 
 import { logger } from '@powersync/lib-services-framework';
 import { InternalOpId, storage, utils } from '@powersync/service-core';
-import { currentBucketKey } from './MongoBucketBatch.js';
+import { currentBucketKey, MAX_ROW_SIZE } from './MongoBucketBatch.js';
 import { MongoIdSequence } from './MongoIdSequence.js';
 import { PowerSyncMongo } from './db.js';
 import {
@@ -83,11 +83,20 @@ export class PersistedBatch {
 
     for (const k of options.evaluated) {
       const key = currentBucketKey(k);
-      remaining_buckets.delete(key);
 
       // INSERT
       const recordData = JSONBig.stringify(k.data);
       const checksum = utils.hashData(k.table, k.id, recordData);
+      if (recordData.length > MAX_ROW_SIZE) {
+        // In many cases, the raw data size would have been too large already. But there are cases where
+        // the BSON size is small enough, but the JSON size is too large.
+        // In these cases, we can't store the data, so we skip it, or generate a REMOVE operation if the row
+        // was synced previously.
+        logger.error(`powersync_${this.group_id} Row ${key} too large: ${recordData.length} bytes. Removing.`);
+        continue;
+      }
+
+      remaining_buckets.delete(key);
       this.currentSize += recordData.length + 200;
 
       const op_id = options.op_seq.next();
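The inline comment above describes the case this guard exists for: a row whose BSON form fits under MongoDB's 16 MB document limit, but whose JSON serialization does not. A minimal sketch of one way that mismatch can arise is shown below; it is illustrative only, the sizes and hex encoding are assumptions, and the service itself serializes rows with JSONBig rather than this code.

// Illustrative sketch, not from the commit: binary data is stored compactly
// in BSON but expands when represented as text in JSON.
import * as bson from 'bson';

const MAX_ROW_SIZE = 15 * 1024 * 1024; // same 15 MB limit as MongoBucketBatch.ts

const payload = new Uint8Array(9 * 1024 * 1024); // 9 MB of raw bytes
const row = { _id: 'example', data: new bson.Binary(payload) };

// BSON stores the bytes as-is: roughly 9 MB, comfortably under the 16 MB BSON limit.
const bsonSize = bson.calculateObjectSize(row);

// A textual JSON representation (hex here, chosen for illustration) roughly doubles it.
const jsonSize = JSON.stringify({
  _id: row._id,
  data: Buffer.from(payload).toString('hex')
}).length;

console.log({ bsonSize, jsonSize, skipped: jsonSize > MAX_ROW_SIZE });
// With these assumed sizes: bsonSize is about 9 MB, jsonSize about 18 MB, so the row is skipped.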

pnpm-lock.yaml

Lines changed: 7 additions & 7 deletions
Some generated files are not rendered by default.

service/package.json

Lines changed: 1 addition & 1 deletion
@@ -36,7 +36,7 @@
     "ix": "^5.0.0",
     "jose": "^4.15.1",
     "lru-cache": "^10.0.1",
-    "mongodb": "^6.13.0",
+    "mongodb": "^6.14.1",
     "node-fetch": "^3.3.2",
     "pgwire": "github:kagis/pgwire#f1cb95f9a0f42a612bb5a6b67bb2eb793fc5fc87",
     "ts-codec": "^1.3.0",
