7 changes: 7 additions & 0 deletions .changeset/fast-flowers-scream.md
@@ -0,0 +1,7 @@
+---
+'@powersync/service-module-mongodb-storage': patch
+'@powersync/lib-service-mongodb': patch
+'@powersync/service-image': patch
+---
+
+Skip large rows, rather than causing hard replication errors
2 changes: 1 addition & 1 deletion libs/lib-mongodb/package.json
@@ -30,7 +30,7 @@
  "dependencies": {
    "@powersync/lib-services-framework": "workspace:*",
    "bson": "^6.10.3",
-    "mongodb": "^6.13.0",
+    "mongodb": "^6.14.1",
    "ts-codec": "^1.3.0",
    "uri-js": "^4.4.1"
  },
MongoBucketBatch.ts
@@ -24,7 +24,7 @@ import { idPrefixFilter } from './util.js';
/**
* 15MB
*/
-const MAX_ROW_SIZE = 15 * 1024 * 1024;
+export const MAX_ROW_SIZE = 15 * 1024 * 1024;

// Currently, we can only have a single flush() at a time, since it locks the op_id sequence.
// While the MongoDB transaction retry mechanism handles this okay, using an in-process Mutex
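
The truncated comment above refers to serializing flush() calls with an in-process mutex, since each flush locks the op_id sequence. A minimal sketch of that pattern, assuming a simple promise-chain mutex; the names `SimpleMutex` and `flushInner` are illustrative and not the PR's actual implementation:

```ts
// A minimal sketch of serializing flush() calls in-process.
class SimpleMutex {
  private tail: Promise<unknown> = Promise.resolve();

  runExclusive<T>(fn: () => Promise<T>): Promise<T> {
    const run = this.tail.then(fn, fn);
    // Keep the chain alive even if fn rejects, so later callers still run.
    this.tail = run.catch(() => undefined);
    return run;
  }
}

const flushMutex = new SimpleMutex();

async function flush(): Promise<void> {
  // Only one flush at a time per process, so the op_id sequence lock is
  // never contended by concurrent local flushes.
  await flushMutex.runExclusive(() => flushInner());
}

async function flushInner(): Promise<void> {
  // ...allocate op_ids and write the batch inside a MongoDB transaction...
}
```
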
PersistedBatch.ts
@@ -5,7 +5,7 @@ import * as bson from 'bson';

import { logger } from '@powersync/lib-services-framework';
import { InternalOpId, storage, utils } from '@powersync/service-core';
-import { currentBucketKey } from './MongoBucketBatch.js';
+import { currentBucketKey, MAX_ROW_SIZE } from './MongoBucketBatch.js';
import { MongoIdSequence } from './MongoIdSequence.js';
import { PowerSyncMongo } from './db.js';
import {
@@ -83,11 +83,20 @@ export class PersistedBatch {

    for (const k of options.evaluated) {
      const key = currentBucketKey(k);
-      remaining_buckets.delete(key);

      // INSERT
      const recordData = JSONBig.stringify(k.data);
      const checksum = utils.hashData(k.table, k.id, recordData);
+      if (recordData.length > MAX_ROW_SIZE) {
+        // In many cases, the raw data size would have been too large already. But there are cases where
+        // the BSON size is small enough, but the JSON size is too large.
+        // In these cases, we can't store the data, so we skip it, or generate a REMOVE operation if the row
+        // was synced previously.
+        logger.error(`powersync_${this.group_id} Row ${key} too large: ${recordData.length} bytes. Removing.`);
+        continue;
+      }
+
+      remaining_buckets.delete(key);
      this.currentSize += recordData.length + 200;

      const op_id = options.op_seq.next();
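
The new check in the hunk above guards against rows whose BSON representation fits in MongoDB but whose JSON serialization exceeds MAX_ROW_SIZE. A self-contained sketch of that size discrepancy, using plain JSON.stringify and a hex-encoded payload as stand-ins for the JSONBig serialization in PersistedBatch (the sample data and logging are illustrative only):

```ts
import { Binary, calculateObjectSize } from 'bson';

const MAX_ROW_SIZE = 15 * 1024 * 1024; // 15MB, matching the constant exported above

// ~12MB of raw bytes: comfortably under MongoDB's 16MB BSON document limit.
const payload = new Uint8Array(12 * 1024 * 1024);
const doc = { _id: 'example', payload: new Binary(payload) };
console.log(calculateObjectSize(doc)); // ≈ 12.6 million bytes

// A text encoding of the same bytes (hex here) roughly doubles the size,
// so the JSON form of the row blows past the 15MB row limit.
const recordData = JSON.stringify({
  _id: 'example',
  payload: Buffer.from(payload).toString('hex')
});

if (recordData.length > MAX_ROW_SIZE) {
  // This is the case the PR handles: skip the row (or emit a REMOVE if it was
  // synced previously) instead of failing replication with a hard error.
  console.log(`Row too large: ${recordData.length} bytes. Skipping.`);
}
```

Hex encoding doubles byte counts (and base64 adds roughly a third), which is why a row can pass the upstream BSON size check and still trip the JSON-size check here.
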
14 changes: 7 additions & 7 deletions pnpm-lock.yaml


2 changes: 1 addition & 1 deletion service/package.json
@@ -36,7 +36,7 @@
"ix": "^5.0.0",
"jose": "^4.15.1",
"lru-cache": "^10.0.1",
"mongodb": "^6.13.0",
"mongodb": "^6.14.1",
"node-fetch": "^3.3.2",
"pgwire": "github:kagis/pgwire#f1cb95f9a0f42a612bb5a6b67bb2eb793fc5fc87",
"ts-codec": "^1.3.0",