diff --git a/.changeset/rotten-pumas-protect.md b/.changeset/rotten-pumas-protect.md
new file mode 100644
index 000000000..65fc44e61
--- /dev/null
+++ b/.changeset/rotten-pumas-protect.md
@@ -0,0 +1,9 @@
+---
+'@powersync/service-core': minor
+'@powersync/service-module-mysql': minor
+'@powersync/service-sync-rules': minor
+---
+
+Introduced alpha support for MySQL as a datasource for replication.
+Various related cleanup changes.
+
diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index ba0dd13d0..3705cd600 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -22,7 +22,7 @@ jobs:
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- - name: Build and push
+ - name: Test Build Docker Image
uses: docker/build-push-action@v5
with:
cache-from: type=registry,ref=stevenontong/${{vars.DOCKER_REGISTRY}}:cache
diff --git a/README.md b/README.md
index b13fc6c77..8b994b142 100644
--- a/README.md
+++ b/README.md
@@ -2,7 +2,7 @@
-*[PowerSync](https://www.powersync.com) is a sync engine for building local-first apps with instantly-responsive UI/UX and simplified state transfer. Syncs between SQLite on the client-side and Postgres or MongoDB on the server-side (MySQL coming soon).*
+_[PowerSync](https://www.powersync.com) is a sync engine for building local-first apps with instantly-responsive UI/UX and simplified state transfer. Syncs between SQLite on the client-side and Postgres or MongoDB on the server-side (MySQL coming soon)._
# PowerSync Service
diff --git a/modules/module-mongodb/package.json b/modules/module-mongodb/package.json
index 5809bed99..16b65a4ab 100644
--- a/modules/module-mongodb/package.json
+++ b/modules/module-mongodb/package.json
@@ -12,7 +12,7 @@
"scripts": {
"build": "tsc -b",
"build:tests": "tsc -b test/tsconfig.json",
- "clean": "rm -rf ./lib && tsc -b --clean",
+ "clean": "rm -rf ./dist && tsc -b --clean",
"test": "vitest --no-threads"
},
"exports": {
@@ -41,7 +41,7 @@
"devDependencies": {
"@types/uuid": "^9.0.4",
"typescript": "^5.2.2",
- "vitest": "^0.34.6",
+ "vitest": "^2.1.1",
"vite-tsconfig-paths": "^4.3.2"
}
}
diff --git a/modules/module-mongodb/src/api/MongoRouteAPIAdapter.ts b/modules/module-mongodb/src/api/MongoRouteAPIAdapter.ts
index d58d2cf58..dd611ce9b 100644
--- a/modules/module-mongodb/src/api/MongoRouteAPIAdapter.ts
+++ b/modules/module-mongodb/src/api/MongoRouteAPIAdapter.ts
@@ -33,7 +33,7 @@ export class MongoRouteAPIAdapter implements api.RouteAPI {
await this.client.close();
}
- async getSourceConfig(): Promise {
+ async getSourceConfig(): Promise {
return this.config;
}
@@ -165,7 +165,7 @@ export class MongoRouteAPIAdapter implements api.RouteAPI {
return result;
}
- async getReplicationLag(syncRulesId: string): Promise<number | undefined> {
+ async getReplicationLag(options: api.ReplicationLagOptions): Promise<number | undefined> {
// There is no fast way to get replication lag in bytes in MongoDB.
// We can get replication lag in seconds, but need a different API for that.
return undefined;
diff --git a/modules/module-mongodb/src/index.ts b/modules/module-mongodb/src/index.ts
index 4cfc25695..6ecba2a8e 100644
--- a/modules/module-mongodb/src/index.ts
+++ b/modules/module-mongodb/src/index.ts
@@ -1,5 +1 @@
-import { MongoModule } from './module/MongoModule.js';
-
-export const module = new MongoModule();
-
-export default module;
+export * from './module/MongoModule.js';
diff --git a/modules/module-mongodb/src/module/MongoModule.ts b/modules/module-mongodb/src/module/MongoModule.ts
index 3f6e27636..bbd9ab869 100644
--- a/modules/module-mongodb/src/module/MongoModule.ts
+++ b/modules/module-mongodb/src/module/MongoModule.ts
@@ -4,6 +4,8 @@ import { ConnectionManagerFactory } from '../replication/ConnectionManagerFactor
import { MongoErrorRateLimiter } from '../replication/MongoErrorRateLimiter.js';
import { ChangeStreamReplicator } from '../replication/ChangeStreamReplicator.js';
import * as types from '../types/types.js';
+import { MongoManager } from '../replication/MongoManager.js';
+import { checkSourceConfiguration } from '../replication/replication-utils.js';
export class MongoModule extends replication.ReplicationModule<types.MongoConnectionConfig> {
constructor() {
@@ -49,4 +51,15 @@ export class MongoModule extends replication.ReplicationModule {
// TODO: Implement?
}
+
+ async testConnection(config: types.MongoConnectionConfig): Promise<void> {
+ this.decodeConfig(config);
+ const normalisedConfig = this.resolveConfig(this.decodedConfig!);
+ const connectionManager = new MongoManager(normalisedConfig);
+ try {
+ return checkSourceConfiguration(connectionManager);
+ } finally {
+ await connectionManager.end();
+ }
+ }
}
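The new `testConnection` hook validates a source connection before replication starts. A minimal usage sketch (the exact `MongoConnectionConfig` fields are defined in `types/types.ts` and left abstract here):

```ts
import { MongoModule } from './module/MongoModule.js';
import * as types from './types/types.js';

// Assumed: an already-decoded config object; only its `type` field appears in this diff.
declare const config: types.MongoConnectionConfig;

const mongoModule = new MongoModule();
// Rejects sharded clusters and standalone instances (see replication-utils.ts below).
await mongoModule.testConnection(config);
```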
diff --git a/modules/module-mongodb/src/replication/ChangeStream.ts b/modules/module-mongodb/src/replication/ChangeStream.ts
index 3d36e1814..b952a9d4d 100644
--- a/modules/module-mongodb/src/replication/ChangeStream.ts
+++ b/modules/module-mongodb/src/replication/ChangeStream.ts
@@ -1,6 +1,6 @@
import { container, logger } from '@powersync/lib-services-framework';
-import { Metrics, SourceEntityDescriptor, SourceTable, storage } from '@powersync/service-core';
-import { DatabaseInputRow, SqliteRow, SqlSyncRules, TablePattern, toSyncRulesRow } from '@powersync/service-sync-rules';
+import { Metrics, SaveOperationTag, SourceEntityDescriptor, SourceTable, storage } from '@powersync/service-core';
+import { DatabaseInputRow, SqliteRow, SqlSyncRules, TablePattern } from '@powersync/service-sync-rules';
import * as mongo from 'mongodb';
import { MongoManager } from './MongoManager.js';
import {
@@ -248,7 +248,7 @@ export class ChangeStream {
// This auto-flushes when the batch reaches its size limit
await batch.save({
- tag: 'insert',
+ tag: SaveOperationTag.INSERT,
sourceTable: table,
before: undefined,
beforeReplicaId: undefined,
@@ -330,7 +330,7 @@ export class ChangeStream {
if (change.operationType == 'insert') {
const baseRecord = constructAfterRecord(change.fullDocument);
return await batch.save({
- tag: 'insert',
+ tag: SaveOperationTag.INSERT,
sourceTable: table,
before: undefined,
beforeReplicaId: undefined,
@@ -341,7 +341,7 @@ export class ChangeStream {
if (change.fullDocument == null) {
// Treat as delete
return await batch.save({
- tag: 'delete',
+ tag: SaveOperationTag.DELETE,
sourceTable: table,
before: undefined,
beforeReplicaId: change.documentKey._id
@@ -349,7 +349,7 @@ export class ChangeStream {
}
const after = constructAfterRecord(change.fullDocument!);
return await batch.save({
- tag: 'update',
+ tag: SaveOperationTag.UPDATE,
sourceTable: table,
before: undefined,
beforeReplicaId: undefined,
@@ -358,7 +358,7 @@ export class ChangeStream {
});
} else if (change.operationType == 'delete') {
return await batch.save({
- tag: 'delete',
+ tag: SaveOperationTag.DELETE,
sourceTable: table,
before: undefined,
beforeReplicaId: change.documentKey._id
diff --git a/modules/module-mongodb/src/replication/ChangeStreamReplicationJob.ts b/modules/module-mongodb/src/replication/ChangeStreamReplicationJob.ts
index fb60e7aa7..06583d571 100644
--- a/modules/module-mongodb/src/replication/ChangeStreamReplicationJob.ts
+++ b/modules/module-mongodb/src/replication/ChangeStreamReplicationJob.ts
@@ -1,5 +1,4 @@
import { container } from '@powersync/lib-services-framework';
-import { MongoManager } from './MongoManager.js';
import { MissingReplicationSlotError, ChangeStream } from './ChangeStream.js';
import { replication } from '@powersync/service-core';
@@ -13,12 +12,10 @@ export interface ChangeStreamReplicationJobOptions extends replication.AbstractR
export class ChangeStreamReplicationJob extends replication.AbstractReplicationJob {
private connectionFactory: ConnectionManagerFactory;
- private readonly connectionManager: MongoManager;
constructor(options: ChangeStreamReplicationJobOptions) {
super(options);
this.connectionFactory = options.connectionFactory;
- this.connectionManager = this.connectionFactory.create();
}
async cleanUp(): Promise<void> {
diff --git a/modules/module-mongodb/src/replication/ChangeStreamReplicator.ts b/modules/module-mongodb/src/replication/ChangeStreamReplicator.ts
index 84b2c7f68..2cf96c494 100644
--- a/modules/module-mongodb/src/replication/ChangeStreamReplicator.ts
+++ b/modules/module-mongodb/src/replication/ChangeStreamReplicator.ts
@@ -3,14 +3,14 @@ import { ChangeStreamReplicationJob } from './ChangeStreamReplicationJob.js';
import { ConnectionManagerFactory } from './ConnectionManagerFactory.js';
import { MongoErrorRateLimiter } from './MongoErrorRateLimiter.js';
-export interface WalStreamReplicatorOptions extends replication.AbstractReplicatorOptions {
+export interface ChangeStreamReplicatorOptions extends replication.AbstractReplicatorOptions {
connectionFactory: ConnectionManagerFactory;
}
export class ChangeStreamReplicator extends replication.AbstractReplicator<ChangeStreamReplicationJob> {
private readonly connectionFactory: ConnectionManagerFactory;
- constructor(options: WalStreamReplicatorOptions) {
+ constructor(options: ChangeStreamReplicatorOptions) {
super(options);
this.connectionFactory = options.connectionFactory;
}
diff --git a/modules/module-mongodb/src/replication/replication-utils.ts b/modules/module-mongodb/src/replication/replication-utils.ts
new file mode 100644
index 000000000..71a521d77
--- /dev/null
+++ b/modules/module-mongodb/src/replication/replication-utils.ts
@@ -0,0 +1,12 @@
+import * as mongo from 'mongodb';
+import { MongoManager } from './MongoManager.js';
+
+export async function checkSourceConfiguration(connectionManager: MongoManager): Promise<void> {
+ const db = connectionManager.db;
+ const hello = await db.command({ hello: 1 });
+ if (hello.msg == 'isdbgrid') {
+ throw new Error('Sharded MongoDB Clusters are not supported yet (including MongoDB Serverless instances).');
+ } else if (hello.setName == null) {
+ throw new Error('Standalone MongoDB instances are not supported - use a replica set.');
+ }
+}
diff --git a/modules/module-mongodb/src/types/types.ts b/modules/module-mongodb/src/types/types.ts
index 572a8b4dd..ac7873876 100644
--- a/modules/module-mongodb/src/types/types.ts
+++ b/modules/module-mongodb/src/types/types.ts
@@ -15,7 +15,7 @@ export interface NormalizedMongoConnectionConfig {
password?: string;
}
-export const MongoConnectionConfig = service_types.configFile.dataSourceConfig.and(
+export const MongoConnectionConfig = service_types.configFile.DataSourceConfig.and(
t.object({
type: t.literal(MONGO_CONNECTION_TYPE),
/** Unique identifier for the connection - optional when a single connection is present. */
diff --git a/modules/module-mongodb/test/src/change_stream.test.ts b/modules/module-mongodb/test/src/change_stream.test.ts
index f950e4f35..2b5c949d4 100644
--- a/modules/module-mongodb/test/src/change_stream.test.ts
+++ b/modules/module-mongodb/test/src/change_stream.test.ts
@@ -3,7 +3,7 @@ import { MONGO_STORAGE_FACTORY } from '@core-tests/util.js';
import { BucketStorageFactory } from '@powersync/service-core';
import * as crypto from 'crypto';
import { describe, expect, test } from 'vitest';
-import { walStreamTest } from './change_stream_utils.js';
+import { changeStreamTest } from './change_stream_utils.js';
import * as mongo from 'mongodb';
import { setTimeout } from 'node:timers/promises';
@@ -19,15 +19,15 @@ bucket_definitions:
describe(
'change stream - mongodb',
function () {
- defineWalStreamTests(MONGO_STORAGE_FACTORY);
+ defineChangeStreamTests(MONGO_STORAGE_FACTORY);
},
{ timeout: 20_000 }
);
-function defineWalStreamTests(factory: StorageFactory) {
+function defineChangeStreamTests(factory: StorageFactory) {
test(
'replicating basic values',
- walStreamTest(factory, async (context) => {
+ changeStreamTest(factory, async (context) => {
const { db } = context;
await context.updateSyncRules(`
bucket_definitions:
@@ -66,7 +66,7 @@ bucket_definitions:
test(
'no fullDocument available',
- walStreamTest(factory, async (context) => {
+ changeStreamTest(factory, async (context) => {
const { db, client } = context;
await context.updateSyncRules(`
bucket_definitions:
@@ -111,7 +111,7 @@ bucket_definitions:
test(
'replicating case sensitive table',
- walStreamTest(factory, async (context) => {
+ changeStreamTest(factory, async (context) => {
const { db } = context;
await context.updateSyncRules(`
bucket_definitions:
@@ -136,7 +136,7 @@ bucket_definitions:
test(
'replicating large values',
- walStreamTest(factory, async (context) => {
+ changeStreamTest(factory, async (context) => {
const { db } = context;
await context.updateSyncRules(`
bucket_definitions:
@@ -168,7 +168,7 @@ bucket_definitions:
test(
'replicating dropCollection',
- walStreamTest(factory, async (context) => {
+ changeStreamTest(factory, async (context) => {
const { db } = context;
const syncRuleContent = `
bucket_definitions:
@@ -200,7 +200,7 @@ bucket_definitions:
test(
'replicating renameCollection',
- walStreamTest(factory, async (context) => {
+ changeStreamTest(factory, async (context) => {
const { db } = context;
const syncRuleContent = `
bucket_definitions:
@@ -232,7 +232,7 @@ bucket_definitions:
test(
'initial sync',
- walStreamTest(factory, async (context) => {
+ changeStreamTest(factory, async (context) => {
const { db } = context;
await context.updateSyncRules(BASIC_SYNC_RULES);
@@ -251,7 +251,7 @@ bucket_definitions:
// Not correctly implemented yet
test.skip(
'large record',
- walStreamTest(factory, async (context) => {
+ changeStreamTest(factory, async (context) => {
await context.updateSyncRules(`bucket_definitions:
global:
data:
@@ -287,7 +287,7 @@ bucket_definitions:
test(
'table not in sync rules',
- walStreamTest(factory, async (context) => {
+ changeStreamTest(factory, async (context) => {
const { db } = context;
await context.updateSyncRules(BASIC_SYNC_RULES);
diff --git a/modules/module-mongodb/test/src/change_stream_utils.ts b/modules/module-mongodb/test/src/change_stream_utils.ts
index 19148695e..e533c56ca 100644
--- a/modules/module-mongodb/test/src/change_stream_utils.ts
+++ b/modules/module-mongodb/test/src/change_stream_utils.ts
@@ -8,12 +8,12 @@ import * as mongo from 'mongodb';
import { createCheckpoint } from '@module/replication/MongoRelation.js';
/**
- * Tests operating on the wal stream need to configure the stream and manage asynchronous
+ * Tests operating on the mongo change stream need to configure the stream and manage asynchronous
* replication, which gets a little tricky.
*
* This wraps a test in a function that configures all the context, and tears it down afterwards.
*/
-export function walStreamTest(
+export function changeStreamTest(
factory: () => Promise<BucketStorageFactory>,
test: (context: ChangeStreamTestContext) => Promise<void>
): () => Promise<void> {
diff --git a/modules/module-mysql/LICENSE b/modules/module-mysql/LICENSE
new file mode 100644
index 000000000..c8efd46cc
--- /dev/null
+++ b/modules/module-mysql/LICENSE
@@ -0,0 +1,67 @@
+# Functional Source License, Version 1.1, Apache 2.0 Future License
+
+## Abbreviation
+
+FSL-1.1-Apache-2.0
+
+## Notice
+
+Copyright 2023-2024 Journey Mobile, Inc.
+
+## Terms and Conditions
+
+### Licensor ("We")
+
+The party offering the Software under these Terms and Conditions.
+
+### The Software
+
+The "Software" is each version of the software that we make available under these Terms and Conditions, as indicated by our inclusion of these Terms and Conditions with the Software.
+
+### License Grant
+
+Subject to your compliance with this License Grant and the Patents, Redistribution and Trademark clauses below, we hereby grant you the right to use, copy, modify, create derivative works, publicly perform, publicly display and redistribute the Software for any Permitted Purpose identified below.
+
+### Permitted Purpose
+
+A Permitted Purpose is any purpose other than a Competing Use. A Competing Use means making the Software available to others in a commercial product or service that:
+
+1. substitutes for the Software;
+2. substitutes for any other product or service we offer using the Software that exists as of the date we make the Software available; or
+3. offers the same or substantially similar functionality as the Software.
+
+Permitted Purposes specifically include using the Software:
+
+1. for your internal use and access;
+2. for non-commercial education;
+3. for non-commercial research; and
+4. in connection with professional services that you provide to a licensee using the Software in accordance with these Terms and Conditions.
+
+### Patents
+
+To the extent your use for a Permitted Purpose would necessarily infringe our patents, the license grant above includes a license under our patents. If you make a claim against any party that the Software infringes or contributes to the infringement of any patent, then your patent license to the Software ends immediately.
+
+### Redistribution
+
+The Terms and Conditions apply to all copies, modifications and derivatives of the Software.
+If you redistribute any copies, modifications or derivatives of the Software, you must include a copy of or a link to these Terms and Conditions and not remove any copyright notices provided in or with the Software.
+
+### Disclaimer
+
+THE SOFTWARE IS PROVIDED "AS IS" AND WITHOUT WARRANTIES OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION WARRANTIES OF FITNESS FOR A PARTICULAR PURPOSE, MERCHANTABILITY, TITLE OR NON-INFRINGEMENT.
+IN NO EVENT WILL WE HAVE ANY LIABILITY TO YOU ARISING OUT OF OR RELATED TO THE SOFTWARE, INCLUDING INDIRECT, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES, EVEN IF WE HAVE BEEN INFORMED OF THEIR POSSIBILITY IN ADVANCE.
+
+### Trademarks
+
+Except for displaying the License Details and identifying us as the origin of the Software, you have no right under these Terms and Conditions to use our trademarks, trade names, service marks or product names.
+
+## Grant of Future License
+
+We hereby irrevocably grant you an additional license to use the Software under the Apache License, Version 2.0 that is effective on the second anniversary of the date we make the Software available. On or after that date, you may use the Software under the Apache License, Version 2.0, in which case the following will apply:
+
+Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
diff --git a/modules/module-mysql/README.md b/modules/module-mysql/README.md
new file mode 100644
index 000000000..93b33d14c
--- /dev/null
+++ b/modules/module-mysql/README.md
@@ -0,0 +1,3 @@
+# PowerSync MySQL Module
+
+This is a module which provides MySQL replication to PowerSync.
diff --git a/modules/module-mysql/dev/.env.template b/modules/module-mysql/dev/.env.template
new file mode 100644
index 000000000..d82ac9668
--- /dev/null
+++ b/modules/module-mysql/dev/.env.template
@@ -0,0 +1,2 @@
+PS_MONGO_URI=mongodb://mongo:27017/powersync_demo
+PS_PORT=8080
\ No newline at end of file
diff --git a/modules/module-mysql/dev/README.md b/modules/module-mysql/dev/README.md
new file mode 100644
index 000000000..fe62ef533
--- /dev/null
+++ b/modules/module-mysql/dev/README.md
@@ -0,0 +1,9 @@
+# MySQL Development Helpers
+
+This folder contains some helpers for developing with MySQL.
+
+- `./.env.template` contains basic settings to be applied to a root `.env` file
+- `./config` contains YAML configuration files for a MySQL todo list application
+- `./docker/mysql` contains a Docker Compose file for starting MySQL
+
+TODO: this setup does not include any auth or backend functionality.
diff --git a/modules/module-mysql/dev/config/sync_rules.yaml b/modules/module-mysql/dev/config/sync_rules.yaml
new file mode 100644
index 000000000..eb74a7198
--- /dev/null
+++ b/modules/module-mysql/dev/config/sync_rules.yaml
@@ -0,0 +1,12 @@
+# See Documentation for more information:
+# https://docs.powersync.com/usage/sync-rules
+# Note that changes to this file are not watched.
+# The service needs to be restarted for changes to take effect.
+
+# Note that specifying the schema is currently required: the default schema
+# is `public`, but in MySQL the schema is the database name.
+bucket_definitions:
+ global:
+ data:
+ - SELECT * FROM mydatabase.lists
+ - SELECT * FROM mydatabase.todos
diff --git a/modules/module-mysql/dev/docker/mysql/docker-compose.yaml b/modules/module-mysql/dev/docker/mysql/docker-compose.yaml
new file mode 100644
index 000000000..50dfd2d2b
--- /dev/null
+++ b/modules/module-mysql/dev/docker/mysql/docker-compose.yaml
@@ -0,0 +1,17 @@
+services:
+ mysql:
+ image: mysql:8.0
+ environment:
+ MYSQL_ROOT_PASSWORD: root_password
+ MYSQL_DATABASE: mydatabase
+ MYSQL_USER: myuser
+ MYSQL_PASSWORD: mypassword
+ ports:
+ - '3306:3306'
+ volumes:
+ - ./init-scripts/my.cnf:/etc/mysql/my.cnf
+ - ./init-scripts/mysql.sql:/docker-entrypoint-initdb.d/init_user.sql
+ - mysql_data:/var/lib/mysql
+
+volumes:
+ mysql_data:
diff --git a/modules/module-mysql/dev/docker/mysql/init-scripts/my.cnf b/modules/module-mysql/dev/docker/mysql/init-scripts/my.cnf
new file mode 100644
index 000000000..99f01c70a
--- /dev/null
+++ b/modules/module-mysql/dev/docker/mysql/init-scripts/my.cnf
@@ -0,0 +1,9 @@
+[mysqld]
+gtid_mode = ON
+enforce-gtid-consistency = ON
+# Row format required for ZongJi
+binlog_format = row
+log_bin=mysql-bin
+server-id=1
+binlog-do-db=mydatabase
+replicate-do-table=mydatabase.lists
\ No newline at end of file
diff --git a/modules/module-mysql/dev/docker/mysql/init-scripts/mysql.sql b/modules/module-mysql/dev/docker/mysql/init-scripts/mysql.sql
new file mode 100644
index 000000000..8e5cb3538
--- /dev/null
+++ b/modules/module-mysql/dev/docker/mysql/init-scripts/mysql.sql
@@ -0,0 +1,38 @@
+-- Create a user with necessary privileges
+CREATE USER 'repl_user'@'%' IDENTIFIED BY 'good_password';
+
+-- Grant replication client privilege
+GRANT REPLICATION SLAVE, REPLICATION CLIENT, RELOAD ON *.* TO 'repl_user'@'%';
+GRANT REPLICATION SLAVE, REPLICATION CLIENT, RELOAD ON *.* TO 'myuser'@'%';
+
+-- Grant access to the specific database
+GRANT ALL PRIVILEGES ON mydatabase.* TO 'repl_user'@'%';
+
+-- Apply changes
+FLUSH PRIVILEGES;
+
+CREATE TABLE lists (
+ id CHAR(36) NOT NULL DEFAULT (UUID()), -- String UUID (36 characters)
+ created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
+ name TEXT NOT NULL,
+ owner_id CHAR(36) NOT NULL,
+ PRIMARY KEY (id)
+);
+
+CREATE TABLE todos (
+ id CHAR(36) NOT NULL DEFAULT (UUID()), -- String UUID (36 characters)
+ created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
+ completed_at TIMESTAMP NULL,
+ description TEXT NOT NULL,
+ completed BOOLEAN NOT NULL DEFAULT FALSE,
+ created_by CHAR(36) NULL,
+ completed_by CHAR(36) NULL,
+ list_id CHAR(36) NOT NULL,
+ PRIMARY KEY (id),
+ FOREIGN KEY (list_id) REFERENCES lists (id) ON DELETE CASCADE
+);
+
+-- TODO fix case where no data is present
+INSERT INTO lists (id, name, owner_id)
+VALUES
+ (UUID(), 'Do a demo', UUID());
\ No newline at end of file
diff --git a/modules/module-mysql/package.json b/modules/module-mysql/package.json
new file mode 100644
index 000000000..4d16d74a8
--- /dev/null
+++ b/modules/module-mysql/package.json
@@ -0,0 +1,51 @@
+{
+ "name": "@powersync/service-module-mysql",
+ "repository": "https://github.com/powersync-ja/powersync-service",
+ "types": "dist/index.d.ts",
+ "version": "0.0.1",
+ "license": "FSL-1.1-Apache-2.0",
+ "main": "dist/index.js",
+ "type": "module",
+ "publishConfig": {
+ "access": "public"
+ },
+ "scripts": {
+ "build": "tsc -b",
+ "build:tests": "tsc -b test/tsconfig.json",
+ "clean": "rm -rf ./dist && tsc -b --clean",
+ "test": "vitest"
+ },
+ "exports": {
+ ".": {
+ "import": "./dist/index.js",
+ "require": "./dist/index.js",
+ "default": "./dist/index.js"
+ },
+ "./types": {
+ "import": "./dist/types/types.js",
+ "require": "./dist/types/types.js",
+ "default": "./dist/types/types.js"
+ }
+ },
+ "dependencies": {
+ "@powersync/lib-services-framework": "workspace:*",
+ "@powersync/service-core": "workspace:*",
+ "@powersync/service-sync-rules": "workspace:*",
+ "@powersync/service-types": "workspace:*",
+ "@powersync/mysql-zongji": "0.0.0-dev-20241023144335",
+ "semver": "^7.5.4",
+ "async": "^3.2.4",
+ "mysql2": "^3.11.0",
+ "ts-codec": "^1.2.2",
+ "uri-js": "^4.4.1",
+ "uuid": "^9.0.1"
+ },
+ "devDependencies": {
+ "@types/semver": "^7.5.4",
+ "@types/async": "^3.2.24",
+ "@types/uuid": "^9.0.4",
+ "typescript": "^5.5.4",
+ "vite-tsconfig-paths": "^4.3.2",
+ "vitest": "^2.1.1"
+ }
+}
diff --git a/modules/module-mysql/src/api/MySQLRouteAPIAdapter.ts b/modules/module-mysql/src/api/MySQLRouteAPIAdapter.ts
new file mode 100644
index 000000000..eb9c272af
--- /dev/null
+++ b/modules/module-mysql/src/api/MySQLRouteAPIAdapter.ts
@@ -0,0 +1,357 @@
+import { api, ParseSyncRulesOptions, storage } from '@powersync/service-core';
+
+import * as sync_rules from '@powersync/service-sync-rules';
+import * as service_types from '@powersync/service-types';
+import mysql from 'mysql2/promise';
+import * as common from '../common/common-index.js';
+import * as mysql_utils from '../utils/mysql_utils.js';
+import * as types from '../types/types.js';
+import { toExpressionTypeFromMySQLType } from '../common/common-index.js';
+
+type SchemaResult = {
+ schema_name: string;
+ table_name: string;
+ columns: Array<{ data_type: string; column_name: string }>;
+};
+
+export class MySQLRouteAPIAdapter implements api.RouteAPI {
+ protected pool: mysql.Pool;
+
+ constructor(protected config: types.ResolvedConnectionConfig) {
+ this.pool = mysql_utils.createPool(config).promise();
+ }
+
+ async shutdown(): Promise<void> {
+ return this.pool.end();
+ }
+
+ async getSourceConfig(): Promise {
+ return this.config;
+ }
+
+ getParseSyncRulesOptions(): ParseSyncRulesOptions {
+ return {
// In MySQL, schema and database are the same thing; the connection's database serves as the default schema.
+ defaultSchema: this.config.database
+ };
+ }
+
+ async getConnectionStatus(): Promise {
+ const base = {
+ id: this.config.id,
+ uri: `mysql://${this.config.hostname}:${this.config.port}/${this.config.database}`
+ };
+ try {
+ await this.retriedQuery({
+ query: `SELECT 'PowerSync connection test'`
+ });
+ } catch (e) {
+ return {
+ ...base,
+ connected: false,
+ errors: [{ level: 'fatal', message: `${e.code} - message: ${e.message}` }]
+ };
+ }
+ const connection = await this.pool.getConnection();
+ try {
+ const errors = await common.checkSourceConfiguration(connection);
+ if (errors.length) {
+ return {
+ ...base,
+ connected: true,
+ errors: errors.map((e) => ({ level: 'fatal', message: e }))
+ };
+ }
+ } catch (e) {
+ return {
+ ...base,
+ connected: true,
+ errors: [{ level: 'fatal', message: e.message }]
+ };
+ } finally {
+ connection.release();
+ }
+ return {
+ ...base,
+ connected: true,
+ errors: []
+ };
+ }
+
+ async executeQuery(query: string, params: any[]): Promise<service_types.internal_routes.ExecuteSqlResponse> {
+ if (!this.config.debug_api) {
+ return service_types.internal_routes.ExecuteSqlResponse.encode({
+ results: {
+ columns: [],
+ rows: []
+ },
+ success: false,
+ error: 'SQL querying is not enabled'
+ });
+ }
+ try {
+ const [results, fields] = await this.pool.query(query, params);
+ return service_types.internal_routes.ExecuteSqlResponse.encode({
+ success: true,
+ results: {
+ columns: fields.map((c) => c.name),
+ rows: results.map((row) => {
+ /**
* Each row is an object keyed by column name,
* e.g. [ { test: 2 } ]
+ */
+ return fields.map((c) => {
+ const value = row[c.name];
+ const sqlValue = sync_rules.toSyncRulesValue(value);
+ if (typeof sqlValue == 'bigint') {
+ return Number(value);
+ } else if (value instanceof Date) {
+ return value.toISOString();
+ } else if (sync_rules.isJsonValue(sqlValue)) {
+ return sqlValue;
+ } else {
+ return null;
+ }
+ });
+ })
+ }
+ });
+ } catch (e) {
+ return service_types.internal_routes.ExecuteSqlResponse.encode({
+ results: {
+ columns: [],
+ rows: []
+ },
+ success: false,
+ error: e.message
+ });
+ }
+ }
+
+ async getDebugTablesInfo(
+ tablePatterns: sync_rules.TablePattern[],
+ sqlSyncRules: sync_rules.SqlSyncRules
): Promise<api.PatternResult[]> {
+ let result: api.PatternResult[] = [];
+
+ for (let tablePattern of tablePatterns) {
+ const schema = tablePattern.schema;
+ let patternResult: api.PatternResult = {
+ schema: schema,
+ pattern: tablePattern.tablePattern,
+ wildcard: tablePattern.isWildcard
+ };
+ result.push(patternResult);
+
+ if (tablePattern.isWildcard) {
+ patternResult.tables = [];
+ const prefix = tablePattern.tablePrefix;
+
+ const [results] = await this.pool.query(
+ `SELECT
+ TABLE_NAME AS table_name
+ FROM
+ INFORMATION_SCHEMA.TABLES
+ WHERE
+ TABLE_SCHEMA = ?
+ AND TABLE_NAME LIKE ?`,
+ [schema, tablePattern.tablePattern]
+ );
+
+ for (let row of results) {
+ const name = row.table_name as string;
+
+ if (!name.startsWith(prefix)) {
+ continue;
+ }
+
+ const details = await this.getDebugTableInfo(tablePattern, name, sqlSyncRules);
+ patternResult.tables.push(details);
+ }
+ } else {
+ const [results] = await this.pool.query(
+ `SELECT
+ TABLE_NAME AS table_name
+ FROM
+ INFORMATION_SCHEMA.TABLES
+ WHERE
+ TABLE_SCHEMA = ?
+ AND TABLE_NAME = ?`,
+ [tablePattern.schema, tablePattern.tablePattern]
+ );
+
+ if (results.length == 0) {
+ // Table not found
+ patternResult.table = await this.getDebugTableInfo(tablePattern, tablePattern.name, sqlSyncRules);
+ } else {
+ const row = results[0];
+ patternResult.table = await this.getDebugTableInfo(tablePattern, row.table_name, sqlSyncRules);
+ }
+ }
+ }
+
+ return result;
+ }
+
+ protected async getDebugTableInfo(
+ tablePattern: sync_rules.TablePattern,
+ tableName: string,
+ syncRules: sync_rules.SqlSyncRules
+ ): Promise<service_types.TableInfo> {
+ const { schema } = tablePattern;
+
+ let idColumnsResult: common.ReplicationIdentityColumnsResult | null = null;
+ let idColumnsError: service_types.ReplicationError | null = null;
+ let connection: mysql.PoolConnection | null = null;
+ try {
+ connection = await this.pool.getConnection();
+ idColumnsResult = await common.getReplicationIdentityColumns({
+ connection: connection,
+ schema,
+ table_name: tableName
+ });
+ } catch (ex) {
+ idColumnsError = { level: 'fatal', message: ex.message };
+ } finally {
+ connection?.release();
+ }
+
+ const idColumns = idColumnsResult?.columns ?? [];
+ const sourceTable = new storage.SourceTable(0, this.config.tag, tableName, schema, tableName, idColumns, true);
+ const syncData = syncRules.tableSyncsData(sourceTable);
+ const syncParameters = syncRules.tableSyncsParameters(sourceTable);
+
+ if (idColumns.length == 0 && idColumnsError == null) {
+ let message = `No replication id found for ${sourceTable.qualifiedName}. Replica identity: ${idColumnsResult?.identity}.`;
+ if (idColumnsResult?.identity == 'default') {
+ message += ' Configure a primary key on the table.';
+ }
+ idColumnsError = { level: 'fatal', message };
+ }
+
+ let selectError: service_types.ReplicationError | null = null;
+ try {
+ await this.retriedQuery({
+ query: `SELECT * FROM ${sourceTable.table} LIMIT 1`
+ });
+ } catch (e) {
+ selectError = { level: 'fatal', message: e.message };
+ }
+
+ return {
+ schema: schema,
+ name: tableName,
+ pattern: tablePattern.isWildcard ? tablePattern.tablePattern : undefined,
+ replication_id: idColumns.map((c) => c.name),
+ data_queries: syncData,
+ parameter_queries: syncParameters,
+ errors: [idColumnsError, selectError].filter((error) => error != null) as service_types.ReplicationError[]
+ };
+ }
+
+ async getReplicationLag(options: api.ReplicationLagOptions): Promise<number> {
+ const { bucketStorage } = options;
+ const lastCheckpoint = await bucketStorage.getCheckpoint();
+
+ const current = lastCheckpoint.lsn
+ ? common.ReplicatedGTID.fromSerialized(lastCheckpoint.lsn)
+ : common.ReplicatedGTID.ZERO;
+
+ const connection = await this.pool.getConnection();
+ const head = await common.readExecutedGtid(connection);
+ const lag = await current.distanceTo(connection, head);
+ connection.release();
+ if (lag == null) {
+ throw new Error(`Could not determine replication lag`);
+ }
+
+ return lag;
+ }
+
+ async getReplicationHead(): Promise<string> {
+ const connection = await this.pool.getConnection();
+ const result = await common.readExecutedGtid(connection);
+ connection.release();
+ return result.comparable;
+ }
+
+ async getConnectionSchema(): Promise<service_types.DatabaseSchema[]> {
+ const [results] = await this.retriedQuery({
+ query: `
+ SELECT
+ tbl.schema_name,
+ tbl.table_name,
+ tbl.quoted_name,
+ JSON_ARRAYAGG(JSON_OBJECT('column_name', a.column_name, 'data_type', a.data_type)) AS columns
+ FROM
+ (
+ SELECT
+ TABLE_SCHEMA AS schema_name,
+ TABLE_NAME AS table_name,
+ CONCAT('\`', TABLE_SCHEMA, '\`.\`', TABLE_NAME, '\`') AS quoted_name
+ FROM
+ INFORMATION_SCHEMA.TABLES
+ WHERE
+ TABLE_TYPE = 'BASE TABLE'
+ AND TABLE_SCHEMA NOT IN ('information_schema', 'mysql', 'performance_schema', 'sys')
+ ) AS tbl
+ LEFT JOIN
+ (
+ SELECT
+ TABLE_SCHEMA AS schema_name,
+ TABLE_NAME AS table_name,
+ COLUMN_NAME AS column_name,
+ COLUMN_TYPE AS data_type
+ FROM
+ INFORMATION_SCHEMA.COLUMNS
+ ) AS a
+ ON
+ tbl.schema_name = a.schema_name
+ AND tbl.table_name = a.table_name
+ GROUP BY
+ tbl.schema_name, tbl.table_name, tbl.quoted_name;
+ `
+ });
+
+ /**
+ * Reduces the SQL results into a Record of {@link DatabaseSchema}
+ * then returns the values as an array.
+ */
+
+ return Object.values(
(results as SchemaResult[]).reduce((hash: Record<string, service_types.DatabaseSchema>, result) => {
+ const schema =
+ hash[result.schema_name] ||
+ (hash[result.schema_name] = {
+ name: result.schema_name,
+ tables: []
+ });
+
+ schema.tables.push({
+ name: result.table_name,
+ columns: result.columns.map((column) => ({
+ name: column.column_name,
+ type: column.data_type,
+ sqlite_type: toExpressionTypeFromMySQLType(column.data_type).typeFlags,
+ internal_type: column.data_type,
+ pg_type: column.data_type
+ }))
+ });
+
+ return hash;
+ }, {})
+ );
+ }
+
+ protected async retriedQuery(options: { query: string; params?: any[] }) {
+ const connection = await this.pool.getConnection();
+
+ return mysql_utils
+ .retriedQuery({
+ connection: connection,
+ query: options.query,
+ params: options.params
+ })
+ .finally(() => connection.release());
+ }
+}
diff --git a/modules/module-mysql/src/common/ReplicatedGTID.ts b/modules/module-mysql/src/common/ReplicatedGTID.ts
new file mode 100644
index 000000000..7d28b0d3a
--- /dev/null
+++ b/modules/module-mysql/src/common/ReplicatedGTID.ts
@@ -0,0 +1,158 @@
+import mysql from 'mysql2/promise';
+import * as uuid from 'uuid';
+import * as mysql_utils from '../utils/mysql_utils.js';
+
+export type BinLogPosition = {
+ filename: string;
+ offset: number;
+};
+
+export type ReplicatedGTIDSpecification = {
+ raw_gtid: string;
+ /**
+ * The (end) position in a BinLog file where this transaction has been replicated in.
+ */
+ position: BinLogPosition;
+};
+
+export type BinLogGTIDFormat = {
+ server_id: Buffer;
+ transaction_range: number;
+};
+
+export type BinLogGTIDEvent = {
+ raw_gtid: BinLogGTIDFormat;
+ position: BinLogPosition;
+};
+
+/**
+ * A wrapper around the MySQL GTID value.
+ * This adds and tracks additional metadata such as the BinLog filename
+ * and position where this GTID could be located.
+ */
+export class ReplicatedGTID {
+ static fromSerialized(comparable: string): ReplicatedGTID {
+ return new ReplicatedGTID(ReplicatedGTID.deserialize(comparable));
+ }
+
+ private static deserialize(comparable: string): ReplicatedGTIDSpecification {
+ const components = comparable.split('|');
+ if (components.length < 4) {
+ throw new Error(`Invalid serialized GTID: ${comparable}`);
+ }
+
+ return {
+ raw_gtid: components[1],
+ position: {
+ filename: components[2],
+ offset: parseInt(components[3])
+ } satisfies BinLogPosition
+ };
+ }
+
+ static fromBinLogEvent(event: BinLogGTIDEvent) {
+ const { raw_gtid, position } = event;
+ const stringGTID = `${uuid.stringify(raw_gtid.server_id)}:${raw_gtid.transaction_range}`;
+ return new ReplicatedGTID({
+ raw_gtid: stringGTID,
+ position
+ });
+ }
+
+ /**
+ * Special case for the zero GTID which means no transactions have been executed.
+ */
+ static ZERO = new ReplicatedGTID({ raw_gtid: '0:0', position: { filename: '', offset: 0 } });
+
+ constructor(protected options: ReplicatedGTIDSpecification) {}
+
+ /**
+ * Get the BinLog position of this replicated GTID event
+ */
+ get position() {
+ return this.options.position;
+ }
+
+ /**
+ * Get the raw Global Transaction ID. This is of the format `server_id:transaction_ranges`
+ */
+ get raw() {
+ return this.options.raw_gtid;
+ }
+
+ get serverId() {
+ return this.options.raw_gtid.split(':')[0];
+ }
+
+ /**
+ * Transforms a GTID into a comparable string format, ensuring lexicographical
+ * order aligns with the GTID's relative age. This assumes that all GTIDs
+ * have the same server ID.
+ *
+ * @returns A comparable string in the format
+ * `padded_end_transaction|raw_gtid|binlog_filename|binlog_position`
+ */
+ get comparable() {
+ const { raw, position } = this;
+ const [, transactionRanges] = this.raw.split(':');
+
+ let maxTransactionId = 0;
+
+ for (const range of transactionRanges.split(',')) {
+ const [start, end] = range.split('-');
+ maxTransactionId = Math.max(maxTransactionId, parseInt(start, 10), parseInt(end || start, 10));
+ }
+
+ const paddedTransactionId = maxTransactionId.toString().padStart(16, '0');
+ return [paddedTransactionId, raw, position.filename, position.offset].join('|');
+ }
+
+ toString() {
+ return this.comparable;
+ }
+
+ /**
+ * Calculates the distance in bytes from this GTID to the provided argument.
+ */
+ async distanceTo(connection: mysql.Connection, to: ReplicatedGTID): Promise<number | null> {
+ const [logFiles] = await mysql_utils.retriedQuery({
+ connection,
+ query: `SHOW BINARY LOGS;`
+ });
+
+ // Default to the first file for the start to handle the zero GTID case.
+ const startFileIndex = Math.max(
+ logFiles.findIndex((f) => f['Log_name'] == this.position.filename),
+ 0
+ );
+ const startFileEntry = logFiles[startFileIndex];
+
+ if (!startFileEntry) {
+ return null;
+ }
+
+ /**
+ * Fall back to the next position for comparison if the replicated position is not present
+ */
+ const endPosition = to.position;
+
+ // Default to past the last file to cater for the HEAD case
+ const testEndFileIndex = logFiles.findIndex((f) => f['Log_name'] == endPosition?.filename);
+ // If the endPosition is not found, fall back to the last file as the end
+ const endFileIndex = testEndFileIndex < 0 && !endPosition ? logFiles.length : logFiles.length - 1;
+
+ const endFileEntry = logFiles[endFileIndex];
+
+ if (!endFileEntry) {
+ return null;
+ }
+
+ return (
+ startFileEntry['File_size'] -
+ this.position.offset -
+ endFileEntry['File_size'] +
+ endPosition.offset +
+ logFiles.slice(startFileIndex + 1, endFileIndex).reduce((sum, file) => sum + file['File_size'], 0)
+ );
+ }
+}
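To make the `comparable` encoding concrete, a small sketch with made-up GTID values; the zero-padded transaction counter is what keeps lexicographic ordering aligned with replication progress:

```ts
import { ReplicatedGTID } from './ReplicatedGTID.js';

// Hypothetical GTIDs from the same server: transactions 1-5 vs 1-12.
const older = new ReplicatedGTID({
  raw_gtid: '3e11fa47-71ca-11e1-9e33-c80aa9429562:1-5',
  position: { filename: 'mysql-bin.000001', offset: 4 }
});
const newer = new ReplicatedGTID({
  raw_gtid: '3e11fa47-71ca-11e1-9e33-c80aa9429562:1-12',
  position: { filename: 'mysql-bin.000002', offset: 1024 }
});

// '0000000000000005|...' < '0000000000000012|...'
console.log(older.comparable < newer.comparable); // true

// Round-tripping through the serialized form preserves the raw GTID.
const restored = ReplicatedGTID.fromSerialized(newer.comparable);
console.log(restored.raw === newer.raw); // true
```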
diff --git a/modules/module-mysql/src/common/check-source-configuration.ts b/modules/module-mysql/src/common/check-source-configuration.ts
new file mode 100644
index 000000000..6572a1066
--- /dev/null
+++ b/modules/module-mysql/src/common/check-source-configuration.ts
@@ -0,0 +1,59 @@
+import mysqlPromise from 'mysql2/promise';
+import * as mysql_utils from '../utils/mysql_utils.js';
+
+export async function checkSourceConfiguration(connection: mysqlPromise.Connection): Promise<string[]> {
+ const errors: string[] = [];
+ const [[result]] = await mysql_utils.retriedQuery({
+ connection,
+ query: `
+ SELECT
+ @@GLOBAL.gtid_mode AS gtid_mode,
+ @@GLOBAL.log_bin AS log_bin,
+ @@GLOBAL.server_id AS server_id,
+ @@GLOBAL.log_bin_basename AS binlog_file,
+ @@GLOBAL.log_bin_index AS binlog_index_file
+ `
+ });
+
+ if (result.gtid_mode != 'ON') {
+ errors.push(`GTID is not enabled; it is currently set to ${result.gtid_mode}. Please enable it.`);
+ }
+
+ if (result.log_bin != 1) {
+ errors.push('Binary logging is not enabled. Please enable it.');
+ }
+
+ if (result.server_id <= 0) {
+ errors.push(
+ `Your Server ID setting is too low; it must be greater than 0. It is currently ${result.server_id}. Please correct your configuration.`
+ );
+ }
+
+ if (!result.binlog_file) {
+ errors.push('Binary log file is not set. Please check your settings.');
+ }
+
+ if (!result.binlog_index_file) {
+ errors.push('Binary log index file is not set. Please check your settings.');
+ }
+
+ const [[binLogFormatResult]] = await mysql_utils.retriedQuery({
+ connection,
+ query: `SHOW VARIABLES LIKE 'binlog_format';`
+ });
+
+ if (binLogFormatResult.Value !== 'ROW') {
+ errors.push('Binary log format must be set to "ROW". Please correct your configuration.');
+ }
+
+ return errors;
+}
+
+export async function getMySQLVersion(connection: mysqlPromise.Connection): Promise<string> {
+ const [[versionResult]] = await mysql_utils.retriedQuery({
+ connection,
+ query: `SELECT VERSION() as version`
+ });
+
+ return versionResult.version as string;
+}
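A usage sketch for these checks, assuming the credentials from the dev docker-compose setup in this PR:

```ts
import mysqlPromise from 'mysql2/promise';
import { checkSourceConfiguration, getMySQLVersion } from './check-source-configuration.js';

const connection = await mysqlPromise.createConnection({
  host: 'localhost',
  port: 3306,
  user: 'repl_user',
  password: 'good_password',
  database: 'mydatabase'
});
try {
  console.log(`MySQL version: ${await getMySQLVersion(connection)}`);
  const errors = await checkSourceConfiguration(connection);
  if (errors.length > 0) {
    // Surface all configuration problems at once, as MySQLModule.testConnection does.
    throw new Error(errors.join('\n'));
  }
} finally {
  await connection.end();
}
```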
diff --git a/modules/module-mysql/src/common/common-index.ts b/modules/module-mysql/src/common/common-index.ts
new file mode 100644
index 000000000..6da005718
--- /dev/null
+++ b/modules/module-mysql/src/common/common-index.ts
@@ -0,0 +1,6 @@
+export * from './check-source-configuration.js';
+export * from './get-replication-columns.js';
+export * from './get-tables-from-pattern.js';
+export * from './mysql-to-sqlite.js';
+export * from './read-executed-gtid.js';
+export * from './ReplicatedGTID.js';
diff --git a/modules/module-mysql/src/common/get-replication-columns.ts b/modules/module-mysql/src/common/get-replication-columns.ts
new file mode 100644
index 000000000..19d5ca059
--- /dev/null
+++ b/modules/module-mysql/src/common/get-replication-columns.ts
@@ -0,0 +1,124 @@
+import { storage } from '@powersync/service-core';
+import mysqlPromise from 'mysql2/promise';
+import * as mysql_utils from '../utils/mysql_utils.js';
+
+export type GetReplicationColumnsOptions = {
+ connection: mysqlPromise.Connection;
+ schema: string;
+ table_name: string;
+};
+
+export type ReplicationIdentityColumnsResult = {
+ columns: storage.ColumnDescriptor[];
+ // TODO maybe export an enum from the core package
+ identity: string;
+};
+
+export async function getReplicationIdentityColumns(
+ options: GetReplicationColumnsOptions
+): Promise<ReplicationIdentityColumnsResult> {
+ const { connection, schema, table_name } = options;
+ const [primaryKeyColumns] = await mysql_utils.retriedQuery({
+ connection: connection,
+ query: `
+ SELECT
+ s.COLUMN_NAME AS name,
+ c.DATA_TYPE AS type
+ FROM
+ INFORMATION_SCHEMA.STATISTICS s
+ JOIN
+ INFORMATION_SCHEMA.COLUMNS c
+ ON
+ s.TABLE_SCHEMA = c.TABLE_SCHEMA
+ AND s.TABLE_NAME = c.TABLE_NAME
+ AND s.COLUMN_NAME = c.COLUMN_NAME
+ WHERE
+ s.TABLE_SCHEMA = ?
+ AND s.TABLE_NAME = ?
+ AND s.INDEX_NAME = 'PRIMARY'
+ ORDER BY
+ s.SEQ_IN_INDEX;
+ `,
+ params: [schema, table_name]
+ });
+
+ if (primaryKeyColumns.length) {
+ return {
+ columns: primaryKeyColumns.map((row) => ({
+ name: row.name,
+ type: row.type
+ })),
+ identity: 'default'
+ };
+ }
+
+ // TODO: test code with tables with unique keys, compound key etc.
+ // No primary key, find the first valid unique key
+ const [uniqueKeyColumns] = await mysql_utils.retriedQuery({
+ connection: connection,
+ query: `
+ SELECT
+ s.INDEX_NAME,
+ s.COLUMN_NAME,
+ c.DATA_TYPE,
+ s.NON_UNIQUE,
+ s.NULLABLE
+ FROM
+ INFORMATION_SCHEMA.STATISTICS s
+ JOIN
+ INFORMATION_SCHEMA.COLUMNS c
+ ON
+ s.TABLE_SCHEMA = c.TABLE_SCHEMA
+ AND s.TABLE_NAME = c.TABLE_NAME
+ AND s.COLUMN_NAME = c.COLUMN_NAME
+ WHERE
+ s.TABLE_SCHEMA = ?
+ AND s.TABLE_NAME = ?
+ AND s.INDEX_NAME != 'PRIMARY'
+ AND s.NON_UNIQUE = 0
+ ORDER BY s.SEQ_IN_INDEX;
+ `,
+ params: [schema, table_name]
+ });
+
+ if (uniqueKeyColumns.length > 0) {
+ return {
+ columns: uniqueKeyColumns.map((col) => ({
+ name: col.COLUMN_NAME,
+ type: col.DATA_TYPE
+ })),
+ identity: 'index'
+ };
+ }
+
+ const [allColumns] = await mysql_utils.retriedQuery({
+ connection: connection,
+ query: `
+ SELECT
+ s.COLUMN_NAME AS name,
+ c.DATA_TYPE as type
+ FROM
+ INFORMATION_SCHEMA.COLUMNS s
+ JOIN
+ INFORMATION_SCHEMA.COLUMNS c
+ ON
+ s.TABLE_SCHEMA = c.TABLE_SCHEMA
+ AND s.TABLE_NAME = c.TABLE_NAME
+ AND s.COLUMN_NAME = c.COLUMN_NAME
+ WHERE
+ s.TABLE_SCHEMA = ?
+ AND s.TABLE_NAME = ?
+ ORDER BY
+ s.ORDINAL_POSITION;
+ `,
+ params: [schema, table_name]
+ });
+
+ return {
+ columns: allColumns.map((row) => ({
+ name: row.name,
+ type: row.type
+ })),
+ identity: 'full'
+ };
+}
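To illustrate the fallback order (primary key, then unique index, then all columns), a sketch against the demo schema from this PR, where `lists` has a primary key on `id`; an open connection is assumed:

```ts
import mysqlPromise from 'mysql2/promise';
import { getReplicationIdentityColumns } from './get-replication-columns.js';

declare const connection: mysqlPromise.Connection;

const result = await getReplicationIdentityColumns({
  connection,
  schema: 'mydatabase',
  table_name: 'lists'
});
console.log(result.identity); // 'default' (primary key found)
console.log(result.columns); // e.g. [{ name: 'id', type: 'char' }]
```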
diff --git a/modules/module-mysql/src/common/get-tables-from-pattern.ts b/modules/module-mysql/src/common/get-tables-from-pattern.ts
new file mode 100644
index 000000000..166bf93a0
--- /dev/null
+++ b/modules/module-mysql/src/common/get-tables-from-pattern.ts
@@ -0,0 +1,44 @@
+import * as sync_rules from '@powersync/service-sync-rules';
+import mysql from 'mysql2/promise';
+
+export type GetDebugTablesInfoOptions = {
+ connection: mysql.Connection;
+ tablePattern: sync_rules.TablePattern;
+};
+
+export async function getTablesFromPattern(options: GetDebugTablesInfoOptions): Promise<Set<string>> {
+ const { connection, tablePattern } = options;
+ const schema = tablePattern.schema;
+
+ if (tablePattern.isWildcard) {
+ const [results] = await connection.query(
+ `SELECT
+ TABLE_NAME AS table_name
+ FROM
+ INFORMATION_SCHEMA.TABLES
+ WHERE
+ TABLE_SCHEMA = ?
+ AND TABLE_NAME LIKE ?`,
+ [schema, tablePattern.tablePattern]
+ );
+
+ return new Set(
+ results
+ .filter((result) => result.table_name.startsWith(tablePattern.tablePrefix))
+ .map((result) => result.table_name)
+ );
+ } else {
+ const [[match]] = await connection.query(
+ `SELECT
+ TABLE_NAME AS table_name
+ FROM
+ INFORMATION_SCHEMA.TABLES
+ WHERE
+ TABLE_SCHEMA = ?
+ AND TABLE_NAME = ?`,
+ [tablePattern.schema, tablePattern.tablePattern]
+ );
+ // Only return the first result
+ return new Set([match.table_name]);
+ }
+}
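A sketch of resolving a wildcard pattern; `TablePattern` comes from the sync-rules package, and the constructor arguments shown (schema, pattern) are an assumption based on its usage elsewhere in this diff:

```ts
import mysqlPromise from 'mysql2/promise';
import * as sync_rules from '@powersync/service-sync-rules';
import { getTablesFromPattern } from './get-tables-from-pattern.js';

declare const connection: mysqlPromise.Connection;

// Matches tables like `todos`, `todos_archive`, ... in the demo database.
const tablePattern = new sync_rules.TablePattern('mydatabase', 'todos%');
const tables = await getTablesFromPattern({ connection, tablePattern });
console.log(tables); // e.g. Set { 'todos' }
```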
diff --git a/modules/module-mysql/src/common/mysql-to-sqlite.ts b/modules/module-mysql/src/common/mysql-to-sqlite.ts
new file mode 100644
index 000000000..29fbfe0fc
--- /dev/null
+++ b/modules/module-mysql/src/common/mysql-to-sqlite.ts
@@ -0,0 +1,67 @@
+import * as sync_rules from '@powersync/service-sync-rules';
+import { ExpressionType } from '@powersync/service-sync-rules';
+import { ColumnDescriptor } from '@powersync/service-core';
+import mysql from 'mysql2';
+
+export function toSQLiteRow(row: Record<string, any>, columns?: Map<string, ColumnDescriptor>): sync_rules.SqliteRow {
+ for (let key in row) {
+ if (row[key] instanceof Date) {
+ const column = columns?.get(key);
+ if (column?.typeId == mysql.Types.DATE) {
+ // Only parse the date part
+ row[key] = row[key].toISOString().split('T')[0];
+ } else {
+ row[key] = row[key].toISOString();
+ }
+ }
+ }
+ return sync_rules.toSyncRulesRow(row);
+}
+
+export function toExpressionTypeFromMySQLType(mysqlType: string | undefined): ExpressionType {
+ if (!mysqlType) {
+ return ExpressionType.TEXT;
+ }
+
+ const upperCaseType = mysqlType.toUpperCase();
+ // Handle type with parameters like VARCHAR(255), DECIMAL(10,2), etc.
+ const baseType = upperCaseType.split('(')[0];
+
+ switch (baseType) {
+ case 'BIT':
+ case 'BOOL':
+ case 'BOOLEAN':
+ case 'TINYINT':
+ case 'SMALLINT':
+ case 'MEDIUMINT':
+ case 'INT':
+ case 'INTEGER':
+ case 'BIGINT':
+ case 'UNSIGNED BIGINT':
+ return ExpressionType.INTEGER;
+ case 'BINARY':
+ case 'VARBINARY':
+ case 'TINYBLOB':
+ case 'MEDIUMBLOB':
+ case 'LONGBLOB':
+ case 'BLOB':
+ case 'GEOMETRY':
+ case 'POINT':
+ case 'LINESTRING':
+ case 'POLYGON':
+ case 'MULTIPOINT':
+ case 'MULTILINESTRING':
+ case 'MULTIPOLYGON':
+ case 'GEOMETRYCOLLECTION':
+ return ExpressionType.BLOB;
+ case 'FLOAT':
+ case 'DOUBLE':
+ case 'REAL':
+ return ExpressionType.REAL;
+ case 'JSON':
+ return ExpressionType.TEXT;
+ default:
+ // In addition to the normal text types, includes: DECIMAL, NUMERIC, DATE, TIME, DATETIME, TIMESTAMP, YEAR, ENUM, SET
+ return ExpressionType.TEXT;
+ }
+}
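The type mapping can be exercised directly; parameters like `(10,2)` are stripped before matching, so parameterized types resolve to their base type:

```ts
import { ExpressionType } from '@powersync/service-sync-rules';
import { toExpressionTypeFromMySQLType } from './mysql-to-sqlite.js';

console.log(toExpressionTypeFromMySQLType('BIGINT') === ExpressionType.INTEGER); // true
console.log(toExpressionTypeFromMySQLType('DECIMAL(10,2)') === ExpressionType.TEXT); // true (falls through to default)
console.log(toExpressionTypeFromMySQLType('DOUBLE') === ExpressionType.REAL); // true
console.log(toExpressionTypeFromMySQLType('LONGBLOB') === ExpressionType.BLOB); // true
```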
diff --git a/modules/module-mysql/src/common/read-executed-gtid.ts b/modules/module-mysql/src/common/read-executed-gtid.ts
new file mode 100644
index 000000000..7f224c5b9
--- /dev/null
+++ b/modules/module-mysql/src/common/read-executed-gtid.ts
@@ -0,0 +1,55 @@
+import mysqlPromise from 'mysql2/promise';
+import * as mysql_utils from '../utils/mysql_utils.js';
+import { gte } from 'semver';
+
+import { ReplicatedGTID } from './ReplicatedGTID.js';
+import { getMySQLVersion } from './check-source-configuration.js';
+import { logger } from '@powersync/lib-services-framework';
+
+/**
+ * Gets the current master HEAD GTID
+ */
+export async function readExecutedGtid(connection: mysqlPromise.Connection): Promise<ReplicatedGTID> {
+ const version = await getMySQLVersion(connection);
+ let binlogStatus: mysqlPromise.RowDataPacket;
+ if (gte(version, '8.4.0')) {
+ // Get the BinLog status
+ const [[binLogResult]] = await mysql_utils.retriedQuery({
+ connection,
+ query: `SHOW BINARY LOG STATUS`
+ });
+ binlogStatus = binLogResult;
+ } else {
+ // TODO Check if this works for version 5.7
+ // Get the BinLog status
+ const [[binLogResult]] = await mysql_utils.retriedQuery({
+ connection,
+ query: `SHOW MASTER STATUS`
+ });
+ binlogStatus = binLogResult;
+ }
+ const position = {
+ filename: binlogStatus.File,
+ offset: parseInt(binlogStatus.Position)
+ };
+
+ logger.info('Successfully read executed GTID', { position });
+
+ return new ReplicatedGTID({
+ // The head always points to the next position to start replication from
+ position,
+ raw_gtid: binlogStatus.Executed_Gtid_Set
+ });
+}
+
+export async function isBinlogStillAvailable(
+ connection: mysqlPromise.Connection,
+ binlogFile: string
+): Promise<boolean> {
+ const [logFiles] = await mysql_utils.retriedQuery({
+ connection,
+ query: `SHOW BINARY LOGS;`
+ });
+
+ return logFiles.some((f) => f['Log_name'] == binlogFile);
+}
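A sketch of reading the current head GTID, assuming an open mysql2/promise connection:

```ts
import mysqlPromise from 'mysql2/promise';
import { readExecutedGtid } from './read-executed-gtid.js';

declare const connection: mysqlPromise.Connection;

const head = await readExecutedGtid(connection);
// The comparable form embeds the padded transaction counter, raw GTID set,
// and binlog position, e.g. '0000000000000042|<uuid>:1-42|mysql-bin.000003|157'
console.log(head.comparable);
```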
diff --git a/modules/module-mysql/src/index.ts b/modules/module-mysql/src/index.ts
new file mode 100644
index 000000000..3abe77fc5
--- /dev/null
+++ b/modules/module-mysql/src/index.ts
@@ -0,0 +1 @@
+export * from './module/MySQLModule.js';
diff --git a/modules/module-mysql/src/module/MySQLModule.ts b/modules/module-mysql/src/module/MySQLModule.ts
new file mode 100644
index 000000000..a4ab36bae
--- /dev/null
+++ b/modules/module-mysql/src/module/MySQLModule.ts
@@ -0,0 +1,71 @@
+import { api, ConfigurationFileSyncRulesProvider, replication, system, TearDownOptions } from '@powersync/service-core';
+
+import { MySQLRouteAPIAdapter } from '../api/MySQLRouteAPIAdapter.js';
+import { BinLogReplicator } from '../replication/BinLogReplicator.js';
+import { MySQLErrorRateLimiter } from '../replication/MySQLErrorRateLimiter.js';
+import * as types from '../types/types.js';
+import { MySQLConnectionManagerFactory } from '../replication/MySQLConnectionManagerFactory.js';
+import { MySQLConnectionConfig } from '../types/types.js';
+import { checkSourceConfiguration } from '../common/check-source-configuration.js';
+import { MySQLConnectionManager } from '../replication/MySQLConnectionManager.js';
+
+export class MySQLModule extends replication.ReplicationModule<types.MySQLConnectionConfig> {
+ constructor() {
+ super({
+ name: 'MySQL',
+ type: types.MYSQL_CONNECTION_TYPE,
+ configSchema: types.MySQLConnectionConfig
+ });
+ }
+
+ async initialize(context: system.ServiceContextContainer): Promise<void> {
+ await super.initialize(context);
+ }
+
+ protected createRouteAPIAdapter(): api.RouteAPI {
+ return new MySQLRouteAPIAdapter(this.resolveConfig(this.decodedConfig!));
+ }
+
+ protected createReplicator(context: system.ServiceContext): replication.AbstractReplicator {
+ const normalisedConfig = this.resolveConfig(this.decodedConfig!);
+ const syncRuleProvider = new ConfigurationFileSyncRulesProvider(context.configuration.sync_rules);
+ const connectionFactory = new MySQLConnectionManagerFactory(normalisedConfig);
+
+ return new BinLogReplicator({
+ id: this.getDefaultId(normalisedConfig.database),
+ syncRuleProvider: syncRuleProvider,
+ storageEngine: context.storageEngine,
+ connectionFactory: connectionFactory,
+ rateLimiter: new MySQLErrorRateLimiter()
+ });
+ }
+
+ /**
+ * Combines base config with normalized connection settings
+ */
+ private resolveConfig(config: types.MySQLConnectionConfig): types.ResolvedConnectionConfig {
+ return {
+ ...config,
+ ...types.normalizeConnectionConfig(config)
+ };
+ }
+
+ async teardown(options: TearDownOptions): Promise<void> {
+ // No specific teardown required for MySQL
+ }
+
+ async testConnection(config: MySQLConnectionConfig): Promise<void> {
+ this.decodeConfig(config);
+ const normalisedConfig = this.resolveConfig(this.decodedConfig!);
+ const connectionManager = new MySQLConnectionManager(normalisedConfig, {});
+ const connection = await connectionManager.getConnection();
+ try {
+ const errors = await checkSourceConfiguration(connection);
+ if (errors.length > 0) {
+ throw new Error(errors.join('\n'));
+ }
+ } finally {
+ await connectionManager.end();
+ }
+ }
+}
diff --git a/modules/module-mysql/src/replication/BinLogReplicationJob.ts b/modules/module-mysql/src/replication/BinLogReplicationJob.ts
new file mode 100644
index 000000000..aa1a838b2
--- /dev/null
+++ b/modules/module-mysql/src/replication/BinLogReplicationJob.ts
@@ -0,0 +1,94 @@
+import { container } from '@powersync/lib-services-framework';
+import { replication } from '@powersync/service-core';
+import { BinlogConfigurationError, BinLogStream } from './BinLogStream.js';
+import { MySQLConnectionManagerFactory } from './MySQLConnectionManagerFactory.js';
+
+export interface BinLogReplicationJobOptions extends replication.AbstractReplicationJobOptions {
+ connectionFactory: MySQLConnectionManagerFactory;
+}
+
+export class BinLogReplicationJob extends replication.AbstractReplicationJob {
+ private connectionFactory: MySQLConnectionManagerFactory;
+
+ constructor(options: BinLogReplicationJobOptions) {
+ super(options);
+ this.connectionFactory = options.connectionFactory;
+ }
+
+ get slot_name() {
+ return this.options.storage.slot_name;
+ }
+
+ async keepAlive() {}
+
+ async replicate() {
+ try {
+ await this.replicateLoop();
+ } catch (e) {
+ // Fatal exception
+ container.reporter.captureException(e, {
+ metadata: {
+ replication_slot: this.slot_name
+ }
+ });
+ this.logger.error(`Replication failed on ${this.slot_name}`, e);
+ } finally {
+ this.abortController.abort();
+ }
+ }
+
+ async replicateLoop() {
+ while (!this.isStopped) {
+ await this.replicateOnce();
+
+ if (!this.isStopped) {
+ await new Promise((resolve) => setTimeout(resolve, 5000));
+ }
+ }
+ }
+
+ async replicateOnce() {
+ // Create a new connection on every iteration (i.e. on every error with retry),
+ // otherwise we risk repeating errors related to the connection,
+ // such as those caused by cached schemas.
+ const connectionManager = this.connectionFactory.create({
+ // Pool connections are only used intermittently.
+ idleTimeout: 30_000
+ });
+ try {
+ await this.rateLimiter?.waitUntilAllowed({ signal: this.abortController.signal });
+ if (this.isStopped) {
+ return;
+ }
+ const stream = new BinLogStream({
+ abortSignal: this.abortController.signal,
+ storage: this.options.storage,
+ connections: connectionManager
+ });
+ await stream.replicate();
+ } catch (e) {
+ if (this.abortController.signal.aborted) {
+ return;
+ }
+ this.logger.error(`Replication error`, e);
+ if (e.cause != null) {
+ this.logger.error(`cause`, e.cause);
+ }
+
+ if (e instanceof BinlogConfigurationError) {
+ throw e;
+ } else {
+ // Report the error if relevant, before retrying
+ container.reporter.captureException(e, {
+ metadata: {
+ replication_slot: this.slot_name
+ }
+ });
+ // This sets the retry delay
+ this.rateLimiter?.reportError(e);
+ }
+ } finally {
+ await connectionManager.end();
+ }
+ }
+}
diff --git a/modules/module-mysql/src/replication/BinLogReplicator.ts b/modules/module-mysql/src/replication/BinLogReplicator.ts
new file mode 100644
index 000000000..ca07f4a0a
--- /dev/null
+++ b/modules/module-mysql/src/replication/BinLogReplicator.ts
@@ -0,0 +1,35 @@
+import { replication, storage } from '@powersync/service-core';
+import { BinLogReplicationJob } from './BinLogReplicationJob.js';
+import { MySQLConnectionManagerFactory } from './MySQLConnectionManagerFactory.js';
+
+export interface BinLogReplicatorOptions extends replication.AbstractReplicatorOptions {
+ connectionFactory: MySQLConnectionManagerFactory;
+}
+
+export class BinLogReplicator extends replication.AbstractReplicator<BinLogReplicationJob> {
+ private readonly connectionFactory: MySQLConnectionManagerFactory;
+
+ constructor(options: BinLogReplicatorOptions) {
+ super(options);
+ this.connectionFactory = options.connectionFactory;
+ }
+
+ createJob(options: replication.CreateJobOptions): BinLogReplicationJob {
+ return new BinLogReplicationJob({
+ id: this.createJobId(options.storage.group_id),
+ storage: options.storage,
+ lock: options.lock,
+ connectionFactory: this.connectionFactory,
+ rateLimiter: this.rateLimiter
+ });
+ }
+
+ async cleanUp(syncRulesStorage: storage.SyncRulesBucketStorage): Promise<void> {
+ // The MySQL module does not create anything which requires cleanup on the MySQL server.
+ }
+
+ async stop(): Promise<void> {
+ await super.stop();
+ await this.connectionFactory.shutdown();
+ }
+}
diff --git a/modules/module-mysql/src/replication/BinLogStream.ts b/modules/module-mysql/src/replication/BinLogStream.ts
new file mode 100644
index 000000000..ad805bf99
--- /dev/null
+++ b/modules/module-mysql/src/replication/BinLogStream.ts
@@ -0,0 +1,593 @@
+import { logger } from '@powersync/lib-services-framework';
+import * as sync_rules from '@powersync/service-sync-rules';
+import async from 'async';
+
+import { ColumnDescriptor, framework, getUuidReplicaIdentityBson, Metrics, storage } from '@powersync/service-core';
+import mysql, { FieldPacket } from 'mysql2';
+
+import { BinLogEvent, StartOptions, TableMapEntry } from '@powersync/mysql-zongji';
+import * as common from '../common/common-index.js';
+import * as zongji_utils from './zongji/zongji-utils.js';
+import { MySQLConnectionManager } from './MySQLConnectionManager.js';
+import { isBinlogStillAvailable, ReplicatedGTID } from '../common/common-index.js';
+import mysqlPromise from 'mysql2/promise';
+import { MySQLTypesMap } from '../utils/mysql_utils.js';
+
+export interface BinLogStreamOptions {
+ connections: MySQLConnectionManager;
+ storage: storage.SyncRulesBucketStorage;
+ abortSignal: AbortSignal;
+}
+
+interface MysqlRelId {
+ schema: string;
+ name: string;
+}
+
+interface WriteChangePayload {
+ type: storage.SaveOperationTag;
+ data: Data;
+ previous_data?: Data;
+ database: string;
+ table: string;
+ sourceTable: storage.SourceTable;
+ columns: Map<string, ColumnDescriptor>;
+}
+
+export type Data = Record<string, any>;
+
+export class BinlogConfigurationError extends Error {
+ constructor(message: string) {
+ super(message);
+ }
+}
+
+/**
+ * MySQL does not have the same relation structure, so a unique `schema.name` key is returned as a string instead.
+ * @param source
+ */
+function getMysqlRelId(source: MysqlRelId): string {
+ return `${source.schema}.${source.name}`;
+}
+
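+/**
+ * Replicates a MySQL source database to PowerSync bucket storage:
+ * initial table snapshots followed by streaming binlog changes.
+ */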
+export class BinLogStream {
+ private readonly syncRules: sync_rules.SqlSyncRules;
+ private readonly groupId: number;
+
+ private readonly storage: storage.SyncRulesBucketStorage;
+
+ private readonly connections: MySQLConnectionManager;
+
+ private abortSignal: AbortSignal;
+
+ private tableCache = new Map<string, storage.SourceTable>();
+
+ constructor(protected options: BinLogStreamOptions) {
+ this.storage = options.storage;
+ this.connections = options.connections;
+ this.syncRules = options.storage.getParsedSyncRules({ defaultSchema: this.defaultSchema });
+ this.groupId = options.storage.group_id;
+ this.abortSignal = options.abortSignal;
+ }
+
+ get connectionTag() {
+ return this.connections.connectionTag;
+ }
+
+ get connectionId() {
+ // Default to 1 if not set
+ return this.connections.connectionId ? Number.parseInt(this.connections.connectionId) : 1;
+ }
+
+ get stopped() {
+ return this.abortSignal.aborted;
+ }
+
+ get defaultSchema() {
+ return this.connections.databaseName;
+ }
+
+ async handleRelation(batch: storage.BucketStorageBatch, entity: storage.SourceEntityDescriptor, snapshot: boolean) {
+ const result = await this.storage.resolveTable({
+ group_id: this.groupId,
+ connection_id: this.connectionId,
+ connection_tag: this.connectionTag,
+ entity_descriptor: entity,
+ sync_rules: this.syncRules
+ });
+ this.tableCache.set(entity.objectId, result.table);
+
+ // Drop conflicting tables. This includes, for example, renamed tables.
+ await batch.drop(result.dropTables);
+
+ // Snapshot if:
+ // 1. a snapshot is requested (false for the initial snapshot, since that process handles it elsewhere),
+ // 2. the snapshot has not already been completed, and
+ // 3. the table is used in the sync rules.
+ const shouldSnapshot = snapshot && !result.table.snapshotComplete && result.table.syncAny;
+
+ if (shouldSnapshot) {
+ // Truncate this table, in case a previous snapshot was interrupted.
+ await batch.truncate([result.table]);
+
+ let gtid: common.ReplicatedGTID;
+ // Start the snapshot inside a transaction.
+ // We use a dedicated connection for this.
+ const connection = await this.connections.getStreamingConnection();
+ const promiseConnection = (connection as mysql.Connection).promise();
+ try {
+ await promiseConnection.query('BEGIN');
+ try {
+ gtid = await common.readExecutedGtid(promiseConnection);
+ await this.snapshotTable(connection.connection, batch, result.table);
+ await promiseConnection.query('COMMIT');
+ } catch (e) {
+ await promiseConnection.query('ROLLBACK');
+ throw e;
+ }
+ } finally {
+ connection.release();
+ }
+ const [table] = await batch.markSnapshotDone([result.table], gtid.comparable);
+ return table;
+ }
+
+ return result.table;
+ }
+
+ async getQualifiedTableNames(
+ batch: storage.BucketStorageBatch,
+ tablePattern: sync_rules.TablePattern
+ ): Promise<storage.SourceTable[]> {
+ if (tablePattern.connectionTag != this.connectionTag) {
+ return [];
+ }
+
+ let tableRows: any[];
+ const prefix = tablePattern.isWildcard ? tablePattern.tablePrefix : undefined;
+ if (tablePattern.isWildcard) {
+ const result = await this.connections.query(
+ `SELECT TABLE_NAME
+FROM information_schema.tables
+WHERE TABLE_SCHEMA = ? AND TABLE_NAME LIKE ?;
+`,
+ [tablePattern.schema, tablePattern.tablePattern]
+ );
+ tableRows = result[0];
+ } else {
+ const result = await this.connections.query(
+ `SELECT TABLE_NAME
+FROM information_schema.tables
+WHERE TABLE_SCHEMA = ? AND TABLE_NAME = ?;
+`,
+ [tablePattern.schema, tablePattern.tablePattern]
+ );
+ tableRows = result[0];
+ }
+ let tables: storage.SourceTable[] = [];
+
+ for (let row of tableRows) {
+ const name = row['TABLE_NAME'] as string;
+ if (prefix && !name.startsWith(prefix)) {
+ continue;
+ }
+
+ const result = await this.connections.query(
+ `SELECT 1
+FROM information_schema.tables
+WHERE table_schema = ? AND table_name = ?
+AND table_type = 'BASE TABLE';`,
+ [tablePattern.schema, name]
+ );
+ if (result[0].length == 0) {
+ logger.info(`Skipping ${tablePattern.schema}.${name} - table does not exist or is not a base table`);
+ continue;
+ }
+
+ const connection = await this.connections.getConnection();
+ const replicationColumns = await common.getReplicationIdentityColumns({
+ connection: connection,
+ schema: tablePattern.schema,
+ table_name: name
+ });
+ connection.release();
+
+ const table = await this.handleRelation(
+ batch,
+ {
+ name,
+ schema: tablePattern.schema,
+ objectId: getMysqlRelId({ schema: tablePattern.schema, name }),
+ replicationColumns: replicationColumns.columns
+ },
+ false
+ );
+
+ tables.push(table);
+ }
+ return tables;
+ }
+
+ /**
+ * Checks if the initial sync has been completed yet.
+ */
+ protected async checkInitialReplicated(): Promise<boolean> {
+ const status = await this.storage.getStatus();
+ const lastKnownGTID = status.checkpoint_lsn ? common.ReplicatedGTID.fromSerialized(status.checkpoint_lsn) : null;
+ if (status.snapshot_done && status.checkpoint_lsn) {
+ logger.info(`Initial replication already done.`);
+
+ if (lastKnownGTID) {
+ // Check if the binlog is still available. If it isn't we need to snapshot again.
+ const connection = await this.connections.getConnection();
+ try {
+ return await isBinlogStillAvailable(connection, lastKnownGTID.position.filename);
+ } finally {
+ connection.release();
+ }
+ }
+
+ return true;
+ }
+
+ return false;
+ }
+
+ /**
+ * Does the initial replication of the database tables.
+ *
+ * If (partial) replication was done before on this slot, this clears the state
+ * and starts again from scratch.
+ */
+ async startInitialReplication() {
+ await this.storage.clear();
+ // Replication will be performed in a single transaction on this connection
+ const connection = await this.connections.getStreamingConnection();
+ const promiseConnection = (connection as mysql.Connection).promise();
+ const headGTID = await common.readExecutedGtid(promiseConnection);
+ logger.info(`Using snapshot checkpoint GTID: '${headGTID}'`);
+ try {
+ logger.info(`Starting initial replication`);
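+ // A REPEATABLE READ, read-only transaction gives the snapshot a single consistent view of the data.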
+ await promiseConnection.query(
+ 'SET TRANSACTION ISOLATION LEVEL REPEATABLE READ, READ ONLY'
+ );
+ await promiseConnection.query('START TRANSACTION');
+ const sourceTables = this.syncRules.getSourceTables();
+ await this.storage.startBatch(
+ { zeroLSN: ReplicatedGTID.ZERO.comparable, defaultSchema: this.defaultSchema },
+ async (batch) => {
+ for (let tablePattern of sourceTables) {
+ const tables = await this.getQualifiedTableNames(batch, tablePattern);
+ for (let table of tables) {
+ await this.snapshotTable(connection as mysql.Connection, batch, table);
+ await batch.markSnapshotDone([table], headGTID.comparable);
+ await framework.container.probes.touch();
+ }
+ }
+ await batch.commit(headGTID.comparable);
+ }
+ );
+ logger.info(`Initial replication done`);
+ await promiseConnection.query('COMMIT');
+ } catch (e) {
+ await promiseConnection.query('ROLLBACK');
+ throw e;
+ } finally {
+ connection.release();
+ }
+ }
+
+ private async snapshotTable(
+ connection: mysql.Connection,
+ batch: storage.BucketStorageBatch,
+ table: storage.SourceTable
+ ) {
+ logger.info(`Replicating ${table.qualifiedName}`);
+ // TODO count rows and log progress at certain batch sizes
+
+ const columns = new Map<string, ColumnDescriptor>();
+ return new Promise<void>((resolve, reject) => {
+ // MAX_EXECUTION_TIME(0) hint disables execution timeout for this query
+ connection
+ .query(`SELECT /*+ MAX_EXECUTION_TIME(0) */ * FROM ${table.schema}.${table.table}`)
+ .on('error', (err) => {
+ reject(err);
+ })
+ .on('fields', (fields: FieldPacket[]) => {
+ // Map the columns and their types
+ fields.forEach((field) => {
+ const columnType = MySQLTypesMap[field.type as number];
+ columns.set(field.name, { name: field.name, type: columnType, typeId: field.type });
+ });
+ })
+ .on('result', async (row) => {
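+ // Pause the streaming query while the row is persisted, so results are not emitted faster than they are saved.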
+ connection.pause();
+ const record = common.toSQLiteRow(row, columns);
+
+ await batch.save({
+ tag: storage.SaveOperationTag.INSERT,
+ sourceTable: table,
+ before: undefined,
+ beforeReplicaId: undefined,
+ after: record,
+ afterReplicaId: getUuidReplicaIdentityBson(record, table.replicaIdColumns)
+ });
+ connection.resume();
+ Metrics.getInstance().rows_replicated_total.add(1);
+ })
+ .on('end', async function () {
+ await batch.flush();
+ resolve();
+ });
+ });
+ }
+
+ async replicate() {
+ try {
+ // If anything errors here, the entire replication process is halted, and
+ // all connections automatically closed, including this one.
+ await this.initReplication();
+ await this.streamChanges();
+ logger.info('BinlogStream has been shut down');
+ } catch (e) {
+ await this.storage.reportError(e);
+ throw e;
+ }
+ }
+
+ async initReplication() {
+ const connection = await this.connections.getConnection();
+ const errors = await common.checkSourceConfiguration(connection);
+ connection.release();
+
+ if (errors.length > 0) {
+ throw new BinlogConfigurationError(`Binlog Configuration Errors: ${errors.join(', ')}`);
+ }
+
+ const initialReplicationCompleted = await this.checkInitialReplicated();
+ if (!initialReplicationCompleted) {
+ await this.startInitialReplication();
+ }
+ }
+
+ private getTable(tableId: string): storage.SourceTable {
+ const table = this.tableCache.get(tableId);
+ if (table == null) {
+ // We should always receive a replication message before the relation is used.
+ // If we can't find it, it's a bug.
+ throw new Error(`Missing relation cache for ${tableId}`);
+ }
+ return table;
+ }
+
+ async streamChanges() {
+ // Auto-activate as soon as initial replication is done
+ await this.storage.autoActivate();
+
+ const connection = await this.connections.getConnection();
+ const { checkpoint_lsn } = await this.storage.getStatus();
+ logger.info(`Last known LSN from storage: ${checkpoint_lsn}`);
+
+ const fromGTID = checkpoint_lsn
+ ? common.ReplicatedGTID.fromSerialized(checkpoint_lsn)
+ : await common.readExecutedGtid(connection);
+ const binLogPositionState = fromGTID.position;
+ connection.release();
+
+ if (!this.stopped) {
+ await this.storage.startBatch(
+ { zeroLSN: ReplicatedGTID.ZERO.comparable, defaultSchema: this.defaultSchema },
+ async (batch) => {
+ const zongji = this.connections.createBinlogListener();
+
+ let currentGTID: common.ReplicatedGTID | null = null;
+
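+ // Events are processed sequentially: the queue runs with a concurrency of 1 to preserve binlog order.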
+ const queue = async.queue(async (evt: BinLogEvent) => {
+ // State machine
+ switch (true) {
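+ // A GTID event marks the start of a transaction; the matching XID event commits it with the updated position.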
+ case zongji_utils.eventIsGTIDLog(evt):
+ currentGTID = common.ReplicatedGTID.fromBinLogEvent({
+ raw_gtid: {
+ server_id: evt.serverId,
+ transaction_range: evt.transactionRange
+ },
+ position: {
+ filename: binLogPositionState.filename,
+ offset: evt.nextPosition
+ }
+ });
+ break;
+ case zongji_utils.eventIsRotation(evt):
+ // Update the position
+ binLogPositionState.filename = evt.binlogName;
+ binLogPositionState.offset = evt.position;
+ break;
+ case zongji_utils.eventIsWriteMutation(evt):
+ const writeTableInfo = evt.tableMap[evt.tableId];
+ await this.writeChanges(batch, {
+ type: storage.SaveOperationTag.INSERT,
+ data: evt.rows,
+ tableEntry: writeTableInfo
+ });
+ break;
+ case zongji_utils.eventIsUpdateMutation(evt):
+ const updateTableInfo = evt.tableMap[evt.tableId];
+ await this.writeChanges(batch, {
+ type: storage.SaveOperationTag.UPDATE,
+ data: evt.rows.map((row) => row.after),
+ previous_data: evt.rows.map((row) => row.before),
+ tableEntry: updateTableInfo
+ });
+ break;
+ case zongji_utils.eventIsDeleteMutation(evt):
+ const deleteTableInfo = evt.tableMap[evt.tableId];
+ await this.writeChanges(batch, {
+ type: storage.SaveOperationTag.DELETE,
+ data: evt.rows,
+ tableEntry: deleteTableInfo
+ });
+ break;
+ case zongji_utils.eventIsXid(evt):
+ Metrics.getInstance().transactions_replicated_total.add(1);
+ // Need to commit with a replicated GTID with updated next position
+ await batch.commit(
+ new common.ReplicatedGTID({
+ raw_gtid: currentGTID!.raw,
+ position: {
+ filename: binLogPositionState.filename,
+ offset: evt.nextPosition
+ }
+ }).comparable
+ );
+ currentGTID = null;
+ // chunks_replicated_total.add(1);
+ break;
+ }
+ }, 1);
+
+ zongji.on('binlog', (evt: BinLogEvent) => {
+ if (!this.stopped) {
+ logger.info(`Pushing Binlog event ${evt.getEventName()}`);
+ queue.push(evt);
+ } else {
+ logger.info(`Replication is busy stopping, ignoring event ${evt.getEventName()}`);
+ }
+ });
+
+ if (this.stopped) {
+ // PowerSync is shutting down, don't start replicating
+ return;
+ }
+ // Only listen for changes to tables in the sync rules
+ const includedTables = [...this.tableCache.values()].map((table) => table.table);
+ logger.info(`Starting replication from ${binLogPositionState.filename}:${binLogPositionState.offset}`);
+ zongji.start({
+ includeEvents: ['tablemap', 'writerows', 'updaterows', 'deleterows', 'xid', 'rotate', 'gtidlog'],
+ excludeEvents: [],
+ includeSchema: { [this.defaultSchema]: includedTables },
+ filename: binLogPositionState.filename,
+ position: binLogPositionState.offset,
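+ // Unique replica server id for this binlog client, derived from the sync rules group id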
+ serverId: this.storage.group_id
+ } satisfies StartOptions);
+
+ // Keep this promise pending until replication stops or fails
+ await new Promise((resolve, reject) => {
+ zongji.on('error', (error) => {
+ logger.error('Error on Binlog listener:', error);
+ zongji.stop();
+ queue.kill();
+ reject(error);
+ });
+
+ zongji.on('stopped', () => {
+ logger.info('Binlog listener stopped. Replication ended.');
+ resolve();
+ });
+
+ queue.error((error) => {
+ logger.error('Binlog listener queue error:', error);
+ zongji.stop();
+ queue.kill();
+ reject(error);
+ });
+
+ this.abortSignal.addEventListener(
+ 'abort',
+ () => {
+ logger.info('Abort signal received, stopping replication...');
+ zongji.stop();
+ queue.kill();
+ resolve();
+ },
+ { once: true }
+ );
+ });
+ }
+ );
+ }
+ }
+
+ private async writeChanges(
+ batch: storage.BucketStorageBatch,
+ msg: {
+ type: storage.SaveOperationTag;
+ data: Data[];
+ previous_data?: Data[];
+ tableEntry: TableMapEntry;
+ }
+ ): Promise<storage.FlushedResult | null> {
+ const columns = new Map<string, ColumnDescriptor>();
+ msg.tableEntry.columns.forEach((column) => {
+ columns.set(column.name, { name: column.name, typeId: column.type });
+ });
+
+ for (const [index, row] of msg.data.entries()) {
+ await this.writeChange(batch, {
+ type: msg.type,
+ database: msg.tableEntry.parentSchema,
+ sourceTable: this.getTable(
+ getMysqlRelId({
+ schema: msg.tableEntry.parentSchema,
+ name: msg.tableEntry.tableName
+ })
+ ),
+ table: msg.tableEntry.tableName,
+ columns: columns,
+ data: row,
+ previous_data: msg.previous_data?.[index]
+ });
+ }
+ return null;
+ }
+
+ private async writeChange(
+ batch: storage.BucketStorageBatch,
+ payload: WriteChangePayload
+ ): Promise<storage.FlushedResult | null> {
+ switch (payload.type) {
+ case storage.SaveOperationTag.INSERT:
+ Metrics.getInstance().rows_replicated_total.add(1);
+ const record = common.toSQLiteRow(payload.data, payload.columns);
+ return await batch.save({
+ tag: storage.SaveOperationTag.INSERT,
+ sourceTable: payload.sourceTable,
+ before: undefined,
+ beforeReplicaId: undefined,
+ after: record,
+ afterReplicaId: getUuidReplicaIdentityBson(record, payload.sourceTable.replicaIdColumns)
+ });
+ case storage.SaveOperationTag.UPDATE:
+ Metrics.getInstance().rows_replicated_total.add(1);
+ // "before" may be null if the replica id columns are unchanged
+ // It's fine to treat that the same as an insert.
+ const beforeUpdated = payload.previous_data ? common.toSQLiteRow(payload.previous_data, payload.columns) : undefined;
+ const after = common.toSQLiteRow(payload.data, payload.columns);
+
+ return await batch.save({
+ tag: storage.SaveOperationTag.UPDATE,
+ sourceTable: payload.sourceTable,
+ before: beforeUpdated,
+ beforeReplicaId: beforeUpdated
+ ? getUuidReplicaIdentityBson(beforeUpdated, payload.sourceTable.replicaIdColumns)
+ : undefined,
+ after: after,
+ afterReplicaId: getUuidReplicaIdentityBson(after, payload.sourceTable.replicaIdColumns)
+ });
+
+ case storage.SaveOperationTag.DELETE:
+ Metrics.getInstance().rows_replicated_total.add(1);
+ const beforeDeleted = common.toSQLiteRow(payload.data, payload.columns);
+
+ return await batch.save({
+ tag: storage.SaveOperationTag.DELETE,
+ sourceTable: payload.sourceTable,
+ before: beforeDeleted,
+ beforeReplicaId: getUuidReplicaIdentityBson(beforeDeleted, payload.sourceTable.replicaIdColumns),
+ after: undefined,
+ afterReplicaId: undefined
+ });
+ default:
+ return null;
+ }
+ }
+}
diff --git a/modules/module-mysql/src/replication/MySQLConnectionManager.ts b/modules/module-mysql/src/replication/MySQLConnectionManager.ts
new file mode 100644
index 000000000..b69ddb5ad
--- /dev/null
+++ b/modules/module-mysql/src/replication/MySQLConnectionManager.ts
@@ -0,0 +1,107 @@
+import { NormalizedMySQLConnectionConfig } from '../types/types.js';
+import mysqlPromise from 'mysql2/promise';
+import mysql, { RowDataPacket } from 'mysql2';
+import * as mysql_utils from '../utils/mysql_utils.js';
+import ZongJi from '@powersync/mysql-zongji';
+import { logger } from '@powersync/lib-services-framework';
+
+export class MySQLConnectionManager {
+ /**
+ * Pool that can create streamable connections
+ */
+ private readonly pool: mysql.Pool;
+ /**
+ * Pool that can create promise-based connections
+ */
+ private readonly promisePool: mysqlPromise.Pool;
+
+ private binlogListeners: ZongJi[] = [];
+
+ private isClosed = false;
+
+ constructor(
+ public options: NormalizedMySQLConnectionConfig,
+ public poolOptions: mysqlPromise.PoolOptions
+ ) {
+ // The pool is lazy - no connections are opened until a query is performed.
+ this.pool = mysql_utils.createPool(options, poolOptions);
+ this.promisePool = this.pool.promise();
+ }
+
+ public get connectionTag() {
+ return this.options.tag;
+ }
+
+ public get connectionId() {
+ return this.options.id;
+ }
+
+ public get databaseName() {
+ return this.options.database;
+ }
+
+ /**
+ * Create a new replication listener
+ */
+ createBinlogListener(): ZongJi {
+ const listener = new ZongJi({
+ host: this.options.hostname,
+ user: this.options.username,
+ password: this.options.password
+ });
+
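+ // Keep track of the listener so that it can be stopped when this manager is closed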
+ this.binlogListeners.push(listener);
+
+ return listener;
+ }
+
+ /**
+ * Run a query using a connection from the pool
+ * A promise with the result is returned
+ * @param query
+ * @param params
+ */
+ async query(query: string, params?: any[]) {
+ return this.promisePool.query(query, params);
+ }
+
+ /**
+ * Get a streamable connection from this manager's pool
+ * The connection should be released when it is no longer needed
+ */
+ async getStreamingConnection(): Promise<mysql.PoolConnection> {
+ return new Promise((resolve, reject) => {
+ this.pool.getConnection((err, connection) => {
+ if (err) {
+ reject(err);
+ } else {
+ resolve(connection);
+ }
+ });
+ });
+ }
+
+ /**
+ * Get a promise connection from this manager's pool
+ * The connection should be released when it is no longer needed
+ */
+ async getConnection(): Promise<mysqlPromise.PoolConnection> {
+ return this.promisePool.getConnection();
+ }
+
+ async end(): Promise<void> {
+ if (!this.isClosed) {
+ for (const listener of this.binlogListeners) {
+ listener.stop();
+ }
+
+ try {
+ await this.promisePool.end();
+ this.isClosed = true;
+ } catch (error) {
+ // We don't particularly care if any errors are thrown when shutting down the pool
+ logger.warn('Error shutting down MySQL connection pool', error);
+ }
+ }
+ }
+}
diff --git a/modules/module-mysql/src/replication/MySQLConnectionManagerFactory.ts b/modules/module-mysql/src/replication/MySQLConnectionManagerFactory.ts
new file mode 100644
index 000000000..ea87f60ec
--- /dev/null
+++ b/modules/module-mysql/src/replication/MySQLConnectionManagerFactory.ts
@@ -0,0 +1,28 @@
+import { logger } from '@powersync/lib-services-framework';
+import mysql from 'mysql2/promise';
+import { MySQLConnectionManager } from './MySQLConnectionManager.js';
+import { ResolvedConnectionConfig } from '../types/types.js';
+
+export class MySQLConnectionManagerFactory {
+ private readonly connectionManagers: MySQLConnectionManager[];
+ private readonly connectionConfig: ResolvedConnectionConfig;
+
+ constructor(connectionConfig: ResolvedConnectionConfig) {
+ this.connectionConfig = connectionConfig;
+ this.connectionManagers = [];
+ }
+
+ create(poolOptions: mysql.PoolOptions) {
+ const manager = new MySQLConnectionManager(this.connectionConfig, poolOptions);
+ this.connectionManagers.push(manager);
+ return manager;
+ }
+
+ async shutdown() {
+ logger.info('Shutting down MySQL connection managers...');
+ for (const manager of this.connectionManagers) {
+ await manager.end();
+ }
+ logger.info('MySQL connection manager shutdown complete.');
+ }
+}
diff --git a/modules/module-mysql/src/replication/MySQLErrorRateLimiter.ts b/modules/module-mysql/src/replication/MySQLErrorRateLimiter.ts
new file mode 100644
index 000000000..aff96c3ab
--- /dev/null
+++ b/modules/module-mysql/src/replication/MySQLErrorRateLimiter.ts
@@ -0,0 +1,44 @@
+import { ErrorRateLimiter } from '@powersync/service-core';
+import { setTimeout } from 'timers/promises';
+
+export class MySQLErrorRateLimiter implements ErrorRateLimiter {
+ nextAllowed: number = Date.now();
+
+ async waitUntilAllowed(options?: { signal?: AbortSignal | undefined } | undefined): Promise<void> {
+ const delay = Math.max(0, this.nextAllowed - Date.now());
+ // Minimum delay between connections, even without errors
+ this.setDelay(500);
+ await setTimeout(delay, undefined, { signal: options?.signal });
+ }
+
+ mayPing(): boolean {
+ return Date.now() >= this.nextAllowed;
+ }
+
+ reportError(e: any): void {
+ const message = (e.message as string) ?? '';
+ if (message.includes('password authentication failed')) {
+ // Wait 15 minutes, to avoid triggering Supabase's fail2ban
+ this.setDelay(900_000);
+ } else if (message.includes('ENOTFOUND')) {
+ // DNS lookup issue - incorrect URI or deleted instance
+ this.setDelay(120_000);
+ } else if (message.includes('ECONNREFUSED')) {
+ // Could be fail2ban or similar
+ this.setDelay(120_000);
+ } else if (
+ message.includes('Unable to do postgres query on ended pool') ||
+ message.includes('Postgres unexpectedly closed connection')
+ ) {
+ // Connection timed out - ignore / immediately retry
+ // We don't explicitly set the delay to 0, since there could have been another error that
+ // we need to respect.
+ } else {
+ this.setDelay(30_000);
+ }
+ }
+
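+ // Only ever extends the delay: the later of the current and the newly requested "next allowed" time wins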
+ private setDelay(delay: number) {
+ this.nextAllowed = Math.max(this.nextAllowed, Date.now() + delay);
+ }
+}
diff --git a/modules/module-mysql/src/replication/zongji/zongji-utils.ts b/modules/module-mysql/src/replication/zongji/zongji-utils.ts
new file mode 100644
index 000000000..36122b636
--- /dev/null
+++ b/modules/module-mysql/src/replication/zongji/zongji-utils.ts
@@ -0,0 +1,32 @@
+import {
+ BinLogEvent,
+ BinLogGTIDLogEvent,
+ BinLogMutationEvent,
+ BinLogRotationEvent,
+ BinLogUpdateEvent,
+ BinLogXidEvent
+} from '@powersync/mysql-zongji';
+
+export function eventIsGTIDLog(event: BinLogEvent): event is BinLogGTIDLogEvent {
+ return event.getEventName() == 'gtidlog';
+}
+
+export function eventIsXid(event: BinLogEvent): event is BinLogXidEvent {
+ return event.getEventName() == 'xid';
+}
+
+export function eventIsRotation(event: BinLogEvent): event is BinLogRotationEvent {
+ return event.getEventName() == 'rotate';
+}
+
+export function eventIsWriteMutation(event: BinLogEvent): event is BinLogMutationEvent {
+ return event.getEventName() == 'writerows';
+}
+
+export function eventIsDeleteMutation(event: BinLogEvent): event is BinLogMutationEvent {
+ return event.getEventName() == 'deleterows';
+}
+
+export function eventIsUpdateMutation(event: BinLogEvent): event is BinLogUpdateEvent {
+ return event.getEventName() == 'updaterows';
+}
diff --git a/modules/module-mysql/src/replication/zongji/zongji.d.ts b/modules/module-mysql/src/replication/zongji/zongji.d.ts
new file mode 100644
index 000000000..9a17f15e9
--- /dev/null
+++ b/modules/module-mysql/src/replication/zongji/zongji.d.ts
@@ -0,0 +1,119 @@
+declare module '@powersync/mysql-zongji' {
+ export type ZongjiOptions = {
+ host: string;
+ user: string;
+ password: string;
+ dateStrings?: boolean;
+ timeZone?: string;
+ };
+
+ interface DatabaseFilter {
+ [databaseName: string]: string[] | true;
+ }
+
+ export type StartOptions = {
+ includeEvents?: string[];
+ excludeEvents?: string[];
+ /**
+ * Describe which databases and tables to include (Only for row events). Use database names as the key and pass an array of table names or true (for the entire database).
+ * Example: { 'my_database': ['allow_table', 'another_table'], 'another_db': true }
+ */
+ includeSchema?: DatabaseFilter;
+ /**
+ * Object describing which databases and tables to exclude (Same format as includeSchema)
+ * Example: { 'other_db': ['disallowed_table'], 'ex_db': true }
+ */
+ excludeSchema?: DatabaseFilter;
+ /**
+ * BinLog position filename to start reading events from
+ */
+ filename?: string;
+ /**
+ * BinLog position offset to start reading events from in file specified
+ */
+ position?: number;
+
+ /**
+ * Unique server ID for this replication client.
+ */
+ serverId?: number;
+ };
+
+ export type ColumnSchema = {
+ COLUMN_NAME: string;
+ COLLATION_NAME: string;
+ CHARACTER_SET_NAME: string;
+ COLUMN_COMMENT: string;
+ COLUMN_TYPE: string;
+ };
+
+ export type ColumnDefinition = {
+ name: string;
+ charset: string;
+ type: number;
+ metadata: Record<string, any>;
+ };
+
+ export type TableMapEntry = {
+ columnSchemas: ColumnSchema[];
+ parentSchema: string;
+ tableName: string;
+ columns: ColumnDefinition[];
+ };
+
+ export type BaseBinLogEvent = {
+ timestamp: number;
+ getEventName(): string;
+
+ /**
+ * Next position in BinLog file to read from after
+ * this event.
+ */
+ nextPosition: number;
+ /**
+ * Size of this event
+ */
+ size: number;
+ flags: number;
+ useChecksum: boolean;
+ };
+
+ export type BinLogRotationEvent = BaseBinLogEvent & {
+ binlogName: string;
+ position: number;
+ };
+
+ export type BinLogGTIDLogEvent = BaseBinLogEvent & {
+ serverId: Buffer;
+ transactionRange: number;
+ };
+
+ export type BinLogXidEvent = BaseBinLogEvent & {
+ xid: number;
+ };
+
+ export type BinLogMutationEvent = BaseBinLogEvent & {
+ tableId: number;
+ numberOfColumns: number;
+ tableMap: Record<number, TableMapEntry>;
+ rows: Record<string, any>[];
+ };
+
+ export type BinLogUpdateEvent = Omit<BinLogMutationEvent, 'rows'> & {
+ rows: {
+ before: Record<string, any>;
+ after: Record<string, any>;
+ }[];
+ };
+
+ export type BinLogEvent = BinLogRotationEvent | BinLogGTIDLogEvent | BinLogXidEvent | BinLogMutationEvent;
+
+ export default class ZongJi {
+ constructor(options: ZongjiOptions);
+
+ start(options: StartOptions): void;
+ stop(): void;
+
+ on(type: 'binlog' | string, callback: (event: BinLogEvent) => void);
+ }
+}
diff --git a/modules/module-mysql/src/types/types.ts b/modules/module-mysql/src/types/types.ts
new file mode 100644
index 000000000..43dd17696
--- /dev/null
+++ b/modules/module-mysql/src/types/types.ts
@@ -0,0 +1,106 @@
+import * as service_types from '@powersync/service-types';
+import * as t from 'ts-codec';
+import * as urijs from 'uri-js';
+
+export const MYSQL_CONNECTION_TYPE = 'mysql' as const;
+
+export interface NormalizedMySQLConnectionConfig {
+ id: string;
+ tag: string;
+
+ hostname: string;
+ port: number;
+ database: string;
+
+ username: string;
+ password: string;
+ server_id: number;
+
+ cacert?: string;
+ client_certificate?: string;
+ client_private_key?: string;
+}
+
+export const MySQLConnectionConfig = service_types.configFile.DataSourceConfig.and(
+ t.object({
+ type: t.literal(MYSQL_CONNECTION_TYPE),
+ uri: t.string.optional(),
+ hostname: t.string.optional(),
+ port: service_types.configFile.portCodec.optional(),
+ username: t.string.optional(),
+ password: t.string.optional(),
+ database: t.string.optional(),
+ server_id: t.number.optional(),
+
+ cacert: t.string.optional(),
+ client_certificate: t.string.optional(),
+ client_private_key: t.string.optional()
+ })
+);
+
+/**
+ * Config input specified when starting services
+ */
+export type MySQLConnectionConfig = t.Decoded<typeof MySQLConnectionConfig>;
+
+/**
+ * Resolved version of {@link MySQLConnectionConfig}
+ */
+export type ResolvedConnectionConfig = MySQLConnectionConfig & NormalizedMySQLConnectionConfig;
+
+/**
+ * Validate and normalize connection options.
+ *
+ * Returns destructured options.
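+ *
+ * For example, a URI of the form mysql://user:password@localhost:3306/mydatabase is
+ * combined with any explicitly specified options, with explicit options taking precedence.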
+ */
+export function normalizeConnectionConfig(options: MySQLConnectionConfig): NormalizedMySQLConnectionConfig {
+ let uri: urijs.URIComponents;
+ if (options.uri) {
+ uri = urijs.parse(options.uri);
+ if (uri.scheme != 'mysql') {
+ throw new Error(`Invalid URI - protocol must be mysql, got ${uri.scheme}`);
+ }
+ } else {
+ uri = urijs.parse('mysql:///');
+ }
+
+ const hostname = options.hostname ?? uri.host ?? '';
+ const port = Number(options.port ?? uri.port ?? 3306);
+
+ const database = options.database ?? uri.path?.substring(1) ?? '';
+
+ const [uri_username, uri_password] = (uri.userinfo ?? '').split(':');
+
+ const username = options.username ?? uri_username ?? '';
+ const password = options.password ?? uri_password ?? '';
+
+ if (hostname == '') {
+ throw new Error(`hostname required`);
+ }
+
+ if (username == '') {
+ throw new Error(`username required`);
+ }
+
+ if (password == '') {
+ throw new Error(`password required`);
+ }
+
+ if (database == '') {
+ throw new Error(`database required`);
+ }
+
+ return {
+ id: options.id ?? 'default',
+ tag: options.tag ?? 'default',
+
+ hostname,
+ port,
+ database,
+
+ username,
+ password,
+
+ server_id: options.server_id ?? 1
+ };
+}
diff --git a/modules/module-mysql/src/utils/mysql_utils.ts b/modules/module-mysql/src/utils/mysql_utils.ts
new file mode 100644
index 000000000..99741b92a
--- /dev/null
+++ b/modules/module-mysql/src/utils/mysql_utils.ts
@@ -0,0 +1,53 @@
+import { logger } from '@powersync/lib-services-framework';
+import mysql from 'mysql2';
+import mysqlPromise from 'mysql2/promise';
+import * as types from '../types/types.js';
+
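+// Reverse lookup from mysql2 numeric type codes to their type names.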
+export const MySQLTypesMap: { [key: number]: string } = {};
+for (const [name, code] of Object.entries(mysql.Types)) {
+ MySQLTypesMap[code as number] = name;
+}
+
+export type RetriedQueryOptions = {
+ connection: mysqlPromise.Connection;
+ query: string;
+ params?: any[];
+ retries?: number;
+};
+
+/**
+ * Retry a simple query - up to 2 attempts total.
+ */
+export async function retriedQuery(options: RetriedQueryOptions) {
+ const { connection, query, params = [], retries = 2 } = options;
+ for (let tries = retries; ; tries--) {
+ try {
+ logger.debug(`Executing query: ${query}`);
+ return await connection.query(query, params);
+ } catch (e) {
+ if (tries == 1) {
+ throw e;
+ }
+ logger.warn('Query error, retrying', e);
+ }
+ }
+}
+
+export function createPool(config: types.NormalizedMySQLConnectionConfig, options?: mysql.PoolOptions): mysql.Pool {
+ const sslOptions = {
+ ca: config.cacert,
+ key: config.client_private_key,
+ cert: config.client_certificate
+ };
+ const hasSSLOptions = Object.values(sslOptions).some((v) => !!v);
+ return mysql.createPool({
+ host: config.hostname,
+ user: config.username,
+ password: config.password,
+ database: config.database,
+ ssl: hasSSLOptions ? sslOptions : undefined,
+ supportBigNumbers: true,
+ timezone: 'Z', // Ensure no automatic timezone manipulation of the dates occurs
+ ...(options || {})
+ });
+}
diff --git a/modules/module-mysql/test/src/env.ts b/modules/module-mysql/test/src/env.ts
new file mode 100644
index 000000000..3dad20a22
--- /dev/null
+++ b/modules/module-mysql/test/src/env.ts
@@ -0,0 +1,7 @@
+import { utils } from '@powersync/lib-services-framework';
+
+export const env = utils.collectEnvironmentVariables({
+ MYSQL_TEST_URI: utils.type.string.default('mysql://myuser:mypassword@localhost:3306/mydatabase'),
+ CI: utils.type.boolean.default('false'),
+ SLOW_TESTS: utils.type.boolean.default('false')
+});
diff --git a/modules/module-mysql/test/src/setup.ts b/modules/module-mysql/test/src/setup.ts
new file mode 100644
index 000000000..b924cf736
--- /dev/null
+++ b/modules/module-mysql/test/src/setup.ts
@@ -0,0 +1,7 @@
+import { container } from '@powersync/lib-services-framework';
+import { beforeAll } from 'vitest';
+
+beforeAll(() => {
+ // Executes for every test file
+ container.registerDefaults();
+});
diff --git a/modules/module-mysql/test/src/util.ts b/modules/module-mysql/test/src/util.ts
new file mode 100644
index 000000000..a489e0b1b
--- /dev/null
+++ b/modules/module-mysql/test/src/util.ts
@@ -0,0 +1,62 @@
+import * as types from '@module/types/types.js';
+import { BucketStorageFactory, Metrics, MongoBucketStorage } from '@powersync/service-core';
+import { env } from './env.js';
+import mysqlPromise from 'mysql2/promise';
+import { connectMongo } from '@core-tests/util.js';
+import { getMySQLVersion } from '@module/common/check-source-configuration.js';
+import { gte } from 'semver';
+import { RowDataPacket } from 'mysql2';
+
+// The metrics need to be initialized before they can be used
+await Metrics.initialise({
+ disable_telemetry_sharing: true,
+ powersync_instance_id: 'test',
+ internal_metrics_endpoint: 'unused.for.tests.com'
+});
+Metrics.getInstance().resetCounters();
+
+export const TEST_URI = env.MYSQL_TEST_URI;
+
+export const TEST_CONNECTION_OPTIONS = types.normalizeConnectionConfig({
+ type: 'mysql',
+ uri: TEST_URI
+});
+
+export type StorageFactory = () => Promise<BucketStorageFactory>;
+
+export const INITIALIZED_MONGO_STORAGE_FACTORY: StorageFactory = async () => {
+ const db = await connectMongo();
+
+ // None of the tests insert data into this collection, so it was never created
+ if (!(await db.db.listCollections({ name: db.bucket_parameters.collectionName }).hasNext())) {
+ await db.db.createCollection('bucket_parameters');
+ }
+
+ await db.clear();
+
+ return new MongoBucketStorage(db, { slot_name_prefix: 'test_' });
+};
+
+export async function clearAndRecreateTestDb(connection: mysqlPromise.Connection) {
+ const version = await getMySQLVersion(connection);
+ if (gte(version, '8.4.0')) {
+ await connection.query('RESET BINARY LOGS AND GTIDS');
+ } else {
+ await connection.query('RESET MASTER');
+ }
+
+ // await connection.query(`DROP DATABASE IF EXISTS ${TEST_CONNECTION_OPTIONS.database}`);
+ //
+ // await connection.query(`CREATE DATABASE IF NOT EXISTS ${TEST_CONNECTION_OPTIONS.database}`);
+
+ const [result] = await connection.query<RowDataPacket[]>(
+ `SELECT TABLE_NAME FROM information_schema.tables
+ WHERE TABLE_SCHEMA = '${TEST_CONNECTION_OPTIONS.database}'`
+ );
+ for (let row of result) {
+ const name = row.TABLE_NAME;
+ if (name.startsWith('test_')) {
+ await connection.query(`DROP TABLE ${name}`);
+ }
+ }
+}
diff --git a/modules/module-mysql/test/tsconfig.json b/modules/module-mysql/test/tsconfig.json
new file mode 100644
index 000000000..18898c4ee
--- /dev/null
+++ b/modules/module-mysql/test/tsconfig.json
@@ -0,0 +1,28 @@
+{
+ "extends": "../../../tsconfig.base.json",
+ "compilerOptions": {
+ "rootDir": "src",
+ "baseUrl": "./",
+ "noEmit": true,
+ "esModuleInterop": true,
+ "skipLibCheck": true,
+ "sourceMap": true,
+ "paths": {
+ "@/*": ["../../../packages/service-core/src/*"],
+ "@module/*": ["../src/*"],
+ "@core-tests/*": ["../../../packages/service-core/test/src/*"]
+ }
+ },
+ "include": ["src"],
+ "references": [
+ {
+ "path": "../"
+ },
+ {
+ "path": "../../../packages/service-core/test"
+ },
+ {
+ "path": "../../../packages/service-core/"
+ }
+ ]
+}
diff --git a/modules/module-mysql/tsconfig.json b/modules/module-mysql/tsconfig.json
new file mode 100644
index 000000000..a9d72169d
--- /dev/null
+++ b/modules/module-mysql/tsconfig.json
@@ -0,0 +1,26 @@
+{
+ "extends": "../../tsconfig.base.json",
+ "compilerOptions": {
+ "rootDir": "src",
+ "outDir": "dist",
+ "esModuleInterop": true,
+ "skipLibCheck": true,
+ "sourceMap": true,
+ "typeRoots": ["./node_modules/@types", "./src/replication/zongji.d.ts"]
+ },
+ "include": ["src"],
+ "references": [
+ {
+ "path": "../../packages/types"
+ },
+ {
+ "path": "../../packages/sync-rules"
+ },
+ {
+ "path": "../../packages/service-core"
+ },
+ {
+ "path": "../../libs/lib-services"
+ }
+ ]
+}
diff --git a/modules/module-mysql/vitest.config.ts b/modules/module-mysql/vitest.config.ts
new file mode 100644
index 000000000..7a39c1f71
--- /dev/null
+++ b/modules/module-mysql/vitest.config.ts
@@ -0,0 +1,15 @@
+import { defineConfig } from 'vitest/config';
+import tsconfigPaths from 'vite-tsconfig-paths';
+
+export default defineConfig({
+ plugins: [tsconfigPaths()],
+ test: {
+ setupFiles: './test/src/setup.ts',
+ poolOptions: {
+ threads: {
+ singleThread: true
+ }
+ },
+ pool: 'threads'
+ }
+});
diff --git a/modules/module-postgres/package.json b/modules/module-postgres/package.json
index aad70aba3..d42db9204 100644
--- a/modules/module-postgres/package.json
+++ b/modules/module-postgres/package.json
@@ -12,7 +12,7 @@
"scripts": {
"build": "tsc -b",
"build:tests": "tsc -b test/tsconfig.json",
- "clean": "rm -rf ./lib && tsc -b --clean",
+ "clean": "rm -rf ./dist && tsc -b --clean",
"test": "vitest"
},
"exports": {
diff --git a/modules/module-postgres/src/api/PostgresRouteAPIAdapter.ts b/modules/module-postgres/src/api/PostgresRouteAPIAdapter.ts
index 8a7e6e40c..11cd1cdbd 100644
--- a/modules/module-postgres/src/api/PostgresRouteAPIAdapter.ts
+++ b/modules/module-postgres/src/api/PostgresRouteAPIAdapter.ts
@@ -33,7 +33,7 @@ export class PostgresRouteAPIAdapter implements api.RouteAPI {
await this.pool.end();
}
- async getSourceConfig(): Promise {
+ async getSourceConfig(): Promise {
return this.config;
}
@@ -203,7 +203,8 @@ export class PostgresRouteAPIAdapter implements api.RouteAPI {
});
}
- async getReplicationLag(syncRulesId: string): Promise<number | undefined> {
+ async getReplicationLag(options: api.ReplicationLagOptions): Promise<number | undefined> {
+ const { bucketStorage: slotName } = options;
const results = await pg_utils.retriedQuery(this.pool, {
statement: `SELECT
slot_name,
@@ -211,14 +212,14 @@ export class PostgresRouteAPIAdapter implements api.RouteAPI {
pg_current_wal_lsn(),
(pg_current_wal_lsn() - confirmed_flush_lsn) AS lsn_distance
FROM pg_replication_slots WHERE slot_name = $1 LIMIT 1;`,
- params: [{ type: 'varchar', value: syncRulesId }]
+ params: [{ type: 'varchar', value: slotName }]
});
const [row] = pgwire.pgwireRows(results);
if (row) {
return Number(row.lsn_distance);
}
- throw new Error(`Could not determine replication lag for slot ${syncRulesId}`);
+ throw new Error(`Could not determine replication lag for slot ${slotName}`);
}
async getReplicationHead(): Promise<string> {
diff --git a/modules/module-postgres/src/index.ts b/modules/module-postgres/src/index.ts
index 04e83e4e6..3b0d87195 100644
--- a/modules/module-postgres/src/index.ts
+++ b/modules/module-postgres/src/index.ts
@@ -1,5 +1 @@
-import { PostgresModule } from './module/PostgresModule.js';
-
-export const module = new PostgresModule();
-
-export default module;
+export * from './module/PostgresModule.js';
diff --git a/modules/module-postgres/src/module/PostgresModule.ts b/modules/module-postgres/src/module/PostgresModule.ts
index eabffca0a..5b61275e2 100644
--- a/modules/module-postgres/src/module/PostgresModule.ts
+++ b/modules/module-postgres/src/module/PostgresModule.ts
@@ -5,9 +5,11 @@ import { SupabaseKeyCollector } from '../auth/SupabaseKeyCollector.js';
import { ConnectionManagerFactory } from '../replication/ConnectionManagerFactory.js';
import { PgManager } from '../replication/PgManager.js';
import { PostgresErrorRateLimiter } from '../replication/PostgresErrorRateLimiter.js';
-import { cleanUpReplicationSlot } from '../replication/replication-utils.js';
+import { checkSourceConfiguration, cleanUpReplicationSlot } from '../replication/replication-utils.js';
import { WalStreamReplicator } from '../replication/WalStreamReplicator.js';
import * as types from '../types/types.js';
+import { PostgresConnectionConfig } from '../types/types.js';
+import { PUBLICATION_NAME } from '../replication/WalStream.js';
export class PostgresModule extends replication.ReplicationModule<types.PostgresConnectionConfig> {
constructor() {
@@ -21,11 +23,11 @@ export class PostgresModule extends replication.ReplicationModule {
await super.initialize(context);
- // Record replicated bytes using global jpgwire metrics.
if (context.configuration.base_config.client_auth?.supabase) {
this.registerSupabaseAuth(context);
}
+ // Record replicated bytes using global jpgwire metrics.
if (context.metrics) {
jpgwire.setMetricsRecorder({
addBytesRead(bytes) {
@@ -112,4 +114,19 @@
+
+ async testConnection(config: PostgresConnectionConfig): Promise<void> {
+ this.decodeConfig(config);
+ const normalisedConfig = this.resolveConfig(this.decodedConfig!);
+ const connectionManager = new PgManager(normalisedConfig, {
+ idleTimeout: 30_000,
+ maxSize: 1
+ });
+ const connection = await connectionManager.snapshotConnection();
+ try {
+ return checkSourceConfiguration(connection, PUBLICATION_NAME);
+ } finally {
+ await connectionManager.end();
+ }
+ }
}
diff --git a/modules/module-postgres/src/replication/WalStream.ts b/modules/module-postgres/src/replication/WalStream.ts
index 80b11df0e..06d9868d2 100644
--- a/modules/module-postgres/src/replication/WalStream.ts
+++ b/modules/module-postgres/src/replication/WalStream.ts
@@ -397,7 +397,7 @@ WHERE oid = $1::regclass`,
for (const record of WalStream.getQueryData(rows)) {
// This auto-flushes when the batch reaches its size limit
await batch.save({
- tag: 'insert',
+ tag: storage.SaveOperationTag.INSERT,
sourceTable: table,
before: undefined,
beforeReplicaId: undefined,
@@ -497,7 +497,7 @@ WHERE oid = $1::regclass`,
Metrics.getInstance().rows_replicated_total.add(1);
const baseRecord = pg_utils.constructAfterRecord(msg);
return await batch.save({
- tag: 'insert',
+ tag: storage.SaveOperationTag.INSERT,
sourceTable: table,
before: undefined,
beforeReplicaId: undefined,
@@ -511,7 +511,7 @@ WHERE oid = $1::regclass`,
const before = pg_utils.constructBeforeRecord(msg);
const after = pg_utils.constructAfterRecord(msg);
return await batch.save({
- tag: 'update',
+ tag: storage.SaveOperationTag.UPDATE,
sourceTable: table,
before: before,
beforeReplicaId: before ? getUuidReplicaIdentityBson(before, table.replicaIdColumns) : undefined,
@@ -523,7 +523,7 @@ WHERE oid = $1::regclass`,
const before = pg_utils.constructBeforeRecord(msg)!;
return await batch.save({
- tag: 'delete',
+ tag: storage.SaveOperationTag.DELETE,
sourceTable: table,
before: before,
beforeReplicaId: getUuidReplicaIdentityBson(before, table.replicaIdColumns),
diff --git a/modules/module-postgres/src/replication/replication-utils.ts b/modules/module-postgres/src/replication/replication-utils.ts
index b2f0db008..c6b1e3fe1 100644
--- a/modules/module-postgres/src/replication/replication-utils.ts
+++ b/modules/module-postgres/src/replication/replication-utils.ts
@@ -93,7 +93,7 @@ WHERE oid = $1::oid LIMIT 1`,
}
}
-export async function checkSourceConfiguration(db: pgwire.PgClient, publicationName: string) {
+export async function checkSourceConfiguration(db: pgwire.PgClient, publicationName: string): Promise<void> {
// Check basic config
await pgwire_utils.retriedQuery(
db,
diff --git a/modules/module-postgres/src/types/types.ts b/modules/module-postgres/src/types/types.ts
index 205b147fe..9b5be2d2b 100644
--- a/modules/module-postgres/src/types/types.ts
+++ b/modules/module-postgres/src/types/types.ts
@@ -22,7 +22,7 @@ export interface NormalizedPostgresConnectionConfig {
client_private_key: string | undefined;
}
-export const PostgresConnectionConfig = service_types.configFile.dataSourceConfig.and(
+export const PostgresConnectionConfig = service_types.configFile.DataSourceConfig.and(
t.object({
type: t.literal(POSTGRES_CONNECTION_TYPE),
/** Unique identifier for the connection - optional when a single connection is present. */
diff --git a/packages/jpgwire/package.json b/packages/jpgwire/package.json
index 3d36eb3d6..d0b47430e 100644
--- a/packages/jpgwire/package.json
+++ b/packages/jpgwire/package.json
@@ -21,7 +21,7 @@
"@powersync/service-jsonbig": "workspace:^",
"@powersync/service-types": "workspace:^",
"@powersync/service-sync-rules": "workspace:^",
- "date-fns": "^3.6.0",
+ "date-fns": "^4.1.0",
"pgwire": "github:kagis/pgwire#f1cb95f9a0f42a612bb5a6b67bb2eb793fc5fc87"
}
}
diff --git a/packages/service-core/src/api/RouteAPI.ts b/packages/service-core/src/api/RouteAPI.ts
index bbe92180c..c4212aa2b 100644
--- a/packages/service-core/src/api/RouteAPI.ts
+++ b/packages/service-core/src/api/RouteAPI.ts
@@ -1,6 +1,6 @@
import { SqlSyncRules, TablePattern } from '@powersync/service-sync-rules';
import * as types from '@powersync/service-types';
-import { ParseSyncRulesOptions } from '../storage/BucketStorage.js';
+import { ParseSyncRulesOptions, SyncRulesBucketStorage } from '../storage/BucketStorage.js';
export interface PatternResult {
schema: string;
@@ -10,6 +10,10 @@ export interface PatternResult {
table?: types.TableInfo;
}
+export interface ReplicationLagOptions {
+ bucketStorage: SyncRulesBucketStorage;
+}
+
/**
* Describes all the methods currently required to service the sync API endpoints.
*/
@@ -17,7 +21,7 @@ export interface RouteAPI {
/**
* @returns basic identification of the connection
*/
- getSourceConfig(): Promise;
+ getSourceConfig(): Promise;
/**
* Checks the current connection status of the data source.
@@ -42,9 +46,8 @@ export interface RouteAPI {
/**
* @returns The replication lag: that is the amount of data which has not been
* replicated yet, in bytes.
- * @param {string} syncRulesId An identifier representing which set of sync rules the lag is required for.
*/
- getReplicationLag(syncRulesId: string): Promise<number | undefined>;
+ getReplicationLag(options: ReplicationLagOptions): Promise<number | undefined>;
/**
* Get the current LSN or equivalent replication HEAD position identifier
diff --git a/packages/service-core/src/api/diagnostics.ts b/packages/service-core/src/api/diagnostics.ts
index d323fcf81..72231c9ce 100644
--- a/packages/service-core/src/api/diagnostics.ts
+++ b/packages/service-core/src/api/diagnostics.ts
@@ -78,7 +78,9 @@ export async function getSyncRulesStatus(
if (systemStorage) {
try {
- replication_lag_bytes = await apiHandler.getReplicationLag(systemStorage.slot_name);
+ replication_lag_bytes = await apiHandler.getReplicationLag({
+ bucketStorage: systemStorage
+ });
} catch (e) {
// Ignore
logger.warn(`Unable to get replication lag`, e);
diff --git a/packages/service-core/src/api/schema.ts b/packages/service-core/src/api/schema.ts
index 64b48e1a0..5469973b2 100644
--- a/packages/service-core/src/api/schema.ts
+++ b/packages/service-core/src/api/schema.ts
@@ -16,9 +16,9 @@ export async function getConnectionsSchema(api: api.RouteAPI): Promise
diff --git a/packages/service-core/src/replication/AbstractReplicationJob.ts b/packages/service-core/src/replication/AbstractReplicationJob.ts
protected isReplicatingPromise: Promise<void> | null = null;
protected constructor(protected options: AbstractReplicationJobOptions) {
- this.logger = logger.child({ name: `ReplicationJob: ${options.id}` });
+ this.logger = logger.child({ name: `ReplicationJob: ${this.id}` });
}
/**
@@ -52,7 +52,7 @@ export abstract class AbstractReplicationJob {
* Safely stop the replication process
*/
public async stop(): Promise {
- this.logger.info(`Stopping ${this.id} replication job for sync rule iteration: ${this.storage.group_id}`);
+ this.logger.info(`Stopping replication job for sync rule iteration: ${this.storage.group_id}`);
this.abortController.abort();
await this.isReplicatingPromise;
}
diff --git a/packages/service-core/src/replication/ReplicationModule.ts b/packages/service-core/src/replication/ReplicationModule.ts
index 196e6e014..5b5bca8de 100644
--- a/packages/service-core/src/replication/ReplicationModule.ts
+++ b/packages/service-core/src/replication/ReplicationModule.ts
@@ -7,6 +7,18 @@ import * as modules from '../modules/modules-index.js';
import * as system from '../system/system-index.js';
import { schema } from '@powersync/lib-services-framework';
import { AbstractReplicator } from './AbstractReplicator.js';
+import { TearDownOptions } from '../modules/modules-index.js';
+
+/**
+ * Provides a common interface for testing the connection to a DataSource.
+ */
+export interface ConnectionTester<TConfig> {
+ /**
+ * Confirm if a connection can be established to the datasource for the provided datasource configuration
+ * @param config
+ */
+ testConnection(config: TConfig): Promise<void>;
+}
export interface ReplicationModuleOptions extends modules.AbstractModuleOptions {
type: string;
@@ -17,7 +29,10 @@ export interface ReplicationModuleOptions extends modules.AbstractModuleOptions
* A replication module describes all the functionality that PowerSync requires to
* replicate data from a DataSource. Whenever a new data source is added to powersync this class should be extended.
*/
-export abstract class ReplicationModule<TConfig> extends modules.AbstractModule {
+export abstract class ReplicationModule<TConfig>
+ extends modules.AbstractModule
+ implements ConnectionTester<TConfig>
+{
protected type: string;
protected configSchema: t.AnyCodec;
protected decodedConfig: TConfig | undefined;
@@ -43,6 +58,8 @@ export abstract class ReplicationModule extend
*/
protected abstract createReplicator(context: system.ServiceContext): AbstractReplicator;
+ public abstract testConnection(config: TConfig): Promise<void>;
+
/**
* Register this module's Replicators and RouteAPI adapters if the required configuration is present.
*/
@@ -54,7 +71,7 @@ export abstract class ReplicationModule extend
const matchingConfig = context.configuration.connections.filter((dataSource) => dataSource.type === this.type);
if (!matchingConfig.length) {
- // This module is needed given the config
+ // No configuration for this module was found
return;
}
@@ -66,9 +83,8 @@ export abstract class ReplicationModule extend
try {
const baseMatchingConfig = matchingConfig[0] as TConfig;
- // If validation fails, log the error and continue, no replication will happen for this data source
- this.validateConfig(baseMatchingConfig);
- this.decodedConfig = this.configSchema.decode(baseMatchingConfig);
+ // If decoding fails, log the error and continue, no replication will happen for this data source
+ this.decodeConfig(baseMatchingConfig);
context.replicationEngine?.register(this.createReplicator(context));
context.routerEngine?.registerAPI(this.createRouteAPIAdapter());
@@ -77,6 +93,11 @@ export abstract class ReplicationModule extend
}
}
+ protected decodeConfig(config: TConfig): void {
+ this.validateConfig(config);
+ this.decodedConfig = this.configSchema.decode(config);
+ }
+
private validateConfig(config: TConfig): void {
const validator = schema
.parseJSONSchema(
diff --git a/packages/service-core/src/routes/configure-fastify.ts b/packages/service-core/src/routes/configure-fastify.ts
index ec74d67b8..9c295f365 100644
--- a/packages/service-core/src/routes/configure-fastify.ts
+++ b/packages/service-core/src/routes/configure-fastify.ts
@@ -8,7 +8,7 @@ import { CHECKPOINT_ROUTES } from './endpoints/checkpointing.js';
import { SYNC_RULES_ROUTES } from './endpoints/sync-rules.js';
import { SYNC_STREAM_ROUTES } from './endpoints/sync-stream.js';
import { createRequestQueueHook, CreateRequestQueueParams } from './hooks.js';
-import { RouteDefinition } from './router.js';
+import { RouteDefinition, RouterServiceContext } from './router.js';
/**
* A list of route definitions to be registered as endpoints.
@@ -56,6 +56,19 @@ export const DEFAULT_ROUTE_OPTIONS = {
*/
export function configureFastifyServer(server: fastify.FastifyInstance, options: FastifyServerConfig) {
const { service_context, routes = DEFAULT_ROUTE_OPTIONS } = options;
+
+ const generateContext = async () => {
+ const { routerEngine } = service_context;
+ if (!routerEngine) {
+ throw new Error(`RouterEngine has not been registered`);
+ }
+
+ return {
+ user_id: undefined,
+ service_context: service_context as RouterServiceContext
+ };
+ };
+
/**
* Fastify creates an encapsulated context for each `.register` call.
* Creating a separate context here to separate the concurrency limits for Admin APIs
@@ -63,16 +76,7 @@ export function configureFastifyServer(server: fastify.FastifyInstance, options:
* https://github.com/fastify/fastify/blob/main/docs/Reference/Encapsulation.md
*/
server.register(async function (childContext) {
- registerFastifyRoutes(
- childContext,
- async () => {
- return {
- user_id: undefined,
- service_context
- };
- },
- routes.api?.routes ?? DEFAULT_ROUTE_OPTIONS.api.routes
- );
+ registerFastifyRoutes(childContext, generateContext, routes.api?.routes ?? DEFAULT_ROUTE_OPTIONS.api.routes);
// Limit the active concurrent requests
childContext.addHook(
'onRequest',
@@ -84,12 +88,7 @@ export function configureFastifyServer(server: fastify.FastifyInstance, options:
server.register(async function (childContext) {
registerFastifyRoutes(
childContext,
- async () => {
- return {
- user_id: undefined,
- service_context
- };
- },
+ generateContext,
routes.sync_stream?.routes ?? DEFAULT_ROUTE_OPTIONS.sync_stream.routes
);
// Limit the active concurrent requests
diff --git a/packages/service-core/src/routes/configure-rsocket.ts b/packages/service-core/src/routes/configure-rsocket.ts
index 6d6f3979e..05572dfc5 100644
--- a/packages/service-core/src/routes/configure-rsocket.ts
+++ b/packages/service-core/src/routes/configure-rsocket.ts
@@ -8,7 +8,7 @@ import { ServiceContext } from '../system/ServiceContext.js';
import { generateContext, getTokenFromHeader } from './auth.js';
import { syncStreamReactive } from './endpoints/socket-route.js';
import { RSocketContextMeta, SocketRouteGenerator } from './router-socket.js';
-import { Context } from './router.js';
+import { Context, RouterServiceContext } from './router.js';
export type RSockerRouterConfig = {
service_context: ServiceContext;
@@ -36,12 +36,17 @@ export function configureRSocket(router: ReactiveSocketRouter, options:
if (context?.token_payload == null) {
throw new errors.AuthorizationError(token_errors ?? 'Authentication required');
}
+
+ if (!service_context.routerEngine) {
+ throw new Error(`RouterEngine has not been registered`);
+ }
+
return {
token,
user_agent,
...context,
token_errors: token_errors,
- service_context
+ service_context: service_context as RouterServiceContext
};
} else {
throw new errors.AuthorizationError('No token provided');
diff --git a/packages/service-core/src/routes/endpoints/admin.ts b/packages/service-core/src/routes/endpoints/admin.ts
index 1e96c0f66..c08b1068d 100644
--- a/packages/service-core/src/routes/endpoints/admin.ts
+++ b/packages/service-core/src/routes/endpoints/admin.ts
@@ -137,8 +137,8 @@ export const reprocess = routeDefinition({
connections: [
{
// Previously the connection was asserted with `!`
- tag: baseConfig!.tag!,
- id: baseConfig!.id,
+ tag: baseConfig.tag,
+ id: baseConfig.id,
slot_name: new_rules.slot_name
}
]
diff --git a/packages/service-core/src/routes/endpoints/sync-rules.ts b/packages/service-core/src/routes/endpoints/sync-rules.ts
index 1129a5855..70e6127d6 100644
--- a/packages/service-core/src/routes/endpoints/sync-rules.ts
+++ b/packages/service-core/src/routes/endpoints/sync-rules.ts
@@ -3,6 +3,7 @@ import { SqlSyncRules, SyncRulesErrors } from '@powersync/service-sync-rules';
import type { FastifyPluginAsync } from 'fastify';
import * as t from 'ts-codec';
+import * as system from '../../system/system-index.js';
import { authApi } from '../auth.js';
import { routeDefinition } from '../router.js';
import { RouteAPI } from '../../api/RouteAPI.js';
diff --git a/packages/service-core/src/routes/router.ts b/packages/service-core/src/routes/router.ts
index bc2f9d6d1..4efc49744 100644
--- a/packages/service-core/src/routes/router.ts
+++ b/packages/service-core/src/routes/router.ts
@@ -1,13 +1,19 @@
import { router } from '@powersync/lib-services-framework';
import * as auth from '../auth/auth-index.js';
import { ServiceContext } from '../system/ServiceContext.js';
+import { RouterEngine } from './RouterEngine.js';
+/**
+ * The {@link RouterEngine} must be provided for these routes
+ */
+export type RouterServiceContext = ServiceContext & { routerEngine: RouterEngine };
/**
* Common context for routes
*/
export type Context = {
user_id?: string;
- service_context: ServiceContext;
+
+ service_context: RouterServiceContext;
token_payload?: auth.JwtPayload;
token_errors?: string[];
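`RouterServiceContext` is a plain intersection type: structurally it is a `ServiceContext` whose `routerEngine` is guaranteed to be present, so route handlers can use the engine without repeated null checks. The runtime guard added in `configure-rsocket.ts` earlier in this diff is what justifies the `as RouterServiceContext` casts. A self-contained sketch of the pattern (types simplified):

```ts
interface RouterEngine {
  shutDown(): Promise<void>; // illustrative member
}

interface ServiceContext {
  routerEngine?: RouterEngine;
}

// Same shape, but routerEngine is non-optional.
type RouterServiceContext = ServiceContext & { routerEngine: RouterEngine };

function requireRouterContext(ctx: ServiceContext): RouterServiceContext {
  if (!ctx.routerEngine) {
    // Mirrors the guard in configure-rsocket.ts.
    throw new Error('RouterEngine has not been registered');
  }
  return ctx as RouterServiceContext;
}
```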
diff --git a/packages/service-core/src/storage/BucketStorage.ts b/packages/service-core/src/storage/BucketStorage.ts
index 446362a5a..5c6b98fe3 100644
--- a/packages/service-core/src/storage/BucketStorage.ts
+++ b/packages/service-core/src/storage/BucketStorage.ts
@@ -105,20 +105,22 @@ export interface BucketStorageFactory extends DisposableObserverClient;
}
-export interface WriteCheckpoint {
- base: ActiveCheckpoint;
- writeCheckpoint: bigint | null;
-}
-
-export interface ActiveCheckpoint {
+export interface ReplicationCheckpoint {
readonly checkpoint: util.OpId;
readonly lsn: string | null;
+}
+export interface ActiveCheckpoint extends ReplicationCheckpoint {
hasSyncRules(): boolean;
getBucketStorage(): Promise;
}
+export interface WriteCheckpoint {
+ base: ActiveCheckpoint;
+ writeCheckpoint: bigint | null;
+}
+
export interface StorageMetrics {
/**
* Size of operations (bucket_data)
@@ -220,7 +222,7 @@ export interface SyncRulesBucketStorage
callback: (batch: BucketStorageBatch) => Promise
): Promise;
- getCheckpoint(): Promise<{ checkpoint: util.OpId }>;
+  getCheckpoint(): Promise<ReplicationCheckpoint>;
getParsedSyncRules(options: ParseSyncRulesOptions): SqlSyncRules;
@@ -386,8 +388,14 @@ export type SaveOp = 'insert' | 'update' | 'delete';
export type SaveOptions = SaveInsert | SaveUpdate | SaveDelete;
+export enum SaveOperationTag {
+ INSERT = 'insert',
+ UPDATE = 'update',
+ DELETE = 'delete'
+}
+
export interface SaveInsert {
- tag: 'insert';
+ tag: SaveOperationTag.INSERT;
sourceTable: SourceTable;
before?: undefined;
beforeReplicaId?: undefined;
@@ -396,7 +404,7 @@ export interface SaveInsert {
}
export interface SaveUpdate {
- tag: 'update';
+ tag: SaveOperationTag.UPDATE;
sourceTable: SourceTable;
/**
@@ -415,7 +423,7 @@ export interface SaveUpdate {
}
export interface SaveDelete {
- tag: 'delete';
+ tag: SaveOperationTag.DELETE;
sourceTable: SourceTable;
before?: SqliteRow;
beforeReplicaId: ReplicaId;
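Since `SaveOperationTag` is a string-valued enum, its members are the same `'insert' | 'update' | 'delete'` strings at runtime; nothing persisted or replicated changes, and call sites gain a single named source of truth. A quick sketch:

```ts
enum SaveOperationTag {
  INSERT = 'insert',
  UPDATE = 'update',
  DELETE = 'delete'
}

// Exhaustive mapping: the compiler flags a missing member.
const DESCRIPTIONS: Record<SaveOperationTag, string> = {
  [SaveOperationTag.INSERT]: 'row added',
  [SaveOperationTag.UPDATE]: 'row changed',
  [SaveOperationTag.DELETE]: 'row removed'
};

console.log(SaveOperationTag.INSERT as string); // insert
console.log(DESCRIPTIONS[SaveOperationTag.DELETE]); // row removed
```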
diff --git a/packages/service-core/src/storage/MongoBucketStorage.ts b/packages/service-core/src/storage/MongoBucketStorage.ts
index f3175b915..bfebaba31 100644
--- a/packages/service-core/src/storage/MongoBucketStorage.ts
+++ b/packages/service-core/src/storage/MongoBucketStorage.ts
@@ -21,14 +21,12 @@ import {
UpdateSyncRulesOptions,
WriteCheckpoint
} from './BucketStorage.js';
-import { PowerSyncMongo, PowerSyncMongoOptions } from './mongo/db.js';
+import { PowerSyncMongo } from './mongo/db.js';
import { SyncRuleDocument, SyncRuleState } from './mongo/models.js';
import { MongoPersistedSyncRulesContent } from './mongo/MongoPersistedSyncRulesContent.js';
import { MongoSyncBucketStorage } from './mongo/MongoSyncBucketStorage.js';
import { generateSlotName } from './mongo/util.js';
-export interface MongoBucketStorageOptions extends PowerSyncMongoOptions {}
-
export class MongoBucketStorage
extends DisposableObserver
implements BucketStorageFactory
diff --git a/packages/service-core/src/storage/mongo/MongoBucketBatch.ts b/packages/service-core/src/storage/mongo/MongoBucketBatch.ts
index c24a04999..20e200500 100644
--- a/packages/service-core/src/storage/mongo/MongoBucketBatch.ts
+++ b/packages/service-core/src/storage/mongo/MongoBucketBatch.ts
@@ -81,6 +81,7 @@ export class MongoBucketBatch extends DisposableObserver {
const doc = await this.db.sync_rules.findOne(
{ _id: this.group_id },
{
- projection: { last_checkpoint: 1 }
+ projection: { last_checkpoint: 1, last_checkpoint_lsn: 1 }
}
);
return {
- checkpoint: util.timestampToOpId(doc?.last_checkpoint ?? 0n)
+ checkpoint: util.timestampToOpId(doc?.last_checkpoint ?? 0n),
+ lsn: doc?.last_checkpoint_lsn ?? null
};
}
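The checkpoint read above now projects `last_checkpoint_lsn` alongside `last_checkpoint`, so a single round trip yields a full `ReplicationCheckpoint` (op id plus source LSN). A hedged sketch of the same read with the MongoDB driver — the collection and field names come from the diff, the database name is an assumption:

```ts
import { MongoClient } from 'mongodb';

async function readCheckpoint(client: MongoClient, groupId: number) {
  // Narrow projection: only the two checkpoint fields are fetched.
  const doc = await client
    .db('powersync') // assumed database name for this sketch
    .collection<any>('sync_rules')
    .findOne({ _id: groupId }, { projection: { last_checkpoint: 1, last_checkpoint_lsn: 1 } });

  return {
    checkpoint: doc?.last_checkpoint ?? 0n, // the real code serializes this via util.timestampToOpId
    lsn: doc?.last_checkpoint_lsn ?? null
  };
}
```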
diff --git a/packages/service-core/test/src/compacting.test.ts b/packages/service-core/test/src/compacting.test.ts
index e7902ab23..b5caf72a3 100644
--- a/packages/service-core/test/src/compacting.test.ts
+++ b/packages/service-core/test/src/compacting.test.ts
@@ -1,11 +1,9 @@
+import { SaveOperationTag } from '@/storage/BucketStorage.js';
import { MongoCompactOptions } from '@/storage/mongo/MongoCompactor.js';
-import { SqlSyncRules } from '@powersync/service-sync-rules';
import { describe, expect, test } from 'vitest';
import { validateCompactedBucket } from './bucket_validation.js';
import { oneFromAsync } from './stream_utils.js';
-import { BATCH_OPTIONS, makeTestTable, MONGO_STORAGE_FACTORY, rid, testRules, ZERO_LSN } from './util.js';
-import { ParseSyncRulesOptions, PersistedSyncRulesContent, StartBatchOptions } from '@/storage/BucketStorage.js';
-import { getUuidReplicaIdentityBson } from '@/util/util-index.js';
+import { BATCH_OPTIONS, makeTestTable, MONGO_STORAGE_FACTORY, rid, testRules } from './util.js';
const TEST_TABLE = makeTestTable('test', ['id']);
@@ -31,7 +29,7 @@ bucket_definitions:
const result = await storage.startBatch(BATCH_OPTIONS, async (batch) => {
await batch.save({
sourceTable: TEST_TABLE,
- tag: 'insert',
+ tag: SaveOperationTag.INSERT,
after: {
id: 't1'
},
@@ -40,7 +38,7 @@ bucket_definitions:
await batch.save({
sourceTable: TEST_TABLE,
- tag: 'insert',
+ tag: SaveOperationTag.INSERT,
after: {
id: 't2'
},
@@ -49,7 +47,7 @@ bucket_definitions:
await batch.save({
sourceTable: TEST_TABLE,
- tag: 'update',
+ tag: SaveOperationTag.UPDATE,
after: {
id: 't2'
},
@@ -128,7 +126,7 @@ bucket_definitions:
const result = await storage.startBatch(BATCH_OPTIONS, async (batch) => {
await batch.save({
sourceTable: TEST_TABLE,
- tag: 'insert',
+ tag: SaveOperationTag.INSERT,
after: {
id: 't1'
},
@@ -137,7 +135,7 @@ bucket_definitions:
await batch.save({
sourceTable: TEST_TABLE,
- tag: 'insert',
+ tag: SaveOperationTag.INSERT,
after: {
id: 't2'
},
@@ -146,7 +144,7 @@ bucket_definitions:
await batch.save({
sourceTable: TEST_TABLE,
- tag: 'delete',
+ tag: SaveOperationTag.DELETE,
before: {
id: 't1'
},
@@ -155,7 +153,7 @@ bucket_definitions:
await batch.save({
sourceTable: TEST_TABLE,
- tag: 'update',
+ tag: SaveOperationTag.UPDATE,
after: {
id: 't2'
},
@@ -233,7 +231,7 @@ bucket_definitions:
const result = await storage.startBatch(BATCH_OPTIONS, async (batch) => {
await batch.save({
sourceTable: TEST_TABLE,
- tag: 'insert',
+ tag: SaveOperationTag.INSERT,
after: {
id: 't1'
},
@@ -242,7 +240,7 @@ bucket_definitions:
await batch.save({
sourceTable: TEST_TABLE,
- tag: 'insert',
+ tag: SaveOperationTag.INSERT,
after: {
id: 't2'
},
@@ -251,7 +249,7 @@ bucket_definitions:
await batch.save({
sourceTable: TEST_TABLE,
- tag: 'delete',
+ tag: SaveOperationTag.DELETE,
before: {
id: 't1'
},
@@ -265,7 +263,7 @@ bucket_definitions:
const result2 = await storage.startBatch(BATCH_OPTIONS, async (batch) => {
await batch.save({
sourceTable: TEST_TABLE,
- tag: 'delete',
+ tag: SaveOperationTag.DELETE,
before: {
id: 't2'
},
diff --git a/packages/service-core/test/src/data_storage.test.ts b/packages/service-core/test/src/data_storage.test.ts
index 05885b704..776043b03 100644
--- a/packages/service-core/test/src/data_storage.test.ts
+++ b/packages/service-core/test/src/data_storage.test.ts
@@ -1,5 +1,4 @@
-import { BucketDataBatchOptions } from '@/storage/BucketStorage.js';
-import { getUuidReplicaIdentityBson } from '@/util/util-index.js';
+import { BucketDataBatchOptions, SaveOperationTag } from '@/storage/BucketStorage.js';
import { RequestParameters } from '@powersync/service-sync-rules';
import { describe, expect, test } from 'vitest';
import { fromAsync, oneFromAsync } from './stream_utils.js';
@@ -14,6 +13,7 @@ import {
StorageFactory,
testRules
} from './util.js';
+import { getUuidReplicaIdentityBson } from '@/util/util-index.js';
const TEST_TABLE = makeTestTable('test', ['id']);
@@ -36,7 +36,7 @@ bucket_definitions:
const result = await storage.startBatch(BATCH_OPTIONS, async (batch) => {
await batch.save({
sourceTable: TEST_TABLE,
- tag: 'insert',
+ tag: SaveOperationTag.INSERT,
after: {
id: 't2',
id1: 'user3',
@@ -48,7 +48,7 @@ bucket_definitions:
await batch.save({
sourceTable: TEST_TABLE,
- tag: 'insert',
+ tag: SaveOperationTag.INSERT,
after: {
id: 't1',
id1: 'user1',
@@ -83,7 +83,7 @@ bucket_definitions:
const result1 = await storage.startBatch(BATCH_OPTIONS, async (batch) => {
await batch.save({
sourceTable: TEST_TABLE,
- tag: 'insert',
+ tag: SaveOperationTag.INSERT,
after: {
id: 'user1',
group_id: 'group1'
@@ -94,7 +94,7 @@ bucket_definitions:
const result2 = await storage.startBatch(BATCH_OPTIONS, async (batch) => {
await batch.save({
sourceTable: TEST_TABLE,
- tag: 'insert',
+ tag: SaveOperationTag.INSERT,
after: {
id: 'user1',
group_id: 'group2'
@@ -135,7 +135,7 @@ bucket_definitions:
const result = await storage.startBatch(BATCH_OPTIONS, async (batch) => {
await batch.save({
sourceTable: TEST_TABLE,
- tag: 'insert',
+ tag: SaveOperationTag.INSERT,
after: {
id: 't1',
group_id: 'group1',
@@ -179,7 +179,7 @@ bucket_definitions:
const result = await storage.startBatch(BATCH_OPTIONS, async (batch) => {
await batch.save({
sourceTable: TEST_TABLE,
- tag: 'insert',
+ tag: SaveOperationTag.INSERT,
after: {
id: 't1',
group_id: 'group1',
@@ -190,7 +190,7 @@ bucket_definitions:
await batch.save({
sourceTable: TEST_TABLE,
- tag: 'update',
+ tag: SaveOperationTag.UPDATE,
after: {
id: 't1',
group_id: 'group1',
@@ -226,7 +226,7 @@ bucket_definitions:
await batch.save({
sourceTable,
- tag: 'insert',
+ tag: SaveOperationTag.INSERT,
after: {
id: 'test1',
description: 'test1'
@@ -235,7 +235,7 @@ bucket_definitions:
});
await batch.save({
sourceTable,
- tag: 'delete',
+ tag: SaveOperationTag.DELETE,
beforeReplicaId: rid('test1')
});
});
@@ -289,7 +289,7 @@ bucket_definitions:
const result = await storage.startBatch(BATCH_OPTIONS, async (batch) => {
await batch.save({
sourceTable: WORKSPACE_TABLE,
- tag: 'insert',
+ tag: SaveOperationTag.INSERT,
after: {
id: 'workspace1',
userId: 'u1'
@@ -339,7 +339,7 @@ bucket_definitions:
const result = await storage.startBatch(BATCH_OPTIONS, async (batch) => {
await batch.save({
sourceTable: WORKSPACE_TABLE,
- tag: 'insert',
+ tag: SaveOperationTag.INSERT,
after: {
id: 'workspace1',
visibility: 'public'
@@ -349,7 +349,7 @@ bucket_definitions:
await batch.save({
sourceTable: WORKSPACE_TABLE,
- tag: 'insert',
+ tag: SaveOperationTag.INSERT,
after: {
id: 'workspace2',
visibility: 'private'
@@ -359,7 +359,7 @@ bucket_definitions:
await batch.save({
sourceTable: WORKSPACE_TABLE,
- tag: 'insert',
+ tag: SaveOperationTag.INSERT,
after: {
id: 'workspace3',
visibility: 'public'
@@ -413,7 +413,7 @@ bucket_definitions:
const result = await storage.startBatch(BATCH_OPTIONS, async (batch) => {
await batch.save({
sourceTable: WORKSPACE_TABLE,
- tag: 'insert',
+ tag: SaveOperationTag.INSERT,
after: {
id: 'workspace1',
visibility: 'public'
@@ -423,7 +423,7 @@ bucket_definitions:
await batch.save({
sourceTable: WORKSPACE_TABLE,
- tag: 'insert',
+ tag: SaveOperationTag.INSERT,
after: {
id: 'workspace2',
visibility: 'private'
@@ -433,7 +433,7 @@ bucket_definitions:
await batch.save({
sourceTable: WORKSPACE_TABLE,
- tag: 'insert',
+ tag: SaveOperationTag.INSERT,
after: {
id: 'workspace3',
user_id: 'u1',
@@ -444,7 +444,7 @@ bucket_definitions:
await batch.save({
sourceTable: WORKSPACE_TABLE,
- tag: 'insert',
+ tag: SaveOperationTag.INSERT,
after: {
id: 'workspace4',
user_id: 'u2',
@@ -501,7 +501,7 @@ bucket_definitions:
const result = await storage.startBatch(BATCH_OPTIONS, async (batch) => {
await batch.save({
sourceTable,
- tag: 'insert',
+ tag: SaveOperationTag.INSERT,
after: {
id: 'test1',
client_id: 'client1a',
@@ -511,7 +511,7 @@ bucket_definitions:
});
await batch.save({
sourceTable,
- tag: 'update',
+ tag: SaveOperationTag.UPDATE,
after: {
id: 'test1',
client_id: 'client1b',
@@ -522,7 +522,7 @@ bucket_definitions:
await batch.save({
sourceTable,
- tag: 'insert',
+ tag: SaveOperationTag.INSERT,
after: {
id: 'test2',
client_id: 'client2',
@@ -564,7 +564,7 @@ bucket_definitions:
await batch.save({
sourceTable,
- tag: 'insert',
+ tag: SaveOperationTag.INSERT,
after: {
id: 'test1',
description: 'test1'
@@ -578,7 +578,7 @@ bucket_definitions:
await batch.save({
sourceTable,
- tag: 'delete',
+ tag: SaveOperationTag.DELETE,
beforeReplicaId: rid('test1')
});
});
@@ -588,7 +588,7 @@ bucket_definitions:
await batch.save({
sourceTable,
- tag: 'delete',
+ tag: SaveOperationTag.DELETE,
beforeReplicaId: rid('test1')
});
});
@@ -638,7 +638,7 @@ bucket_definitions:
await batch.save({
sourceTable,
- tag: 'insert',
+ tag: SaveOperationTag.INSERT,
after: {
id: 'test1',
description: 'test1'
@@ -652,7 +652,7 @@ bucket_definitions:
await batch.save({
sourceTable,
- tag: 'update',
+ tag: SaveOperationTag.UPDATE,
after: {
id: 'test1',
description: undefined
@@ -662,7 +662,7 @@ bucket_definitions:
await batch.save({
sourceTable,
- tag: 'update',
+ tag: SaveOperationTag.UPDATE,
after: {
id: 'test1',
description: undefined
@@ -672,7 +672,7 @@ bucket_definitions:
await batch.save({
sourceTable,
- tag: 'delete',
+ tag: SaveOperationTag.DELETE,
beforeReplicaId: rid('test1')
});
});
@@ -682,7 +682,7 @@ bucket_definitions:
await batch.save({
sourceTable,
- tag: 'update',
+ tag: SaveOperationTag.UPDATE,
after: {
id: 'test1',
description: undefined
@@ -692,7 +692,7 @@ bucket_definitions:
await batch.save({
sourceTable,
- tag: 'update',
+ tag: SaveOperationTag.UPDATE,
after: {
id: 'test1',
description: undefined
@@ -702,7 +702,7 @@ bucket_definitions:
await batch.save({
sourceTable,
- tag: 'delete',
+ tag: SaveOperationTag.DELETE,
beforeReplicaId: rid('test1')
});
});
@@ -755,7 +755,7 @@ bucket_definitions:
await storage.startBatch(BATCH_OPTIONS, async (batch) => {
await batch.save({
sourceTable: TEST_TABLE,
- tag: 'insert',
+ tag: SaveOperationTag.INSERT,
after: {
id: 't2',
id1: 'user3',
@@ -799,7 +799,7 @@ bucket_definitions:
await batch.save({
sourceTable,
- tag: 'insert',
+ tag: SaveOperationTag.INSERT,
after: {
id: 'test1',
description: 'test1a'
@@ -809,7 +809,7 @@ bucket_definitions:
await batch.save({
sourceTable,
- tag: 'insert',
+ tag: SaveOperationTag.INSERT,
after: {
id: 'test2',
description: 'test2a'
@@ -826,7 +826,7 @@ bucket_definitions:
// b
await batch.save({
sourceTable,
- tag: 'insert',
+ tag: SaveOperationTag.INSERT,
after: {
id: 'test1',
description: 'test1b'
@@ -836,7 +836,7 @@ bucket_definitions:
await batch.save({
sourceTable,
- tag: 'update',
+ tag: SaveOperationTag.UPDATE,
before: {
id: 'test1'
},
@@ -850,7 +850,7 @@ bucket_definitions:
await batch.save({
sourceTable,
- tag: 'update',
+ tag: SaveOperationTag.UPDATE,
before: {
id: 'test2'
},
@@ -866,7 +866,7 @@ bucket_definitions:
// c
await batch.save({
sourceTable,
- tag: 'update',
+ tag: SaveOperationTag.UPDATE,
after: {
id: 'test2',
description: 'test2c'
@@ -877,7 +877,7 @@ bucket_definitions:
// d
await batch.save({
sourceTable,
- tag: 'insert',
+ tag: SaveOperationTag.INSERT,
after: {
id: 'test4',
description: 'test4d'
@@ -887,7 +887,7 @@ bucket_definitions:
await batch.save({
sourceTable,
- tag: 'update',
+ tag: SaveOperationTag.UPDATE,
before: {
id: 'test4'
},
@@ -953,7 +953,7 @@ bucket_definitions:
const result1 = await storage.startBatch(BATCH_OPTIONS, async (batch) => {
await batch.save({
sourceTable,
- tag: 'insert',
+ tag: SaveOperationTag.INSERT,
after: {
id: 'test1',
description: 'test1a'
@@ -968,7 +968,7 @@ bucket_definitions:
// Unchanged, but has a before id
await batch.save({
sourceTable,
- tag: 'update',
+ tag: SaveOperationTag.UPDATE,
before: {
id: 'test1',
description: 'test1a'
@@ -986,7 +986,7 @@ bucket_definitions:
// Delete
await batch.save({
sourceTable,
- tag: 'delete',
+ tag: SaveOperationTag.DELETE,
before: {
id: 'test1',
description: 'test1b'
@@ -1058,7 +1058,7 @@ bucket_definitions:
const result1 = await storage.startBatch(BATCH_OPTIONS, async (batch) => {
await batch.save({
sourceTable,
- tag: 'insert',
+ tag: SaveOperationTag.INSERT,
after: {
id: 'test1',
description: 'test1a'
@@ -1073,7 +1073,7 @@ bucket_definitions:
// Unchanged, but has a before id
await batch.save({
sourceTable,
- tag: 'update',
+ tag: SaveOperationTag.UPDATE,
before: {
id: 'test1',
description: 'test1a'
@@ -1091,7 +1091,7 @@ bucket_definitions:
// Delete
await batch.save({
sourceTable,
- tag: 'delete',
+ tag: SaveOperationTag.DELETE,
before: {
id: 'test1',
description: 'test1a'
@@ -1154,7 +1154,7 @@ bucket_definitions:
await batch.save({
sourceTable,
- tag: 'insert',
+ tag: SaveOperationTag.INSERT,
after: {
id: 'test1',
description: 'test1'
@@ -1164,7 +1164,7 @@ bucket_definitions:
await batch.save({
sourceTable,
- tag: 'insert',
+ tag: SaveOperationTag.INSERT,
after: {
id: 'large1',
description: largeDescription
@@ -1175,7 +1175,7 @@ bucket_definitions:
// Large enough to split the returned batch
await batch.save({
sourceTable,
- tag: 'insert',
+ tag: SaveOperationTag.INSERT,
after: {
id: 'large2',
description: largeDescription
@@ -1185,7 +1185,7 @@ bucket_definitions:
await batch.save({
sourceTable,
- tag: 'insert',
+ tag: SaveOperationTag.INSERT,
after: {
id: 'test3',
description: 'test3'
@@ -1252,7 +1252,7 @@ bucket_definitions:
await batch.save({
sourceTable,
- tag: 'insert',
+ tag: SaveOperationTag.INSERT,
after: {
id: 'test1',
description: 'test1'
@@ -1262,7 +1262,7 @@ bucket_definitions:
await batch.save({
sourceTable,
- tag: 'insert',
+ tag: SaveOperationTag.INSERT,
after: {
id: 'large1',
description: largeDescription
@@ -1273,7 +1273,7 @@ bucket_definitions:
// Large enough to split the returned batch
await batch.save({
sourceTable,
- tag: 'insert',
+ tag: SaveOperationTag.INSERT,
after: {
id: 'large2',
description: largeDescription
@@ -1283,7 +1283,7 @@ bucket_definitions:
await batch.save({
sourceTable,
- tag: 'insert',
+ tag: SaveOperationTag.INSERT,
after: {
id: 'test3',
description: 'test3'
@@ -1346,7 +1346,7 @@ bucket_definitions:
for (let i = 1; i <= 6; i++) {
await batch.save({
sourceTable,
- tag: 'insert',
+ tag: SaveOperationTag.INSERT,
after: {
id: `test${i}`,
description: `test${i}`
diff --git a/packages/service-core/test/src/sync.test.ts b/packages/service-core/test/src/sync.test.ts
index 606647038..64e463274 100644
--- a/packages/service-core/test/src/sync.test.ts
+++ b/packages/service-core/test/src/sync.test.ts
@@ -1,3 +1,4 @@
+import { SaveOperationTag } from '@/storage/storage-index.js';
import { RequestTracker } from '@/sync/RequestTracker.js';
import { streamResponse } from '@/sync/sync.js';
import { StreamingSyncLine } from '@/util/protocol-types.js';
@@ -36,7 +37,7 @@ function defineTests(factory: StorageFactory) {
const result = await storage.startBatch(BATCH_OPTIONS, async (batch) => {
await batch.save({
sourceTable: TEST_TABLE,
- tag: 'insert',
+ tag: SaveOperationTag.INSERT,
after: {
id: 't1',
description: 'Test 1'
@@ -46,7 +47,7 @@ function defineTests(factory: StorageFactory) {
await batch.save({
sourceTable: TEST_TABLE,
- tag: 'insert',
+ tag: SaveOperationTag.INSERT,
after: {
id: 't2',
description: 'Test 2'
@@ -87,7 +88,7 @@ function defineTests(factory: StorageFactory) {
const result = await storage.startBatch(BATCH_OPTIONS, async (batch) => {
await batch.save({
sourceTable: TEST_TABLE,
- tag: 'insert',
+ tag: SaveOperationTag.INSERT,
after: {
id: 't1',
description: 'Test\n"string"',
@@ -174,7 +175,7 @@ function defineTests(factory: StorageFactory) {
await storage.startBatch(BATCH_OPTIONS, async (batch) => {
await batch.save({
sourceTable: TEST_TABLE,
- tag: 'insert',
+ tag: SaveOperationTag.INSERT,
after: {
id: 't1',
description: 'Test 1'
@@ -190,7 +191,7 @@ function defineTests(factory: StorageFactory) {
await storage.startBatch(BATCH_OPTIONS, async (batch) => {
await batch.save({
sourceTable: TEST_TABLE,
- tag: 'insert',
+ tag: SaveOperationTag.INSERT,
after: {
id: 't2',
description: 'Test 2'
@@ -257,7 +258,7 @@ function defineTests(factory: StorageFactory) {
await storage.startBatch(BATCH_OPTIONS, async (batch) => {
await batch.save({
sourceTable: TEST_TABLE,
- tag: 'insert',
+ tag: SaveOperationTag.INSERT,
after: {
id: 't1',
description: 'Test 1'
@@ -267,7 +268,7 @@ function defineTests(factory: StorageFactory) {
await batch.save({
sourceTable: TEST_TABLE,
- tag: 'insert',
+ tag: SaveOperationTag.INSERT,
after: {
id: 't2',
description: 'Test 2'
@@ -308,7 +309,7 @@ function defineTests(factory: StorageFactory) {
await storage.startBatch(BATCH_OPTIONS, async (batch) => {
await batch.save({
sourceTable: TEST_TABLE,
- tag: 'update',
+ tag: SaveOperationTag.UPDATE,
after: {
id: 't1',
description: 'Test 1b'
@@ -318,7 +319,7 @@ function defineTests(factory: StorageFactory) {
await batch.save({
sourceTable: TEST_TABLE,
- tag: 'update',
+ tag: SaveOperationTag.UPDATE,
after: {
id: 't2',
description: 'Test 2b'
diff --git a/packages/types/src/config/PowerSyncConfig.ts b/packages/types/src/config/PowerSyncConfig.ts
index dbd33e1ae..876f1b09f 100644
--- a/packages/types/src/config/PowerSyncConfig.ts
+++ b/packages/types/src/config/PowerSyncConfig.ts
@@ -19,7 +19,7 @@ export const portParser = {
})
};
-export const dataSourceConfig = t.object({
+export const DataSourceConfig = t.object({
// Unique string identifier for the data source
type: t.string,
/** Unique identifier for the connection - optional when a single connection is present. */
@@ -32,12 +32,25 @@ export const dataSourceConfig = t.object({
debug_api: t.boolean.optional()
});
-export type DataSourceConfig = t.Decoded<typeof dataSourceConfig>;
+export type DataSourceConfig = t.Decoded<typeof DataSourceConfig>;
+
+/**
+ * Resolved version of {@link DataSourceConfig} where the optional
+ * `id` and `tag` fields are now required.
+ */
+export const ResolvedDataSourceConfig = DataSourceConfig.and(
+ t.object({
+ id: t.string,
+ tag: t.string
+ })
+);
+
+export type ResolvedDataSourceConfig = t.Decoded<typeof ResolvedDataSourceConfig>;
/**
* This essentially allows any extra fields on this type
*/
-export const genericDataSourceConfig = dataSourceConfig.and(t.record(t.any));
+export const genericDataSourceConfig = DataSourceConfig.and(t.record(t.any));
export type GenericDataSourceConfig = t.Decoded<typeof genericDataSourceConfig>;
export const jwkRSA = t.object({
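The resolved codec is built with ts-codec's `.and()` intersection, so `ResolvedDataSourceConfig` validates everything `DataSourceConfig` does and additionally requires `id` and `tag`; pairing a `const` codec with a same-named `type` mirrors the convention used above. A minimal sketch of the pattern, including a hypothetical resolver that applies defaults:

```ts
import * as t from 'ts-codec';

const Base = t.object({
  type: t.string,
  id: t.string.optional(),
  tag: t.string.optional()
});

// Intersection: all Base rules plus required id/tag.
const Resolved = Base.and(t.object({ id: t.string, tag: t.string }));
type Resolved = t.Decoded<typeof Resolved>;

// Hypothetical helper: fill defaults before handing the config to a module.
function resolve(config: t.Decoded<typeof Base>): Resolved {
  return {
    ...config,
    id: config.id ?? 'default',
    tag: config.tag ?? config.type
  };
}
```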
diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml
index d1c1a27f3..c4f1bd0e4 100644
--- a/pnpm-lock.yaml
+++ b/pnpm-lock.yaml
@@ -134,8 +134,63 @@ importers:
specifier: ^4.3.2
version: 4.3.2(typescript@5.6.2)(vite@5.3.3(@types/node@22.5.5))
vitest:
- specifier: ^0.34.6
- version: 0.34.6
+ specifier: ^2.1.1
+ version: 2.1.1(@types/node@22.5.5)
+
+ modules/module-mysql:
+ dependencies:
+ '@powersync/lib-services-framework':
+ specifier: workspace:*
+ version: link:../../libs/lib-services
+ '@powersync/mysql-zongji':
+ specifier: 0.0.0-dev-20241023144335
+ version: 0.0.0-dev-20241023144335
+ '@powersync/service-core':
+ specifier: workspace:*
+ version: link:../../packages/service-core
+ '@powersync/service-sync-rules':
+ specifier: workspace:*
+ version: link:../../packages/sync-rules
+ '@powersync/service-types':
+ specifier: workspace:*
+ version: link:../../packages/types
+ async:
+ specifier: ^3.2.4
+ version: 3.2.5
+ mysql2:
+ specifier: ^3.11.0
+ version: 3.11.3
+ semver:
+ specifier: ^7.5.4
+ version: 7.6.2
+ ts-codec:
+ specifier: ^1.2.2
+ version: 1.2.2
+ uri-js:
+ specifier: ^4.4.1
+ version: 4.4.1
+ uuid:
+ specifier: ^9.0.1
+ version: 9.0.1
+ devDependencies:
+ '@types/async':
+ specifier: ^3.2.24
+ version: 3.2.24
+ '@types/semver':
+ specifier: ^7.5.4
+ version: 7.5.8
+ '@types/uuid':
+ specifier: ^9.0.4
+ version: 9.0.8
+ typescript:
+ specifier: ^5.5.4
+ version: 5.6.2
+ vite-tsconfig-paths:
+ specifier: ^4.3.2
+ version: 4.3.2(typescript@5.6.2)(vite@5.3.3(@types/node@22.5.5))
+ vitest:
+ specifier: ^2.1.1
+ version: 2.1.1(@types/node@22.5.5)
modules/module-postgres:
dependencies:
@@ -198,8 +253,8 @@ importers:
specifier: workspace:^
version: link:../types
date-fns:
- specifier: ^3.6.0
- version: 3.6.0
+ specifier: ^4.1.0
+ version: 4.1.0
pgwire:
specifier: github:kagis/pgwire#f1cb95f9a0f42a612bb5a6b67bb2eb793fc5fc87
version: https://codeload.github.com/kagis/pgwire/tar.gz/f1cb95f9a0f42a612bb5a6b67bb2eb793fc5fc87
@@ -422,6 +477,9 @@ importers:
'@powersync/service-module-mongodb':
specifier: workspace:*
version: link:../modules/module-mongodb
+ '@powersync/service-module-mysql':
+ specifier: workspace:*
+ version: link:../modules/module-mysql
'@powersync/service-module-postgres':
specifier: workspace:*
version: link:../modules/module-postgres
@@ -789,10 +847,6 @@ packages:
resolution: {integrity: sha512-O8jcjabXaleOG9DQ0+ARXWZBTfnP4WNAqzuiJK7ll44AmxGKv/J2M4TPjxjY3znBCfvBXFzucm1twdyFybFqEA==}
engines: {node: '>=12'}
- '@jest/schemas@29.6.3':
- resolution: {integrity: sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==}
- engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0}
-
'@jridgewell/resolve-uri@3.1.2':
resolution: {integrity: sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==}
engines: {node: '>=6.0.0'}
@@ -1124,6 +1178,10 @@ packages:
resolution: {integrity: sha512-UA91GwWPhFExt3IizW6bOeY/pQ0BkuNwKjk9iQW9KqxluGCrg4VenZ0/L+2Y0+ZOtme72EVvg6v0zo3AMQRCeA==}
engines: {node: '>=12'}
+ '@powersync/mysql-zongji@0.0.0-dev-20241023144335':
+ resolution: {integrity: sha512-77A5ld4Egm0KTHDUzBSP+MHH1+ibp1Es1jlaGZmHWNzmXKNiScd0jxkPDxna9CTfRvOoAu8R0T3MTAuK0aDQpg==}
+ engines: {node: '>=20.0.0'}
+
'@prisma/instrumentation@5.16.1':
resolution: {integrity: sha512-4m5gRFWnQb8s/yTyGbMZkL7A5uJgqOWcWJxapwcAD0T0kh5sGPEVSQl/zTQvE9aduXhFAxOtC3gO+R8Hb5xO1Q==}
@@ -1249,9 +1307,6 @@ packages:
resolution: {integrity: sha512-2bRovzs0nJZFlCN3rXirE4gwxCn97JNjMmwpecqlbgV9WcxX7WRuIrgzx/X7Ib7MYRbyUTpBYE0s2x6AmZXnlg==}
engines: {node: ^14.17.0 || ^16.13.0 || >=18.0.0}
- '@sinclair/typebox@0.27.8':
- resolution: {integrity: sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==}
-
'@sindresorhus/is@5.6.0':
resolution: {integrity: sha512-TV7t8GKYaJWsn00tFDqBw8+Uqmr8A0fRU1tvTQhyZzGv0sJCGRQL3JGMI3ucuKo3XIZdUP+Lx7/gh2t3lewy7g==}
engines: {node: '>=14.16'}
@@ -1290,12 +1345,6 @@ packages:
'@types/async@3.2.24':
resolution: {integrity: sha512-8iHVLHsCCOBKjCF2KwFe0p9Z3rfM9mL+sSP8btyR5vTjJRAqpBYD28/ZLgXPf0pjG1VxOvtCV/BgXkQbpSe8Hw==}
- '@types/chai-subset@1.3.5':
- resolution: {integrity: sha512-c2mPnw+xHtXDoHmdtcCXGwyLMiauiAyxWMzhGpqHC4nqI/Y5G2XhTampslK2rb59kpcuHon03UH8W6iYUzw88A==}
-
- '@types/chai@4.3.20':
- resolution: {integrity: sha512-/pC9HAB5I/xMlc5FP77qjCnI16ChlJfW0tGa0IUcFn38VJrTV6DeZ60NU5KZBtaOZqjdpwTWohz5HU1RrhiYxQ==}
-
'@types/connect@3.4.36':
resolution: {integrity: sha512-P63Zd/JUGq+PdrM1lv0Wv5SBYeA2+CORvbrXbngriYY0jzLUWfQMQQxOhjONEz/wlHOAxOdY7CY65rgQdTjq2w==}
@@ -1359,9 +1408,6 @@ packages:
'@types/ws@8.2.3':
resolution: {integrity: sha512-ahRJZquUYCdOZf/rCsWg88S0/+cb9wazUBHv6HZEe3XdYaBe2zr/slM8J28X07Hn88Pnm4ezo7N8/ofnOgrPVQ==}
- '@vitest/expect@0.34.6':
- resolution: {integrity: sha512-QUzKpUQRc1qC7qdGo7rMK3AkETI7w18gTCUrsNnyjjJKYiuUB9+TQK3QnR1unhCnWRC0AbKv2omLGQDF/mIjOw==}
-
'@vitest/expect@2.1.1':
resolution: {integrity: sha512-YeueunS0HiHiQxk+KEOnq/QMzlUuOzbU1Go+PgAsHvvv3tUkJPm9xWt+6ITNTlzsMXUjmgm5T+U7KBPK2qQV6w==}
@@ -1380,30 +1426,22 @@ packages:
'@vitest/pretty-format@2.1.1':
resolution: {integrity: sha512-SjxPFOtuINDUW8/UkElJYQSFtnWX7tMksSGW0vfjxMneFqxVr8YJ979QpMbDW7g+BIiq88RAGDjf7en6rvLPPQ==}
- '@vitest/runner@0.34.6':
- resolution: {integrity: sha512-1CUQgtJSLF47NnhN+F9X2ycxUP0kLHQ/JWvNHbeBfwW8CzEGgeskzNnHDyv1ieKTltuR6sdIHV+nmR6kPxQqzQ==}
-
'@vitest/runner@2.1.1':
resolution: {integrity: sha512-uTPuY6PWOYitIkLPidaY5L3t0JJITdGTSwBtwMjKzo5O6RCOEncz9PUN+0pDidX8kTHYjO0EwUIvhlGpnGpxmA==}
- '@vitest/snapshot@0.34.6':
- resolution: {integrity: sha512-B3OZqYn6k4VaN011D+ve+AA4whM4QkcwcrwaKwAbyyvS/NB1hCWjFIBQxAQQSQir9/RtyAAGuq+4RJmbn2dH4w==}
-
'@vitest/snapshot@2.1.1':
resolution: {integrity: sha512-BnSku1WFy7r4mm96ha2FzN99AZJgpZOWrAhtQfoxjUU5YMRpq1zmHRq7a5K9/NjqonebO7iVDla+VvZS8BOWMw==}
- '@vitest/spy@0.34.6':
- resolution: {integrity: sha512-xaCvneSaeBw/cz8ySmF7ZwGvL0lBjfvqc1LpQ/vcdHEvpLn3Ff1vAvjw+CoGn0802l++5L/pxb7whwcWAw+DUQ==}
-
'@vitest/spy@2.1.1':
resolution: {integrity: sha512-ZM39BnZ9t/xZ/nF4UwRH5il0Sw93QnZXd9NAZGRpIgj0yvVwPpLd702s/Cx955rGaMlyBQkZJ2Ir7qyY48VZ+g==}
- '@vitest/utils@0.34.6':
- resolution: {integrity: sha512-IG5aDD8S6zlvloDsnzHw0Ut5xczlF+kv2BOTo+iXfPr54Yhi5qbVOgGB1hZaVq4iJ4C/MZ2J0y15IlsV/ZcI0A==}
-
'@vitest/utils@2.1.1':
resolution: {integrity: sha512-Y6Q9TsI+qJ2CC0ZKj6VBb+T8UPz593N113nnUykqwANqhgf3QkZeHFlusgKLTqrnVHbj/XDKZcDHol+dxVT+rQ==}
+ '@vlasky/mysql@2.18.6':
+ resolution: {integrity: sha512-c+qz/zzqecteQLchoje0E0rjLla935d6hHPpMKmfyQJnHlycLpR49ekS6s/zUAt8w0Um5hFglKXm4+PeJTVhaQ==}
+ engines: {node: '>= 0.6'}
+
abbrev@1.1.1:
resolution: {integrity: sha512-nne9/IiQ/hzIhY6pdDnbBtz7DjPTKrY00P/zvPSm5pOFkl6xuGrGnXn/VtTNNfNtAfZ9/1RtehkszU9qcTii0Q==}
@@ -1491,10 +1529,6 @@ packages:
resolution: {integrity: sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==}
engines: {node: '>=8'}
- ansi-styles@5.2.0:
- resolution: {integrity: sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==}
- engines: {node: '>=10'}
-
ansi-styles@6.2.1:
resolution: {integrity: sha512-bN798gFfQX+viw3R7yrGWRqnrN2oRkEkUjjl4JNn4E8GxxbjtG3FbrEIIY3l8/hrwUwIeCZvi4QuOTP4MErVug==}
engines: {node: '>=12'}
@@ -1524,9 +1558,6 @@ packages:
resolution: {integrity: sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw==}
engines: {node: '>=8'}
- assertion-error@1.1.0:
- resolution: {integrity: sha512-jgsaNduz+ndvGyFt3uSuWqvy4lCnIJiovtouQN5JZHOKCS2QuhEdbcQHFhVksz2N2U9hXJo8odG7ETyWlEeuDw==}
-
assertion-error@2.0.1:
resolution: {integrity: sha512-Izi8RQcffqCeNVgFigKli1ssklIbpHnCYc6AknXGYoB6grJqyeby7jv12JUQgmTAnIDnbck1uxksT4dzN3PWBA==}
engines: {node: '>=12'}
@@ -1544,6 +1575,10 @@ packages:
avvio@8.3.2:
resolution: {integrity: sha512-st8e519GWHa/azv8S87mcJvZs4WsgTBjOw/Ih1CP6u+8SZvcOeAYNG6JbsIrAUUJJ7JfmrnOkR8ipDS+u9SIRQ==}
+ aws-ssl-profiles@1.1.2:
+ resolution: {integrity: sha512-NZKeq9AfyQvEeNlN0zSYAaWrmBffJh3IELMZfRpJVWgrpEbtEpnjvzqBPf+mxoI287JohRDoa+/nsfqqiZmF6g==}
+ engines: {node: '>= 6.0.0'}
+
balanced-match@1.0.2:
resolution: {integrity: sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==}
@@ -1560,6 +1595,13 @@ packages:
resolution: {integrity: sha512-pbnl5XzGBdrFU/wT4jqmJVPn2B6UHPBOhzMQkY/SPUPB6QtUXtmBHBIwCbXJol93mOpGMnQyP/+BB19q04xj7g==}
engines: {node: '>=4'}
+ big-integer@1.6.51:
+ resolution: {integrity: sha512-GPEid2Y9QU1Exl1rpO9B2IPJGHPSupF5GnVIP0blYvNOMer2bTvSWs1jGOUg04hTmu67nmLsQ9TBo1puaotBHg==}
+ engines: {node: '>=0.6'}
+
+ bignumber.js@9.1.1:
+ resolution: {integrity: sha512-pHm4LsMJ6lzgNGVfZHjMoO8sdoRhOzOH4MLmY65Jg70bpxCKu5iOHNJyfF6OyvYw7t8Fpf35RuzUyqnQsj8Vig==}
+
binary-extensions@2.3.0:
resolution: {integrity: sha512-Ceh+7ox5qe7LJuLHoY0feh3pHuUDHAcRUeyL2VYghZwfpkNIy/+8Ocg0a3UuSoYzavmylwuLWQOf3hl0jjMMIw==}
engines: {node: '>=8'}
@@ -1618,10 +1660,6 @@ packages:
resolution: {integrity: sha512-xlx1yCK2Oc1APsPXDL2LdlNP6+uu8OCDdhOBSVT279M/S+y75O30C2VuD8T2ogdePBBl7PfPF4504tnLgX3zfw==}
engines: {node: '>=14.16'}
- chai@4.5.0:
- resolution: {integrity: sha512-RITGBfijLkBddZvnn8jdqoTypxvqbOLYQkGGxXzeFjVHvudaPw0HNFD9x928/eUwYWd2dPCugVqspGALTZZQKw==}
- engines: {node: '>=4'}
-
chai@5.1.1:
resolution: {integrity: sha512-pT1ZgP8rPNqUgieVaEY+ryQr6Q4HXNg8Ei9UnLUrjN4IA7dvQC5JB+/kxVcPNDHyBcc/26CXPkbNzq3qwrOEKA==}
engines: {node: '>=12'}
@@ -1641,9 +1679,6 @@ packages:
chardet@0.7.0:
resolution: {integrity: sha512-mT8iDcrh03qDGRRmoA2hmBJnxpllMR+0/0qlzjqZES6NdiWDcZkCNAk4rPFZ9Q85r27unkiNNg8ZOiwZXBHwcA==}
- check-error@1.0.3:
- resolution: {integrity: sha512-iKEoDYaRmd1mxM90a2OEfWhjsjPpYPuQ+lMYsoxB126+t8fw7ySEO48nmDg5COTjxDI65/Y2OWpeEHk3ZOe8zg==}
-
check-error@2.1.1:
resolution: {integrity: sha512-OAlb+T7V4Op9OwdkjmguYRqncdlx5JiofwOAUkmTF+jNdHwzTaTs4sRAGpzLF3oOz5xAyDGrPgeIDFQmDOTiJw==}
engines: {node: '>= 16'}
@@ -1743,9 +1778,6 @@ packages:
engines: {node: ^14.13.0 || >=16.0.0}
hasBin: true
- confbox@0.1.7:
- resolution: {integrity: sha512-uJcB/FKZtBMCJpK8MQji6bJHgu1tixKPxRLeGkNzBoOZzpnZUJm0jm2/sBDWcuBx1dYgxV4JU+g5hmNxCyAmdA==}
-
config-chain@1.1.13:
resolution: {integrity: sha512-qj+f8APARXHrM0hraqXYb2/bOVSV4PvJQlNZ/DVj0QrmNM2q2euizkeuVckQ57J+W0mRH6Hvi+k50M4Jul2VRQ==}
@@ -1793,8 +1825,8 @@ packages:
resolution: {integrity: sha512-fnULvOpxnC5/Vg3NCiWelDsLiUc9bRwAPs/+LfTLNvetFCtCTN+yQz15C/fs4AwX1R9K5GLtLfn8QW+dWisaAw==}
engines: {node: '>=0.11'}
- date-fns@3.6.0:
- resolution: {integrity: sha512-fRHTG8g/Gif+kSh50gaGEdToemgfj74aRX3swtiouboip5JDLAyDE9F11nHMIcvOaXeOC6D7SpNhi7uFyB7Uww==}
+ date-fns@4.1.0:
+ resolution: {integrity: sha512-Ukq0owbQXxa/U3EGtsdVBkR1w7KOQ5gIBqdH2hkvknzZPYvBxb/aa6E8L7tmjFtkwZBu3UXBbjIgPo/Ez4xaNg==}
debug@4.3.5:
resolution: {integrity: sha512-pt0bNEmneDIvdL1Xsd9oDQ/wrQRkXDT4AUWlNZNPKvW5x/jyO9VFXkJUP07vQ2upmw5PlaITaPKc31jK13V+jg==}
@@ -1818,10 +1850,6 @@ packages:
resolution: {integrity: sha512-aW35yZM6Bb/4oJlZncMH2LCoZtJXTRxES17vE3hoRiowU2kWHaJKFkSBDnDR+cm9J+9QhXmREyIfv0pji9ejCQ==}
engines: {node: '>=10'}
- deep-eql@4.1.4:
- resolution: {integrity: sha512-SUwdGfqdKOwxCPeVYjwSyRpJ7Z+fhpwIAtmCUdZIWZ/YP5R9WAsyuSgpLVDi9bjWoN2LXHNss/dk3urXtdQxGg==}
- engines: {node: '>=6'}
-
deep-eql@5.0.2:
resolution: {integrity: sha512-h5k/5U50IJJFpzfL6nO9jaaumfjO/f2NjK/oYB2Djzm4p9L+3T9qWpZqZ2hAbLPuuYq9wrU08WQyBTL5GbPk5Q==}
engines: {node: '>=6'}
@@ -1840,14 +1868,14 @@ packages:
delegates@1.0.0:
resolution: {integrity: sha512-bd2L678uiWATM6m5Z1VzNCErI3jiGzt6HGY8OVICs40JQq/HALfbyNJmp0UDakEY4pMMaN0Ly5om/B1VI/+xfQ==}
+ denque@2.1.0:
+ resolution: {integrity: sha512-HVQE3AAb/pxF8fQAoiqpvg9i3evqug3hoiwakOyZAwJm+6vZehbkYXZ0l4JxS+I3QxM97v5aaRNhj8v5oBhekw==}
+ engines: {node: '>=0.10'}
+
detect-indent@6.1.0:
resolution: {integrity: sha512-reYkTUJAZb9gUuZ2RvVCNhVHdg62RHnJ7WJl8ftMi4diZ6NWlciOzQN88pUhSELEwflJht4oQDv0F0BMlwaYtA==}
engines: {node: '>=8'}
- diff-sequences@29.6.3:
- resolution: {integrity: sha512-EjePK1srD3P08o2j4f0ExnylqRs5B9tJjcp9t1krH2qRi8CCdsYfwe9JgSLurFBWwq4uOlipzfk5fHNvwFKr8Q==}
- engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0}
-
diff@4.0.2:
resolution: {integrity: sha512-58lmxKSA4BNyLz+HHMUzlOEpg09FV+ev6ZMe3vJihgdxzgcwZ8VoEEPmALCZG9LmqfVoNMMKpttIYTVG6uDY7A==}
engines: {node: '>=0.3.1'}
@@ -2064,6 +2092,9 @@ packages:
engines: {node: ^12.13.0 || ^14.15.0 || >=16.0.0}
deprecated: This package is no longer supported.
+ generate-function@2.3.1:
+ resolution: {integrity: sha512-eeB5GfMNeevm/GRYq20ShmsaGcmI81kIX2K9XQx5miC8KdHaC6Jm0qQ8ZNeGOi7wYB8OsdxKs+Y2oVuTFuVwKQ==}
+
get-caller-file@2.0.5:
resolution: {integrity: sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==}
engines: {node: 6.* || 8.* || >= 10.*}
@@ -2296,6 +2327,9 @@ packages:
resolution: {integrity: sha512-Fd4gABb+ycGAmKou8eMftCupSir5lRxqf4aD/vd0cD2qc4HL07OjCeuHMr8Ro4CoMaeCKDB0/ECBOVWjTwUvPQ==}
engines: {node: '>=8'}
+ is-property@1.0.2:
+ resolution: {integrity: sha512-Ks/IoX00TtClbGQr4TWXemAnktAQvYB7HzcCxDGqEZU6oCmb2INHuOoKxbtR+HFkmYWBKv/dOZtGRiAjDhj92g==}
+
is-stream@2.0.1:
resolution: {integrity: sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg==}
engines: {node: '>=8'}
@@ -2410,10 +2444,6 @@ packages:
light-my-request@5.13.0:
resolution: {integrity: sha512-9IjUN9ZyCS9pTG+KqTDEQo68Sui2lHsYBrfMyVUTTZ3XhH8PMZq7xO94Kr+eP9dhi/kcKsx4N41p2IXEBil1pQ==}
- local-pkg@0.4.3:
- resolution: {integrity: sha512-SFppqq5p42fe2qcZQqqEOiVRXl+WCP1MdT6k7BDEW1j++sp5fIY+/fdRQitvKgB5BrBcmrs5m/L0v2FrU5MY1g==}
- engines: {node: '>=14'}
-
locate-path@5.0.0:
resolution: {integrity: sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g==}
engines: {node: '>=8'}
@@ -2439,12 +2469,12 @@ packages:
resolution: {integrity: sha512-CdaO738xRapbKIMVn2m4F6KTj4j7ooJ8POVnebSgKo3KBz5axNXRAL7ZdRjIV6NOr2Uf4vjtRkxrFETOioCqSA==}
engines: {node: '>= 12.0.0'}
+ long@5.2.3:
+ resolution: {integrity: sha512-lcHwpNoggQTObv5apGNCTdJrO69eHOZMi4BNC+rTLER8iHAqGrUVeLh/irVIM7zTw2bOXA8T6uNPeujwOLg/2Q==}
+
lossless-json@2.0.11:
resolution: {integrity: sha512-BP0vn+NGYvzDielvBZaFain/wgeJ1hTvURCqtKvhr1SCPePdaaTanmmcplrHfEJSJOUql7hk4FHwToNJjWRY3g==}
- loupe@2.3.7:
- resolution: {integrity: sha512-zSMINGVYkdpYSOBmLi0D1Uo7JU9nVdQKrHxC8eYlV+9YKK9WePqAlL7lSlorG/U2Fw1w0hTBmaa/jrQ3UbPHtA==}
-
loupe@3.1.1:
resolution: {integrity: sha512-edNu/8D5MKVfGVFRhFf8aAxiTM6Wumfz5XsaatSxlD3w4R1d/WEKUTydCdPGbl9K7QG/Ca3GnDV2sIKIpXRQcw==}
@@ -2462,6 +2492,10 @@ packages:
resolution: {integrity: sha512-jumlc0BIUrS3qJGgIkWZsyfAM7NCWiBcCDhnd+3NNM5KbBmLTgHVfWBcg6W+rLUsIpzpERPsvwUP7CckAQSOoA==}
engines: {node: '>=12'}
+ lru.min@1.1.1:
+ resolution: {integrity: sha512-FbAj6lXil6t8z4z3j0E5mfRlPzxkySotzUHwRXjlpRh10vc6AI6WN62ehZj82VG7M20rqogJ0GLwar2Xa05a8Q==}
+ engines: {bun: '>=1.0.0', deno: '>=1.30.0', node: '>=8.0.0'}
+
magic-string@0.30.11:
resolution: {integrity: sha512-+Wri9p0QHMy+545hKww7YAu5NyzF8iomPL/RQazugQ9+Ez4Ic3mERMd8ZTX5rfK944j+560ZJi8iAwgak1Ac7A==}
@@ -2564,9 +2598,6 @@ packages:
engines: {node: '>=10'}
hasBin: true
- mlly@1.7.1:
- resolution: {integrity: sha512-rrVRZRELyQzrIUAVMHxP97kv+G786pHmOKzuFII8zDYahFBS7qnHh2AlYSl1GAHhaMPCz6/oHjVMcfFYgFYHgA==}
-
mnemonist@0.39.5:
resolution: {integrity: sha512-FPUtkhtJ0efmEFGpU14x7jGbTB+s18LrzRL2KgoWz9YvcY3cPomz8tih01GbHwnGk/OmkOKfqd/RAQoc8Lm7DQ==}
@@ -2620,6 +2651,14 @@ packages:
resolution: {integrity: sha512-avsJQhyd+680gKXyG/sQc0nXaC6rBkPOfyHYcFb9+hdkqQkR9bdnkJ0AMZhke0oesPqIO+mFFJ+IdBc7mst4IA==}
engines: {node: ^14.17.0 || ^16.13.0 || >=18.0.0}
+ mysql2@3.11.3:
+ resolution: {integrity: sha512-Qpu2ADfbKzyLdwC/5d4W7+5Yz7yBzCU05YWt5npWzACST37wJsB23wgOSo00qi043urkiRwXtEvJc9UnuLX/MQ==}
+ engines: {node: '>= 8.0'}
+
+ named-placeholders@1.1.3:
+ resolution: {integrity: sha512-eLoBxg6wE/rZkJPhU/xRX1WTpkFEwDJEN96oxFrTsqBdbT5ec295Q+CoHrL9IT0DipqKhmGcaZmwOt8OON5x1w==}
+ engines: {node: '>=12.0.0'}
+
nanoid@3.3.7:
resolution: {integrity: sha512-eSRppjcPIatRIMC1U6UngP8XFcz8MQWGQdt1MTBQ7NaAmvXDfvNxbvWV3x2y6CdEUciCSsDHDQZbhYaB8QEo2g==}
engines: {node: ^10 || ^12 || ^13.7 || ^14 || >=15.0.1}
@@ -2771,10 +2810,6 @@ packages:
resolution: {integrity: sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==}
engines: {node: '>=10'}
- p-limit@4.0.0:
- resolution: {integrity: sha512-5b0R4txpzjPWVw/cXXUResoD4hb6U/x9BH08L7nw+GN1sezDzPdxeRvpc9c433fZhBan/wusjbCsqwqm4EIBIQ==}
- engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0}
-
p-locate@4.1.0:
resolution: {integrity: sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A==}
engines: {node: '>=8'}
@@ -2841,9 +2876,6 @@ packages:
pathe@1.1.2:
resolution: {integrity: sha512-whLdWMYL2TwI08hn8/ZqAbrVemu0LNaNNJZX73O6qaIdCTfXutsLhMkjdENX0qhsQ9uIimo4/aQOmXkoon2nDQ==}
- pathval@1.1.1:
- resolution: {integrity: sha512-Dp6zGqpTdETdR63lehJYPeIOqpiNBNtc7BpWSLrOje7UaIsE5aY92r/AunQA7rsXvet3lrJ3JnZX29UPTKXyKQ==}
-
pathval@2.0.0:
resolution: {integrity: sha512-vE7JKRyES09KiunauX7nd2Q9/L7lhok4smP9RZTDeD4MVs72Dp2qNFVz39Nz5a0FVEW0BJR6C0DYrq6unoziZA==}
engines: {node: '>= 14.16'}
@@ -2894,9 +2926,6 @@ packages:
resolution: {integrity: sha512-ip4qdzjkAyDDZklUaZkcRFb2iA118H9SgRh8yzTkSQK8HilsOJF7rSY8HoW5+I0M46AZgX/pxbprf2vvzQCE0Q==}
hasBin: true
- pkg-types@1.2.0:
- resolution: {integrity: sha512-+ifYuSSqOQ8CqP4MbZA5hDpb97n3E8SVWdJe+Wms9kj745lmd3b7EZJiqvmLwAlmRfjrI7Hi5z3kdBJ93lFNPA==}
-
postcss@8.4.39:
resolution: {integrity: sha512-0vzE+lAiG7hZl1/9I8yzKLx3aR9Xbof3fBHKunvMfOCYAtMhrsnccJY2iTURb9EZd5+pLuiNV9/c/GZJOHsgIw==}
engines: {node: ^10 || ^12 || >=14}
@@ -2927,10 +2956,6 @@ packages:
engines: {node: '>=14'}
hasBin: true
- pretty-format@29.7.0:
- resolution: {integrity: sha512-Pdlw/oPxN+aXdmM9R00JVC9WVFoCLTKJvDVLgmJ+qAffBMxsV85l/Lu7sNx4zSzPyoL2euImuEwHhOXdEgNFZQ==}
- engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0}
-
proc-log@3.0.0:
resolution: {integrity: sha512-++Vn7NS4Xf9NacaU9Xq3URUuqZETPsf8L4j5/ckhaRYsfPeRyzGw+iDjFhV/Jr3uNmTvvddEJFWh5R1gRgUH8A==}
engines: {node: ^14.17.0 || ^16.13.0 || >=18.0.0}
@@ -3018,9 +3043,6 @@ packages:
resolution: {integrity: sha512-y3bGgqKj3QBdxLbLkomlohkvsA8gdAiUQlSBJnBhfn+BPxg4bc62d8TcBW15wavDfgexCgccckhcZvywyQYPOw==}
hasBin: true
- react-is@18.3.1:
- resolution: {integrity: sha512-/LLMVyas0ljjAtoYiPqYiL8VWXzUUdThrmU5+n20DZv+a+ClRoevUzw5JxU+Ieh5/c87ytoTBV9G1FiKfNJdmg==}
-
read-package-json-fast@3.0.2:
resolution: {integrity: sha512-0J+Msgym3vrLOUB3hzQCuZHII0xkNGCtz/HJH9xZshwv9DbDwkw1KaE3gx/e2J5rpEY5rtOy6cyhKOPrkP7FZw==}
engines: {node: ^14.17.0 || ^16.13.0 || >=18.0.0}
@@ -3037,6 +3059,9 @@ packages:
readable-stream@1.0.34:
resolution: {integrity: sha512-ok1qVCJuRkNmvebYikljxJA/UEsKwLl2nI1OmaqAu4/UE+h0wKCHok4XkL/gvi39OacXvw59RJUOFUkDib2rHg==}
+ readable-stream@2.3.7:
+ resolution: {integrity: sha512-Ebho8K4jIbHAxnuxi7o42OrZgF/ZTNcsZj6nRKyUmkhLFq8CHItp/fy6hQZuZmP/n3yZ9VBUbp4zz/mX8hmYPw==}
+
readable-stream@2.3.8:
resolution: {integrity: sha512-8p0AUk4XODgIewSi0l8Epjs+EVnWiK7NoDIEGU0HhE7+ZyY8D1IMY7odu5lRrFXGg71L15KG8QrPmum45RTtdA==}
@@ -3188,6 +3213,9 @@ packages:
engines: {node: '>=10'}
hasBin: true
+ seq-queue@0.0.5:
+ resolution: {integrity: sha512-hr3Wtp/GZIc/6DAGPDcV4/9WoZhjrkXsi5B/07QgX8tsdc6ilr7BFM6PM6rbdAX1kFSDYeZGLipIZZKyQP0O5Q==}
+
set-blocking@2.0.0:
resolution: {integrity: sha512-KiKBS8AnWGEyLzofFfmvKwpdPzqiy16LvQfK3yv/fVH7Bj13/wl3JSR1J+rfgRE9q7xUJK4qvgS8raSOeLUehw==}
@@ -3309,6 +3337,10 @@ packages:
sprintf-js@1.1.3:
resolution: {integrity: sha512-Oo+0REFV59/rz3gfJNKQiBlwfHaSESl1pcGyABQsnnIfWOFt6JNj5gCog2U6MLZ//IGYD+nA8nI+mTShREReaA==}
+ sqlstring@2.3.3:
+ resolution: {integrity: sha512-qC9iz2FlN7DQl3+wjwn3802RTyjCx7sDvfQEXchwa6CWOx07/WVfh91gBmQ9fahw8snwGEWU3xGzOt4tFyHLxg==}
+ engines: {node: '>= 0.6'}
+
ssri@10.0.6:
resolution: {integrity: sha512-MGrFH9Z4NP9Iyhqn16sDtBpRRNJ0Y2hNa6D65h736fVSaPCHr4DM4sWUNvVaSuC+0OBGhwsrydQwmgfg5LncqQ==}
engines: {node: ^14.17.0 || ^16.13.0 || >=18.0.0}
@@ -3370,9 +3402,6 @@ packages:
resolution: {integrity: sha512-0fk9zBqO67Nq5M/m45qHCJxylV/DhBlIOVExqgOMiCCrzrhU6tCibRXNqE3jwJLftzE9SNuZtYbpzcO+i9FiKw==}
engines: {node: '>=14.16'}
- strip-literal@1.3.0:
- resolution: {integrity: sha512-PugKzOsyXpArk0yWmUwqOZecSO0GH0bPoctLcqNDH9J04pVW3lflYE0ujElBGTloevcxF5MofAOZ7C5l2b+wLg==}
-
supports-color@5.5.0:
resolution: {integrity: sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==}
engines: {node: '>=4'}
@@ -3415,10 +3444,6 @@ packages:
tinyexec@0.3.0:
resolution: {integrity: sha512-tVGE0mVJPGb0chKhqmsoosjsS+qUnJVGJpZgsHYQcGoPlG3B51R3PouqTgEGH2Dc9jjFyOqOpix6ZHNMXp1FZg==}
- tinypool@0.7.0:
- resolution: {integrity: sha512-zSYNUlYSMhJ6Zdou4cJwo/p7w5nmAH17GRfU/ui3ctvjXFErXXkruT4MWW6poDeXgCaIBlGLrfU6TbTXxyGMww==}
- engines: {node: '>=14.0.0'}
-
tinypool@1.0.1:
resolution: {integrity: sha512-URZYihUbRPcGv95En+sz6MfghfIc2OJ1sv/RmhWZLouPY0/8Vo80viwPvg3dlaS9fuq7fQMEfgRRK7BBZThBEA==}
engines: {node: ^18.0.0 || >=20.0.0}
@@ -3427,10 +3452,6 @@ packages:
resolution: {integrity: sha512-weEDEq7Z5eTHPDh4xjX789+fHfF+P8boiFB+0vbWzpbnbsEr/GRaohi/uMKxg8RZMXnl1ItAi/IUHWMsjDV7kQ==}
engines: {node: '>=14.0.0'}
- tinyspy@2.2.1:
- resolution: {integrity: sha512-KYad6Vy5VDWV4GH3fjpseMQ/XU2BhIYP7Vzd0LG44qRWm/Yt2WCOTicFdvmgo6gWaqooMQCawTtILVQJupKu7A==}
- engines: {node: '>=14.0.0'}
-
tinyspy@3.0.2:
resolution: {integrity: sha512-n1cw8k1k0x4pgA2+9XrOkFydTerNcJ1zWCO5Nn9scWHTD+5tp8dghT2x1uduQePZTZgd3Tupf+x9BxJjeJi77Q==}
engines: {node: '>=14.0.0'}
@@ -3518,10 +3539,6 @@ packages:
resolution: {integrity: sha512-i3P9Kgw3ytjELUfpuKVDNBJvk4u5bXL6gskv572mcevPbSKCV3zt3djhmlEQ65yERjIbOSncy7U4cQJaB1CBCg==}
engines: {node: ^14.17.0 || ^16.13.0 || >=18.0.0}
- type-detect@4.1.0:
- resolution: {integrity: sha512-Acylog8/luQ8L7il+geoSxhEkazvkslg7PSNKOX59mbB9cOveP5aq9h74Y7YU8yDpJwetzQQrfIwtf4Wp4LKcw==}
- engines: {node: '>=4'}
-
type-fest@0.21.3:
resolution: {integrity: sha512-t0rzBq87m3fVcduHDUFhKmyyX+9eo6WQjZvf51Ea/M0Q7+T374Jp1aUiyUl0GKxp8M/OETVHSDvmkyPgvX+X2w==}
engines: {node: '>=10'}
@@ -3542,9 +3559,6 @@ packages:
engines: {node: '>=14.17'}
hasBin: true
- ufo@1.5.4:
- resolution: {integrity: sha512-UsUk3byDzKd04EyoZ7U4DOlxQaD14JUKQl6/P7wiX4FNvUfm3XL246n9W5AmqwW5RSFJ27NAuM0iLscAOYUiGQ==}
-
undefsafe@2.0.5:
resolution: {integrity: sha512-WxONCrssBM8TSPRqN5EmsjVrsv4A8X12J4ArBiiayv3DyyG3ZlIg6yysuuSYdZsVz3TKcTg2fd//Ujd4CHV1iA==}
@@ -3607,11 +3621,6 @@ packages:
resolution: {integrity: sha512-BNGbWLfd0eUPabhkXUVm0j8uuvREyTh5ovRa/dyow/BqAbZJyC+5fU+IzQOzmAKzYqYRAISoRhdQr3eIZ/PXqg==}
engines: {node: '>= 0.8'}
- vite-node@0.34.6:
- resolution: {integrity: sha512-nlBMJ9x6n7/Amaz6F3zJ97EBwR2FkzhBRxF5e+jE6LA3yi6Wtc2lyTij1OnDMIr34v5g/tVQtsVAzhT0jc5ygA==}
- engines: {node: '>=v14.18.0'}
- hasBin: true
-
vite-node@2.1.1:
resolution: {integrity: sha512-N/mGckI1suG/5wQI35XeR9rsMsPqKXzq1CdUndzVstBj/HvyxxGctwnK6WX43NGt5L3Z5tcRf83g4TITKJhPrA==}
engines: {node: ^18.0.0 || >=20.0.0}
@@ -3653,37 +3662,6 @@ packages:
terser:
optional: true
- vitest@0.34.6:
- resolution: {integrity: sha512-+5CALsOvbNKnS+ZHMXtuUC7nL8/7F1F2DnHGjSsszX8zCjWSSviphCb/NuS9Nzf4Q03KyyDRBAXhF/8lffME4Q==}
- engines: {node: '>=v14.18.0'}
- hasBin: true
- peerDependencies:
- '@edge-runtime/vm': '*'
- '@vitest/browser': '*'
- '@vitest/ui': '*'
- happy-dom: '*'
- jsdom: '*'
- playwright: '*'
- safaridriver: '*'
- webdriverio: '*'
- peerDependenciesMeta:
- '@edge-runtime/vm':
- optional: true
- '@vitest/browser':
- optional: true
- '@vitest/ui':
- optional: true
- happy-dom:
- optional: true
- jsdom:
- optional: true
- playwright:
- optional: true
- safaridriver:
- optional: true
- webdriverio:
- optional: true
-
vitest@2.1.1:
resolution: {integrity: sha512-97We7/VC0e9X5zBVkvt7SGQMGrRtn3KtySFQG5fpaMlS+l62eeXRQO633AYhSTC3z7IMebnPPNjGXVGNRFlxBA==}
engines: {node: ^18.0.0 || >=20.0.0}
@@ -3840,10 +3818,6 @@ packages:
resolution: {integrity: sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==}
engines: {node: '>=10'}
- yocto-queue@1.1.1:
- resolution: {integrity: sha512-b4JR1PFR10y1mKjhHY9LaGo6tmrgjit7hxVIeAmyMw3jegXR4dhYqLaQF5zMXZxY7tLpMyJeLjr1C4rLmkVe8g==}
- engines: {node: '>=12.20'}
-
yoctocolors-cjs@2.1.2:
resolution: {integrity: sha512-cYVsTjKl8b+FrnidjibDWskAv7UKOfcwaVZdp/it9n1s9fU3IkgDbhdIRKCW4JDsAlECJY0ytoVPT3sK6kideA==}
engines: {node: '>=18'}
@@ -4135,10 +4109,6 @@ snapshots:
wrap-ansi: 8.1.0
wrap-ansi-cjs: wrap-ansi@7.0.0
- '@jest/schemas@29.6.3':
- dependencies:
- '@sinclair/typebox': 0.27.8
-
'@jridgewell/resolve-uri@3.1.2': {}
'@jridgewell/sourcemap-codec@1.5.0': {}
@@ -4609,6 +4579,12 @@ snapshots:
'@pnpm/network.ca-file': 1.0.2
config-chain: 1.1.13
+ '@powersync/mysql-zongji@0.0.0-dev-20241023144335':
+ dependencies:
+ '@vlasky/mysql': 2.18.6
+ big-integer: 1.6.51
+ iconv-lite: 0.6.3
+
'@prisma/instrumentation@5.16.1':
dependencies:
'@opentelemetry/api': 1.8.0
@@ -4696,7 +4672,7 @@ snapshots:
'@opentelemetry/semantic-conventions': 1.25.1
'@prisma/instrumentation': 5.16.1
'@sentry/core': 8.17.0
- '@sentry/opentelemetry': 8.17.0(@opentelemetry/api@1.9.0)(@opentelemetry/core@1.25.1(@opentelemetry/api@1.6.0))(@opentelemetry/instrumentation@0.52.1(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@1.25.1(@opentelemetry/api@1.9.0))(@opentelemetry/semantic-conventions@1.25.1)
+ '@sentry/opentelemetry': 8.17.0(@opentelemetry/api@1.9.0)(@opentelemetry/core@1.25.1(@opentelemetry/api@1.6.0))(@opentelemetry/instrumentation@0.52.1(@opentelemetry/api@1.6.0))(@opentelemetry/sdk-trace-base@1.25.1(@opentelemetry/api@1.9.0))(@opentelemetry/semantic-conventions@1.25.1)
'@sentry/types': 8.17.0
'@sentry/utils': 8.17.0
optionalDependencies:
@@ -4704,7 +4680,7 @@ snapshots:
transitivePeerDependencies:
- supports-color
- '@sentry/opentelemetry@8.17.0(@opentelemetry/api@1.9.0)(@opentelemetry/core@1.25.1(@opentelemetry/api@1.6.0))(@opentelemetry/instrumentation@0.52.1(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@1.25.1(@opentelemetry/api@1.9.0))(@opentelemetry/semantic-conventions@1.25.1)':
+ '@sentry/opentelemetry@8.17.0(@opentelemetry/api@1.9.0)(@opentelemetry/core@1.25.1(@opentelemetry/api@1.6.0))(@opentelemetry/instrumentation@0.52.1(@opentelemetry/api@1.6.0))(@opentelemetry/sdk-trace-base@1.25.1(@opentelemetry/api@1.9.0))(@opentelemetry/semantic-conventions@1.25.1)':
dependencies:
'@opentelemetry/api': 1.9.0
'@opentelemetry/core': 1.25.1(@opentelemetry/api@1.9.0)
@@ -4742,8 +4718,6 @@ snapshots:
transitivePeerDependencies:
- supports-color
- '@sinclair/typebox@0.27.8': {}
-
'@sindresorhus/is@5.6.0': {}
'@syncpoint/wkx@0.5.2':
@@ -4773,12 +4747,6 @@ snapshots:
'@types/async@3.2.24': {}
- '@types/chai-subset@1.3.5':
- dependencies:
- '@types/chai': 4.3.20
-
- '@types/chai@4.3.20': {}
-
'@types/connect@3.4.36':
dependencies:
'@types/node': 22.5.5
@@ -4837,12 +4805,6 @@ snapshots:
dependencies:
'@types/node': 22.5.5
- '@vitest/expect@0.34.6':
- dependencies:
- '@vitest/spy': 0.34.6
- '@vitest/utils': 0.34.6
- chai: 4.5.0
-
'@vitest/expect@2.1.1':
dependencies:
'@vitest/spy': 2.1.1
@@ -4862,49 +4824,34 @@ snapshots:
dependencies:
tinyrainbow: 1.2.0
- '@vitest/runner@0.34.6':
- dependencies:
- '@vitest/utils': 0.34.6
- p-limit: 4.0.0
- pathe: 1.1.2
-
'@vitest/runner@2.1.1':
dependencies:
'@vitest/utils': 2.1.1
pathe: 1.1.2
- '@vitest/snapshot@0.34.6':
- dependencies:
- magic-string: 0.30.11
- pathe: 1.1.2
- pretty-format: 29.7.0
-
'@vitest/snapshot@2.1.1':
dependencies:
'@vitest/pretty-format': 2.1.1
magic-string: 0.30.11
pathe: 1.1.2
- '@vitest/spy@0.34.6':
- dependencies:
- tinyspy: 2.2.1
-
'@vitest/spy@2.1.1':
dependencies:
tinyspy: 3.0.2
- '@vitest/utils@0.34.6':
- dependencies:
- diff-sequences: 29.6.3
- loupe: 2.3.7
- pretty-format: 29.7.0
-
'@vitest/utils@2.1.1':
dependencies:
'@vitest/pretty-format': 2.1.1
loupe: 3.1.1
tinyrainbow: 1.2.0
+ '@vlasky/mysql@2.18.6':
+ dependencies:
+ bignumber.js: 9.1.1
+ readable-stream: 2.3.7
+ safe-buffer: 5.2.1
+ sqlstring: 2.3.3
+
abbrev@1.1.1: {}
abort-controller@3.0.0:
@@ -4980,8 +4927,6 @@ snapshots:
dependencies:
color-convert: 2.0.1
- ansi-styles@5.2.0: {}
-
ansi-styles@6.2.1: {}
anymatch@3.1.3:
@@ -5006,8 +4951,6 @@ snapshots:
array-union@2.1.0: {}
- assertion-error@1.1.0: {}
-
assertion-error@2.0.1: {}
async-mutex@0.5.0:
@@ -5023,6 +4966,8 @@ snapshots:
'@fastify/error': 3.4.1
fastq: 1.17.1
+ aws-ssl-profiles@1.1.2: {}
+
balanced-match@1.0.2: {}
base64-js@1.5.1: {}
@@ -5040,6 +4985,10 @@ snapshots:
dependencies:
is-windows: 1.0.2
+ big-integer@1.6.51: {}
+
+ bignumber.js@9.1.1: {}
+
binary-extensions@2.3.0: {}
bl@4.1.0:
@@ -5140,16 +5089,6 @@ snapshots:
camelcase@7.0.1: {}
- chai@4.5.0:
- dependencies:
- assertion-error: 1.1.0
- check-error: 1.0.3
- deep-eql: 4.1.4
- get-func-name: 2.0.2
- loupe: 2.3.7
- pathval: 1.1.1
- type-detect: 4.1.0
-
chai@5.1.1:
dependencies:
assertion-error: 2.0.1
@@ -5173,10 +5112,6 @@ snapshots:
chardet@0.7.0: {}
- check-error@1.0.3:
- dependencies:
- get-func-name: 2.0.2
-
check-error@2.1.1: {}
chokidar@3.6.0:
@@ -5278,8 +5213,6 @@ snapshots:
tree-kill: 1.2.2
yargs: 17.7.2
- confbox@0.1.7: {}
-
config-chain@1.1.13:
dependencies:
ini: 1.3.8
@@ -5338,7 +5271,7 @@ snapshots:
dependencies:
'@babel/runtime': 7.24.8
- date-fns@3.6.0: {}
+ date-fns@4.1.0: {}
debug@4.3.5(supports-color@5.5.0):
dependencies:
@@ -5354,10 +5287,6 @@ snapshots:
dependencies:
mimic-response: 3.1.0
- deep-eql@4.1.4:
- dependencies:
- type-detect: 4.1.0
-
deep-eql@5.0.2: {}
deep-extend@0.6.0: {}
@@ -5370,9 +5299,9 @@ snapshots:
delegates@1.0.0: {}
- detect-indent@6.1.0: {}
+ denque@2.1.0: {}
- diff-sequences@29.6.3: {}
+ detect-indent@6.1.0: {}
diff@4.0.2: {}
@@ -5621,6 +5550,10 @@ snapshots:
strip-ansi: 6.0.1
wide-align: 1.1.5
+ generate-function@2.3.1:
+ dependencies:
+ is-property: 1.0.2
+
get-caller-file@2.0.5: {}
get-func-name@2.0.2: {}
@@ -5747,7 +5680,6 @@ snapshots:
iconv-lite@0.6.3:
dependencies:
safer-buffer: 2.1.2
- optional: true
ieee754@1.2.1: {}
@@ -5858,6 +5790,8 @@ snapshots:
is-path-inside@3.0.3: {}
+ is-property@1.0.2: {}
+
is-stream@2.0.1: {}
is-subdir@1.2.0:
@@ -5952,8 +5886,6 @@ snapshots:
process-warning: 3.0.0
set-cookie-parser: 2.6.0
- local-pkg@0.4.3: {}
-
locate-path@5.0.0:
dependencies:
p-locate: 4.1.0
@@ -5982,11 +5914,9 @@ snapshots:
safe-stable-stringify: 2.4.3
triple-beam: 1.4.1
- lossless-json@2.0.11: {}
+ long@5.2.3: {}
- loupe@2.3.7:
- dependencies:
- get-func-name: 2.0.2
+ lossless-json@2.0.11: {}
loupe@3.1.1:
dependencies:
@@ -6003,6 +5933,8 @@ snapshots:
lru-cache@7.18.3: {}
+ lru.min@1.1.1: {}
+
magic-string@0.30.11:
dependencies:
'@jridgewell/sourcemap-codec': 1.5.0
@@ -6134,13 +6066,6 @@ snapshots:
mkdirp@1.0.4: {}
- mlly@1.7.1:
- dependencies:
- acorn: 8.12.1
- pathe: 1.1.2
- pkg-types: 1.2.0
- ufo: 1.5.4
-
mnemonist@0.39.5:
dependencies:
obliterator: 2.0.4
@@ -6170,6 +6095,22 @@ snapshots:
mute-stream@1.0.0: {}
+ mysql2@3.11.3:
+ dependencies:
+ aws-ssl-profiles: 1.1.2
+ denque: 2.1.0
+ generate-function: 2.3.1
+ iconv-lite: 0.6.3
+ long: 5.2.3
+ lru.min: 1.1.1
+ named-placeholders: 1.1.3
+ seq-queue: 0.0.5
+ sqlstring: 2.3.3
+
+ named-placeholders@1.1.3:
+ dependencies:
+ lru-cache: 7.18.3
+
nanoid@3.3.7: {}
nearley@2.20.1:
@@ -6386,10 +6327,6 @@ snapshots:
dependencies:
yocto-queue: 0.1.0
- p-limit@4.0.0:
- dependencies:
- yocto-queue: 1.1.1
-
p-locate@4.1.0:
dependencies:
p-limit: 2.3.0
@@ -6460,8 +6397,6 @@ snapshots:
pathe@1.1.2: {}
- pathval@1.1.1: {}
-
pathval@2.0.0: {}
pause-stream@0.0.11:
@@ -6516,12 +6451,6 @@ snapshots:
sonic-boom: 3.8.1
thread-stream: 2.7.0
- pkg-types@1.2.0:
- dependencies:
- confbox: 0.1.7
- mlly: 1.7.1
- pathe: 1.1.2
-
postcss@8.4.39:
dependencies:
nanoid: 3.3.7
@@ -6542,12 +6471,6 @@ snapshots:
prettier@3.3.3: {}
- pretty-format@29.7.0:
- dependencies:
- '@jest/schemas': 29.6.3
- ansi-styles: 5.2.0
- react-is: 18.3.1
-
proc-log@3.0.0: {}
process-nextick-args@2.0.1: {}
@@ -6622,8 +6545,6 @@ snapshots:
minimist: 1.2.8
strip-json-comments: 2.0.1
- react-is@18.3.1: {}
-
read-package-json-fast@3.0.2:
dependencies:
json-parse-even-better-errors: 3.0.2
@@ -6650,6 +6571,16 @@ snapshots:
isarray: 0.0.1
string_decoder: 0.10.31
+ readable-stream@2.3.7:
+ dependencies:
+ core-util-is: 1.0.3
+ inherits: 2.0.4
+ isarray: 1.0.0
+ process-nextick-args: 2.0.1
+ safe-buffer: 5.1.2
+ string_decoder: 1.1.1
+ util-deprecate: 1.0.2
+
readable-stream@2.3.8:
dependencies:
core-util-is: 1.0.3
@@ -6805,6 +6736,8 @@ snapshots:
semver@7.6.2: {}
+ seq-queue@0.0.5: {}
+
set-blocking@2.0.0: {}
set-cookie-parser@2.6.0: {}
@@ -6920,6 +6853,8 @@ snapshots:
sprintf-js@1.1.3: {}
+ sqlstring@2.3.3: {}
+
ssri@10.0.6:
dependencies:
minipass: 7.1.2
@@ -6976,10 +6911,6 @@ snapshots:
strip-json-comments@5.0.1: {}
- strip-literal@1.3.0:
- dependencies:
- acorn: 8.12.1
-
supports-color@5.5.0:
dependencies:
has-flag: 3.0.0
@@ -7022,14 +6953,10 @@ snapshots:
tinyexec@0.3.0: {}
- tinypool@0.7.0: {}
-
tinypool@1.0.1: {}
tinyrainbow@1.2.0: {}
- tinyspy@2.2.1: {}
-
tinyspy@3.0.2: {}
tmp@0.0.33:
@@ -7119,8 +7046,6 @@ snapshots:
transitivePeerDependencies:
- supports-color
- type-detect@4.1.0: {}
-
type-fest@0.21.3: {}
type-fest@1.4.0: {}
@@ -7133,8 +7058,6 @@ snapshots:
typescript@5.6.2: {}
- ufo@1.5.4: {}
-
undefsafe@2.0.5: {}
undici-types@6.19.8: {}
@@ -7199,24 +7122,6 @@ snapshots:
vary@1.1.2: {}
- vite-node@0.34.6(@types/node@22.5.5):
- dependencies:
- cac: 6.7.14
- debug: 4.3.7
- mlly: 1.7.1
- pathe: 1.1.2
- picocolors: 1.1.0
- vite: 5.3.3(@types/node@22.5.5)
- transitivePeerDependencies:
- - '@types/node'
- - less
- - lightningcss
- - sass
- - stylus
- - sugarss
- - supports-color
- - terser
-
vite-node@2.1.1(@types/node@22.5.5):
dependencies:
cac: 6.7.14
@@ -7253,41 +7158,6 @@ snapshots:
'@types/node': 22.5.5
fsevents: 2.3.3
- vitest@0.34.6:
- dependencies:
- '@types/chai': 4.3.20
- '@types/chai-subset': 1.3.5
- '@types/node': 22.5.5
- '@vitest/expect': 0.34.6
- '@vitest/runner': 0.34.6
- '@vitest/snapshot': 0.34.6
- '@vitest/spy': 0.34.6
- '@vitest/utils': 0.34.6
- acorn: 8.12.1
- acorn-walk: 8.3.3
- cac: 6.7.14
- chai: 4.5.0
- debug: 4.3.7
- local-pkg: 0.4.3
- magic-string: 0.30.11
- pathe: 1.1.2
- picocolors: 1.1.0
- std-env: 3.7.0
- strip-literal: 1.3.0
- tinybench: 2.9.0
- tinypool: 0.7.0
- vite: 5.3.3(@types/node@22.5.5)
- vite-node: 0.34.6(@types/node@22.5.5)
- why-is-node-running: 2.3.0
- transitivePeerDependencies:
- - less
- - lightningcss
- - sass
- - stylus
- - sugarss
- - supports-color
- - terser
-
vitest@2.1.1(@types/node@22.5.5):
dependencies:
'@vitest/expect': 2.1.1
@@ -7450,8 +7320,6 @@ snapshots:
yocto-queue@0.1.0: {}
- yocto-queue@1.1.1: {}
-
yoctocolors-cjs@2.1.2: {}
zod@3.23.8: {}
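Note: the lockfile delta above follows from two package.json changes. mysql2@3.11.3 (with transitive dependencies such as named-placeholders, seq-queue, sqlstring, and readable-stream@2.3.7) is added for the new MySQL module, while the vitest@0.34.6 snapshot tree (vite-node@0.34.6, tinypool@0.7.0, tinyspy@2.2.1, strip-literal, pretty-format, etc.) drops out after the upgrade to vitest@2.1.1. As a rough sketch of the new driver dependency — the connection settings below are placeholders, and how module-mysql actually wraps the driver is not shown in this diff — mysql2's promise API is used like this:

import mysql from 'mysql2/promise';

// Hypothetical connection settings; the real service would read these from
// its datasource configuration rather than hard-coding them.
const connection = await mysql.createConnection({
  host: 'localhost',
  port: 3306,
  user: 'repl_user',
  password: 'secret',
  database: 'app'
});

// query() resolves to a [rows, fields] tuple.
const [rows] = await connection.query('SELECT @@gtid_mode AS gtid_mode');
console.log(rows);

await connection.end();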
diff --git a/service/Dockerfile b/service/Dockerfile
index d6641555e..98a893320 100644
--- a/service/Dockerfile
+++ b/service/Dockerfile
@@ -19,6 +19,7 @@ COPY libs/lib-services/package.json libs/lib-services/tsconfig.json libs/lib-ser
COPY modules/module-postgres/package.json modules/module-postgres/tsconfig.json modules/module-postgres/
COPY modules/module-mongodb/package.json modules/module-mongodb/tsconfig.json modules/module-mongodb/
+COPY modules/module-mysql/package.json modules/module-mysql/tsconfig.json modules/module-mysql/
RUN pnpm install --frozen-lockfile
@@ -37,6 +38,7 @@ COPY libs/lib-services/src libs/lib-services/src/
COPY modules/module-postgres/src modules/module-postgres/src/
COPY modules/module-mongodb/src modules/module-mongodb/src/
+COPY modules/module-mysql/src modules/module-mysql/src/
RUN pnpm build:production && \
rm -rf node_modules **/node_modules && \
diff --git a/service/package.json b/service/package.json
index 0e2f96478..95e1f5296 100644
--- a/service/package.json
+++ b/service/package.json
@@ -18,6 +18,7 @@
"@powersync/lib-services-framework": "workspace:*",
"@powersync/service-module-postgres": "workspace:*",
"@powersync/service-module-mongodb": "workspace:*",
+ "@powersync/service-module-mysql": "workspace:*",
"@powersync/service-jpgwire": "workspace:*",
"@powersync/service-jsonbig": "workspace:*",
"@powersync/service-rsocket-router": "workspace:*",
diff --git a/service/src/entry.ts b/service/src/entry.ts
index c92d19397..b4add3850 100644
--- a/service/src/entry.ts
+++ b/service/src/entry.ts
@@ -1,19 +1,20 @@
import { container, ContainerImplementation } from '@powersync/lib-services-framework';
import * as core from '@powersync/service-core';
-import PostgresModule from '@powersync/service-module-postgres';
-import MongoModule from '@powersync/service-module-mongodb';
import { startServer } from './runners/server.js';
import { startStreamRunner } from './runners/stream-worker.js';
import { startUnifiedRunner } from './runners/unified-runner.js';
import { createSentryReporter } from './util/alerting.js';
+import { PostgresModule } from '@powersync/service-module-postgres';
+import { MySQLModule } from '@powersync/service-module-mysql';
+import { MongoModule } from '@powersync/service-module-mongodb';
// Initialize framework components
container.registerDefaults();
container.register(ContainerImplementation.REPORTER, createSentryReporter());
const moduleManager = new core.modules.ModuleManager();
-moduleManager.register([PostgresModule, MongoModule]);
+moduleManager.register([new PostgresModule(), new MySQLModule(), new MongoModule()]);
// This is a bit of a hack. Commands such as the teardown command or even migrations might
// want access to the ModuleManager in order to use modules
container.register(core.ModuleManager, moduleManager);
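Note: together with the module-mongodb index.ts change, this switches module registration from import-time singletons to explicit construction. A minimal illustration of the new pattern — the identifiers mirror this diff, while the comments describe the old behavior for contrast:

import * as core from '@powersync/service-core';
import { MongoModule } from '@powersync/service-module-mongodb';

// Before: `import MongoModule from '...'` yielded an instance that was
// constructed as a side effect of the import itself.
// After: only the class is exported, so the entry point decides when
// construction happens (and could pass per-instance options later).
const moduleManager = new core.modules.ModuleManager();
moduleManager.register([new MongoModule()]);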
diff --git a/service/src/runners/server.ts b/service/src/runners/server.ts
index 28939464b..fef5cfea6 100644
--- a/service/src/runners/server.ts
+++ b/service/src/runners/server.ts
@@ -12,7 +12,7 @@ import { SocketRouter } from '../routes/router.js';
*/
export function registerServerServices(serviceContext: core.system.ServiceContextContainer) {
serviceContext.register(core.routes.RouterEngine, new core.routes.RouterEngine());
- serviceContext.lifeCycleEngine.withLifecycle(serviceContext.routerEngine, {
+ serviceContext.lifeCycleEngine.withLifecycle(serviceContext.routerEngine!, {
start: async (routerEngine) => {
await routerEngine!.start(async (routes) => {
const server = fastify.fastify();
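Note on the `!` added here: it suggests `routerEngine` is now typed as optional on the service context, and the `register()` call on the preceding line is what populates it — TypeScript cannot see that connection, hence the assertion. A self-contained sketch of the pattern (these types are simplified assumptions, not the real service-core definitions):

class RouterEngine {
  async start(): Promise<void> {}
}

class ServiceContext {
  routerEngine?: RouterEngine; // undefined until register() is called

  register(engine: RouterEngine): void {
    this.routerEngine = engine;
  }
}

const ctx = new ServiceContext();
ctx.register(new RouterEngine());

// Option 1 (what the diff does): assert non-null right after registering.
await ctx.routerEngine!.start();

// Option 2 (stricter): narrow explicitly, failing loudly if the wiring changes.
const engine = ctx.routerEngine;
if (engine === undefined) {
  throw new Error('RouterEngine was not registered');
}
await engine.start();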
diff --git a/service/tsconfig.json b/service/tsconfig.json
index 40c9a5329..1576a67cc 100644
--- a/service/tsconfig.json
+++ b/service/tsconfig.json
@@ -35,6 +35,9 @@
},
{
"path": "../modules/module-mongodb"
+ },
+ {
+ "path": "../modules/module-mysql"
}
]
}
diff --git a/tsconfig.json b/tsconfig.json
index bce764b1d..542ecf4b2 100644
--- a/tsconfig.json
+++ b/tsconfig.json
@@ -16,6 +16,15 @@
{
"path": "./packages/sync-rules"
},
+ {
+ "path": "./modules/module-postgres"
+ },
+ {
+ "path": "./modules/module-mysql"
+ },
+ {
+ "path": "./modules/module-mongodb"
+ },
{
"path": "./libs/lib-services"
},