@@ -325,6 +325,42 @@ bucket_definitions:
     }
   });
 
+  test('old date format', async () => {
+    await using context = await WalStreamTestContext.open(factory);
+    await context.updateSyncRules(BASIC_SYNC_RULES);
+
+    const { pool } = context;
+    await pool.query(`DROP TABLE IF EXISTS test_data`);
+    await pool.query(`CREATE TABLE test_data(id text primary key, description timestamptz);`);
+
+    await context.initializeReplication();
+    await pool.query(`INSERT INTO test_data(id, description) VALUES ('t1', '2025-09-10 15:17:14+02')`);
+
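+    // With the legacy sync rules format, the timestamptz value is expected to be normalized
+    // to UTC and serialized with a space separator and no sub-second precision.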
+    const data = await context.getBucketData('global[]');
+    expect(data).toMatchObject([putOp('test_data', { id: 't1', description: '2025-09-10 13:17:14Z' })]);
+  });
+
+  test('new date format', async () => {
+    await using context = await WalStreamTestContext.open(factory);
+    await context.updateSyncRules(`
+streams:
+  stream:
+    query: SELECT id, * FROM "test_data"
+
+config:
+  edition: 2
+`);
+    const { pool } = context;
+    await pool.query(`DROP TABLE IF EXISTS test_data`);
+    await pool.query(`CREATE TABLE test_data(id text primary key, description timestamptz);`);
+
+    await context.initializeReplication();
+    await pool.query(`INSERT INTO test_data(id, description) VALUES ('t1', '2025-09-10 15:17:14+02')`);
+
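+    // With edition 2 (streams) sync rules, the same value is expected to be serialized as
+    // ISO 8601 with a 'T' separator and microsecond precision.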
+    const data = await context.getBucketData('1#stream|0[]');
+    expect(data).toMatchObject([putOp('test_data', { id: 't1', description: '2025-09-10T13:17:14.000000Z' })]);
+  });
+
   test('custom types', async () => {
     await using context = await WalStreamTestContext.open(factory);
 
@@ -348,4 +384,69 @@ config:
     const data = await context.getBucketData('1#stream|0[]');
     expect(data).toMatchObject([putOp('test_data', { id: 't1', description: '{"foo":1,"bar":2}' })]);
   });
+
+  test('custom types in primary key', async () => {
+    await using context = await WalStreamTestContext.open(factory);
+
+    await context.updateSyncRules(`
+streams:
+  stream:
+    query: SELECT id, * FROM "test_data"
+
+config:
+  edition: 2
+`);
+
+    const { pool } = context;
+    await pool.query(`DROP TABLE IF EXISTS test_data`);
+    await pool.query(`CREATE DOMAIN test_id AS TEXT;`);
+    await pool.query(`CREATE TABLE test_data(id test_id primary key);`);
+
+    await context.initializeReplication();
+    await pool.query(`INSERT INTO test_data(id) VALUES ('t1')`);
+
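+    // The row keyed by the custom domain type is expected to replicate just like a plain
+    // text primary key.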
+    const data = await context.getBucketData('1#stream|0[]');
+    expect(data).toMatchObject([putOp('test_data', { id: 't1' })]);
+  });
+
+  test('replica identity handling', async () => {
+    // This specifically tests a case of timestamps being used as part of the replica identity.
+    // There was a regression in versions 1.15.0-1.15.5, which this tests for.
+    await using context = await WalStreamTestContext.open(factory);
+    const { pool } = context;
+    await context.updateSyncRules(BASIC_SYNC_RULES);
+
+    await pool.query(`DROP TABLE IF EXISTS test_data`);
+    await pool.query(`CREATE TABLE test_data(id uuid primary key, description text, ts timestamptz)`);
+    await pool.query(`ALTER TABLE test_data REPLICA IDENTITY FULL`);
+
+    const test_id = `a9798b07-84de-4297-9a8e-aafb4dd0282f`;
+
+    await pool.query(
+      `INSERT INTO test_data(id, description, ts) VALUES('${test_id}', 'test1', '2025-01-01T00:00:00Z') returning id as test_id`
+    );
+
+    await context.replicateSnapshot();
+    await context.startStreaming();
+
+    await pool.query(`UPDATE test_data SET description = 'test2' WHERE id = '${test_id}'`);
+
+    const data = await context.getBucketData('global[]');
+    // With replica identity full, every update changes the row's replica identity, turning the update into a REMOVE + PUT.
+    expect(data).toMatchObject([
+      // Initial insert
+      putOp('test_data', { id: test_id, description: 'test1' }),
+      // Update
+      removeOp('test_data', test_id),
+      putOp('test_data', { id: test_id, description: 'test2' })
+    ]);
+
+    // subkey contains `${table id}/${replica identity}`.
+    // The table id changes from run to run, but the replica identity should always stay constant.
+    // These hashes should not change when we make changes to the implementation
+    // (unless specifically opting in to new behavior).
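+    // The initial insert and the pre-update REMOVE share the same replica identity hash;
+    // the post-update PUT differs because the description column changed.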
+    expect(data[0].subkey).toContain('/c7b3f1a3-ec4d-5d44-b295-c7f2a32bb056');
+    expect(data[1].subkey).toContain('/c7b3f1a3-ec4d-5d44-b295-c7f2a32bb056');
+    expect(data[2].subkey).toContain('/984d457a-69f0-559a-a2f9-a511c28b968d');
+  });
 }