@@ -621,6 +621,132 @@ testSetups.forEach((setup) => {
621621 await checkBounded ( entryCount , 1 , 1 , db1 , db2 ) ;
622622 } ) ;
623623
// Regression test: when a replicator leaves, its entries must be redistributed
// to the surviving peers even if a "maybe missing entries" sync message drops
// one of the hashes. We stage that drop by intercepting the syncronizer's
// onMaybeMissingEntries hook, then assert the surviving peer still converges.
it("repairs redistributed entry when maybe-sync misses one hash on peer leave", async function () {
	// The intercepted hook shape below is specific to the u64-iblt
	// syncronizer; skip this test for every other setup.
	if (setup.name !== "u64-iblt") {
		this.skip();
	}

	// Shared open-args for all three databases.
	// NOTE(review): timeUntilRoleMaturity: 0 presumably makes peers count as
	// mature replicators immediately, and waitForPruneDelay: 50 shortens the
	// prune wait — confirm against the replication options' docs.
	const args = {
		timeUntilRoleMaturity: 0,
		waitForPruneDelay: 50,
		setup,
	} as const;

	// Three replicators with evenly spaced replication offsets
	// (0, ~1/3, ~2/3) so the entry set is partitioned across them.
	db1 = await session.peers[0].open(new EventStore<string, any>(), {
		args: {
			replicate: {
				offset: 0,
			},
			...args,
		},
	});

	db2 = await EventStore.open<EventStore<string, any>>(
		db1.address!,
		session.peers[1],
		{
			args: {
				replicate: {
					offset: 0.3333,
				},
				...args,
			},
		},
	);
	db3 = await EventStore.open<EventStore<string, any>>(
		db1.address!,
		session.peers[2],
		{
			args: {
				replicate: {
					offset: 0.6666,
				},
				...args,
			},
		},
	);

	// Insert enough independent entries (next: [] keeps them unlinked) that
	// each peer ends up holding only a partial subset.
	const entryCount = sampleSize * 3;
	const inserts: Promise<any>[] = [];
	for (let i = 0; i < entryCount; i++) {
		inserts.push(
			db1.add(toBase64(new Uint8Array([i])), {
				meta: { next: [] },
			}),
		);
	}
	await Promise.all(inserts);
	// Wait until every db holds a strict subset (between 50% and 90%) of the
	// entries, i.e. the set really is partitioned.
	await checkBounded(entryCount, 0.5, 0.9, db1, db2, db3);

	// Find an entry that db1 and db3 hold but db2 does not: once db3 leaves,
	// this entry has to be redistributed to db2 for full coverage.
	const db2Hash = db2.node.identity.publicKey.hashcode();
	let candidateHash: string | undefined;
	for (const entry of await db1.log.log.toArray()) {
		if (await db2.log.log.has(entry.hash)) {
			continue;
		}
		if (!(await db3.log.log.has(entry.hash))) {
			continue;
		}
		candidateHash = entry.hash;
		break;
	}
	expect(
		candidateHash,
		"expected entry that requires redistribution to surviving peer",
	).to.be.a("string");

	// Structural cast: onMaybeMissingEntries is an internal hook not exposed
	// on the public syncronizer type.
	const sync = db1.log.syncronizer as {
		onMaybeMissingEntries: (properties: {
			entries: Map<string, any>;
			targets: string[];
		}) => Promise<void>;
	};
	const originalOnMaybeMissingEntries =
		sync.onMaybeMissingEntries.bind(sync);

	// Fault injection: whenever a maybe-missing batch targeted at db2
	// contains the candidate, forward the batch WITHOUT that one hash.
	// Note this filters every matching call, so the candidate can never be
	// delivered through this path — the repair must come from elsewhere.
	sync.onMaybeMissingEntries = async (properties) => {
		if (
			candidateHash &&
			properties.targets.includes(db2Hash) &&
			properties.entries.has(candidateHash)
		) {
			const filtered = new Map(properties.entries);
			filtered.delete(candidateHash);
			return originalOnMaybeMissingEntries({
				...properties,
				entries: filtered,
			});
		}
		return originalOnMaybeMissingEntries(properties);
	};

	try {
		// Trigger redistribution by dropping the third replicator.
		await db3.close();

		// Both survivors must observe that only two replicators remain.
		await Promise.all([
			waitForResolved(async () =>
				expect(await db1.log.replicationIndex?.getSize()).equal(2),
			),
			waitForResolved(async () =>
				expect(await db2.log.replicationIndex?.getSize()).equal(2),
			),
		]);

		// The key assertion: despite the dropped hash, db2 eventually
		// receives the candidate entry. Generous timeout since repair may
		// take several sync rounds.
		await waitForResolved(
			async () =>
				expect(await db2.log.log.has(candidateHash!)).to.be.true,
			{
				timeout: 30_000,
				delayInterval: 500,
			},
		);

		// With only two replicators left, each survivor must hold 100%.
		await checkBounded(entryCount, 1, 1, db1, db2);
	} finally {
		// Always undo the fault injection so later tests see the real hook.
		sync.onMaybeMissingEntries = originalOnMaybeMissingEntries;
	}
});
749+
624750 it ( "handles peer joining and leaving multiple times" , async ( ) => {
625751 db1 = await session . peers [ 0 ] . open ( new EventStore < string , any > ( ) , {
626752 args : {
0 commit comments