@@ -457,6 +457,158 @@ mod tests {
         assert_eq!(outcome.all_events[1].event_id(), Some(event_id_1));
     }
 
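+    // Check that the memory-based deduplicator reports duplicates as loaded in memory, and
+    // never as living in the store (the Bloom filter has no store).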
+    #[async_test]
+    async fn test_memory_based_duplicated_event_ids_from_in_memory_vs_in_store() {
+        let event_id_0 = owned_event_id!("$ev0");
+        let event_id_1 = owned_event_id!("$ev1");
+
+        let event_0 = timeline_event(&event_id_0);
+        let event_1 = timeline_event(&event_id_1);
+
+        let mut deduplicator = Deduplicator::new_memory_based();
+        let mut room_events = RoomEvents::new();
+        // `event_0` is loaded in memory.
+        // `event_1` is not loaded in memory, it's new.
+        {
+            let Deduplicator::InMemory(bloom_filter) = &mut deduplicator else {
+                panic!("test is broken, but sky is beautiful");
+            };
+            bloom_filter.bloom_filter.lock().unwrap().insert(event_id_0.clone());
+            room_events.push_events([event_0.clone()]);
+        }
+
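+        // Feed the deduplicator both the already-known `event_0` and the brand new `event_1`.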
+        let outcome = deduplicator
+            .filter_duplicate_events(vec![event_0, event_1], &room_events)
+            .await
+            .unwrap();
+
+        // The deduplication says 2 events are valid.
+        assert_eq!(outcome.all_events.len(), 2);
+        assert_eq!(outcome.all_events[0].event_id(), Some(event_id_0.clone()));
+        assert_eq!(outcome.all_events[1].event_id(), Some(event_id_1));
+
+        // From these 2 events, 1 is duplicated and has been loaded in memory.
+        assert_eq!(outcome.in_memory_duplicated_event_ids.len(), 1);
+        assert_eq!(
+            outcome.in_memory_duplicated_event_ids[0],
+            (event_id_0, Position::new(ChunkIdentifier::new(0), 0))
+        );
+
+        // From these 2 events, 0 are duplicated and live in the store.
+        //
+        // Note: with the Bloom filter, this value is always empty because there is no
+        // store.
+        assert!(outcome.in_store_duplicated_event_ids.is_empty());
+    }
+
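+    // Check that the store-based deduplicator distinguishes duplicates that are loaded in
+    // memory from duplicates that only live in the store.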
+    #[cfg(not(target_arch = "wasm32"))] // This uses the cross-process lock, so needs time support.
+    #[async_test]
+    async fn test_store_based_duplicated_event_ids_from_in_memory_vs_in_store() {
+        use std::sync::Arc;
+
+        use matrix_sdk_base::{
+            event_cache::store::{EventCacheStore, MemoryStore},
+            linked_chunk::Update,
+        };
+        use ruma::room_id;
+
+        let event_id_0 = owned_event_id!("$ev0");
+        let event_id_1 = owned_event_id!("$ev1");
+        let event_id_2 = owned_event_id!("$ev2");
+        let event_id_3 = owned_event_id!("$ev3");
+        let event_id_4 = owned_event_id!("$ev4");
+
+        // `event_0` and `event_1` are in the store.
+        // `event_2` and `event_3` are in the store, but also in memory: they are loaded
+        // in memory from the store.
+        // `event_4` is nowhere, it's new.
+        let event_0 = timeline_event(&event_id_0);
+        let event_1 = timeline_event(&event_id_1);
+        let event_2 = timeline_event(&event_id_2);
+        let event_3 = timeline_event(&event_id_3);
+        let event_4 = timeline_event(&event_id_4);
+
+        let event_cache_store = Arc::new(MemoryStore::new());
+        let room_id = room_id!("!fondue:raclette.ch");
+
+        // Prefill the store with `event_0`, `event_1`, `event_2` and `event_3`, spread over
+        // two chunks.
+        event_cache_store
+            .handle_linked_chunk_updates(
+                room_id,
+                vec![
+                    Update::NewItemsChunk {
+                        previous: None,
+                        new: ChunkIdentifier::new(42),
+                        next: None,
+                    },
+                    Update::PushItems {
+                        at: Position::new(ChunkIdentifier::new(42), 0),
+                        items: vec![event_0.clone(), event_1.clone()],
+                    },
+                    Update::NewItemsChunk {
+                        previous: Some(ChunkIdentifier::new(42)),
+                        // Must match the chunk in `RoomEvents`, so 0. It simulates a
+                        // lazy-load, for example.
+                        new: ChunkIdentifier::new(0),
+                        next: None,
+                    },
+                    Update::PushItems {
+                        at: Position::new(ChunkIdentifier::new(0), 0),
+                        items: vec![event_2.clone(), event_3.clone()],
+                    },
+                ],
+            )
+            .await
+            .unwrap();
+
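+        // Wrap the store in a cross-process lock before handing it to the deduplicator.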
+        let event_cache_store = EventCacheStoreLock::new(event_cache_store, "hodor".to_owned());
+
+        let deduplicator = Deduplicator::new_store_based(room_id.to_owned(), event_cache_store);
+        let mut room_events = RoomEvents::new();
+        room_events.push_events([event_2.clone(), event_3.clone()]);
+
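+        // Feed all five events: four are already known (two loaded in memory, two in the
+        // store only) and one is brand new.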
+        let outcome = deduplicator
+            .filter_duplicate_events(
+                vec![event_0, event_1, event_2, event_3, event_4],
+                &room_events,
+            )
+            .await
+            .unwrap();
+
+        // The deduplication says 5 events are valid.
+        assert_eq!(outcome.all_events.len(), 5);
+        assert_eq!(outcome.all_events[0].event_id(), Some(event_id_0.clone()));
+        assert_eq!(outcome.all_events[1].event_id(), Some(event_id_1.clone()));
+        assert_eq!(outcome.all_events[2].event_id(), Some(event_id_2.clone()));
+        assert_eq!(outcome.all_events[3].event_id(), Some(event_id_3.clone()));
+        assert_eq!(outcome.all_events[4].event_id(), Some(event_id_4.clone()));
+
+        // From these 5 events, 2 are duplicated and have been loaded in memory.
+        //
+        // Note that events are sorted by their descending position.
+        assert_eq!(outcome.in_memory_duplicated_event_ids.len(), 2);
+        assert_eq!(
+            outcome.in_memory_duplicated_event_ids[0],
+            (event_id_3, Position::new(ChunkIdentifier::new(0), 1))
+        );
+        assert_eq!(
+            outcome.in_memory_duplicated_event_ids[1],
+            (event_id_2, Position::new(ChunkIdentifier::new(0), 0))
+        );
+
+        // From these 5 events, 2 are duplicated and live in the store only; they have
+        // not been loaded in memory.
+        //
+        // Note that events are sorted by their descending position.
+        assert_eq!(outcome.in_store_duplicated_event_ids.len(), 2);
+        assert_eq!(
+            outcome.in_store_duplicated_event_ids[0],
+            (event_id_1, Position::new(ChunkIdentifier::new(42), 1))
+        );
+        assert_eq!(
+            outcome.in_store_duplicated_event_ids[1],
+            (event_id_0, Position::new(ChunkIdentifier::new(42), 0))
+        );
+    }
+
     #[test]
     fn test_bloom_filter_no_duplicate() {
         let event_id_0 = owned_event_id!("$ev0");