@@ -668,6 +668,293 @@ mod tests {
             "Cache should still work correctly after concurrent operations");
     }
 
+    // Error handling tests
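+    // These exercise recovery paths (simulated lock poisoning, fallback key
+    // generation, memory pressure, and concurrent stress), checking that the
+    // key cache keeps returning correct keys rather than failing hard.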
+    #[test]
+    fn test_cache_lock_poisoning_recovery() {
+        use std::panic;
+        use std::sync::{Arc, Mutex};
+        use std::thread;
+
+        clear_cache();
+
+        // Pre-populate the cache with known values
+        let expected_keys = get_or_generate_keys(32, 0x04C11DB7, true);
+
+        // Create a scenario that could potentially poison a lock. We use a
+        // separate mutex here so we never poison the cache's own lock.
+        let poisoned_flag = Arc::new(Mutex::new(false));
+        let poisoned_flag_clone = Arc::clone(&poisoned_flag);
+
+        // Spawn a thread that panics while holding a lock (simulated)
+        let handle = thread::spawn(move || {
+            // Simulate a panic scenario - we don't actually poison the cache lock
+            // because that would break other tests, but we test the recovery path
+            let _guard = poisoned_flag_clone.lock().unwrap();
+            panic!("Simulated panic");
+        });
+
+        // Wait for the thread to panic
+        let result = handle.join();
+        assert!(result.is_err(), "Thread should have panicked");
+
+        // Verify that our cache still works despite the simulated error scenario
+        let keys_after_panic = get_or_generate_keys(32, 0x04C11DB7, true);
+        assert_eq!(keys_after_panic, expected_keys,
+            "Cache should still work after simulated panic scenario");
+
+        // Test that new cache entries can still be created
+        let new_keys = get_or_generate_keys(64, 0x42F0E1EBA9EA3693, false);
+        assert_eq!(new_keys.len(), 23, "New cache entries should still work");
+
+        // Verify the new keys are cached
+        let cached_new_keys = get_or_generate_keys(64, 0x42F0E1EBA9EA3693, false);
+        assert_eq!(new_keys, cached_new_keys, "New keys should be cached");
+    }
+
714
+    #[test]
+    fn test_cache_fallback_to_direct_generation() {
+        clear_cache();
+
+        // Test that even if cache operations fail, we still get valid keys.
+        // This tests the fallback mechanism in get_or_generate_keys.
+
+        // Generate expected keys directly
+        let expected_keys_32 = generate::keys(32, 0x04C11DB7, true);
+        let expected_keys_64 = generate::keys(64, 0x42F0E1EBA9EA3693, false);
+
+        // Get keys through the cache (should work normally)
+        let cached_keys_32 = get_or_generate_keys(32, 0x04C11DB7, true);
+        let cached_keys_64 = get_or_generate_keys(64, 0x42F0E1EBA9EA3693, false);
+
+        // Verify keys are correct regardless of cache state
+        assert_eq!(cached_keys_32, expected_keys_32,
+            "CRC32 keys should be correct even with cache issues");
+        assert_eq!(cached_keys_64, expected_keys_64,
+            "CRC64 keys should be correct even with cache issues");
+
+        // Test multiple calls to ensure consistency
+        for _ in 0..5 {
+            let keys_32 = get_or_generate_keys(32, 0x04C11DB7, true);
+            let keys_64 = get_or_generate_keys(64, 0x42F0E1EBA9EA3693, false);
+
+            assert_eq!(keys_32, expected_keys_32,
+                "Repeated calls should return consistent CRC32 keys");
+            assert_eq!(keys_64, expected_keys_64,
+                "Repeated calls should return consistent CRC64 keys");
+        }
+    }
+
747
+    #[test]
+    fn test_cache_operations_under_memory_pressure() {
+        clear_cache();
+
+        // Simulate memory pressure by creating many cache entries.
+        // This tests that cache operations remain stable under load.
+        let mut test_keys = Vec::new();
+        let num_entries = 100;
+
+        // Create many different cache entries
+        for i in 0..num_entries {
+            let poly = 0x04C11DB7 + (i as u64);
+            let reflected = i % 2 == 0;
+            let width = if i % 3 == 0 { 64 } else { 32 };
+
+            let keys = get_or_generate_keys(width, poly, reflected);
+            test_keys.push((width, poly, reflected, keys));
+        }
+
+        // Verify all entries are correctly cached and retrievable
+        for (i, &(width, poly, reflected, ref expected_keys)) in test_keys.iter().enumerate() {
+            let cached_keys = get_or_generate_keys(width, poly, reflected);
+            assert_eq!(cached_keys, *expected_keys,
+                "Entry {} should be correctly cached", i);
+        }
+
+        // Test that cache operations still work after creating many entries
+        let new_keys = get_or_generate_keys(32, 0x1EDC6F41, true);
+        assert_eq!(new_keys.len(), 23, "New entries should still work under memory pressure");
+
+        // Verify the new entry is cached
+        let cached_new_keys = get_or_generate_keys(32, 0x1EDC6F41, true);
+        assert_eq!(new_keys, cached_new_keys, "New entry should be cached");
+
+        // Test that cache clearing still works
+        clear_cache();
+
+        // Verify the cache was cleared by testing that operations still work
+        let post_clear_keys = get_or_generate_keys(32, 0x04C11DB7, true);
+        assert_eq!(post_clear_keys.len(), 23, "Cache should work after clearing under memory pressure");
+    }
+
789
+    #[test]
+    fn test_cache_error_recovery_patterns() {
+        clear_cache();
+
+        // Test various error recovery patterns to ensure robustness
+
+        // Pattern 1: Rapid cache operations
+        for i in 0..50 {
+            let poly = 0x04C11DB7 + (i as u64 % 10); // Create some duplicates
+            let keys = get_or_generate_keys(32, poly, true);
+            assert_eq!(keys.len(), 23, "Rapid operation {} should succeed", i);
+        }
+
+        // Pattern 2: Interleaved cache hits and misses
+        let base_keys = get_or_generate_keys(32, 0x04C11DB7, true);
+        for i in 0..20 {
+            // Cache hit
+            let hit_keys = get_or_generate_keys(32, 0x04C11DB7, true);
+            assert_eq!(hit_keys, base_keys, "Cache hit {} should be consistent", i);
+
+            // Cache miss
+            let miss_keys = get_or_generate_keys(32, 0x1EDC6F41 + (i as u64), false);
+            assert_eq!(miss_keys.len(), 23, "Cache miss {} should succeed", i);
+        }
+
+        // Pattern 3: Mixed operations with clearing
+        for i in 0..10 {
+            let keys1 = get_or_generate_keys(32, 0x04C11DB7, true);
+            let keys2 = get_or_generate_keys(64, 0x42F0E1EBA9EA3693, false);
+
+            if i % 3 == 0 {
+                clear_cache();
+            }
+
+            // Operations should still work after clearing
+            let keys3 = get_or_generate_keys(32, 0x04C11DB7, true);
+            let keys4 = get_or_generate_keys(64, 0x42F0E1EBA9EA3693, false);
+
+            // Keys should be consistent (same parameters = same keys)
+            assert_eq!(keys1, keys3, "Keys should be consistent across clears");
+            assert_eq!(keys2, keys4, "Keys should be consistent across clears");
+        }
+    }
+
833
+    #[test]
+    fn test_cache_concurrent_error_scenarios() {
+        use std::sync::{Arc, Barrier};
+        use std::thread;
+        use std::time::Duration;
+
+        clear_cache();
+
+        let num_threads = 8;
+        let barrier = Arc::new(Barrier::new(num_threads));
+        let mut handles = Vec::new();
+
+        // Create concurrent scenarios that could potentially cause errors
+        for i in 0..num_threads {
+            let barrier_clone = Arc::clone(&barrier);
+            let handle = thread::spawn(move || {
+                barrier_clone.wait();
+
+                let mut operations = 0;
+                let errors = 0;
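+                // The error counter stays at zero by design: get_or_generate_keys
+                // and clear_cache don't return Results, so any failure in this loop
+                // would surface as a panic and be caught by handle.join() below.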
+                let start = std::time::Instant::now();
+
+                // Run operations for a short time with various patterns
+                while start.elapsed() < Duration::from_millis(100) {
+                    match operations % 5 {
+                        0 => {
+                            // Normal cache operations
+                            let _keys = get_or_generate_keys(32, 0x04C11DB7, true);
+                        }
+                        1 => {
+                            // Rapidly changing parameters
+                            let poly = 0x1EDC6F41 + (operations as u64);
+                            let _keys = get_or_generate_keys(32, poly, true);
+                        }
+                        2 => {
+                            // Cache clearing (potential contention point)
+                            clear_cache();
+                        }
+                        3 => {
+                            // Mixed width operations
+                            let _keys32 = get_or_generate_keys(32, 0x04C11DB7, true);
+                            let _keys64 = get_or_generate_keys(64, 0x42F0E1EBA9EA3693, false);
+                        }
+                        4 => {
+                            // Rapid same-parameter calls (cache hits)
+                            for _ in 0..5 {
+                                let _keys = get_or_generate_keys(32, 0x04C11DB7, true);
+                            }
+                        }
+                        _ => unreachable!(),
+                    }
+                    operations += 1;
+                }
+
+                (i, operations, errors)
+            });
+            handles.push(handle);
+        }
+
+        // Collect results
+        let mut results = Vec::new();
+        for handle in handles {
+            match handle.join() {
+                Ok(result) => results.push(result),
+                Err(_) => panic!("Thread should not panic during error scenarios"),
+            }
+        }
+
+        // Verify all threads completed successfully
+        assert_eq!(results.len(), num_threads);
+
+        for (thread_id, operations, errors) in results {
+            assert!(operations > 0, "Thread {} should have completed operations", thread_id);
+            // In our implementation, errors should be handled gracefully without propagating
+            assert_eq!(errors, 0, "Thread {} should not have unhandled errors", thread_id);
+        }
+
+        // Verify the cache is still functional after all concurrent error scenarios
+        let final_keys = get_or_generate_keys(32, 0x04C11DB7, true);
+        let expected_keys = generate::keys(32, 0x04C11DB7, true);
+        assert_eq!(final_keys, expected_keys,
+            "Cache should still work correctly after concurrent error scenarios");
+    }
+
917
+    #[test]
+    fn test_cache_memory_allocation_stress() {
+        clear_cache();
+
+        // Test cache behavior under memory allocation stress by creating a large
+        // number of unique cache entries.
+        let mut created_entries = Vec::new();
+        let stress_count = 1000;
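+        // Each iteration below uses a distinct polynomial, so this allocates 1000
+        // unique cache entries rather than re-hitting the same few keys.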
+
+        // Create many unique cache entries
+        for i in 0..stress_count {
+            let width = if i % 2 == 0 { 32 } else { 64 };
+            let poly = 0x04C11DB7 + (i as u64);
+            let reflected = i % 3 == 0;
+
+            let keys = get_or_generate_keys(width, poly, reflected);
+            created_entries.push((width, poly, reflected, keys));
+
+            // Verify each entry is valid
+            assert_eq!(keys.len(), 23, "Entry {} should have valid keys", i);
+        }
+
+        // Verify all entries are still accessible (testing cache integrity)
+        for (i, &(width, poly, reflected, ref expected_keys)) in created_entries.iter().enumerate() {
+            let retrieved_keys = get_or_generate_keys(width, poly, reflected);
+            assert_eq!(retrieved_keys, *expected_keys,
+                "Entry {} should be retrievable after stress test", i);
+        }
+
+        // Test that new entries can still be created
+        let new_keys = get_or_generate_keys(32, 0xFFFFFFFF, true);
+        assert_eq!(new_keys.len(), 23, "New entries should work after memory stress");
+
+        // Test that cache clearing works under memory pressure
+        clear_cache();
+
+        // Verify cache operations still work after clearing
+        let post_stress_keys = get_or_generate_keys(32, 0x04C11DB7, true);
+        assert_eq!(post_stress_keys.len(), 23, "Cache should work after memory stress and clearing");
+    }
+
     // Integration tests for CrcParams compatibility
     #[test]
     fn test_crc_params_new_behavior_unchanged() {