@@ -743,6 +743,180 @@ static void test_rx_fragment_tree_update_a(void)
     instrumented_allocator_reset(&alloc_payload);
 }
 
+/// Exhaustive test for rx_fragment_tree_update with random fragmentation patterns.
+/// A fixed payload is split into every possible non-empty substring; the substrings are
+/// fed in random order (with duplicates in the second pass) and correct completion
+/// detection is verified.
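+/// For the 10-byte payload used here that is 10 * 11 / 2 = 55 distinct substrings per pass.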
+static void test_rx_fragment_tree_update_exhaustive(void)
+{
+    instrumented_allocator_t alloc_frag = { 0 };
+    instrumented_allocator_new(&alloc_frag);
+    const udpard_mem_resource_t mem_frag = instrumented_allocator_make_resource(&alloc_frag);
+
+    instrumented_allocator_t alloc_payload = { 0 };
+    instrumented_allocator_new(&alloc_payload);
+    const udpard_mem_resource_t mem_payload = instrumented_allocator_make_resource(&alloc_payload);
+    const udpard_mem_deleter_t  del_payload = instrumented_allocator_make_deleter(&alloc_payload);
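+    // Two separate instrumented allocators are used so that the teardown assertions can
+    // attribute any leak to either the fragment-tree nodes or the payload buffers
+    // (following how the other tests in this suite appear to use these helpers).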
+
+    const char   payload[]      = "0123456789";
+    const size_t payload_length = strlen(payload);
+
+    // Generate all possible non-empty substrings as (offset, length) pairs.
+    // For a string of length N, there are N*(N+1)/2 such substrings.
+    typedef struct
+    {
+        size_t offset;
+        size_t length;
+    } substring_t;
+
+    const size_t max_substrings = (payload_length * (payload_length + 1)) / 2;
+    substring_t  substrings[max_substrings];
+    size_t       substring_count = 0;
+
+    for (size_t offset = 0; offset < payload_length; offset++) {
+        for (size_t length = 1; length <= (payload_length - offset); length++) {
+            substrings[substring_count].offset = offset;
+            substrings[substring_count].length = length;
+            substring_count++;
+        }
+    }
+    TEST_ASSERT_EQUAL_size_t(max_substrings, substring_count);
+
+    // Run many randomized iterations to explore different fragment orderings.
+    // The iteration count is the main knob for bounding the total test run time.
+    const size_t num_iterations = 10000;
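+    // rand() is deliberately left unseeded: per the C standard this is equivalent to
+    // srand(1), so every run replays the same schedules and failures are reproducible.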
+
+    for (size_t iteration = 0; iteration < num_iterations; iteration++) {
+        udpard_tree_t* root = NULL;
+        size_t         cov  = 0;
+
+        // Create a randomized schedule of fragments to feed. In this first pass every
+        // substring appears exactly once, in random order; duplicates are exercised by
+        // the second pass below.
+
+        // Track which bytes have been covered by submitted fragments.
+        // sizeof(payload) - 1 equals payload_length, avoiding a magic array size.
+        bool byte_covered[sizeof(payload) - 1] = { false };
+        bool transfer_complete                 = false;
+
+        // Shuffle the substring indices to get a random order.
+        size_t schedule[substring_count];
+        for (size_t i = 0; i < substring_count; i++) {
+            schedule[i] = i;
+        }
+
+        // Fisher-Yates shuffle.
+        for (size_t i = substring_count - 1; i > 0; i--) {
+            const size_t j   = (size_t) (rand() % (int) (i + 1));
+            const size_t tmp = schedule[i];
+            schedule[i]      = schedule[j];
+            schedule[j]      = tmp;
+        }
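+        // rand() % (i + 1) carries a slight modulo bias for ranges that do not evenly
+        // divide RAND_MAX + 1, but uniformity is not required for this coverage test.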
+
+        // Feed fragments in the shuffled order, stopping once every byte has been
+        // seen at least once.
+        for (size_t sched_idx = 0; sched_idx < substring_count; sched_idx++) {
+            const substring_t sub = substrings[schedule[sched_idx]];
+
+            // Allocate and copy the substring payload.
+            char* const frag_data = mem_payload.alloc(mem_payload.user, sub.length);
+            TEST_ASSERT_NOT_NULL(frag_data);
+            memcpy(frag_data, payload + sub.offset, sub.length);
+
+            const rx_frame_base_t frame = { .offset  = sub.offset,
+                                            .payload = { .data = frag_data, .size = sub.length },
+                                            .origin  = { .data = frag_data, .size = sub.length } };
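+            // Ownership of frag_data is assumed to pass to the tree here: if the update
+            // rejects or deduplicates the fragment, it must release the buffer through
+            // del_payload, which the per-iteration leak checks below would catch.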
+
+            const rx_fragment_tree_update_result_t res =
+                rx_fragment_tree_update(&root, mem_frag, del_payload, frame, payload_length, payload_length, &cov);
+
+            // Update our tracking of covered bytes.
+            for (size_t i = 0; i < sub.length; i++) {
+                byte_covered[sub.offset + i] = true;
+            }
+
+            // Check if all bytes are covered.
+            bool all_covered = true;
+            for (size_t i = 0; i < payload_length; i++) {
+                if (!byte_covered[i]) {
+                    all_covered = false;
+                    break;
+                }
+            }
+            if (all_covered) {
+                TEST_ASSERT_EQUAL(rx_fragment_tree_done, res);
+                transfer_complete = true;
+                break;
+            }
+            TEST_ASSERT_EQUAL(rx_fragment_tree_not_done, res);
+        }
+        TEST_ASSERT_TRUE(transfer_complete);
+        TEST_ASSERT_EQUAL_size_t(payload_length, cov);
+
+        udpard_fragment_free_all((udpard_fragment_t*) root, mem_frag);
+        TEST_ASSERT_EQUAL_size_t(0, alloc_frag.allocated_fragments);
+        TEST_ASSERT_EQUAL_size_t(0, alloc_payload.allocated_fragments);
+    }
+    TEST_ASSERT_EQUAL_size_t(0, alloc_frag.allocated_fragments);
+    TEST_ASSERT_EQUAL_size_t(0, alloc_payload.allocated_fragments);
+
+    // Second pass: test with duplicates by sampling substrings with replacement, so the
+    // same fragment may arrive multiple times (and some substrings may never arrive).
+    for (size_t iteration = 0; iteration < num_iterations; iteration++) {
+        udpard_tree_t* root = NULL;
+        size_t         cov  = 0;
+
+        bool byte_covered[sizeof(payload) - 1] = { false };
+        bool transfer_complete                 = false;
+
+        // Create a schedule with duplicates.
+        const size_t schedule_length = substring_count * 3; // 3x oversampling factor
+        size_t       schedule[schedule_length];
+        for (size_t i = 0; i < schedule_length; i++) {
+            schedule[i] = (size_t) (rand() % (int) substring_count);
+        }
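+        // Every payload byte is contained in at least 10 of the 55 substrings, so the
+        // chance that a byte is never drawn in 165 picks is (45/55)^165, under 1e-14;
+        // the completion assertion below is statistically safe across all iterations.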
+
+        // Feed fragments with duplicates.
+        for (size_t sched_idx = 0; sched_idx < schedule_length; sched_idx++) {
+            const substring_t sub = substrings[schedule[sched_idx]];
+
+            char* const frag_data = mem_payload.alloc(mem_payload.user, sub.length);
+            TEST_ASSERT_NOT_NULL(frag_data);
+            memcpy(frag_data, payload + sub.offset, sub.length);
+
+            const rx_frame_base_t frame = { .offset  = sub.offset,
+                                            .payload = { .data = frag_data, .size = sub.length },
+                                            .origin  = { .data = frag_data, .size = sub.length } };
+
+            const rx_fragment_tree_update_result_t res =
+                rx_fragment_tree_update(&root, mem_frag, del_payload, frame, payload_length, payload_length, &cov);
+
+            // Update tracking.
+            for (size_t i = 0; i < sub.length; i++) {
+                byte_covered[sub.offset + i] = true;
+            }
+
+            // Check completion.
+            bool all_covered = true;
+            for (size_t i = 0; i < payload_length; i++) {
+                if (!byte_covered[i]) {
+                    all_covered = false;
+                    break;
+                }
+            }
+            if (all_covered) {
+                TEST_ASSERT_EQUAL(rx_fragment_tree_done, res);
+                transfer_complete = true;
+                break;
+            }
+            TEST_ASSERT_EQUAL(rx_fragment_tree_not_done, res);
+        }
+        TEST_ASSERT_TRUE(transfer_complete);
+        TEST_ASSERT_EQUAL_size_t(payload_length, cov);
+
+        udpard_fragment_free_all((udpard_fragment_t*) root, mem_frag);
+        TEST_ASSERT_EQUAL_size_t(0, alloc_frag.allocated_fragments);
+        TEST_ASSERT_EQUAL_size_t(0, alloc_payload.allocated_fragments);
+    }
+    TEST_ASSERT_EQUAL_size_t(0, alloc_frag.allocated_fragments);
+    TEST_ASSERT_EQUAL_size_t(0, alloc_payload.allocated_fragments);
+}
+
 static void test_rx_transfer_id_forward_distance(void)
 {
     // Test 1: Same value (distance is 0)
@@ -1026,6 +1200,7 @@ int main(void)
 {
     UNITY_BEGIN();
     RUN_TEST(test_rx_fragment_tree_update_a);
+    RUN_TEST(test_rx_fragment_tree_update_exhaustive);
     RUN_TEST(test_rx_transfer_id_forward_distance);
     RUN_TEST(test_rx_transfer_id_window_slide);
     RUN_TEST(test_rx_transfer_id_window_manip);