@@ -753,61 +753,6 @@ static int32_t rx_cavl_compare_fragment_end(const void* const user, const udpard
753753 return 0 ; // clang-format on
754754}
755755
756- /// Checks the tree for fragments that can be removed if a new one spanning [left, right) is to be inserted.
757- /// Returns the first fragment that can be removed, or NULL if none can be removed.
758- /// Invoke repeatedly until NULL is returned to remove all redundant fragments.
759- /// The complexity is logarithmic in the number of fragments in the tree.
760- static udpard_fragment_t * rx_fragment_tree_first_redundant (udpard_tree_t * const root ,
761- const size_t left ,
762- const size_t right )
763- {
764- // The most simple case to check first is to find the fragments that fall fully within the new one.
765- // This may somewhat simplify the reasoning about the remaining checks below.
766- udpard_fragment_t * frag = (udpard_fragment_t * )cavl2_lower_bound (root , & left , & rx_cavl_compare_fragment_offset );
767- if ((frag != NULL ) && ((frag -> offset + frag -> view .size ) <= right )) {
768- return frag ; // We may land here multiple times if the new fragment is large.
769- }
770- // Now we are certain that no fully-contained fragments exist. But the addition of a larger fragment that
771- // joins adjacent fragments together into a larger contiguous block may render smaller fragments that overlap
772- // with its edges redundant. To check for that, we create a new virtual fragment that represents the new fragment
773- // together with those that join it on either end, if any, and then look for fragments fully contained within.
774- // Example:
775- // |---B---|
776- // |--X--|
777- // |--A--|
778- // The addition of fragment A or B will render X redundant, even though it is not contained within any fragment.
779- // This algorithm will detect that and mark X for removal.
780- //
781- // To find the left neighbor, we need to find the fragment crossing the left boundary whose offset is the smallest.
782- // To do that, we simply need to find the fragment with the smallest right boundary that is on the right of our
783- // left boundary. This works because by construction we guarantee that our tree has no fully-contained fragments,
784- // implying that ordering by left is also ordering by right.
785- size_t v_left = left ;
786- frag = (udpard_fragment_t * )cavl2_lower_bound (root , & left , & rx_cavl_compare_fragment_end );
787- if (frag != NULL ) {
788- v_left = smaller (v_left , frag -> offset ); // Ignore fragments ending before our left boundary
789- }
790- // The right neighbor is found by analogy: find the fragment with the largest left boundary that is on the left
791- // of our right boundary. This guarantees that the new virtual right boundary will max out to the right.
792- size_t v_right = right ;
793- frag = (udpard_fragment_t * )cavl2_predecessor (root , & right , & rx_cavl_compare_fragment_offset );
794- if (frag != NULL ) {
795- v_right = larger (v_right , frag -> offset + frag -> view .size ); // Ignore fragments starting after our right boundary
796- }
797- UDPARD_ASSERT ((v_left <= left ) && (right <= v_right ));
798- // Adjust the boundaries to avoid removing the neighbors themselves.
799- v_left ++ ;
800- if (v_right > 0 ) {
801- v_right -- ;
802- }
803- // Now we repeat the process as above but with the expanded virtual fragment (v_left, v_right-1).
804- frag = (udpard_fragment_t * )cavl2_lower_bound (root , & v_left , & rx_cavl_compare_fragment_offset );
805- if ((frag != NULL ) && ((frag -> offset + frag -> view .size ) <= v_right )) {
806- return frag ;
807- }
808- return NULL ; // Nothing left to remove.
809- }
810-
811756/// True if the fragment tree does not have a contiguous payload coverage in [left, right).
812757/// This function requires that the tree does not have fully-overlapped fragments; the implications are that
813758/// no two fragments may have the same offset, and that fragments ordered by offset also order by their ends.
@@ -891,55 +836,88 @@ static rx_fragment_tree_update_result_t rx_fragment_tree_update(udpard_tree_t**
891836 const size_t extent ,
892837 size_t * const covered_prefix_io )
893838{
894- // First we need to check if the new fragment is needed at all. It is needed in two cases:
839+ // The new fragment is needed in two cases:
895840 // 1) It covers new ground in the [0, extent) range.
896841 // 2) It overlaps smaller fragments that are already in the tree, which can be replaced.
897- {
898- const size_t left = frame .offset ;
899- udpard_fragment_t * frag = (udpard_fragment_t * )cavl2_predecessor (* root , & left , & rx_cavl_compare_fragment_offset );
900- if ((frag != NULL ) && ((frag -> offset + frag -> view .size ) >= (frame .offset + frame .payload .size ))) {
901- mem_free_payload (payload_deleter , frame .origin ); // New fragment is fully contained within an existing one.
902- return rx_fragment_tree_not_done ;
903- }
842+ // We do a quick lookup first to see if an existing fragment fully contains the new one;
 843+ // this simplifies the logic below and is very fast.
844+ const size_t left = frame .offset ;
845+ const size_t right = frame .offset + frame .payload .size ;
846+ udpard_fragment_t * frag = (udpard_fragment_t * )cavl2_predecessor (* root , & left , & rx_cavl_compare_fragment_offset );
847+ if ((frag != NULL ) && ((frag -> offset + frag -> view .size ) >= right )) {
848+ mem_free_payload (payload_deleter , frame .origin );
849+ return rx_fragment_tree_not_done ; // New fragment is fully contained within an existing one.
850+ }
851+
852+ // The addition of a larger fragment that joins adjacent fragments together into a larger contiguous block may
853+ // render smaller fragments that overlap with its edges redundant.
854+ // To check for that, we create a new virtual fragment that represents the new fragment together with those
855+ // that join it on either end, if any, and then look for fragments contained within.
856+ // The search interval would be (v_left, v_right-1) to avoid matching the neighbors themselves.
857+ // Example:
858+ // |--B--|
859+ // |--X--|
860+ // |--A--|
861+ // The addition of fragment A or B will render X redundant, even though it is not contained within any fragment.
862+ // This algorithm will detect that and mark X for removal.
863+ //
864+ // To find the left neighbor, we need to find the fragment crossing the left boundary whose offset is the smallest.
865+ // To do that, we simply need to find the fragment with the smallest right boundary that is on the right of our
866+ // left boundary. This works because by construction we guarantee that our tree has no fully-contained fragments,
867+ // implying that ordering by left is also ordering by right.
868+ //
869+ // The right neighbor is found by analogy: find the fragment with the largest left boundary that is on the left
870+ // of our right boundary. This guarantees that the new virtual right boundary will max out to the right.
871+ size_t v_left = left ;
872+ size_t v_right = right ;
873+ frag = (udpard_fragment_t * )cavl2_lower_bound (* root , & left , & rx_cavl_compare_fragment_end );
874+ if (frag != NULL ) {
875+ v_left = smaller (v_left , frag -> offset + 1U ); // Avoid matching the left neighbor itself.
904876 }
905- udpard_fragment_t * victim =
906- rx_fragment_tree_first_redundant (* root , frame .offset , frame .offset + frame .payload .size );
907- const bool need =
908- (victim != NULL ) || rx_fragment_is_needed (* root , frame .offset , frame .payload .size , transfer_payload_size , extent );
877+ frag = (udpard_fragment_t * )cavl2_predecessor (* root , & right , & rx_cavl_compare_fragment_offset );
878+ if (frag != NULL ) {
879+ v_right = larger (v_right , frag -> offset + frag -> view .size );
880+ }
881+ UDPARD_ASSERT ((v_left <= left ) && (right <= v_right ));
882+
883+ // Find the first victim. It has to be done early to decide if the new fragment is worth keeping at all.
884+ frag = (udpard_fragment_t * )cavl2_lower_bound (* root , & v_left , & rx_cavl_compare_fragment_offset );
885+ const bool need = ((frag != NULL ) && ((frag -> offset + frag -> view .size ) < v_right )) ||
886+ rx_fragment_is_needed (* root , frame .offset , frame .payload .size , transfer_payload_size , extent );
909887 if (!need ) {
910888 mem_free_payload (payload_deleter , frame .origin );
911889 return rx_fragment_tree_not_done ; // Cannot make use of this fragment.
912890 }
913891
914892 // Ensure we can allocate the fragment header for the new frame before pruning the tree to avoid data loss.
915- udpard_fragment_t * frag = mem_alloc (fragment_memory , sizeof (udpard_fragment_t ));
916- if (frag == NULL ) {
893+ udpard_fragment_t * mew = mem_alloc (fragment_memory , sizeof (udpard_fragment_t ));
894+ if (mew == NULL ) {
917895 mem_free_payload (payload_deleter , frame .origin );
918896 return rx_fragment_tree_oom ; // Cannot allocate fragment header. Maybe we will succeed later.
919897 }
920- mem_zero (sizeof (* frag ), frag );
921- frag -> view .data = frame .payload .data ;
922- frag -> view .size = frame .payload .size ;
923- frag -> origin .data = frame .origin .data ;
924- frag -> origin .size = frame .origin .size ;
925- frag -> offset = frame .offset ;
926- frag -> payload_deleter = payload_deleter ;
898+ mem_zero (sizeof (* mew ), mew );
899+ mew -> view .data = frame .payload .data ;
900+ mew -> view .size = frame .payload .size ;
901+ mew -> origin .data = frame .origin .data ;
902+ mew -> origin .size = frame .origin .size ;
903+ mew -> offset = frame .offset ;
904+ mew -> payload_deleter = payload_deleter ;
927905
928906 // Remove all redundant fragments before inserting the new one.
929- while (victim != NULL ) {
930- cavl2_remove (root , & victim -> index_offset );
931- mem_free_payload (victim -> payload_deleter , victim -> origin );
932- mem_free (fragment_memory , sizeof (udpard_fragment_t ), victim );
933- victim = rx_fragment_tree_first_redundant ( * root , frame . offset , frame . offset + frame . payload . size );
907+ while (( frag != NULL ) && (( frag -> offset + frag -> view . size ) < v_right ) ) {
908+ cavl2_remove (root , & frag -> index_offset );
909+ mem_free_payload (frag -> payload_deleter , frag -> origin );
910+ mem_free (fragment_memory , sizeof (udpard_fragment_t ), frag );
911+ frag = ( udpard_fragment_t * ) cavl2_lower_bound ( * root , & v_left , & rx_cavl_compare_fragment_offset );
934912 }
935913
936914 // Insert the new fragment.
937915 udpard_tree_t * const res = cavl2_find_or_insert (root , //
938- & frag -> offset ,
916+ & mew -> offset ,
939917 & rx_cavl_compare_fragment_offset ,
940- & frag -> index_offset ,
918+ & mew -> index_offset ,
941919 & cavl2_trivial_factory );
942- UDPARD_ASSERT (res == & frag -> index_offset );
920+ UDPARD_ASSERT (res == & mew -> index_offset );
943921 (void )res ;
944922
945923 // Update the covered prefix.