Commit 0a877e7

acceptance WIP
1 parent df1f6e3 commit 0a877e7

File tree

1 file changed (+119, -7 lines)

libudpard/udpard.c

Lines changed: 119 additions & 7 deletions
@@ -733,8 +733,8 @@ typedef struct
     meta_t meta;
 } rx_frame_t;
 
-/// We require that the fragment tree does not contain fully-contained or equal-range fragments.
-/// One implication is that no two fragments can have the same offset.
+/// We require that the fragment tree does not contain fully-contained or equal-range fragments. This implies that no
+/// two fragments have the same offset, and that fragments ordered by offset also order by their ends.
 static int32_t rx_cavl_compare_fragment_offset(const void* const user, const udpard_tree_t* const node)
 {
     const size_t u = *(const size_t*)user;
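
The reworded invariant above is what later lets the same tree be searched by fragment end as well as by offset: when no fragment is fully contained in another, ordering fragments by offset and ordering them by end coincide. A minimal standalone illustration of that property (ad-hoc names, not libudpard code):

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

// A half-open byte range [offset, offset + size), standing in for a fragment.
typedef struct { size_t offset; size_t size; } interval_t;

// True if b lies entirely within a -- the configuration the fragment tree forbids.
static bool contains(const interval_t a, const interval_t b)
{
    return (a.offset <= b.offset) && ((b.offset + b.size) <= (a.offset + a.size));
}

int main(void)
{
    // Two overlapping fragments where neither contains the other: the one with the
    // smaller offset necessarily also has the smaller end, so offset order == end order.
    const interval_t a = {0, 8}; // [0, 8)
    const interval_t b = {4, 8}; // [4, 12)
    assert(!contains(a, b) && !contains(b, a));
    assert((a.offset < b.offset) && ((a.offset + a.size) < (b.offset + b.size)));
    return 0;
}

Without this property, the end-ordered lower-bound query introduced further down could not reuse the offset-indexed tree.
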
@@ -743,12 +743,69 @@ static int32_t rx_cavl_compare_fragment_offset(const void* const user, const udp
     if (u > v) { return +1; }
     return 0; // clang-format on
 }
+static int32_t rx_cavl_compare_fragment_end(const void* const user, const udpard_tree_t* const node)
+{
+    const size_t u = *(const size_t*)user;
+    const udpard_fragment_t* const f = (const udpard_fragment_t*)node;
+    const size_t v = f->offset + f->view.size; // clang-format off
+    if (u < v) { return -1; }
+    if (u > v) { return +1; }
+    return 0; // clang-format on
+}
 
-/// Find the first fragment where offset >= left in log time. Returns NULL if no such fragment exists.
-/// This is intended for overlap removal and gap detection when deciding if a new fragment is needed.
-static udpard_fragment_t* rx_fragment_tree_lower_bound(udpard_tree_t* const root, const size_t left)
+/// Checks the tree for fragments that can be removed if a new one spanning [left, right) is to be inserted.
+/// Returns the first fragment that can be removed, or NULL if none can be removed.
+/// Invoke repeatedly until NULL is returned to remove all redundant fragments.
+/// The complexity is logarithmic in the number of fragments in the tree.
+static udpard_fragment_t* rx_fragment_tree_first_redundant(udpard_tree_t* const root,
+                                                           const size_t left,
+                                                           const size_t right)
 {
-    return (udpard_fragment_t*)cavl2_lower_bound(root, &left, &rx_cavl_compare_fragment_offset);
+    // The simplest case to check first is to find the fragments that fall fully within the new one.
+    // This may somewhat simplify the reasoning about the remaining checks below.
+    udpard_fragment_t* frag = (udpard_fragment_t*)cavl2_lower_bound(root, &left, &rx_cavl_compare_fragment_offset);
+    if ((frag != NULL) && ((frag->offset + frag->view.size) <= right)) {
+        return frag; // We may land here multiple times if the new fragment is large.
+    }
+    // Now we are certain that no fully-contained fragments exist. But the addition of a larger fragment that
+    // joins adjacent fragments together into a larger contiguous block may render smaller fragments that overlap
+    // with its edges redundant. To check for that, we create a new virtual fragment that represents the new fragment
+    // together with those that join it on either end, if any, and then look for fragments fully contained within.
+    // Example:
+    //  |---B---|
+    //      |--X--|
+    //         |--A--|
+    // The addition of fragment A or B will render X redundant, even though it is not contained within any fragment.
+    // This algorithm will detect that and mark X for removal.
+    //
+    // To find the left neighbor, we need to find the fragment crossing the left boundary whose offset is the smallest.
+    // To do that, we simply need to find the fragment with the smallest right boundary that is on the right of our
+    // left boundary. This works because by construction we guarantee that our tree has no fully-contained fragments,
+    // implying that ordering by left is also ordering by right.
+    size_t v_left = left;
+    frag = (udpard_fragment_t*)cavl2_lower_bound(root, &left, &rx_cavl_compare_fragment_end);
+    if (frag != NULL) {
+        v_left = smaller(v_left, frag->offset); // Ignore fragments ending before our left boundary
+    }
+    // The right neighbor is found by analogy: find the fragment with the largest left boundary that is on the left
+    // of our right boundary. This guarantees that the new virtual right boundary will max out to the right.
+    size_t v_right = right;
+    frag = (udpard_fragment_t*)cavl2_predecessor(root, &right, &rx_cavl_compare_fragment_offset);
+    if (frag != NULL) {
+        v_right = larger(v_right, frag->offset + frag->view.size); // Ignore fragments starting after our right boundary
+    }
+    UDPARD_ASSERT((v_left <= left) && (right <= v_right));
+    // Adjust the boundaries to avoid removing the neighbors themselves.
+    v_left++;
+    if (v_right > 0) {
+        v_right--;
+    }
+    // Now we repeat the process as above but with the expanded virtual fragment (v_left, v_right-1).
+    frag = (udpard_fragment_t*)cavl2_lower_bound(root, &v_left, &rx_cavl_compare_fragment_offset);
+    if ((frag != NULL) && ((frag->offset + frag->view.size) <= v_right)) {
+        return frag;
+    }
+    return NULL; // Nothing left to remove.
 }
 
 /// True if the fragment tree does not have a contiguous payload coverage in [left, right).
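
The B/X/A picture in the new comment is the interesting case: a fragment becomes removable only once the union of the new fragment and the fragments overlapping its edges covers it completely, even though no single fragment contains it. The following standalone sketch is a brute-force restatement of that rule over a plain array (ad-hoc names and concrete numbers chosen to match the diagram; this is not libudpard code and performs no tree searches):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

// A half-open byte range [offset, offset + size), standing in for a reassembly fragment.
typedef struct { size_t offset; size_t size; } interval_t;

// True if byte position `pos` falls within `iv`.
static bool covers(const interval_t iv, const size_t pos)
{
    return (pos >= iv.offset) && (pos < (iv.offset + iv.size));
}

// Brute-force redundancy rule: after a new fragment [new_left, new_right) is inserted,
// existing[idx] is removable iff every byte it holds is also covered by the new fragment
// or by some other existing fragment.
static bool is_redundant(const interval_t existing[], const size_t count, const size_t idx,
                         const size_t new_left, const size_t new_right)
{
    const interval_t victim = existing[idx];
    for (size_t pos = victim.offset; pos < (victim.offset + victim.size); pos++) {
        bool covered = (pos >= new_left) && (pos < new_right);
        for (size_t i = 0; (i < count) && !covered; i++) {
            covered = (i != idx) && covers(existing[i], pos);
        }
        if (!covered) { return false; }
    }
    return true;
}

int main(void)
{
    // B = [0, 9), X = [4, 11), A = [7, 14): X and A are already in the tree, B is being inserted.
    const interval_t existing[] = { { 7, 7 }, { 4, 7 } }; // A, X
    printf("X redundant after adding B: %s\n", is_redundant(existing, 2, 1, 0, 9) ? "yes" : "no"); // yes
    printf("A redundant after adding B: %s\n", is_redundant(existing, 2, 0, 0, 9) ? "yes" : "no"); // no
    return 0;
}

The tree-based routine above reaches the same verdict in logarithmic time by expanding the new fragment into the virtual [v_left, v_right) range instead of scanning every fragment.
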
@@ -825,7 +882,7 @@ typedef enum
     rx_fragment_tree_oom,
 } rx_fragment_tree_update_result_t;
 
-/// Takes ownership of the frame payload. Returns true if the transfer is fully reassembled in the tree.
+/// Takes ownership of the frame payload; either a new fragment is inserted or the payload is freed.
 static rx_fragment_tree_update_result_t rx_fragment_tree_update(udpard_tree_t** const root,
                                                                 const udpard_mem_resource_t fragment_memory,
                                                                 const rx_frame_base_t frame,
@@ -837,6 +894,61 @@ static rx_fragment_tree_update_result_t rx_fragment_tree_update(udpard_tree_t**
     // First we need to check if the new fragment is needed at all. It is needed in two cases:
     // 1) It covers new ground in the [0, extent) range.
     // 2) It overlaps smaller fragments that are already in the tree, which can be replaced.
+    {
+        const size_t left = frame.offset;
+        udpard_fragment_t* frag = (udpard_fragment_t*)cavl2_predecessor(*root, &left, &rx_cavl_compare_fragment_offset);
+        if ((frag != NULL) && ((frag->offset + frag->view.size) >= (frame.offset + frame.payload.size))) {
+            mem_free_payload(payload_deleter, frame.origin); // New fragment is fully contained within an existing one.
+            return rx_fragment_tree_not_done;
+        }
+    }
+    udpard_fragment_t* victim =
+        rx_fragment_tree_first_redundant(*root, frame.offset, frame.offset + frame.payload.size);
+    const bool need =
+        (victim != NULL) || rx_fragment_is_needed(*root, frame.offset, frame.payload.size, transfer_payload_size, extent);
+    if (!need) {
+        mem_free_payload(payload_deleter, frame.origin);
+        return rx_fragment_tree_not_done; // Cannot make use of this fragment.
+    }
+
+    // Ensure we can allocate the fragment header for the new frame before pruning the tree to avoid data loss.
+    udpard_fragment_t* frag = mem_alloc(fragment_memory, sizeof(udpard_fragment_t));
+    if (frag == NULL) {
+        mem_free_payload(payload_deleter, frame.origin);
+        return rx_fragment_tree_oom; // Cannot allocate fragment header. Maybe we will succeed later.
+    }
+    mem_zero(sizeof(*frag), frag);
+    frag->view.data = frame.payload.data;
+    frag->view.size = frame.payload.size;
+    frag->origin.data = frame.origin.data;
+    frag->origin.size = frame.origin.size;
+    frag->offset = frame.offset;
+    frag->payload_deleter = payload_deleter;
+
+    // Remove all redundant fragments before inserting the new one.
+    while (victim != NULL) {
+        cavl2_remove(root, &victim->index_offset);
+        mem_free_payload(victim->payload_deleter, victim->origin);
+        mem_free(fragment_memory, sizeof(udpard_fragment_t), victim);
+        victim = rx_fragment_tree_first_redundant(*root, frame.offset, frame.offset + frame.payload.size);
+    }
+
+    // Insert the new fragment.
+    udpard_tree_t* const res = cavl2_find_or_insert(root, //
+                                                    &frag->offset,
+                                                    &rx_cavl_compare_fragment_offset,
+                                                    &frag->index_offset,
+                                                    &cavl2_trivial_factory);
+    UDPARD_ASSERT(res == &frag->index_offset);
+    (void)res;
+
+    // Update the covered prefix.
+    *covered_prefix_io = rx_fragment_tree_update_covered_prefix(*root, //
+                                                                *covered_prefix_io,
+                                                                frame.offset,
+                                                                frame.payload.size);
+    return (*covered_prefix_io >= smaller(extent, transfer_payload_size)) ? rx_fragment_tree_done
+                                                                          : rx_fragment_tree_not_done;
 }
 
 typedef enum