Commit 79c7e65

Move allocating new datagrams into TransmitBuf
This moves the logic for setting up the buffer for a new datagram into functions on the TransmitBuf. This centralises all manipulation of these variables in one logical place, ensuring all the invariants are upheld.
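
For orientation, here is a rough sketch of the TransmitBuf state the new methods manage. The field names are taken from the diff below; the exact types, visibility, and any further fields in transmit_buf.rs are assumptions, not the actual definition:

    // Sketch only, inferred from this diff; not the real definition.
    pub(super) struct TransmitBuf<'a> {
        buf: &'a mut Vec<u8>,   // output buffer the datagrams are encoded into
        buf_capacity: usize,    // offset the current datagram may grow up to
        datagram_start: usize,  // offset where the current datagram begins
        segment_size: usize,    // GSO segment size, fixed by the first datagram
        num_datagrams: usize,   // datagrams started so far in this batch
        max_datagrams: usize,   // upper bound on datagrams per GSO batch
    }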
1 parent d64058b

File tree

2 files changed: +100, −55 lines

quinn-proto/src/connection/mod.rs
26 additions, 55 deletions

@@ -661,68 +661,42 @@ impl Connection {

                     builder.finish_and_track(now, self, sent_frames.take(), buf.buf);

-                    if buf.num_datagrams == 1 {
-                        // Set the segment size for this GSO batch to the size of the first UDP
-                        // datagram in the batch. Larger data that cannot be fragmented
-                        // (e.g. application datagrams) will be included in a future batch. When
-                        // sending large enough volumes of data for GSO to be useful, we expect
-                        // packet sizes to usually be consistent, e.g. populated by max-size STREAM
-                        // frames or uniformly sized datagrams.
-                        buf.segment_size = buf.len();
-                        // Clip the unused capacity out of the buffer so future packets don't
-                        // overrun
-                        buf.buf_capacity = buf.len();
-
-                        // Check whether the data we planned to send will fit in the reduced segment
-                        // size. If not, bail out and leave it for the next GSO batch so we don't
-                        // end up trying to send an empty packet. We can't easily compute the right
-                        // segment size before the original call to `space_can_send`, because at
-                        // that time we haven't determined whether we're going to coalesce with the
-                        // first datagram or potentially pad it to `MIN_INITIAL_SIZE`.
-                        if space_id == SpaceId::Data {
-                            let frame_space_1rtt = buf
-                                .segment_size
-                                .saturating_sub(self.predict_1rtt_overhead(Some(pn)));
-                            if self.space_can_send(space_id, frame_space_1rtt).is_empty() {
-                                break;
-                            }
+                    if buf.num_datagrams == 1 && space_id == SpaceId::Data {
+                        // Now that we know the size of the first datagram, check whether
+                        // the data we planned to send will fit in the next segment. If
+                        // not, bail out and leave it for the next GSO batch. We can't
+                        // easily compute the right segment size before the original call to
+                        // `space_can_send`, because at that time we haven't determined
+                        // whether we're going to coalesce with the first datagram or
+                        // potentially pad it to `MIN_INITIAL_SIZE`.
+
+                        buf.clip_datagram_size();
+
+                        let frame_space_1rtt = buf
+                            .segment_size
+                            .saturating_sub(self.predict_1rtt_overhead(Some(pn)));
+                        if self.space_can_send(space_id, frame_space_1rtt).is_empty() {
+                            break;
                         }
                     }
                 }

-                // Allocate space for another datagram
-                let next_datagram_size_limit = match self.spaces[space_id].loss_probes {
-                    0 => buf.segment_size,
+                // Start the next datagram
+                match self.spaces[space_id].loss_probes {
+                    0 => buf.start_new_datagram(),
                     _ => {
                         self.spaces[space_id].loss_probes -= 1;
                         // Clamp the datagram to at most the minimum MTU to ensure that loss probes
                         // can get through and enable recovery even if the path MTU has shrunk
                         // unexpectedly.
-                        usize::from(INITIAL_MTU)
+                        buf.start_new_datagram_with_size(std::cmp::min(
+                            usize::from(INITIAL_MTU),
+                            buf.segment_size,
+                        ));
                     }
                 };
-                buf.buf_capacity += next_datagram_size_limit;
-                if buf.buf.capacity() < buf.buf_capacity {
-                    // We reserve the maximum space for sending `max_datagrams` upfront
-                    // to avoid any reallocations if more datagrams have to be appended later on.
-                    // Benchmarks have shown a 5-10% throughput improvement
-                    // compared to continuously resizing the datagram buffer.
-                    // While this will lead to over-allocation for small transmits
-                    // (e.g. purely containing ACKs), modern memory allocators
-                    // (e.g. mimalloc and jemalloc) will pool certain allocation sizes
-                    // and therefore this is still rather efficient.
-                    buf.buf.reserve(buf.max_datagrams * buf.segment_size);
-                }
-                buf.num_datagrams += 1;
                 coalesce = true;
                 pad_datagram = false;
-                buf.datagram_start = buf.len();
-
-                debug_assert_eq!(
-                    buf.datagram_start % buf.segment_size,
-                    0,
-                    "datagrams in a GSO batch must be aligned to the segment size"
-                );
             } else {
                 // We can append/coalesce the next packet into the current
                 // datagram.

@@ -924,8 +898,8 @@ impl Connection {
                 .mtud
                 .poll_transmit(now, self.packet_number_filter.peek(&self.spaces[space_id]))?;

-            buf.buf_capacity = probe_size as usize;
-            buf.buf.reserve(buf.buf_capacity);
+            debug_assert_eq!(buf.num_datagrams, 0);
+            buf.start_new_datagram_with_size(probe_size as usize);

             debug_assert_eq!(buf.datagram_start, 0);
             let mut builder =

@@ -949,7 +923,6 @@ impl Connection {
             builder.finish_and_track(now, self, Some(sent_frames), buf.buf);

             self.stats.path.sent_plpmtud_probes += 1;
-            buf.num_datagrams = 1;

             trace!(?probe_size, "writing MTUD probe");
         }

@@ -1001,9 +974,7 @@ impl Connection {
                 SpaceId::Data,
                 "PATH_CHALLENGE queued without 1-RTT keys"
             );
-            buf.buf.reserve(MIN_INITIAL_SIZE as usize);
-
-            buf.buf_capacity = buf.buf.capacity();
+            buf.start_new_datagram_with_size(MIN_INITIAL_SIZE as usize);

             // Use the previous CID to avoid linking the new path with the previous path. We
             // don't bother accounting for possible retirement of that prev_cid because this is
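
Taken together, the call sites above now funnel through two entry points. A condensed view of the call patterns after this commit (the calls are lifted from the hunks above, but the surrounding control flow is elided, so this is not compilable on its own):

    // Regular GSO batch: the next datagram inherits the segment size.
    buf.start_new_datagram();

    // Tail-loss probe: clamp to INITIAL_MTU so the probe can get through even
    // if the path MTU has shrunk, but never above segment_size, since a
    // datagram smaller than the segment size must be the last in its batch.
    buf.start_new_datagram_with_size(std::cmp::min(
        usize::from(INITIAL_MTU),
        buf.segment_size,
    ));

    // MTUD probe and PATH_CHALLENGE: single-datagram transmits of known size.
    buf.start_new_datagram_with_size(probe_size as usize);
    buf.start_new_datagram_with_size(MIN_INITIAL_SIZE as usize);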

quinn-proto/src/connection/transmit_buf.rs
74 additions, 0 deletions

@@ -65,6 +65,80 @@ impl<'a> TransmitBuf<'a> {
         }
     }

+    /// Starts a datagram with a custom datagram size
+    ///
+    /// This is a specialized version of [`TransmitBuf::start_new_datagram`] which sets the
+    /// datagram size. Useful for e.g. PATH_CHALLENGE, tail-loss probes or MTU probes.
+    ///
+    /// After the first datagram you can never increase the segment size. If you decrease
+    /// the size of a datagram in a batch, it must be the last datagram of the batch.
+    pub(super) fn start_new_datagram_with_size(&mut self, datagram_size: usize) {
+        // Only reserve space for this datagram, usually it is the last one in the batch.
+        let max_capacity_hint = datagram_size;
+        self.new_datagram_inner(datagram_size, max_capacity_hint)
+    }
+
+    /// Starts a new datagram in the transmit buffer
+    ///
+    /// If this starts the second datagram, the segment size will be set to the size of
+    /// the first datagram.
+    ///
+    /// If the underlying buffer does not have enough capacity yet, this will allocate
+    /// enough capacity for all the datagrams allowed in a single batch. Use
+    /// [`TransmitBuf::start_new_datagram_with_size`] if you know you will need less.
+    pub(super) fn start_new_datagram(&mut self) {
+        // We reserve the maximum space for sending `max_datagrams` upfront to avoid any
+        // reallocations if more datagrams have to be appended later on. Benchmarks have
+        // shown a 5-10% throughput improvement compared to continuously resizing the
+        // datagram buffer. While this will lead to over-allocation for small transmits
+        // (e.g. purely containing ACKs), modern memory allocators (e.g. mimalloc and
+        // jemalloc) will pool certain allocation sizes and therefore this is still rather
+        // efficient.
+        let max_capacity_hint = self.max_datagrams * self.segment_size;
+        self.new_datagram_inner(self.segment_size, max_capacity_hint)
+    }
+
+    fn new_datagram_inner(&mut self, datagram_size: usize, max_capacity_hint: usize) {
+        debug_assert!(self.num_datagrams < self.max_datagrams);
+        if self.num_datagrams == 1 {
+            // Set the segment size to the size of the first datagram.
+            self.segment_size = self.buf.len();
+        }
+        if self.num_datagrams >= 1 {
+            debug_assert!(datagram_size <= self.segment_size);
+            if datagram_size < self.segment_size {
+                // If this is a GSO batch and this datagram is smaller than the segment
+                // size, this must be the last datagram in the batch.
+                self.max_datagrams = self.num_datagrams + 1;
+            }
+        }
+        self.datagram_start = self.buf.len();
+        debug_assert_eq!(
+            self.datagram_start % self.segment_size,
+            0,
+            "datagrams in a GSO batch must be aligned to the segment size"
+        );
+        self.buf_capacity = self.datagram_start + datagram_size;
+        if self.buf_capacity > self.buf.capacity() {
+            self.buf
+                .reserve_exact(max_capacity_hint - self.buf.capacity());
+        }
+        self.num_datagrams += 1;
+    }
+
+    /// Clips the datagram size to the current size
+    ///
+    /// Only valid for the first datagram, when the datagram might be smaller than the
+    /// segment size. Needed before estimating the available space in the next datagram
+    /// based on [`TransmitBuf::segment_size`].
+    ///
+    /// Use [`TransmitBuf::start_new_datagram_with_size`] if you need to reduce the size
+    /// of the last datagram in a batch.
+    pub(super) fn clip_datagram_size(&mut self) {
+        debug_assert_eq!(self.num_datagrams, 1);
+        self.segment_size = self.buf.len();
+    }
+
     /// Returns `true` if the buffer did not have anything written into it
     pub(super) fn is_empty(&self) -> bool {
         self.len() == 0