Skip to content

Commit 35eb32d

Browse files
committed
Move allocating new datagrams into TransmitBuf
This moves the logic of updating the buffer for new datagrams into a function on the TransmitBuf. This centralises all the manipulation of these variables into one logical place, ensuring all the invariants are upheld.
1 parent d9a02c3 commit 35eb32d

File tree

2 files changed

+101
-60
lines changed

2 files changed

+101
-60
lines changed

quinn-proto/src/connection/mod.rs

Lines changed: 27 additions & 60 deletions
Original file line numberDiff line numberDiff line change
@@ -661,68 +661,42 @@ impl Connection {
661661

662662
builder.finish_and_track(now, self, sent_frames.take(), buf.buf);
663663

664-
if buf.num_datagrams == 1 {
665-
// Set the segment size for this GSO batch to the size of the first UDP
666-
// datagram in the batch. Larger data that cannot be fragmented
667-
// (e.g. application datagrams) will be included in a future batch. When
668-
// sending large enough volumes of data for GSO to be useful, we expect
669-
// packet sizes to usually be consistent, e.g. populated by max-size STREAM
670-
// frames or uniformly sized datagrams.
671-
buf.segment_size = buf.len();
672-
// Clip the unused capacity out of the buffer so future packets don't
673-
// overrun
674-
buf.buf_capacity = buf.len();
675-
676-
// Check whether the data we planned to send will fit in the reduced segment
677-
// size. If not, bail out and leave it for the next GSO batch so we don't
678-
// end up trying to send an empty packet. We can't easily compute the right
679-
// segment size before the original call to `space_can_send`, because at
680-
// that time we haven't determined whether we're going to coalesce with the
681-
// first datagram or potentially pad it to `MIN_INITIAL_SIZE`.
682-
if space_id == SpaceId::Data {
683-
let frame_space_1rtt = buf
684-
.segment_size
685-
.saturating_sub(self.predict_1rtt_overhead(Some(pn)));
686-
if self.space_can_send(space_id, frame_space_1rtt).is_empty() {
687-
break;
688-
}
664+
if buf.num_datagrams == 1 && space_id == SpaceId::Data {
665+
// Now that we know the size of the first datagram, check whether
666+
// the data we planned to send will fit in the next segment. If
667+
// not, bail out and leave it for the next GSO batch. We can't
668+
// easily compute the right segment size before the original call to
669+
// `space_can_send`, because at that time we haven't determined
670+
// whether we're going to coalesce with the first datagram or
671+
// potentially pad it to `MIN_INITIAL_SIZE`.
672+
673+
buf.clip_datagram_size();
674+
675+
let frame_space_1rtt = buf
676+
.segment_size
677+
.saturating_sub(self.predict_1rtt_overhead(Some(pn)));
678+
if self.space_can_send(space_id, frame_space_1rtt).is_empty() {
679+
break;
689680
}
690681
}
691682
}
692683

693-
// Allocate space for another datagram
694-
let next_datagram_size_limit = match self.spaces[space_id].loss_probes {
695-
0 => buf.segment_size,
684+
// Start the next datagram
685+
match self.spaces[space_id].loss_probes {
686+
0 => buf.start_new_datagram(),
696687
_ => {
697688
self.spaces[space_id].loss_probes -= 1;
698689
// Clamp the datagram to at most the minimum MTU to ensure that loss probes
699690
// can get through and enable recovery even if the path MTU has shrunk
700691
// unexpectedly.
701-
usize::from(INITIAL_MTU)
692+
buf.start_new_datagram_with_size(std::cmp::min(
693+
usize::from(INITIAL_MTU),
694+
buf.segment_size,
695+
));
702696
}
703697
};
704-
buf.buf_capacity += next_datagram_size_limit;
705-
if buf.buf.capacity() < buf.buf_capacity {
706-
// We reserve the maximum space for sending `max_datagrams` upfront
707-
// to avoid any reallocations if more datagrams have to be appended later on.
708-
// Benchmarks have shown a 5-10% throughput improvement
709-
// compared to continuously resizing the datagram buffer.
710-
// While this will lead to over-allocation for small transmits
711-
// (e.g. purely containing ACKs), modern memory allocators
712-
// (e.g. mimalloc and jemalloc) will pool certain allocation sizes
713-
// and therefore this is still rather efficient.
714-
buf.buf.reserve(buf.max_datagrams * buf.segment_size);
715-
}
716-
buf.num_datagrams += 1;
717698
coalesce = true;
718699
pad_datagram = false;
719-
buf.datagram_start = buf.len();
720-
721-
debug_assert_eq!(
722-
buf.datagram_start % buf.segment_size,
723-
0,
724-
"datagrams in a GSO batch must be aligned to the segment size"
725-
);
726700
} else {
727701
// We can append/coalesce the next packet into the current
728702
// datagram.
@@ -924,8 +898,8 @@ impl Connection {
924898
.mtud
925899
.poll_transmit(now, self.packet_number_filter.peek(&self.spaces[space_id]))?;
926900

927-
buf.buf_capacity = probe_size as usize;
928-
buf.buf.reserve(buf.buf_capacity);
901+
debug_assert_eq!(buf.num_datagrams, 0);
902+
buf.start_new_datagram_with_size(probe_size as usize);
929903

930904
debug_assert_eq!(buf.datagram_start, 0);
931905
let mut builder =
@@ -949,7 +923,6 @@ impl Connection {
949923
builder.finish_and_track(now, self, Some(sent_frames), buf.buf);
950924

951925
self.stats.path.sent_plpmtud_probes += 1;
952-
buf.num_datagrams = 1;
953926

954927
trace!(?probe_size, "writing MTUD probe");
955928
}
@@ -986,11 +959,7 @@ impl Connection {
986959
}
987960

988961
/// Send PATH_CHALLENGE for a previous path if necessary
989-
fn send_path_challenge(
990-
&mut self,
991-
now: Instant,
992-
mut buf: &mut TransmitBuf<'_>,
993-
) -> Option<Transmit> {
962+
fn send_path_challenge(&mut self, now: Instant, buf: &mut TransmitBuf<'_>) -> Option<Transmit> {
994963
let (prev_cid, prev_path) = self.prev_path.as_mut()?;
995964
if !prev_path.challenge_pending {
996965
return None;
@@ -1005,9 +974,7 @@ impl Connection {
1005974
SpaceId::Data,
1006975
"PATH_CHALLENGE queued without 1-RTT keys"
1007976
);
1008-
buf.buf.reserve(MIN_INITIAL_SIZE as usize);
1009-
1010-
buf.buf_capacity = buf.buf.capacity();
977+
buf.start_new_datagram_with_size(MIN_INITIAL_SIZE as usize);
1011978

1012979
// Use the previous CID to avoid linking the new path with the previous path. We
1013980
// don't bother accounting for possible retirement of that prev_cid because this is

quinn-proto/src/connection/transmit_buf.rs

Lines changed: 74 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -74,6 +74,80 @@ impl<'a> TransmitBuf<'a> {
7474
pub(super) fn is_empty(&self) -> bool {
7575
self.len() == 0
7676
}
77+
78+
/// Clips the datagram size to the current size
79+
///
80+
/// Only valid for the first datagram, when the datagram might be smaller than the
81+
/// segment size. Needed before estimating the available space in the next datagram
82+
/// based on [`TransmitBuf::segment_size`].
83+
///
84+
/// Use [`TransmitBuf::start_new_datagram_with_size`] if you need to reduce the size of
85+
/// the last datagram in a batch.
86+
pub(super) fn clip_datagram_size(&mut self) {
87+
debug_assert_eq!(self.num_datagrams, 1);
88+
self.segment_size = self.buf.len();
89+
}
90+
91+
/// Starts a new datagram in the transmit buffer
92+
///
93+
/// If this starts the second datagram the segment size will be set to the size of the
94+
/// first datagram.
95+
///
96+
/// If the underlying buffer does not have enough capacity yet this will allocate enough
97+
/// capacity for all the datagrams allowed in a single batch. Use
98+
/// [`TransmitBuf::start_new_datagram_with_size`] if you know you will need less.
99+
pub(super) fn start_new_datagram(&mut self) {
100+
// We reserve the maximum space for sending `max_datagrams` upfront to avoid any
101+
// reallocations if more datagrams have to be appended later on. Benchmarks have
102+
// shown a 5-10% throughput improvement compared to continuously resizing the
103+
// datagram buffer. While this will lead to over-allocation for small transmits
104+
// (e.g. purely containing ACKs), modern memory allocators (e.g. mimalloc and
105+
// jemalloc) will pool certain allocation sizes and therefore this is still rather
106+
// efficient.
107+
let max_capacity_hint = self.max_datagrams * self.segment_size;
108+
self.new_datagram_inner(self.segment_size, max_capacity_hint)
109+
}
110+
111+
/// Starts a datagram with a custom datagram size
112+
///
113+
/// This is a specialised version of [`TransmitBuf::start_new_datagram`] which sets the
114+
/// datagram size. Useful for e.g. PATH_CHALLENGE, tail-loss probes or MTU probes.
115+
///
116+
/// After the first datagram you can never increase the segment size. If you decrease
117+
/// the size of a datagram in a batch, it must be the last datagram of the batch.
118+
pub(super) fn start_new_datagram_with_size(&mut self, datagram_size: usize) {
119+
// Only reserve space for this datagram, usually it is the last one in the batch.
120+
let max_capacity_hint = datagram_size;
121+
self.new_datagram_inner(datagram_size, max_capacity_hint)
122+
}
123+
124+
fn new_datagram_inner(&mut self, datagram_size: usize, max_capacity_hint: usize) {
125+
debug_assert!(self.num_datagrams < self.max_datagrams);
126+
if self.num_datagrams == 1 {
127+
// Set the segment size to the size of the first datagram.
128+
self.segment_size = self.buf.len();
129+
}
130+
if self.num_datagrams >= 1 {
131+
debug_assert!(datagram_size <= self.segment_size);
132+
if datagram_size < self.segment_size {
133+
// If this is a GSO batch and this datagram is smaller than the segment
134+
// size, this must be the last datagram in the batch.
135+
self.max_datagrams = self.num_datagrams + 1;
136+
}
137+
}
138+
self.datagram_start = self.buf.len();
139+
debug_assert_eq!(
140+
self.datagram_start % self.segment_size,
141+
0,
142+
"datagrams in a GSO batch must be aligned to the segment size"
143+
);
144+
self.buf_capacity = self.datagram_start + datagram_size;
145+
if self.buf_capacity > self.buf.capacity() {
146+
self.buf
147+
.reserve_exact(max_capacity_hint - self.buf.capacity());
148+
}
149+
self.num_datagrams += 1;
150+
}
77151
}
78152

79153
unsafe impl BufMut for TransmitBuf<'_> {

0 commit comments

Comments
 (0)