@@ -661,68 +661,42 @@ impl Connection {
661661
662662 builder. finish_and_track ( now, self , sent_frames. take ( ) , buf. buf ) ;
663663
664- if buf. num_datagrams == 1 {
665- // Set the segment size for this GSO batch to the size of the first UDP
666- // datagram in the batch. Larger data that cannot be fragmented
667- // (e.g. application datagrams) will be included in a future batch. When
668- // sending large enough volumes of data for GSO to be useful, we expect
669- // packet sizes to usually be consistent, e.g. populated by max-size STREAM
670- // frames or uniformly sized datagrams.
671- buf. segment_size = buf. len ( ) ;
672- // Clip the unused capacity out of the buffer so future packets don't
673- // overrun
674- buf. buf_capacity = buf. len ( ) ;
675-
676- // Check whether the data we planned to send will fit in the reduced segment
677- // size. If not, bail out and leave it for the next GSO batch so we don't
678- // end up trying to send an empty packet. We can't easily compute the right
679- // segment size before the original call to `space_can_send`, because at
680- // that time we haven't determined whether we're going to coalesce with the
681- // first datagram or potentially pad it to `MIN_INITIAL_SIZE`.
682- if space_id == SpaceId :: Data {
683- let frame_space_1rtt = buf
684- . segment_size
685- . saturating_sub ( self . predict_1rtt_overhead ( Some ( pn) ) ) ;
686- if self . space_can_send ( space_id, frame_space_1rtt) . is_empty ( ) {
687- break ;
688- }
664+ if buf. num_datagrams == 1 && space_id == SpaceId :: Data {
665+ // Now that we know the size of the first datagram, check whether
666+ // the data we planned to send will fit in the next segment. If
667+ // not, bail out and leave it for the next GSO batch. We can't
668+ // easily compute the right segment size before the original call to
669+ // `space_can_send`, because at that time we haven't determined
670+ // whether we're going to coalesce with the first datagram or
671+ // potentially pad it to `MIN_INITIAL_SIZE`.
672+
673+ buf. clip_datagram_size ( ) ;
674+
675+ let frame_space_1rtt = buf
676+ . segment_size
677+ . saturating_sub ( self . predict_1rtt_overhead ( Some ( pn) ) ) ;
678+ if self . space_can_send ( space_id, frame_space_1rtt) . is_empty ( ) {
679+ break ;
689680 }
690681 }
691682 }
692683
693- // Allocate space for another datagram
694- let next_datagram_size_limit = match self . spaces [ space_id] . loss_probes {
695- 0 => buf. segment_size ,
684+ // Start the next datagram
685+ match self . spaces [ space_id] . loss_probes {
686+ 0 => buf. start_new_datagram ( ) ,
696687 _ => {
697688 self . spaces [ space_id] . loss_probes -= 1 ;
698689 // Clamp the datagram to at most the minimum MTU to ensure that loss probes
699690 // can get through and enable recovery even if the path MTU has shrunk
700691 // unexpectedly.
701- usize:: from ( INITIAL_MTU )
692+ buf. start_new_datagram_with_size ( std:: cmp:: min (
693+ usize:: from ( INITIAL_MTU ) ,
694+ buf. segment_size ,
695+ ) ) ;
702696 }
703697 } ;
704- buf. buf_capacity += next_datagram_size_limit;
705- if buf. buf . capacity ( ) < buf. buf_capacity {
706- // We reserve the maximum space for sending `max_datagrams` upfront
707- // to avoid any reallocations if more datagrams have to be appended later on.
708- // Benchmarks have shown shown a 5-10% throughput improvement
709- // compared to continuously resizing the datagram buffer.
710- // While this will lead to over-allocation for small transmits
711- // (e.g. purely containing ACKs), modern memory allocators
712- // (e.g. mimalloc and jemalloc) will pool certain allocation sizes
713- // and therefore this is still rather efficient.
714- buf. buf . reserve ( buf. max_datagrams * buf. segment_size ) ;
715- }
716- buf. num_datagrams += 1 ;
717698 coalesce = true ;
718699 pad_datagram = false ;
719- buf. datagram_start = buf. len ( ) ;
720-
721- debug_assert_eq ! (
722- buf. datagram_start % buf. segment_size,
723- 0 ,
724- "datagrams in a GSO batch must be aligned to the segment size"
725- ) ;
726700 } else {
727701 // We can append/coalesce the next packet into the current
728702 // datagram.
@@ -924,8 +898,8 @@ impl Connection {
924898 . mtud
925899 . poll_transmit ( now, self . packet_number_filter . peek ( & self . spaces [ space_id] ) ) ?;
926900
927- buf. buf_capacity = probe_size as usize ;
928- buf. buf . reserve ( buf . buf_capacity ) ;
901+ debug_assert_eq ! ( buf. num_datagrams , 0 ) ;
902+ buf. start_new_datagram_with_size ( probe_size as usize ) ;
929903
930904 debug_assert_eq ! ( buf. datagram_start, 0 ) ;
931905 let mut builder =
@@ -949,7 +923,6 @@ impl Connection {
949923 builder. finish_and_track ( now, self , Some ( sent_frames) , buf. buf ) ;
950924
951925 self . stats . path . sent_plpmtud_probes += 1 ;
952- buf. num_datagrams = 1 ;
953926
954927 trace ! ( ?probe_size, "writing MTUD probe" ) ;
955928 }
@@ -1001,9 +974,7 @@ impl Connection {
1001974 SpaceId :: Data ,
1002975 "PATH_CHALLENGE queued without 1-RTT keys"
1003976 ) ;
1004- buf. buf . reserve ( MIN_INITIAL_SIZE as usize ) ;
1005-
1006- buf. buf_capacity = buf. buf . capacity ( ) ;
977+ buf. start_new_datagram_with_size ( MIN_INITIAL_SIZE as usize ) ;
1007978
1008979 // Use the previous CID to avoid linking the new path with the previous path. We
1009980 // don't bother accounting for possible retirement of that prev_cid because this is
0 commit comments