@@ -1,8 +1,8 @@
 use alloc::collections::VecDeque;
-use core::{cmp, hint::unreachable_unchecked, mem::MaybeUninit, slice};
+use core::cmp;
 
 pub struct RingBuffer {
-    buf: VecDeque<MaybeUninit<u8>>,
+    buf: VecDeque<u8>,
 }
 
 impl RingBuffer {
@@ -24,12 +24,10 @@ impl RingBuffer {
     }
 
     /// Return the amount of available space (in bytes) of the buffer.
+    #[cfg(test)]
     pub fn free(&self) -> usize {
         let len = self.buf.len();
         let capacity = self.buf.capacity();
-        if len > capacity {
-            unsafe { unreachable_unchecked() }
-        }
 
         capacity - len
     }
@@ -46,41 +44,23 @@ impl RingBuffer {
 
     /// Ensure that there's space for `amount` elements in the buffer.
     pub fn reserve(&mut self, additional: usize) {
-        if self.free() < additional {
-            self.reserve_amortized(additional);
-        }
-
-        if self.free() < additional {
-            unsafe { unreachable_unchecked() }
-        }
-    }
-
-    #[inline(never)]
-    #[cold]
-    fn reserve_amortized(&mut self, additional: usize) {
         self.buf.reserve(additional);
     }
 
     #[allow(dead_code)]
     pub fn push_back(&mut self, byte: u8) {
-        self.reserve(1);
-        self.buf.push_back(MaybeUninit::new(byte));
+        self.buf.push_back(byte);
     }
 
     /// Fetch the byte stored at the selected index from the buffer, returning it, or
     /// `None` if the index is out of bounds.
     #[allow(dead_code)]
     pub fn get(&self, idx: usize) -> Option<u8> {
-        self.buf
-            .get(idx)
-            .map(|&byte| unsafe { MaybeUninit::assume_init(byte) })
+        self.buf.get(idx).copied()
     }
 
     /// Append the provided data to the end of `self`.
     pub fn extend(&mut self, data: &[u8]) {
-        let len = data.len();
-        let data = data.as_ptr().cast::<MaybeUninit<u8>>();
-        let data = unsafe { slice::from_raw_parts(data, len) };
         self.buf.extend(data);
     }
 
@@ -94,16 +74,12 @@ impl RingBuffer {
 
     /// Return references to each part of the ring buffer.
     pub fn as_slices(&self) -> (&[u8], &[u8]) {
-        let (a, b) = self.buf.as_slices();
-
-        (unsafe { slice_assume_init_ref_polyfill(a) }, unsafe {
-            slice_assume_init_ref_polyfill(b)
-        })
+        self.buf.as_slices()
     }
 
     /// Copies elements from the provided range to the end of the buffer.
     #[allow(dead_code)]
-    pub fn extend_from_within(&mut self, start: usize, len: usize) {
+    pub fn extend_from_within(&mut self, mut start: usize, len: usize) {
         if start + len > self.len() {
             panic!(
                 "Calls to this functions must respect start ({}) + len ({}) <= self.len() ({})!",
@@ -113,43 +89,26 @@ impl RingBuffer {
             );
         }
 
-        self.reserve(len);
-
-        // SAFETY: Requirements checked:
-        // 1. explicitly checked above, resulting in a panic if it does not hold
-        // 2. explicitly reserved enough memory
-        unsafe { self.extend_from_within_unchecked(start, len) }
-    }
+        // Naive and cheaper implementation (for small lengths)
+        if len <= 12 {
+            self.reserve(len);
+            for i in 0..len {
+                let byte = self.get(start + i).unwrap();
+                self.push_back(byte);
+            }
 
-    /// Copies data from the provided range to the end of the buffer, without
-    /// first verifying that the unoccupied capacity is available.
-    ///
-    /// SAFETY:
-    /// For this to be safe two requirements need to hold:
-    /// 1. start + len <= self.len() so we do not copy uninitialised memory
-    /// 2. More then len reserved space so we do not write out-of-bounds
-    #[warn(unsafe_op_in_unsafe_fn)]
-    pub unsafe fn extend_from_within_unchecked(&mut self, mut start: usize, len: usize) {
-        debug_assert!(start + len <= self.len());
-        debug_assert!(self.free() >= len);
-
-        if self.free() < len {
-            unsafe { unreachable_unchecked() }
+            return;
         }
 
         let original_len = self.len();
         let mut intermediate = {
             IntermediateRingBuffer {
                 this: self,
                 original_len,
-                disarmed: false,
             }
         };
 
-        intermediate
-            .this
-            .buf
-            .extend((0..len).map(|_| MaybeUninit::uninit()));
+        intermediate.this.buf.extend((0..len).map(|_| 0));
         debug_assert_eq!(intermediate.this.buf.len(), original_len + len);
 
         let (a, b, a_spare, b_spare) = intermediate.as_slices_spare_mut();
@@ -158,7 +117,7 @@ impl RingBuffer {
         let skip = cmp::min(a.len(), start);
         start -= skip;
         let a = &a[skip..];
-        let b = unsafe { b.get_unchecked(start..) };
+        let b = &b[start..];
 
         let mut remaining_copy_len = len;
 
@@ -168,7 +127,6 @@ impl RingBuffer {
         remaining_copy_len -= copy_at_least;
 
         if remaining_copy_len == 0 {
-            intermediate.disarmed = true;
             return;
         }
 
@@ -181,7 +139,6 @@ impl RingBuffer {
         remaining_copy_len -= copy_at_least;
 
         if remaining_copy_len == 0 {
-            intermediate.disarmed = true;
             return;
         }
 
@@ -193,7 +150,6 @@ impl RingBuffer {
         remaining_copy_len -= copy_at_least;
 
         if remaining_copy_len == 0 {
-            intermediate.disarmed = true;
             return;
         }
 
@@ -205,22 +161,17 @@ impl RingBuffer {
         remaining_copy_len -= copy_at_least;
 
         debug_assert_eq!(remaining_copy_len, 0);
-
-        intermediate.disarmed = true;
     }
 }
 
 struct IntermediateRingBuffer<'a> {
     this: &'a mut RingBuffer,
     original_len: usize,
-    disarmed: bool,
 }
 
 impl<'a> IntermediateRingBuffer<'a> {
     // inspired by `Vec::split_at_spare_mut`
-    fn as_slices_spare_mut(
-        &mut self,
-    ) -> (&[u8], &[u8], &mut [MaybeUninit<u8>], &mut [MaybeUninit<u8>]) {
+    fn as_slices_spare_mut(&mut self) -> (&[u8], &[u8], &mut [u8], &mut [u8]) {
         let (a, b) = self.this.buf.as_mut_slices();
         debug_assert!(a.len() + b.len() >= self.original_len);
 
@@ -230,26 +181,11 @@ impl<'a> IntermediateRingBuffer<'a> {
         let b_mid = remaining_init_len;
         debug_assert!(b.len() >= b_mid);
 
-        let (a, a_spare) = unsafe { a.split_at_mut_unchecked(a_mid) };
-        let (b, b_spare) = unsafe { b.split_at_mut_unchecked(b_mid) };
+        let (a, a_spare) = a.split_at_mut(a_mid);
+        let (b, b_spare) = b.split_at_mut(b_mid);
         debug_assert!(a_spare.is_empty() || b.is_empty());
 
-        (
-            unsafe { slice_assume_init_ref_polyfill(a) },
-            unsafe { slice_assume_init_ref_polyfill(b) },
-            a_spare,
-            b_spare,
-        )
-    }
-}
-
-impl<'a> Drop for IntermediateRingBuffer<'a> {
-    fn drop(&mut self) {
-        if self.disarmed {
-            return;
-        }
-
-        self.this.buf.truncate(self.original_len);
+        (a, b, a_spare, b_spare)
     }
 }
 
@@ -266,48 +202,11 @@ impl<'a> Drop for IntermediateRingBuffer<'a> {
 /// The chunk size is not part of the contract and may change depending on the target platform.
 ///
 /// If that isn't possible we just fall back to ptr::copy_nonoverlapping
-fn copy_bytes_overshooting(src: &[u8], dst: &mut [MaybeUninit<u8>], copy_at_least: usize) {
-    // this assert is required for this function to be safe
-    // the optimizer should be able to remove it given how the caller
-    // has somehow to figure out `copy_at_least <= src.len() && copy_at_least <= dst.len()`
-    assert!(src.len() >= copy_at_least && dst.len() >= copy_at_least);
-
-    type CopyType = usize;
-
-    const COPY_AT_ONCE_SIZE: usize = core::mem::size_of::<CopyType>();
-    let min_buffer_size = usize::min(src.len(), dst.len());
-
-    // this check should be removed by the optimizer thanks to the above assert
-    // if `src.len() >= copy_at_least && dst.len() >= copy_at_least` then `min_buffer_size >= copy_at_least`
-    assert!(min_buffer_size >= copy_at_least);
-
-    // these bounds checks are removed because this is guaranteed:
-    // `min_buffer_size <= src.len() && min_buffer_size <= dst.len()`
-    let src = &src[..min_buffer_size];
-    let dst = &mut dst[..min_buffer_size];
-
-    // Can copy in just one read+write, very common case
-    if min_buffer_size >= COPY_AT_ONCE_SIZE && copy_at_least <= COPY_AT_ONCE_SIZE {
-        let chunk = unsafe { src.as_ptr().cast::<CopyType>().read_unaligned() };
-        unsafe { dst.as_mut_ptr().cast::<CopyType>().write_unaligned(chunk) };
-    } else {
-        unsafe {
-            dst.as_mut_ptr()
-                .cast::<u8>()
-                .copy_from_nonoverlapping(src.as_ptr(), copy_at_least)
-        };
-    }
-
-    debug_assert_eq!(&src[..copy_at_least], unsafe {
-        slice_assume_init_ref_polyfill(&dst[..copy_at_least])
-    });
-}
+fn copy_bytes_overshooting(src: &[u8], dst: &mut [u8], copy_at_least: usize) {
+    let src = &src[..copy_at_least];
+    let dst = &mut dst[..copy_at_least];
 
-#[inline(always)]
-unsafe fn slice_assume_init_ref_polyfill(slice: &[MaybeUninit<u8>]) -> &[u8] {
-    let len = slice.len();
-    let data = slice.as_ptr().cast::<u8>();
-    slice::from_raw_parts(data, len)
+    dst.copy_from_slice(src);
 }
 
 #[cfg(test)]
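// A minimal sketch of the safe pattern this commit switches to, written against a
// plain `VecDeque<u8>` rather than the crate's `RingBuffer`. The free function and
// `main` below are illustrative only and are not part of the crate's API: they copy
// a range of already-initialised bytes to the back of the deque without touching
// spare capacity or `MaybeUninit`.
use std::collections::VecDeque;

fn extend_from_within(buf: &mut VecDeque<u8>, start: usize, len: usize) {
    // Mirrors the commit's bounds check: the source range must lie inside the buffer.
    assert!(start + len <= buf.len(), "start + len must be <= buf.len()");
    buf.reserve(len);
    // Naive byte-by-byte copy, as in the commit's small-length path.
    for i in 0..len {
        let byte = buf[start + i];
        buf.push_back(byte);
    }
}

fn main() {
    let mut buf: VecDeque<u8> = VecDeque::new();
    buf.extend(b"abcd");
    extend_from_within(&mut buf, 1, 3); // appends b"bcd"
    assert_eq!(Vec::from(buf), b"abcdbcd".to_vec());
}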