-use std::borrow::Cow;
-use std::collections::VecDeque;
-use std::ops::{Bound, RangeBounds};
+use std::ops::{Bound, RangeBounds, RangeToInclusive};
 
+use super::rolling_buffer::RollingBuffer;
 use crate::error::AppendError;
 use crate::mem::QueueSummary;
 use crate::rolling::FileNumber;
 use crate::Record;
 
-#[derive(Default)]
-struct RollingBuffer {
-    buffer: VecDeque<u8>,
-}
-
-impl RollingBuffer {
-    fn new() -> Self {
-        RollingBuffer {
-            buffer: VecDeque::new(),
-        }
-    }
-
-    fn len(&self) -> usize {
-        self.buffer.len()
-    }
-
-    fn capacity(&self) -> usize {
-        self.buffer.capacity()
-    }
-
-    fn clear(&mut self) {
-        self.buffer.clear();
-        self.buffer.shrink_to_fit();
-    }
-
-    fn drain_start(&mut self, pos: usize) {
-        let target_capacity = self.len() * 9 / 8;
-        self.buffer.drain(..pos);
-        // In order to avoid leaking memory, we shrink the buffer.
-        // The last maximum length (= the length before the drain)
-        // is a good estimate of what we will need in the future.
-        //
-        // We add 1/8 to that in order to make sure that we don't end up
-        // shrinking / allocating for small variations.
-
-        if self.buffer.capacity() > target_capacity {
-            self.buffer.shrink_to(target_capacity);
-        }
-    }
-
-    fn extend(&mut self, slice: &[u8]) {
-        self.buffer.extend(slice.iter().copied());
-    }
-
-    fn get_range(&self, bounds: impl RangeBounds<usize>) -> Cow<[u8]> {
-        let start = match bounds.start_bound() {
-            Bound::Included(pos) => *pos,
-            Bound::Excluded(pos) => pos + 1,
-            Bound::Unbounded => 0,
-        };
-
-        let end = match bounds.end_bound() {
-            Bound::Included(pos) => pos + 1,
-            Bound::Excluded(pos) => *pos,
-            Bound::Unbounded => self.len(),
-        };
-
-        let (left_part_of_queue, right_part_of_queue) = self.buffer.as_slices();
-
-        if end < left_part_of_queue.len() {
-            Cow::Borrowed(&left_part_of_queue[start..end])
-        } else if start >= left_part_of_queue.len() {
-            let start = start - left_part_of_queue.len();
-            let end = end - left_part_of_queue.len();
-
-            Cow::Borrowed(&right_part_of_queue[start..end])
-        } else {
-            // A VecDeque is a rolling (ring) buffer. As a result, we do not
-            // necessarily have access to a contiguous slice of memory.
-            //
-            // Here the requested range crosses the boundary between the two
-            // slices, so we need to allocate and copy the data into a new buffer.
-            let mut res = Vec::with_capacity(end - start);
-            res.extend_from_slice(&left_part_of_queue[start..]);
-            let end = end - left_part_of_queue.len();
-            res.extend_from_slice(&right_part_of_queue[..end]);
-
-            Cow::Owned(res)
-        }
-    }
-}
-
 #[derive(Clone)]
 struct RecordMeta {
     start_offset: usize,
@@ -247,7 +164,8 @@ impl MemQueue {
     ///
     /// If truncating to a future position, makes the queue advance to that position.
     /// Returns the number of records removed.
-    pub fn truncate(&mut self, truncate_up_to_pos: u64) -> usize {
+    pub fn truncate_head(&mut self, truncate_range: RangeToInclusive<u64>) -> usize {
+        let truncate_up_to_pos = truncate_range.end;
         if self.start_position > truncate_up_to_pos {
             return 0;
         }
@@ -267,7 +185,8 @@ impl MemQueue {
         for record_meta in &mut self.record_metas {
             record_meta.start_offset -= start_offset_to_keep;
         }
-        self.concatenated_records.drain_start(start_offset_to_keep);
+        self.concatenated_records
+            .truncate_head(..start_offset_to_keep);
         self.start_position = truncate_up_to_pos + 1;
         first_record_to_keep
     }
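
For reference, a minimal call-site sketch of the renamed API (the `queue` binding and the position value 42 are hypothetical; only the method names and signatures come from this diff):

    // Before this change: truncate up to and including position 42.
    // let num_removed = queue.truncate(42);

    // After this change: the inclusive upper bound is spelled as a
    // RangeToInclusive, making the "up to and including" semantics
    // explicit at the call site.
    let num_removed = queue.truncate_head(..=42);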