@@ -256,10 +256,10 @@ test "fixed buffer flush" {
256
256
try testing .expectEqual (10 , buffer [0 ]);
257
257
}
258
258
259
- /// Calls `VTable.drain` but hides the last `preserve_length ` bytes from the
259
+ /// Calls `VTable.drain` but hides the last `preserve_len ` bytes from the
260
260
/// implementation, keeping them buffered.
261
- pub fn drainPreserve (w : * Writer , preserve_length : usize ) Error ! void {
262
- const temp_end = w .end - | preserve_length ;
261
+ pub fn drainPreserve (w : * Writer , preserve_len : usize ) Error ! void {
262
+ const temp_end = w .end - | preserve_len ;
263
263
const preserved = w .buffer [temp_end .. w .end ];
264
264
w .end = temp_end ;
265
265
defer w .end += preserved .len ;
@@ -310,24 +310,38 @@ pub fn writableSliceGreedy(w: *Writer, minimum_length: usize) Error![]u8 {
310
310
}
311
311
312
312
/// Asserts the provided buffer has total capacity enough for `minimum_length`
/// and `preserve_len` combined.
///
/// Does not `advance` the buffer end position.
///
/// When draining the buffer, ensures that at least `preserve_len` bytes
/// remain buffered.
///
/// If `preserve_len` is zero, this is equivalent to `writableSliceGreedy`.
pub fn writableSliceGreedyPreserve(w: *Writer, preserve_len: usize, minimum_length: usize) Error![]u8 {
    assert(w.buffer.len >= preserve_len + minimum_length);
    while (w.buffer.len - w.end < minimum_length) {
        try drainPreserve(w, preserve_len);
    } else {
        // Loop condition failed immediately or after draining: enough free
        // capacity is available past `end`.
        @branchHint(.likely);
        return w.buffer[w.end..];
    }
}
330
330
331
/// Asserts the provided buffer has total capacity enough for `len`.
///
/// Advances the buffer end position by `len`.
///
/// When draining the buffer, ensures that at least `preserve_len` bytes
/// remain buffered.
///
/// If `preserve_len` is zero, this is equivalent to `writableSlice`.
pub fn writableSlicePreserve(w: *Writer, preserve_len: usize, len: usize) Error![]u8 {
    const big_slice = try w.writableSliceGreedyPreserve(preserve_len, len);
    advance(w, len);
    return big_slice[0..len];
}
331
345
pub const WritableVectorIterator = struct {
332
346
first : []u8 ,
333
347
middle : []const []u8 = &.{},
@@ -523,16 +537,16 @@ pub fn write(w: *Writer, bytes: []const u8) Error!usize {
523
537
return w .vtable .drain (w , &.{bytes }, 1 );
524
538
}
525
539
526
- /// Asserts `buffer` capacity exceeds `preserve_length `.
527
- pub fn writePreserve (w : * Writer , preserve_length : usize , bytes : []const u8 ) Error ! usize {
528
- assert (preserve_length <= w .buffer .len );
540
+ /// Asserts `buffer` capacity exceeds `preserve_len `.
541
+ pub fn writePreserve (w : * Writer , preserve_len : usize , bytes : []const u8 ) Error ! usize {
542
+ assert (preserve_len <= w .buffer .len );
529
543
if (w .end + bytes .len <= w .buffer .len ) {
530
544
@branchHint (.likely );
531
545
@memcpy (w .buffer [w .end .. ][0.. bytes .len ], bytes );
532
546
w .end += bytes .len ;
533
547
return bytes .len ;
534
548
}
535
- const temp_end = w .end - | preserve_length ;
549
+ const temp_end = w .end - | preserve_len ;
536
550
const preserved = w .buffer [temp_end .. w .end ];
537
551
w .end = temp_end ;
538
552
defer w .end += preserved .len ;
@@ -552,13 +566,13 @@ pub fn writeAll(w: *Writer, bytes: []const u8) Error!void {
552
566
/// Calls `drain` as many times as necessary such that all of `bytes` are
/// transferred.
///
/// When draining the buffer, ensures that at least `preserve_len` bytes
/// remain buffered.
///
/// Asserts `buffer` capacity exceeds `preserve_len`.
pub fn writeAllPreserve(w: *Writer, preserve_len: usize, bytes: []const u8) Error!void {
    // `writePreserve` may perform short writes; loop until everything is sent.
    var index: usize = 0;
    while (index < bytes.len) index += try w.writePreserve(preserve_len, bytes[index..]);
}
563
577
564
578
/// Renders fmt string with args, calling `writer` with slices of bytes.
@@ -761,11 +775,11 @@ pub fn writeByte(w: *Writer, byte: u8) Error!void {
761
775
}
762
776
}
763
777
764
- /// When draining the buffer, ensures that at least `preserve_length ` bytes
778
+ /// When draining the buffer, ensures that at least `preserve_len ` bytes
765
779
/// remain buffered.
766
- pub fn writeBytePreserve (w : * Writer , preserve_length : usize , byte : u8 ) Error ! void {
780
+ pub fn writeBytePreserve (w : * Writer , preserve_len : usize , byte : u8 ) Error ! void {
767
781
while (w .buffer .len - w .end == 0 ) {
768
- try drainPreserve (w , preserve_length );
782
+ try drainPreserve (w , preserve_len );
769
783
} else {
770
784
@branchHint (.likely );
771
785
w .buffer [w .end ] = byte ;
@@ -788,10 +802,42 @@ test splatByteAll {
788
802
try testing .expectEqualStrings ("7" ** 45 , aw .writer .buffered ());
789
803
}
790
804
805
/// Writes `byte` repeated `n` times, draining as needed, while ensuring that
/// at least `preserve_len` of the most recently written bytes remain buffered
/// once the fast path no longer applies.
pub fn splatBytePreserve(w: *Writer, preserve_len: usize, byte: u8, n: usize) Error!void {
    const new_end = w.end + n;
    if (new_end <= w.buffer.len) {
        // Fast path: everything fits in the remaining buffer capacity.
        @memset(w.buffer[w.end..][0..n], byte);
        w.end = new_end;
        return;
    }
    // If `n` is large, we can ignore `preserve_len` up to a point.
    var remaining = n;
    while (remaining > preserve_len) {
        assert(remaining != 0);
        remaining -= try splatByte(w, byte, remaining - preserve_len);
        if (w.end + remaining <= w.buffer.len) {
            @memset(w.buffer[w.end..][0..remaining], byte);
            w.end += remaining;
            return;
        }
    }
    // All the next bytes received must be preserved.
    if (preserve_len < w.end) {
        @memmove(w.buffer[0..preserve_len], w.buffer[w.end - preserve_len ..][0..preserve_len]);
        w.end = preserve_len;
    }
    while (remaining > 0) remaining -= try w.splatByte(byte, remaining);
}
830
+
791
831
/// Writes the same byte many times, allowing short writes.
///
/// Does maximum of one underlying `VTable.drain`.
pub fn splatByte(w: *Writer, byte: u8, n: usize) Error!usize {
    // Fast path: when the repeated byte fits into the remaining buffer
    // capacity, fill it directly and avoid the vtable-reaching
    // `writeSplat` path entirely.
    if (w.end + n <= w.buffer.len) {
        @branchHint(.likely);
        @memset(w.buffer[w.end..][0..n], byte);
        w.end += n;
        return n;
    }
    return writeSplat(w, &.{&.{byte}}, n);
}
797
843
/// Writes `bytes` repeated `splat` times in full, calling `writeSplat` as
/// many times as necessary until all `bytes.len * splat` bytes are
/// transferred.
pub fn splatBytesAll(w: *Writer, bytes: []const u8, splat: usize) Error!void {
    var remaining_bytes: usize = bytes.len * splat;
    remaining_bytes -= try w.splatBytes(bytes, splat);
    while (remaining_bytes > 0) {
        // Recompute the repeat count from what actually remains; passing the
        // original `splat` here would re-send already-transferred repetitions.
        const leftover_splat = remaining_bytes / bytes.len;
        const leftover_bytes = remaining_bytes % bytes.len;
        // First buffer finishes the partially-sent repetition; the second is
        // repeated `leftover_splat` more times.
        const buffers: [2][]const u8 = .{ bytes[bytes.len - leftover_bytes ..], bytes };
        remaining_bytes -= try w.writeSplat(&buffers, leftover_splat);
    }
}
809
856
0 commit comments