
Commit d83d79c

std.Io.Reader: rework peekDelimiterInclusive
The implementation is now based on calling `fillMore` rather than on an illegal aliased stream into the `Reader`'s own buffer. This commit also includes a disambiguation block inspired by #25162: when the buffer fills up without the delimiter being found, the stream is probed once more so that end of stream can be distinguished from insufficient buffer capacity. If `StreamTooLong` were added to `RebaseError`, this logic could be replaced by removing the exit condition from the while loop; that error code would then represent a `buffer` capacity too small for the operation, replacing the current use of asserts.
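For context, a minimal usage sketch of the reworked API (a sketch against this branch's `std.Io.Reader`; `Reader.fixed`, which wraps an in-memory buffer, is assumed to behave as in the current interface): `peekDelimiterInclusive` returns the bytes up to and including the delimiter without advancing the seek position, so repeated peeks see the same slice.

```zig
const std = @import("std");

pub fn main() !void {
    // Reader.fixed wraps an in-memory buffer; all data is already buffered
    // and the underlying stream is considered exhausted.
    var r: std.Io.Reader = .fixed("first line\nsecond line\n");

    // Peeking does not advance r.seek, so both calls return the same
    // slice, delimiter included.
    const first = try r.peekDelimiterInclusive('\n');
    const again = try r.peekDelimiterInclusive('\n');
    std.debug.assert(std.mem.eql(u8, first, again));
    std.debug.print("{s}", .{first}); // "first line\n"
}
```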
1 parent b9f8b6e commit d83d79c

File tree: 1 file changed (+28, -19 lines)

lib/std/Io/Reader.zig

Lines changed: 28 additions & 19 deletions
```diff
@@ -792,28 +792,37 @@ pub fn takeDelimiterInclusive(r: *Reader, delimiter: u8) DelimiterError![]u8 {
 /// * `peekDelimiterExclusive`
 /// * `takeDelimiterInclusive`
 pub fn peekDelimiterInclusive(r: *Reader, delimiter: u8) DelimiterError![]u8 {
-    const buffer = r.buffer[0..r.end];
-    const seek = r.seek;
-    if (std.mem.indexOfScalarPos(u8, buffer, seek, delimiter)) |delimiter_index| {
-        @branchHint(.likely);
-        return buffer[seek .. delimiter_index + 1];
+    {
+        const contents = r.buffer[0..r.end];
+        const seek = r.seek;
+        if (std.mem.findScalarPos(u8, contents, seek, delimiter)) |end| {
+            @branchHint(.likely);
+            return contents[seek .. end + 1];
+        }
     }
-    // TODO take a parameter for max search length rather than relying on buffer capacity
-    try rebase(r, r.buffer.len);
-    while (r.buffer.len - r.end != 0) {
-        const existing_buffered_len = r.end - r.seek;
-        const end_cap = r.buffer[r.end..];
-        var writer: Writer = .fixed(end_cap);
-        const n = r.vtable.stream(r, &writer, .limited(end_cap.len)) catch |err| switch (err) {
-            error.WriteFailed => unreachable,
-            else => |e| return e,
-        };
-        r.end += n;
-        if (std.mem.indexOfScalarPos(u8, r.buffer[0..r.end], r.seek + existing_buffered_len, delimiter)) |delimiter_index| {
-            return r.buffer[r.seek .. delimiter_index + 1];
+    while (true) {
+        const content_len = r.end - r.seek;
+        if (r.buffer.len - content_len == 0) break;
+        try fillMore(r);
+        const seek = r.seek;
+        const contents = r.buffer[0..r.end];
+        if (std.mem.findScalarPos(u8, contents, seek + content_len, delimiter)) |end| {
+            return contents[seek .. end + 1];
         }
     }
-    return error.StreamTooLong;
+    // It might or might not be end of stream. There is no more buffer space
+    // left to disambiguate. If `StreamTooLong` was added to `RebaseError` then
+    // this logic could be replaced by removing the exit condition from the
+    // above while loop. That error code would represent when `buffer` capacity
+    // is too small for an operation, replacing the current use of asserts.
+    var failing_writer = Writer.failing;
+    while (r.vtable.stream(r, &failing_writer, .limited(1))) |n| {
+        assert(n == 0);
+    } else |err| switch (err) {
+        error.WriteFailed => return error.StreamTooLong,
+        error.ReadFailed => |e| return e,
+        error.EndOfStream => |e| return e,
+    }
 }
 
 /// Returns a slice of the next bytes of buffered data from the stream until
```
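The trailing probe in the new code is what implements the disambiguation mentioned in the commit message: once the search loop exits because the buffer has no spare capacity left, a single stream call into a failing writer distinguishes a genuinely exhausted stream (`EndOfStream`) from a buffer too small to hold the delimited data (`StreamTooLong`). A minimal sketch of the observable difference, assuming a `Reader.fixed` stream reports `EndOfStream` once its contents are fully buffered:

```zig
const std = @import("std");

test "exhausted stream is not conflated with a too-small buffer" {
    // No '\n' in the buffered contents and nothing left to stream, so the
    // final probe in peekDelimiterInclusive observes EndOfStream rather
    // than reporting StreamTooLong.
    var r: std.Io.Reader = .fixed("no delimiter here");
    try std.testing.expectError(error.EndOfStream, r.peekDelimiterInclusive('\n'));
}
```

With the previous implementation, the loop's exit path unconditionally returned `error.StreamTooLong`, even when the stream had simply ended.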
