From b9020406f78166792381c2832fb0b2eccd766971 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Giedrius=20Statkevi=C4=8Dius?= Date: Thu, 6 Nov 2025 13:22:28 +0200 Subject: [PATCH] mem: allow using io.WriterTo with an io.LimitedReader MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The caller of this function can wrap the io.Reader in an io.LimitedReader. This happens if some max message size is set. If so, this `io.WriterTo` check doesn't work anymore. Work around this by checking whether it is an `io.LimitedReader`. Overall, the problem I'm trying to solve is that the constant ```go buf := pool.Get(readAllBufSize) ``` 32KiB is way too much in our use case. Messages are typically at most about 1KiB in size, so we always overallocate by ~31KiB in the best-case scenario, so we want to use the `io.WriterTo` branch so that we can appropriately size the buffer. Signed-off-by: Giedrius Statkevičius --- mem/buffer_slice.go | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/mem/buffer_slice.go b/mem/buffer_slice.go index 084fb19c6d15..7fe7eeb1115e 100644 --- a/mem/buffer_slice.go +++ b/mem/buffer_slice.go @@ -257,6 +257,18 @@ func ReadAll(r io.Reader, pool BufferPool) (BufferSlice, error) { _, err := wt.WriteTo(w) return result, err } + + if lr, ok := r.(*io.LimitedReader); ok { + if wt, ok := lr.R.(io.WriterTo); ok { + // This is more optimal since wt knows the size of chunks it wants to + // write and, hence, we can allocate buffers of an optimal size to fit + // them. E.g. might be a single big chunk, and we wouldn't chop it + // into pieces. + w := NewWriter(&result, pool) + _, err := wt.WriteTo(w) + return result, err + } + } nextBuffer: for { buf := pool.Get(readAllBufSize)