fix(options): Add buffer sizes to failover. Update README #3468


Merged · 4 commits · Aug 11, 2025
3 changes: 2 additions & 1 deletion .github/wordlist.txt
@@ -74,4 +74,5 @@ Azure
StreamingCredentialsProvider
oauth
entraid
MiB
MiB
KiB
18 changes: 18 additions & 0 deletions README.md
@@ -20,6 +20,7 @@ In `go-redis` we are aiming to support the last three releases of Redis. Current
- [Redis 7.2](https://raw.githubusercontent.com/redis/redis/7.2/00-RELEASENOTES) - using Redis Stack 7.2 for modules support
- [Redis 7.4](https://raw.githubusercontent.com/redis/redis/7.4/00-RELEASENOTES) - using Redis Stack 7.4 for modules support
- [Redis 8.0](https://raw.githubusercontent.com/redis/redis/8.0/00-RELEASENOTES) - using Redis CE 8.0 where modules are included
- [Redis 8.2](https://raw.githubusercontent.com/redis/redis/8.2/00-RELEASENOTES) - using Redis CE 8.2 where modules are included

Although the `go.mod` states it requires at minimum `go 1.18`, our CI is configured to run the tests against all three
versions of Redis and latest two versions of Go ([1.23](https://go.dev/doc/devel/release#go1.23.0),
@@ -77,6 +78,7 @@ key value NoSQL database that uses RocksDB as storage engine and is compatible w
- [Redis Ring](https://redis.uptrace.dev/guide/ring.html).
- [Redis Performance Monitoring](https://redis.uptrace.dev/guide/redis-performance-monitoring.html).
- [Redis Probabilistic [RedisStack]](https://redis.io/docs/data-types/probabilistic/)
- [Customizable read and write buffer sizes](#custom-buffer-sizes).

## Installation

@@ -372,6 +374,21 @@ For example:
```
You can find further details in the [query dialect documentation](https://redis.io/docs/latest/develop/interact/search-and-query/advanced-concepts/dialects/).
#### Custom buffer sizes
Prior to v9.12, the buffer size was the Go default of 4096 bytes. Starting from v9.12,
go-redis uses 256KiB read and write buffers by default for optimal performance.
For high-throughput applications or large pipelines, you can customize buffer sizes:
```go
rdb := redis.NewClient(&redis.Options{
Addr: "localhost:6379",
ReadBufferSize: 1024 * 1024, // 1MiB read buffer
WriteBufferSize: 1024 * 1024, // 1MiB write buffer
})
```
**Important**: If you experience any issues with the default buffer sizes, try setting them back to the Go default of 4096 bytes.
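As a minimal sketch of that fallback (the address is a placeholder; 4096 bytes matches the `bufio` default used before v9.12):
```go
rdb := redis.NewClient(&redis.Options{
    Addr:            "localhost:6379",
    ReadBufferSize:  4096, // pre-v9.12 bufio default
    WriteBufferSize: 4096, // pre-v9.12 bufio default
})
```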
## Contributing
We welcome contributions to the go-redis library! If you have a bug fix, feature request, or improvement, please open an issue or pull request on GitHub.
We appreciate your help in making go-redis better for everyone.
@@ -412,6 +429,7 @@ vals, err := rdb.Eval(ctx, "return {KEYS[1],ARGV[1]}", []string{"key"}, "hello")
res, err := rdb.Do(ctx, "set", "key", "value").Result()
```
## Run the test
go-redis will start a redis-server and run the test cases.
28 changes: 14 additions & 14 deletions internal/pool/buffer_size_test.go
@@ -34,12 +34,12 @@ var _ = Describe("Buffer Size Configuration", func() {
Expect(err).NotTo(HaveOccurred())
defer connPool.CloseConn(cn)

// Check that default buffer sizes are used (0.5MiB)
// Check that default buffer sizes are used (256KiB)
writerBufSize := getWriterBufSizeUnsafe(cn)
readerBufSize := getReaderBufSizeUnsafe(cn)

Expect(writerBufSize).To(Equal(proto.DefaultBufferSize)) // Default 0.5MiB buffer size
Expect(readerBufSize).To(Equal(proto.DefaultBufferSize)) // Default 0.5MiB buffer size
Expect(writerBufSize).To(Equal(proto.DefaultBufferSize)) // Default 256KiB buffer size
Expect(readerBufSize).To(Equal(proto.DefaultBufferSize)) // Default 256KiB buffer size
})

It("should use custom buffer sizes when specified", func() {
@@ -79,28 +79,28 @@ var _ = Describe("Buffer Size Configuration", func() {
Expect(err).NotTo(HaveOccurred())
defer connPool.CloseConn(cn)

// Check that default buffer sizes are used (0.5MiB)
// Check that default buffer sizes are used (256KiB)
writerBufSize := getWriterBufSizeUnsafe(cn)
readerBufSize := getReaderBufSizeUnsafe(cn)

Expect(writerBufSize).To(Equal(proto.DefaultBufferSize)) // Default 0.5MiB buffer size
Expect(readerBufSize).To(Equal(proto.DefaultBufferSize)) // Default 0.5MiB buffer size
Expect(writerBufSize).To(Equal(proto.DefaultBufferSize)) // Default 256KiB buffer size
Expect(readerBufSize).To(Equal(proto.DefaultBufferSize)) // Default 256KiB buffer size
})

It("should use 0.5MiB default buffer sizes for standalone NewConn", func() {
// Test that NewConn (without pool) also uses 0.5MiB defaults
It("should use 256KiB default buffer sizes for standalone NewConn", func() {
// Test that NewConn (without pool) also uses 256KiB buffers
netConn := newDummyConn()
cn := pool.NewConn(netConn)
defer cn.Close()

writerBufSize := getWriterBufSizeUnsafe(cn)
readerBufSize := getReaderBufSizeUnsafe(cn)

Expect(writerBufSize).To(Equal(proto.DefaultBufferSize)) // Default 0.5MiB buffer size
Expect(readerBufSize).To(Equal(proto.DefaultBufferSize)) // Default 0.5MiB buffer size
Expect(writerBufSize).To(Equal(proto.DefaultBufferSize)) // Default 256KiB buffer size
Expect(readerBufSize).To(Equal(proto.DefaultBufferSize)) // Default 256KiB buffer size
})

It("should use 0.5MiB defaults even when pool is created directly without buffer sizes", func() {
It("should use 256KiB defaults even when pool is created directly without buffer sizes", func() {
// Test the scenario where someone creates a pool directly (like in tests)
// without setting ReadBufferSize and WriteBufferSize
connPool = pool.NewConnPool(&pool.Options{
@@ -114,12 +114,12 @@ var _ = Describe("Buffer Size Configuration", func() {
Expect(err).NotTo(HaveOccurred())
defer connPool.CloseConn(cn)

// Should still get 0.5MiB defaults because NewConnPool sets them
// Should still get 256KiB defaults because NewConnPool sets them
writerBufSize := getWriterBufSizeUnsafe(cn)
readerBufSize := getReaderBufSizeUnsafe(cn)

Expect(writerBufSize).To(Equal(proto.DefaultBufferSize)) // Default 0.5MiB buffer size
Expect(readerBufSize).To(Equal(proto.DefaultBufferSize)) // Default 0.5MiB buffer size
Expect(writerBufSize).To(Equal(proto.DefaultBufferSize)) // Default 256KiB buffer size
Expect(readerBufSize).To(Equal(proto.DefaultBufferSize)) // Default 256KiB buffer size
})
})

4 changes: 2 additions & 2 deletions internal/proto/reader.go
@@ -12,8 +12,8 @@ import (
"github.com/redis/go-redis/v9/internal/util"
)

// DefaultBufferSize is the default size for read/write buffers (0.5MiB)
const DefaultBufferSize = 512 * 1024
// DefaultBufferSize is the default size for read/write buffers (256 KiB).
const DefaultBufferSize = 256 * 1024

// redis resp protocol data type.
const (
4 changes: 2 additions & 2 deletions options.go
@@ -135,14 +135,14 @@ type Options struct {
// Larger buffers can improve performance for commands that return large responses.
// Smaller buffers can improve memory usage for larger pools.
//
// default: 0.5MiB (524288 bytes)
// default: 256KiB (262144 bytes)
ReadBufferSize int

// WriteBufferSize is the size of the bufio.Writer buffer for each connection.
// Larger buffers can improve performance for large pipelines and commands with many arguments.
// Smaller buffers can improve memory usage for larger pools.
//
// default: 0.5MiB (524288 bytes)
// default: 256KiB (262144 bytes)
WriteBufferSize int

// PoolFIFO type of connection pool.
4 changes: 2 additions & 2 deletions osscluster.go
@@ -96,14 +96,14 @@ type ClusterOptions struct {
// Larger buffers can improve performance for commands that return large responses.
// Smaller buffers can improve memory usage for larger pools.
//
// default: 0.5MiB (524288 bytes)
// default: 256KiB (262144 bytes)
ReadBufferSize int

// WriteBufferSize is the size of the bufio.Writer buffer for each connection.
// Larger buffers can improve performance for large pipelines and commands with many arguments.
// Smaller buffers can improve memory usage for larger pools.
//
// default: 0.5MiB (524288 bytes)
// default: 256KiB (262144 bytes)
WriteBufferSize int

TLSConfig *tls.Config
4 changes: 2 additions & 2 deletions ring.go
@@ -128,14 +128,14 @@ type RingOptions struct {
// Larger buffers can improve performance for commands that return large responses.
// Smaller buffers can improve memory usage for larger pools.
//
// default: 0.5MiB (524288 bytes)
// default: 256KiB (262144 bytes)
ReadBufferSize int

// WriteBufferSize is the size of the bufio.Writer buffer for each connection.
// Larger buffers can improve performance for large pipelines and commands with many arguments.
// Smaller buffers can improve memory usage for larger pools.
//
// default: 0.5MiB (524288 bytes)
// default: 256KiB (262144 bytes)
WriteBufferSize int

TLSConfig *tls.Config
23 changes: 23 additions & 0 deletions sentinel.go
@@ -90,6 +90,20 @@ type FailoverOptions struct {
WriteTimeout time.Duration
ContextTimeoutEnabled bool

// ReadBufferSize is the size of the bufio.Reader buffer for each connection.
// Larger buffers can improve performance for commands that return large responses.
// Smaller buffers can improve memory usage for larger pools.
//
// default: 256KiB (262144 bytes)
ReadBufferSize int

// WriteBufferSize is the size of the bufio.Writer buffer for each connection.
// Larger buffers can improve performance for large pipelines and commands with many arguments.
// Smaller buffers can improve memory usage for larger pools.
//
// default: 256KiB (262144 bytes)
WriteBufferSize int

PoolFIFO bool

PoolSize int
@@ -138,6 +152,9 @@ func (opt *FailoverOptions) clientOptions() *Options {
MinRetryBackoff: opt.MinRetryBackoff,
MaxRetryBackoff: opt.MaxRetryBackoff,

ReadBufferSize: opt.ReadBufferSize,
WriteBufferSize: opt.WriteBufferSize,

DialTimeout: opt.DialTimeout,
ReadTimeout: opt.ReadTimeout,
WriteTimeout: opt.WriteTimeout,
@@ -178,6 +195,9 @@ func (opt *FailoverOptions) sentinelOptions(addr string) *Options {
MinRetryBackoff: opt.MinRetryBackoff,
MaxRetryBackoff: opt.MaxRetryBackoff,

ReadBufferSize: opt.ReadBufferSize,
WriteBufferSize: opt.WriteBufferSize,

DialTimeout: opt.DialTimeout,
ReadTimeout: opt.ReadTimeout,
WriteTimeout: opt.WriteTimeout,
@@ -224,6 +244,9 @@ func (opt *FailoverOptions) clusterOptions() *ClusterOptions {
MinRetryBackoff: opt.MinRetryBackoff,
MaxRetryBackoff: opt.MaxRetryBackoff,

ReadBufferSize: opt.ReadBufferSize,
WriteBufferSize: opt.WriteBufferSize,

DialTimeout: opt.DialTimeout,
ReadTimeout: opt.ReadTimeout,
WriteTimeout: opt.WriteTimeout,
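The diff adds the `FailoverOptions` buffer fields and forwards them, but includes no usage snippet; the following is a minimal sketch of how they could be set on a Sentinel-backed client (master name and sentinel address are placeholders):

```go
rdb := redis.NewFailoverClient(&redis.FailoverOptions{
    MasterName:    "mymaster",
    SentinelAddrs: []string{"localhost:26379"},

    // Forwarded by this PR to clientOptions, sentinelOptions and clusterOptions.
    ReadBufferSize:  512 * 1024, // 512KiB read buffer
    WriteBufferSize: 512 * 1024, // 512KiB write buffer
})
```

Leaving both fields at zero should still yield the 256KiB defaults, since `NewConnPool` fills them in (see the pool tests above).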