1 change: 1 addition & 0 deletions CHANGELOG.md
@@ -9,6 +9,7 @@
* [CHANGE] Change all max async concurrency default values `50` to `3` #6268
* [CHANGE] Change default value of `-blocks-storage.bucket-store.index-cache.multilevel.max-async-concurrency` from `50` to `3` #6265
* [CHANGE] Enable Compactor and Alertmanager in target all. #6204
+* [FEATURE] Implement Prometheus remote write 2.0. #6320
* [FEATURE] Ruler: Pagination support for List Rules API. #6299
* [FEATURE] Query Frontend/Querier: Add protobuf codec `-api.querier-default-codec` and the option to choose response compression type `-querier.response-compression`. #5527
* [FEATURE] Ruler: Experimental: Add `ruler.frontend-address` to allow query to query frontends instead of ingesters. #6151
1 change: 1 addition & 0 deletions Makefile
@@ -85,6 +85,7 @@ $(foreach exe, $(EXES), $(eval $(call dep_exe, $(exe))))

# Manually declared dependencies And what goes into each exe
pkg/cortexpb/cortex.pb.go: pkg/cortexpb/cortex.proto
+pkg/cortexpbv2/cortexv2.pb.go: pkg/cortexpbv2/cortexv2.proto
pkg/ingester/client/ingester.pb.go: pkg/ingester/client/ingester.proto
pkg/distributor/distributorpb/distributor.pb.go: pkg/distributor/distributorpb/distributor.proto
pkg/ingester/wal.pb.go: pkg/ingester/wal.proto
6 changes: 3 additions & 3 deletions pkg/cortexpb/slicesPool.go
@@ -13,7 +13,7 @@ type byteSlicePools struct {
pools []sync.Pool
}

-func newSlicePool(pools int) *byteSlicePools {
+func NewSlicePool(pools int) *byteSlicePools {
sp := byteSlicePools{}
sp.init(pools)
return &sp
@@ -32,7 +32,7 @@ func (sp *byteSlicePools) init(pools int) {
}
}

-func (sp *byteSlicePools) getSlice(size int) *[]byte {
+func (sp *byteSlicePools) GetSlice(size int) *[]byte {
index := int(math.Ceil(math.Log2(float64(size)))) - minPoolSizePower

if index >= len(sp.pools) {
@@ -50,7 +50,7 @@ func (sp *byteSlicePools) getSlice(size int) *[]byte {
return s
}

-func (sp *byteSlicePools) reuseSlice(s *[]byte) {
+func (sp *byteSlicePools) ReuseSlice(s *[]byte) {
index := int(math.Floor(math.Log2(float64(cap(*s))))) - minPoolSizePower

if index >= len(sp.pools) || index < 0 {
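The three pool functions above are promoted from unexported to exported. A plausible motivation is letting code outside pkg/cortexpb (for example the new pkg/cortexpbv2 package this PR adds for remote write 2.0) share the same byte-slice pooling rather than duplicating it. A minimal usage sketch, assuming the exported functions keep the semantics of the unexported versions; the program and variable names are illustrative only:

```go
package main

import (
	"fmt"

	"github.com/cortexproject/cortex/pkg/cortexpb"
)

func main() {
	// NewSlicePool(20) builds 20 sync.Pool buckets, each holding byte
	// slices whose capacity is a fixed power of two.
	pool := cortexpb.NewSlicePool(20)

	// GetSlice returns a pointer to a slice with len == 1000, backed by a
	// pooled buffer whose capacity is rounded up to a power of two.
	buf := pool.GetSlice(1000)
	fmt.Println(len(*buf)) // 1000

	// ... encode into *buf ...

	// ReuseSlice hands the buffer back to its bucket so a later GetSlice
	// of a similar size can reuse the allocation.
	pool.ReuseSlice(buf)
}
```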
12 changes: 6 additions & 6 deletions pkg/cortexpb/slicesPool_test.go
@@ -9,22 +9,22 @@ import (
)

func TestFuzzyByteSlicePools(t *testing.T) {
-sut := newSlicePool(20)
+sut := NewSlicePool(20)
maxByteSize := int(math.Pow(2, 20+minPoolSizePower-1))

for i := 0; i < 1000; i++ {
size := rand.Int() % maxByteSize
-s := sut.getSlice(size)
+s := sut.GetSlice(size)
assert.Equal(t, len(*s), size)
-sut.reuseSlice(s)
+sut.ReuseSlice(s)
}
}

func TestReturnSliceSmallerThanMin(t *testing.T) {
-sut := newSlicePool(20)
+sut := NewSlicePool(20)
size := 3
buff := make([]byte, 0, size)
-sut.reuseSlice(&buff)
-buff2 := sut.getSlice(size * 2)
+sut.ReuseSlice(&buff)
+buff2 := sut.GetSlice(size * 2)
assert.Equal(t, len(*buff2), size*2)
}
6 changes: 3 additions & 3 deletions pkg/cortexpb/timeseries.go
@@ -47,7 +47,7 @@ var (
}
},
}
-bytePool = newSlicePool(20)
+bytePool = NewSlicePool(20)
)

// PreallocConfig configures how structures will be preallocated to optimise
@@ -86,7 +86,7 @@ func (p *PreallocTimeseries) Unmarshal(dAtA []byte) error {

func (p *PreallocWriteRequest) Marshal() (dAtA []byte, err error) {
size := p.Size()
-p.data = bytePool.getSlice(size)
+p.data = bytePool.GetSlice(size)
dAtA = *p.data
n, err := p.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
@@ -97,7 +97,7 @@ func (p *PreallocWriteRequest) Marshal() (dAtA []byte, err error) {

func ReuseWriteRequest(req *PreallocWriteRequest) {
if req.data != nil {
-bytePool.reuseSlice(req.data)
+bytePool.ReuseSlice(req.data)
req.data = nil
}
req.Source = 0
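For context, the now-exported pool backs the existing marshal/reuse lifecycle of PreallocWriteRequest. A rough sketch of that lifecycle, assuming the types behave as the hunks above show (the gRPC send itself is omitted and the request contents are placeholders):

```go
package main

import (
	"log"

	"github.com/cortexproject/cortex/pkg/cortexpb"
)

func main() {
	// PreallocWriteRequest wraps cortexpb.WriteRequest and remembers the
	// pooled buffer that Marshal borrows from the package-level bytePool.
	// Source is set only so the example request is not empty.
	req := &cortexpb.PreallocWriteRequest{
		WriteRequest: cortexpb.WriteRequest{Source: cortexpb.RULE},
	}

	// Marshal sizes the request, takes a buffer via GetSlice and encodes
	// into it; the returned bytes alias that pooled buffer.
	data, err := req.Marshal()
	if err != nil {
		log.Fatal(err)
	}
	_ = data // hand the encoded bytes to the transport here

	// Once nothing references the encoded bytes any more, ReuseWriteRequest
	// returns the buffer to the pool (via ReuseSlice) and resets req.
	cortexpb.ReuseWriteRequest(req)
}
```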