|
// Copyright 2024 The Cockroach Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the License.

package cralloc

import "sync"
// BatchAllocator amortizes allocation cost by carving small objects of type
// T out of larger batches, so that many Alloc calls share one heap
// allocation.
//
// The tradeoff is that every object carved from a batch shares that batch's
// lifetime, which can keep memory alive longer than strictly necessary. In
// addition, up to O(GOMAXPROCS) partially-used batches may exist at any one
// time. BatchAllocator is therefore appropriate only when T is small and
// holds no references to large objects.
//
// Sample usage:
//
//	var someTypeBatchAlloc = MakeBatchAllocator[SomeType]() // global
//	...
//	x := someTypeBatchAlloc.Alloc()
type BatchAllocator[T any] struct {
	// pool holds partially-used batches. sync.Pool approximates one batch
	// per CPU, which avoids a mutex and provides good memory locality.
	pool sync.Pool
}
| 39 | + |
| 40 | +// MakeBatchAllocator initializes a BatchAllocator. |
| 41 | +func MakeBatchAllocator[T any]() BatchAllocator[T] { |
| 42 | + return BatchAllocator[T]{ |
| 43 | + pool: sync.Pool{ |
| 44 | + New: func() any { |
| 45 | + return &batch[T]{} |
| 46 | + }, |
| 47 | + }, |
| 48 | + } |
| 49 | +} |
| 50 | + |
// batchSize is the number of T instances carved out of a single batch (i.e.
// a single heap allocation).
const batchSize = 8
| 52 | + |
| 53 | +// Init must be called before the batch allocator can be used. |
| 54 | +func (ba *BatchAllocator[T]) Init() { |
| 55 | + ba.pool.New = func() any { |
| 56 | + return &batch[T]{} |
| 57 | + } |
| 58 | +} |
| 59 | + |
| 60 | +// Alloc returns a new zeroed out instance of T. |
| 61 | +func (ba *BatchAllocator[T]) Alloc() *T { |
| 62 | + b := ba.pool.Get().(*batch[T]) |
| 63 | + // If Init() was not called, the first Alloc() will panic here. |
| 64 | + t := &b.buf[b.used] |
| 65 | + b.used++ |
| 66 | + if b.used < batchSize { |
| 67 | + // Batch has more objects available, put it back into the pool. |
| 68 | + ba.pool.Put(b) |
| 69 | + } |
| 70 | + return t |
| 71 | +} |
| 72 | + |
| 73 | +type batch[T any] struct { |
| 74 | + // elements buf[:used] have been returned via Alloc. The rest are unused and |
| 75 | + // zero. |
| 76 | + buf [batchSize]T |
| 77 | + used int8 |
| 78 | +} |