Skip to content

Commit b4b62cc

Browse files
levb authored and claude committed
Review fixes: cap setIsCached to bitmap bounds, concurrent test, nits
- Fix OOB panic in setIsCached when range extends past cache size. The old sync.Map.Store silently accepted out-of-bounds keys, but the bitmap would panic on index OOB. Cap n to len(dirty)*64. - Add TestSetIsCached_PastCacheSize to verify the fix. - Add TestSetIsCached_ConcurrentOverlapping: 8 goroutines with overlapping ranges under -race to prove atomic OR correctness. - Remove redundant sort.SliceIsSorted (covered by require.Equal). - Use 2 MiB block size in BoundaryCrossing test (real hugepage size) to exercise a second block size and drop nolint:unparam. - Make benchmark consts consistently int64. Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
1 parent ec523da commit b4b62cc

File tree

2 files changed

+62
-9
lines changed

2 files changed

+62
-9
lines changed

packages/orchestrator/pkg/sandbox/block/cache.go

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -284,6 +284,12 @@ func (c *Cache) setIsCached(off, length int64) {
284284
start := off / c.blockSize
285285
n := (off + length + c.blockSize - 1) / c.blockSize
286286

287+
// Cap to the actual bitmap size so callers that pass a range extending
288+
// past c.size (e.g. a partial last segment) don't panic on index OOB.
289+
if maxBlock := int64(len(c.dirty)) * 64; n > maxBlock {
290+
n = maxBlock
291+
}
292+
287293
for i := start; i < n; {
288294
w := i / 64
289295
lo := i % 64

packages/orchestrator/pkg/sandbox/block/cache_dirty_test.go

Lines changed: 56 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,7 @@ package block
22

33
import (
44
"path/filepath"
5-
"sort"
5+
"sync"
66
"testing"
77

88
"github.com/stretchr/testify/require"
@@ -11,7 +11,7 @@ import (
1111
// newTestCache creates a minimal Cache for testing dirty-bit operations.
1212
// It uses a small blockSize and does NOT create a real mmap — only the dirty
1313
// array and blockSize are initialized.
14-
func newTestCache(t *testing.T, numBlocks int64, blockSize int64) *Cache { //nolint:unparam // blockSize kept as param for test flexibility
14+
func newTestCache(t *testing.T, numBlocks int64, blockSize int64) *Cache {
1515
t.Helper()
1616

1717
size := numBlocks * blockSize
@@ -66,8 +66,8 @@ func TestMarkBlockRangeCached_MultipleBlocks(t *testing.T) {
6666
func TestMarkBlockRangeCached_BoundaryCrossing(t *testing.T) {
6767
t.Parallel()
6868

69-
const blockSize int64 = 4096
70-
// Use 256 blocks to ensure we span word boundaries (word = 64 blocks).
69+
const blockSize int64 = 2 * 1024 * 1024 // 2 MiB hugepage block size
70+
// Use 256 blocks at 2 MiB to exercise a different block size and span word boundaries (word = 64 blocks).
7171
c := newTestCache(t, 256, blockSize)
7272

7373
// Mark blocks 60..67 (crosses the 64-block word boundary).
@@ -171,7 +171,6 @@ func TestDirtySortedKeys_Sorted(t *testing.T) {
171171
}
172172

173173
require.Equal(t, expected, keys)
174-
require.True(t, sort.SliceIsSorted(keys, func(i, j int) bool { return keys[i] < keys[j] }))
175174
}
176175

177176
func TestDirtySortedKeys_Range(t *testing.T) {
@@ -231,14 +230,62 @@ func TestMarkBlockRangeCached_OverlappingRanges(t *testing.T) {
231230
require.False(t, c.isBlockCached(13))
232231
}
233232

233+
func TestSetIsCached_PastCacheSize(t *testing.T) {
234+
t.Parallel()
235+
236+
const blockSize int64 = 4096
237+
const numBlocks int64 = 128
238+
c := newTestCache(t, numBlocks, blockSize)
239+
240+
// Pass a range that extends past cache size — should not panic.
241+
c.setIsCached((numBlocks-2)*blockSize, 10*blockSize)
242+
243+
// Last two blocks should be cached.
244+
require.True(t, c.isBlockCached(numBlocks-2))
245+
require.True(t, c.isBlockCached(numBlocks-1))
246+
}
247+
248+
func TestSetIsCached_ConcurrentOverlapping(t *testing.T) {
249+
t.Parallel()
250+
251+
const blockSize int64 = 4096
252+
const numBlocks int64 = 512
253+
c := newTestCache(t, numBlocks, blockSize)
254+
255+
// 8 goroutines each mark an overlapping 128-block range.
256+
// Ranges: [0,128), [32,160), [64,192), ... [224,352)
257+
const goroutines = 8
258+
const rangeBlocks int64 = 128
259+
const strideBlocks int64 = 32
260+
261+
var wg sync.WaitGroup
262+
wg.Add(goroutines)
263+
for g := range goroutines {
264+
go func() {
265+
defer wg.Done()
266+
off := int64(g) * strideBlocks * blockSize
267+
c.setIsCached(off, rangeBlocks*blockSize)
268+
}()
269+
}
270+
wg.Wait()
271+
272+
// Union covers blocks 0..(goroutines-1)*stride+rangeBlocks-1.
273+
lastBlock := int64(goroutines-1)*strideBlocks + rangeBlocks
274+
for i := range lastBlock {
275+
require.True(t, c.isBlockCached(i), "block %d should be cached", i)
276+
}
277+
278+
require.False(t, c.isBlockCached(lastBlock))
279+
}
280+
234281
// --- Benchmarks ---
235282

236283
const (
237284
benchBlockSize int64 = 4096
238-
benchNumBlocks int64 = 16384 // 64 MiB at 4K blocks — realistic memfile size
239-
benchCacheSize = benchNumBlocks * benchBlockSize
240-
benchChunkSize int64 = 4 * 1024 * 1024 // 4 MiB — MemoryChunkSize
241-
benchChunkCount = benchCacheSize / benchChunkSize
285+
benchNumBlocks int64 = 16384 // 64 MiB at 4K blocks — realistic memfile size
286+
benchCacheSize int64 = benchNumBlocks * benchBlockSize
287+
benchChunkSize int64 = 4 * 1024 * 1024 // 4 MiB — MemoryChunkSize
288+
benchChunkCount int64 = benchCacheSize / benchChunkSize
242289
)
243290

244291
func newBenchCache(b *testing.B) *Cache {

0 commit comments

Comments (0)