Commit 44e5fb4

db: store *Comparer on Batch
Store a pointer to the Comparer on the Batch struct, rather than copying its individual fields (Compare, FormatKey, AbbreviatedKey) during initialization. These fields are not accessed on hot paths, so the extra pointer indirection is insignificant. Keeping the whole Comparer will ease the use of Comparer.Split in new places, such as allowing the batch iterator to exit early during a SeekPrefixGE.
1 parent e36d078 · commit 44e5fb4
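
For context, here is a sketch of the kind of follow-up the last sentence of the commit message alludes to. It is illustrative only and not part of this commit: it assumes the batchIter type and the base.Comparer API (Compare, Split) from the surrounding pebble package, and the early-exit behavior is hypothetical.

// Hypothetical sketch: with the whole *base.Comparer on the batch, a future
// SeekPrefixGE could use Comparer.Split to notice that the seek landed on a
// key with a different prefix and bail out early, instead of unconditionally
// delegating to SeekGE as it does in the diff below.
func (i *batchIter) SeekPrefixGE(prefix, key []byte, flags base.SeekGEFlags) *base.InternalKV {
	kv := i.SeekGE(key, flags)
	if kv == nil {
		return nil
	}
	c := i.batch.comparer
	// Split returns the length of the key's prefix; comparing that prefix
	// with the sought prefix detects when no matching key exists in the batch.
	if p := kv.K.UserKey[:c.Split(kv.K.UserKey)]; c.Compare(p, prefix) != 0 {
		return nil
	}
	return kv
}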

2 files changed: +36 −51 lines


batch.go

Lines changed: 35 additions & 48 deletions
@@ -270,11 +270,9 @@ type batchInternal struct {
 	// batches. Large batches will set the data field to nil when committed as
 	// the data has been moved to a flushableBatch and inserted into the queue of
 	// memtables.
-	data           []byte
-	cmp            Compare
-	formatKey      base.FormatKey
-	abbreviatedKey AbbreviatedKey
-	opts           batchOptions
+	data     []byte
+	comparer *base.Comparer
+	opts     batchOptions
 
 	// An upper bound on required space to add this batch to a memtable.
 	// Note that although batches are limited to 4 GiB in size, that limit
@@ -464,12 +462,10 @@ func newBatchWithSize(db *DB, size int, opts ...BatchOption) *Batch {
 
 func newIndexedBatch(db *DB, comparer *Comparer) *Batch {
 	i := indexedBatchPool.Get().(*indexedBatch)
-	i.batch.cmp = comparer.Compare
-	i.batch.formatKey = comparer.FormatKey
-	i.batch.abbreviatedKey = comparer.AbbreviatedKey
+	i.batch.comparer = comparer
 	i.batch.db = db
 	i.batch.index = &i.index
-	i.batch.index.Init(&i.batch.data, i.batch.cmp, i.batch.abbreviatedKey)
+	i.batch.index.Init(&i.batch.data, comparer.Compare, comparer.AbbreviatedKey)
 	i.batch.opts.ensureDefaults()
 	return &i.batch
 }
@@ -508,9 +504,7 @@ func (b *Batch) release() {
 	// field. Without using an atomic to clear that field the Go race detector
 	// complains.
 	b.reset()
-	b.cmp = nil
-	b.formatKey = nil
-	b.abbreviatedKey = nil
+	b.comparer = nil
 
 	if b.index == nil {
 		batchPool.Put(b)
@@ -637,14 +631,14 @@ func (b *Batch) Apply(batch *Batch, _ *WriteOptions) error {
 			b.tombstones = nil
 			b.tombstonesSeqNum = 0
 			if b.rangeDelIndex == nil {
-				b.rangeDelIndex = batchskl.NewSkiplist(&b.data, b.cmp, b.abbreviatedKey)
+				b.rangeDelIndex = batchskl.NewSkiplist(&b.data, b.comparer.Compare, b.comparer.AbbreviatedKey)
 			}
 			err = b.rangeDelIndex.Add(uint32(offset))
 		case InternalKeyKindRangeKeySet, InternalKeyKindRangeKeyUnset, InternalKeyKindRangeKeyDelete:
 			b.rangeKeys = nil
 			b.rangeKeysSeqNum = 0
 			if b.rangeKeyIndex == nil {
-				b.rangeKeyIndex = batchskl.NewSkiplist(&b.data, b.cmp, b.abbreviatedKey)
+				b.rangeKeyIndex = batchskl.NewSkiplist(&b.data, b.comparer.Compare, b.comparer.AbbreviatedKey)
 			}
 			err = b.rangeKeyIndex.Add(uint32(offset))
 		default:
@@ -1014,7 +1008,7 @@ func (b *Batch) DeleteRangeDeferred(startLen, endLen int) *DeferredBatchOp {
 		b.tombstonesSeqNum = 0
 		// Range deletions are rare, so we lazily allocate the index for them.
 		if b.rangeDelIndex == nil {
-			b.rangeDelIndex = batchskl.NewSkiplist(&b.data, b.cmp, b.abbreviatedKey)
+			b.rangeDelIndex = batchskl.NewSkiplist(&b.data, b.comparer.Compare, b.comparer.AbbreviatedKey)
 		}
 		b.deferredOp.index = b.rangeDelIndex
 	}
@@ -1069,7 +1063,7 @@ func (b *Batch) incrementRangeKeysCount() {
 		b.rangeKeysSeqNum = 0
 		// Range keys are rare, so we lazily allocate the index for them.
 		if b.rangeKeyIndex == nil {
-			b.rangeKeyIndex = batchskl.NewSkiplist(&b.data, b.cmp, b.abbreviatedKey)
+			b.rangeKeyIndex = batchskl.NewSkiplist(&b.data, b.comparer.Compare, b.comparer.AbbreviatedKey)
 		}
 		b.deferredOp.index = b.rangeKeyIndex
 	}
@@ -1283,7 +1277,6 @@ func (b *Batch) newInternalIter(o *IterOptions) *batchIter {
 
 func (b *Batch) initInternalIter(o *IterOptions, iter *batchIter) {
 	*iter = batchIter{
-		cmp:   b.cmp,
 		batch: b,
 		iter:  b.index.NewIter(o.GetLowerBound(), o.GetUpperBound()),
 		// NB: We explicitly do not propagate the batch snapshot to the point
@@ -1321,7 +1314,7 @@ func (b *Batch) newRangeDelIter(o *IterOptions, batchSnapshot base.SeqNum) *keys
 
 func (b *Batch) initRangeDelIter(_ *IterOptions, iter *keyspan.Iter, batchSnapshot base.SeqNum) {
 	if b.rangeDelIndex == nil {
-		iter.Init(b.cmp, nil)
+		iter.Init(b.comparer.Compare, nil)
 		return
 	}
 
@@ -1335,26 +1328,25 @@ func (b *Batch) initRangeDelIter(_ *IterOptions, iter *keyspan.Iter, batchSnapsh
 	// cleared.
 	nextSeqNum := b.nextSeqNum()
 	if b.tombstones != nil && b.tombstonesSeqNum <= batchSnapshot {
-		iter.Init(b.cmp, b.tombstones)
+		iter.Init(b.comparer.Compare, b.tombstones)
 		return
 	}
 
 	tombstones := make([]keyspan.Span, 0, b.countRangeDels)
 	frag := &keyspan.Fragmenter{
-		Cmp:    b.cmp,
-		Format: b.formatKey,
+		Cmp:    b.comparer.Compare,
+		Format: b.comparer.FormatKey,
 		Emit: func(s keyspan.Span) {
 			tombstones = append(tombstones, s)
 		},
 	}
 	it := &batchIter{
-		cmp:      b.cmp,
 		batch:    b,
 		iter:     b.rangeDelIndex.NewIter(nil, nil),
 		snapshot: batchSnapshot,
 	}
 	fragmentRangeDels(frag, it, int(b.countRangeDels))
-	iter.Init(b.cmp, tombstones)
+	iter.Init(b.comparer.Compare, tombstones)
 
 	// If we just read all the tombstones in the batch (eg, batchSnapshot was
 	// set to b.nextSeqNum()), then cache the tombstones so that a subsequent
@@ -1397,7 +1389,7 @@ func (b *Batch) newRangeKeyIter(o *IterOptions, batchSnapshot base.SeqNum) *keys
 
 func (b *Batch) initRangeKeyIter(_ *IterOptions, iter *keyspan.Iter, batchSnapshot base.SeqNum) {
 	if b.rangeKeyIndex == nil {
-		iter.Init(b.cmp, nil)
+		iter.Init(b.comparer.Compare, nil)
 		return
 	}
 
@@ -1410,26 +1402,25 @@ func (b *Batch) initRangeKeyIter(_ *IterOptions, iter *keyspan.Iter, batchSnapsh
 	// sequence number the cache would've been cleared.
 	nextSeqNum := b.nextSeqNum()
 	if b.rangeKeys != nil && b.rangeKeysSeqNum <= batchSnapshot {
-		iter.Init(b.cmp, b.rangeKeys)
+		iter.Init(b.comparer.Compare, b.rangeKeys)
 		return
 	}
 
 	rangeKeys := make([]keyspan.Span, 0, b.countRangeKeys)
 	frag := &keyspan.Fragmenter{
-		Cmp:    b.cmp,
-		Format: b.formatKey,
+		Cmp:    b.comparer.Compare,
+		Format: b.comparer.FormatKey,
 		Emit: func(s keyspan.Span) {
 			rangeKeys = append(rangeKeys, s)
 		},
 	}
 	it := &batchIter{
-		cmp:      b.cmp,
 		batch:    b,
 		iter:     b.rangeKeyIndex.NewIter(nil, nil),
 		snapshot: batchSnapshot,
 	}
 	fragmentRangeKeys(frag, it, int(b.countRangeKeys))
-	iter.Init(b.cmp, rangeKeys)
+	iter.Init(b.comparer.Compare, rangeKeys)
 
 	// If we just read all the range keys in the batch (eg, batchSnapshot was
 	// set to b.nextSeqNum()), then cache the range keys so that a subsequent
@@ -1553,13 +1544,11 @@ func (b *Batch) reset() {
 	// Zero out the struct, retaining only the fields necessary for manual
 	// reuse.
 	b.batchInternal = batchInternal{
-		data:           b.data,
-		cmp:            b.cmp,
-		formatKey:      b.formatKey,
-		abbreviatedKey: b.abbreviatedKey,
-		opts:           b.opts,
-		index:          b.index,
-		db:             b.db,
+		data:     b.data,
+		comparer: b.comparer,
+		opts:     b.opts,
+		index:    b.index,
+		db:       b.db,
 	}
 	b.applied.Store(false)
 	if b.data != nil {
@@ -1576,7 +1565,7 @@ func (b *Batch) reset() {
 		}
 	}
 	if b.index != nil {
-		b.index.Init(&b.data, b.cmp, b.abbreviatedKey)
+		b.index.Init(&b.data, b.comparer.Compare, b.comparer.AbbreviatedKey)
 	}
 }
 
@@ -1658,7 +1647,6 @@ func (b *Batch) CommitStats() BatchCommitStats {
 // Note: batchIter mirrors the implementation of flushableBatchIter. Keep the
 // two in sync.
 type batchIter struct {
-	cmp   Compare
 	batch *Batch
 	iter  batchskl.Iterator
 	kv    base.InternalKV
@@ -1698,7 +1686,6 @@ func (i *batchIter) SeekGE(key []byte, flags base.SeekGEFlags) *base.InternalKV
 }
 
 func (i *batchIter) SeekPrefixGE(prefix, key []byte, flags base.SeekGEFlags) *base.InternalKV {
-	i.err = nil // clear cached iteration error
 	return i.SeekGE(key, flags)
 }
 
@@ -1849,9 +1836,9 @@ type flushableBatchEntry struct {
 // flushableBatch wraps an existing batch and provides the interfaces needed
 // for making the batch flushable (i.e. able to mimic a memtable).
 type flushableBatch struct {
-	cmp       Compare
-	formatKey base.FormatKey
-	data      []byte
+	cmp      Compare
+	comparer *base.Comparer
+	data     []byte
 
 	// The base sequence number for the entries in the batch. This is the same
 	// value as Batch.seqNum() and is cached here for performance.
@@ -1883,10 +1870,10 @@ var _ flushable = (*flushableBatch)(nil)
 // of the batch data.
 func newFlushableBatch(batch *Batch, comparer *Comparer) (*flushableBatch, error) {
 	b := &flushableBatch{
-		data:      batch.data,
-		cmp:       comparer.Compare,
-		formatKey: comparer.FormatKey,
-		offsets:   make([]flushableBatchEntry, 0, batch.Count()),
+		data:     batch.data,
+		cmp:      comparer.Compare,
+		comparer: comparer,
+		offsets:  make([]flushableBatchEntry, 0, batch.Count()),
 	}
 	if b.data != nil {
 		// Note that this sequence number is not correct when this batch has not
@@ -1969,7 +1956,7 @@ func newFlushableBatch(batch *Batch, comparer *Comparer) (*flushableBatch, error
 	if len(rangeDelOffsets) > 0 {
 		frag := &keyspan.Fragmenter{
 			Cmp:    b.cmp,
-			Format: b.formatKey,
+			Format: b.comparer.FormatKey,
 			Emit: func(s keyspan.Span) {
 				b.tombstones = append(b.tombstones, s)
 			},
@@ -1986,7 +1973,7 @@ func newFlushableBatch(batch *Batch, comparer *Comparer) (*flushableBatch, error
 	if len(rangeKeyOffsets) > 0 {
 		frag := &keyspan.Fragmenter{
 			Cmp:    b.cmp,
-			Format: b.formatKey,
+			Format: b.comparer.FormatKey,
 			Emit: func(s keyspan.Span) {
 				b.rangeKeys = append(b.rangeKeys, s)
 			},

batch_test.go

Lines changed: 1 addition & 3 deletions
@@ -528,9 +528,7 @@ func TestIndexedBatchReset(t *testing.T) {
 	require.Equal(t, 1, indexCount(b.index))
 
 	b.Reset()
-	require.NotNil(t, b.cmp)
-	require.NotNil(t, b.formatKey)
-	require.NotNil(t, b.abbreviatedKey)
+	require.NotNil(t, b.comparer)
 	require.NotNil(t, b.index)
 	require.Nil(t, b.rangeDelIndex)
 	require.Nil(t, b.rangeKeyIndex)
