Skip to content

Commit 8273c9c

Browse files
committed
sstable: use CommonProperties when Properties is unnecessary
Use CommonProperties for functions where having all of the contents of Properties is unnecessary.
1 parent e6a8459 commit 8273c9c

File tree

6 files changed

+22
-198
lines changed

6 files changed

+22
-198
lines changed

compaction.go

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -3224,7 +3224,7 @@ func (c *compaction) makeVersionEdit(result compact.Result) (*versionEdit, error
32243224
// If the file didn't contain any range deletions, we can fill its
32253225
// table stats now, avoiding unnecessarily loading the table later.
32263226
maybeSetStatsFromProperties(
3227-
fileMeta.PhysicalMeta(), &t.WriterMeta.Properties,
3227+
fileMeta.PhysicalMeta(), &t.WriterMeta.Properties.CommonProperties,
32283228
)
32293229

32303230
if t.WriterMeta.HasPointKeys {

ingest.go

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -357,7 +357,7 @@ func ingestLoad1(
357357
// disallowing removal of an open file. Under MemFS, if we don't populate
358358
// meta.Stats here, the file will be loaded into the file cache for
359359
// calculating stats before we can remove the original link.
360-
maybeSetStatsFromProperties(meta.PhysicalMeta(), &r.Properties)
360+
maybeSetStatsFromProperties(meta.PhysicalMeta(), &r.Properties.CommonProperties)
361361

362362
{
363363
iter, err := r.NewIter(sstable.NoTransforms, nil /* lower */, nil /* upper */, sstable.AssertNoBlobHandles)

sstable/properties.go

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -252,12 +252,12 @@ func writeProperties(loaded map[uintptr]struct{}, v reflect.Value, buf *bytes.Bu
252252
}
253253
}
254254

255-
func (p *Properties) GetScaledProperties(backingSize, size uint64) Properties {
255+
func (p *Properties) GetScaledProperties(backingSize, size uint64) CommonProperties {
256256
scale := func(a uint64) uint64 {
257257
return (a*size + backingSize - 1) / backingSize
258258
}
259259

260-
props := *p
260+
props := p.CommonProperties
261261
props.RawKeySize = scale(p.RawKeySize)
262262
props.RawValueSize = scale(p.RawValueSize)
263263
props.NumEntries = scale(p.NumEntries)

sstable/testdata/virtual_reader_props

Lines changed: 8 additions & 120 deletions
Original file line numberDiff line numberDiff line change
@@ -21,21 +21,9 @@ props:
2121
rocksdb.num.entries: 1
2222
rocksdb.raw.key.size: 3
2323
rocksdb.raw.value.size: 1
24-
rocksdb.deleted.keys: 0
25-
rocksdb.num.range-deletions: 0
2624
rocksdb.num.data.blocks: 1
2725
rocksdb.compression: Snappy
28-
rocksdb.compression_options: window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0;
29-
rocksdb.comparator: pebble.internal.testkeys
30-
rocksdb.data.size: 50
31-
rocksdb.filter.size: 0
32-
rocksdb.index.size: 40
33-
rocksdb.block.based.table.index.type: 0
34-
rocksdb.merge.operator: pebble.concatenate
35-
rocksdb.merge.operands: 0
36-
rocksdb.property.collectors: [pebble.internal.testkeys.suffixes,obsolete-key]
37-
obsolete-key: hex:01
38-
pebble.internal.testkeys.suffixes: hex:0000ffffffffffffffffff01
26+
rocksdb.compression_options: window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0;
3927

4028
# Repeat the above with (Pebble,v5).
4129

@@ -60,22 +48,9 @@ props:
6048
rocksdb.num.entries: 1
6149
rocksdb.raw.key.size: 4
6250
rocksdb.raw.value.size: 1
63-
rocksdb.deleted.keys: 0
64-
rocksdb.num.range-deletions: 0
6551
rocksdb.num.data.blocks: 1
6652
rocksdb.compression: Snappy
67-
rocksdb.compression_options: window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0;
68-
rocksdb.comparator: pebble.internal.testkeys
69-
rocksdb.data.size: 87
70-
rocksdb.filter.size: 0
71-
rocksdb.index.size: 56
72-
rocksdb.block.based.table.index.type: 0
73-
pebble.colblk.schema: DefaultKeySchema(pebble.internal.testkeys,16)
74-
rocksdb.merge.operator: pebble.concatenate
75-
rocksdb.merge.operands: 0
76-
rocksdb.property.collectors: [pebble.internal.testkeys.suffixes,obsolete-key]
77-
obsolete-key: hex:01
78-
pebble.internal.testkeys.suffixes: hex:0000ffffffffffffffffff01
53+
rocksdb.compression_options: window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0;
7954

8055

8156
# Test 2: Similar to test 1 but force two level iterators.
@@ -96,23 +71,9 @@ props:
9671
rocksdb.num.entries: 1
9772
rocksdb.raw.key.size: 2
9873
rocksdb.raw.value.size: 1
99-
rocksdb.deleted.keys: 0
100-
rocksdb.num.range-deletions: 0
10174
rocksdb.num.data.blocks: 1
10275
rocksdb.compression: Snappy
103-
rocksdb.compression_options: window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0;
104-
rocksdb.comparator: pebble.internal.testkeys
105-
rocksdb.data.size: 108
106-
rocksdb.filter.size: 0
107-
rocksdb.index.partitions: 4
108-
rocksdb.index.size: 276
109-
rocksdb.block.based.table.index.type: 2
110-
rocksdb.merge.operator: pebble.concatenate
111-
rocksdb.merge.operands: 0
112-
rocksdb.property.collectors: [pebble.internal.testkeys.suffixes,obsolete-key]
113-
rocksdb.top-level.index.size: 131
114-
obsolete-key: hex:01
115-
pebble.internal.testkeys.suffixes: hex:0000ffffffffffffffffff01
76+
rocksdb.compression_options: window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0;
11677

11778
# Test the constrain bounds function. It performs some subtle shrinking and
11879
# expanding of bounds. The current virtual sstable bounds are [b,c].
@@ -186,24 +147,9 @@ props:
186147
rocksdb.num.entries: 1
187148
rocksdb.raw.key.size: 5
188149
rocksdb.raw.value.size: 1
189-
rocksdb.deleted.keys: 0
190-
rocksdb.num.range-deletions: 0
191150
rocksdb.num.data.blocks: 1
192151
rocksdb.compression: Snappy
193-
rocksdb.compression_options: window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0;
194-
rocksdb.comparator: pebble.internal.testkeys
195-
rocksdb.data.size: 316
196-
rocksdb.filter.size: 0
197-
rocksdb.index.partitions: 4
198-
rocksdb.index.size: 302
199-
rocksdb.block.based.table.index.type: 2
200-
pebble.colblk.schema: DefaultKeySchema(pebble.internal.testkeys,16)
201-
rocksdb.merge.operator: pebble.concatenate
202-
rocksdb.merge.operands: 0
203-
rocksdb.property.collectors: [pebble.internal.testkeys.suffixes,obsolete-key]
204-
rocksdb.top-level.index.size: 0
205-
obsolete-key: hex:01
206-
pebble.internal.testkeys.suffixes: hex:0000ffffffffffffffffff01
152+
rocksdb.compression_options: window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0;
207153

208154
# Test the constrain bounds function. It performs some subtle shrinking and
209155
# expanding of bounds. The current virtual sstable bounds are [b,c].
@@ -284,26 +230,10 @@ props:
284230
rocksdb.raw.value.size: 1
285231
rocksdb.deleted.keys: 1
286232
rocksdb.num.range-deletions: 1
287-
pebble.num.range-key-dels: 0
288233
pebble.num.range-key-sets: 1
289234
rocksdb.num.data.blocks: 1
290235
rocksdb.compression: Snappy
291-
rocksdb.compression_options: window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0;
292-
rocksdb.comparator: pebble.internal.testkeys
293-
rocksdb.data.size: 81
294-
rocksdb.filter.size: 0
295-
rocksdb.index.partitions: 3
296-
rocksdb.index.size: 208
297-
rocksdb.block.based.table.index.type: 2
298-
rocksdb.merge.operator: pebble.concatenate
299-
rocksdb.merge.operands: 0
300-
pebble.num.range-key-unsets: 0
301-
rocksdb.property.collectors: [pebble.internal.testkeys.suffixes,obsolete-key]
302-
pebble.raw.range-key.key.size: 18
303-
pebble.raw.range-key.value.size: 20
304-
rocksdb.top-level.index.size: 98
305-
obsolete-key: hex:01
306-
pebble.internal.testkeys.suffixes: hex:0000ffffffffffffffffff01
236+
rocksdb.compression_options: window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0;
307237

308238
# Repeat the above with (Pebble,v5).
309239

@@ -333,27 +263,10 @@ props:
333263
rocksdb.raw.value.size: 1
334264
rocksdb.deleted.keys: 1
335265
rocksdb.num.range-deletions: 1
336-
pebble.num.range-key-dels: 0
337266
pebble.num.range-key-sets: 1
338267
rocksdb.num.data.blocks: 1
339268
rocksdb.compression: Snappy
340-
rocksdb.compression_options: window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0;
341-
rocksdb.comparator: pebble.internal.testkeys
342-
rocksdb.data.size: 241
343-
rocksdb.filter.size: 0
344-
rocksdb.index.partitions: 3
345-
rocksdb.index.size: 237
346-
rocksdb.block.based.table.index.type: 2
347-
pebble.colblk.schema: DefaultKeySchema(pebble.internal.testkeys,16)
348-
rocksdb.merge.operator: pebble.concatenate
349-
rocksdb.merge.operands: 0
350-
pebble.num.range-key-unsets: 0
351-
rocksdb.property.collectors: [pebble.internal.testkeys.suffixes,obsolete-key]
352-
pebble.raw.range-key.key.size: 4
353-
pebble.raw.range-key.value.size: 6
354-
rocksdb.top-level.index.size: 0
355-
obsolete-key: hex:01
356-
pebble.internal.testkeys.suffixes: hex:0000ffffffffffffffffff01
269+
rocksdb.compression_options: window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0;
357270

358271
build table-format=Pebble,v4
359272
a.SET.1:a
@@ -376,21 +289,9 @@ props:
376289
rocksdb.num.entries: 2
377290
rocksdb.raw.key.size: 10
378291
rocksdb.raw.value.size: 2
379-
rocksdb.deleted.keys: 0
380-
rocksdb.num.range-deletions: 0
381292
rocksdb.num.data.blocks: 1
382293
rocksdb.compression: Snappy
383-
rocksdb.compression_options: window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0;
384-
rocksdb.comparator: pebble.internal.testkeys
385-
rocksdb.data.size: 97
386-
rocksdb.filter.size: 0
387-
rocksdb.index.size: 40
388-
rocksdb.block.based.table.index.type: 0
389-
rocksdb.merge.operator: pebble.concatenate
390-
rocksdb.merge.operands: 0
391-
rocksdb.property.collectors: [pebble.internal.testkeys.suffixes,obsolete-key]
392-
obsolete-key: hex:01
393-
pebble.internal.testkeys.suffixes: hex:0000ffffffffffffffffff01
294+
rocksdb.compression_options: window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0;
394295

395296
# Repeat the above with (Pebble,v5).
396297

@@ -415,19 +316,6 @@ props:
415316
rocksdb.num.entries: 2
416317
rocksdb.raw.key.size: 10
417318
rocksdb.raw.value.size: 2
418-
rocksdb.deleted.keys: 0
419-
rocksdb.num.range-deletions: 0
420319
rocksdb.num.data.blocks: 1
421320
rocksdb.compression: Snappy
422-
rocksdb.compression_options: window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0;
423-
rocksdb.comparator: pebble.internal.testkeys
424-
rocksdb.data.size: 107
425-
rocksdb.filter.size: 0
426-
rocksdb.index.size: 56
427-
rocksdb.block.based.table.index.type: 0
428-
pebble.colblk.schema: DefaultKeySchema(pebble.internal.testkeys,16)
429-
rocksdb.merge.operator: pebble.concatenate
430-
rocksdb.merge.operands: 0
431-
rocksdb.property.collectors: [pebble.internal.testkeys.suffixes,obsolete-key]
432-
obsolete-key: hex:01
433-
pebble.internal.testkeys.suffixes: hex:0000ffffffffffffffffff01
321+
rocksdb.compression_options: window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0;

table_stats.go

Lines changed: 10 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -306,7 +306,7 @@ func (d *DB) loadTableStats(
306306

307307
err := d.fileCache.withReader(
308308
context.TODO(), block.NoReadEnv, meta, func(r *sstable.Reader, env sstable.ReadEnv) (err error) {
309-
props := r.Properties
309+
props := r.Properties.CommonProperties
310310
if meta.Virtual != nil {
311311
props = r.Properties.GetScaledProperties(env.Virtual.BackingSize, env.Virtual.Size)
312312
}
@@ -342,7 +342,11 @@ func (d *DB) loadTableStats(
342342
// loadTablePointKeyStats calculates the point key statistics for the given
343343
// table. The provided manifest.TableStats are updated.
344344
func (d *DB) loadTablePointKeyStats(
345-
props *sstable.Properties, v *version, level int, meta *tableMetadata, stats *manifest.TableStats,
345+
props *sstable.CommonProperties,
346+
v *version,
347+
level int,
348+
meta *tableMetadata,
349+
stats *manifest.TableStats,
346350
) error {
347351
// TODO(jackson): If the file has a wide keyspace, the average
348352
// value size beneath the entire file might not be representative
@@ -476,7 +480,7 @@ func (d *DB) loadTableRangeDelStats(
476480
}
477481

478482
func (d *DB) estimateSizesBeneath(
479-
v *version, level int, meta *tableMetadata, fileProps *sstable.Properties,
483+
v *version, level int, meta *tableMetadata, fileProps *sstable.CommonProperties,
480484
) (avgValueLogicalSize, compressionRatio float64, err error) {
481485
// Find all files in lower levels that overlap with meta,
482486
// summing their value sizes and entry counts.
@@ -649,7 +653,7 @@ func (d *DB) estimateReclaimedSizeBeneath(
649653
return estimate, hintSeqNum, nil
650654
}
651655

652-
func maybeSetStatsFromProperties(meta *tableMetadata, props *sstable.Properties) bool {
656+
func maybeSetStatsFromProperties(meta *tableMetadata, props *sstable.CommonProperties) bool {
653657
// If a table contains range deletions or range key deletions, we defer the
654658
// stats collection. There are two main reasons for this:
655659
//
@@ -697,7 +701,7 @@ func maybeSetStatsFromProperties(meta *tableMetadata, props *sstable.Properties)
697701
}
698702

699703
func pointDeletionsBytesEstimate(
700-
fileSize uint64, props *sstable.Properties, avgValLogicalSize, compressionRatio float64,
704+
fileSize uint64, props *sstable.CommonProperties, avgValLogicalSize, compressionRatio float64,
701705
) (estimate uint64) {
702706
if props.NumEntries == 0 {
703707
return 0
@@ -784,7 +788,7 @@ func pointDeletionsBytesEstimate(
784788
}
785789

786790
func estimatePhysicalSizes(
787-
fileSize uint64, props *sstable.Properties,
791+
fileSize uint64, props *sstable.CommonProperties,
788792
) (avgValLogicalSize, compressionRatio float64) {
789793
// RawKeySize and RawValueSize are uncompressed totals. Scale according to
790794
// the data size to account for compression, index blocks and metadata

0 commit comments

Comments
 (0)