Commit e6a8459

db: rename LevelMetrics table fields
Rename the LevelMetrics fields that refer to 'files' to refer to 'tables' instead. With the introduction of value separation, significant volumes of data will live in blob files that are not accounted for by these existing metrics.
Parent: abc4ac2
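
For downstream callers the rename is mechanical. Below is a minimal sketch of a call site after this commit, assuming a hypothetical demo package and a *pebble.DB handle; only the Metrics/Levels accessors and the renamed fields come from this commit, everything else is illustrative:

    // Package metricsdemo is a hypothetical illustration of the renamed fields.
    package metricsdemo

    import (
    	"fmt"

    	"github.com/cockroachdb/pebble"
    )

    // PrintL0TableStats reports L0's sstable count and cumulative sstable size
    // using the renamed fields. Before this commit these were NumFiles and Size.
    func PrintL0TableStats(d *pebble.DB) {
    	m := d.Metrics()
    	fmt.Printf("L0: %d sstables, %d bytes\n",
    		m.Levels[0].TablesCount, // previously m.Levels[0].NumFiles
    		m.Levels[0].TablesSize,  // previously m.Levels[0].Size
    	)
    }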

11 files changed: 62 additions & 62 deletions
cmd/pebble/write_bench.go

Lines changed: 1 addition & 1 deletion
@@ -293,7 +293,7 @@ func runWriteBenchmark(_ *cobra.Command, args []string) error {
   }

   // Print the current stats.
-  l0Files := m.Levels[0].NumFiles
+  l0Files := m.Levels[0].TablesCount
   l0Sublevels := m.Levels[0].Sublevels
   nLevels := 0
   for _, l := range m.Levels {

compaction.go

Lines changed: 6 additions & 6 deletions
@@ -1277,11 +1277,11 @@ func (d *DB) runIngestFlush(c *compaction) (*manifest.VersionEdit, error) {
       levelMetrics = &LevelMetrics{}
       c.metrics[level] = levelMetrics
     }
-    levelMetrics.NumFiles--
-    levelMetrics.Size -= int64(m.Size)
+    levelMetrics.TablesCount--
+    levelMetrics.TablesSize -= int64(m.Size)
     for i := range added {
-      levelMetrics.NumFiles++
-      levelMetrics.Size += int64(added[i].Meta.Size)
+      levelMetrics.TablesCount++
+      levelMetrics.TablesSize += int64(added[i].Meta.Size)
     }
   }

@@ -3250,8 +3250,8 @@ func (c *compaction) makeVersionEdit(result compact.Result) (*versionEdit, error
     outputMetrics.TablesFlushed++
     outputMetrics.BytesFlushed += fileMeta.Size
   }
-  outputMetrics.Size += int64(fileMeta.Size)
-  outputMetrics.NumFiles++
+  outputMetrics.TablesSize += int64(fileMeta.Size)
+  outputMetrics.TablesCount++
   outputMetrics.Additional.BytesWrittenDataBlocks += t.WriterMeta.Properties.DataSize
   outputMetrics.Additional.BytesWrittenValueBlocks += t.WriterMeta.Properties.ValueBlocksSize
 }

compaction_picker_test.go

Lines changed: 2 additions & 2 deletions
@@ -1661,9 +1661,9 @@ func TestCompactionPickerScores(t *testing.T) {
   fmt.Fprintf(&buf, "L Size Score\n")
   for l, lm := range d.Metrics().Levels {
     if l < numLevels-1 {
-      fmt.Fprintf(&buf, "L%-3d\t%-7s%.1f\n", l, humanize.Bytes.Int64(lm.Size), lm.Score)
+      fmt.Fprintf(&buf, "L%-3d\t%-7s%.1f\n", l, humanize.Bytes.Int64(lm.TablesSize), lm.Score)
     } else {
-      fmt.Fprintf(&buf, "L%-3d\t%-7s-\n", l, humanize.Bytes.Int64(lm.Size))
+      fmt.Fprintf(&buf, "L%-3d\t%-7s-\n", l, humanize.Bytes.Int64(lm.TablesSize))
     }
   }
   return buf.String()

file_cache_test.go

Lines changed: 4 additions & 4 deletions
@@ -325,7 +325,7 @@ func TestVirtualReadsWiring(t *testing.T) {
   require.NoError(t, d.Apply(b, nil))
   require.NoError(t, d.Flush())
   require.NoError(t, d.Compact([]byte{'a'}, []byte{'b'}, false))
-  require.Equal(t, 1, int(d.Metrics().Levels[6].NumFiles))
+  require.Equal(t, 1, int(d.Metrics().Levels[6].TablesCount))

   d.mu.Lock()

@@ -402,8 +402,8 @@ func TestVirtualReadsWiring(t *testing.T) {
       lm = &LevelMetrics{}
       metrics[de.Level] = lm
     }
-    metrics[de.Level].NumFiles--
-    metrics[de.Level].Size -= int64(f.Size)
+    metrics[de.Level].TablesCount--
+    metrics[de.Level].TablesSize -= int64(f.Size)
   }
   return metrics
 }
@@ -442,7 +442,7 @@ func TestVirtualReadsWiring(t *testing.T) {
   d.mu.Unlock()

   // Confirm that there were only 2 virtual sstables in L6.
-  require.Equal(t, 2, int(d.Metrics().Levels[6].NumFiles))
+  require.Equal(t, 2, int(d.Metrics().Levels[6].TablesCount))

   // These reads will go through the file cache.
   iter, _ := d.NewIter(nil)

ingest.go

Lines changed: 6 additions & 6 deletions
@@ -2054,8 +2054,8 @@ func (d *DB) ingestApply(
       levelMetrics = &LevelMetrics{}
       metrics[f.Level] = levelMetrics
     }
-    levelMetrics.NumFiles++
-    levelMetrics.Size += int64(m.Size)
+    levelMetrics.TablesCount++
+    levelMetrics.TablesSize += int64(m.Size)
     levelMetrics.BytesIngested += m.Size
     levelMetrics.TablesIngested++
   }
@@ -2072,11 +2072,11 @@ func (d *DB) ingestApply(
       levelMetrics = &LevelMetrics{}
       metrics[level] = levelMetrics
     }
-    levelMetrics.NumFiles--
-    levelMetrics.Size -= int64(m.Size)
+    levelMetrics.TablesCount--
+    levelMetrics.TablesSize -= int64(m.Size)
     for i := range added {
-      levelMetrics.NumFiles++
-      levelMetrics.Size += int64(added[i].Meta.Size)
+      levelMetrics.TablesCount++
+      levelMetrics.TablesSize += int64(added[i].Meta.Size)
     }
   }
   if exciseSpan.Valid() {

iterator_test.go

Lines changed: 2 additions & 2 deletions
@@ -2764,7 +2764,7 @@ func BenchmarkSeekPrefixTombstones(b *testing.B) {
   }

   d.mu.Lock()
-  require.Equal(b, int64(ks.Count()-1), d.mu.versions.metrics.Levels[numLevels-1].NumFiles)
+  require.Equal(b, int64(ks.Count()-1), d.mu.versions.metrics.Levels[numLevels-1].TablesCount)
   d.mu.Unlock()

   seekKey := testkeys.Key(ks, 1)
@@ -3053,7 +3053,7 @@ func runBenchmarkQueueWorkload(b *testing.B, deleteRatio float32, initOps int, v
   for i := 0; i < numLevels; i++ {
     numTombstones := stats.Levels[i].KindsCount[base.InternalKeyKindDelete]
     numSets := stats.Levels[i].KindsCount[base.InternalKeyKindSet]
-    numTables := metrics.Levels[i].NumFiles
+    numTables := metrics.Levels[i].TablesCount
     if numSets > 0 {
       b.Logf("L%d: %d tombstones, %d sets, %d sstables\n", i, numTombstones, numSets, numTables)
     }

metrics.go

Lines changed: 17 additions & 17 deletions
@@ -46,14 +46,14 @@ type LevelMetrics struct {
   // sublevel count of 0, implying no read amplification. Only L0 will have
   // a sublevel count other than 0 or 1.
   Sublevels int32
-  // The total number of files in the level.
-  NumFiles int64
+  // The total count of sstables in the level.
+  TablesCount int64
+  // The total size in bytes of the sstables in the level.
+  TablesSize int64
   // The total number of virtual sstables in the level.
-  NumVirtualFiles uint64
-  // The total size in bytes of the files in the level.
-  Size int64
+  VirtualTablesCount uint64
   // The total size of the virtual sstables in the level.
-  VirtualSize uint64
+  VirtualTablesSize uint64
   // The level's compaction score. This is the compensatedScoreRatio in the
   // candidateLevelInfo.
   Score float64
@@ -118,10 +118,10 @@ type LevelMetrics struct {

 // Add updates the counter metrics for the level.
 func (m *LevelMetrics) Add(u *LevelMetrics) {
-  m.NumFiles += u.NumFiles
-  m.NumVirtualFiles += u.NumVirtualFiles
-  m.VirtualSize += u.VirtualSize
-  m.Size += u.Size
+  m.TablesCount += u.TablesCount
+  m.VirtualTablesCount += u.VirtualTablesCount
+  m.VirtualTablesSize += u.VirtualTablesSize
+  m.TablesSize += u.TablesSize
   m.BytesIn += u.BytesIn
   m.BytesIngested += u.BytesIngested
   m.BytesMoved += u.BytesMoved
@@ -399,7 +399,7 @@ func (m *Metrics) DiskSpaceUsage() uint64 {
 func (m *Metrics) NumVirtual() uint64 {
   var n uint64
   for _, level := range m.Levels {
-    n += level.NumVirtualFiles
+    n += level.VirtualTablesCount
   }
   return n
 }
@@ -410,7 +410,7 @@ func (m *Metrics) NumVirtual() uint64 {
 func (m *Metrics) VirtualSize() uint64 {
   var size uint64
   for _, level := range m.Levels {
-    size += level.VirtualSize
+    size += level.VirtualTablesSize
   }
   return size
 }
@@ -449,8 +449,8 @@ func (m *Metrics) Total() LevelMetrics {
 func (m *Metrics) RemoteTablesTotal() (count uint64, size uint64) {
   var liveTables, liveTableBytes int64
   for level := 0; level < numLevels; level++ {
-    liveTables += m.Levels[level].NumFiles
-    liveTableBytes += m.Levels[level].Size
+    liveTables += m.Levels[level].TablesCount
+    liveTableBytes += m.Levels[level].TablesSize
   }
   totalCount := liveTables + m.Table.ObsoleteCount + m.Table.ZombieCount
   localCount := m.Table.Local.LiveCount + m.Table.Local.ObsoleteCount + m.Table.Local.ZombieCount
@@ -553,10 +553,10 @@ func (m *Metrics) SafeFormat(w redact.SafePrinter, _ rune) {
   }

   w.Printf("| %5s %6s %6s %7s | %5s | %5s | %5s %6s | %5s %6s | %5s %6s | %5s | %3d %4s",
-    humanize.Count.Int64(m.NumFiles),
-    humanize.Bytes.Int64(m.Size),
+    humanize.Count.Int64(m.TablesCount),
+    humanize.Bytes.Int64(m.TablesSize),
     humanize.Bytes.Uint64(m.Additional.ValueBlocksSize),
-    humanize.Count.Uint64(m.NumVirtualFiles),
+    humanize.Count.Uint64(m.VirtualTablesCount),
     redact.Safe(scoreStr),
     humanize.Bytes.Uint64(m.BytesIn),
     humanize.Count.Uint64(m.TablesIngested),
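
The aggregation helpers above (NumVirtual, VirtualSize, RemoteTablesTotal) all use the same per-level loop. A hypothetical helper in the same style, continuing the illustrative metricsdemo package sketched after the commit message (so the pebble import is assumed), shows the renamed fields summed across levels:

    // TotalTables sums sstable count and size across all levels of a Metrics
    // snapshot, mirroring the per-level loops in metrics.go above. It is an
    // illustration only and not part of this commit.
    func TotalTables(m *pebble.Metrics) (count, bytes int64) {
    	for i := range m.Levels {
    		count += m.Levels[i].TablesCount
    		bytes += m.Levels[i].TablesSize
    	}
    	return count, bytes
    }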

metrics_test.go

Lines changed: 5 additions & 5 deletions
@@ -82,10 +82,10 @@ func exampleMetrics() Metrics {
   l := &m.Levels[i]
   base := uint64((i + 1) * 100)
   l.Sublevels = int32(i + 1)
-  l.NumFiles = int64(base) + 1
-  l.NumVirtualFiles = uint64(base) + 1
-  l.VirtualSize = base + 3
-  l.Size = int64(base) + 2
+  l.TablesCount = int64(base) + 1
+  l.VirtualTablesCount = uint64(base) + 1
+  l.VirtualTablesSize = base + 3
+  l.TablesSize = int64(base) + 2
   l.Score = float64(base) + 3
   l.BytesIn = base + 4
   l.BytesIngested = base + 4
@@ -394,7 +394,7 @@ func TestMetrics(t *testing.T) {
   if l >= numLevels {
     panic(fmt.Sprintf("invalid level %d", l))
   }
-  buf.WriteString(fmt.Sprintf("%d\n", m.Levels[l].NumVirtualFiles))
+  buf.WriteString(fmt.Sprintf("%d\n", m.Levels[l].VirtualTablesCount))
 } else if line == "remote-count" {
   count, _ := m.RemoteTablesTotal()
   buf.WriteString(fmt.Sprintf("%d\n", count))

obsolete_files.go

Lines changed: 1 addition & 1 deletion
@@ -320,7 +320,7 @@ func (d *DB) getDeletionPacerInfo() deletionPacerInfo {
   pacerInfo.freeBytes = d.calculateDiskAvailableBytes()
   d.mu.Lock()
   pacerInfo.obsoleteBytes = d.mu.versions.metrics.Table.ObsoleteSize
-  pacerInfo.liveBytes = uint64(d.mu.versions.metrics.Total().Size)
+  pacerInfo.liveBytes = uint64(d.mu.versions.metrics.Total().TablesSize)
   d.mu.Unlock()
   return pacerInfo
 }

version_set.go

Lines changed: 16 additions & 16 deletions
@@ -344,9 +344,9 @@ func (vs *versionSet) load(

   for i := range vs.metrics.Levels {
     l := &vs.metrics.Levels[i]
-    l.NumFiles = int64(newVersion.Levels[i].Len())
+    l.TablesCount = int64(newVersion.Levels[i].Len())
     files := newVersion.Levels[i].Slice()
-    l.Size = int64(files.SizeSum())
+    l.TablesSize = int64(files.SizeSum())
   }
   for _, l := range newVersion.Levels {
     for f := range l.All() {
@@ -549,7 +549,7 @@ func (vs *versionSet) UpdateVersionLocked(updateFn func() (versionUpdate, error)

   var nextSnapshotFilecount int64
   for i := range vs.metrics.Levels {
-    nextSnapshotFilecount += vs.metrics.Levels[i].NumFiles
+    nextSnapshotFilecount += vs.metrics.Levels[i].TablesCount
   }
   if sizeExceeded && !requireRotation {
     requireRotation = vs.rotationHelper.ShouldRotate(nextSnapshotFilecount)
@@ -708,30 +708,30 @@ func (vs *versionSet) UpdateVersionLocked(updateFn func() (versionUpdate, error)
   vs.metrics.updateLevelMetrics(vu.Metrics)
   for i := range vs.metrics.Levels {
     l := &vs.metrics.Levels[i]
-    l.NumFiles = int64(newVersion.Levels[i].Len())
-    l.NumVirtualFiles = newVersion.Levels[i].NumVirtual
-    l.VirtualSize = newVersion.Levels[i].VirtualSize
-    l.Size = int64(newVersion.Levels[i].Size())
+    l.TablesCount = int64(newVersion.Levels[i].Len())
+    l.VirtualTablesCount = newVersion.Levels[i].NumVirtual
+    l.VirtualTablesSize = newVersion.Levels[i].VirtualSize
+    l.TablesSize = int64(newVersion.Levels[i].Size())

     l.Sublevels = 0
-    if l.NumFiles > 0 {
+    if l.TablesCount > 0 {
       l.Sublevels = 1
     }
     if invariants.Enabled {
       levelFiles := newVersion.Levels[i].Slice()
-      if size := int64(levelFiles.SizeSum()); l.Size != size {
-        vs.opts.Logger.Fatalf("versionSet metrics L%d Size = %d, actual size = %d", i, l.Size, size)
+      if size := int64(levelFiles.SizeSum()); l.TablesSize != size {
+        vs.opts.Logger.Fatalf("versionSet metrics L%d Size = %d, actual size = %d", i, l.TablesSize, size)
       }
-      if nVirtual := levelFiles.NumVirtual(); nVirtual != l.NumVirtualFiles {
+      if nVirtual := levelFiles.NumVirtual(); nVirtual != l.VirtualTablesCount {
         vs.opts.Logger.Fatalf(
           "versionSet metrics L%d NumVirtual = %d, actual NumVirtual = %d",
-          i, l.NumVirtualFiles, nVirtual,
+          i, l.VirtualTablesCount, nVirtual,
         )
       }
-      if vSize := levelFiles.VirtualSizeSum(); vSize != l.VirtualSize {
+      if vSize := levelFiles.VirtualSizeSum(); vSize != l.VirtualTablesSize {
         vs.opts.Logger.Fatalf(
           "versionSet metrics L%d Virtual size = %d, actual size = %d",
-          i, l.VirtualSize, vSize,
+          i, l.VirtualTablesSize, vSize,
         )
       }
     }
@@ -1151,8 +1151,8 @@ func newFileMetrics(newFiles []manifest.NewTableEntry) levelMetricsDelta {
       lm = &LevelMetrics{}
       m[nf.Level] = lm
     }
-    lm.NumFiles++
-    lm.Size += int64(nf.Meta.Size)
+    lm.TablesCount++
+    lm.TablesSize += int64(nf.Meta.Size)
   }
   return m
 }
