@@ -256,7 +256,7 @@ type Metrics struct {
256256 Zombie metrics.CountAndSizeByPlacement
257257 }
258258
259- // Compression statistics for sstable data (does not include blob files) .
259+	// Compression statistics for the live sstables.
260260 Compression CompressionMetrics
261261
262262 // Garbage bytes.
@@ -322,9 +322,17 @@ type Metrics struct {
322322 // value sizes.
323323 ReferencedBackingValueSize uint64
324324
325+ // Compression statistics for the live blob files.
325326 Compression CompressionMetrics
326327 }
327328
329+	// CompressionCounters are cumulative counters for the number of logical
330+	// (uncompressed) bytes that went through compression and decompression.
331+	CompressionCounters struct {
332+		LogicalBytesCompressed   block.ByLevel[block.ByKind[uint64]]
333+		LogicalBytesDecompressed block.ByLevel[block.ByKind[uint64]]
334+	}
335+
328336 FileCache FileCacheMetrics
329337
330338 // Count of the number of open sstable iterators.
@@ -465,7 +473,7 @@ type KeysMetrics struct {
465473
466474// CompressionMetrics contains compression metrics for sstables or blob files.
467475type CompressionMetrics struct {
468- // NoCompressionBytes is the total number of bytes in files that do are not
476+ // NoCompressionBytes is the total number of bytes in files that are not
469477 // compressed. Data can be uncompressed when 1) compression is disabled; 2)
470478 // for certain special types of blocks; and 3) for blocks that are not
471479 // compressible.
@@ -790,6 +798,17 @@ var (
790798 table .Div (),
791799 table .String ("blob files" , 13 , table .AlignRight , func (i compressionInfo ) string { return i .blobFiles }),
792800 )
801+	compressionCountersTableHeader = ` Logical bytes compressed / decompressed`
802+
803+	compressionCountersTable = table.Define[compressionCountersInfo](
804+		table.String("level", 5, table.AlignRight, func(i compressionCountersInfo) string { return i.level }),
805+		table.Div(),
806+		table.String("data blocks", 14, table.AlignCenter, func(i compressionCountersInfo) string { return i.DataBlocks }),
807+		table.Div(),
808+		table.String("value blocks", 14, table.AlignCenter, func(i compressionCountersInfo) string { return i.ValueBlocks }),
809+		table.Div(),
810+		table.String("other blocks", 14, table.AlignCenter, func(i compressionCountersInfo) string { return i.OtherBlocks }),
811+	)
793812 deletePacerTableHeader = `DELETE PACER`
794813 deletePacerTable = table .Define [deletePacerInfo ](
795814 table .String ("" , 14 , table .AlignRight , func (i deletePacerInfo ) string { return i .label }),
@@ -921,6 +940,34 @@ func makeCompressionInfo(algorithm string, table, blob CompressionStatsForSettin
921940 return i
922941}
923942
943+ type compressionCountersInfo struct {
944+ level string
945+ block.ByKind [string ]
946+ }
947+
948+ func makeCompressionCountersInfo (m * Metrics ) []compressionCountersInfo {
949+ var result []compressionCountersInfo
950+ isZero := func (c * block.ByKind [uint64 ]) bool {
951+ return c .DataBlocks == 0 && c .ValueBlocks == 0 && c .OtherBlocks == 0
952+ }
953+ addLevel := func (level string , compressed , decompressed * block.ByKind [uint64 ]) {
954+ if isZero (compressed ) && isZero (decompressed ) {
955+ return
956+ }
957+ result = append (result , compressionCountersInfo {
958+ level : level ,
959+ ByKind : block.ByKind [string ]{
960+ DataBlocks : humanizeBytes (compressed .DataBlocks ) + " / " + humanizeBytes (decompressed .DataBlocks ),
961+ ValueBlocks : humanizeBytes (compressed .ValueBlocks ) + " / " + humanizeBytes (decompressed .ValueBlocks ),
962+ OtherBlocks : humanizeBytes (compressed .OtherBlocks ) + " / " + humanizeBytes (decompressed .OtherBlocks )},
963+ })
964+ }
965+ addLevel ("L0-L4" , & m .CompressionCounters .LogicalBytesCompressed .OtherLevels , & m .CompressionCounters .LogicalBytesDecompressed .OtherLevels )
966+ addLevel ("L5" , & m .CompressionCounters .LogicalBytesCompressed .L5 , & m .CompressionCounters .LogicalBytesDecompressed .L5 )
967+ addLevel ("L6" , & m .CompressionCounters .LogicalBytesCompressed .L6 , & m .CompressionCounters .LogicalBytesDecompressed .L6 )
968+ return result
969+ }
970+
924971// String pretty-prints the metrics.
925972//
926973// See testdata/metrics for an example.
@@ -1112,6 +1159,10 @@ func (m *Metrics) String() string {
11121159 })
11131160 cur = compressionTable .Render (cur , table.RenderOptions {}, compressionContents ... )
11141161
1162+	cur = cur.NewlineReturn()
1163+	cur = cur.WriteString(compressionCountersTableHeader).NewlineReturn()
1164+	cur = compressionCountersTable.Render(cur, table.RenderOptions{}, makeCompressionCountersInfo(m)...)
1165+
11151166 cur = cur .NewlineReturn ()
11161167 cur .WriteString (deletePacerTableHeader )
11171168 deletePacerContents := []deletePacerInfo {
@@ -1162,8 +1213,8 @@ func (m *Metrics) StringForTests() string {
11621213
11631214 // We recalculate the file cache size using the 64-bit sizes, and we ignore
11641215 // the genericcache metadata size which is harder to adjust.
1165- const sstableReaderSize64bit = 280
1166- const blobFileReaderSize64bit = 112
1216+ const sstableReaderSize64bit = 288
1217+ const blobFileReaderSize64bit = 120
11671218 mCopy .FileCache .Size = mCopy .FileCache .TableCount * sstableReaderSize64bit + mCopy .FileCache .BlobFileCount * blobFileReaderSize64bit
11681219 if math .MaxInt == math .MaxInt64 {
11691220 // Verify the 64-bit sizes, so they are kept updated.
0 commit comments