diff --git a/ingest_test.go b/ingest_test.go index b47c1cb179..1df0fa7cee 100644 --- a/ingest_test.go +++ b/ingest_test.go @@ -191,7 +191,7 @@ func TestIngestLoadRand(t *testing.T) { }, path: paths[i], } - expected[i].tableMetadata.Stats.CompressionType = block.SnappyCompression + expected[i].tableMetadata.Stats.CompressionType = block.SnappyCompressionFamily expected[i].StatsMarkValid() func() { diff --git a/internal/manifest/version.go b/internal/manifest/version.go index 6630844d13..b3ddec9a1f 100644 --- a/internal/manifest/version.go +++ b/internal/manifest/version.go @@ -76,7 +76,7 @@ type TableStats struct { // Total size of value blocks and value index block. ValueBlocksSize uint64 // CompressionType is the compression type of the table. - CompressionType block.Compression + CompressionType block.CompressionFamily // TombstoneDenseBlocksRatio is the ratio of data blocks in this table that // fulfills at least one of the following: // 1. The block contains at least options.Experimental.NumDeletionsThreshold diff --git a/options.go b/options.go index af2077c939..d5782816f2 100644 --- a/options.go +++ b/options.go @@ -43,12 +43,41 @@ const ( type Compression = block.Compression // Exported Compression constants. -const ( +var ( DefaultCompression = block.DefaultCompression NoCompression = block.NoCompression SnappyCompression = block.SnappyCompression - ZstdCompression = block.ZstdCompression - MinlzCompression = block.MinlzCompression + ZstdCompression = block.DefaultZstdCompression + MinlzCompression = block.DefaultMinlzCompression + + // Zstd compression levels. 
+ ZstdCompressionLevel1 = block.Compression{Family: block.ZstdCompressionFamily, Level: block.ZstdLevel1} + ZstdCompressionLevel2 = block.Compression{Family: block.ZstdCompressionFamily, Level: block.ZstdLevel2} + ZstdCompressionLevel3 = block.Compression{Family: block.ZstdCompressionFamily, Level: block.ZstdLevel3} + ZstdCompressionLevel4 = block.Compression{Family: block.ZstdCompressionFamily, Level: block.ZstdLevel4} + ZstdCompressionLevel5 = block.Compression{Family: block.ZstdCompressionFamily, Level: block.ZstdLevel5} + ZstdCompressionLevel6 = block.Compression{Family: block.ZstdCompressionFamily, Level: block.ZstdLevel6} + ZstdCompressionLevel7 = block.Compression{Family: block.ZstdCompressionFamily, Level: block.ZstdLevel7} + ZstdCompressionLevel8 = block.Compression{Family: block.ZstdCompressionFamily, Level: block.ZstdLevel8} + ZstdCompressionLevel9 = block.Compression{Family: block.ZstdCompressionFamily, Level: block.ZstdLevel9} + ZstdCompressionLevel10 = block.Compression{Family: block.ZstdCompressionFamily, Level: block.ZstdLevel10} + ZstdCompressionLevel11 = block.Compression{Family: block.ZstdCompressionFamily, Level: block.ZstdLevel11} + ZstdCompressionLevel12 = block.Compression{Family: block.ZstdCompressionFamily, Level: block.ZstdLevel12} + ZstdCompressionLevel13 = block.Compression{Family: block.ZstdCompressionFamily, Level: block.ZstdLevel13} + ZstdCompressionLevel14 = block.Compression{Family: block.ZstdCompressionFamily, Level: block.ZstdLevel14} + ZstdCompressionLevel15 = block.Compression{Family: block.ZstdCompressionFamily, Level: block.ZstdLevel15} + ZstdCompressionLevel16 = block.Compression{Family: block.ZstdCompressionFamily, Level: block.ZstdLevel16} + ZstdCompressionLevel17 = block.Compression{Family: block.ZstdCompressionFamily, Level: block.ZstdLevel17} + ZstdCompressionLevel18 = block.Compression{Family: block.ZstdCompressionFamily, Level: block.ZstdLevel18} + ZstdCompressionLevel19 = block.Compression{Family: 
block.ZstdCompressionFamily, Level: block.ZstdLevel19} + ZstdCompressionLevel20 = block.Compression{Family: block.ZstdCompressionFamily, Level: block.ZstdLevel20} + ZstdCompressionLevel21 = block.Compression{Family: block.ZstdCompressionFamily, Level: block.ZstdLevel21} + ZstdCompressionLevel22 = block.Compression{Family: block.ZstdCompressionFamily, Level: block.ZstdLevel22} + + // Minlz compression levels. + MinlzCompressionLevelFastest = block.Compression{Family: block.MinlzCompressionFamily, Level: block.MinlzLevelFastest} + MinlzCompressionLevelBalanced = block.Compression{Family: block.MinlzCompressionFamily, Level: block.MinlzLevelBalanced} + MinlzCompressionLevelSmallest = block.Compression{Family: block.MinlzCompressionFamily, Level: block.MinlzLevelSmallest} ) // FilterType exports the base.FilterType type. @@ -1492,7 +1521,7 @@ func (o *Options) String() string { fmt.Fprintf(&buf, " block_restart_interval=%d\n", l.BlockRestartInterval) fmt.Fprintf(&buf, " block_size=%d\n", l.BlockSize) fmt.Fprintf(&buf, " block_size_threshold=%d\n", l.BlockSizeThreshold) - fmt.Fprintf(&buf, " compression=%s\n", resolveDefaultCompression(l.Compression())) + fmt.Fprintf(&buf, " compression=%s\n", resolveDefaultCompression(l.Compression()).Family.String()) fmt.Fprintf(&buf, " filter_policy=%s\n", filterPolicyName(l.FilterPolicy)) fmt.Fprintf(&buf, " filter_type=%s\n", l.FilterType) fmt.Fprintf(&buf, " index_block_size=%d\n", l.IndexBlockSize) @@ -2149,8 +2178,8 @@ func (o *Options) MakeBlobWriterOptions(level int) blob.FileWriterOptions { } func resolveDefaultCompression(c Compression) Compression { - if c <= DefaultCompression || c >= block.NCompression { - c = SnappyCompression + if c.Family <= block.DefaultCompressionFamily || c.Family >= block.NCompressionFamily { + c = block.SnappyCompression } return c } diff --git a/sstable/blob/blob.go b/sstable/blob/blob.go index 5d5d6eb12f..198d4baefc 100644 --- a/sstable/blob/blob.go +++ b/sstable/blob/blob.go @@ -57,7 +57,7 @@ 
type FileWriterOptions struct { } func (o *FileWriterOptions) ensureDefaults() { - if o.Compression <= block.DefaultCompression || o.Compression >= block.NCompression { + if o.Compression.Family <= block.DefaultCompressionFamily || o.Compression.Family >= block.NCompressionFamily { o.Compression = block.SnappyCompression } if o.ChecksumType == block.ChecksumTypeNone { diff --git a/sstable/blob/blob_test.go b/sstable/blob/blob_test.go index 92108c0e9b..7fa93e3aa1 100644 --- a/sstable/blob/blob_test.go +++ b/sstable/blob/blob_test.go @@ -65,7 +65,7 @@ func scanFileWriterOptions(t *testing.T, td *datadriven.TestData) FileWriterOpti td.MaybeScanArgs(t, "target-block-size", &targetBlockSize) td.MaybeScanArgs(t, "block-size-threshold", &blockSizeThreshold) if cmdArg, ok := td.Arg("compression"); ok { - compression = block.CompressionFromString(cmdArg.SingleVal(t)) + compression = block.FamilyToDefaultCompression[block.CompressionFromString(cmdArg.SingleVal(t))] } return FileWriterOptions{ Compression: compression, diff --git a/sstable/block/compression.go b/sstable/block/compression.go index bc81d34a68..867151e359 100644 --- a/sstable/block/compression.go +++ b/sstable/block/compression.go @@ -16,32 +16,97 @@ import ( "github.com/cockroachdb/pebble/objstorage" ) -// Compression is the per-block compression algorithm to use. -type Compression int +// CompressionFamily identifies a compression algorithm (e.g., Snappy, Zstd, Minlz). +type CompressionFamily int + +// CompressionLevel specifies the compression level for a given family. +// Some families ignore this value if they don't support levels. +type CompressionLevel int + +// Compression is the per-block compression algorithm and level to use. +// For families like Snappy, the level is ignored. For families like Zstd or Minlz, +// the level adjusts compression ratio and speed. +type Compression struct { + Family CompressionFamily + Level CompressionLevel +} + +// The available compression family types. 
+const ( + DefaultCompressionFamily CompressionFamily = iota + NoCompressionFamily + SnappyCompressionFamily + ZstdCompressionFamily + MinlzCompressionFamily + NCompressionFamily +) -// The available compression types. +// The available compression levels. const ( - DefaultCompression Compression = iota - NoCompression - SnappyCompression - ZstdCompression - MinlzCompression - NCompression + LevelDefault CompressionLevel = 0 + + // Zstd compression levels. + ZstdLevelMin CompressionLevel = 1 + ZstdLevel1 CompressionLevel = 1 + ZstdLevel2 CompressionLevel = 2 + ZstdLevel3 CompressionLevel = 3 // Default for Zstd. + ZstdLevelDefault CompressionLevel = ZstdLevel3 + ZstdLevel4 CompressionLevel = 4 + ZstdLevel5 CompressionLevel = 5 + ZstdLevel6 CompressionLevel = 6 + ZstdLevel7 CompressionLevel = 7 + ZstdLevel8 CompressionLevel = 8 + ZstdLevel9 CompressionLevel = 9 + ZstdLevel10 CompressionLevel = 10 + ZstdLevel11 CompressionLevel = 11 + ZstdLevel12 CompressionLevel = 12 + ZstdLevel13 CompressionLevel = 13 + ZstdLevel14 CompressionLevel = 14 + ZstdLevel15 CompressionLevel = 15 + ZstdLevel16 CompressionLevel = 16 + ZstdLevel17 CompressionLevel = 17 + ZstdLevel18 CompressionLevel = 18 + ZstdLevel19 CompressionLevel = 19 + ZstdLevel20 CompressionLevel = 20 + ZstdLevel21 CompressionLevel = 21 + ZstdLevel22 CompressionLevel = 22 + ZstdLevelMax CompressionLevel = 22 + + // Minlz compression levels. + MinlzLevelMin CompressionLevel = 1 + MinlzLevelFastest CompressionLevel = 1 // Default for MinLZ. 
+	MinlzLevelDefault  CompressionLevel = MinlzLevelFastest
+	MinlzLevelBalanced CompressionLevel = 2
+	MinlzLevelSmallest CompressionLevel = 3
+	MinlzLevelMax      CompressionLevel = 3
 )
 
+var DefaultCompression = Compression{Family: DefaultCompressionFamily, Level: LevelDefault}
+var NoCompression = Compression{Family: NoCompressionFamily, Level: LevelDefault}
+var SnappyCompression = Compression{Family: SnappyCompressionFamily, Level: LevelDefault}
+var DefaultZstdCompression = Compression{Family: ZstdCompressionFamily, Level: LevelDefault}
+var DefaultMinlzCompression = Compression{Family: MinlzCompressionFamily, Level: LevelDefault}
+
+var FamilyToDefaultCompression = map[CompressionFamily]Compression{
+	DefaultCompressionFamily: DefaultCompression,
+	NoCompressionFamily:      NoCompression,
+	SnappyCompressionFamily:  SnappyCompression,
+	ZstdCompressionFamily:    DefaultZstdCompression, MinlzCompressionFamily: DefaultMinlzCompression,
+}
+
 // String implements fmt.Stringer, returning a human-readable name for the
 // compression algorithm.
-func (c Compression) String() string {
+func (c CompressionFamily) String() string {
 	switch c {
-	case DefaultCompression:
+	case DefaultCompressionFamily:
 		return "Default"
-	case NoCompression:
+	case NoCompressionFamily:
 		return "NoCompression"
-	case SnappyCompression:
+	case SnappyCompressionFamily:
 		return "Snappy"
-	case ZstdCompression:
+	case ZstdCompressionFamily:
 		return "ZSTD"
-	case MinlzCompression:
+	case MinlzCompressionFamily:
 		return "Minlz"
 	default:
 		return "Unknown"
@@ -50,20 +115,20 @@ func (c Compression) String() string {
 
 // CompressionFromString returns an sstable.Compression from its
 // string representation. Inverse of c.String() above.
-func CompressionFromString(s string) Compression { +func CompressionFromString(s string) CompressionFamily { switch s { case "Default": - return DefaultCompression + return DefaultCompressionFamily case "NoCompression": - return NoCompression + return NoCompressionFamily case "Snappy": - return SnappyCompression + return SnappyCompressionFamily case "ZSTD": - return ZstdCompression + return ZstdCompressionFamily case "Minlz": - return MinlzCompression + return MinlzCompressionFamily default: - return DefaultCompression + return DefaultCompressionFamily } } @@ -222,10 +287,10 @@ func CompressAndChecksum( // Compress the buffer, discarding the result if the improvement isn't at // least 12.5%. algo := NoCompressionIndicator - if compression != NoCompression { - compressor := GetCompressor(compression) + if compression.Family != NoCompressionFamily { + compressor := GetCompressor(compression.Family) defer compressor.Close() - algo, buf = compressor.Compress(buf, blockData) + algo, buf = compressor.Compress(buf, blockData, compression.Level) if len(buf) >= len(blockData)-len(blockData)/8 { algo = NoCompressionIndicator } diff --git a/sstable/block/compression_test.go b/sstable/block/compression_test.go index 1607a6563f..317d2a2d4a 100644 --- a/sstable/block/compression_test.go +++ b/sstable/block/compression_test.go @@ -26,8 +26,8 @@ func TestCompressionRoundtrip(t *testing.T) { t.Logf("seed %d", seed) rng := rand.New(rand.NewPCG(0, seed)) - for compression := DefaultCompression + 1; compression < NCompression; compression++ { - if compression == NoCompression { + for compression := DefaultCompressionFamily + 1; compression < NCompressionFamily; compression++ { + if compression == NoCompressionFamily { continue } t.Run(compression.String(), func(t *testing.T) { @@ -40,7 +40,7 @@ func TestCompressionRoundtrip(t *testing.T) { compressedBuf := make([]byte, 1+rng.IntN(1<<10 /* 1 KiB */)) compressor := GetCompressor(compression) defer compressor.Close() - btyp, compressed 
:= compressor.Compress(compressedBuf, payload) + btyp, compressed := compressor.Compress(compressedBuf, payload, LevelDefault) v, err := decompress(btyp, compressed) require.NoError(t, err) got := payload @@ -151,12 +151,12 @@ func TestMinlzEncodingLimit(t *testing.T) { require.Fail(t, "Expected minlz.ErrTooLarge Error") } - c := GetCompressor(MinlzCompression) + c := GetCompressor(MinlzCompressionFamily) defer c.Close() - algo, _ := c.Compress([]byte{}, bytes.Repeat([]byte{0}, minlz.MaxBlockSize-1)) + algo, _ := c.Compress([]byte{}, bytes.Repeat([]byte{0}, minlz.MaxBlockSize-1), MinlzLevelDefault) require.Equal(t, algo, MinlzCompressionIndicator) - algo, _ = c.Compress([]byte{}, bytes.Repeat([]byte{0}, minlz.MaxBlockSize)) + algo, _ = c.Compress([]byte{}, bytes.Repeat([]byte{0}, minlz.MaxBlockSize), MinlzLevelDefault) require.Equal(t, algo, MinlzCompressionIndicator) - algo, _ = c.Compress([]byte{}, bytes.Repeat([]byte{0}, minlz.MaxBlockSize+1)) + algo, _ = c.Compress([]byte{}, bytes.Repeat([]byte{0}, minlz.MaxBlockSize+1), MinlzLevelDefault) require.Equal(t, algo, SnappyCompressionIndicator) } diff --git a/sstable/block/compressor.go b/sstable/block/compressor.go index e12cf2aa73..5d8e1aefcc 100644 --- a/sstable/block/compressor.go +++ b/sstable/block/compressor.go @@ -10,7 +10,7 @@ import ( ) type Compressor interface { - Compress(dst, src []byte) (CompressionIndicator, []byte) + Compress(dst, src []byte, level CompressionLevel) (CompressionIndicator, []byte) // Close must be called when the Compressor is no longer needed. // After Close is called, the Compressor must not be used again. 
@@ -25,42 +25,53 @@ var _ Compressor = noopCompressor{} var _ Compressor = snappyCompressor{} var _ Compressor = minlzCompressor{} -func (noopCompressor) Compress(dst, src []byte) (CompressionIndicator, []byte) { +func (noopCompressor) Compress(dst, src []byte, _ CompressionLevel) (CompressionIndicator, []byte) { panic("NoCompressionCompressor.Compress() should not be called.") } func (noopCompressor) Close() {} -func (snappyCompressor) Compress(dst, src []byte) (CompressionIndicator, []byte) { +func (snappyCompressor) Compress( + dst, src []byte, _ CompressionLevel, +) (CompressionIndicator, []byte) { dst = dst[:cap(dst):cap(dst)] return SnappyCompressionIndicator, snappy.Encode(dst, src) } func (snappyCompressor) Close() {} -func (minlzCompressor) Compress(dst, src []byte) (CompressionIndicator, []byte) { - // Minlz cannot encode blocks greater than 8MB. Fall back to Snappy in those cases. +func (minlzCompressor) Compress( + dst, src []byte, level CompressionLevel, +) (CompressionIndicator, []byte) { + // Minlz cannot encode blocks greater than 8MiB. Fall back to Snappy in those cases. 
if len(src) > minlz.MaxBlockSize { - return (snappyCompressor{}).Compress(dst, src) + return (snappyCompressor{}).Compress(dst, src, LevelDefault) } - - compressed, err := minlz.Encode(dst, src, minlz.LevelFastest) + var encoderLevel int + if level == LevelDefault { + encoderLevel = int(MinlzLevelDefault) + } else if level < MinlzLevelMin || level > MinlzLevelMax { + panic("minlz compression: illegal level") + } else { + encoderLevel = int(level) + } + compressed, err := minlz.Encode(dst, src, encoderLevel) if err != nil { - panic(errors.Wrap(err, "minlz compression")) + panic(errors.Wrap(err, "Error while compressing using Minlz.")) } return MinlzCompressionIndicator, compressed } func (minlzCompressor) Close() {} -func GetCompressor(c Compression) Compressor { +func GetCompressor(c CompressionFamily) Compressor { switch c { - case NoCompression: + case NoCompressionFamily: return noopCompressor{} - case SnappyCompression: + case SnappyCompressionFamily: return snappyCompressor{} - case ZstdCompression: + case ZstdCompressionFamily: return getZstdCompressor() - case MinlzCompression: + case MinlzCompressionFamily: return minlzCompressor{} default: panic("Invalid compression type.") diff --git a/sstable/block/compressor_cgo.go b/sstable/block/compressor_cgo.go index cfe004d7ab..f4f679553c 100644 --- a/sstable/block/compressor_cgo.go +++ b/sstable/block/compressor_cgo.go @@ -38,7 +38,9 @@ const UseStandardZstdLib = true // is sufficient. The subslice `compressedBuf[:varIntLen]` should already encode // the length of `b` before calling Compress. It returns the encoded byte // slice, including the `compressedBuf[:varIntLen]` prefix. 
-func (z *zstdCompressor) Compress(compressedBuf []byte, b []byte) (CompressionIndicator, []byte) { +func (z *zstdCompressor) Compress( + compressedBuf []byte, b []byte, level CompressionLevel, +) (CompressionIndicator, []byte) { if len(compressedBuf) < binary.MaxVarintLen64 { compressedBuf = append(compressedBuf, make([]byte, binary.MaxVarintLen64-len(compressedBuf))...) } @@ -52,9 +54,17 @@ func (z *zstdCompressor) Compress(compressedBuf []byte, b []byte) (CompressionIn } varIntLen := binary.PutUvarint(compressedBuf, uint64(len(b))) - result, err := z.ctx.CompressLevel(compressedBuf[varIntLen:varIntLen+bound], b, 3) + var encoderLevel int + if level == LevelDefault { + encoderLevel = int(ZstdLevelDefault) + } else if level < ZstdLevelMin || level > ZstdLevelMax { + panic("zstd compression: illegal level") + } else { + encoderLevel = int(level) + } + result, err := z.ctx.CompressLevel(compressedBuf[varIntLen:varIntLen+bound], b, encoderLevel) if err != nil { - panic("Error while compressing using Zstd.") + panic(errors.Wrap(err, "Error while compressing using Zstd.")) } if &result[0] != &compressedBuf[varIntLen] { panic("Allocated a new buffer despite checking CompressBound.") @@ -92,7 +102,7 @@ func (z *zstdDecompressor) DecompressInto(dst, src []byte) error { } _, err := z.ctx.DecompressInto(dst, src) if err != nil { - return err + return errors.Wrap(err, "Error while decompressing Zstd.") } return nil } diff --git a/sstable/block/compressor_nocgo.go b/sstable/block/compressor_nocgo.go index e519b3653e..8e4ff0aa3e 100644 --- a/sstable/block/compressor_nocgo.go +++ b/sstable/block/compressor_nocgo.go @@ -34,13 +34,35 @@ const UseStandardZstdLib = false // is sufficient. The subslice `compressedBuf[:varIntLen]` should already encode // the length of `b` before calling Compress. It returns the encoded byte // slice, including the `compressedBuf[:varIntLen]` prefix. 
-func (zstdCompressor) Compress(compressedBuf, b []byte) (CompressionIndicator, []byte) {
+func (zstdCompressor) Compress(
+	compressedBuf, b []byte, level CompressionLevel,
+) (CompressionIndicator, []byte) {
 	if len(compressedBuf) < binary.MaxVarintLen64 {
 		compressedBuf = append(compressedBuf, make([]byte, binary.MaxVarintLen64-len(compressedBuf))...)
 	}
 	varIntLen := binary.PutUvarint(compressedBuf, uint64(len(b)))
-	encoder, _ := zstd.NewWriter(nil)
-	defer encoder.Close()
+	var encoderLevel zstd.EncoderLevel
+	if level == LevelDefault {
+		encoderLevel = zstd.EncoderLevelFromZstd(int(ZstdLevelDefault))
+	} else if level < ZstdLevelMin || level > ZstdLevelMax {
+		panic("zstd compression: illegal level")
+	} else {
+		encoderLevel = zstd.EncoderLevelFromZstd(int(level))
+	}
+	// The pure-Go zstd library exposes only a few speed presets rather than the
+	// full 1-22 zstd level range. EncoderLevelFromZstd maps a standard zstd
+	// level onto the nearest preset; see
+	// https://github.com/klauspost/compress/blob/v1.18.0/zstd/encoder_options.go#L146
+ opts := zstd.WithEncoderLevel(encoderLevel) + encoder, err := zstd.NewWriter(nil, opts) + if err != nil { + panic(errors.Wrap(err, "Error while compressing using Zstd.")) + } + defer func() { + if err := encoder.Close(); err != nil { + panic(errors.Wrap(err, "error while closing Zstd encoder.")) + } + }() return ZstdCompressionIndicator, encoder.EncodeAll(b, compressedBuf[:varIntLen]) } @@ -65,7 +87,7 @@ func (zstdDecompressor) DecompressInto(dst, src []byte) error { defer decoder.Close() result, err := decoder.DecodeAll(src, dst[:0]) if err != nil { - return err + return errors.Wrap(err, "Error while decompressing Zstd.") } if len(result) != len(dst) || (len(result) > 0 && &result[0] != &dst[0]) { return base.CorruptionErrorf("pebble/table: decompressed into unexpected buffer: %p != %p", diff --git a/sstable/colblk_writer.go b/sstable/colblk_writer.go index e88cfe95f5..b2188147fc 100644 --- a/sstable/colblk_writer.go +++ b/sstable/colblk_writer.go @@ -176,7 +176,7 @@ func newColumnarWriter( w.props.PropertyCollectorNames = buf.String() w.props.ComparerName = o.Comparer.Name - w.props.CompressionName = o.Compression.String() + w.props.CompressionName = o.Compression.Family.String() w.props.KeySchemaName = o.KeySchema.Name w.props.MergerName = o.MergerName diff --git a/sstable/options.go b/sstable/options.go index 119d3c4290..60b091ffd5 100644 --- a/sstable/options.go +++ b/sstable/options.go @@ -330,8 +330,14 @@ func (o WriterOptions) ensureDefaults() WriterOptions { if o.Comparer == nil { o.Comparer = base.DefaultComparer } - if o.Compression <= block.DefaultCompression || o.Compression >= block.NCompression { - o.Compression = block.SnappyCompression + if o.Compression.Family <= block.DefaultCompressionFamily || o.Compression.Family >= block.NCompressionFamily { + o.Compression.Family = block.SnappyCompressionFamily + } + if o.Compression.Family == block.ZstdCompressionFamily && (o.Compression.Level < block.ZstdLevelMin || o.Compression.Level > 
block.ZstdLevelMax) { + o.Compression.Level = block.ZstdLevelDefault + } + if o.Compression.Family == block.MinlzCompressionFamily && (o.Compression.Level < block.MinlzLevelMin || o.Compression.Level > block.MinlzLevelMax) { + o.Compression.Level = block.MinlzLevelDefault } if o.IndexBlockSize <= 0 { o.IndexBlockSize = o.BlockSize diff --git a/sstable/reader_test.go b/sstable/reader_test.go index 5b6f6fdd48..75a2005fa6 100644 --- a/sstable/reader_test.go +++ b/sstable/reader_test.go @@ -1896,7 +1896,7 @@ var basicBenchmarks = []struct { BlockSize: 32 << 10, BlockRestartInterval: 16, FilterPolicy: nil, - Compression: block.ZstdCompression, + Compression: block.DefaultZstdCompression, TableFormat: TableFormatPebblev2, }, }, diff --git a/sstable/rowblk_writer.go b/sstable/rowblk_writer.go index 2d218490d8..c081e9b128 100644 --- a/sstable/rowblk_writer.go +++ b/sstable/rowblk_writer.go @@ -1735,7 +1735,7 @@ func newRowWriter(writable objstorage.Writable, o WriterOptions) *RawRowWriter { } w.props.ComparerName = o.Comparer.Name - w.props.CompressionName = o.Compression.String() + w.props.CompressionName = o.Compression.Family.String() w.props.MergerName = o.MergerName w.props.PropertyCollectorNames = "[]" diff --git a/sstable/suffix_rewriter_test.go b/sstable/suffix_rewriter_test.go index 0555fefebb..f70b5029a2 100644 --- a/sstable/suffix_rewriter_test.go +++ b/sstable/suffix_rewriter_test.go @@ -250,7 +250,7 @@ func BenchmarkRewriteSST(b *testing.B) { b.ResetTimer() for comp := range compressions { - b.Run(compressions[comp].String(), func(b *testing.B) { + b.Run(compressions[comp].Family.String(), func(b *testing.B) { for sz := range sizes { r := files[comp][sz] sst := sstBytes[comp][sz] diff --git a/sstable/test_fixtures.go b/sstable/test_fixtures.go index db8c5326fd..ebd9f27775 100644 --- a/sstable/test_fixtures.go +++ b/sstable/test_fixtures.go @@ -237,7 +237,7 @@ var TestFixtures = []TestFixtureInfo{ }, { Filename: "h.zstd-compression.sst", - Compression: 
block.ZstdCompression, + Compression: block.DefaultZstdCompression, FullKeyFilter: false, PrefixFilter: false, IndexBlockSize: fixtureDefaultIndexBlockSize, diff --git a/sstable/writer_fixture_test.go b/sstable/writer_fixture_test.go index 64307bff9d..7408467cfe 100644 --- a/sstable/writer_fixture_test.go +++ b/sstable/writer_fixture_test.go @@ -82,7 +82,7 @@ func TestFixtureOutput(t *testing.T) { // . // Since the fixture test requires bit-to-bit reproducibility, we cannot // run the zstd test when the implementation is not based on facebook/zstd. - if !block.UseStandardZstdLib && fixture.Compression == block.ZstdCompression { + if !block.UseStandardZstdLib && fixture.Compression.Family == block.ZstdCompressionFamily { continue } t.Run(fixture.Filename, func(t *testing.T) { diff --git a/sstable/writer_test.go b/sstable/writer_test.go index 6747151900..3181880b78 100644 --- a/sstable/writer_test.go +++ b/sstable/writer_test.go @@ -1242,8 +1242,8 @@ func runWriterBench(b *testing.B, keys [][]byte, comparer *base.Comparer, format b.Run(fmt.Sprintf("block=%s", humanize.Bytes.Int64(int64(bs))), func(b *testing.B) { for _, filter := range []bool{true, false} { b.Run(fmt.Sprintf("filter=%t", filter), func(b *testing.B) { - for _, comp := range []block.Compression{block.NoCompression, block.SnappyCompression, block.ZstdCompression, block.MinlzCompression} { - b.Run(fmt.Sprintf("compression=%s", comp), func(b *testing.B) { + for _, comp := range []block.Compression{block.NoCompression, block.SnappyCompression, block.DefaultZstdCompression, block.DefaultMinlzCompression} { + b.Run(fmt.Sprintf("compression=%s", comp.Family.String()), func(b *testing.B) { opts := WriterOptions{ BlockRestartInterval: 16, BlockSize: bs, diff --git a/table_stats.go b/table_stats.go index d63b192d59..237d6cf59e 100644 --- a/table_stats.go +++ b/table_stats.go @@ -1049,13 +1049,13 @@ func (a compressionTypeAggregator) Accumulate( f *tableMetadata, dst *compressionTypes, ) (v *compressionTypes, 
cacheOK bool) { switch f.Stats.CompressionType { - case SnappyCompression: + case SnappyCompression.Family: dst.snappy++ - case ZstdCompression: + case ZstdCompression.Family: dst.zstd++ - case MinlzCompression: + case MinlzCompression.Family: dst.minlz++ - case NoCompression: + case NoCompression.Family: dst.none++ default: dst.unknown++