diff --git a/batch.go b/batch.go index fb6fec7bfa3..f439133ae5f 100644 --- a/batch.go +++ b/batch.go @@ -8,7 +8,6 @@ import ( "bytes" "context" "encoding/binary" - "fmt" "io" "math" "sort" @@ -2080,7 +2079,7 @@ func newFlushableBatch(batch *Batch, comparer *Comparer) (*flushableBatch, error func (b *flushableBatch) setSeqNum(seqNum base.SeqNum) { if b.seqNum != 0 { - panic(fmt.Sprintf("pebble: flushableBatch.seqNum already set: %d", b.seqNum)) + panic(errors.AssertionFailedf("pebble: flushableBatch.seqNum already set: %d", b.seqNum)) } b.seqNum = seqNum for i := range b.tombstones { diff --git a/checkpoint_test.go b/checkpoint_test.go index 79003c2a466..b7102b496ac 100644 --- a/checkpoint_test.go +++ b/checkpoint_test.go @@ -272,7 +272,8 @@ func TestCopyCheckpointOptions(t *testing.T) { require.NoError(t, f.Close()) return string(newFile) default: - panic(fmt.Sprintf("unrecognized command %q", td.Cmd)) + t.Fatalf("unrecognized command %q", td.Cmd) + return "" } }) } diff --git a/cmd/pebble/test.go b/cmd/pebble/test.go index 9b860735a30..68ffe29ccff 100644 --- a/cmd/pebble/test.go +++ b/cmd/pebble/test.go @@ -18,6 +18,7 @@ import ( "time" "github.com/HdrHistogram/hdrhistogram-go" + "github.com/cockroachdb/errors" "github.com/cockroachdb/pebble" ) @@ -141,7 +142,7 @@ func (w *namedHistogram) Record(elapsed time.Duration) { // Note that a histogram only drops recorded values that are out of range, // but we clamp the latency value to the configured range to prevent such // drops. This code path should never happen. 
- panic(fmt.Sprintf(`%s: recording value: %s`, w.name, err)) + panic(errors.AssertionFailedf(`%s: recording value: %s`, w.name, err)) } } diff --git a/cockroachkvs/cockroachkvs.go b/cockroachkvs/cockroachkvs.go index ae85adc84cf..f8dfabf1e89 100644 --- a/cockroachkvs/cockroachkvs.go +++ b/cockroachkvs/cockroachkvs.go @@ -476,7 +476,7 @@ func normalizeVersionForCompare(a []byte) []byte { func normalizeEngineSuffixForCompare(a []byte) []byte { // Check sentinel byte. if invariants.Enabled && len(a) != int(a[len(a)-1]) { - panic(errors.AssertionFailedf("malformed suffix: %x (length byte is %d; but suffix is %d bytes)", a, a[len(a)-1], len(a))) + panic(errors.AssertionFailedf("malformed suffix: %x (length byte is %d; but suffix is %d bytes)", errors.Safe(a), errors.Safe(a[len(a)-1]), errors.Safe(len(a)))) } // Strip off sentinel byte. a = a[:len(a)-1] @@ -659,7 +659,7 @@ func (kw *cockroachKeyWriter) WriteKey( versionLen := int(key[len(key)-1]) if (len(key)-versionLen) != int(keyPrefixLen) || key[keyPrefixLen-1] != 0x00 { panic(errors.AssertionFailedf("invalid %d-byte key with %d-byte prefix (%q)", - len(key), keyPrefixLen, key)) + errors.Safe(len(key)), errors.Safe(keyPrefixLen), key)) } // TODO(jackson): Avoid copying the previous suffix. kw.prevSuffix = append(kw.prevSuffix[:0], key[keyPrefixLen:]...) 
@@ -754,7 +754,7 @@ func (kw *cockroachKeyWriter) Finish( case cockroachColUntypedVersion: return kw.untypedVersions.Finish(0, rows, offset, buf) default: - panic(fmt.Sprintf("unknown default key column: %d", col)) + panic(errors.AssertionFailedf("unknown default key column: %d", errors.Safe(col))) } } @@ -793,7 +793,7 @@ func (ks *cockroachKeySeeker) init( header := bd.Header() header = header[:len(header)-colblk.DataBlockCustomHeaderSize] if len(header) != 1 { - panic(errors.AssertionFailedf("invalid key schema-specific header %x", header)) + panic(errors.AssertionFailedf("invalid key schema-specific header %x", errors.Safe(header))) } ks.suffixTypes = suffixTypes(header[0]) } @@ -1003,7 +1003,7 @@ func (ks *cockroachKeySeeker) MaterializeUserKey( ki *colblk.PrefixBytesIter, prevRow, row int, ) []byte { if invariants.Enabled && (row < 0 || row >= ks.roachKeys.Rows()) { - panic(errors.AssertionFailedf("invalid row number %d", row)) + panic(errors.AssertionFailedf("invalid row number %d", errors.Safe(row))) } if prevRow+1 == row && prevRow >= 0 { ks.roachKeys.SetNext(ki) @@ -1148,7 +1148,7 @@ func (fk formatKeySuffix) Format(f fmt.State, c rune) { fmt.Fprintf(f, "%d.%09d,%d", wallTime/1e9, wallTime%1e9, logical) } default: - panic(errors.AssertionFailedf("invalid suffix length: %d", len(fk))) + panic(errors.AssertionFailedf("invalid suffix length: %d", errors.Safe(len(fk)))) } } diff --git a/cockroachkvs/cockroachkvs_test.go b/cockroachkvs/cockroachkvs_test.go index 72b012c3158..5800222cf63 100644 --- a/cockroachkvs/cockroachkvs_test.go +++ b/cockroachkvs/cockroachkvs_test.go @@ -20,6 +20,7 @@ import ( "github.com/cockroachdb/crlib/testutils/leaktest" "github.com/cockroachdb/crlib/testutils/require" "github.com/cockroachdb/datadriven" + "github.com/cockroachdb/errors" "github.com/cockroachdb/pebble" "github.com/cockroachdb/pebble/internal/base" "github.com/cockroachdb/pebble/internal/testutils" @@ -148,7 +149,8 @@ func TestComparerFuncs(t *testing.T) { } return 
buf.String() default: - panic(fmt.Sprintf("unknown command: %s", td.Cmd)) + t.Fatalf("unknown command: %s", td.Cmd) + return "" } }) } @@ -226,7 +228,8 @@ func TestKeySchema_KeyWriter(t *testing.T) { tbl.Render() return buf.String() default: - panic(fmt.Sprintf("unrecognized command %q", td.Cmd)) + t.Fatalf("unrecognized command %q", td.Cmd) + return "" } }) } @@ -322,7 +325,8 @@ func TestKeySchema_KeySeeker(t *testing.T) { } return buf.String() default: - panic(fmt.Sprintf("unrecognized command %q", td.Cmd)) + t.Fatalf("unrecognized command %q", td.Cmd) + return "" } }) } @@ -638,7 +642,7 @@ func parseUserKey(userKeyStr string) []byte { var err error roachKey, err = strconv.Unquote(roachKey) if err != nil { - panic(fmt.Sprintf("invalid user key string %s: %v", userKeyStr, err)) + panic(errors.AssertionFailedf("invalid user key string %s: %v", userKeyStr, err)) } } k := append(append([]byte(roachKey), 0), parseVersion(versionStr)...) @@ -664,7 +668,7 @@ func parseVersion(versionStr string) []byte { } ret := AppendTimestamp([]byte{0x00}, wallTime, uint32(logicalTime)) if len(ret) != 14 && len(ret) != 10 { - panic(fmt.Sprintf("expected 10 or 14-length ret got %d", len(ret))) + panic(errors.AssertionFailedf("expected 10 or 14-length ret got %d", len(ret))) } validateEngineKey.MustValidate(ret) // TODO(jackson): Refactor to allow us to generate a suffix without a @@ -675,7 +679,7 @@ func parseVersion(versionStr string) []byte { // Parse as a hex-encoded version. var version []byte if _, err := fmt.Sscanf(versionStr, "%X", &version); err != nil { - panic(fmt.Sprintf("invalid version string %q", versionStr)) + panic(errors.AssertionFailedf("invalid version string %q", versionStr)) } return append(version, byte(len(version)+1)) } diff --git a/compaction.go b/compaction.go index ef64507b52a..34beba60dc7 100644 --- a/compaction.go +++ b/compaction.go @@ -188,6 +188,10 @@ func (k compactionKind) String() string { return "?" 
} +func (k compactionKind) SafeFormat(w redact.SafePrinter, _ rune) { + w.Print(redact.SafeString(k.String())) +} + // compactingOrFlushing returns "flushing" if the compaction kind is a flush, // otherwise it returns "compacting". func (k compactionKind) compactingOrFlushing() string { @@ -1690,9 +1694,9 @@ func (d *DB) flush1() (bytesFlushed uint64, err error) { for i := 0; i < n; i++ { if logNum := d.mu.mem.queue[i].logNum; logNum >= minUnflushedLogNum { panic(errors.AssertionFailedf("logNum invariant violated: flushing %d items; %d:type=%T,logNum=%d; %d:type=%T,logNum=%d", - n, - i, d.mu.mem.queue[i].flushable, logNum, - n, d.mu.mem.queue[n].flushable, minUnflushedLogNum)) + errors.Safe(n), + errors.Safe(i), errors.Safe(d.mu.mem.queue[i].flushable), logNum, + errors.Safe(n), errors.Safe(d.mu.mem.queue[n].flushable), minUnflushedLogNum)) } } } @@ -1837,7 +1841,7 @@ func (d *DB) flush1() (bytesFlushed uint64, err error) { d.clearCompactingState(c, err != nil) if c.UsesBurstConcurrency() { if v := d.mu.compact.burstConcurrency.Add(-1); v < 0 { - panic(errors.AssertionFailedf("burst concurrency underflow: %d", v)) + panic(errors.AssertionFailedf("burst concurrency underflow: %d", errors.Safe(v))) } } delete(d.mu.compact.inProgress, c) @@ -2101,7 +2105,7 @@ func (d *DB) runPickedCompaction(pc pickedCompaction, grantHandle CompactionGran } } if doneChannel == nil { - panic(errors.AssertionFailedf("did not find manual compaction with id %d", pc.ManualID())) + panic(errors.AssertionFailedf("did not find manual compaction with id %d", errors.Safe(pc.ManualID()))) } } @@ -2267,7 +2271,7 @@ func (d *DB) compact(c compaction, errChannel chan error) { } if c.UsesBurstConcurrency() { if v := d.mu.compact.burstConcurrency.Add(-1); v < 0 { - panic(errors.AssertionFailedf("burst concurrency underflow: %d", v)) + panic(errors.AssertionFailedf("burst concurrency underflow: %d", errors.Safe(v))) } } delete(d.mu.compact.inProgress, c) diff --git a/compaction_picker.go 
b/compaction_picker.go index d7eaa6cd98e..9915409cd0c 100644 --- a/compaction_picker.go +++ b/compaction_picker.go @@ -281,8 +281,8 @@ func newPickedTableCompaction( panic("base level cannot be 0") } if startLevel > 0 && startLevel < baseLevel { - panic(fmt.Sprintf("invalid compaction: start level %d should not be empty (base level %d)", - startLevel, baseLevel)) + panic(errors.AssertionFailedf("invalid compaction: start level %d should not be empty (base level %d)", + errors.Safe(startLevel), errors.Safe(baseLevel))) } pc := &pickedTableCompaction{ @@ -514,7 +514,7 @@ func (pc *pickedTableCompaction) maybeGrow( func (pc *pickedTableCompaction) maybeGrowL0ForBase(cmp base.Compare, maxExpandedBytes uint64) { if invariants.Enabled { if pc.startLevel.level != 0 { - panic(fmt.Sprintf("pc.startLevel.level is %d, expected 0", pc.startLevel.level)) + panic(errors.AssertionFailedf("pc.startLevel.level is %d, expected 0", errors.Safe(pc.startLevel.level))) } } @@ -1664,7 +1664,7 @@ func (p *compactionPickerByScore) pickedCompactionFromCandidateFile( } } if !found { - panic(fmt.Sprintf("file %s not found in level %d as expected", candidate.TableNum, startLevel)) + panic(errors.AssertionFailedf("file %s not found in level %d as expected", candidate.TableNum, errors.Safe(startLevel))) } } diff --git a/compaction_picker_test.go b/compaction_picker_test.go index 4f2a4847baf..e11785478e4 100644 --- a/compaction_picker_test.go +++ b/compaction_picker_test.go @@ -1638,7 +1638,7 @@ func TestCompactionPickerScores(t *testing.T) { } }() default: - panic(fmt.Sprintf("unrecognized `wait-for-compaction` value: %q", waitFor)) + t.Fatalf("unrecognized `wait-for-compaction` value: %q", waitFor) } buf.Reset() diff --git a/data_test.go b/data_test.go index cbb5a0c0d37..1348ae667fb 100644 --- a/data_test.go +++ b/data_test.go @@ -371,7 +371,7 @@ func printIterState( if iter.Valid() { hasPoint, hasRange := iter.HasPointAndRange() if hasPoint || !hasRange { - panic(fmt.Sprintf("pebble: 
unexpected HasPointAndRange (%t, %t)", hasPoint, hasRange)) + panic(errors.AssertionFailedf("pebble: unexpected HasPointAndRange (%t, %t)", hasPoint, hasRange)) } start, end := iter.RangeBounds() fmt.Fprintf(b, "%s [%s-%s)", iter.Key(), formatASCIIKey(start), formatASCIIKey(end)) diff --git a/db.go b/db.go index 14856b3efbe..21a49ccf180 100644 --- a/db.go +++ b/db.go @@ -7,7 +7,6 @@ package pebble // import "github.com/cockroachdb/pebble" import ( "context" - "fmt" "io" "reflect" "slices" @@ -796,7 +795,7 @@ func (d *DB) applyInternal(batch *Batch, opts *WriteOptions, noSyncWait bool) er return ErrReadOnly } if batch.db != nil && batch.db != d { - panic(fmt.Sprintf("pebble: batch db mismatch: %p != %p", batch.db, d)) + panic(errors.AssertionFailedf("pebble: batch db mismatch: %p != %p", errors.Safe(batch.db), errors.Safe(d))) } sync := opts.GetSync() @@ -805,7 +804,7 @@ func (d *DB) applyInternal(batch *Batch, opts *WriteOptions, noSyncWait bool) er } if fmv := d.FormatMajorVersion(); fmv < batch.minimumFormatMajorVersion { - panic(fmt.Sprintf( + panic(errors.AssertionFailedf( "pebble: batch requires at least format major version %d (current: %d)", batch.minimumFormatMajorVersion, fmv, )) @@ -995,7 +994,7 @@ func assertZeroed(v reflect.Value) error { return assertZeroed(v.Elem()) case reflect.Slice: if v.Len() > 0 { - return errors.AssertionFailedf("%s is not zeroed (%d len): %#v", typ.Name(), v.Len(), v) + return errors.AssertionFailedf("%s is not zeroed (%d len): %#v", errors.Safe(typ.Name()), errors.Safe(v.Len()), v) } resliced := v.Slice(0, v.Cap()) for i := 0; i < resliced.Len(); i++ { @@ -1015,7 +1014,7 @@ func assertZeroed(v reflect.Value) error { } return nil } - return errors.AssertionFailedf("%s (%s) is not zeroed: %#v", typ.Name(), typ.Kind(), v) + return errors.AssertionFailedf("%s (%s) is not zeroed: %#v", errors.Safe(typ.Name()), typ.Kind(), v) } func newIterAlloc() *iterAlloc { diff --git a/file_cache.go b/file_cache.go index c1f91b33798..a3c47dcef42 
100644 --- a/file_cache.go +++ b/file_cache.go @@ -291,7 +291,7 @@ func (h *fileCacheHandle) findOrCreateBlob( func (h *fileCacheHandle) Evict(fileNum base.DiskFileNum, fileType base.FileType) { defer func() { if p := recover(); p != nil { - panic(fmt.Sprintf("pebble: evicting in-use file %s(%s): %v", fileNum, fileType, p)) + panic(errors.AssertionFailedf("pebble: evicting in-use file %s(%s): %v", fileNum, fileType, errors.Safe(p))) } }() h.fileCache.c.Evict(fileCacheKey{handle: h, fileNum: fileNum, fileType: fileType}) @@ -429,7 +429,7 @@ func (c *FileCache) Ref() { // We don't want the reference count to ever go from 0 -> 1, // cause a reference count of 0 implies that we've closed the cache. if v <= 1 { - panic(fmt.Sprintf("pebble: inconsistent reference count: %d", v)) + panic(errors.AssertionFailedf("pebble: inconsistent reference count: %d", errors.Safe(v))) } } @@ -438,7 +438,7 @@ func (c *FileCache) Unref() { v := c.refs.Add(-1) switch { case v < 0: - panic(fmt.Sprintf("pebble: inconsistent reference count: %d", v)) + panic(errors.AssertionFailedf("pebble: inconsistent reference count: %d", errors.Safe(v))) case v == 0: c.c.Close() c.c = genericcache.Cache[fileCacheKey, fileCacheValue, initFileOpts]{} diff --git a/flushable.go b/flushable.go index ba8e70fd35d..766e9476feb 100644 --- a/flushable.go +++ b/flushable.go @@ -6,7 +6,6 @@ package pebble import ( "context" - "fmt" "sync/atomic" "time" @@ -122,7 +121,7 @@ type flushableEntry struct { func (e *flushableEntry) readerRef() { switch v := e.readerRefs.Add(1); { case v <= 1: - panic(fmt.Sprintf("pebble: inconsistent reference count: %d", v)) + panic(errors.AssertionFailedf("pebble: inconsistent reference count: %d", errors.Safe(v))) } } @@ -141,7 +140,7 @@ func (e *flushableEntry) readerUnrefHelper( ) { switch v := e.readerRefs.Add(-1); { case v < 0: - panic(fmt.Sprintf("pebble: inconsistent reference count: %d", v)) + panic(errors.AssertionFailedf("pebble: inconsistent reference count: %d", 
errors.Safe(v))) case v == 0: if e.releaseMemAccounting == nil { panic("pebble: memtable reservation already released") diff --git a/format_major_version.go b/format_major_version.go index b18766486d5..7f4cd9dfe3c 100644 --- a/format_major_version.go +++ b/format_major_version.go @@ -287,7 +287,7 @@ func (v FormatMajorVersion) resolveDefault() FormatMajorVersion { return FormatMinSupported } if v < FormatMinSupported || v > internalFormatNewest { - panic(fmt.Sprintf("pebble: unsupported format major version: %s", v)) + panic(errors.AssertionFailedf("pebble: unsupported format major version: %s", v)) } return v } @@ -335,7 +335,7 @@ func (v FormatMajorVersion) MaxBlobFileFormat() blob.FileFormat { case v >= FormatValueSeparation: return blob.FileFormatV1 default: - panic(fmt.Sprintf("pebble: format major version %s does not support blob files", v)) + panic(errors.AssertionFailedf("pebble: format major version %s does not support blob files", v)) } } diff --git a/ingest.go b/ingest.go index 8c852c8b2b3..c846a07cf32 100644 --- a/ingest.go +++ b/ingest.go @@ -6,7 +6,6 @@ package pebble import ( "context" - "fmt" "slices" "sort" "time" @@ -2393,8 +2392,8 @@ func (d *DB) ingestApply( return versionUpdate{}, err } if isShared && f.Level < sharedLevelsStart { - panic(fmt.Sprintf("cannot slot a shared file higher than the highest shared level: %d < %d", - f.Level, sharedLevelsStart)) + panic(errors.AssertionFailedf("cannot slot a shared file higher than the highest shared level: %d < %d", + errors.Safe(f.Level), errors.Safe(sharedLevelsStart))) } f.Meta = m levelMetrics := metrics.level(f.Level) @@ -2526,7 +2525,7 @@ func (d *DB) ingestApply( d.opts.Comparer.FormatKey(exciseSpan.Start), d.opts.Comparer.FormatKey(exciseSpan.End), exciseSeqNum, d.opts.Comparer.FormatKey(efos.protectedRanges[i].Start), d.opts.Comparer.FormatKey(efos.protectedRanges[i].End), efos.seqNum, - efos.hasTransitioned(), efos.isClosed(), d.mu.versions.visibleSeqNum.Load())) + 
errors.Safe(efos.hasTransitioned()), errors.Safe(efos.isClosed()), d.mu.versions.visibleSeqNum.Load())) } } } diff --git a/internal/arenaskl/arena.go b/internal/arenaskl/arena.go index b69dd7ec33c..7fbeb43d687 100644 --- a/internal/arenaskl/arena.go +++ b/internal/arenaskl/arena.go @@ -47,7 +47,7 @@ const MaxArenaSize = min(math.MaxUint32, math.MaxInt) func NewArena(buf []byte) *Arena { if len(buf) > MaxArenaSize { if invariants.Enabled { - panic(errors.AssertionFailedf("attempting to create arena of size %d", len(buf))) + panic(errors.AssertionFailedf("attempting to create arena of size %d", errors.Safe(len(buf)))) } buf = buf[:MaxArenaSize] } @@ -84,7 +84,7 @@ func (a *Arena) Capacity() uint32 { // requested size but don't use those extra bytes). func (a *Arena) alloc(size, alignment, overflow uint32) (uint32, error) { if invariants.Enabled && (alignment&(alignment-1)) != 0 { - panic(errors.AssertionFailedf("invalid alignment %d", alignment)) + panic(errors.AssertionFailedf("invalid alignment %d", errors.Safe(alignment))) } // Verify that the arena isn't already full. 
origSize := a.n.Load() diff --git a/internal/ascii/table/table.go b/internal/ascii/table/table.go index 6f4d49a02ab..75c186fc6b8 100644 --- a/internal/ascii/table/table.go +++ b/internal/ascii/table/table.go @@ -10,6 +10,7 @@ import ( "strconv" "github.com/cockroachdb/crlib/crhumanize" + "github.com/cockroachdb/errors" "github.com/cockroachdb/pebble/internal/ascii" "golang.org/x/exp/constraints" ) @@ -48,7 +49,7 @@ func Define[T any](fields ...Element) Layout[T] { for i := range fields { if f, ok := fields[i].(Field[T]); ok { if h := f.header(); len(h) > f.width() { - panic(fmt.Sprintf("header %q is too long for column %d", h, i)) + panic(errors.AssertionFailedf("header %q is too long for column %d", errors.Safe(h), errors.Safe(i))) } } } diff --git a/internal/base/directory_lock.go b/internal/base/directory_lock.go index e947680382e..0ffc5bdae39 100644 --- a/internal/base/directory_lock.go +++ b/internal/base/directory_lock.go @@ -87,7 +87,7 @@ func LockDirectory(dirname string, fs vfs.FS) (*DirLock, error) { l.refs.Store(1) invariants.SetFinalizer(l, func(obj interface{}) { if refs := obj.(*DirLock).refs.Load(); refs > 0 { - panic(errors.AssertionFailedf("lock for %q finalized with %d refs", dirname, refs)) + panic(errors.AssertionFailedf("lock for %q finalized with %d refs", errors.Safe(dirname), errors.Safe(refs))) } }) return l, nil @@ -135,7 +135,7 @@ func (l *DirLock) Close() error { if v > 0 { return nil } else if v < 0 { - return errors.AssertionFailedf("pebble: unexpected %q DirLock reference count %d", l.dirname, v) + return errors.AssertionFailedf("pebble: unexpected %q DirLock reference count %d", errors.Safe(l.dirname), errors.Safe(v)) } defer func() { l.fileLock = nil }() return l.fileLock.Close() diff --git a/internal/base/filenames.go b/internal/base/filenames.go index fc12d6eb395..006903566e2 100644 --- a/internal/base/filenames.go +++ b/internal/base/filenames.go @@ -158,7 +158,7 @@ func FileTypeFromName(name string) FileType { return FileType(i) 
} } - panic(fmt.Sprintf("unknown file type: %q", name)) + panic(errors.AssertionFailedf("unknown file type: %q", errors.Safe(name))) } // SafeFormat implements redact.SafeFormatter. diff --git a/internal/base/internal.go b/internal/base/internal.go index 5ecabbfb4cc..d003a244ed9 100644 --- a/internal/base/internal.go +++ b/internal/base/internal.go @@ -377,7 +377,7 @@ func ParseSeqNum(s string) SeqNum { } n, err := strconv.ParseUint(s, 10, 64) if err != nil { - panic(fmt.Sprintf("error parsing %q as seqnum: %s", s, err)) + panic(errors.AssertionFailedf("error parsing %q as seqnum: %s", errors.Safe(s), err)) } seqNum := SeqNum(n) if batch { @@ -390,7 +390,7 @@ func ParseSeqNum(s string) SeqNum { func ParseKind(s string) InternalKeyKind { kind, ok := kindsMap[s] if !ok { - panic(fmt.Sprintf("unknown kind: %q", s)) + panic(errors.AssertionFailedf("unknown kind: %q", errors.Safe(s))) } return kind } @@ -609,14 +609,14 @@ func ParseInternalKey(s string) InternalKey { sep1 := strings.Index(s, "#") sep2 := strings.Index(s, ",") if sep1 == -1 || sep2 == -1 || sep2 < sep1 { - panic(fmt.Sprintf("invalid internal key %q", s)) + panic(errors.AssertionFailedf("invalid internal key %q", s)) } userKey := []byte(s[:sep1]) seqNum := ParseSeqNum(s[sep1+1 : sep2]) kind, ok := kindsMap[s[sep2+1:]] if !ok { - panic(fmt.Sprintf("unknown kind: %q", s[sep2+1:])) + panic(errors.AssertionFailedf("unknown kind: %q", errors.Safe(s[sep2+1:]))) } return MakeInternalKey(userKey, seqNum, kind) } @@ -627,7 +627,7 @@ func ParseInternalKV(s string) InternalKV { // Cut the key at the first ":". 
sepIdx := strings.Index(s, ":") if sepIdx == -1 { - panic(fmt.Sprintf("invalid KV %q", s)) + panic(errors.AssertionFailedf("invalid KV %q", s)) } keyStr := strings.TrimSpace(s[:sepIdx]) valStr := strings.TrimSpace(s[sepIdx+1:]) @@ -643,7 +643,7 @@ func ParseInternalKeyRange(s string) (start, end InternalKey) { s, ok2 := strings.CutSuffix(s, "]") x := strings.Split(s, "-") if !ok1 || !ok2 || len(x) != 2 { - panic(fmt.Sprintf("invalid key range %q", s)) + panic(errors.AssertionFailedf("invalid key range %q", s)) } return ParseInternalKey(x[0]), ParseInternalKey(x[1]) } diff --git a/internal/base/level.go b/internal/base/level.go index 7040247c43f..df9d68cc79a 100644 --- a/internal/base/level.go +++ b/internal/base/level.go @@ -38,7 +38,7 @@ func (l Level) String() string { func MakeLevel(l int) Level { if invariants.Enabled && l < 0 || l >= validBit { - panic(errors.AssertionFailedf("invalid level: %d", l)) + panic(errors.AssertionFailedf("invalid level: %d", errors.Safe(l))) } return Level{uint8(l) | validBit} } diff --git a/internal/base/test_utils.go b/internal/base/test_utils.go index 89856b652db..9d9e4d97f7c 100644 --- a/internal/base/test_utils.go +++ b/internal/base/test_utils.go @@ -6,7 +6,6 @@ package base import ( "context" - "fmt" "io" "strconv" "strings" @@ -281,7 +280,7 @@ func ParseUserKeyBounds(s string) UserKeyBounds { first, last, s := s[0], s[len(s)-1], s[1:len(s)-1] start, end, ok := strings.Cut(s, ", ") if !ok || first != '[' || (last != ']' && last != ')') { - panic(fmt.Sprintf("invalid bounds %q", s)) + panic(errors.AssertionFailedf("invalid bounds %q", s)) } return UserKeyBoundsEndExclusiveIf([]byte(start), []byte(end), last == ')') } diff --git a/internal/cache/cache.go b/internal/cache/cache.go index 2d25f6a1111..047e7fb57d4 100644 --- a/internal/cache/cache.go +++ b/internal/cache/cache.go @@ -152,7 +152,7 @@ func NewWithShards(size int64, shards int) *Cache { func (c *Cache) Ref() { v := c.refs.Add(1) if v <= 1 { - 
panic(fmt.Sprintf("pebble: inconsistent reference count: %d", v)) + panic(errors.AssertionFailedf("pebble: inconsistent reference count: %d", errors.Safe(v))) } c.trace("ref", v) } @@ -163,7 +163,7 @@ func (c *Cache) Unref() { c.trace("unref", v) switch { case v < 0: - panic(fmt.Sprintf("pebble: inconsistent reference count: %d", v)) + panic(errors.AssertionFailedf("pebble: inconsistent reference count: %d", errors.Safe(v))) case v == 0: c.destroy() } @@ -328,7 +328,7 @@ func (c *Handle) GetWithReadHandle( // REQUIRES: value.refs() == 1 func (c *Handle) Set(fileNum base.DiskFileNum, offset uint64, value *Value) { if n := value.refs(); n != 1 { - panic(errors.AssertionFailedf("pebble: Value has already been added to the cache: refs=%d", n)) + panic(errors.AssertionFailedf("pebble: Value has already been added to the cache: refs=%d", errors.Safe(n))) } k := makeKey(c.id, fileNum, offset) c.cache.getShard(k).set(k, value, false /*markAccessed*/) diff --git a/internal/cache/clockpro.go b/internal/cache/clockpro.go index dfda772a9f6..ca1e9e069d0 100644 --- a/internal/cache/clockpro.go +++ b/internal/cache/clockpro.go @@ -25,6 +25,7 @@ import ( "sync" "sync/atomic" + "github.com/cockroachdb/errors" "github.com/cockroachdb/pebble/internal/base" "github.com/cockroachdb/pebble/internal/invariants" ) @@ -265,14 +266,14 @@ func (c *shard) checkConsistency() { // See the comment above the count{Hot,Cold,Test} fields. 
switch { case c.sizeHot < 0 || c.sizeCold < 0 || c.sizeTest < 0 || c.countHot < 0 || c.countCold < 0 || c.countTest < 0: - panic(fmt.Sprintf("pebble: unexpected negative: %d (%d bytes) hot, %d (%d bytes) cold, %d (%d bytes) test", - c.countHot, c.sizeHot, c.countCold, c.sizeCold, c.countTest, c.sizeTest)) + panic(errors.AssertionFailedf("pebble: unexpected negative: %d (%d bytes) hot, %d (%d bytes) cold, %d (%d bytes) test", + errors.Safe(c.countHot), errors.Safe(c.sizeHot), errors.Safe(c.countCold), errors.Safe(c.sizeCold), errors.Safe(c.countTest), errors.Safe(c.sizeTest))) case c.sizeHot > 0 && c.countHot == 0: - panic(fmt.Sprintf("pebble: mismatch %d hot size, %d hot count", c.sizeHot, c.countHot)) + panic(errors.AssertionFailedf("pebble: mismatch %d hot size, %d hot count", errors.Safe(c.sizeHot), errors.Safe(c.countHot))) case c.sizeCold > 0 && c.countCold == 0: - panic(fmt.Sprintf("pebble: mismatch %d cold size, %d cold count", c.sizeCold, c.countCold)) + panic(errors.AssertionFailedf("pebble: mismatch %d cold size, %d cold count", errors.Safe(c.sizeCold), errors.Safe(c.countCold))) case c.sizeTest > 0 && c.countTest == 0: - panic(fmt.Sprintf("pebble: mismatch %d test size, %d test count", c.sizeTest, c.countTest)) + panic(errors.AssertionFailedf("pebble: mismatch %d test size, %d test count", errors.Safe(c.sizeTest), errors.Safe(c.countTest))) } } @@ -581,8 +582,8 @@ func (c *shard) runHandCold(countColdDebug, sizeColdDebug int64) { // arguments will appear within stack traces should we encounter // a reproduction. 
if c.countCold != countColdDebug || c.sizeCold != sizeColdDebug { - panic(fmt.Sprintf("runHandCold: cold count and size are %d, %d, arguments are %d and %d", - c.countCold, c.sizeCold, countColdDebug, sizeColdDebug)) + panic(errors.AssertionFailedf("runHandCold: cold count and size are %d, %d, arguments are %d and %d", + errors.Safe(c.countCold), errors.Safe(c.sizeCold), errors.Safe(countColdDebug), errors.Safe(sizeColdDebug))) } e := c.handCold @@ -643,7 +644,7 @@ func (c *shard) runHandTest() { // sizeCold is > 0, so assert that countCold == 0. See the // comment above count{Hot,Cold,Test}. if c.countCold == 0 { - panic(fmt.Sprintf("pebble: mismatch %d cold size, %d cold count", c.sizeCold, c.countCold)) + panic(errors.AssertionFailedf("pebble: mismatch %d cold size, %d cold count", errors.Safe(c.sizeCold), errors.Safe(c.countCold))) } c.runHandCold(c.countCold, c.sizeCold) diff --git a/internal/cache/read_shard.go b/internal/cache/read_shard.go index 26258607588..c42b5b08107 100644 --- a/internal/cache/read_shard.go +++ b/internal/cache/read_shard.go @@ -295,7 +295,7 @@ func (e *readEntry) unrefAndTryRemoveFromMap() { func (e *readEntry) setReadValue(v *Value) { if n := v.refs(); n != 1 { - panic(errors.AssertionFailedf("pebble: Value has already been added to the cache: refs=%d", n)) + panic(errors.AssertionFailedf("pebble: Value has already been added to the cache: refs=%d", errors.Safe(n))) } concurrentRequesters := false e.mu.Lock() diff --git a/internal/cache/refcnt_tracing.go b/internal/cache/refcnt_tracing.go index 151c68d08e9..0f80518ee4d 100644 --- a/internal/cache/refcnt_tracing.go +++ b/internal/cache/refcnt_tracing.go @@ -12,6 +12,8 @@ import ( "strings" "sync" "sync/atomic" + + "github.com/cockroachdb/errors" ) // refcnt provides an atomic reference count, along with a tracing facility for @@ -35,7 +37,7 @@ func (v *refcnt) refs() int32 { func (v *refcnt) acquire() { switch n := v.val.Add(1); { case n <= 1: - panic(fmt.Sprintf("pebble: inconsistent 
reference count: %d", n)) + panic(errors.AssertionFailedf("pebble: inconsistent reference count: %d", errors.Safe(n))) } v.trace("acquire") } @@ -44,7 +46,7 @@ func (v *refcnt) release() bool { n := v.val.Add(-1) switch { case n < 0: - panic(fmt.Sprintf("pebble: inconsistent reference count: %d", n)) + panic(errors.AssertionFailedf("pebble: inconsistent reference count: %d", errors.Safe(n))) } v.trace("release") return n == 0 diff --git a/internal/cache/value.go b/internal/cache/value.go index bb77c2f988b..03a3bc7bb11 100644 --- a/internal/cache/value.go +++ b/internal/cache/value.go @@ -144,7 +144,7 @@ func (v *Value) Release() { // Free. Do not call Free on a value that has been added to the cache. func Free(v *Value) { if n := v.refs(); n > 1 { - panic(errors.AssertionFailedf("pebble: Value has been added to the cache: refs=%d", n)) + panic(errors.AssertionFailedf("pebble: Value has been added to the cache: refs=%d", errors.Safe(n))) } v.Release() } diff --git a/internal/compact/iterator.go b/internal/compact/iterator.go index 378ebe828d5..5c562a0331a 100644 --- a/internal/compact/iterator.go +++ b/internal/compact/iterator.go @@ -1351,7 +1351,7 @@ func (i *Iter) deleteSizedNext() *base.InternalKV { } if i.iterStripeChange == sameStripe { - panic(errors.AssertionFailedf("unexpectedly found iter stripe change = %d", i.iterStripeChange)) + panic(errors.AssertionFailedf("unexpectedly found iter stripe change = %d", errors.Safe(i.iterStripeChange))) } // We landed outside the original stripe. Reset skip. 
i.skip = false diff --git a/internal/compact/splitting.go b/internal/compact/splitting.go index 48b99bc7441..50465087124 100644 --- a/internal/compact/splitting.go +++ b/internal/compact/splitting.go @@ -9,6 +9,7 @@ import ( "fmt" "slices" + "github.com/cockroachdb/errors" "github.com/cockroachdb/pebble/internal/base" "github.com/cockroachdb/pebble/internal/invariants" "github.com/cockroachdb/pebble/internal/manifest" @@ -280,7 +281,7 @@ func (s *OutputSplitter) SplitKey() []byte { s.frontier.Update(nil) if s.splitKey != nil { if invariants.Enabled && s.cmp(s.splitKey, s.startKey) <= 0 { - panic(fmt.Sprintf("splitKey %q <= startKey %q", s.splitKey, s.startKey)) + panic(errors.AssertionFailedf("splitKey %q <= startKey %q", s.splitKey, s.startKey)) } return s.splitKey } diff --git a/internal/compression/minlz.go b/internal/compression/minlz.go index bd09040429e..acb2ade56e5 100644 --- a/internal/compression/minlz.go +++ b/internal/compression/minlz.go @@ -45,7 +45,7 @@ func getMinlzCompressor(level int) Compressor { case minlz.LevelBalanced: return minlzCompressorBalanced default: - panic(errors.AssertionFailedf("unexpected MinLZ level %d", level)) + panic(errors.AssertionFailedf("unexpected MinLZ level %d", errors.Safe(level))) } } diff --git a/internal/datadrivenutil/datadrivenutil.go b/internal/datadrivenutil/datadrivenutil.go index 1e0036f985b..57dc338a2ef 100644 --- a/internal/datadrivenutil/datadrivenutil.go +++ b/internal/datadrivenutil/datadrivenutil.go @@ -4,10 +4,11 @@ package datadrivenutil import ( "encoding/hex" - "fmt" "strconv" "strings" "unicode" + + "github.com/cockroachdb/errors" ) // Lines wraps a string, providing facilities for parsing individual lines. 
@@ -88,7 +89,7 @@ func (fs Fields) KeyValue(key string) (Value, bool) { func (fs Fields) MustKeyValue(key string) Value { f, ok := fs.KeyValue(key) if !ok { - panic(fmt.Sprintf("unable to find required key-value pair %q", key)) + panic(errors.AssertionFailedf("unable to find required key-value pair %q", key)) } return f } diff --git a/internal/invariants/on.go b/internal/invariants/on.go index 3826e6bf627..37639c3810c 100644 --- a/internal/invariants/on.go +++ b/internal/invariants/on.go @@ -7,9 +7,10 @@ package invariants import ( - "fmt" "math/rand/v2" "slices" + + "github.com/cockroachdb/errors" ) // Sometimes returns true percent% of the time if invariants are Enabled (i.e. @@ -116,7 +117,7 @@ func (v *Value[V]) Set(inner V) { // non-invariant builds. func CheckBounds[T Integer](i T, n T) { if i < 0 || i >= n { - panic(fmt.Sprintf("index %d out of bounds [0, %d)", i, n)) + panic(errors.AssertionFailedf("index %d out of bounds [0, %d)", errors.Safe(i), errors.Safe(n))) } } @@ -124,7 +125,7 @@ func CheckBounds[T Integer](i T, n T) { // in non-invariant builds. 
func SafeSub[T Integer](a, b T) T { if a < b { - panic(fmt.Sprintf("underflow: %d - %d", a, b)) + panic(errors.AssertionFailedf("underflow: %d - %d", errors.Safe(a), errors.Safe(b))) } return a - b } diff --git a/internal/keyspan/fragmenter.go b/internal/keyspan/fragmenter.go index 6bb5db61998..e20edd8a353 100644 --- a/internal/keyspan/fragmenter.go +++ b/internal/keyspan/fragmenter.go @@ -5,8 +5,7 @@ package keyspan import ( - "fmt" - + "github.com/cockroachdb/errors" "github.com/cockroachdb/pebble/internal/base" "github.com/cockroachdb/pebble/internal/invariants" ) @@ -43,10 +42,10 @@ type Fragmenter struct { func (f *Fragmenter) checkInvariants(buf []Span) { for i := 1; i < len(buf); i++ { if f.Cmp(buf[i].Start, buf[i].End) >= 0 { - panic(fmt.Sprintf("pebble: empty pending span invariant violated: %s", buf[i])) + panic(errors.AssertionFailedf("pebble: empty pending span invariant violated: %s", buf[i])) } if f.Cmp(buf[i-1].Start, buf[i].Start) != 0 { - panic(fmt.Sprintf("pebble: pending span invariant violated: %s %s", + panic(errors.AssertionFailedf("pebble: pending span invariant violated: %s %s", f.Format(buf[i-1].Start), f.Format(buf[i].Start))) } } @@ -143,7 +142,7 @@ func (f *Fragmenter) Add(s Span) { if f.flushedKey != nil { switch c := f.Cmp(s.Start, f.flushedKey); { case c < 0: - panic(fmt.Sprintf("pebble: start key (%s) < flushed key (%s)", + panic(errors.AssertionFailedf("pebble: start key (%s) < flushed key (%s)", f.Format(s.Start), f.Format(f.flushedKey))) } } @@ -161,7 +160,7 @@ func (f *Fragmenter) Add(s Span) { // to compare against the first one. 
switch c := f.Cmp(f.pending[0].Start, s.Start); { case c > 0: - panic(fmt.Sprintf("pebble: keys must be added in order: %s > %s", + panic(errors.AssertionFailedf("pebble: keys must be added in order: %s > %s", f.Format(f.pending[0].Start), f.Format(s.Start))) case c == 0: // The new span has the same start key as the existing pending diff --git a/internal/keyspan/fragmenter_test.go b/internal/keyspan/fragmenter_test.go index 67decfbfa2e..b8143b2c75a 100644 --- a/internal/keyspan/fragmenter_test.go +++ b/internal/keyspan/fragmenter_test.go @@ -217,7 +217,7 @@ func TestFragmenter_EmitOrder(t *testing.T) { for line := range crstrings.LinesSeq(d.Input) { fields := strings.Fields(line) if len(fields) != 2 { - panic(fmt.Sprintf("datadriven test: expect 2 fields, found %d", len(fields))) + t.Fatalf("datadriven test: expect 2 fields, found %d", len(fields)) } k := base.ParseInternalKey(fields[0]) f.Add(Span{ @@ -230,7 +230,8 @@ func TestFragmenter_EmitOrder(t *testing.T) { f.Finish() return buf.String() default: - panic(fmt.Sprintf("unrecognized command %q", d.Cmd)) + t.Fatalf("unrecognized command %q", d.Cmd) + return "" } }) } diff --git a/internal/keyspan/interleaving_iter.go b/internal/keyspan/interleaving_iter.go index a7afd14d570..3a8fec378c9 100644 --- a/internal/keyspan/interleaving_iter.go +++ b/internal/keyspan/interleaving_iter.go @@ -12,6 +12,7 @@ import ( "github.com/cockroachdb/pebble/internal/base" "github.com/cockroachdb/pebble/internal/invariants" "github.com/cockroachdb/pebble/internal/treesteps" + "github.com/cockroachdb/redact" ) // A SpanMask may be used to configure an interleaving iterator to skip point @@ -190,6 +191,33 @@ const ( posKeyspanEnd ) +// String implements fmt.Stringer. 
+func (p interleavePos) String() string { + switch p { + case posUninitialized: + return "uninitialized" + case posSeekedBeyondLowerBound: + return "seekedBeyondLowerBound" + case posSeekedBeyondUpperBound: + return "seekedBeyondUpperBound" + case posExhausted: + return "exhausted" + case posPointKey: + return "pointKey" + case posKeyspanStart: + return "keyspanStart" + case posKeyspanEnd: + return "keyspanEnd" + default: + return fmt.Sprintf("unknown(%d)", int8(p)) + } +} + +// SafeFormat implements redact.SafeFormatter. +func (p interleavePos) SafeFormat(w redact.SafePrinter, _ rune) { + w.Print(redact.SafeString(p.String())) +} + // Assert that *InterleavingIter implements the InternalIterator interface. var _ base.InternalIterator = &InterleavingIter{} @@ -753,7 +781,7 @@ func (i *InterleavingIter) nextPos() { i.enforceBoundsForward() i.computeSmallestPos() default: - panic(fmt.Sprintf("unexpected pos=%d", i.pos)) + panic(errors.AssertionFailedf("unexpected pos=%d", i.pos)) } } @@ -820,7 +848,7 @@ func (i *InterleavingIter) prevPos() { i.pos = posKeyspanStart } default: - panic(fmt.Sprintf("unexpected pos=%d", i.pos)) + panic(errors.AssertionFailedf("unexpected pos=%d", i.pos)) } } @@ -886,7 +914,7 @@ func (i *InterleavingIter) yieldPosition(lowerBound []byte, advance func()) *bas } return i.yieldSyntheticSpanStartMarker(lowerBound) default: - panic(fmt.Sprintf("unexpected interleavePos=%d", i.pos)) + panic(errors.AssertionFailedf("unexpected interleavePos=%d", i.pos)) } } } diff --git a/internal/keyspan/keyspanimpl/merging_iter.go b/internal/keyspan/keyspanimpl/merging_iter.go index a4098a6d45e..66a145e96a0 100644 --- a/internal/keyspan/keyspanimpl/merging_iter.go +++ b/internal/keyspan/keyspanimpl/merging_iter.go @@ -11,6 +11,7 @@ import ( "fmt" "slices" + "github.com/cockroachdb/errors" "github.com/cockroachdb/pebble/internal/base" "github.com/cockroachdb/pebble/internal/invariants" "github.com/cockroachdb/pebble/internal/keyspan" @@ -1017,7 +1018,7 @@ func 
(m *MergingIter) heapRoot() []byte { func (m *MergingIter) synthesizeKeys(dir int8) (bool, *keyspan.Span, error) { if invariants.Enabled { if m.comparer.Compare(m.start, m.end) >= 0 { - panic(fmt.Sprintf("pebble: invariant violation: span start ≥ end: %s >= %s", m.start, m.end)) + panic(errors.AssertionFailedf("pebble: invariant violation: span start ≥ end: %s >= %s", m.start, m.end)) } } diff --git a/internal/keyspan/span.go b/internal/keyspan/span.go index 2b8eaab1eb4..4feeb40e435 100644 --- a/internal/keyspan/span.go +++ b/internal/keyspan/span.go @@ -12,6 +12,7 @@ import ( "strings" "unicode" + "github.com/cockroachdb/errors" "github.com/cockroachdb/pebble/internal/base" ) @@ -551,7 +552,7 @@ func ParseSpan(input string) Span { } for i := 1; i < len(s.Keys); i++ { if s.Keys[i-1].Trailer < s.Keys[i].Trailer { - panic(fmt.Sprintf("span keys not sorted: %s %s", s.Keys[i-1], s.Keys[i])) + panic(errors.AssertionFailedf("span keys not sorted: %s %s", s.Keys[i-1], s.Keys[i])) } } s.KeysOrder = ByTrailerDesc diff --git a/internal/keyspan/test_utils.go b/internal/keyspan/test_utils.go index c487eb643b1..21dc89bc512 100644 --- a/internal/keyspan/test_utils.go +++ b/internal/keyspan/test_utils.go @@ -423,12 +423,12 @@ func runIterOp(w io.Writer, it FragmentIterator, op string) { s, err = it.Last() case "seekge", "seek-ge": if len(fields) == 1 { - panic(fmt.Sprintf("unable to parse iter op %q", op)) + panic(errors.AssertionFailedf("unable to parse iter op %q", op)) } s, err = it.SeekGE([]byte(fields[1])) case "seeklt", "seek-lt": if len(fields) == 1 { - panic(fmt.Sprintf("unable to parse iter op %q", op)) + panic(errors.AssertionFailedf("unable to parse iter op %q", op)) } s, err = it.SeekLT([]byte(fields[1])) case "next": @@ -436,7 +436,7 @@ func runIterOp(w io.Writer, it FragmentIterator, op string) { case "prev": s, err = it.Prev() default: - panic(fmt.Sprintf("unrecognized iter op %q", fields[0])) + panic(errors.AssertionFailedf("unrecognized iter op %q", fields[0])) } 
switch { case err != nil: diff --git a/internal/lint/lint_test.go b/internal/lint/lint_test.go index b2d8564cad3..acebd650543 100644 --- a/internal/lint/lint_test.go +++ b/internal/lint/lint_test.go @@ -191,6 +191,24 @@ func TestLint(t *testing.T) { } }) + // Disallow panic(fmt.Sprintf(...)); use panic(errors.AssertionFailedf(...)) + // instead so that panic messages are not redacted in CockroachDB logs and + // include proper stack traces. + t.Run("TestPanicFmtSprintf", func(t *testing.T) { + t.Parallel() + + if err := stream.ForEach( + stream.Sequence( + dirCmd(t, pkg.Dir, "git", "grep", "-B1", `panic(fmt\.Sprintf(`, "--", "*.go"), + lintIgnore("lint:ignore PanicFmtSprintf"), + stream.GrepNot(`^internal/lint/lint_test.go`), + ), func(s string) { + t.Errorf("\n%s <- please use \"panic(errors.AssertionFailedf(...))\" instead", s) + }); err != nil { + t.Error(err) + } + }) + t.Run("TestFmtErrorf", func(t *testing.T) { t.Parallel() diff --git a/internal/manifest/blob_metadata.go b/internal/manifest/blob_metadata.go index fd0792e8bc4..75a34db131d 100644 --- a/internal/manifest/blob_metadata.go +++ b/internal/manifest/blob_metadata.go @@ -57,11 +57,11 @@ func MakeBlobReference( switch { case valueSize > phys.ValueSize: panic(errors.AssertionFailedf("pebble: blob reference value size %d is greater than the blob file's value size %d", - valueSize, phys.ValueSize)) + errors.Safe(valueSize), errors.Safe(phys.ValueSize))) case valueSize == 0: - panic(errors.AssertionFailedf("pebble: blob reference value size %d is zero", valueSize)) + panic(errors.AssertionFailedf("pebble: blob reference value size %d is zero", errors.Safe(valueSize))) case phys.ValueSize == 0: - panic(errors.AssertionFailedf("pebble: blob file value size %d is zero", phys.ValueSize)) + panic(errors.AssertionFailedf("pebble: blob file value size %d is zero", errors.Safe(phys.ValueSize))) } } return BlobReference{ @@ -207,7 +207,7 @@ func (m *PhysicalBlobFile) Ref() { func (m *PhysicalBlobFile) unref(of 
ObsoleteFilesSet) { refs := m.refs.Add(-1) if refs < 0 { - panic(errors.AssertionFailedf("pebble: refs for blob file %s equal to %d", m.FileNum, refs)) + panic(errors.AssertionFailedf("pebble: refs for blob file %s equal to %d", m.FileNum, errors.Safe(refs))) } else if refs == 0 { of.AddBlob(m) } @@ -888,7 +888,7 @@ func (s *CurrentBlobFileSet) ApplyAndUpdateVersionEdit(ve *VersionEdit) error { if len(cbf.references) == 0 { if cbf.referencedValueSize != 0 { return errors.AssertionFailedf("pebble: referenced value size %d is non-zero for blob file %s with no refs", - cbf.referencedValueSize, cbf.metadata.FileID) + errors.Safe(cbf.referencedValueSize), cbf.metadata.FileID) } // Remove the blob file from any heap it's in. @@ -1007,16 +1007,16 @@ func (s *CurrentBlobFileSet) maybeVerifyHeapStateInvariants() { if invariants.Enabled { for i, cbf := range s.rewrite.candidates.items { if cbf.heapState.heap != &s.rewrite.candidates { - panic(errors.AssertionFailedf("pebble: heap state mismatch %v != %v", cbf.heapState.heap, &s.rewrite.candidates)) + panic(errors.AssertionFailedf("pebble: heap state mismatch %v != %v", errors.Safe(cbf.heapState.heap), errors.Safe(&s.rewrite.candidates))) } else if cbf.heapState.index != i { - panic(errors.AssertionFailedf("pebble: heap index mismatch %d != %d", cbf.heapState.index, i)) + panic(errors.AssertionFailedf("pebble: heap index mismatch %d != %d", errors.Safe(cbf.heapState.index), errors.Safe(i))) } } for i, cbf := range s.rewrite.recentlyCreated.items { if cbf.heapState.heap != &s.rewrite.recentlyCreated { - panic(errors.AssertionFailedf("pebble: heap state mismatch %v != %v", cbf.heapState.heap, &s.rewrite.recentlyCreated)) + panic(errors.AssertionFailedf("pebble: heap state mismatch %v != %v", errors.Safe(cbf.heapState.heap), errors.Safe(&s.rewrite.recentlyCreated))) } else if cbf.heapState.index != i { - panic(errors.AssertionFailedf("pebble: heap index mismatch %d != %d", cbf.heapState.index, i)) + 
panic(errors.AssertionFailedf("pebble: heap index mismatch %d != %d", errors.Safe(cbf.heapState.index), errors.Safe(i))) } } } @@ -1061,9 +1061,9 @@ func (s *currentBlobFileHeap[O]) Len() int { return len(s.items) } func (s *currentBlobFileHeap[O]) Less(i, j int) bool { if invariants.Enabled { if s.items[i].heapState.heap != s { - panic(errors.AssertionFailedf("pebble: heap state mismatch %v != %v", s.items[i].heapState.heap, s)) + panic(errors.AssertionFailedf("pebble: heap state mismatch %v != %v", errors.Safe(s.items[i].heapState.heap), errors.Safe(s))) } else if s.items[j].heapState.heap != s { - panic(errors.AssertionFailedf("pebble: heap state mismatch %v != %v", s.items[j].heapState.heap, s)) + panic(errors.AssertionFailedf("pebble: heap state mismatch %v != %v", errors.Safe(s.items[j].heapState.heap), errors.Safe(s))) } } return s.ordering.less(s.items[i], s.items[j]) @@ -1075,9 +1075,9 @@ func (s *currentBlobFileHeap[O]) Swap(i, j int) { s.items[j].heapState.index = j if invariants.Enabled { if s.items[i].heapState.heap != s { - panic(errors.AssertionFailedf("pebble: heap state mismatch %v != %v", s.items[i].heapState.heap, s)) + panic(errors.AssertionFailedf("pebble: heap state mismatch %v != %v", errors.Safe(s.items[i].heapState.heap), errors.Safe(s))) } else if s.items[j].heapState.heap != s { - panic(errors.AssertionFailedf("pebble: heap state mismatch %v != %v", s.items[j].heapState.heap, s)) + panic(errors.AssertionFailedf("pebble: heap state mismatch %v != %v", errors.Safe(s.items[j].heapState.heap), errors.Safe(s))) } } } diff --git a/internal/manifest/btree.go b/internal/manifest/btree.go index b015fe049f5..7273369c3ff 100644 --- a/internal/manifest/btree.go +++ b/internal/manifest/btree.go @@ -680,8 +680,8 @@ func (n *node[M]) verifyInvariants() { } } if recomputedSubtreeCount != n.subtreeCount() { - panic(fmt.Sprintf("recomputed subtree count (%d) ≠ n.subtreeCount (%d)", - recomputedSubtreeCount, n.subtreeCount())) + 
panic(errors.AssertionFailedf("recomputed subtree count (%d) ≠ n.subtreeCount (%d)", + errors.Safe(recomputedSubtreeCount), errors.Safe(n.subtreeCount()))) } } diff --git a/internal/manifest/l0_sublevels.go b/internal/manifest/l0_sublevels.go index 1d295728c10..5fd48efe0bc 100644 --- a/internal/manifest/l0_sublevels.go +++ b/internal/manifest/l0_sublevels.go @@ -675,7 +675,7 @@ func (s *l0Sublevels) addFileToSublevels(f *TableMetadata) { } f.SubLevel = subLevel if subLevel > len(s.levelFiles) { - panic(errors.AssertionFailedf("chose a sublevel beyond allowed range of sublevels: %d vs 0-%d", subLevel, len(s.levelFiles))) + panic(errors.AssertionFailedf("chose a sublevel beyond allowed range of sublevels: %d vs 0-%d", errors.Safe(subLevel), errors.Safe(len(s.levelFiles)))) } if subLevel == len(s.levelFiles) { s.levelFiles = append(s.levelFiles, []*TableMetadata{f}) @@ -716,11 +716,11 @@ func (s *l0Sublevels) InitCompactingFileInfo(inProgress []L0Compaction) { if invariants.Enabled { bounds := f.UserKeyBounds() if !bytes.Equal(s.orderedIntervals[f.minIntervalIndex].startKey.key, bounds.Start) { - panic(fmt.Sprintf("f.minIntervalIndex in TableMetadata out of sync with intervals in L0Sublevels: %s != %s", + panic(errors.AssertionFailedf("f.minIntervalIndex in TableMetadata out of sync with intervals in L0Sublevels: %s != %s", s.formatKey(s.orderedIntervals[f.minIntervalIndex].startKey.key), s.formatKey(bounds.Start))) } if !bytes.Equal(s.orderedIntervals[f.maxIntervalIndex+1].startKey.key, bounds.End.Key) { - panic(fmt.Sprintf("f.maxIntervalIndex in TableMetadata out of sync with intervals in L0Sublevels: %s != %s", + panic(errors.AssertionFailedf("f.maxIntervalIndex in TableMetadata out of sync with intervals in L0Sublevels: %s != %s", s.formatKey(s.orderedIntervals[f.maxIntervalIndex+1].startKey.key), s.formatKey(bounds.End.Key))) } } @@ -731,7 +731,7 @@ func (s *l0Sublevels) InitCompactingFileInfo(inProgress []L0Compaction) { bounds := f.UserKeyBounds() if
s.cmp(s.orderedIntervals[f.minIntervalIndex].startKey.key, bounds.Start) != 0 || s.cmp(s.orderedIntervals[f.maxIntervalIndex+1].startKey.key, bounds.End.Key) != 0 { - panic(fmt.Sprintf("file %s has inconsistent L0 Sublevel interval bounds: %s-%s, %s-%s", f.TableNum, + panic(errors.AssertionFailedf("file %s has inconsistent L0 Sublevel interval bounds: %s-%s, %s-%s", f.TableNum, s.orderedIntervals[f.minIntervalIndex].startKey.key, s.orderedIntervals[f.maxIntervalIndex+1].startKey.key, bounds.Start, bounds.End.Key)) } @@ -792,7 +792,7 @@ func (s *l0Sublevels) Check() { n := 0 for t := iter.First(); t != nil; n, t = n+1, iter.Next() { if t.L0Index != n { - panic(fmt.Sprintf("t.L0Index out of sync (%d vs %d)", t.L0Index, n)) + panic(errors.AssertionFailedf("t.L0Index out of sync (%d vs %d)", errors.Safe(t.L0Index), errors.Safe(n))) } } if len(s.Levels) != len(s.levelFiles) { @@ -2020,7 +2020,7 @@ func (s *l0Sublevels) extendCandidateToRectangle( if f.IsCompacting() { // TODO(bilal): Do a logger.Fatalf instead of a panic, for // cleaner unwinding and error messages. 
- panic(fmt.Sprintf("expected %s to not be compacting", f.TableNum)) + panic(errors.AssertionFailedf("expected %s to not be compacting", f.TableNum)) } if candidate.isIntraL0 && f.SeqNums.High >= candidate.earliestUnflushedSeqNum { continue @@ -2160,7 +2160,7 @@ func (o *L0Organizer) PerformUpdate(prepared L0PreparedUpdate, newVersion *Versi if invariants.Enabled && invariants.Sometimes(10) { expectedSublevels, err := newL0Sublevels(&o.levelMetadata, o.cmp, o.formatKey, o.flushSplitBytes) if err != nil { - panic(fmt.Sprintf("error when regenerating sublevels: %s", err)) + panic(errors.AssertionFailedf("error when regenerating sublevels: %s", err)) } s1 := describeSublevels(o.formatKey, false /* verbose */, expectedSublevels.Levels) s2 := describeSublevels(o.formatKey, false /* verbose */, newSublevels.Levels) @@ -2168,7 +2168,7 @@ func (o *L0Organizer) PerformUpdate(prepared L0PreparedUpdate, newVersion *Versi // Add verbosity. s1 := describeSublevels(o.formatKey, true /* verbose */, expectedSublevels.Levels) s2 := describeSublevels(o.formatKey, true /* verbose */, newSublevels.Levels) - panic(fmt.Sprintf("incremental L0 sublevel generation produced different output than regeneration: %s != %s", s1, s2)) + panic(errors.AssertionFailedf("incremental L0 sublevel generation produced different output than regeneration: %s != %s", s1, s2)) } } o.l0Sublevels = newSublevels diff --git a/internal/manifest/level_metadata.go b/internal/manifest/level_metadata.go index 612268df331..8c09d7254d0 100644 --- a/internal/manifest/level_metadata.go +++ b/internal/manifest/level_metadata.go @@ -276,7 +276,7 @@ func (ls LevelSlice) verifyInvariants() { length++ } if ls.length != length { - panic(fmt.Sprintf("LevelSlice %s has length %d value; actual length is %d", ls, ls.length, length)) + panic(errors.AssertionFailedf("LevelSlice %s has length %d value; actual length is %d", ls, errors.Safe(ls.length), errors.Safe(length))) } } } @@ -449,7 +449,7 @@ func (kt KeyType) String() string 
{ case KeyTypeRange: return "range" default: - panic(errors.AssertionFailedf("unrecognized key type: %d", kt)) + panic(errors.AssertionFailedf("unrecognized key type: %d", errors.Safe(kt))) } } diff --git a/internal/manifest/table_metadata.go b/internal/manifest/table_metadata.go index 4186ebd7df6..59fbd791229 100644 --- a/internal/manifest/table_metadata.go +++ b/internal/manifest/table_metadata.go @@ -18,6 +18,7 @@ import ( "github.com/cockroachdb/pebble/sstable" "github.com/cockroachdb/pebble/sstable/block" "github.com/cockroachdb/pebble/sstable/virtual" + "github.com/cockroachdb/redact" ) // TableMetadata is maintained for leveled-ssts, i.e., they belong to a level of @@ -347,7 +348,7 @@ type TableBacking struct { func (b *TableBacking) MustHaveRefs() { if refs := b.refs.Load(); refs <= 0 { panic(errors.AssertionFailedf("backing %s must have positive refcount (refs=%d)", - b.DiskFileNum, refs)) + b.DiskFileNum, errors.Safe(refs))) } } @@ -366,7 +367,7 @@ func (b *TableBacking) IsUnused() bool { func (b *TableBacking) Unref() int32 { v := b.refs.Add(-1) if invariants.Enabled && v < 0 { - panic(errors.AssertionFailedf("pebble: invalid TableBacking refcounting: file %s has refcount %d", b.DiskFileNum, v)) + panic(errors.AssertionFailedf("pebble: invalid TableBacking refcounting: file %s has refcount %d", b.DiskFileNum, errors.Safe(v))) } return v } @@ -466,7 +467,7 @@ func (b *TableBacking) PopulateProperties(props *sstable.Properties) *TableBacki // to parse them back. 
b.props.CompressionStats, err = block.ParseCompressionStats(props.CompressionStats) if invariants.Enabled && err != nil { - panic(errors.AssertionFailedf("pebble: error parsing compression stats %q for table %s: %v", b.props.CompressionStats, b.DiskFileNum, err)) + panic(errors.AssertionFailedf("pebble: error parsing compression stats %q for table %s: %v", errors.Safe(b.props.CompressionStats), b.DiskFileNum, err)) } oldStatsValid := b.propsValid.Swap(true) if invariants.Enabled && oldStatsValid { @@ -571,7 +572,7 @@ func (m *TableMetadata) SetCompactionState(to CompactionState) { case CompactionStateCompacted: panic(transitionErr()) default: - panic(fmt.Sprintf("pebble: unknown compaction state: %d", m.CompactionState)) + panic(errors.AssertionFailedf("pebble: unknown compaction state: %d", m.CompactionState)) } } m.CompactionState = to @@ -1263,6 +1264,11 @@ const ( CompactionStateCompacted ) +// SafeFormat implements redact.SafeFormatter. +func (s CompactionState) SafeFormat(w redact.SafePrinter, _ rune) { + w.Print(redact.SafeString(s.String())) +} + // String implements fmt.Stringer. 
func (s CompactionState) String() string { switch s { @@ -1273,6 +1279,6 @@ func (s CompactionState) String() string { case CompactionStateCompacted: return "Compacted" default: - panic(fmt.Sprintf("pebble: unknown compaction state %d", s)) + panic(errors.AssertionFailedf("pebble: unknown compaction state %d", errors.Safe(int(s)))) } } diff --git a/internal/manifest/table_metadata_test.go b/internal/manifest/table_metadata_test.go index 7b3c6a41809..cee885521c9 100644 --- a/internal/manifest/table_metadata_test.go +++ b/internal/manifest/table_metadata_test.go @@ -31,7 +31,7 @@ func TestExtendBounds(t *testing.T) { case base.InternalKeyKindRangeKeySet, base.InternalKeyKindRangeKeyUnset, base.InternalKeyKindRangeKeyDelete: upper = base.MakeExclusiveSentinelKey(k, []byte(end)) default: - panic(fmt.Sprintf("unknown kind %s with end key", k)) + t.Fatalf("unknown kind %s with end key", k) } } else { l, u := strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1]) diff --git a/internal/manifest/version_edit.go b/internal/manifest/version_edit.go index 49b20d4ea29..48f9cd459c6 100644 --- a/internal/manifest/version_edit.go +++ b/internal/manifest/version_edit.go @@ -1221,7 +1221,7 @@ func (b *BulkVersionEdit) Accumulate(ve *VersionEdit) error { // There is already a TableBacking associated with fb.DiskFileNum. // This should never happen. There must always be only one TableBacking // associated with a backing sstable.
- panic(fmt.Sprintf("pebble: duplicate file backing %s", fb.DiskFileNum.String())) + panic(errors.AssertionFailedf("pebble: duplicate file backing %s", fb.DiskFileNum.String())) } b.AddedFileBacking[fb.DiskFileNum] = fb } @@ -1398,7 +1398,7 @@ func (b *BulkVersionEdit) Apply(curr *Version, readCompactionRate int64) (*Versi phys, ok := v.BlobFiles.LookupPhysical(ref.FileID) if !ok { return nil, errors.AssertionFailedf("pebble: blob file %s referenced by L%d.%s not found", - ref.FileID, level, f.TableNum) + ref.FileID, errors.Safe(level), f.TableNum) } // NB: It's possible that the reference already has an estimated // physical size if the table was moved. diff --git a/internal/manifest/version_edit_test.go b/internal/manifest/version_edit_test.go index 9b508910907..75245af3383 100644 --- a/internal/manifest/version_edit_test.go +++ b/internal/manifest/version_edit_test.go @@ -336,7 +336,8 @@ func TestVersionEditDecode(t *testing.T) { fmt.Fprint(&outputBuf, ve.DebugString(base.DefaultFormatter)) return outputBuf.String() default: - panic(fmt.Sprintf("unknown command: %s", d.Cmd)) + t.Fatalf("unknown command: %s", d.Cmd) + return "" } }) } diff --git a/internal/manifest/virtual_backings.go b/internal/manifest/virtual_backings.go index 590a9d42e2c..c318374d938 100644 --- a/internal/manifest/virtual_backings.go +++ b/internal/manifest/virtual_backings.go @@ -148,8 +148,7 @@ func (bv *VirtualBackings) Remove(n base.DiskFileNum) { if v.inUse() { panic(errors.AssertionFailedf( "backing %s still in use (useCount=%d protectionCount=%d)", - v.backing.DiskFileNum, len(v.virtualTables), v.protectionCount, - )) + v.backing.DiskFileNum, errors.Safe(len(v.virtualTables)), errors.Safe(v.protectionCount))) } if v.heapIndex != -1 { panic(errors.AssertionFailedf("backing %s still in rewriteCandidates heap", v.backing.DiskFileNum)) @@ -369,7 +368,7 @@ func (bv *VirtualBackings) mustAdd(v *backingWithMetadata) { func (bv *VirtualBackings) mustGet(n base.DiskFileNum) *backingWithMetadata 
{ v, ok := bv.m[n] if !ok { - panic(fmt.Sprintf("unknown backing %s", n)) + panic(errors.AssertionFailedf("unknown backing %s", n)) } return v } diff --git a/internal/manual/manual.go b/internal/manual/manual.go index d099e779e72..b9964f9eb8f 100644 --- a/internal/manual/manual.go +++ b/internal/manual/manual.go @@ -5,10 +5,10 @@ package manual import ( - "fmt" "sync/atomic" "unsafe" + "github.com/cockroachdb/errors" "github.com/cockroachdb/pebble/internal/invariants" ) @@ -78,7 +78,7 @@ func recordAlloc(purpose Purpose, n uintptr) { func recordFree(purpose Purpose, n uintptr) { newVal := counters[purpose].InUseBytes.Add(-int64(n)) if invariants.Enabled && newVal < 0 { - panic(fmt.Sprintf("negative counter value %d", newVal)) + panic(errors.AssertionFailedf("negative counter value %d", errors.Safe(newVal))) } } diff --git a/internal/metamorphic/metaflags/meta_flags.go b/internal/metamorphic/metaflags/meta_flags.go index 4459e8ac519..dbf27649c96 100644 --- a/internal/metamorphic/metaflags/meta_flags.go +++ b/internal/metamorphic/metaflags/meta_flags.go @@ -63,7 +63,7 @@ type CommonFlags struct { func (c *CommonFlags) KeyFormat() metamorphic.KeyFormat { keyFormat, ok := KeyFormats[c.KeyFormatName] if !ok { - panic(fmt.Sprintf("unknown key format: %q", c.KeyFormatName)) + panic(errors.AssertionFailedf("unknown key format: %q", c.KeyFormatName)) } return keyFormat } @@ -372,7 +372,7 @@ func (r *RunFlags) MakeRunOptions() ([]metamorphic.RunOption, error) { case "mem": opts = append(opts, metamorphic.UseInMemory) default: - panic(fmt.Sprintf("unknown forced filesystem type: %q", r.FS)) + panic(errors.AssertionFailedf("unknown forced filesystem type: %q", r.FS)) } if r.InnerBinary != "" { opts = append(opts, metamorphic.InnerBinary(r.InnerBinary)) diff --git a/internal/overlap/checker_test.go b/internal/overlap/checker_test.go index 08190dac366..13cd892d2c9 100644 --- a/internal/overlap/checker_test.go +++ b/internal/overlap/checker_test.go @@ -14,6 +14,7 @@ import ( 
"github.com/cockroachdb/crlib/crstrings" "github.com/cockroachdb/datadriven" + "github.com/cockroachdb/errors" "github.com/cockroachdb/pebble/internal/base" "github.com/cockroachdb/pebble/internal/keyspan" "github.com/cockroachdb/pebble/internal/manifest" @@ -232,7 +233,7 @@ func splitLinesInSections(input string) [][]string { for l := range crstrings.LinesSeq(input) { if l[0] == ' ' || l[0] == '\t' { if len(res) == 0 { - panic(fmt.Sprintf("invalid first line %q", l)) + panic(errors.AssertionFailedf("invalid first line %q", l)) } res[len(res)-1] = append(res[len(res)-1], strings.TrimSpace(l)) } else { diff --git a/internal/problemspans/set_test.go b/internal/problemspans/set_test.go index c2854921eb8..de349a020b5 100644 --- a/internal/problemspans/set_test.go +++ b/internal/problemspans/set_test.go @@ -15,6 +15,7 @@ import ( "github.com/cockroachdb/crlib/crstrings" "github.com/cockroachdb/crlib/crtime" "github.com/cockroachdb/datadriven" + "github.com/cockroachdb/errors" "github.com/cockroachdb/pebble/internal/base" ) @@ -90,12 +91,12 @@ func parseSetLine(line string, withTime bool) (base.UserKeyBounds, crtime.Mono) if withTime { n, err := fmt.Sscanf(line, "%s %s %ds", &span1, &span2, &timeVal) if err != nil || n != 3 { - panic(fmt.Sprintf("error parsing line %q: n=%d err=%v", line, n, err)) + panic(errors.AssertionFailedf("error parsing line %q: n=%d err=%v", line, n, err)) } } else { n, err := fmt.Sscanf(line, "%s %s", &span1, &span2) if err != nil || n != 2 { - panic(fmt.Sprintf("error parsing line %q: n=%d err=%v", line, n, err)) + panic(errors.AssertionFailedf("error parsing line %q: n=%d err=%v", line, n, err)) } } bounds := base.ParseUserKeyBounds(span1 + " " + span2) diff --git a/internal/rangekeystack/user_iterator_test.go b/internal/rangekeystack/user_iterator_test.go index c2a9577592b..779ca834ccf 100644 --- a/internal/rangekeystack/user_iterator_test.go +++ b/internal/rangekeystack/user_iterator_test.go @@ -16,6 +16,7 @@ import ( 
"github.com/cockroachdb/crlib/crstrings" "github.com/cockroachdb/datadriven" + "github.com/cockroachdb/errors" "github.com/cockroachdb/pebble/internal/base" "github.com/cockroachdb/pebble/internal/keyspan" "github.com/cockroachdb/pebble/internal/keyspan/keyspanimpl" @@ -325,7 +326,7 @@ func runIterOp(w io.Writer, it keyspan.FragmentIterator, op string) { case "prev": s, err = it.Prev() default: - panic(fmt.Sprintf("unrecognized iter op %q", fields[0])) + panic(errors.AssertionFailedf("unrecognized iter op %q", fields[0])) } fmt.Fprintf(w, "%-10s", op) switch { diff --git a/internal/testkeys/testkeys.go b/internal/testkeys/testkeys.go index 450935836f5..90f0717bfa0 100644 --- a/internal/testkeys/testkeys.go +++ b/internal/testkeys/testkeys.go @@ -152,15 +152,15 @@ func compareTimestamps(a, b []byte) int { return cmp.Compare(len(a), len(b)) } if a[0] != suffixDelim || b[0] != suffixDelim { - panic(fmt.Sprintf("invalid suffixes %q %q", a, b)) + panic(errors.AssertionFailedf("invalid suffixes %q %q", a, b)) } ai, err := parseUintBytes(a[1:], 10, 64) if err != nil { - panic(fmt.Sprintf("invalid test mvcc timestamp %q", a)) + panic(errors.AssertionFailedf("invalid test mvcc timestamp %q", a)) } bi, err := parseUintBytes(b[1:], 10, 64) if err != nil { - panic(fmt.Sprintf("invalid test mvcc timestamp %q", b)) + panic(errors.AssertionFailedf("invalid test mvcc timestamp %q", b)) } return cmp.Compare(bi, ai) } @@ -390,7 +390,7 @@ func generateAlphabetKey(buf, alphabet []byte, i, keyCount uint64) int { func computeAlphabetKeyIndex(key []byte, alphabet map[byte]uint64, n int) uint64 { i, ok := alphabet[key[0]] if !ok { - panic(fmt.Sprintf("unrecognized alphabet character %v", key[0])) + panic(errors.AssertionFailedf("unrecognized alphabet character %v", key[0])) } // How many keys exist that start with the preceding i characters? 
Each of // the i characters themselves are a key, plus the count of all the keys @@ -455,19 +455,19 @@ func RandomPrefixInRange(a, b []byte, rng *rand.Rand) []byte { func assertValidPrefix(p []byte) { if !prefixRE.Match(p) { - panic(fmt.Sprintf("invalid prefix %q", p)) + panic(errors.AssertionFailedf("invalid prefix %q", p)) } } func assertLess(a, b []byte) { if Comparer.Compare(a, b) >= 0 { - panic(fmt.Sprintf("invalid key ordering: %q >= %q", a, b)) + panic(errors.AssertionFailedf("invalid key ordering: %q >= %q", a, b)) } } func assertLE(a, b []byte) { if Comparer.Compare(a, b) > 0 { - panic(fmt.Sprintf("invalid key ordering: %q > %q", a, b)) + panic(errors.AssertionFailedf("invalid key ordering: %q > %q", a, b)) } } @@ -553,12 +553,12 @@ func ExtractKVMeta(value []byte) base.KVMeta { var res base.KVMeta v, e := strconv.ParseUint(m[1], 10, 64) if e != nil { - panic(fmt.Sprintf("invalid tiering span in KV metadata in %q", value)) + panic(errors.AssertionFailedf("invalid tiering span in KV metadata in %q", value)) } res.TieringSpanID = base.TieringSpanID(v) v, e = strconv.ParseUint(m[2], 10, 64) if e != nil { - panic(fmt.Sprintf("invalid tiering attr in KV metadata in %q", value)) + panic(errors.AssertionFailedf("invalid tiering attr in KV metadata in %q", value)) } res.TieringAttribute = base.TieringAttribute(v) return res diff --git a/internal/testutils/reflect.go b/internal/testutils/reflect.go index 8b962f32003..eaf066bfa31 100644 --- a/internal/testutils/reflect.go +++ b/internal/testutils/reflect.go @@ -5,8 +5,9 @@ package testutils import ( - "fmt" "reflect" + + "github.com/cockroachdb/errors" ) // AnyPointers returns true if the provided type contains any pointers. 
@@ -29,7 +30,7 @@ func AnyPointers(typ reflect.Type) bool { case reflect.Array: return AnyPointers(typ.Elem()) default: - panic(fmt.Sprintf("unexpected kind: %s", kind)) + panic(errors.AssertionFailedf("unexpected kind: %s", kind)) } } diff --git a/internal/treesteps/tree_steps_on.go b/internal/treesteps/tree_steps_on.go index ea9445125a4..2ce5f1a42b7 100644 --- a/internal/treesteps/tree_steps_on.go +++ b/internal/treesteps/tree_steps_on.go @@ -14,6 +14,8 @@ import ( "sync" "sync/atomic" "unicode" + + "github.com/cockroachdb/errors" ) const Enabled = true @@ -372,7 +374,7 @@ func nodeStateLocked(rec *Recording, n Node) *nodeState { ns = &nodeState{recording: rec, node: n} mu.nodeMap[n] = ns } else if rec != ns.recording { - panic(fmt.Sprintf("node %v part of multiple recordings", n)) + panic(errors.AssertionFailedf("node %v part of multiple recordings", errors.Safe(n))) } return ns } diff --git a/iterator_test.go b/iterator_test.go index 643deaaffe5..f6533e186f6 100644 --- a/iterator_test.go +++ b/iterator_test.go @@ -1319,7 +1319,7 @@ func TestIteratorBoundsLifetimes(t *testing.T) { case "both": opts.KeyTypes = IterKeyTypePointsAndRanges default: - panic(fmt.Sprintf("unrecognized key type %q", arg.Vals[0])) + t.Fatalf("unrecognized key type %q", arg.Vals[0]) } } } diff --git a/mem_table.go b/mem_table.go index 5e88cf4a29f..984cd4fdf5a 100644 --- a/mem_table.go +++ b/mem_table.go @@ -168,7 +168,7 @@ func (m *memTable) init(opts memTableOptions) { func (m *memTable) writerRef() { switch v := m.writerRefs.Add(1); { case v <= 1: - panic(fmt.Sprintf("pebble: inconsistent reference count: %d", v)) + panic(errors.AssertionFailedf("pebble: inconsistent reference count: %d", errors.Safe(v))) } } @@ -176,7 +176,7 @@ func (m *memTable) writerRef() { func (m *memTable) writerUnref() (wasLastRef bool) { switch v := m.writerRefs.Add(-1); { case v < 0: - panic(fmt.Sprintf("pebble: inconsistent reference count: %d", v)) + panic(errors.AssertionFailedf("pebble: inconsistent 
reference count: %d", errors.Safe(v))) case v == 0: return true default: diff --git a/metamorphic/cockroachkvs.go b/metamorphic/cockroachkvs.go index 3e4e89cd50c..522be42ed00 100644 --- a/metamorphic/cockroachkvs.go +++ b/metamorphic/cockroachkvs.go @@ -9,6 +9,7 @@ import ( "math/rand/v2" "slices" + "github.com/cockroachdb/errors" "github.com/cockroachdb/pebble" "github.com/cockroachdb/pebble/cockroachkvs" "github.com/cockroachdb/pebble/internal/testkeys" @@ -210,7 +211,7 @@ func (kg *cockroachKeyGenerator) randKey( knownKeys = kg.keyManager.knownKeys() } else { if cockroachkvs.Compare(bounds.Start, bounds.End) >= 0 { - panic(fmt.Sprintf("invalid bounds [%q, %q)", bounds.Start, bounds.End)) + panic(errors.AssertionFailedf("invalid bounds [%q, %q)", bounds.Start, bounds.End)) } knownKeys = kg.keyManager.knownKeysInRange(*bounds) } @@ -282,7 +283,7 @@ func (kg *cockroachKeyGenerator) randKey( prefix = startPrefix // Bounds have the same prefix, generate a suffix in-between. if startSuffixIdx <= endSuffixIdx { - panic(fmt.Sprintf("invalid bounds [%q, %q)", bounds.Start, bounds.End)) + panic(errors.AssertionFailedf("invalid bounds [%q, %q)", bounds.Start, bounds.End)) } suffixIdx = kg.skewedSuffixInt(0.01) for i := 0; !(startSuffixIdx >= suffixIdx && endSuffixIdx < suffixIdx); i++ { @@ -320,7 +321,7 @@ func (kg *cockroachKeyGenerator) randKey( key = append(key, kg.suffixSpace.ToMaterializedSuffix(suffixIdx)...) 
} if cockroachkvs.Compare(key, bounds.Start) < 0 || cockroachkvs.Compare(key, bounds.End) >= 0 { - panic(fmt.Sprintf("invalid randKey %q; bounds: [%q, %q) %v %v", + panic(errors.AssertionFailedf("invalid randKey %q; bounds: [%q, %q) %v %v", key, bounds.Start, bounds.End, cockroachkvs.Compare(key, bounds.Start), cockroachkvs.Compare(key, bounds.End))) diff --git a/metamorphic/generator.go b/metamorphic/generator.go index 0bbb10df6fa..88c28b3ec9d 100644 --- a/metamorphic/generator.go +++ b/metamorphic/generator.go @@ -12,6 +12,7 @@ import ( "os" "slices" + "github.com/cockroachdb/errors" "github.com/cockroachdb/pebble" "github.com/cockroachdb/pebble/internal/base" "github.com/cockroachdb/pebble/internal/randvar" @@ -504,7 +505,7 @@ func (g *generator) dbRestart() { g.add(&closeOp{objID: batchID}) } if len(g.liveReaders) != len(g.dbs) || len(g.liveWriters) != len(g.dbs) { - panic(fmt.Sprintf("unexpected counts: liveReaders %d, liveWriters: %d", + panic(errors.AssertionFailedf("unexpected counts: liveReaders %d, liveWriters: %d", len(g.liveReaders), len(g.liveWriters))) } g.add(&dbRestartOp{dbID: dbID}) @@ -634,7 +635,7 @@ func (g *generator) deriveDB(readerID objID) objID { func (g *generator) dbIDForObj(objID objID) objID { if g.objDB[objID] == 0 { - panic(fmt.Sprintf("object %s has no associated DB", objID)) + panic(errors.AssertionFailedf("object %s has no associated DB", objID)) } return g.objDB[objID] } @@ -1123,7 +1124,7 @@ func (g *generator) writerApply() { return } if len(g.liveWriters) < 2 { - panic(fmt.Sprintf("insufficient liveWriters (%d) to apply batch", len(g.liveWriters))) + panic(errors.AssertionFailedf("insufficient liveWriters (%d) to apply batch", len(g.liveWriters))) } batchID := g.liveBatches.rand(g.rng) diff --git a/metamorphic/history.go b/metamorphic/history.go index 52ca07865b1..e4e67123d21 100644 --- a/metamorphic/history.go +++ b/metamorphic/history.go @@ -57,7 +57,7 @@ func (h *history) Recordf(op int, format string, args ...interface{}) { 
} if strings.Contains(format, "\n") { // We could remove this restriction but suffixing every line with "#". - panic(fmt.Sprintf("format string must not contain \\n: %q", format)) + panic(errors.AssertionFailedf("format string must not contain \\n: %q", format)) } // We suffix every line with # in order to provide a marker to locate @@ -123,7 +123,7 @@ func (h *history) Fatalf(format string, args ...interface{}) { h.mu.Lock() defer h.mu.Unlock() if h.mu.closed { - panic(fmt.Sprintf(format, args...)) + panic(errors.AssertionFailedf(format, args...)) } _ = h.log.Output(2, h.format("FATAL", format, args...)) // Store the first fatal error message. @@ -221,7 +221,7 @@ func extractOp(line string) int { } v, err := strconv.Atoi(line[i+1 : i+1+j]) if err != nil { - panic(fmt.Sprintf("unable to parse line %q: %s", line, err)) + panic(errors.AssertionFailedf("unable to parse line %q: %s", line, err)) } return v } diff --git a/metamorphic/key_manager.go b/metamorphic/key_manager.go index 0cc754dda81..98e91e6fb27 100644 --- a/metamorphic/key_manager.go +++ b/metamorphic/key_manager.go @@ -74,9 +74,9 @@ type bounds struct { func (b bounds) checkValid(cmp base.Compare) { if c := cmp(b.smallest, b.largest); c > 0 { - panic(fmt.Sprintf("invalid bound [%q, %q]", b.smallest, b.largest)) + panic(errors.AssertionFailedf("invalid bound [%q, %q]", b.smallest, b.largest)) } else if c == 0 && b.largestExcl { - panic(fmt.Sprintf("invalid bound [%q, %q)", b.smallest, b.largest)) + panic(errors.AssertionFailedf("invalid bound [%q, %q)", b.smallest, b.largest)) } } @@ -277,7 +277,7 @@ func (k *keyManager) SortedKeysForObj(o objID) []keyMeta { slices.SortFunc(res, func(a, b keyMeta) int { cmp := k.kf.Comparer.Compare(a.key, b.key) if cmp == 0 { - panic(fmt.Sprintf("distinct keys %q and %q compared as equal", a.key, b.key)) + panic(errors.AssertionFailedf("distinct keys %q and %q compared as equal", a.key, b.key)) } return cmp }) @@ -332,7 +332,7 @@ func (k *keyManager) 
KeysForExternalIngest(obj externalObjWithBounds) []keyMeta // Check for duplicate resulting keys. for i := 1; i < len(res); i++ { if k.kf.Comparer.Compare(res[i].key, res[i-1].key) == 0 { - panic(fmt.Sprintf("duplicate external ingest key %q", res[i].key)) + panic(errors.AssertionFailedf("duplicate external ingest key %q", res[i].key)) } } return res @@ -398,7 +398,7 @@ func (k *keyManager) addNewKey(key []byte) bool { prefixLen := k.kf.Comparer.Split(key) if prefixLen == 0 { - panic(fmt.Sprintf("key %q has zero length prefix", key)) + panic(errors.AssertionFailedf("key %q has zero length prefix", key)) } if _, ok := k.globalKeyPrefixesMap[string(key[:prefixLen])]; !ok { insertSorted(k.kf.Comparer.Compare, &k.globalKeyPrefixes, key[:prefixLen]) diff --git a/metamorphic/ops.go b/metamorphic/ops.go index 108e2a5843a..b781b0ec003 100644 --- a/metamorphic/ops.go +++ b/metamorphic/ops.go @@ -1915,7 +1915,7 @@ func (o *newExternalObjOp) run(t *Test, h historyRecorder) { t.clearObj(o.batchID) if o.externalObjID.tag() != externalObjTag { - panic(fmt.Sprintf("invalid externalObjID %s", o.externalObjID)) + panic(errors.AssertionFailedf("invalid externalObjID %s", o.externalObjID)) } // We add a unique number to the object name to avoid collisions with existing // external objects (when using an initial starting state). diff --git a/metamorphic/options.go b/metamorphic/options.go index 8637520b05d..6add9691440 100644 --- a/metamorphic/options.go +++ b/metamorphic/options.go @@ -71,7 +71,7 @@ func parseOptions( OnUnknown: func(name, value string) { if strings.EqualFold(value, "false") { // TODO(radu): audit all settings and use ParseBool wherever necessary. 
- panic(fmt.Sprintf("%s: boolean options can only be set to true", name)) + panic(errors.AssertionFailedf("%s: boolean options can only be set to true", name)) } switch name { case "TestOptions": @@ -272,7 +272,7 @@ func optionsToString(opts *TestOptions) string { } if opts.Opts.AllocatorSizeClasses != nil { if fmt.Sprint(opts.Opts.AllocatorSizeClasses) != fmt.Sprint(pebble.JemallocSizeClasses) { - panic(fmt.Sprintf("unexpected AllocatorSizeClasses %v", opts.Opts.AllocatorSizeClasses)) + panic(errors.AssertionFailedf("unexpected AllocatorSizeClasses %v", opts.Opts.AllocatorSizeClasses)) } fmt.Fprint(&buf, " use_jemalloc_size_classes=true\n") } diff --git a/metamorphic/parser.go b/metamorphic/parser.go index f0147c2c833..fed59c4842e 100644 --- a/metamorphic/parser.go +++ b/metamorphic/parser.go @@ -133,7 +133,7 @@ func opArgs(op op) (receiverID *objID, targetID *objID, args []interface{}) { case *estimateDiskUsageOp: return &t.dbID, nil, []interface{}{&t.start, &t.end} } - panic(fmt.Sprintf("unsupported op type: %T", op)) + panic(errors.AssertionFailedf("unsupported op type: %T", op)) } // ignoreExtraArgs is used as a stand-in for a variable length argument for @@ -601,11 +601,11 @@ func (p *parser) parseExternalObjsWithBounds(list []listElem) []externalObjWithB } if len(syntheticPrefix) > 0 { if !bytes.HasPrefix(objs[i].bounds.Start, []byte(syntheticPrefix)) { - panic(fmt.Sprintf("invalid synthetic prefix %q [%x] %s [%x]", + panic(errors.AssertionFailedf("invalid synthetic prefix %q [%x] %s [%x]", objs[i].bounds.Start, []byte(objs[i].bounds.Start), syntheticPrefix, []byte(syntheticPrefix))) } if !bytes.HasPrefix(objs[i].bounds.End, []byte(syntheticPrefix)) { - panic(fmt.Sprintf("invalid synthetic prefix %q %s", objs[i].bounds.End, syntheticPrefix)) + panic(errors.AssertionFailedf("invalid synthetic prefix %q %s", objs[i].bounds.End, syntheticPrefix)) } objs[i].syntheticPrefix = blockiter.SyntheticPrefix(syntheticPrefix) } diff --git a/metamorphic/test.go 
b/metamorphic/test.go index 4a7b97f53b4..f9ef1949e47 100644 --- a/metamorphic/test.go +++ b/metamorphic/test.go @@ -276,7 +276,7 @@ func (t *Test) finalizeOptions() pebble.Options { // test's data dir and can be shared among multiple dbs. externalDir := o.FS.PathJoin(t.dir, "external") if err := o.FS.MkdirAll(externalDir, 0755); err != nil { - panic(fmt.Sprintf("failed to create directory %q: %s", externalDir, err)) + panic(errors.AssertionFailedf("failed to create directory %q: %s", externalDir, err)) } // Even if externalStorageEnabled is false, the test uses externalStorage to // emulate external ingestion. @@ -289,7 +289,7 @@ func (t *Test) finalizeOptions() pebble.Options { if t.testOpts.sharedStorageEnabled || t.testOpts.initialStatePath != "" { sharedDir := o.FS.PathJoin(t.dir, "shared") if err := o.FS.MkdirAll(sharedDir, 0755); err != nil { - panic(fmt.Sprintf("failed to create directory %q: %s", sharedDir, err)) + panic(errors.AssertionFailedf("failed to create directory %q: %s", sharedDir, err)) } m[""] = remote.NewLocalFS(sharedDir, o.FS) } @@ -441,7 +441,7 @@ func (t *Test) runOp(idx int, h historyRecorder) { opTimeout *= 8 } timer = time.AfterFunc(opTimeout, func() { - panic(fmt.Sprintf("operation took longer than %s: %s", + panic(errors.AssertionFailedf("operation took longer than %s: %s", opTimeout, op.formattedString(t.testOpts.KeyFormat))) }) } @@ -464,14 +464,14 @@ func (t *Test) runOp(idx int, h historyRecorder) { func (t *Test) setBatch(id objID, b *pebble.Batch) { if id.tag() != batchTag { - panic(fmt.Sprintf("invalid batch ID: %s", id)) + panic(errors.AssertionFailedf("invalid batch ID: %s", id)) } t.batches[id.slot()] = b } func (t *Test) setIter(id objID, i *pebble.Iterator) { if id.tag() != iterTag { - panic(fmt.Sprintf("invalid iter ID: %s", id)) + panic(errors.AssertionFailedf("invalid iter ID: %s", id)) } t.iters[id.slot()] = &retryableIter{ iter: i, @@ -487,21 +487,21 @@ type readerCloser interface { func (t *Test) setSnapshot(id objID, 
s readerCloser) { if id.tag() != snapTag { - panic(fmt.Sprintf("invalid snapshot ID: %s", id)) + panic(errors.AssertionFailedf("invalid snapshot ID: %s", id)) } t.snapshots[id.slot()] = s } func (t *Test) setExternalObj(id objID, meta externalObjMeta) { if id.tag() != externalObjTag { - panic(fmt.Sprintf("invalid external object ID: %s", id)) + panic(errors.AssertionFailedf("invalid external object ID: %s", id)) } t.externalObjs[id.slot()] = meta } func (t *Test) getExternalObj(id objID) externalObjMeta { if id.tag() != externalObjTag || t.externalObjs[id.slot()].sstMeta == nil { - panic(fmt.Sprintf("metamorphic test internal error: invalid external object ID: %s", id)) + panic(errors.AssertionFailedf("metamorphic test internal error: invalid external object ID: %s", id)) } return t.externalObjs[id.slot()] } @@ -517,13 +517,13 @@ func (t *Test) clearObj(id objID) { case snapTag: t.snapshots[id.slot()] = nil default: - panic(fmt.Sprintf("cannot clear ID: %s", id)) + panic(errors.AssertionFailedf("cannot clear ID: %s", id)) } } func (t *Test) getBatch(id objID) *pebble.Batch { if id.tag() != batchTag || t.batches[id.slot()] == nil { - panic(fmt.Sprintf("metamorphic test internal error: invalid batch ID: %s", id)) + panic(errors.AssertionFailedf("metamorphic test internal error: invalid batch ID: %s", id)) } return t.batches[id.slot()] } @@ -539,13 +539,13 @@ func (t *Test) getCloser(id objID) io.Closer { case snapTag: return t.snapshots[id.slot()] default: - panic(fmt.Sprintf("cannot close ID: %s", id)) + panic(errors.AssertionFailedf("cannot close ID: %s", id)) } } func (t *Test) getIter(id objID) *retryableIter { if id.tag() != iterTag { - panic(fmt.Sprintf("invalid iter ID: %s", id)) + panic(errors.AssertionFailedf("invalid iter ID: %s", id)) } return t.iters[id.slot()] } @@ -559,7 +559,7 @@ func (t *Test) getReader(id objID) pebble.Reader { case snapTag: return t.snapshots[id.slot()] default: - panic(fmt.Sprintf("invalid reader ID: %s", id)) + 
panic(errors.AssertionFailedf("invalid reader ID: %s", id)) } } @@ -570,7 +570,7 @@ func (t *Test) getWriter(id objID) pebble.Writer { case batchTag: return t.batches[id.slot()] default: - panic(fmt.Sprintf("invalid writer ID: %s", id)) + panic(errors.AssertionFailedf("invalid writer ID: %s", id)) } } @@ -579,7 +579,7 @@ func (t *Test) getDB(id objID) *pebble.DB { case dbTag: return t.dbs[id.slot()-1] default: - panic(fmt.Sprintf("invalid writer tag: %v", id.tag())) + panic(errors.AssertionFailedf("invalid writer tag: %v", id.tag())) } } @@ -608,7 +608,7 @@ func computeSynchronizationPoints(ops []op) (opsWaitOn [][]int, opsDone []chan s // have been referenced by some other operation before it's used as // a receiver. if i != 0 && receiver.tag() != dbTag { - panic(fmt.Sprintf("op %T on receiver %s; first reference of %s", + panic(errors.AssertionFailedf("op %T on receiver %s; first reference of %s", ops[i], receiver, receiver)) } // The initOp is a little special. We do want to store the objects it's @@ -634,7 +634,7 @@ func computeSynchronizationPoints(ops []op) (opsWaitOn [][]int, opsDone []chan s for _, syncObjID := range o.syncObjs() { if vi, vok := lastOpReference[syncObjID]; vok { if vi == i { - panic(fmt.Sprintf("%T has %s as syncObj multiple times", ops[i], syncObjID)) + panic(errors.AssertionFailedf("%T has %s as syncObj multiple times", ops[i], syncObjID)) } opsWaitOn[i] = append(opsWaitOn[i], vi) } diff --git a/metamorphic/testkeys.go b/metamorphic/testkeys.go index 68437666bea..bf368d26d46 100644 --- a/metamorphic/testkeys.go +++ b/metamorphic/testkeys.go @@ -6,7 +6,6 @@ package metamorphic import ( "cmp" - "fmt" "math/rand/v2" "slices" @@ -209,7 +208,7 @@ func (kg *testkeyKeyGenerator) randKey(newKeyProbability float64, bounds *pebble knownKeys = kg.keyManager.knownKeys() } else { if kg.cmp(bounds.Start, bounds.End) >= 0 { - panic(fmt.Sprintf("invalid bounds [%q, %q)", bounds.Start, bounds.End)) + panic(errors.AssertionFailedf("invalid bounds [%q, 
%q)", bounds.Start, bounds.End)) } knownKeys = kg.keyManager.knownKeysInRange(*bounds) } @@ -283,7 +282,7 @@ func (kg *testkeyKeyGenerator) randKey(newKeyProbability float64, bounds *pebble prefix = startPrefix // Bounds have the same prefix, generate a suffix in-between. if cmpSuffix(startSuffix, endSuffix) >= 0 { - panic(fmt.Sprintf("invalid bounds [%q, %q)", bounds.Start, bounds.End)) + panic(errors.AssertionFailedf("invalid bounds [%q, %q)", bounds.Start, bounds.End)) } suffix = kg.skewedSuffixInt(0.01) for i := 0; !(cmpSuffix(startSuffix, suffix) <= 0 && cmpSuffix(suffix, endSuffix) < 0); i++ { @@ -315,7 +314,7 @@ func (kg *testkeyKeyGenerator) randKey(newKeyProbability float64, bounds *pebble key = append(key, testkeys.Suffix(suffix)...) } if kg.cmp(key, bounds.Start) < 0 || kg.cmp(key, bounds.End) >= 0 { - panic(fmt.Sprintf("invalid randKey %q; bounds: [%q, %q) %v %v", + panic(errors.AssertionFailedf("invalid randKey %q; bounds: [%q, %q) %v %v", key, bounds.Start, bounds.End, kg.cmp(key, bounds.Start), kg.cmp(key, bounds.End))) } // We might (rarely) produce an existing key here, that's ok. @@ -373,7 +372,7 @@ func (kg *testkeyKeyGenerator) parseKey(k []byte) (prefix []byte, suffix int64) } suffix, err := testkeys.ParseSuffix(k[n:]) if err != nil { - panic(fmt.Sprintf("error parsing suffix for key %q", k)) + panic(errors.AssertionFailedf("error parsing suffix for key %q", k)) } return k[:n:n], suffix } diff --git a/metrics.go b/metrics.go index b3c820b5503..264767696cc 100644 --- a/metrics.go +++ b/metrics.go @@ -13,6 +13,7 @@ import ( "unsafe" "github.com/cockroachdb/crlib/crhumanize" + "github.com/cockroachdb/errors" "github.com/cockroachdb/pebble/internal/ascii" "github.com/cockroachdb/pebble/internal/ascii/table" "github.com/cockroachdb/pebble/internal/base" @@ -1262,10 +1263,10 @@ func (m *Metrics) StringForTests() string { if math.MaxInt == math.MaxInt64 { // Verify the 64-bit sizes, so they are kept updated. 
if sstableReaderSize64bit != unsafe.Sizeof(sstable.Reader{}) { - panic(fmt.Sprintf("sstableReaderSize64bit should be updated to %d", unsafe.Sizeof(sstable.Reader{}))) + panic(errors.AssertionFailedf("sstableReaderSize64bit should be updated to %d", errors.Safe(unsafe.Sizeof(sstable.Reader{})))) } if blobFileReaderSize64bit != unsafe.Sizeof(blob.FileReader{}) { - panic(fmt.Sprintf("blobFileReaderSize64bit should be updated to %d", unsafe.Sizeof(blob.FileReader{}))) + panic(errors.AssertionFailedf("blobFileReaderSize64bit should be updated to %d", errors.Safe(unsafe.Sizeof(blob.FileReader{})))) } } // Don't show cgo memory statistics as they can vary based on architecture, diff --git a/metrics_test.go b/metrics_test.go index 559900f9a7f..6f2cadd9424 100644 --- a/metrics_test.go +++ b/metrics_test.go @@ -514,7 +514,7 @@ func TestMetrics(t *testing.T) { panic(err) } if l >= numLevels { - panic(fmt.Sprintf("invalid level %d", l)) + t.Fatalf("invalid level %d", l) } buf.WriteString(fmt.Sprintf("%d\n", m.Levels[l].VirtualTables.Count)) } else if line == "remote-count" { @@ -524,7 +524,7 @@ func TestMetrics(t *testing.T) { cs := m.RemoteTablesTotal() buf.WriteString(fmt.Sprintf("%s\n", humanize.Bytes.Uint64(cs.Bytes))) } else { - panic(fmt.Sprintf("invalid field: %s", line)) + t.Fatalf("invalid field: %s", line) } } return buf.String() diff --git a/objstorage/objstorage.go b/objstorage/objstorage.go index c68d3127d5e..6f2ee79037c 100644 --- a/objstorage/objstorage.go +++ b/objstorage/objstorage.go @@ -202,7 +202,7 @@ func (meta *ObjectMetadata) AssertValid() { if !meta.IsRemote() { // Verify all Remote fields are empty. 
if meta.Remote != (ObjectMetadata{}).Remote { - panic(errors.AssertionFailedf("meta.Remote not empty: %#v", meta.Remote)) + panic(errors.AssertionFailedf("meta.Remote not empty: %#v", errors.Safe(meta.Remote))) } } else { if meta.Remote.CustomObjectName == "" { @@ -214,7 +214,7 @@ func (meta *ObjectMetadata) AssertValid() { } } if meta.Remote.CleanupMethod != SharedNoCleanup && meta.Remote.CleanupMethod != SharedRefTracking { - panic(errors.AssertionFailedf("invalid CleanupMethod %d", meta.Remote.CleanupMethod)) + panic(errors.AssertionFailedf("invalid CleanupMethod %d", errors.Safe(meta.Remote.CleanupMethod))) } if meta.Remote.Storage == nil { panic(errors.AssertionFailedf("Storage not set")) diff --git a/objstorage/objstorageprovider/sharedcache/shared_cache.go b/objstorage/objstorageprovider/sharedcache/shared_cache.go index a100cbe1509..2d1af95db2b 100644 --- a/objstorage/objstorageprovider/sharedcache/shared_cache.go +++ b/objstorage/objstorageprovider/sharedcache/shared_cache.go @@ -220,7 +220,7 @@ func (c *Cache) ReadAt( c.metrics.totalReads.Add(1) if ofs >= objSize { if invariants.Enabled { - panic(fmt.Sprintf("invalid ReadAt offset %v %v", ofs, objSize)) + panic(errors.AssertionFailedf("invalid ReadAt offset %v %v", errors.Safe(ofs), errors.Safe(objSize))) } return io.EOF } @@ -251,7 +251,7 @@ func (c *Cache) ReadAt( if invariants.Enabled { if n != 0 && c.bm.Remainder(ofs) != 0 { - panic(fmt.Sprintf("after non-zero read from cache, ofs is not block-aligned: %v %v", ofs, n)) + panic(errors.AssertionFailedf("after non-zero read from cache, ofs is not block-aligned: %v %v", errors.Safe(ofs), errors.Safe(n))) } } } @@ -335,7 +335,7 @@ func (c *Cache) get(fileNum base.DiskFileNum, p []byte, ofs int64) (n int, _ err func (c *Cache) set(fileNum base.DiskFileNum, p []byte, ofs int64) error { if invariants.Enabled { if c.bm.Remainder(ofs) != 0 || c.bm.Remainder(int64(len(p))) != 0 { - panic(fmt.Sprintf("set with ofs & len not multiples of block size: %v %v", ofs, 
len(p))) + panic(errors.AssertionFailedf("set with ofs & len not multiples of block size: %v %v", errors.Safe(ofs), errors.Safe(len(p)))) } } @@ -548,7 +548,7 @@ func (s *shard) lruUnlink(index cacheBlockIndex) { func (s *shard) get(fileNum base.DiskFileNum, p []byte, ofs int64) (n int, _ error) { if invariants.Enabled { if ofs/s.shardingBlockSize != (ofs+int64(len(p))-1)/s.shardingBlockSize { - panic(fmt.Sprintf("get crosses shard boundary: %v %v", ofs, len(p))) + panic(errors.AssertionFailedf("get crosses shard boundary: %v %v", errors.Safe(ofs), errors.Safe(len(p)))) } s.assertShardStateIsConsistent() } @@ -627,10 +627,10 @@ func (s *shard) get(fileNum base.DiskFileNum, p []byte, ofs int64) (n int, _ err func (s *shard) set(fileNum base.DiskFileNum, p []byte, ofs int64) error { if invariants.Enabled { if ofs/s.shardingBlockSize != (ofs+int64(len(p))-1)/s.shardingBlockSize { - panic(fmt.Sprintf("set crosses shard boundary: %v %v", ofs, len(p))) + panic(errors.AssertionFailedf("set crosses shard boundary: %v %v", errors.Safe(ofs), errors.Safe(len(p)))) } if s.bm.Remainder(ofs) != 0 || s.bm.Remainder(int64(len(p))) != 0 { - panic(fmt.Sprintf("set with ofs & len not multiples of block size: %v %v", ofs, len(p))) + panic(errors.AssertionFailedf("set with ofs & len not multiples of block size: %v %v", errors.Safe(ofs), errors.Safe(len(p)))) } s.assertShardStateIsConsistent() } @@ -645,7 +645,7 @@ func (s *shard) set(fileNum base.DiskFileNum, p []byte, ofs int64) error { } if invariants.Enabled { if n > len(p) { - panic(fmt.Sprintf("set with n greater than len(p): %v %v", n, len(p))) + panic(errors.AssertionFailedf("set with n greater than len(p): %v %v", errors.Safe(n), errors.Safe(len(p)))) } } @@ -726,7 +726,7 @@ func (s *shard) dropReadLock(cacheBlockInd cacheBlockIndex) { s.mu.Lock() s.mu.blocks[cacheBlockInd].lock -= readLockTakenInc if invariants.Enabled && s.mu.blocks[cacheBlockInd].lock < 0 { - panic(fmt.Sprintf("unexpected lock state %v in dropReadLock", 
s.mu.blocks[cacheBlockInd].lock)) + panic(errors.AssertionFailedf("unexpected lock state %v in dropReadLock", errors.Safe(s.mu.blocks[cacheBlockInd].lock))) } s.mu.Unlock() } @@ -735,7 +735,7 @@ func (s *shard) dropReadLock(cacheBlockInd cacheBlockIndex) { func (s *shard) dropWriteLock(cacheBlockInd cacheBlockIndex) { s.mu.Lock() if invariants.Enabled && s.mu.blocks[cacheBlockInd].lock != writeLockTaken { - panic(fmt.Sprintf("unexpected lock state %v in dropWriteLock", s.mu.blocks[cacheBlockInd].lock)) + panic(errors.AssertionFailedf("unexpected lock state %v in dropWriteLock", errors.Safe(s.mu.blocks[cacheBlockInd].lock))) } s.mu.blocks[cacheBlockInd].lock = unlocked s.mu.Unlock() @@ -759,7 +759,7 @@ func (s *shard) assertShardStateIsConsistent() { } } if lruLen != len(s.mu.where) { - panic(fmt.Sprintf("lru list len is %d but where map has %d entries", lruLen, len(s.mu.where))) + panic(errors.AssertionFailedf("lru list len is %d but where map has %d entries", errors.Safe(lruLen), errors.Safe(len(s.mu.where)))) } freeLen := 0 for n := s.mu.freeHead; n != invalidBlockIndex; n = s.mu.blocks[n].next { @@ -767,11 +767,11 @@ func (s *shard) assertShardStateIsConsistent() { } if lruLen+freeLen != int(s.sizeInBlocks) { - panic(fmt.Sprintf("%d lru blocks and %d free blocks don't add up to %d", lruLen, freeLen, s.sizeInBlocks)) + panic(errors.AssertionFailedf("%d lru blocks and %d free blocks don't add up to %d", errors.Safe(lruLen), errors.Safe(freeLen), errors.Safe(s.sizeInBlocks))) } for i := range s.mu.blocks { if state := s.mu.blocks[i].lock; state < writeLockTaken { - panic(fmt.Sprintf("lock state %v is not allowed", state)) + panic(errors.AssertionFailedf("lock state %v is not allowed", errors.Safe(state))) } } } @@ -793,7 +793,7 @@ func makeBlockMath(blockSize int) blockMath { blockSizeBits: int8(bits.Len64(uint64(blockSize)) - 1), } if blockSize != (1 << bm.blockSizeBits) { - panic(fmt.Sprintf("blockSize %d is not a power of 2", blockSize)) + 
panic(errors.AssertionFailedf("blockSize %d is not a power of 2", errors.Safe(blockSize))) } return bm } diff --git a/options.go b/options.go index 910cb66d0d8..3d6c425ecd7 100644 --- a/options.go +++ b/options.go @@ -121,7 +121,7 @@ func (t IterKeyType) String() string { case IterKeyTypePointsAndRanges: return "points-and-ranges" default: - panic(fmt.Sprintf("unknown key type %d", t)) + panic(errors.AssertionFailedf("unknown key type %d", errors.Safe(t))) } } @@ -1769,7 +1769,7 @@ func (o *Options) TargetFileSize(outputLevel int, baseLevel int) int64 { return o.TargetFileSizes[0] } if baseLevel > outputLevel { - panic(fmt.Sprintf("invalid base level %d (output level %d)", baseLevel, outputLevel)) + panic(errors.AssertionFailedf("invalid base level %d (output level %d)", errors.Safe(baseLevel), errors.Safe(outputLevel))) } return o.TargetFileSizes[outputLevel-baseLevel+1] } @@ -2757,7 +2757,7 @@ func (o *Options) MakeWriterOptions(level int, format sstable.TableFormat) sstab var ok bool writerOpts.KeySchema, ok = o.KeySchemas[o.KeySchema] if !ok { - panic(fmt.Sprintf("invalid schema %q", redact.Safe(o.KeySchema))) + panic(errors.AssertionFailedf("invalid schema %q", redact.Safe(o.KeySchema))) } } if format >= sstable.TableFormatPebblev3 { diff --git a/replay/replay.go b/replay/replay.go index 5069afa59cd..41c911472f3 100644 --- a/replay/replay.go +++ b/replay/replay.go @@ -648,7 +648,7 @@ func (r *Runner) eventListener() pebble.EventListener { case "L0", "memtable": r.writeStallMetrics.countByReason[writeStallReason]++ default: - panic(fmt.Sprintf("unrecognized write stall reason %q", info.Reason)) + panic(errors.AssertionFailedf("unrecognized write stall reason %q", errors.Safe(info.Reason))) } writeStallBegin = time.Now() }, diff --git a/scan_internal.go b/scan_internal.go index 42e961cb994..f2df1134229 100644 --- a/scan_internal.go +++ b/scan_internal.go @@ -6,7 +6,6 @@ package pebble import ( "context" - "fmt" "slices" "sync" @@ -372,7 +371,7 @@ func (p 
*pointCollapsingIterator) verifySeqNum(kv *base.InternalKV) *base.Intern return kv } if kv.SeqNum() != p.fixedSeqNum { - panic(fmt.Sprintf("expected foreign point key to have seqnum %d, got %d", p.fixedSeqNum, kv.SeqNum())) + panic(errors.AssertionFailedf("expected foreign point key to have seqnum %d, got %d", p.fixedSeqNum, kv.SeqNum())) } return kv } @@ -438,7 +437,7 @@ func (p *pointCollapsingIterator) findNextEntry() *base.InternalKV { p.pos = pcIterPosCur return p.verifySeqNum(p.iterKV) default: - panic(fmt.Sprintf("unexpected kind: %d", p.iterKV.Kind())) + panic(errors.AssertionFailedf("unexpected kind: %d", p.iterKV.Kind())) } } p.resetKey() diff --git a/sstable/blob/blob.go b/sstable/blob/blob.go index 1ef5cd2de5f..4b4fd751797 100644 --- a/sstable/blob/blob.go +++ b/sstable/blob/blob.go @@ -318,7 +318,7 @@ func (w *FileWriter) Close() (FileWriterStats, error) { if w.stats.BlockCount != uint32(w.indexEncoder.countBlocks) { panic(errors.AssertionFailedf("block count mismatch: %d vs %d", - w.stats.BlockCount, w.indexEncoder.countBlocks)) + errors.Safe(w.stats.BlockCount), errors.Safe(w.indexEncoder.countBlocks))) } if w.stats.BlockCount == 0 { panic(errors.AssertionFailedf("no blocks written")) @@ -641,7 +641,7 @@ func (p *FileProperties) set(key []byte, value []byte) { var err error p.CompressionStats, err = block.ParseCompressionStats(string(value)) if invariants.Enabled && err != nil { - panic(errors.AssertionFailedf("pebble: error parsing blob file compression stats %q", string(value))) + panic(errors.AssertionFailedf("pebble: error parsing blob file compression stats %q", errors.Safe(string(value)))) } default: diff --git a/sstable/blob/blob_test.go b/sstable/blob/blob_test.go index 7c2da9a30c0..3e79eb6b4ea 100644 --- a/sstable/blob/blob_test.go +++ b/sstable/blob/blob_test.go @@ -81,7 +81,8 @@ func TestBlobWriter(t *testing.T) { } return buf.String() default: - panic(fmt.Sprintf("unknown command: %s", td.Cmd)) + t.Fatalf("unknown command: %s", td.Cmd) + 
return "" } }) } diff --git a/sstable/blob/blocks.go b/sstable/blob/blocks.go index 6fd3baca11f..652820c5d1f 100644 --- a/sstable/blob/blocks.go +++ b/sstable/blob/blocks.go @@ -81,7 +81,7 @@ func (e *indexBlockEncoder) AddBlockHandle(h block.Handle) { if e.countBlocks == 0 { e.offsets.Set(0, h.Offset) } else if expected := e.offsets.Get(e.countBlocks); expected != h.Offset { - panic(errors.AssertionFailedf("block handle %s doesn't have expected offset of %d", h, expected)) + panic(errors.AssertionFailedf("block handle %s doesn't have expected offset of %d", errors.Safe(h), errors.Safe(expected))) } // Increment the number blocks, and set the endOffset. @@ -97,7 +97,7 @@ func (e *indexBlockEncoder) AddVirtualBlockMapping( ) { // Require that virtual blocks are added in order. if virtualBlockID < BlockID(e.countVirtualBlocks) { - panic(errors.AssertionFailedf("virtual block ID %d is out of order; expected %d", virtualBlockID, e.countVirtualBlocks)) + panic(errors.AssertionFailedf("virtual block ID %d is out of order; expected %d", errors.Safe(virtualBlockID), errors.Safe(e.countVirtualBlocks))) } // If there's a gap within the virtual block IDs, we fill in the gap with // entries that clarify these blocks are empty. 
diff --git a/sstable/blob/blocks_test.go b/sstable/blob/blocks_test.go index d6f0f8bc58a..fef2a106293 100644 --- a/sstable/blob/blocks_test.go +++ b/sstable/blob/blocks_test.go @@ -82,7 +82,8 @@ func TestIndexBlockEncoding(t *testing.T) { return buf.String() default: - panic(fmt.Sprintf("unknown command: %s", d.Cmd)) + t.Fatalf("unknown command: %s", d.Cmd) + return "" } }) } diff --git a/sstable/blob/fetcher.go b/sstable/blob/fetcher.go index 3ac4f857ecc..cb7d3362883 100644 --- a/sstable/blob/fetcher.go +++ b/sstable/blob/fetcher.go @@ -126,7 +126,7 @@ func (r *ValueFetcher) FetchHandle( v, err := r.retrieve(ctx, vh) if err == nil && len(v) != int(vh.ValueLen) { return nil, false, - errors.AssertionFailedf("value length mismatch: %d != %d", len(v), vh.ValueLen) + errors.AssertionFailedf("value length mismatch: %d != %d", errors.Safe(len(v)), errors.Safe(vh.ValueLen)) } if invariants.Enabled { v = r.bufMangler.MaybeMangleLater(v) @@ -309,7 +309,7 @@ func (cr *cachedReader) GetUnsafeValue( physicalBlockIndex, valueIDOffset = cr.indexBlock.dec.RemapVirtualBlockID(vh.BlockID) if valueIDOffset == virtualBlockIndexMask { return nil, errors.AssertionFailedf("blob file indicates virtual block ID %d in %s should be unreferenced", - vh.BlockID, vh.BlobFileID) + errors.Safe(vh.BlockID), vh.BlobFileID) } } invariants.CheckBounds(physicalBlockIndex, cr.indexBlock.dec.BlockCount()) diff --git a/sstable/blob/fetcher_test.go b/sstable/blob/fetcher_test.go index 7767a0eead9..94bebcca9f6 100644 --- a/sstable/blob/fetcher_test.go +++ b/sstable/blob/fetcher_test.go @@ -159,7 +159,8 @@ func TestValueFetcher(t *testing.T) { fmt.Fprintf(&buf, "%s\n", val) return buf.String() default: - panic(fmt.Sprintf("unknown command: %s", td.Cmd)) + t.Fatalf("unknown command: %s", td.Cmd) + return "" } }) } diff --git a/sstable/block/block.go b/sstable/block/block.go index 4664e360681..902adf23760 100644 --- a/sstable/block/block.go +++ b/sstable/block/block.go @@ -222,7 +222,7 @@ func 
CastMetadataZero[T any](md *Metadata) *T { var z T if invariants.Enabled { if uintptr(unsafe.Pointer(md))%unsafe.Alignof(z) != 0 { - panic(errors.AssertionFailedf("incorrect alignment for %T (%p)", z, unsafe.Pointer(md))) + panic(errors.AssertionFailedf("incorrect alignment for %T (%p)", errors.Safe(z), errors.Safe(unsafe.Pointer(md)))) } } clear((*md)[:unsafe.Sizeof(z)]) @@ -236,7 +236,7 @@ func CastMetadata[T any](md *Metadata) *T { var z T if invariants.Enabled { if uintptr(unsafe.Pointer(md))%unsafe.Alignof(z) != 0 { - panic(fmt.Sprintf("incorrect alignment for %T (%p)", z, unsafe.Pointer(md))) + panic(errors.AssertionFailedf("incorrect alignment for %T (%p)", errors.Safe(z), errors.Safe(unsafe.Pointer(md)))) } } return (*T)(unsafe.Pointer(md)) diff --git a/sstable/block/compression.go b/sstable/block/compression.go index 8dc37f04779..0a94fa2c3c1 100644 --- a/sstable/block/compression.go +++ b/sstable/block/compression.go @@ -161,7 +161,7 @@ var compressionProfileMap = make(map[string]*CompressionProfile) func registerCompressionProfile(p CompressionProfile) *CompressionProfile { key := strings.ToLower(p.Name) if _, ok := compressionProfileMap[key]; ok { - panic(errors.AssertionFailedf("duplicate compression profile: %s", p.Name)) + panic(errors.AssertionFailedf("duplicate compression profile: %s", errors.Safe(p.Name))) } compressionProfileMap[key] = &p return &p diff --git a/sstable/block_property.go b/sstable/block_property.go index 433923e48b3..a64f5c1fa4e 100644 --- a/sstable/block_property.go +++ b/sstable/block_property.go @@ -6,11 +6,11 @@ package sstable import ( "encoding/binary" - "fmt" "math" "sync" "unsafe" + "github.com/cockroachdb/errors" "github.com/cockroachdb/pebble/internal/base" "github.com/cockroachdb/pebble/internal/invariants" ) @@ -529,7 +529,7 @@ func (id shortID) IsValid() bool { func (id shortID) ToByte() byte { if invariants.Enabled && !id.IsValid() { - panic(fmt.Sprintf("inavlid id %d", id)) + panic(errors.AssertionFailedf("invalid 
id %d", errors.Safe(id))) } return byte(id) } diff --git a/sstable/block_property_test_utils.go b/sstable/block_property_test_utils.go index 8b9a2087389..ced4cfe2ec3 100644 --- a/sstable/block_property_test_utils.go +++ b/sstable/block_property_test_utils.go @@ -5,10 +5,10 @@ package sstable import ( - "fmt" "math" "strconv" + "github.com/cockroachdb/errors" "github.com/cockroachdb/pebble/internal/testkeys" ) @@ -54,7 +54,7 @@ func (sr testKeysBlockIntervalSyntheticReplacer) ApplySuffixReplacement( // The testKeysSuffixIntervalMapper below maps keys with no suffix to // [0, MaxUint64); ignore that. if interval.Upper != math.MaxUint64 && uint64(decoded) < interval.Upper { - panic(fmt.Sprintf("the synthetic suffix %d is less than the property upper bound %d", decoded, interval.Upper)) + panic(errors.AssertionFailedf("the synthetic suffix %d is less than the property upper bound %d", decoded, interval.Upper)) } return BlockInterval{uint64(decoded), uint64(decoded) + 1}, nil } diff --git a/sstable/blockiter/transforms.go b/sstable/blockiter/transforms.go index d5307b703c1..62df229f0d3 100644 --- a/sstable/blockiter/transforms.go +++ b/sstable/blockiter/transforms.go @@ -6,9 +6,9 @@ package blockiter import ( "bytes" - "fmt" "unsafe" + "github.com/cockroachdb/errors" "github.com/cockroachdb/pebble/internal/base" ) @@ -157,7 +157,7 @@ func (sp SyntheticPrefix) Apply(key []byte) []byte { func (sp SyntheticPrefix) Invert(key []byte) []byte { res, ok := bytes.CutPrefix(key, sp) if !ok { - panic(fmt.Sprintf("unexpected prefix: %s", key)) + panic(errors.AssertionFailedf("unexpected prefix: %s", key)) } return res } diff --git a/sstable/colblk/bitmap.go b/sstable/colblk/bitmap.go index f27f7cb40e2..e8b067df634 100644 --- a/sstable/colblk/bitmap.go +++ b/sstable/colblk/bitmap.go @@ -50,7 +50,7 @@ func DecodeBitmap(b []byte, off uint32, bitCount int) (bitmap Bitmap, endOffset sz := bitmapRequiredSize(bitCount) if len(b) < int(off)+sz { panic(errors.AssertionFailedf("bitmap of %d 
bits requires at least %d bytes; provided with %d-byte slice", - bitCount, bitmapRequiredSize(bitCount), len(b[off:]))) + errors.Safe(bitCount), errors.Safe(bitmapRequiredSize(bitCount)), errors.Safe(len(b[off:])))) } return Bitmap{ data: makeUnsafeUint64Decoder(b[off:], sz>>align64Shift), @@ -435,7 +435,7 @@ func bitmapToBinFormatter(f *binfmt.Formatter, tp treeprinter.Node, rows int) { return } if encoding != defaultBitmapEncoding { - panic(fmt.Sprintf("unknown bitmap encoding %d", encoding)) + panic(errors.AssertionFailedf("unknown bitmap encoding %d", errors.Safe(encoding))) } f.HexBytesln(1, "default bitmap encoding") if aligned := align(f.RelativeOffset(), 8); aligned-f.RelativeOffset() != 0 { diff --git a/sstable/colblk/bitmap_test.go b/sstable/colblk/bitmap_test.go index e4846468658..a894f547d98 100644 --- a/sstable/colblk/bitmap_test.go +++ b/sstable/colblk/bitmap_test.go @@ -95,7 +95,7 @@ func TestBitmapFixed(t *testing.T) { } default: - panic(fmt.Sprintf("unknown command: %s", td.Cmd)) + t.Fatalf("unknown command: %s", td.Cmd) } return buf.String() }) diff --git a/sstable/colblk/block.go b/sstable/colblk/block.go index 57817e9b3c2..a30c0969a00 100644 --- a/sstable/colblk/block.go +++ b/sstable/colblk/block.go @@ -253,7 +253,7 @@ func (e *BlockEncoder) Finish() []byte { e.buf[e.pageOffset] = 0x00 // Padding byte e.pageOffset++ if e.pageOffset != uint32(len(e.buf)) { - panic(errors.AssertionFailedf("expected pageOffset=%d to equal size=%d", e.pageOffset, len(e.buf))) + panic(errors.AssertionFailedf("expected pageOffset=%d to equal size=%d", errors.Safe(e.pageOffset), errors.Safe(len(e.buf)))) } return e.buf } @@ -288,14 +288,14 @@ func DecodeColumn[V any]( d *BlockDecoder, col int, rows int, dataType DataType, decodeFunc DecodeFunc[V], ) V { if uint16(col) >= d.header.Columns { - panic(errors.AssertionFailedf("column %d is out of range [0, %d)", col, d.header.Columns)) + panic(errors.AssertionFailedf("column %d is out of range [0, %d)", errors.Safe(col), 
errors.Safe(d.header.Columns))) } if dt := d.dataType(col); dt != dataType { - panic(errors.AssertionFailedf("column %d is type %s; not %s", col, dt, dataType)) + panic(errors.AssertionFailedf("column %d is type %s; not %s", errors.Safe(col), dt, dataType)) } v, endOffset := decodeFunc(d.data, d.pageStart(col), rows) if nextColumnOff := d.pageStart(col + 1); endOffset != nextColumnOff { - panic(errors.AssertionFailedf("column %d decoded to offset %d; expected %d", col, endOffset, nextColumnOff)) + panic(errors.AssertionFailedf("column %d decoded to offset %d; expected %d", errors.Safe(col), errors.Safe(endOffset), errors.Safe(nextColumnOff))) } return v } @@ -334,7 +334,7 @@ func (d *BlockDecoder) Rows() int { // is encoded within the block header. func (d *BlockDecoder) DataType(col int) DataType { if uint16(col) >= d.header.Columns { - panic(errors.AssertionFailedf("column %d is out of range [0, %d)", col, d.header.Columns)) + panic(errors.AssertionFailedf("column %d is out of range [0, %d)", errors.Safe(col), errors.Safe(d.header.Columns))) } return d.dataType(col) } @@ -432,10 +432,10 @@ func (d *BlockDecoder) formatColumn( // between the column's pageOffset and the next column's pageOffset. 
switch v := endOff - f.Offset(); cmp.Compare[int](v, 0) { case +1: - panic(fmt.Sprintf("expected f.Offset() = %d, but found %d; did column %s format too few bytes?", endOff, f.Offset(), dataType)) + panic(errors.AssertionFailedf("expected f.Offset() = %d, but found %d; did column %s format too few bytes?", errors.Safe(endOff), errors.Safe(f.Offset()), errors.Safe(dataType))) case 0: case -1: - panic(fmt.Sprintf("expected f.Offset() = %d, but found %d; did column %s format too many bytes?", endOff, f.Offset(), dataType)) + panic(errors.AssertionFailedf("expected f.Offset() = %d, but found %d; did column %s format too many bytes?", errors.Safe(endOff), errors.Safe(f.Offset()), errors.Safe(dataType))) } } diff --git a/sstable/colblk/block_test.go b/sstable/colblk/block_test.go index 41f6f09914c..a6440ad7fb8 100644 --- a/sstable/colblk/block_test.go +++ b/sstable/colblk/block_test.go @@ -77,7 +77,7 @@ var interestingIntRanges = []intRange{ func TestBlockWriter(t *testing.T) { panicIfErr := func(dataType DataType, stringValue string, err error) { if err != nil { - panic(fmt.Sprintf("unable to decode %q as value for data type %s: %s", stringValue, dataType, err)) + t.Fatalf("unable to decode %q as value for data type %s: %s", stringValue, dataType, err) } } var buf bytes.Buffer @@ -110,7 +110,7 @@ func TestBlockWriter(t *testing.T) { case DataTypePrefixBytes: panic("unimplemented") default: - panic(fmt.Sprintf("unsupported data type: %s", v)) + t.Fatalf("unsupported data type: %s", v) } colWriters[i].Reset() } @@ -149,7 +149,7 @@ func TestBlockWriter(t *testing.T) { case DataTypePrefixBytes: panic("unimplemented") default: - panic(fmt.Sprintf("unsupported data type: %s", dataType)) + t.Fatalf("unsupported data type: %s", dataType) } } return "" diff --git a/sstable/colblk/data_block.go b/sstable/colblk/data_block.go index 8375ab7d268..8a9455ccc0d 100644 --- a/sstable/colblk/data_block.go +++ b/sstable/colblk/data_block.go @@ -109,11 +109,11 @@ func 
AssertKeyCompare(comparer *base.Comparer, a, b []byte, kcmp KeyComparison) recomputed.CommonPrefixLen = int32(crbytes.CommonPrefix(a[:recomputed.PrefixLen], b[:bi])) recomputed.UserKeyComparison = int32(comparer.Compare(a, b)) if recomputed.PrefixEqual() != bytes.Equal(a[:recomputed.PrefixLen], b[:bi]) { - panic(errors.AssertionFailedf("PrefixEqual()=%t doesn't hold: %q, %q", kcmp.PrefixEqual(), a, b)) + panic(errors.AssertionFailedf("PrefixEqual()=%t doesn't hold: %q, %q", errors.Safe(kcmp.PrefixEqual()), a, b)) } if recomputed != kcmp { panic(errors.AssertionFailedf("KeyComparison of (%q, %q) = %s, ComparePrev gave %s", - a, b, recomputed, kcmp)) + a, b, errors.Safe(recomputed), errors.Safe(kcmp))) } } @@ -285,7 +285,7 @@ func (w *defaultKeyWriter) ComparePrev(key []byte) KeyComparison { } if v := w.comparer.Compare(key, lp); v != int(cmpv.UserKeyComparison) { panic(errors.AssertionFailedf("user key comparison mismatch: Compare(%q, %q) = %d ≠ %d", - key, lp, v, cmpv.UserKeyComparison)) + key, lp, errors.Safe(v), cmpv.UserKeyComparison)) } } return cmpv @@ -341,7 +341,7 @@ func (w *defaultKeyWriter) Finish(col, rows int, offset uint32, buf []byte) (nex case defaultKeySchemaColumnSuffix: return w.suffixes.Finish(0, rows, offset, buf) default: - panic(fmt.Sprintf("unknown default key column: %d", col)) + panic(errors.AssertionFailedf("unknown default key column: %d", errors.Safe(col))) } } @@ -807,7 +807,7 @@ func (w *DataBlockEncoder) MaterializeLastUserKey(appendTo []byte) []byte { // copy it if they require it to outlive a Reset of the writer. 
func (w *DataBlockEncoder) Finish(rows, size int) (finished []byte, lastKey base.InternalKey) { if invariants.Enabled && rows != w.rows && rows != w.rows-1 { - panic(errors.AssertionFailedf("data block has %d rows; asked to finish %d", w.rows, rows)) + panic(errors.AssertionFailedf("data block has %d rows; asked to finish %d", errors.Safe(w.rows), errors.Safe(rows))) } cols := len(w.Schema.ColumnTypes) + w.numFormatColumns() @@ -1163,7 +1163,7 @@ func (v *DataBlockValidator) Validate( ucmp := comparer.Compare(k.UserKey, prevKey.UserKey) if ucmp < 0 || (ucmp == 0 && k.Trailer >= prevKey.Trailer) { return errors.AssertionFailedf("key %s (row %d) and key %s (row %d) are out of order", - prevKey, i-1, k, i) + prevKey, errors.Safe(i-1), k, errors.Safe(i)) } // Ensure the obsolete bit is set if the key is definitively obsolete. // Not all sources of obsolescence are evident with only a data block @@ -1171,7 +1171,7 @@ func (v *DataBlockValidator) Validate( // a key to be obsolete). if ucmp == 0 && prevKey.Kind() != base.InternalKeyKindMerge && !v.dec.isObsolete.At(i) { return errors.AssertionFailedf("key %s (row %d) is shadowed by previous key %s but is not marked as obsolete", - k, i, prevKey) + k, errors.Safe(i), prevKey) } // Ensure that the prefix-changed bit is set correctly. 
if i > 0 { @@ -1180,7 +1180,7 @@ func (v *DataBlockValidator) Validate( prefixChanged := !bytes.Equal(prevPrefix, currPrefix) if prefixChanged != v.dec.prefixChanged.At(i) { return errors.AssertionFailedf("prefix changed bit for key %q (row %d) is %t, expected %t [prev key was %q]", - k.UserKey, i, v.dec.prefixChanged.At(i), prefixChanged, prevKey.UserKey) + k.UserKey, errors.Safe(i), errors.Safe(v.dec.prefixChanged.At(i)), errors.Safe(prefixChanged), prevKey.UserKey) } } diff --git a/sstable/colblk/index_block.go b/sstable/colblk/index_block.go index 2f7be908c54..a0971c202e9 100644 --- a/sstable/colblk/index_block.go +++ b/sstable/colblk/index_block.go @@ -120,7 +120,7 @@ func (w *IndexBlockWriter) size(rows int) int { // The value of [rows] must be Rows() or Rows()-1. func (w *IndexBlockWriter) Finish(rows int) []byte { if invariants.Enabled && rows != w.rows && rows != w.rows-1 { - panic(errors.AssertionFailedf("index block has %d rows; asked to finish %d", w.rows, rows)) + panic(errors.AssertionFailedf("index block has %d rows; asked to finish %d", errors.Safe(w.rows), errors.Safe(rows))) } w.enc.Init(w.size(rows), Header{ @@ -308,7 +308,7 @@ func (i *IndexIter) applyTransforms(key []byte) []byte { // properties at the iterator's current position. 
func (i *IndexIter) BlockHandleWithProperties() (block.HandleWithProperties, error) { if invariants.Enabled && !i.Valid() { - panic(errors.AssertionFailedf("invalid row %d (n=%d)", i.row, i.n)) + panic(errors.AssertionFailedf("invalid row %d (n=%d)", errors.Safe(i.row), errors.Safe(i.n))) } return block.HandleWithProperties{ Handle: block.Handle{ diff --git a/sstable/colblk/index_block_test.go b/sstable/colblk/index_block_test.go index 2cb1d3f4fb1..15577a95784 100644 --- a/sstable/colblk/index_block_test.go +++ b/sstable/colblk/index_block_test.go @@ -83,7 +83,7 @@ func TestIndexBlock(t *testing.T) { case "invalidate": it.Invalidate() default: - panic(fmt.Sprintf("unknown command: %s", fields[0])) + t.Fatalf("unknown command: %s", fields[0]) } if valid { var bp string @@ -103,7 +103,8 @@ func TestIndexBlock(t *testing.T) { } return buf.String() default: - panic(fmt.Sprintf("unknown command: %s", d.Cmd)) + t.Fatalf("unknown command: %s", d.Cmd) + return "" } }) } diff --git a/sstable/colblk/keyspan.go b/sstable/colblk/keyspan.go index 5f385703184..c2bc9a21cbe 100644 --- a/sstable/colblk/keyspan.go +++ b/sstable/colblk/keyspan.go @@ -523,7 +523,7 @@ func (i *keyspanIter) Prev() (span *keyspan.Span, _ error) { // [startBoundIndex] as the span's start boundary. 
func (i *keyspanIter) gatherKeysForward(startBoundIndex int) *keyspan.Span { if invariants.Enabled && startBoundIndex < 0 { - panic(errors.AssertionFailedf("out of bounds: i.startBoundIndex=%d", startBoundIndex)) + panic(errors.AssertionFailedf("out of bounds: i.startBoundIndex=%d", errors.Safe(startBoundIndex))) } i.startBoundIndex = startBoundIndex if i.startBoundIndex >= int(i.r.boundaryKeysCount)-1 { @@ -552,7 +552,7 @@ func (i *keyspanIter) gatherKeysBackward(startBoundIndex int) *keyspan.Span { } if invariants.Enabled && i.startBoundIndex >= int(i.r.boundaryKeysCount)-1 { panic(errors.AssertionFailedf("out of bounds: i.startBoundIndex=%d, i.r.boundaryKeysCount=%d", - i.startBoundIndex, i.r.boundaryKeysCount)) + errors.Safe(i.startBoundIndex), errors.Safe(i.r.boundaryKeysCount))) } if !i.isNonemptySpan(i.startBoundIndex) { if i.startBoundIndex == 0 { diff --git a/sstable/colblk/prefix_bytes.go b/sstable/colblk/prefix_bytes.go index 223154773d1..d768213899b 100644 --- a/sstable/colblk/prefix_bytes.go +++ b/sstable/colblk/prefix_bytes.go @@ -702,7 +702,7 @@ var _ ColumnWriter = (*PrefixBytesBuilder)(nil) // column will share a column-wide prefix if there is one. 
func (b *PrefixBytesBuilder) Init(bundleSize int) { if bundleSize > 0 && (bundleSize&(bundleSize-1)) != 0 { - panic(errors.AssertionFailedf("prefixbytes bundle size %d is not a power of 2", bundleSize)) + panic(errors.AssertionFailedf("prefixbytes bundle size %d is not a power of 2", errors.Safe(bundleSize))) } *b = PrefixBytesBuilder{ bundleCalc: makeBundleCalc(uint32(bits.TrailingZeros32(uint32(bundleSize)))), @@ -806,7 +806,7 @@ func (b *PrefixBytesBuilder) Put(key []byte, bytesSharedWithPrev int) { panic(errors.AssertionFailedf("keys must be added in order: %q < %q", key, b.data[prev.lastKeyOff:])) } if bytesSharedWithPrev != crbytes.CommonPrefix(key, b.data[prev.lastKeyOff:]) { - panic(errors.AssertionFailedf("bytesSharedWithPrev %d != %d", bytesSharedWithPrev, + panic(errors.AssertionFailedf("bytesSharedWithPrev %d != %d", errors.Safe(bytesSharedWithPrev), crbytes.CommonPrefix(key, b.data[prev.lastKeyOff:]))) } } @@ -933,7 +933,7 @@ func (b *PrefixBytesBuilder) UnsafeGet(i int) []byte { } return b.data[secondLastKeyOff:lastKeyOff] default: - panic(errors.AssertionFailedf("UnsafeGet(%d) called on PrefixBytes with %d keys", i, b.nKeys)) + panic(errors.AssertionFailedf("UnsafeGet(%d) called on PrefixBytes with %d keys", errors.Safe(i), errors.Safe(b.nKeys))) } } @@ -1032,7 +1032,7 @@ func writePrefixCompressed[T Uint]( lastRowOffset = off } if invariants.Enabled && len(buf) < int(destOffset)+len(suffix) { - panic(errors.AssertionFailedf("buf is too small: %d < %d", len(buf[destOffset:]), len(suffix))) + panic(errors.AssertionFailedf("buf is too small: %d < %d", errors.Safe(len(buf[destOffset:])), errors.Safe(len(suffix)))) } memmove( unsafe.Add(unsafe.Pointer(unsafe.SliceData(buf)), destOffset), @@ -1043,7 +1043,7 @@ func writePrefixCompressed[T Uint]( offsetDeltas.UnsafeSet(i, destOffset) } if destOffset != T(sz.compressedDataLen) { - panic(errors.AssertionFailedf("wrote %d, expected %d", destOffset, sz.compressedDataLen)) + panic(errors.AssertionFailedf("wrote 
%d, expected %d", errors.Safe(destOffset), errors.Safe(sz.compressedDataLen))) } } @@ -1057,7 +1057,7 @@ func (b *PrefixBytesBuilder) Finish( col int, rows int, offset uint32, buf []byte, ) (endOffset uint32) { if rows < b.nKeys-1 || rows > b.nKeys { - panic(errors.AssertionFailedf("PrefixBytes has accumulated %d keys, asked to Finish %d", b.nKeys, rows)) + panic(errors.AssertionFailedf("PrefixBytes has accumulated %d keys, asked to Finish %d", errors.Safe(b.nKeys), errors.Safe(rows))) } if rows == 0 { return offset @@ -1104,7 +1104,7 @@ func (b *PrefixBytesBuilder) Size(rows int, offset uint32) uint32 { if rows == 0 { return offset } else if rows != b.nKeys && rows != b.nKeys-1 { - panic(errors.AssertionFailedf("PrefixBytes has accumulated %d keys, asked to Size %d", b.nKeys, rows)) + panic(errors.AssertionFailedf("PrefixBytes has accumulated %d keys, asked to Size %d", errors.Safe(b.nKeys), errors.Safe(rows))) } sz := &b.sizings[rows&1^1] // The 1-byte bundleSize. diff --git a/sstable/colblk/prefix_bytes_test.go b/sstable/colblk/prefix_bytes_test.go index 2a44fee4b35..3b1ecde05eb 100644 --- a/sstable/colblk/prefix_bytes_test.go +++ b/sstable/colblk/prefix_bytes_test.go @@ -110,7 +110,8 @@ func TestPrefixBytes(t *testing.T) { } return out.String() default: - panic(fmt.Sprintf("unrecognized command %q", td.Cmd)) + t.Fatalf("unrecognized command %q", td.Cmd) + return "" } }) } diff --git a/sstable/colblk/raw_bytes_test.go b/sstable/colblk/raw_bytes_test.go index 2ac4c191a6a..217434129b2 100644 --- a/sstable/colblk/raw_bytes_test.go +++ b/sstable/colblk/raw_bytes_test.go @@ -70,7 +70,8 @@ func TestRawBytes(t *testing.T) { } return out.String() default: - panic(fmt.Sprintf("unrecognized command %q", td.Cmd)) + t.Fatalf("unrecognized command %q", td.Cmd) + return "" } }) } diff --git a/sstable/colblk/uints.go b/sstable/colblk/uints.go index 70109ea091b..f20cb38d6b4 100644 --- a/sstable/colblk/uints.go +++ b/sstable/colblk/uints.go @@ -248,7 +248,7 @@ func (b 
*UintBuilder) Get(row int) uint64 { // possible that the array has not been grown to a size that includes [row]. if len(b.elems) <= row { if invariants.Enabled && !b.useDefault { - panic(errors.AssertionFailedf("Get(%d) on UintBuilder with array of size %d", row, len(b.elems))) + panic(errors.AssertionFailedf("Get(%d) on UintBuilder with array of size %d", errors.Safe(row), errors.Safe(len(b.elems)))) } return 0 } @@ -311,7 +311,7 @@ func (b *UintBuilder) determineEncoding(rows int) (_ UintEncoding, deltaBase uin // base for b.stats.encoding. if invariants.Enabled && invariants.Sometimes(1) && rows > 0 { if enc, _ := b.recalculateEncoding(rows); enc != b.stats.encoding { - panic(fmt.Sprintf("fast and slow paths don't agree: %s vs %s", b.stats.encoding, enc)) + panic(errors.AssertionFailedf("fast and slow paths don't agree: %s vs %s", errors.Safe(b.stats.encoding), errors.Safe(enc))) } } return b.stats.encoding, b.stats.minimum @@ -492,7 +492,7 @@ func uintsToBinFormatter( e := UintEncoding(f.PeekUint(1)) // UintEncoding byte if !e.IsValid() { - panic(fmt.Sprintf("%d", e)) + panic(errors.AssertionFailedf("%d", errors.Safe(e))) } f.HexBytesln(1, "encoding: %s", e) diff --git a/sstable/colblk/uints_decode.go b/sstable/colblk/uints_decode.go index c77b91cd043..1b4fe66df68 100644 --- a/sstable/colblk/uints_decode.go +++ b/sstable/colblk/uints_decode.go @@ -25,7 +25,7 @@ func makeUnsafeUint64Decoder(buf []byte, n int) unsafeUint64Decoder { } ptr := unsafe.Pointer(unsafe.SliceData(buf)) if align(uintptr(ptr), align64) != uintptr(ptr) { - panic(errors.AssertionFailedf("slice pointer %p not %d-byte aligned", ptr, align64)) + panic(errors.AssertionFailedf("slice pointer %p not %d-byte aligned", errors.Safe(ptr), errors.Safe(align64))) } if len(buf) < n< 0 || (uc == 0 && v.Upper.IsExclusiveSentinel()) { - panic(fmt.Sprintf("key %q out of singleLeveliterator virtual bounds %s %s", key, v.Lower.UserKey, v.Upper.UserKey)) + panic(errors.AssertionFailedf("key %q out of 
singleLevelIterator virtual bounds %s %s", key, v.Lower.UserKey, v.Upper.UserKey)) } } return kv diff --git a/sstable/reader_iter_two_lvl.go b/sstable/reader_iter_two_lvl.go index 7cc3002492a..d6a101a5404 100644 --- a/sstable/reader_iter_two_lvl.go +++ b/sstable/reader_iter_two_lvl.go @@ -410,8 +410,8 @@ func (i *twoLevelIterator[I, PI, D, PD]) internalSeekGE( // This invariant checking is important enough that we do not gate it // behind invariants.Enabled. if i.secondLevel.boundsCmp > 0 == flags.TrySeekUsingNext() { - panic(fmt.Sprintf("inconsistency in optimization case 1 %t and case 2 %t", - i.secondLevel.boundsCmp > 0, flags.TrySeekUsingNext())) + panic(errors.AssertionFailedf("inconsistency in optimization case 1 %t and case 2 %t", + errors.Safe(i.secondLevel.boundsCmp > 0), errors.Safe(flags.TrySeekUsingNext()))) } if !flags.TrySeekUsingNext() { @@ -679,8 +679,8 @@ func (i *twoLevelIterator[I, PI, D, PD]) seekPrefixGE( // This invariant checking is important enough that we do not gate it // behind invariants.Enabled. if i.secondLevel.boundsCmp > 0 == flags.TrySeekUsingNext() { - panic(fmt.Sprintf("inconsistency in optimization case 1 %t and case 2 %t", - i.secondLevel.boundsCmp > 0, flags.TrySeekUsingNext())) + panic(errors.AssertionFailedf("inconsistency in optimization case 1 %t and case 2 %t", + errors.Safe(i.secondLevel.boundsCmp > 0), errors.Safe(flags.TrySeekUsingNext()))) } if !flags.TrySeekUsingNext() { diff --git a/sstable/rowblk/rowblk_iter.go b/sstable/rowblk/rowblk_iter.go index 1a7af6871a9..729f94a075a 100644 --- a/sstable/rowblk/rowblk_iter.go +++ b/sstable/rowblk/rowblk_iter.go @@ -1276,7 +1276,7 @@ func (i *Iter) nextPrefixV3(succKey []byte) *base.InternalKV { // This should not happen since only the key prefix is shared, so even // if the prefix length is the same as the user key length, the unshared // will include the trailer. 
- panic(errors.AssertionFailedf("unshared %d is too small", unshared)) + panic(errors.AssertionFailedf("unshared %d is too small", errors.Safe(unshared))) } // The trailer is written in little endian, so the key kind is the first // byte in the trailer that is encoded in the slice [unshared-8:unshared]. diff --git a/sstable/rowblk_writer.go b/sstable/rowblk_writer.go index e2625691fd0..0f1b952c77b 100644 --- a/sstable/rowblk_writer.go +++ b/sstable/rowblk_writer.go @@ -621,7 +621,7 @@ func (w *RawRowWriter) makeAddPointDecisionV3( case InternalKeyKindSet, InternalKeyKindSetWithDelete, InternalKeyKindMerge, InternalKeyKindDelete, InternalKeyKindSingleDelete, InternalKeyKindDeleteSized: default: - panic(errors.AssertionFailedf("unexpected key kind %s", keyKind.String())) + panic(errors.AssertionFailedf("unexpected key kind %s", errors.Safe(keyKind.String()))) } // If same user key, then the current key is obsolete if any of the // following is true: diff --git a/sstable/tablefilters/binaryfuse/binary_fuse.go b/sstable/tablefilters/binaryfuse/binary_fuse.go index 70624200360..6c4b4f57388 100644 --- a/sstable/tablefilters/binaryfuse/binary_fuse.go +++ b/sstable/tablefilters/binaryfuse/binary_fuse.go @@ -8,6 +8,7 @@ import ( "fmt" "slices" + "github.com/cockroachdb/errors" "github.com/cockroachdb/pebble/internal/base" "github.com/cockroachdb/pebble/sstable/tablefilters/binaryfuse/bitpacking" "github.com/zeebo/xxh3" @@ -43,7 +44,7 @@ var SupportedBitsPerFingerprint = bitpacking.SupportedBitsPerValue // for exact figures. 
func FilterPolicy(bitsPerFingerprint int) base.TableFilterPolicy { if !slices.Contains(SupportedBitsPerFingerprint, bitsPerFingerprint) { - panic(fmt.Sprintf("invalid bitsPerFingerprint %d", bitsPerFingerprint)) + panic(errors.AssertionFailedf("invalid bitsPerFingerprint %d", errors.Safe(bitsPerFingerprint))) } return filterPolicyImpl{BitsPerFingerprint: bitsPerFingerprint} } diff --git a/sstable/tablefilters/binaryfuse/simulation.go b/sstable/tablefilters/binaryfuse/simulation.go index 85293503214..7b23a79f898 100644 --- a/sstable/tablefilters/binaryfuse/simulation.go +++ b/sstable/tablefilters/binaryfuse/simulation.go @@ -10,6 +10,7 @@ import ( "sync/atomic" "github.com/cockroachdb/crlib/crhumanize" + "github.com/cockroachdb/errors" "github.com/cockroachdb/pebble/sstable/tablefilters/internal/filtersim" ) @@ -26,7 +27,7 @@ func SimulateFPR(avgSize int, fpBits int) (fprMean, fprStdDev, avgBitsPerKey flo } filter, ok := buildFilter(hc, fpBits) if !ok { - panic(fmt.Sprintf("could not build filter (size=%d, bits=%d)", size, fpBits)) + panic(errors.AssertionFailedf("could not build filter (size=%d, bits=%d)", errors.Safe(size), errors.Safe(fpBits))) } sizeSum.Add(uint64(size)) filterSizeSum.Add(uint64(len(filter))) diff --git a/sstable/tablefilters/bloom/adaptive_policy.go b/sstable/tablefilters/bloom/adaptive_policy.go index 687e7e4f543..a402c44fa7e 100644 --- a/sstable/tablefilters/bloom/adaptive_policy.go +++ b/sstable/tablefilters/bloom/adaptive_policy.go @@ -116,14 +116,14 @@ func MaxBitsPerKey(numKeys uint, maxFilterSize uint64) uint32 { if size := FilterSize(numKeys, result); size > maxFilterSize { panic(errors.AssertionFailedf( "MaxBitsPerKey invariant violated: FilterSize(%d, %d) = %d > %d", - numKeys, result, size, maxFilterSize)) + errors.Safe(numKeys), errors.Safe(result), errors.Safe(size), errors.Safe(maxFilterSize))) } // Cross-check: FilterSize with result+1 should be > maxFilterSize. 
if result < cacheLineBits { if sizeNext := FilterSize(numKeys, result+1); sizeNext <= maxFilterSize { panic(errors.AssertionFailedf( "MaxBitsPerKey invariant violated: FilterSize(%d, %d) = %d <= %d but returned %d", - numKeys, result+1, sizeNext, maxFilterSize, result)) + errors.Safe(numKeys), errors.Safe(result+1), errors.Safe(sizeNext), errors.Safe(maxFilterSize), errors.Safe(result))) } } } diff --git a/sstable/tablefilters/bloom/bloom.go b/sstable/tablefilters/bloom/bloom.go index 289ba0bae19..88836096ba9 100644 --- a/sstable/tablefilters/bloom/bloom.go +++ b/sstable/tablefilters/bloom/bloom.go @@ -8,6 +8,7 @@ package bloom import ( "fmt" + "github.com/cockroachdb/errors" "github.com/cockroachdb/pebble/internal/base" ) @@ -159,7 +160,7 @@ const Family base.TableFilterFamily = "rocksdb.BuiltinBloomFilter" // 20 | 6 | 0.140% (1 in 713) func FilterPolicy(bitsPerKey uint32) base.TableFilterPolicy { if bitsPerKey < 1 { - panic(fmt.Sprintf("invalid bitsPerKey %d", bitsPerKey)) + panic(errors.AssertionFailedf("invalid bitsPerKey %d", errors.Safe(bitsPerKey))) } return filterPolicyImpl{BitsPerKey: bitsPerKey} } diff --git a/sstable/test_fixtures.go b/sstable/test_fixtures.go index cc402782ba1..ef249b2bfae 100644 --- a/sstable/test_fixtures.go +++ b/sstable/test_fixtures.go @@ -6,7 +6,6 @@ package sstable import ( "bufio" - "fmt" "io" "math" "os" @@ -15,6 +14,7 @@ import ( "strings" "sync" + "github.com/cockroachdb/errors" "github.com/cockroachdb/pebble/internal/base" "github.com/cockroachdb/pebble/objstorage/objstorageprovider" "github.com/cockroachdb/pebble/sstable/block" @@ -66,11 +66,11 @@ func hamletWordCount() testKVs { wordCount[k] = v } if len(wordCount) != 1710 { - panic(fmt.Sprintf("h.txt entry count: got %d, want %d", len(wordCount), 1710)) + panic(errors.AssertionFailedf("h.txt entry count: got %d, want %d", len(wordCount), 1710)) } for _, s := range hamletNonsenseWords { if _, ok := wordCount[s]; ok { - panic(fmt.Sprintf("nonsense word %q was in h.txt", s)) 
+ panic(errors.AssertionFailedf("nonsense word %q was in h.txt", s)) } } hamletWordCountState.data = wordCount diff --git a/sstable/tieredmeta/histogram_block.go b/sstable/tieredmeta/histogram_block.go index 90b30a8e677..d22ef4e8313 100644 --- a/sstable/tieredmeta/histogram_block.go +++ b/sstable/tieredmeta/histogram_block.go @@ -90,7 +90,7 @@ func (k *Key) decode(data []byte) error { } kt := KindAndTier(data[0]) if kt >= NumKindAndTiers { - return errors.AssertionFailedf("invalid KindAndTier %d", kt) + return errors.AssertionFailedf("invalid KindAndTier %d", errors.Safe(kt)) } id, n := binary.Uvarint(data[1:]) if n <= 0 { diff --git a/sstable/values.go b/sstable/values.go index fda0ef477c6..ca085aae8bf 100644 --- a/sstable/values.go +++ b/sstable/values.go @@ -118,7 +118,7 @@ func (i *defaultInternalValueConstructor) GetInternalValueForPrefixAndValueHandl if vp.IsValueBlockHandle() { return i.vbReader.GetInternalValueForPrefixAndValueHandle(handle) } else if !vp.IsBlobValueHandle() { - panic(errors.AssertionFailedf("block: %x is neither a valblk or blob handle prefix", vp)) + panic(errors.AssertionFailedf("block: %x is neither a valblk or blob handle prefix", errors.Safe(vp))) } // The first byte of [handle] is the valuePrefix byte. 
diff --git a/sstable/writer_test.go b/sstable/writer_test.go index 80ec15da4d7..9e65577d381 100644 --- a/sstable/writer_test.go +++ b/sstable/writer_test.go @@ -459,7 +459,7 @@ func TestWriterWithValueBlocks(t *testing.T) { fmt.Fprintf(&buf, "%s:value-handle len %d block %d offset %d, att %d, same-pre %t\n", kv.K, vh.ValueLen, vh.BlockNum, vh.OffsetInBlock, attribute, setWithSamePrefix) } else { - panic(fmt.Sprintf("unknown value prefix: %d", lv.ValueOrHandle[0])) + t.Fatalf("unknown value prefix: %d", lv.ValueOrHandle[0]) } } else { fmt.Fprintf(&buf, "%s:%s\n", kv.K, lv.ValueOrHandle) diff --git a/table_stats.go b/table_stats.go index 79f018aec6c..2ae5a0ee820 100644 --- a/table_stats.go +++ b/table_stats.go @@ -6,7 +6,6 @@ package pebble import ( "context" - "fmt" "time" "github.com/cockroachdb/crlib/crmath" @@ -787,11 +786,10 @@ func sanityCheckStats( if stats.PointDeletionsBytesEstimate > maxDeletionBytesEstimate || stats.RangeDeletionsBytesEstimate > maxDeletionBytesEstimate { if invariants.Enabled { - panic(fmt.Sprintf("%s: table %s has extreme deletion bytes estimates: point=%d range=%d", - info, meta.TableNum, + panic(errors.AssertionFailedf("%s: table %s has extreme deletion bytes estimates: point=%d range=%d", + errors.Safe(info), meta.TableNum, redact.Safe(stats.PointDeletionsBytesEstimate), - redact.Safe(stats.RangeDeletionsBytesEstimate), - )) + redact.Safe(stats.RangeDeletionsBytesEstimate))) } if v := lastSanityCheckStatsLog.Load(); v == 0 || v.Elapsed() > 30*time.Second { logger.Errorf("%s: table %s has extreme deletion bytes estimates: point=%d range=%d", diff --git a/tool/logs/compaction.go b/tool/logs/compaction.go index 141d2dbbba9..1297728b376 100644 --- a/tool/logs/compaction.go +++ b/tool/logs/compaction.go @@ -1557,7 +1557,7 @@ func unHumanize(s string) uint64 { } val, err := strconv.ParseFloat(s, 64) if err != nil { - panic(fmt.Sprintf("parsing %s: %v", s, err)) + panic(errors.AssertionFailedf("parsing %s: %v", errors.Safe(s), err)) } return 
uint64(val * float64(multiplier)) diff --git a/valsep/value_separator.go b/valsep/value_separator.go index c614f7db4fd..f3843251516 100644 --- a/valsep/value_separator.go +++ b/valsep/value_separator.go @@ -444,7 +444,7 @@ func (vs *ValueSeparator) maybeCheckInvariants() { } accumulatedPreservedValueSize := vs.blobTiers[base.HotTier].totalPreservedValueSize if totalValueSize != accumulatedPreservedValueSize { - panic(errors.AssertionFailedf("totalPreservedValueSize mismatch: %d != %d", totalValueSize, accumulatedPreservedValueSize)) + panic(errors.AssertionFailedf("totalPreservedValueSize mismatch: %d != %d", errors.Safe(totalValueSize), errors.Safe(accumulatedPreservedValueSize))) } } } @@ -467,7 +467,7 @@ func (vs *ValueSeparator) FinishOutput() (ValueSeparationMetadata, error) { phys, ok := vs.inputBlobPhysicalFiles[ref.blobFileID] if !ok { return ValueSeparationMetadata{}, - errors.AssertionFailedf("pebble: blob file %s not found among input sstables", ref.blobFileID) + errors.AssertionFailedf("pebble: blob file %s not found among input sstables", errors.Safe(ref.blobFileID)) } references[i] = manifest.MakeBlobReference(ref.blobFileID, ref.valueSize, ref.valueSize, phys) } diff --git a/version_set.go b/version_set.go index 50e8f0e62c9..baa524e10c3 100644 --- a/version_set.go +++ b/version_set.go @@ -5,7 +5,6 @@ package pebble import ( - "fmt" "slices" "sync" "sync/atomic" @@ -364,7 +363,7 @@ func (vs *versionSet) UpdateVersionLocked( if ve.MinUnflushedLogNum != 0 { if ve.MinUnflushedLogNum < vs.minUnflushedLogNum || vs.nextFileNum.Load() <= uint64(ve.MinUnflushedLogNum) { - panic(fmt.Sprintf("pebble: inconsistent versionEdit minUnflushedLogNum %d", + panic(errors.AssertionFailedf("pebble: inconsistent versionEdit minUnflushedLogNum %d", ve.MinUnflushedLogNum)) } } @@ -928,7 +927,7 @@ func (vs *versionSet) append(v *manifest.Version) { for f := range l.All() { if f.Virtual { if _, ok := vs.latest.virtualBackings.Get(f.TableBacking.DiskFileNum); !ok { - 
panic(fmt.Sprintf("%s is not in virtualBackings", f.TableBacking.DiskFileNum)) + panic(errors.AssertionFailedf("%s is not in virtualBackings", f.TableBacking.DiskFileNum)) } } } @@ -1020,30 +1019,27 @@ func setBasicLevelMetrics(lm *AllLevelMetrics, newVersion *manifest.Version) { if invariants.Enabled { levelFiles := newVersion.Levels[i].Slice() if size := levelFiles.TableSizeSum(); l.Tables.Bytes != size { - panic(fmt.Sprintf("versionSet metrics L%d Size = %d, actual size = %d", i, l.Tables.Bytes, size)) + panic(errors.AssertionFailedf("versionSet metrics L%d Size = %d, actual size = %d", errors.Safe(i), errors.Safe(l.Tables.Bytes), errors.Safe(size))) } refSize := uint64(0) for f := range levelFiles.All() { refSize += f.EstimatedReferenceSize() } if refSize != l.EstimatedReferencesSize { - panic(fmt.Sprintf( + panic(errors.AssertionFailedf( "versionSet metrics L%d EstimatedReferencesSize = %d, recomputed size = %d", - i, l.EstimatedReferencesSize, refSize, - )) + errors.Safe(i), errors.Safe(l.EstimatedReferencesSize), errors.Safe(refSize))) } if nVirtual := levelFiles.NumVirtual(); nVirtual != l.VirtualTables.Count { - panic(fmt.Sprintf( + panic(errors.AssertionFailedf( "versionSet metrics L%d NumVirtual = %d, actual NumVirtual = %d", - i, l.VirtualTables.Count, nVirtual, - )) + errors.Safe(i), errors.Safe(l.VirtualTables.Count), errors.Safe(nVirtual))) } if vSize := levelFiles.VirtualTableSizeSum(); vSize != l.VirtualTables.Bytes { - panic(fmt.Sprintf( + panic(errors.AssertionFailedf( "versionSet metrics L%d Virtual size = %d, actual size = %d", - i, l.VirtualTables.Bytes, vSize, - )) + errors.Safe(i), errors.Safe(l.VirtualTables.Bytes), errors.Safe(vSize))) } } } diff --git a/version_set_test.go b/version_set_test.go index 34aa716c91f..21c3f3aaa1f 100644 --- a/version_set_test.go +++ b/version_set_test.go @@ -421,7 +421,7 @@ func TestLargeKeys(t *testing.T) { end := parseLargeKey(rest) require.NoError(t, b.DeleteRange(start, end, nil)) default: - 
panic(fmt.Sprintf("unknown op: %s", op)) + t.Fatalf("unknown op: %s", op) } } require.NoError(t, b.Commit(Sync)) diff --git a/vfs/atomicfs/marker_test.go b/vfs/atomicfs/marker_test.go index 688a750ca92..f08601391bd 100644 --- a/vfs/atomicfs/marker_test.go +++ b/vfs/atomicfs/marker_test.go @@ -159,7 +159,8 @@ func TestMarker(t *testing.T) { return "" default: - panic(fmt.Sprintf("unknown command %q", td.Cmd)) + t.Fatalf("unknown command %q", td.Cmd) + return "" } }) } diff --git a/vfs/disk_health.go b/vfs/disk_health.go index f4f3b8fe4c3..b55c0a4bb26 100644 --- a/vfs/disk_health.go +++ b/vfs/disk_health.go @@ -6,7 +6,6 @@ package vfs import ( "cmp" - "fmt" "io" "os" "path/filepath" @@ -16,6 +15,7 @@ import ( "time" "github.com/cockroachdb/crlib/crtime" + "github.com/cockroachdb/errors" "github.com/cockroachdb/redact" ) @@ -69,6 +69,11 @@ const ( opTypeMax ) +// SafeFormat implements redact.SafeFormatter. +func (o OpType) SafeFormat(w redact.SafePrinter, _ rune) { + w.Print(redact.SafeString(o.String())) +} + // String implements fmt.Stringer. 
func (o OpType) String() string { switch o { @@ -99,7 +104,7 @@ func (o OpType) String() string { case OpTypeUnknown: return "unknown" default: - panic(fmt.Sprintf("vfs: unknown op type: %d", o)) + panic(errors.AssertionFailedf("vfs: unknown op type: %d", o)) } } diff --git a/vfs/errorfs/errorfs.go b/vfs/errorfs/errorfs.go index a69ed678d20..acdb689f705 100644 --- a/vfs/errorfs/errorfs.go +++ b/vfs/errorfs/errorfs.go @@ -93,14 +93,14 @@ const ( func (o OpKind) IsRead() bool { if o < 0 || o >= numOpKinds { - panic(fmt.Sprintf("invalid op kind: %d", o)) + panic(errors.AssertionFailedf("invalid op kind: %d", o)) } return ReadOps.Contains(o) } func (o OpKind) IsWrite() bool { if o < 0 || o >= numOpKinds { - panic(fmt.Sprintf("invalid op kind: %d", o)) + panic(errors.AssertionFailedf("invalid op kind: %d", o)) } return WriteOps.Contains(o) } diff --git a/vfs/mem_fs.go b/vfs/mem_fs.go index 4397fa38e03..661305ea08e 100644 --- a/vfs/mem_fs.go +++ b/vfs/mem_fs.go @@ -743,7 +743,7 @@ var _ File = (*memFile)(nil) func (f *memFile) Close() error { if n := f.n.refs.Add(-1); n < 0 { - panic(fmt.Sprintf("pebble: close of unopened file: %d", n)) + panic(errors.AssertionFailedf("pebble: close of unopened file: %d", errors.Safe(n))) } // Set node pointer to nil, to cause panic on any subsequent method call. This // is a defence-in-depth to catch use-after-close or double-close bugs. 
diff --git a/wal/failover_manager.go b/wal/failover_manager.go index 7eab9b1d942..0a2913daabe 100644 --- a/wal/failover_manager.go +++ b/wal/failover_manager.go @@ -372,7 +372,7 @@ func (m *failoverMonitor) accumulateDurationLocked(now time.Time) { dur := now.Sub(m.mu.lastAccumulateIntoDurations) if invariants.Enabled && dur < 0 { panic(errors.AssertionFailedf("time regressed: last accumulated %s; now is %s", - m.mu.lastAccumulateIntoDurations, now)) + errors.Safe(m.mu.lastAccumulateIntoDurations), errors.Safe(now))) } m.mu.lastAccumulateIntoDurations = now if m.mu.dirIndex == primaryDirIndex { diff --git a/wal/failover_writer.go b/wal/failover_writer.go index a2556930245..7b6c856f2e6 100644 --- a/wal/failover_writer.go +++ b/wal/failover_writer.go @@ -948,7 +948,7 @@ func (ww *failoverWriter) closeInternal() (logicalOffset int64, err error) { ww.mu.closed = true n, m := ww.q.popAll(err) if err == nil && (n > 0 || m > 0) { - panic(errors.AssertionFailedf("no error but recordQueue had %d records and %d syncs", n, m)) + panic(errors.AssertionFailedf("no error but recordQueue had %d records and %d syncs", errors.Safe(n), errors.Safe(m))) } return logicalOffset, err } diff --git a/wal/reader_test.go b/wal/reader_test.go index a00aa1d2a80..93e8e6c42f8 100644 --- a/wal/reader_test.go +++ b/wal/reader_test.go @@ -213,7 +213,7 @@ func TestReader(t *testing.T) { fmt.Fprintf(&buf, "%d..%d: corrupt-tail\n", offset-length, offset) default: - panic(fmt.Sprintf("unrecognized command %q", fields[0])) + t.Fatalf("unrecognized command %q", fields[0]) } } if td.HasArg("close-unclean") {