Skip to content

Commit 41e97bb

Browse files
*: wrap safe diagnostic arguments with errors.Safe() for redacted crash reports
For panic messages using errors.AssertionFailedf, arguments are redacted by default in CockroachDB crash reports. Wrap safe diagnostic arguments (internal identifiers, counts, sizes, enum values) with errors.Safe() so they appear in redacted reports, while leaving user keys and values unwrapped to prevent PII leaks. Also add SafeFormat/String implementations for interleavePos, OpKind, and OpType so these types are self-formatting as safe values.

Co-Authored-By: roachdev-claude <roachdev-claude-bot@cockroachlabs.com>
1 parent 6c6e333 commit 41e97bb

File tree

88 files changed

+292
-213
lines changed

Some content is hidden

Large commits have some content hidden by default. Use the search box below for content that may be hidden.

88 files changed

+292
-213
lines changed

cmd/pebble/test.go

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -142,7 +142,7 @@ func (w *namedHistogram) Record(elapsed time.Duration) {
142142
// Note that a histogram only drops recorded values that are out of range,
143143
// but we clamp the latency value to the configured range to prevent such
144144
// drops. This code path should never happen.
145-
panic(errors.AssertionFailedf(`%s: recording value: %s`, w.name, err))
145+
panic(errors.AssertionFailedf(`%s: recording value: %s`, errors.Safe(w.name), err))
146146
}
147147
}
148148

cockroachkvs/cockroachkvs.go

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -476,7 +476,7 @@ func normalizeVersionForCompare(a []byte) []byte {
476476
func normalizeEngineSuffixForCompare(a []byte) []byte {
477477
// Check sentinel byte.
478478
if invariants.Enabled && len(a) != int(a[len(a)-1]) {
479-
panic(errors.AssertionFailedf("malformed suffix: %x (length byte is %d; but suffix is %d bytes)", a, a[len(a)-1], len(a)))
479+
panic(errors.AssertionFailedf("malformed suffix: %x (length byte is %d; but suffix is %d bytes)", errors.Safe(a), errors.Safe(a[len(a)-1]), errors.Safe(len(a))))
480480
}
481481
// Strip off sentinel byte.
482482
a = a[:len(a)-1]
@@ -659,7 +659,7 @@ func (kw *cockroachKeyWriter) WriteKey(
659659
versionLen := int(key[len(key)-1])
660660
if (len(key)-versionLen) != int(keyPrefixLen) || key[keyPrefixLen-1] != 0x00 {
661661
panic(errors.AssertionFailedf("invalid %d-byte key with %d-byte prefix (%q)",
662-
len(key), keyPrefixLen, key))
662+
errors.Safe(len(key)), errors.Safe(keyPrefixLen), key))
663663
}
664664
// TODO(jackson): Avoid copying the previous suffix.
665665
kw.prevSuffix = append(kw.prevSuffix[:0], key[keyPrefixLen:]...)
@@ -754,7 +754,7 @@ func (kw *cockroachKeyWriter) Finish(
754754
case cockroachColUntypedVersion:
755755
return kw.untypedVersions.Finish(0, rows, offset, buf)
756756
default:
757-
panic(errors.AssertionFailedf("unknown default key column: %d", col))
757+
panic(errors.AssertionFailedf("unknown default key column: %d", errors.Safe(col)))
758758
}
759759
}
760760

@@ -793,7 +793,7 @@ func (ks *cockroachKeySeeker) init(
793793
header := bd.Header()
794794
header = header[:len(header)-colblk.DataBlockCustomHeaderSize]
795795
if len(header) != 1 {
796-
panic(errors.AssertionFailedf("invalid key schema-specific header %x", header))
796+
panic(errors.AssertionFailedf("invalid key schema-specific header %x", errors.Safe(header)))
797797
}
798798
ks.suffixTypes = suffixTypes(header[0])
799799
}
@@ -1003,7 +1003,7 @@ func (ks *cockroachKeySeeker) MaterializeUserKey(
10031003
ki *colblk.PrefixBytesIter, prevRow, row int,
10041004
) []byte {
10051005
if invariants.Enabled && (row < 0 || row >= ks.roachKeys.Rows()) {
1006-
panic(errors.AssertionFailedf("invalid row number %d", row))
1006+
panic(errors.AssertionFailedf("invalid row number %d", errors.Safe(row)))
10071007
}
10081008
if prevRow+1 == row && prevRow >= 0 {
10091009
ks.roachKeys.SetNext(ki)
@@ -1148,7 +1148,7 @@ func (fk formatKeySuffix) Format(f fmt.State, c rune) {
11481148
fmt.Fprintf(f, "%d.%09d,%d", wallTime/1e9, wallTime%1e9, logical)
11491149
}
11501150
default:
1151-
panic(errors.AssertionFailedf("invalid suffix length: %d", len(fk)))
1151+
panic(errors.AssertionFailedf("invalid suffix length: %d", errors.Safe(len(fk))))
11521152
}
11531153
}
11541154

compaction.go

Lines changed: 10 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -188,6 +188,10 @@ func (k compactionKind) String() string {
188188
return "?"
189189
}
190190

191+
func (k compactionKind) SafeFormat(w redact.SafePrinter, _ rune) {
192+
w.Print(redact.SafeString(k.String()))
193+
}
194+
191195
// compactingOrFlushing returns "flushing" if the compaction kind is a flush,
192196
// otherwise it returns "compacting".
193197
func (k compactionKind) compactingOrFlushing() string {
@@ -1690,9 +1694,9 @@ func (d *DB) flush1() (bytesFlushed uint64, err error) {
16901694
for i := 0; i < n; i++ {
16911695
if logNum := d.mu.mem.queue[i].logNum; logNum >= minUnflushedLogNum {
16921696
panic(errors.AssertionFailedf("logNum invariant violated: flushing %d items; %d:type=%T,logNum=%d; %d:type=%T,logNum=%d",
1693-
n,
1694-
i, d.mu.mem.queue[i].flushable, logNum,
1695-
n, d.mu.mem.queue[n].flushable, minUnflushedLogNum))
1697+
errors.Safe(n),
1698+
errors.Safe(i), errors.Safe(d.mu.mem.queue[i].flushable), logNum,
1699+
errors.Safe(n), errors.Safe(d.mu.mem.queue[n].flushable), minUnflushedLogNum))
16961700
}
16971701
}
16981702
}
@@ -1837,7 +1841,7 @@ func (d *DB) flush1() (bytesFlushed uint64, err error) {
18371841
d.clearCompactingState(c, err != nil)
18381842
if c.UsesBurstConcurrency() {
18391843
if v := d.mu.compact.burstConcurrency.Add(-1); v < 0 {
1840-
panic(errors.AssertionFailedf("burst concurrency underflow: %d", v))
1844+
panic(errors.AssertionFailedf("burst concurrency underflow: %d", errors.Safe(v)))
18411845
}
18421846
}
18431847
delete(d.mu.compact.inProgress, c)
@@ -2101,7 +2105,7 @@ func (d *DB) runPickedCompaction(pc pickedCompaction, grantHandle CompactionGran
21012105
}
21022106
}
21032107
if doneChannel == nil {
2104-
panic(errors.AssertionFailedf("did not find manual compaction with id %d", pc.ManualID()))
2108+
panic(errors.AssertionFailedf("did not find manual compaction with id %d", errors.Safe(pc.ManualID())))
21052109
}
21062110
}
21072111

@@ -2267,7 +2271,7 @@ func (d *DB) compact(c compaction, errChannel chan error) {
22672271
}
22682272
if c.UsesBurstConcurrency() {
22692273
if v := d.mu.compact.burstConcurrency.Add(-1); v < 0 {
2270-
panic(errors.AssertionFailedf("burst concurrency underflow: %d", v))
2274+
panic(errors.AssertionFailedf("burst concurrency underflow: %d", errors.Safe(v)))
22712275
}
22722276
}
22732277
delete(d.mu.compact.inProgress, c)

compaction_picker.go

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -282,7 +282,7 @@ func newPickedTableCompaction(
282282
}
283283
if startLevel > 0 && startLevel < baseLevel {
284284
panic(errors.AssertionFailedf("invalid compaction: start level %d should not be empty (base level %d)",
285-
startLevel, baseLevel))
285+
errors.Safe(startLevel), errors.Safe(baseLevel)))
286286
}
287287

288288
pc := &pickedTableCompaction{
@@ -514,7 +514,7 @@ func (pc *pickedTableCompaction) maybeGrow(
514514
func (pc *pickedTableCompaction) maybeGrowL0ForBase(cmp base.Compare, maxExpandedBytes uint64) {
515515
if invariants.Enabled {
516516
if pc.startLevel.level != 0 {
517-
panic(errors.AssertionFailedf("pc.startLevel.level is %d, expected 0", pc.startLevel.level))
517+
panic(errors.AssertionFailedf("pc.startLevel.level is %d, expected 0", errors.Safe(pc.startLevel.level)))
518518
}
519519
}
520520

@@ -1664,7 +1664,7 @@ func (p *compactionPickerByScore) pickedCompactionFromCandidateFile(
16641664
}
16651665
}
16661666
if !found {
1667-
panic(errors.AssertionFailedf("file %s not found in level %d as expected", candidate.TableNum, startLevel))
1667+
panic(errors.AssertionFailedf("file %s not found in level %d as expected", candidate.TableNum, errors.Safe(startLevel)))
16681668
}
16691669
}
16701670

db.go

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -795,7 +795,7 @@ func (d *DB) applyInternal(batch *Batch, opts *WriteOptions, noSyncWait bool) er
795795
return ErrReadOnly
796796
}
797797
if batch.db != nil && batch.db != d {
798-
panic(errors.AssertionFailedf("pebble: batch db mismatch: %p != %p", batch.db, d))
798+
panic(errors.AssertionFailedf("pebble: batch db mismatch: %p != %p", errors.Safe(batch.db), errors.Safe(d)))
799799
}
800800

801801
sync := opts.GetSync()
@@ -994,7 +994,7 @@ func assertZeroed(v reflect.Value) error {
994994
return assertZeroed(v.Elem())
995995
case reflect.Slice:
996996
if v.Len() > 0 {
997-
return errors.AssertionFailedf("%s is not zeroed (%d len): %#v", typ.Name(), v.Len(), v)
997+
return errors.AssertionFailedf("%s is not zeroed (%d len): %#v", errors.Safe(typ.Name()), errors.Safe(v.Len()), v)
998998
}
999999
resliced := v.Slice(0, v.Cap())
10001000
for i := 0; i < resliced.Len(); i++ {
@@ -1014,7 +1014,7 @@ func assertZeroed(v reflect.Value) error {
10141014
}
10151015
return nil
10161016
}
1017-
return errors.AssertionFailedf("%s (%s) is not zeroed: %#v", typ.Name(), typ.Kind(), v)
1017+
return errors.AssertionFailedf("%s (%s) is not zeroed: %#v", errors.Safe(typ.Name()), typ.Kind(), v)
10181018
}
10191019

10201020
func newIterAlloc() *iterAlloc {

file_cache.go

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -291,7 +291,7 @@ func (h *fileCacheHandle) findOrCreateBlob(
291291
func (h *fileCacheHandle) Evict(fileNum base.DiskFileNum, fileType base.FileType) {
292292
defer func() {
293293
if p := recover(); p != nil {
294-
panic(errors.AssertionFailedf("pebble: evicting in-use file %s(%s): %v", fileNum, fileType, p))
294+
panic(errors.AssertionFailedf("pebble: evicting in-use file %s(%s): %v", fileNum, fileType, errors.Safe(p)))
295295
}
296296
}()
297297
h.fileCache.c.Evict(fileCacheKey{handle: h, fileNum: fileNum, fileType: fileType})
@@ -429,7 +429,7 @@ func (c *FileCache) Ref() {
429429
// We don't want the reference count to ever go from 0 -> 1,
430430
// cause a reference count of 0 implies that we've closed the cache.
431431
if v <= 1 {
432-
panic(errors.AssertionFailedf("pebble: inconsistent reference count: %d", v))
432+
panic(errors.AssertionFailedf("pebble: inconsistent reference count: %d", errors.Safe(v)))
433433
}
434434
}
435435

@@ -438,7 +438,7 @@ func (c *FileCache) Unref() {
438438
v := c.refs.Add(-1)
439439
switch {
440440
case v < 0:
441-
panic(errors.AssertionFailedf("pebble: inconsistent reference count: %d", v))
441+
panic(errors.AssertionFailedf("pebble: inconsistent reference count: %d", errors.Safe(v)))
442442
case v == 0:
443443
c.c.Close()
444444
c.c = genericcache.Cache[fileCacheKey, fileCacheValue, initFileOpts]{}

flushable.go

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -121,7 +121,7 @@ type flushableEntry struct {
121121
func (e *flushableEntry) readerRef() {
122122
switch v := e.readerRefs.Add(1); {
123123
case v <= 1:
124-
panic(errors.AssertionFailedf("pebble: inconsistent reference count: %d", v))
124+
panic(errors.AssertionFailedf("pebble: inconsistent reference count: %d", errors.Safe(v)))
125125
}
126126
}
127127

@@ -140,7 +140,7 @@ func (e *flushableEntry) readerUnrefHelper(
140140
) {
141141
switch v := e.readerRefs.Add(-1); {
142142
case v < 0:
143-
panic(errors.AssertionFailedf("pebble: inconsistent reference count: %d", v))
143+
panic(errors.AssertionFailedf("pebble: inconsistent reference count: %d", errors.Safe(v)))
144144
case v == 0:
145145
if e.releaseMemAccounting == nil {
146146
panic("pebble: memtable reservation already released")

ingest.go

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -2393,7 +2393,7 @@ func (d *DB) ingestApply(
23932393
}
23942394
if isShared && f.Level < sharedLevelsStart {
23952395
panic(errors.AssertionFailedf("cannot slot a shared file higher than the highest shared level: %d < %d",
2396-
f.Level, sharedLevelsStart))
2396+
errors.Safe(f.Level), errors.Safe(sharedLevelsStart)))
23972397
}
23982398
f.Meta = m
23992399
levelMetrics := metrics.level(f.Level)
@@ -2525,7 +2525,7 @@ func (d *DB) ingestApply(
25252525
d.opts.Comparer.FormatKey(exciseSpan.Start), d.opts.Comparer.FormatKey(exciseSpan.End),
25262526
exciseSeqNum, d.opts.Comparer.FormatKey(efos.protectedRanges[i].Start),
25272527
d.opts.Comparer.FormatKey(efos.protectedRanges[i].End), efos.seqNum,
2528-
efos.hasTransitioned(), efos.isClosed(), d.mu.versions.visibleSeqNum.Load()))
2528+
errors.Safe(efos.hasTransitioned()), errors.Safe(efos.isClosed()), d.mu.versions.visibleSeqNum.Load()))
25292529
}
25302530
}
25312531
}

internal/arenaskl/arena.go

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -47,7 +47,7 @@ const MaxArenaSize = min(math.MaxUint32, math.MaxInt)
4747
func NewArena(buf []byte) *Arena {
4848
if len(buf) > MaxArenaSize {
4949
if invariants.Enabled {
50-
panic(errors.AssertionFailedf("attempting to create arena of size %d", len(buf)))
50+
panic(errors.AssertionFailedf("attempting to create arena of size %d", errors.Safe(len(buf))))
5151
}
5252
buf = buf[:MaxArenaSize]
5353
}
@@ -84,7 +84,7 @@ func (a *Arena) Capacity() uint32 {
8484
// requested size but don't use those extra bytes).
8585
func (a *Arena) alloc(size, alignment, overflow uint32) (uint32, error) {
8686
if invariants.Enabled && (alignment&(alignment-1)) != 0 {
87-
panic(errors.AssertionFailedf("invalid alignment %d", alignment))
87+
panic(errors.AssertionFailedf("invalid alignment %d", errors.Safe(alignment)))
8888
}
8989
// Verify that the arena isn't already full.
9090
origSize := a.n.Load()

internal/ascii/table/table.go

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -49,7 +49,7 @@ func Define[T any](fields ...Element) Layout[T] {
4949
for i := range fields {
5050
if f, ok := fields[i].(Field[T]); ok {
5151
if h := f.header(); len(h) > f.width() {
52-
panic(errors.AssertionFailedf("header %q is too long for column %d", h, i))
52+
panic(errors.AssertionFailedf("header %q is too long for column %d", errors.Safe(h), errors.Safe(i)))
5353
}
5454
}
5555
}

0 commit comments

Comments (0)