@@ -99,7 +99,7 @@ type generator struct {
 	// or snapshots maps.
 	iters map[objID]objIDSet
 	// objectID -> db: used to keep track of the DB a batch, iter, or snapshot
-	// was created on.
+	// was created on. It should be read through the dbIDForObj method.
 	objDB map[objID]objID
 	// readerID -> reader iters: used to keep track of the open iterators on a
 	// reader. The iter set value will also be indexed by either the batches or
@@ -318,7 +318,7 @@ func (g *generator) batchCommit() {
 	}
 
 	batchID := g.liveBatches.rand(g.rng)
-	dbID := g.objDB[batchID]
+	dbID := g.dbIDForObj(batchID)
 	g.removeBatchFromGenerator(batchID)
 
 	// The batch we're applying may contain single delete tombstones that when
@@ -574,17 +574,24 @@ func (g *generator) randKeyTypesAndMask() (keyTypes uint32, maskSuffix []byte) {
 }
 
 func (g *generator) deriveDB(readerID objID) objID {
+	dbParentID := readerID
 	if readerID.tag() == iterTag {
-		readerID = g.iterReaderID[readerID]
+		dbParentID = g.iterReaderID[readerID]
 	}
-	dbParentID := readerID
 	if dbParentID.tag() != dbTag {
-		dbParentID = g.objDB[dbParentID]
+		dbParentID = g.dbIDForObj(dbParentID)
 	}
 	g.objDB[readerID] = dbParentID
 	return dbParentID
 }
 
+func (g *generator) dbIDForObj(objID objID) objID {
+	if g.objDB[objID] == 0 {
+		panic(fmt.Sprintf("object %s has no associated DB", objID))
+	}
+	return g.objDB[objID]
+}
+
 func (g *generator) newIterUsingClone() {
 	if len(g.liveIters) == 0 {
 		return
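The core of this change is the new dbIDForObj accessor above: call sites stop indexing objDB directly, so a missing objID -> DB association panics immediately instead of silently yielding the zero objID. Below is a minimal, self-contained sketch of that accessor pattern; the gen type and the objID definition here are simplified stand-ins for illustration, not the actual metamorphic package types.

package main

import "fmt"

// objID is a simplified stand-in for the generator's object identifier;
// the zero value means "no object".
type objID uint32

// gen holds the objID -> DB association, mirroring generator.objDB.
type gen struct {
	objDB map[objID]objID
}

// dbIDForObj mirrors the accessor added in this diff: reading objDB through
// a method turns a forgotten association into an immediate panic rather
// than a silently propagated zero objID.
func (g *gen) dbIDForObj(id objID) objID {
	if g.objDB[id] == 0 {
		panic(fmt.Sprintf("object %d has no associated DB", id))
	}
	return g.objDB[id]
}

func main() {
	g := &gen{objDB: map[objID]objID{42: 1}}
	fmt.Println(g.dbIDForObj(42)) // prints 1
	// g.dbIDForObj(7) would panic: "object 7 has no associated DB"
}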
@@ -815,7 +822,7 @@ func (g *generator) iterSeekPrefixGE(iterID objID) {
 	// random key.
 	if g.rng.Intn(10) >= 1 {
 		possibleKeys := make([][]byte, 0, 100)
-		inRangeKeys := g.keyManager.InRangeKeysForObj(g.objDB[iterID], lower, upper)
+		inRangeKeys := g.keyManager.InRangeKeysForObj(g.dbIDForObj(iterID), lower, upper)
 		for _, keyMeta := range inRangeKeys {
 			visibleHistory := keyMeta.history.before(iterCreationTimestamp)
 
@@ -1079,7 +1086,7 @@ func (g *generator) writerApply() {
 	}
 
 	batchID := g.liveBatches.rand(g.rng)
-	dbID := g.objDB[batchID]
+	dbID := g.dbIDForObj(batchID)
 
 	var writerID objID
 	for {
@@ -1089,7 +1096,7 @@ func (g *generator) writerApply() {
 		writerID = g.liveWriters.rand(g.rng)
 		writerDBID := writerID
 		if writerID.tag() != dbTag {
-			writerDBID = g.objDB[writerID]
+			writerDBID = g.dbIDForObj(writerID)
 		}
 		if writerID != batchID && writerDBID == dbID {
 			break
@@ -1131,7 +1138,7 @@ func (g *generator) writerDelete() {
 	writerID := g.liveWriters.rand(g.rng)
 	derivedDBID := writerID
 	if derivedDBID.tag() != dbTag {
-		derivedDBID = g.objDB[writerID]
+		derivedDBID = g.dbIDForObj(writerID)
 	}
 	g.add(&deleteOp{
 		writerID: writerID,
@@ -1230,7 +1237,7 @@ func (g *generator) writerIngest() {
 	for i := 0; i < n; i++ {
 		batchID := g.liveBatches.rand(g.rng)
 		batchIDs[i] = batchID
-		derivedDBIDs[i] = g.objDB[batchIDs[i]]
+		derivedDBIDs[i] = g.dbIDForObj(batchID)
 		g.removeBatchFromGenerator(batchID)
 	}
 
@@ -1275,7 +1282,7 @@ func (g *generator) writerIngestAndExcise() {
 	g.removeBatchFromGenerator(batchID)
 
 	start, end := g.prefixKeyRange()
-	derivedDBID := g.objDB[batchID]
+	derivedDBID := g.dbIDForObj(batchID)
 
 	// Check for any single delete conflicts. If this batch is single-deleting
 	// a key that isn't safe to single delete in the underlying db, _and_ this