@@ -520,9 +520,7 @@ func (r *replicaLogStorage) stagePendingTruncationSharedRaftMuLockedMuLocked(pt
 	}
 }
 
-func (r *replicaLogStorage) stagePendingTruncationOnSnapshotRaftMuLocked(
-	truncState kvserverpb.RaftTruncatedState,
-) {
+func (r *replicaLogStorage) stageApplySnapshot(truncState kvserverpb.RaftTruncatedState) {
 	r.raftMu.AssertHeld()
 
 	// A snapshot application implies a log truncation to the snapshot's index,
@@ -533,11 +531,16 @@ func (r *replicaLogStorage) stagePendingTruncationOnSnapshotRaftMuLocked(
 	//
 	// The truncation finalized below, after the snapshot is visible.
 
-	// Clear the raft entry cache at the end of this method (after mu has
-	// been released). This means that the cache will lag the disk, but since
-	// we update the in-memory metadata for the raft log before mutating disk,
-	// the cached entries will never be visible to any readers until they are
-	// cleared from the cache.
+	// Clear the raft entry cache at the end of this method (after mu has been
+	// released). Any reader that obtains their log bounds after the critical
+	// section but before the clear will see an empty log anyway, since the
+	// in-memory state is already updated to reflect the truncation, even if
+	// entries are still present in the cache.
+	//
+	// NB: a reader that obtained bounds pre-critical section might be able to
+	// load entries, though, and could repopulate the cache after it has been
+	// cleared - the cache is not "snapshotted". Ideally, mu-only readers simply
+	// cannot populate the cache.
 	defer r.cache.Drop(r.ls.RangeID)
 
 	r.mu.Lock()
@@ -553,15 +556,6 @@ func (r *replicaLogStorage) stagePendingTruncationOnSnapshotRaftMuLocked(
 	//
 	// [1]: this is not properly supported yet and will currently fatal.
 	// See: https://github.com/cockroachdb/cockroach/pull/125530
-	r.stagePendingTruncationSharedRaftMuLockedMuLocked(pendingTruncation{
-		RaftTruncatedState: truncState,
-		expectedFirstIndex: r.shMu.trunc.Index + 1,
-		logDeltaBytes:      -r.shMu.size, // we're removing everything...
-		isDeltaTrusted:     true,         // ... and we know it
-		hasSideloaded:      true,         // just in case, no point in actually checking
-	})
-	r.shMu.sizeTrusted = true // we know it's zero (see above)
-
 	// We also, in the same mu critical section, update the in-memory metadata
 	// accordingly before the change is visible on the engine. This means that
 	// even if someone used the in-memory state to grab an iterator (all within
@@ -572,8 +566,11 @@ func (r *replicaLogStorage) stagePendingTruncationOnSnapshotRaftMuLocked(
 	r.updateStateRaftMuLockedMuLocked(logstore.RaftState{
 		LastIndex: truncState.Index,
 		LastTerm:  truncState.Term,
-		ByteSize:  0, // it's already zero, but we have to do it again
+		ByteSize:  0,
 	})
+	r.shMu.trunc = truncState
+	r.shMu.lastCheckSize = 0
+	r.shMu.sizeTrusted = true
 }
 
 func (r *replicaLogStorage) stagePendingTruncationRaftMuLocked(pt pendingTruncation) {