
Commit f05d298

kvserver: clean up comments
This commit cleans up several comments, clarifying the concurrency concerns around `tryConstructMMARangeMsg` and correcting a misunderstanding about `raftStatusRLocked`: the issue there wasn't holding only RLock while returning a pointer, but that returning a pointer incurs an unnecessary heap allocation.
1 parent: 65e1ef5
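
For context on that allocation claim, here is a minimal, self-contained sketch (not from the patch; `statusLike` is a hypothetical stand-in for `raft.Status`): returning a pointer to a locally built value forces it to escape to the heap, while returning the value lets the compiler keep it on the stack. Building with `go build -gcflags=-m` reports the escape.

package main

// statusLike is a hypothetical stand-in for raft.Status.
type statusLike struct {
	term   uint64
	commit uint64
}

// byPointer returns a pointer to a local value. Escape analysis moves
// s to the heap ("moved to heap: s" under -gcflags=-m), so callers pay
// a heap allocation.
func byPointer() *statusLike {
	s := statusLike{term: 5, commit: 10}
	return &s
}

// byValue copies the struct out; the result can live on the caller's
// stack and no allocation is needed.
func byValue() statusLike {
	return statusLike{term: 5, commit: 10}
}

func main() {
	p := byPointer() // heap-allocated
	v := byValue()   // stack copy
	_, _ = p, v
}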

2 files changed: +15 -19 lines

pkg/kv/kvserver/mma_replica_store.go

Lines changed: 10 additions & 14 deletions
@@ -45,18 +45,6 @@ func (mr *mmaReplica) mmaRangeLoad() mmaprototype.RangeLoad {
 // should be sent to mma and overrides mmaSpanConfigIsUpToDate as true. The
 // contract is: if this returns true, mmaReplica must send a fully populated
 // range message to mma.
-//
-// Ideally, this would be called within isLeaseholderWithDescAndConfig so that we
-// acquire the lock on the replica only once. However, we can't do that because,
-// at that point, it's still unclear whether the message will be sent. We only
-// want to set mmaSpanConfigIsUpToDate to true that the message will definitely
-// be sent to MMA. Technically, it should be fine since mmaSpanConfigIsUpToDate
-// would be set to true soonly after during markSpanConfigNeedsUpdate, but it
-// would be wrong if tryConstructMMARangeMsg is concurrent. Specifically, if
-// another call to tryConstructMMARangeMsg sees mmaSpanConfigIsUpToDate as true,
-// it may decide to drop the span config even though the most up-to-date span
-// config might end up being dropped. In addition, isLeaseholderWithDescAndConfig
-// would only need a read lock on the replica without this.
 func (mr *mmaReplica) maybePromiseSpanConfigUpdate() (needed bool) {
 	r := (*Replica)(mr)
 	r.mu.Lock()
@@ -68,7 +56,7 @@ func (mr *mmaReplica) maybePromiseSpanConfigUpdate() (needed bool) {
 
 // markSpanConfigNeedsUpdate marks the span config as needing an update. This is
 // called by tryConstructMMARangeMsg when it cannot construct the RangeMsg. It
-// is signaled that mma might have deleted this range state (including the span
+// signals that mma might have deleted this range state (including the span
 // config), so we need to send a full range message to mma the next time
 // mmaReplica is the leaseholder.
 func (mr *mmaReplica) markSpanConfigNeedsUpdate() {
@@ -110,7 +98,7 @@ func (mr *mmaReplica) isLeaseholderWithDescAndConfig(
 	defer r.mu.RUnlock()
 	now := r.store.Clock().NowAsClockTimestamp()
 	leaseStatus := r.leaseStatusAtRLocked(ctx, now)
-	// TODO(wenyihu6): Alternatively, we could just read r.shMu.state.Leas` and
+	// TODO(wenyihu6): Alternatively, we could just read r.shMu.state.Lease and
 	// check if it is non-nil and call OwnedBy(r.store.StoreID()), which would be
 	// cheaper. One tradeoff is that if the lease has expired, and a new lease has
 	// not been installed in the state machine, the cheaper approach would think
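
The cheaper check that TODO describes would look roughly like the sketch below, with hypothetical stand-in types (`OwnedBy` mirrors the `roachpb.Lease` method the comment names; none of this is the code in the tree):

package main

import "fmt"

// storeID and lease are hypothetical stand-ins for the roachpb types.
type storeID int

type lease struct{ owner storeID }

// OwnedBy mirrors the roachpb.Lease method named in the TODO.
func (l *lease) OwnedBy(s storeID) bool { return l.owner == s }

func main() {
	stateLease := &lease{owner: 3} // stand-in for r.shMu.state.Lease
	const myStore storeID = 3      // stand-in for r.store.StoreID()

	// The cheaper check: no clock read, no lease-status evaluation.
	// Caveat from the comment: if the lease has expired and no successor
	// is in the state machine yet, this still reports the old owner.
	if stateLease != nil && stateLease.OwnedBy(myStore) {
		fmt.Println("this replica is (probably) the leaseholder")
	}
}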
@@ -174,6 +162,14 @@ func constructRangeMsgReplicas(
 // Called periodically by the same entity (and must not be called concurrently).
 // If this method returned isLeaseholder=true and shouldBeSkipped=false the last
 // time, that message must have been fed to the allocator.
+//
+// Concurrency concerns:
+// Caller1 may call into maybePromiseSpanConfigUpdate() and include the SpanConfig
+// (marking it as no longer needing an update), and then Caller2 decides not to
+// include the SpanConfig in the RangeMsg. But Caller2 may get ahead and call
+// into mma first with a RangeMsg that lacks a SpanConfig, and mma wouldn't
+// know how to initialize a range it knows nothing about. More thinking is
+// needed before claiming this method is not racy.
 func (mr *mmaReplica) tryConstructMMARangeMsg(
 	ctx context.Context, knownStores map[roachpb.StoreID]struct{},
 ) (isLeaseholder bool, shouldBeSkipped bool, msg mmaprototype.RangeMsg) {
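
To make the interleaving in that new comment concrete, here is a minimal, self-contained sketch; every name in it is hypothetical, and `buildMsg` compresses the promise/skip decision into a toy:

package main

import (
	"fmt"
	"sync"
)

// rangeMsg is a hypothetical stand-in for mmaprototype.RangeMsg.
type rangeMsg struct {
	spanConfig string // empty means "span config not included"
}

var (
	mu       sync.Mutex
	upToDate bool // stand-in for mmaSpanConfigIsUpToDate
)

// buildMsg compresses the promise/skip decision: the first caller
// includes the span config and marks it up to date; later callers
// skip it, assuming mma already has it.
func buildMsg() rangeMsg {
	mu.Lock()
	defer mu.Unlock()
	if !upToDate {
		upToDate = true
		return rangeMsg{spanConfig: "cfg@10"}
	}
	return rangeMsg{}
}

func main() {
	msg1 := buildMsg() // Caller1: includes the span config
	msg2 := buildMsg() // Caller2: skips it

	// Nothing orders the two deliveries. If Caller2's message reaches
	// mma first, mma sees a range it knows nothing about and has no
	// span config to initialize it from.
	for i, m := range []rangeMsg{msg2, msg1} { // adversarial ordering
		fmt.Printf("delivery %d: spanConfig=%q\n", i+1, m.spanConfig)
	}
}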

pkg/kv/kvserver/replica.go

Lines changed: 5 additions & 5 deletions
@@ -1007,7 +1007,7 @@ type Replica struct {
 	// when the range is not included in the store's leaseholder message either
 	// due to non-leaseholder replica or unknown store). The invariant is that
 	// mma must hold the latest span config of the range when the range messages
-	// sent to mma.
+	// is sent to mma.
 	mmaSpanConfigIsUpToDate bool
 }
 
@@ -1846,8 +1846,8 @@ func (r *Replica) RaftBasicStatus() raft.BasicStatus {
 // NB: This incurs deep copies of Status.Config and Status.Progress.Inflights
 // and is not suitable for use in hot paths. See raftSparseStatusRLocked().
 //
-// TODO(wenyihu6): odd that this is returning a pointer while holding only an
-// RLock.
+// TODO(wenyihu6): returning a pointer here incurs an unnecessary heap
+// allocation. We should return raft.Status instead.
 func (r *Replica) raftStatusRLocked() *raft.Status {
 	if rg := r.mu.internalRaftGroup; rg != nil {
 		s := rg.Status()
@@ -1860,8 +1860,8 @@ func (r *Replica) raftStatusRLocked() *raft.Status {
 // Progress.Inflights which are expensive to copy, or nil if the Raft group has
 // not been initialized yet. Progress is only populated on the leader.
 //
-// TODO(wenyihu6): odd that this is returning a pointer while holding only an
-// RLock.
+// TODO(wenyihu6): returning a pointer here incurs an unnecessary heap
+// allocation. We should return raft.SparseStatus instead.
 func (r *Replica) raftSparseStatusRLocked() *raft.SparseStatus {
 	rg := r.mu.internalRaftGroup
 	if rg == nil {
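
What these TODOs propose might look roughly like the sketch below, using hypothetical stand-in types and a boolean in place of the nil pointer as the "group not initialized" signal (an assumption, not the actual change):

package main

import "fmt"

// status is a hypothetical stand-in for raft.Status.
type status struct{ term uint64 }

// group is a hypothetical stand-in for the internal Raft group.
type group struct{ st status }

func (g *group) Status() status { return g.st }

type replica struct{ raftGroup *group }

// raftStatus returns by value, as the TODO suggests; the extra bool
// replaces the nil pointer as the "group not initialized" signal, so
// no heap allocation is forced on the caller.
func (r *replica) raftStatus() (status, bool) {
	if rg := r.raftGroup; rg != nil {
		return rg.Status(), true
	}
	return status{}, false
}

func main() {
	r := &replica{raftGroup: &group{st: status{term: 7}}}
	if s, ok := r.raftStatus(); ok {
		fmt.Println("term:", s.term)
	}
}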
