Skip to content

Commit 765be4c

Browse files
committed
mmaprototype: add datadriven test for rebalanceStores
1 parent d3ea8d9 commit 765be4c

File tree

2 files changed

+76
-0
lines changed

2 files changed

+76
-0
lines changed

pkg/kv/kvserver/allocator/mmaprototype/cluster_state_test.go

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -8,6 +8,7 @@ package mmaprototype
88
import (
99
"context"
1010
"fmt"
11+
"math/rand"
1112
"slices"
1213
"sort"
1314
"strconv"
@@ -513,6 +514,13 @@ func TestClusterState(t *testing.T) {
513514
case "get-pending-changes":
514515
return printPendingChangesTest(testingGetPendingChanges(t, cs))
515516

517+
case "rebalance-stores":
518+
storeID := dd.ScanArg[roachpb.StoreID](t, d, "store-id")
519+
rng := rand.New(rand.NewSource(0))
520+
dsm := newDiversityScoringMemo()
521+
cs.rebalanceStores(context.Background(), storeID, rng, dsm)
522+
return printPendingChangesTest(testingGetPendingChanges(t, cs))
523+
516524
case "tick":
517525
seconds := dd.ScanArg[int](t, d, "seconds")
518526
ts.Advance(time.Second * time.Duration(seconds))
Lines changed: 68 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,68 @@
1+
# Basic test that rebalanceStores issues a lease transfer when a single replica
2+
# contributes to overload on the first of three single-store nodes.
3+
4+
set-store
5+
store-id=1 node-id=1 attrs=purple locality-tiers=region=us-west-1,zone=us-west-1a
6+
store-id=2 node-id=2 attrs=yellow locality-tiers=region=us-east-1,zone=us-east-1a
7+
store-id=3 node-id=3 attrs=green locality-tiers=region=us-central-1,zone=us-central-1a
8+
----
9+
node-id=1 locality-tiers=region=us-west-1,zone=us-west-1a,node=1
10+
store-id=1 attrs=purple locality-code=1:2:3:
11+
node-id=2 locality-tiers=region=us-east-1,zone=us-east-1a,node=2
12+
store-id=2 attrs=yellow locality-code=4:5:6:
13+
node-id=3 locality-tiers=region=us-central-1,zone=us-central-1a,node=3
14+
store-id=3 attrs=green locality-code=7:8:9:
15+
16+
store-load-msg
17+
store-id=1 node-id=1 load=[80,0,0] capacity=[100,100,100] secondary-load=0 load-time=0s
18+
----
19+
20+
store-load-msg
21+
store-id=2 node-id=2 load=[10,0,0] capacity=[100,100,100] secondary-load=0 load-time=0s
22+
----
23+
24+
store-load-msg
25+
store-id=3 node-id=3 load=[10,0,0] capacity=[100,100,100] secondary-load=0 load-time=0s
26+
----
27+
28+
get-load-info
29+
----
30+
store-id=1 node-id=1 status=ok accepting all reported=[cpu:80, write-bandwidth:0, byte-size:0] adjusted=[cpu:80, write-bandwidth:0, byte-size:0] node-reported-cpu=80 node-adjusted-cpu=80 seq=1
31+
store-id=2 node-id=2 status=ok accepting all reported=[cpu:10, write-bandwidth:0, byte-size:0] adjusted=[cpu:10, write-bandwidth:0, byte-size:0] node-reported-cpu=10 node-adjusted-cpu=10 seq=1
32+
store-id=3 node-id=3 status=ok accepting all reported=[cpu:10, write-bandwidth:0, byte-size:0] adjusted=[cpu:10, write-bandwidth:0, byte-size:0] node-reported-cpu=10 node-adjusted-cpu=10 seq=1
33+
34+
store-leaseholder-msg
35+
store-id=1
36+
range-id=1 load=[60,0,0] raft-cpu=20 config=(num_replicas=3 constraints={} voter_constraints={})
37+
store-id=1 replica-id=1 type=VOTER_FULL leaseholder=true
38+
store-id=2 replica-id=2 type=VOTER_FULL
39+
store-id=3 replica-id=3 type=VOTER_FULL
40+
----
41+
42+
ranges
43+
----
44+
range-id=1 local-store=1 load=[cpu:60, write-bandwidth:0, byte-size:0] raft-cpu=20
45+
store-id=1 replica-id=1 type=VOTER_FULL leaseholder=true
46+
store-id=2 replica-id=2 type=VOTER_FULL
47+
store-id=3 replica-id=3 type=VOTER_FULL
48+
49+
get-load-info
50+
----
51+
store-id=1 node-id=1 status=ok accepting all reported=[cpu:80, write-bandwidth:0, byte-size:0] adjusted=[cpu:80, write-bandwidth:0, byte-size:0] node-reported-cpu=80 node-adjusted-cpu=80 seq=1
52+
top-k-ranges (local-store-id=1) dim=CPURate: r1
53+
store-id=2 node-id=2 status=ok accepting all reported=[cpu:10, write-bandwidth:0, byte-size:0] adjusted=[cpu:10, write-bandwidth:0, byte-size:0] node-reported-cpu=10 node-adjusted-cpu=10 seq=1
54+
top-k-ranges (local-store-id=1) dim=WriteBandwidth: r1
55+
store-id=3 node-id=3 status=ok accepting all reported=[cpu:10, write-bandwidth:0, byte-size:0] adjusted=[cpu:10, write-bandwidth:0, byte-size:0] node-reported-cpu=10 node-adjusted-cpu=10 seq=1
56+
top-k-ranges (local-store-id=1) dim=WriteBandwidth: r1
57+
58+
# TODO(tbg): consider adding an option to include the rebalancing
59+
# trace in the output here.
60+
rebalance-stores store-id=1
61+
----
62+
pending(2)
63+
change-id=1 store-id=1 node-id=1 range-id=1 load-delta=[cpu:-40, write-bandwidth:0, byte-size:0] start=0s
64+
prev=(replica-id=1 type=VOTER_FULL leaseholder=true)
65+
next=(replica-id=1 type=VOTER_FULL)
66+
change-id=2 store-id=2 node-id=2 range-id=1 load-delta=[cpu:44, write-bandwidth:0, byte-size:0] start=0s
67+
prev=(replica-id=2 type=VOTER_FULL)
68+
next=(replica-id=2 type=VOTER_FULL leaseholder=true)

0 commit comments

Comments
 (0)