Commit a8691b0

Merge pull request #153820 from iskettaneh/backport24.3-153326
release-24.3: allocatorimpl: rm TestAllocatorFullDisks
2 parents f00749c + 80ffe2f commit a8691b0

File tree

1 file changed (+0, −154)


pkg/kv/kvserver/allocator/allocatorimpl/allocator_test.go

Lines changed: 0 additions & 154 deletions
@@ -8773,160 +8773,6 @@ func (ts *testStore) rebalance(ots *testStore, bytes int64, qps float64, do Disk
 	ots.Capacity.LogicalBytes += bytes
 }
 
-func (ts *testStore) compact() {
-	ts.Capacity.Used = ts.Capacity.LogicalBytes
-	ts.Capacity.Available = ts.Capacity.Capacity - ts.Capacity.Used
-}
-
-func TestAllocatorFullDisks(t *testing.T) {
-	defer leaktest.AfterTest(t)()
-	defer log.Scope(t).Close(t)
-
-	ctx := context.Background()
-	stopper := stop.NewStopper()
-	defer stopper.Stop(ctx)
-
-	st := cluster.MakeTestingClusterSettings()
-	tr := tracing.NewTracer()
-	clock := hlc.NewClockForTesting(nil)
-
-	g := gossip.NewTest(1, stopper, metric.NewRegistry())
-
-	liveness.TimeUntilNodeDead.Override(ctx, &st.SV, liveness.TestTimeUntilNodeDeadOff)
-
-	const generations = 100
-	const nodes = 20
-	const capacity = (1 << 30) + 1
-	const rangeSize = 16 << 20
-
-	mockNodeLiveness := storepool.NewMockNodeLiveness(livenesspb.NodeLivenessStatus_LIVE)
-	sp := storepool.NewStorePool(
-		log.MakeTestingAmbientContext(tr),
-		st,
-		g,
-		clock,
-		func() int {
-			return nodes
-		},
-		mockNodeLiveness.NodeLivenessFunc,
-		false, /* deterministic */
-	)
-	alloc := MakeAllocator(st, false /* deterministic */, func(id roachpb.NodeID) (time.Duration, bool) {
-		return 0, false
-	}, nil)
-
-	var wg sync.WaitGroup
-	g.RegisterCallback(gossip.MakePrefixPattern(gossip.KeyStoreDescPrefix),
-		func(_ string, _ roachpb.Value) { wg.Done() },
-		// Redundant callbacks are required by this test.
-		gossip.Redundant)
-
-	do := makeDiskCapacityOptions(&st.SV)
-
-	// Each range is equally sized (16mb), we want the number of ranges per node,
-	// when their size is added, to be no greater than the full disk rebalance
-	// threshold (0.925%) e.g for below:
-	// capacity = 1024mb
-	// rangeSize = 16mb
-	// threshold = 0.925
-	// rangesPerNode = ⌊1024mb * 0.925 / 16mb⌋ = 59
-	rangesPerNode := int(math.Floor(capacity * do.RebalanceToThreshold / rangeSize))
-	rangesToAdd := rangesPerNode * nodes
-
-	// Initialize testStores.
-	var testStores [nodes]testStore
-	for i := 0; i < len(testStores); i++ {
-		// Don't immediately reclaim disk space from removed ranges. This mimics
-		// range deletions don't immediately reclaim disk space in rocksdb.
-		testStores[i].immediateCompaction = false
-		testStores[i].StoreID = roachpb.StoreID(i)
-		testStores[i].Node = roachpb.NodeDescriptor{NodeID: roachpb.NodeID(i)}
-		testStores[i].Capacity = roachpb.StoreCapacity{Capacity: capacity, Available: capacity}
-	}
-	// Initialize the cluster with a single range.
-	testStores[0].add(rangeSize, 0)
-	rangesAdded := 1
-
-	for i := 0; i < generations; i++ {
-		// First loop through test stores and randomly add data.
-		for j := 0; j < len(testStores); j++ {
-			if mockNodeLiveness.NodeLivenessFunc(roachpb.NodeID(j)) == livenesspb.NodeLivenessStatus_DEAD {
-				continue
-			}
-			ts := &testStores[j]
-			// Add [0,2) ranges to the node, simulating splits and data growth.
-			toAdd := alloc.randGen.Intn(2)
-			for k := 0; k < toAdd; k++ {
-				if rangesAdded < rangesToAdd {
-					ts.add(rangeSize, 0)
-					rangesAdded++
-				}
-			}
-			if ts.Capacity.Available <= 0 {
-				t.Errorf("testStore %d ran out of space during generation %d (rangesAdded=%d/%d): %+v",
-					j, i, rangesAdded, rangesToAdd, ts.Capacity)
-				mockNodeLiveness.SetNodeStatus(roachpb.NodeID(j), livenesspb.NodeLivenessStatus_DEAD)
-			}
-			wg.Add(1)
-			if err := g.AddInfoProto(gossip.MakeStoreDescKey(roachpb.StoreID(j)), &ts.StoreDescriptor, 0); err != nil {
-				t.Fatal(err)
-			}
-		}
-		wg.Wait()
-
-		// Loop through each store a number of times and maybe rebalance.
-		for j := 0; j < 10; j++ {
-			for k := 0; k < len(testStores); k++ {
-				if mockNodeLiveness.NodeLivenessFunc(roachpb.NodeID(k)) == livenesspb.NodeLivenessStatus_DEAD {
-					continue
-				}
-				ts := &testStores[k]
-				// Rebalance until there's no more rebalancing to do.
-				if ts.Capacity.RangeCount > 0 {
-					var rangeUsageInfo allocator.RangeUsageInfo
-					target, _, details, ok := alloc.RebalanceVoter(
-						ctx,
-						sp,
-						emptySpanConfig(),
-						nil,
-						[]roachpb.ReplicaDescriptor{{NodeID: ts.Node.NodeID, StoreID: ts.StoreID}},
-						nil,
-						rangeUsageInfo,
-						storepool.StoreFilterThrottled,
-						alloc.ScorerOptions(ctx),
-					)
-					if ok {
-						if log.V(1) {
-							log.Infof(ctx, "rebalancing to %v; details: %s", target, details)
-						}
-						testStores[k].rebalance(&testStores[int(target.StoreID)], rangeSize, 0 /* qps */, do)
-					}
-				}
-				// Gossip occasionally, as real Stores do when replicas move around.
-				if j%3 == 2 {
-					wg.Add(1)
-					if err := g.AddInfoProto(gossip.MakeStoreDescKey(roachpb.StoreID(j)), &ts.StoreDescriptor, 0); err != nil {
-						t.Fatal(err)
-					}
-				}
-			}
-		}
-
-		// Simulate rocksdb compactions freeing up disk space.
-		for j := 0; j < len(testStores); j++ {
-			if mockNodeLiveness.NodeLivenessFunc(roachpb.NodeID(j)) != livenesspb.NodeLivenessStatus_DEAD {
-				ts := &testStores[j]
-				if ts.Capacity.Available <= 0 {
-					t.Errorf("testStore %d ran out of space during generation %d: %+v", j, i, ts.Capacity)
-					mockNodeLiveness.SetNodeStatus(roachpb.NodeID(j), livenesspb.NodeLivenessStatus_DEAD)
-				} else {
-					ts.compact()
-				}
-			}
-		}
-	}
-}
-
 func Example_rangeCountRebalancing() {
 	testStores := make([]testStore, 20)
 	rebalanceFn := func(ctx context.Context, ts *testStore, testStores []testStore, alloc *Allocator, storePool *storepool.StorePool) {
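
Note: the sizing arithmetic in the deleted test's comment can be checked in isolation. The sketch below is illustrative only, not part of the commit; it hard-codes the 0.925 rebalance-to threshold quoted in that comment, whereas the test itself read the value from cluster settings via makeDiskCapacityOptions(&st.SV).

	package main

	import (
		"fmt"
		"math"
	)

	func main() {
		// Constants copied from the removed test.
		const nodes = 20
		const capacity = (1 << 30) + 1 // just over 1 GiB per store
		const rangeSize = 16 << 20     // 16 MiB per range

		// Assumption: the rebalance-to threshold is 0.925, as quoted in the
		// removed comment; the test obtained it from DiskCapacityOptions.
		const rebalanceToThreshold = 0.925

		// rangesPerNode = ⌊1024mb * 0.925 / 16mb⌋ = 59
		rangesPerNode := int(math.Floor(capacity * rebalanceToThreshold / rangeSize))
		rangesToAdd := rangesPerNode * nodes

		fmt.Println(rangesPerNode, rangesToAdd) // prints: 59 1180
	}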
