@@ -112,17 +112,19 @@ func registerRebalanceLoad(r registry.Registry) {
 			})
 			mvt.InMixedVersion("rebalance load run",
 				func(ctx context.Context, l *logger.Logger, r *rand.Rand, h *mixedversion.Helper) error {
+					binary := uploadCockroach(ctx, t, c, appNode, h.System.FromVersion)
 					return rebalanceByLoad(
-						ctx, t, l, c, rebalanceMode, maxDuration, concurrency, appNode, numStores, numNodes)
+						ctx, t, l, c, maxDuration, concurrency, appNode,
+						fmt.Sprintf("%s workload", binary), numStores, numNodes)
 				})
 			mvt.Run()
 		} else {
 			// Note that CPU profiling is already enabled by default, should there be
 			// a failure it will be available in the artifacts.
 			c.Start(ctx, t.L(), startOpts, settings, roachNodes)
 			require.NoError(t, rebalanceByLoad(
-				ctx, t, t.L(), c, rebalanceMode, maxDuration,
-				concurrency, appNode, numStores, numNodes,
+				ctx, t, t.L(), c, maxDuration,
+				concurrency, appNode, "./cockroach workload", numStores, numNodes,
 			))
 		}
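For orientation, the hook above runs inside roachtest's mixedversion framework; the setup lives outside this hunk. Below is a minimal sketch of the assumed scaffolding — the NewTest call shape and the surrounding names (roachNodes, appNode, the workload parameters) are assumptions drawn from context, not shown in this diff:

// Sketch only: assumed scaffolding around the hunk above, not part of
// this diff. The hook stages the from-version cockroach binary on the
// workload node so the kv workload runs with the older version during
// the upgrade.
mvt := mixedversion.NewTest(ctx, t, t.L(), c, roachNodes)
mvt.InMixedVersion("rebalance load run",
	func(ctx context.Context, l *logger.Logger, r *rand.Rand, h *mixedversion.Helper) error {
		// uploadCockroach returns the path of the staged binary;
		// appending "workload" yields the command prefix that
		// rebalanceByLoad now receives as workloadPath.
		binary := uploadCockroach(ctx, t, c, appNode, h.System.FromVersion)
		return rebalanceByLoad(
			ctx, t, l, c, maxDuration, concurrency, appNode,
			fmt.Sprintf("%s workload", binary), numStores, numNodes)
	})
mvt.Run()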
@@ -236,19 +238,20 @@ func rebalanceByLoad(
 	t test.Test,
 	l *logger.Logger,
 	c cluster.Cluster,
-	rebalanceMode string,
 	maxDuration time.Duration,
 	concurrency int,
 	appNode option.NodeListOption,
+	workloadPath string,
 	numStores, numNodes int,
 ) error {
+
 	// We want each store to end up with approximately storeToRangeFactor
 	// (factor) leases such that the CPU load is evenly spread, e.g.
 	// (n * factor) -1 splits = factor * n ranges = factor leases per store
 	// Note that we only assert on the CPU of each store w.r.t the mean, not
 	// the lease count.
 	splits := (numStores * storeToRangeFactor) - 1
-	c.Run(ctx, option.WithNodes(appNode), fmt.Sprintf("./cockroach workload init kv --drop --splits=%d {pgurl:1}", splits))
+	c.Run(ctx, option.WithNodes(appNode), fmt.Sprintf("%s init kv --drop --splits=%d {pgurl:1}", workloadPath, splits))

 	db := c.Conn(ctx, l, 1)
 	defer db.Close()
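To make the splits arithmetic in the comment above concrete, here is a self-contained sketch; storeToRangeFactor = 5 and numStores = 9 are illustrative values only, since the real constants are defined elsewhere in the file and not shown in this diff:

package main

import "fmt"

func main() {
	// Illustrative values only; the actual constants live outside
	// this diff.
	const storeToRangeFactor = 5
	const numStores = 9

	// (n * factor) - 1 splits => factor * n ranges => roughly `factor`
	// leases per store once load-based rebalancing evens out CPU load.
	splits := (numStores * storeToRangeFactor) - 1
	ranges := splits + 1
	fmt.Printf("splits=%d ranges=%d leases/store~=%d\n",
		splits, ranges, ranges/numStores)
	// Output: splits=44 ranges=45 leases/store~=5
}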
@@ -264,9 +267,9 @@ func rebalanceByLoad(
 	m.Go(func(ctx context.Context, l *logger.Logger) error {
 		l.Printf("starting load generator")
 		err := c.RunE(ctx, option.WithNodes(appNode), fmt.Sprintf(
-			"./cockroach workload run kv --read-percent=95 --tolerate-errors --concurrency=%d "+
+			"%s run kv --read-percent=95 --tolerate-errors --concurrency=%d "+
 				"--duration=%v {pgurl:1-%d}",
-			concurrency, maxDuration, numNodes))
+			workloadPath, concurrency, maxDuration, numNodes))
		if errors.Is(ctx.Err(), context.Canceled) {
 			// We got canceled either because CPU balance was achieved or the
 			// other worker hit an error. In either case, it's not this worker's
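Tying the hunks together: workloadPath is now the single knob that decides which binary serves the kv workload. A sketch of the commands the two call sites would render, with hypothetical inputs (the versioned binary name, concurrency, duration, and node counts are assumptions, not taken from this diff):

package main

import (
	"fmt"
	"time"
)

func main() {
	// Hypothetical values for illustration.
	splits, concurrency, numNodes := 44, 128, 3
	maxDuration := 30 * time.Minute

	for _, workloadPath := range []string{
		"./cockroach-v23.2.4 workload", // mixed-version call site (hypothetical staged binary)
		"./cockroach workload",         // default call site
	} {
		// Same format strings as the diff's c.Run / c.RunE calls.
		fmt.Printf("%s init kv --drop --splits=%d {pgurl:1}\n",
			workloadPath, splits)
		fmt.Printf("%s run kv --read-percent=95 --tolerate-errors --concurrency=%d "+
			"--duration=%v {pgurl:1-%d}\n",
			workloadPath, concurrency, maxDuration, numNodes)
	}
}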