
Commit e51953e

MB-48136 uneven data in collections
Change-Id: I3b42547297ce36b3a4aeab09f610c7c8feeae0cd
Reviewed-on: https://review.couchbase.org/c/perfrunner/+/166448
Tested-by: Build Bot <[email protected]>
Reviewed-by: vikas chaudhary <[email protected]>
1 parent ff55f50 commit e51953e

17 files changed: 4,974 additions, 7 deletions

clusters/aether_5indexes.spec

Lines changed: 27 additions & 0 deletions

@@ -0,0 +1,27 @@
+[clusters]
+aether =
+    172.23.110.53:kv
+    172.23.110.54:kv
+    172.23.110.72:index
+    172.23.110.55:index
+    172.23.110.56:index
+    172.23.110.71:index
+    172.23.110.73:index
+
+[clients]
+hosts =
+    172.23.110.74
+credentials = root:couchbase
+
+[storage]
+data = /data
+
+[credentials]
+rest = Administrator:password
+ssh = root:couchbase
+
+[parameters]
+OS = CentOS 7
+CPU = Data: 2xGold 6230 (80 vCPU), Index: CPU 2xGold 6230 (80 vCPU)
+Memory = Data: 128 GB, Index: 512 GB
+Disk = Samsung SSD 860 1TB
Lines changed: 57 additions & 0 deletions

@@ -0,0 +1,57 @@
+{
+    "bucket-1": {
+        "_default": {},
+        "scope-1": {
+            "collection-1": {
+                "load": 1,
+                "access": 1,
+                "ratio": 1
+            },
+            "collection-2": {
+                "load": 1,
+                "access": 1,
+                "ratio": 2
+            },
+            "collection-3": {
+                "load": 1,
+                "access": 1,
+                "ratio": 3
+            },
+            "collection-4": {
+                "load": 1,
+                "access": 1,
+                "ratio": 4
+            },
+            "collection-5": {
+                "load": 1,
+                "access": 5,
+                "ratio": 5
+            },
+            "collection-6": {
+                "load": 1,
+                "access": 1,
+                "ratio": 6
+            },
+            "collection-7": {
+                "load": 1,
+                "access": 1,
+                "ratio": 7
+            },
+            "collection-8": {
+                "load": 1,
+                "access": 1,
+                "ratio": 8
+            },
+            "collection-9": {
+                "load": 1,
+                "access": 1,
+                "ratio": 9
+            },
+            "collection-10": {
+                "load": 1,
+                "access": 1,
+                "ratio": 10
+            }
+        }
+    }
+}
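
For reference, the "ratio" field above is what produces the uneven distribution: the workload's total item count is divided by the sum of all ratios to get a base count, and each collection is then loaded with base * ratio items (mirroring the load_map logic added to spring/wgen3.py below). A minimal standalone sketch of that arithmetic, assuming a 1,000,000-item workload (the total is an illustrative assumption, not part of this commit):

    # Per-collection item counts implied by the "ratio" fields above.
    # total_items is an assumed example value, not from the commit.
    collection_map = {
        "scope-1": {f"collection-{i}": {"load": 1, "ratio": i}
                    for i in range(1, 11)},
    }
    total_items = 1_000_000

    num_ratio = sum(c["ratio"] for colls in collection_map.values()
                    for c in colls.values() if c["load"] == 1)  # 55
    base_items = total_items // num_ratio                       # 18_181

    load_map = {f"{scope}:{name}": c["ratio"] * base_items
                for scope, colls in collection_map.items()
                for name, c in colls.items() if c["load"] == 1}
    # scope-1:collection-1 -> 18,181 items; scope-1:collection-10 -> 181,810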

perfrunner/helpers/rest.py

Lines changed: 10 additions & 0 deletions

@@ -200,6 +200,16 @@ def set_index_settings(self, host: str, settings: dict):
             else:
                 logger.warn('Skipping unknown option: {}'.format(option))
 
+    def set_planner_settings(self, host: str, settings: dict):
+        api = 'http://{}:9102/settings/planner'.format(host)
+        logger.info('Changing host {} to {}'.format(host, settings))
+        self.post(url=api, data=settings)
+
+    def get_index_metadata(self, host: str) -> dict:
+        api = 'http://{}:9102/getLocalIndexMetadata'.format(host)
+
+        return self.get(url=api).json()
+
     def get_index_settings(self, host: str) -> dict:
         api = 'http://{}:9102/settings?internal=ok'.format(host)
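
Both new helpers wrap plain HTTP endpoints that the indexer exposes on port 9102. A minimal standalone equivalent using requests (the node address and credentials come from the spec file above; the "in" exclusion value and the JSON body encoding are assumptions for illustration, since the diff posts whatever settings dict the test supplies):

    import requests

    node = '172.23.110.72'                 # an index node from the spec above
    auth = ('Administrator', 'password')   # REST credentials from the spec

    # Ask the planner to exclude this node from index placement.
    resp = requests.post(f'http://{node}:9102/settings/planner',
                         json={'excludeNode': 'in'}, auth=auth)
    resp.raise_for_status()

    # Read back the local index metadata to confirm the setting took effect.
    meta = requests.get(f'http://{node}:9102/getLocalIndexMetadata',
                        auth=auth).json()
    print(meta.get('localSettings'))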

perfrunner/settings.py

Lines changed: 6 additions & 1 deletion

@@ -1203,7 +1203,7 @@ def __init__(self, options: dict):
 
         self.settings = {}
         for option in options:
-            if option.startswith(('indexer', 'projector', 'queryport')):
+            if option.startswith(('indexer', 'projector', 'queryport', 'planner')):
                 value = options.get(option)
                 if '.' in value:
                     self.settings[option] = maybe_atoi(value, t=float)
@@ -1219,6 +1219,11 @@ def __init__(self, options: dict):
         self.settings['indexer.cpu.throttle.target'] = \
             self.settings.get('indexer.cpu.throttle.target', 1.00)
 
+        self.excludeNode = None
+        if self.settings.get('planner.excludeNode'):
+            self.excludeNode = self.settings.get('planner.excludeNode')
+            self.settings.pop('planner.excludeNode')
+
     def __str__(self) -> str:
         return str(self.__dict__)
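
The net effect: any test-config option with a planner. prefix is now picked up by GSISettings, and planner.excludeNode in particular is lifted onto self.excludeNode and popped from the settings dict so it is never pushed to the indexer as an ordinary index setting. A small sketch of that flow (the options dict stands in for a hypothetical parsed test config; only the planner-prefix handling mirrors this commit):

    # Hypothetical parsed test-config options, for illustration only.
    options = {
        'indexer.settings.storage_mode': 'plasma',
        'planner.excludeNode': 'in',
    }

    settings = {k: v for k, v in options.items()
                if k.startswith(('indexer', 'projector', 'queryport', 'planner'))}

    excludeNode = None
    if settings.get('planner.excludeNode'):
        excludeNode = settings.pop('planner.excludeNode')

    assert excludeNode == 'in'
    assert 'planner.excludeNode' not in settings  # not sent to the indexer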

perfrunner/tests/secondary.py

Lines changed: 11 additions & 2 deletions

@@ -15,6 +15,7 @@
     run_cbindex,
     run_cbindexperf,
 )
+from perfrunner.helpers.misc import pretty_dict
 from perfrunner.helpers.profiler import with_profiles
 from perfrunner.tests import PerfTest, TargetIterator
 from perfrunner.tests.rebalance import RebalanceTest
@@ -1253,17 +1254,25 @@ def run(self):
         self.access_bg()
         self.apply_scanworkload(path_to_tool="./opt/couchbase/bin/cbindexperf",
                                 run_in_background=True)
+        if self.test_config.gsi_settings.excludeNode:
+            planner_settings = {"excludeNode": self.test_config.gsi_settings.excludeNode}
+            nodes = self.rest.get_active_nodes_by_role(self.master_node, 'index')
+            for node in nodes:
+                logger.info(f"setting planner settings {planner_settings}")
+                self.rest.set_planner_settings(node, planner_settings)
+                meta = self.rest.get_index_metadata(node)
+                logger.info('Index Metadata: {}'.format(pretty_dict(meta['localSettings'])))
+
         self.rebalance_indexer()
         logger.info("Indexes after rebalance")
         for server in self.index_nodes:
             logger.info("{} : {} Indexes".format(server, self.rest.indexes_per_node(server)))
-
+        self.report_kpi(rebalance_time=True)
         kill_process("cbindexperf")
         scan_thr = self.get_throughput()
         percentile_latencies = self.calculate_scan_latencies()
         logger.info('Scan throughput: {}'.format(scan_thr))
         self.print_index_disk_usage()
-        self.report_kpi(rebalance_time=True)
         self.report_kpi(percentile_latencies, scan_thr)
         self.validate_num_connections()

spring/wgen3.py

Lines changed: 33 additions & 4 deletions

@@ -126,15 +126,33 @@ def __init__(self, workload_settings, target_settings, shutdown_event=None):
 
     def init_load_targets(self):
        self.load_targets = []
+        self.load_map = {}
+        num_ratio = 0
        if self.ws.collections is not None:
            target_scope_collections = self.ws.collections[self.ts.bucket]
            for scope in target_scope_collections.keys():
                for collection in target_scope_collections[scope].keys():
                    if target_scope_collections[scope][collection]['load'] == 1:
                        self.load_targets += [scope+":"+collection]
+                        if target_scope_collections[scope][collection].get('ratio'):
+                            num_ratio += target_scope_collections[scope][collection].get('ratio')
        else:
            self.load_targets = ["_default:_default"]
        self.num_load_targets = len(self.load_targets)
+        if num_ratio > 0:
+            base_items = self.ws.items // num_ratio
+            target_scope_collections = self.ws.collections[self.ts.bucket]
+            for scope in target_scope_collections.keys():
+                for collection in target_scope_collections[scope].keys():
+                    if target_scope_collections[scope][collection]['load'] == 1:
+                        self.load_targets += [scope+":"+collection]
+                        if target_scope_collections[scope][collection].get('ratio'):
+                            ratio = target_scope_collections[scope][collection].get('ratio')
+                            self.load_map[scope + ":" + collection] = ratio * base_items
+        else:
+            for target in self.load_targets:
+                self.load_map[target] = self.ws.items // self.num_load_targets
+        logger.info(f"load map {self.load_map}")
 
     def init_access_targets(self):
        self.access_targets = []
@@ -448,7 +466,8 @@ def gen_cmd_sequence(self, cb: Client = None) -> Sequence:
        if not cb:
            cb = self.cb
        target = self.random_target()
-        curr_items = self.ws.items // self.num_load_targets
+        curr_items = self.load_map[target]
+        # curr_items = self.ws.items // self.num_load_targets
        deleted_items = 0
        if self.ws.creates or self.ws.deletes:
            max_batch_deletes_buffer = self.ws.deletes * self.ws.workers
@@ -673,10 +692,11 @@ def run(self, sid, *args):
 class SeqUpsertsWorker(Worker):
 
     def run(self, sid, *args):
+        logger.info("running SeqUpsertsWorker")
        ws = copy.deepcopy(self.ws)
-        ws.items = ws.items // self.num_load_targets
        self.cb.connect_collections(self.load_targets)
        for target in self.load_targets:
+            ws.items = self.load_map[target]
            for key in SequentialKey(sid, ws, self.ts.prefix):
                doc = self.docs.next(key)
                self.cb.update(target, key.string, doc)
@@ -970,7 +990,6 @@ def do_batch_update(self, *args, **kwargs):
        t0 = time.time()
        self.op_delay = self.op_delay + (self.delta / self.ws.n1ql_batch_size)
 
-        self.next_target()
        target_curr_items = self.ws.items // self.num_load_targets
 
        for i in range(self.ws.n1ql_batch_size):
@@ -1354,18 +1373,28 @@ def start_all_workers(self):
        self.shared_dict = self.manager.dict()
        if self.ws.collections is not None:
            num_load = 0
+            num_ratio = 0
            target_scope_collections = self.ws.collections[self.ts.bucket]
            for scope in target_scope_collections.keys():
                for collection in target_scope_collections[scope].keys():
                    if target_scope_collections[scope][collection]['load'] == 1:
                        num_load += 1
+                        if target_scope_collections[scope][collection].get('ratio'):
+                            num_ratio += target_scope_collections[scope][collection].get('ratio')
 
            curr_items = self.ws.items // num_load
+            if num_ratio > 0:
+                curr_items = self.ws.items // num_ratio
            for scope in target_scope_collections.keys():
                for collection in target_scope_collections[scope].keys():
                    target = scope+":"+collection
                    if target_scope_collections[scope][collection]['load'] == 1:
-                        self.shared_dict[target] = [curr_items, 0]
+                        if target_scope_collections[scope][collection].get('ratio'):
+                            ratio = target_scope_collections[scope][collection]['ratio']
+                            final_items = curr_items * ratio
+                            self.shared_dict[target] = [final_items, 0]
+                        else:
+                            self.shared_dict[target] = [curr_items, 0]
                    else:
                        self.shared_dict[target] = [0, 0]
        else:
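
One consequence of the integer division in this last hunk: when the ratio total does not divide ws.items evenly, the per-collection counts seeded into shared_dict sum to slightly less than the configured item count. A quick check, reusing the assumed 1,000,000-item example from the collection map above:

    ws_items = 1_000_000                  # assumed example total, as above
    ratios = range(1, 11)                 # ratio fields from the collection map
    curr_items = ws_items // sum(ratios)  # 1_000_000 // 55 == 18_181

    shared_dict = {f'scope-1:collection-{r}': [curr_items * r, 0] for r in ratios}
    loaded = sum(items for items, _ in shared_dict.values())
    assert loaded == 999_955              # 45 items lost to rounding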
