Skip to content

Commit 86fc618

Browse files
authored
Merge pull request #226 from tchaikov/wip-gitcheck-lint.
tools/githubcheck: add --lint option. Reviewed-by: Ernesto Puerta <epuertat@redhat.com>
2 parents f715410 + 0f4def8 commit 86fc618

34 files changed

+612
-389
lines changed

benchmark/benchmark.py

Lines changed: 8 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -1,9 +1,7 @@
1-
import subprocess
21
import logging
32

43
import settings
54
import common
6-
import monitoring
75
import hashlib
86
import os
97
import json
@@ -12,6 +10,7 @@
1210

1311
logger = logging.getLogger('cbt')
1412

13+
1514
class Benchmark(object):
1615
def __init__(self, archive_dir, cluster, config):
1716
self.acceptable = config.pop('acceptable', {})
@@ -29,7 +28,7 @@ def __init__(self, archive_dir, cluster, config):
2928
self.osd_ra = config.get('osd_ra', None)
3029
self.cmd_path = ''
3130
self.valgrind = config.get('valgrind', None)
32-
self.cmd_path_full = ''
31+
self.cmd_path_full = ''
3332
self.log_iops = config.get('log_iops', True)
3433
self.log_bw = config.get('log_bw', True)
3534
self.log_lat = config.get('log_lat', True)
@@ -53,7 +52,7 @@ def _compare_client_results(self, client_run, self_analyzer, baseline_analyzer):
5352
'iops_stddev': 'Stddev IOPS',
5453
'latency_avg': 'Average Latency(s)',
5554
'cpu_cycles_per_op': 'Cycles per operation'}
56-
res_outputs = [] # list of dictionaries containing the self and baseline benchmark results
55+
res_outputs = [] # list of dictionaries containing the self and baseline benchmark results
5756
compare_results = []
5857
self_analyzer_res = {}
5958
baseline_analyzer_res = {}
@@ -75,7 +74,7 @@ def _compare_client_results(self, client_run, self_analyzer, baseline_analyzer):
7574
baseline_getter = getattr(baseline_analyzer, 'get_' + alias)
7675
baseline_analyzer_res[name] = baseline_getter()
7776
res_outputs.append(self_analyzer_res)
78-
res_outputs.append(baseline_analyzer_res)
77+
res_outputs.append(baseline_analyzer_res)
7978
for alias, stmt in list(self.acceptable.items()):
8079
name = aliases[alias]
8180
result, baseline = [float(j[name]) for j in res_outputs]
@@ -97,16 +96,13 @@ def evaluate(self, baseline):
9796
runs.append(self.readmode)
9897
results = []
9998
for run in runs:
100-
out_dirs = [os.path.join(self.out_dir, run),
101-
os.path.join(baseline.out_dir, run)]
10299
for client in settings.getnodes('clients').split(','):
103100
host = settings.host_info(client)["host"]
104101
for proc in range(self.concurrent_procs):
105102
self_analyzer = self.create_data_analyzer(run, host, proc)
106103
baseline_analyzer = baseline.create_data_analyzer(run, host, proc)
107104
client_run = '{run}/{client}/{proc}'.format(run=run, client=client, proc=proc)
108105
compare_results = self._compare_client_results(client_run, self_analyzer, baseline_analyzer)
109-
rejected = sum(not result.accepted for result in compare_results)
110106
results.extend(compare_results)
111107
# TODO: check results from monitors
112108
return results
@@ -161,19 +157,20 @@ def cleanup(self):
161157
pass
162158

163159
def dropcaches(self):
164-
nodes = settings.getnodes('clients', 'osds')
160+
nodes = settings.getnodes('clients', 'osds')
165161

166162
common.pdsh(nodes, 'sync').communicate()
167163
common.pdsh(nodes, 'echo 3 | sudo tee /proc/sys/vm/drop_caches').communicate()
168164

169165
def __str__(self):
170166
return str(self.config)
171167

168+
172169
class DataAnalyzer(ABC):
173170
def __init__(self, archive_dir, run, host, proc):
174171
super().__init__()
175172
self.archive_dir = archive_dir
176-
173+
177174
@abstractmethod
178175
def get_cpu_cycles_per_op(self):
179176
pass
@@ -189,7 +186,7 @@ def get_bandwidth(self):
189186
@abstractmethod
190187
def get_iops_avg(self):
191188
pass
192-
189+
193190
@abstractmethod
194191
def get_iops_stddev(self):
195192
pass

benchmark/cephtestrados.py

Lines changed: 13 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -1,16 +1,13 @@
1-
import subprocess
1+
from .benchmark import Benchmark
22
import common
33
import settings
44
import monitoring
55
import os
66
import time
7-
import threading
87
import logging
98

109
logger = logging.getLogger('cbt')
1110

12-
from cluster.ceph import Ceph
13-
from .benchmark import Benchmark
1411

1512
class CephTestRados(Benchmark):
1613

@@ -20,10 +17,14 @@ def __init__(self, archive_dir, cluster, config):
2017
self.tmp_conf = self.cluster.tmp_conf
2118

2219
self.bools = {}
23-
if config.get('ec_pool', False): self.bools['ec_pool'] = True
24-
if config.get('write_fadvise_dontneed', False): self.bools['write_fadvise_dontneed'] = True
25-
if config.get('pool_snaps', False): self.bools['pool_snaps'] = True
26-
if config.get('write_append_excl', True): self.bools['write_append_excl'] = True
20+
if config.get('ec_pool', False):
21+
self.bools['ec_pool'] = True
22+
if config.get('write_fadvise_dontneed', False):
23+
self.bools['write_fadvise_dontneed'] = True
24+
if config.get('pool_snaps', False):
25+
self.bools['pool_snaps'] = True
26+
if config.get('write_append_excl', True):
27+
self.bools['write_append_excl'] = True
2728

2829
self.variables = {}
2930
self.variables['object_size'] = int(config.get('object_size', 4000000))
@@ -35,8 +36,7 @@ def __init__(self, archive_dir, cluster, config):
3536
self.variables['max_stride_size'] = str(config.get('max_stride_size', self.variables['object_size'] / 5))
3637
self.variables['max_seconds'] = str(config.get('max_seconds', 0))
3738

38-
39-
self.weights = {'read': 100, 'write':100, 'delete':10}
39+
self.weights = {'read': 100, 'write': 100, 'delete': 10}
4040
for weight in ['snap_create', 'snap_remove', 'rollback', 'setattr', 'rmattr', 'watch', 'copy_from', 'hit_set_list', 'is_dirty', 'cache_flush', 'cache_try_flush', 'cache_evict' 'append', 'write', 'read', 'delete']:
4141
self.addweight(weight)
4242
if 'write_append_excl' in self.bools and 'append' in self.weights:
@@ -64,12 +64,12 @@ def exists(self):
6464
return False
6565

6666
# Initialize may only be called once depending on rebuild_every_test setting
67-
def initialize(self):
67+
def initialize(self):
6868
super(CephTestRados, self).initialize()
6969

7070
def run(self):
7171
super(CephTestRados, self).run()
72-
72+
7373
# Remake the pool
7474
self.mkpool()
7575
self.dropcaches()
@@ -122,7 +122,7 @@ def mkpool(self):
122122
self.cluster.mkpool('ceph_test_rados', self.pool_profile, 'ceph_test_rados')
123123
monitoring.stop()
124124

125-
def recovery_callback(self):
125+
def recovery_callback(self):
126126
common.pdsh(settings.getnodes('clients'), 'sudo pkill -f ceph_test_rados').communicate()
127127

128128
def __str__(self):

benchmark/cosbench.py

Lines changed: 33 additions & 35 deletions
Original file line numberDiff line numberDiff line change
@@ -1,20 +1,18 @@
1-
import subprocess
21
import common
32
import settings
43
import monitoring
5-
import os, sys
4+
import os
5+
import sys
66
import time
7-
import threading
87
import lxml.etree as ET
98
import re
10-
import time
119
import logging
1210

13-
from cluster.ceph import Ceph
1411
from .benchmark import Benchmark
1512

1613
logger = logging.getLogger("cbt")
1714

15+
1816
class Cosbench(Benchmark):
1917

2018
def __init__(self, archive_dir, cluster, config):
@@ -32,7 +30,7 @@ def __init__(self, archive_dir, cluster, config):
3230
self.use_existing = settings.cluster.get('use_existing')
3331
self.is_teuthology = settings.cluster.get('is_teuthology', False)
3432

35-
self.run_dir = '%s/osd_ra-%08d/op_size-%s/concurrent_procs-%03d/containers-%05d/objects-%05d/%s' % (self.run_dir, int(self.osd_ra), self.op_size, int(self.total_procs), int(self.containers),int(self.objects), self.mode)
33+
self.run_dir = '%s/osd_ra-%08d/op_size-%s/concurrent_procs-%03d/containers-%05d/objects-%05d/%s' % (self.run_dir, int(self.osd_ra), self.op_size, int(self.total_procs), int(self.containers), int(self.objects), self.mode)
3634
self.out_dir = self.archive_dir
3735

3836
def _filter_ssh_output(self, output):
@@ -62,10 +60,10 @@ def _do_ctrl(self, cmd_fmt, **kwargs):
6260
return stdout, stderr
6361

6462
def prerun_check(self):
65-
#1. check cosbench
63+
# 1. check cosbench
6664
if not self.check_workload_status():
6765
sys.exit()
68-
#2. check rgw
66+
# 2. check rgw
6967
cosconf = {}
7068
for param in self.config["auth"].split(';'):
7169
try:
@@ -97,7 +95,7 @@ def prerun_check(self):
9795
if re.search("AccessDenied", stdout):
9896
logger.error("Cosbench connect to Radosgw Auth Failed\n%s", stdout)
9997
sys.exit()
100-
#3. check if container and obj created
98+
# 3. check if container and obj created
10199
target_name = "%s-%s-%s" % (self.config["obj_size"], self.config["mode"], self.config["objects_max"])
102100
container_count = 0
103101
stdout, stderr = common.pdsh(self.rgw, "swift -A %s -U %s -K %s list" % (cosconf["url"], cosconf["username"], cosconf["password"])).communicate()
@@ -120,11 +118,11 @@ def exists(self):
120118
return False
121119

122120
def choose_template(self, temp_name, conf):
123-
ratio = { "read": 0, "write": 0 }
121+
ratio = {"read": 0, "write": 0}
124122
if conf["mode"] == "read" or conf["mode"] == "write":
125123
mode = [conf["mode"]]
126124
ratio[conf["mode"]] = 100
127-
elif conf["mode"] == "mix":
125+
elif conf["mode"] == "mix":
128126
mode = ["read", "write"]
129127
ratio["read"] = conf["ratio"]
130128
ratio["write"] = 100 - conf["ratio"]
@@ -135,24 +133,24 @@ def choose_template(self, temp_name, conf):
135133
operation = []
136134
for tmp_mode in mode:
137135
operation.append({
138-
"config":"containers=%s;objects=%s;cprefix=%s-%s-%s;sizes=c(%s)%s"
139-
%(conf["containers"], conf["objects"], conf["obj_size"], conf["mode"], conf["objects_max"], conf["obj_size_num"], conf["obj_size_unit"]),
140-
"ratio":ratio[tmp_mode],
141-
"type":tmp_mode
136+
"config": "containers=%s;objects=%s;cprefix=%s-%s-%s;sizes=c(%s)%s"
137+
% (conf["containers"], conf["objects"], conf["obj_size"], conf["mode"], conf["objects_max"], conf["obj_size_num"], conf["obj_size_unit"]),
138+
"ratio": ratio[tmp_mode],
139+
"type": tmp_mode
142140
})
143141

144142
template = {
145-
"default":{
143+
"default": {
146144
"description": conf["mode"],
147145
"name": "%s_%scon_%sobj_%s_%dw" % (conf["mode"], conf["containers_max"], conf["objects_max"], conf["obj_size"], conf["workers"]),
148-
"storage": {"type":"swift", "config":"timeout=300000" },
149-
"auth": {"type":"swauth", "config":"%s" % (conf["auth"])},
146+
"storage": {"type": "swift", "config": "timeout=300000"},
147+
"auth": {"type": "swauth", "config": "%s" % (conf["auth"])},
150148
"workflow": {
151149
"workstage": [{
152150
"name": "main",
153-
"work": {"rampup":conf["rampup"], "rampdown":conf["rampdown"], "name":conf["obj_size"], "workers":conf["workers"], "runtime":conf["runtime"],
154-
"operation":operation
155-
}
151+
"work": {"rampup": conf["rampup"], "rampdown":conf["rampdown"], "name":conf["obj_size"], "workers":conf["workers"], "runtime":conf["runtime"],
152+
"operation":operation
153+
}
156154
}]
157155
}
158156
}
@@ -162,19 +160,19 @@ def choose_template(self, temp_name, conf):
162160

163161
def parse_conf(self, conf):
164162
if "containers" in conf:
165-
m = re.findall("(\w{1})\((\d+),(\d+)\)", conf["containers"])
163+
m = re.findall(r"(\w{1})\((\d+),(\d+)\)", conf["containers"])
166164
if m:
167165
conf["containers_method"] = m[0][0]
168166
conf["containers_min"] = m[0][1]
169167
conf["containers_max"] = m[0][2]
170168
if "objects" in conf:
171-
m = re.findall("(\w{1})\((\d+),(\d+)\)", conf["objects"])
169+
m = re.findall(r"(\w{1})\((\d+),(\d+)\)", conf["objects"])
172170
if m:
173171
conf["objects_method"] = m[0][0]
174172
conf["objects_min"] = m[0][1]
175173
conf["objects_max"] = m[0][2]
176174
if "obj_size" in conf:
177-
m = re.findall("(\d+)(\w+)", conf["obj_size"])
175+
m = re.findall(r"(\d+)(\w+)", conf["obj_size"])
178176
if m:
179177
conf["obj_size_num"] = m[0][0]
180178
conf["obj_size_unit"] = m[0][1]
@@ -205,14 +203,14 @@ def initialize(self):
205203
if not self.container_prepare_check():
206204
workstage_init = {
207205
"name": "init",
208-
"work": {"type":"init", "workers":conf["workers"], "config":"containers=r(1,%s);cprefix=%s-%s-%s" % (conf["containers_max"], conf["obj_size"], conf["mode"], conf["objects_max"])}
206+
"work": {"type": "init", "workers": conf["workers"], "config": "containers=r(1,%s);cprefix=%s-%s-%s" % (conf["containers_max"], conf["obj_size"], conf["mode"], conf["objects_max"])}
209207
}
210208
workstage_prepare = {
211-
"name":"prepare",
209+
"name": "prepare",
212210
"work": {
213-
"type":"prepare",
214-
"workers":conf["workers"],
215-
"config":"containers=r(1,%s);objects=r(1,%s);cprefix=%s-%s-%s;sizes=c(%s)%s" %
211+
"type": "prepare",
212+
"workers": conf["workers"],
213+
"config": "containers=r(1,%s);objects=r(1,%s);cprefix=%s-%s-%s;sizes=c(%s)%s" %
216214
(conf["containers_max"], conf["objects_max"], conf["obj_size"], conf["mode"], conf["objects_max"], conf["obj_size_num"], conf["obj_size_unit"])
217215
}
218216
}
@@ -224,15 +222,15 @@ def initialize(self):
224222
def container_prepare_check(self):
225223
return self.container_prepared
226224

227-
#function use_template, set_leaf and run_content, add_leaf_to_tree all used for generate a cosbench xml.
225+
# function use_template, set_leaf and run_content, add_leaf_to_tree all used for generate a cosbench xml.
228226
def prepare_xml(self, leaves):
229227
conf = self.config
230228
root = ET.Element("workload")
231229
parent = root
232230
self.add_leaf_to_tree(leaves, parent)
233231
self.config["xml_name"] = leaves["name"]
234232
tree = ET.ElementTree(root)
235-
tree.write("%s/%s.xml" % (conf["cosbench_xml_dir"], leaves["name"]),pretty_print=True)
233+
tree.write("%s/%s.xml" % (conf["cosbench_xml_dir"], leaves["name"]), pretty_print=True)
236234
logger.info("Write xml conf to %s/%s.xml", conf["cosbench_xml_dir"], leaves["name"])
237235

238236
def add_leaf_to_tree(self, leaves, parent):
@@ -258,7 +256,7 @@ def run(self):
258256
except KeyboardInterrupt:
259257
logger.warning("accept keyboard interrupt, cancel this run")
260258
conf = self.config
261-
stdout, stderr = common.pdsh(conf["controller"],'sh %s/cli.sh cancel %s' % (conf["cosbench_dir"], self.runid)).communicate()
259+
stdout, stderr = common.pdsh(conf["controller"], 'sh %s/cli.sh cancel %s' % (conf["cosbench_dir"], self.runid)).communicate()
262260
logger.info("%s", stdout)
263261

264262
self.check_workload_status()
@@ -295,7 +293,7 @@ def check_workload_status(self):
295293
return True
296294

297295
def check_cosbench_res_dir(self):
298-
#check res dir
296+
# check res dir
299297
check_time = 0
300298
while True:
301299
stdout, stderr = self._do_ctrl("find {cosbench_dir}/archive -maxdepth 1 -name '{runid}-*'",
@@ -314,13 +312,13 @@ def _run(self):
314312
cosbench_dir=conf["cosbench_dir"],
315313
cosbench_xml_dir=conf["cosbench_xml_dir"],
316314
xml_name=conf["xml_name"])
317-
m = re.findall('Accepted with ID:\s*(\w+)', stdout)
315+
m = re.findall(r'Accepted with ID:\s*(\w+)', stdout)
318316
if not m:
319317
logger.error("cosbench start failing with error: %s", stderr)
320318
sys.exit()
321319
self.runid = m[0]
322320
logger.info("cosbench job start, job number %s", self.runid)
323-
wait_time = conf["rampup"]+conf["rampdown"]+conf["runtime"]
321+
wait_time = conf["rampup"] + conf["rampdown"] + conf["runtime"]
324322
logger.info("====== cosbench job: %s started ======", conf["xml_name"])
325323
logger.info("wait %d secs to finish the test", wait_time)
326324
logger.info("You can monitor the runtime status and results on http://localhost:19088/controller")

0 commit comments

Comments (0)