Skip to content

Commit 62a06c4

Browse files
committed
mgr/cephadm: autopep8 changes
Signed-off-by: Adam King <[email protected]>
1 parent c1f3497 commit 62a06c4

File tree

6 files changed

+50
-25
lines changed

6 files changed

+50
-25
lines changed

src/pybind/mgr/cephadm/module.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2719,7 +2719,8 @@ def upgrade_start(self, image: str, version: str, daemon_types: Optional[List[st
27192719

27202720
if limit is not None:
27212721
if limit < 1:
2722-
raise OrchestratorError(f'Upgrade aborted - --limit arg must be a positive integer, not {limit}')
2722+
raise OrchestratorError(
2723+
f'Upgrade aborted - --limit arg must be a positive integer, not {limit}')
27232724

27242725
return self.upgrade.upgrade_start(image, version, daemon_types, hosts, services, limit)
27252726

src/pybind/mgr/cephadm/services/osd.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -298,7 +298,8 @@ def generate_previews(self, osdspecs: List[DriveGroupSpec], for_host: str) -> Li
298298

299299
# driveselection for host
300300
cmds: List[str] = self.driveselection_to_ceph_volume(ds,
301-
osd_id_claims.filtered_by_host(host),
301+
osd_id_claims.filtered_by_host(
302+
host),
302303
preview=True)
303304
if not cmds:
304305
logger.debug("No data_devices, skipping DriveGroup: {}".format(

src/pybind/mgr/cephadm/tests/test_cephadm.py

Lines changed: 8 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -166,9 +166,11 @@ def test_re_add_host_receive_loopback(self, resolve_ip, cephadm_module):
166166
resolve_ip.side_effect = ['192.168.122.1', '127.0.0.1', '127.0.0.1']
167167
assert wait(cephadm_module, cephadm_module.get_hosts()) == []
168168
cephadm_module._add_host(HostSpec('test', '192.168.122.1'))
169-
assert wait(cephadm_module, cephadm_module.get_hosts()) == [HostSpec('test', '192.168.122.1')]
169+
assert wait(cephadm_module, cephadm_module.get_hosts()) == [
170+
HostSpec('test', '192.168.122.1')]
170171
cephadm_module._add_host(HostSpec('test'))
171-
assert wait(cephadm_module, cephadm_module.get_hosts()) == [HostSpec('test', '192.168.122.1')]
172+
assert wait(cephadm_module, cephadm_module.get_hosts()) == [
173+
HostSpec('test', '192.168.122.1')]
172174
with pytest.raises(OrchestratorError):
173175
cephadm_module._add_host(HostSpec('test2'))
174176

@@ -894,7 +896,8 @@ def test_driveselection_to_ceph_volume(self, cephadm_module, devices, preview, e
894896
ds = DriveSelection(dg, Devices([Device(path) for path in devices]))
895897
preview = preview
896898
out = cephadm_module.osd_service.driveselection_to_ceph_volume(ds, [], preview)
897-
assert all(any(cmd in exp_cmd for exp_cmd in exp_commands) for cmd in out), f'Expected cmds from f{out} in {exp_commands}'
899+
assert all(any(cmd in exp_cmd for exp_cmd in exp_commands)
900+
for cmd in out), f'Expected cmds from f{out} in {exp_commands}'
898901

899902
@pytest.mark.parametrize(
900903
"devices, preview, exp_commands",
@@ -919,7 +922,8 @@ def test_raw_driveselection_to_ceph_volume(self, cephadm_module, devices, previe
919922
ds = DriveSelection(dg, Devices([Device(path) for path in devices]))
920923
preview = preview
921924
out = cephadm_module.osd_service.driveselection_to_ceph_volume(ds, [], preview)
922-
assert all(any(cmd in exp_cmd for exp_cmd in exp_commands) for cmd in out), f'Expected cmds from f{out} in {exp_commands}'
925+
assert all(any(cmd in exp_cmd for exp_cmd in exp_commands)
926+
for cmd in out), f'Expected cmds from f{out} in {exp_commands}'
923927

924928
@mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm(
925929
json.dumps([

src/pybind/mgr/cephadm/upgrade.py

Lines changed: 34 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -368,15 +368,18 @@ def _get_earlier_daemons(dtypes: List[str], candidates: List[DaemonDescription])
368368
return [d for d in candidates if d.daemon_type in earlier_types]
369369

370370
if self.upgrade_state:
371-
raise OrchestratorError('Cannot set values for --daemon-types, --services or --hosts when upgrade already in progress.')
371+
raise OrchestratorError(
372+
'Cannot set values for --daemon-types, --services or --hosts when upgrade already in progress.')
372373
try:
373-
target_id, target_version, target_digests = self.mgr.wait_async(CephadmServe(self.mgr)._get_container_image_info(target_name))
374+
target_id, target_version, target_digests = self.mgr.wait_async(
375+
CephadmServe(self.mgr)._get_container_image_info(target_name))
374376
except OrchestratorError as e:
375377
raise OrchestratorError(f'Failed to pull {target_name}: {str(e)}')
376378
# what we need to do here is build a list of daemons that must already be upgraded
377379
# in order for the user's selection of daemons to upgrade to be valid. for example,
378380
# if they say --daemon-types 'osd,mds' but mons have not been upgraded, we block.
379-
daemons = [d for d in self.mgr.cache.get_daemons() if d.daemon_type not in MONITORING_STACK_TYPES]
381+
daemons = [d for d in self.mgr.cache.get_daemons(
382+
) if d.daemon_type not in MONITORING_STACK_TYPES]
380383
err_msg_base = 'Cannot start upgrade. '
381384
# "dtypes" will later be filled in with the types of daemons that will be upgraded with the given parameters
382385
dtypes = []
@@ -394,7 +397,8 @@ def _get_earlier_daemons(dtypes: List[str], candidates: List[DaemonDescription])
394397
# for our purposes here we can effectively convert our list of services into the
395398
# set of daemon types the services contain. This works because we don't allow --services
396399
# and --daemon-types at the same time and we only allow services of the same type
397-
sspecs = [self.mgr.spec_store[s].spec for s in services if self.mgr.spec_store[s].spec is not None]
400+
sspecs = [
401+
self.mgr.spec_store[s].spec for s in services if self.mgr.spec_store[s].spec is not None]
398402
stypes = list(set([s.service_type for s in sspecs]))
399403
if len(stypes) != 1:
400404
raise OrchestratorError('Doing upgrade by service only support services of one type at '
@@ -414,15 +418,18 @@ def _get_earlier_daemons(dtypes: List[str], candidates: List[DaemonDescription])
414418
# that reside on hosts in the list of hosts we will upgrade. Then take the type from
415419
# that list that is latest in the upgrade order and check if any daemons on hosts not in the
416420
# provided list of hosts have a daemon with a type earlier in the upgrade order that is not upgraded.
417-
dtypes = list(set([d.daemon_type for d in daemons if d.daemon_type is not None and d.hostname in hosts]))
418-
other_hosts_daemons = [d for d in daemons if d.hostname is not None and d.hostname not in hosts]
421+
dtypes = list(
422+
set([d.daemon_type for d in daemons if d.daemon_type is not None and d.hostname in hosts]))
423+
other_hosts_daemons = [
424+
d for d in daemons if d.hostname is not None and d.hostname not in hosts]
419425
daemons = _get_earlier_daemons([_latest_type(dtypes)], other_hosts_daemons)
420426
err_msg_base += 'Daemons with types earlier in upgrade order than daemons on given host need upgrading.\n'
421427
need_upgrade_self, n1, n2, _ = self._detect_need_upgrade(daemons, target_digests)
422428
if need_upgrade_self and ('mgr' not in dtypes or (daemon_types is None and services is None)):
423429
# also report active mgr as needing to be upgraded. It is not included in the resulting list
424430
# by default as it is treated special and handled via the need_upgrade_self bool
425-
n1.insert(0, (self.mgr.mgr_service.get_active_daemon(self.mgr.cache.get_daemons_by_type('mgr')), True))
431+
n1.insert(0, (self.mgr.mgr_service.get_active_daemon(
432+
self.mgr.cache.get_daemons_by_type('mgr')), True))
426433
if n1 or n2:
427434
raise OrchestratorError(f'{err_msg_base}Please first upgrade '
428435
f'{", ".join(list(set([d[0].name() for d in n1] + [d[0].name() for d in n2])))}\n'
@@ -780,7 +787,8 @@ def _upgrade_daemons(self, to_upgrade: List[Tuple[DaemonDescription, bool]], tar
780787
target_digests = []
781788
for d_entry in to_upgrade:
782789
if self.upgrade_state.remaining_count is not None and self.upgrade_state.remaining_count <= 0 and not d_entry[1]:
783-
self.mgr.log.info(f'Hit upgrade limit of {self.upgrade_state.total_count}. Stopping upgrade')
790+
self.mgr.log.info(
791+
f'Hit upgrade limit of {self.upgrade_state.total_count}. Stopping upgrade')
784792
return
785793
d = d_entry[0]
786794
assert d.daemon_type is not None
@@ -822,7 +830,8 @@ def _upgrade_daemons(self, to_upgrade: List[Tuple[DaemonDescription, bool]], tar
822830
self.upgrade_info_str = 'Currently upgrading %s daemons' % (d.daemon_type)
823831

824832
if len(to_upgrade) > 1:
825-
logger.info('Upgrade: Updating %s.%s (%d/%d)' % (d.daemon_type, d.daemon_id, num, min(len(to_upgrade), self.upgrade_state.remaining_count if self.upgrade_state.remaining_count is not None else 9999999)))
833+
logger.info('Upgrade: Updating %s.%s (%d/%d)' % (d.daemon_type, d.daemon_id, num, min(len(to_upgrade),
834+
self.upgrade_state.remaining_count if self.upgrade_state.remaining_count is not None else 9999999)))
826835
else:
827836
logger.info('Upgrade: Updating %s.%s' %
828837
(d.daemon_type, d.daemon_id))
@@ -1038,15 +1047,19 @@ def _do_upgrade(self):
10381047
})
10391048

10401049
if self.upgrade_state.daemon_types is not None:
1041-
logger.debug(f'Filtering daemons to upgrade by daemon types: {self.upgrade_state.daemon_types}')
1042-
daemons = [d for d in self.mgr.cache.get_daemons() if d.daemon_type in self.upgrade_state.daemon_types]
1050+
logger.debug(
1051+
f'Filtering daemons to upgrade by daemon types: {self.upgrade_state.daemon_types}')
1052+
daemons = [d for d in self.mgr.cache.get_daemons(
1053+
) if d.daemon_type in self.upgrade_state.daemon_types]
10431054
elif self.upgrade_state.services is not None:
1044-
logger.debug(f'Filtering daemons to upgrade by services: {self.upgrade_state.daemon_types}')
1055+
logger.debug(
1056+
f'Filtering daemons to upgrade by services: {self.upgrade_state.daemon_types}')
10451057
daemons = []
10461058
for service in self.upgrade_state.services:
10471059
daemons += self.mgr.cache.get_daemons_by_service(service)
10481060
else:
1049-
daemons = [d for d in self.mgr.cache.get_daemons() if d.daemon_type in CEPH_UPGRADE_ORDER]
1061+
daemons = [d for d in self.mgr.cache.get_daemons(
1062+
) if d.daemon_type in CEPH_UPGRADE_ORDER]
10501063
if self.upgrade_state.hosts is not None:
10511064
logger.debug(f'Filtering daemons to upgrade by hosts: {self.upgrade_state.hosts}')
10521065
daemons = [d for d in daemons if d.hostname in self.upgrade_state.hosts]
@@ -1067,15 +1080,18 @@ def _do_upgrade(self):
10671080
logger.debug('Upgrade: Checking %s daemons' % daemon_type)
10681081
daemons_of_type = [d for d in daemons if d.daemon_type == daemon_type]
10691082

1070-
need_upgrade_self, need_upgrade, need_upgrade_deployer, done = self._detect_need_upgrade(daemons_of_type, target_digests)
1083+
need_upgrade_self, need_upgrade, need_upgrade_deployer, done = self._detect_need_upgrade(
1084+
daemons_of_type, target_digests)
10711085
upgraded_daemon_count += done
10721086
self._update_upgrade_progress(upgraded_daemon_count / len(daemons))
10731087

10741088
# make sure mgr and monitoring stack daemons are properly redeployed in staggered upgrade scenarios
10751089
if daemon_type == 'mgr' or daemon_type in MONITORING_STACK_TYPES:
10761090
if any(d in target_digests for d in self.mgr.get_active_mgr_digests()):
1077-
need_upgrade_names = [d[0].name() for d in need_upgrade] + [d[0].name() for d in need_upgrade_deployer]
1078-
dds = [d for d in self.mgr.cache.get_daemons_by_type(daemon_type) if d.name() not in need_upgrade_names]
1091+
need_upgrade_names = [d[0].name() for d in need_upgrade] + \
1092+
[d[0].name() for d in need_upgrade_deployer]
1093+
dds = [d for d in self.mgr.cache.get_daemons_by_type(
1094+
daemon_type) if d.name() not in need_upgrade_names]
10791095
need_upgrade_active, n1, n2, __ = self._detect_need_upgrade(dds, target_digests)
10801096
if not n1:
10811097
if not need_upgrade_self and need_upgrade_active:
@@ -1113,7 +1129,8 @@ def _do_upgrade(self):
11131129
# following bits of _do_upgrade are for completing upgrade for given
11141130
# types. If we haven't actually finished upgrading all the daemons
11151131
# of this type, we should exit the loop here
1116-
_, n1, n2, _ = self._detect_need_upgrade(self.mgr.cache.get_daemons_by_type(daemon_type), target_digests)
1132+
_, n1, n2, _ = self._detect_need_upgrade(
1133+
self.mgr.cache.get_daemons_by_type(daemon_type), target_digests)
11171134
if n1 or n2:
11181135
continue
11191136

src/pybind/mgr/orchestrator/module.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -822,7 +822,8 @@ def _daemon_add_osd(self,
822822
values.remove(v)
823823

824824
for dev_type in ['data_devices', 'db_devices', 'wal_devices', 'journal_devices']:
825-
drive_group_spec[dev_type] = DeviceSelection(paths=drive_group_spec[dev_type]) if drive_group_spec.get(dev_type) else None
825+
drive_group_spec[dev_type] = DeviceSelection(
826+
paths=drive_group_spec[dev_type]) if drive_group_spec.get(dev_type) else None
826827

827828
drive_group = DriveGroupSpec(
828829
placement=PlacementSpec(host_pattern=host_name),

src/pybind/mgr/prometheus/module.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1062,7 +1062,8 @@ def get_service_list(self) -> Dict[Tuple[str, str], Tuple[str, str, str]]:
10621062
for server in self.list_servers():
10631063
host = cast(str, server.get('hostname', ''))
10641064
for service in cast(List[ServiceInfoT], server.get('services', [])):
1065-
ret.update({(service['id'], service['type']): (host, service['ceph_version'], service.get('name', ''))})
1065+
ret.update({(service['id'], service['type']): (
1066+
host, service['ceph_version'], service.get('name', ''))})
10661067
return ret
10671068

10681069
@profile_method()

0 commit comments

Comments (0)