Commit 10402b6

Merge pull request ceph#55350 from rkachach/fix_issue_64211
mgr/rook: removing all the code related to OSDs creation/removal
2 parents: 5f7baab + 9902902

2 files changed: +12 / -503 lines


src/pybind/mgr/rook/module.py

Lines changed: 11 additions & 146 deletions
@@ -82,12 +82,6 @@ class RookOrchestrator(MgrModule, orchestrator.Orchestrator):
             default='local',
             desc='storage class name for LSO-discovered PVs',
         ),
-        Option(
-            'drive_group_interval',
-            type='float',
-            default=300.0,
-            desc='interval in seconds between re-application of applied drive_groups',
-        ),
     ]

     @staticmethod
@@ -126,9 +120,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None:
         self.config_notify()
         if TYPE_CHECKING:
             self.storage_class = 'foo'
-            self.drive_group_interval = 10.0

-        self._load_drive_groups()
         self._shutdown = threading.Event()

     def config_notify(self) -> None:
@@ -144,7 +136,6 @@ def config_notify(self) -> None:
             self.log.debug(' mgr option %s = %s',
                            opt['name'], getattr(self, opt['name'])) # type: ignore
         assert isinstance(self.storage_class, str)
-        assert isinstance(self.drive_group_interval, float)

         if self._rook_cluster:
             self._rook_cluster.storage_class_name = self.storage_class
@@ -211,10 +202,6 @@ def serve(self) -> None:
         self._initialized.set()
         self.config_notify()

-        while not self._shutdown.is_set():
-            self._apply_drivegroups(list(self._drive_group_map.values()))
-            self._shutdown.wait(self.drive_group_interval)
-
     @handle_orch_error
     def get_inventory(self, host_filter: Optional[orchestrator.InventoryFilter] = None, refresh: bool = False) -> List[orchestrator.InventoryHost]:
         host_list = None
@@ -415,15 +402,6 @@ def sum_running_pods(service_type: str, service_name: Optional[str] = None) -> i
                 running=sum_running_pods('osd')
             )

-            # drivegroups
-            for name, dg in self._drive_group_map.items():
-                spec[f'osd.{name}'] = orchestrator.ServiceDescription(
-                    spec=dg,
-                    last_refresh=now,
-                    size=0,
-                    running=0,
-                )
-
         if service_type == 'rbd-mirror' or service_type is None:
             # rbd-mirrors
             all_mirrors = self.rook_cluster.get_resource("cephrbdmirrors")
@@ -576,9 +554,6 @@ def remove_service(self, service_name: str, force: bool = False) -> str:
         elif service_type == 'rbd-mirror':
             return self.rook_cluster.rm_service('cephrbdmirrors', service_id)
         elif service_type == 'osd':
-            if service_id in self._drive_group_map:
-                del self._drive_group_map[service_id]
-                self._save_drive_groups()
             return f'Removed {service_name}'
         elif service_type == 'ingress':
             self.log.info("{0} service '{1}' does not exist".format('ingress', service_id))
@@ -634,134 +609,24 @@ def apply_nfs(self, spec):
     def remove_daemons(self, names: List[str]) -> List[str]:
         return self.rook_cluster.remove_pods(names)

-    def apply_drivegroups(self, specs: List[DriveGroupSpec]) -> OrchResult[List[str]]:
-        for drive_group in specs:
-            self._drive_group_map[str(drive_group.service_id)] = drive_group
-        self._save_drive_groups()
-        return OrchResult(self._apply_drivegroups(specs))
-
-    def _apply_drivegroups(self, ls: List[DriveGroupSpec]) -> List[str]:
-        all_hosts = raise_if_exception(self.get_hosts())
-        result_list: List[str] = []
-        for drive_group in ls:
-            matching_hosts = drive_group.placement.filter_matching_hosts(
-                lambda label=None, as_hostspec=None: all_hosts
-            )
+    def add_host_label(self, host: str, label: str) -> OrchResult[str]:
+        return self.rook_cluster.add_host_label(host, label)
+
+    def remove_host_label(self, host: str, label: str, force: bool = False) -> OrchResult[str]:
+        return self.rook_cluster.remove_host_label(host, label)

-            if not self.rook_cluster.node_exists(matching_hosts[0]):
-                raise RuntimeError("Node '{0}' is not in the Kubernetes "
-                                   "cluster".format(matching_hosts))
-
-            # Validate whether cluster CRD can accept individual OSD
-            # creations (i.e. not useAllDevices)
-            if not self.rook_cluster.can_create_osd():
-                raise RuntimeError("Rook cluster configuration does not "
-                                   "support OSD creation.")
-            result_list.append(self.rook_cluster.add_osds(drive_group, matching_hosts))
-        return result_list
-
-    def _load_drive_groups(self) -> None:
-        stored_drive_group = self.get_store("drive_group_map")
-        self._drive_group_map: Dict[str, DriveGroupSpec] = {}
-        if stored_drive_group:
-            for name, dg in json.loads(stored_drive_group).items():
-                try:
-                    self._drive_group_map[name] = DriveGroupSpec.from_json(dg)
-                except ValueError as e:
-                    self.log.error(f'Failed to load drive group {name} ({dg}): {e}')
-
-    def _save_drive_groups(self) -> None:
-        json_drive_group_map = {
-            name: dg.to_json() for name, dg in self._drive_group_map.items()
-        }
-        self.set_store("drive_group_map", json.dumps(json_drive_group_map))
+    @handle_orch_error
+    def create_osds(self, drive_group: DriveGroupSpec) -> str:
+        raise orchestrator.OrchestratorError('Creating OSDs is not supported by rook orchestrator. Please, use Rook operator.')

+    @handle_orch_error
     def remove_osds(self,
                     osd_ids: List[str],
                     replace: bool = False,
                     force: bool = False,
                     zap: bool = False,
-                    no_destroy: bool = False) -> OrchResult[str]:
-        assert self._rook_cluster is not None
-        if zap:
-            raise RuntimeError("Rook does not support zapping devices during OSD removal.")
-        res = self._rook_cluster.remove_osds(osd_ids, replace, force, self.mon_command)
-        return OrchResult(res)
-
-    def add_host_label(self, host: str, label: str) -> OrchResult[str]:
-        return self.rook_cluster.add_host_label(host, label)
-
-    def remove_host_label(self, host: str, label: str, force: bool = False) -> OrchResult[str]:
-        return self.rook_cluster.remove_host_label(host, label)
-    """
-    @handle_orch_error
-    def create_osds(self, drive_group):
-        # type: (DriveGroupSpec) -> str
-        # Creates OSDs from a drive group specification.
-
-        # $: ceph orch osd create -i <dg.file>
-
-        # The drivegroup file must only contain one spec at a time.
-        #
-
-        targets = []  # type: List[str]
-        if drive_group.data_devices and drive_group.data_devices.paths:
-            targets += [d.path for d in drive_group.data_devices.paths]
-        if drive_group.data_directories:
-            targets += drive_group.data_directories
-
-        all_hosts = raise_if_exception(self.get_hosts())
-
-        matching_hosts = drive_group.placement.filter_matching_hosts(lambda label=None, as_hostspec=None: all_hosts)
-
-        assert len(matching_hosts) == 1
-
-        if not self.rook_cluster.node_exists(matching_hosts[0]):
-            raise RuntimeError("Node '{0}' is not in the Kubernetes "
-                               "cluster".format(matching_hosts))
-
-        # Validate whether cluster CRD can accept individual OSD
-        # creations (i.e. not useAllDevices)
-        if not self.rook_cluster.can_create_osd():
-            raise RuntimeError("Rook cluster configuration does not "
-                               "support OSD creation.")
-
-        return self.rook_cluster.add_osds(drive_group, matching_hosts)
-
-    # TODO: this was the code to update the progress reference:
-
-    @handle_orch_error
-    def has_osds(matching_hosts: List[str]) -> bool:
-
-        # Find OSD pods on this host
-        pod_osd_ids = set()
-        pods = self.k8s.list_namespaced_pod(self._rook_env.namespace,
-                                            label_selector="rook_cluster={},app=rook-ceph-osd".format(self._rook_env.cluster_name),
-                                            field_selector="spec.nodeName={0}".format(
-                                                matching_hosts[0]
-                                            )).items
-        for p in pods:
-            pod_osd_ids.add(int(p.metadata.labels['ceph-osd-id']))
-
-        self.log.debug('pod_osd_ids={0}'.format(pod_osd_ids))
-
-        found = []
-        osdmap = self.get("osd_map")
-        for osd in osdmap['osds']:
-            osd_id = osd['osd']
-            if osd_id not in pod_osd_ids:
-                continue
-
-            metadata = self.get_metadata('osd', "%s" % osd_id)
-            if metadata and metadata['devices'] in targets:
-                found.append(osd_id)
-            else:
-                self.log.info("ignoring osd {0} {1}".format(
-                    osd_id, metadata['devices'] if metadata else 'DNE'
-                ))
-
-        return found is not None
-    """
+                    no_destroy: bool = False) -> str:
+        raise orchestrator.OrchestratorError('Removing OSDs is not supported by rook orchestrator. Please, use Rook operator.')

     @handle_orch_error
     def blink_device_light(self, ident_fault: str, on: bool, locs: List[orchestrator.DeviceLightLoc]) -> List[str]:
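
The practical effect of the module.py changes is that both OSD entry points of the Rook orchestrator now refuse the operation and point users at the Rook operator. The snippet below is a minimal, self-contained sketch of that behaviour for illustration only: OrchestratorError and RookOrchestratorSketch are stand-ins, the error strings are copied verbatim from the added lines above, and the real methods are additionally wrapped by the module's handle_orch_error decorator (which, as used elsewhere in the module, reports the exception back to the caller as a failed result rather than crashing the mgr module).

# Illustrative stand-ins only; the real class lives in src/pybind/mgr/rook/module.py.
class OrchestratorError(Exception):
    """Stand-in for orchestrator.OrchestratorError."""


class RookOrchestratorSketch:
    # Error messages copied verbatim from the '+' lines of the diff.
    def create_osds(self, drive_group: object) -> str:
        raise OrchestratorError('Creating OSDs is not supported by rook orchestrator. Please, use Rook operator.')

    def remove_osds(self, osd_ids: list, replace: bool = False, force: bool = False,
                    zap: bool = False, no_destroy: bool = False) -> str:
        raise OrchestratorError('Removing OSDs is not supported by rook orchestrator. Please, use Rook operator.')


if __name__ == '__main__':
    orch = RookOrchestratorSketch()
    try:
        orch.create_osds(drive_group=None)
    except OrchestratorError as exc:
        # A caller now gets an error instead of a created OSD.
        print(f'orchestrator error: {exc}')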
