Commit b9b6f0e

Merge pull request ceph#59488 from adk3798/revert-cephadm-require-nvmeof-group
mgr/cephadm: default to empty string nvmeof group name rather than requiring it

Reviewed-by: John Mulligan <[email protected]>
2 parents 7ea1f01 + b377085

File tree

5 files changed: +6 -211 lines changed

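Net effect of this revert: an nvmeof spec may once again omit the "group" field. The NvmeofServiceSpec constructor now normalizes a missing group to the empty string and validate() no longer rejects it, so the migration machinery that force-assigned group names to stored specs is removed. A minimal sketch of the resulting behavior (illustrative service_id and pool values):

    from ceph.deployment.service_spec import NvmeofServiceSpec

    # 'group' omitted: the constructor falls back to '' rather than the spec
    # failing validation later (see the service_spec.py hunk below).
    spec = NvmeofServiceSpec(service_id='foo', pool='foo')
    assert spec.group == ''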

src/pybind/mgr/cephadm/inventory.py

Lines changed: 1 addition & 11 deletions

@@ -26,11 +26,7 @@
 from cephadm.services.cephadmservice import CephadmDaemonDeploySpec
 
 from .utils import resolve_ip, SpecialHostLabels
-from .migrations import (
-    queue_migrate_nfs_spec,
-    queue_migrate_rgw_spec,
-    queue_migrate_nvmeof_spec,
-)
+from .migrations import queue_migrate_nfs_spec, queue_migrate_rgw_spec
 
 if TYPE_CHECKING:
     from .module import CephadmOrchestrator
@@ -299,12 +295,6 @@ def load(self):
                 ):
                     queue_migrate_rgw_spec(self.mgr, j)
 
-                if (
-                    (self.mgr.migration_current or 0) < 8
-                    and j['spec'].get('service_type') == 'nvmeof'
-                ):
-                    queue_migrate_nvmeof_spec(self.mgr, j)
-
                 spec = ServiceSpec.from_json(j['spec'])
                 created = str_to_datetime(cast(str, j['created']))
                 self._specs[service_name] = spec

src/pybind/mgr/cephadm/migrations.py

Lines changed: 2 additions & 60 deletions

@@ -3,13 +3,7 @@
 import logging
 from typing import TYPE_CHECKING, Iterator, Optional, Dict, Any, List
 
-from ceph.deployment.service_spec import (
-    PlacementSpec,
-    ServiceSpec,
-    HostPlacementSpec,
-    RGWSpec,
-    NvmeofServiceSpec,
-)
+from ceph.deployment.service_spec import PlacementSpec, ServiceSpec, HostPlacementSpec, RGWSpec
 from cephadm.schedule import HostAssignment
 from cephadm.utils import SpecialHostLabels
 import rados
@@ -20,7 +14,7 @@
 if TYPE_CHECKING:
     from .module import CephadmOrchestrator
 
-LAST_MIGRATION = 8
+LAST_MIGRATION = 7
 
 logger = logging.getLogger(__name__)
 
@@ -47,9 +41,6 @@ def __init__(self, mgr: "CephadmOrchestrator"):
         r = mgr.get_store('rgw_migration_queue')
         self.rgw_migration_queue = json.loads(r) if r else []
 
-        n = mgr.get_store('nvmeof_migration_queue')
-        self.nvmeof_migration_queue = json.loads(n) if n else []
-
         # for some migrations, we don't need to do anything except for
         # incrementing migration_current.
         # let's try to shortcut things here.
@@ -118,10 +109,6 @@ def migrate(self, startup: bool = False) -> None:
             if self.migrate_6_7():
                 self.set(7)
 
-        if self.mgr.migration_current == 7:
-            if self.migrate_7_8():
-                self.set(8)
-
     def migrate_0_1(self) -> bool:
         """
        Migration 0 -> 1
@@ -455,51 +442,6 @@ def migrate_6_7(self) -> bool:
         # was set to true. Therefore we have nothing to migrate for those daemons
         return True
 
-    def migrate_nvmeof_spec(self, spec: Dict[Any, Any], migration_counter: int) -> Optional[NvmeofServiceSpec]:
-        """ Add value for group parameter to nvmeof spec """
-        new_spec = spec.copy()
-        # Note: each spec must have a different group name so we append
-        # the value of a counter to the end
-        new_spec['spec']['group'] = f'default{str(migration_counter + 1)}'
-        return NvmeofServiceSpec.from_json(new_spec)
-
-    def nvmeof_spec_needs_migration(self, spec: Dict[Any, Any]) -> bool:
-        spec = spec.get('spec', None)
-        return (bool(spec) and spec.get('group', None) is None)
-
-    def migrate_7_8(self) -> bool:
-        """
-        Add a default value for the "group" parameter to nvmeof specs that don't have one
-        """
-        self.mgr.log.debug(f'Starting nvmeof migration (queue length is {len(self.nvmeof_migration_queue)})')
-        migrated_spec_counter = 0
-        for s in self.nvmeof_migration_queue:
-            spec = s['spec']
-            if self.nvmeof_spec_needs_migration(spec):
-                nvmeof_spec = self.migrate_nvmeof_spec(spec, migrated_spec_counter)
-                if nvmeof_spec is not None:
-                    logger.info(f"Added group 'default{migrated_spec_counter + 1}' to nvmeof spec {spec}")
-                    self.mgr.spec_store.save(nvmeof_spec)
-                    migrated_spec_counter += 1
-            else:
-                logger.info(f"No Migration is needed for nvmeof spec: {spec}")
-        self.nvmeof_migration_queue = []
-        return True
-
-
-def queue_migrate_nvmeof_spec(mgr: "CephadmOrchestrator", spec_dict: Dict[Any, Any]) -> None:
-    """
-    group has become a required field for nvmeof specs and has been added
-    to spec validation. We need to assign a default group to nvmeof specs
-    that don't have one
-    """
-    service_id = spec_dict['spec']['service_id']
-    queued = mgr.get_store('nvmeof_migration_queue') or '[]'
-    ls = json.loads(queued)
-    ls.append(spec_dict)
-    mgr.set_store('nvmeof_migration_queue', json.dumps(ls))
-    mgr.log.info(f'Queued nvmeof.{service_id} for migration')
-
 
 def queue_migrate_rgw_spec(mgr: "CephadmOrchestrator", spec_dict: Dict[Any, Any]) -> None:
     """

src/pybind/mgr/cephadm/tests/test_migration.py

Lines changed: 0 additions & 136 deletions

@@ -382,139 +382,3 @@ def test_migrate_cert_store(cephadm_module: CephadmOrchestrator):
     assert cephadm_module.cert_key_store.get_cert('grafana_cert', host='host2')
     assert cephadm_module.cert_key_store.get_key('grafana_key', host='host1')
     assert cephadm_module.cert_key_store.get_key('grafana_key', host='host2')
-
-
-@pytest.mark.parametrize(
-    "nvmeof_spec_store_entries, should_migrate_specs, expected_groups",
-    [
-        (
-            [
-                {'spec': {
-                    'service_type': 'nvmeof',
-                    'service_name': 'nvmeof.foo',
-                    'service_id': 'foo',
-                    'placement': {
-                        'hosts': ['host1']
-                    },
-                    'spec': {
-                        'pool': 'foo',
-                    },
-                },
-                    'created': datetime_to_str(datetime_now())},
-                {'spec': {
-                    'service_type': 'nvmeof',
-                    'service_name': 'nvmeof.bar',
-                    'service_id': 'bar',
-                    'placement': {
-                        'hosts': ['host2']
-                    },
-                    'spec': {
-                        'pool': 'bar',
-                    },
-                },
-                    'created': datetime_to_str(datetime_now())}
-            ],
-            [True, True], ['default1', 'default2']),
-        (
-            [
-                {'spec':
-                    {
-                        'service_type': 'nvmeof',
-                        'service_name': 'nvmeof.foo',
-                        'service_id': 'foo',
-                        'placement': {
-                            'hosts': ['host1']
-                        },
-                        'spec': {
-                            'pool': 'foo',
-                        },
-                    },
-                 'created': datetime_to_str(datetime_now())},
-                {'spec':
-                    {
-                        'service_type': 'nvmeof',
-                        'service_name': 'nvmeof.bar',
-                        'service_id': 'bar',
-                        'placement': {
-                            'hosts': ['host2']
-                        },
-                        'spec': {
-                            'pool': 'bar',
-                            'group': 'bar'
-                        },
-                    },
-                 'created': datetime_to_str(datetime_now())},
-                {'spec':
-                    {
-                        'service_type': 'nvmeof',
-                        'service_name': 'nvmeof.testing_testing',
-                        'service_id': 'testing_testing',
-                        'placement': {
-                            'label': 'baz'
-                        },
-                        'spec': {
-                            'pool': 'baz',
-                        },
-                    },
-                 'created': datetime_to_str(datetime_now())}
-            ], [True, False, True], ['default1', 'bar', 'default2']
-        ),
-    ]
-)
-@mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]'))
-def test_migrate_nvmeof_spec(
-    cephadm_module: CephadmOrchestrator,
-    nvmeof_spec_store_entries,
-    should_migrate_specs,
-    expected_groups
-):
-    with with_host(cephadm_module, 'host1'):
-        for spec in nvmeof_spec_store_entries:
-            cephadm_module.set_store(
-                SPEC_STORE_PREFIX + spec['spec']['service_name'],
-                json.dumps(spec, sort_keys=True),
-            )
-
-        # make sure nvmeof_migration_queue is populated accordingly
-        cephadm_module.migration_current = 1
-        cephadm_module.spec_store.load()
-        ls = json.loads(cephadm_module.get_store('nvmeof_migration_queue'))
-        assert all([s['spec']['service_type'] == 'nvmeof' for s in ls])
-        # shortcut nvmeof_migration_queue loading by directly assigning
-        # ls output to nvmeof_migration_queue list
-        cephadm_module.migration.nvmeof_migration_queue = ls
-
-        # skip other migrations and go directly to 7_8 migration (nvmeof spec)
-        cephadm_module.migration_current = 7
-        cephadm_module.migration.migrate()
-        assert cephadm_module.migration_current == LAST_MIGRATION
-
-        print(cephadm_module.spec_store.all_specs)
-
-        for i in range(len(nvmeof_spec_store_entries)):
-            nvmeof_spec_store_entry = nvmeof_spec_store_entries[i]
-            should_migrate = should_migrate_specs[i]
-            expected_group = expected_groups[i]
-
-            service_name = nvmeof_spec_store_entry['spec']['service_name']
-            service_id = nvmeof_spec_store_entry['spec']['service_id']
-            placement = nvmeof_spec_store_entry['spec']['placement']
-            pool = nvmeof_spec_store_entry['spec']['spec']['pool']
-
-            if should_migrate:
-                assert service_name in cephadm_module.spec_store.all_specs
-                nvmeof_spec = cephadm_module.spec_store.all_specs[service_name]
-                nvmeof_spec_json = nvmeof_spec.to_json()
-                assert nvmeof_spec_json['service_type'] == 'nvmeof'
-                assert nvmeof_spec_json['service_id'] == service_id
-                assert nvmeof_spec_json['service_name'] == service_name
-                assert nvmeof_spec_json['placement'] == placement
-                assert nvmeof_spec_json['spec']['pool'] == pool
-                # make sure spec has the "group" parameter set to "default<counter>"
-                assert nvmeof_spec_json['spec']['group'] == expected_group
-            else:
-                nvmeof_spec = cephadm_module.spec_store.all_specs[service_name]
-                nvmeof_spec_json = nvmeof_spec.to_json()
-                assert nvmeof_spec_json['spec']['pool'] == pool
-                # make sure spec has the "group" parameter still set to its old value
-                assert nvmeof_spec_json['spec']['group'] == expected_group
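For reference, the deleted test seeded the spec store by writing JSON entries directly into the mgr key/value store and then reloading. A condensed sketch of that seeding step, derived from the removed test body (SPEC_STORE_PREFIX, datetime_to_str, datetime_now, and cephadm_module come from the existing test module's imports and fixtures):

    entry = {
        'spec': {
            'service_type': 'nvmeof',
            'service_name': 'nvmeof.foo',
            'service_id': 'foo',
            'placement': {'hosts': ['host1']},
            'spec': {'pool': 'foo'},
        },
        'created': datetime_to_str(datetime_now()),
    }
    # Spec-store entries are keyed by SPEC_STORE_PREFIX + service_name;
    # load() then re-reads them into SpecStore.
    cephadm_module.set_store(
        SPEC_STORE_PREFIX + entry['spec']['service_name'],
        json.dumps(entry, sort_keys=True),
    )
    cephadm_module.spec_store.load()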

src/pybind/mgr/orchestrator/module.py

Lines changed: 2 additions & 0 deletions

@@ -1540,6 +1540,7 @@ def _iscsi_add(self,
     @_cli_write_command('orch daemon add nvmeof')
     def _nvmeof_add(self,
                     pool: str,
+                    group: str,
                     placement: Optional[str] = None,
                     inbuf: Optional[str] = None) -> HandleCommandResult:
         """Start nvmeof daemon(s)"""
@@ -1549,6 +1550,7 @@ def _nvmeof_add(self,
         spec = NvmeofServiceSpec(
             service_id='nvmeof',
             pool=pool,
+            group=group,
             placement=PlacementSpec.from_string(placement),
         )
         return self._daemon_add_misc(spec)
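With this hunk, group becomes a required argument of the orch daemon add nvmeof command (roughly ceph orch daemon add nvmeof <pool> <group> [<placement>]), and the handler forwards it into the spec. A sketch of the spec the handler builds, with hypothetical pool/group/placement values:

    from ceph.deployment.service_spec import NvmeofServiceSpec, PlacementSpec

    # Mirrors what _nvmeof_add() constructs from its CLI arguments.
    spec = NvmeofServiceSpec(
        service_id='nvmeof',
        pool='mypool',      # hypothetical pool name
        group='mygroup',    # hypothetical group name
        placement=PlacementSpec.from_string('host1'),
    )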

src/python-common/ceph/deployment/service_spec.py

Lines changed: 1 addition & 4 deletions

@@ -1395,7 +1395,7 @@ def __init__(self,
         #: ``name`` name of the nvmeof gateway
         self.name = name
         #: ``group`` name of the nvmeof gateway
-        self.group = group
+        self.group = group or ''
         #: ``enable_auth`` enables user authentication on nvmeof gateway
         self.enable_auth = enable_auth
         #: ``state_update_notify`` enables automatic update from OMAP in nvmeof gateway
@@ -1489,9 +1489,6 @@ def validate(self) -> None:
         if not self.pool:
             raise SpecValidationError('Cannot add NVMEOF: No Pool specified')
 
-        if not self.group:
-            raise SpecValidationError('Cannot add NVMEOF: No group specified')
-
         if self.enable_auth:
             if not all([self.server_key, self.server_cert, self.client_key,
                         self.client_cert, self.root_ca_cert]):
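After this change, validate() still insists on a pool but no longer checks group, which the constructor has already normalized to a string. Schematically (illustrative values; the raise in the hunk above suggests SpecValidationError is defined alongside the specs in this module):

    from ceph.deployment.service_spec import NvmeofServiceSpec, SpecValidationError

    try:
        NvmeofServiceSpec(service_id='gw').validate()
    except SpecValidationError as e:
        print(e)  # pool is still required: 'Cannot add NVMEOF: No Pool specified'

    NvmeofServiceSpec(service_id='gw', pool='rbd').validate()  # group may be omitted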
