Commit 51fa6ad

Merge pull request ceph#61408 from adk3798/cephadm-nvmeof-more-group-validation
mgr/cephadm: validate no duplicate groups and group vs. service id for nvmeof

Reviewed-by: John Mulligan <[email protected]>
2 parents: c3952c9 + 85d2633
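
For illustration, a minimal sketch of a spec that satisfies the new rules: the nvmeof service id must end with '.<group>', and no other nvmeof service may already use that group. This assumes the Ceph Python packages (ceph.deployment) are importable; the pool name 'rbd' and group name 'gw-group1' are placeholders.

from ceph.deployment.service_spec import NvmeofServiceSpec

# The service id now has to end with '.<group>' ('gw-group1' here),
# and the group must not already be used by another nvmeof service.
spec = NvmeofServiceSpec(
    service_id='rbd.gw-group1',
    pool='rbd',
    group='gw-group1',
)
print(spec.service_name())  # nvmeof.rbd.gw-group1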

3 files changed: +107 -15 lines changed

src/pybind/mgr/cephadm/inventory.py

Lines changed: 14 additions & 0 deletions
@@ -273,6 +273,20 @@ def __getitem__(self, name: str) -> SpecDescription:
                                self.spec_created[name],
                                self.spec_deleted.get(name, None))
 
+    def get_by_service_type(self, service_type: str) -> List[SpecDescription]:
+        matching_specs: List[SpecDescription] = []
+        for name, spec in self._specs.items():
+            if spec.service_type == service_type:
+                matching_specs.append(
+                    SpecDescription(
+                        spec,
+                        self._rank_maps.get(name),
+                        self.spec_created[name],
+                        self.spec_deleted.get(name, None)
+                    )
+                )
+        return matching_specs
+
     @property
     def active_specs(self) -> Mapping[str, ServiceSpec]:
         return {k: v for k, v in self._specs.items() if k not in self.spec_deleted}
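
As a quick, self-contained illustration of what the new get_by_service_type() helper returns, here is a toy model that can be run outside the mgr; MiniSpecStore, FakeSpec, and FakeSpecDescription are invented stand-ins that carry only the fields the method touches.

from typing import Dict, List, NamedTuple, Optional


class FakeSpec(NamedTuple):
    # Illustrative stand-in for ServiceSpec; only service_type/service_id are used here.
    service_type: str
    service_id: str


class FakeSpecDescription(NamedTuple):
    # Mirrors the shape of the SpecDescription records the real SpecStore returns.
    spec: FakeSpec
    rank_map: Optional[dict]
    created: str
    deleted: Optional[str]


class MiniSpecStore:
    """Toy model of SpecStore with just enough state for get_by_service_type()."""

    def __init__(self, specs: Dict[str, FakeSpec]) -> None:
        self._specs = specs
        self._rank_maps: Dict[str, dict] = {}
        self.spec_created = {name: '2025-01-01T00:00:00' for name in specs}
        self.spec_deleted: Dict[str, str] = {}

    def get_by_service_type(self, service_type: str) -> List[FakeSpecDescription]:
        # Same filtering idea as the new inventory.py helper above.
        return [
            FakeSpecDescription(spec,
                                self._rank_maps.get(name),
                                self.spec_created[name],
                                self.spec_deleted.get(name, None))
            for name, spec in self._specs.items()
            if spec.service_type == service_type
        ]


store = MiniSpecStore({
    'nvmeof.poolA.group1': FakeSpec('nvmeof', 'poolA.group1'),
    'mon': FakeSpec('mon', 'mon'),
})
print([d.spec.service_id for d in store.get_by_service_type('nvmeof')])  # ['poolA.group1']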

src/pybind/mgr/cephadm/module.py

Lines changed: 23 additions & 9 deletions
@@ -33,12 +33,15 @@
 from ceph.cephadm.images import DefaultImages
 from ceph.deployment import inventory
 from ceph.deployment.drive_group import DriveGroupSpec
-from ceph.deployment.service_spec import \
-    ServiceSpec, PlacementSpec, \
-    HostPlacementSpec, IngressSpec, \
-    TunedProfileSpec, \
-    MgmtGatewaySpec, \
-    NvmeofServiceSpec
+from ceph.deployment.service_spec import (
+    ServiceSpec,
+    PlacementSpec,
+    HostPlacementSpec,
+    IngressSpec,
+    TunedProfileSpec,
+    MgmtGatewaySpec,
+    NvmeofServiceSpec,
+)
 from ceph.utils import str_to_datetime, datetime_to_str, datetime_now
 from cephadm.serve import CephadmServe
 from cephadm.services.cephadmservice import CephadmDaemonDeploySpec

@@ -3398,13 +3401,24 @@ def _apply_service_spec(self, spec: ServiceSpec) -> str:
             raise OrchestratorError("The 'oauth2-proxy' service depends on the 'mgmt-gateway' service, but it is not configured.")
 
         if spec.service_type == 'nvmeof':
-            spec = cast(NvmeofServiceSpec, spec)
-            assert spec.pool is not None, "Pool cannot be None for nvmeof services"
+            nvmeof_spec = cast(NvmeofServiceSpec, spec)
+            assert nvmeof_spec.pool is not None, "Pool cannot be None for nvmeof services"
+            assert nvmeof_spec.service_id is not None  # for mypy
             try:
-                self._check_pool_exists(spec.pool, spec.service_name())
+                self._check_pool_exists(nvmeof_spec.pool, nvmeof_spec.service_name())
             except OrchestratorError as e:
                 self.log.debug(f"{e}")
                 raise
+            nvmeof_spec = cast(NvmeofServiceSpec, spec)
+            assert nvmeof_spec.service_id is not None  # for mypy
+            if nvmeof_spec.group and not nvmeof_spec.service_id.endswith(nvmeof_spec.group):
+                raise OrchestratorError("The 'nvmeof' service id/name must end with '.<nvmeof-group-name>'. Found "
+                                        f"group name '{nvmeof_spec.group}' and service id '{nvmeof_spec.service_id}'")
+            for sspec in [s.spec for s in self.spec_store.get_by_service_type('nvmeof')]:
+                nspec = cast(NvmeofServiceSpec, sspec)
+                if nvmeof_spec.group == nspec.group:
+                    raise OrchestratorError(f"Cannot create nvmeof service with group {nvmeof_spec.group}. That group is already "
+                                            f"being used by the service {nspec.service_name()}")
 
         if spec.placement.count is not None:
             if spec.service_type in ['mon', 'mgr']:
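
To experiment with the two new checks outside a running mgr, here is a minimal self-contained sketch of the same logic; FakeNvmeofSpec, validate_nvmeof_group, and ValueError are illustrative stand-ins for NvmeofServiceSpec, the inline validation in _apply_service_spec, and OrchestratorError.

from typing import List, NamedTuple


class FakeNvmeofSpec(NamedTuple):
    # Illustrative stand-in with only the fields the checks use.
    service_id: str
    group: str
    pool: str

    def service_name(self) -> str:
        return f'nvmeof.{self.service_id}'


def validate_nvmeof_group(new: FakeNvmeofSpec, existing: List[FakeNvmeofSpec]) -> None:
    # Rule 1: the service id must end with the group name ('.<group>' suffix).
    if new.group and not new.service_id.endswith(new.group):
        raise ValueError(
            "The 'nvmeof' service id/name must end with '.<nvmeof-group-name>'. "
            f"Found group name '{new.group}' and service id '{new.service_id}'")
    # Rule 2: no two nvmeof services may use the same group.
    for spec in existing:
        if spec.group == new.group:
            raise ValueError(
                f"Cannot create nvmeof service with group {new.group}. That group is "
                f"already being used by the service {spec.service_name()}")


deployed = [FakeNvmeofSpec('poolA.group1', 'group1', 'poolA')]
validate_nvmeof_group(FakeNvmeofSpec('poolB.group2', 'group2', 'poolB'), deployed)  # passes
try:
    validate_nvmeof_group(FakeNvmeofSpec('poolC.group1', 'group1', 'poolC'), deployed)
except ValueError as exc:
    print(exc)  # duplicate group is rejected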

src/pybind/mgr/cephadm/tests/test_services.py

Lines changed: 70 additions & 6 deletions
@@ -339,11 +339,11 @@ def test_nvmeof_dashboard_config(self, mock_resolve_ip):
     @patch("cephadm.module.CephadmOrchestrator.get_unique_name")
     def test_nvmeof_config(self, _get_name, _run_cephadm, cephadm_module: CephadmOrchestrator):
 
-        nvmeof_daemon_id = 'testpool.test.qwert'
         pool = 'testpool'
+        group = 'mygroup'
+        nvmeof_daemon_id = f'{pool}.{group}.test.qwert'
         tgt_cmd_extra_args = '--cpumask=0xFF --msg-mempool-size=524288'
         default_port = 5500
-        group = 'mygroup'
         _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
         _get_name.return_value = nvmeof_daemon_id
 

@@ -427,7 +427,7 @@ def test_nvmeof_config(self, _get_name, _run_cephadm, cephadm_module: CephadmOrc
         timeout = 1.0\n"""
 
         with with_host(cephadm_module, 'test'):
-            with with_service(cephadm_module, NvmeofServiceSpec(service_id=pool,
+            with with_service(cephadm_module, NvmeofServiceSpec(service_id=f'{pool}.{group}',
                                                                 tgt_cmd_extra_args=tgt_cmd_extra_args,
                                                                 group=group,
                                                                 pool=pool)):

@@ -438,14 +438,14 @@ def test_nvmeof_config(self, _get_name, _run_cephadm, cephadm_module: CephadmOrc
                     [],
                     stdin=json.dumps({
                         "fsid": "fsid",
-                        "name": "nvmeof.testpool.test.qwert",
+                        "name": "nvmeof.testpool.mygroup.test.qwert",
                         "image": "",
                         "deploy_arguments": [],
                         "params": {
                             "tcp_ports": [5500, 4420, 8009, 10008]
                         },
                         "meta": {
-                            "service_name": "nvmeof.testpool",
+                            "service_name": "nvmeof.testpool.mygroup",
                             "ports": [5500, 4420, 8009, 10008],
                             "ip": None,
                             "deployed_by": [],

@@ -456,7 +456,7 @@ def test_nvmeof_config(self, _get_name, _run_cephadm, cephadm_module: CephadmOrc
                         },
                         "config_blobs": {
                             "config": "",
-                            "keyring": "[client.nvmeof.testpool.test.qwert]\nkey = None\n",
+                            "keyring": "[client.nvmeof.testpool.mygroup.test.qwert]\nkey = None\n",
                             "files": {
                                 "ceph-nvmeof.conf": nvmeof_gateway_conf
                             }

@@ -466,6 +466,70 @@ def test_nvmeof_config(self, _get_name, _run_cephadm, cephadm_module: CephadmOrc
                 use_current_daemon_image=False,
             )
 
+    @patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
+    def test_validate_no_group_duplicate_on_apply(self, cephadm_module: CephadmOrchestrator):
+        nvmeof_spec_group1 = NvmeofServiceSpec(
+            service_id='testpool.testgroup',
+            group='testgroup',
+            pool='testpool'
+        )
+        nvmeof_spec_also_group1 = NvmeofServiceSpec(
+            service_id='testpool2.testgroup',
+            group='testgroup',
+            pool='testpool2'
+        )
+        with with_host(cephadm_module, 'test'):
+            out = cephadm_module._apply_service_spec(nvmeof_spec_group1)
+            assert out == 'Scheduled nvmeof.testpool.testgroup update...'
+            nvmeof_specs = cephadm_module.spec_store.get_by_service_type('nvmeof')
+            assert len(nvmeof_specs) == 1
+            assert nvmeof_specs[0].spec.service_name() == 'nvmeof.testpool.testgroup'
+            with pytest.raises(
+                OrchestratorError,
+                match='Cannot create nvmeof service with group testgroup. That group is already '
+                      'being used by the service nvmeof.testpool.testgroup'
+            ):
+                cephadm_module._apply_service_spec(nvmeof_spec_also_group1)
+            assert len(cephadm_module.spec_store.get_by_service_type('nvmeof')) == 1
+
+    @patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
+    def test_validate_service_id_matches_group_on_apply(self, cephadm_module: CephadmOrchestrator):
+        matching_nvmeof_spec_group_service_id = NvmeofServiceSpec(
+            service_id='pool1.right_group',
+            group='right_group',
+            pool='pool1'
+        )
+        mismatch_nvmeof_spec_group_service_id = NvmeofServiceSpec(
+            service_id='pool2.wrong_group',
+            group='right_group',
+            pool='pool2'
+        )
+        matching_nvmeof_spec_group_service_id_with_dot = NvmeofServiceSpec(
+            service_id='pool3.right.group',
+            group='right.group',
+            pool='pool3'
+        )
+        mismatch_nvmeof_spec_group_service_id_with_dot = NvmeofServiceSpec(
+            service_id='pool4.wrong.group',
+            group='right.group',
+            pool='pool4'
+        )
+        with with_host(cephadm_module, 'test'):
+            cephadm_module._apply_service_spec(matching_nvmeof_spec_group_service_id)
+            with pytest.raises(
+                OrchestratorError,
+                match='The \'nvmeof\' service id/name must end with \'.<nvmeof-group-name>\'. Found '
+                      'group name \'right_group\' and service id \'pool2.wrong_group\''
+            ):
+                cephadm_module._apply_service_spec(mismatch_nvmeof_spec_group_service_id)
+            cephadm_module._apply_service_spec(matching_nvmeof_spec_group_service_id_with_dot)
+            with pytest.raises(
+                OrchestratorError,
+                match='The \'nvmeof\' service id/name must end with \'.<nvmeof-group-name>\'. Found '
+                      'group name \'right.group\' and service id \'pool4.wrong.group\''
+            ):
+                cephadm_module._apply_service_spec(mismatch_nvmeof_spec_group_service_id_with_dot)
+
 
 class TestMonitoring:
     def _get_config(self, url: str) -> str:
