@@ -339,11 +339,11 @@ def test_nvmeof_dashboard_config(self, mock_resolve_ip):
339339 @patch ("cephadm.module.CephadmOrchestrator.get_unique_name" )
340340 def test_nvmeof_config (self , _get_name , _run_cephadm , cephadm_module : CephadmOrchestrator ):
341341
342- nvmeof_daemon_id = 'testpool.test.qwert'
343342 pool = 'testpool'
343+ group = 'mygroup'
344+ nvmeof_daemon_id = f'{pool}.{group}.test.qwert'
344345 tgt_cmd_extra_args = '--cpumask=0xFF --msg-mempool-size=524288'
345346 default_port = 5500
346- group = 'mygroup'
347347 _run_cephadm .side_effect = async_side_effect (('{}' , '' , 0 ))
348348 _get_name .return_value = nvmeof_daemon_id
349349
@@ -427,7 +427,7 @@ def test_nvmeof_config(self, _get_name, _run_cephadm, cephadm_module: CephadmOrc
427427timeout = 1.0\n """
428428
429429 with with_host (cephadm_module , 'test' ):
430- with with_service (cephadm_module , NvmeofServiceSpec (service_id = pool ,
430+ with with_service (cephadm_module , NvmeofServiceSpec (service_id = f'{pool}.{group}' ,
431431 tgt_cmd_extra_args = tgt_cmd_extra_args ,
432432 group = group ,
433433 pool = pool )):
@@ -438,14 +438,14 @@ def test_nvmeof_config(self, _get_name, _run_cephadm, cephadm_module: CephadmOrc
438438 [],
439439 stdin = json .dumps ({
440440 "fsid" : "fsid" ,
441- "name" : "nvmeof.testpool.test.qwert" ,
441+ "name" : "nvmeof.testpool.mygroup.test.qwert" ,
442442 "image" : "" ,
443443 "deploy_arguments" : [],
444444 "params" : {
445445 "tcp_ports" : [5500 , 4420 , 8009 , 10008 ]
446446 },
447447 "meta" : {
448- "service_name" : "nvmeof.testpool" ,
448+ "service_name" : "nvmeof.testpool.mygroup" ,
449449 "ports" : [5500 , 4420 , 8009 , 10008 ],
450450 "ip" : None ,
451451 "deployed_by" : [],
@@ -456,7 +456,7 @@ def test_nvmeof_config(self, _get_name, _run_cephadm, cephadm_module: CephadmOrc
456456 },
457457 "config_blobs" : {
458458 "config" : "" ,
459- "keyring" : "[client.nvmeof.testpool.test.qwert]\n key = None\n " ,
459+ "keyring" : "[client.nvmeof.testpool.mygroup.test.qwert]\n key = None\n " ,
460460 "files" : {
461461 "ceph-nvmeof.conf" : nvmeof_gateway_conf
462462 }
@@ -466,6 +466,70 @@ def test_nvmeof_config(self, _get_name, _run_cephadm, cephadm_module: CephadmOrc
466466 use_current_daemon_image = False ,
467467 )
468468
469+ @patch ("cephadm.serve.CephadmServe._run_cephadm" , _run_cephadm ('{}' ))
470+ def test_validate_no_group_duplicate_on_apply (self , cephadm_module : CephadmOrchestrator ):
471+ nvmeof_spec_group1 = NvmeofServiceSpec (
472+ service_id = 'testpool.testgroup' ,
473+ group = 'testgroup' ,
474+ pool = 'testpool'
475+ )
476+ nvmeof_spec_also_group1 = NvmeofServiceSpec (
477+ service_id = 'testpool2.testgroup' ,
478+ group = 'testgroup' ,
479+ pool = 'testpool2'
480+ )
481+ with with_host (cephadm_module , 'test' ):
482+ out = cephadm_module ._apply_service_spec (nvmeof_spec_group1 )
483+ assert out == 'Scheduled nvmeof.testpool.testgroup update...'
484+ nvmeof_specs = cephadm_module .spec_store .get_by_service_type ('nvmeof' )
485+ assert len (nvmeof_specs ) == 1
486+ assert nvmeof_specs [0 ].spec .service_name () == 'nvmeof.testpool.testgroup'
487+ with pytest .raises (
488+ OrchestratorError ,
489+ match = 'Cannot create nvmeof service with group testgroup. That group is already '
490+ 'being used by the service nvmeof.testpool.testgroup'
491+ ):
492+ cephadm_module ._apply_service_spec (nvmeof_spec_also_group1 )
493+ assert len (cephadm_module .spec_store .get_by_service_type ('nvmeof' )) == 1
494+
495+ @patch ("cephadm.serve.CephadmServe._run_cephadm" , _run_cephadm ('{}' ))
496+ def test_validate_service_id_matches_group_on_apply (self , cephadm_module : CephadmOrchestrator ):
497+ matching_nvmeof_spec_group_service_id = NvmeofServiceSpec (
498+ service_id = 'pool1.right_group' ,
499+ group = 'right_group' ,
500+ pool = 'pool1'
501+ )
502+ mismatch_nvmeof_spec_group_service_id = NvmeofServiceSpec (
503+ service_id = 'pool2.wrong_group' ,
504+ group = 'right_group' ,
505+ pool = 'pool2'
506+ )
507+ matching_nvmeof_spec_group_service_id_with_dot = NvmeofServiceSpec (
508+ service_id = 'pool3.right.group' ,
509+ group = 'right.group' ,
510+ pool = 'pool3'
511+ )
512+ mismatch_nvmeof_spec_group_service_id_with_dot = NvmeofServiceSpec (
513+ service_id = 'pool4.wrong.group' ,
514+ group = 'right.group' ,
515+ pool = 'pool4'
516+ )
517+ with with_host (cephadm_module , 'test' ):
518+ cephadm_module ._apply_service_spec (matching_nvmeof_spec_group_service_id )
519+ with pytest .raises (
520+ OrchestratorError ,
521+ match = 'The \'nvmeof\' service id/name must end with \'.<nvmeof-group-name>\'. Found '
522+ 'group name \'right_group\' and service id \'pool2.wrong_group\''
523+ ):
524+ cephadm_module ._apply_service_spec (mismatch_nvmeof_spec_group_service_id )
525+ cephadm_module ._apply_service_spec (matching_nvmeof_spec_group_service_id_with_dot )
526+ with pytest .raises (
527+ OrchestratorError ,
528+ match = 'The \'nvmeof\' service id/name must end with \'.<nvmeof-group-name>\'. Found '
529+ 'group name \'right.group\' and service id \'pool4.wrong.group\''
530+ ):
531+ cephadm_module ._apply_service_spec (mismatch_nvmeof_spec_group_service_id_with_dot )
532+
469533
470534class TestMonitoring :
471535 def _get_config (self , url : str ) -> str :
0 commit comments