@@ -454,32 +454,6 @@ def tearDown(self):
 
 class TestVolumes(TestVolumesHelper):
     """Tests for FS volume operations."""
-    def test_volume_create(self):
-        """
-        That the volume can be created and then cleans up
-        """
-        volname = self._gen_vol_name()
-        self._fs_cmd("volume", "create", volname)
-        volumels = json.loads(self._fs_cmd("volume", "ls"))
-
-        if not (volname in ([volume['name'] for volume in volumels])):
-            raise RuntimeError("Error creating volume '{0}'".format(volname))
-
-        # check that the pools were created with the correct config
-        pool_details = json.loads(self._raw_cmd("osd", "pool", "ls", "detail", "--format=json"))
-        pool_flags = {}
-        for pool in pool_details:
-            pool_flags[pool["pool_id"]] = pool["flags_names"].split(",")
-
-        volume_details = json.loads(self._fs_cmd("get", volname, "--format=json"))
-        for data_pool_id in volume_details['mdsmap']['data_pools']:
-            self.assertIn("bulk", pool_flags[data_pool_id])
-        meta_pool_id = volume_details['mdsmap']['metadata_pool']
-        self.assertNotIn("bulk", pool_flags[meta_pool_id])
-
-        # clean up
-        self._fs_cmd("volume", "rm", volname, "--yes-i-really-mean-it")
-
     def test_volume_ls(self):
         """
         That the existing and the newly created volumes can be listed and
@@ -678,6 +652,163 @@ def test_volume_info_with_human_readable_flag_without_subvolumegroup(self):
                          " of subvolumegroup")
 
 
+class TestVolumeCreate(TestVolumesHelper):
+    '''
+    Contains tests for the "ceph fs volume create" command.
+    '''
+
+    def test_volume_create(self):
+        """
+        That a volume can be created and then cleaned up.
+        """
+        volname = self._gen_vol_name()
+        self._fs_cmd("volume", "create", volname)
+        volumels = json.loads(self._fs_cmd("volume", "ls"))
+
+        if not (volname in ([volume['name'] for volume in volumels])):
+            raise RuntimeError("Error creating volume '{0}'".format(volname))
+
+        # check that the pools were created with the correct config
+        pool_details = json.loads(self._raw_cmd("osd", "pool", "ls", "detail", "--format=json"))
+        pool_flags = {}
+        for pool in pool_details:
+            pool_flags[pool["pool_id"]] = pool["flags_names"].split(",")
+
+        volume_details = json.loads(self._fs_cmd("get", volname, "--format=json"))
+        for data_pool_id in volume_details['mdsmap']['data_pools']:
+            self.assertIn("bulk", pool_flags[data_pool_id])
+        meta_pool_id = volume_details['mdsmap']['metadata_pool']
+        self.assertNotIn("bulk", pool_flags[meta_pool_id])
+
+        # clean up
+        self._fs_cmd("volume", "rm", volname, "--yes-i-really-mean-it")
+
+    def test_with_both_pool_names(self):
+        '''
+        Test that the "ceph fs volume create" command accepts a metadata pool
+        name and a data pool name as arguments and uses these pools to create
+        a new volume.
+        '''
+        v = self._gen_vol_name()
+
+        meta = 'meta4521'
+        data = 'data4521'
+        self.run_ceph_cmd(f'osd pool create {meta}')
+        self.run_ceph_cmd(f'osd pool create {data}')
+
+        self.run_ceph_cmd(f'fs volume create {v} --data-pool {data} '
+                          f'--meta-pool {meta}')
+
+        outer_break_ = False
+        # once in a few runs the "fs ls" output didn't include the volume
+        # created above; giving it a bit of time should sort that out.
+        with safe_while(tries=3, sleep=1) as proceed:
+            while proceed():
+                o = self.get_ceph_cmd_stdout('fs ls --format json-pretty')
+                o = json.loads(o)
+                for d in o:
+                    if d['name'] == v:
+                        self.assertEqual(meta, d['metadata_pool'])
+                        self.assertIn(data, d['data_pools'])
+                        outer_break_ = True
+                        break
+                    else:
+                        continue
+                if outer_break_:
+                    break
+
+    def test_with_data_pool_name_only(self):
+        '''
+        Test that when only the data pool name is passed to the "ceph fs
+        volume create" command, the command aborts with an error complaining
+        that the metadata pool name wasn't passed.
+        '''
+        v = self._gen_vol_name()
+
+        data = 'data4521'
+        self.run_ceph_cmd(f'osd pool create {data}')
+
+        self.negtest_ceph_cmd(f'fs volume create {v} --data-pool {data}',
+                              retval=errno.EINVAL,
+                              errmsgs=('metadata pool name isn\'t passed'))
+
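+        # the volume must not have been created; it should not show up in
+        # the "fs ls" output.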
+        o = self.get_ceph_cmd_stdout('fs ls --format json-pretty')
+        o = json.loads(o)
+        for d in o:
+            if v == d['name']:
+                raise RuntimeError(f'volume "{v}" was found in "fs ls" output')
+            else:
+                pass
+
+    def test_with_metadata_pool_name_only(self):
+        '''
+        Test that when only the metadata pool name is passed to the "ceph fs
+        volume create" command, the command aborts with an error complaining
+        that the data pool name wasn't passed.
+        '''
+        v = self._gen_vol_name()
+        meta = 'meta4521'
+        self.run_ceph_cmd(f'osd pool create {meta}')
+
+        self.negtest_ceph_cmd(f'fs volume create {v} --meta-pool {meta}',
+                              retval=errno.EINVAL,
+                              errmsgs=('data pool name isn\'t passed'))
+
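+        # the volume must not have been created; it should not show up in
+        # the "fs ls" output.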
+        o = self.get_ceph_cmd_stdout('fs ls --format json-pretty')
+        o = json.loads(o)
+        for d in o:
+            if v == d['name']:
+                raise RuntimeError(f'volume "{v}" was found in "fs ls" output')
+            else:
+                pass
+
+    def test_with_nonempty_meta_pool_name(self):
+        '''
+        Test that when the metadata pool name passed to the "ceph fs volume
+        create" command refers to a non-empty pool, the command aborts with
+        an appropriate error number and error message.
+        '''
+        v = self._gen_vol_name()
+        meta = f'cephfs.{v}.meta'
+        data = f'cephfs.{v}.data'
+
+        self.run_ceph_cmd(f'osd pool create {meta}')
+        self.run_ceph_cmd(f'osd pool create {data}')
+        self.mon_manager.controller.run(args='echo somedata > file1')
+        self.mon_manager.do_rados(['put', 'obj1', 'file1', '--pool', meta])
+        # XXX
+        log.info('sleeping for 10 secs for stats to be generated so that "fs '
+                 'new" command, which is called by "fs volume create" command, '
+                 'can detect that the metadata pool is not empty and therefore '
+                 'abort with an error.')
+        time.sleep(10)
+
+        try:
+            # actual test...
+            self.negtest_ceph_cmd(f'fs volume create {v} --meta-pool {meta} '
+                                  f'--data-pool {data}',
+                                  retval=errno.EINVAL,
+                                  errmsgs=('already contains some objects. use '
+                                           'an empty pool instead'))
+
+            # being extra sure that volume wasn't created
+            o = self.get_ceph_cmd_stdout('fs ls --format json-pretty')
+            o = json.loads(o)
+            for d in o:
+                if v == d['name']:
+                    raise RuntimeError(f'volume "{v}" was found in "fs ls" output')
+                else:
+                    pass
+        # regardless of how this test goes, ensure that these leftover pools
+        # are deleted. else, they might mess up the teardown or setup code
+        # somehow.
+        finally:
+            self.run_ceph_cmd(f'osd pool rm {meta} {meta} '
+                              '--yes-i-really-really-mean-it')
+            self.run_ceph_cmd(f'osd pool rm {data} {data} '
+                              '--yes-i-really-really-mean-it')
+
+
 class TestRenameCmd(TestVolumesHelper):
 
     def test_volume_rename(self):