
Commit 6fc1a6d

Merge pull request ceph#61732 from rishabh-d-dave/mgr-vol-pools

mgr/vol: allow passing pool names to "fs volume create" cmd

Reviewed-by: Venky Shankar <[email protected]>

2 parents 5534356 + a74dbbe commit 6fc1a6d

File tree: 8 files changed, +229 -44 lines

PendingReleaseNotes

Lines changed: 5 additions & 0 deletions
@@ -103,6 +103,11 @@
   `PutBucketPolicy`. Additionally, the root user will always have access to modify
   the bucket policy, even if the current policy explicitly denies access.

+* CephFS: The ``ceph fs volume create`` command now allows users to pass
+  metadata and data pool names to be used for creating the volume. If only one
+  of the two names is passed, or if either named pool is non-empty, the command
+  aborts.
+
 >=19.2.1

 * CephFS: Command `fs subvolume create` now allows tagging subvolumes through option
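
As an aside for readers of this note, the abort behaviour can be checked against a test cluster with a short script along these lines. This is only a sketch, not part of the commit: it assumes a reachable cluster with the ceph CLI on the PATH, and the pool and volume names (existing-data-pool, vol1) are hypothetical.

    # Sketch only: confirm that passing just one pool name makes the command abort.
    import subprocess

    subprocess.run(['ceph', 'osd', 'pool', 'create', 'existing-data-pool'], check=True)

    # Only the data pool name is given, so a non-zero exit is expected, with an
    # error along the lines of "metadata pool name isn't passed" (see the mgr
    # change further down in this commit).
    result = subprocess.run(
        ['ceph', 'fs', 'volume', 'create', 'vol1', '--data-pool', 'existing-data-pool'],
        capture_output=True, text=True)
    assert result.returncode != 0, 'expected the command to abort'
    print(result.stderr.strip())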

doc/cephfs/fs-volumes.rst

Lines changed: 8 additions & 5 deletions
@@ -44,11 +44,14 @@ Create a volume by running the following command:

 .. prompt:: bash #

-   ceph fs volume create <vol_name> [placement]
-
-This creates a CephFS file system and its data and metadata pools. This command
-can also deploy MDS daemons for the filesystem using a Ceph Manager orchestrator
-module (for example Rook). See :doc:`/mgr/orchestrator`.
+   ceph fs volume create <vol_name> [placement] [--data-pool <data-pool-name>] [--meta-pool <metadata-pool-name>]
+
+This creates a CephFS file system and its data and metadata pools. Alternatively,
+if the data pool and/or metadata pool needed for creating a CephFS volume
+already exist, these pool names can be passed to this command so that the
+volume is created using these existing pools. This command can also deploy MDS
+daemons for the filesystem using a Ceph Manager orchestrator module (for
+example Rook). See :doc:`/mgr/orchestrator`.

 ``<vol_name>`` is the volume name (an arbitrary string). ``[placement]`` is an
 optional string that specifies the :ref:`orchestrator-cli-placement-spec` for
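
A concrete version of the success path described above might look like the following. This is an illustrative sketch, not part of the commit: it assumes a running cluster with the ceph CLI available, and the names myvol, myvol-meta and myvol-data are placeholders.

    # Sketch only: pre-create empty pools, create the volume on top of them, and
    # check via "ceph fs ls" that the volume really uses the pre-created pools.
    import json
    import subprocess

    def ceph(*args):
        # run a ceph CLI command and return its stdout as text
        return subprocess.run(('ceph',) + args, check=True,
                              capture_output=True, text=True).stdout

    ceph('osd', 'pool', 'create', 'myvol-meta')   # placeholder metadata pool
    ceph('osd', 'pool', 'create', 'myvol-data')   # placeholder data pool
    ceph('fs', 'volume', 'create', 'myvol',
         '--meta-pool', 'myvol-meta', '--data-pool', 'myvol-data')

    # Confirm the new volume uses the pre-created pools.
    for fs in json.loads(ceph('fs', 'ls', '--format', 'json')):
        if fs['name'] == 'myvol':
            assert fs['metadata_pool'] == 'myvol-meta'
            assert 'myvol-data' in fs['data_pools']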

doc/cephfs/multifs.rst

Lines changed: 6 additions & 0 deletions
@@ -29,6 +29,12 @@ support the new file system. The deployment technology used, e.g. cephadm, will
 also configure the MDS affinity (see: :ref:`mds-join-fs`) of new MDS daemons to
 operate the new file system.

+If the data and metadata pools for the volume are already present, the names of
+these pools can be passed as follows::
+
+    ceph fs volume create <vol-name> --meta-pool <meta-pool-name> --data-pool <data-pool-name>
+
+

 Securing access
 ---------------

qa/suites/fs/volumes/tasks/volumes/test/basic.yaml

Lines changed: 1 addition & 0 deletions
@@ -3,6 +3,7 @@ tasks:
     fail_on_skip: false
     modules:
       - tasks.cephfs.test_volumes.TestVolumes
+      - tasks.cephfs.test_volumes.TestVolumeCreate
       - tasks.cephfs.test_volumes.TestSubvolumeGroups
      - tasks.cephfs.test_volumes.TestSubvolumes
      - tasks.cephfs.test_subvolume

qa/tasks/cephfs/test_volumes.py

Lines changed: 157 additions & 26 deletions
@@ -454,32 +454,6 @@ def tearDown(self):

 class TestVolumes(TestVolumesHelper):
     """Tests for FS volume operations."""
-    def test_volume_create(self):
-        """
-        That the volume can be created and then cleans up
-        """
-        volname = self._gen_vol_name()
-        self._fs_cmd("volume", "create", volname)
-        volumels = json.loads(self._fs_cmd("volume", "ls"))
-
-        if not (volname in ([volume['name'] for volume in volumels])):
-            raise RuntimeError("Error creating volume '{0}'".format(volname))
-
-        # check that the pools were created with the correct config
-        pool_details = json.loads(self._raw_cmd("osd", "pool", "ls", "detail", "--format=json"))
-        pool_flags = {}
-        for pool in pool_details:
-            pool_flags[pool["pool_id"]] = pool["flags_names"].split(",")
-
-        volume_details = json.loads(self._fs_cmd("get", volname, "--format=json"))
-        for data_pool_id in volume_details['mdsmap']['data_pools']:
-            self.assertIn("bulk", pool_flags[data_pool_id])
-        meta_pool_id = volume_details['mdsmap']['metadata_pool']
-        self.assertNotIn("bulk", pool_flags[meta_pool_id])
-
-        # clean up
-        self._fs_cmd("volume", "rm", volname, "--yes-i-really-mean-it")
-
     def test_volume_ls(self):
         """
         That the existing and the newly created volumes can be listed and
@@ -678,6 +652,163 @@ def test_volume_info_with_human_readable_flag_without_subvolumegroup(self):
                          " of subvolumegroup")


+class TestVolumeCreate(TestVolumesHelper):
+    '''
+    Contains tests for the "ceph fs volume create" command.
+    '''
+
+    def test_volume_create(self):
+        """
+        That the volume can be created and then cleans up
+        """
+        volname = self._gen_vol_name()
+        self._fs_cmd("volume", "create", volname)
+        volumels = json.loads(self._fs_cmd("volume", "ls"))
+
+        if not (volname in ([volume['name'] for volume in volumels])):
+            raise RuntimeError("Error creating volume '{0}'".format(volname))
+
+        # check that the pools were created with the correct config
+        pool_details = json.loads(self._raw_cmd("osd", "pool", "ls", "detail", "--format=json"))
+        pool_flags = {}
+        for pool in pool_details:
+            pool_flags[pool["pool_id"]] = pool["flags_names"].split(",")
+
+        volume_details = json.loads(self._fs_cmd("get", volname, "--format=json"))
+        for data_pool_id in volume_details['mdsmap']['data_pools']:
+            self.assertIn("bulk", pool_flags[data_pool_id])
+        meta_pool_id = volume_details['mdsmap']['metadata_pool']
+        self.assertNotIn("bulk", pool_flags[meta_pool_id])
+
+        # clean up
+        self._fs_cmd("volume", "rm", volname, "--yes-i-really-mean-it")
+
+    def test_with_both_pool_names(self):
+        '''
+        Test that the "ceph fs volume create" command accepts a metadata pool
+        name and a data pool name as arguments and uses these pools to create
+        a new volume.
+        '''
+        v = self._gen_vol_name()
+
+        meta = 'meta4521'
+        data = 'data4521'
+        self.run_ceph_cmd(f'osd pool create {meta}')
+        self.run_ceph_cmd(f'osd pool create {data}')
+
+        self.run_ceph_cmd(f'fs volume create {v} --data-pool {data} '
+                          f'--meta-pool {meta}')
+
+        outer_break_ = False
+        # once in a few runs "fs ls" output didn't have the volume created
+        # above; giving it a bit of time should sort that out.
+        with safe_while(tries=3, sleep=1) as proceed:
+            while proceed():
+                o = self.get_ceph_cmd_stdout('fs ls --format json-pretty')
+                o = json.loads(o)
+                for d in o:
+                    if d['name'] == v:
+                        self.assertEqual(meta, d['metadata_pool'])
+                        self.assertIn(data, d['data_pools'])
+                        outer_break_ = True
+                        break
+                    else:
+                        continue
+                if outer_break_:
+                    break
+
+    def test_with_data_pool_name_only(self):
+        '''
+        Test that when only the data pool name is passed to the "ceph fs
+        volume create" command, the command aborts with an error complaining
+        that the metadata pool name was not passed.
+        '''
+        v = self._gen_vol_name()
+
+        data = 'data4521'
+        self.run_ceph_cmd(f'osd pool create {data}')
+
+        self.negtest_ceph_cmd(f'fs volume create {v} --data-pool {data}',
+                              retval=errno.EINVAL,
+                              errmsgs=('metadata pool name isn\'t passed'))
+
+        o = self.get_ceph_cmd_stdout('fs ls --format json-pretty')
+        o = json.loads(o)
+        for d in o:
+            if v == d['name']:
+                raise RuntimeError(f'volume "{v}" was found in "fs ls" output')
+            else:
+                pass
+
+    def test_with_metadata_pool_name_only(self):
+        '''
+        Test that when only the metadata pool name is passed to the "ceph fs
+        volume create" command, the command aborts with an error complaining
+        that the data pool name was not passed.
+        '''
+        v = self._gen_vol_name()
+        meta = 'meta4521'
+        self.run_ceph_cmd(f'osd pool create {meta}')
+
+        self.negtest_ceph_cmd(f'fs volume create {v} --meta-pool {meta}',
+                              retval=errno.EINVAL,
+                              errmsgs=('data pool name isn\'t passed'))
+
+        o = self.get_ceph_cmd_stdout('fs ls --format json-pretty')
+        o = json.loads(o)
+        for d in o:
+            if v == d['name']:
+                raise RuntimeError(f'volume "{v}" was found in "fs ls" output')
+            else:
+                pass
+
+    def test_with_nonempty_meta_pool_name(self):
+        '''
+        Test that when the metadata pool name passed to the "ceph fs volume
+        create" command is a non-empty pool, the command aborts with an
+        appropriate error number and error message.
+        '''
+        v = self._gen_vol_name()
+        meta = f'cephfs.{v}.meta'
+        data = f'cephfs.{v}.data'
+
+        self.run_ceph_cmd(f'osd pool create {meta}')
+        self.run_ceph_cmd(f'osd pool create {data}')
+        self.mon_manager.controller.run(args='echo somedata > file1')
+        self.mon_manager.do_rados(['put', 'obj1', 'file1', '--pool', meta])
+        # XXX
+        log.info('sleeping for 10 secs for stats to be generated so that "fs '
+                 'new" command, which is called by "fs volume create" command, '
+                 'can detect that the metadata pool is not empty and therefore '
+                 'abort with an error.')
+        time.sleep(10)
+
+        try:
+            # actual test...
+            self.negtest_ceph_cmd(f'fs volume create {v} --meta-pool {meta} '
+                                  f'--data-pool {data}',
+                                  retval=errno.EINVAL,
+                                  errmsgs=('already contains some objects. use '
+                                           'an empty pool instead'))
+
+            # being extra sure that volume wasn't created
+            o = self.get_ceph_cmd_stdout('fs ls --format json-pretty')
+            o = json.loads(o)
+            for d in o:
+                if v == d['name']:
+                    raise RuntimeError(f'volume "{v}" was found in "fs ls" output')
+                else:
+                    pass
+        # regardless of how this test goes, ensure that these leftover pools
+        # are deleted. else, they might mess up the teardown or setup code
+        # somehow.
+        finally:
+            self.run_ceph_cmd(f'osd pool rm {meta} {meta} '
+                              '--yes-i-really-really-mean-it')
+            self.run_ceph_cmd(f'osd pool rm {data} {data} '
+                              '--yes-i-really-really-mean-it')
+
+
 class TestRenameCmd(TestVolumesHelper):

     def test_volume_rename(self):

src/pybind/mgr/volumes/fs/operations/volume.py

Lines changed: 44 additions & 9 deletions
@@ -72,22 +72,57 @@ def get_pool_ids(mgr, volname):
         return None, None
     return metadata_pool_id, data_pool_ids

-def create_volume(mgr, volname, placement):
-    """
-    create volume (pool, filesystem and mds)
-    """
+def create_fs_pools(mgr, volname, data_pool, metadata_pool):
+    '''
+    Generate names of the metadata pool and the data pool and create these pools.
+
+    This method returns a list where the first member represents whether or
+    not this method ran successfully.
+    '''
+    assert not data_pool and not metadata_pool
+
     metadata_pool, data_pool = gen_pool_names(volname)
-    # create pools
+
     r, outb, outs = create_pool(mgr, metadata_pool)
     if r != 0:
-        return r, outb, outs
+        return [False, r, outb, outs]
+
     # default to a bulk pool for data. In case autoscaling has been disabled
-    # for the cluster with `ceph osd pool set noautoscale`, this will have no effect.
+    # for the cluster with `ceph osd pool set noautoscale`, this will have
+    # no effect.
     r, outb, outs = create_pool(mgr, data_pool, bulk=True)
+    # cleanup
     if r != 0:
-        #cleanup
         remove_pool(mgr, metadata_pool)
-        return r, outb, outs
+        return [False, r, outb, outs]
+
+    return [True, data_pool, metadata_pool]
+
+def create_volume(mgr, volname, placement, data_pool, metadata_pool):
+    """
+    Create the volume, create the pools if pool names are not passed, and
+    create MDS daemons based on the placement passed.
+    """
+    # Although this first case is technically redundant (pool names passed by
+    # the user must exist already), it is kept here so that future readers
+    # know that this case was considered and not missed by chance.
+    if data_pool and metadata_pool:
+        pass
+    elif not data_pool and metadata_pool:
+        errmsg = 'data pool name isn\'t passed'
+        return -errno.EINVAL, '', errmsg
+    elif data_pool and not metadata_pool:
+        errmsg = 'metadata pool name isn\'t passed'
+        return -errno.EINVAL, '', errmsg
+    elif not data_pool and not metadata_pool:
+        retval = create_fs_pools(mgr, volname, data_pool, metadata_pool)
+        success = retval.pop(0)
+        if success:
+            data_pool, metadata_pool = retval
+        else:
+            return retval
+
     # create filesystem
     r, outb, outs = create_filesystem(mgr, volname, metadata_pool, data_pool)
     if r != 0:
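
Stripped of the mgr plumbing, the argument handling that create_volume gains in this hunk boils down to the branching below. This is a simplified standalone sketch for illustration: resolve_pools and create_pools_somehow are made-up names and pool creation is stubbed out; only the three cases and the error strings mirror the code above.

    # Simplified sketch (not the actual mgr code) of the pool-argument branching.
    import errno

    def resolve_pools(volname, data_pool, metadata_pool, create_pools_somehow):
        # Returns (rc, data_pool, metadata_pool, errmsg).
        if data_pool and metadata_pool:
            # both names passed: use the existing pools as-is
            return 0, data_pool, metadata_pool, ''
        if metadata_pool and not data_pool:
            return -errno.EINVAL, None, None, "data pool name isn't passed"
        if data_pool and not metadata_pool:
            return -errno.EINVAL, None, None, "metadata pool name isn't passed"
        # neither passed: generate names and create both pools
        data_pool, metadata_pool = create_pools_somehow(volname)
        return 0, data_pool, metadata_pool, ''

    # tiny demonstration with a stubbed pool creator
    rc, d, m, err = resolve_pools('vol1', 'data4521', None,
                                  lambda v: (f'cephfs.{v}.data', f'cephfs.{v}.meta'))
    assert rc == -errno.EINVAL and err == "metadata pool name isn't passed"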

src/pybind/mgr/volumes/fs/volume.py

Lines changed: 2 additions & 2 deletions
@@ -120,8 +120,8 @@ def wrapper(self, *args, **kwargs):

     ### volume operations -- create, rm, ls

-    def create_fs_volume(self, volname, placement):
-        return create_volume(self.mgr, volname, placement)
+    def create_fs_volume(self, volname, placement, data_pool, meta_pool):
+        return create_volume(self.mgr, volname, placement, data_pool, meta_pool)

     def delete_fs_volume(self, volname, confirm):
         if confirm != "--yes-i-really-mean-it":

src/pybind/mgr/volumes/module.py

Lines changed: 6 additions & 2 deletions
@@ -49,7 +49,9 @@ class Module(orchestrator.OrchestratorClientMixin, MgrModule):
         {
             'cmd': 'fs volume create '
                    f'name=name,type=CephString,goodchars={goodchars} '
-                   'name=placement,type=CephString,req=false ',
+                   'name=placement,type=CephString,req=false '
+                   f'name=meta_pool,type=CephString,goodchars={goodchars},req=false '
+                   f'name=data_pool,type=CephString,goodchars={goodchars},req=false ',
             'desc': "Create a CephFS volume",
             'perm': 'rw'
         },
@@ -679,7 +681,9 @@ def handle_command(self, inbuf, cmd):
     def _cmd_fs_volume_create(self, inbuf, cmd):
         vol_id = cmd['name']
         placement = cmd.get('placement', '')
-        return self.vc.create_fs_volume(vol_id, placement)
+        data_pool = cmd.get('data_pool', None)
+        meta_pool = cmd.get('meta_pool', None)
+        return self.vc.create_fs_volume(vol_id, placement, data_pool, meta_pool)

     @mgr_cmd_wrap
     def _cmd_fs_volume_rm(self, inbuf, cmd):
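
To trace the new arguments end to end: the MON hands _cmd_fs_volume_create a parsed cmd dictionary, and the optional meta_pool/data_pool keys are present only when the flags were supplied, so cmd.get(..., None) falls back to None otherwise. The dictionaries below are illustrative only, not captured from a live cluster.

    # Illustrative only: how the handler's cmd.get() calls behave with and
    # without the new optional pool-name arguments.
    cmd_with_pools = {'prefix': 'fs volume create', 'name': 'vol1',
                      'meta_pool': 'meta', 'data_pool': 'data'}
    cmd_without_pools = {'prefix': 'fs volume create', 'name': 'vol2'}

    for cmd in (cmd_with_pools, cmd_without_pools):
        vol_id = cmd['name']
        placement = cmd.get('placement', '')
        data_pool = cmd.get('data_pool', None)
        meta_pool = cmd.get('meta_pool', None)
        print(vol_id, repr(placement), data_pool, meta_pool)
    # prints:
    #   vol1 '' data meta
    #   vol2 '' None None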
