Commit 37bd496

Merge PR ceph#63214 into main

* refs/pull/63214/head:
  release note: add a note that "subvolume info" cmd output can also...
  doc/cephfs: update docs since "subvolume info" cmd output can also...
  qa/cephfs: add test to check clone source info's present in...
  mgr/vol: show clone source info in "subvolume info" cmd output
  mgr/vol: keep clone source info even after cloning is finished

Reviewed-by: Venky Shankar <[email protected]>
Reviewed-by: Kotresh Hiremath Ravishankar <[email protected]>
Reviewed-by: Neeraj Pratap Singh <[email protected]>

2 parents: e1caea2 + 37244a7

6 files changed: +120 −31 lines changed
PendingReleaseNotes

Lines changed: 8 additions & 0 deletions
@@ -147,6 +147,14 @@
   allowed. `rbd trash mv` command now behaves the same way as `rbd rm` in this
   scenario.
 
+* CephFS: If the subvolume name passed to the command `ceph fs subvolume info`
+  is a clone, the output now will also contain a "source" field that tells user
+  the name of source snapshot along with the name of volume, subvolume group and
+  subvolume in which the source snapshot is located. For clones created with
+  Tentacle or earlier release, the value of this field will be 'N/A'. Regular
+  subvolumes don't have a source subvolume and therefore the output for them
+  won't contain a "source" field regardless of the release.
+
 * RGW: Replication policies now validate permissions using `s3:ReplicateObject`,
   `s3:ReplicateDelete`, and `s3:ReplicateTags` for destination buckets. For source
   buckets, both `s3:GetObjectVersionForReplication` and `s3:GetObject(Version)`
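
Not part of the commit: a minimal sketch of how a script might consume the new field, assuming only the `ceph` CLI and the JSON output described in the release note above; the volume and subvolume names are invented.

# Sketch: inspect the clone-source info exposed by "ceph fs subvolume info".
# Assumes a reachable cluster; the names passed in are hypothetical.
import json
import subprocess

def subvolume_source(volname, subvolname, groupname=None):
    cmd = ['ceph', 'fs', 'subvolume', 'info', volname, subvolname]
    if groupname:
        cmd.append(groupname)
    info = json.loads(subprocess.check_output(cmd))

    source = info.get('source')
    if source is None:
        return 'not a clone (no "source" field)'
    if source == 'N/A':
        return 'clone created on Tentacle or earlier; source info was discarded'
    # otherwise a dict holding the source snapshot's volume/group/subvolume/snapshot
    return (f"cloned from snapshot {source['snapshot']} of "
            f"{source['volume']}/{source['group']}/{source['subvolume']}")

print(subvolume_source('vol1', 'clone1'))  # hypothetical names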

doc/cephfs/fs-volumes.rst

Lines changed: 4 additions & 0 deletions
@@ -480,6 +480,10 @@ The output format is JSON and contains the following fields.
 * ``features``: features supported by the subvolume
 * ``state``: current state of the subvolume
 * ``earmark``: earmark of the subvolume
+* ``source``: exists only if subvolume is a clone. It contains name of the
+  source snapshot and names of the volume, subvolume group and subvolume in
+  which the source snapshot is located. If the clone was created with Tentacle
+  or earlier release, value of this field is 'N/A'.
 
 If a subvolume has been removed but its snapshots have been retained, the
 output contains only the following fields.
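
Purely illustrative, not from the diff: given the documentation above, the parsed JSON for a clone might carry a nested "source" mapping shaped like the excerpt below; all values are invented, and "_nogroup" is the default-group placeholder used by the mgr code later in this commit.

# Hypothetical excerpt of parsed "ceph fs subvolume info" output for a clone.
subvol_info_excerpt = {
    "type": "clone",
    "source": {                   # present only for clones
        "volume": "vol1",
        "group": "_nogroup",      # "_nogroup" when the source lives in the default group
        "subvolume": "sv1",
        "snapshot": "snap1",
    },
}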

qa/tasks/cephfs/test_volumes.py

Lines changed: 58 additions & 0 deletions
@@ -2887,12 +2887,34 @@ def test_subvolume_info(self):
 
         self.assertEqual(subvol_info["earmark"], earmark)
 
+        self.assertNotIn('source', subvol_info)
+
         # remove subvolumes
         self._fs_cmd("subvolume", "rm", self.volname, subvolume)
 
         # verify trash dir is clean
         self._wait_for_trash_empty()
 
+    def test_subvol_src_info_with_custom_group(self):
+        '''
+        Test that source info is NOT printed by "subvolume info" command for a
+        subvolume that is not created by cloning even when it is located in a
+        custom group.
+        '''
+        subvol_name = self._gen_subvol_name()
+        group_name = self._gen_subvol_grp_name()
+
+        self.run_ceph_cmd(f'fs subvolumegroup create {self.volname} '
+                          f'{group_name}')
+        self.run_ceph_cmd(f'fs subvolume create {self.volname} {subvol_name} '
+                          f'{group_name}')
+
+        subvol_info = self.get_ceph_cmd_stdout(
+            f'fs subvolume info {self.volname} {subvol_name} {group_name}')
+        subvol_info = json.loads(subvol_info)
+
+        self.assertNotIn('source', subvol_info)
+
     def test_subvolume_ls(self):
         # tests the 'fs subvolume ls' command
 

@@ -6810,6 +6832,7 @@ def test_clone_subvolume_info(self):
         # remove snapshot
         self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
 
+        # actual testing begins now...
         subvol_info = json.loads(self._get_subvolume_info(self.volname, clone))
         if len(subvol_info) == 0:
             raise RuntimeError("Expected the 'fs subvolume info' command to list metadata of subvolume")

@@ -6819,13 +6842,48 @@
         if subvol_info["type"] != "clone":
             raise RuntimeError("type should be set to clone")
 
+        self.assertEqual(subvol_info['source']['volume'], self.volname)
+        self.assertEqual(subvol_info['source']['subvolume'], subvolume)
+        self.assertEqual(subvol_info['source']['snapshot'], snapshot)
+        self.assertEqual(subvol_info['source']['group'], '_nogroup')
+
         # remove subvolumes
         self._fs_cmd("subvolume", "rm", self.volname, subvolume)
         self._fs_cmd("subvolume", "rm", self.volname, clone)
 
         # verify trash dir is clean
         self._wait_for_trash_empty()
 
+    def test_clone_src_info_with_custom_group(self):
+        '''
+        Test that clone's source subvolume's group is printed properly when
+        "subvolume info" command is run for clone.
+        '''
+        subvol_name = self._gen_subvol_name()
+        group_name = self._gen_subvol_grp_name()
+        snap_name = self._gen_subvol_snap_name()
+        clone_name = self._gen_subvol_clone_name()
+
+        self.run_ceph_cmd(f'fs subvolumegroup create {self.volname} '
+                          f'{group_name} --mode=777')
+        self.run_ceph_cmd(f'fs subvolume create {self.volname} {subvol_name} '
+                          f'{group_name} --mode=777')
+        self._do_subvolume_io(subvol_name, group_name, number_of_files=1)
+        self.run_ceph_cmd(f'fs subvolume snapshot create {self.volname} '
+                          f'{subvol_name} {snap_name} {group_name}')
+        self.run_ceph_cmd(f'fs subvolume snapshot clone {self.volname} '
+                          f'{subvol_name} {snap_name} {clone_name} '
+                          f'--group-name {group_name}')
+        self._wait_for_clone_to_complete(clone_name)
+
+        subvol_info = self.get_ceph_cmd_stdout(
+            f'fs subvolume info {self.volname} {clone_name}')
+        subvol_info = json.loads(subvol_info)
+        self.assertEqual(subvol_info['source']['volume'], self.volname)
+        self.assertEqual(subvol_info['source']['subvolume'], subvol_name)
+        self.assertEqual(subvol_info['source']['snapshot'], snap_name)
+        self.assertEqual(subvol_info['source']['group'], group_name)
+
     def test_subvolume_snapshot_info_without_snapshot_clone(self):
         """
         Verify subvolume snapshot info output without cloning snapshot.

src/pybind/mgr/volumes/fs/async_cloner.py

Lines changed: 0 additions & 1 deletion
@@ -273,7 +273,6 @@ def handle_clone_complete(fs_client, volspec, volname, index, groupname, subvoln
         with open_clone_subvol_pair_in_vol(fs_client, volspec, volname,
                                            groupname, subvolname) as (subvol0, subvol1, subvol2):
             subvol1.detach_snapshot(subvol2, index)
-            subvol0.remove_clone_source(flush=True)
     except (MetadataMgrException, VolumeException) as e:
         log.error("failed to detach clone from snapshot: {0}".format(e))
         return (None, True)

src/pybind/mgr/volumes/fs/operations/versions/subvolume_base.py

Lines changed: 50 additions & 1 deletion
@@ -442,6 +442,34 @@ def create_base_dir(self, mode):
         except cephfs.Error as e:
             raise VolumeException(-e.args[0], e.args[1])
 
+    def _get_clone_source(self):
+        try:
+            clone_source = {
+                'volume'   : self.metadata_mgr.get_option("source", "volume"),
+                'subvolume': self.metadata_mgr.get_option("source", "subvolume"),
+                'snapshot' : self.metadata_mgr.get_option("source", "snapshot"),
+            }
+
+            try:
+                clone_source["group"] = self.metadata_mgr.get_option("source", "group")
+            except MetadataMgrException as me:
+                if me.errno == -errno.ENOENT:
+                    pass
+                else:
+                    raise
+        except MetadataMgrException as e:
+            if e.errno == -errno.ENOENT:
+                clone_source = {}
+            else:
+                raise VolumeException(-errno.EINVAL,
+                                      "error fetching subvolume metadata")
+        return clone_source
+
+    def get_clone_source(self):
+        src = self._get_clone_source()
+        return (src['volume'], src.get('group', None), src['subvolume'],
+                src['snapshot'])
+
     def info(self):
         subvolpath = (self.metadata_mgr.get_global_option(
             MetadataManager.GLOBAL_META_KEY_PATH))

@@ -494,7 +522,8 @@ def info(self):
         except cephfs.NoData:
             casesensitive = True
 
-        return {'path': subvolpath,
+        subvol_info = {
+                'path': subvolpath,
                 'type': etype.value,
                 'uid': int(st["uid"]),
                 'gid': int(st["gid"]),

@@ -517,6 +546,26 @@ def info(self):
                 'casesensitive': casesensitive,
                 }
 
+        subvol_src_info = self._get_clone_source()
+        if subvol_src_info:
+            if subvol_src_info.get('group', None) == None:
+                # group name won't be saved in .meta file in case it's
+                # default group
+                subvol_src_info['group'] = '_nogroup'
+            subvol_info['source'] = subvol_src_info
+        else:
+            # it could be that the clone was created in previous release of Ceph
+            # where its source info used to be deleted after cloning finishes.
+            # print "N/A" for such cases.
+            if self.subvol_type == SubvolumeTypes.TYPE_CLONE:
+                subvol_info['source'] = 'N/A'
+            else:
+                # only clones can have a source subvol, therefore don't even
+                # print "N/A" for source info if subvolume is not a clone.
+                pass
+
+        return subvol_info
+
     def set_user_metadata(self, keyname, value):
         try:
             self.metadata_mgr.add_section(MetadataManager.USER_METADATA_SECTION)
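
For illustration only, not part of the diff: `_get_clone_source()` reads a `[source]` section of the subvolume's `.meta` file through `metadata_mgr.get_option()`. Assuming that file is INI-style, as the section/option API suggests, and using invented values, a stand-alone sketch of the lookup and its default-group fallback could look like this.

# Hypothetical .meta contents and a stand-in for the section/option lookups
# performed by _get_clone_source(); values and layout are illustrative only.
import configparser

META = """
[source]
volume = vol1
subvolume = sv1
snapshot = snap1
"""  # no 'group' key: the source subvolume sits in the default group

cfg = configparser.ConfigParser()
cfg.read_string(META)

source = {
    'volume': cfg.get('source', 'volume'),
    'subvolume': cfg.get('source', 'subvolume'),
    'snapshot': cfg.get('source', 'snapshot'),
    # mirror the mgr code: the default group is not persisted, so substitute it
    'group': cfg.get('source', 'group', fallback='_nogroup'),
}
print(source)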

src/pybind/mgr/volumes/fs/operations/versions/subvolume_v1.py

Lines changed: 0 additions & 29 deletions
@@ -138,11 +138,6 @@ def add_clone_source(self, volname, subvolume, snapname, flush=False):
         if flush:
             self.metadata_mgr.flush()
 
-    def remove_clone_source(self, flush=False):
-        self.metadata_mgr.remove_section("source")
-        if flush:
-            self.metadata_mgr.flush()
-
     def add_clone_failure(self, errno, error_msg):
         try:
             self.metadata_mgr.add_section(MetadataManager.CLONE_FAILURE_SECTION)

@@ -657,30 +652,6 @@ def evict(self, volname, auth_id, timeout=30):
             log.error(msg)
             raise EvictionError(msg)
 
-    def _get_clone_source(self):
-        try:
-            clone_source = {
-                'volume'   : self.metadata_mgr.get_option("source", "volume"),
-                'subvolume': self.metadata_mgr.get_option("source", "subvolume"),
-                'snapshot' : self.metadata_mgr.get_option("source", "snapshot"),
-            }
-
-            try:
-                clone_source["group"] = self.metadata_mgr.get_option("source", "group")
-            except MetadataMgrException as me:
-                if me.errno == -errno.ENOENT:
-                    pass
-                else:
-                    raise
-        except MetadataMgrException:
-            raise VolumeException(-errno.EINVAL, "error fetching subvolume metadata")
-        return clone_source
-
-    def get_clone_source(self):
-        src = self._get_clone_source()
-        return (src['volume'], src.get('group', None), src['subvolume'],
-                src['snapshot'])
-
     def _get_clone_failure(self):
         clone_failure = {
             'errno' : self.metadata_mgr.get_option(MetadataManager.CLONE_FAILURE_SECTION, MetadataManager.CLONE_FAILURE_META_KEY_ERRNO),
