Skip to content

Commit e4c1c9a

Browse files
authored
Merge pull request ceph#52650 from guits/cv-refactor-osd-objectstore
ceph-volume: osd objectstore refactor
2 parents 87dbfb6 + bcf9803 commit e4c1c9a

38 files changed

+2593
-1146
lines changed

src/ceph-volume/ceph_volume/activate/main.py

Lines changed: 14 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -3,8 +3,8 @@
33
import argparse
44

55
from ceph_volume import terminal
6-
from ceph_volume.devices.lvm.activate import Activate as LVMActivate
7-
from ceph_volume.devices.raw.activate import Activate as RAWActivate
6+
from ceph_volume.objectstore.lvmbluestore import LvmBlueStore as LVMActivate
7+
from ceph_volume.objectstore.rawbluestore import RawBlueStore as RAWActivate
88
from ceph_volume.devices.simple.activate import Activate as SimpleActivate
99

1010

@@ -44,27 +44,24 @@ def main(self):
4444

4545
# first try raw
4646
try:
47-
RAWActivate([]).activate(
48-
devs=None,
49-
start_osd_id=self.args.osd_id,
50-
start_osd_uuid=self.args.osd_uuid,
51-
tmpfs=not self.args.no_tmpfs,
52-
systemd=not self.args.no_systemd,
53-
)
47+
raw_activate = RAWActivate([])
48+
raw_activate.activate(None,
49+
self.args.osd_id,
50+
self.args.osd_uuid,
51+
not self.args.no_tmpfs)
5452
return
5553
except Exception as e:
5654
terminal.info(f'Failed to activate via raw: {e}')
5755

5856
# then try lvm
5957
try:
60-
LVMActivate([]).activate(
61-
argparse.Namespace(
62-
osd_id=self.args.osd_id,
63-
osd_fsid=self.args.osd_uuid,
64-
no_tmpfs=self.args.no_tmpfs,
65-
no_systemd=self.args.no_systemd,
66-
)
67-
)
58+
lvm_activate = LVMActivate(argparse.Namespace(
59+
no_tmpfs=self.args.no_tmpfs,
60+
no_systemd=self.args.no_systemd,
61+
osd_fsid=self.args.osd_uuid))
62+
lvm_activate.activate(None,
63+
self.args.osd_id,
64+
self.args.osd_uuid)
6865
return
6966
except Exception as e:
7067
terminal.info(f'Failed to activate via LVM: {e}')
Lines changed: 21 additions & 205 deletions
Original file line numberDiff line numberDiff line change
@@ -1,216 +1,20 @@
11
from __future__ import print_function
22
import argparse
33
import logging
4-
import os
54
from textwrap import dedent
6-
from ceph_volume import process, conf, decorators, terminal, configuration
7-
from ceph_volume.util import system, disk
8-
from ceph_volume.util import prepare as prepare_utils
9-
from ceph_volume.util import encryption as encryption_utils
10-
from ceph_volume.systemd import systemctl
11-
from ceph_volume.api import lvm as api
12-
from .listing import direct_report
5+
from ceph_volume import objectstore
136

147

158
logger = logging.getLogger(__name__)
169

1710

18-
19-
def get_osd_device_path(osd_lvs, device_type, dmcrypt_secret=None):
20-
"""
21-
``device_type`` can be one of ``db``, ``wal`` or ``block`` so that we can
22-
query LVs on system and fallback to querying the uuid if that is not
23-
present.
24-
25-
Return a path if possible, failing to do that a ``None``, since some of
26-
these devices are optional.
27-
"""
28-
osd_block_lv = None
29-
for lv in osd_lvs:
30-
if lv.tags.get('ceph.type') == 'block':
31-
osd_block_lv = lv
32-
break
33-
if osd_block_lv:
34-
is_encrypted = osd_block_lv.tags.get('ceph.encrypted', '0') == '1'
35-
logger.debug('Found block device (%s) with encryption: %s', osd_block_lv.name, is_encrypted)
36-
uuid_tag = 'ceph.%s_uuid' % device_type
37-
device_uuid = osd_block_lv.tags.get(uuid_tag)
38-
if not device_uuid:
39-
return None
40-
41-
device_lv = None
42-
for lv in osd_lvs:
43-
if lv.tags.get('ceph.type') == device_type:
44-
device_lv = lv
45-
break
46-
if device_lv:
47-
if is_encrypted:
48-
encryption_utils.luks_open(dmcrypt_secret, device_lv.lv_path, device_uuid)
49-
return '/dev/mapper/%s' % device_uuid
50-
return device_lv.lv_path
51-
52-
# this could be a regular device, so query it with blkid
53-
physical_device = disk.get_device_from_partuuid(device_uuid)
54-
if physical_device:
55-
if is_encrypted:
56-
encryption_utils.luks_open(dmcrypt_secret, physical_device, device_uuid)
57-
return '/dev/mapper/%s' % device_uuid
58-
return physical_device
59-
60-
raise RuntimeError('could not find %s with uuid %s' % (device_type, device_uuid))
61-
62-
63-
def activate_bluestore(osd_lvs, no_systemd=False, no_tmpfs=False):
64-
for lv in osd_lvs:
65-
if lv.tags.get('ceph.type') == 'block':
66-
osd_block_lv = lv
67-
break
68-
else:
69-
raise RuntimeError('could not find a bluestore OSD to activate')
70-
71-
is_encrypted = osd_block_lv.tags.get('ceph.encrypted', '0') == '1'
72-
if is_encrypted and conf.dmcrypt_no_workqueue is None:
73-
encryption_utils.set_dmcrypt_no_workqueue()
74-
dmcrypt_secret = None
75-
osd_id = osd_block_lv.tags['ceph.osd_id']
76-
conf.cluster = osd_block_lv.tags['ceph.cluster_name']
77-
osd_fsid = osd_block_lv.tags['ceph.osd_fsid']
78-
configuration.load_ceph_conf_path(osd_block_lv.tags['ceph.cluster_name'])
79-
configuration.load()
80-
81-
# mount on tmpfs the osd directory
82-
osd_path = '/var/lib/ceph/osd/%s-%s' % (conf.cluster, osd_id)
83-
if not system.path_is_mounted(osd_path):
84-
# mkdir -p and mount as tmpfs
85-
prepare_utils.create_osd_path(osd_id, tmpfs=not no_tmpfs)
86-
# XXX This needs to be removed once ceph-bluestore-tool can deal with
87-
# symlinks that exist in the osd dir
88-
for link_name in ['block', 'block.db', 'block.wal']:
89-
link_path = os.path.join(osd_path, link_name)
90-
if os.path.exists(link_path):
91-
os.unlink(os.path.join(osd_path, link_name))
92-
# encryption is handled here, before priming the OSD dir
93-
if is_encrypted:
94-
osd_lv_path = '/dev/mapper/%s' % osd_block_lv.lv_uuid
95-
lockbox_secret = osd_block_lv.tags['ceph.cephx_lockbox_secret']
96-
encryption_utils.write_lockbox_keyring(osd_id, osd_fsid, lockbox_secret)
97-
dmcrypt_secret = encryption_utils.get_dmcrypt_key(osd_id, osd_fsid)
98-
encryption_utils.luks_open(dmcrypt_secret, osd_block_lv.lv_path, osd_block_lv.lv_uuid)
99-
else:
100-
osd_lv_path = osd_block_lv.lv_path
101-
102-
db_device_path = get_osd_device_path(osd_lvs, 'db', dmcrypt_secret=dmcrypt_secret)
103-
wal_device_path = get_osd_device_path(osd_lvs, 'wal', dmcrypt_secret=dmcrypt_secret)
104-
105-
# Once symlinks are removed, the osd dir can be 'primed' again. chown first,
106-
# regardless of what currently exists so that ``prime-osd-dir`` can succeed
107-
# even if permissions are somehow messed up
108-
system.chown(osd_path)
109-
prime_command = [
110-
'ceph-bluestore-tool', '--cluster=%s' % conf.cluster,
111-
'prime-osd-dir', '--dev', osd_lv_path,
112-
'--path', osd_path, '--no-mon-config']
113-
114-
process.run(prime_command)
115-
# always re-do the symlink regardless if it exists, so that the block,
116-
# block.wal, and block.db devices that may have changed can be mapped
117-
# correctly every time
118-
process.run(['ln', '-snf', osd_lv_path, os.path.join(osd_path, 'block')])
119-
system.chown(os.path.join(osd_path, 'block'))
120-
system.chown(osd_path)
121-
if db_device_path:
122-
destination = os.path.join(osd_path, 'block.db')
123-
process.run(['ln', '-snf', db_device_path, destination])
124-
system.chown(db_device_path)
125-
system.chown(destination)
126-
if wal_device_path:
127-
destination = os.path.join(osd_path, 'block.wal')
128-
process.run(['ln', '-snf', wal_device_path, destination])
129-
system.chown(wal_device_path)
130-
system.chown(destination)
131-
132-
if no_systemd is False:
133-
# enable the ceph-volume unit for this OSD
134-
systemctl.enable_volume(osd_id, osd_fsid, 'lvm')
135-
136-
# enable the OSD
137-
systemctl.enable_osd(osd_id)
138-
139-
# start the OSD
140-
systemctl.start_osd(osd_id)
141-
terminal.success("ceph-volume lvm activate successful for osd ID: %s" % osd_id)
142-
143-
14411
class Activate(object):
145-
14612
help = 'Discover and mount the LVM device associated with an OSD ID and start the Ceph OSD'
14713

148-
def __init__(self, argv):
14+
def __init__(self, argv, args=None):
15+
self.objectstore = None
14916
self.argv = argv
150-
151-
@decorators.needs_root
152-
def activate_all(self, args):
153-
listed_osds = direct_report()
154-
osds = {}
155-
for osd_id, devices in listed_osds.items():
156-
# the metadata for all devices in each OSD will contain
157-
# the FSID which is required for activation
158-
for device in devices:
159-
fsid = device.get('tags', {}).get('ceph.osd_fsid')
160-
if fsid:
161-
osds[fsid] = osd_id
162-
break
163-
if not osds:
164-
terminal.warning('Was unable to find any OSDs to activate')
165-
terminal.warning('Verify OSDs are present with "ceph-volume lvm list"')
166-
return
167-
for osd_fsid, osd_id in osds.items():
168-
if not args.no_systemd and systemctl.osd_is_active(osd_id):
169-
terminal.warning(
170-
'OSD ID %s FSID %s process is active. Skipping activation' % (osd_id, osd_fsid)
171-
)
172-
else:
173-
terminal.info('Activating OSD ID %s FSID %s' % (osd_id, osd_fsid))
174-
self.activate(args, osd_id=osd_id, osd_fsid=osd_fsid)
175-
176-
@decorators.needs_root
177-
def activate(self, args, osd_id=None, osd_fsid=None):
178-
"""
179-
:param args: The parsed arguments coming from the CLI
180-
:param osd_id: When activating all, this gets populated with an
181-
existing OSD ID
182-
:param osd_fsid: When activating all, this gets populated with an
183-
existing OSD FSID
184-
"""
185-
osd_id = osd_id if osd_id else args.osd_id
186-
osd_fsid = osd_fsid if osd_fsid else args.osd_fsid
187-
188-
if osd_id and osd_fsid:
189-
tags = {'ceph.osd_id': osd_id, 'ceph.osd_fsid': osd_fsid}
190-
elif not osd_id and osd_fsid:
191-
tags = {'ceph.osd_fsid': osd_fsid}
192-
elif osd_id and not osd_fsid:
193-
raise RuntimeError('could not activate osd.{}, please provide the '
194-
'osd_fsid too'.format(osd_id))
195-
else:
196-
raise RuntimeError('Please provide both osd_id and osd_fsid')
197-
lvs = api.get_lvs(tags=tags)
198-
if not lvs:
199-
raise RuntimeError('could not find osd.%s with osd_fsid %s' %
200-
(osd_id, osd_fsid))
201-
202-
# This argument is only available when passed in directly or via
203-
# systemd, not when ``create`` is being used
204-
# placeholder when a new objectstore support will be added
205-
if getattr(args, 'auto_detect_objectstore', False):
206-
logger.info('auto detecting objectstore')
207-
return activate_bluestore(lvs, args.no_systemd)
208-
209-
# explicit 'objectstore' flags take precedence
210-
if getattr(args, 'bluestore', False):
211-
activate_bluestore(lvs, args.no_systemd, getattr(args, 'no_tmpfs', False))
212-
elif any('ceph.block_device' in lv.tags for lv in lvs):
213-
activate_bluestore(lvs, args.no_systemd, getattr(args, 'no_tmpfs', False))
17+
self.args = args
21418

21519
def main(self):
21620
sub_command_help = dedent("""
@@ -256,6 +60,14 @@ def main(self):
25660
action='store_true',
25761
help='force bluestore objectstore activation',
25862
)
63+
parser.add_argument(
64+
'--objectstore',
65+
dest='objectstore',
66+
help='The OSD objectstore.',
67+
default='bluestore',
68+
choices=['bluestore', 'seastore'],
69+
type=str,
70+
)
25971
parser.add_argument(
26072
'--all',
26173
dest='activate_all',
@@ -273,11 +85,15 @@ def main(self):
27385
action='store_true',
27486
help='Do not use a tmpfs mount for OSD data dir'
27587
)
276-
if len(self.argv) == 0:
88+
if len(self.argv) == 0 and self.args is None:
27789
print(sub_command_help)
27890
return
279-
args = parser.parse_args(self.argv)
280-
if args.activate_all:
281-
self.activate_all(args)
91+
if self.args is None:
92+
self.args = parser.parse_args(self.argv)
93+
if self.args.bluestore:
94+
self.args.objectstore = 'bluestore'
95+
self.objectstore = objectstore.mapping['LVM'][self.args.objectstore](args=self.args)
96+
if self.args.activate_all:
97+
self.objectstore.activate_all()
28298
else:
283-
self.activate(args)
99+
self.objectstore.activate()

0 commit comments

Comments
 (0)