|
55 | 55 | DEFAULT_GRAFANA_IMAGE = 'quay.io/ceph/ceph-grafana:9.4.7' |
56 | 56 | DEFAULT_HAPROXY_IMAGE = 'quay.io/ceph/haproxy:2.3' |
57 | 57 | DEFAULT_KEEPALIVED_IMAGE = 'quay.io/ceph/keepalived:2.2.4' |
| 58 | +DEFAULT_NVMEOF_IMAGE = 'quay.io/ceph/nvmeof:0.0.1' |
58 | 59 | DEFAULT_SNMP_GATEWAY_IMAGE = 'docker.io/maxwo/snmp-notifier:v1.2.1' |
59 | 60 | DEFAULT_ELASTICSEARCH_IMAGE = 'quay.io/omrizeneva/elasticsearch:6.8.23' |
60 | 61 | DEFAULT_JAEGER_COLLECTOR_IMAGE = 'quay.io/jaegertracing/jaeger-collector:1.29' |
@@ -389,7 +390,7 @@ class UnauthorizedRegistryError(Error): |
class Ceph(object):
    """Registry of core Ceph daemon and gateway type names used by cephadm
    when classifying a daemon_type string."""
    # daemon types handled as ordinary Ceph daemons
    daemons = ('mon', 'mgr', 'osd', 'mds', 'rgw', 'rbd-mirror',
               'crash', 'cephfs-mirror', 'ceph-exporter')
    # gateway daemon types — presumably deployed from their own container
    # images rather than the main ceph image; confirm against callers
    gateways = ('iscsi', 'nfs', 'nvmeof')
393 | 394 |
|
394 | 395 | ################################## |
395 | 396 |
|
@@ -912,7 +913,8 @@ def get_version(ctx, container_id): |
912 | 913 | version = None |
913 | 914 | out, err, code = call(ctx, |
914 | 915 | [ctx.container_engine.path, 'exec', container_id, |
915 | | - '/usr/bin/python3', '-c', "import pkg_resources; print(pkg_resources.require('ceph_iscsi')[0].version)"], |
| 916 | + '/usr/bin/python3', '-c', |
| 917 | + "import pkg_resources; print(pkg_resources.require('ceph_iscsi')[0].version)"], |
916 | 918 | verbosity=CallVerbosity.QUIET) |
917 | 919 | if code == 0: |
918 | 920 | version = out.strip() |
@@ -979,6 +981,133 @@ def get_tcmu_runner_container(self): |
979 | 981 | tcmu_container.cname = self.get_container_name(desc='tcmu') |
980 | 982 | return tcmu_container |
981 | 983 |
|
| 984 | + |
| 985 | +################################## |
| 986 | + |
| 987 | + |
class CephNvmeof(object):
    """Defines a Ceph-Nvmeof container (NVMe over Fabrics gateway).

    Mirrors the structure of the CephIscsi helper class: holds the
    deploy-time identity (fsid, daemon_id, image) plus the files supplied
    via config-json, and provides the mounts/binds/sysctls the container
    needs.
    """

    daemon_type = 'nvmeof'
    # files that MUST be present in the config-json 'files' dict (see validate())
    required_files = ['ceph-nvmeof.conf']
    default_image = DEFAULT_NVMEOF_IMAGE

    def __init__(self,
                 ctx,
                 fsid,
                 daemon_id,
                 config_json,
                 image=DEFAULT_NVMEOF_IMAGE):
        # type: (CephadmContext, str, Union[int, str], Dict, str) -> None
        """Store identity and config, then validate the supplied args."""
        self.ctx = ctx
        self.fsid = fsid
        self.daemon_id = daemon_id
        self.image = image

        # config-json options: 'files' maps filename -> file content
        self.files = dict_get(config_json, 'files', {})

        # validate the supplied args (raises Error on bad fsid/id/image/files)
        self.validate()

    @classmethod
    def init(cls, ctx, fsid, daemon_id):
        # type: (CephadmContext, str, Union[int, str]) -> CephNvmeof
        """Alternate constructor: build from the config-json fetched via ctx."""
        return cls(ctx, fsid, daemon_id,
                   fetch_configs(ctx), ctx.image)

    @staticmethod
    def get_container_mounts(data_dir: str) -> Dict[str, str]:
        """Return host-path -> container-path mounts for the gateway container."""
        mounts = dict()
        mounts[os.path.join(data_dir, 'config')] = '/etc/ceph/ceph.conf:z'
        # mounts[os.path.join(data_dir, 'keyring')] = '/etc/ceph/keyring:z'
        # NOTE(review): mounting the host admin keyring directly is a
        # stop-gap (see TODO) — a per-daemon keyring under data_dir would
        # be the expected pattern; confirm before release.
        mounts['/etc/ceph/ceph.client.admin.keyring'] = '/etc/ceph/keyring:z'  # TODO: FIXME
        mounts[os.path.join(data_dir, 'ceph-nvmeof.conf')] = '/src/ceph-nvmeof.conf:z'
        mounts[os.path.join(data_dir, 'configfs')] = '/sys/kernel/config'
        # mounts[log_dir] = '/var/log:z'  # TODO: would we need a logdir?
        mounts['/dev'] = '/dev'
        return mounts

    @staticmethod
    def get_container_binds():
        # type: () -> List[List[str]]
        """Return bind-mount specs; /lib/modules is exposed read-only."""
        binds = []
        lib_modules = ['type=bind',
                       'source=/lib/modules',
                       'destination=/lib/modules',
                       'ro=true']
        binds.append(lib_modules)
        return binds

    @staticmethod
    def get_version(ctx: CephadmContext, container_id: str) -> Optional[str]:
        """Return the 'io.ceph.version' label of ctx.image, or None.

        NOTE(review): this inspects the image named by ctx.image, not the
        running container identified by *container_id* — confirm that is
        intended (container_id is currently unused).
        """
        out, err, ret = call_throws(ctx, [
            ctx.container_engine.path, 'inspect',
            '--format', '{{index .Config.Labels "io.ceph.version"}}',
            ctx.image])
        version = None
        if ret == 0:
            version = out.strip()
        return version

    def validate(self):
        # type: () -> None
        """Raise Error if fsid, daemon_id, image, or required files are invalid."""
        if not is_fsid(self.fsid):
            raise Error('not an fsid: %s' % self.fsid)
        if not self.daemon_id:
            raise Error('invalid daemon_id: %s' % self.daemon_id)
        if not self.image:
            raise Error('invalid image: %s' % self.image)

        # check for the required files in the config-json supplied 'files'
        if self.required_files:
            for fname in self.required_files:
                if fname not in self.files:
                    raise Error('required file missing from config-json: %s' % fname)

    def get_daemon_name(self):
        # type: () -> str
        """Return the daemon name in '<daemon_type>.<daemon_id>' form."""
        return '%s.%s' % (self.daemon_type, self.daemon_id)

    def get_container_name(self, desc=None):
        # type: (Optional[str]) -> str
        """Return the container name '<fsid>-<daemon_name>[-<desc>]'."""
        cname = '%s-%s' % (self.fsid, self.get_daemon_name())
        if desc:
            cname = '%s-%s' % (cname, desc)
        return cname

    def create_daemon_dirs(self, data_dir, uid, gid):
        # type: (str, int, int) -> None
        """Create files under the container data dir"""
        if not os.path.isdir(data_dir):
            raise OSError('data_dir is not a directory: %s' % (data_dir))

        logger.info('Creating ceph-nvmeof config...')
        # configfs dir is later mounted at /sys/kernel/config (see mounts)
        configfs_dir = os.path.join(data_dir, 'configfs')
        makedirs(configfs_dir, uid, gid, 0o755)

        # populate files from the config-json
        populate_files(data_dir, self.files, uid, gid)

    @staticmethod
    def configfs_mount_umount(data_dir, mount=True):
        # type: (str, bool) -> List[str]
        """Build an idempotent shell command to (u)mount configfs under data_dir.

        NOTE(review): the command string is whitespace-split into tokens;
        the caller presumably re-joins it or runs it under a shell — confirm,
        since splitting 'if ! grep ...' is only meaningful as shell input.
        """
        mount_path = os.path.join(data_dir, 'configfs')
        if mount:
            cmd = 'if ! grep -qs {0} /proc/mounts; then ' \
                  'mount -t configfs none {0}; fi'.format(mount_path)
        else:
            cmd = 'if grep -qs {0} /proc/mounts; then ' \
                  'umount {0}; fi'.format(mount_path)
        return cmd.split()

    @staticmethod
    def get_sysctl_settings() -> List[str]:
        """Host sysctls required by the gateway.

        Hugepages are presumably needed by the SPDK-based nvmeof target;
        confirm the 2048-page sizing against the gateway's requirements.
        """
        return [
            'vm.nr_hugepages = 2048',
        ]
| 1109 | + |
| 1110 | + |
982 | 1111 | ################################## |
983 | 1112 |
|
984 | 1113 |
|
@@ -1431,6 +1560,7 @@ def get_supported_daemons(): |
1431 | 1560 | supported_daemons.extend(Monitoring.components) |
1432 | 1561 | supported_daemons.append(NFSGanesha.daemon_type) |
1433 | 1562 | supported_daemons.append(CephIscsi.daemon_type) |
| 1563 | + supported_daemons.append(CephNvmeof.daemon_type) |
1434 | 1564 | supported_daemons.append(CustomContainer.daemon_type) |
1435 | 1565 | supported_daemons.append(HAproxy.daemon_type) |
1436 | 1566 | supported_daemons.append(Keepalived.daemon_type) |
@@ -2323,6 +2453,8 @@ def update_default_image(ctx: CephadmContext) -> None: |
2323 | 2453 | ctx.image = Keepalived.default_image |
2324 | 2454 | if type_ == SNMPGateway.daemon_type: |
2325 | 2455 | ctx.image = SNMPGateway.default_image |
| 2456 | + if type_ == CephNvmeof.daemon_type: |
| 2457 | + ctx.image = CephNvmeof.default_image |
2326 | 2458 | if type_ in Tracing.components: |
2327 | 2459 | ctx.image = Tracing.components[type_]['image'] |
2328 | 2460 | if not ctx.image: |
@@ -2962,6 +3094,10 @@ def create_daemon_dirs(ctx, fsid, daemon_type, daemon_id, uid, gid, |
2962 | 3094 | ceph_iscsi = CephIscsi.init(ctx, fsid, daemon_id) |
2963 | 3095 | ceph_iscsi.create_daemon_dirs(data_dir, uid, gid) |
2964 | 3096 |
|
| 3097 | + elif daemon_type == CephNvmeof.daemon_type: |
| 3098 | + ceph_nvmeof = CephNvmeof.init(ctx, fsid, daemon_id) |
| 3099 | + ceph_nvmeof.create_daemon_dirs(data_dir, uid, gid) |
| 3100 | + |
2965 | 3101 | elif daemon_type == HAproxy.daemon_type: |
2966 | 3102 | haproxy = HAproxy.init(ctx, fsid, daemon_id) |
2967 | 3103 | haproxy.create_daemon_dirs(data_dir, uid, gid) |
@@ -3140,6 +3276,8 @@ def get_container_binds(ctx, fsid, daemon_type, daemon_id): |
3140 | 3276 |
|
3141 | 3277 | if daemon_type == CephIscsi.daemon_type: |
3142 | 3278 | binds.extend(CephIscsi.get_container_binds()) |
| 3279 | + if daemon_type == CephNvmeof.daemon_type: |
| 3280 | + binds.extend(CephNvmeof.get_container_binds()) |
3143 | 3281 | elif daemon_type == CustomContainer.daemon_type: |
3144 | 3282 | assert daemon_id |
3145 | 3283 | cc = CustomContainer.init(ctx, fsid, daemon_id) |
@@ -3252,6 +3390,11 @@ def get_container_mounts(ctx, fsid, daemon_type, daemon_id, |
3252 | 3390 | data_dir = get_data_dir(fsid, ctx.data_dir, daemon_type, daemon_id) |
3253 | 3391 | mounts.update(HAproxy.get_container_mounts(data_dir)) |
3254 | 3392 |
|
| 3393 | + if daemon_type == CephNvmeof.daemon_type: |
| 3394 | + assert daemon_id |
| 3395 | + data_dir = get_data_dir(fsid, ctx.data_dir, daemon_type, daemon_id) |
| 3396 | + mounts.update(CephNvmeof.get_container_mounts(data_dir)) |
| 3397 | + |
3255 | 3398 | if daemon_type == CephIscsi.daemon_type: |
3256 | 3399 | assert daemon_id |
3257 | 3400 | data_dir = get_data_dir(fsid, ctx.data_dir, daemon_type, daemon_id) |
@@ -3386,6 +3529,11 @@ def get_container(ctx: CephadmContext, |
3386 | 3529 | name = '%s.%s' % (daemon_type, daemon_id) |
3387 | 3530 | envs.extend(Keepalived.get_container_envs()) |
3388 | 3531 | container_args.extend(['--cap-add=NET_ADMIN', '--cap-add=NET_RAW']) |
| 3532 | + elif daemon_type == CephNvmeof.daemon_type: |
| 3533 | + name = '%s.%s' % (daemon_type, daemon_id) |
| 3534 | + container_args.extend(['--ulimit', 'memlock=-1:-1']) |
| 3535 | + container_args.extend(['--ulimit', 'nofile=10240']) |
| 3536 | + container_args.extend(['--cap-add=SYS_ADMIN', '--cap-add=CAP_SYS_NICE']) |
3389 | 3537 | elif daemon_type == CephIscsi.daemon_type: |
3390 | 3538 | entrypoint = CephIscsi.entrypoint |
3391 | 3539 | name = '%s.%s' % (daemon_type, daemon_id) |
@@ -3963,6 +4111,8 @@ def _write(conf: Path, lines: List[str]) -> None: |
3963 | 4111 | lines = HAproxy.get_sysctl_settings() |
3964 | 4112 | elif daemon_type == 'keepalived': |
3965 | 4113 | lines = Keepalived.get_sysctl_settings() |
| 4114 | + elif daemon_type == 'nvmeof': |
| 4115 | + lines = CephNvmeof.get_sysctl_settings() |
3966 | 4116 | lines = filter_sysctl_settings(ctx, lines) |
3967 | 4117 |
|
3968 | 4118 | # apply the sysctl settings |
@@ -6400,6 +6550,14 @@ def _dispatch_deploy( |
6400 | 6550 | config=config, keyring=keyring, |
6401 | 6551 | deployment_type=deployment_type, |
6402 | 6552 | ports=daemon_ports) |
| 6553 | + elif daemon_type == CephNvmeof.daemon_type: |
| 6554 | + config, keyring = get_config_and_keyring(ctx) |
| 6555 | + uid, gid = 65534, 65534 # TODO: check this |
| 6556 | + c = get_deployment_container(ctx, ctx.fsid, daemon_type, daemon_id) |
| 6557 | + deploy_daemon(ctx, ctx.fsid, daemon_type, daemon_id, c, uid, gid, |
| 6558 | + config=config, keyring=keyring, |
| 6559 | + deployment_type=deployment_type, |
| 6560 | + ports=daemon_ports) |
6403 | 6561 | elif daemon_type in Tracing.components: |
6404 | 6562 | uid, gid = 65534, 65534 |
6405 | 6563 | c = get_container(ctx, ctx.fsid, daemon_type, daemon_id) |
@@ -6941,6 +7099,8 @@ def list_daemons(ctx, detail=True, legacy_dir=None): |
6941 | 7099 | version = NFSGanesha.get_version(ctx, container_id) |
6942 | 7100 | if daemon_type == CephIscsi.daemon_type: |
6943 | 7101 | version = CephIscsi.get_version(ctx, container_id) |
| 7102 | + if daemon_type == CephNvmeof.daemon_type: |
| 7103 | + version = CephNvmeof.get_version(ctx, container_id) |
6944 | 7104 | elif not version: |
6945 | 7105 | if daemon_type in Ceph.daemons: |
6946 | 7106 | out, err, code = call(ctx, |
|
0 commit comments