Skip to content

Commit ef15ecb

Browse files
authored
Merge pull request ceph#63924 from phlogistonjohn/jjm-smb-custom-ports
smb: support custom ports Reviewed-by: Adam King <[email protected]> Reviewed-by: Avan Thakkar <[email protected]> Reviewed-by: Sachin Prabhu <[email protected]> Reviewed-by: Xavi Hernandez <[email protected]>
2 parents 38578de + 17668d5 commit ef15ecb

File tree

14 files changed

+476
-28
lines changed

14 files changed

+476
-28
lines changed

doc/cephadm/services/smb.rst

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -103,6 +103,15 @@ custom_dns
103103
Active Directory even if the Ceph host nodes are not tied into the Active
104104
Directory DNS domain(s).
105105

106+
custom_ports
107+
A mapping of service names to port numbers that will override the
108+
default ports used for those services. The service names are:
109+
``smb``, ``smbmetrics``, and ``ctdb``. If a service name is not
110+
present in the mapping, the default port will be used.
111+
For example, ``{"smb": 4455, "smbmetrics": 9009}`` will change the
112+
ports used by smb for client access and the metrics exporter, but
113+
not change the port used by the CTDB clustering daemon.
114+
106115
include_ceph_users
107116
A list of cephx user (aka entity) names that the Samba Containers may use.
108117
The cephx keys for each user in the list will automatically be added to

doc/mgr/smb.rst

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -447,6 +447,15 @@ custom_dns
447447
Optional. List of IP Addresses. IP addresses will be used as DNS
448448
resolver(s) in Samba containers allowing the containers to use domain DNS
449449
even if the Ceph host does not
450+
custom_ports
451+
Optional. A mapping of service names to port numbers that will override the
452+
default ports used for those services. The service names are:
453+
``smb``, ``smbmetrics``, and ``ctdb``. If a service name is not
454+
present in the mapping, the default port will be used.
455+
For example, ``{"smb": 4455, "smbmetrics": 9009}`` will change the
456+
ports used by smb for client access and the metrics exporter, but
457+
not change the port used by the CTDB clustering daemon.
458+
Note: not all SMB clients are able to use alternate port numbers.
450459
placement
451460
Optional. A Ceph Orchestration :ref:`placement specifier
452461
<orchestrator-cli-placement-spec>`. Defaults to one host if not provided
Lines changed: 208 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,208 @@
1+
roles:
2+
# Three node ceph cluster for smb + ctdb
3+
- - host.a
4+
- mon.a
5+
- mgr.x
6+
- osd.0
7+
- osd.1
8+
- client.0
9+
- - host.b
10+
- mon.b
11+
- osd.2
12+
- osd.3
13+
- - host.c
14+
- mon.c
15+
- osd.4
16+
- osd.5
17+
# Reserve a host for acting as a domain controller and smb client
18+
- - host.d
19+
- cephadm.exclude
20+
overrides:
21+
ceph:
22+
log-only-match:
23+
- CEPHADM_
24+
tasks:
25+
- cephadm.deploy_samba_ad_dc:
26+
role: host.d
27+
- vip:
28+
count: 3
29+
- pexec:
30+
all:
31+
- sudo setsebool -P virt_sandbox_use_netlink 1 || true
32+
- cephadm:
33+
34+
- cephadm.shell:
35+
host.a:
36+
- ceph fs volume create cephfs
37+
- cephadm.wait_for_service:
38+
service: mds.cephfs
39+
40+
- cephadm.shell:
41+
host.a:
42+
# add subvolgroup & subvolumes for test
43+
- cmd: ceph fs subvolumegroup create cephfs smb
44+
- cmd: ceph fs subvolume create cephfs sv1 --group-name=smb --mode=0777
45+
- cmd: ceph fs subvolume create cephfs sv2 --group-name=smb --mode=0777
46+
- cmd: ceph fs subvolume create cephfs sv3 --group-name=smb --mode=0777
47+
# set up smb cluster and shares
48+
- cmd: ceph mgr module enable smb
49+
# TODO: replace sleep with poll of mgr state?
50+
- cmd: sleep 30
51+
- cmd: ceph smb apply -i -
52+
stdin: |
53+
# --- Begin Embedded YAML
54+
- resource_type: ceph.smb.cluster
55+
cluster_id: ac1
56+
auth_mode: active-directory
57+
domain_settings:
58+
realm: DOMAIN1.SINK.TEST
59+
join_sources:
60+
- source_type: resource
61+
ref: join1-admin
62+
custom_dns:
63+
- "{{ctx.samba_ad_dc_ip}}"
64+
public_addrs:
65+
- address: {{VIP0}}/{{VIPPREFIXLEN}}
66+
- address: {{VIP1}}/{{VIPPREFIXLEN}}
67+
placement:
68+
count: 3
69+
- resource_type: ceph.smb.join.auth
70+
auth_id: join1-admin
71+
auth:
72+
username: Administrator
73+
password: Passw0rd
74+
- resource_type: ceph.smb.share
75+
cluster_id: ac1
76+
share_id: share1
77+
cephfs:
78+
volume: cephfs
79+
subvolumegroup: smb
80+
subvolume: sv1
81+
path: /
82+
- resource_type: ceph.smb.share
83+
cluster_id: ac1
84+
share_id: share2
85+
cephfs:
86+
volume: cephfs
87+
subvolumegroup: smb
88+
subvolume: sv2
89+
path: /
90+
# cluster two
91+
- resource_type: ceph.smb.cluster
92+
cluster_id: ac2
93+
auth_mode: active-directory
94+
domain_settings:
95+
realm: DOMAIN1.SINK.TEST
96+
join_sources:
97+
- source_type: resource
98+
ref: join1-admin
99+
custom_dns:
100+
- "{{ctx.samba_ad_dc_ip}}"
101+
custom_ports:
102+
smb: 4455
103+
smbmetrics: 9909
104+
ctdb: 9999
105+
public_addrs:
106+
- address: {{VIP2}}/{{VIPPREFIXLEN}}
107+
placement:
108+
count: 3
109+
- resource_type: ceph.smb.share
110+
cluster_id: ac2
111+
share_id: s1ac2
112+
cephfs:
113+
volume: cephfs
114+
subvolumegroup: smb
115+
subvolume: sv3
116+
path: /
117+
# --- End Embedded YAML
118+
# Wait for the smb service to start
119+
- cephadm.wait_for_service:
120+
service: smb.ac1
121+
- cephadm.wait_for_service:
122+
service: smb.ac2
123+
124+
# debugging breadcrumbs
125+
- cephadm.shell:
126+
host.a:
127+
# dump clustermeta objects from rados
128+
- cmd: rados --pool=.smb -N ac1 get cluster.meta.json /dev/stdout
129+
- cmd: rados --pool=.smb -N ac2 get cluster.meta.json /dev/stdout
130+
131+
# Check if shares exist
132+
- cephadm.exec:
133+
host.d:
134+
- sleep 30
135+
- "{{ctx.samba_client_container_cmd|join(' ')}} smbclient -U DOMAIN1\\\\ckent%1115Rose. //{{'host.a'|role_to_remote|attr('ip_address')}}/share1 -c ls"
136+
- "{{ctx.samba_client_container_cmd|join(' ')}} smbclient -U DOMAIN1\\\\ckent%1115Rose. //{{'host.a'|role_to_remote|attr('ip_address')}}/share2 -c ls"
137+
- "{{ctx.samba_client_container_cmd|join(' ')}} smbclient -p4455 -U DOMAIN1\\\\ckent%1115Rose. //{{'host.a'|role_to_remote|attr('ip_address')}}/s1ac2 -c ls"
138+
139+
# verify CTDB is healthy, cluster 1 is well formed
140+
- cephadm.exec:
141+
host.a:
142+
- "{{ctx.cephadm}} ls --no-detail | {{ctx.cephadm}} shell jq -r 'map(select(.name | startswith(\"smb.ac1\")))[-1].name' > /tmp/svcname"
143+
- "{{ctx.cephadm}} enter -n $(cat /tmp/svcname) ctdb status > /tmp/ctdb_status"
144+
- cat /tmp/ctdb_status
145+
- grep 'pnn:0 .*OK' /tmp/ctdb_status
146+
- grep 'pnn:1 .*OK' /tmp/ctdb_status
147+
- grep 'pnn:2 .*OK' /tmp/ctdb_status
148+
- grep 'Number of nodes:3' /tmp/ctdb_status
149+
- rm -rf /tmp/svcname /tmp/ctdb_status
150+
# verify CTDB is healthy, cluster 2 is well formed
151+
- cephadm.exec:
152+
host.a:
153+
- "{{ctx.cephadm}} ls --no-detail | {{ctx.cephadm}} shell jq -r 'map(select(.name | startswith(\"smb.ac2\")))[-1].name' > /tmp/svcname"
154+
- "{{ctx.cephadm}} enter -n $(cat /tmp/svcname) ctdb status > /tmp/ctdb_status"
155+
- cat /tmp/ctdb_status
156+
- grep 'pnn:0 .*OK' /tmp/ctdb_status
157+
- grep 'pnn:1 .*OK' /tmp/ctdb_status
158+
- grep 'pnn:2 .*OK' /tmp/ctdb_status
159+
- grep 'Number of nodes:3' /tmp/ctdb_status
160+
- rm -rf /tmp/svcname /tmp/ctdb_status
161+
162+
# Test the two assigned VIPs on cluster 1
163+
- cephadm.exec:
164+
host.d:
165+
- sleep 30
166+
- "{{ctx.samba_client_container_cmd|join(' ')}} smbclient -U DOMAIN1\\\\ckent%1115Rose. //{{VIP0}}/share1 -c ls"
167+
- "{{ctx.samba_client_container_cmd|join(' ')}} smbclient -U DOMAIN1\\\\ckent%1115Rose. //{{VIP1}}/share1 -c ls"
168+
- "{{ctx.samba_client_container_cmd|join(' ')}} smbclient -U DOMAIN1\\\\ckent%1115Rose. //{{VIP0}}/share2 -c ls"
169+
- "{{ctx.samba_client_container_cmd|join(' ')}} smbclient -U DOMAIN1\\\\ckent%1115Rose. //{{VIP1}}/share2 -c ls"
170+
# Test the assigned VIP on cluster 2
171+
- cephadm.exec:
172+
host.d:
173+
- sleep 30
174+
- "{{ctx.samba_client_container_cmd|join(' ')}} smbclient -p4455 -U DOMAIN1\\\\ckent%1115Rose. //{{VIP2}}/s1ac2 -c ls"
175+
176+
- cephadm.shell:
177+
host.a:
178+
- cmd: ceph smb apply -i -
179+
stdin: |
180+
# --- Begin Embedded YAML
181+
# cluster1
182+
- resource_type: ceph.smb.cluster
183+
cluster_id: ac1
184+
intent: removed
185+
- resource_type: ceph.smb.share
186+
cluster_id: ac1
187+
share_id: share1
188+
intent: removed
189+
- resource_type: ceph.smb.share
190+
cluster_id: ac1
191+
share_id: share2
192+
intent: removed
193+
# cluster2
194+
- resource_type: ceph.smb.cluster
195+
cluster_id: ac2
196+
intent: removed
197+
- resource_type: ceph.smb.share
198+
cluster_id: ac2
199+
share_id: s1ac2
200+
intent: removed
201+
# common
202+
- resource_type: ceph.smb.join.auth
203+
auth_id: join1-admin
204+
intent: removed
205+
# --- End Embedded YAML
206+
# Wait for the smb service to be removed
207+
- cephadm.wait_for_service_not_present:
208+
service: smb.ac1
Lines changed: 110 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,110 @@
1+
roles:
2+
# Test is for basic smb deployment & functionality. one node cluster is OK
3+
- - host.a
4+
- mon.a
5+
- mgr.x
6+
- osd.0
7+
- osd.1
8+
- client.0
9+
# Reserve a host for acting as a domain controller
10+
- - host.b
11+
- cephadm.exclude
12+
overrides:
13+
ceph:
14+
log-only-match:
15+
- CEPHADM_
16+
tasks:
17+
- cephadm.deploy_samba_ad_dc:
18+
role: host.b
19+
- cephadm:
20+
single_host_defaults: true
21+
22+
- cephadm.shell:
23+
host.a:
24+
- ceph fs volume create cephfs
25+
- cephadm.wait_for_service:
26+
service: mds.cephfs
27+
28+
- cephadm.shell:
29+
host.a:
30+
# add subvolgroup & subvolumes for test
31+
- cmd: ceph fs subvolumegroup create cephfs smb
32+
- cmd: ceph fs subvolume create cephfs sv1 --group-name=smb --mode=0777
33+
- cmd: ceph fs subvolume create cephfs sv2 --group-name=smb --mode=0777
34+
# set up smb cluster and shares
35+
- cmd: ceph mgr module enable smb
36+
# TODO: replace sleep with poll of mgr state?
37+
- cmd: sleep 30
38+
- cmd: ceph smb apply -i -
39+
stdin: |
40+
# --- Begin Embedded YAML
41+
- resource_type: ceph.smb.cluster
42+
cluster_id: altports
43+
auth_mode: active-directory
44+
domain_settings:
45+
realm: DOMAIN1.SINK.TEST
46+
join_sources:
47+
- source_type: resource
48+
ref: join1-admin
49+
custom_dns:
50+
- "{{ctx.samba_ad_dc_ip}}"
51+
custom_ports:
52+
smb: 4455
53+
smbmetrics: 9909
54+
placement:
55+
count: 1
56+
- resource_type: ceph.smb.join.auth
57+
auth_id: join1-admin
58+
auth:
59+
username: Administrator
60+
password: Passw0rd
61+
- resource_type: ceph.smb.share
62+
cluster_id: altports
63+
share_id: share1
64+
cephfs:
65+
volume: cephfs
66+
subvolumegroup: smb
67+
subvolume: sv1
68+
path: /
69+
- resource_type: ceph.smb.share
70+
cluster_id: altports
71+
share_id: share2
72+
cephfs:
73+
volume: cephfs
74+
subvolumegroup: smb
75+
subvolume: sv2
76+
path: /
77+
# --- End Embedded YAML
78+
# Wait for the smb service to start
79+
- cephadm.wait_for_service:
80+
service: smb.altports
81+
# Check if shares exist
82+
- cephadm.exec:
83+
host.b:
84+
- sleep 30
85+
- "{{ctx.samba_client_container_cmd|join(' ')}} smbclient -p4455 -U DOMAIN1\\\\ckent%1115Rose. //{{'host.a'|role_to_remote|attr('ip_address')}}/share1 -c ls"
86+
- "{{ctx.samba_client_container_cmd|join(' ')}} smbclient -p4455 -U DOMAIN1\\\\ckent%1115Rose. //{{'host.a'|role_to_remote|attr('ip_address')}}/share2 -c ls"
87+
88+
- cephadm.shell:
89+
host.a:
90+
- cmd: ceph smb apply -i -
91+
stdin: |
92+
# --- Begin Embedded YAML
93+
- resource_type: ceph.smb.cluster
94+
cluster_id: altports
95+
intent: removed
96+
- resource_type: ceph.smb.join.auth
97+
auth_id: join1-admin
98+
intent: removed
99+
- resource_type: ceph.smb.share
100+
cluster_id: altports
101+
share_id: share1
102+
intent: removed
103+
- resource_type: ceph.smb.share
104+
cluster_id: altports
105+
share_id: share2
106+
intent: removed
107+
# --- End Embedded YAML
108+
# Wait for the smb service to be removed
109+
- cephadm.wait_for_service_not_present:
110+
service: smb.altports

0 commit comments

Comments
 (0)