Commit e6b5940

qa/cephadm/smb: add test using custom ports to run two ctdb clusters
Add a teuthology test that exercises basic colocation of two smb clusters on the same ceph cluster by giving the 2nd cluster a distinct set of ports.

Signed-off-by: John Mulligan <[email protected]>
1 parent f07ae5e commit e6b5940
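
The piece that makes the colocation work is the custom_ports map on the second cluster resource, excerpted here from the embedded smb spec in the file below; giving ac2's SMB, metrics, and CTDB listeners non-default ports is what allows both clusters to be placed on the same three hosts:

    custom_ports:
      smb: 4455
      smbmetrics: 9909
      ctdb: 9999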

1 file changed: +208 -0 lines changed

1 file changed

+208
-0
lines changed
Lines changed: 208 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,208 @@
roles:
# Three node ceph cluster for smb + ctdb
- - host.a
  - mon.a
  - mgr.x
  - osd.0
  - osd.1
  - client.0
- - host.b
  - mon.b
  - osd.2
  - osd.3
- - host.c
  - mon.c
  - osd.4
  - osd.5
# Reserve a host for acting as a domain controller and smb client
- - host.d
  - cephadm.exclude
overrides:
  ceph:
    log-only-match:
      - CEPHADM_
tasks:
- cephadm.deploy_samba_ad_dc:
    role: host.d
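# Allocate three virtual IPs: VIP0 and VIP1 are published by cluster ac1, VIP2 by cluster ac2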
- vip:
    count: 3
- pexec:
    all:
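      # allow sandboxed containers to use netlink (needed by ctdb); the || true tolerates hosts without SELinux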
      - sudo setsebool -P virt_sandbox_use_netlink 1 || true
- cephadm:

- cephadm.shell:
    host.a:
      - ceph fs volume create cephfs
- cephadm.wait_for_service:
    service: mds.cephfs

- cephadm.shell:
    host.a:
      # add subvolgroup & subvolumes for test
      - cmd: ceph fs subvolumegroup create cephfs smb
      - cmd: ceph fs subvolume create cephfs sv1 --group-name=smb --mode=0777
      - cmd: ceph fs subvolume create cephfs sv2 --group-name=smb --mode=0777
      - cmd: ceph fs subvolume create cephfs sv3 --group-name=smb --mode=0777
      # set up smb cluster and shares
      - cmd: ceph mgr module enable smb
      # TODO: replace sleep with poll of mgr state?
      - cmd: sleep 30
      - cmd: ceph smb apply -i -
        stdin: |
          # --- Begin Embedded YAML
          - resource_type: ceph.smb.cluster
            cluster_id: ac1
            auth_mode: active-directory
            domain_settings:
              realm: DOMAIN1.SINK.TEST
              join_sources:
                - source_type: resource
                  ref: join1-admin
            custom_dns:
              - "{{ctx.samba_ad_dc_ip}}"
            public_addrs:
              - address: {{VIP0}}/{{VIPPREFIXLEN}}
              - address: {{VIP1}}/{{VIPPREFIXLEN}}
            placement:
              count: 3
          - resource_type: ceph.smb.join.auth
            auth_id: join1-admin
            auth:
              username: Administrator
              password: Passw0rd
          - resource_type: ceph.smb.share
            cluster_id: ac1
            share_id: share1
            cephfs:
              volume: cephfs
              subvolumegroup: smb
              subvolume: sv1
              path: /
          - resource_type: ceph.smb.share
            cluster_id: ac1
            share_id: share2
            cephfs:
              volume: cephfs
              subvolumegroup: smb
              subvolume: sv2
              path: /
          # cluster two
          - resource_type: ceph.smb.cluster
            cluster_id: ac2
            auth_mode: active-directory
            domain_settings:
              realm: DOMAIN1.SINK.TEST
              join_sources:
                - source_type: resource
                  ref: join1-admin
            custom_dns:
              - "{{ctx.samba_ad_dc_ip}}"
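            # non-default ports so ac2's smbd, metrics exporter, and ctdb do not collide with ac1 on the same hosts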
            custom_ports:
              smb: 4455
              smbmetrics: 9909
              ctdb: 9999
            public_addrs:
              - address: {{VIP2}}/{{VIPPREFIXLEN}}
            placement:
              count: 3
          - resource_type: ceph.smb.share
            cluster_id: ac2
            share_id: s1ac2
            cephfs:
              volume: cephfs
              subvolumegroup: smb
              subvolume: sv3
              path: /
          # --- End Embedded YAML
# Wait for the smb service to start
- cephadm.wait_for_service:
    service: smb.ac1
- cephadm.wait_for_service:
    service: smb.ac2

# debugging breadcrumbs
- cephadm.shell:
    host.a:
      # dump clustermeta objects from rados
      - cmd: rados --pool=.smb -N ac1 get cluster.meta.json /dev/stdout
      - cmd: rados --pool=.smb -N ac2 get cluster.meta.json /dev/stdout

# Check if shares exist
- cephadm.exec:
    host.d:
      - sleep 30
      - "{{ctx.samba_client_container_cmd|join(' ')}} smbclient -U DOMAIN1\\\\ckent%1115Rose. //{{'host.a'|role_to_remote|attr('ip_address')}}/share1 -c ls"
      - "{{ctx.samba_client_container_cmd|join(' ')}} smbclient -U DOMAIN1\\\\ckent%1115Rose. //{{'host.a'|role_to_remote|attr('ip_address')}}/share2 -c ls"
      - "{{ctx.samba_client_container_cmd|join(' ')}} smbclient -p4455 -U DOMAIN1\\\\ckent%1115Rose. //{{'host.a'|role_to_remote|attr('ip_address')}}/s1ac2 -c ls"

# verify CTDB is healthy, cluster 1 is well formed
- cephadm.exec:
    host.a:
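      # pick the smb.ac1 daemon from cephadm ls and run ctdb status inside its container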
- "{{ctx.cephadm}} ls --no-detail | {{ctx.cephadm}} shell jq -r 'map(select(.name | startswith(\"smb.ac1\")))[-1].name' > /tmp/svcname"
143+
- "{{ctx.cephadm}} enter -n $(cat /tmp/svcname) ctdb status > /tmp/ctdb_status"
144+
- cat /tmp/ctdb_status
145+
- grep 'pnn:0 .*OK' /tmp/ctdb_status
146+
- grep 'pnn:1 .*OK' /tmp/ctdb_status
147+
- grep 'pnn:2 .*OK' /tmp/ctdb_status
148+
- grep 'Number of nodes:3' /tmp/ctdb_status
149+
- rm -rf /tmp/svcname /tmp/ctdb_status
150+
# verify CTDB is healthy, cluster 2 is well formed
151+
- cephadm.exec:
152+
host.a:
153+
- "{{ctx.cephadm}} ls --no-detail | {{ctx.cephadm}} shell jq -r 'map(select(.name | startswith(\"smb.ac2\")))[-1].name' > /tmp/svcname"
154+
- "{{ctx.cephadm}} enter -n $(cat /tmp/svcname) ctdb status > /tmp/ctdb_status"
155+
- cat /tmp/ctdb_status
156+
- grep 'pnn:0 .*OK' /tmp/ctdb_status
157+
- grep 'pnn:1 .*OK' /tmp/ctdb_status
158+
- grep 'pnn:2 .*OK' /tmp/ctdb_status
159+
- grep 'Number of nodes:3' /tmp/ctdb_status
160+
- rm -rf /tmp/svcname /tmp/ctdb_status
161+
162+
# Test the two assigned VIPs on cluster 1
163+
- cephadm.exec:
164+
host.d:
165+
- sleep 30
166+
- "{{ctx.samba_client_container_cmd|join(' ')}} smbclient -U DOMAIN1\\\\ckent%1115Rose. //{{VIP0}}/share1 -c ls"
167+
- "{{ctx.samba_client_container_cmd|join(' ')}} smbclient -U DOMAIN1\\\\ckent%1115Rose. //{{VIP1}}/share1 -c ls"
168+
- "{{ctx.samba_client_container_cmd|join(' ')}} smbclient -U DOMAIN1\\\\ckent%1115Rose. //{{VIP0}}/share2 -c ls"
169+
- "{{ctx.samba_client_container_cmd|join(' ')}} smbclient -U DOMAIN1\\\\ckent%1115Rose. //{{VIP1}}/share2 -c ls"
170+
# Test the assigned VIP on cluster 2
171+
- cephadm.exec:
172+
host.d:
173+
- sleep 30
174+
- "{{ctx.samba_client_container_cmd|join(' ')}} smbclient -p4455 -U DOMAIN1\\\\ckent%1115Rose. //{{VIP2}}/s1ac2 -c ls"
175+
176+
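# remove all smb resources: both clusters, their shares, and the shared join auth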
- cephadm.shell:
    host.a:
      - cmd: ceph smb apply -i -
        stdin: |
          # --- Begin Embedded YAML
          # cluster1
          - resource_type: ceph.smb.cluster
            cluster_id: ac1
            intent: removed
          - resource_type: ceph.smb.share
            cluster_id: ac1
            share_id: share1
            intent: removed
          - resource_type: ceph.smb.share
            cluster_id: ac1
            share_id: share2
            intent: removed
          # cluster2
          - resource_type: ceph.smb.cluster
            cluster_id: ac2
            intent: removed
          - resource_type: ceph.smb.share
            cluster_id: ac2
            share_id: s1ac2
            intent: removed
          # common
          - resource_type: ceph.smb.join.auth
            auth_id: join1-admin
            intent: removed
          # --- End Embedded YAML
# Wait for the smb service to be removed
- cephadm.wait_for_service_not_present:
    service: smb.ac1
