roles:
# Test is for basic smb deployment & functionality with CTDB clustering;
# the CTDB checks below need a three node cluster.
- - host.a
  - mon.a
  - mgr.x
  - osd.0
  - osd.1
  - client.0
- - host.b
  - mon.b
  - osd.2
  - osd.3
- - host.c
  - mon.c
  - osd.4
  - osd.5
# Reserve a host for acting as a domain controller and smb client
- - host.d
  - cephadm.exclude
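# Only CEPHADM_-prefixed entries should count as failures when the run
# scans the cluster logs; other transient warnings are expected here.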
overrides:
  ceph:
    log-only-match:
      - CEPHADM_
tasks:
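# Set up a containerized samba client on host.d; the task stores the
# container run prefix in ctx.samba_client_container_cmd, used by the
# smbclient checks below.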
- cephadm.configure_samba_client_container:
    role: host.d
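# Reserve one virtual IP for the smb cluster's public address; later
# templated commands reference it as {{VIP0}}, with its prefix length
# available as {{VIPPREFIXLEN}}.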
- vip:
    count: 1
- cephadm:

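# Create a CephFS volume; cephadm also schedules the MDS daemons for it,
# hence the wait for mds.cephfs below.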
- cephadm.shell:
    host.a:
      - ceph fs volume create cephfs
- cephadm.wait_for_service:
    service: mds.cephfs

- cephadm.shell:
    host.a:
      # add subvolgroup & subvolumes for test
      - cmd: ceph fs subvolumegroup create cephfs smb
      - cmd: ceph fs subvolume create cephfs sv1 --group-name=smb --mode=0777
      - cmd: ceph fs subvolume create cephfs sv2 --group-name=smb --mode=0777
      # set up smb cluster and shares
      - cmd: ceph mgr module enable smb
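      # give the mgr a moment to fully load the newly enabled module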
      - cmd: sleep 30
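      # Create a user-auth smb cluster: --define-user-pass supplies the
      # share credentials, count:3 places one daemon per cluster host so
      # CTDB can form a three node cluster, and --public_addrs assigns
      # the VIP reserved earlier.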
      - cmd: >
          ceph smb cluster create modusr1 user
          --define-user-pass=user1%t3stP4ss1
          --placement=count:3
          --clustering=default
          --public_addrs={{VIP0}}/{{VIPPREFIXLEN}}
      - cmd: ceph smb share create modusr1 share1 cephfs / --subvolume=smb/sv1
      - cmd: ceph smb share create modusr1 share2 cephfs / --subvolume=smb/sv2
# Wait for the smb service to start
- cephadm.wait_for_service:
    service: smb.modusr1

# Check if shares exist
- cephadm.exec:
    host.d:
      - sleep 30
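      # smbclient runs inside the client container prepared earlier;
      # role_to_remote resolves host.a's IP address at run time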
      - "{{ctx.samba_client_container_cmd|join(' ')}} smbclient -U user1%t3stP4ss1 //{{'host.a'|role_to_remote|attr('ip_address')}}/share1 -c ls"
      - "{{ctx.samba_client_container_cmd|join(' ')}} smbclient -U user1%t3stP4ss1 //{{'host.a'|role_to_remote|attr('ip_address')}}/share2 -c ls"

# verify CTDB is healthy, cluster well formed
- cephadm.exec:
    host.a:
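      # find the newest smb.modusr1 daemon via cephadm ls, enter its
      # container, and check that every CTDB node reports OK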
      - "{{ctx.cephadm}} ls --no-detail | {{ctx.cephadm}} shell jq -r 'map(select(.name | startswith(\"smb.modusr1\")))[-1].name' > /tmp/svcname"
      - "{{ctx.cephadm}} enter -n $(cat /tmp/svcname) ctdb status > /tmp/ctdb_status"
      - cat /tmp/ctdb_status
      - grep 'pnn:0 .*OK' /tmp/ctdb_status
      - grep 'pnn:1 .*OK' /tmp/ctdb_status
      - grep 'pnn:2 .*OK' /tmp/ctdb_status
      - grep 'Number of nodes:3' /tmp/ctdb_status
      - rm -rf /tmp/svcname /tmp/ctdb_status

# Test the assigned VIP
- cephadm.exec:
    host.d:
      - sleep 30
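      # reaching the shares via {{VIP0}} shows the public address is
      # being served by one of the CTDB nodes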
      - "{{ctx.samba_client_container_cmd|join(' ')}} smbclient -U user1%t3stP4ss1 //{{VIP0}}/share1 -c ls"
      - "{{ctx.samba_client_container_cmd|join(' ')}} smbclient -U user1%t3stP4ss1 //{{VIP0}}/share2 -c ls"

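# Clean up: remove the shares before removing the cluster; cephadm then
# tears down the smb service, which the final wait confirms.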
- cephadm.shell:
    host.a:
      - cmd: ceph smb share rm modusr1 share2
      - cmd: ceph smb share rm modusr1 share1
      - cmd: ceph smb cluster rm modusr1
# Wait for the smb service to be removed
- cephadm.wait_for_service_not_present:
    service: smb.modusr1