|
6 | 6 | include_role: |
7 | 7 | name: check-cluster-health |
8 | 8 |
|
9 | | -# Checking if worker node contains 500 Gb volume attached |
| 9 | +# Extract worker node name dynamically |
| 10 | +- name: Get list of cluster nodes
| 11 | + shell: oc get nodes |
| 12 | + register: nodes |
| 13 | + changed_when: false |
| 14 | + |
| 15 | +- name: Show raw output from 'oc get nodes' |
| 16 | + debug: |
| 17 | + var: nodes.stdout_lines |
| 18 | + |
| 19 | +- name: Extract only worker node names |
| 20 | + set_fact: |
| 21 | + node_names: >- |
| 22 | + {{ nodes.stdout_lines[1:] |
| 23 | + | select('search', 'worker') |
| 24 | + | map('split') |
| 25 | + | map('first') |
| 26 | + | list }} |
| 27 | +
|
| 28 | +- name: Print extracted worker node names |
| 29 | + debug: |
| 30 | + var: node_names |
| 31 | + |
| 32 | +- name: Print each node being added to dynamic 'workers' group |
| 33 | + debug: |
| 34 | + msg: "Adding {{ item }} to workers group" |
| 35 | + loop: "{{ node_names }}" |
| 36 | + when: node_names is defined and node_names | length > 0 |
| 37 | + |
| 38 | +- name: Add worker nodes to dynamic 'workers' group |
| 39 | + add_host: |
| 40 | + name: "{{ item }}" |
| 41 | + groups: workers |
| 42 | + loop: "{{ node_names }}" |
| 43 | + when: node_names is defined and node_names | length > 0 |
| 44 | + |
| 45 | + |
10 | 46 | - name: Check if 500GB volume is attached to all worker nodes |
11 | 47 | shell: ssh -o StrictHostKeyChecking=no core@{{ item }} lsblk | grep 500 |
12 | 48 | register: volume_check |
13 | | - loop: "{{ groups['workers'] }}" |
14 | | - when: "'workers' in group_names and ansible_facts.devices is defined and ansible_facts.devices | selectattr('size', 'equalto', 500000000000) | list | length > 0" |
| 49 | + loop: "{{ groups['workers'] | default([]) }}"
| 50 | + when: upi_cluster
| 51 | + ignore_errors: true
15 | 52 |
|
16 | 53 | - name: Fail if 500GB volume is not found on any worker node |
17 | 54 | fail: |
18 | | - msg: "500GB volume is not attached to the worker node {{ item }}. Please add a 500GB attached volume to each worker node and try again." |
19 | | - loop: "{{ groups['workers'] }}" |
| 55 | + msg: "500GB volume is not attached to the worker node {{ item.item }}. Please add a 500GB attached volume to each worker node and try again." |
| 56 | + loop: "{{ volume_check.results | default([]) }}"
20 | 57 | when: |
21 | | - - volume_check is not defined |
22 | | - - volume_check.rc != 0 |
| 58 | + - upi_cluster |
| 59 | + - item.rc is defined |
| 60 | + - item.rc != 0 |
| 61 | + |
| 62 | +- name: IPI Cluster (Verify 500gb Volume attached) |
| 63 | + when: not upi_cluster |
| 64 | + block: |
| 65 | + - name: Get cluster nodes |
| 66 | + set_fact: |
| 67 | + ipi_node_names: >- |
| 68 | + {{ nodes.stdout_lines[1:] | map('split') | map('first') | list }} |
| 69 | +
|
| 70 | + - name: Get worker node name |
| 71 | + set_fact: |
| 72 | + worker_node: "{{ ipi_node_names | select('search', 'worker') | list | first }}" |
| 73 | + |
| 74 | + - name: Verify 500GB volume on worker node |
| 75 | + shell: | |
| 76 | + oc debug node/{{ worker_node }} -- chroot /host lsblk | grep 500 || (echo "❌ Additional 500GB volume not found on worker node, please attach to all workers" && exit 1) |
| 77 | + register: lsblk_result |
| 78 | + changed_when: false |
| 79 | + |
| 80 | + - name: Show lsblk output from all nodes |
| 81 | + debug: |
| 82 | + var: lsblk_result.stdout_lines |
23 | 83 |
|
24 | 84 | - name: Check if lso_namespace is defined, and set default if not |
25 | 85 | set_fact: |
|
34 | 94 | kind: Namespace |
35 | 95 | metadata: |
36 | 96 | name: "{{ lso_namespace }}" |
37 | | - when: lso_namespace is defined |
| 97 | + when: lso_namespace is defined |
38 | 98 |
|
39 | 99 | # Create ICSP & catalogsource for installing local storage operator |
40 | 100 | - name: Local Storage Operator deployment |
|
93 | 153 | retries: 10 |
94 | 154 | delay: 30 |
95 | 155 | failed_when: |
96 | | - - ip_status.rc != 0 |
97 | | - - ip_status.stdout == "" |
| 156 | + - ip_status.rc != 0 |
| 157 | + - ip_status.stdout == "" |
98 | 158 |
|
99 | 159 | # Check the ClusterServiceVersion (CSV) for the LocalStorage Operator |
100 | 160 | - name: Check LocalStorage Operator CSV |
|
103 | 163 | until: csv_status.stdout != "" and csv_status.stderr == "" |
104 | 164 | retries: 10 |
105 | 165 | delay: 30 |
106 | | - failed_when: csv_status.rc != 0 |
| 166 | + failed_when: csv_status.rc != 0 |
107 | 167 | when: |
108 | 168 | - lso_namespace is defined |
109 | 169 |
|
|
112 | 172 | msg: "The LocalStorage Operator CSV is available." |
113 | 173 | when: "'local-storage-operator' in csv_status.stdout" |
114 | 174 |
|
| 175 | +- name: Set unified node_names for IPI |
| 176 | + when: not upi_cluster |
| 177 | + set_fact: |
| 178 | + node_names: "{{ ipi_node_names | select('search', 'worker') | list }}"
| 179 | + |
115 | 180 | # Create LocalVolumeSet YAML content for local block volumes |
116 | 181 | - name: Create LocalVolumeSet YAML for local block volumes |
117 | 182 | kubernetes.core.k8s: |
|
138 | 203 | - matchExpressions: |
139 | 204 | - key: kubernetes.io/hostname |
140 | 205 | operator: In |
141 | | - values: |
142 | | - - worker-0 |
143 | | - - worker-1 |
144 | | - - worker-2 |
| 206 | + values: "{{ node_names | list }}" |
145 | 207 | storageClassName: localblock |
146 | 208 | volumeMode: Block |
147 | 209 |
|
|
152 | 214 | retries: 5 |
153 | 215 | delay: 10 |
154 | 216 | until: lvs_status.rc == 0 |
155 | | - failed_when: lvs_status.rc != 0 |
| 217 | + failed_when: lvs_status.rc != 0 |
156 | 218 |
|
157 | 219 | - name: Verify creation of diskmanager pods |
158 | | - shell: "oc get pods -n openshift-local-storage --selector=app=diskmaker-manager | grep Running" |
| 220 | + shell: | |
| 221 | + oc get pods -n openshift-local-storage --selector=app=diskmaker-manager \ |
| 222 | + --no-headers | awk '{print $3}' | grep -v Running | wc -l |
159 | 223 | register: check_pods |
160 | | - until: check_pods.stdout|int == 0 and check_pods.stderr == "" |
| 224 | + until: check_pods.stdout | int == 0 |
161 | 225 | retries: 10 |
162 | 226 | delay: 30 |
163 | | - failed_when: check_pods.rc != 0 |
| 227 | + failed_when: check_pods.rc != 0 and check_pods.stdout | int != 0 |
164 | 228 |
|
165 | 229 | - name: Verify the creation of StorageClass |
166 | 230 | command: oc get sc localblock |
|
170 | 234 | delay: 30 |
171 | 235 | failed_when: check_sc.rc != 0 |
172 | 236 |
|
173 | | -- name: Delete existing PersistentVolumeClaim and Pod (if they exist) |
174 | | - kubernetes.core.k8s: |
175 | | - state: absent |
176 | | - definition: |
177 | | - - kind: PersistentVolumeClaim |
178 | | - apiVersion: v1 |
179 | | - metadata: |
180 | | - name: test-local-claim |
181 | | - - apiVersion: v1 |
182 | | - kind: Pod |
183 | | - metadata: |
184 | | - name: busybox-lso |
185 | | - namespace: default |
186 | | - register: delete_pvc |
187 | | - changed_when: delete_pvc is changed |
188 | | - |
189 | | -# Testing localblock storage class |
190 | | -# Creating PVC |
191 | | -- name: Create PersistentVolumeClaim YAML |
192 | | - kubernetes.core.k8s: |
193 | | - definition: |
194 | | - - kind: PersistentVolumeClaim |
195 | | - apiVersion: v1 |
196 | | - metadata: |
197 | | - name: test-local-claim |
198 | | - namespace: default |
199 | | - spec: |
200 | | - accessModes: |
201 | | - - ReadWriteOnce |
202 | | - volumeMode: Block |
203 | | - resources: |
204 | | - requests: |
205 | | - storage: 100Gi |
206 | | - storageClassName: localblock |
207 | | - |
208 | | -# Creating POD for PersistentVolumeClaim |
209 | | -- name: Create POD for PersistentVolumeClaim |
210 | | - kubernetes.core.k8s: |
211 | | - definition: |
212 | | - - apiVersion: v1 |
213 | | - kind: Pod |
214 | | - metadata: |
215 | | - name: busybox-lso |
216 | | - namespace: default |
217 | | - spec: |
218 | | - volumes: |
219 | | - - name: local-data |
220 | | - persistentVolumeClaim: |
221 | | - claimName: test-local-claim |
222 | | - containers: |
223 | | - - name: busybox |
224 | | - image: quay.io/powercloud/busybox:ubi |
225 | | - command: ['sh', '-c', 'echo "Hello, Kubernetes!" && sleep 600'] |
226 | | - volumeDevices: |
227 | | - - devicePath: "{{ device_path }}" |
228 | | - name: local-data |
229 | | - |
230 | | -- name: Verify the status of Persistent Volumes |
231 | | - shell: "oc get pv --selector storage.openshift.com/owner-kind=LocalVolumeSet | grep Bound" |
232 | | - register: check_pv |
233 | | - until: check_pv.stdout|int == 0 and check_pv.stderr == "" |
234 | | - retries: 10 |
235 | | - delay: 30 |
236 | | - failed_when: check_pv.rc != 0 |
| 237 | +- name: Manage PVC and Pod for UPI cluster |
| 238 | + when: upi_cluster |
| 239 | + block: |
237 | 240 |
|
238 | | -- name: Verify PVC status |
239 | | - shell: "oc get pvc test-local-claim -n default | grep Bound" |
240 | | - register: pvc_status |
241 | | - until: pvc_status.stdout|int == 0 and pvc_status.stderr == "" |
242 | | - retries: 10 |
243 | | - delay: 30 |
244 | | - failed_when: pvc_status.rc != 0 |
| 241 | + - name: Delete existing PersistentVolumeClaim and Pod (if they exist) |
| 242 | + kubernetes.core.k8s: |
| 243 | + state: absent |
| 244 | + definition: |
| 245 | + - kind: PersistentVolumeClaim |
| 246 | + apiVersion: v1 |
| 247 | + metadata: |
| 248 | + name: test-local-claim |
| 249 | + - apiVersion: v1 |
| 250 | + kind: Pod |
| 251 | + metadata: |
| 252 | + name: busybox-lso |
| 253 | + namespace: default |
| 254 | + register: delete_pvc |
| 255 | + changed_when: delete_pvc is changed |
245 | 256 |
|
246 | | -- name: Verify Pod status |
247 | | - shell: "oc get pods busybox-lso -n default -o jsonpath='{.status.phase}'" |
248 | | - register: pod_status |
249 | | - until: pod_status.stdout|int == 0 and pod_status.stderr == "" |
250 | | - retries: 5 |
251 | | - delay: 10 |
252 | | - failed_when: pod_status.stdout != "Pending" |
| 257 | + - name: Create PersistentVolumeClaim YAML |
| 258 | + kubernetes.core.k8s: |
| 259 | + definition: |
| 260 | + - kind: PersistentVolumeClaim |
| 261 | + apiVersion: v1 |
| 262 | + metadata: |
| 263 | + name: test-local-claim |
| 264 | + namespace: default |
| 265 | + spec: |
| 266 | + accessModes: |
| 267 | + - ReadWriteOnce |
| 268 | + volumeMode: Block |
| 269 | + resources: |
| 270 | + requests: |
| 271 | + storage: 100Gi |
| 272 | + storageClassName: localblock |
| 273 | + |
| 274 | + - name: Create POD for PersistentVolumeClaim |
| 275 | + kubernetes.core.k8s: |
| 276 | + definition: |
| 277 | + - apiVersion: v1 |
| 278 | + kind: Pod |
| 279 | + metadata: |
| 280 | + name: busybox-lso |
| 281 | + namespace: default |
| 282 | + spec: |
| 283 | + volumes: |
| 284 | + - name: local-data |
| 285 | + persistentVolumeClaim: |
| 286 | + claimName: test-local-claim |
| 287 | + containers: |
| 288 | + - name: busybox |
| 289 | + image: "{{ busybox_image }}" |
| 290 | + command: ['sh', '-c', 'echo "Hello, Kubernetes!" && sleep 600'] |
| 291 | + volumeDevices: |
| 292 | + - devicePath: "{{ device_path }}" |
| 293 | + name: local-data |
| 294 | + |
| 295 | + - name: Verify the status of Persistent Volumes |
| 296 | + shell: "oc get pv --selector storage.openshift.com/owner-kind=LocalVolumeSet | grep Bound" |
| 297 | + register: check_pv |
| 298 | + until: check_pv.rc == 0 and check_pv.stderr == ""
| 299 | + retries: 10 |
| 300 | + delay: 30 |
| 301 | + failed_when: check_pv.rc != 0 |
| 302 | + |
| 303 | + - name: Verify PVC status |
| 304 | + shell: "oc get pvc test-local-claim -n default | grep Bound" |
| 305 | + register: pvc_status |
| 306 | + until: pvc_status.rc == 0 and pvc_status.stderr == ""
| 307 | + retries: 10 |
| 308 | + delay: 30 |
| 309 | + failed_when: pvc_status.rc != 0 |
| 310 | + |
| 311 | + - name: Verify Pod status
| 312 | + shell: "oc get pods busybox-lso -n default -o jsonpath='{.status.phase}'"
| 313 | + register: pod_status
| 314 | + until: pod_status.stdout == "Running" and pod_status.stderr == ""
| 315 | + retries: 5
| 316 | + delay: 10
| 317 | + failed_when: pod_status.stdout != "Running"
0 commit comments