45 | 45 | regexp: "https://127.0.0.1:6443" |
46 | 46 | replace: "https://{{ k3s_registration_address }}:6443" |
47 | 47 |
48 | | - - name: Resource Readiness Check |
| 48 | + # Unmanaging and removing the Cilium HelmChart is required for |
| 49 | + # Flux to take over managing the lifecycle of Cilium
| 50 | + |
| 51 | + - name: Post installation of custom manifests tasks |
49 | 52 | run_once: true |
50 | | - kubernetes.core.k8s_info: |
51 | | - kubeconfig: /etc/rancher/k3s/k3s.yaml |
52 | | - kind: "{{ item.kind }}" |
53 | | - name: "{{ item.name }}" |
54 | | - namespace: "{{ item.namespace | default('') }}" |
55 | | - wait: true |
56 | | - wait_sleep: 10 |
57 | | - wait_timeout: 360 |
58 | | - loop: |
59 | | - - kind: Deployment |
60 | | - name: tigera-operator |
61 | | - namespace: tigera-operator |
62 | | - - kind: DaemonSet |
63 | | - name: kube-vip |
64 | | - namespace: kube-system |
65 | | - - kind: Installation |
66 | | - name: default |
67 | | - - kind: CustomResourceDefinition |
68 | | - name: alertmanagerconfigs.monitoring.coreos.com |
69 | | - - kind: CustomResourceDefinition |
70 | | - name: alertmanagers.monitoring.coreos.com |
71 | | - - kind: CustomResourceDefinition |
72 | | - name: podmonitors.monitoring.coreos.com |
73 | | - - kind: CustomResourceDefinition |
74 | | - name: probes.monitoring.coreos.com |
75 | | - - kind: CustomResourceDefinition |
76 | | - name: prometheuses.monitoring.coreos.com |
77 | | - - kind: CustomResourceDefinition |
78 | | - name: prometheusrules.monitoring.coreos.com |
79 | | - - kind: CustomResourceDefinition |
80 | | - name: servicemonitors.monitoring.coreos.com |
81 | | - - kind: CustomResourceDefinition |
82 | | - name: thanosrulers.monitoring.coreos.com |
83 | | - - kind: CustomResourceDefinition |
84 | | - name: scrapeconfigs.monitoring.coreos.com |
85 | | - - kind: CustomResourceDefinition |
86 | | - name: prometheusagents.monitoring.coreos.com |
87 | 53 | when: |
88 | 54 | - k3s_server_manifests_templates | length > 0 |
89 | 55 | or k3s_server_manifests_urls | length > 0 |
90 | 56 | - k3s_control_node is defined |
91 | 57 | - k3s_control_node |
| 58 | + block: |
| 59 | + - name: Wait for custom manifests to rollout |
| 60 | + kubernetes.core.k8s_info: |
| 61 | + kubeconfig: /etc/rancher/k3s/k3s.yaml |
| 62 | + kind: "{{ item.kind }}" |
| 63 | + name: "{{ item.name }}" |
| 64 | + namespace: "{{ item.namespace | default('') }}" |
| 65 | + wait: true |
| 66 | + wait_sleep: 10 |
| 67 | + wait_timeout: 360 |
| 68 | + loop: |
| 69 | + - name: cilium |
| 70 | + kind: HelmChart |
| 71 | + namespace: kube-system |
| 72 | + - name: podmonitors.monitoring.coreos.com |
| 73 | + kind: CustomResourceDefinition |
| 74 | + - name: prometheusrules.monitoring.coreos.com |
| 75 | + kind: CustomResourceDefinition |
| 76 | + - name: servicemonitors.monitoring.coreos.com |
| 77 | + kind: CustomResourceDefinition |
| 78 | + - name: Wait for Cilium to rollout |
| 79 | + kubernetes.core.k8s_info: |
| 80 | + kubeconfig: /etc/rancher/k3s/k3s.yaml |
| 81 | + kind: Job |
| 82 | + name: helm-install-cilium |
| 83 | + namespace: kube-system |
| 84 | + wait: true |
| 85 | + wait_condition: |
| 86 | + type: Complete |
| 87 | + status: true |
| 88 | + wait_timeout: 360 |
| 89 | + - name: Patch the Cilium HelmChart to unmanage it |
| 90 | + kubernetes.core.k8s_json_patch: |
| 91 | + kubeconfig: /etc/rancher/k3s/k3s.yaml |
| 92 | + name: cilium |
| 93 | + kind: HelmChart |
| 94 | + namespace: kube-system |
| 95 | + patch: |
| 96 | + - op: add |
| 97 | + path: /metadata/annotations/helmcharts.helm.cattle.io~1unmanaged |
| 98 | + value: "true" |
| 99 | + - name: Remove the Cilium HelmChart CR |
| 100 | + kubernetes.core.k8s: |
| 101 | + kubeconfig: /etc/rancher/k3s/k3s.yaml |
| 102 | + name: cilium |
| 103 | + kind: HelmChart |
| 104 | + namespace: kube-system |
| 105 | + state: absent |
92 | 106 |
93 | 107 | # NOTE |
94 | | - # Cleaning up the manifests from the /var/lib/rancher/k3s/server/manifests directory |
| 108 | + # Cleaning up certain manifests from the /var/lib/rancher/k3s/server/manifests directory |
95 | 109 | # is needed because k3s has an awesome "feature" of always re-deploying them when the k3s
96 | 110 | # service is restarted. Removing the files does not uninstall the manifests from your cluster.
97 | 111 |
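In the JSON patch above, "~1" is the JSON-pointer escape for "/", so the path targets the helmcharts.helm.cattle.io/unmanaged annotation; once it is set, deleting the HelmChart CR no longer triggers the chart's uninstall job, and the deployed Cilium release keeps running. A minimal sketch of follow-up checks that assert the handover state (the task names and the "cilium" DaemonSet name are assumptions, not part of this commit):

    - name: Verify the Cilium HelmChart CR is gone
      run_once: true
      kubernetes.core.k8s_info:
        kubeconfig: /etc/rancher/k3s/k3s.yaml
        api_version: helm.cattle.io/v1
        kind: HelmChart
        name: cilium
        namespace: kube-system
      register: cilium_helmchart
      failed_when: cilium_helmchart.resources | length > 0

    - name: Verify the deployed Cilium agents are still ready
      run_once: true
      kubernetes.core.k8s_info:
        kubeconfig: /etc/rancher/k3s/k3s.yaml
        kind: DaemonSet
        name: cilium  # assumed default name of the DaemonSet installed by the chart
        namespace: kube-system
        wait: true
        wait_sleep: 10
        wait_timeout: 360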
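On the Flux side, taking over amounts to a HelmRelease whose release name and namespace match the existing install, so helm-controller upgrades the release it finds instead of installing a second copy. A rough sketch, in which the API version, HelmRepository name, and namespaces are assumptions about the Flux setup rather than anything taken from this repo:

    apiVersion: helm.toolkit.fluxcd.io/v2beta1
    kind: HelmRelease
    metadata:
      name: cilium
      namespace: kube-system   # must match the namespace of the existing release
    spec:
      interval: 30m
      releaseName: cilium      # must match the release installed by the HelmChart
      chart:
        spec:
          chart: cilium
          sourceRef:
            kind: HelmRepository
            name: cilium       # assumed to point at https://helm.cilium.io
            namespace: flux-system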
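A cleanup task of the kind the NOTE describes could be as simple as the sketch below; the filename is hypothetical, since the actual manifest names are not shown in this hunk:

    - name: Remove deployed manifest files so k3s does not re-apply them on restart
      ansible.builtin.file:
        path: "/var/lib/rancher/k3s/server/manifests/{{ item }}"
        state: absent
      loop:
        - cilium.yaml  # hypothetical filename; deleting the file does not uninstall Cilium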