
Commit 361f9f5

Merge branch 'main' of github.com:jLemmings/ansible-role-rke2

Author: Joshua Hemmings
Parents: ea46fc8 + 768771c

10 files changed: 56 additions, 27 deletions


README.md (8 additions, 1 deletion)

````diff
@@ -90,7 +90,7 @@ rke2_kubevip_ipvs_lb_enable: false
 # Enable layer 4 load balancing for control plane using IPVS kernel module
 # Must use kube-vip version 0.4.0 or later

-rke2_kubevip_service_election_enable: false
+rke2_kubevip_service_election_enable: true
 # By default ARP mode provides a HA implementation of a VIP (your service IP address) which will receive traffic on the kube-vip leader.
 # To circumvent this kube-vip has implemented a new function which is "leader election per service",
 # instead of one node becoming the leader for all services an election is held across all kube-vip instances and the leader from that election becomes the holder of that service. Ultimately,
@@ -316,6 +316,13 @@ rke2_debug: false
 # (Optional) Customize default kubelet arguments
 # rke2_kubelet_arg:
 #   - "--system-reserved=cpu=100m,memory=100Mi"
+
+# (Optional) Customize default kube-proxy arguments
+# rke2_kube_proxy_arg:
+#   - "proxy-mode=ipvs"
+
+# The value for the node-name configuration item
+rke2_node_name: "{{ inventory_hostname }}"
 ```

 ## Inventory file example
````
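For orientation, the two variables added to the README compose like any other role default. A minimal sketch of a play overriding both; the role name `lablabs.rke2` and the `rke2_cluster` group are illustrative assumptions, not taken from this commit:

```yaml
# site.yml (illustrative)
- hosts: rke2_cluster
  become: true
  vars:
    # New in this commit: extra flags rendered into config.yaml as kube-proxy-arg.
    rke2_kube_proxy_arg:
      - "proxy-mode=ipvs"
    # New in this commit: the node name RKE2 registers with; defaults to
    # inventory_hostname, shown here trimmed to the short hostname.
    rke2_node_name: "{{ inventory_hostname_short }}"
  roles:
    - role: lablabs.rke2
```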

defaults/main.yml (8 additions, 1 deletion)

```diff
@@ -51,7 +51,7 @@ rke2_kubevip_ipvs_lb_enable: false
 # Enable layer 4 load balancing for control plane using IPVS kernel module
 # Must use kube-vip version 0.4.0 or later

-rke2_kubevip_service_election_enable: false
+rke2_kubevip_service_election_enable: true
 # By default ARP mode provides a HA implementation of a VIP (your service IP address) which will receive traffic on the kube-vip leader.
 # To circumvent this kube-vip has implemented a new function which is "leader election per service",
 # instead of one node becoming the leader for all services an election is held across all kube-vip instances and the leader from that election becomes the holder of that service. Ultimately,
@@ -277,3 +277,10 @@ rke2_debug: false
 # (Optional) Customize default kubelet arguments
 # rke2_kubelet_arg:
 #   - "--system-reserved=cpu=100m,memory=100Mi"
+
+# (Optional) Customize default kube-proxy arguments
+# rke2_kube_proxy_arg:
+#   - "proxy-mode=ipvs"
+
+# The value for the node-name configuration item
+rke2_node_name: "{{ inventory_hostname }}"
```
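The same hunks land in `defaults/main.yml`, and the first one flips a shipped default: `rke2_kubevip_service_election_enable` now defaults to `true`, so per-service leader election is on out of the box. Deployments that depended on the old single-leader ARP behavior can pin the previous value; the file placement here is illustrative:

```yaml
# group_vars/all.yml (illustrative placement)
# Restore the pre-commit behavior: one kube-vip leader holds every service VIP.
rke2_kubevip_service_election_enable: false
```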

tasks/change_config.yml (1 addition, 1 deletion)

```diff
@@ -1,4 +1,4 @@
-- name: Restart RKE2 service on {{ inventory_hostname }}
+- name: Restart RKE2 service on {{ rke2_node_name }}
   ansible.builtin.service:
     name: "{{ rke2_service_name }}"
     state: restarted
```
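The rename is cosmetic here, but it relies on `rke2_node_name` resolving on every host. Since it defaults to `inventory_hostname`, nothing changes unless a user overrides it, for example to decouple Kubernetes node names from Ansible inventory names. A sketch of a YAML inventory doing that; all names and addresses are made up:

```yaml
# inventory.yml (illustrative)
all:
  children:
    masters:
      hosts:
        ansible-node-a:
          ansible_host: 192.0.2.10
          rke2_node_name: cp-01   # the name kubectl will see, per this commit
```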

tasks/first_server.yml (5 additions, 3 deletions)

```diff
@@ -104,7 +104,7 @@

 - name: Wait for the first server be ready - no CNI
   ansible.builtin.shell: |
-    {{ rke2_data_path }}/bin/kubectl --kubeconfig /etc/rancher/rke2/rke2.yaml get node "{{ inventory_hostname }}" -o jsonpath='{range .status.conditions[*]}{.message}{"\n"}{end}'
+    {{ rke2_data_path }}/bin/kubectl --kubeconfig /etc/rancher/rke2/rke2.yaml get node "{{ rke2_node_name }}" -o jsonpath='{range .status.conditions[*]}{.message}{"\n"}{end}'
   args:
     executable: /bin/bash
   changed_when: false
@@ -121,7 +121,7 @@
 - name: Wait for the first server be ready - with CNI
   ansible.builtin.shell: |
     set -o pipefail
-    {{ rke2_data_path }}/bin/kubectl --kubeconfig /etc/rancher/rke2/rke2.yaml get nodes | grep "{{ inventory_hostname }}"
+    {{ rke2_data_path }}/bin/kubectl --kubeconfig /etc/rancher/rke2/rke2.yaml get nodes | grep "{{ rke2_node_name }}"
   args:
     executable: /bin/bash
   changed_when: false
@@ -145,7 +145,9 @@
 - name: Set an Active Server variable
   ansible.builtin.set_fact:
     active_server: "{{ inventory_hostname }}"
-  run_once: true
+  delegate_to: "{{ item }}"
+  delegate_facts: true
+  loop: "{{ groups[rke2_cluster_group_name] }}"

 - name: Get all nodes
   ansible.builtin.shell: |
```
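The `set_fact` change is the subtle one. With `run_once: true`, the fact was written once and relied on Ansible's implicit propagation; the new form loops over the whole cluster group and uses `delegate_facts: true` to write `active_server` explicitly into each host's facts. A self-contained sketch of the pattern; the play and group names are illustrative, and `run_once` is added here only to keep the demo to a single writer:

```yaml
# delegate-facts-demo.yml (illustrative)
- hosts: cluster
  gather_facts: false
  tasks:
    - name: Record this host as active_server on every host in the group
      ansible.builtin.set_fact:
        active_server: "{{ inventory_hostname }}"
      delegate_to: "{{ item }}"
      delegate_facts: true      # write into the delegate's facts, not our own
      loop: "{{ groups['cluster'] }}"
      run_once: true            # single writer keeps the value deterministic

    - name: Every host now resolves the same value
      ansible.builtin.debug:
        var: active_server
```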

tasks/remaining_nodes.yml (6 additions, 6 deletions)

```diff
@@ -73,9 +73,9 @@
   retries: 100
   delay: 15
   loop: "{{ groups[rke2_cluster_group_name] }}"
-  delegate_to: "{{ active_server | default(groups[rke2_servers_group_name].0) }}"
-  run_once: true
-  when: rke2_cni == 'none'
+  when:
+    - rke2_cni == 'none'
+    - inventory_hostname == active_server or inventory_hostname == groups[rke2_servers_group_name].0

 - name: Wait for remaining nodes to be ready - with CNI
   ansible.builtin.shell: |
@@ -89,6 +89,6 @@
     "groups[rke2_cluster_group_name] | length == all_ready_nodes.stdout | int"
   retries: 100
   delay: 15
-  delegate_to: "{{ active_server | default(groups[rke2_servers_group_name].0) }}"
-  run_once: true
-  when: rke2_cni != 'none'
+  when:
+    - rke2_cni != 'none'
+    - inventory_hostname == active_server or inventory_hostname == groups[rke2_servers_group_name].0
```
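Both hunks apply the same refactor: rather than delegating a `run_once` task to the active server, the task now runs in each host's own context and a `when` guard skips every host except the active server (or the first server as a fallback). The guard assumes `active_server` is defined, which the `delegate_facts` change above is presumably meant to guarantee; a standalone version would hedge with `default`, roughly:

```yaml
# Illustrative guard, equivalent in spirit to the hunks above.
- name: Run a check from exactly one server
  ansible.builtin.command: /bin/true   # stand-in for the real kubectl check
  changed_when: false
  when: inventory_hostname == active_server | default(groups['masters'].0)
```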

tasks/rolling_restart.yml (6 additions, 6 deletions)

```diff
@@ -1,12 +1,12 @@
 ---

-- name: Cordon and Drain the node {{ inventory_hostname }}
+- name: Cordon and Drain the node {{ rke2_node_name }}
   ansible.builtin.shell: |
     set -o pipefail
     {{ rke2_data_path }}/bin/kubectl --kubeconfig /etc/rancher/rke2/rke2.yaml \
-    cordon "{{ inventory_hostname }}" && \
+    cordon "{{ rke2_node_name }}" && \
     {{ rke2_data_path }}/bin/kubectl --kubeconfig /etc/rancher/rke2/rke2.yaml \
-    drain "{{ inventory_hostname }}" --ignore-daemonsets --delete-emptydir-data
+    drain "{{ rke2_node_name }}" --ignore-daemonsets --delete-emptydir-data
   args:
     executable: /bin/bash
   register: drain
@@ -19,7 +19,7 @@
   run_once: true
   when: rke2_drain_node_during_upgrade

-- name: Restart RKE2 service on {{ inventory_hostname }}
+- name: Restart RKE2 service on {{ rke2_node_name }}
   ansible.builtin.service:
     name: "{{ rke2_service_name }}"
     state: restarted
@@ -40,11 +40,11 @@
   delegate_to: "{{ active_server | default(groups[rke2_servers_group_name].0) }}"
   run_once: true

-- name: Uncordon the node {{ inventory_hostname }}
+- name: Uncordon the node {{ rke2_node_name }}
   ansible.builtin.shell: |
     set -o pipefail
     {{ rke2_data_path }}/bin/kubectl --kubeconfig /etc/rancher/rke2/rke2.yaml \
-    uncordon "{{ inventory_hostname }}"
+    uncordon "{{ rke2_node_name }}"
   args:
     executable: /bin/bash
   changed_when: false
```
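One detail visible in the context lines: the cordon/drain step only fires when `rke2_drain_node_during_upgrade` is truthy, so that variable gates whether a rolling restart evicts workloads first. Enabling it is a one-line override; the file placement is illustrative:

```yaml
# group_vars/all.yml (illustrative placement)
rke2_drain_node_during_upgrade: true   # drain each node before restarting RKE2
```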

tasks/standalone.yml (2 additions, 2 deletions)

```diff
@@ -38,7 +38,7 @@

 - name: Wait for the first server be ready - no CNI
   ansible.builtin.shell: |
-    {{ rke2_data_path }}/bin/kubectl --kubeconfig /etc/rancher/rke2/rke2.yaml get node "{{ inventory_hostname }}" -o jsonpath='{range .status.conditions[*]}{.message}{"\n"}{end}'
+    {{ rke2_data_path }}/bin/kubectl --kubeconfig /etc/rancher/rke2/rke2.yaml get node "{{ rke2_node_name }}" -o jsonpath='{range .status.conditions[*]}{.message}{"\n"}{end}'
   args:
     executable: /bin/bash
   changed_when: false
@@ -55,7 +55,7 @@
 - name: Wait for the first server be ready - with CNI
   ansible.builtin.shell: |
     set -o pipefail
-    {{ rke2_data_path }}/bin/kubectl --kubeconfig /etc/rancher/rke2/rke2.yaml get nodes | grep "{{ inventory_hostname }}"
+    {{ rke2_data_path }}/bin/kubectl --kubeconfig /etc/rancher/rke2/rke2.yaml get nodes | grep "{{ rke2_node_name }}"
   args:
     executable: /bin/bash
   changed_when: false
```

tasks/summary.yml (1 addition, 4 deletions)

```diff
@@ -5,10 +5,9 @@
     src: /etc/rancher/rke2/rke2.yaml
     dest: "{{ rke2_download_kubeconf_path }}/{{ rke2_download_kubeconf_file_name }}"
     flat: yes
-  delegate_to: "{{ groups[rke2_servers_group_name].0 }}"
-  run_once: true
   when:
     - rke2_download_kubeconf | bool
+    - inventory_hostname == groups[rke2_servers_group_name].0

 - name: Replace loopback IP by master server IP
   ansible.builtin.replace:
@@ -31,11 +30,9 @@
   args:
     executable: /bin/bash
   changed_when: false
-  run_once: true
   retries: 5
   register: nodes_summary

 - name: K8s nodes state
   ansible.builtin.debug:
     var: nodes_summary.stdout_lines
-  run_once: true
```

templates/config.yaml.j2 (7 additions, 1 deletion)

```diff
@@ -39,7 +39,7 @@ node-label:
 {% endfor %}
 {% endif %}
 snapshotter: {{ rke2_snapshooter }}
-node-name: {{ inventory_hostname }}
+node-name: {{ rke2_node_name }}
 {% if ( disable_kube_proxy | bool ) %}
 disable-kube-proxy: true
 {% endif %}
@@ -77,6 +77,12 @@ kubelet-arg:
   - {{ argument }}
 {% endfor %}
 {% endif %}
+{% if ( rke2_kube_proxy_arg is defined ) %}
+kube-proxy-arg:
+{% for argument in rke2_kube_proxy_arg %}
+  - {{ argument }}
+{% endfor %}
+{% endif %}
 {% if (rke2_disable_cloud_controller | bool ) %}
 disable-cloud-controller: true
 cloud-provider-name: "{{ rke2_cloud_provider_name }}"
```
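The new template block mirrors the existing `kubelet-arg` handling: it emits nothing unless `rke2_kube_proxy_arg` is defined. With the README's example value, the rendered fragment of `/etc/rancher/rke2/config.yaml` would be:

```yaml
# Rendered when rke2_kube_proxy_arg is ["proxy-mode=ipvs"]
kube-proxy-arg:
  - proxy-mode=ipvs
```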

templates/kube-vip/kube-vip.yml.j2 (12 additions, 2 deletions)

```diff
@@ -49,8 +49,6 @@ spec:
         - name: svc_enable
           value: "{{ rke2_kubevip_svc_enable }}"
         - name: svc_election
-          value: "true"
-        - name: enableServicesElection
           value: "{{ rke2_kubevip_service_election_enable }}"
         - name: vip_leaderelection
           value: "true"
@@ -83,11 +81,23 @@ spec:
             add:
             - NET_ADMIN
             - NET_RAW
+        volumeMounts:
+        - mountPath: /etc/kubernetes/admin.conf
+          name: kubeconfig
+      hostAliases:
+      - hostnames:
+        - kubernetes
+        ip: 127.0.0.1
       hostNetwork: true
       serviceAccountName: kube-vip
       tolerations:
       - effect: NoSchedule
         operator: Exists
       - effect: NoExecute
         operator: Exists
+      volumes:
+      - hostPath:
+          path: /etc/rancher/rke2/rke2.yaml
+          type: File
+        name: kubeconfig
   updateStrategy: {}
```
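Two separate fixes land in this template. First, the env list carried both a hard-coded `svc_election: "true"` and a templated `enableServicesElection`; they collapse into a single templated `svc_election`, so the role variable actually takes effect. Second, kube-vip gets API access independent of in-cluster service-account wiring: RKE2's admin kubeconfig (`/etc/rancher/rke2/rke2.yaml`) is mounted at `/etc/kubernetes/admin.conf`, the path kube-vip's upstream static-pod examples use, and a `hostAliases` entry maps the `kubernetes` name to `127.0.0.1`, matching kube-vip's upstream k3s/RKE2 examples. An illustrative excerpt of how the mount and volume pair by `name: kubeconfig` in the rendered pod spec (other fields elided):

```yaml
# Rendered excerpt (illustrative, not the full manifest)
      containers:
      - name: kube-vip      # container name assumed; other fields elided
        volumeMounts:
        - mountPath: /etc/kubernetes/admin.conf   # where kube-vip looks for a kubeconfig
          name: kubeconfig
      hostAliases:
      - hostnames:
        - kubernetes        # resolve the kubernetes name to the local host
        ip: 127.0.0.1
      volumes:
      - hostPath:
          path: /etc/rancher/rke2/rke2.yaml       # RKE2 admin kubeconfig on the host
          type: File
        name: kubeconfig
```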
