2 changes: 1 addition & 1 deletion .github/workflows/ci.yml
@@ -4,7 +4,7 @@ name: CI
  pull_request:
  push:
    branches:
      - master
      - '*'
  schedule:
    - cron: "0 4 * * 3"

1 change: 1 addition & 0 deletions .gitignore
@@ -1,5 +1,6 @@
*.retry
*/__pycache__
*.pyc
.ansible
.cache

278 changes: 276 additions & 2 deletions README.md
@@ -27,8 +27,8 @@ kubernetes_packages:
Kubernetes packages to be installed on the server. You can either provide a list of package names, or set `name` and `state` to have more control over whether the package is `present`, `absent`, `latest`, etc.
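
For example, the `name`/`state` form looks like the following sketch (package names shown for illustration; they mirror the role's defaults, but adjust them to your distribution):

```yaml
kubernetes_packages:
  - name: kubelet
    state: present
  - name: kubectl
    state: present
  - name: kubernetes-cni
    state: present
```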

```yaml
kubernetes_version: '1.32'
kubernetes_version_rhel_package: '1.32'
kubernetes_version: '1.33'
kubernetes_version_rhel_package: '1.33'
```

The minor version of Kubernetes to install. The plain `kubernetes_version` is used to pin an apt package version on Debian, and as the Kubernetes version passed into the `kubeadm init` command (see `kubernetes_version_kubeadm`). The `kubernetes_version_rhel_package` variable must be a specific Kubernetes release, and is used to pin the version on Red Hat / CentOS servers.
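
If, as in the role's defaults, the kubeadm version string is derived from `kubernetes_version` (an assumption, since that default is not shown in this diff), the pinning fits together roughly like this:

```yaml
# Assumed default; override with an exact release (e.g. 'v1.33.2') to pin kubeadm precisely.
kubernetes_version_kubeadm: 'stable-{{ kubernetes_version }}'
```
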
@@ -213,6 +213,280 @@ Playbook:
- geerlingguy.kubernetes
```

### IPv6-only cluster (two or more nodes, single control plane) using containerd

Download the default Flannel manifest from https://raw.githubusercontent.com/flannel-io/flannel/master/Documentation/kube-flannel.yml and store it as `files/kube-flannel-config.yaml` next to your Ansible playbook.
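
One way to fetch it is a small one-off play run against localhost (a sketch using `ansible.builtin.get_url`; downloading the file manually works just as well):

```yaml
- hosts: localhost
  connection: local
  gather_facts: false
  tasks:
    - name: Download the upstream kube-flannel manifest into files/.
      ansible.builtin.get_url:
        url: https://raw.githubusercontent.com/flannel-io/flannel/master/Documentation/kube-flannel.yml
        dest: files/kube-flannel-config.yaml
        mode: '0644'
```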

Update the `net-conf.json` property in the `kube-flannel-cfg` ConfigMap so the manifest looks like this:

```yaml
---
kind: Namespace
apiVersion: v1
metadata:
  name: kube-flannel
  labels:
    k8s-app: flannel
    pod-security.kubernetes.io/enforce: privileged
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  labels:
    k8s-app: flannel
  name: flannel
rules:
- apiGroups:
  - ""
  resources:
  - pods
  verbs:
  - get
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - nodes/status
  verbs:
  - patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  labels:
    k8s-app: flannel
  name: flannel
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: flannel
subjects:
- kind: ServiceAccount
  name: flannel
  namespace: kube-flannel
---
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: flannel
  name: flannel
  namespace: kube-flannel
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: kube-flannel-cfg
  namespace: kube-flannel
  labels:
    tier: node
    k8s-app: flannel
    app: flannel
data:
  cni-conf.json: |
    {
      "name": "cbr0",
      "cniVersion": "0.3.1",
      "plugins": [
        {
          "type": "flannel",
          "delegate": {
            "hairpinMode": true,
            "isDefaultGateway": true
          }
        },
        {
          "type": "portmap",
          "capabilities": {
            "portMappings": true
          }
        }
      ]
    }
  net-conf.json: |
    {
      "EnableIPv4": false,
      "EnableIPv6": true,
      "IPv6Network": "fd76:cac2:6150::/56",
      "IPv6Subnet": "fd76:cac2:6150::1/64",
      "EnableNFTables": false,
      "Backend": {
        "Type": "vxlan"
      }
    }
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-flannel-ds
  namespace: kube-flannel
  labels:
    tier: node
    app: flannel
    k8s-app: flannel
spec:
  selector:
    matchLabels:
      app: flannel
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: kubernetes.io/os
                operator: In
                values:
                - linux
      hostNetwork: true
      priorityClassName: system-node-critical
      tolerations:
      - operator: Exists
        effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
      - name: install-cni-plugin
        image: ghcr.io/flannel-io/flannel-cni-plugin:v1.7.1-flannel1
        command:
        - cp
        args:
        - -f
        - /flannel
        - /opt/cni/bin/flannel
        volumeMounts:
        - name: cni-plugin
          mountPath: /opt/cni/bin
      - name: install-cni
        image: ghcr.io/flannel-io/flannel:v0.27.0
        command:
        - cp
        args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conflist
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      containers:
      - name: kube-flannel
        image: ghcr.io/flannel-io/flannel:v0.27.0
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr
        resources:
          requests:
            cpu: "100m"
            memory: "50Mi"
        securityContext:
          privileged: false
          capabilities:
            add: ["NET_ADMIN", "NET_RAW"]
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        - name: EVENT_QUEUE_DEPTH
          value: "5000"
        - name: CONT_WHEN_CACHE_NOT_READY
          value: "false"
        volumeMounts:
        - name: run
          mountPath: /run/flannel
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
        - name: xtables-lock
          mountPath: /run/xtables.lock
      volumes:
      - name: run
        hostPath:
          path: /run/flannel
      - name: cni-plugin
        hostPath:
          path: /opt/cni/bin
      - name: cni
        hostPath:
          path: /etc/cni/net.d
      - name: flannel-cfg
        configMap:
          name: kube-flannel-cfg
      - name: xtables-lock
        hostPath:
          path: /run/xtables.lock
          type: FileOrCreate
```

Control plane inventory vars:

```yaml
kubernetes_role: "control_plane"
```

Node(s) inventory vars:

```yaml
kubernetes_role: "node"
```

Playbook:

```yaml

- hosts: all

  vars:
    kubernetes_flannel_manifest_file: '/etc/kubernetes/kube-flannel-config.yaml'

  tasks:
    - name: Create the directory for the kube-flannel manifest file
      ansible.builtin.file:
        path: "{{ kubernetes_flannel_manifest_file | dirname }}"
        state: directory

    - name: Copy 'kube-flannel-config.yaml'
      ansible.builtin.copy:
        src: files/kube-flannel-config.yaml
        dest: "{{ kubernetes_flannel_manifest_file }}"
        owner: root
        group: root
        mode: u+rw

- hosts: all

  vars:
    containerd_config_cgroup_driver_systemd: true
    kubernetes_allow_pods_on_control_plane: true
    kubernetes_flannel_manifest_file: '/etc/kubernetes/kube-flannel-config.yaml'
    kubernetes_pod_network:
      cni: 'flannel'
      cidr: 'fd76:cac2:6150::/56'
      serviceCidr: 'fd76:cac2:6150:1::/112'

  roles:
    - geerlingguy.containerd
    - geerlingguy.kubernetes
```

Then, log into the Kubernetes control plane and run `kubectl get nodes` as root; you should see a list of all the servers.

## License
5 changes: 4 additions & 1 deletion defaults/main.yml
@@ -24,9 +24,11 @@ kubernetes_pod_network:
  # Flannel CNI.
  cni: 'flannel'
  cidr: '10.244.0.0/16'
  serviceCidr: '10.96.0.0/12'
  # Calico CNI.
  # cni: 'calico'
  # cidr: '192.168.0.0/16'
  # serviceCidr: '10.96.0.0/12'

kubernetes_kubeadm_kubelet_config_file_path: '/etc/kubernetes/kubeadm-kubelet-config.yaml'

@@ -39,7 +41,7 @@ kubernetes_config_kubelet_configuration:

kubernetes_config_init_configuration:
  localAPIEndpoint:
    advertiseAddress: "{{ kubernetes_apiserver_advertise_address | default(ansible_default_ipv4.address, true) }}"
    advertiseAddress: "{{ kubernetes_apiserver_advertise_address | default(ansible_default_ipv4.address, true) | default(ansible_default_ipv6.address, true) }}"
  # if you use the next lines, remove the command line argument below
  # nodeRegistration:
  #   ignorePreflightErrors:
@@ -48,6 +50,7 @@ kubernetes_config_init_configuration:
kubernetes_config_cluster_configuration:
  networking:
    podSubnet: "{{ kubernetes_pod_network.cidr }}"
    serviceSubnet: "{{ kubernetes_pod_network.serviceCidr }}"
  kubernetesVersion: "{{ kubernetes_version_kubeadm }}"

kubernetes_config_kube_proxy_configuration: {}
1 change: 1 addition & 0 deletions molecule/default/calico.yml
@@ -7,6 +7,7 @@
kubernetes_pod_network:
  cni: 'calico'
  cidr: '192.168.0.0/16'
  serviceCidr: '10.96.0.0/12'

# Allow swap in test environments (hard to control in some envs).
kubernetes_config_kubelet_configuration:
13 changes: 12 additions & 1 deletion tasks/main.yml
@@ -47,7 +47,18 @@
    kubernetes_join_command: >
      {{ kubernetes_join_command_result.stdout }}
      {{ kubernetes_join_command_extra_opts }}
  when: kubernetes_join_command_result.stdout is defined
  when: (kubernetes_join_command_result.stdout is defined) and (kubernetes_ignore_preflight_errors is not defined)
  delegate_to: "{{ item }}"
  delegate_facts: true
  with_items: "{{ groups['all'] }}"

- name: Set the kubeadm join command with ignore preflight errors globally.
  set_fact:
    kubernetes_join_command: >
      {{ kubernetes_join_command_result.stdout }}
      --ignore-preflight-errors={{ kubernetes_ignore_preflight_errors }}
      {{ kubernetes_join_command_extra_opts }}
  when: (kubernetes_join_command_result.stdout is defined) and (kubernetes_ignore_preflight_errors is defined)
  delegate_to: "{{ item }}"
  delegate_facts: true
  with_items: "{{ groups['all'] }}"
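
For reference, the new branch is selected simply by defining a host or group variable such as the following (the value is illustrative; list whichever preflight checks you actually need kubeadm to skip):

```yaml
kubernetes_ignore_preflight_errors: 'Swap,NumCPU'
```
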
9 changes: 9 additions & 0 deletions tasks/sysctl-setup.yml
@@ -7,6 +7,15 @@
    ansible_distribution != 'Debian'
    or ansible_distribution_major_version | int < 10

- name: Activate forwarding
  sysctl:
    name: "{{ item }}"
    value: '1'
    state: present
  loop:
    - net.ipv4.ip_forward
    - net.ipv6.conf.all.forwarding

- name: Ensure module br_netfilter is loaded
  modprobe:
    name: br_netfilter