Skip to content

Commit 58006ae

Browse files
robo-caphyder
authored and committed
Upgrade cilium extension to 1.16
1 parent 98b246f commit 58006ae

File tree

10 files changed

+161
-67
lines changed

10 files changed

+161
-67
lines changed

docs/src/guide/cluster_addons.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
# Cluster Add-ons
22

3-
With this module to manage both essential and optional add-ons on enhanced OKE clusters.
3+
Use this module to manage both essential and optional add-ons on **enhanced** OKE clusters.
44

55
This module provides the option to remove [Essential addons](https://docs.oracle.com/en-us/iaas/Content/ContEng/Tasks/contengintroducingclusteraddons.htm#contengintroducingclusteraddons__section-essential-addons) and to manage, both essential & [optional addons](https://docs.oracle.com/en-us/iaas/Content/ContEng/Tasks/contengintroducingclusteraddons.htm#contengintroducingclusteraddons__section-optional-addons).
66

docs/src/guide/extensions_networking.md

Lines changed: 85 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -23,6 +23,91 @@
2323
{{#include ../../../examples/extensions/vars-extensions-cilium.auto.tfvars:4:}}
2424
```
2525

26+
Cilium is an eBPF-based CNI for Kubernetes that can be configured on OKE clusters.
27+
28+
The OKE cluster should be initially configured to run **flannel**.
29+
30+
On **enhanced** clusters we can use the cluster-addons module to remove the flannel extension and, optionally, kube-proxy at cluster creation.
31+
32+
33+
```
34+
cluster_addons_to_remove = {
35+
Flannel = {
36+
remove_k8s_resources = true
37+
},
38+
KubeProxy = {
39+
remove_k8s_resources = true
40+
}
41+
}
42+
```
43+
44+
If you want to use cilium as [kube-proxy replacement](https://docs.cilium.io/en/stable/network/kubernetes/kubeproxy-free/), you can use the following helm_values:
45+
46+
```
47+
cilium_helm_values = {
48+
kubeProxyReplacement = true
49+
}
50+
```
51+
52+
For the basic clusters you can add the following label to the worker nodes to prevent flannel pods from being scheduled:
53+
54+
```
55+
oci.oraclecloud.com/custom-k8s-networking=true
56+
```
57+
58+
If you want to override any of the default values (listed below) you can use the `cilium_helm_values` variable:
59+
60+
```
61+
"annotateK8sNode": true
62+
"cluster":
63+
"id": 1
64+
"name": "oke-${var.state_id}"
65+
"clustermesh":
66+
"apiserver":
67+
"kvstoremesh":
68+
"enabled": false
69+
"useAPIServer": false
70+
"cni":
71+
"exclusive": true
72+
"install": true
73+
"hubble":
74+
"metrics":
75+
"dashboards":
76+
"enabled": false
77+
"relay":
78+
"enabled": true
79+
"ui":
80+
"enabled": true
81+
"installNoConntrackIptablesRules": false
82+
"ipam":
83+
"mode": "kubernetes"
84+
"k8s":
85+
"requireIPv4PodCIDR": true
86+
"k8sServiceHost": "${var.cluster_private_endpoint}"
87+
"k8sServicePort": "6443"
88+
"kubeProxyReplacement": false
89+
"operator":
90+
"prometheus":
91+
"enabled": false
92+
"pmtuDiscovery":
93+
"enabled": true
94+
"rollOutCiliumPods": true
95+
"tunnelProtocol": "vxlan"
96+
```
97+
98+
99+
**Notes:**
100+
1. Tested with OKE version `v1.29.1` and the worker nodes running: `Oracle-Linux-8.9-2024.05.29-0-OKE-1.29.1-707`.
101+
102+
2. In case the `hubble-relay` and `hubble-ui` pods fail to start, run the following commands:
103+
104+
```
105+
kubectl delete pod --namespace kube-system -l k8s-app=kube-dns
106+
kubectl delete pod --namespace kube-system -l k8s-app=hubble-relay
107+
kubectl delete pod --namespace kube-system -l k8s-app=hubble-ui
108+
kubectl delete pod --namespace kube-system -l k8s-app=kube-dns-autoscaler
109+
```
110+
26111
### References
27112
* [cilium.io](https://cilium.io)
28113

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,9 +1,9 @@
1-
# Copyright (c) 2017, 2023 Oracle Corporation and/or its affiliates.
1+
# Copyright (c) 2017, 2024 Oracle Corporation and/or its affiliates.
22
# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl
33

44
cilium_install = true
55
cilium_reapply = false
6-
cilium_namespace = "network"
7-
cilium_helm_version = "45.2.0"
6+
cilium_namespace = "kube-system"
7+
cilium_helm_version = "1.16.3"
88
cilium_helm_values = {}
99
cilium_helm_values_files = []

module-extensions.tf

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,13 @@
11
# Copyright (c) 2017, 2023 Oracle Corporation and/or its affiliates.
22
# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl
33

4+
locals {
5+
cluster_private_endpoint = ( var.create_cluster ?
6+
coalesce(split(":", lookup(one(module.cluster[*].endpoints), "private_endpoint", ""))...) :
7+
coalesce(split(":", lookup(local.existing_cluster_endpoints, "private_endpoint", ""))...)
8+
)
9+
}
10+
411
module "extensions" {
512
source = "./modules/extensions"
613
depends_on = [ module.network ]
@@ -12,6 +19,7 @@ module "extensions" {
1219
kubernetes_version = var.kubernetes_version
1320
expected_node_count = local.worker_count_expected
1421
worker_pools = one(module.workers[*].worker_pools)
22+
cluster_private_endpoint = local.cluster_private_endpoint
1523

1624
# Bastion/operator connection
1725
ssh_private_key = sensitive(local.ssh_private_key)

module-workers.tf

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -58,7 +58,7 @@ module "workers" {
5858
image_type = var.worker_image_type
5959
kubeproxy_mode = var.kubeproxy_mode
6060
max_pods_per_node = var.max_pods_per_node
61-
node_labels = var.worker_node_labels
61+
node_labels = alltrue([var.cluster_type == "basic", var.cilium_install == true]) ? merge(var.worker_node_labels, {"oci.oraclecloud.com/custom-k8s-networking" = true}) : var.worker_node_labels
6262
node_metadata = var.worker_node_metadata
6363
agent_config = var.agent_config
6464
platform_config = var.platform_config

modules/extensions/cilium.tf

Lines changed: 57 additions & 58 deletions
Original file line numberDiff line numberDiff line change
@@ -2,45 +2,47 @@
22
# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl
33

44
locals {
5-
cilium_helm_crds_file = join("/", [local.yaml_manifest_path, "cilium.crds.yaml"])
6-
cilium_helm_manifest_file = join("/", [local.yaml_manifest_path, "cilium.manifest.yaml"])
7-
cilium_helm_values_file = join("/", [local.yaml_manifest_path, "cilium.values.yaml"])
8-
cilium_net_attach_def_file = join("/", [local.yaml_manifest_path, "cilium.net_attach_def.yaml"])
9-
cilium_veth_config_map_file = join("/", [local.yaml_manifest_path, "cilium.cni_config_map.yaml"])
10-
11-
cilium_helm_crds = one(data.helm_template.cilium[*].crds)
12-
cilium_helm_manifest = one(data.helm_template.cilium[*].manifest)
5+
cilium_helm_crds_file = join("/", [local.yaml_manifest_path, "cilium.crds.yaml"])
6+
cilium_helm_manifest_file = join("/", [local.yaml_manifest_path, "cilium.manifest.yaml"])
7+
cilium_helm_values_file = join("/", [local.yaml_manifest_path, "cilium.values.yaml"])
8+
cilium_helm_values_override_file = join("/", [local.yaml_manifest_path, "cilium.values-override.yaml"])
9+
cilium_net_attach_def_file = join("/", [local.yaml_manifest_path, "cilium.net_attach_def.yaml"])
10+
cilium_veth_config_map_file = join("/", [local.yaml_manifest_path, "cilium.cni_config_map.yaml"])
11+
12+
cilium_helm_crds = one(data.helm_template.cilium[*].crds)
13+
cilium_helm_values_override = one(data.helm_template.cilium[*].values)
14+
15+
cilium_helm_repository = "https://helm.cilium.io"
1316

1417
cilium_vxlan_cni = {
1518
install = true
16-
chainingMode = "none"
1719
exclusive = true # !var.multus_install
1820
}
1921

20-
# TODO Support Flannel w/ generic-veth & tunnel disabled
21-
cilium_tunnel = "vxlan" # var.cni_type == "flannel" ? "disabled" : "vxlan"
22-
cilium_flannel_cni = {
23-
install = true
24-
chainingMode = "generic-veth"
25-
configMap = "cni-configuration"
26-
customConf = var.cni_type == "flannel"
27-
exclusive = !var.multus_install
28-
}
29-
3022
cilium_helm_values = {
3123
annotateK8sNode = true
32-
cluster = { name = "oke-${var.state_id}" }
33-
clustermesh = { useAPIServer = true }
24+
cluster = {
25+
name = "oke-${var.state_id}"
26+
id = 1
27+
}
28+
clustermesh = {
29+
useAPIServer = false
30+
apiserver = {
31+
kvstoremesh = {
32+
enabled = false
33+
}
34+
}
35+
}
3436
cni = local.cilium_vxlan_cni
35-
containerRuntime = { integration = "crio" }
36-
installIptablesRules = true
3737
installNoConntrackIptablesRules = false
3838
ipam = { mode = "kubernetes" }
39-
ipv4NativeRoutingCIDR = element(var.vcn_cidrs, 0)
40-
kubeProxyReplacement = "disabled"
39+
kubeProxyReplacement = false
40+
k8sServiceHost = var.cluster_private_endpoint
41+
k8sServicePort = "6443"
4142
pmtuDiscovery = { enabled = true }
42-
tunnel = local.cilium_tunnel
43-
43+
rollOutCiliumPods = true
44+
tunnelProtocol = local.cilium_tunnel
45+
4446
hubble = {
4547
metrics = {
4648
dashboards = { enabled = var.prometheus_install }
@@ -52,19 +54,9 @@ locals {
5254

5355
k8s = {
5456
requireIPv4PodCIDR = true # wait for Kubernetes to provide the PodCIDR (ipam kubernetes)
55-
enableIPv4Masquerade = true # var.cni_type != "flannel" # masquerade IPv4 traffic leaving the node from endpoints
5657
}
5758

5859
# Prometheus metrics
59-
metrics = {
60-
dashboards = { enabled = var.prometheus_install }
61-
# # serviceMonitor = { enabled = var.prometheus_enabled }
62-
}
63-
64-
prometheus = {
65-
enabled = var.prometheus_install
66-
# serviceMonitor = { enabled = var.prometheus_enabled }
67-
}
6860

6961
operator = {
7062
prometheus = {
@@ -74,6 +66,17 @@ locals {
7466
}
7567
}
7668

69+
# TODO Support Flannel w/ generic-veth & tunnel disabled
70+
cilium_tunnel = "vxlan" # var.cni_type == "flannel" ? "disabled" : "vxlan"
71+
72+
cilium_flannel_cni = {
73+
install = true
74+
chainingMode = "generic-veth"
75+
configMap = "cni-configuration"
76+
customConf = var.cni_type == "flannel"
77+
exclusive = !var.multus_install
78+
}
79+
7780
cilium_net_attach_def_conf = {
7881
cniVersion = "0.3.1"
7982
name = "cilium"
@@ -126,15 +129,16 @@ locals {
126129
data = { "cni-config" = jsonencode(local.cilium_veth_conf) }
127130
}
128131

129-
cilium_net_attach_def_yaml = yamlencode(local.cilium_net_attach_def)
130-
cilium_veth_config_map_yaml = yamlencode(local.cilium_veth_config_map)
131-
cilium_helm_values_yaml = yamlencode(local.cilium_helm_values)
132+
cilium_net_attach_def_yaml = yamlencode(local.cilium_net_attach_def)
133+
cilium_veth_config_map_yaml = yamlencode(local.cilium_veth_config_map)
134+
cilium_helm_values_yaml = yamlencode(merge(local.cilium_helm_values, var.cilium_helm_values))
135+
cilium_helm_values_override_yaml = local.cilium_helm_values_override != null ? join("\n", local.cilium_helm_values_override) : ""
132136
}
133137

134138
data "helm_template" "cilium" {
135139
count = var.cilium_install ? 1 : 0
136140
chart = "cilium"
137-
repository = "https://helm.cilium.io"
141+
repository = local.cilium_helm_repository
138142
version = var.cilium_helm_version
139143
kube_version = var.kubernetes_version
140144

@@ -165,7 +169,7 @@ resource "null_resource" "cilium" {
165169
triggers = {
166170
helm_version = var.cilium_helm_version
167171
crds_md5 = try(md5(join("\n", local.cilium_helm_crds)), null)
168-
manifest_md5 = try(md5(local.cilium_helm_manifest), null)
172+
manifest_md5 = try(md5(local.cilium_helm_values_override_yaml), null)
169173
reapply = var.cilium_reapply ? uuid() : null
170174
}
171175

@@ -190,24 +194,19 @@ resource "null_resource" "cilium" {
190194
}
191195

192196
provisioner "file" {
193-
content = local.cilium_helm_manifest
194-
destination = local.cilium_helm_manifest_file
195-
}
196-
197-
provisioner "file" {
198-
content = local.cilium_helm_values_yaml
199-
destination = local.cilium_helm_values_file
197+
content = local.cilium_helm_values_override_yaml
198+
destination = local.cilium_helm_values_override_file
200199
}
201200

202-
provisioner "file" {
203-
content = local.cilium_net_attach_def_yaml
204-
destination = local.cilium_net_attach_def_file
205-
}
201+
# provisioner "file" {
202+
# content = local.cilium_net_attach_def_yaml
203+
# destination = local.cilium_net_attach_def_file
204+
# }
206205

207-
provisioner "file" {
208-
content = local.cilium_veth_config_map_yaml
209-
destination = local.cilium_veth_config_map_file
210-
}
206+
# provisioner "file" {
207+
# content = local.cilium_veth_config_map_yaml
208+
# destination = local.cilium_veth_config_map_file
209+
# }
211210

212211
provisioner "remote-exec" {
213212
inline = [for c in compact([
@@ -219,7 +218,7 @@ resource "null_resource" "cilium" {
219218
format(local.kubectl_apply_server_ns_file, var.cilium_namespace, local.cilium_helm_crds_file),
220219

221220
# Install full manifest
222-
format(local.kubectl_apply_ns_file, var.cilium_namespace, local.cilium_helm_manifest_file),
221+
format(local.helm_upgrade_install, "cilium", "cilium", local.cilium_helm_repository, var.cilium_helm_version, var.cilium_namespace, local.cilium_helm_values_override_file),
223222

224223
# Install Network Attachment Definition when Multus is enabled
225224
# var.multus_install ? format(local.kubectl_apply_file, local.cilium_net_attach_def_file) : null,

modules/extensions/locals.tf

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -4,11 +4,13 @@
44
locals {
55
yaml_manifest_path = "/home/${var.operator_user}/yaml"
66
kubectl = "set -o pipefail; kubectl"
7+
helm = "set -o pipefail; helm"
78
kubectl_apply_ns_file = "${local.kubectl} apply -n %s -f %s"
89
kubectl_apply_file = "${local.kubectl} apply -f %s"
910
kubectl_apply_server_file = "${local.kubectl} apply --force-conflicts=true --server-side -f %s"
1011
kubectl_apply_server_ns_file = "${local.kubectl} apply -n %s --force-conflicts=true --server-side -f %s"
1112
kubectl_create_missing_ns = "${local.kubectl} create ns %s --dry-run=client -o yaml | kubectl apply -f -"
1213
selector_linux = { "kubernetes.io/os" = "linux" }
1314
output_log = "bash -c \"%s | tee >(systemd-cat -t %s -p info)\""
15+
helm_upgrade_install = "${local.helm} upgrade --install %s %s --repo %s --version %s --namespace %s --create-namespace --skip-crds -f %s"
1416
}

modules/extensions/variables.tf

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -7,6 +7,7 @@ variable "state_id" { type = string }
77
variable "worker_pools" { type = any }
88
variable "kubernetes_version" { type = string }
99
variable "expected_node_count" { type = number }
10+
variable "cluster_private_endpoint" { type = string }
1011

1112
# Connection
1213
variable "bastion_host" { type = string }
@@ -25,7 +26,7 @@ variable "cilium_install" { type = bool }
2526
variable "cilium_reapply" { type = bool }
2627
variable "cilium_namespace" { type = string }
2728
variable "cilium_helm_version" { type = string }
28-
variable "cilium_helm_values" { type = map(string) }
29+
variable "cilium_helm_values" { type = any }
2930
variable "cilium_helm_values_files" { type = list(string) }
3031

3132
# CNI: Multus

modules/workers/locals.tf

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -157,7 +157,6 @@ locals {
157157
{
158158
"oke.oraclecloud.com/tf.module" = "terraform-oci-oke"
159159
"oke.oraclecloud.com/tf.state_id" = var.state_id
160-
# "oke.oraclecloud.com/tf.workspace" = terraform.workspace
161160
"oke.oraclecloud.com/pool.name" = pool_name
162161
"oke.oraclecloud.com/pool.mode" = pool.mode
163162
"oke.oraclecloud.com/cluster_autoscaler" = pool.allow_autoscaler ? "allowed" : "disabled"

variables-extensions.tf

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -22,15 +22,15 @@ variable "cilium_namespace" {
2222
}
2323

2424
variable "cilium_helm_version" {
25-
default = "1.14.4"
25+
default = "1.16.3"
2626
description = "Version of the Helm chart to install. List available releases using `helm search repo [keyword] --versions`."
2727
type = string
2828
}
2929

3030
variable "cilium_helm_values" {
3131
default = {}
3232
description = "Map of individual Helm chart values. See https://registry.terraform.io/providers/hashicorp/helm/latest/docs/data-sources/template."
33-
type = map(string)
33+
type = any
3434
}
3535

3636
variable "cilium_helm_values_files" {

0 commit comments

Comments
 (0)