-
Notifications
You must be signed in to change notification settings - Fork 2
Expand file tree
/
Copy pathcilium-values.yaml
More file actions
96 lines (91 loc) · 3.38 KB
/
cilium-values.yaml
File metadata and controls
96 lines (91 loc) · 3.38 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
# helm repo add cilium https://helm.cilium.io/
# helm pull cilium/cilium --version 1.18.5
# kubectl delete node $(kubectl get nodes | grep NotReady | awk '{print $1;}')
# k delete ciliumnodes --all
# k api-resources | grep -i cilium | cut -d' ' -f1 | xargs -I% kubectl delete % --all
# ./label.sh
# cat nodes.txt | xargs -I% bash -c 'kubectl annotate node % cilium.io/bgp-virtual-router.65551="router-id=192.168.255.$(shuf -i 0-255 -n 1)" --overwrite'
# kgn -o custom-columns=Node:.metadata.name,Id:".metadata.annotations.cilium\.io/bgp-virtual-router\.65551"
# k apply -f ./cilium-bgp.yaml
# helm upgrade --install cilium ./cilium-1.18.5.tgz --namespace kube-system --values cilium-values.yaml
#
# helm upgrade --install cilium cilium/cilium --namespace kube-system --values cilium-values.yaml --version 1.18.5
cluster:
  name: kubernetes
# No kube-proxy
kubeProxyReplacement: true
# Enables healthz endpoint
kubeProxyReplacementHealthzBindAddr: "[::]:10256"
# Required to bypass the non-working default APIserver service without kube-proxy
k8sServiceHost: fat-controller.systems.richtman.au
k8sServicePort: 6443
# Set our networking stack
ipv4:
  enabled: false
ipv6:
  enabled: true
# -- Roll out cilium agent pods automatically when configmap is updated.
rollOutCiliumPods: true
l2NeighDiscovery:
  enabled: true
# -- This feature set enables virtual BGP routers to be created via
# CiliumBGPPeeringPolicy CRDs.
bgpControlPlane:
  # -- Enables the BGP control plane.
  enabled: true
# This makes kubectl logs and execs default nicely
podAnnotations:
  kubectl.kubernetes.io/default-container: cilium-agent
# Could not modify IPv6 gro_max_size and gso_max_size
enableIPv6BIGTCP: false
enableIPv6Masquerade: true
# Disabling for now as it fails tests
l7Proxy: false
# Ref: https://docs.cilium.io/en/latest/network/concepts/masquerading/#ebpf-based
bpf:
  # Use BPF instead of iptables masquerading
  # Masquerading is off for anything in the IPv6 native routing CIDR though
  # which means it's not SNATing pod outbound traffic.
  masquerade: true
  # Allow cluster external access to ClusterIP services.
  lbExternalClusterIP: true
  # TODO: explore
  # dataPathMode: "netkit-l2"
autoDirectNodeRoutes: true
# WIP: maybe our ticket
# directRoutingSkipUnreachable: true
# Put cilium info on the k8s node resource - why not?
annotateK8sNode: true
# https://github.com/cilium/cilium/issues/21538
# https://github.com/cilium/cilium/issues/35822
# https://github.com/cilium/cilium/issues/17240
routingMode: native
externalIPs:
  enabled: true
# ipv6NativeRoutingCIDR: 2403:581e:ab78::/48
ipv6NativeRoutingCIDR: 2403:581e:ab78::/64
ipam:
  # mode: "cluster-pool" # Should be unnecessary
  operator:
    # Unable to init cluster-pool allocator" error="unable to initialize IPv6 allocator: New CIDR set failed; the node CIDR size is too big" subsys=cilium-operator-generic
    # Ref: https://github.com/cilium/cilium/issues/20756
    # clusterPoolIPv6PodCIDRList: ["2403:581e:ab78::/64"]
    clusterPoolIPv6PodCIDRList: ["2403:581e:ab78:1::/64"]
    # Ref: https://github.com/cilium/cilium/issues/20756#issuecomment-2450328186
    clusterPoolIPv6MaskSize: 80
#region Development
envoy:
  enabled: false
# Fully uninstall
# cni:
#   uninstall: true
# debug:
#   enabled: true
operator:
  rolloutPods: true # Should update when configMap changes, not sure if they hot-reload anyway
  # replicas: 2
hubble:
  enabled: true
  # relay:
  #   enabled: true
#endregion