Skip to content

Commit 6455529

Browse files
committed
Make it clear that only EKS works and prevent unmanaged clusters from using ipv6
1 parent 4c66c95 commit 6455529

File tree

6 files changed

+60
-198
lines changed

6 files changed

+60
-198
lines changed

api/v1beta1/awscluster_webhook.go

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -56,6 +56,7 @@ func (r *AWSCluster) ValidateCreate() error {
5656
allErrs = append(allErrs, r.validateSSHKeyName()...)
5757
allErrs = append(allErrs, r.Spec.AdditionalTags.Validate()...)
5858
allErrs = append(allErrs, r.Spec.S3Bucket.Validate()...)
59+
allErrs = append(allErrs, r.validateNetwork()...)
5960

6061
return aggregateObjErrors(r.GroupVersionKind().GroupKind(), r.Name, allErrs)
6162
}
@@ -178,3 +179,11 @@ func (r *AWSCluster) Default() {
178179
func (r *AWSCluster) validateSSHKeyName() field.ErrorList {
179180
return validateSSHKeyName(r.Spec.SSHKeyName)
180181
}
182+
183+
func (r *AWSCluster) validateNetwork() field.ErrorList {
184+
var allErrs field.ErrorList
185+
if r.Spec.NetworkSpec.VPC.IsIPv6Enabled() {
186+
allErrs = append(allErrs, field.Invalid(field.NewPath("ipv6"), r.Spec.NetworkSpec.VPC.IPv6, "IPv6 cannot be used with unmanaged clusters at this time."))
187+
}
188+
return allErrs
189+
}

api/v1beta1/awscluster_webhook_test.go

Lines changed: 16 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -222,6 +222,22 @@ func TestAWSCluster_ValidateCreate(t *testing.T) {
222222
},
223223
wantErr: false,
224224
},
225+
{
226+
name: "rejects ipv6",
227+
cluster: &AWSCluster{
228+
Spec: AWSClusterSpec{
229+
NetworkSpec: NetworkSpec{
230+
VPC: VPCSpec{
231+
IPv6: &IPv6{
232+
CidrBlock: "2001:2345:5678::/64",
233+
PoolID: "pool-id",
234+
},
235+
},
236+
},
237+
},
238+
},
239+
wantErr: true,
240+
},
225241
}
226242
for _, tt := range tests {
227243
t.Run(tt.name, func(t *testing.T) {

controlplane/eks/api/v1beta1/awsmanagedcontrolplane_webhook.go

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -388,7 +388,7 @@ func (r *AWSManagedControlPlane) validateNetwork() field.ErrorList {
388388
var allErrs field.ErrorList
389389

390390
if r.Spec.NetworkSpec.VPC.IsIPv6Enabled() && r.Spec.NetworkSpec.VPC.IPv6.CidrBlock != "" && r.Spec.NetworkSpec.VPC.IPv6.PoolID == "" {
391-
poolField := field.NewPath("spec", "networkSpec", "vpc", "poolId")
391+
poolField := field.NewPath("spec", "networkSpec", "vpc", "ipv6", "poolId")
392392
allErrs = append(allErrs, field.Invalid(poolField, r.Spec.NetworkSpec.VPC.IPv6.PoolID, "poolId cannot be empty if cidrBlock is set"))
393393
}
394394

docs/book/src/topics/eks/ipv6-enabled-cluster.md

Lines changed: 1 addition & 176 deletions
Original file line numberDiff line numberDiff line change
@@ -83,179 +83,4 @@ address range of `fc00::/7`.
8383

8484
## Unmanaged Clusters
8585

86-
Now comes the tricky part. If you wish, it's possible to set up IPv6 with unmanaged clusters, however, that requires
87-
a lot of extra manual steps and some extra configuration settings.
88-
89-
_Note_: I DO NOT recommend doing this in production. AWS provides a more robust and easier approach on doing IPv6.
90-
This approach is brittle and has many manual steps that need to be performed in order to get things working.
91-
92-
### Extra Config
93-
94-
The extra configs are on the kubeadm side. These are `node-ip` and `bind-address`. These need to be set as follows:
95-
96-
```yaml
97-
kubeadmConfigSpec:
98-
initConfiguration:
99-
nodeRegistration:
100-
name: '{{ ds.meta_data.local_hostname }}'
101-
kubeletExtraArgs:
102-
cloud-provider: aws
103-
node-ip: '::'
104-
clusterConfiguration:
105-
apiServer:
106-
extraArgs:
107-
cloud-provider: aws
108-
bind-address: '::'
109-
controllerManager:
110-
extraArgs:
111-
cloud-provider: aws
112-
bind-address: '::'
113-
scheduler:
114-
extraArgs:
115-
bind-address: '::'
116-
joinConfiguration:
117-
nodeRegistration:
118-
name: '{{ ds.meta_data.local_hostname }}'
119-
kubeletExtraArgs:
120-
node-ip: '::'
121-
cloud-provider: aws
122-
123-
```
124-
125-
This will tell kubeadm to bind to a specific address type which should be IPv6.
126-
127-
Next, it's pod CIDRs and service CIDRs. This is a bit more tricky. You need to know your IPv6 CIDR beforehand.
128-
Having your own IPv6 pool is most of the time, impractical. But there is a way to get you started up quickly
129-
and with low effort. You can ask CAPA to create the network topology for you with a simple cluster config such
130-
as this one:
131-
132-
```yaml
133-
---
134-
apiVersion: cluster.x-k8s.io/v1beta1
135-
kind: Cluster
136-
metadata:
137-
name: "ipv6-network-topology"
138-
spec:
139-
infrastructureRef:
140-
apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
141-
kind: AWSCluster
142-
name: "ipv6-network-topology"
143-
controlPlaneRef:
144-
kind: KubeadmControlPlane
145-
apiVersion: controlplane.cluster.x-k8s.io/v1beta1
146-
name: "ipv6-network-topology-control-plane"
147-
---
148-
apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
149-
kind: AWSCluster
150-
metadata:
151-
name: "ipv6-network-topology"
152-
spec:
153-
network:
154-
vpc:
155-
ipv6: {}
156-
region: "eu-central-1"
157-
```
158-
159-
This will create a VPC with proper load-balancing and elastic ips and everything. This can be fine-tuned as
160-
desired. This is the bare minimum where CAPA creates the whole topology.
161-
162-
Once this is done, we can acquire the IPv6 CIDR, and we can continue by using the vpc id and subnets in the
163-
unmanaged setting like this:
164-
165-
```yaml
166-
---
167-
apiVersion: cluster.x-k8s.io/v1beta1
168-
kind: Cluster
169-
metadata:
170-
name: "unmanaged-ipv6"
171-
spec:
172-
clusterNetwork:
173-
services:
174-
cidrBlocks: ["192.168.0.0/16", "2a05:d014:852:f::/112"]
175-
pods:
176-
cidrBlocks: ["192.168.0.0/16", "2a05:d014:852:f::/56"]
177-
infrastructureRef:
178-
apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
179-
kind: AWSCluster
180-
name: "unmanaged-ipv6"
181-
controlPlaneRef:
182-
kind: KubeadmControlPlane
183-
apiVersion: controlplane.cluster.x-k8s.io/v1beta1
184-
name: "test-ipv6-unmanaged-2-control-plane"
185-
---
186-
apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
187-
kind: AWSCluster
188-
metadata:
189-
name: "unmanaged-ipv6"
190-
spec:
191-
network:
192-
subnets:
193-
- id: "subnet-0812d970f75867a72"
194-
- id: "subnet-0aa534118f62cab86"
195-
- id: "subnet-0380be0501f2c8bb0"
196-
- id: "subnet-09d083492229f6281"
197-
- id: "subnet-0b12a78a3b2cebdec"
198-
- id: "subnet-0a0b33746595ecc89"
199-
vpc:
200-
id: "vpc-024ae81c0ca3b7209"
201-
ipv6: {}
202-
region: "eu-central-1"
203-
---
204-
```
205-
206-
### Cilium
207-
208-
Since we are on an unmanaged cluster, we need a cni installed. We'll use Cilium as an example. There are two settings
209-
that are needed for cilium to work. (3 really, you also have to set up ipv4 cidr to the proper cidr the VPC provides).
210-
211-
These are:
212-
213-
```
214-
--cluster-pool-ipv6-cidr="2a05:d014:852:f02::/112"
215-
--cluster-pool-ipv6-cidr-size="128"
216-
```
217-
218-
Note that Cilium doesn't allow any CIDR above `112`. So the CIDR you've got from AWS with size `64` needs to be cut down
219-
to `112`.
220-
221-
Once this is done, we can install Cilium into the workload cluster and restart all Pods so they can acquire IPv6
222-
addresses.
223-
224-
### Calico
225-
226-
Another approach is to use Calico. Calico has detailed guides on how to set up IPv6 located [here](https://projectcalico.docs.tigera.io/networking/ipv6) and [here](https://projectcalico.docs.tigera.io/networking/ipv6-control-plane).
227-
228-
You can use CAPA to bootstrap Calico in the following way:
229-
230-
- Create a ClusterResourceSet like this:
231-
```yaml
232-
apiVersion: addons.cluster.x-k8s.io/v1alpha3
233-
kind: ClusterResourceSet
234-
metadata:
235-
name: crs1
236-
namespace: default
237-
spec:
238-
mode: "ApplyOnce"
239-
clusterSelector:
240-
matchLabels:
241-
cni: calico
242-
resources:
243-
- name: db-secret
244-
kind: Secret
245-
- name: calico-addon
246-
kind: ConfigMap
247-
```
248-
- Download the latest Calico manifest and set up the required properties for IPv6 as the guides suggest ( you will already
249-
need to have an IPv6 CIDR )
250-
- Create a config map in the control plane with the following command: `kubectl create configmap calico-addon --from-file=calico.yaml`
251-
- Tag your cluster with the label `cni: calico` so cluster-api can find it and install the cni addon
252-
```yaml
253-
apiVersion: cluster.x-k8s.io/v1beta1
254-
kind: Cluster
255-
metadata:
256-
name: "test-ipv6-unmanaged-2"
257-
labels:
258-
cni: calico
259-
```
260-
- Apply and monitor
261-
- Note that only new pods will get an ipv6 address; existing pods will remain using ipv4
86+
Unmanaged clusters are not supported at this time.

docs/proposal/20220718-ipv6.md

Lines changed: 27 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -1,21 +1,21 @@
11
---
2-
title: Proposal Template
2+
title: IPv6 for EKS
33
authors:
44
- @Skarlso
55
- @nikimanoledaki
66
- @richardcase
77
reviewers:
88
- "@richardcase"
99
creation-date: 2022-04-28
10-
last-updated: 2022-07-19
10+
last-updated: 2022-08-23
1111
status: provisional
1212
---
1313

14-
# IPv6 Support in CAPA
14+
# IPv6 Support in CAPA for EKS
1515

1616
## Table of Contents
1717

18-
- [IPv6 Support in CAPA](#ipv6-support-in-capa)
18+
- [IPv6 Support in CAPA for EKS](#ipv6-support-in-capa-for-eks)
1919
- [Table of Contents](#table-of-contents)
2020
- [Glossary](#glossary)
2121
- [Summary](#summary)
@@ -26,6 +26,7 @@ status: provisional
2626
- [Non-Goals/Future Work](#non-goalsfuture-work)
2727
- [Proposal](#proposal)
2828
- [Plan](#plan)
29+
- [Managed and Unmanaged clusters](#managed-and-unmanaged-clusters)
2930
- [Additions and Configuration changes](#additions-and-configuration-changes)
3031
- [Networking and Subnet Splitting strategies](#networking-and-subnet-splitting-strategies)
3132
- [vpc-cni](#vpc-cni)
@@ -54,9 +55,9 @@ communication between old services is still functioning. Only IPv6 is not suppor
5455

5556
## Summary
5657

57-
This proposal defines how to implement IPv6 for clusters in CAPA. It defines various validations that need to take place
58-
in order to properly inform the user when IPv6 can be used. It defines components which need to be created and set up.
59-
It also details with examples and images how the architecture looks like using IPv6.
58+
This proposal defines how to implement IPv6 for clusters in CAPA for EKS. It defines various validations that need to
59+
take place in order to properly inform the user when IPv6 can be used. It defines components which need to be created
60+
and set up. It also details with examples and images what the architecture looks like using IPv6 in EKS.
6061

6162
## Motivation
6263

@@ -80,7 +81,7 @@ limitations. Now users can run as many pods as their instances CPU and RAM capac
8081

8182
## Goals
8283

83-
- Create a cluster with IPv6 networking features for new clusters created with k8s v1.21+
84+
- Create a cluster with IPv6 networking features for new clusters created with k8s v1.21+ on EKS
8485
- Dual-stack (IPv4+IPv6) VPC, subnets and EC2 instances/nodes
8586
- Allow users to set their own VPC in config
8687
- Allow users to create VPC with own IPv6 CIDR
@@ -91,6 +92,7 @@ limitations. Now users can run as many pods as their instances CPU and RAM capac
9192
## Non-Goals/Future Work
9293

9394
- IPv6-only VPC
95+
- Unmanaged clusters
9496
- Migrate to IPv6 after cluster creation ( means that reconciliation will not update existing cluster to use ipv6 )
9597
- Make IPv6 the default IP family
9698
- Support k8s version that are `< 1.21`
@@ -101,17 +103,22 @@ limitations. Now users can run as many pods as their instances CPU and RAM capac
101103

102104
### Plan
103105

104-
Newly created clusters should be able to support IPv6 based communication throughout the entire cluster and in addition,
105-
to the outside world via exposed services. The pods should have IPv6 addresses but should be able to contact AWS metadata
106-
service using IPv4. A mixed communication is preferred as fully IPv6 clusters are not supported yet. Note, AWS does
107-
provide an IPv6 metadata service under `fd00:ec2::254` well-known address.
106+
Newly created clusters backed on EKS should be able to support IPv6 based communication throughout the entire cluster
107+
and in addition, to the outside world via exposed services. The pods should have IPv6 addresses but should be able to
108+
contact AWS metadata service using IPv4. A mixed communication is preferred as fully IPv6 clusters are not supported yet
109+
using EKS. Note, AWS does provide an IPv6 metadata service under `fd00:ec2::254` well-known address.
108110

109-
### Managed and Unmanaged clusters
111+
#### Managed and Unmanaged clusters
110112

111-
For managed clusters the described approach below will work. For unmanaged clusters there is a lot more that needs to be
112-
done, but all the things that do need to be done are actually manual steps from users. The code will support it both.
113+
After careful consideration and a lot of debugging and back and forth, we decided that unmanaged clusters will not be
114+
supported at this time. Support will come at a later date. The implementation, as it stands, allows unmanaged clusters to
115+
work with IPv6 (once the validation is removed from `AWSCluster`) but the circumstances regarding getting the nodes
116+
to work and kubeadm to play nicely are difficult to pinpoint.
113117

114-
The details on how to create an IPv6 enabled unmanaged cluster can be found in [ipv6-enabled-cluster.md](../book/src/topics/eks/ipv6-enabled-cluster.md).
118+
Nevertheless, a sample template can be found under [template](../../templates/cluster-template-ipv6.yaml). This
119+
represents a possible combination of configuration objects that kubeadm requires.
120+
121+
A validation is added to prevent unmanaged clusters from being able to use IPv6-specific configurations.
115122

116123
#### Additions and Configuration changes
117124

@@ -253,6 +260,7 @@ The following validations need to be applied:
253260
- Cluster version must be 1.21 or higher
254261
- Addon version of CNI must be 1.10 or higher in case of IPv6
255262
- Possibly validate ( if we don't set it automatically ) that the right environment properties are set for vpc-cni
263+
- Prevent unmanaged clusters from using IPv6 settings
256264

257265
#### Instance Type
258266

@@ -326,10 +334,10 @@ connectivity works such as, but not limited to:
326334
## User Stories
327335

328336
As a CAPA user:
329-
- I can create a cluster that is in a new IPv6 & IPv4 dual-stack VPC
337+
- I can create a cluster using EKS that is in a new IPv6 & IPv4 dual-stack VPC
330338
- I can create a nodegroup which completely supports IPv6 CIDR
331339
- I can bring my own IPv6 subnet and create a nodegroup with that
332-
- I can create infrastructure using an IPv6 & IPv4 dual-stack VPC
340+
- I can create infrastructure on EKS using an IPv6 & IPv4 dual-stack VPC
333341

334342
## Security Model
335343

@@ -345,7 +353,6 @@ No other alternatives.
345353
- [x] 04/28/2022: Proposed idea in an issue or [community meeting]
346354
- [x] 04/28/2022: Compile a Google Doc following the CAEP template (link here)
347355
- [x] 08/06/2022: Open proposal PR
348-
- [ ] MM/DD/YYYY: First round of feedback from community
349-
- [ ] MM/DD/YYYY: Present proposal at a [community meeting]
356+
- [x] 08/20/2022: First round of feedback from community
350357

351358
<!-- Links -->

templates/cluster-template-ipv6.yaml

Lines changed: 6 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -5,8 +5,10 @@ metadata:
55
name: "${CLUSTER_NAME}"
66
spec:
77
clusterNetwork:
8+
services:
9+
cidrBlocks: ["2a05:d014:2d3:4200::/112"]
810
pods:
9-
cidrBlocks: ["192.168.0.0/16"]
11+
cidrBlocks: ["2a05:d014:2d3:4200::/56"]
1012
infrastructureRef:
1113
apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
1214
kind: AWSCluster
@@ -42,6 +44,7 @@ spec:
4244
kubeletExtraArgs:
4345
cloud-provider: aws
4446
node-ip: '::'
47+
cluster-dns: '2a05:d014:2d3:4200::/56'
4548
clusterConfiguration:
4649
apiServer:
4750
extraArgs:
@@ -51,6 +54,7 @@ spec:
5154
extraArgs:
5255
cloud-provider: aws
5356
bind-address: '::'
57+
cluster-cidr: "2a05:d014:2d3:4200::/56"
5458
scheduler:
5559
extraArgs:
5660
bind-address: '::'
@@ -59,6 +63,7 @@ spec:
5963
name: '{{ ds.meta_data.local_hostname }}'
6064
kubeletExtraArgs:
6165
node-ip: '::'
66+
cluster-dns: "2a05:d014:2d3:4200::/56"
6267
cloud-provider: aws
6368
version: "${KUBERNETES_VERSION}"
6469
---

0 commit comments

Comments
 (0)