Skip to content

Commit c3c5a53

Browse files
authored
Merge pull request #4648 from cnmcavoy/eks-cluster-autoscaler-secret
✨ Add separate eks kubeconfig secret keys for the cluster-autoscaler
2 parents 2a4d434 + f3d1caa commit c3c5a53

File tree

4 files changed

+348
-14
lines changed

4 files changed

+348
-14
lines changed

docs/book/src/topics/eks/creating-a-cluster.md

Lines changed: 9 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -34,4 +34,12 @@ kubectl --namespace=default get secret managed-test-user-kubeconfig \
3434

3535
This kubeconfig is used internally by CAPI and shouldn't be used outside of the management server. It is used by CAPI to perform operations, such as draining a node. The name of the secret that contains the kubeconfig will be `[cluster-name]-kubeconfig` where you need to replace **[cluster-name]** with the name of your cluster. Note that there is NO `-user` in the name.
3636

37-
The kubeconfig is regenerated every `sync-period` as the token that is embedded in the kubeconfig is only valid for a short period of time. When EKS support is enabled the maximum sync period is 10 minutes. If you try to set `--sync-period` to greater than 10 minutes then an error will be raised.
37+
There are three keys in the CAPI kubeconfig secret for EKS clusters:

| keys        | purpose |
|-------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| value       | contains a complete kubeconfig with the cluster admin user and token embedded |
| relative    | contains a kubeconfig with the cluster admin user, referencing the token file in a relative path - assumes you are mounting all the secret keys in the same dir |
| single-file | contains the same token embedded in the complete kubeconfig, it is separated into a single file so that existing APIMachinery can reload the token file when the secret is updated |

The secret contents are regenerated every `sync-period` as the token that is embedded in the kubeconfig and token file is only valid for a short period of time. When EKS support is enabled the maximum sync period is 10 minutes. If you try to set `--sync-period` to greater than 10 minutes then an error will be raised.

pkg/cloud/services/eks/config.go

Lines changed: 70 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -31,9 +31,12 @@ import (
3131
"k8s.io/apimachinery/pkg/types"
3232
"k8s.io/client-go/tools/clientcmd"
3333
"k8s.io/client-go/tools/clientcmd/api"
34+
"sigs.k8s.io/controller-runtime/pkg/client"
3435

3536
ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2"
3637
"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/record"
38+
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
39+
"sigs.k8s.io/cluster-api/util"
3740
"sigs.k8s.io/cluster-api/util/kubeconfig"
3841
"sigs.k8s.io/cluster-api/util/secret"
3942
)
@@ -42,6 +45,9 @@ const (
4245
tokenPrefix = "k8s-aws-v1." //nolint:gosec
4346
clusterNameHeader = "x-k8s-aws-id"
4447
tokenAgeMins = 15
48+
49+
relativeKubeconfigKey = "relative"
50+
relativeTokenFileKey = "token-file"
4551
)
4652

4753
func (s *Service) reconcileKubeconfig(ctx context.Context, cluster *eks.Cluster) error {
@@ -110,28 +116,44 @@ func (s *Service) createCAPIKubeconfigSecret(ctx context.Context, cluster *eks.C
110116
clusterName := s.scope.KubernetesClusterName()
111117
userName := s.getKubeConfigUserName(clusterName, false)
112118

113-
cfg, err := s.createBaseKubeConfig(cluster, userName)
119+
config, err := s.createBaseKubeConfig(cluster, userName)
114120
if err != nil {
115121
return fmt.Errorf("creating base kubeconfig: %w", err)
116122
}
123+
clusterConfig := config.DeepCopy()
117124

118125
token, err := s.generateToken()
119126
if err != nil {
120127
return fmt.Errorf("generating presigned token: %w", err)
121128
}
122129

123-
cfg.AuthInfos = map[string]*api.AuthInfo{
130+
clusterConfig.AuthInfos = map[string]*api.AuthInfo{
124131
userName: {
125132
Token: token,
126133
},
127134
}
128135

129-
out, err := clientcmd.Write(*cfg)
136+
out, err := clientcmd.Write(*clusterConfig)
130137
if err != nil {
131138
return errors.Wrap(err, "failed to serialize config to yaml")
132139
}
133140

134-
kubeconfigSecret := kubeconfig.GenerateSecretWithOwner(*clusterRef, out, controllerOwnerRef)
141+
secretData := make(map[string][]byte)
142+
secretData[secret.KubeconfigDataName] = out
143+
144+
config.AuthInfos = map[string]*api.AuthInfo{
145+
userName: {
146+
TokenFile: "./" + relativeTokenFileKey,
147+
},
148+
}
149+
out, err = clientcmd.Write(*config)
150+
if err != nil {
151+
return errors.Wrap(err, "failed to serialize config to yaml")
152+
}
153+
secretData[relativeKubeconfigKey] = out
154+
secretData[relativeTokenFileKey] = []byte(token)
155+
156+
kubeconfigSecret := generateSecretWithOwner(*clusterRef, secretData, controllerOwnerRef)
135157
if err := s.scope.Client.Create(ctx, kubeconfigSecret); err != nil {
136158
return errors.Wrap(err, "failed to create kubeconfig secret")
137159
}
@@ -142,32 +164,49 @@ func (s *Service) createCAPIKubeconfigSecret(ctx context.Context, cluster *eks.C
142164

143165
func (s *Service) updateCAPIKubeconfigSecret(ctx context.Context, configSecret *corev1.Secret, cluster *eks.Cluster) error {
144166
s.scope.Debug("Updating EKS kubeconfigs for cluster", "cluster-name", s.scope.KubernetesClusterName())
167+
controllerOwnerRef := *metav1.NewControllerRef(s.scope.ControlPlane, ekscontrolplanev1.GroupVersion.WithKind("AWSManagedControlPlane"))
145168

146-
data, ok := configSecret.Data[secret.KubeconfigDataName]
147-
if !ok {
148-
return errors.Errorf("missing key %q in secret data", secret.KubeconfigDataName)
169+
if !util.HasOwnerRef(configSecret.OwnerReferences, controllerOwnerRef) {
170+
return fmt.Errorf("EKS kubeconfig %s/%s missing expected AWSManagedControlPlane ownership", configSecret.Namespace, configSecret.Name)
149171
}
150172

151-
config, err := clientcmd.Load(data)
173+
clusterName := s.scope.KubernetesClusterName()
174+
userName := s.getKubeConfigUserName(clusterName, false)
175+
config, err := s.createBaseKubeConfig(cluster, userName)
152176
if err != nil {
153-
return errors.Wrap(err, "failed to convert kubeconfig Secret into a clientcmdapi.Config")
177+
return fmt.Errorf("creating base kubeconfig: %w", err)
154178
}
179+
clusterConfig := config.DeepCopy()
155180

156181
token, err := s.generateToken()
157182
if err != nil {
158183
return fmt.Errorf("generating presigned token: %w", err)
159184
}
160185

161-
userName := s.getKubeConfigUserName(*cluster.Name, false)
162-
config.AuthInfos[userName].Token = token
186+
clusterConfig.AuthInfos = map[string]*api.AuthInfo{
187+
userName: {
188+
Token: token,
189+
},
190+
}
163191

164-
out, err := clientcmd.Write(*config)
192+
out, err := clientcmd.Write(*clusterConfig)
165193
if err != nil {
166194
return errors.Wrap(err, "failed to serialize config to yaml")
167195
}
168-
169196
configSecret.Data[secret.KubeconfigDataName] = out
170197

198+
config.AuthInfos = map[string]*api.AuthInfo{
199+
userName: {
200+
TokenFile: "./" + relativeTokenFileKey,
201+
},
202+
}
203+
out, err = clientcmd.Write(*config)
204+
if err != nil {
205+
return errors.Wrap(err, "failed to serialize config to yaml")
206+
}
207+
configSecret.Data[relativeKubeconfigKey] = out
208+
configSecret.Data[relativeTokenFileKey] = []byte(token)
209+
171210
err = s.scope.Client.Update(ctx, configSecret)
172211
if err != nil {
173212
return fmt.Errorf("updating kubeconfig secret: %w", err)
@@ -283,3 +322,21 @@ func (s *Service) getKubeConfigUserName(clusterName string, isUser bool) string
283322

284323
return fmt.Sprintf("%s-capi-admin", clusterName)
285324
}
325+
326+
// generateSecretWithOwner returns a Kubernetes secret for the given Cluster name, namespace, kubeconfig data, and ownerReference.
327+
func generateSecretWithOwner(clusterName client.ObjectKey, data map[string][]byte, owner metav1.OwnerReference) *corev1.Secret {
328+
return &corev1.Secret{
329+
ObjectMeta: metav1.ObjectMeta{
330+
Name: secret.Name(clusterName.Name, secret.Kubeconfig),
331+
Namespace: clusterName.Namespace,
332+
Labels: map[string]string{
333+
clusterv1.ClusterNameLabel: clusterName.Name,
334+
},
335+
OwnerReferences: []metav1.OwnerReference{
336+
owner,
337+
},
338+
},
339+
Data: data,
340+
Type: clusterv1.ClusterSecretType,
341+
}
342+
}

0 commit comments

Comments
 (0)