diff --git a/Makefile b/Makefile index 84e7b00d8f..07bf590963 100644 --- a/Makefile +++ b/Makefile @@ -606,7 +606,7 @@ promote-images: $(KPROMO) $(YQ) .PHONY: release-binaries release-binaries: $(GORELEASER) ## Builds only the binaries, not a release. - $(GORELEASER) build --config $(GORELEASER_CONFIG) --snapshot --clean + GOMAXPROCS=2 $(GORELEASER) build --config $(GORELEASER_CONFIG) --snapshot --clean .PHONY: release-staging release-staging: ## Builds and push container images and manifests to the staging bucket. diff --git a/config/crd/bases/controlplane.cluster.x-k8s.io_awsmanagedcontrolplanes.yaml b/config/crd/bases/controlplane.cluster.x-k8s.io_awsmanagedcontrolplanes.yaml index 29cc567267..7af31039ed 100644 --- a/config/crd/bases/controlplane.cluster.x-k8s.io_awsmanagedcontrolplanes.yaml +++ b/config/crd/bases/controlplane.cluster.x-k8s.io_awsmanagedcontrolplanes.yaml @@ -2208,6 +2208,101 @@ spec: description: AWSManagedControlPlaneSpec defines the desired state of an Amazon EKS Cluster. properties: + accessConfig: + description: AccessConfig specifies the access configuration information + for the cluster + properties: + accessEntries: + description: |- + AccessEntries specifies the access entries for the cluster + Access entries require AuthenticationMode to be either API or API_AND_CONFIG_MAP + items: + description: AccessEntry represents an AWS EKS access entry + for IAM principals + properties: + accessPolicies: + description: |- + AccessPolicies specifies the policies to associate with this access entry + Cannot be specified if Type is EC2_LINUX or EC2_WINDOWS + items: + description: AccessPolicyReference represents a reference + to an AWS EKS access policy + properties: + accessScope: + description: AccessScope specifies the scope for the + policy + properties: + namespaces: + description: |- + Namespaces are the namespaces for the access scope + Only valid when Type is namespace + items: + type: string + minItems: 1 + type: array + type: + default: cluster + description: Type is the type of access scope. + Defaults to "cluster". + enum: + - cluster + - namespace + type: string + required: + - type + type: object + policyARN: + description: PolicyARN is the Amazon Resource Name + (ARN) of the access policy + type: string + required: + - accessScope + - policyARN + type: object + maxItems: 20 + type: array + kubernetesGroups: + description: |- + KubernetesGroups represents the Kubernetes groups for the access entry + Cannot be specified if Type is EC2_LINUX or EC2_WINDOWS + items: + type: string + type: array + principalARN: + description: PrincipalARN is the Amazon Resource Name (ARN) + of the IAM principal + type: string + type: + default: STANDARD + description: Type is the type of access entry. Defaults + to STANDARD if not specified. 
+ enum: + - STANDARD + - EC2_LINUX + - EC2_WINDOWS + - FARGATE_LINUX + - EC2 + - HYBRID_LINUX + - HYPERPOD_LINUX + type: string + username: + description: Username is the username for the access entry + type: string + required: + - principalARN + type: object + type: array + authenticationMode: + default: CONFIG_MAP + description: |- + AuthenticationMode specifies the desired authentication mode for the cluster + Defaults to CONFIG_MAP + enum: + - CONFIG_MAP + - API + - API_AND_CONFIG_MAP + type: string + type: object additionalTags: additionalProperties: type: string diff --git a/controlplane/eks/api/v1beta1/conversion.go b/controlplane/eks/api/v1beta1/conversion.go index 9a0c2720c6..0ba21248ab 100644 --- a/controlplane/eks/api/v1beta1/conversion.go +++ b/controlplane/eks/api/v1beta1/conversion.go @@ -42,6 +42,7 @@ func (r *AWSManagedControlPlane) ConvertTo(dstRaw conversion.Hub) error { dst.Spec.VpcCni.Disable = r.Spec.DisableVPCCNI dst.Spec.Partition = restored.Spec.Partition dst.Spec.RestrictPrivateSubnets = restored.Spec.RestrictPrivateSubnets + dst.Spec.AccessConfig = restored.Spec.AccessConfig dst.Spec.RolePath = restored.Spec.RolePath dst.Spec.RolePermissionsBoundary = restored.Spec.RolePermissionsBoundary dst.Status.Version = restored.Status.Version diff --git a/controlplane/eks/api/v1beta1/zz_generated.conversion.go b/controlplane/eks/api/v1beta1/zz_generated.conversion.go index b7bb9b0a6f..eaea4b61b1 100644 --- a/controlplane/eks/api/v1beta1/zz_generated.conversion.go +++ b/controlplane/eks/api/v1beta1/zz_generated.conversion.go @@ -375,6 +375,7 @@ func autoConvert_v1beta2_AWSManagedControlPlaneSpec_To_v1beta1_AWSManagedControl out.AssociateOIDCProvider = in.AssociateOIDCProvider out.Addons = (*[]Addon)(unsafe.Pointer(in.Addons)) out.OIDCIdentityProviderConfig = (*OIDCIdentityProviderConfig)(unsafe.Pointer(in.OIDCIdentityProviderConfig)) + // WARNING: in.AccessConfig requires manual conversion: does not exist in peer-type if err := Convert_v1beta2_VpcCni_To_v1beta1_VpcCni(&in.VpcCni, &out.VpcCni, s); err != nil { return err } diff --git a/controlplane/eks/api/v1beta2/awsmanagedcontrolplane_types.go b/controlplane/eks/api/v1beta2/awsmanagedcontrolplane_types.go index 4f4c559b81..6e44c5d0b6 100644 --- a/controlplane/eks/api/v1beta2/awsmanagedcontrolplane_types.go +++ b/controlplane/eks/api/v1beta2/awsmanagedcontrolplane_types.go @@ -193,6 +193,10 @@ type AWSManagedControlPlaneSpec struct { //nolint: maligned // +optional OIDCIdentityProviderConfig *OIDCIdentityProviderConfig `json:"oidcIdentityProviderConfig,omitempty"` + // AccessConfig specifies the access configuration information for the cluster + // +optional + AccessConfig *AccessConfig `json:"accessConfig,omitempty"` + // VpcCni is used to set configuration options for the VPC CNI plugin // +optional VpcCni VpcCni `json:"vpcCni,omitempty"` @@ -249,6 +253,73 @@ type EndpointAccess struct { Private *bool `json:"private,omitempty"` } +// AccessEntry represents an AWS EKS access entry for IAM principals +type AccessEntry struct { + // PrincipalARN is the Amazon Resource Name (ARN) of the IAM principal + // +kubebuilder:validation:Required + PrincipalARN string `json:"principalARN"` + + // Type is the type of access entry. Defaults to STANDARD if not specified. 
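+	// Entries of type EC2_LINUX or EC2_WINDOWS cannot set KubernetesGroups or
+	// AccessPolicies; the webhook validation added below enforces this.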
+ // +kubebuilder:default=STANDARD + // +kubebuilder:validation:Enum=STANDARD;EC2_LINUX;EC2_WINDOWS;FARGATE_LINUX;EC2;HYBRID_LINUX;HYPERPOD_LINUX + // +optional + Type string `json:"type,omitempty"` + + // KubernetesGroups represents the Kubernetes groups for the access entry + // Cannot be specified if Type is EC2_LINUX or EC2_WINDOWS + // +optional + KubernetesGroups []string `json:"kubernetesGroups,omitempty"` + + // Username is the username for the access entry + // +optional + Username string `json:"username,omitempty"` + + // AccessPolicies specifies the policies to associate with this access entry + // Cannot be specified if Type is EC2_LINUX or EC2_WINDOWS + // +optional + // +kubebuilder:validation:MaxItems=20 + AccessPolicies []AccessPolicyReference `json:"accessPolicies,omitempty"` +} + +// AccessPolicyReference represents a reference to an AWS EKS access policy +type AccessPolicyReference struct { + // PolicyARN is the Amazon Resource Name (ARN) of the access policy + // +kubebuilder:validation:Required + PolicyARN string `json:"policyARN"` + + // AccessScope specifies the scope for the policy + // +kubebuilder:validation:Required + AccessScope AccessScope `json:"accessScope"` +} + +// AccessScope represents the scope for an access policy +type AccessScope struct { + // Type is the type of access scope. Defaults to "cluster". + // +kubebuilder:validation:Enum=cluster;namespace + // +kubebuilder:default=cluster + Type string `json:"type"` + + // Namespaces are the namespaces for the access scope + // Only valid when Type is namespace + // +optional + // +kubebuilder:validation:MinItems=1 + Namespaces []string `json:"namespaces,omitempty"` +} + +// AccessConfig represents the access configuration information for the cluster +type AccessConfig struct { + // AuthenticationMode specifies the desired authentication mode for the cluster + // Defaults to CONFIG_MAP + // +kubebuilder:default=CONFIG_MAP + // +kubebuilder:validation:Enum=CONFIG_MAP;API;API_AND_CONFIG_MAP + AuthenticationMode EKSAuthenticationMode `json:"authenticationMode,omitempty"` + + // AccessEntries specifies the access entries for the cluster + // Access entries require AuthenticationMode to be either API or API_AND_CONFIG_MAP + // +optional + AccessEntries []AccessEntry `json:"accessEntries,omitempty"` +} + // EncryptionConfig specifies the encryption configuration for the EKS clsuter. type EncryptionConfig struct { // Provider specifies the ARN or alias of the CMK (in AWS KMS) diff --git a/controlplane/eks/api/v1beta2/awsmanagedcontrolplane_webhook.go b/controlplane/eks/api/v1beta2/awsmanagedcontrolplane_webhook.go index 8970b29cd7..f6a315211d 100644 --- a/controlplane/eks/api/v1beta2/awsmanagedcontrolplane_webhook.go +++ b/controlplane/eks/api/v1beta2/awsmanagedcontrolplane_webhook.go @@ -98,6 +98,7 @@ func (*awsManagedControlPlaneWebhook) ValidateCreate(_ context.Context, obj runt // TODO: Add ipv6 validation things in these validations. allErrs = append(allErrs, r.validateEKSVersion(nil)...) allErrs = append(allErrs, r.Spec.Bastion.Validate()...) + allErrs = append(allErrs, r.validateAccessConfig(nil)...) allErrs = append(allErrs, r.validateIAMAuthConfig()...) allErrs = append(allErrs, r.validateSecondaryCIDR()...) allErrs = append(allErrs, r.validateEKSAddons()...) @@ -140,6 +141,7 @@ func (*awsManagedControlPlaneWebhook) ValidateUpdate(ctx context.Context, oldObj allErrs = append(allErrs, r.validateEKSClusterNameSame(oldAWSManagedControlplane)...) 
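	// The old object is threaded through so the update-time validations below
	// (EKS version, access-config ratcheting) can compare against the previous spec.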
allErrs = append(allErrs, r.validateEKSVersion(oldAWSManagedControlplane)...) allErrs = append(allErrs, r.Spec.Bastion.Validate()...) + allErrs = append(allErrs, r.validateAccessConfig(oldAWSManagedControlplane)...) allErrs = append(allErrs, r.validateIAMAuthConfig()...) allErrs = append(allErrs, r.validateSecondaryCIDR()...) allErrs = append(allErrs, r.validateEKSAddons()...) @@ -304,6 +306,79 @@ func (r *AWSManagedControlPlane) validateEKSAddons() field.ErrorList { return allErrs } +func (r *AWSManagedControlPlane) validateAccessConfig(old *AWSManagedControlPlane) field.ErrorList { + var allErrs field.ErrorList + + // If accessConfig is already set, do not allow removal of it. + if old != nil && old.Spec.AccessConfig != nil && r.Spec.AccessConfig == nil { + allErrs = append(allErrs, + field.Invalid(field.NewPath("spec", "accessConfig"), r.Spec.AccessConfig, "removing AccessConfig is not allowed after it has been enabled"), + ) + } + + // AuthenticationMode is ratcheting - do not allow downgrades + if old != nil && old.Spec.AccessConfig != nil && r.Spec.AccessConfig != nil && old.Spec.AccessConfig.AuthenticationMode != r.Spec.AccessConfig.AuthenticationMode && + ((old.Spec.AccessConfig.AuthenticationMode == EKSAuthenticationModeAPIAndConfigMap && r.Spec.AccessConfig.AuthenticationMode == EKSAuthenticationModeConfigMap) || + old.Spec.AccessConfig.AuthenticationMode == EKSAuthenticationModeAPI) { + allErrs = append(allErrs, + field.Invalid(field.NewPath("spec", "accessConfig", "authenticationMode"), r.Spec.AccessConfig.AuthenticationMode, "downgrading authentication mode is not allowed after it has been enabled"), + ) + } + + // AccessEntries require AuthenticationMode to be API or API_AND_CONFIG_MAP + if r.Spec.AccessConfig != nil && len(r.Spec.AccessConfig.AccessEntries) > 0 { + if r.Spec.AccessConfig.AuthenticationMode != EKSAuthenticationModeAPI && + r.Spec.AccessConfig.AuthenticationMode != EKSAuthenticationModeAPIAndConfigMap { + allErrs = append(allErrs, + field.Invalid( + field.NewPath("spec", "accessConfig", "accessEntries"), + r.Spec.AccessConfig.AccessEntries, + "accessEntries can only be used when authenticationMode is set to API or API_AND_CONFIG_MAP", + ), + ) + } + + // Validate that EC2 types don't have kubernetes groups or access policies + for i, entry := range r.Spec.AccessConfig.AccessEntries { + if entry.Type == "EC2_LINUX" || entry.Type == "EC2_WINDOWS" { + if len(entry.KubernetesGroups) > 0 { + allErrs = append(allErrs, + field.Invalid( + field.NewPath("spec", "accessConfig", "accessEntries").Index(i).Child("kubernetesGroups"), + entry.KubernetesGroups, + "kubernetesGroups cannot be specified when type is EC2_LINUX or EC2_WINDOWS", + ), + ) + } + if len(entry.AccessPolicies) > 0 { + allErrs = append(allErrs, + field.Invalid( + field.NewPath("spec", "accessConfig", "accessEntries").Index(i).Child("accessPolicies"), + entry.AccessPolicies, + "accessPolicies cannot be specified when type is EC2_LINUX or EC2_WINDOWS", + ), + ) + } + } + + // Validate namespace scopes + for j, policy := range entry.AccessPolicies { + if policy.AccessScope.Type == "namespace" && len(policy.AccessScope.Namespaces) == 0 { + allErrs = append(allErrs, + field.Invalid( + field.NewPath("spec", "accessConfig", "accessEntries").Index(i).Child("accessPolicies").Index(j).Child("accessScope", "namespaces"), + policy.AccessScope.Namespaces, + "at least one value must be provided when accessScope type is namespace", + ), + ) + } + } + } + } + + return allErrs +} + func (r *AWSManagedControlPlane) 
validateIAMAuthConfig() field.ErrorList { var allErrs field.ErrorList diff --git a/controlplane/eks/api/v1beta2/awsmanagedcontrolplane_webhook_test.go b/controlplane/eks/api/v1beta2/awsmanagedcontrolplane_webhook_test.go index 16727b1c82..7729355219 100644 --- a/controlplane/eks/api/v1beta2/awsmanagedcontrolplane_webhook_test.go +++ b/controlplane/eks/api/v1beta2/awsmanagedcontrolplane_webhook_test.go @@ -526,6 +526,166 @@ func TestWebhookCreateIPv6Details(t *testing.T) { } } +func TestWebhookValidateAccessEntries(t *testing.T) { + tests := []struct { + name string + accessConfig *AccessConfig + expectError bool + errorSubstr string + }{ + { + name: "valid access entries with API auth mode", + accessConfig: &AccessConfig{ + AuthenticationMode: EKSAuthenticationModeAPI, + AccessEntries: []AccessEntry{ + { + PrincipalARN: "arn:aws:iam::123456789012:role/EKSAdmin", + Type: "STANDARD", + KubernetesGroups: []string{"system:masters"}, + }, + }, + }, + expectError: false, + }, + { + name: "valid access entries with API_AND_CONFIG_MAP auth mode", + accessConfig: &AccessConfig{ + AuthenticationMode: EKSAuthenticationModeAPIAndConfigMap, + AccessEntries: []AccessEntry{ + { + PrincipalARN: "arn:aws:iam::123456789012:role/EKSAdmin", + Type: "STANDARD", + KubernetesGroups: []string{"system:masters"}, + }, + }, + }, + expectError: false, + }, + { + name: "invalid access entries with CONFIG_MAP auth mode", + accessConfig: &AccessConfig{ + AuthenticationMode: EKSAuthenticationModeConfigMap, + AccessEntries: []AccessEntry{ + { + PrincipalARN: "arn:aws:iam::123456789012:role/EKSAdmin", + Type: "STANDARD", + KubernetesGroups: []string{"system:masters"}, + }, + }, + }, + expectError: true, + errorSubstr: "accessEntries can only be used when authenticationMode is set to API or API_AND_CONFIG_MAP", + }, + { + name: "invalid EC2_LINUX access entry with kubernetes groups", + accessConfig: &AccessConfig{ + AuthenticationMode: EKSAuthenticationModeAPI, + AccessEntries: []AccessEntry{ + { + PrincipalARN: "arn:aws:iam::123456789012:role/EKSAdmin", + Type: "EC2_LINUX", + KubernetesGroups: []string{"system:masters"}, + }, + }, + }, + expectError: true, + errorSubstr: "kubernetesGroups cannot be specified when type is EC2_LINUX or EC2_WINDOWS", + }, + { + name: "invalid EC2_WINDOWS access entry with access policies", + accessConfig: &AccessConfig{ + AuthenticationMode: EKSAuthenticationModeAPI, + AccessEntries: []AccessEntry{ + { + PrincipalARN: "arn:aws:iam::123456789012:role/EKSAdmin", + Type: "EC2_WINDOWS", + AccessPolicies: []AccessPolicyReference{ + { + PolicyARN: "arn:aws:eks::aws:cluster-access-policy/AmazonEKSViewPolicy", + AccessScope: AccessScope{ + Type: "cluster", + }, + }, + }, + }, + }, + }, + expectError: true, + errorSubstr: "accessPolicies cannot be specified when type is EC2_LINUX or EC2_WINDOWS", + }, + { + name: "invalid access policy with namespace type and no namespaces", + accessConfig: &AccessConfig{ + AuthenticationMode: EKSAuthenticationModeAPI, + AccessEntries: []AccessEntry{ + { + PrincipalARN: "arn:aws:iam::123456789012:role/EKSAdmin", + Type: "STANDARD", + AccessPolicies: []AccessPolicyReference{ + { + PolicyARN: "arn:aws:eks::aws:cluster-access-policy/AmazonEKSViewPolicy", + AccessScope: AccessScope{ + Type: "namespace", + }, + }, + }, + }, + }, + }, + expectError: true, + errorSubstr: "at least one value must be provided when accessScope type is namespace", + }, + { + name: "valid access policy with namespace type and namespaces", + accessConfig: &AccessConfig{ + AuthenticationMode: 
EKSAuthenticationModeAPI, + AccessEntries: []AccessEntry{ + { + PrincipalARN: "arn:aws:iam::123456789012:role/EKSAdmin", + Type: "STANDARD", + AccessPolicies: []AccessPolicyReference{ + { + PolicyARN: "arn:aws:eks::aws:cluster-access-policy/AmazonEKSViewPolicy", + AccessScope: AccessScope{ + Type: "namespace", + Namespaces: []string{"default", "kube-system"}, + }, + }, + }, + }, + }, + }, + expectError: false, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + g := NewWithT(t) + + mcp := &AWSManagedControlPlane{ + Spec: AWSManagedControlPlaneSpec{ + EKSClusterName: "default_cluster1", + AccessConfig: tc.accessConfig, + }, + } + + warn, err := (&awsManagedControlPlaneWebhook{}).ValidateCreate(context.Background(), mcp) + + if tc.expectError { + g.Expect(err).ToNot(BeNil()) + if tc.errorSubstr != "" { + g.Expect(err.Error()).To(ContainSubstring(tc.errorSubstr)) + } + } else { + g.Expect(err).To(BeNil()) + } + // Nothing emits warnings yet + g.Expect(warn).To(BeEmpty()) + }) + } +} + func TestWebhookUpdate(t *testing.T) { tests := []struct { name string @@ -603,6 +763,96 @@ func TestWebhookUpdate(t *testing.T) { }, expectError: false, }, + { + name: "no change in access config", + oldClusterSpec: AWSManagedControlPlaneSpec{ + EKSClusterName: "default_cluster1", + AccessConfig: &AccessConfig{ + AuthenticationMode: EKSAuthenticationModeConfigMap, + }, + }, + newClusterSpec: AWSManagedControlPlaneSpec{ + EKSClusterName: "default_cluster1", + AccessConfig: &AccessConfig{ + AuthenticationMode: EKSAuthenticationModeConfigMap, + }, + }, + expectError: false, + }, + { + name: "change in access config to nil", + oldClusterSpec: AWSManagedControlPlaneSpec{ + EKSClusterName: "default_cluster1", + AccessConfig: &AccessConfig{ + AuthenticationMode: EKSAuthenticationModeConfigMap, + }, + }, + newClusterSpec: AWSManagedControlPlaneSpec{ + EKSClusterName: "default_cluster1", + }, + expectError: true, + }, + { + name: "change in access config from nil to valid", + oldClusterSpec: AWSManagedControlPlaneSpec{ + EKSClusterName: "default_cluster1", + }, + newClusterSpec: AWSManagedControlPlaneSpec{ + EKSClusterName: "default_cluster1", + AccessConfig: &AccessConfig{ + AuthenticationMode: EKSAuthenticationModeConfigMap, + }, + }, + expectError: false, + }, + { + name: "change in access config auth mode from ApiAndConfigMap to API is allowed", + oldClusterSpec: AWSManagedControlPlaneSpec{ + EKSClusterName: "default_cluster1", + AccessConfig: &AccessConfig{ + AuthenticationMode: EKSAuthenticationModeAPIAndConfigMap, + }, + }, + newClusterSpec: AWSManagedControlPlaneSpec{ + EKSClusterName: "default_cluster1", + AccessConfig: &AccessConfig{ + AuthenticationMode: EKSAuthenticationModeAPI, + }, + }, + expectError: false, + }, + { + name: "change in access config auth mode from API to Config Map is denied", + oldClusterSpec: AWSManagedControlPlaneSpec{ + EKSClusterName: "default_cluster1", + AccessConfig: &AccessConfig{ + AuthenticationMode: EKSAuthenticationModeAPI, + }, + }, + newClusterSpec: AWSManagedControlPlaneSpec{ + EKSClusterName: "default_cluster1", + AccessConfig: &AccessConfig{ + AuthenticationMode: EKSAuthenticationModeConfigMap, + }, + }, + expectError: true, + }, + { + name: "change in access config auth mode from APIAndConfigMap to Config Map is denied", + oldClusterSpec: AWSManagedControlPlaneSpec{ + EKSClusterName: "default_cluster1", + AccessConfig: &AccessConfig{ + AuthenticationMode: EKSAuthenticationModeAPIAndConfigMap, + }, + }, + newClusterSpec: 
AWSManagedControlPlaneSpec{ + EKSClusterName: "default_cluster1", + AccessConfig: &AccessConfig{ + AuthenticationMode: EKSAuthenticationModeConfigMap, + }, + }, + expectError: true, + }, { name: "change in encryption config to nil", oldClusterSpec: AWSManagedControlPlaneSpec{ diff --git a/controlplane/eks/api/v1beta2/types.go b/controlplane/eks/api/v1beta2/types.go index c740f868f1..e3b61bcd17 100644 --- a/controlplane/eks/api/v1beta2/types.go +++ b/controlplane/eks/api/v1beta2/types.go @@ -79,6 +79,21 @@ var ( EKSTokenMethodAWSCli = EKSTokenMethod("aws-cli") ) +// EKSAuthenticationMode defines the authentication mode for the cluster +type EKSAuthenticationMode string + +var ( + // EKSAuthenticationModeConfigMap indicates that only `aws-auth` ConfigMap will be used for authentication + EKSAuthenticationModeConfigMap = EKSAuthenticationMode("CONFIG_MAP") + + // EKSAuthenticationModeAPI indicates that only AWS Access Entries will be used for authentication + EKSAuthenticationModeAPI = EKSAuthenticationMode("API") + + // EKSAuthenticationModeAPIAndConfigMap indicates that both `aws-auth` ConfigMap and AWS Access Entries will + // be used for authentication + EKSAuthenticationModeAPIAndConfigMap = EKSAuthenticationMode("API_AND_CONFIG_MAP") +) + var ( // DefaultEKSControlPlaneRole is the name of the default IAM role to use for the EKS control plane // if no other role is supplied in the spec and if iam role creation is not enabled. The default diff --git a/controlplane/eks/api/v1beta2/zz_generated.deepcopy.go b/controlplane/eks/api/v1beta2/zz_generated.deepcopy.go index 216357e2fe..71ee81d349 100644 --- a/controlplane/eks/api/v1beta2/zz_generated.deepcopy.go +++ b/controlplane/eks/api/v1beta2/zz_generated.deepcopy.go @@ -170,6 +170,11 @@ func (in *AWSManagedControlPlaneSpec) DeepCopyInto(out *AWSManagedControlPlaneSp *out = new(OIDCIdentityProviderConfig) (*in).DeepCopyInto(*out) } + if in.AccessConfig != nil { + in, out := &in.AccessConfig, &out.AccessConfig + *out = new(AccessConfig) + (*in).DeepCopyInto(*out) + } in.VpcCni.DeepCopyInto(&out.VpcCni) out.KubeProxy = in.KubeProxy } @@ -243,6 +248,91 @@ func (in *AWSManagedControlPlaneStatus) DeepCopy() *AWSManagedControlPlaneStatus return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccessConfig) DeepCopyInto(out *AccessConfig) { + *out = *in + if in.AccessEntries != nil { + in, out := &in.AccessEntries, &out.AccessEntries + *out = make([]AccessEntry, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccessConfig. +func (in *AccessConfig) DeepCopy() *AccessConfig { + if in == nil { + return nil + } + out := new(AccessConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccessEntry) DeepCopyInto(out *AccessEntry) { + *out = *in + if in.KubernetesGroups != nil { + in, out := &in.KubernetesGroups, &out.KubernetesGroups + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.AccessPolicies != nil { + in, out := &in.AccessPolicies, &out.AccessPolicies + *out = make([]AccessPolicyReference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccessEntry. 
+func (in *AccessEntry) DeepCopy() *AccessEntry { + if in == nil { + return nil + } + out := new(AccessEntry) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccessPolicyReference) DeepCopyInto(out *AccessPolicyReference) { + *out = *in + in.AccessScope.DeepCopyInto(&out.AccessScope) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccessPolicyReference. +func (in *AccessPolicyReference) DeepCopy() *AccessPolicyReference { + if in == nil { + return nil + } + out := new(AccessPolicyReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccessScope) DeepCopyInto(out *AccessScope) { + *out = *in + if in.Namespaces != nil { + in, out := &in.Namespaces, &out.Namespaces + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccessScope. +func (in *AccessScope) DeepCopy() *AccessScope { + if in == nil { + return nil + } + out := new(AccessScope) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Addon) DeepCopyInto(out *Addon) { *out = *in diff --git a/pkg/cloud/services/eks/accessentry.go b/pkg/cloud/services/eks/accessentry.go new file mode 100644 index 0000000000..5646f90c93 --- /dev/null +++ b/pkg/cloud/services/eks/accessentry.go @@ -0,0 +1,271 @@ +/* +Copyright 2025 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package eks + +import ( + "context" + "slices" + + "github.com/aws/aws-sdk-go-v2/service/eks" + ekstypes "github.com/aws/aws-sdk-go-v2/service/eks/types" + "github.com/pkg/errors" + + ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2" + "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/record" +) + +func (s *Service) reconcileAccessEntries(ctx context.Context) error { + if s.scope.ControlPlane.Spec.AccessConfig == nil || len(s.scope.ControlPlane.Spec.AccessConfig.AccessEntries) == 0 { + s.scope.Info("no access entries defined, skipping reconcile") + return nil + } + + existingAccessEntries, err := s.getExistingAccessEntries(ctx) + if err != nil { + return errors.Wrap(err, "failed to list existing access entries") + } + + for _, accessEntry := range s.scope.ControlPlane.Spec.AccessConfig.AccessEntries { + if _, exists := existingAccessEntries[accessEntry.PrincipalARN]; exists { + if err := s.updateAccessEntry(ctx, accessEntry); err != nil { + return errors.Wrapf(err, "failed to update access entry for principal %s", accessEntry.PrincipalARN) + } + delete(existingAccessEntries, accessEntry.PrincipalARN) + } else { + if err := s.createAccessEntry(ctx, accessEntry); err != nil { + return errors.Wrapf(err, "failed to create access entry for principal %s", accessEntry.PrincipalARN) + } + } + } + + for principalArn := range existingAccessEntries { + if err := s.deleteAccessEntry(ctx, principalArn); err != nil { + return errors.Wrapf(err, "failed to delete access entry for principal %s", principalArn) + } + } + + record.Event(s.scope.ControlPlane, "SuccessfulReconcileAccessEntries", "Reconciled access entries") + return nil +} + +func (s *Service) getExistingAccessEntries(ctx context.Context) (map[string]bool, error) { + existingAccessEntries := make(map[string]bool) + var nextToken *string + + clusterName := s.scope.KubernetesClusterName() + for { + input := &eks.ListAccessEntriesInput{ + ClusterName: &clusterName, + NextToken: nextToken, + } + + output, err := s.EKSClient.ListAccessEntries(ctx, input) + if err != nil { + return nil, errors.Wrap(err, "failed to list access entries") + } + + for _, principalArn := range output.AccessEntries { + existingAccessEntries[principalArn] = true + } + + if output.NextToken == nil { + break + } + + nextToken = output.NextToken + } + + return existingAccessEntries, nil +} + +func (s *Service) createAccessEntry(ctx context.Context, accessEntry ekscontrolplanev1.AccessEntry) error { + clusterName := s.scope.KubernetesClusterName() + createInput := &eks.CreateAccessEntryInput{ + ClusterName: &clusterName, + PrincipalArn: &accessEntry.PrincipalARN, + } + + if len(accessEntry.KubernetesGroups) > 0 { + createInput.KubernetesGroups = accessEntry.KubernetesGroups + } + + if accessEntry.Type != "" { + createInput.Type = &accessEntry.Type + } + + if accessEntry.Username != "" { + createInput.Username = &accessEntry.Username + } + + if _, err := s.EKSClient.CreateAccessEntry(ctx, createInput); err != nil { + return errors.Wrapf(err, "failed to create access entry for principal %s", accessEntry.PrincipalARN) + } + + if err := s.reconcileAccessPolicies(ctx, accessEntry); err != nil { + return errors.Wrapf(err, "failed to reconcile access policies for principal %s", accessEntry.PrincipalARN) + } + + return nil +} + +func (s *Service) updateAccessEntry(ctx context.Context, accessEntry ekscontrolplanev1.AccessEntry) error { + clusterName := s.scope.KubernetesClusterName() + describeInput := &eks.DescribeAccessEntryInput{ + 
ClusterName: &clusterName, + PrincipalArn: &accessEntry.PrincipalARN, + } + + describeOutput, err := s.EKSClient.DescribeAccessEntry(ctx, describeInput) + if err != nil { + return errors.Wrapf(err, "failed to describe access entry for principal %s", accessEntry.PrincipalARN) + } + + // EKS requires recreate when changing type + if accessEntry.Type != *describeOutput.AccessEntry.Type { + if err = s.deleteAccessEntry(ctx, accessEntry.PrincipalARN); err != nil { + return errors.Wrapf(err, "failed to delete access entry for principal %s during recreation", accessEntry.PrincipalARN) + } + + if err = s.createAccessEntry(ctx, accessEntry); err != nil { + return errors.Wrapf(err, "failed to recreate access entry for principal %s", accessEntry.PrincipalARN) + } + return nil + } + + slices.Sort(accessEntry.KubernetesGroups) + slices.Sort(describeOutput.AccessEntry.KubernetesGroups) + + updateInput := &eks.UpdateAccessEntryInput{ + ClusterName: &clusterName, + PrincipalArn: &accessEntry.PrincipalARN, + } + + needsUpdate := false + + if accessEntry.Username != *describeOutput.AccessEntry.Username { + updateInput.Username = &accessEntry.Username + needsUpdate = true + } + + if !slices.Equal(accessEntry.KubernetesGroups, describeOutput.AccessEntry.KubernetesGroups) { + updateInput.KubernetesGroups = accessEntry.KubernetesGroups + needsUpdate = true + } + + if needsUpdate { + if _, err := s.EKSClient.UpdateAccessEntry(ctx, updateInput); err != nil { + return errors.Wrapf(err, "failed to update access entry for principal %s", accessEntry.PrincipalARN) + } + } + + if err := s.reconcileAccessPolicies(ctx, accessEntry); err != nil { + return errors.Wrapf(err, "failed to reconcile access policies for principal %s", accessEntry.PrincipalARN) + } + + return nil +} + +func (s *Service) deleteAccessEntry(ctx context.Context, principalArn string) error { + clusterName := s.scope.KubernetesClusterName() + + if _, err := s.EKSClient.DeleteAccessEntry(ctx, &eks.DeleteAccessEntryInput{ + ClusterName: &clusterName, + PrincipalArn: &principalArn, + }); err != nil { + return errors.Wrapf(err, "failed to delete access entry for principal %s", principalArn) + } + + return nil +} + +func (s *Service) reconcileAccessPolicies(ctx context.Context, accessEntry ekscontrolplanev1.AccessEntry) error { + if accessEntry.Type == "EC2_LINUX" || accessEntry.Type == "EC2_WINDOWS" { + s.scope.Info("Skipping access policy reconciliation for EC2 access type", "principalARN", accessEntry.PrincipalARN) + return nil + } + + existingPolicies, err := s.getExistingAccessPolicies(ctx, accessEntry.PrincipalARN) + if err != nil { + return errors.Wrapf(err, "failed to get existing access policies for principal %s", accessEntry.PrincipalARN) + } + + clusterName := s.scope.KubernetesClusterName() + + for _, policy := range accessEntry.AccessPolicies { + input := &eks.AssociateAccessPolicyInput{ + ClusterName: &clusterName, + PrincipalArn: &accessEntry.PrincipalARN, + PolicyArn: &policy.PolicyARN, + AccessScope: &ekstypes.AccessScope{ + Type: ekstypes.AccessScopeType(policy.AccessScope.Type), + }, + } + + if policy.AccessScope.Type == "namespace" && len(policy.AccessScope.Namespaces) > 0 { + input.AccessScope.Namespaces = policy.AccessScope.Namespaces + } + + if _, err := s.EKSClient.AssociateAccessPolicy(ctx, input); err != nil { + return errors.Wrapf(err, "failed to associate access policy %s", policy.PolicyARN) + } + + delete(existingPolicies, policy.PolicyARN) + } + + for policyARN := range existingPolicies { + if _, err := 
s.EKSClient.DisassociateAccessPolicy(ctx, &eks.DisassociateAccessPolicyInput{ + ClusterName: &clusterName, + PrincipalArn: &accessEntry.PrincipalARN, + PolicyArn: &policyARN, + }); err != nil { + return errors.Wrapf(err, "failed to disassociate access policy %s", policyARN) + } + } + + return nil +} + +func (s *Service) getExistingAccessPolicies(ctx context.Context, principalARN string) (map[string]ekstypes.AssociatedAccessPolicy, error) { + existingPolicies := map[string]ekstypes.AssociatedAccessPolicy{} + var nextToken *string + clusterName := s.scope.KubernetesClusterName() + + for { + input := &eks.ListAssociatedAccessPoliciesInput{ + ClusterName: &clusterName, + PrincipalArn: &principalARN, + NextToken: nextToken, + } + + output, err := s.EKSClient.ListAssociatedAccessPolicies(ctx, input) + if err != nil { + return nil, errors.Wrapf(err, "failed to list associated access policies for principal %s", principalARN) + } + + for _, policy := range output.AssociatedAccessPolicies { + existingPolicies[*policy.PolicyArn] = policy + } + + if output.NextToken == nil { + break + } + + nextToken = output.NextToken + } + + return existingPolicies, nil +} diff --git a/pkg/cloud/services/eks/accessentry_test.go b/pkg/cloud/services/eks/accessentry_test.go new file mode 100644 index 0000000000..5399f8cb99 --- /dev/null +++ b/pkg/cloud/services/eks/accessentry_test.go @@ -0,0 +1,813 @@ +/* +Copyright 2025 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package eks + +import ( + "context" + "testing" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/eks" + ekstypes "github.com/aws/aws-sdk-go-v2/service/eks/types" + "github.com/golang/mock/gomock" + . 
"github.com/onsi/gomega" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" + ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2" + "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" + "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/eks/mock_eksiface" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" +) + +const ( + clusterName = "test-cluster" + principalARN = "arn:aws:iam::123456789012:role/my-role" + secondPrincipalARN = "arn:aws:iam::123456789012:role/second-role" + policyARN = "arn:aws:eks::aws:cluster-access-policy/AmazonEKSClusterAdminPolicy" +) + +func TestReconcileAccessEntries(t *testing.T) { + tests := []struct { + name string + accessConfig *ekscontrolplanev1.AccessConfig + expect func(m *mock_eksiface.MockEKSAPIMockRecorder) + expectError bool + }{ + { + name: "access config nil", + expect: func(m *mock_eksiface.MockEKSAPIMockRecorder) {}, + expectError: false, + }, + { + name: "no access entries", + accessConfig: &ekscontrolplanev1.AccessConfig{ + AuthenticationMode: ekscontrolplanev1.EKSAuthenticationModeAPIAndConfigMap, + AccessEntries: []ekscontrolplanev1.AccessEntry{}, + }, + expect: func(m *mock_eksiface.MockEKSAPIMockRecorder) {}, + expectError: false, + }, + { + name: "create new access entry", + accessConfig: &ekscontrolplanev1.AccessConfig{ + AuthenticationMode: ekscontrolplanev1.EKSAuthenticationModeAPIAndConfigMap, + AccessEntries: []ekscontrolplanev1.AccessEntry{ + { + PrincipalARN: principalARN, + Type: "STANDARD", + Username: "admin", + KubernetesGroups: []string{"system:masters"}, + AccessPolicies: []ekscontrolplanev1.AccessPolicyReference{ + { + PolicyARN: policyARN, + AccessScope: ekscontrolplanev1.AccessScope{ + Type: "cluster", + }, + }, + }, + }, + }, + }, + expect: func(m *mock_eksiface.MockEKSAPIMockRecorder) { + m.ListAccessEntries(gomock.Any(), gomock.Any()).Return(&eks.ListAccessEntriesOutput{ + AccessEntries: []string{}, + }, nil) + + m.CreateAccessEntry(gomock.Any(), &eks.CreateAccessEntryInput{ + ClusterName: aws.String(clusterName), + PrincipalArn: aws.String(principalARN), + Type: aws.String("STANDARD"), + Username: aws.String("admin"), + KubernetesGroups: []string{"system:masters"}, + }).Return(&eks.CreateAccessEntryOutput{}, nil) + + m.ListAssociatedAccessPolicies(gomock.Any(), gomock.Any()).Return(&eks.ListAssociatedAccessPoliciesOutput{ + AssociatedAccessPolicies: []ekstypes.AssociatedAccessPolicy{}, + }, nil) + + m.AssociateAccessPolicy(gomock.Any(), &eks.AssociateAccessPolicyInput{ + ClusterName: aws.String(clusterName), + PrincipalArn: aws.String(principalARN), + PolicyArn: aws.String(policyARN), + AccessScope: &ekstypes.AccessScope{ + Type: ekstypes.AccessScopeTypeCluster, + }, + }).Return(&eks.AssociateAccessPolicyOutput{}, nil) + }, + expectError: false, + }, + { + name: "update existing access entry", + accessConfig: &ekscontrolplanev1.AccessConfig{ + AuthenticationMode: ekscontrolplanev1.EKSAuthenticationModeAPIAndConfigMap, + AccessEntries: []ekscontrolplanev1.AccessEntry{ + { + PrincipalARN: principalARN, + Type: "STANDARD", + Username: "admin-updated", + KubernetesGroups: []string{"system:masters", "developers"}, + AccessPolicies: []ekscontrolplanev1.AccessPolicyReference{ + { + PolicyARN: policyARN, + AccessScope: ekscontrolplanev1.AccessScope{ + Type: "cluster", + }, + }, + }, + }, + }, + }, + expect: func(m 
*mock_eksiface.MockEKSAPIMockRecorder) { + m.ListAccessEntries(gomock.Any(), gomock.Any()).Return(&eks.ListAccessEntriesOutput{ + AccessEntries: []string{principalARN}, + }, nil) + + m.DescribeAccessEntry(gomock.Any(), gomock.Any()).Return(&eks.DescribeAccessEntryOutput{ + AccessEntry: &ekstypes.AccessEntry{ + PrincipalArn: aws.String(principalARN), + Username: aws.String("admin"), + Type: aws.String("STANDARD"), + KubernetesGroups: []string{"system:masters"}, + }, + }, nil) + + m.UpdateAccessEntry(gomock.Any(), gomock.Any()).Return(&eks.UpdateAccessEntryOutput{}, nil) + + m.ListAssociatedAccessPolicies(gomock.Any(), gomock.Any()).Return(&eks.ListAssociatedAccessPoliciesOutput{ + AssociatedAccessPolicies: []ekstypes.AssociatedAccessPolicy{ + { + PolicyArn: aws.String(policyARN), + AccessScope: &ekstypes.AccessScope{ + Type: ekstypes.AccessScopeTypeCluster, + }, + }, + }, + }, nil) + + m.AssociateAccessPolicy(gomock.Any(), &eks.AssociateAccessPolicyInput{ + ClusterName: aws.String(clusterName), + PrincipalArn: aws.String(principalARN), + PolicyArn: aws.String(policyARN), + AccessScope: &ekstypes.AccessScope{ + Type: ekstypes.AccessScopeTypeCluster, + }, + }).Return(&eks.AssociateAccessPolicyOutput{}, nil) + }, + expectError: false, + }, + { + name: "delete access entry", + accessConfig: &ekscontrolplanev1.AccessConfig{ + AuthenticationMode: ekscontrolplanev1.EKSAuthenticationModeAPIAndConfigMap, + AccessEntries: []ekscontrolplanev1.AccessEntry{ + { + PrincipalARN: principalARN, + Type: "STANDARD", + Username: "admin", + KubernetesGroups: []string{"system:masters"}, + AccessPolicies: []ekscontrolplanev1.AccessPolicyReference{ + { + PolicyARN: policyARN, + AccessScope: ekscontrolplanev1.AccessScope{ + Type: "cluster", + }, + }, + }, + }, + }, + }, + expect: func(m *mock_eksiface.MockEKSAPIMockRecorder) { + m.ListAccessEntries(gomock.Any(), gomock.Any()).Return(&eks.ListAccessEntriesOutput{ + AccessEntries: []string{principalARN, secondPrincipalARN}, + }, nil) + + m.DescribeAccessEntry(gomock.Any(), gomock.Any()).Return(&eks.DescribeAccessEntryOutput{ + AccessEntry: &ekstypes.AccessEntry{ + PrincipalArn: aws.String(principalARN), + Username: aws.String("admin"), + Type: aws.String("STANDARD"), + KubernetesGroups: []string{"system:masters"}, + }, + }, nil) + + m.ListAssociatedAccessPolicies(gomock.Any(), gomock.Any()).Return(&eks.ListAssociatedAccessPoliciesOutput{ + AssociatedAccessPolicies: []ekstypes.AssociatedAccessPolicy{ + { + PolicyArn: aws.String(policyARN), + AccessScope: &ekstypes.AccessScope{ + Type: ekstypes.AccessScopeTypeCluster, + }, + }, + }, + }, nil) + + m.AssociateAccessPolicy(gomock.Any(), &eks.AssociateAccessPolicyInput{ + ClusterName: aws.String(clusterName), + PrincipalArn: aws.String(principalARN), + PolicyArn: aws.String(policyARN), + AccessScope: &ekstypes.AccessScope{ + Type: ekstypes.AccessScopeTypeCluster, + }, + }).Return(&eks.AssociateAccessPolicyOutput{}, nil) + + m.DeleteAccessEntry(gomock.Any(), &eks.DeleteAccessEntryInput{ + ClusterName: aws.String(clusterName), + PrincipalArn: aws.String(secondPrincipalARN), + }).Return(&eks.DeleteAccessEntryOutput{}, nil) + }, + expectError: false, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + g := NewWithT(t) + + mockControl := gomock.NewController(t) + defer mockControl.Finish() + + eksMock := mock_eksiface.NewMockEKSAPI(mockControl) + + scheme := runtime.NewScheme() + _ = infrav1.AddToScheme(scheme) + _ = ekscontrolplanev1.AddToScheme(scheme) + client := 
fake.NewClientBuilder().WithScheme(scheme).Build() + + controlPlane := &ekscontrolplanev1.AWSManagedControlPlane{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns", + Name: clusterName, + }, + Spec: ekscontrolplanev1.AWSManagedControlPlaneSpec{ + EKSClusterName: clusterName, + AccessConfig: tc.accessConfig, + }, + } + + scope, err := scope.NewManagedControlPlaneScope(scope.ManagedControlPlaneScopeParams{ + Client: client, + Cluster: &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns", + Name: clusterName, + }, + }, + ControlPlane: controlPlane, + }) + g.Expect(err).To(BeNil()) + + tc.expect(eksMock.EXPECT()) + s := NewService(scope) + s.EKSClient = eksMock + + err = s.reconcileAccessEntries(context.TODO()) + if tc.expectError { + g.Expect(err).To(HaveOccurred()) + return + } + g.Expect(err).To(BeNil()) + }) + } +} + +func TestReconcileAccessPolicies(t *testing.T) { + tests := []struct { + name string + accessEntry ekscontrolplanev1.AccessEntry + expect func(m *mock_eksiface.MockEKSAPIMockRecorder) + expectError bool + }{ + { + name: "EC2_LINUX type skips policy reconciliation", + accessEntry: ekscontrolplanev1.AccessEntry{ + PrincipalARN: principalARN, + Type: "EC2_LINUX", + }, + expect: func(m *mock_eksiface.MockEKSAPIMockRecorder) {}, + expectError: false, + }, + { + name: "EC2_WINDOWS type skips policy reconciliation", + accessEntry: ekscontrolplanev1.AccessEntry{ + PrincipalARN: principalARN, + Type: "EC2_WINDOWS", + }, + expect: func(m *mock_eksiface.MockEKSAPIMockRecorder) {}, + expectError: false, + }, + { + name: "associate new policy", + accessEntry: ekscontrolplanev1.AccessEntry{ + PrincipalARN: principalARN, + Type: "STANDARD", + AccessPolicies: []ekscontrolplanev1.AccessPolicyReference{ + { + PolicyARN: policyARN, + AccessScope: ekscontrolplanev1.AccessScope{ + Type: "cluster", + }, + }, + }, + }, + expect: func(m *mock_eksiface.MockEKSAPIMockRecorder) { + m.ListAssociatedAccessPolicies(gomock.Any(), gomock.Any()).Return(&eks.ListAssociatedAccessPoliciesOutput{ + AssociatedAccessPolicies: []ekstypes.AssociatedAccessPolicy{}, + }, nil) + + m.AssociateAccessPolicy(gomock.Any(), &eks.AssociateAccessPolicyInput{ + ClusterName: aws.String(clusterName), + PrincipalArn: aws.String(principalARN), + PolicyArn: aws.String(policyARN), + AccessScope: &ekstypes.AccessScope{ + Type: ekstypes.AccessScopeTypeCluster, + }, + }).Return(&eks.AssociateAccessPolicyOutput{}, nil) + }, + expectError: false, + }, + { + name: "disassociate policy", + accessEntry: ekscontrolplanev1.AccessEntry{ + PrincipalARN: principalARN, + Type: "STANDARD", + AccessPolicies: []ekscontrolplanev1.AccessPolicyReference{}, + }, + expect: func(m *mock_eksiface.MockEKSAPIMockRecorder) { + m.ListAssociatedAccessPolicies(gomock.Any(), gomock.Any()).Return(&eks.ListAssociatedAccessPoliciesOutput{ + AssociatedAccessPolicies: []ekstypes.AssociatedAccessPolicy{ + { + PolicyArn: aws.String(policyARN), + AccessScope: &ekstypes.AccessScope{ + Type: ekstypes.AccessScopeTypeCluster, + }, + }, + }, + }, nil) + + m.DisassociateAccessPolicy(gomock.Any(), &eks.DisassociateAccessPolicyInput{ + ClusterName: aws.String(clusterName), + PrincipalArn: aws.String(principalARN), + PolicyArn: aws.String(policyARN), + }).Return(&eks.DisassociateAccessPolicyOutput{}, nil) + }, + expectError: false, + }, + { + name: "namespace scoped policy", + accessEntry: ekscontrolplanev1.AccessEntry{ + PrincipalARN: principalARN, + Type: "STANDARD", + AccessPolicies: []ekscontrolplanev1.AccessPolicyReference{ + { + PolicyARN: policyARN, + 
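+							// A namespace-scoped policy must list at least one namespace;
+							// the webhook rejects an empty Namespaces list.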
AccessScope: ekscontrolplanev1.AccessScope{ + Type: "namespace", + Namespaces: []string{"kube-system", "default"}, + }, + }, + }, + }, + expect: func(m *mock_eksiface.MockEKSAPIMockRecorder) { + m.ListAssociatedAccessPolicies(gomock.Any(), gomock.Any()).Return(&eks.ListAssociatedAccessPoliciesOutput{ + AssociatedAccessPolicies: []ekstypes.AssociatedAccessPolicy{}, + }, nil) + + m.AssociateAccessPolicy(gomock.Any(), &eks.AssociateAccessPolicyInput{ + ClusterName: aws.String(clusterName), + PrincipalArn: aws.String(principalARN), + PolicyArn: aws.String(policyARN), + AccessScope: &ekstypes.AccessScope{ + Type: ekstypes.AccessScopeTypeNamespace, + Namespaces: []string{"kube-system", "default"}, + }, + }).Return(&eks.AssociateAccessPolicyOutput{}, nil) + }, + expectError: false, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + g := NewWithT(t) + + mockControl := gomock.NewController(t) + defer mockControl.Finish() + + eksMock := mock_eksiface.NewMockEKSAPI(mockControl) + + scheme := runtime.NewScheme() + _ = infrav1.AddToScheme(scheme) + _ = ekscontrolplanev1.AddToScheme(scheme) + client := fake.NewClientBuilder().WithScheme(scheme).Build() + + scope, err := scope.NewManagedControlPlaneScope(scope.ManagedControlPlaneScopeParams{ + Client: client, + Cluster: &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns", + Name: clusterName, + }, + }, + ControlPlane: &ekscontrolplanev1.AWSManagedControlPlane{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns", + Name: clusterName, + }, + Spec: ekscontrolplanev1.AWSManagedControlPlaneSpec{ + EKSClusterName: clusterName, + }, + }, + }) + g.Expect(err).To(BeNil()) + + tc.expect(eksMock.EXPECT()) + s := NewService(scope) + s.EKSClient = eksMock + + err = s.reconcileAccessPolicies(context.TODO(), tc.accessEntry) + if tc.expectError { + g.Expect(err).To(HaveOccurred()) + return + } + g.Expect(err).To(BeNil()) + }) + } +} + +func TestCreateAccessEntry(t *testing.T) { + tests := []struct { + name string + accessEntry ekscontrolplanev1.AccessEntry + expect func(m *mock_eksiface.MockEKSAPIMockRecorder) + expectError bool + }{ + { + name: "basic access entry", + accessEntry: ekscontrolplanev1.AccessEntry{ + PrincipalARN: principalARN, + Type: "STANDARD", + Username: "admin", + }, + expect: func(m *mock_eksiface.MockEKSAPIMockRecorder) { + m.CreateAccessEntry(gomock.Any(), &eks.CreateAccessEntryInput{ + ClusterName: aws.String(clusterName), + PrincipalArn: aws.String(principalARN), + Type: aws.String("STANDARD"), + Username: aws.String("admin"), + }).Return(&eks.CreateAccessEntryOutput{}, nil) + + m.ListAssociatedAccessPolicies(gomock.Any(), gomock.Any()).Return(&eks.ListAssociatedAccessPoliciesOutput{ + AssociatedAccessPolicies: []ekstypes.AssociatedAccessPolicy{}, + }, nil) + }, + expectError: false, + }, + { + name: "access entry with groups", + accessEntry: ekscontrolplanev1.AccessEntry{ + PrincipalARN: principalARN, + Type: "STANDARD", + Username: "admin", + KubernetesGroups: []string{"system:masters", "developers"}, + }, + expect: func(m *mock_eksiface.MockEKSAPIMockRecorder) { + m.CreateAccessEntry(gomock.Any(), &eks.CreateAccessEntryInput{ + ClusterName: aws.String(clusterName), + PrincipalArn: aws.String(principalARN), + Type: aws.String("STANDARD"), + Username: aws.String("admin"), + KubernetesGroups: []string{"system:masters", "developers"}, + }).Return(&eks.CreateAccessEntryOutput{}, nil) + + m.ListAssociatedAccessPolicies(gomock.Any(), gomock.Any()).Return(&eks.ListAssociatedAccessPoliciesOutput{ + 
AssociatedAccessPolicies: []ekstypes.AssociatedAccessPolicy{}, + }, nil) + }, + expectError: false, + }, + { + name: "api error", + accessEntry: ekscontrolplanev1.AccessEntry{ + PrincipalARN: principalARN, + Type: "STANDARD", + Username: "admin", + KubernetesGroups: []string{"system:masters"}, + }, + expect: func(m *mock_eksiface.MockEKSAPIMockRecorder) { + m.CreateAccessEntry(gomock.Any(), gomock.Any()).Return(nil, &ekstypes.InvalidParameterException{ + Message: aws.String("error creating access entry"), + }) + }, + expectError: true, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + g := NewWithT(t) + + mockControl := gomock.NewController(t) + defer mockControl.Finish() + + eksMock := mock_eksiface.NewMockEKSAPI(mockControl) + + scheme := runtime.NewScheme() + _ = infrav1.AddToScheme(scheme) + _ = ekscontrolplanev1.AddToScheme(scheme) + client := fake.NewClientBuilder().WithScheme(scheme).Build() + + scope, err := scope.NewManagedControlPlaneScope(scope.ManagedControlPlaneScopeParams{ + Client: client, + Cluster: &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns", + Name: clusterName, + }, + }, + ControlPlane: &ekscontrolplanev1.AWSManagedControlPlane{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns", + Name: clusterName, + }, + Spec: ekscontrolplanev1.AWSManagedControlPlaneSpec{ + EKSClusterName: clusterName, + }, + }, + }) + g.Expect(err).To(BeNil()) + + tc.expect(eksMock.EXPECT()) + s := NewService(scope) + s.EKSClient = eksMock + + err = s.createAccessEntry(context.TODO(), tc.accessEntry) + if tc.expectError { + g.Expect(err).To(HaveOccurred()) + return + } + g.Expect(err).To(BeNil()) + }) + } +} + +func TestUpdateAccessEntry(t *testing.T) { + tests := []struct { + name string + accessEntry ekscontrolplanev1.AccessEntry + expect func(m *mock_eksiface.MockEKSAPIMockRecorder) + expectError bool + }{ + { + name: "no updates needed", + accessEntry: ekscontrolplanev1.AccessEntry{ + PrincipalARN: principalARN, + Type: "STANDARD", + Username: "admin", + KubernetesGroups: []string{"system:masters"}, + }, + expect: func(m *mock_eksiface.MockEKSAPIMockRecorder) { + m.DescribeAccessEntry(gomock.Any(), gomock.Any()).Return(&eks.DescribeAccessEntryOutput{ + AccessEntry: &ekstypes.AccessEntry{ + PrincipalArn: aws.String(principalARN), + Type: aws.String("STANDARD"), + Username: aws.String("admin"), + KubernetesGroups: []string{"system:masters"}, + }, + }, nil) + + m.ListAssociatedAccessPolicies(gomock.Any(), gomock.Any()).Return(&eks.ListAssociatedAccessPoliciesOutput{ + AssociatedAccessPolicies: []ekstypes.AssociatedAccessPolicy{}, + }, nil) + }, + expectError: false, + }, + { + name: "type change requires recreate", + accessEntry: ekscontrolplanev1.AccessEntry{ + PrincipalARN: principalARN, + Type: "FARGATE_LINUX", + Username: "admin", + KubernetesGroups: []string{"system:masters"}, + }, + expect: func(m *mock_eksiface.MockEKSAPIMockRecorder) { + m.DescribeAccessEntry(gomock.Any(), gomock.Any()).Return(&eks.DescribeAccessEntryOutput{ + AccessEntry: &ekstypes.AccessEntry{ + PrincipalArn: aws.String(principalARN), + Type: aws.String("STANDARD"), + Username: aws.String("admin"), + KubernetesGroups: []string{"system:masters"}, + }, + }, nil) + + m.DeleteAccessEntry(gomock.Any(), gomock.Any()).Return(&eks.DeleteAccessEntryOutput{}, nil) + + m.CreateAccessEntry(gomock.Any(), gomock.Any()).Return(&eks.CreateAccessEntryOutput{}, nil) + + m.ListAssociatedAccessPolicies(gomock.Any(), gomock.Any()).Return(&eks.ListAssociatedAccessPoliciesOutput{ + 
AssociatedAccessPolicies: []ekstypes.AssociatedAccessPolicy{}, + }, nil) + }, + expectError: false, + }, + { + name: "username update", + accessEntry: ekscontrolplanev1.AccessEntry{ + PrincipalARN: principalARN, + Type: "STANDARD", + Username: "new-admin", + KubernetesGroups: []string{"system:masters"}, + }, + expect: func(m *mock_eksiface.MockEKSAPIMockRecorder) { + m.DescribeAccessEntry(gomock.Any(), gomock.Any()).Return(&eks.DescribeAccessEntryOutput{ + AccessEntry: &ekstypes.AccessEntry{ + PrincipalArn: aws.String(principalARN), + Type: aws.String("STANDARD"), + Username: aws.String("admin"), + KubernetesGroups: []string{"system:masters"}, + }, + }, nil) + + m.UpdateAccessEntry(gomock.Any(), &eks.UpdateAccessEntryInput{ + ClusterName: aws.String(clusterName), + PrincipalArn: aws.String(principalARN), + Username: aws.String("new-admin"), + }).Return(&eks.UpdateAccessEntryOutput{}, nil) + + m.ListAssociatedAccessPolicies(gomock.Any(), gomock.Any()).Return(&eks.ListAssociatedAccessPoliciesOutput{ + AssociatedAccessPolicies: []ekstypes.AssociatedAccessPolicy{}, + }, nil) + }, + expectError: false, + }, + { + name: "kubernetes groups update", + accessEntry: ekscontrolplanev1.AccessEntry{ + PrincipalARN: principalARN, + Type: "STANDARD", + Username: "admin", + KubernetesGroups: []string{"developers"}, + }, + expect: func(m *mock_eksiface.MockEKSAPIMockRecorder) { + m.DescribeAccessEntry(gomock.Any(), gomock.Any()).Return(&eks.DescribeAccessEntryOutput{ + AccessEntry: &ekstypes.AccessEntry{ + PrincipalArn: aws.String(principalARN), + Type: aws.String("STANDARD"), + Username: aws.String("admin"), + KubernetesGroups: []string{"system:masters"}, + }, + }, nil) + + m.UpdateAccessEntry(gomock.Any(), &eks.UpdateAccessEntryInput{ + ClusterName: aws.String(clusterName), + PrincipalArn: aws.String(principalARN), + KubernetesGroups: []string{"developers"}, + }).Return(&eks.UpdateAccessEntryOutput{}, nil) + + m.ListAssociatedAccessPolicies(gomock.Any(), gomock.Any()).Return(&eks.ListAssociatedAccessPoliciesOutput{ + AssociatedAccessPolicies: []ekstypes.AssociatedAccessPolicy{}, + }, nil) + }, + expectError: false, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + g := NewWithT(t) + + mockControl := gomock.NewController(t) + defer mockControl.Finish() + + eksMock := mock_eksiface.NewMockEKSAPI(mockControl) + + scheme := runtime.NewScheme() + _ = infrav1.AddToScheme(scheme) + _ = ekscontrolplanev1.AddToScheme(scheme) + client := fake.NewClientBuilder().WithScheme(scheme).Build() + + scope, err := scope.NewManagedControlPlaneScope(scope.ManagedControlPlaneScopeParams{ + Client: client, + Cluster: &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns", + Name: clusterName, + }, + }, + ControlPlane: &ekscontrolplanev1.AWSManagedControlPlane{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns", + Name: clusterName, + }, + Spec: ekscontrolplanev1.AWSManagedControlPlaneSpec{ + EKSClusterName: clusterName, + }, + }, + }) + g.Expect(err).To(BeNil()) + + tc.expect(eksMock.EXPECT()) + s := NewService(scope) + s.EKSClient = eksMock + + err = s.updateAccessEntry(context.TODO(), tc.accessEntry) + if tc.expectError { + g.Expect(err).To(HaveOccurred()) + return + } + g.Expect(err).To(BeNil()) + }) + } +} + +func TestDeleteAccessEntry(t *testing.T) { + tests := []struct { + name string + expect func(m *mock_eksiface.MockEKSAPIMockRecorder) + expectError bool + }{ + { + name: "successful delete", + expect: func(m *mock_eksiface.MockEKSAPIMockRecorder) { + 
m.DeleteAccessEntry(gomock.Any(), &eks.DeleteAccessEntryInput{ + ClusterName: aws.String(clusterName), + PrincipalArn: aws.String(principalARN), + }).Return(&eks.DeleteAccessEntryOutput{}, nil) + }, + expectError: false, + }, + { + name: "api error", + expect: func(m *mock_eksiface.MockEKSAPIMockRecorder) { + m.DeleteAccessEntry(gomock.Any(), gomock.Any()).Return(nil, &ekstypes.ResourceNotFoundException{ + Message: aws.String("access entry not found"), + }) + }, + expectError: true, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + g := NewWithT(t) + + mockControl := gomock.NewController(t) + defer mockControl.Finish() + + eksMock := mock_eksiface.NewMockEKSAPI(mockControl) + + scheme := runtime.NewScheme() + _ = infrav1.AddToScheme(scheme) + _ = ekscontrolplanev1.AddToScheme(scheme) + client := fake.NewClientBuilder().WithScheme(scheme).Build() + + scope, err := scope.NewManagedControlPlaneScope(scope.ManagedControlPlaneScopeParams{ + Client: client, + Cluster: &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns", + Name: clusterName, + }, + }, + ControlPlane: &ekscontrolplanev1.AWSManagedControlPlane{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns", + Name: clusterName, + }, + Spec: ekscontrolplanev1.AWSManagedControlPlaneSpec{ + EKSClusterName: clusterName, + }, + }, + }) + g.Expect(err).To(BeNil()) + + tc.expect(eksMock.EXPECT()) + s := NewService(scope) + s.EKSClient = eksMock + + err = s.deleteAccessEntry(context.TODO(), principalARN) + if tc.expectError { + g.Expect(err).To(HaveOccurred()) + return + } + g.Expect(err).To(BeNil()) + }) + } +} + diff --git a/pkg/cloud/services/eks/cluster.go b/pkg/cloud/services/eks/cluster.go index a16e6d1d34..1cf66b225c 100644 --- a/pkg/cloud/services/eks/cluster.go +++ b/pkg/cloud/services/eks/cluster.go @@ -121,6 +121,10 @@ func (s *Service) reconcileCluster(ctx context.Context) error { return errors.Wrap(err, "failed reconciling cluster config") } + if err := s.reconcileAccessConfig(ctx, cluster.AccessConfig); err != nil { + return errors.Wrap(err, "failed reconciling access config") + } + if err := s.reconcileLogging(ctx, cluster.Logging); err != nil { return errors.Wrap(err, "failed reconciling logging") } @@ -422,6 +426,13 @@ func (s *Service) createCluster(ctx context.Context, eksClusterName string) (*ek return nil, errors.Wrap(err, "couldn't create vpc config for cluster") } + var accessConfig *ekstypes.CreateAccessConfigRequest + if s.scope.ControlPlane.Spec.AccessConfig != nil && s.scope.ControlPlane.Spec.AccessConfig.AuthenticationMode != "" { + accessConfig = &ekstypes.CreateAccessConfigRequest{ + AuthenticationMode: ekstypes.AuthenticationMode(string(s.scope.ControlPlane.Spec.AccessConfig.AuthenticationMode)), + } + } + var netConfig *ekstypes.KubernetesNetworkConfigRequest if s.scope.VPC().IsIPv6Enabled() { netConfig = &ekstypes.KubernetesNetworkConfigRequest{ @@ -465,6 +476,7 @@ func (s *Service) createCluster(ctx context.Context, eksClusterName string) (*ek Name: aws.String(eksClusterName), Version: eksVersion, Logging: logging, + AccessConfig: accessConfig, EncryptionConfig: encryptionConfigs, ResourcesVpcConfig: vpcConfig, RoleArn: role.Arn, @@ -542,6 +554,58 @@ func (s *Service) reconcileClusterConfig(ctx context.Context, cluster *ekstypes. 
return nil } +func (s *Service) reconcileAccessConfig(ctx context.Context, accessConfig *ekstypes.AccessConfigResponse) error { + input := &eks.UpdateClusterConfigInput{Name: aws.String(s.scope.KubernetesClusterName())} + + if s.scope.ControlPlane.Spec.AccessConfig == nil || s.scope.ControlPlane.Spec.AccessConfig.AuthenticationMode == "" { + return nil + } + + expectedAuthenticationMode := string(s.scope.ControlPlane.Spec.AccessConfig.AuthenticationMode) + currentAuthenticationMode := string(accessConfig.AuthenticationMode) + s.scope.Debug("Reconciling EKS Access Config for cluster", "cluster-name", s.scope.KubernetesClusterName(), "expected", expectedAuthenticationMode, "current", currentAuthenticationMode) + if expectedAuthenticationMode != currentAuthenticationMode { + input.AccessConfig = &ekstypes.UpdateAccessConfigRequest{ + AuthenticationMode: ekstypes.AuthenticationMode(expectedAuthenticationMode), + } + } + + if input.AccessConfig != nil { + if err := wait.WaitForWithRetryable(wait.NewBackoff(), func() (bool, error) { + if _, err := s.EKSClient.UpdateClusterConfig(ctx, input); err != nil { + return false, err + } + + // Wait until status transitions to UPDATING because there's a short + // window after UpdateClusterConfig returns where the cluster + // status is ACTIVE and the update would be tried again + if err := s.EKSClient.WaitUntilClusterUpdating( + ctx, + &eks.DescribeClusterInput{Name: aws.String(s.scope.KubernetesClusterName())}, + s.scope.MaxWaitActiveUpdateDelete, + ); err != nil { + return false, err + } + + conditions.MarkTrue(s.scope.ControlPlane, ekscontrolplanev1.EKSControlPlaneUpdatingCondition) + record.Eventf(s.scope.ControlPlane, "InitiatedUpdateEKSControlPlane", "Initiated auth config update for EKS control plane %s", s.scope.KubernetesClusterName()) + return true, nil + }); err != nil { + record.Warnf(s.scope.ControlPlane, "FailedUpdateEKSControlPlane", "Failed to update EKS control plane auth config: %v", err) + return errors.Wrap(err, "failed to update EKS cluster access config") + } + } + + if expectedAuthenticationMode == string(ekscontrolplanev1.EKSAuthenticationModeAPI) || + expectedAuthenticationMode == string(ekscontrolplanev1.EKSAuthenticationModeAPIAndConfigMap) { + if err := s.reconcileAccessEntries(ctx); err != nil { + return errors.Wrap(err, "failed to reconcile access entries") + } + } + + return nil +} + func (s *Service) reconcileLogging(ctx context.Context, logging *ekstypes.Logging) error { input := &eks.UpdateClusterConfigInput{Name: aws.String(s.scope.KubernetesClusterName())} diff --git a/pkg/cloud/services/eks/cluster_test.go b/pkg/cloud/services/eks/cluster_test.go index 7e397f329e..50df27e4dd 100644 --- a/pkg/cloud/services/eks/cluster_test.go +++ b/pkg/cloud/services/eks/cluster_test.go @@ -25,6 +25,7 @@ import ( ekstypes "github.com/aws/aws-sdk-go-v2/service/eks/types" "github.com/aws/aws-sdk-go-v2/service/iam" iamtypes "github.com/aws/aws-sdk-go-v2/service/iam/types" + "github.com/aws/aws-sdk-go/aws/awserr" "github.com/golang/mock/gomock" . "github.com/onsi/gomega" "github.com/pkg/errors" @@ -474,6 +475,123 @@ func TestReconcileClusterVersion(t *testing.T) { } } +func TestReconcileAccessConfig(t *testing.T) { + clusterName := "default.cluster" + invalidParameterException := ekstypes.InvalidParameterException{} + tests := []struct { + name string + expect func(m *mock_eksiface.MockEKSAPIMockRecorder) + expectError bool + }{ + { + name: "no upgrade necessary", + expect: func(m *mock_eksiface.MockEKSAPIMockRecorder) { + m.
+ DescribeCluster(gomock.Eq(context.TODO()), gomock.AssignableToTypeOf(&eks.DescribeClusterInput{})). + Return(&eks.DescribeClusterOutput{ + Cluster: &ekstypes.Cluster{ + Name: aws.String("default.cluster"), + AccessConfig: &ekstypes.AccessConfigResponse{ + AuthenticationMode: ekstypes.AuthenticationModeApiAndConfigMap, + }, + }, + }, nil) + }, + expectError: false, + }, + { + name: "needs upgrade", + expect: func(m *mock_eksiface.MockEKSAPIMockRecorder) { + m. + DescribeCluster(gomock.Eq(context.TODO()), gomock.AssignableToTypeOf(&eks.DescribeClusterInput{})). + Return(&eks.DescribeClusterOutput{ + Cluster: &ekstypes.Cluster{ + Name: aws.String("default.cluster"), + AccessConfig: &ekstypes.AccessConfigResponse{ + AuthenticationMode: ekstypes.AuthenticationModeConfigMap, + }, + }, + }, nil) + m.WaitUntilClusterUpdating( + gomock.Eq(context.TODO()), + gomock.AssignableToTypeOf(&eks.DescribeClusterInput{}), gomock.Any(), + ).Return(nil) + m. + UpdateClusterConfig(gomock.Eq(context.TODO()), gomock.AssignableToTypeOf(&eks.UpdateClusterConfigInput{})). + Return(&eks.UpdateClusterConfigOutput{}, nil) + }, + expectError: false, + }, + { + name: "api error", + expect: func(m *mock_eksiface.MockEKSAPIMockRecorder) { + m. + DescribeCluster(gomock.Eq(context.TODO()), gomock.AssignableToTypeOf(&eks.DescribeClusterInput{})). + Return(&eks.DescribeClusterOutput{ + Cluster: &ekstypes.Cluster{ + Name: aws.String("default.cluster"), + AccessConfig: &ekstypes.AccessConfigResponse{ + AuthenticationMode: ekstypes.AuthenticationModeApi, + }, + }, + }, nil) + m. + UpdateClusterConfig(gomock.Eq(context.TODO()), gomock.AssignableToTypeOf(&eks.UpdateClusterConfigInput{})). + Return(&eks.UpdateClusterConfigOutput{}, awserr.New(invalidParameterException.ErrorCode(), "Unsupported authentication mode update", nil)) + }, + expectError: true, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + g := NewWithT(t) + + mockControl := gomock.NewController(t) + defer mockControl.Finish() + + eksMock := mock_eksiface.NewMockEKSAPI(mockControl) + + scheme := runtime.NewScheme() + _ = infrav1.AddToScheme(scheme) + _ = ekscontrolplanev1.AddToScheme(scheme) + client := fake.NewClientBuilder().WithScheme(scheme).Build() + scope, err := scope.NewManagedControlPlaneScope(scope.ManagedControlPlaneScopeParams{ + Client: client, + Cluster: &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns", + Name: clusterName, + }, + }, + ControlPlane: &ekscontrolplanev1.AWSManagedControlPlane{ + Spec: ekscontrolplanev1.AWSManagedControlPlaneSpec{ + EKSClusterName: clusterName, + AccessConfig: &ekscontrolplanev1.AccessConfig{ + AuthenticationMode: ekscontrolplanev1.EKSAuthenticationModeAPIAndConfigMap, + }, + }, + }, + }) + g.Expect(err).To(BeNil()) + + tc.expect(eksMock.EXPECT()) + s := NewService(scope) + s.EKSClient = eksMock + + cluster, err := s.describeEKSCluster(context.TODO(), clusterName) + g.Expect(err).To(BeNil()) + + err = s.reconcileAccessConfig(context.TODO(), cluster.AccessConfig) + if tc.expectError { + g.Expect(err).To(HaveOccurred()) + return + } + g.Expect(err).To(BeNil()) + }) + } +} + func TestCreateCluster(t *testing.T) { clusterName := "cluster.default" version := aws.String("1.24") diff --git a/pkg/cloud/services/eks/mock_eksiface/eksapi_mock.go b/pkg/cloud/services/eks/mock_eksiface/eksapi_mock.go index cbc119e961..20e7898593 100644 --- a/pkg/cloud/services/eks/mock_eksiface/eksapi_mock.go +++ b/pkg/cloud/services/eks/mock_eksiface/eksapi_mock.go @@ -52,6 +52,26 @@ func (m 
*MockEKSAPI) EXPECT() *MockEKSAPIMockRecorder { return m.recorder } +// AssociateAccessPolicy mocks base method. +func (m *MockEKSAPI) AssociateAccessPolicy(arg0 context.Context, arg1 *eks.AssociateAccessPolicyInput, arg2 ...func(*eks.Options)) (*eks.AssociateAccessPolicyOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "AssociateAccessPolicy", varargs...) + ret0, _ := ret[0].(*eks.AssociateAccessPolicyOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// AssociateAccessPolicy indicates an expected call of AssociateAccessPolicy. +func (mr *MockEKSAPIMockRecorder) AssociateAccessPolicy(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AssociateAccessPolicy", reflect.TypeOf((*MockEKSAPI)(nil).AssociateAccessPolicy), varargs...) +} + // AssociateEncryptionConfig mocks base method. func (m *MockEKSAPI) AssociateEncryptionConfig(arg0 context.Context, arg1 *eks.AssociateEncryptionConfigInput, arg2 ...func(*eks.Options)) (*eks.AssociateEncryptionConfigOutput, error) { m.ctrl.T.Helper() @@ -92,6 +112,26 @@ func (mr *MockEKSAPIMockRecorder) AssociateIdentityProviderConfig(arg0, arg1 int return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AssociateIdentityProviderConfig", reflect.TypeOf((*MockEKSAPI)(nil).AssociateIdentityProviderConfig), varargs...) } +// CreateAccessEntry mocks base method. +func (m *MockEKSAPI) CreateAccessEntry(arg0 context.Context, arg1 *eks.CreateAccessEntryInput, arg2 ...func(*eks.Options)) (*eks.CreateAccessEntryOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "CreateAccessEntry", varargs...) + ret0, _ := ret[0].(*eks.CreateAccessEntryOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CreateAccessEntry indicates an expected call of CreateAccessEntry. +func (mr *MockEKSAPIMockRecorder) CreateAccessEntry(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateAccessEntry", reflect.TypeOf((*MockEKSAPI)(nil).CreateAccessEntry), varargs...) +} + // CreateAddon mocks base method. func (m *MockEKSAPI) CreateAddon(arg0 context.Context, arg1 *eks.CreateAddonInput, arg2 ...func(*eks.Options)) (*eks.CreateAddonOutput, error) { m.ctrl.T.Helper() @@ -172,6 +212,26 @@ func (mr *MockEKSAPIMockRecorder) CreateNodegroup(arg0, arg1 interface{}, arg2 . return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateNodegroup", reflect.TypeOf((*MockEKSAPI)(nil).CreateNodegroup), varargs...) } +// DeleteAccessEntry mocks base method. +func (m *MockEKSAPI) DeleteAccessEntry(arg0 context.Context, arg1 *eks.DeleteAccessEntryInput, arg2 ...func(*eks.Options)) (*eks.DeleteAccessEntryOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DeleteAccessEntry", varargs...) + ret0, _ := ret[0].(*eks.DeleteAccessEntryOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeleteAccessEntry indicates an expected call of DeleteAccessEntry. 
+func (mr *MockEKSAPIMockRecorder) DeleteAccessEntry(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteAccessEntry", reflect.TypeOf((*MockEKSAPI)(nil).DeleteAccessEntry), varargs...) +} + // DeleteAddon mocks base method. func (m *MockEKSAPI) DeleteAddon(arg0 context.Context, arg1 *eks.DeleteAddonInput, arg2 ...func(*eks.Options)) (*eks.DeleteAddonOutput, error) { m.ctrl.T.Helper() @@ -252,6 +312,26 @@ func (mr *MockEKSAPIMockRecorder) DeleteNodegroup(arg0, arg1 interface{}, arg2 . return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteNodegroup", reflect.TypeOf((*MockEKSAPI)(nil).DeleteNodegroup), varargs...) } +// DescribeAccessEntry mocks base method. +func (m *MockEKSAPI) DescribeAccessEntry(arg0 context.Context, arg1 *eks.DescribeAccessEntryInput, arg2 ...func(*eks.Options)) (*eks.DescribeAccessEntryOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DescribeAccessEntry", varargs...) + ret0, _ := ret[0].(*eks.DescribeAccessEntryOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DescribeAccessEntry indicates an expected call of DescribeAccessEntry. +func (mr *MockEKSAPIMockRecorder) DescribeAccessEntry(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeAccessEntry", reflect.TypeOf((*MockEKSAPI)(nil).DescribeAccessEntry), varargs...) +} + // DescribeAddon mocks base method. func (m *MockEKSAPI) DescribeAddon(arg0 context.Context, arg1 *eks.DescribeAddonInput, arg2 ...func(*eks.Options)) (*eks.DescribeAddonOutput, error) { m.ctrl.T.Helper() @@ -412,6 +492,26 @@ func (mr *MockEKSAPIMockRecorder) DescribeUpdate(arg0, arg1 interface{}, arg2 .. return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeUpdate", reflect.TypeOf((*MockEKSAPI)(nil).DescribeUpdate), varargs...) } +// DisassociateAccessPolicy mocks base method. +func (m *MockEKSAPI) DisassociateAccessPolicy(arg0 context.Context, arg1 *eks.DisassociateAccessPolicyInput, arg2 ...func(*eks.Options)) (*eks.DisassociateAccessPolicyOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DisassociateAccessPolicy", varargs...) + ret0, _ := ret[0].(*eks.DisassociateAccessPolicyOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DisassociateAccessPolicy indicates an expected call of DisassociateAccessPolicy. +func (mr *MockEKSAPIMockRecorder) DisassociateAccessPolicy(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DisassociateAccessPolicy", reflect.TypeOf((*MockEKSAPI)(nil).DisassociateAccessPolicy), varargs...) +} + // DisassociateIdentityProviderConfig mocks base method. 
func (m *MockEKSAPI) DisassociateIdentityProviderConfig(arg0 context.Context, arg1 *eks.DisassociateIdentityProviderConfigInput, arg2 ...func(*eks.Options)) (*eks.DisassociateIdentityProviderConfigOutput, error) { m.ctrl.T.Helper() @@ -432,6 +532,26 @@ func (mr *MockEKSAPIMockRecorder) DisassociateIdentityProviderConfig(arg0, arg1 return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DisassociateIdentityProviderConfig", reflect.TypeOf((*MockEKSAPI)(nil).DisassociateIdentityProviderConfig), varargs...) } +// ListAccessEntries mocks base method. +func (m *MockEKSAPI) ListAccessEntries(arg0 context.Context, arg1 *eks.ListAccessEntriesInput, arg2 ...func(*eks.Options)) (*eks.ListAccessEntriesOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ListAccessEntries", varargs...) + ret0, _ := ret[0].(*eks.ListAccessEntriesOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListAccessEntries indicates an expected call of ListAccessEntries. +func (mr *MockEKSAPIMockRecorder) ListAccessEntries(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListAccessEntries", reflect.TypeOf((*MockEKSAPI)(nil).ListAccessEntries), varargs...) +} + // ListAddons mocks base method. func (m *MockEKSAPI) ListAddons(arg0 context.Context, arg1 *eks.ListAddonsInput, arg2 ...func(*eks.Options)) (*eks.ListAddonsOutput, error) { m.ctrl.T.Helper() @@ -452,6 +572,26 @@ func (mr *MockEKSAPIMockRecorder) ListAddons(arg0, arg1 interface{}, arg2 ...int return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListAddons", reflect.TypeOf((*MockEKSAPI)(nil).ListAddons), varargs...) } +// ListAssociatedAccessPolicies mocks base method. +func (m *MockEKSAPI) ListAssociatedAccessPolicies(arg0 context.Context, arg1 *eks.ListAssociatedAccessPoliciesInput, arg2 ...func(*eks.Options)) (*eks.ListAssociatedAccessPoliciesOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ListAssociatedAccessPolicies", varargs...) + ret0, _ := ret[0].(*eks.ListAssociatedAccessPoliciesOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListAssociatedAccessPolicies indicates an expected call of ListAssociatedAccessPolicies. +func (mr *MockEKSAPIMockRecorder) ListAssociatedAccessPolicies(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListAssociatedAccessPolicies", reflect.TypeOf((*MockEKSAPI)(nil).ListAssociatedAccessPolicies), varargs...) +} + // ListClusters mocks base method. func (m *MockEKSAPI) ListClusters(arg0 context.Context, arg1 *eks.ListClustersInput, arg2 ...func(*eks.Options)) (*eks.ListClustersOutput, error) { m.ctrl.T.Helper() @@ -532,6 +672,26 @@ func (mr *MockEKSAPIMockRecorder) UntagResource(arg0, arg1 interface{}, arg2 ... return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UntagResource", reflect.TypeOf((*MockEKSAPI)(nil).UntagResource), varargs...) } +// UpdateAccessEntry mocks base method. 
+func (m *MockEKSAPI) UpdateAccessEntry(arg0 context.Context, arg1 *eks.UpdateAccessEntryInput, arg2 ...func(*eks.Options)) (*eks.UpdateAccessEntryOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "UpdateAccessEntry", varargs...) + ret0, _ := ret[0].(*eks.UpdateAccessEntryOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UpdateAccessEntry indicates an expected call of UpdateAccessEntry. +func (mr *MockEKSAPIMockRecorder) UpdateAccessEntry(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateAccessEntry", reflect.TypeOf((*MockEKSAPI)(nil).UpdateAccessEntry), varargs...) +} + // UpdateAddon mocks base method. func (m *MockEKSAPI) UpdateAddon(arg0 context.Context, arg1 *eks.UpdateAddonInput, arg2 ...func(*eks.Options)) (*eks.UpdateAddonOutput, error) { m.ctrl.T.Helper() diff --git a/pkg/cloud/services/eks/service.go b/pkg/cloud/services/eks/service.go index c4c274a123..976e978243 100644 --- a/pkg/cloud/services/eks/service.go +++ b/pkg/cloud/services/eks/service.go @@ -62,6 +62,14 @@ type EKSAPI interface { TagResource(ctx context.Context, params *eks.TagResourceInput, optFns ...func(*eks.Options)) (*eks.TagResourceOutput, error) UntagResource(ctx context.Context, params *eks.UntagResourceInput, optFns ...func(*eks.Options)) (*eks.UntagResourceOutput, error) DisassociateIdentityProviderConfig(ctx context.Context, params *eks.DisassociateIdentityProviderConfigInput, optFns ...func(*eks.Options)) (*eks.DisassociateIdentityProviderConfigOutput, error) + ListAccessEntries(ctx context.Context, params *eks.ListAccessEntriesInput, optFns ...func(*eks.Options)) (*eks.ListAccessEntriesOutput, error) + DescribeAccessEntry(ctx context.Context, params *eks.DescribeAccessEntryInput, optFns ...func(*eks.Options)) (*eks.DescribeAccessEntryOutput, error) + CreateAccessEntry(ctx context.Context, params *eks.CreateAccessEntryInput, optFns ...func(*eks.Options)) (*eks.CreateAccessEntryOutput, error) + UpdateAccessEntry(ctx context.Context, params *eks.UpdateAccessEntryInput, optFns ...func(*eks.Options)) (*eks.UpdateAccessEntryOutput, error) + DeleteAccessEntry(ctx context.Context, params *eks.DeleteAccessEntryInput, optFns ...func(*eks.Options)) (*eks.DeleteAccessEntryOutput, error) + ListAssociatedAccessPolicies(ctx context.Context, params *eks.ListAssociatedAccessPoliciesInput, optFns ...func(*eks.Options)) (*eks.ListAssociatedAccessPoliciesOutput, error) + AssociateAccessPolicy(ctx context.Context, params *eks.AssociateAccessPolicyInput, optFns ...func(*eks.Options)) (*eks.AssociateAccessPolicyOutput, error) + DisassociateAccessPolicy(ctx context.Context, params *eks.DisassociateAccessPolicyInput, optFns ...func(*eks.Options)) (*eks.DisassociateAccessPolicyOutput, error) // Waiters for EKS Cluster WaitUntilClusterActive(ctx context.Context, params *eks.DescribeClusterInput, maxWait time.Duration) error diff --git a/pkg/cloud/services/elb/loadbalancer.go b/pkg/cloud/services/elb/loadbalancer.go index 5af91b26c5..c2265012a6 100644 --- a/pkg/cloud/services/elb/loadbalancer.go +++ b/pkg/cloud/services/elb/loadbalancer.go @@ -67,11 +67,21 @@ const additionalTargetGroupPrefix = "additional-listener-" // cantAttachSGToNLBRegions is a set of regions that do not support Security Groups in NLBs. 
var cantAttachSGToNLBRegions = sets.New("us-iso-east-1", "us-iso-west-1", "us-isob-east-1") +type lbReconciler func() error + // ReconcileLoadbalancers reconciles the load balancers for the given cluster. func (s *Service) ReconcileLoadbalancers() error { s.scope.Debug("Reconciling load balancers") var errs []error + var lbReconcilers []lbReconciler + + // The following splits load balancer reconciliation into 2 phases: + // 1. Get or create the load balancer + // 2. Reconcile the load balancer + // We ensure that we only wait for the load balancer to become available in + // the reconcile phase. This is useful when creating multiple load + // balancers, as they can take several minutes to become available. for _, lbSpec := range s.scope.ControlPlaneLoadBalancers() { if lbSpec == nil { @@ -79,48 +89,72 @@ func (s *Service) ReconcileLoadbalancers() error { } switch lbSpec.LoadBalancerType { case infrav1.LoadBalancerTypeClassic: - errs = append(errs, s.reconcileClassicLoadBalancer()) + reconciler, err := s.getOrCreateClassicLoadBalancer() + if err != nil { + errs = append(errs, err) + } else { + lbReconcilers = append(lbReconcilers, reconciler) + } case infrav1.LoadBalancerTypeNLB, infrav1.LoadBalancerTypeALB, infrav1.LoadBalancerTypeELB: - errs = append(errs, s.reconcileV2LB(lbSpec)) + reconciler, err := s.getOrCreateV2LB(lbSpec) + if err != nil { + errs = append(errs, err) + } else { + lbReconcilers = append(lbReconcilers, reconciler) + } default: errs = append(errs, fmt.Errorf("unknown or unsupported load balancer type on primary load balancer: %s", lbSpec.LoadBalancerType)) } } + // Reconcile all load balancers + for _, reconciler := range lbReconcilers { + if err := reconciler(); err != nil { + errs = append(errs, err) + } + } + return kerrors.NewAggregate(errs) } -// reconcileV2LB creates a load balancer. It also takes care of generating unique names across -// namespaces by appending the namespace to the name. -func (s *Service) reconcileV2LB(lbSpec *infrav1.AWSLoadBalancerSpec) error { +// getOrCreateV2LB gets an existing load balancer, or creates a new one if it does not exist. +// It also takes care of generating unique names across namespaces by appending the namespace to the name. +// It returns a function that reconciles the load balancer. +func (s *Service) getOrCreateV2LB(lbSpec *infrav1.AWSLoadBalancerSpec) (lbReconciler, error) { name, err := LBName(s.scope, lbSpec) if err != nil { - return errors.Wrap(err, "failed to get control plane load balancer name") + return nil, errors.Wrap(err, "failed to get control plane load balancer name") } // Get default api server spec. desiredLB, err := s.getAPIServerLBSpec(name, lbSpec) if err != nil { - return err + return nil, err } lb, err := s.describeLB(name, lbSpec) switch { case IsNotFound(err) && s.scope.ControlPlaneEndpoint().IsValid(): // if elb is not found and owner cluster ControlPlaneEndpoint is already populated, then we should not recreate the elb. 
- return errors.Wrapf(err, "no loadbalancer exists for the AWSCluster %s, the cluster has become unrecoverable and should be deleted manually", s.scope.InfraClusterName()) + return nil, errors.Wrapf(err, "no loadbalancer exists for the AWSCluster %s, the cluster has become unrecoverable and should be deleted manually", s.scope.InfraClusterName()) case IsNotFound(err): lb, err = s.createLB(desiredLB, lbSpec) if err != nil { s.scope.Error(err, "failed to create LB") - return err + return nil, err } s.scope.Debug("Created new network load balancer for apiserver", "api-server-lb-name", lb.Name) case err != nil: // Failed to describe the load balancer - return err + return nil, err } + return func() error { + return s.reconcileV2LB(lb, desiredLB, lbSpec) + }, nil +} + +func (s *Service) reconcileV2LB(lb *infrav1.LoadBalancer, desiredLB *infrav1.LoadBalancer, lbSpec *infrav1.AWSLoadBalancerSpec) error { wReq := &elbv2.DescribeLoadBalancersInput{ LoadBalancerArns: aws.StringSlice([]string{lb.ARN}), } @@ -442,12 +476,21 @@ func (s *Service) createLB(spec *infrav1.LoadBalancer, lbSpec *infrav1.AWSLoadBa // Target Groups and listeners will be reconciled separately - s.scope.Info("Created network load balancer", "dns-name", *out.LoadBalancers[0].DNSName) + if out.LoadBalancers[0].DNSName == nil { + return nil, fmt.Errorf("CreateLoadBalancer did not return a DNS name for %s", spec.Name) + } + dnsName := *out.LoadBalancers[0].DNSName + if out.LoadBalancers[0].LoadBalancerArn == nil { + return nil, fmt.Errorf("CreateLoadBalancer did not return an ARN for %s", spec.Name) + } + arn := *out.LoadBalancers[0].LoadBalancerArn + + s.scope.Info("Created network load balancer", "dns-name", dnsName) res := spec.DeepCopy() - s.scope.Debug("applying load balancer DNS to result", "dns", *out.LoadBalancers[0].DNSName) - res.DNSName = *out.LoadBalancers[0].DNSName - res.ARN = *out.LoadBalancers[0].LoadBalancerArn + s.scope.Debug("applying load balancer DNS to result", "dns", dnsName) + res.DNSName = dnsName + res.ARN = arn return res, nil } @@ -507,37 +550,46 @@ func (s *Service) describeLB(name string, lbSpec *infrav1.AWSLoadBalancerSpec) ( return fromSDKTypeToLB(out.LoadBalancers[0], outAtt.Attributes, tags), nil } -func (s *Service) reconcileClassicLoadBalancer() error { +// getOrCreateClassicLoadBalancer gets an existing classic load balancer, or creates a new one if it does not exist. +// It also takes care of generating unique names across namespaces by appending the namespace to the name. +// It returns a function that reconciles the load balancer. +func (s *Service) getOrCreateClassicLoadBalancer() (lbReconciler, error) { // Generate a default control plane load balancer name. The load balancer name cannot be // generated by the defaulting webhook, because it is derived from the cluster name, and that // name is undefined at defaulting time when generateName is used. name, err := ELBName(s.scope) if err != nil { - return errors.Wrap(err, "failed to get control plane load balancer name") + return nil, errors.Wrap(err, "failed to get control plane load balancer name") } // Get default api server spec. spec, err := s.getAPIServerClassicELBSpec(name) if err != nil { - return err + return nil, err } apiELB, err := s.describeClassicELB(spec.Name) switch { case IsNotFound(err) && s.scope.ControlPlaneEndpoint().IsValid(): // if elb is not found and owner cluster ControlPlaneEndpoint is already populated, then we should not recreate the elb.
- return errors.Wrapf(err, "no loadbalancer exists for the AWSCluster %s, the cluster has become unrecoverable and should be deleted manually", s.scope.InfraClusterName()) + return nil, errors.Wrapf(err, "no loadbalancer exists for the AWSCluster %s, the cluster has become unrecoverable and should be deleted manually", s.scope.InfraClusterName()) case IsNotFound(err): apiELB, err = s.createClassicELB(spec) if err != nil { - return err + return nil, err } s.scope.Debug("Created new classic load balancer for apiserver", "api-server-elb-name", apiELB.Name) case err != nil: // Failed to describe the classic ELB - return err + return nil, err } + return func() error { + return s.reconcileClassicLoadBalancer(apiELB, spec) + }, nil +} + +func (s *Service) reconcileClassicLoadBalancer(apiELB *infrav1.LoadBalancer, spec *infrav1.LoadBalancer) error { if apiELB.IsManaged(s.scope.Name()) { if !cmp.Equal(spec.ClassicElbAttributes, apiELB.ClassicElbAttributes) { err := s.configureAttributes(apiELB.Name, spec.ClassicElbAttributes) @@ -546,6 +598,9 @@ func (s *Service) reconcileClassicLoadBalancer() error { } } + // BUG: note that describeClassicELB doesn't set HealthCheck in its output, + // so we're configuring the health check on every reconcile whether it's + // needed or not. if !cmp.Equal(spec.HealthCheck, apiELB.HealthCheck) { s.scope.Debug("Reconciling health check for apiserver load balancer", "health-check", spec.HealthCheck) err := s.configureHealthCheck(apiELB.Name, spec.HealthCheck) @@ -597,7 +652,7 @@ func (s *Service) reconcileClassicLoadBalancer() error { } func (s *Service) configureHealthCheck(name string, healthCheck *infrav1.ClassicELBHealthCheck) error { - if _, err := s.ELBClient.ConfigureHealthCheck(&elb.ConfigureHealthCheckInput{ + healthCheckInput := &elb.ConfigureHealthCheckInput{ LoadBalancerName: aws.String(name), HealthCheck: &elb.HealthCheck{ Target: aws.String(healthCheck.Target), @@ -606,7 +661,14 @@ func (s *Service) configureHealthCheck(name string, healthCheck *infrav1.Classic HealthyThreshold: aws.Int64(healthCheck.HealthyThreshold), UnhealthyThreshold: aws.Int64(healthCheck.UnhealthyThreshold), }, - }); err != nil { + } + + if err := wait.WaitForWithRetryable(wait.NewBackoff(), func() (bool, error) { + if _, err := s.ELBClient.ConfigureHealthCheck(healthCheckInput); err != nil { + return false, err + } + return true, nil + }, awserrors.LoadBalancerNotFound); err != nil { return errors.Wrapf(err, "failed to configure health check for classic load balancer: %s", name) } return nil @@ -1193,30 +1255,15 @@ func (s *Service) createClassicELB(spec *infrav1.LoadBalancer) (*infrav1.LoadBal return nil, errors.Wrapf(err, "failed to create classic load balancer: %v", spec) } - if spec.HealthCheck != nil { - if err := wait.WaitForWithRetryable(wait.NewBackoff(), func() (bool, error) { - if _, err := s.ELBClient.ConfigureHealthCheck(&elb.ConfigureHealthCheckInput{ - LoadBalancerName: aws.String(spec.Name), - HealthCheck: &elb.HealthCheck{ - Target: aws.String(spec.HealthCheck.Target), - Interval: aws.Int64(int64(spec.HealthCheck.Interval.Seconds())), - Timeout: aws.Int64(int64(spec.HealthCheck.Timeout.Seconds())), - HealthyThreshold: aws.Int64(spec.HealthCheck.HealthyThreshold), - UnhealthyThreshold: aws.Int64(spec.HealthCheck.UnhealthyThreshold), - }, - }); err != nil { - return false, err - } - return true, nil - }, awserrors.LoadBalancerNotFound); err != nil { - return nil, errors.Wrapf(err, "failed to configure health check for classic load balancer: %v", spec) - } - } - 
s.scope.Info("Created classic load balancer", "dns-name", *out.DNSName) res := spec.DeepCopy() res.DNSName = *out.DNSName + + // We haven't configured any health check yet. Don't report it here so it + // will be set later during reconciliation. + res.HealthCheck = nil + return res, nil } diff --git a/pkg/cloud/services/elb/loadbalancer_test.go b/pkg/cloud/services/elb/loadbalancer_test.go index 073c64e0b2..7a578fd7dd 100644 --- a/pkg/cloud/services/elb/loadbalancer_test.go +++ b/pkg/cloud/services/elb/loadbalancer_test.go @@ -2546,7 +2546,10 @@ func TestReconcileV2LB(t *testing.T) { scope: clusterScope, ELBV2Client: elbV2APIMocks, } - err = s.reconcileV2LB(clusterScope.ControlPlaneLoadBalancer()) + reconciler, err := s.getOrCreateV2LB(clusterScope.ControlPlaneLoadBalancer()) + if err == nil { + err = reconciler() + } lb := s.scope.Network().APIServerELB tc.check(t, &lb, err) @@ -2567,9 +2570,41 @@ func TestReconcileLoadbalancers(t *testing.T) { az = "us-west-1a" ) + primaryELB := func() *elbv2.LoadBalancer { + return &elbv2.LoadBalancer{ + LoadBalancerArn: aws.String(elbArn), + LoadBalancerName: aws.String(elbName), + DNSName: aws.String(elbName), + Scheme: aws.String(string(infrav1.ELBSchemeInternetFacing)), + AvailabilityZones: []*elbv2.AvailabilityZone{ + { + SubnetId: aws.String(clusterSubnetID), + ZoneName: aws.String(az), + }, + }, + VpcId: aws.String(vpcID), + } + } + + secondaryELB := func() *elbv2.LoadBalancer { + return &elbv2.LoadBalancer{ + LoadBalancerArn: aws.String(secondElbArn), + LoadBalancerName: aws.String(secondElbName), + DNSName: aws.String(secondElbName), + Scheme: aws.String(string(infrav1.ELBSchemeInternal)), + AvailabilityZones: []*elbv2.AvailabilityZone{ + { + SubnetId: aws.String(clusterSubnetID), + ZoneName: aws.String(az), + }, + }, + VpcId: aws.String(vpcID), + } + } + tests := []struct { name string - elbV2APIMocks func(m *mocks.MockELBV2APIMockRecorder) + elbV2APIMocks func(*mocks.MockELBV2APIMockRecorder) check func(t *testing.T, firstLB, secondLB *infrav1.LoadBalancer, err error) awsCluster func(acl infrav1.AWSCluster) infrav1.AWSCluster spec func(spec infrav1.LoadBalancer) infrav1.LoadBalancer @@ -2590,20 +2625,7 @@ func TestReconcileLoadbalancers(t *testing.T) { Names: aws.StringSlice([]string{elbName}), })). Return(&elbv2.DescribeLoadBalancersOutput{ - LoadBalancers: []*elbv2.LoadBalancer{ - { - LoadBalancerArn: aws.String(elbArn), - LoadBalancerName: aws.String(elbName), - Scheme: aws.String(string(infrav1.ELBSchemeInternetFacing)), - AvailabilityZones: []*elbv2.AvailabilityZone{ - { - SubnetId: aws.String(clusterSubnetID), - ZoneName: aws.String(az), - }, - }, - VpcId: aws.String(vpcID), - }, - }, + LoadBalancers: []*elbv2.LoadBalancer{primaryELB()}, }, nil) m.DescribeLoadBalancerAttributes(&elbv2.DescribeLoadBalancerAttributesInput{LoadBalancerArn: aws.String(elbArn)}).Return( &elbv2.DescribeLoadBalancerAttributesOutput{ @@ -2632,20 +2654,7 @@ func TestReconcileLoadbalancers(t *testing.T) { Names: aws.StringSlice([]string{secondElbName}), })). 
Return(&elbv2.DescribeLoadBalancersOutput{ - LoadBalancers: []*elbv2.LoadBalancer{ - { - LoadBalancerArn: aws.String(secondElbArn), - LoadBalancerName: aws.String(secondElbName), - Scheme: aws.String(string(infrav1.ELBSchemeInternal)), - AvailabilityZones: []*elbv2.AvailabilityZone{ - { - SubnetId: aws.String(clusterSubnetID), - ZoneName: aws.String(az), - }, - }, - VpcId: aws.String(vpcID), - }, - }, + LoadBalancers: []*elbv2.LoadBalancer{secondaryELB()}, }, nil) m.DescribeLoadBalancerAttributes(&elbv2.DescribeLoadBalancerAttributesInput{LoadBalancerArn: aws.String(secondElbArn)}).Return( &elbv2.DescribeLoadBalancerAttributesOutput{ @@ -2690,6 +2699,121 @@ func TestReconcileLoadbalancers(t *testing.T) { } }, }, + { + name: "ensure two load balancers are created concurrently", + awsCluster: func(acl infrav1.AWSCluster) infrav1.AWSCluster { + acl.Spec.ControlPlaneLoadBalancer.Name = aws.String(elbName) + acl.Spec.SecondaryControlPlaneLoadBalancer = &infrav1.AWSLoadBalancerSpec{ + Name: aws.String(secondElbName), + Scheme: &infrav1.ELBSchemeInternal, + LoadBalancerType: infrav1.LoadBalancerTypeNLB, + } + return acl + }, + elbV2APIMocks: func(m *mocks.MockELBV2APIMockRecorder) { + // Initial DescribeLoadBalancers return empty + m.DescribeLoadBalancers(gomock.Eq(&elbv2.DescribeLoadBalancersInput{ + Names: aws.StringSlice([]string{elbName}), + })).Return(&elbv2.DescribeLoadBalancersOutput{}, nil) + m.DescribeLoadBalancers(gomock.Eq(&elbv2.DescribeLoadBalancersInput{ + Names: aws.StringSlice([]string{secondElbName}), + })).Return(&elbv2.DescribeLoadBalancersOutput{}, nil) + + // Create both load balancers + createLB1 := m.CreateLoadBalancer(gomock.Eq(&elbv2.CreateLoadBalancerInput{ + Name: aws.String(elbName), + Scheme: aws.String(string(infrav1.ELBSchemeInternetFacing)), + SecurityGroups: []*string{aws.String("")}, + Subnets: []*string{}, + Tags: []*elbv2.Tag{ + { + Key: aws.String("Name"), + Value: aws.String(elbName), + }, + { + Key: aws.String(infrav1.ClusterTagKey(clusterName)), + Value: aws.String(string(infrav1.ResourceLifecycleOwned)), + }, + { + Key: aws.String(infrav1.NameAWSClusterAPIRole), + Value: aws.String(infrav1.APIServerRoleTagValue), + }, + }, + Type: aws.String("network"), + })).Return(&elbv2.CreateLoadBalancerOutput{ + LoadBalancers: []*elbv2.LoadBalancer{primaryELB()}, + }, nil) + + createLB2 := m.CreateLoadBalancer(gomock.Eq(&elbv2.CreateLoadBalancerInput{ + Name: aws.String(secondElbName), + Scheme: aws.String(string(infrav1.ELBSchemeInternal)), + SecurityGroups: []*string{aws.String("")}, + Subnets: []*string{}, + Tags: []*elbv2.Tag{ + { + Key: aws.String("Name"), + Value: aws.String(secondElbName), + }, + { + Key: aws.String(infrav1.ClusterTagKey(clusterName)), + Value: aws.String(string(infrav1.ResourceLifecycleOwned)), + }, + { + Key: aws.String(infrav1.NameAWSClusterAPIRole), + Value: aws.String(infrav1.APIServerRoleTagValue), + }, + }, + Type: aws.String("network"), + })).Return(&elbv2.CreateLoadBalancerOutput{ + LoadBalancers: []*elbv2.LoadBalancer{secondaryELB()}, + }, nil) + + // Assert that we don't wait for either load balancer to be + // available until both load balancers have been created + m.WaitUntilLoadBalancerAvailableWithContext(gomock.Any(), gomock.Eq(&elbv2.DescribeLoadBalancersInput{ + LoadBalancerArns: aws.StringSlice([]string{elbArn}), + })).Return(nil).After(createLB1).After(createLB2) + m.WaitUntilLoadBalancerAvailableWithContext(gomock.Any(), gomock.Eq(&elbv2.DescribeLoadBalancersInput{ + LoadBalancerArns: 
aws.StringSlice([]string{secondElbArn}), + })).Return(nil).After(createLB1).After(createLB2) + + // Make minimal assertions on other calls not under test + m.DescribeTargetGroups(gomock.Any()).Return(&elbv2.DescribeTargetGroupsOutput{}, nil).AnyTimes() + m.DescribeListeners(gomock.Any()).Return(&elbv2.DescribeListenersOutput{}, nil).AnyTimes() + m.ModifyTargetGroupAttributes(gomock.Any()).Return(nil, nil).AnyTimes() + m.ModifyListener(gomock.Any()).Return(nil, nil).AnyTimes() + m.SetSecurityGroups(gomock.Any()).Return(nil, nil).AnyTimes() + + // These calls return the same info for both load balancers, but it's not important to this test + m.CreateTargetGroup(gomock.Any()).Return(&elbv2.CreateTargetGroupOutput{ + TargetGroups: []*elbv2.TargetGroup{ + { + TargetGroupName: aws.String(elbName), + TargetGroupArn: aws.String(elbArn), + }, + }, + }, nil).AnyTimes() + m.CreateListener(gomock.Any()).Return(&elbv2.CreateListenerOutput{ + Listeners: []*elbv2.Listener{ + { + ListenerArn: aws.String(elbArn), + }, + }, + }, nil).AnyTimes() + }, + check: func(t *testing.T, firstLB, secondLB *infrav1.LoadBalancer, err error) { + t.Helper() + if err != nil { + t.Fatalf("did not expect error: %v", err) + } + if firstLB == nil { + t.Errorf("Expected first LB to be populated, was nil") + } + if secondLB == nil { + t.Errorf("Expected second LB to be populated, was nil") + } + }, + }, } for _, tc := range tests { diff --git a/pkg/cloud/throttle/throttle.go b/pkg/cloud/throttle/throttle.go index 77511952b7..128bb121a6 100644 --- a/pkg/cloud/throttle/throttle.go +++ b/pkg/cloud/throttle/throttle.go @@ -18,10 +18,13 @@ limitations under the License. package throttle import ( + "context" "regexp" "strings" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/smithy-go/middleware" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/awserrors" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/internal/rate" @@ -52,6 +55,11 @@ func (o *OperationLimiter) Wait(r *request.Request) error { return o.getLimiter().Wait(r.Context()) } +// WaitV2 will wait on a request for AWS SDK V2. +func (o *OperationLimiter) WaitV2(ctx context.Context) error { + return o.getLimiter().Wait(ctx) +} + // Match will match a request. func (o *OperationLimiter) Match(r *request.Request) (bool, error) { if o.regexp == nil { @@ -64,6 +72,19 @@ func (o *OperationLimiter) Match(r *request.Request) (bool, error) { return o.regexp.MatchString(r.Operation.Name), nil } +// MatchV2 will match a request for AWS SDK V2. +func (o *OperationLimiter) MatchV2(ctx context.Context) (bool, error) { + if o.regexp == nil { + var err error + o.regexp, err = regexp.Compile("^" + o.Operation) + if err != nil { + return false, err + } + } + opName := awsmiddleware.GetOperationName(ctx) + return o.regexp.MatchString(opName), nil +} + // LimitRequest will limit a request. func (s ServiceLimiter) LimitRequest(r *request.Request) { if ol, ok := s.matchRequest(r); ok { @@ -71,6 +92,13 @@ func (s ServiceLimiter) LimitRequest(r *request.Request) { } } +// LimitRequestV2 will limit a request for AWS SDK V2. 
+func (s ServiceLimiter) LimitRequestV2(ctx context.Context) { + if ol, ok := s.matchRequestV2(ctx); ok { + _ = ol.WaitV2(ctx) + } +} + func (o *OperationLimiter) getLimiter() *rate.Limiter { if o.limiter == nil { o.limiter = rate.NewLimiter(o.RefillRate, o.Burst) @@ -92,6 +120,16 @@ func (s ServiceLimiter) ReviewResponse(r *request.Request) { } } +// ReviewResponseV2 will review the limits of a request's response for AWS SDK V2. +func (s ServiceLimiter) ReviewResponseV2(ctx context.Context, errorCode string) { + switch errorCode { + case "Throttling", "RequestLimitExceeded": + if ol, ok := s.matchRequestV2(ctx); ok { + ol.limiter.ResetTokens() + } + } +} + func (s ServiceLimiter) matchRequest(r *request.Request) (*OperationLimiter, bool) { for _, ol := range s { match, err := ol.Match(r) @@ -104,3 +142,37 @@ func (s ServiceLimiter) matchRequest(r *request.Request) (*OperationLimiter, boo } return nil, false } + +// matchRequestV2 matches a request for AWS SDK V2. +func (s ServiceLimiter) matchRequestV2(ctx context.Context) (*OperationLimiter, bool) { + for _, ol := range s { + match, err := ol.MatchV2(ctx) + if err != nil { + return nil, false + } + if match { + return ol, true + } + } + return nil, false +} + +// WithServiceLimiterMiddleware returns a function that registers the given ServiceLimiter on a middleware stack. +func WithServiceLimiterMiddleware(limiter *ServiceLimiter) func(stack *middleware.Stack) error { + return func(stack *middleware.Stack) error { + // Inserts the service limiter middleware after request context initialization. + return stack.Finalize.Insert(getServiceLimiterMiddleware(limiter), "capa/RequestMetricContextMiddleware", middleware.After) + } +} + +// getServiceLimiterMiddleware implements the service limiter middleware. +func getServiceLimiterMiddleware(limiter *ServiceLimiter) middleware.FinalizeMiddleware { + return middleware.FinalizeMiddlewareFunc("capa/ServiceLimiterMiddleware", func(ctx context.Context, input middleware.FinalizeInput, handler middleware.FinalizeHandler) (middleware.FinalizeOutput, middleware.Metadata, error) { + limiter.LimitRequestV2(ctx) + + out, metadata, err := handler.HandleFinalize(ctx, input) + smithyErr := awserrors.ParseSmithyError(err) + limiter.ReviewResponseV2(ctx, smithyErr.ErrorCode()) + return out, metadata, err + }) +} diff --git a/test/e2e/data/eks/cluster-template-eks-control-plane-only-with-accessentries.yaml b/test/e2e/data/eks/cluster-template-eks-control-plane-only-with-accessentries.yaml new file mode 100644 index 0000000000..158b1cc62c --- /dev/null +++ b/test/e2e/data/eks/cluster-template-eks-control-plane-only-with-accessentries.yaml @@ -0,0 +1,51 @@ +--- +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + name: "${CLUSTER_NAME}" +spec: + clusterNetwork: + pods: + cidrBlocks: ["192.168.0.0/16"] + infrastructureRef: + kind: AWSManagedControlPlane + apiVersion: controlplane.cluster.x-k8s.io/v1beta2 + name: "${CLUSTER_NAME}-control-plane" + controlPlaneRef: + kind: AWSManagedControlPlane + apiVersion: controlplane.cluster.x-k8s.io/v1beta2 + name: "${CLUSTER_NAME}-control-plane" +--- +kind: AWSManagedControlPlane +apiVersion: controlplane.cluster.x-k8s.io/v1beta2 +metadata: + name: "${CLUSTER_NAME}-control-plane" +spec: + region: "${AWS_REGION}" + sshKeyName: "${AWS_SSH_KEY_NAME}" + version: "${KUBERNETES_VERSION}" + accessConfig: + authenticationMode: API + accessEntries: + - principalARN: "arn:aws:iam::123456789012:role/KubernetesAdmin" + type: STANDARD + username: kubernetes-admin + kubernetesGroups: + -
system:masters + accessPolicies: + - policyARN: "arn:aws:eks::aws:cluster-access-policy/AmazonEKSClusterAdminPolicy" + accessScope: + type: "cluster" + - principalARN: "arn:aws:iam::123456789012:role/DeveloperRole" + type: STANDARD + username: developer + kubernetesGroups: + - developers + accessPolicies: + - policyARN: "arn:aws:eks::aws:cluster-access-policy/AmazonEKSViewPolicy" + accessScope: + type: "namespace" + namespaces: ["default"] + identityRef: + kind: AWSClusterStaticIdentity + name: e2e-account diff --git a/test/e2e/suites/managed/eks_test.go b/test/e2e/suites/managed/eks_test.go index 3ee6a575c0..45ae3b6621 100644 --- a/test/e2e/suites/managed/eks_test.go +++ b/test/e2e/suites/managed/eks_test.go @@ -165,6 +165,56 @@ var _ = ginkgo.Describe("[managed] [general] EKS cluster tests", func() { } }) + ginkgo.By("should create a cluster with access entries") + ManagedClusterSpec(ctx, func() ManagedClusterSpecInput { + return ManagedClusterSpecInput{ + E2EConfig: e2eCtx.E2EConfig, + ConfigClusterFn: defaultConfigCluster, + BootstrapClusterProxy: e2eCtx.Environment.BootstrapClusterProxy, + AWSSession: e2eCtx.BootstrapUserAWSSession, + AWSSessionV2: e2eCtx.BootstrapUserAWSSessionV2, + Namespace: namespace, + ClusterName: clusterName, + Flavour: EKSControlPlaneOnlyWithAccessEntriesFlavor, + ControlPlaneMachineCount: 1, // NOTE: this cannot be zero as clusterctl returns an error + WorkerMachineCount: 0, + } + }) + + ginkgo.By("should have created the expected access entries") + expectedEntries := []ekscontrolplanev1.AccessEntry{ + { + PrincipalARN: "arn:aws:iam::123456789012:role/KubernetesAdmin", + Type: "STANDARD", + Username: "kubernetes-admin", + KubernetesGroups: []string{"system:masters"}, + AccessPolicies: []ekscontrolplanev1.AccessPolicyReference{ + { + PolicyARN: "arn:aws:eks::aws:cluster-access-policy/AmazonEKSClusterAdminPolicy", + AccessScope: ekscontrolplanev1.AccessScope{ + Type: "cluster", + }, + }, + }, + }, + { + PrincipalARN: "arn:aws:iam::123456789012:role/DeveloperRole", + Type: "STANDARD", + Username: "developer", + KubernetesGroups: []string{"developers"}, + AccessPolicies: []ekscontrolplanev1.AccessPolicyReference{ + { + PolicyARN: "arn:aws:eks::aws:cluster-access-policy/AmazonEKSViewPolicy", + AccessScope: ekscontrolplanev1.AccessScope{ + Type: "namespace", + Namespaces: []string{"default"}, + }, + }, + }, + }, + } + verifyAccessEntries(ctx, eksClusterName, expectedEntries, e2eCtx.BootstrapUserAWSSessionV2) + ginkgo.By(fmt.Sprintf("getting cluster with name %s", clusterName)) cluster := framework.GetClusterByName(ctx, framework.GetClusterByNameInput{ Getter: e2eCtx.Environment.BootstrapClusterProxy.GetClient(), diff --git a/test/e2e/suites/managed/helpers.go b/test/e2e/suites/managed/helpers.go index f6c5a942a0..9e48c98341 100644 --- a/test/e2e/suites/managed/helpers.go +++ b/test/e2e/suites/managed/helpers.go @@ -22,6 +22,7 @@ package managed import ( "context" "fmt" + "slices" "time" "github.com/aws/aws-sdk-go-v2/aws" @@ -35,6 +36,7 @@ import ( crclient "sigs.k8s.io/controller-runtime/pkg/client" infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" + ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2" "sigs.k8s.io/cluster-api/test/framework/clusterctl" ) @@ -49,6 +51,7 @@ const ( EKSMachinePoolOnlyFlavor = "eks-machinepool-only" EKSIPv6ClusterFlavor = "eks-ipv6-cluster" EKSControlPlaneOnlyLegacyFlavor = "eks-control-plane-only-legacy" + EKSControlPlaneOnlyWithAccessEntriesFlavor = 
"eks-control-plane-only-with-accessentries" ) const ( @@ -231,3 +234,68 @@ func verifyASG(eksClusterName, asgName string, checkOwned bool, cfg *aws.Config) Expect(found).To(BeTrue(), "expecting the cluster owned tag to exist") } } + +func verifyAccessEntries(ctx context.Context, eksClusterName string, expectedEntries []ekscontrolplanev1.AccessEntry, cfg *aws.Config) { + eksClient := eks.NewFromConfig(*cfg) + + listOutput, err := eksClient.ListAccessEntries(ctx, &eks.ListAccessEntriesInput{ + ClusterName: &eksClusterName, + }) + Expect(err).ToNot(HaveOccurred(), "failed to list access entries") + + expectedEntriesMap := make(map[string]ekscontrolplanev1.AccessEntry, len(expectedEntries)) + for _, entry := range expectedEntries { + expectedEntriesMap[entry.PrincipalARN] = entry + } + + for _, principalARN := range listOutput.AccessEntries { + expectedEntry, exists := expectedEntriesMap[principalARN] + Expect(exists).To(BeTrue(), fmt.Sprintf("unexpected access entry: %s", principalARN)) + + describeOutput, err := eksClient.DescribeAccessEntry(ctx, &eks.DescribeAccessEntryInput{ + ClusterName: &eksClusterName, + PrincipalArn: &principalARN, + }) + Expect(err).ToNot(HaveOccurred(), fmt.Sprintf("failed to describe access entry: %s", principalARN)) + + Expect(describeOutput.AccessEntry.Type).To(Equal(expectedEntry.Type), "access entry type does not match") + Expect(describeOutput.AccessEntry.Username).To(Equal(expectedEntry.Username), "access entry username does not match") + + if len(expectedEntry.KubernetesGroups) > 0 { + slices.Sort(expectedEntry.KubernetesGroups) + slices.Sort(describeOutput.AccessEntry.KubernetesGroups) + Expect(describeOutput.AccessEntry.KubernetesGroups).To(Equal(expectedEntry.KubernetesGroups), "access entry kubernetes groups do not match") + } + + if len(expectedEntry.AccessPolicies) > 0 { + listOutput, err := eksClient.ListAssociatedAccessPolicies(ctx, &eks.ListAssociatedAccessPoliciesInput{ + ClusterName: &eksClusterName, + PrincipalArn: &principalARN, + }) + Expect(err).ToNot(HaveOccurred(), "failed to list access policies") + + expectedPolicies := make(map[string]ekscontrolplanev1.AccessPolicyReference, len(expectedEntry.AccessPolicies)) + for _, policy := range expectedEntry.AccessPolicies { + expectedPolicies[policy.PolicyARN] = policy + } + + for _, policy := range listOutput.AssociatedAccessPolicies { + expectedPolicy, exists := expectedPolicies[*policy.PolicyArn] + Expect(exists).To(BeTrue(), fmt.Sprintf("unexpected access policy: %s", *policy.PolicyArn)) + + Expect(policy.AccessScope.Type).To(Equal(expectedPolicy.AccessScope.Type), "access policy scope type does not match") + + if expectedPolicy.AccessScope.Type == "namespace" { + slices.Sort(expectedPolicy.AccessScope.Namespaces) + slices.Sort(policy.AccessScope.Namespaces) + Expect(policy.AccessScope.Namespaces).To(Equal(expectedPolicy.AccessScope.Namespaces), "access policy scope namespaces do not match") + } + + delete(expectedPolicies, *policy.PolicyArn) + } + Expect(expectedPolicies).To(BeEmpty(), "not all expected access policies were found") + } + delete(expectedEntriesMap, principalARN) + } + Expect(expectedEntriesMap).To(BeEmpty(), "not all expected access entries were found") +}