Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
16 changes: 16 additions & 0 deletions config/rbac/role.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -73,6 +73,14 @@ rules:
- get
- list
- watch
- apiGroups:
- cluster.x-k8s.io
resources:
- machinedeployments
verbs:
- get
- list
- watch
- apiGroups:
- cluster.x-k8s.io
resources:
Expand Down Expand Up @@ -260,6 +268,14 @@ rules:
- get
- patch
- update
- apiGroups:
- infrastructure.cluster.x-k8s.io
resources:
- awsmachinetemplates
verbs:
- get
- list
- watch
- apiGroups:
- infrastructure.cluster.x-k8s.io
resources:
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -130,7 +130,10 @@ func (r *AWSManagedControlPlaneReconciler) SetupWithManager(ctx context.Context,
// +kubebuilder:rbac:groups="",resources=secrets,verbs=get;list;watch;create;update;delete;patch
// +kubebuilder:rbac:groups="",resources=namespaces,verbs=get;list;watch
// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=clusters;clusters/status,verbs=get;list;watch
// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=machinedeployments,verbs=get;list;watch
// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=machinepools,verbs=get;list;watch
// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=awsmachines;awsmachines/status,verbs=get;list;watch
// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=awsmachinetemplates,verbs=get;list;watch
// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=awsmanagedmachinepools;awsmanagedmachinepools/status,verbs=get;list;watch
// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=awsmachinepools;awsmachinepools/status,verbs=get;list;watch
// +kubebuilder:rbac:groups=controlplane.cluster.x-k8s.io,resources=awsmanagedcontrolplanes,verbs=get;list;watch;update;patch;delete
Expand Down Expand Up @@ -285,7 +288,6 @@ func (r *AWSManagedControlPlaneReconciler) reconcileNormal(ctx context.Context,
managedScope.Error(err, "non-fatal: failed to set up EventBridge")
}
}

if err := authService.ReconcileIAMAuthenticator(ctx); err != nil {
conditions.MarkFalse(awsManagedControlPlane, ekscontrolplanev1.IAMAuthenticatorConfiguredCondition, ekscontrolplanev1.IAMAuthenticatorConfigurationFailedReason, clusterv1.ConditionSeverityError, err.Error())
return reconcile.Result{}, errors.Wrapf(err, "failed to reconcile aws-iam-authenticator config for AWSManagedControlPlane %s/%s", awsManagedControlPlane.Namespace, awsManagedControlPlane.Name)
Expand Down
224 changes: 224 additions & 0 deletions controlplane/eks/controllers/awsmanagedcontrolplane_controller_test.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,224 @@
package controllers

import (
"context"
"fmt"
"testing"

"github.com/golang/mock/gomock"
. "github.com/onsi/gomega"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/tools/record"
"k8s.io/utils/pointer"

infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2"
expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2"
"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
expclusterv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
"sigs.k8s.io/cluster-api/util"
)

// TestAWSManagedControlPlaneReconcilerIntegrationTests runs envtest-backed
// integration tests for the AWSManagedControlPlane reconciler. It verifies
// that getRolesForWorkers collects the IAM instance-profile names from both
// MachinePool (via AWSMachinePool launch template) and MachineDeployment
// (via AWSMachineTemplate) worker definitions.
func TestAWSManagedControlPlaneReconcilerIntegrationTests(t *testing.T) {
	var (
		reconciler AWSManagedControlPlaneReconciler
		mockCtrl   *gomock.Controller
		recorder   *record.FakeRecorder
		ctx        context.Context
	)
	// setup wires a fresh reconciler against the shared envtest client.
	setup := func(t *testing.T) {
		t.Helper()
		mockCtrl = gomock.NewController(t)
		recorder = record.NewFakeRecorder(10)
		ctx = context.TODO()
		reconciler = AWSManagedControlPlaneReconciler{
			Client:   testEnv.Client,
			Recorder: recorder,
		}
	}

	teardown := func() {
		mockCtrl.Finish()
	}

	t.Run("Should successfully find roles for MachineDeployments and MachinePools", func(t *testing.T) {
		g := NewWithT(t)
		setup(t)
		// Register teardown immediately so the mock controller is verified
		// even when an assertion below fails fatally.
		defer teardown()

		namespace, err := testEnv.CreateNamespace(ctx, fmt.Sprintf("integ-test-%s", util.RandomString(5)))
		g.Expect(err).To(BeNil())
		ns := namespace.Name
		name := "default"

		eksCluster := createEKSCluster(name, ns)
		g.Expect(testEnv.Create(ctx, eksCluster)).To(Succeed())

		// MachinePool path: the role comes from the AWSMachinePool's launch template.
		awsMP := createAWSMachinePoolForClusterWithInstanceProfile(name, ns, eksCluster.Name, "nodes.cluster-api-provider-aws.sigs.k8s.io")
		infraRef := corev1.ObjectReference{
			Kind:       awsMP.TypeMeta.Kind,
			Name:       awsMP.Name,
			Namespace:  awsMP.Namespace,
			APIVersion: awsMP.TypeMeta.APIVersion,
		}
		g.Expect(testEnv.Create(ctx, awsMP)).To(Succeed())
		mp := createMachinepoolForCluster(name, ns, eksCluster.Name, infraRef)
		g.Expect(testEnv.Create(ctx, mp)).To(Succeed())

		// MachineDeployment path: the role comes from the AWSMachineTemplate.
		awsMachineTemplate := createAWSMachineTemplateForClusterWithInstanceProfile(name, ns, eksCluster.Name, "eks-nodes.cluster-api-provider-aws.sigs.k8s.io")
		infraRefForMD := corev1.ObjectReference{
			Kind:       awsMachineTemplate.TypeMeta.Kind,
			Name:       awsMachineTemplate.Name,
			Namespace:  awsMachineTemplate.Namespace,
			APIVersion: awsMachineTemplate.TypeMeta.APIVersion,
		}
		g.Expect(testEnv.Create(ctx, awsMachineTemplate)).To(Succeed())
		md := createMachineDeploymentForCluster(name, ns, eksCluster.Name, infraRefForMD)
		g.Expect(testEnv.Create(ctx, md)).To(Succeed())

		expectedRoles := map[string]struct{}{
			"nodes.cluster-api-provider-aws.sigs.k8s.io":     {},
			"eks-nodes.cluster-api-provider-aws.sigs.k8s.io": {},
		}

		controllerIdentity := createControllerIdentity()
		g.Expect(testEnv.Create(ctx, controllerIdentity)).To(Succeed())

		// t.Cleanup already runs at the end of this subtest; wrapping it in a
		// defer (as the original did) is redundant.
		t.Cleanup(func() {
			g.Expect(testEnv.Cleanup(ctx, namespace, eksCluster, awsMP, mp, awsMachineTemplate, md, controllerIdentity)).To(Succeed())
		})

		managedScope, err := scope.NewManagedControlPlaneScope(scope.ManagedControlPlaneScopeParams{
			Client:       testEnv,
			ControlPlane: eksCluster,
			Cluster: &clusterv1.Cluster{
				ObjectMeta: metav1.ObjectMeta{
					Name:      name,
					Namespace: ns,
				},
			},
		})
		g.Expect(err).To(BeNil(), "failed to create managedScope")

		gotRoles, err := reconciler.getRolesForWorkers(ctx, managedScope)
		g.Expect(err).To(BeNil(), "failed to get roles for workers")
		g.Expect(gotRoles).To(BeEquivalentTo(expectedRoles), "did not get correct roles for workers")
	})
}

// createEKSCluster builds an AWSManagedControlPlane with the given name and
// namespace, labelled as belonging to the cluster of the same name. The spec
// is left at its zero value.
func createEKSCluster(name, namespace string) *ekscontrolplanev1.AWSManagedControlPlane {
	meta := metav1.ObjectMeta{
		Name:      name,
		Namespace: namespace,
		Labels:    map[string]string{clusterv1.ClusterLabelName: name},
	}
	return &ekscontrolplanev1.AWSManagedControlPlane{
		ObjectMeta: meta,
		Spec:       ekscontrolplanev1.AWSManagedControlPlaneSpec{},
	}
}

// createAWSMachinePoolForClusterWithInstanceProfile builds an AWSMachinePool
// for clusterName whose launch template carries the given IAM instance
// profile; MaxSize is fixed at 1.
func createAWSMachinePoolForClusterWithInstanceProfile(name, namespace, clusterName, instanceProfile string) *expinfrav1.AWSMachinePool {
	launchTemplate := expinfrav1.AWSLaunchTemplate{
		IamInstanceProfile: instanceProfile,
	}
	return &expinfrav1.AWSMachinePool{
		TypeMeta: metav1.TypeMeta{Kind: "AWSMachinePool"},
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: namespace,
			Labels:    map[string]string{clusterv1.ClusterLabelName: clusterName},
		},
		Spec: expinfrav1.AWSMachinePoolSpec{
			AWSLaunchTemplate: launchTemplate,
			MaxSize:           1,
		},
	}
}

// createMachinepoolForCluster builds a CAPI MachinePool for clusterName whose
// machine template points at the provided infrastructure reference.
func createMachinepoolForCluster(name, namespace, clusterName string, infrastructureRef corev1.ObjectReference) *expclusterv1.MachinePool {
	machineSpec := clusterv1.MachineSpec{
		ClusterName:       clusterName,
		InfrastructureRef: infrastructureRef,
	}
	return &expclusterv1.MachinePool{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: namespace,
			Labels:    map[string]string{clusterv1.ClusterLabelName: clusterName},
		},
		Spec: expclusterv1.MachinePoolSpec{
			ClusterName: clusterName,
			Template:    clusterv1.MachineTemplateSpec{Spec: machineSpec},
		},
	}
}

// createAWSMachineTemplateForClusterWithInstanceProfile builds an
// AWSMachineTemplate for clusterName whose machine spec carries the given IAM
// instance profile and an m5.xlarge instance type.
func createAWSMachineTemplateForClusterWithInstanceProfile(name, namespace, clusterName, instanceProfile string) *infrav1.AWSMachineTemplate {
	machineSpec := infrav1.AWSMachineSpec{
		IAMInstanceProfile: instanceProfile,
		InstanceType:       "m5.xlarge",
	}
	return &infrav1.AWSMachineTemplate{
		TypeMeta: metav1.TypeMeta{Kind: "AWSMachineTemplate"},
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: namespace,
			Labels:    map[string]string{clusterv1.ClusterLabelName: clusterName},
		},
		Spec: infrav1.AWSMachineTemplateSpec{
			Template: infrav1.AWSMachineTemplateResource{Spec: machineSpec},
		},
	}
}

// createMachineDeploymentForCluster builds a two-replica CAPI
// MachineDeployment for clusterName whose machine template points at the
// provided infrastructure reference.
func createMachineDeploymentForCluster(name, namespace, clusterName string, infrastructureRef corev1.ObjectReference) *clusterv1.MachineDeployment {
	template := clusterv1.MachineTemplateSpec{
		Spec: clusterv1.MachineSpec{
			ClusterName:       clusterName,
			InfrastructureRef: infrastructureRef,
		},
	}
	return &clusterv1.MachineDeployment{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: namespace,
			Labels:    map[string]string{clusterv1.ClusterLabelName: clusterName},
		},
		Spec: clusterv1.MachineDeploymentSpec{
			ClusterName: clusterName,
			Template:    template,
			Replicas:    pointer.Int32(2),
		},
	}
}

// createControllerIdentity builds the singleton "default"
// AWSClusterControllerIdentity with an empty AllowedNamespaces selector.
func createControllerIdentity() *infrav1.AWSClusterControllerIdentity {
	identitySpec := infrav1.AWSClusterIdentitySpec{
		AllowedNamespaces: &infrav1.AllowedNamespaces{},
	}
	return &infrav1.AWSClusterControllerIdentity{
		TypeMeta:   metav1.TypeMeta{Kind: string(infrav1.ControllerIdentityKind)},
		ObjectMeta: metav1.ObjectMeta{Name: "default"},
		Spec: infrav1.AWSClusterControllerIdentitySpec{
			AWSClusterIdentitySpec: identitySpec,
		},
	}
}
89 changes: 89 additions & 0 deletions controlplane/eks/controllers/suite_test.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,89 @@
/*
Copyright 2022 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package controllers

import (
"fmt"
"path"
"testing"

utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/client-go/kubernetes/scheme"
ctrl "sigs.k8s.io/controller-runtime"

infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2"
expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2"
"sigs.k8s.io/cluster-api-provider-aws/v2/test/helpers"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
expclusterv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
)

var (
	// testEnv is the shared envtest environment used by every test in this package.
	testEnv *helpers.TestEnvironment
	// ctx is produced by controller-runtime's signal handler; it is canceled
	// when the process receives a termination signal, letting the envtest
	// manager shut down cleanly.
	ctx = ctrl.SetupSignalHandler()
)

// TestMain boots the shared envtest environment before this package's tests
// run and tears it down afterwards.
func TestMain(m *testing.M) {
	setup()
	defer teardown()
	// Since Go 1.15 the testing package propagates m.Run's result as the
	// process exit code when TestMain returns, so it need not be handled here.
	_ = m.Run()
}

// setup registers all required API types into the global scheme, builds the
// envtest environment with the project's CRDs and webhooks, starts its
// manager in the background, and blocks until the webhook server is ready.
// Any failure panics, aborting the test binary.
func setup() {
	utilruntime.Must(infrav1.AddToScheme(scheme.Scheme))
	utilruntime.Must(ekscontrolplanev1.AddToScheme(scheme.Scheme))
	utilruntime.Must(expinfrav1.AddToScheme(scheme.Scheme))
	// Fix: expclusterv1 was registered twice in the original; once is enough.
	utilruntime.Must(expclusterv1.AddToScheme(scheme.Scheme))
	utilruntime.Must(clusterv1.AddToScheme(scheme.Scheme))

	testEnvConfig := helpers.NewTestEnvironmentConfiguration([]string{
		path.Join("config", "crd", "bases"),
	},
	).WithWebhookConfiguration("managed", path.Join("config", "webhook", "manifests.yaml"))
	var err error
	testEnv, err = testEnvConfig.Build()
	if err != nil {
		panic(err)
	}
	if err := (&ekscontrolplanev1.AWSManagedControlPlane{}).SetupWebhookWithManager(testEnv); err != nil {
		panic(fmt.Sprintf("Unable to setup AWSManagedControlPlane webhook: %v", err))
	}
	if err := (&infrav1.AWSMachineTemplateWebhook{}).SetupWebhookWithManager(testEnv); err != nil {
		panic(fmt.Sprintf("Unable to setup AWSMachineTemplate webhook: %v", err))
	}
	// Fix: the two panic messages below previously said "AWSMachineTemplate"
	// (copy-paste error); they now name the webhook that actually failed.
	if err := (&expinfrav1.AWSMachinePool{}).SetupWebhookWithManager(testEnv); err != nil {
		panic(fmt.Sprintf("Unable to setup AWSMachinePool webhook: %v", err))
	}
	if err := (&infrav1.AWSClusterControllerIdentity{}).SetupWebhookWithManager(testEnv); err != nil {
		panic(fmt.Sprintf("Unable to setup AWSClusterControllerIdentity webhook: %v", err))
	}
	go func() {
		fmt.Println("Starting the manager")
		if err := testEnv.StartManager(ctx); err != nil {
			panic(fmt.Sprintf("Failed to start the envtest manager: %v", err))
		}
	}()
	testEnv.WaitForWebhooks()
}

// teardown stops the shared envtest environment, panicking on failure so that
// shutdown problems are surfaced by the test binary.
func teardown() {
	err := testEnv.Stop()
	if err != nil {
		panic(fmt.Sprintf("Failed to stop envtest: %v", err))
	}
}
Loading