diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 4a87546461..c6534e0ef4 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -73,6 +73,14 @@ rules: - get - list - watch +- apiGroups: + - cluster.x-k8s.io + resources: + - machinedeployments + verbs: + - get + - list + - watch - apiGroups: - cluster.x-k8s.io resources: @@ -260,6 +268,14 @@ rules: - get - patch - update +- apiGroups: + - infrastructure.cluster.x-k8s.io + resources: + - awsmachinetemplates + verbs: + - get + - list + - watch - apiGroups: - infrastructure.cluster.x-k8s.io resources: diff --git a/controlplane/eks/controllers/awsmanagedcontrolplane_controller.go b/controlplane/eks/controllers/awsmanagedcontrolplane_controller.go index 41f71638d5..5d1c2f99b3 100644 --- a/controlplane/eks/controllers/awsmanagedcontrolplane_controller.go +++ b/controlplane/eks/controllers/awsmanagedcontrolplane_controller.go @@ -130,7 +130,10 @@ func (r *AWSManagedControlPlaneReconciler) SetupWithManager(ctx context.Context, // +kubebuilder:rbac:groups="",resources=secrets,verbs=get;list;watch;create;update;delete;patch // +kubebuilder:rbac:groups="",resources=namespaces,verbs=get;list;watch // +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=clusters;clusters/status,verbs=get;list;watch +// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=machinedeployments,verbs=get;list;watch +// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=machinepools,verbs=get;list;watch // +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=awsmachines;awsmachines/status,verbs=get;list;watch +// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=awsmachinetemplates,verbs=get;list;watch // +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=awsmanagedmachinepools;awsmanagedmachinepools/status,verbs=get;list;watch // +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=awsmachinepools;awsmachinepools/status,verbs=get;list;watch // 
+kubebuilder:rbac:groups=controlplane.cluster.x-k8s.io,resources=awsmanagedcontrolplanes,verbs=get;list;watch;update;patch;delete @@ -285,7 +288,6 @@ func (r *AWSManagedControlPlaneReconciler) reconcileNormal(ctx context.Context, managedScope.Error(err, "non-fatal: failed to set up EventBridge") } } - if err := authService.ReconcileIAMAuthenticator(ctx); err != nil { conditions.MarkFalse(awsManagedControlPlane, ekscontrolplanev1.IAMAuthenticatorConfiguredCondition, ekscontrolplanev1.IAMAuthenticatorConfigurationFailedReason, clusterv1.ConditionSeverityError, err.Error()) return reconcile.Result{}, errors.Wrapf(err, "failed to reconcile aws-iam-authenticator config for AWSManagedControlPlane %s/%s", awsManagedControlPlane.Namespace, awsManagedControlPlane.Name) diff --git a/controlplane/eks/controllers/awsmanagedcontrolplane_controller_test.go b/controlplane/eks/controllers/awsmanagedcontrolplane_controller_test.go new file mode 100644 index 0000000000..c24a414dfb --- /dev/null +++ b/controlplane/eks/controllers/awsmanagedcontrolplane_controller_test.go @@ -0,0 +1,224 @@ +package controllers + +import ( + "context" + "fmt" + "testing" + + "github.com/golang/mock/gomock" + . 
"github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/tools/record" + "k8s.io/utils/pointer" + + infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" + ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2" + expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" + "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + expclusterv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" + "sigs.k8s.io/cluster-api/util" +) + +func TestAWSManagedControlPlaneReconcilerIntegrationTests(t *testing.T) { + var ( + reconciler AWSManagedControlPlaneReconciler + mockCtrl *gomock.Controller + recorder *record.FakeRecorder + ctx context.Context + ) + setup := func(t *testing.T) { + t.Helper() + mockCtrl = gomock.NewController(t) + recorder = record.NewFakeRecorder(10) + ctx = context.TODO() + reconciler = AWSManagedControlPlaneReconciler{ + Client: testEnv.Client, + Recorder: recorder, + } + } + + teardown := func() { + mockCtrl.Finish() + } + t.Run("Should successfully find roles for MachineDeployments and MachinePools", func(t *testing.T) { + g := NewWithT(t) + setup(t) + namespace, err := testEnv.CreateNamespace(ctx, fmt.Sprintf("integ-test-%s", util.RandomString(5))) + g.Expect(err).To(BeNil()) + ns := namespace.Name + name := "default" + eksCluster := createEKSCluster(name, ns) + g.Expect(testEnv.Create(ctx, eksCluster)).To(Succeed()) + awsMP := createAWSMachinePoolForClusterWithInstanceProfile(name, ns, eksCluster.Name, "nodes.cluster-api-provider-aws.sigs.k8s.io") + infraRef := corev1.ObjectReference{ + Kind: awsMP.TypeMeta.Kind, + Name: awsMP.Name, + Namespace: awsMP.Namespace, + APIVersion: awsMP.TypeMeta.APIVersion, + } + g.Expect(testEnv.Create(ctx, awsMP)).To(Succeed()) + mp := createMachinepoolForCluster(name, ns, eksCluster.Name, infraRef) + g.Expect(testEnv.Create(ctx, mp)).To(Succeed()) + + 
awsMachineTemplate := createAWSMachineTemplateForClusterWithInstanceProfile(name, ns, eksCluster.Name, "eks-nodes.cluster-api-provider-aws.sigs.k8s.io") + infraRefForMD := corev1.ObjectReference{ + Kind: awsMachineTemplate.TypeMeta.Kind, + Name: awsMachineTemplate.Name, + Namespace: awsMachineTemplate.Namespace, + APIVersion: awsMachineTemplate.TypeMeta.APIVersion, + } + g.Expect(testEnv.Create(ctx, awsMachineTemplate)).To(Succeed()) + md := createMachineDeploymentForCluster(name, ns, eksCluster.Name, infraRefForMD) + g.Expect(testEnv.Create(ctx, md)).To(Succeed()) + + expectedRoles := map[string]struct{}{ + "nodes.cluster-api-provider-aws.sigs.k8s.io": {}, + "eks-nodes.cluster-api-provider-aws.sigs.k8s.io": {}, + } + + controllerIdentity := createControllerIdentity() + g.Expect(testEnv.Create(ctx, controllerIdentity)).To(Succeed()) + managedScope, err := scope.NewManagedControlPlaneScope(scope.ManagedControlPlaneScopeParams{ + Client: testEnv, + ControlPlane: eksCluster, + Cluster: &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: ns, + }, + }, + }) + g.Expect(err).To(BeNil(), "failed to create managedScope") + gotRoles, err := reconciler.getRolesForWorkers(ctx, managedScope) + g.Expect(err).To(BeNil(), "failed to get roles for workers") + g.Expect(gotRoles).To(BeEquivalentTo(expectedRoles), "did not get correct roles for workers") + defer teardown() + defer t.Cleanup(func() { + g.Expect(testEnv.Cleanup(ctx, namespace, eksCluster, awsMP, mp, awsMachineTemplate, md, controllerIdentity)).To(Succeed()) + }) + }) +} + +func createEKSCluster(name, namespace string) *ekscontrolplanev1.AWSManagedControlPlane { + eksCluster := &ekscontrolplanev1.AWSManagedControlPlane{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Labels: map[string]string{ + clusterv1.ClusterLabelName: name, + }, + }, + Spec: ekscontrolplanev1.AWSManagedControlPlaneSpec{}, + } + return eksCluster +} + +func 
createAWSMachinePoolForClusterWithInstanceProfile(name, namespace, clusterName, instanceProfile string) *expinfrav1.AWSMachinePool { + awsMP := &expinfrav1.AWSMachinePool{ + TypeMeta: metav1.TypeMeta{ + Kind: "AWSMachinePool", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Labels: map[string]string{ + clusterv1.ClusterLabelName: clusterName, + }, + }, + Spec: expinfrav1.AWSMachinePoolSpec{ + AWSLaunchTemplate: expinfrav1.AWSLaunchTemplate{ + IamInstanceProfile: instanceProfile, + }, + MaxSize: 1, + }, + } + return awsMP +} + +func createMachinepoolForCluster(name, namespace, clusterName string, infrastructureRef corev1.ObjectReference) *expclusterv1.MachinePool { + mp := &expclusterv1.MachinePool{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Labels: map[string]string{ + clusterv1.ClusterLabelName: clusterName, + }, + }, + Spec: expclusterv1.MachinePoolSpec{ + ClusterName: clusterName, + Template: clusterv1.MachineTemplateSpec{ + Spec: clusterv1.MachineSpec{ + ClusterName: clusterName, + InfrastructureRef: infrastructureRef, + }, + }, + }, + } + return mp +} + +func createAWSMachineTemplateForClusterWithInstanceProfile(name, namespace, clusterName, instanceProfile string) *infrav1.AWSMachineTemplate { + mt := &infrav1.AWSMachineTemplate{ + TypeMeta: metav1.TypeMeta{ + Kind: "AWSMachineTemplate", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Labels: map[string]string{ + clusterv1.ClusterLabelName: clusterName, + }, + }, + Spec: infrav1.AWSMachineTemplateSpec{ + Template: infrav1.AWSMachineTemplateResource{ + Spec: infrav1.AWSMachineSpec{ + IAMInstanceProfile: instanceProfile, + InstanceType: "m5.xlarge", + }, + }, + }, + } + return mt +} + +func createMachineDeploymentForCluster(name, namespace, clusterName string, infrastructureRef corev1.ObjectReference) *clusterv1.MachineDeployment { + md := &clusterv1.MachineDeployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + 
Namespace: namespace, + Labels: map[string]string{ + clusterv1.ClusterLabelName: clusterName, + }, + }, + Spec: clusterv1.MachineDeploymentSpec{ + ClusterName: clusterName, + Template: clusterv1.MachineTemplateSpec{ + Spec: clusterv1.MachineSpec{ + ClusterName: clusterName, + InfrastructureRef: infrastructureRef, + }, + }, + Replicas: pointer.Int32(2), + }, + } + return md +} + +func createControllerIdentity() *infrav1.AWSClusterControllerIdentity { + controllerIdentity := &infrav1.AWSClusterControllerIdentity{ + TypeMeta: metav1.TypeMeta{ + Kind: string(infrav1.ControllerIdentityKind), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "default", + }, + Spec: infrav1.AWSClusterControllerIdentitySpec{ + AWSClusterIdentitySpec: infrav1.AWSClusterIdentitySpec{ + AllowedNamespaces: &infrav1.AllowedNamespaces{}, + }, + }, + } + return controllerIdentity +} diff --git a/controlplane/eks/controllers/suite_test.go b/controlplane/eks/controllers/suite_test.go new file mode 100644 index 0000000000..8299d7d745 --- /dev/null +++ b/controlplane/eks/controllers/suite_test.go @@ -0,0 +1,89 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package controllers + +import ( + "fmt" + "path" + "testing" + + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/client-go/kubernetes/scheme" + ctrl "sigs.k8s.io/controller-runtime" + + infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" + ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2" + expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" + "sigs.k8s.io/cluster-api-provider-aws/v2/test/helpers" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + expclusterv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" +) + +var ( + testEnv *helpers.TestEnvironment + ctx = ctrl.SetupSignalHandler() +) + +func TestMain(m *testing.M) { + setup() + defer teardown() + m.Run() +} + +func setup() { + utilruntime.Must(infrav1.AddToScheme(scheme.Scheme)) + utilruntime.Must(ekscontrolplanev1.AddToScheme(scheme.Scheme)) + utilruntime.Must(expinfrav1.AddToScheme(scheme.Scheme)) + utilruntime.Must(expclusterv1.AddToScheme(scheme.Scheme)) + utilruntime.Must(clusterv1.AddToScheme(scheme.Scheme)) + utilruntime.Must(expclusterv1.AddToScheme(scheme.Scheme)) + + testEnvConfig := helpers.NewTestEnvironmentConfiguration([]string{ + path.Join("config", "crd", "bases"), + }, + ).WithWebhookConfiguration("managed", path.Join("config", "webhook", "manifests.yaml")) + var err error + testEnv, err = testEnvConfig.Build() + if err != nil { + panic(err) + } + if err := (&ekscontrolplanev1.AWSManagedControlPlane{}).SetupWebhookWithManager(testEnv); err != nil { + panic(fmt.Sprintf("Unable to setup AWSManagedControlPlane webhook: %v", err)) + } + if err := (&infrav1.AWSMachineTemplateWebhook{}).SetupWebhookWithManager(testEnv); err != nil { + panic(fmt.Sprintf("Unable to setup AWSMachineTemplate webhook: %v", err)) + } + if err := (&expinfrav1.AWSMachinePool{}).SetupWebhookWithManager(testEnv); err != nil { + panic(fmt.Sprintf("Unable to setup AWSMachinePool webhook: %v", err)) + } + if err := 
(&infrav1.AWSClusterControllerIdentity{}).SetupWebhookWithManager(testEnv); err != nil { + panic(fmt.Sprintf("Unable to setup AWSClusterControllerIdentity webhook: %v", err)) + } + go func() { + fmt.Println("Starting the manager") + if err := testEnv.StartManager(ctx); err != nil { + panic(fmt.Sprintf("Failed to start the envtest manager: %v", err)) + } + }() + testEnv.WaitForWebhooks() +} + +func teardown() { + if err := testEnv.Stop(); err != nil { + panic(fmt.Sprintf("Failed to stop envtest: %v", err)) + } +} diff --git a/pkg/cloud/services/iamauth/reconcile.go b/pkg/cloud/services/iamauth/reconcile.go index 64b9f07ef7..c53f25aa43 100644 --- a/pkg/cloud/services/iamauth/reconcile.go +++ b/pkg/cloud/services/iamauth/reconcile.go @@ -21,21 +21,27 @@ import ( "fmt" "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/sts" + "github.com/aws/aws-sdk-go/service/iam" "github.com/pkg/errors" "k8s.io/klog/v2" + infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2" + expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" iamv1 "sigs.k8s.io/cluster-api-provider-aws/v2/iam/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + expclusterv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" + "sigs.k8s.io/controller-runtime/pkg/client" ) // ReconcileIAMAuthenticator is used to create the aws-iam-authenticator in a cluster. 
func (s *Service) ReconcileIAMAuthenticator(ctx context.Context) error { s.scope.Info("Reconciling aws-iam-authenticator configuration", "cluster", klog.KRef(s.scope.Namespace(), s.scope.Name())) - accountID, err := s.getAccountID() + nodeRoles, err := s.getRolesForWorkers(ctx) if err != nil { - return fmt.Errorf("getting account id: %w", err) + s.scope.Error(err, "getting roles for workers") + return fmt.Errorf("getting roles for workers: %w", err) } remoteClient, err := s.scope.RemoteClient() @@ -49,17 +55,22 @@ func (s *Service) ReconcileIAMAuthenticator(ctx context.Context) error { return fmt.Errorf("getting aws-iam-authenticator backend: %w", err) } - roleARN := fmt.Sprintf("arn:aws:iam::%s:role/nodes%s", accountID, iamv1.DefaultNameSuffix) - nodesRoleMapping := ekscontrolplanev1.RoleMapping{ - RoleARN: roleARN, - KubernetesMapping: ekscontrolplanev1.KubernetesMapping{ - UserName: EC2NodeUserName, - Groups: NodeGroups, - }, - } - s.scope.Debug("Mapping node IAM role", "iam-role", nodesRoleMapping.RoleARN, "user", nodesRoleMapping.UserName) - if err := authBackend.MapRole(nodesRoleMapping); err != nil { - return fmt.Errorf("mapping iam node role: %w", err) + for roleName := range nodeRoles { + roleARN, err := s.getARNForRole(roleName) + if err != nil { + return fmt.Errorf("failed to get ARN for role %s: %w", roleName, err) + } + nodesRoleMapping := ekscontrolplanev1.RoleMapping{ + RoleARN: roleARN, + KubernetesMapping: ekscontrolplanev1.KubernetesMapping{ + UserName: EC2NodeUserName, + Groups: NodeGroups, + }, + } + s.scope.Debug("Mapping node IAM role", "iam-role", nodesRoleMapping.RoleARN, "user", nodesRoleMapping.UserName) + if err := authBackend.MapRole(nodesRoleMapping); err != nil { + return fmt.Errorf("mapping iam node role: %w", err) + } } s.scope.Debug("Mapping additional IAM roles and users") @@ -83,13 +94,111 @@ func (s *Service) ReconcileIAMAuthenticator(ctx context.Context) error { return nil } -func (s *Service) getAccountID() (string, error) { - 
input := &sts.GetCallerIdentityInput{} +func (s *Service) getARNForRole(role string) (string, error) { + input := &iam.GetRoleInput{ + RoleName: aws.String(role), + } + out, err := s.IAMClient.GetRole(input) + if err != nil { + return "", errors.Wrap(err, "unable to get role") + } + return aws.StringValue(out.Role.Arn), nil +} - out, err := s.STSClient.GetCallerIdentity(input) +func (s *Service) getRolesForWorkers(ctx context.Context) (map[string]struct{}, error) { + // previously this was the default role always added to the IAM authenticator config + // we'll keep this to not break existing behavior for users + allRoles := map[string]struct{}{ + fmt.Sprintf("nodes%s", iamv1.DefaultNameSuffix): {}, + } + if err := s.getRolesForMachineDeployments(ctx, allRoles); err != nil { + return nil, fmt.Errorf("failed to get roles from machine deployments %w", err) + } + if err := s.getRolesForMachinePools(ctx, allRoles); err != nil { + return nil, fmt.Errorf("failed to get roles from machine pools %w", err) + } + return allRoles, nil +} + +func (s *Service) getRolesForMachineDeployments(ctx context.Context, allRoles map[string]struct{}) error { + deploymentList := &clusterv1.MachineDeploymentList{} + selectors := []client.ListOption{ + client.InNamespace(s.scope.Namespace()), + client.MatchingLabels{ + clusterv1.ClusterLabelName: s.scope.Name(), + }, + } + err := s.client.List(ctx, deploymentList, selectors...) 
if err != nil { - return "", errors.Wrap(err, "unable to get caller identity") + return fmt.Errorf("failed to list machine deployments for cluster %s/%s: %w", s.scope.Namespace(), s.scope.Name(), err) + } + + for _, deployment := range deploymentList.Items { + ref := deployment.Spec.Template.Spec.InfrastructureRef + if ref.Kind != "AWSMachineTemplate" { + continue + } + awsMachineTemplate := &infrav1.AWSMachineTemplate{} + err := s.client.Get(ctx, client.ObjectKey{ + Name: ref.Name, + Namespace: s.scope.Namespace(), + }, awsMachineTemplate) + if err != nil { + return fmt.Errorf("failed to get AWSMachineTemplate %s/%s: %w", ref.Namespace, ref.Name, err) + } + instanceProfile := awsMachineTemplate.Spec.Template.Spec.IAMInstanceProfile + if _, ok := allRoles[instanceProfile]; !ok && instanceProfile != "" { + allRoles[instanceProfile] = struct{}{} + } } + return nil } - - return aws.StringValue(out.Account), nil +func (s *Service) getRolesForMachinePools(ctx context.Context, allRoles map[string]struct{}) error { + machinePoolList := &expclusterv1.MachinePoolList{} + selectors := []client.ListOption{ + client.InNamespace(s.scope.Namespace()), + client.MatchingLabels{ + clusterv1.ClusterLabelName: s.scope.Name(), + }, + } + err := s.client.List(ctx, machinePoolList, selectors...) 
+ if err != nil { + return fmt.Errorf("failed to list machine pools for cluster %s/%s: %w", s.scope.Namespace(), s.scope.Name(), err) + } + for _, pool := range machinePoolList.Items { + ref := pool.Spec.Template.Spec.InfrastructureRef + switch ref.Kind { + case "AWSMachinePool": + awsMachinePool := &expinfrav1.AWSMachinePool{} + // fetch the AWSMachinePool to read its launch template instance profile + err := s.client.Get(ctx, client.ObjectKey{ + Name: ref.Name, + Namespace: s.scope.Namespace(), + }, awsMachinePool) + if err != nil { + return fmt.Errorf("failed to get AWSMachinePool %s/%s: %w", ref.Namespace, ref.Name, err) + } + instanceProfile := awsMachinePool.Spec.AWSLaunchTemplate.IamInstanceProfile + if _, ok := allRoles[instanceProfile]; !ok && instanceProfile != "" { + allRoles[instanceProfile] = struct{}{} + } + case "AWSManagedMachinePool": + awsMachineManagedPool := &expinfrav1.AWSManagedMachinePool{} + err := s.client.Get(ctx, client.ObjectKey{ + Name: ref.Name, + Namespace: s.scope.Namespace(), + }, awsMachineManagedPool) + if err != nil { + return fmt.Errorf("failed to get AWSManagedMachinePool %s/%s: %w", ref.Namespace, ref.Name, err) + } + instanceProfile := awsMachineManagedPool.Spec.RoleName + if _, ok := allRoles[instanceProfile]; !ok && instanceProfile != "" { + allRoles[instanceProfile] = struct{}{} + } + + default: + } + } + return nil } diff --git a/pkg/cloud/services/iamauth/service.go b/pkg/cloud/services/iamauth/service.go index 22c184fc51..477e7c4928 100644 --- a/pkg/cloud/services/iamauth/service.go +++ b/pkg/cloud/services/iamauth/service.go @@ -17,7 +17,7 @@ limitations under the License. 
package iamauth import ( - "github.com/aws/aws-sdk-go/service/sts/stsiface" + "github.com/aws/aws-sdk-go/service/iam/iamiface" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" @@ -28,7 +28,7 @@ type Service struct { scope scope.IAMAuthScope backend BackendType client client.Client - STSClient stsiface.STSAPI + IAMClient iamiface.IAMAPI } // NewService will create a new Service object. @@ -37,6 +37,6 @@ func NewService(iamScope scope.IAMAuthScope, backend BackendType, client client. scope: iamScope, backend: backend, client: client, - STSClient: scope.NewSTSClient(iamScope, iamScope, iamScope, iamScope.InfraCluster()), + IAMClient: scope.NewIAMClient(iamScope, iamScope, iamScope, iamScope.InfraCluster()), } }