diff --git a/.changelog/44334.txt b/.changelog/44334.txt new file mode 100644 index 00000000000..ec4da706c7f --- /dev/null +++ b/.changelog/44334.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_eks_cluster: Change `compute_config`, `kubernetes_network_config.elastic_load_balancing`, and `storage_config.block_storage` to Optional and Computed, allowing EKS Auto Mode settings to be enabled, disabled, and removed from configuration +``` diff --git a/internal/service/eks/cluster.go b/internal/service/eks/cluster.go index 49b43f0f012..fe559e43665 100644 --- a/internal/service/eks/cluster.go +++ b/internal/service/eks/cluster.go @@ -54,46 +54,11 @@ func resourceCluster() *schema.Resource { CustomizeDiff: customdiff.Sequence( validateAutoModeCustomizeDiff, + validateAutoModeComputeConfigCustomizeDiff, customdiff.ForceNewIfChange("encryption_config", func(_ context.Context, old, new, meta any) bool { // You cannot disable envelope encryption after enabling it. This action is irreversible. return len(old.([]any)) == 1 && len(new.([]any)) == 0 }), - func(ctx context.Context, rd *schema.ResourceDiff, meta any) error { - if rd.Id() == "" { - return nil - } - oldValue, newValue := rd.GetChange("compute_config") - - oldComputeConfig := expandComputeConfigRequest(oldValue.([]any)) - newComputeConfig := expandComputeConfigRequest(newValue.([]any)) - - if newComputeConfig == nil || oldComputeConfig == nil { - return nil - } - - oldRoleARN := aws.ToString(oldComputeConfig.NodeRoleArn) - newRoleARN := aws.ToString(newComputeConfig.NodeRoleArn) - - newComputeConfigEnabled := aws.ToBool(newComputeConfig.Enabled) - - // Do not force new if auto mode is disabled in new config and role ARN is unset - if !newComputeConfigEnabled && newRoleARN == "" { - return nil - } - - // Do not force new if built-in node pools are zeroed in new config and role ARN is unset - if len(newComputeConfig.NodePools) == 0 && newRoleARN == "" { - return nil - } - - // only force new if an existing role has changed, not if a new role is added - if oldRoleARN != "" && oldRoleARN != newRoleARN { - if err := rd.ForceNew("compute_config.0.node_role_arn"); err != nil { - return err - } - } - return nil - }, ), Timeouts: &schema.ResourceTimeout{ @@ -153,12 +118,14 @@ func resourceCluster() *schema.Resource { "compute_config": { Type: schema.TypeList, Optional: true, + Computed: true, MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ names.AttrEnabled: { Type: schema.TypeBool, Optional: true, + Computed: true, }, "node_pools": { Type: schema.TypeSet, @@ -270,6 +237,7 @@ func resourceCluster() *schema.Resource { names.AttrEnabled: { Type: schema.TypeBool, Optional: true, + Computed: true, }, }, }, @@ -411,6 +379,7 @@ func resourceCluster() *schema.Resource { "storage_config": { Type: schema.TypeList, Optional: true, + Computed: true, MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ @@ -423,6 +392,7 @@ func resourceCluster() *schema.Resource { names.AttrEnabled: { Type: schema.TypeBool, Optional: true, + Computed: true, }, }, }, @@ -520,17 +490,19 @@ func resourceCluster() *schema.Resource { func resourceClusterCreate(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EKSClient(ctx) name := d.Get(names.AttrName).(string) input := eks.CreateClusterInput{ BootstrapSelfManagedAddons: aws.Bool(d.Get("bootstrap_self_managed_addons").(bool)), + ComputeConfig: expandComputeConfigRequest(d.Get("compute_config").([]any)), 
EncryptionConfig: expandEncryptionConfig(d.Get("encryption_config").([]any)), + KubernetesNetworkConfig: expandKubernetesNetworkConfigRequest(d.Get("kubernetes_network_config").([]any)), Logging: expandLogging(d.Get("enabled_cluster_log_types").(*schema.Set)), Name: aws.String(name), ResourcesVpcConfig: expandVpcConfigRequest(d.Get(names.AttrVPCConfig).([]any)), RoleArn: aws.String(d.Get(names.AttrRoleARN).(string)), + StorageConfig: expandStorageConfigRequest(d.Get("storage_config").([]any)), Tags: getTagsIn(ctx), } @@ -538,18 +510,10 @@ func resourceClusterCreate(ctx context.Context, d *schema.ResourceData, meta any input.AccessConfig = expandCreateAccessConfigRequest(v.([]any)) } - if v, ok := d.GetOk("compute_config"); ok { - input.ComputeConfig = expandComputeConfigRequest(v.([]any)) - } - if v, ok := d.GetOk(names.AttrDeletionProtection); ok { input.DeletionProtection = aws.Bool(v.(bool)) } - if v, ok := d.GetOk("kubernetes_network_config"); ok { - input.KubernetesNetworkConfig = expandKubernetesNetworkConfigRequest(v.([]any)) - } - if v, ok := d.GetOk("outpost_config"); ok { input.OutpostConfig = expandOutpostConfigRequest(v.([]any)) } @@ -558,10 +522,6 @@ func resourceClusterCreate(ctx context.Context, d *schema.ResourceData, meta any input.RemoteNetworkConfig = expandCreateRemoteNetworkConfigRequest(v.([]any)) } - if v, ok := d.GetOk("storage_config"); ok { - input.StorageConfig = expandStorageConfigRequest(v.([]any)) - } - if v, ok := d.GetOk("upgrade_policy"); ok { input.UpgradePolicy = expandUpgradePolicy(v.([]any)) } @@ -574,8 +534,8 @@ func resourceClusterCreate(ctx context.Context, d *schema.ResourceData, meta any input.ZonalShiftConfig = expandZonalShiftConfig(v.([]any)) } - outputRaw, err := tfresource.RetryWhen(ctx, propagationTimeout, - func(ctx context.Context) (any, error) { + output, err := tfresource.RetryWhen(ctx, propagationTimeout, + func(ctx context.Context) (*eks.CreateClusterOutput, error) { return conn.CreateCluster(ctx, &input) }, func(err error) (bool, error) { @@ -611,7 +571,7 @@ func resourceClusterCreate(ctx context.Context, d *schema.ResourceData, meta any return sdkdiag.AppendErrorf(diags, "creating EKS Cluster (%s): %s", name, err) } - d.SetId(aws.ToString(outputRaw.(*eks.CreateClusterOutput).Cluster.Name)) + d.SetId(aws.ToString(output.Cluster.Name)) if _, err := waitClusterCreated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutCreate)); err != nil { return sdkdiag.AppendErrorf(diags, "waiting for EKS Cluster (%s) create: %s", d.Id(), err) @@ -705,7 +665,6 @@ func resourceClusterRead(ctx context.Context, d *schema.ResourceData, meta any) func resourceClusterUpdate(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EKSClient(ctx) // Do any version update first. @@ -755,27 +714,28 @@ func resourceClusterUpdate(ctx context.Context, d *schema.ResourceData, meta any } } + // All three fields are required to enable/disable Auto Mode or else you receive the error: + // InvalidParameterException: For EKS Auto Mode, please ensure that all required configs, + // including computeConfig, kubernetesNetworkConfig, and blockStorage are all either fully enabled or fully disabled. + // In addition, when updating other Auto Mode arguments (e.g., computeConfig.nodePools/nodeRoleARN), all three fields are required. 
if d.HasChanges("compute_config", "kubernetes_network_config", "storage_config") { - computeConfig := expandComputeConfigRequest(d.Get("compute_config").([]any)) - kubernetesNetworkConfig := expandKubernetesNetworkConfigRequest(d.Get("kubernetes_network_config").([]any)) - storageConfig := expandStorageConfigRequest(d.Get("storage_config").([]any)) input := eks.UpdateClusterConfigInput{ - ComputeConfig: computeConfig, - KubernetesNetworkConfig: kubernetesNetworkConfig, + ComputeConfig: expandComputeConfigRequest(d.Get("compute_config").([]any)), + KubernetesNetworkConfig: expandKubernetesNetworkConfigRequest(d.Get("kubernetes_network_config").([]any)), Name: aws.String(d.Id()), - StorageConfig: storageConfig, + StorageConfig: expandStorageConfigRequest(d.Get("storage_config").([]any)), } output, err := conn.UpdateClusterConfig(ctx, &input) if err != nil { - return sdkdiag.AppendErrorf(diags, "updating EKS Cluster (%s) compute config: %s", d.Id(), err) + return sdkdiag.AppendErrorf(diags, "updating EKS Cluster (%s) Auto Mode settings: %s", d.Id(), err) } updateID := aws.ToString(output.Update.Id) if _, err = waitClusterUpdateSuccessful(ctx, conn, d.Id(), updateID, d.Timeout(schema.TimeoutUpdate)); err != nil { - return sdkdiag.AppendErrorf(diags, "waiting for EKS Cluster (%s) compute config update (%s): %s", d.Id(), updateID, err) + return sdkdiag.AppendErrorf(diags, "waiting for EKS Cluster (%s) Auto Mode settings update (%s): %s", d.Id(), updateID, err) } } @@ -923,7 +883,6 @@ func resourceClusterUpdate(ctx context.Context, d *schema.ResourceData, meta any func resourceClusterDelete(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EKSClient(ctx) // If a cluster is scaling up due to load a delete request will fail @@ -936,9 +895,7 @@ func resourceClusterDelete(ctx context.Context, d *schema.ResourceData, meta any Name: aws.String(d.Id()), } err := tfresource.Retry(ctx, timeout, func(ctx context.Context) *tfresource.RetryError { - var err error - - _, err = conn.DeleteCluster(ctx, &input) + _, err := conn.DeleteCluster(ctx, &input) if errs.IsAErrorMessageContains[*types.ResourceInUseException](err, "in progress") { return tfresource.RetryableError(err) @@ -1209,8 +1166,15 @@ func expandUpdateAccessConfigRequest(tfList []any) *types.UpdateAccessConfigRequ } func expandComputeConfigRequest(tfList []any) *types.ComputeConfigRequest { + apiObject := &types.ComputeConfigRequest{} + if len(tfList) == 0 { - return nil + // Ensure this is always present to avoid the error: + // InvalidParameterException: The type for cluster update was not provided. + // when the field is removed (nil). + apiObject.Enabled = aws.Bool(false) + + return apiObject } tfMap, ok := tfList[0].(map[string]any) @@ -1218,8 +1182,6 @@ func expandComputeConfigRequest(tfList []any) *types.ComputeConfigRequest { return nil } - apiObject := &types.ComputeConfigRequest{} - if v, ok := tfMap[names.AttrEnabled].(bool); ok { apiObject.Enabled = aws.Bool(v) } @@ -1282,8 +1244,17 @@ func expandProvider(tfList []any) *types.Provider { } func expandStorageConfigRequest(tfList []any) *types.StorageConfigRequest { + apiObject := &types.StorageConfigRequest{} + if len(tfList) == 0 { - return nil + // Ensure this is always present to avoid the error: + // InvalidParameterException: The type for cluster update was not provided. + // when the field is removed (nil). 
+ apiObject.BlockStorage = &types.BlockStorage{ + Enabled: aws.Bool(false), + } + + return apiObject } tfMap, ok := tfList[0].(map[string]any) @@ -1291,8 +1262,6 @@ func expandStorageConfigRequest(tfList []any) *types.StorageConfigRequest { return nil } - apiObject := &types.StorageConfigRequest{} - if v, ok := tfMap["block_storage"].([]any); ok { apiObject.BlockStorage = expandBlockStorage(v) } @@ -1390,8 +1359,18 @@ func expandVpcConfigRequest(tfList []any) *types.VpcConfigRequest { // nosemgrep } func expandKubernetesNetworkConfigRequest(tfList []any) *types.KubernetesNetworkConfigRequest { + apiObject := &types.KubernetesNetworkConfigRequest{} + if len(tfList) == 0 { - return nil + // Required to avoid the error: + // InvalidParameterException: For EKS Auto Mode, please ensure that all required configs, + // including computeConfig, kubernetesNetworkConfig, and blockStorage are all either fully enabled or fully disabled. + // since the other two fields have been injected with `enabled: false` when the field is not present. + apiObject.ElasticLoadBalancing = &types.ElasticLoadBalancing{ + Enabled: aws.Bool(false), + } + + return apiObject } tfMap, ok := tfList[0].(map[string]any) @@ -1399,8 +1378,6 @@ func expandKubernetesNetworkConfigRequest(tfList []any) *types.KubernetesNetwork return nil } - apiObject := &types.KubernetesNetworkConfigRequest{} - if v, ok := tfMap["elastic_load_balancing"].([]any); ok { apiObject.ElasticLoadBalancing = expandKubernetesNetworkConfigElasticLoadBalancing(v) } @@ -1479,6 +1456,7 @@ func expandUpdateRemoteNetworkConfigRequest(tfList []any) *types.RemoteNetworkCo return apiObject } + func expandRemoteNodeNetworks(tfList []any) []types.RemoteNodeNetwork { var apiObjects = []types.RemoteNodeNetwork{} @@ -1581,16 +1559,16 @@ func expandZonalShiftConfig(tfList []any) *types.ZonalShiftConfigRequest { return ZonalShiftConfigRequest } -func flattenCertificate(certificate *types.Certificate) []map[string]any { - if certificate == nil { +func flattenCertificate(apiObject *types.Certificate) []map[string]any { + if apiObject == nil { return []map[string]any{} } - m := map[string]any{ - "data": aws.ToString(certificate.Data), + tfMap := map[string]any{ + "data": aws.ToString(apiObject.Data), } - return []map[string]any{m} + return []map[string]any{tfMap} } func flattenComputeConfigResponse(apiObject *types.ComputeConfigResponse) []map[string]any { @@ -1598,37 +1576,37 @@ func flattenComputeConfigResponse(apiObject *types.ComputeConfigResponse) []map[ return []map[string]any{} } - m := map[string]any{ + tfMap := map[string]any{ names.AttrEnabled: aws.ToBool(apiObject.Enabled), - "node_pools": flex.FlattenStringValueList(apiObject.NodePools), + "node_pools": apiObject.NodePools, "node_role_arn": aws.ToString(apiObject.NodeRoleArn), } - return []map[string]any{m} + return []map[string]any{tfMap} } -func flattenIdentity(identity *types.Identity) []map[string]any { - if identity == nil { +func flattenIdentity(apiObject *types.Identity) []map[string]any { + if apiObject == nil { return []map[string]any{} } - m := map[string]any{ - "oidc": flattenOIDC(identity.Oidc), + tfMap := map[string]any{ + "oidc": flattenOIDC(apiObject.Oidc), } - return []map[string]any{m} + return []map[string]any{tfMap} } -func flattenOIDC(oidc *types.OIDC) []map[string]any { - if oidc == nil { +func flattenOIDC(apiObject *types.OIDC) []map[string]any { + if apiObject == nil { return []map[string]any{} } - m := map[string]any{ - names.AttrIssuer: aws.ToString(oidc.Issuer), + tfMap := map[string]any{ 
+ names.AttrIssuer: aws.ToString(apiObject.Issuer), } - return []map[string]any{m} + return []map[string]any{tfMap} } func flattenAccessConfigResponse(apiObject *types.AccessConfigResponse, bootstrapClusterCreatorAdminPermissions *bool) []any { @@ -1681,30 +1659,29 @@ func flattenProvider(apiObject *types.Provider) []any { return []any{tfMap} } -func flattenVPCConfigResponse(vpcConfig *types.VpcConfigResponse) []map[string]any { // nosemgrep:ci.caps5-in-func-name - if vpcConfig == nil { +func flattenVPCConfigResponse(apiObject *types.VpcConfigResponse) []map[string]any { // nosemgrep:ci.caps5-in-func-name + if apiObject == nil { return []map[string]any{} } - m := map[string]any{ - "cluster_security_group_id": aws.ToString(vpcConfig.ClusterSecurityGroupId), - "endpoint_private_access": vpcConfig.EndpointPrivateAccess, - "endpoint_public_access": vpcConfig.EndpointPublicAccess, - names.AttrSecurityGroupIDs: vpcConfig.SecurityGroupIds, - names.AttrSubnetIDs: vpcConfig.SubnetIds, - "public_access_cidrs": vpcConfig.PublicAccessCidrs, - names.AttrVPCID: aws.ToString(vpcConfig.VpcId), + tfMap := map[string]any{ + "cluster_security_group_id": aws.ToString(apiObject.ClusterSecurityGroupId), + "endpoint_private_access": apiObject.EndpointPrivateAccess, + "endpoint_public_access": apiObject.EndpointPublicAccess, + names.AttrSecurityGroupIDs: apiObject.SecurityGroupIds, + names.AttrSubnetIDs: apiObject.SubnetIds, + "public_access_cidrs": apiObject.PublicAccessCidrs, + names.AttrVPCID: aws.ToString(apiObject.VpcId), } - return []map[string]any{m} + return []map[string]any{tfMap} } -func flattenLogging(logging *types.Logging) []string { +func flattenLogging(apiObject *types.Logging) []string { enabledLogTypes := []types.LogType{} - if logging != nil { - logSetups := logging.ClusterLogging - for _, logSetup := range logSetups { + if apiObject != nil { + for _, logSetup := range apiObject.ClusterLogging { if !aws.ToBool(logSetup.Enabled) { continue } @@ -1731,13 +1708,13 @@ func flattenKubernetesNetworkConfigResponse(apiObject *types.KubernetesNetworkCo return []any{tfMap} } -func flattenKubernetesNetworkConfigElasticLoadBalancing(apiObjects *types.ElasticLoadBalancing) []any { - if apiObjects == nil { +func flattenKubernetesNetworkConfigElasticLoadBalancing(apiObject *types.ElasticLoadBalancing) []any { + if apiObject == nil { return nil } tfMap := map[string]any{ - names.AttrEnabled: aws.ToBool(apiObjects.Enabled), + names.AttrEnabled: aws.ToBool(apiObject.Enabled), } return []any{tfMap} @@ -1779,7 +1756,7 @@ func flattenRemoteNodeNetwork(apiObjects []types.RemoteNodeNetwork) []any { for _, apiObject := range apiObjects { tfMap := map[string]any{ - "cidrs": flex.FlattenStringValueList(apiObject.Cidrs), + "cidrs": apiObject.Cidrs, } tfList = append(tfList, tfMap) @@ -1797,7 +1774,7 @@ func flattenRemotePodNetwork(apiObjects []types.RemotePodNetwork) []any { for _, apiObject := range apiObjects { tfMap := map[string]any{ - "cidrs": flex.FlattenStringValueList(apiObject.Cidrs), + "cidrs": apiObject.Cidrs, } tfList = append(tfList, tfMap) @@ -1885,3 +1862,44 @@ func validateAutoModeCustomizeDiff(_ context.Context, d *schema.ResourceDiff, _ return nil } + +// Allow setting `compute_config.node_role_arn` to `null` when disabling auto mode or +// built-in node pools without forcing re-creation of the cluster +func validateAutoModeComputeConfigCustomizeDiff(_ context.Context, diff *schema.ResourceDiff, _ any) error { + if diff.Id() == "" { + return nil + } + + oldValue, newValue := 
diff.GetChange("compute_config") + + oldComputeConfig := expandComputeConfigRequest(oldValue.([]any)) + newComputeConfig := expandComputeConfigRequest(newValue.([]any)) + + if newComputeConfig == nil || oldComputeConfig == nil { + return nil + } + + oldRoleARN := aws.ToString(oldComputeConfig.NodeRoleArn) + newRoleARN := aws.ToString(newComputeConfig.NodeRoleArn) + + newComputeConfigEnabled := aws.ToBool(newComputeConfig.Enabled) + + // Do not force new if auto mode is disabled in new config and role ARN is unset + if !newComputeConfigEnabled && newRoleARN == "" { + return nil + } + + // Do not force new if built-in node pools are zeroed in new config and role ARN is unset + if len(newComputeConfig.NodePools) == 0 && newRoleARN == "" { + return nil + } + + // only force new if an existing role has changed, not if a new role is added + if oldRoleARN != "" && oldRoleARN != newRoleARN { + if err := diff.ForceNew("compute_config.0.node_role_arn"); err != nil { + return err + } + } + + return nil +} diff --git a/internal/service/eks/cluster_test.go b/internal/service/eks/cluster_test.go index a08a2d5409e..812578fff82 100644 --- a/internal/service/eks/cluster_test.go +++ b/internal/service/eks/cluster_test.go @@ -66,7 +66,7 @@ func TestAccEKSCluster_basic(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "certificate_authority.#", "1"), resource.TestCheckResourceAttrSet(resourceName, "certificate_authority.0.data"), resource.TestCheckNoResourceAttr(resourceName, "cluster_id"), - resource.TestCheckResourceAttr(resourceName, "compute_config.#", "0"), + resource.TestCheckResourceAttr(resourceName, "compute_config.#", "1"), acctest.CheckResourceAttrRFC3339(resourceName, names.AttrCreatedAt), resource.TestCheckResourceAttr(resourceName, names.AttrDeletionProtection, acctest.CtFalse), resource.TestCheckResourceAttr(resourceName, "enabled_cluster_log_types.#", "0"), @@ -86,7 +86,7 @@ func TestAccEKSCluster_basic(t *testing.T) { resource.TestMatchResourceAttr(resourceName, "platform_version", regexache.MustCompile(`^eks\.\d+$`)), resource.TestCheckResourceAttrPair(resourceName, names.AttrRoleARN, "aws_iam_role.cluster", names.AttrARN), resource.TestCheckResourceAttr(resourceName, names.AttrStatus, string(types.ClusterStatusActive)), - resource.TestCheckResourceAttr(resourceName, "storage_config.#", "0"), + resource.TestCheckResourceAttr(resourceName, "storage_config.#", "1"), resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "0"), resource.TestMatchResourceAttr(resourceName, names.AttrVersion, regexache.MustCompile(`^\d+\.\d+$`)), resource.TestCheckResourceAttr(resourceName, "upgrade_policy.#", "1"), @@ -380,7 +380,7 @@ func TestAccEKSCluster_ComputeConfig_OnCreate(t *testing.T) { func TestAccEKSCluster_ComputeConfig_OnUpdate(t *testing.T) { ctx := acctest.Context(t) - var cluster1, cluster2 types.Cluster + var cluster types.Cluster rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_eks_cluster.test" @@ -393,15 +393,43 @@ func TestAccEKSCluster_ComputeConfig_OnUpdate(t *testing.T) { { Config: testAccClusterConfig_computeConfig_onUpdateSetup(rName), Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckClusterExists(ctx, resourceName, &cluster1), - resource.TestCheckResourceAttr(resourceName, "compute_config.#", "0"), + testAccCheckClusterExists(ctx, resourceName, &cluster), + resource.TestCheckResourceAttr(resourceName, "compute_config.#", "1"), + resource.TestCheckResourceAttr(resourceName, "compute_config.0.enabled", acctest.CtFalse), 
+ resource.TestCheckResourceAttr(resourceName, "compute_config.0.node_pools.#", "0"), resource.TestCheckResourceAttr(resourceName, "kubernetes_network_config.#", "1"), resource.TestCheckResourceAttr(resourceName, "kubernetes_network_config.0.elastic_load_balancing.#", "1"), resource.TestCheckResourceAttr(resourceName, "kubernetes_network_config.0.elastic_load_balancing.0.enabled", acctest.CtFalse), resource.TestCheckResourceAttr(resourceName, "kubernetes_network_config.0.ip_family", "ipv4"), resource.TestCheckResourceAttr(resourceName, "kubernetes_network_config.0.service_ipv4_cidr", "172.20.0.0/16"), resource.TestCheckResourceAttr(resourceName, "kubernetes_network_config.0.service_ipv6_cidr", ""), - resource.TestCheckResourceAttr(resourceName, "storage_config.#", "0"), + resource.TestCheckResourceAttr(resourceName, "storage_config.#", "1"), + resource.TestCheckResourceAttr(resourceName, "storage_config.0.block_storage.#", "1"), + resource.TestCheckResourceAttr(resourceName, "storage_config.0.block_storage.0.enabled", acctest.CtFalse), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"bootstrap_self_managed_addons"}, + }, + { + Config: testAccClusterConfig_computeConfig_nodePoolsSetup(rName, false), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckClusterExists(ctx, resourceName, &cluster), + resource.TestCheckResourceAttr(resourceName, "compute_config.#", "1"), + resource.TestCheckResourceAttr(resourceName, "compute_config.0.enabled", acctest.CtFalse), + resource.TestCheckResourceAttr(resourceName, "compute_config.0.node_pools.#", "0"), + resource.TestCheckResourceAttr(resourceName, "kubernetes_network_config.#", "1"), + resource.TestCheckResourceAttr(resourceName, "kubernetes_network_config.0.elastic_load_balancing.#", "1"), + resource.TestCheckResourceAttr(resourceName, "kubernetes_network_config.0.elastic_load_balancing.0.enabled", acctest.CtFalse), + resource.TestCheckResourceAttr(resourceName, "kubernetes_network_config.0.ip_family", "ipv4"), + resource.TestCheckResourceAttr(resourceName, "kubernetes_network_config.0.service_ipv4_cidr", "172.20.0.0/16"), + resource.TestCheckResourceAttr(resourceName, "kubernetes_network_config.0.service_ipv6_cidr", ""), + resource.TestCheckResourceAttr(resourceName, "storage_config.#", "1"), + resource.TestCheckResourceAttr(resourceName, "storage_config.0.block_storage.#", "1"), + resource.TestCheckResourceAttr(resourceName, "storage_config.0.block_storage.0.enabled", acctest.CtFalse), ), }, { @@ -412,9 +440,13 @@ func TestAccEKSCluster_ComputeConfig_OnUpdate(t *testing.T) { }, { Config: testAccClusterConfig_computeConfig(rName, true, "aws_iam_role.node.arn"), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + }, + }, Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckClusterExists(ctx, resourceName, &cluster2), - testAccCheckClusterNotRecreated(&cluster1, &cluster2), + testAccCheckClusterExists(ctx, resourceName, &cluster), resource.TestCheckResourceAttr(resourceName, "compute_config.#", "1"), resource.TestCheckResourceAttr(resourceName, "compute_config.0.enabled", acctest.CtTrue), resource.TestCheckResourceAttr(resourceName, "compute_config.0.node_pools.#", "1"), @@ -437,6 +469,35 @@ func TestAccEKSCluster_ComputeConfig_OnUpdate(t *testing.T) { ImportStateVerify: true, ImportStateVerifyIgnore: []string{"bootstrap_self_managed_addons"}, 
}, + { + Config: testAccClusterConfig_computeConfig_nodePoolsSetup(rName, false), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + }, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckClusterExists(ctx, resourceName, &cluster), + resource.TestCheckResourceAttr(resourceName, "compute_config.#", "1"), + resource.TestCheckResourceAttr(resourceName, "compute_config.0.enabled", acctest.CtFalse), + resource.TestCheckResourceAttr(resourceName, "compute_config.0.node_pools.#", "0"), + resource.TestCheckResourceAttr(resourceName, "kubernetes_network_config.#", "1"), + resource.TestCheckResourceAttr(resourceName, "kubernetes_network_config.0.elastic_load_balancing.#", "1"), + resource.TestCheckResourceAttr(resourceName, "kubernetes_network_config.0.elastic_load_balancing.0.enabled", acctest.CtFalse), + resource.TestCheckResourceAttr(resourceName, "kubernetes_network_config.0.ip_family", "ipv4"), + resource.TestCheckResourceAttr(resourceName, "kubernetes_network_config.0.service_ipv4_cidr", "172.20.0.0/16"), + resource.TestCheckResourceAttr(resourceName, "kubernetes_network_config.0.service_ipv6_cidr", ""), + resource.TestCheckResourceAttr(resourceName, "storage_config.#", "1"), + resource.TestCheckResourceAttr(resourceName, "storage_config.0.block_storage.#", "1"), + resource.TestCheckResourceAttr(resourceName, "storage_config.0.block_storage.0.enabled", acctest.CtFalse), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"bootstrap_self_managed_addons"}, + }, }, }) }
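
The crux of the change above: when a `compute_config`, `kubernetes_network_config`, or `storage_config` block is absent from configuration, the expanders now return an explicitly disabled request instead of `nil`, so `UpdateClusterConfig` always carries all three Auto Mode fields together. A minimal, self-contained Go sketch of that behavior (the trimmed-down expander and `main` wrapper below are illustrative only, not code from this diff; the imports are the same aws-sdk-go-v2 packages `cluster.go` uses):

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/eks/types"
)

// Trimmed-down mirror of the empty-list branch added to
// expandComputeConfigRequest above (illustrative only): removing the block
// from configuration now yields an explicit "disabled" request, not nil.
func expandComputeConfigRequest(tfList []any) *types.ComputeConfigRequest {
	if len(tfList) == 0 {
		return &types.ComputeConfigRequest{Enabled: aws.Bool(false)}
	}
	// Full expansion elided; see cluster.go above.
	return &types.ComputeConfigRequest{}
}

func main() {
	req := expandComputeConfigRequest(nil)
	// A non-nil, explicitly disabled request is what prevents
	// "InvalidParameterException: The type for cluster update was not provided."
	fmt.Printf("compute enabled: %t\n", aws.ToBool(req.Enabled))
}
```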