diff --git a/locals.tf b/locals.tf
index 68b6fff6..4d44d8c9 100755
--- a/locals.tf
+++ b/locals.tf
@@ -130,7 +130,6 @@ locals {
     launch_template_name            = "${local.cluster_name}-default-lt"
     launch_template_use_name_prefix = true
     launch_template_tags            = { Name = "${local.cluster_name}-default" }
-    tags = var.autoscaling_enabled ? merge(local.tags, { "k8s.io/cluster-autoscaler/${local.cluster_name}" = "owned", propagate_at_launch = true }, { "k8s.io/cluster-autoscaler/enabled" = "true", propagate_at_launch = true }) : local.tags
 
     # Node Pool IAM Configuration
     iam_role_use_name_prefix = false
@@ -176,12 +175,12 @@ locals {
       http_tokens                 = var.default_nodepool_metadata_http_tokens
       http_put_response_hop_limit = var.default_nodepool_metadata_http_put_response_hop_limit
     }
+
     # Launch Template configuration
     create_launch_template          = true
     launch_template_name            = "${local.cluster_name}-${key}-lt"
     launch_template_use_name_prefix = true
     launch_template_tags            = { Name = "${local.cluster_name}-${key}" }
-    tags = var.autoscaling_enabled ? merge(local.tags, { "k8s.io/cluster-autoscaler/${local.cluster_name}" = "owned", propagate_at_launch = true }, { "k8s.io/cluster-autoscaler/enabled" = "true", propagate_at_launch = true }) : local.tags
     # Node Pool IAM Configuration
     iam_role_use_name_prefix = false
     iam_role_name            = "${var.prefix}-${key}-eks-node-group"
@@ -224,4 +223,24 @@ locals {
     }
   } : {}
 
+  # Used while tagging the EKS created ASGs with our user specified set of tags
+  node_group_names = keys(local.node_groups)
+  # Merge user tags with autoscaler tags when autoscaling is enabled
+  all_node_group_tags = var.autoscaling_enabled ? merge(
+    local.tags,
+    {
+      "k8s.io/cluster-autoscaler/${local.cluster_name}" = "owned"
+      "k8s.io/cluster-autoscaler/enabled"               = "true"
+    }
+  ) : local.tags
+  node_group_tags = flatten([
+    for ng_name in local.node_group_names : [
+      for tag_key, tag_value in local.all_node_group_tags : {
+        node_group = ng_name
+        key        = tag_key
+        value      = tag_value
+      }
+    ]
+  ])
+
 }
diff --git a/main.tf b/main.tf
index d8079d87..fff4b3f7 100755
--- a/main.tf
+++ b/main.tf
@@ -183,12 +183,35 @@ module "eks" {
     # BYO - EKS Workers IAM Role
     create_iam_role = var.workers_iam_role_arn == null ? true : false
     iam_role_arn    = var.workers_iam_role_arn
+
+    # Tags to propagate to node groups and their Auto Scaling Groups
+    tags = local.tags
   }
 
   ## Any individual Node Group customizations should go here
   eks_managed_node_groups = local.node_groups # Node group definitions (from locals)
 }
 
+# Tag the EKS created ASGs - ensures tags are explicitly applied to AutoScalingGroups
+# This is more reliable than relying solely on EKS module propagation
+resource "aws_autoscaling_group_tag" "node_group_tags" {
+  # The for_each loop results in one resource created per node group per tag
+  for_each = {
+    for item in local.node_group_tags :
+    "${item.node_group}-${item.key}" => item
+  }
+  # Reference the single ASG for this node group
+  autoscaling_group_name = module.eks.eks_managed_node_groups[each.value.node_group].node_group_autoscaling_group_names[0]
+
+  tag {
+    key                 = each.value.key
+    value               = each.value.value
+    propagate_at_launch = true
+  }
+
+  depends_on = [module.eks]
+}
+
 # Resource to create EKS access entries for admin IAM roles. Used for EKS RBAC.
 resource "aws_eks_access_entry" "instance" {
   for_each = toset(coalesce(var.admin_access_entry_role_arns, []))
diff --git a/security.tf b/security.tf
index 5ac2f3b4..f28bb6da 100644
--- a/security.tf
+++ b/security.tf
@@ -26,7 +26,7 @@ resource "aws_security_group" "sg" {
 
 # Egress rule to allow all outbound traffic from the security group
 resource "aws_vpc_security_group_egress_rule" "sg" {
-
+  count             = var.security_group_id == null && var.vpc_private_endpoints_enabled ? 1 : 0
   security_group_id = local.security_group_id
 
   description = "Allow all outbound traffic."
diff --git a/variables.tf b/variables.tf
index c3c894ca..9b958293 100644
--- a/variables.tf
+++ b/variables.tf
@@ -778,6 +778,13 @@ variable "aws_fsx_ontap_fsxadmin_password" {
   default     = "v3RyS3cretPa$sw0rd"
 }
 
+# The ONTAP administrative password for the svmadmin user that you can use to administer your Storage Virtual Machine using the ONTAP CLI and REST API.
+variable "aws_fsx_ontap_svmadmin_password" {
+  description = "The ONTAP administrative password for the svmadmin user that you can use to administer your Storage Virtual Machine using the ONTAP CLI and REST API."
+  type        = string
+  default     = "v3RyS3cretPa$sw0rd"
+}
+
 # The storage capacity (GiB) of the ONTAP file system. Valid values between 1024 and 196608.
 variable "aws_fsx_ontap_file_system_storage_capacity" {
   description = "The storage capacity (GiB) of the ONTAP file system. Valid values between 1024 and 196608."
diff --git a/vms.tf b/vms.tf
index cdcb1639..fe398421 100644
--- a/vms.tf
+++ b/vms.tf
@@ -33,6 +33,7 @@ resource "aws_fsx_ontap_file_system" "ontap-fs" {
   throughput_capacity = var.aws_fsx_ontap_file_system_throughput_capacity
   preferred_subnet_id = module.vpc.private_subnets[0]
   security_group_ids  = [local.workers_security_group_id]
+  route_table_ids     = var.aws_fsx_ontap_deployment_type == "MULTI_AZ_1" ? module.vpc.private_route_table_ids : null
   tags                = merge(local.tags, { "Name" : "${var.prefix}-ontap-fs" })
 
   depends_on = [module.ontap]
@@ -41,10 +42,11 @@ resource "aws_fsx_ontap_file_system" "ontap-fs" {
 
 # ONTAP storage virtual machine and volume resources
 resource "aws_fsx_ontap_storage_virtual_machine" "ontap-svm" {
-  count          = local.storage_type_backend == "ontap" ? 1 : 0
-  file_system_id = aws_fsx_ontap_file_system.ontap-fs[0].id
-  name           = "${var.prefix}-ontap-svm"
-  tags           = merge(local.tags, { "Name" : "${var.prefix}-ontap-svm" })
+  count              = local.storage_type_backend == "ontap" ? 1 : 0
+  file_system_id     = aws_fsx_ontap_file_system.ontap-fs[0].id
+  svm_admin_password = var.aws_fsx_ontap_svmadmin_password
+  name               = "${var.prefix}-ontap-svm"
+  tags               = merge(local.tags, { "Name" : "${var.prefix}-ontap-svm" })
 }
 
 # A default volume gets created with the svm, we may want another