
Commit 940a0e8

fix: Remove arguments that do not make sense in EKS
1 parent 7550e95 commit 940a0e8

6 files changed (+6 lines, -81 lines)

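In practice, this commit removes the `force_delete_warm_pool`, `hibernation_options`, and `warm_pool` inputs from the self-managed node group path. A minimal sketch of a caller configuration that would need those arguments deleted after upgrading — the module source, group name, and the retained values below are illustrative assumptions, not taken from this commit:

module "eks" {
  # Assumed registry source for this module; pin the version you actually use.
  source = "terraform-aws-modules/eks/aws"

  # ... required cluster inputs omitted for brevity ...

  self_managed_node_groups = {
    default = {
      # Removed by this commit - delete these from existing definitions:
      # force_delete_warm_pool = true
      # hibernation_options    = { configured = true }
      # warm_pool = {
      #   pool_state                  = "Stopped"
      #   min_size                    = 1
      #   max_group_prepared_capacity = 2
      #   instance_reuse_policy       = { reuse_on_scale_in = true }
      # }

      # Still supported (see the node_groups.tf diff below):
      force_delete          = true
      termination_policies  = ["OldestInstance"]
      max_instance_lifetime = 86400
    }
  }
}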

README.md

Lines changed: 2 additions & 2 deletions (large diff not rendered by default)

modules/self-managed-node-group/README.md

Lines changed: 0 additions & 3 deletions

@@ -127,10 +127,8 @@ module "self_managed_node_group" {
 | <a name="input_enabled_metrics"></a> [enabled\_metrics](#input\_enabled\_metrics) | A list of metrics to collect. The allowed values are `GroupDesiredCapacity`, `GroupInServiceCapacity`, `GroupPendingCapacity`, `GroupMinSize`, `GroupMaxSize`, `GroupInServiceInstances`, `GroupPendingInstances`, `GroupStandbyInstances`, `GroupStandbyCapacity`, `GroupTerminatingCapacity`, `GroupTerminatingInstances`, `GroupTotalCapacity`, `GroupTotalInstances` | `list(string)` | `[]` | no |
 | <a name="input_enclave_options"></a> [enclave\_options](#input\_enclave\_options) | Enable Nitro Enclaves on launched instances | <pre>object({<br/> enabled = optional(bool)<br/> })</pre> | `null` | no |
 | <a name="input_force_delete"></a> [force\_delete](#input\_force\_delete) | Allows deleting the Auto Scaling Group without waiting for all instances in the pool to terminate. You can force an Auto Scaling Group to delete even if it's in the process of scaling a resource. Normally, Terraform drains all the instances before deleting the group. This bypasses that behavior and potentially leaves resources dangling | `bool` | `null` | no |
-| <a name="input_force_delete_warm_pool"></a> [force\_delete\_warm\_pool](#input\_force\_delete\_warm\_pool) | Allows deleting the Auto Scaling Group without waiting for all instances in the warm pool to terminate | `bool` | `null` | no |
 | <a name="input_health_check_grace_period"></a> [health\_check\_grace\_period](#input\_health\_check\_grace\_period) | Time (in seconds) after instance comes into service before checking health | `number` | `null` | no |
 | <a name="input_health_check_type"></a> [health\_check\_type](#input\_health\_check\_type) | `EC2` or `ELB`. Controls how health checking is done | `string` | `null` | no |
-| <a name="input_hibernation_options"></a> [hibernation\_options](#input\_hibernation\_options) | The hibernation options for the instance | <pre>object({<br/> configured = optional(bool)<br/> })</pre> | `null` | no |
 | <a name="input_iam_instance_profile_arn"></a> [iam\_instance\_profile\_arn](#input\_iam\_instance\_profile\_arn) | Amazon Resource Name (ARN) of an existing IAM instance profile that provides permissions for the node group. Required if `create_iam_instance_profile` = `false` | `string` | `null` | no |
 | <a name="input_iam_role_additional_policies"></a> [iam\_role\_additional\_policies](#input\_iam\_role\_additional\_policies) | Additional policies to be added to the IAM role | `map(string)` | `{}` | no |
 | <a name="input_iam_role_arn"></a> [iam\_role\_arn](#input\_iam\_role\_arn) | ARN of the IAM role used by the instance profile. Required when `create_access_entry = true` and `create_iam_instance_profile = false` | `string` | `null` | no |
@@ -200,7 +198,6 @@ module "self_managed_node_group" {
 | <a name="input_vpc_security_group_ids"></a> [vpc\_security\_group\_ids](#input\_vpc\_security\_group\_ids) | A list of security group IDs to associate | `list(string)` | `[]` | no |
 | <a name="input_wait_for_capacity_timeout"></a> [wait\_for\_capacity\_timeout](#input\_wait\_for\_capacity\_timeout) | A maximum duration that Terraform should wait for ASG instances to be healthy before timing out. (See also Waiting for Capacity below.) Setting this to '0' causes Terraform to skip all Capacity Waiting behavior | `string` | `null` | no |
 | <a name="input_wait_for_elb_capacity"></a> [wait\_for\_elb\_capacity](#input\_wait\_for\_elb\_capacity) | Setting this will cause Terraform to wait for exactly this number of healthy instances in all attached load balancers on both create and update operations. Takes precedence over `min_elb_capacity` behavior | `number` | `null` | no |
-| <a name="input_warm_pool"></a> [warm\_pool](#input\_warm\_pool) | If this block is configured, add a Warm Pool to the specified Auto Scaling group | <pre>object({<br/> instance_reuse_policy = optional(object({<br/> reuse_on_scale_in = optional(bool)<br/> }))<br/> max_group_prepared_capacity = optional(number)<br/> min_size = optional(number)<br/> pool_state = optional(string)<br/> })</pre> | `null` | no |
 
 ## Outputs

modules/self-managed-node-group/main.tf

Lines changed: 0 additions & 27 deletions

@@ -191,14 +191,6 @@ resource "aws_launch_template" "this" {
     }
   }
 
-  dynamic "hibernation_options" {
-    for_each = var.hibernation_options != null ? [var.hibernation_options] : []
-
-    content {
-      configured = hibernation_options.value.configured
-    }
-  }
-
   iam_instance_profile {
     arn = var.create_iam_instance_profile ? aws_iam_instance_profile.this[0].arn : var.iam_instance_profile_arn
   }
@@ -500,7 +492,6 @@ resource "aws_autoscaling_group" "this" {
   desired_capacity_type     = var.desired_size_type
   enabled_metrics           = var.enabled_metrics
   force_delete              = var.force_delete
-  force_delete_warm_pool    = var.force_delete_warm_pool
   health_check_grace_period = var.health_check_grace_period
   health_check_type         = var.health_check_type
 
@@ -773,24 +764,6 @@ resource "aws_autoscaling_group" "this" {
   wait_for_capacity_timeout = var.wait_for_capacity_timeout
   wait_for_elb_capacity     = var.wait_for_elb_capacity
 
-  dynamic "warm_pool" {
-    for_each = var.warm_pool != null ? [var.warm_pool] : []
-
-    content {
-      dynamic "instance_reuse_policy" {
-        for_each = warm_pool.value.instance_reuse_policy != null ? [warm_pool.value.instance_reuse_policy] : []
-
-        content {
-          reuse_on_scale_in = instance_reuse_policy.value.reuse_on_scale_in
-        }
-      }
-
-      max_group_prepared_capacity = warm_pool.value.max_group_prepared_capacity
-      min_size                    = warm_pool.value.min_size
-      pool_state                  = warm_pool.value.pool_state
-    }
-  }
-
   dynamic "timeouts" {
     for_each = var.timeouts != null ? [var.timeouts] : []
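
The blocks removed above follow the module's recurring pattern for optional nested configuration: a dynamic block whose for_each is a one-element list when the variable is set and an empty list when it is null, so the block is rendered at most once. A small sketch of the same technique against a hypothetical optional object (the variable and resource names are illustrative, not from this repository):

variable "monitoring_options" {
  description = "Optional detailed monitoring settings (illustrative)"
  type = object({
    enabled = optional(bool)
  })
  default = null
}

resource "aws_launch_template" "example" {
  name_prefix = "example-"

  # Rendered once when monitoring_options is set, omitted entirely when it is null
  dynamic "monitoring" {
    for_each = var.monitoring_options != null ? [var.monitoring_options] : []

    content {
      enabled = monitoring.value.enabled
    }
  }
}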

modules/self-managed-node-group/variables.tf

Lines changed: 0 additions & 27 deletions

@@ -237,14 +237,6 @@ variable "enclave_options" {
   default = null
 }
 
-variable "hibernation_options" {
-  description = "The hibernation options for the instance"
-  type = object({
-    configured = optional(bool)
-  })
-  default = null
-}
-
 variable "instance_market_options" {
   description = "The market (purchasing) option for the instance"
   type = object({
@@ -651,12 +643,6 @@ variable "force_delete" {
   default = null
 }
 
-variable "force_delete_warm_pool" {
-  description = "Allows deleting the Auto Scaling Group without waiting for all instances in the warm pool to terminate"
-  type        = bool
-  default     = null
-}
-
 variable "termination_policies" {
   description = "A list of policies to decide how the instances in the Auto Scaling Group should be terminated. The allowed values are `OldestInstance`, `NewestInstance`, `OldestLaunchConfiguration`, `ClosestToNextInstanceHour`, `OldestLaunchTemplate`, `AllocationStrategy`, `Default`"
   type        = list(string)
@@ -829,19 +815,6 @@ variable "mixed_instances_policy" {
   default = null
 }
 
-variable "warm_pool" {
-  description = "If this block is configured, add a Warm Pool to the specified Auto Scaling group"
-  type = object({
-    instance_reuse_policy = optional(object({
-      reuse_on_scale_in = optional(bool)
-    }))
-    max_group_prepared_capacity = optional(number)
-    min_size                    = optional(number)
-    pool_state                  = optional(string)
-  })
-  default = null
-}
-
 variable "timeouts" {
   description = "Timeout configurations for the autoscaling group"
  type = object({

node_groups.tf

Lines changed: 4 additions & 7 deletions

@@ -424,11 +424,10 @@ module "self_managed_node_group" {
 
   ignore_failed_scaling_activities = try(each.value.ignore_failed_scaling_activities, var.self_managed_node_group_defaults.ignore_failed_scaling_activities, null)
 
-  force_delete           = try(each.value.force_delete, var.self_managed_node_group_defaults.force_delete, null)
-  force_delete_warm_pool = try(each.value.force_delete_warm_pool, var.self_managed_node_group_defaults.force_delete_warm_pool, null)
-  termination_policies   = try(each.value.termination_policies, var.self_managed_node_group_defaults.termination_policies, null)
-  suspended_processes    = try(each.value.suspended_processes, var.self_managed_node_group_defaults.suspended_processes, null)
-  max_instance_lifetime  = try(each.value.max_instance_lifetime, var.self_managed_node_group_defaults.max_instance_lifetime, null)
+  force_delete          = try(each.value.force_delete, var.self_managed_node_group_defaults.force_delete, null)
+  termination_policies  = try(each.value.termination_policies, var.self_managed_node_group_defaults.termination_policies, null)
+  suspended_processes   = try(each.value.suspended_processes, var.self_managed_node_group_defaults.suspended_processes, null)
+  max_instance_lifetime = try(each.value.max_instance_lifetime, var.self_managed_node_group_defaults.max_instance_lifetime, null)
 
   enabled_metrics     = try(each.value.enabled_metrics, var.self_managed_node_group_defaults.enabled_metrics, null)
   metrics_granularity = try(each.value.metrics_granularity, var.self_managed_node_group_defaults.metrics_granularity, null)
@@ -439,7 +438,6 @@
   instance_refresh           = try(each.value.instance_refresh, var.self_managed_node_group_defaults.instance_refresh, null)
   use_mixed_instances_policy = try(each.value.use_mixed_instances_policy, var.self_managed_node_group_defaults.use_mixed_instances_policy, null)
   mixed_instances_policy     = try(each.value.mixed_instances_policy, var.self_managed_node_group_defaults.mixed_instances_policy, null)
-  warm_pool                  = try(each.value.warm_pool, var.self_managed_node_group_defaults.warm_pool, null)
 
   timeouts               = try(each.value.timeouts, var.self_managed_node_group_defaults.timeouts, null)
   autoscaling_group_tags = try(each.value.autoscaling_group_tags, var.self_managed_node_group_defaults.autoscaling_group_tags, null)
@@ -486,7 +484,6 @@
   cpu_options             = try(each.value.cpu_options, var.self_managed_node_group_defaults.cpu_options, null)
   credit_specification    = try(each.value.credit_specification, var.self_managed_node_group_defaults.credit_specification, null)
   enclave_options         = try(each.value.enclave_options, var.self_managed_node_group_defaults.enclave_options, null)
-  hibernation_options     = try(each.value.hibernation_options, var.self_managed_node_group_defaults.hibernation_options, null)
   instance_requirements   = try(each.value.instance_requirements, var.self_managed_node_group_defaults.instance_requirements, null)
   instance_market_options = try(each.value.instance_market_options, var.self_managed_node_group_defaults.instance_market_options, null)
   license_specifications  = try(each.value.license_specifications, var.self_managed_node_group_defaults.license_specifications, null)
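
Each argument passed to the submodule above uses the same three-step try() cascade: a value set on the individual entry in self_managed_node_groups wins, otherwise the value from self_managed_node_group_defaults applies, otherwise null is passed so the submodule's own default takes effect. A hedged sketch of that resolution, written as arguments inside the module call and using arguments kept by this commit (the group name and values are hypothetical):

  self_managed_node_group_defaults = {
    force_delete         = true                # inherited by every group unless overridden
    termination_policies = ["OldestInstance"]
  }

  self_managed_node_groups = {
    spot = {
      force_delete = false   # overrides the shared default for this group only
      # termination_policies is not set here, so ["OldestInstance"] is inherited from the defaults
      # max_instance_lifetime is set nowhere, so null is passed and the submodule's default applies
    }
  }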

variables.tf

Lines changed: 0 additions & 15 deletions

@@ -1058,9 +1058,6 @@ variable "self_managed_node_groups" {
     enclave_options = optional(object({
       enabled = optional(bool)
     }))
-    hibernation_options = optional(object({
-      configured = optional(bool)
-    }))
     instance_requirements = optional(object({
       accelerator_count = optional(object({
         max = optional(number)
@@ -1294,7 +1291,6 @@ variable "self_managed_node_group_defaults" {
     health_check_grace_period        = optional(number)
     ignore_failed_scaling_activities = optional(number)
     force_delete                     = optional(bool)
-    force_delete_warm_pool           = optional(bool)
     termination_policies             = optional(list(string))
     suspended_processes              = optional(list(string))
     max_instance_lifetime            = optional(number)
@@ -1407,14 +1403,6 @@
         })))
       })
     }))
-    warm_pool = optional(object({
-      instance_reuse_policy = optional(object({
-        reuse_on_scale_in = optional(bool)
-      }))
-      max_group_prepared_capacity = optional(number)
-      min_size                    = optional(number)
-      pool_state                  = optional(string)
-    }))
     timeouts = optional(object({
      delete = optional(string)
     }))
@@ -1491,9 +1479,6 @@
     enclave_options = optional(object({
       enabled = optional(bool)
     }))
-    hibernation_options = optional(object({
-      configured = optional(bool)
-    }))
     instance_requirements = optional(object({
       accelerator_count = optional(object({
         max = optional(number)
