5 changes: 2 additions & 3 deletions README.md
@@ -27,6 +27,7 @@ Optionally, the module supports advanced security group management for the worke
* [Submodules](./modules)
* [fscloud](./modules/fscloud)
* [kube-audit](./modules/kube-audit)
* [worker-pool](./modules/worker-pool)
* [Examples](./examples)
* [2 MZR clusters in same VPC example](./examples/multiple_mzr_clusters)
* [Advanced example (mzr, auto-scale, kms, taints)](./examples/advanced)
@@ -295,6 +296,7 @@ Optionally, you need the following permissions to attach Access Management tags
| <a name="module_cbr_rule"></a> [cbr\_rule](#module\_cbr\_rule) | terraform-ibm-modules/cbr/ibm//modules/cbr-rule-module | 1.33.6 |
| <a name="module_cos_instance"></a> [cos\_instance](#module\_cos\_instance) | terraform-ibm-modules/cos/ibm | 10.5.0 |
| <a name="module_existing_secrets_manager_instance_parser"></a> [existing\_secrets\_manager\_instance\_parser](#module\_existing\_secrets\_manager\_instance\_parser) | terraform-ibm-modules/common-utilities/ibm//modules/crn-parser | 1.2.0 |
| <a name="module_worker_pools"></a> [worker\_pools](#module\_worker\_pools) | ./modules/worker-pool | n/a |

### Resources

@@ -307,8 +309,6 @@ Optionally, you need the following permissions to attach Access Management tags
| [ibm_container_vpc_cluster.autoscaling_cluster_with_upgrade](https://registry.terraform.io/providers/ibm-cloud/ibm/latest/docs/resources/container_vpc_cluster) | resource |
| [ibm_container_vpc_cluster.cluster](https://registry.terraform.io/providers/ibm-cloud/ibm/latest/docs/resources/container_vpc_cluster) | resource |
| [ibm_container_vpc_cluster.cluster_with_upgrade](https://registry.terraform.io/providers/ibm-cloud/ibm/latest/docs/resources/container_vpc_cluster) | resource |
| [ibm_container_vpc_worker_pool.autoscaling_pool](https://registry.terraform.io/providers/ibm-cloud/ibm/latest/docs/resources/container_vpc_worker_pool) | resource |
| [ibm_container_vpc_worker_pool.pool](https://registry.terraform.io/providers/ibm-cloud/ibm/latest/docs/resources/container_vpc_worker_pool) | resource |
| [ibm_iam_authorization_policy.ocp_secrets_manager_iam_auth_policy](https://registry.terraform.io/providers/ibm-cloud/ibm/latest/docs/resources/iam_authorization_policy) | resource |
| [ibm_resource_tag.cluster_access_tag](https://registry.terraform.io/providers/ibm-cloud/ibm/latest/docs/resources/resource_tag) | resource |
| [ibm_resource_tag.cos_access_tag](https://registry.terraform.io/providers/ibm-cloud/ibm/latest/docs/resources/resource_tag) | resource |
@@ -321,7 +321,6 @@ Optionally, you need the following permissions to attach Access Management tags
| [ibm_container_addons.existing_addons](https://registry.terraform.io/providers/ibm-cloud/ibm/latest/docs/data-sources/container_addons) | data source |
| [ibm_container_cluster_config.cluster_config](https://registry.terraform.io/providers/ibm-cloud/ibm/latest/docs/data-sources/container_cluster_config) | data source |
| [ibm_container_cluster_versions.cluster_versions](https://registry.terraform.io/providers/ibm-cloud/ibm/latest/docs/data-sources/container_cluster_versions) | data source |
| [ibm_container_vpc_worker_pool.all_pools](https://registry.terraform.io/providers/ibm-cloud/ibm/latest/docs/data-sources/container_vpc_worker_pool) | data source |
| [ibm_is_lbs.all_lbs](https://registry.terraform.io/providers/ibm-cloud/ibm/latest/docs/data-sources/is_lbs) | data source |
| [ibm_is_virtual_endpoint_gateway.api_vpe](https://registry.terraform.io/providers/ibm-cloud/ibm/latest/docs/data-sources/is_virtual_endpoint_gateway) | data source |
| [ibm_is_virtual_endpoint_gateway.master_vpe](https://registry.terraform.io/providers/ibm-cloud/ibm/latest/docs/data-sources/is_virtual_endpoint_gateway) | data source |
134 changes: 16 additions & 118 deletions main.tf
@@ -7,9 +7,6 @@
locals {
# ibm_container_vpc_cluster automatically names default pool "default" (See https://github.com/IBM-Cloud/terraform-provider-ibm/issues/2849)
default_pool = element([for pool in var.worker_pools : pool if pool.pool_name == "default"], 0)
# all_standalone_pools are the pools managed by a 'standalone' ibm_container_vpc_worker_pool resource
all_standalone_pools = [for pool in var.worker_pools : pool if !var.ignore_worker_pool_size_changes]
all_standalone_autoscaling_pools = [for pool in var.worker_pools : pool if var.ignore_worker_pool_size_changes]

default_ocp_version = "${data.ibm_container_cluster_versions.cluster_versions.default_openshift_version}_openshift"
ocp_version = var.ocp_version == null || var.ocp_version == "default" ? local.default_ocp_version : "${var.ocp_version}_openshift"
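# For example (illustrative): with var.ocp_version = "4.16" the local resolves to "4.16_openshift",
# while null or "default" falls back to local.default_ocp_version.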
@@ -464,114 +461,15 @@ data "ibm_container_cluster_config" "cluster_config" {
endpoint_type = var.cluster_config_endpoint_type != "default" ? var.cluster_config_endpoint_type : null # null value represents default
}

##############################################################################
# Worker Pools
##############################################################################

locals {
additional_pool_names = var.ignore_worker_pool_size_changes ? [for pool in local.all_standalone_autoscaling_pools : pool.pool_name] : [for pool in local.all_standalone_pools : pool.pool_name]
pool_names = toset(flatten([["default"], local.additional_pool_names]))
}

data "ibm_container_vpc_worker_pool" "all_pools" {
depends_on = [ibm_container_vpc_worker_pool.autoscaling_pool, ibm_container_vpc_worker_pool.pool]
for_each = local.pool_names
cluster = local.cluster_id
worker_pool_name = each.value
}

resource "ibm_container_vpc_worker_pool" "pool" {
for_each = { for pool in local.all_standalone_pools : pool.pool_name => pool }
vpc_id = var.vpc_id
resource_group_id = var.resource_group_id
cluster = local.cluster_id
worker_pool_name = each.value.pool_name
flavor = each.value.machine_type
operating_system = each.value.operating_system
worker_count = each.value.workers_per_zone
secondary_storage = each.value.secondary_storage
entitlement = var.ocp_entitlement
labels = each.value.labels
crk = each.value.boot_volume_encryption_kms_config == null ? null : each.value.boot_volume_encryption_kms_config.crk
kms_instance_id = each.value.boot_volume_encryption_kms_config == null ? null : each.value.boot_volume_encryption_kms_config.kms_instance_id
kms_account_id = each.value.boot_volume_encryption_kms_config == null ? null : each.value.boot_volume_encryption_kms_config.kms_account_id

security_groups = each.value.additional_security_group_ids

dynamic "zones" {
for_each = each.value.subnet_prefix != null ? var.vpc_subnets[each.value.subnet_prefix] : each.value.vpc_subnets
content {
subnet_id = zones.value.id
name = zones.value.zone
}
}

# Apply taints to worker pools i.e. all_standalone_pools
dynamic "taints" {
for_each = var.worker_pools_taints == null ? [] : concat(var.worker_pools_taints["all"], lookup(var.worker_pools_taints, each.value["pool_name"], []))
content {
effect = taints.value.effect
key = taints.value.key
value = taints.value.value
}
}
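# Illustrative shape of var.worker_pools_taints assumed by the for_each expression above
# (the pool name and taint values are made-up examples, not module defaults):
# {
#   all     = []
#   default = [{ key = "dedicated", value = "edge", effect = "NoSchedule" }]
# }
# Taints under "all" apply to every pool; a key matching a pool_name adds pool-specific taints.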

timeouts {
# Extend create and delete timeout to 2h
delete = "2h"
create = "2h"
}

# The default workerpool has to be imported as it will already exist on cluster create
import_on_create = each.value.pool_name == "default" ? var.allow_default_worker_pool_replacement ? null : true : null
orphan_on_delete = each.value.pool_name == "default" ? var.allow_default_worker_pool_replacement ? null : true : null
}

# copy of the pool resource above which ignores changes to the worker pool for use in autoscaling scenarios
resource "ibm_container_vpc_worker_pool" "autoscaling_pool" {
for_each = { for pool in local.all_standalone_autoscaling_pools : pool.pool_name => pool }
vpc_id = var.vpc_id
resource_group_id = var.resource_group_id
cluster = local.cluster_id
worker_pool_name = each.value.pool_name
flavor = each.value.machine_type
operating_system = each.value.operating_system
worker_count = each.value.workers_per_zone
secondary_storage = each.value.secondary_storage
entitlement = var.ocp_entitlement
labels = each.value.labels
crk = each.value.boot_volume_encryption_kms_config == null ? null : each.value.boot_volume_encryption_kms_config.crk
kms_instance_id = each.value.boot_volume_encryption_kms_config == null ? null : each.value.boot_volume_encryption_kms_config.kms_instance_id
kms_account_id = each.value.boot_volume_encryption_kms_config == null ? null : each.value.boot_volume_encryption_kms_config.kms_account_id

security_groups = each.value.additional_security_group_ids

lifecycle {
ignore_changes = [worker_count]
}

dynamic "zones" {
for_each = each.value.subnet_prefix != null ? var.vpc_subnets[each.value.subnet_prefix] : each.value.vpc_subnets
content {
subnet_id = zones.value.id
name = zones.value.zone
}
}

# Apply taints to worker pools i.e. all_standalone_pools

dynamic "taints" {
for_each = var.worker_pools_taints == null ? [] : concat(var.worker_pools_taints["all"], lookup(var.worker_pools_taints, each.value["pool_name"], []))
content {
effect = taints.value.effect
key = taints.value.key
value = taints.value.value
}
}

# The default workerpool has to be imported as it will already exist on cluster create
import_on_create = each.value.pool_name == "default" ? var.allow_default_worker_pool_replacement ? null : true : null
orphan_on_delete = each.value.pool_name == "default" ? var.allow_default_worker_pool_replacement ? null : true : null
module "worker_pools" {
source = "./modules/worker-pool"
vpc_id = var.vpc_id
resource_group_id = var.resource_group_id
cluster_id = local.cluster_id
vpc_subnets = var.vpc_subnets
worker_pools = var.worker_pools
ignore_worker_pool_size_changes = var.ignore_worker_pool_size_changes
allow_default_worker_pool_replacement = var.allow_default_worker_pool_replacement
}
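For reference, a minimal sketch of a single var.worker_pools entry that this refactored call would accept. The attribute names mirror what the removed standalone worker pool resources above read from each entry; the values (flavor, operating system, subnet prefix) are illustrative assumptions, not defaults of this module:

worker_pools = [
  {
    pool_name                         = "default"
    machine_type                      = "bx2.4x16"  # assumed flavor
    operating_system                  = "RHEL_9_64" # assumed value
    workers_per_zone                  = 2
    secondary_storage                 = null
    labels                            = {}
    subnet_prefix                     = "zone-1"    # assumed key into var.vpc_subnets
    vpc_subnets                       = null
    additional_security_group_ids     = []
    boot_volume_encryption_kms_config = null
  }
]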

##############################################################################
@@ -603,7 +501,7 @@ resource "null_resource" "confirm_network_healthy" {
# Worker pool creation can start before 'ibm_container_vpc_cluster' completes because 'ibm_container_vpc_worker_pool' has no explicit
# depends_on, only an implicit dependency on the cluster ID. The cluster ID can exist before
# 'ibm_container_vpc_cluster' completes, hence the explicit depends_on on 'ibm_container_vpc_cluster' here.
depends_on = [ibm_container_vpc_cluster.cluster, ibm_container_vpc_cluster.cluster_with_upgrade, ibm_container_vpc_cluster.autoscaling_cluster, ibm_container_vpc_cluster.autoscaling_cluster_with_upgrade, ibm_container_vpc_worker_pool.pool, ibm_container_vpc_worker_pool.autoscaling_pool]
depends_on = [ibm_container_vpc_cluster.cluster, ibm_container_vpc_cluster.cluster_with_upgrade, ibm_container_vpc_cluster.autoscaling_cluster, ibm_container_vpc_cluster.autoscaling_cluster_with_upgrade, module.worker_pools]

provisioner "local-exec" {
command = "${path.module}/scripts/confirm_network_healthy.sh"
@@ -657,7 +555,7 @@ resource "ibm_container_addons" "addons" {
# Worker pool creation can start before 'ibm_container_vpc_cluster' completes because 'ibm_container_vpc_worker_pool' has no explicit
# depends_on, only an implicit dependency on the cluster ID. The cluster ID can exist before
# 'ibm_container_vpc_cluster' completes, hence the explicit depends_on on 'ibm_container_vpc_cluster' here.
depends_on = [ibm_container_vpc_cluster.cluster, ibm_container_vpc_cluster.cluster_with_upgrade, ibm_container_vpc_cluster.autoscaling_cluster, ibm_container_vpc_cluster.autoscaling_cluster_with_upgrade, ibm_container_vpc_worker_pool.pool, ibm_container_vpc_worker_pool.autoscaling_pool, null_resource.confirm_network_healthy]
depends_on = [ibm_container_vpc_cluster.cluster, ibm_container_vpc_cluster.cluster_with_upgrade, ibm_container_vpc_cluster.autoscaling_cluster, ibm_container_vpc_cluster.autoscaling_cluster_with_upgrade, module.worker_pools, null_resource.confirm_network_healthy]
cluster = local.cluster_id
resource_group_id = var.resource_group_id

@@ -730,7 +628,7 @@ resource "kubernetes_config_map_v1_data" "set_autoscaling" {
##############################################################################

data "ibm_is_lbs" "all_lbs" {
depends_on = [ibm_container_vpc_cluster.cluster, ibm_container_vpc_cluster.cluster_with_upgrade, ibm_container_vpc_cluster.autoscaling_cluster, ibm_container_vpc_cluster.autoscaling_cluster_with_upgrade, ibm_container_vpc_worker_pool.pool, ibm_container_vpc_worker_pool.autoscaling_pool, null_resource.confirm_network_healthy]
depends_on = [ibm_container_vpc_cluster.cluster, ibm_container_vpc_cluster.cluster_with_upgrade, ibm_container_vpc_cluster.autoscaling_cluster, ibm_container_vpc_cluster.autoscaling_cluster_with_upgrade, module.worker_pools, null_resource.confirm_network_healthy]
count = length(var.additional_lb_security_group_ids) > 0 ? 1 : 0
}

@@ -766,19 +664,19 @@ locals {

data "ibm_is_virtual_endpoint_gateway" "master_vpe" {
count = length(var.additional_vpe_security_group_ids["master"])
depends_on = [ibm_container_vpc_cluster.cluster, ibm_container_vpc_cluster.cluster_with_upgrade, ibm_container_vpc_cluster.autoscaling_cluster, ibm_container_vpc_cluster.autoscaling_cluster_with_upgrade, ibm_container_vpc_worker_pool.pool, ibm_container_vpc_worker_pool.autoscaling_pool, null_resource.confirm_network_healthy]
depends_on = [ibm_container_vpc_cluster.cluster, ibm_container_vpc_cluster.cluster_with_upgrade, ibm_container_vpc_cluster.autoscaling_cluster, ibm_container_vpc_cluster.autoscaling_cluster_with_upgrade, module.worker_pools, null_resource.confirm_network_healthy]
name = local.vpes_to_attach_to_sg["master"]
}

data "ibm_is_virtual_endpoint_gateway" "api_vpe" {
count = length(var.additional_vpe_security_group_ids["api"])
depends_on = [ibm_container_vpc_cluster.cluster, ibm_container_vpc_cluster.cluster_with_upgrade, ibm_container_vpc_cluster.autoscaling_cluster, ibm_container_vpc_cluster.autoscaling_cluster_with_upgrade, ibm_container_vpc_worker_pool.pool, ibm_container_vpc_worker_pool.autoscaling_pool, null_resource.confirm_network_healthy]
depends_on = [ibm_container_vpc_cluster.cluster, ibm_container_vpc_cluster.cluster_with_upgrade, ibm_container_vpc_cluster.autoscaling_cluster, ibm_container_vpc_cluster.autoscaling_cluster_with_upgrade, module.worker_pools, null_resource.confirm_network_healthy]
name = local.vpes_to_attach_to_sg["api"]
}

data "ibm_is_virtual_endpoint_gateway" "registry_vpe" {
count = length(var.additional_vpe_security_group_ids["registry"])
depends_on = [ibm_container_vpc_cluster.cluster, ibm_container_vpc_cluster.cluster_with_upgrade, ibm_container_vpc_cluster.autoscaling_cluster, ibm_container_vpc_cluster.autoscaling_cluster_with_upgrade, ibm_container_vpc_worker_pool.pool, ibm_container_vpc_worker_pool.autoscaling_pool, null_resource.confirm_network_healthy]
depends_on = [ibm_container_vpc_cluster.cluster, ibm_container_vpc_cluster.cluster_with_upgrade, ibm_container_vpc_cluster.autoscaling_cluster, ibm_container_vpc_cluster.autoscaling_cluster_with_upgrade, module.worker_pools, null_resource.confirm_network_healthy]
name = local.vpes_to_attach_to_sg["registry"]
}

@@ -870,7 +768,7 @@ module "existing_secrets_manager_instance_parser" {

resource "ibm_iam_authorization_policy" "ocp_secrets_manager_iam_auth_policy" {
count = var.enable_secrets_manager_integration && !var.skip_ocp_secrets_manager_iam_auth_policy ? 1 : 0
depends_on = [ibm_container_vpc_cluster.cluster, ibm_container_vpc_cluster.autoscaling_cluster, ibm_container_vpc_cluster.cluster_with_upgrade, ibm_container_vpc_cluster.autoscaling_cluster_with_upgrade, ibm_container_vpc_worker_pool.pool, ibm_container_vpc_worker_pool.autoscaling_pool]
depends_on = [ibm_container_vpc_cluster.cluster, ibm_container_vpc_cluster.autoscaling_cluster, ibm_container_vpc_cluster.cluster_with_upgrade, ibm_container_vpc_cluster.autoscaling_cluster_with_upgrade, module.worker_pools]
source_service_name = "containers-kubernetes"
source_resource_instance_id = local.cluster_id
target_service_name = "secrets-manager"
Empty file added modules/worker-pool/README.md