7 | 7 | locals { |
8 | 8 | # ibm_container_vpc_cluster automatically names the default pool "default" (see https://github.com/IBM-Cloud/terraform-provider-ibm/issues/2849) |
9 | 9 | default_pool = element([for pool in var.worker_pools : pool if pool.pool_name == "default"], 0) |
10 | | - # all_standalone_pools are the pools managed by a 'standalone' ibm_container_vpc_worker_pool resource |
11 | | - all_standalone_pools = [for pool in var.worker_pools : pool if !var.ignore_worker_pool_size_changes] |
12 | | - all_standalone_autoscaling_pools = [for pool in var.worker_pools : pool if var.ignore_worker_pool_size_changes] |
13 | 10 |
14 | 11 | default_ocp_version = "${data.ibm_container_cluster_versions.cluster_versions.default_openshift_version}_openshift" |
15 | 12 | ocp_version = var.ocp_version == null || var.ocp_version == "default" ? local.default_ocp_version : "${var.ocp_version}_openshift" |
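The two locals removed above split `var.worker_pools` into pools whose size Terraform manages and pools whose `worker_count` is left to the cluster autoscaler. A minimal sketch of how that split presumably carries over into the new `./modules/worker-pool` submodule (the file path and local names below are assumptions, not confirmed by this diff):

```hcl
# modules/worker-pool/main.tf (hypothetical location)
locals {
  # Pools managed by a standalone ibm_container_vpc_worker_pool resource
  all_standalone_pools = [for pool in var.worker_pools : pool if !var.ignore_worker_pool_size_changes]

  # Pools whose worker_count is ignored so the cluster autoscaler can manage it
  all_standalone_autoscaling_pools = [for pool in var.worker_pools : pool if var.ignore_worker_pool_size_changes]
}
```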
@@ -466,114 +463,15 @@ data "ibm_container_cluster_config" "cluster_config" { |
466 | 463 | endpoint_type = var.cluster_config_endpoint_type != "default" ? var.cluster_config_endpoint_type : null # null value represents default |
467 | 464 | } |
468 | 465 |
469 | | -############################################################################## |
470 | | -# Worker Pools |
471 | | -############################################################################## |
472 | | - |
473 | | -locals { |
474 | | - additional_pool_names = var.ignore_worker_pool_size_changes ? [for pool in local.all_standalone_autoscaling_pools : pool.pool_name] : [for pool in local.all_standalone_pools : pool.pool_name] |
475 | | - pool_names = toset(flatten([["default"], local.additional_pool_names])) |
476 | | -} |
477 | | - |
478 | | -data "ibm_container_vpc_worker_pool" "all_pools" { |
479 | | - depends_on = [ibm_container_vpc_worker_pool.autoscaling_pool, ibm_container_vpc_worker_pool.pool] |
480 | | - for_each = local.pool_names |
481 | | - cluster = local.cluster_id |
482 | | - worker_pool_name = each.value |
483 | | -} |
484 | | - |
485 | | -resource "ibm_container_vpc_worker_pool" "pool" { |
486 | | - for_each = { for pool in local.all_standalone_pools : pool.pool_name => pool } |
487 | | - vpc_id = var.vpc_id |
488 | | - resource_group_id = var.resource_group_id |
489 | | - cluster = local.cluster_id |
490 | | - worker_pool_name = each.value.pool_name |
491 | | - flavor = each.value.machine_type |
492 | | - operating_system = each.value.operating_system |
493 | | - worker_count = each.value.workers_per_zone |
494 | | - secondary_storage = each.value.secondary_storage |
495 | | - entitlement = var.ocp_entitlement |
496 | | - labels = each.value.labels |
497 | | - crk = each.value.boot_volume_encryption_kms_config == null ? null : each.value.boot_volume_encryption_kms_config.crk |
498 | | - kms_instance_id = each.value.boot_volume_encryption_kms_config == null ? null : each.value.boot_volume_encryption_kms_config.kms_instance_id |
499 | | - kms_account_id = each.value.boot_volume_encryption_kms_config == null ? null : each.value.boot_volume_encryption_kms_config.kms_account_id |
500 | | - |
501 | | - security_groups = each.value.additional_security_group_ids |
502 | | - |
503 | | - dynamic "zones" { |
504 | | - for_each = each.value.subnet_prefix != null ? var.vpc_subnets[each.value.subnet_prefix] : each.value.vpc_subnets |
505 | | - content { |
506 | | - subnet_id = zones.value.id |
507 | | - name = zones.value.zone |
508 | | - } |
509 | | - } |
510 | | - |
511 | | - # Apply taints to worker pools i.e. all_standalone_pools |
512 | | - dynamic "taints" { |
513 | | - for_each = var.worker_pools_taints == null ? [] : concat(var.worker_pools_taints["all"], lookup(var.worker_pools_taints, each.value["pool_name"], [])) |
514 | | - content { |
515 | | - effect = taints.value.effect |
516 | | - key = taints.value.key |
517 | | - value = taints.value.value |
518 | | - } |
519 | | - } |
520 | | - |
521 | | - timeouts { |
522 | | - # Extend create and delete timeout to 2h |
523 | | - delete = "2h" |
524 | | - create = "2h" |
525 | | - } |
526 | | - |
527 | | - # The default workerpool has to be imported as it will already exist on cluster create |
528 | | - import_on_create = each.value.pool_name == "default" ? var.allow_default_worker_pool_replacement ? null : true : null |
529 | | - orphan_on_delete = each.value.pool_name == "default" ? var.allow_default_worker_pool_replacement ? null : true : null |
530 | | -} |
531 | | - |
532 | | -# copy of the pool resource above which ignores changes to the worker pool for use in autoscaling scenarios |
533 | | -resource "ibm_container_vpc_worker_pool" "autoscaling_pool" { |
534 | | - for_each = { for pool in local.all_standalone_autoscaling_pools : pool.pool_name => pool } |
535 | | - vpc_id = var.vpc_id |
536 | | - resource_group_id = var.resource_group_id |
537 | | - cluster = local.cluster_id |
538 | | - worker_pool_name = each.value.pool_name |
539 | | - flavor = each.value.machine_type |
540 | | - operating_system = each.value.operating_system |
541 | | - worker_count = each.value.workers_per_zone |
542 | | - secondary_storage = each.value.secondary_storage |
543 | | - entitlement = var.ocp_entitlement |
544 | | - labels = each.value.labels |
545 | | - crk = each.value.boot_volume_encryption_kms_config == null ? null : each.value.boot_volume_encryption_kms_config.crk |
546 | | - kms_instance_id = each.value.boot_volume_encryption_kms_config == null ? null : each.value.boot_volume_encryption_kms_config.kms_instance_id |
547 | | - kms_account_id = each.value.boot_volume_encryption_kms_config == null ? null : each.value.boot_volume_encryption_kms_config.kms_account_id |
548 | | - |
549 | | - security_groups = each.value.additional_security_group_ids |
550 | | - |
551 | | - lifecycle { |
552 | | - ignore_changes = [worker_count] |
553 | | - } |
554 | | - |
555 | | - dynamic "zones" { |
556 | | - for_each = each.value.subnet_prefix != null ? var.vpc_subnets[each.value.subnet_prefix] : each.value.vpc_subnets |
557 | | - content { |
558 | | - subnet_id = zones.value.id |
559 | | - name = zones.value.zone |
560 | | - } |
561 | | - } |
562 | | - |
563 | | - # Apply taints to worker pools i.e. all_standalone_pools |
564 | | - |
565 | | - dynamic "taints" { |
566 | | - for_each = var.worker_pools_taints == null ? [] : concat(var.worker_pools_taints["all"], lookup(var.worker_pools_taints, each.value["pool_name"], [])) |
567 | | - content { |
568 | | - effect = taints.value.effect |
569 | | - key = taints.value.key |
570 | | - value = taints.value.value |
571 | | - } |
572 | | - } |
573 | | - |
574 | | - # The default workerpool has to be imported as it will already exist on cluster create |
575 | | - import_on_create = each.value.pool_name == "default" ? var.allow_default_worker_pool_replacement ? null : true : null |
576 | | - orphan_on_delete = each.value.pool_name == "default" ? var.allow_default_worker_pool_replacement ? null : true : null |
| 466 | +module "worker_pools" { |
| 467 | + source = "./modules/worker-pool" |
| 468 | + vpc_id = var.vpc_id |
| 469 | + resource_group_id = var.resource_group_id |
| 470 | + cluster_id = local.cluster_id |
| 471 | + vpc_subnets = var.vpc_subnets |
| 472 | + worker_pools = var.worker_pools |
| 473 | + ignore_worker_pool_size_changes = var.ignore_worker_pool_size_changes |
| 474 | + allow_default_worker_pool_replacement = var.allow_default_worker_pool_replacement |
577 | 475 | } |
578 | 476 |
579 | 477 | ############################################################################## |
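The `module "worker_pools"` call above passes seven inputs, so the submodule presumably declares variables along these lines. This is a sketch only: the object types, especially for `vpc_subnets` and `worker_pools`, are assumptions inferred from how the removed resources read them.

```hcl
# modules/worker-pool/variables.tf (hypothetical sketch)
variable "vpc_id" {
  type = string
}

variable "resource_group_id" {
  type = string
}

variable "cluster_id" {
  type = string
}

# Keyed by subnet prefix; only the attributes read by the removed zones blocks are shown.
variable "vpc_subnets" {
  type = map(list(object({
    id   = string
    zone = string
  })))
}

# Pool definitions; a loose type keeps the sketch short.
variable "worker_pools" {
  type = list(any)
}

variable "ignore_worker_pool_size_changes" {
  type    = bool
  default = false
}

variable "allow_default_worker_pool_replacement" {
  type    = bool
  default = false
}
```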
@@ -605,7 +503,7 @@ resource "null_resource" "confirm_network_healthy" { |
605 | 503 | # Worker pool creation can start before the 'ibm_container_vpc_cluster' completes since there is no explicit |
606 | 504 | # depends_on in 'ibm_container_vpc_worker_pool', just an implicit dependency on the cluster ID. The cluster ID can exist before |
607 | 505 | # 'ibm_container_vpc_cluster' completes, so an explicit depends_on against 'ibm_container_vpc_cluster' is needed here. |
608 | | - depends_on = [ibm_container_vpc_cluster.cluster, ibm_container_vpc_cluster.cluster_with_upgrade, ibm_container_vpc_cluster.autoscaling_cluster, ibm_container_vpc_cluster.autoscaling_cluster_with_upgrade, ibm_container_vpc_worker_pool.pool, ibm_container_vpc_worker_pool.autoscaling_pool] |
| 506 | + depends_on = [ibm_container_vpc_cluster.cluster, ibm_container_vpc_cluster.cluster_with_upgrade, ibm_container_vpc_cluster.autoscaling_cluster, ibm_container_vpc_cluster.autoscaling_cluster_with_upgrade, module.worker_pools] |
609 | 507 |
610 | 508 | provisioner "local-exec" { |
611 | 509 | command = "${path.module}/scripts/confirm_network_healthy.sh" |
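Listing `module.worker_pools` in `depends_on` makes every resource inside the module a dependency of this resource, so it is a drop-in replacement for naming `ibm_container_vpc_worker_pool.pool` and `ibm_container_vpc_worker_pool.autoscaling_pool` individually (module references in `depends_on` require Terraform 0.13 or later).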
@@ -659,7 +557,7 @@ resource "ibm_container_addons" "addons" { |
659 | 557 | # Worker pool creation can start before the 'ibm_container_vpc_cluster' completes since there is no explicit |
660 | 558 | # depends_on in 'ibm_container_vpc_worker_pool', just an implicit dependency on the cluster ID. The cluster ID can exist before |
661 | 559 | # 'ibm_container_vpc_cluster' completes, so an explicit depends_on against 'ibm_container_vpc_cluster' is needed here. |
662 | | - depends_on = [ibm_container_vpc_cluster.cluster, ibm_container_vpc_cluster.cluster_with_upgrade, ibm_container_vpc_cluster.autoscaling_cluster, ibm_container_vpc_cluster.autoscaling_cluster_with_upgrade, ibm_container_vpc_worker_pool.pool, ibm_container_vpc_worker_pool.autoscaling_pool, null_resource.confirm_network_healthy] |
| 560 | + depends_on = [ibm_container_vpc_cluster.cluster, ibm_container_vpc_cluster.cluster_with_upgrade, ibm_container_vpc_cluster.autoscaling_cluster, ibm_container_vpc_cluster.autoscaling_cluster_with_upgrade, module.worker_pools, null_resource.confirm_network_healthy] |
663 | 561 | cluster = local.cluster_id |
664 | 562 | resource_group_id = var.resource_group_id |
665 | 563 |
@@ -732,7 +630,7 @@ resource "kubernetes_config_map_v1_data" "set_autoscaling" { |
732 | 630 | ############################################################################## |
733 | 631 |
|
734 | 632 | data "ibm_is_lbs" "all_lbs" { |
735 | | - depends_on = [ibm_container_vpc_cluster.cluster, ibm_container_vpc_cluster.cluster_with_upgrade, ibm_container_vpc_cluster.autoscaling_cluster, ibm_container_vpc_cluster.autoscaling_cluster_with_upgrade, ibm_container_vpc_worker_pool.pool, ibm_container_vpc_worker_pool.autoscaling_pool, null_resource.confirm_network_healthy] |
| 633 | + depends_on = [ibm_container_vpc_cluster.cluster, ibm_container_vpc_cluster.cluster_with_upgrade, ibm_container_vpc_cluster.autoscaling_cluster, ibm_container_vpc_cluster.autoscaling_cluster_with_upgrade, module.worker_pools, null_resource.confirm_network_healthy] |
736 | 634 | count = length(var.additional_lb_security_group_ids) > 0 ? 1 : 0 |
737 | 635 | } |
738 | 636 |
@@ -768,19 +666,19 @@ locals { |
768 | 666 |
769 | 667 | data "ibm_is_virtual_endpoint_gateway" "master_vpe" { |
770 | 668 | count = length(var.additional_vpe_security_group_ids["master"]) |
771 | | - depends_on = [ibm_container_vpc_cluster.cluster, ibm_container_vpc_cluster.cluster_with_upgrade, ibm_container_vpc_cluster.autoscaling_cluster, ibm_container_vpc_cluster.autoscaling_cluster_with_upgrade, ibm_container_vpc_worker_pool.pool, ibm_container_vpc_worker_pool.autoscaling_pool, null_resource.confirm_network_healthy] |
| 669 | + depends_on = [ibm_container_vpc_cluster.cluster, ibm_container_vpc_cluster.cluster_with_upgrade, ibm_container_vpc_cluster.autoscaling_cluster, ibm_container_vpc_cluster.autoscaling_cluster_with_upgrade, module.worker_pools, null_resource.confirm_network_healthy] |
772 | 670 | name = local.vpes_to_attach_to_sg["master"] |
773 | 671 | } |
774 | 672 |
775 | 673 | data "ibm_is_virtual_endpoint_gateway" "api_vpe" { |
776 | 674 | count = length(var.additional_vpe_security_group_ids["api"]) |
777 | | - depends_on = [ibm_container_vpc_cluster.cluster, ibm_container_vpc_cluster.cluster_with_upgrade, ibm_container_vpc_cluster.autoscaling_cluster, ibm_container_vpc_cluster.autoscaling_cluster_with_upgrade, ibm_container_vpc_worker_pool.pool, ibm_container_vpc_worker_pool.autoscaling_pool, null_resource.confirm_network_healthy] |
| 675 | + depends_on = [ibm_container_vpc_cluster.cluster, ibm_container_vpc_cluster.cluster_with_upgrade, ibm_container_vpc_cluster.autoscaling_cluster, ibm_container_vpc_cluster.autoscaling_cluster_with_upgrade, module.worker_pools, null_resource.confirm_network_healthy] |
778 | 676 | name = local.vpes_to_attach_to_sg["api"] |
779 | 677 | } |
780 | 678 |
781 | 679 | data "ibm_is_virtual_endpoint_gateway" "registry_vpe" { |
782 | 680 | count = length(var.additional_vpe_security_group_ids["registry"]) |
783 | | - depends_on = [ibm_container_vpc_cluster.cluster, ibm_container_vpc_cluster.cluster_with_upgrade, ibm_container_vpc_cluster.autoscaling_cluster, ibm_container_vpc_cluster.autoscaling_cluster_with_upgrade, ibm_container_vpc_worker_pool.pool, ibm_container_vpc_worker_pool.autoscaling_pool, null_resource.confirm_network_healthy] |
| 681 | + depends_on = [ibm_container_vpc_cluster.cluster, ibm_container_vpc_cluster.cluster_with_upgrade, ibm_container_vpc_cluster.autoscaling_cluster, ibm_container_vpc_cluster.autoscaling_cluster_with_upgrade, module.worker_pools, null_resource.confirm_network_healthy] |
784 | 682 | name = local.vpes_to_attach_to_sg["registry"] |
785 | 683 | } |
786 | 684 |
@@ -872,7 +770,7 @@ module "existing_secrets_manager_instance_parser" { |
872 | 770 |
873 | 771 | resource "ibm_iam_authorization_policy" "ocp_secrets_manager_iam_auth_policy" { |
874 | 772 | count = var.enable_secrets_manager_integration && !var.skip_ocp_secrets_manager_iam_auth_policy ? 1 : 0 |
875 | | - depends_on = [ibm_container_vpc_cluster.cluster, ibm_container_vpc_cluster.autoscaling_cluster, ibm_container_vpc_cluster.cluster_with_upgrade, ibm_container_vpc_cluster.autoscaling_cluster_with_upgrade, ibm_container_vpc_worker_pool.pool, ibm_container_vpc_worker_pool.autoscaling_pool] |
| 773 | + depends_on = [ibm_container_vpc_cluster.cluster, ibm_container_vpc_cluster.autoscaling_cluster, ibm_container_vpc_cluster.cluster_with_upgrade, ibm_container_vpc_cluster.autoscaling_cluster_with_upgrade, module.worker_pools] |
876 | 774 | source_service_name = "containers-kubernetes" |
877 | 775 | source_resource_instance_id = local.cluster_id |
878 | 776 | target_service_name = "secrets-manager" |