Merged

46 commits
ca2a200 feat: add support for major openshift version upgrade (Aug 3, 2025)
e0fa073 Merge branch 'main' into 15216-optin (iamar7, Aug 3, 2025)
7c1fba6 pre-commit resolve (iamar7, Aug 3, 2025)
92b71a3 update main.tf & output.tf (iamar7, Aug 3, 2025)
80d0971 Merge branch 'main' into 15216-optin (iamar7, Aug 4, 2025)
1bd2179 Merge branch 'main' of https://github.com/terraform-ibm-modules/terra… (Sep 19, 2025)
d4a8c3c update cdev (iamar7, Sep 19, 2025)
263efd0 Merge branch 'main' into 15216-optin (iamar7, Sep 19, 2025)
42f4213 added the new variable to an customsg example (Sep 22, 2025)
27d9e56 Merge branch '15216-optin' of https://github.com/terraform-ibm-module… (Sep 22, 2025)
ee3962f Merge branch 'main' of https://github.com/terraform-ibm-modules/terra… (iamar7, Sep 22, 2025)
29543ac update main.tf (Sep 22, 2025)
58791b9 Merge branch 'main' into 15216-optin (iamar7, Sep 22, 2025)
8122eca Merge branch 'main' into 15216-optin (iamar7, Sep 23, 2025)
3fe96dc resolve comments (iamar7, Sep 24, 2025)
2bc9267 Merge branch 'main' into 15216-optin (iamar7, Sep 25, 2025)
2ca796c add migration script (Sep 29, 2025)
51ddbb3 Merge branch '15216-optin' of https://github.com/terraform-ibm-module… (Sep 29, 2025)
ae63968 update script and documentation (Sep 30, 2025)
d247026 Merge branch 'main' into 15216-optin (iamar7, Sep 30, 2025)
e32b43b update migration.md (iamar7, Sep 30, 2025)
4f8eef5 move in a diff folder (iamar7, Sep 30, 2025)
f125b82 Merge branch 'main' into 15216-optin (iamar7, Oct 1, 2025)
272281b update readme (Oct 1, 2025)
7b64b44 resolve comments (Oct 1, 2025)
dfae1b1 remove moved and revert (Oct 1, 2025)
d1bd79d update description (iamar7, Oct 1, 2025)
e4557e4 update cdev (iamar7, Oct 1, 2025)
9ee1dd6 Merge branch 'main' into 15216-optin (iamar7, Oct 2, 2025)
297648e Merge branch 'main' into 15216-optin (iamar7, Oct 7, 2025)
77d404b add schematics script (Oct 10, 2025)
ed520db Merge branch 'main' into 15216-optin (iamar7, Oct 10, 2025)
f5e26b1 Merge branch '15216-optin' of https://github.com/terraform-ibm-module… (Oct 10, 2025)
de780c1 resolve pc (iamar7, Oct 10, 2025)
376dbac update revert resource (Oct 10, 2025)
3c87e26 update script and doc (Oct 10, 2025)
e90a58e update docs (Oct 10, 2025)
1dee570 update docs (Oct 10, 2025)
66dcd2e update docs (Oct 10, 2025)
5d2e9fc resolve pc (iamar7, Oct 10, 2025)
68f5bc7 Merge branch 'main' into 15216-optin (iamar7, Oct 13, 2025)
2dae7c4 Merge branch 'main' into 15216-optin (vburckhardt, Oct 13, 2025)
87dcc8e docs: recommand terraform moved block, remove generated bloat (vburckhardt, Oct 13, 2025)
43e0aae docs: recommand terraform moved block, remove generated bloat (vburckhardt, Oct 13, 2025)
4ffca31 docs: recommand terraform moved block, remove generated bloat (vburckhardt, Oct 13, 2025)
d893b30 docs: recommand terraform moved block, remove generated bloat (vburckhardt, Oct 13, 2025)
3 changes: 3 additions & 0 deletions README.md
@@ -262,7 +262,9 @@ Optionally, you need the following permissions to attach Access Management tags
| [ibm_container_api_key_reset.reset_api_key](https://registry.terraform.io/providers/ibm-cloud/ibm/latest/docs/resources/container_api_key_reset) | resource |
| [ibm_container_ingress_instance.instance](https://registry.terraform.io/providers/ibm-cloud/ibm/latest/docs/resources/container_ingress_instance) | resource |
| [ibm_container_vpc_cluster.autoscaling_cluster](https://registry.terraform.io/providers/ibm-cloud/ibm/latest/docs/resources/container_vpc_cluster) | resource |
+| [ibm_container_vpc_cluster.autoscaling_cluster_with_upgrade](https://registry.terraform.io/providers/ibm-cloud/ibm/latest/docs/resources/container_vpc_cluster) | resource |
| [ibm_container_vpc_cluster.cluster](https://registry.terraform.io/providers/ibm-cloud/ibm/latest/docs/resources/container_vpc_cluster) | resource |
+| [ibm_container_vpc_cluster.cluster_with_upgrade](https://registry.terraform.io/providers/ibm-cloud/ibm/latest/docs/resources/container_vpc_cluster) | resource |
| [ibm_container_vpc_worker_pool.autoscaling_pool](https://registry.terraform.io/providers/ibm-cloud/ibm/latest/docs/resources/container_vpc_worker_pool) | resource |
| [ibm_container_vpc_worker_pool.pool](https://registry.terraform.io/providers/ibm-cloud/ibm/latest/docs/resources/container_vpc_worker_pool) | resource |
| [ibm_iam_authorization_policy.ocp_secrets_manager_iam_auth_policy](https://registry.terraform.io/providers/ibm-cloud/ibm/latest/docs/resources/iam_authorization_policy) | resource |
@@ -302,6 +304,7 @@ Optionally, you need the following permissions to attach Access Management tags
| <a name="input_disable_outbound_traffic_protection"></a> [disable\_outbound\_traffic\_protection](#input\_disable\_outbound\_traffic\_protection) | Whether to allow public outbound access from the cluster workers. This is only applicable for OCP 4.15 and later. | `bool` | `false` | no |
| <a name="input_disable_public_endpoint"></a> [disable\_public\_endpoint](#input\_disable\_public\_endpoint) | Whether access to the public service endpoint is disabled when the cluster is created. Does not affect existing clusters. You can't disable a public endpoint on an existing cluster, so you can't convert a public cluster to a private cluster. To change a public endpoint to private, create another cluster with this input set to `true`. | `bool` | `false` | no |
| <a name="input_enable_ocp_console"></a> [enable\_ocp\_console](#input\_enable\_ocp\_console) | Flag to specify whether to enable or disable the OpenShift console. If set to `null` the module does not modify the current setting on the cluster. Keep in mind that when this input is set to `true` or `false` on a cluster with private only endpoint enabled, the runtime must be able to access the private endpoint. | `bool` | `null` | no |
| <a name="input_enable_openshift_version_upgrade"></a> [enable\_openshift\_version\_upgrade](#input\_enable\_openshift\_version\_upgrade) | When set to true, allows Terraform to manage major OpenShift version upgrades by removing the ignore\_changes setting on the kube\_version field. This is intended for advanced users who manually control major version upgrades. Defaults to false to avoid unintended drift from IBM-managed patch updates. | `bool` | `false` | no |
| <a name="input_enable_registry_storage"></a> [enable\_registry\_storage](#input\_enable\_registry\_storage) | Set to `true` to enable IBM Cloud Object Storage for the Red Hat OpenShift internal image registry. Set to `false` only for new cluster deployments in an account that is allowlisted for this feature. | `bool` | `true` | no |
| <a name="input_enable_secrets_manager_integration"></a> [enable\_secrets\_manager\_integration](#input\_enable\_secrets\_manager\_integration) | Integrate with IBM Cloud Secrets Manager so you can centrally manage Ingress subdomain certificates and other secrets. [Learn more](https://cloud.ibm.com/docs/containers?topic=containers-secrets-mgr) | `bool` | `false` | no |
| <a name="input_existing_cos_id"></a> [existing\_cos\_id](#input\_existing\_cos\_id) | The COS id of an already existing COS instance to use for OpenShift internal registry storage. Only required if 'enable\_registry\_storage' and 'use\_existing\_cos' are true. | `string` | `null` | no |
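The new input only lifts the `ignore_changes` guard on `kube_version`, so an upgrade is driven by bumping the version input on a later apply. A minimal consumer sketch (the registry source, version pin, and surrounding values are assumptions for illustration, not taken from this diff):

```hcl
module "ocp_base" {
  source  = "terraform-ibm-modules/base-ocp-vpc/ibm" # assumed registry source
  version = "X.Y.Z"                                  # placeholder: pin to a release that includes this feature

  cluster_name      = "my-cluster"
  resource_group_id = var.resource_group_id
  vpc_id            = var.vpc_id
  vpc_subnets       = local.cluster_vpc_subnets
  worker_pools      = local.worker_pools

  # Opt in, then change ocp_version (for example "4.14" -> "4.15") on a
  # subsequent apply to let Terraform perform the major version upgrade.
  ocp_version                      = "4.15"
  enable_openshift_version_upgrade = true
}
```

With the flag left at its default of `false`, the module keeps ignoring `kube_version` drift, so IBM-managed patch updates never show up as plan changes.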
1 change: 1 addition & 0 deletions examples/custom_sg/main.tf
@@ -104,6 +104,7 @@ module "ocp_base" {
vpc_id = ibm_is_vpc.vpc.id
vpc_subnets = local.cluster_vpc_subnets
ocp_version = var.ocp_version
+  enable_openshift_version_upgrade = var.enable_openshift_version_upgrade
worker_pools = local.worker_pools
access_tags = var.access_tags
attach_ibm_managed_security_group = true # true is the default
6 changes: 6 additions & 0 deletions examples/custom_sg/variables.tf
@@ -40,6 +40,12 @@ variable "ocp_version" {
default = "4.14"
}

variable "enable_openshift_version_upgrade" {
type = bool
description = "When set to true, allows Terraform to manage major OpenShift version upgrades by removing the ignore_changes setting on the kube_version field. This is intended for advanced users who manually control major version upgrades. Defaults to false to avoid unintended drift from IBM-managed patch updates."
default = false
}

variable "access_tags" {
type = list(string)
description = "A list of access tags to apply to the resources created by the module."
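Toggling the flag switches which `ibm_container_vpc_cluster` resource has `count = 1`, so an existing cluster must be moved in state or Terraform will plan to destroy and recreate it. The later docs commits recommend a Terraform `moved` block for this; a minimal sketch in the root module, assuming the module instance is named `ocp_base` and `ignore_worker_pool_size_changes` is `false`:

```hcl
moved {
  from = module.ocp_base.ibm_container_vpc_cluster.cluster[0]
  to   = module.ocp_base.ibm_container_vpc_cluster.cluster_with_upgrade[0]
}
```

When `ignore_worker_pool_size_changes` is `true`, the move is between `autoscaling_cluster[0]` and `autoscaling_cluster_with_upgrade[0]` instead.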
162 changes: 151 additions & 11 deletions main.tf
@@ -24,7 +24,8 @@ locals {
create_timeout = "3h"
update_timeout = "3h"

-  cluster_id = var.ignore_worker_pool_size_changes ? ibm_container_vpc_cluster.autoscaling_cluster[0].id : ibm_container_vpc_cluster.cluster[0].id
+  cluster_id  = var.enable_openshift_version_upgrade ? (var.ignore_worker_pool_size_changes ? ibm_container_vpc_cluster.autoscaling_cluster_with_upgrade[0].id : ibm_container_vpc_cluster.cluster_with_upgrade[0].id) : (var.ignore_worker_pool_size_changes ? ibm_container_vpc_cluster.autoscaling_cluster[0].id : ibm_container_vpc_cluster.cluster[0].id)
+  cluster_crn = var.enable_openshift_version_upgrade ? (var.ignore_worker_pool_size_changes ? ibm_container_vpc_cluster.autoscaling_cluster_with_upgrade[0].crn : ibm_container_vpc_cluster.cluster_with_upgrade[0].crn) : (var.ignore_worker_pool_size_changes ? ibm_container_vpc_cluster.autoscaling_cluster[0].crn : ibm_container_vpc_cluster.cluster[0].crn)

# security group attached to worker pool
# the terraform provider / iks api take a security group id hardcoded to "cluster", so this pseudo-value is injected into the array based on attach_default_cluster_security_group
@@ -127,7 +128,7 @@ resource "ibm_resource_tag" "cos_access_tag" {

resource "ibm_container_vpc_cluster" "cluster" {
depends_on = [time_sleep.wait_for_reset_api_key]
-  count = var.ignore_worker_pool_size_changes ? 0 : 1
+  count = var.enable_openshift_version_upgrade ? 0 : (var.ignore_worker_pool_size_changes ? 0 : 1)
name = var.cluster_name
vpc_id = var.vpc_id
tags = var.tags
@@ -195,10 +196,78 @@ resource "ibm_container_vpc_cluster" "cluster" {
}
}

+# copy of the cluster resource above which allows major openshift version upgrades
+resource "ibm_container_vpc_cluster" "cluster_with_upgrade" {
+  depends_on = [time_sleep.wait_for_reset_api_key]
+  count = var.enable_openshift_version_upgrade ? (var.ignore_worker_pool_size_changes ? 0 : 1) : 0
+  name = var.cluster_name
+  vpc_id = var.vpc_id
+  tags = var.tags
+  kube_version = local.ocp_version
+  flavor = local.default_pool.machine_type
+  entitlement = var.ocp_entitlement
+  cos_instance_crn = local.cos_instance_crn
+  worker_count = local.default_pool.workers_per_zone
+  resource_group_id = var.resource_group_id
+  wait_till = var.cluster_ready_when
+  force_delete_storage = var.force_delete_storage
+  secondary_storage = local.default_pool.secondary_storage
+  pod_subnet = var.pod_subnet_cidr
+  service_subnet = var.service_subnet_cidr
+  operating_system = local.default_pool.operating_system
+  disable_public_service_endpoint = var.disable_public_endpoint
+  worker_labels = local.default_pool.labels
+  disable_outbound_traffic_protection = local.disable_outbound_traffic_protection
+  crk = local.default_pool.boot_volume_encryption_kms_config == null ? null : local.default_pool.boot_volume_encryption_kms_config.crk
+  kms_instance_id = local.default_pool.boot_volume_encryption_kms_config == null ? null : local.default_pool.boot_volume_encryption_kms_config.kms_instance_id
+  kms_account_id = local.default_pool.boot_volume_encryption_kms_config == null ? null : local.default_pool.boot_volume_encryption_kms_config.kms_account_id
+
+  security_groups = local.cluster_security_groups
+
+  # default workers are mapped to the subnets that are "private"
+  dynamic "zones" {
+    for_each = local.default_pool.subnet_prefix != null ? var.vpc_subnets[local.default_pool.subnet_prefix] : local.default_pool.vpc_subnets
+    content {
+      subnet_id = zones.value.id
+      name = zones.value.zone
+    }
+  }
+
+  # Apply taints to the default worker pools, i.e. private
+
+  dynamic "taints" {
+    for_each = var.worker_pools_taints == null ? [] : concat(var.worker_pools_taints["all"], var.worker_pools_taints["default"])
+    content {
+      effect = taints.value.effect
+      key = taints.value.key
+      value = taints.value.value
+    }
+  }
+
+  dynamic "kms_config" {
+    for_each = var.kms_config != null ? [1] : []
+    content {
+      crk_id = var.kms_config.crk_id
+      instance_id = var.kms_config.instance_id
+      private_endpoint = var.kms_config.private_endpoint == null ? true : var.kms_config.private_endpoint
+      account_id = var.kms_config.account_id
+      wait_for_apply = var.kms_config.wait_for_apply
+    }
+  }
+
+  timeouts {
+    # Extend create, update and delete timeouts to static values.
+    delete = local.delete_timeout
+    create = local.create_timeout
+    update = local.update_timeout
+  }
+}
+
+
# copy of the cluster resource above which ignores changes to the worker pool for use in autoscaling scenarios
resource "ibm_container_vpc_cluster" "autoscaling_cluster" {
depends_on = [time_sleep.wait_for_reset_api_key]
-  count = var.ignore_worker_pool_size_changes ? 1 : 0
+  count = var.enable_openshift_version_upgrade ? 0 : (var.ignore_worker_pool_size_changes ? 1 : 0)
name = var.cluster_name
vpc_id = var.vpc_id
tags = var.tags
@@ -266,13 +335,84 @@ resource "ibm_container_vpc_cluster" "autoscaling_cluster" {
}
}

+# copy of the cluster resource above which allows major openshift version upgrades
+resource "ibm_container_vpc_cluster" "autoscaling_cluster_with_upgrade" {
+  depends_on = [time_sleep.wait_for_reset_api_key]
+  count = var.enable_openshift_version_upgrade ? (var.ignore_worker_pool_size_changes ? 1 : 0) : 0
+  name = var.cluster_name
+  vpc_id = var.vpc_id
+  tags = var.tags
+  kube_version = local.ocp_version
+  flavor = local.default_pool.machine_type
+  entitlement = var.ocp_entitlement
+  cos_instance_crn = local.cos_instance_crn
+  worker_count = local.default_pool.workers_per_zone
+  resource_group_id = var.resource_group_id
+  wait_till = var.cluster_ready_when
+  force_delete_storage = var.force_delete_storage
+  operating_system = local.default_pool.operating_system
+  secondary_storage = local.default_pool.secondary_storage
+  pod_subnet = var.pod_subnet_cidr
+  service_subnet = var.service_subnet_cidr
+  disable_public_service_endpoint = var.disable_public_endpoint
+  worker_labels = local.default_pool.labels
+  disable_outbound_traffic_protection = local.disable_outbound_traffic_protection
+  crk = local.default_pool.boot_volume_encryption_kms_config == null ? null : local.default_pool.boot_volume_encryption_kms_config.crk
+  kms_instance_id = local.default_pool.boot_volume_encryption_kms_config == null ? null : local.default_pool.boot_volume_encryption_kms_config.kms_instance_id
+  kms_account_id = local.default_pool.boot_volume_encryption_kms_config == null ? null : local.default_pool.boot_volume_encryption_kms_config.kms_account_id
+
+  security_groups = local.cluster_security_groups
+
+  lifecycle {
+    ignore_changes = [worker_count]
+  }
+
+  # default workers are mapped to the subnets that are "private"
+  dynamic "zones" {
+    for_each = local.default_pool.subnet_prefix != null ? var.vpc_subnets[local.default_pool.subnet_prefix] : local.default_pool.vpc_subnets
+    content {
+      subnet_id = zones.value.id
+      name = zones.value.zone
+    }
+  }
+
+  # Apply taints to the default worker pools, i.e. private
+
+  dynamic "taints" {
+    for_each = var.worker_pools_taints == null ? [] : concat(var.worker_pools_taints["all"], var.worker_pools_taints["default"])
+    content {
+      effect = taints.value.effect
+      key = taints.value.key
+      value = taints.value.value
+    }
+  }
+
+  dynamic "kms_config" {
+    for_each = var.kms_config != null ? [1] : []
+    content {
+      crk_id = var.kms_config.crk_id
+      instance_id = var.kms_config.instance_id
+      private_endpoint = var.kms_config.private_endpoint
+      account_id = var.kms_config.account_id
+      wait_for_apply = var.kms_config.wait_for_apply
+    }
+  }
+
+  timeouts {
+    # Extend create, update and delete timeouts to static values.
+    delete = local.delete_timeout
+    create = local.create_timeout
+    update = local.update_timeout
+  }
+}
+
##############################################################################
# Cluster Access Tag
##############################################################################

resource "ibm_resource_tag" "cluster_access_tag" {
count = length(var.access_tags) == 0 ? 0 : 1
-  resource_id = var.ignore_worker_pool_size_changes ? ibm_container_vpc_cluster.autoscaling_cluster[0].crn : ibm_container_vpc_cluster.cluster[0].crn
+  resource_id = local.cluster_crn
tags = var.access_tags
tag_type = "access"
}
@@ -448,7 +588,7 @@ resource "null_resource" "confirm_network_healthy" {
# Worker pool creation can start before the 'ibm_container_vpc_cluster' completes since there is no explicit
# depends_on in 'ibm_container_vpc_worker_pool', just an implicit depends_on on the cluster ID. Cluster ID can exist before
# 'ibm_container_vpc_cluster' completes, hence the explicit depends_on against 'ibm_container_vpc_cluster' here.
-  depends_on = [ibm_container_vpc_cluster.cluster, ibm_container_vpc_cluster.autoscaling_cluster, ibm_container_vpc_worker_pool.pool, ibm_container_vpc_worker_pool.autoscaling_pool]
+  depends_on = [ibm_container_vpc_cluster.cluster, ibm_container_vpc_cluster.cluster_with_upgrade, ibm_container_vpc_cluster.autoscaling_cluster, ibm_container_vpc_cluster.autoscaling_cluster_with_upgrade, ibm_container_vpc_worker_pool.pool, ibm_container_vpc_worker_pool.autoscaling_pool]

provisioner "local-exec" {
command = "${path.module}/scripts/confirm_network_healthy.sh"
@@ -502,7 +642,7 @@ resource "ibm_container_addons" "addons" {
# Worker pool creation can start before the 'ibm_container_vpc_cluster' completes since there is no explicit
# depends_on in 'ibm_container_vpc_worker_pool', just an implicit depends_on on the cluster ID. Cluster ID can exist before
# 'ibm_container_vpc_cluster' completes, hence the explicit depends_on against 'ibm_container_vpc_cluster' here.
-  depends_on = [ibm_container_vpc_cluster.cluster, ibm_container_vpc_cluster.autoscaling_cluster, ibm_container_vpc_worker_pool.pool, ibm_container_vpc_worker_pool.autoscaling_pool, null_resource.confirm_network_healthy]
+  depends_on = [ibm_container_vpc_cluster.cluster, ibm_container_vpc_cluster.cluster_with_upgrade, ibm_container_vpc_cluster.autoscaling_cluster, ibm_container_vpc_cluster.autoscaling_cluster_with_upgrade, ibm_container_vpc_worker_pool.pool, ibm_container_vpc_worker_pool.autoscaling_pool, null_resource.confirm_network_healthy]
cluster = local.cluster_id
resource_group_id = var.resource_group_id

@@ -575,7 +715,7 @@ resource "kubernetes_config_map_v1_data" "set_autoscaling" {
##############################################################################

data "ibm_is_lbs" "all_lbs" {
-  depends_on = [ibm_container_vpc_cluster.cluster, ibm_container_vpc_worker_pool.pool, ibm_container_vpc_worker_pool.autoscaling_pool, null_resource.confirm_network_healthy]
+  depends_on = [ibm_container_vpc_cluster.cluster, ibm_container_vpc_cluster.cluster_with_upgrade, ibm_container_vpc_worker_pool.pool, ibm_container_vpc_worker_pool.autoscaling_pool, null_resource.confirm_network_healthy]
count = length(var.additional_lb_security_group_ids) > 0 ? 1 : 0
}

@@ -611,19 +751,19 @@ locals {

data "ibm_is_virtual_endpoint_gateway" "master_vpe" {
count = length(var.additional_vpe_security_group_ids["master"])
-  depends_on = [ibm_container_vpc_cluster.cluster, ibm_container_vpc_worker_pool.pool, ibm_container_vpc_worker_pool.autoscaling_pool, null_resource.confirm_network_healthy]
+  depends_on = [ibm_container_vpc_cluster.cluster, ibm_container_vpc_cluster.cluster_with_upgrade, ibm_container_vpc_worker_pool.pool, ibm_container_vpc_worker_pool.autoscaling_pool, null_resource.confirm_network_healthy]
name = local.vpes_to_attach_to_sg["master"]
}

data "ibm_is_virtual_endpoint_gateway" "api_vpe" {
count = length(var.additional_vpe_security_group_ids["api"])
-  depends_on = [ibm_container_vpc_cluster.cluster, ibm_container_vpc_worker_pool.pool, ibm_container_vpc_worker_pool.autoscaling_pool, null_resource.confirm_network_healthy]
+  depends_on = [ibm_container_vpc_cluster.cluster, ibm_container_vpc_cluster.cluster_with_upgrade, ibm_container_vpc_worker_pool.pool, ibm_container_vpc_worker_pool.autoscaling_pool, null_resource.confirm_network_healthy]
name = local.vpes_to_attach_to_sg["api"]
}

data "ibm_is_virtual_endpoint_gateway" "registry_vpe" {
count = length(var.additional_vpe_security_group_ids["registry"])
-  depends_on = [ibm_container_vpc_cluster.cluster, ibm_container_vpc_worker_pool.pool, ibm_container_vpc_worker_pool.autoscaling_pool, null_resource.confirm_network_healthy]
+  depends_on = [ibm_container_vpc_cluster.cluster, ibm_container_vpc_cluster.cluster_with_upgrade, ibm_container_vpc_worker_pool.pool, ibm_container_vpc_worker_pool.autoscaling_pool, null_resource.confirm_network_healthy]
name = local.vpes_to_attach_to_sg["registry"]
}

@@ -689,7 +829,7 @@ module "cbr_rule" {
},
{
name = "serviceInstance"
-      value = var.ignore_worker_pool_size_changes ? ibm_container_vpc_cluster.autoscaling_cluster[0].id : ibm_container_vpc_cluster.cluster[0].id
+      value = local.cluster_id
operator = "stringEquals"
},
{
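Taken together, exactly one of the four cluster resources is ever created: `enable_openshift_version_upgrade` selects the `*_with_upgrade` variant, `ignore_worker_pool_size_changes` selects the autoscaling variant, and the `cluster_id` and `cluster_crn` locals resolve to whichever instance is active. Where a `moved` block is not practical (the PR also adds a migration script and a Schematics script for this purpose), the same state move can be done with the CLI; a sketch under the same assumption that the module instance is named `ocp_base`:

```sh
# Move the existing cluster to the upgrade-enabled resource address before
# applying with enable_openshift_version_upgrade = true.
terraform state mv \
  'module.ocp_base.ibm_container_vpc_cluster.cluster[0]' \
  'module.ocp_base.ibm_container_vpc_cluster.cluster_with_upgrade[0]'
```

Reverting the flag requires the inverse move before the next plan.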