From b9b17b59f3bf4859baaeb8ac605892a01bed82f9 Mon Sep 17 00:00:00 2001
From: "aashiq.jacob@ibm.com"
Date: Mon, 21 Oct 2024 10:40:45 +0530
Subject: [PATCH 1/3] fix(deps): update base-ocp to 3.34.0

---
 cluster.tf                                               | 3 +--
 dynamic_values/cluster.tf                                | 1 +
 dynamic_values/cluster_worker_pools.tf                   | 1 +
 .../config_modules/cluster_worker_pools/worker_pools.tf  | 2 +-
 examples/override-example/override.json                  | 1 +
 patterns/mixed/config.tf                                 | 1 +
 patterns/mixed/override.json                             | 1 +
 patterns/roks/module/config.tf                           | 1 +
 patterns/roks/module/variables.tf                        | 4 ++--
 patterns/roks/override.json                              | 2 ++
 patterns/roks/variables.tf                               | 4 ++--
 variables.tf                                             | 2 +-
 12 files changed, 15 insertions(+), 8 deletions(-)

diff --git a/cluster.tf b/cluster.tf
index 93ea7dbe5..1024dd225 100644
--- a/cluster.tf
+++ b/cluster.tf
@@ -244,7 +244,7 @@ module "cluster" {
     if cluster.kube_type == "openshift"
   }
   source            = "terraform-ibm-modules/base-ocp-vpc/ibm"
-  version           = "3.32.1"
+  version           = "3.34.0"
   resource_group_id = local.resource_groups[each.value.resource_group]
   region            = var.region
   cluster_name      = each.value.cluster_name
@@ -289,7 +289,6 @@ module "cluster" {
     ] : []
   )
   force_delete_storage = each.value.cluster_force_delete_storage
-  operating_system     = each.value.operating_system
   ocp_version          = each.value.kube_version == null || each.value.kube_version == "default" ? each.value.kube_version : replace(each.value.kube_version, "_openshift", "")
   import_default_worker_pool_on_create  = each.value.import_default_worker_pool_on_create
   allow_default_worker_pool_replacement = each.value.allow_default_worker_pool_replacement
diff --git a/dynamic_values/cluster.tf b/dynamic_values/cluster.tf
index 58e0f116b..7e2fac70e 100644
--- a/dynamic_values/cluster.tf
+++ b/dynamic_values/cluster.tf
@@ -37,6 +37,7 @@ module "ut_cluster_map" {
         workers_per_subnet = 2
         flavor             = "spicy"
         secondary_storage  = "300gb.5iops-tier"
+        operating_system   = "REDHAT_8_64"
       }
     ]
   }
diff --git a/dynamic_values/cluster_worker_pools.tf b/dynamic_values/cluster_worker_pools.tf
index 143b53097..c9483343c 100644
--- a/dynamic_values/cluster_worker_pools.tf
+++ b/dynamic_values/cluster_worker_pools.tf
@@ -36,6 +36,7 @@ module "ut_worker_pools" {
         workers_per_subnet = 2
         flavor             = "spicy"
         secondary_storage  = "300gb.5iops-tier"
+        operating_system   = "REDHAT_8_64"
       }
     ]
   }
diff --git a/dynamic_values/config_modules/cluster_worker_pools/worker_pools.tf b/dynamic_values/config_modules/cluster_worker_pools/worker_pools.tf
index 3698a4174..493490e64 100644
--- a/dynamic_values/config_modules/cluster_worker_pools/worker_pools.tf
+++ b/dynamic_values/config_modules/cluster_worker_pools/worker_pools.tf
@@ -86,7 +86,7 @@ locals {
           vpc_id           = var.vpc_modules[pool.vpc_name].vpc_id # add vpc_id
           subnets          = module.worker_pool_subnets["${var.prefix}-${cluster.name}-${pool.name}"].subnets
           kube_type        = cluster.kube_type
-          operating_system = lookup(pool, "operating_system", null)
+          operating_system = lookup(pool, "operating_system", "REDHAT_8_64")
         }) if pool != null
       ] if cluster.worker_pools != null
     ]
diff --git a/examples/override-example/override.json b/examples/override-example/override.json
index d31de27ac..6b13e59b9 100644
--- a/examples/override-example/override.json
+++ b/examples/override-example/override.json
@@ -36,6 +36,7 @@
           "flavor": "bx2.16x64",
           "name": "logging-worker-pool",
           "secondary_storage": "300gb.5iops-tier",
+          "operating_system" : "REDHAT_8_64",
           "subnet_names": [
             "vsi-zone-1",
             "vsi-zone-2",
diff --git a/patterns/mixed/config.tf b/patterns/mixed/config.tf
index 23a388f69..69489cca2 100644
--- a/patterns/mixed/config.tf
+++ b/patterns/mixed/config.tf
@@ -133,6 +133,7 @@ locals {
             "vsi-zone-${zone}"
           ]
           entitlement        = var.entitlement
+          operating_system   = "REDHAT_8_64"
           workers_per_subnet = var.workers_per_zone
           flavor             = var.flavor
           secondary_storage  = var.secondary_storage
diff --git a/patterns/mixed/override.json b/patterns/mixed/override.json
index 3345fd017..c6f7c6db1 100644
--- a/patterns/mixed/override.json
+++ b/patterns/mixed/override.json
@@ -29,6 +29,7 @@
           "entitlement": "cloud_pak",
           "flavor": "bx2.16x64",
           "name": "logging-worker-pool",
+          "operating_system": "REDHAT_8_64",
           "subnet_names": [
             "vsi-zone-1",
             "vsi-zone-2",
diff --git a/patterns/roks/module/config.tf b/patterns/roks/module/config.tf
index d30aa1a41..8273e3f25 100644
--- a/patterns/roks/module/config.tf
+++ b/patterns/roks/module/config.tf
@@ -122,6 +122,7 @@ locals {
       #   workers_per_subnet   = var.workers_per_zone
      #   flavor               = var.flavor
       #   boot_volume_crk_name = "${var.prefix}-roks-key"
+      #   operating_system     = "REDHAT_8_64"
       # }
     ]
   }
diff --git a/patterns/roks/module/variables.tf b/patterns/roks/module/variables.tf
index 4ca3ae98a..2790846ae 100644
--- a/patterns/roks/module/variables.tf
+++ b/patterns/roks/module/variables.tf
@@ -272,10 +272,10 @@ variable "cluster_force_delete_storage" {
 variable "operating_system" {
   type        = string
   description = "The operating system of the workers in the default worker pool. If no value is specified, the current default version OS will be used. See https://cloud.ibm.com/docs/openshift?topic=openshift-openshift_versions#openshift_versions_available ."
-  default     = null
+  default     = "REDHAT_8_64"
   validation {
     error_message = "RHEL 8 (REDHAT_8_64) or Red Hat Enterprise Linux CoreOS (RHCOS) are the allowed OS values. RHCOS requires VPC clusters created from 4.15 onwards. Upgraded clusters from 4.14 cannot use RHCOS."
-    condition     = var.operating_system == null || var.operating_system == "REDHAT_8_64" || var.operating_system == "RHCOS"
+    condition     = var.operating_system == "REDHAT_8_64" || var.operating_system == "RHCOS"
   }
 }

diff --git a/patterns/roks/override.json b/patterns/roks/override.json
index 52cd9e562..2df034350 100644
--- a/patterns/roks/override.json
+++ b/patterns/roks/override.json
@@ -39,6 +39,7 @@
             "vsi-zone-3"
           ],
           "vpc_name": "management",
+          "operating_system": "REDHAT_8_64",
           "workers_per_subnet": 2
         }
       ],
@@ -76,6 +77,7 @@
             "vsi-zone-3"
           ],
           "vpc_name": "workload",
+          "operating_system": "REDHAT_8_64",
           "workers_per_subnet": 2
         }
       ],
diff --git a/patterns/roks/variables.tf b/patterns/roks/variables.tf
index cc1d86fbf..902a6cd78 100644
--- a/patterns/roks/variables.tf
+++ b/patterns/roks/variables.tf
@@ -290,10 +290,10 @@ variable "cluster_force_delete_storage" {
 variable "operating_system" {
   type        = string
   description = "The operating system of the workers in the default worker pool. If no value is specified, the current default version OS will be used. See https://cloud.ibm.com/docs/openshift?topic=openshift-openshift_versions#openshift_versions_available ."
-  default     = null
+  default     = "REDHAT_8_64"
   validation {
     error_message = "RHEL 8 (REDHAT_8_64) or Red Hat Enterprise Linux CoreOS (RHCOS) are the allowed OS values. RHCOS requires VPC clusters created from 4.15 onwards. Upgraded clusters from 4.14 cannot use RHCOS."
-    condition     = var.operating_system == null || var.operating_system == "REDHAT_8_64" || var.operating_system == "RHCOS"
+    condition     = var.operating_system == "REDHAT_8_64" || var.operating_system == "RHCOS"
   }
 }

diff --git a/variables.tf b/variables.tf
index 534a634ff..f623ee4aa 100644
--- a/variables.tf
+++ b/variables.tf
@@ -889,7 +889,7 @@ variable "clusters" {
           entitlement          = optional(string)      # entitlement option for openshift
           secondary_storage    = optional(string)      # Secondary storage type
           boot_volume_crk_name = optional(string)      # Boot volume encryption key name
-          operating_system     = optional(string)      # The operating system of the workers in the default worker pool. If no value is specified, the current default version OS will be used. See https://cloud.ibm.com/docs/openshift?topic=openshift-openshift_versions#openshift_versions_available .
+          operating_system     = string                # The operating system of the workers in the default worker pool. See https://cloud.ibm.com/docs/openshift?topic=openshift-openshift_versions#openshift_versions_available .
           labels               = optional(map(string)) # A list of labels that you want to add to all the worker nodes in the worker pool.
         })
       )

From 05cd8609c454e99001b4554785006836f0835db9 Mon Sep 17 00:00:00 2001
From: "aashiq.jacob@ibm.com"
Date: Mon, 21 Oct 2024 10:43:19 +0530
Subject: [PATCH 2/3] precommit

---
 README.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/README.md b/README.md
index d346435f6..8ac1018b1 100644
--- a/README.md
+++ b/README.md
@@ -852,7 +852,7 @@ module "cluster_pattern" {
| Name | Source | Version |
|------|--------|---------|
| [bastion\_host](#module\_bastion\_host) | terraform-ibm-modules/landing-zone-vsi/ibm | 4.2.0 |
-| [cluster](#module\_cluster) | terraform-ibm-modules/base-ocp-vpc/ibm | 3.32.1 |
+| [cluster](#module\_cluster) | terraform-ibm-modules/base-ocp-vpc/ibm | 3.34.0 |
| [dynamic\_values](#module\_dynamic\_values) | ./dynamic_values | n/a |
| [f5\_vsi](#module\_f5\_vsi) | terraform-ibm-modules/landing-zone-vsi/ibm | 4.2.0 |
| [key\_management](#module\_key\_management) | ./kms | n/a |
@@ -911,7 +911,7 @@ module "cluster_pattern" {
|------|-------------|------|---------|:--------:|
| [appid](#input\_appid) | The App ID instance to be used for the teleport vsi deployments |
object({
name = optional(string)
resource_group = optional(string)
use_data = optional(bool)
keys = optional(list(string))
use_appid = bool
})
|
{
"use_appid": false
}
| no |
| [atracker](#input\_atracker) | atracker variables |
object({
resource_group = string
receive_global_events = bool
collector_bucket_name = string
add_route = bool
})
| n/a | yes |
-| [clusters](#input\_clusters) | A list describing clusters workloads to create |
list(
object({
name = string # Name of Cluster
vpc_name = string # Name of VPC
subnet_names = list(string) # List of vpc subnets for cluster
workers_per_subnet = number # Worker nodes per subnet.
machine_type = string # Worker node flavor
kube_type = string # iks or openshift
kube_version = optional(string) # Can be a version from `ibmcloud ks versions` or `default`
entitlement = optional(string) # entitlement option for openshift
secondary_storage = optional(string) # Secondary storage type
pod_subnet = optional(string) # Portable subnet for pods
service_subnet = optional(string) # Portable subnet for services
resource_group = string # Resource Group used for cluster
cos_name = optional(string) # Name of COS instance Required only for OpenShift clusters
access_tags = optional(list(string), [])
boot_volume_crk_name = optional(string) # Boot volume encryption key name
disable_public_endpoint = optional(bool, true) # disable cluster public, leaving only private endpoint
disable_outbound_traffic_protection = optional(bool, false) # public outbound access from the cluster workers
cluster_force_delete_storage = optional(bool, false) # force the removal of persistent storage associated with the cluster during cluster deletion
operating_system = optional(string, null) #The operating system of the workers in the default worker pool. If no value is specified, the current default version OS will be used. See https://cloud.ibm.com/docs/openshift?topic=openshift-openshift_versions#openshift_versions_available .
kms_wait_for_apply = optional(bool, true) # make terraform wait until KMS is applied to master and it is ready and deployed
verify_cluster_network_readiness = optional(bool, true) # Flag to run a script will run kubectl commands to verify that all worker nodes can communicate successfully with the master. If the runtime does not have access to the kube cluster to run kubectl commands, this should be set to false.
use_ibm_cloud_private_api_endpoints = optional(bool, true) # Flag to force all cluster related api calls to use the IBM Cloud private endpoints.
import_default_worker_pool_on_create = optional(bool) # (Advanced users) Whether to handle the default worker pool as a stand-alone ibm_container_vpc_worker_pool resource on cluster creation. Only set to false if you understand the implications of managing the default worker pool as part of the cluster resource. Set to true to import the default worker pool as a separate resource. Set to false to manage the default worker pool as part of the cluster resource.
allow_default_worker_pool_replacement = optional(bool) # (Advanced users) Set to true to allow the module to recreate a default worker pool. Only use in the case where you are getting an error indicating that the default worker pool cannot be replaced on apply. Once the default worker pool is handled as a stand-alone ibm_container_vpc_worker_pool, if you wish to make any change to the default worker pool which requires the re-creation of the default pool set this variable to true
labels = optional(map(string)) # A list of labels that you want to add to the default worker pool.
addons = optional(object({ # Map of OCP cluster add-on versions to install
debug-tool = optional(string)
image-key-synchronizer = optional(string)
openshift-data-foundation = optional(string)
vpc-file-csi-driver = optional(string)
static-route = optional(string)
cluster-autoscaler = optional(string)
vpc-block-csi-driver = optional(string)
ibm-storage-operator = optional(string)
}), {})
manage_all_addons = optional(bool, false) # Instructs Terraform to manage all cluster addons, even if addons were installed outside of the module. If set to 'true' this module will destroy any addons that were installed by other sources.
kms_config = optional(
object({
crk_name = string # Name of key
private_endpoint = optional(bool) # Private endpoint
})
)
worker_pools = optional(
list(
object({
name = string # Worker pool name
vpc_name = string # VPC name
workers_per_subnet = number # Worker nodes per subnet
flavor = string # Worker node flavor
subnet_names = list(string) # List of vpc subnets for worker pool
entitlement = optional(string) # entitlement option for openshift
secondary_storage = optional(string) # Secondary storage type
boot_volume_crk_name = optional(string) # Boot volume encryption key name
operating_system = optional(string) # The operating system of the workers in the default worker pool. If no value is specified, the current default version OS will be used. See https://cloud.ibm.com/docs/openshift?topic=openshift-openshift_versions#openshift_versions_available .
labels = optional(map(string)) # A list of labels that you want to add to all the worker nodes in the worker pool.
})
)
)
})
)
| n/a | yes |
+| [clusters](#input\_clusters) | A list describing clusters workloads to create |
list(
object({
name = string # Name of Cluster
vpc_name = string # Name of VPC
subnet_names = list(string) # List of vpc subnets for cluster
workers_per_subnet = number # Worker nodes per subnet.
machine_type = string # Worker node flavor
kube_type = string # iks or openshift
kube_version = optional(string) # Can be a version from `ibmcloud ks versions` or `default`
entitlement = optional(string) # entitlement option for openshift
secondary_storage = optional(string) # Secondary storage type
pod_subnet = optional(string) # Portable subnet for pods
service_subnet = optional(string) # Portable subnet for services
resource_group = string # Resource Group used for cluster
cos_name = optional(string) # Name of COS instance Required only for OpenShift clusters
access_tags = optional(list(string), [])
boot_volume_crk_name = optional(string) # Boot volume encryption key name
disable_public_endpoint = optional(bool, true) # disable cluster public, leaving only private endpoint
disable_outbound_traffic_protection = optional(bool, false) # public outbound access from the cluster workers
cluster_force_delete_storage = optional(bool, false) # force the removal of persistent storage associated with the cluster during cluster deletion
operating_system = optional(string, null) #The operating system of the workers in the default worker pool. If no value is specified, the current default version OS will be used. See https://cloud.ibm.com/docs/openshift?topic=openshift-openshift_versions#openshift_versions_available .
kms_wait_for_apply = optional(bool, true) # make terraform wait until KMS is applied to master and it is ready and deployed
verify_cluster_network_readiness = optional(bool, true) # Flag to run a script will run kubectl commands to verify that all worker nodes can communicate successfully with the master. If the runtime does not have access to the kube cluster to run kubectl commands, this should be set to false.
use_ibm_cloud_private_api_endpoints = optional(bool, true) # Flag to force all cluster related api calls to use the IBM Cloud private endpoints.
import_default_worker_pool_on_create = optional(bool) # (Advanced users) Whether to handle the default worker pool as a stand-alone ibm_container_vpc_worker_pool resource on cluster creation. Only set to false if you understand the implications of managing the default worker pool as part of the cluster resource. Set to true to import the default worker pool as a separate resource. Set to false to manage the default worker pool as part of the cluster resource.
allow_default_worker_pool_replacement = optional(bool) # (Advanced users) Set to true to allow the module to recreate a default worker pool. Only use in the case where you are getting an error indicating that the default worker pool cannot be replaced on apply. Once the default worker pool is handled as a stand-alone ibm_container_vpc_worker_pool, if you wish to make any change to the default worker pool which requires the re-creation of the default pool set this variable to true
labels = optional(map(string)) # A list of labels that you want to add to the default worker pool.
addons = optional(object({ # Map of OCP cluster add-on versions to install
debug-tool = optional(string)
image-key-synchronizer = optional(string)
openshift-data-foundation = optional(string)
vpc-file-csi-driver = optional(string)
static-route = optional(string)
cluster-autoscaler = optional(string)
vpc-block-csi-driver = optional(string)
ibm-storage-operator = optional(string)
}), {})
manage_all_addons = optional(bool, false) # Instructs Terraform to manage all cluster addons, even if addons were installed outside of the module. If set to 'true' this module will destroy any addons that were installed by other sources.
kms_config = optional(
object({
crk_name = string # Name of key
private_endpoint = optional(bool) # Private endpoint
})
)
worker_pools = optional(
list(
object({
name = string # Worker pool name
vpc_name = string # VPC name
workers_per_subnet = number # Worker nodes per subnet
flavor = string # Worker node flavor
subnet_names = list(string) # List of vpc subnets for worker pool
entitlement = optional(string) # entitlement option for openshift
secondary_storage = optional(string) # Secondary storage type
boot_volume_crk_name = optional(string) # Boot volume encryption key name
operating_system = string # The operating system of the workers in the default worker pool. See https://cloud.ibm.com/docs/openshift?topic=openshift-openshift_versions#openshift_versions_available .
labels = optional(map(string)) # A list of labels that you want to add to all the worker nodes in the worker pool.
})
)
)
})
)
| n/a | yes |
| [cos](#input\_cos) | Object describing the cloud object storage instance, buckets, and keys. Set `use_data` to false to create instance |
list(
object({
name = string
use_data = optional(bool)
resource_group = string
plan = optional(string)
random_suffix = optional(bool) # Use a random suffix for COS instance
access_tags = optional(list(string), [])
skip_kms_s2s_auth_policy = optional(bool, false) # skip auth policy between this instance and kms instance, useful if existing resources are used
skip_flowlogs_s2s_auth_policy = optional(bool, false) # skip auth policy between flow logs service and this instance, set to true if this policy is already in place on account
skip_atracker_s2s_auth_policy = optional(bool, false) # skip auth policyt between atracker service and this instance, set to true if this is existing recipient of atracker already
buckets = list(object({
name = string
storage_class = string
endpoint_type = string
force_delete = bool
single_site_location = optional(string)
region_location = optional(string)
cross_region_location = optional(string)
kms_key = optional(string)
access_tags = optional(list(string), [])
allowed_ip = optional(list(string), [])
hard_quota = optional(number)
archive_rule = optional(object({
days = number
enable = bool
rule_id = optional(string)
type = string
}))
expire_rule = optional(object({
days = optional(number)
date = optional(string)
enable = bool
expired_object_delete_marker = optional(string)
prefix = optional(string)
rule_id = optional(string)
}))
activity_tracking = optional(object({
activity_tracker_crn = string
read_data_events = bool
write_data_events = bool
management_events = bool
}))
metrics_monitoring = optional(object({
metrics_monitoring_crn = string
request_metrics_enabled = optional(bool)
usage_metrics_enabled = optional(bool)
}))
}))
keys = optional(
list(object({
name = string
role = string
enable_HMAC = bool
}))
)

})
)
| n/a | yes |
| [enable\_transit\_gateway](#input\_enable\_transit\_gateway) | Create transit gateway | `bool` | `true` | no |
| [existing\_vpc\_cbr\_zone\_id](#input\_existing\_vpc\_cbr\_zone\_id) | ID of the existing CBR (Context-based restrictions) network zone, with context set to the VPC. This zone is used in a CBR rule, which allows traffic to flow only from the landing zone VPCs to specific cloud services. | `string` | `null` | no |

From 65a50e709acde6dbc6e93dfdf71eff628dab1b26 Mon Sep 17 00:00:00 2001
From: "aashiq.jacob@ibm.com"
Date: Tue, 22 Oct 2024 08:38:45 +0530
Subject: [PATCH 3/3] update missing value

---
 patterns/roks-quickstart/main.tf | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/patterns/roks-quickstart/main.tf b/patterns/roks-quickstart/main.tf
index 00a8b162d..fc218b914 100644
--- a/patterns/roks-quickstart/main.tf
+++ b/patterns/roks-quickstart/main.tf
@@ -34,7 +34,7 @@ locals {
       "resource_group": "workload-rg",
       "disable_outbound_traffic_protection": true,
       "cluster_force_delete_storage": true,
-      "operating_system": null,
+      "operating_system": "REDHAT_8_64",
       "kms_wait_for_apply": true,
       "kms_config": {
         "crk_name": "roks-key",
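
Taken together, the three patches pin the worker node OS: the patterns and examples now set `operating_system = "REDHAT_8_64"` explicitly, the ROKS pattern variable defaults to `REDHAT_8_64` instead of `null`, worker pools declared under the root `clusters` variable must now specify an `operating_system`, and pools that omit it fall back to `REDHAT_8_64`. A minimal tfvars-style sketch of a caller-side `clusters` entry under the updated schema is shown below; the names, subnets, and flavors are illustrative placeholders borrowed from the pattern examples, not required values.

```hcl
clusters = [
  {
    name               = "workload-cluster" # illustrative name
    vpc_name           = "workload"
    resource_group     = "workload-rg"
    kube_type          = "openshift"
    cos_name           = "cos"        # required only for OpenShift clusters
    machine_type       = "bx2.16x64"
    subnet_names       = ["vsi-zone-1", "vsi-zone-2", "vsi-zone-3"]
    workers_per_subnet = 2
    operating_system   = "REDHAT_8_64" # optional at the cluster level; REDHAT_8_64 or RHCOS (RHCOS needs clusters created on 4.15+)
    worker_pools = [
      {
        name               = "logging-worker-pool"
        vpc_name           = "workload"
        flavor             = "bx2.16x64"
        subnet_names       = ["vsi-zone-1", "vsi-zone-2", "vsi-zone-3"]
        workers_per_subnet = 2
        operating_system   = "REDHAT_8_64" # required for each worker pool after this change
      }
    ]
  }
]
```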