
Commit 693c8e2

feat: updated the [landing-zone-vsi](https://github.com/terraform-ibm-modules/terraform-ibm-landing-zone-vsi) module to 4.2.0

- Refactored the logic used to create OCP clusters; it now uses the [base-ocp-vpc](https://github.com/terraform-ibm-modules/terraform-ibm-base-ocp-vpc) module.
- The base-ocp-vpc module has some extra functionality that requires the runtime to have access to IBM Cloud private endpoints. (#870)
BREAKING CHANGE: If you upgrade the 'Red Hat OpenShift Container Platform on VPC landing zone' solution or the 'VSI on VPC landing zone' solution from a previous version to this version, infrastructure will be destroyed and recreated. A fully supported migration method will be available shortly. If re-creating infrastructure would impact day-to-day operations, we suggest holding off on upgrading to this version until a fully supported migration path exists.
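Until that migration path is published, one low-risk option is to stay pinned to the release you are currently running. The sketch below is illustrative only: the registry source path for the ROKS pattern is an assumption about how you consume the solution, and the version is a placeholder for whatever release you are on today.

```hcl
module "landing_zone" {
  # Assumed registry source for the ROKS pattern; keep whatever source and
  # consumption method (registry, git ref, catalog) you already use.
  source  = "terraform-ibm-modules/landing-zone/ibm//patterns/roks"
  version = "x.y.z" # placeholder: pin to your current release until a migration path exists

  # ... existing inputs unchanged ...
}
```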
1 parent b576712 commit 693c8e2

File tree

18 files changed, +307 -115 lines changed


README.md

Lines changed: 5 additions & 4 deletions
Large diffs are not rendered by default.

bastion_host.tf

Lines changed: 1 addition & 1 deletion
```diff
@@ -42,7 +42,7 @@ module "teleport_config" {
 
 module "bastion_host" {
   source                = "terraform-ibm-modules/landing-zone-vsi/ibm"
-  version               = "3.3.0"
+  version               = "4.2.0"
   for_each              = local.bastion_vsi_map
   resource_group_id     = each.value.resource_group == null ? null : local.resource_groups[each.value.resource_group]
   create_security_group = each.value.security_group == null ? false : true
```

cluster.tf

Lines changed: 122 additions & 10 deletions
```diff
@@ -23,21 +23,53 @@ locals {
   worker_pools_map = module.dynamic_values.worker_pools_map # Convert list to map
   clusters_map     = module.dynamic_values.clusters_map     # Convert list to map
   default_kube_version = {
-    openshift = "${data.ibm_container_cluster_versions.cluster_versions.default_openshift_version}_openshift"
-    iks       = data.ibm_container_cluster_versions.cluster_versions.default_kube_version
+    iks = data.ibm_container_cluster_versions.cluster_versions.default_kube_version
   }
+  cluster_data = merge({
+    for cluster in ibm_container_vpc_cluster.cluster :
+    cluster.name => {
+      crn                          = cluster.crn
+      id                           = cluster.id
+      resource_group_name          = cluster.resource_group_name
+      resource_group_id            = cluster.resource_group_id
+      vpc_id                       = cluster.vpc_id
+      region                       = var.region
+      private_service_endpoint_url = cluster.private_service_endpoint_url
+      public_service_endpoint_url  = (cluster.public_service_endpoint_url != "" && cluster.public_service_endpoint_url != null) ? cluster.public_service_endpoint_url : null
+      ingress_hostname             = cluster.ingress_hostname
+      cluster_console_url          = (cluster.public_service_endpoint_url != "" && cluster.public_service_endpoint_url != null) ? "https://console-openshift-console.${cluster.ingress_hostname}" : null
+
+    }
+    }, {
+    for cluster in module.cluster :
+    cluster.cluster_name => {
+      crn                          = cluster.cluster_crn
+      id                           = cluster.cluster_id
+      resource_group_id            = cluster.resource_group_id
+      vpc_id                       = cluster.vpc_id
+      region                       = var.region
+      private_service_endpoint_url = cluster.private_service_endpoint_url
+      public_service_endpoint_url  = cluster.public_service_endpoint_url
+      ingress_hostname             = cluster.ingress_hostname
+      cluster_console_url          = (cluster.public_service_endpoint_url != "" && cluster.public_service_endpoint_url != null) ? "https://console-openshift-console.${cluster.ingress_hostname}" : null
+    }
+    }
+  )
 }
 
 ##############################################################################
 
 
 ##############################################################################
-# Create IKS/ROKS on VPC Cluster
+# Create IKS on VPC Cluster
 ##############################################################################
 
 resource "ibm_container_vpc_cluster" "cluster" {
-  depends_on = [ibm_iam_authorization_policy.policy]
-  for_each   = local.clusters_map
+  depends_on = [ibm_iam_authorization_policy.policy]
+  for_each = {
+    for index, cluster in local.clusters_map : index => cluster
+    if cluster.kube_type == "iks"
+  }
   name              = "${var.prefix}-${each.value.name}"
   vpc_id            = each.value.vpc_id
   resource_group_id = local.resource_groups[each.value.resource_group]
@@ -96,7 +128,10 @@ resource "ibm_container_vpc_cluster" "cluster" {
 }
 
 resource "ibm_resource_tag" "cluster_tag" {
-  for_each    = local.clusters_map
+  for_each = {
+    for index, cluster in local.clusters_map : index => cluster
+    if cluster.kube_type == "iks"
+  }
   resource_id = ibm_container_vpc_cluster.cluster[each.key].crn
   tag_type    = "access"
   tags        = each.value.access_tags
@@ -106,11 +141,14 @@ resource "ibm_resource_tag" "cluster_tag" {
 
 
 ##############################################################################
-# Create Worker Pools
+# Create IKS Worker Pools
 ##############################################################################
 
 resource "ibm_container_vpc_worker_pool" "pool" {
-  for_each          = local.worker_pools_map
+  for_each = {
+    for index, cluster in local.worker_pools_map : index => cluster
+    if cluster.kube_type == "iks"
+  }
   vpc_id            = each.value.vpc_id
   resource_group_id = local.resource_groups[each.value.resource_group]
   entitlement       = each.value.entitlement
@@ -157,14 +195,14 @@ locals {
 
   # for each cluster in the clusters_map, get the addons and their versions and create an addons map including the corosponding csi_driver_version
   cluster_addons = {
-    for cluster in var.clusters : "${var.prefix}-${cluster.name}" => {
+    for cluster in local.clusters_map : "${var.prefix}-${cluster.name}" => {
       id                = ibm_container_vpc_cluster.cluster["${var.prefix}-${cluster.name}"].id
       resource_group_id = ibm_container_vpc_cluster.cluster["${var.prefix}-${cluster.name}"].resource_group_id
       addons = merge(
        { for addon_name, addon_version in(cluster.addons != null ? cluster.addons : {}) : addon_name => addon_version if addon_version != null },
        local.csi_driver_version["${var.prefix}-${cluster.name}"] != null ? { vpc-block-csi-driver = local.csi_driver_version["${var.prefix}-${cluster.name}"] } : {}
      )
-    }
+    } if cluster.kube_type == "iks"
   }
 }
 
@@ -193,3 +231,77 @@ resource "ibm_container_addons" "addons" {
     create = "1h"
   }
 }
+
+##############################################################################
+# Create ROKS on VPC Cluster
+##############################################################################
+
+module "cluster" {
+  for_each = {
+    for index, cluster in local.clusters_map : index => cluster
+    if cluster.kube_type == "openshift"
+  }
+  source            = "terraform-ibm-modules/base-ocp-vpc/ibm"
+  version           = "3.30.1"
+  resource_group_id = local.resource_groups[each.value.resource_group]
+  region            = var.region
+  cluster_name      = each.value.cluster_name
+  vpc_id            = each.value.vpc_id
+  ocp_entitlement   = each.value.entitlement
+  vpc_subnets       = each.value.vpc_subnets
+  access_tags       = each.value.access_tags
+  worker_pools = concat(
+    [
+      {
+        subnet_prefix    = each.value.subnet_names[0]
+        pool_name        = "default"
+        machine_type     = each.value.machine_type
+        workers_per_zone = each.value.workers_per_subnet
+        operating_system = each.value.operating_system
+        labels           = each.value.labels
+        boot_volume_encryption_kms_config = {
+          crk             = each.value.boot_volume_crk_name == null ? null : regex("key:(.*)", module.key_management.key_map[each.value.boot_volume_crk_name].crn)[0]
+          kms_instance_id = each.value.boot_volume_crk_name == null ? null : regex(".*:(.*):key:.*", module.key_management.key_map[each.value.boot_volume_crk_name].crn)[0]
+          kms_account_id  = each.value.boot_volume_crk_name == null ? null : regex("a/([a-f0-9]{32})", module.key_management.key_map[each.value.boot_volume_crk_name].crn)[0] == data.ibm_iam_account_settings.iam_account_settings.account_id ? null : regex("a/([a-f0-9]{32})", module.key_management.key_map[each.value.boot_volume_crk_name].crn)[0]
+        }
+      }
+    ],
+    each.value.worker != null ? [
+      for pool in each.value.worker :
+      {
+        vpc_subnets      = pool.vpc_subnets
+        pool_name        = pool.name
+        machine_type     = pool.flavor
+        workers_per_zone = pool.workers_per_subnet
+        operating_system = pool.operating_system
+        labels           = pool.labels
+        boot_volume_encryption_kms_config = {
+          crk             = pool.boot_volume_crk_name == null ? null : regex("key:(.*)", module.key_management.key_map[pool.boot_volume_crk_name].crn)[0]
+          kms_instance_id = pool.boot_volume_crk_name == null ? null : regex(".*:(.*):key:.*", module.key_management.key_map[pool.boot_volume_crk_name].crn)[0]
+          kms_account_id  = pool.boot_volume_crk_name == null ? null : regex("a/([a-f0-9]{32})", module.key_management.key_map[pool.boot_volume_crk_name].crn)[0] == data.ibm_iam_account_settings.iam_account_settings.account_id ? null : regex("a/([a-f0-9]{32})", module.key_management.key_map[pool.boot_volume_crk_name].crn)[0]
+        }
+      }
+    ] : []
+  )
+  force_delete_storage                  = each.value.cluster_force_delete_storage
+  operating_system                      = each.value.operating_system
+  ocp_version                           = each.value.kube_version == null || each.value.kube_version == "default" ? each.value.kube_version : replace(each.value.kube_version, "_openshift", "")
+  import_default_worker_pool_on_create  = each.value.import_default_worker_pool_on_create
+  allow_default_worker_pool_replacement = each.value.allow_default_worker_pool_replacement
+  tags                                  = var.tags
+  use_existing_cos                      = true
+  existing_cos_id                       = each.value.cos_instance_crn
+  disable_public_endpoint               = coalesce(each.value.disable_public_endpoint, true) # disable if not set or null
+  verify_worker_network_readiness       = each.value.verify_worker_network_readiness
+  use_private_endpoint                  = each.value.use_private_endpoint
+  addons                                = each.value.addons
+  manage_all_addons                     = each.value.manage_all_addons
+  disable_outbound_traffic_protection   = each.value.disable_outbound_traffic_protection
+  kms_config = each.value.kms_config == null ? {} : {
+    crk_id           = regex("key:(.*)", module.key_management.key_map[each.value.kms_config.crk_name].crn)[0]
+    instance_id      = regex(".*:(.*):key:.*", module.key_management.key_map[each.value.kms_config.crk_name].crn)[0]
+    private_endpoint = each.value.kms_config.private_endpoint
+    account_id       = regex("a/([a-f0-9]{32})", module.key_management.key_map[each.value.kms_config.crk_name].crn)[0] == data.ibm_iam_account_settings.iam_account_settings.account_id ? null : regex("a/([a-f0-9]{32})", module.key_management.key_map[each.value.kms_config.crk_name].crn)[0]
+    wait_for_apply   = each.value.kms_wait_for_apply
+  }
+}
```
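For reference, the regex() calls in the new module "cluster" block above all parse pieces out of a KMS key CRN (Key Protect shown here). A minimal sketch with a fabricated CRN, structure only, shows what each expression returns:

```hcl
locals {
  # Fabricated example key CRN; only the structure matters here.
  example_key_crn = "crn:v1:bluemix:public:kms:us-south:a/0123456789abcdef0123456789abcdef:1a2b3c4d-0000-0000-0000-000000000000:key:9e9f9a9b-1111-2222-3333-444444444444"

  # Same expressions as used above; regex() returns the capture group, so [0] is the captured value.
  crk_id          = regex("key:(.*)", local.example_key_crn)[0]         # key ID: "9e9f9a9b-1111-2222-3333-444444444444"
  kms_instance_id = regex(".*:(.*):key:.*", local.example_key_crn)[0]   # KMS instance GUID: "1a2b3c4d-0000-0000-0000-000000000000"
  kms_account_id  = regex("a/([a-f0-9]{32})", local.example_key_crn)[0] # account ID: "0123456789abcdef0123456789abcdef"
}
```

In the module call, the extracted account ID is additionally compared against data.ibm_iam_account_settings.iam_account_settings.account_id and set to null when the key lives in the same account.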

dynamic_values/config_modules/cluster_worker_pools/worker_pools.tf

Lines changed: 8 additions & 6 deletions
```diff
@@ -79,12 +79,14 @@ locals {
       [
         for pool in cluster.worker_pools :
         merge(pool, {
-          composed_name  = "${var.prefix}-${cluster.name}-${pool.name}"            # Composed name
-          cluster_name   = "${var.prefix}-${cluster.name}"                         # Cluster name with prefix
-          entitlement    = cluster.kube_type == "iks" ? null : cluster.entitlement # Add entitlement for roks pools
-          resource_group = cluster.resource_group                                  # add cluster rg
-          vpc_id         = var.vpc_modules[pool.vpc_name].vpc_id                   # add vpc_id
-          subnets        = module.worker_pool_subnets["${var.prefix}-${cluster.name}-${pool.name}"].subnets
+          composed_name    = "${var.prefix}-${cluster.name}-${pool.name}"            # Composed name
+          cluster_name     = "${var.prefix}-${cluster.name}"                         # Cluster name with prefix
+          entitlement      = cluster.kube_type == "iks" ? null : cluster.entitlement # Add entitlement for roks pools
+          resource_group   = cluster.resource_group                                  # add cluster rg
+          vpc_id           = var.vpc_modules[pool.vpc_name].vpc_id                   # add vpc_id
+          subnets          = module.worker_pool_subnets["${var.prefix}-${cluster.name}-${pool.name}"].subnets
+          kube_type        = cluster.kube_type
+          operating_system = lookup(pool, "operating_system", null)
         }) if pool != null
       ] if cluster.worker_pools != null
     ]
```
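The lookup() with a null default above keeps worker-pool entries that do not define operating_system from failing. A small sketch with fabricated pool values:

```hcl
locals {
  pool_without_os = { name = "logging" }
  pool_with_os    = { name = "default", operating_system = "EXAMPLE_OS" } # value is a placeholder

  os_a = lookup(local.pool_without_os, "operating_system", null) # => null
  os_b = lookup(local.pool_with_os, "operating_system", null)    # => "EXAMPLE_OS"
}
```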

dynamic_values/config_modules/clusters/clusters.tf

Lines changed: 33 additions & 0 deletions
```diff
@@ -50,6 +50,14 @@ module "cluster_subnets" {
 
 ##############################################################################
 
+
+module "worker_pools" {
+  source      = "../cluster_worker_pools"
+  prefix      = var.prefix
+  clusters    = var.clusters
+  vpc_modules = var.vpc_modules
+}
+
 ##############################################################################
 # Cluster List To Map
 ##############################################################################
@@ -62,6 +70,31 @@ module "composed_cluster_map" {
       vpc_id           = var.vpc_modules[cluster.vpc_name].vpc_id
       subnets          = module.cluster_subnets[cluster.name].subnets
       cos_instance_crn = cluster.kube_type == "openshift" ? var.cos_instance_ids[cluster.cos_name] : null
+      cluster_name     = "${var.prefix}-${cluster.name}"
+      vpc_subnets = {
+        (cluster.subnet_names[0]) = [
+          for zone in module.cluster_subnets[cluster.name].subnets :
+          {
+            id         = zone.id
+            zone       = zone.zone
+            cidr_block = zone.cidr
+          }
+        ]
+
+      }
+      worker = cluster.worker_pools != null ? [
+        for pool in cluster.worker_pools :
+        merge(module.worker_pools.map["${var.prefix}-${cluster.name}-${pool.name}"], {
+          vpc_subnets = [
+            for zone in module.worker_pools.map["${var.prefix}-${cluster.name}-${pool.name}"].subnets :
+            {
+              id         = zone.id
+              zone       = zone.zone
+              cidr_block = zone.cidr
+            }
+          ]
+        }) if pool != null
+      ] : []
     })
   ]
   prefix = var.prefix
```
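The composed cluster map now carries the inputs the base-ocp-vpc module expects: a vpc_subnets map keyed by the first subnet name, plus a per-pool subnet list under worker. A fabricated illustration of the shape of one entry (names, IDs, and CIDRs are made up):

```hcl
locals {
  example_cluster_entry = {
    cluster_name = "lz-workload-cluster"
    vpc_subnets = {
      "vsi-zone-1" = [
        { id = "0717-aaaa", zone = "us-south-1", cidr_block = "10.40.10.0/24" },
        { id = "0727-bbbb", zone = "us-south-2", cidr_block = "10.50.10.0/24" }
      ]
    }
    # one merged worker_pools.map entry per non-null pool, each with its own vpc_subnets list
    worker = []
  }
}
```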

f5_vsi.tf

Lines changed: 1 addition & 1 deletion
```diff
@@ -117,7 +117,7 @@ locals {
 
 module "f5_vsi" {
   source                = "terraform-ibm-modules/landing-zone-vsi/ibm"
-  version               = "3.3.0"
+  version               = "4.2.0"
   for_each              = local.f5_vsi_map
   resource_group_id     = each.value.resource_group == null ? null : local.resource_groups[each.value.resource_group]
   create_security_group = each.value.security_group == null ? false : true
```

outputs.tf

Lines changed: 14 additions & 22 deletions
```diff
@@ -68,15 +68,21 @@ output "bastion_host_names" {
 
 output "cluster_names" {
   description = "List of create cluster names"
-  value = [
-    for cluster in ibm_container_vpc_cluster.cluster :
-    cluster.name
-  ]
+  value = flatten([
+    [
+      for cluster in ibm_container_vpc_cluster.cluster :
+      cluster.name
+    ],
+    [
+      for cluster in module.cluster :
+      cluster.cluster_name
+    ]
+  ])
 }
 
 output "workload_cluster_id" {
   description = "The id of the workload cluster. If the cluster name does not exactly match the prefix-workload-cluster pattern it will be null."
-  value       = lookup(ibm_container_vpc_cluster.cluster, "${var.prefix}-workload-cluster", null) != null ? ibm_container_vpc_cluster.cluster["${var.prefix}-workload-cluster"].id : null
+  value       = lookup(local.cluster_data, "${var.prefix}-workload-cluster", null) != null ? local.cluster_data["${var.prefix}-workload-cluster"].id : null
 }
 
 output "workload_cluster_name" {
@@ -86,7 +92,7 @@ output "workload_cluster_name" {
 
 output "management_cluster_id" {
   description = "The id of the management cluster. If the cluster name does not exactly match the prefix-management-cluster pattern it will be null."
-  value       = lookup(ibm_container_vpc_cluster.cluster, "${var.prefix}-management-cluster", null) != null ? ibm_container_vpc_cluster.cluster["${var.prefix}-management-cluster"].id : null
+  value       = lookup(local.cluster_data, "${var.prefix}-management-cluster", null) != null ? local.cluster_data["${var.prefix}-management-cluster"].id : null
 }
 
 output "management_cluster_name" {
@@ -96,21 +102,7 @@ output "management_cluster_name" {
 
 output "cluster_data" {
   description = "List of cluster data"
-  value = {
-    for cluster in ibm_container_vpc_cluster.cluster :
-    cluster.name => {
-      crn                          = cluster.crn
-      id                           = cluster.id
-      resource_group_name          = cluster.resource_group_name
-      resource_group_id            = cluster.resource_group_id
-      vpc_id                       = cluster.vpc_id
-      region                       = var.region
-      private_service_endpoint_url = cluster.private_service_endpoint_url
-      public_service_endpoint_url  = (cluster.public_service_endpoint_url != "" && cluster.public_service_endpoint_url != null) ? cluster.public_service_endpoint_url : null
-      ingress_hostname             = cluster.ingress_hostname
-      cluster_console_url          = (cluster.public_service_endpoint_url != "" && cluster.public_service_endpoint_url != null) ? "https://console-openshift-console.${cluster.ingress_hostname}" : null
-    }
-  }
+  value       = local.cluster_data
 }
 
 ##############################################################################
@@ -239,7 +231,7 @@ output "vpc_resource_list" {
       name              = vpc.vpc_data.name
       resource_group_id = vpc.vpc_data.resource_group
       region            = var.region
-      clusters = flatten([for cluster in ibm_container_vpc_cluster.cluster :
+      clusters = flatten([for cluster in local.cluster_data :
        cluster.id if cluster.vpc_id == vpc.vpc_data.id
      ])
      vsi = distinct(flatten([
```
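These output changes all follow from local.cluster_data being the merged map of both provisioning paths, so name lookups behave the same whether a cluster came from the ibm_container_vpc_cluster resource (IKS) or from module.cluster (ROKS). A tiny sketch with a fabricated map:

```hcl
locals {
  # Fabricated stand-in for local.cluster_data after the merge.
  example_cluster_data = {
    "lz-workload-cluster" = { id = "cluster-id-roks", vpc_id = "vpc-1" }
    "lz-iks-cluster"      = { id = "cluster-id-iks", vpc_id = "vpc-2" }
  }

  # Mirrors the workload_cluster_id output: null when the expected key is absent.
  workload_cluster_id = lookup(local.example_cluster_data, "lz-workload-cluster", null) != null ? local.example_cluster_data["lz-workload-cluster"].id : null
}
```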

patterns/mixed/config.tf

Lines changed: 12 additions & 9 deletions
```diff
@@ -111,15 +111,18 @@ locals {
        crk_name         = "${var.prefix}-roks-key"
        private_endpoint = true
      }
-      workers_per_subnet   = var.workers_per_zone
-      machine_type         = var.flavor
-      kube_type            = "openshift"
-      kube_version         = var.kube_version
-      resource_group       = "${var.prefix}-${var.vpcs[1]}-rg"
-      cos_name             = "cos"
-      entitlement          = var.entitlement
-      secondary_storage    = var.secondary_storage
-      boot_volume_crk_name = "${var.prefix}-roks-key"
+      workers_per_subnet                   = var.workers_per_zone
+      machine_type                         = var.flavor
+      kube_type                            = "openshift"
+      kube_version                         = var.kube_version
+      resource_group                       = "${var.prefix}-${var.vpcs[1]}-rg"
+      cos_name                             = "cos"
+      entitlement                          = var.entitlement
+      secondary_storage                    = var.secondary_storage
+      use_private_endpoint                 = var.use_private_endpoint
+      verify_worker_network_readiness      = var.verify_worker_network_readiness
+      boot_volume_crk_name                 = "${var.prefix}-roks-key"
+      import_default_worker_pool_on_create = false
       # By default, create dedicated pool for logging
       worker_pools = [
         {
```

patterns/mixed/variables.tf

Lines changed: 13 additions & 0 deletions
```diff
@@ -211,6 +211,19 @@ variable "wait_till" {
   }
 }
 
+# Exposing these two variables is necessary since GitHub Runtime cannot execute the verify_worker_network_readiness script during the upgrade test. We can remove these variables once we enable the ability to run upgrade tests through Schematics.
+variable "verify_worker_network_readiness" {
+  type        = bool
+  description = "By setting this to true, a script will run kubectl commands to verify that all worker nodes can communicate successfully with the master. If the runtime does not have access to the kube cluster to run kubectl commands, this should be set to false."
+  default     = true
+}
+
+variable "use_private_endpoint" {
+  type        = bool
+  description = "Set this to true to force all api calls to use the IBM Cloud private endpoints."
+  default     = true
+}
+
 ##############################################################################
 
 
```
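Usage note: the two variables added to patterns/mixed/variables.tf above exist so that a runtime without access to the cluster or to IBM Cloud private endpoints (for example, a public CI runner) can opt out of the private-only behaviour. A minimal tfvars sketch:

```hcl
# terraform.tfvars (sketch)
verify_worker_network_readiness = false # runtime cannot run kubectl against the cluster
use_private_endpoint            = false # runtime has no route to IBM Cloud private endpoints
```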