diff --git a/README.md b/README.md
index 8ac1018b1..b008af933 100644
--- a/README.md
+++ b/README.md
@@ -911,7 +911,7 @@ module "cluster_pattern" {
|------|-------------|------|---------|:--------:|
| [appid](#input\_appid) | The App ID instance to be used for the teleport vsi deployments |
object({
name = optional(string)
resource_group = optional(string)
use_data = optional(bool)
keys = optional(list(string))
use_appid = bool
})
|
{
"use_appid": false
}
| no |
| [atracker](#input\_atracker) | atracker variables |
object({
resource_group = string
receive_global_events = bool
collector_bucket_name = string
add_route = bool
})
| n/a | yes |
-| [clusters](#input\_clusters) | A list describing clusters workloads to create |
list(
object({
name = string # Name of Cluster
vpc_name = string # Name of VPC
subnet_names = list(string) # List of vpc subnets for cluster
workers_per_subnet = number # Worker nodes per subnet.
machine_type = string # Worker node flavor
kube_type = string # iks or openshift
kube_version = optional(string) # Can be a version from `ibmcloud ks versions` or `default`
entitlement = optional(string) # entitlement option for openshift
secondary_storage = optional(string) # Secondary storage type
pod_subnet = optional(string) # Portable subnet for pods
service_subnet = optional(string) # Portable subnet for services
resource_group = string # Resource Group used for cluster
cos_name = optional(string) # Name of COS instance Required only for OpenShift clusters
access_tags = optional(list(string), [])
boot_volume_crk_name = optional(string) # Boot volume encryption key name
disable_public_endpoint = optional(bool, true) # disable cluster public, leaving only private endpoint
disable_outbound_traffic_protection = optional(bool, false) # public outbound access from the cluster workers
cluster_force_delete_storage = optional(bool, false) # force the removal of persistent storage associated with the cluster during cluster deletion
operating_system = optional(string, null) #The operating system of the workers in the default worker pool. If no value is specified, the current default version OS will be used. See https://cloud.ibm.com/docs/openshift?topic=openshift-openshift_versions#openshift_versions_available .
kms_wait_for_apply = optional(bool, true) # make terraform wait until KMS is applied to master and it is ready and deployed
verify_cluster_network_readiness = optional(bool, true) # Flag to run a script will run kubectl commands to verify that all worker nodes can communicate successfully with the master. If the runtime does not have access to the kube cluster to run kubectl commands, this should be set to false.
use_ibm_cloud_private_api_endpoints = optional(bool, true) # Flag to force all cluster related api calls to use the IBM Cloud private endpoints.
import_default_worker_pool_on_create = optional(bool) # (Advanced users) Whether to handle the default worker pool as a stand-alone ibm_container_vpc_worker_pool resource on cluster creation. Only set to false if you understand the implications of managing the default worker pool as part of the cluster resource. Set to true to import the default worker pool as a separate resource. Set to false to manage the default worker pool as part of the cluster resource.
allow_default_worker_pool_replacement = optional(bool) # (Advanced users) Set to true to allow the module to recreate a default worker pool. Only use in the case where you are getting an error indicating that the default worker pool cannot be replaced on apply. Once the default worker pool is handled as a stand-alone ibm_container_vpc_worker_pool, if you wish to make any change to the default worker pool which requires the re-creation of the default pool set this variable to true
labels = optional(map(string)) # A list of labels that you want to add to the default worker pool.
addons = optional(object({ # Map of OCP cluster add-on versions to install
debug-tool = optional(string)
image-key-synchronizer = optional(string)
openshift-data-foundation = optional(string)
vpc-file-csi-driver = optional(string)
static-route = optional(string)
cluster-autoscaler = optional(string)
vpc-block-csi-driver = optional(string)
ibm-storage-operator = optional(string)
}), {})
manage_all_addons = optional(bool, false) # Instructs Terraform to manage all cluster addons, even if addons were installed outside of the module. If set to 'true' this module will destroy any addons that were installed by other sources.
kms_config = optional(
object({
crk_name = string # Name of key
private_endpoint = optional(bool) # Private endpoint
})
)
worker_pools = optional(
list(
object({
name = string # Worker pool name
vpc_name = string # VPC name
workers_per_subnet = number # Worker nodes per subnet
flavor = string # Worker node flavor
subnet_names = list(string) # List of vpc subnets for worker pool
entitlement = optional(string) # entitlement option for openshift
secondary_storage = optional(string) # Secondary storage type
boot_volume_crk_name = optional(string) # Boot volume encryption key name
operating_system = string # The operating system of the workers in the default worker pool. See https://cloud.ibm.com/docs/openshift?topic=openshift-openshift_versions#openshift_versions_available .
labels = optional(map(string)) # A list of labels that you want to add to all the worker nodes in the worker pool.
})
)
)
})
)
| n/a | yes |
+| [clusters](#input\_clusters) | A list describing cluster workloads to create |
list(
object({
name = string # Name of Cluster
vpc_name = string # Name of VPC
subnet_names = list(string) # List of vpc subnets for cluster
workers_per_subnet = number # Worker nodes per subnet.
machine_type = string # Worker node flavor
kube_type = string # iks or openshift
kube_version = optional(string) # Can be a version from `ibmcloud ks versions` or `default`
entitlement = optional(string) # entitlement option for openshift
secondary_storage = optional(string) # Secondary storage type
pod_subnet = optional(string) # Portable subnet for pods
service_subnet = optional(string) # Portable subnet for services
resource_group = string # Resource Group used for cluster
cos_name = optional(string) # Name of COS instance. Required only for OpenShift clusters
access_tags = optional(list(string), [])
boot_volume_crk_name = optional(string) # Boot volume encryption key name
disable_public_endpoint = optional(bool, true) # disable the cluster public endpoint, leaving only the private endpoint
disable_outbound_traffic_protection = optional(bool, false) # public outbound access from the cluster workers
cluster_force_delete_storage = optional(bool, false) # force the removal of persistent storage associated with the cluster during cluster deletion
operating_system = string # The operating system of the workers in the default worker pool. See https://cloud.ibm.com/docs/openshift?topic=openshift-openshift_versions#openshift_versions_available .
kms_wait_for_apply = optional(bool, true) # make terraform wait until KMS is applied to master and it is ready and deployed
verify_cluster_network_readiness = optional(bool, true) # Flag to run a script that runs kubectl commands to verify that all worker nodes can communicate successfully with the master. If the runtime does not have access to the kube cluster to run kubectl commands, this should be set to false.
use_ibm_cloud_private_api_endpoints = optional(bool, true) # Flag to force all cluster related api calls to use the IBM Cloud private endpoints.
import_default_worker_pool_on_create = optional(bool) # (Advanced users) Whether to handle the default worker pool as a stand-alone ibm_container_vpc_worker_pool resource on cluster creation. Only set to false if you understand the implications of managing the default worker pool as part of the cluster resource. Set to true to import the default worker pool as a separate resource. Set to false to manage the default worker pool as part of the cluster resource.
allow_default_worker_pool_replacement = optional(bool) # (Advanced users) Set to true to allow the module to recreate a default worker pool. Only use in the case where you are getting an error indicating that the default worker pool cannot be replaced on apply. Once the default worker pool is handled as a stand-alone ibm_container_vpc_worker_pool, if you wish to make any change to the default worker pool that requires re-creation of the default pool, set this variable to true.
labels = optional(map(string)) # A list of labels that you want to add to the default worker pool.
addons = optional(object({ # Map of OCP cluster add-on versions to install
debug-tool = optional(string)
image-key-synchronizer = optional(string)
openshift-data-foundation = optional(string)
vpc-file-csi-driver = optional(string)
static-route = optional(string)
cluster-autoscaler = optional(string)
vpc-block-csi-driver = optional(string)
ibm-storage-operator = optional(string)
}), {})
manage_all_addons = optional(bool, false) # Instructs Terraform to manage all cluster addons, even if addons were installed outside of the module. If set to 'true' this module will destroy any addons that were installed by other sources.
kms_config = optional(
object({
crk_name = string # Name of key
private_endpoint = optional(bool) # Private endpoint
})
)
worker_pools = optional(
list(
object({
name = string # Worker pool name
vpc_name = string # VPC name
workers_per_subnet = number # Worker nodes per subnet
flavor = string # Worker node flavor
subnet_names = list(string) # List of vpc subnets for worker pool
entitlement = optional(string) # entitlement option for openshift
secondary_storage = optional(string) # Secondary storage type
boot_volume_crk_name = optional(string) # Boot volume encryption key name
operating_system = string # The operating system of the workers in the worker pool. See https://cloud.ibm.com/docs/openshift?topic=openshift-openshift_versions#openshift_versions_available .
labels = optional(map(string)) # A list of labels that you want to add to all the worker nodes in the worker pool.
})
)
)
})
)
| n/a | yes |
| [cos](#input\_cos) | Object describing the cloud object storage instance, buckets, and keys. Set `use_data` to false to create instance |
list(
object({
name = string
use_data = optional(bool)
resource_group = string
plan = optional(string)
random_suffix = optional(bool) # Use a random suffix for COS instance
access_tags = optional(list(string), [])
skip_kms_s2s_auth_policy = optional(bool, false) # skip auth policy between this instance and kms instance, useful if existing resources are used
skip_flowlogs_s2s_auth_policy = optional(bool, false) # skip auth policy between flow logs service and this instance, set to true if this policy is already in place on account
skip_atracker_s2s_auth_policy = optional(bool, false) # skip auth policy between atracker service and this instance, set to true if this is existing recipient of atracker already
buckets = list(object({
name = string
storage_class = string
endpoint_type = string
force_delete = bool
single_site_location = optional(string)
region_location = optional(string)
cross_region_location = optional(string)
kms_key = optional(string)
access_tags = optional(list(string), [])
allowed_ip = optional(list(string), [])
hard_quota = optional(number)
archive_rule = optional(object({
days = number
enable = bool
rule_id = optional(string)
type = string
}))
expire_rule = optional(object({
days = optional(number)
date = optional(string)
enable = bool
expired_object_delete_marker = optional(string)
prefix = optional(string)
rule_id = optional(string)
}))
activity_tracking = optional(object({
activity_tracker_crn = string
read_data_events = bool
write_data_events = bool
management_events = bool
}))
metrics_monitoring = optional(object({
metrics_monitoring_crn = string
request_metrics_enabled = optional(bool)
usage_metrics_enabled = optional(bool)
}))
}))
keys = optional(
list(object({
name = string
role = string
enable_HMAC = bool
}))
)

})
)
| n/a | yes |
| [enable\_transit\_gateway](#input\_enable\_transit\_gateway) | Create transit gateway | `bool` | `true` | no |
| [existing\_vpc\_cbr\_zone\_id](#input\_existing\_vpc\_cbr\_zone\_id) | ID of the existing CBR (Context-based restrictions) network zone, with context set to the VPC. This zone is used in a CBR rule, which allows traffic to flow only from the landing zone VPCs to specific cloud services. | `string` | `null` | no |
diff --git a/examples/override-example/override.json b/examples/override-example/override.json
index 6b13e59b9..ba918e3bf 100644
--- a/examples/override-example/override.json
+++ b/examples/override-example/override.json
@@ -18,6 +18,7 @@
       "name": "workload-cluster",
       "secondary_storage": "300gb.5iops-tier",
       "resource_group": "slz-workload-rg",
+      "operating_system": "REDHAT_8_64",
       "use_ibm_cloud_private_api_endpoints": false,
       "verify_cluster_network_readiness": false,
       "kms_config": {
diff --git a/patterns/mixed/config.tf b/patterns/mixed/config.tf
index 69489cca2..805bf5366 100644
--- a/patterns/mixed/config.tf
+++ b/patterns/mixed/config.tf
@@ -120,6 +120,7 @@ locals {
       entitlement                          = var.entitlement
       secondary_storage                    = var.secondary_storage
       use_private_endpoint                 = var.use_private_endpoint
+      operating_system                     = "REDHAT_8_64"
       verify_worker_network_readiness      = var.verify_worker_network_readiness
       boot_volume_crk_name                 = "${var.prefix}-roks-key"
       import_default_worker_pool_on_create = false
diff --git a/patterns/roks/override.json b/patterns/roks/override.json
index 2df034350..7818d5f58 100644
--- a/patterns/roks/override.json
+++ b/patterns/roks/override.json
@@ -16,7 +16,7 @@
       "resource_group": "slz-management-rg",
       "disable_outbound_traffic_protection": false,
       "cluster_force_delete_storage": false,
-      "operating_system": null,
+      "operating_system": "REDHAT_8_64",
       "kms_wait_for_apply": true,
       "kms_config": {
         "crk_name": "slz-roks-key",
@@ -53,6 +53,7 @@
       "machine_type": "bx2.16x64",
       "name": "workload-cluster",
       "resource_group": "slz-workload-rg",
+      "operating_system": "REDHAT_8_64",
       "disable_outbound_traffic_protection": false,
       "cluster_force_delete_storage": false,
       "kms_wait_for_apply": true,
@@ -222,7 +223,7 @@
     ],
     "security_groups": [],
     "service_endpoints": "public-and-private",
-    "existing_vpc_cbr_zone_id" : null,
+    "existing_vpc_cbr_zone_id": null,
     "ssh_keys": [],
     "transit_gateway_connections": [
       "management",
diff --git a/variables.tf b/variables.tf
index f623ee4aa..8a61892aa 100644
--- a/variables.tf
+++ b/variables.tf
@@ -850,18 +850,18 @@ variable "clusters" {
       resource_group = string           # Resource Group used for cluster
       cos_name       = optional(string) # Name of COS instance Required only for OpenShift clusters
       access_tags    = optional(list(string), [])
-      boot_volume_crk_name                  = optional(string)       # Boot volume encryption key name
-      disable_public_endpoint               = optional(bool, true)   # disable cluster public, leaving only private endpoint
-      disable_outbound_traffic_protection   = optional(bool, false)  # public outbound access from the cluster workers
-      cluster_force_delete_storage          = optional(bool, false)  # force the removal of persistent storage associated with the cluster during cluster deletion
-      operating_system                      = optional(string, null) #The operating system of the workers in the default worker pool. If no value is specified, the current default version OS will be used. See https://cloud.ibm.com/docs/openshift?topic=openshift-openshift_versions#openshift_versions_available .
-      kms_wait_for_apply                    = optional(bool, true)   # make terraform wait until KMS is applied to master and it is ready and deployed
-      verify_cluster_network_readiness      = optional(bool, true)   # Flag to run a script will run kubectl commands to verify that all worker nodes can communicate successfully with the master. If the runtime does not have access to the kube cluster to run kubectl commands, this should be set to false.
-      use_ibm_cloud_private_api_endpoints   = optional(bool, true)   # Flag to force all cluster related api calls to use the IBM Cloud private endpoints.
-      import_default_worker_pool_on_create  = optional(bool)         # (Advanced users) Whether to handle the default worker pool as a stand-alone ibm_container_vpc_worker_pool resource on cluster creation. Only set to false if you understand the implications of managing the default worker pool as part of the cluster resource. Set to true to import the default worker pool as a separate resource. Set to false to manage the default worker pool as part of the cluster resource.
-      allow_default_worker_pool_replacement = optional(bool)         # (Advanced users) Set to true to allow the module to recreate a default worker pool. Only use in the case where you are getting an error indicating that the default worker pool cannot be replaced on apply. Once the default worker pool is handled as a stand-alone ibm_container_vpc_worker_pool, if you wish to make any change to the default worker pool which requires the re-creation of the default pool set this variable to true
-      labels                                = optional(map(string))  # A list of labels that you want to add to the default worker pool.
-      addons                                = optional(object({      # Map of OCP cluster add-on versions to install
+      boot_volume_crk_name                  = optional(string)       # Boot volume encryption key name
+      disable_public_endpoint               = optional(bool, true)   # disable the cluster public endpoint, leaving only the private endpoint
+      disable_outbound_traffic_protection   = optional(bool, false)  # public outbound access from the cluster workers
+      cluster_force_delete_storage          = optional(bool, false)  # force the removal of persistent storage associated with the cluster during cluster deletion
+      operating_system                      = string                 # The operating system of the workers in the default worker pool. See https://cloud.ibm.com/docs/openshift?topic=openshift-openshift_versions#openshift_versions_available .
+      kms_wait_for_apply                    = optional(bool, true)   # make terraform wait until KMS is applied to master and it is ready and deployed
+      verify_cluster_network_readiness      = optional(bool, true)   # Flag to run a script that runs kubectl commands to verify that all worker nodes can communicate successfully with the master. If the runtime does not have access to the kube cluster to run kubectl commands, this should be set to false.
+      use_ibm_cloud_private_api_endpoints   = optional(bool, true)   # Flag to force all cluster related api calls to use the IBM Cloud private endpoints.
+      import_default_worker_pool_on_create  = optional(bool)         # (Advanced users) Whether to handle the default worker pool as a stand-alone ibm_container_vpc_worker_pool resource on cluster creation. Only set to false if you understand the implications of managing the default worker pool as part of the cluster resource. Set to true to import the default worker pool as a separate resource. Set to false to manage the default worker pool as part of the cluster resource.
+      allow_default_worker_pool_replacement = optional(bool)         # (Advanced users) Set to true to allow the module to recreate a default worker pool. Only use in the case where you are getting an error indicating that the default worker pool cannot be replaced on apply. Once the default worker pool is handled as a stand-alone ibm_container_vpc_worker_pool, if you wish to make any change to the default worker pool that requires re-creation of the default pool, set this variable to true.
+      labels                                = optional(map(string))  # A list of labels that you want to add to the default worker pool.
+      addons                                = optional(object({      # Map of OCP cluster add-on versions to install
         debug-tool                = optional(string)
         image-key-synchronizer    = optional(string)
         openshift-data-foundation = optional(string)
@@ -889,7 +889,7 @@ variable "clusters" {
       entitlement          = optional(string) # entitlement option for openshift
       secondary_storage    = optional(string) # Secondary storage type
       boot_volume_crk_name = optional(string) # Boot volume encryption key name
-      operating_system     = string           # The operating system of the workers in the default worker pool. See https://cloud.ibm.com/docs/openshift?topic=openshift-openshift_versions#openshift_versions_available .
+      operating_system     = string           # The operating system of the workers in the worker pool. See https://cloud.ibm.com/docs/openshift?topic=openshift-openshift_versions#openshift_versions_available .
       labels               = optional(map(string)) # A list of labels that you want to add to all the worker nodes in the worker pool.
       })
     )
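For reference, a minimal `clusters` entry that satisfies the updated schema, with `operating_system` now required, might look like the sketch below. The flavor, resource group, cluster name, and key name follow the values used in `patterns/roks/override.json` above; the VPC name, subnet names, and COS instance name are placeholders, not values taken from this repository.

```hcl
clusters = [
  {
    name               = "workload-cluster"
    vpc_name           = "workload"      # placeholder VPC name
    subnet_names       = ["vsi-zone-1"]  # placeholder subnet list
    workers_per_subnet = 2
    machine_type       = "bx2.16x64"
    kube_type          = "openshift"
    resource_group     = "slz-workload-rg"
    cos_name           = "cos"           # placeholder; required only for OpenShift clusters
    operating_system   = "REDHAT_8_64"   # now required: the type is string, so no default is applied
    kms_config = {
      crk_name         = "slz-roks-key"
      private_endpoint = true
    }
  }
]
```

Because `operating_system` changes from `optional(string, null)` to `string`, configurations that previously omitted it or set it to `null` (as `patterns/roks/override.json` did) now need an explicit value, which is what the override and pattern updates in this diff provide.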