Merged

Changes from 3 commits
2 changes: 1 addition & 1 deletion README.md

Large diffs are not rendered by default.

1 change: 1 addition & 0 deletions examples/override-example/override.json
@@ -18,6 +18,7 @@
"name": "workload-cluster",
"secondary_storage": "300gb.5iops-tier",
"resource_group": "slz-workload-rg",
"operating_system": "REDHAT_8_64",
"use_ibm_cloud_private_api_endpoints": false,
"verify_cluster_network_readiness": false,
"kms_config": {
1 change: 1 addition & 0 deletions patterns/mixed/config.tf
@@ -120,6 +120,7 @@ locals {
entitlement = var.entitlement
secondary_storage = var.secondary_storage
use_private_endpoint = var.use_private_endpoint
operating_system = "REDHAT_8_64"
verify_worker_network_readiness = var.verify_worker_network_readiness
boot_volume_crk_name = "${var.prefix}-roks-key"
import_default_worker_pool_on_create = false
2 changes: 1 addition & 1 deletion patterns/roks/module/variables.tf
@@ -271,7 +271,7 @@ variable "cluster_force_delete_storage" {

variable "operating_system" {
type = string
description = "The operating system of the workers in the default worker pool. If no value is specified, the current default version OS will be used. See https://cloud.ibm.com/docs/openshift?topic=openshift-openshift_versions#openshift_versions_available ."
description = "The operating system of the workers in the default worker pool. See https://cloud.ibm.com/docs/openshift?topic=openshift-openshift_versions#openshift_versions_available ."
default = "REDHAT_8_64"
validation {
error_message = "RHEL 8 (REDHAT_8_64) or Red Hat Enterprise Linux CoreOS (RHCOS) are the allowed OS values. RHCOS requires VPC clusters created from 4.15 onwards. Upgraded clusters from 4.14 cannot use RHCOS."
5 changes: 3 additions & 2 deletions patterns/roks/override.json
@@ -16,7 +16,7 @@
"resource_group": "slz-management-rg",
"disable_outbound_traffic_protection": false,
"cluster_force_delete_storage": false,
"operating_system": null,
"operating_system": "REDHAT_8_64",
"kms_wait_for_apply": true,
"kms_config": {
"crk_name": "slz-roks-key",
@@ -53,6 +53,7 @@
"machine_type": "bx2.16x64",
"name": "workload-cluster",
"resource_group": "slz-workload-rg",
"operating_system": "REDHAT_8_64",
"disable_outbound_traffic_protection": false,
"cluster_force_delete_storage": false,
"kms_wait_for_apply": true,
@@ -222,7 +223,7 @@
],
"security_groups": [],
"service_endpoints": "public-and-private",
"existing_vpc_cbr_zone_id" : null,
"existing_vpc_cbr_zone_id": null,
"ssh_keys": [],
"transit_gateway_connections": [
"management",
3 changes: 1 addition & 2 deletions patterns/roks/variables.tf
@@ -289,8 +289,7 @@ variable "cluster_force_delete_storage" {

variable "operating_system" {
type = string
description = "The operating system of the workers in the default worker pool. If no value is specified, the current default version OS will be used. See https://cloud.ibm.com/docs/openshift?topic=openshift-openshift_versions#openshift_versions_available ."
default = "REDHAT_8_64"
description = "The operating system of the workers in the default worker pool. See https://cloud.ibm.com/docs/openshift?topic=openshift-openshift_versions#openshift_versions_available ."
Contributor

Isn't the same change needed in roks/module/variables.tf ?

Member Author

That's the tricky part: when we call the module folder directly, we need to pass a value for var.operating_system even if we have a value for it in the override.json. For example, in quickstart:
(Screenshot 2024-10-23 at 10 15 55 PM)

Another option is to remove var.operating_system, get the value from the JSON, and hardcode "REDHAT_8_64" in config.tf.

@toddgiguere, any idea on a different approach?
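
A minimal sketch of the two options being weighed here, assuming a quickstart-style root module that calls patterns/roks/module directly (the module label and the prefix/region inputs are illustrative, not taken from this PR):

# Option 1: keep var.operating_system and pass it through explicitly when the
# module folder is called directly, even though override.json also carries it.
module "roks_landing_zone" {
  source           = "./patterns/roks/module"
  prefix           = var.prefix            # hypothetical passthrough inputs
  region           = var.region
  operating_system = var.operating_system  # e.g. "REDHAT_8_64"
}

# Option 2: drop var.operating_system and hardcode the value in config.tf,
# as this PR does for the mixed pattern.
locals {
  operating_system = "REDHAT_8_64"
}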

Member

I think you should leave the default in place for var.operating_system, since there is a good default for the default worker pool (which is REDHAT_8_64).

I think the idea of not having a default value was more for the additional worker pools, which this variable is not used for.
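
For illustration, leaving the default in place while still restricting the allowed values would look roughly like this (a sketch modeled on the module-level variable above, not the exact contents of patterns/roks/variables.tf):

variable "operating_system" {
  type        = string
  description = "The operating system of the workers in the default worker pool."
  default     = "REDHAT_8_64" # keep the default for the default worker pool

  validation {
    error_message = "RHEL 8 (REDHAT_8_64) or Red Hat Enterprise Linux CoreOS (RHCOS) are the allowed OS values."
    condition     = var.operating_system == "REDHAT_8_64" || var.operating_system == "RHCOS"
  }
}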

validation {
error_message = "RHEL 8 (REDHAT_8_64) or Red Hat Enterprise Linux CoreOS (RHCOS) are the allowed OS values. RHCOS requires VPC clusters created from 4.15 onwards. Upgraded clusters from 4.14 cannot use RHCOS."
condition = var.operating_system == "REDHAT_8_64" || var.operating_system == "RHCOS"
1 change: 1 addition & 0 deletions tests/pr_test.go
@@ -209,6 +209,7 @@ func setupOptionsRoksPattern(t *testing.T, prefix string) *testhelper.TestOption
"enable_transit_gateway": false,
"use_ibm_cloud_private_api_endpoints": false,
"verify_cluster_network_readiness": false,
"operating_system": "REDHAT_8_64",
}

return options
26 changes: 13 additions & 13 deletions variables.tf
@@ -850,18 +850,18 @@ variable "clusters" {
resource_group = string # Resource Group used for cluster
cos_name = optional(string) # Name of COS instance Required only for OpenShift clusters
access_tags = optional(list(string), [])
boot_volume_crk_name = optional(string) # Boot volume encryption key name
disable_public_endpoint = optional(bool, true) # disable cluster public, leaving only private endpoint
disable_outbound_traffic_protection = optional(bool, false) # public outbound access from the cluster workers
cluster_force_delete_storage = optional(bool, false) # force the removal of persistent storage associated with the cluster during cluster deletion
operating_system = optional(string, null) #The operating system of the workers in the default worker pool. If no value is specified, the current default version OS will be used. See https://cloud.ibm.com/docs/openshift?topic=openshift-openshift_versions#openshift_versions_available .
kms_wait_for_apply = optional(bool, true) # make terraform wait until KMS is applied to master and it is ready and deployed
verify_cluster_network_readiness = optional(bool, true) # Flag to run a script will run kubectl commands to verify that all worker nodes can communicate successfully with the master. If the runtime does not have access to the kube cluster to run kubectl commands, this should be set to false.
use_ibm_cloud_private_api_endpoints = optional(bool, true) # Flag to force all cluster related api calls to use the IBM Cloud private endpoints.
import_default_worker_pool_on_create = optional(bool) # (Advanced users) Whether to handle the default worker pool as a stand-alone ibm_container_vpc_worker_pool resource on cluster creation. Only set to false if you understand the implications of managing the default worker pool as part of the cluster resource. Set to true to import the default worker pool as a separate resource. Set to false to manage the default worker pool as part of the cluster resource.
allow_default_worker_pool_replacement = optional(bool) # (Advanced users) Set to true to allow the module to recreate a default worker pool. Only use in the case where you are getting an error indicating that the default worker pool cannot be replaced on apply. Once the default worker pool is handled as a stand-alone ibm_container_vpc_worker_pool, if you wish to make any change to the default worker pool which requires the re-creation of the default pool set this variable to true
labels = optional(map(string)) # A list of labels that you want to add to the default worker pool.
addons = optional(object({ # Map of OCP cluster add-on versions to install
boot_volume_crk_name = optional(string) # Boot volume encryption key name
disable_public_endpoint = optional(bool, true) # disable cluster public, leaving only private endpoint
disable_outbound_traffic_protection = optional(bool, false) # public outbound access from the cluster workers
cluster_force_delete_storage = optional(bool, false) # force the removal of persistent storage associated with the cluster during cluster deletion
operating_system = string # The operating system of the workers in the default worker pool. See https://cloud.ibm.com/docs/openshift?topic=openshift-openshift_versions#openshift_versions_available .
kms_wait_for_apply = optional(bool, true) # make terraform wait until KMS is applied to master and it is ready and deployed
verify_cluster_network_readiness = optional(bool, true) # Flag to run a script will run kubectl commands to verify that all worker nodes can communicate successfully with the master. If the runtime does not have access to the kube cluster to run kubectl commands, this should be set to false.
use_ibm_cloud_private_api_endpoints = optional(bool, true) # Flag to force all cluster related api calls to use the IBM Cloud private endpoints.
import_default_worker_pool_on_create = optional(bool) # (Advanced users) Whether to handle the default worker pool as a stand-alone ibm_container_vpc_worker_pool resource on cluster creation. Only set to false if you understand the implications of managing the default worker pool as part of the cluster resource. Set to true to import the default worker pool as a separate resource. Set to false to manage the default worker pool as part of the cluster resource.
allow_default_worker_pool_replacement = optional(bool) # (Advanced users) Set to true to allow the module to recreate a default worker pool. Only use in the case where you are getting an error indicating that the default worker pool cannot be replaced on apply. Once the default worker pool is handled as a stand-alone ibm_container_vpc_worker_pool, if you wish to make any change to the default worker pool which requires the re-creation of the default pool set this variable to true
labels = optional(map(string)) # A list of labels that you want to add to the default worker pool.
addons = optional(object({ # Map of OCP cluster add-on versions to install
debug-tool = optional(string)
image-key-synchronizer = optional(string)
openshift-data-foundation = optional(string)
@@ -889,7 +889,7 @@
entitlement = optional(string) # entitlement option for openshift
secondary_storage = optional(string) # Secondary storage type
boot_volume_crk_name = optional(string) # Boot volume encryption key name
operating_system = string # The operating system of the workers in the default worker pool. See https://cloud.ibm.com/docs/openshift?topic=openshift-openshift_versions#openshift_versions_available .
operating_system = string # The operating system of the workers in the worker pool. See https://cloud.ibm.com/docs/openshift?topic=openshift-openshift_versions#openshift_versions_available .
labels = optional(map(string)) # A list of labels that you want to add to all the worker nodes in the worker pool.
})
)