diff --git a/ibm_catalog.json b/ibm_catalog.json index 54bde411..94e3feb0 100644 --- a/ibm_catalog.json +++ b/ibm_catalog.json @@ -114,7 +114,7 @@ "role_crns": [ "crn:v1:bluemix:public:iam::::role:Editor" ], - "service_name": "iam-access-groups", + "service_name": "iam-groups", "notes": "[Optional] Required for managing IAM access groups." }, { @@ -147,7 +147,7 @@ "crn:v1:bluemix:public:iam::::role:Editor" ], "service_name": "cloud-object-storage", - "notes": "Required to manage Object storage bucket for the cluster internal registry." + "notes": "Required to manage Object storage for the cluster internal registry." }, { "role_crns": [ @@ -166,7 +166,7 @@ }, { "role_crns": [ - "crn:v1:bluemix:public:iam::::role:Administrator", + "crn:v1:bluemix:public:iam::::role:Editor", "crn:v1:bluemix:public:iam::::serviceRole:Manager" ], "service_name": "secrets-manager", @@ -174,16 +174,16 @@ }, { "role_crns": [ - "crn:v1:bluemix:public:iam::::serviceRole:Manager", - "crn:v1:bluemix:public:iam::::role:Administrator" + "crn:v1:bluemix:public:iam::::role:Editor", + "crn:v1:bluemix:public:iam::::serviceRole:Manager" ], "service_name": "sysdig-monitor", "notes": "[Optional] Required to create an instance of Cloud Monitoring." }, { "role_crns": [ - "crn:v1:bluemix:public:iam::::serviceRole:Manager", - "crn:v1:bluemix:public:iam::::role:Editor" + "crn:v1:bluemix:public:iam::::role:Editor", + "crn:v1:bluemix:public:iam::::serviceRole:Manager" ], "service_name": "logs", "notes": "[Optional] Required to create an instance of Cloud Logs." @@ -193,7 +193,7 @@ "crn:v1:bluemix:public:iam::::serviceRole:Manager" ], "service_name": "logs-router", - "notes": "[Optional] Required for configuring cloud logs routing." + "notes": "[Optional] Required for configuring Cloud Logs routing." }, { "role_crns": [ @@ -219,17 +219,10 @@ "service_name": "apprapp", "notes": "[Optional] Required for provisioning the App Configuration instance." 
}, - { - "role_crns": [ - "crn:v1:bluemix:public:iam::::role:Administrator" - ], - "service_name": "metrics-router", - "notes": "[Optional] Required to enable metrics routing to the Cloud Monitoring." - }, { "role_crns": [ "crn:v1:bluemix:public:iam::::serviceRole:Manager", - "crn:v1:bluemix:public:iam::::role:Administrator" + "crn:v1:bluemix:public:iam::::role:Editor" ], "service_name": "event-notifications", "notes": "[Optional] Required when enabling the Event Notifications integration." @@ -273,7 +266,7 @@ "required": true }, { - "key": "ocp_version", + "key": "openshift_version", "required": true, "default_value": "4.18", "options": [ @@ -542,7 +535,7 @@ ] }, { - "key": "disable_public_endpoint", + "key": "allow_public_access_to_cluster", "required": true }, { @@ -712,7 +705,7 @@ ] }, { - "key": "disable_outbound_traffic_protection" + "key": "allow_outbound_traffic" }, { "key": "verify_worker_network_readiness" @@ -1252,7 +1245,7 @@ "crn:v1:bluemix:public:iam::::serviceRole:Manager", "crn:v1:bluemix:public:iam::::role:Administrator" ], - "notes": "Required to reset the cluster API key, create and edit the OpenShift cluster, and manage all related resources." + "notes": "Required to create and manage the OpenShift cluster." }, { "service_name": "iam-identity", @@ -1260,14 +1253,14 @@ "crn:v1:bluemix:public:iam::::role:Administrator", "crn:v1:bluemix:public:iam-identity::::serviceRole:UserApiKeyCreator" ], - "notes": "Required to create the cluster API key needed by the OpenShift cluster on IBM Cloud and for managing and operating resources within the IBM Cloud environment." + "notes": "Required to create the containers-kubernetes-key for the OpenShift cluster." }, { "service_name": "is.vpc", "role_crns": [ "crn:v1:bluemix:public:iam::::role:Editor" ], - "notes": "Required for creating Virtual Private Cloud (VPC)." + "notes": "Required to create the VPC." 
}, { "service_name": "cloud-object-storage", @@ -1275,7 +1268,7 @@ "crn:v1:bluemix:public:iam::::serviceRole:Manager", "crn:v1:bluemix:public:iam::::role:Editor" ], - "notes": "Required for creating the OpenShift cluster's internal registry storage bucket." + "notes": "Required to manage Object storage for the cluster internal registry." }, { "role_crns": [ @@ -1299,7 +1292,7 @@ "url": "https://raw.githubusercontent.com/terraform-ibm-modules/terraform-ibm-base-ocp-vpc/refs/heads/main/reference-architecture/deployable-architecture-ocp-cluster-qs.svg", "type": "image/svg+xml" }, - "description": "This deployable architecture enables deployment of a Red Hat OpenShift cluster within an IBM Cloud Virtual Private Cloud (VPC). It provisions the OpenShift cluster and its foundational VPC infrastructure with a limited set of essential options for rapid and streamlined setup. Additionally, the deployment creates an Object Storage bucket that serves as the internal container image registry for the OpenShift cluster. Thus, it helps ensure seamless storage integration.

Users can select from predefined cluster sizes — mini (default), small, medium, and large. Each size determines the number of availability zones, worker nodes per zone, and the machine type (worker node flavor). [Learn more](https://github.com/terraform-ibm-modules/terraform-ibm-base-ocp-vpc/blob/main/solutions/quickstart/DA_docs.md).

By default, the architecture provisions a two-zone VPC, forming the foundation for the OpenShift cluster. The cluster comprises a single worker pool distributed across these zones, with two worker nodes per zone in the mini configuration.

This streamlined architecture balances ease of use with flexibility, enabling rapid OpenShift cluster deployments with the infrastructure, integrated storage services, and right-sized compute resources of IBM Cloud." + "description": "This deployable architecture enables deployment of a Red Hat OpenShift cluster within an IBM Cloud Virtual Private Cloud (VPC). It provisions the OpenShift cluster and its foundational VPC infrastructure with a limited set of essential options for rapid and streamlined setup. Additionally, the deployment creates an Object Storage bucket that serves as the internal container image registry for the OpenShift cluster. Thus, it helps ensure seamless storage integration.

Users can select from predefined cluster sizes — mini (default), small, medium, and large. The chosen size determines the machine type of the worker nodes, the number of availability zones the cluster spans, and the number of worker nodes deployed in each zone. For more information, see [the documentation](https://github.com/terraform-ibm-modules/terraform-ibm-base-ocp-vpc/blob/main/solutions/quickstart/DA_docs.md).

By default, the architecture provisions a two-zone VPC, forming the foundation for the OpenShift cluster. The cluster comprises a single worker pool distributed across these zones, with two worker nodes per zone in the mini configuration.

This streamlined architecture balances ease of use with flexibility, enabling rapid OpenShift cluster deployments with the infrastructure, integrated storage services, and right-sized compute resources of IBM Cloud." } ] }, @@ -1387,7 +1380,7 @@ "key": "cluster_name" }, { - "key": "ocp_version", + "key": "openshift_version", "default_value": "4.18", "required": true, "options": [ @@ -1450,10 +1443,10 @@ } }, { - "key": "disable_public_endpoint" + "key": "allow_public_access_to_cluster" }, { - "key": "disable_outbound_traffic_protection" + "key": "allow_outbound_traffic" } ], "dependency_version_2": true, diff --git a/solutions/fully-configurable/main.tf b/solutions/fully-configurable/main.tf index b391b760..e42a8bd1 100644 --- a/solutions/fully-configurable/main.tf +++ b/solutions/fully-configurable/main.tf @@ -195,7 +195,7 @@ locals { # Managing the ODF version accordingly, as it changes with each OCP version. addons = lookup(var.addons, "openshift-data-foundation", null) != null ? lookup(var.addons["openshift-data-foundation"], "version", null) == null ? { for key, value in var.addons : key => value != null ? { - version = lookup(value, "version", null) == null && key == "openshift-data-foundation" ? "${var.ocp_version}.0" : lookup(value, "version", null) + version = lookup(value, "version", null) == null && key == "openshift-data-foundation" ? 
"${var.openshift_version}.0" : lookup(value, "version", null) parameters_json = lookup(value, "parameters_json", null) } : null } : var.addons : var.addons } @@ -211,7 +211,7 @@ module "ocp_base" { existing_cos_id = var.existing_cos_instance_crn vpc_id = local.existing_vpc_id vpc_subnets = local.vpc_subnets - ocp_version = var.ocp_version + ocp_version = var.openshift_version worker_pools = local.worker_pools access_tags = var.access_tags ocp_entitlement = var.ocp_entitlement @@ -224,8 +224,8 @@ module "ocp_base" { cbr_rules = var.cbr_rules cluster_ready_when = var.cluster_ready_when custom_security_group_ids = var.custom_security_group_ids - disable_outbound_traffic_protection = var.disable_outbound_traffic_protection - disable_public_endpoint = var.disable_public_endpoint + disable_outbound_traffic_protection = var.allow_outbound_traffic + disable_public_endpoint = !var.allow_public_access_to_cluster enable_ocp_console = var.enable_ocp_console ignore_worker_pool_size_changes = var.ignore_worker_pool_size_changes kms_config = local.kms_config diff --git a/solutions/fully-configurable/variables.tf b/solutions/fully-configurable/variables.tf index ed170cad..5f855d9b 100644 --- a/solutions/fully-configurable/variables.tf +++ b/solutions/fully-configurable/variables.tf @@ -67,7 +67,7 @@ variable "cluster_name" { default = "openshift" } -variable "ocp_version" { +variable "openshift_version" { type = string description = "Version of the OpenShift cluster to provision." default = null @@ -266,10 +266,16 @@ variable "use_private_endpoint" { default = true } -variable "disable_public_endpoint" { +variable "allow_public_access_to_cluster" { type = bool - description = "Whether access to the public service endpoint is disabled when the cluster is created. Does not affect existing clusters. You can't disable a public endpoint on an existing cluster, so you can't convert a public cluster to a private cluster. 
To change a public endpoint to private, create another cluster with this input set to `true`. Warning: Set this field to `false` if you want to retain public access to the cluster. Once the cluster is created, this cannot be changed." - default = true + description = "Set to true to allow public access to the master node of the cluster by enabling the public endpoint." + default = false +} + +variable "allow_outbound_traffic" { + type = bool + description = "Set to true to allow public outbound access from the cluster workers." + default = false } variable "cluster_config_endpoint_type" { @@ -279,12 +285,6 @@ nullable = false } -variable "disable_outbound_traffic_protection" { - type = bool - description = "Whether to allow public outbound access from the cluster workers. This is only applicable for OCP 4.15 and later." - default = false -} - variable "verify_worker_network_readiness" { type = bool description = "By setting this to true, a script runs kubectl commands to verify that all worker nodes can communicate successfully with the master. If the runtime does not have access to the kube cluster to run kubectl commands, set this value to false." 
diff --git a/solutions/fully-configurable/version.tf b/solutions/fully-configurable/version.tf index 5f7e3d7a..6ba3e371 100644 --- a/solutions/fully-configurable/version.tf +++ b/solutions/fully-configurable/version.tf @@ -5,7 +5,7 @@ terraform { required_providers { ibm = { source = "IBM-Cloud/ibm" - version = "1.81.0" + version = "1.81.1" } helm = { source = "hashicorp/helm" diff --git a/solutions/quickstart/main.tf b/solutions/quickstart/main.tf index 745cde88..82f362a8 100644 --- a/solutions/quickstart/main.tf +++ b/solutions/quickstart/main.tf @@ -135,14 +135,14 @@ module "ocp_base" { cluster_name = local.cluster_name resource_group_id = module.resource_group.resource_group_id region = var.region - ocp_version = var.ocp_version + ocp_version = var.openshift_version ocp_entitlement = var.ocp_entitlement vpc_id = module.vpc.vpc_id vpc_subnets = local.cluster_vpc_subnets worker_pools = local.worker_pools - disable_outbound_traffic_protection = var.disable_outbound_traffic_protection + disable_outbound_traffic_protection = var.allow_outbound_traffic access_tags = var.access_tags - disable_public_endpoint = var.disable_public_endpoint + disable_public_endpoint = !var.allow_public_access_to_cluster use_private_endpoint = true cluster_config_endpoint_type = "default" } diff --git a/solutions/quickstart/variables.tf b/solutions/quickstart/variables.tf index 67a27760..23791a57 100644 --- a/solutions/quickstart/variables.tf +++ b/solutions/quickstart/variables.tf @@ -47,7 +47,7 @@ variable "region" { default = "us-south" } -variable "ocp_version" { +variable "openshift_version" { type = string description = "Version of the OpenShift cluster to provision." default = null @@ -91,14 +91,14 @@ variable "size" { default = "mini" } -variable "disable_public_endpoint" { +variable "allow_public_access_to_cluster" { type = bool - description = "Disables the public endpoint, which allows internet access to the cluster, during creation only." 
- default = false + description = "Set to true to allow public access to the master node of the cluster by enabling the public endpoint." + default = true } -variable "disable_outbound_traffic_protection" { +variable "allow_outbound_traffic" { type = bool - description = "Whether to allow public outbound access from the cluster workers. This is only applicable for OCP 4.15 and later. [Learn more](https://cloud.ibm.com/docs/openshift?topic=openshift-sbd-allow-outbound)." + description = "Set to true to allow public outbound access from the cluster workers." default = true } diff --git a/solutions/quickstart/version.tf b/solutions/quickstart/version.tf index deba0ac1..f25f3ece 100644 --- a/solutions/quickstart/version.tf +++ b/solutions/quickstart/version.tf @@ -5,7 +5,7 @@ terraform { required_providers { ibm = { source = "IBM-Cloud/ibm" - version = "1.80.3" + version = "1.81.1" } } } diff --git a/tests/pr_test.go b/tests/pr_test.go index 1eb91939..550156d4 100644 --- a/tests/pr_test.go +++ b/tests/pr_test.go @@ -147,7 +147,7 @@ func TestRunFullyConfigurableInSchematics(t *testing.T) { {Name: "ibmcloud_api_key", Value: options.RequiredEnvironmentVars["TF_VAR_ibmcloud_api_key"], DataType: "string", Secure: true}, {Name: "prefix", Value: options.Prefix, DataType: "string"}, {Name: "cluster_name", Value: "cluster", DataType: "string"}, - {Name: "ocp_version", Value: ocpVersion1, DataType: "string"}, + {Name: "openshift_version", Value: ocpVersion1, DataType: "string"}, {Name: "ocp_entitlement", Value: "cloud_pak", DataType: "string"}, {Name: "existing_resource_group_name", Value: terraform.Output(t, existingTerraformOptions, "resource_group_name"), DataType: "string"}, {Name: "existing_cos_instance_crn", Value: terraform.Output(t, existingTerraformOptions, "cos_instance_id"), DataType: "string"}, @@ -183,7 +183,7 @@ func TestRunUpgradeFullyConfigurable(t *testing.T) { {Name: "ibmcloud_api_key", Value: options.RequiredEnvironmentVars["TF_VAR_ibmcloud_api_key"], DataType: 
"string", Secure: true}, {Name: "prefix", Value: options.Prefix, DataType: "string"}, {Name: "cluster_name", Value: "cluster", DataType: "string"}, - {Name: "ocp_version", Value: ocpVersion1, DataType: "string"}, + {Name: "openshift_version", Value: ocpVersion1, DataType: "string"}, {Name: "existing_resource_group_name", Value: terraform.Output(t, existingTerraformOptions, "resource_group_name"), DataType: "string"}, {Name: "existing_cos_instance_crn", Value: terraform.Output(t, existingTerraformOptions, "cos_instance_id"), DataType: "string"}, {Name: "existing_vpc_crn", Value: terraform.Output(t, existingTerraformOptions, "vpc_crn"), DataType: "string"},