diff --git a/README.md b/README.md
index 604569a7..fc412012 100644
--- a/README.md
+++ b/README.md
@@ -285,6 +285,7 @@ Optionally, you need the following permissions to attach Access Management tags
| [null_resource.config_map_status](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource |
| [null_resource.confirm_lb_active](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource |
| [null_resource.confirm_network_healthy](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource |
+| [null_resource.ocp_console_management](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource |
| [null_resource.reset_api_key](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource |
| [ibm_container_addons.existing_addons](https://registry.terraform.io/providers/ibm-cloud/ibm/latest/docs/data-sources/container_addons) | data source |
| [ibm_container_cluster_config.cluster_config](https://registry.terraform.io/providers/ibm-cloud/ibm/latest/docs/data-sources/container_cluster_config) | data source |
@@ -316,6 +317,7 @@ Optionally, you need the following permissions to attach Access Management tags
| [custom\_security\_group\_ids](#input\_custom\_security\_group\_ids) | Security groups to add to all worker nodes. This comes in addition to the IBM maintained security group if attach\_ibm\_managed\_security\_group is set to true. If this variable is set, the default VPC security group is NOT assigned to the worker nodes. | `list(string)` | `null` | no |
| [disable\_outbound\_traffic\_protection](#input\_disable\_outbound\_traffic\_protection) | Whether to allow public outbound access from the cluster workers. This is only applicable for `ocp_version` 4.15 | `bool` | `false` | no |
| [disable\_public\_endpoint](#input\_disable\_public\_endpoint) | Whether access to the public service endpoint is disabled when the cluster is created. Does not affect existing clusters. You can't disable a public endpoint on an existing cluster, so you can't convert a public cluster to a private cluster. To change a public endpoint to private, create another cluster with this input set to `true`. | `bool` | `false` | no |
+| [enable\_ocp\_console](#input\_enable\_ocp\_console) | Flag to specify whether to enable or disable the OpenShift console. | `bool` | `true` | no |
| [enable\_registry\_storage](#input\_enable\_registry\_storage) | Set to `true` to enable IBM Cloud Object Storage for the Red Hat OpenShift internal image registry. Set to `false` only for new cluster deployments in an account that is allowlisted for this feature. | `bool` | `true` | no |
| [existing\_cos\_id](#input\_existing\_cos\_id) | The COS id of an already existing COS instance to use for OpenShift internal registry storage. Only required if 'enable\_registry\_storage' and 'use\_existing\_cos' are true | `string` | `null` | no |
| [force\_delete\_storage](#input\_force\_delete\_storage) | Flag indicating whether or not to delete attached storage when destroying the cluster - Default: false | `bool` | `false` | no |
diff --git a/examples/fscloud/main.tf b/examples/fscloud/main.tf
index 147bc51f..b5595afc 100644
--- a/examples/fscloud/main.tf
+++ b/examples/fscloud/main.tf
@@ -250,6 +250,7 @@ module "ocp_fscloud" {
additional_lb_security_group_ids = [module.custom_sg["custom-lb-sg"].security_group_id]
use_private_endpoint = true
ocp_entitlement = var.ocp_entitlement
+ enable_ocp_console = false
kms_config = {
instance_id = var.hpcs_instance_guid
crk_id = local.cluster_hpcs_cluster_key_id
diff --git a/main.tf b/main.tf
index 504e2d11..37b170bd 100644
--- a/main.tf
+++ b/main.tf
@@ -456,6 +456,25 @@ resource "null_resource" "confirm_network_healthy" {
}
}
+##############################################################################
+# OCP Console Patch enablement
+##############################################################################
+resource "null_resource" "ocp_console_management" {
+  depends_on = [ibm_container_vpc_cluster.cluster, ibm_container_vpc_cluster.autoscaling_cluster, ibm_container_vpc_worker_pool.pool, ibm_container_vpc_worker_pool.autoscaling_pool]
+  # Re-run the provisioner whenever the console flag is toggled.
+  triggers = {
+    enable_ocp_console = var.enable_ocp_console
+  }
+  provisioner "local-exec" {
+    command     = "${path.module}/scripts/enable_disable_ocp_console.sh"
+    interpreter = ["/bin/bash", "-c"]
+    environment = {
+      KUBECONFIG         = data.ibm_container_cluster_config.cluster_config[0].config_file_path
+      ENABLE_OCP_CONSOLE = tostring(var.enable_ocp_console)
+    }
+  }
+}
+
##############################################################################
# Addons
##############################################################################
diff --git a/modules/fscloud/README.md b/modules/fscloud/README.md
index b7b3f5ae..5e2b416e 100644
--- a/modules/fscloud/README.md
+++ b/modules/fscloud/README.md
@@ -123,6 +123,7 @@ No resources.
| [cluster\_name](#input\_cluster\_name) | The name that will be assigned to the provisioned cluster | `string` | n/a | yes |
| [cluster\_ready\_when](#input\_cluster\_ready\_when) | The cluster is ready when one of the following: MasterNodeReady (not recommended), OneWorkerNodeReady, Normal, IngressReady | `string` | `"IngressReady"` | no |
| [custom\_security\_group\_ids](#input\_custom\_security\_group\_ids) | Security groups to add to all worker nodes. This comes in addition to the IBM maintained security group if use\_ibm\_managed\_security\_group is set to true. If this variable is set, the default VPC security group is NOT assigned to the worker nodes. | `list(string)` | `null` | no |
+| [enable\_ocp\_console](#input\_enable\_ocp\_console) | Flag to specify whether to enable or disable the OpenShift console. | `bool` | `true` | no |
| [existing\_cos\_id](#input\_existing\_cos\_id) | The COS id of an already existing COS instance | `string` | n/a | yes |
| [force\_delete\_storage](#input\_force\_delete\_storage) | Flag indicating whether or not to delete attached storage when destroying the cluster - Default: false | `bool` | `false` | no |
| [ignore\_worker\_pool\_size\_changes](#input\_ignore\_worker\_pool\_size\_changes) | Enable if using worker autoscaling. Stops Terraform managing worker count | `bool` | `false` | no |
diff --git a/modules/fscloud/main.tf b/modules/fscloud/main.tf
index db05ef55..5c59acbf 100644
--- a/modules/fscloud/main.tf
+++ b/modules/fscloud/main.tf
@@ -35,4 +35,5 @@ module "fscloud" {
number_of_lbs = var.number_of_lbs
additional_vpe_security_group_ids = var.additional_vpe_security_group_ids
cbr_rules = var.cbr_rules
+ enable_ocp_console = var.enable_ocp_console
}
diff --git a/modules/fscloud/variables.tf b/modules/fscloud/variables.tf
index 929986dc..b617b91c 100644
--- a/modules/fscloud/variables.tf
+++ b/modules/fscloud/variables.tf
@@ -265,3 +265,9 @@ variable "cbr_rules" {
description = "The list of context-based restriction rules to create."
default = []
}
+
+variable "enable_ocp_console" {
+  type        = bool
+  default     = true
+  description = "Flag to specify whether to enable or disable the OpenShift console."
+}
diff --git a/scripts/enable_disable_ocp_console.sh b/scripts/enable_disable_ocp_console.sh
new file mode 100755
index 00000000..cc315512
--- /dev/null
+++ b/scripts/enable_disable_ocp_console.sh
@@ -0,0 +1,68 @@
+#!/bin/bash
+
+set -euo pipefail
+
+# Enables or disables the OpenShift web console by patching the console
+# operator's managementState ("Managed" enables it, "Removed" disables it).
+#
+# Required environment variables:
+#   KUBECONFIG         - path to the cluster's kubeconfig file
+#   ENABLE_OCP_CONSOLE - "true" to enable the console; any other value disables it
+
+MAX_ATTEMPTS=10
+RETRY_WAIT=5
+
+# Fail fast if the OpenShift CLI is not available on the PATH.
+function check_oc_cli() {
+  if ! command -v oc &> /dev/null; then
+    echo "Error: OpenShift CLI (oc) is not installed. Exiting."
+    exit 1
+  fi
+}
+
+# patch_console <Managed|Removed>
+# Patches the console operator's managementState, retrying with exponential
+# backoff up to MAX_ATTEMPTS times. Exits non-zero if all attempts fail.
+function patch_console() {
+  local state="$1"
+  local attempt=0
+  local delay=$RETRY_WAIT
+
+  while [ "$attempt" -lt "$MAX_ATTEMPTS" ]; do
+    echo "Attempt $((attempt + 1)) of $MAX_ATTEMPTS: Setting console managementState to ${state}..."
+    if oc patch consoles.operator.openshift.io cluster --patch "{\"spec\":{\"managementState\":\"${state}\"}}" --type=merge; then
+      echo "Patch applied successfully."
+      return 0
+    fi
+    echo "Failed to apply patch. Retrying in ${delay}s..."
+    sleep "$delay"
+    # Use plain arithmetic assignment: under 'set -e', ((attempt++)) would
+    # abort the script on the first iteration because post-incrementing 0
+    # evaluates to 0, which (( )) reports as a failure status.
+    attempt=$((attempt + 1))
+    delay=$((delay * 2))
+  done
+
+  echo "Maximum retry attempts reached. Could not apply patch."
+  exit 1
+}
+
+echo "========================================="
+
+# Use the :- default expansion so that, under 'set -u', an unset variable
+# reaches this clear error instead of an opaque "unbound variable" abort.
+if [[ -z "${ENABLE_OCP_CONSOLE:-}" ]]; then
+  echo "ENABLE_OCP_CONSOLE must be set" >&2
+  exit 1
+fi
+
+check_oc_cli
+
+if [[ "${ENABLE_OCP_CONSOLE}" == "true" ]]; then
+  echo "Enabling the OpenShift Console"
+  patch_console "Managed"
+else
+  echo "Disabling the OpenShift Console"
+  patch_console "Removed"
+fi
+
+echo "========================================="
diff --git a/variables.tf b/variables.tf
index c09fc81e..dad888b6 100644
--- a/variables.tf
+++ b/variables.tf
@@ -333,6 +333,12 @@ variable "cluster_config_endpoint_type" {
}
}
+variable "enable_ocp_console" {
+  type        = bool
+  default     = true
+  description = "Flag to specify whether to enable or disable the OpenShift console."
+}
+
##############################################################################
##############################################################