2 changes: 2 additions & 0 deletions README.md
@@ -285,6 +285,7 @@ Optionally, you need the following permissions to attach Access Management tags
| [null_resource.config_map_status](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource |
| [null_resource.confirm_lb_active](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource |
| [null_resource.confirm_network_healthy](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource |
| [null_resource.ocp_console_management](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource |
| [null_resource.reset_api_key](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource |
| [ibm_container_addons.existing_addons](https://registry.terraform.io/providers/ibm-cloud/ibm/latest/docs/data-sources/container_addons) | data source |
| [ibm_container_cluster_config.cluster_config](https://registry.terraform.io/providers/ibm-cloud/ibm/latest/docs/data-sources/container_cluster_config) | data source |
@@ -316,6 +317,7 @@ Optionally, you need the following permissions to attach Access Management tags
| <a name="input_custom_security_group_ids"></a> [custom\_security\_group\_ids](#input\_custom\_security\_group\_ids) | Security groups to add to all worker nodes. This comes in addition to the IBM maintained security group if attach\_ibm\_managed\_security\_group is set to true. If this variable is set, the default VPC security group is NOT assigned to the worker nodes. | `list(string)` | `null` | no |
| <a name="input_disable_outbound_traffic_protection"></a> [disable\_outbound\_traffic\_protection](#input\_disable\_outbound\_traffic\_protection) | Whether to allow public outbound access from the cluster workers. This is only applicable for `ocp_version` 4.15 | `bool` | `false` | no |
| <a name="input_disable_public_endpoint"></a> [disable\_public\_endpoint](#input\_disable\_public\_endpoint) | Whether access to the public service endpoint is disabled when the cluster is created. Does not affect existing clusters. You can't disable a public endpoint on an existing cluster, so you can't convert a public cluster to a private cluster. To change a public endpoint to private, create another cluster with this input set to `true`. | `bool` | `false` | no |
| <a name="input_enable_ocp_console"></a> [enable\_ocp\_console](#input\_enable\_ocp\_console) | Flag to specify whether to enable or disable the OpenShift console. | `bool` | `true` | no |
| <a name="input_enable_registry_storage"></a> [enable\_registry\_storage](#input\_enable\_registry\_storage) | Set to `true` to enable IBM Cloud Object Storage for the Red Hat OpenShift internal image registry. Set to `false` only for new cluster deployments in an account that is allowlisted for this feature. | `bool` | `true` | no |
| <a name="input_existing_cos_id"></a> [existing\_cos\_id](#input\_existing\_cos\_id) | The COS id of an already existing COS instance to use for OpenShift internal registry storage. Only required if 'enable\_registry\_storage' and 'use\_existing\_cos' are true | `string` | `null` | no |
| <a name="input_force_delete_storage"></a> [force\_delete\_storage](#input\_force\_delete\_storage) | Flag indicating whether or not to delete attached storage when destroying the cluster - Default: false | `bool` | `false` | no |
20 changes: 20 additions & 0 deletions main.tf
@@ -456,6 +456,26 @@ resource "null_resource" "confirm_network_healthy" {
}
}

##############################################################################
# OCP console management
##############################################################################
resource "null_resource" "ocp_console_management" {
  count      = var.verify_worker_network_readiness ? 1 : 0
  depends_on = [ibm_container_vpc_cluster.cluster, ibm_container_vpc_cluster.autoscaling_cluster, ibm_container_vpc_worker_pool.pool, ibm_container_vpc_worker_pool.autoscaling_pool]

  # Re-run the patch script whenever the desired console state changes.
  triggers = {
    enable_ocp_console = var.enable_ocp_console
  }

  provisioner "local-exec" {
    command     = "${path.module}/scripts/ocp_console_patch.sh"
    interpreter = ["/bin/bash", "-c"]
    environment = {
      KUBECONFIG         = data.ibm_container_cluster_config.cluster_config[0].config_file_path
      ENABLE_OCP_CONSOLE = var.enable_ocp_console
    }
  }
}

##############################################################################
# Addons
##############################################################################
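Usage note: consumers toggle the console through the module call, and because the flag is captured in `triggers`, flipping it on an existing cluster re-runs the patch script rather than recreating the cluster. A minimal sketch of such a call (the module source and the elided inputs are illustrative, not taken from this diff):

```hcl
# Hypothetical module call; the source and other inputs are examples only.
module "ocp_cluster" {
  source = "terraform-ibm-modules/base-ocp-vpc/ibm" # assumed registry source

  # ... required cluster inputs (VPC, subnets, resource group, ...) elided ...

  # Disable the OpenShift web console. Changing this value later re-triggers
  # null_resource.ocp_console_management via its `triggers` block.
  enable_ocp_console = false
}
```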
72 changes: 72 additions & 0 deletions scripts/ocp_console_patch.sh
@@ -0,0 +1,72 @@
#!/bin/bash

set -euo pipefail

# Commands to set the console operator's managementState (Managed/Removed).
PATCH_APPLY="oc patch consoles.operator.openshift.io cluster --patch '{\"spec\":{\"managementState\":\"Managed\"}}' --type=merge"
PATCH_REMOVE="oc patch consoles.operator.openshift.io cluster --patch '{\"spec\":{\"managementState\":\"Removed\"}}' --type=merge"
MAX_ATTEMPTS=10
RETRY_WAIT=5

function check_oc_cli() {
  if ! command -v oc &> /dev/null; then
    echo "Error: OpenShift CLI (oc) is not installed. Exiting."
    exit 1
  fi
}

function apply_oc_patch() {
  local attempt=0
  while [ $attempt -lt $MAX_ATTEMPTS ]; do
    echo "Attempt $((attempt+1)) of $MAX_ATTEMPTS: Applying OpenShift Console patch..."

    if eval "$PATCH_APPLY"; then
      echo "Patch applied successfully."
      return 0
    else
      # Back off exponentially between retries. Avoid ((attempt++)) here:
      # its exit status is 1 when attempt is 0, which would abort under set -e.
      echo "Failed to apply patch. Retrying in ${RETRY_WAIT}s..."
      sleep $RETRY_WAIT
      attempt=$((attempt + 1))
      RETRY_WAIT=$((RETRY_WAIT * 2))
    fi
  done

  echo "Maximum retry attempts reached. Could not apply patch."
  exit 1
}

function remove_oc_patch() {
  local attempt=0
  while [ $attempt -lt $MAX_ATTEMPTS ]; do
    echo "Attempt $((attempt+1)) of $MAX_ATTEMPTS: Removing OpenShift Console patch..."

    if eval "$PATCH_REMOVE"; then
      echo "Patch removed successfully."
      return 0
    else
      # Same retry/backoff scheme as apply_oc_patch.
      echo "Failed to remove patch. Retrying in ${RETRY_WAIT}s..."
      sleep $RETRY_WAIT
      attempt=$((attempt + 1))
      RETRY_WAIT=$((RETRY_WAIT * 2))
    fi
  done

  echo "Maximum retry attempts reached. Could not remove patch."
  exit 1
}

echo "========================================="
echo "Starting OpenShift Console patch..."
check_oc_cli

if [ "${ENABLE_OCP_CONSOLE}" == "true" ]; then
  apply_oc_patch
else
  remove_oc_patch
fi

echo "Operation completed successfully!"
echo "========================================="
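For a quick manual check outside Terraform, the script can be invoked directly against a cluster. A sketch, assuming `oc` is installed and `KUBECONFIG` points at a valid cluster config (the paths are examples only):

```bash
# Hypothetical manual invocation; the kubeconfig path is an example only.
export KUBECONFIG="$HOME/.kube/config"
ENABLE_OCP_CONSOLE=false ./scripts/ocp_console_patch.sh

# Inspect the resulting state of the console operator.
oc get consoles.operator.openshift.io cluster -o jsonpath='{.spec.managementState}'
```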
6 changes: 6 additions & 0 deletions variables.tf
@@ -333,6 +333,12 @@ variable "cluster_config_endpoint_type" {
}
}

variable "enable_ocp_console" {
  description = "Flag to specify whether to enable or disable the OpenShift console."
  type        = bool
  default     = true
}

##############################################################################

##############################################################