diff --git a/README.md b/README.md index 91038a8a2..7cd4f0d28 100644 --- a/README.md +++ b/README.md @@ -15,11 +15,12 @@ Optionally, the module supports advanced security group management for the worke ### Before you begin -- Ensure that you have an up-to-date version of the [IBM Cloud CLI](https://cloud.ibm.com/docs/cli?topic=cli-getting-started). -- Ensure that you have an up-to-date version of the [IBM Cloud Kubernetes service CLI](https://cloud.ibm.com/docs/containers?topic=containers-kubernetes-service-cli). -- Ensure that you have an up-to-date version of the [IBM Cloud VPC Infrastructure service CLI](https://cloud.ibm.com/docs/vpc?topic=vpc-vpc-reference). Only required if providing additional security groups with the `var.additional_lb_security_group_ids`. -- Ensure that you have an up-to-date version of the [jq](https://jqlang.github.io/jq). -- Ensure that you have an up-to-date version of the [kubectl](https://kubernetes.io/docs/tasks/tools/#kubectl). +- Ensure that you have an up-to-date version of [curl](https://curl.se/docs/manpage.html). +- Ensure that you have an up-to-date version of [tar](https://www.gnu.org/software/tar/). +- [OPTIONAL] Ensure that you have an up-to-date version of the [jq](https://jqlang.github.io/jq). +- [OPTIONAL] Ensure that you have an up-to-date version of the [kubectl](https://kubernetes.io/docs/tasks/tools/#kubectl). + +By default, the module automatically downloads the required dependencies if they are not already installed. You can disable this behavior by setting `install_required_binaries` to `false`. When enabled, the module fetches dependencies from official online binaries (requires public internet). 
@@ -323,6 +324,7 @@ Optionally, you need the following permissions to attach Access Management tags | [kubernetes_config_map_v1_data.set_autoscaling](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/config_map_v1_data) | resource | | [null_resource.config_map_status](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource | | [null_resource.confirm_network_healthy](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource | +| [null_resource.install_required_binaries](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource | | [null_resource.ocp_console_management](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource | | [time_sleep.wait_for_auth_policy](https://registry.terraform.io/providers/hashicorp/time/latest/docs/resources/sleep) | resource | | [ibm_container_addons.existing_addons](https://registry.terraform.io/providers/ibm-cloud/ibm/latest/docs/data-sources/container_addons) | data source | @@ -359,6 +361,7 @@ Optionally, you need the following permissions to attach Access Management tags | [existing\_secrets\_manager\_instance\_crn](#input\_existing\_secrets\_manager\_instance\_crn) | CRN of the Secrets Manager instance where Ingress certificate secrets are stored. If 'enable\_secrets\_manager\_integration' is set to true then this value is required. | `string` | `null` | no | | [force\_delete\_storage](#input\_force\_delete\_storage) | Flag indicating whether or not to delete attached storage when destroying the cluster - Default: false | `bool` | `false` | no | | [ignore\_worker\_pool\_size\_changes](#input\_ignore\_worker\_pool\_size\_changes) | Enable if using worker autoscaling. 
Stops Terraform managing worker count | `bool` | `false` | no | +| [install\_required\_binaries](#input\_install\_required\_binaries) | When set to true, a script will run to check if `kubectl` and `jq` exist on the runtime and if not attempt to download them from the public internet and install them to /tmp. Set to false to skip running this script. | `bool` | `true` | no | | [kms\_config](#input\_kms\_config) | Use to attach a KMS instance to the cluster. If account\_id is not provided, defaults to the account in use. |
object({
crk_id = string
instance_id = string
private_endpoint = optional(bool, true) # defaults to true
account_id = optional(string) # To attach KMS instance from another account
wait_for_apply = optional(bool, true) # defaults to true so terraform will wait until the KMS is applied to the master, ready and deployed
}) | `null` | no |
| [manage\_all\_addons](#input\_manage\_all\_addons) | Instructs Terraform to manage all cluster addons, even if addons were installed outside of the module. If set to 'true' this module destroys any addons that were installed by other sources. | `bool` | `false` | no |
| [number\_of\_lbs](#input\_number\_of\_lbs) | The number of LBs to associated the `additional_lb_security_group_names` security group with. | `number` | `1` | no |
diff --git a/main.tf b/main.tf
index 191e391eb..7d8ee7a7d 100644
--- a/main.tf
+++ b/main.tf
@@ -49,6 +49,8 @@ locals {
# for versions older than 4.15, this value must be null, or provider gives error
disable_outbound_traffic_protection = startswith(local.ocp_version, "4.14") ? null : var.disable_outbound_traffic_protection
+
+ binaries_path = "/tmp"
}
# Local block to verify validations for OCP AI Addon.
@@ -101,6 +103,20 @@ locals {
default_wp_validation = local.rhcos_check ? true : tobool("If RHCOS is used with this cluster, the default worker pool should be created with RHCOS.")
}
+resource "null_resource" "install_required_binaries" {
+ count = var.install_required_binaries && (var.verify_worker_network_readiness || var.enable_ocp_console != null || lookup(var.addons, "cluster-autoscaler", null) != null) ? 1 : 0
+ triggers = {
+ verify_worker_network_readiness = var.verify_worker_network_readiness
+ cluster_autoscaler = lookup(var.addons, "cluster-autoscaler", null) != null
+ enable_ocp_console = var.enable_ocp_console
+ }
+ provisioner "local-exec" {
+ # Using the script from the kube-audit module to avoid code duplication.
+ command = "${path.module}/modules/kube-audit/scripts/install-binaries.sh ${local.binaries_path}"
+ interpreter = ["/bin/bash", "-c"]
+ }
+}
+
# Lookup the current default kube version
data "ibm_container_cluster_versions" "cluster_versions" {
resource_group_id = var.resource_group_id
@@ -478,10 +494,14 @@ resource "null_resource" "confirm_network_healthy" {
# Worker pool creation can start before the 'ibm_container_vpc_cluster' completes since there is no explicit
# depends_on in 'ibm_container_vpc_worker_pool', just an implicit depends_on on the cluster ID. Cluster ID can exist before
# 'ibm_container_vpc_cluster' completes, so hence need to add explicit depends on against 'ibm_container_vpc_cluster' here.
- depends_on = [ibm_container_vpc_cluster.cluster, ibm_container_vpc_cluster.cluster_with_upgrade, ibm_container_vpc_cluster.autoscaling_cluster, ibm_container_vpc_cluster.autoscaling_cluster_with_upgrade, module.worker_pools]
+ depends_on = [null_resource.install_required_binaries, ibm_container_vpc_cluster.cluster, ibm_container_vpc_cluster.cluster_with_upgrade, ibm_container_vpc_cluster.autoscaling_cluster, ibm_container_vpc_cluster.autoscaling_cluster_with_upgrade, module.worker_pools]
+
+ triggers = {
+ verify_worker_network_readiness = var.verify_worker_network_readiness
+ }
provisioner "local-exec" {
- command = "${path.module}/scripts/confirm_network_healthy.sh"
+ command = "${path.module}/scripts/confirm_network_healthy.sh ${local.binaries_path}"
interpreter = ["/bin/bash", "-c"]
environment = {
KUBECONFIG = data.ibm_container_cluster_config.cluster_config[0].config_file_path
@@ -494,9 +514,12 @@ resource "null_resource" "confirm_network_healthy" {
##############################################################################
resource "null_resource" "ocp_console_management" {
count = var.enable_ocp_console != null ? 1 : 0
- depends_on = [null_resource.confirm_network_healthy]
+ depends_on = [null_resource.install_required_binaries, null_resource.confirm_network_healthy]
+ triggers = {
+ enable_ocp_console = var.enable_ocp_console
+ }
provisioner "local-exec" {
- command = "${path.module}/scripts/enable_disable_ocp_console.sh"
+ command = "${path.module}/scripts/enable_disable_ocp_console.sh ${local.binaries_path}"
interpreter = ["/bin/bash", "-c"]
environment = {
KUBECONFIG = data.ibm_container_cluster_config.cluster_config[0].config_file_path
@@ -568,10 +591,13 @@ locals {
resource "null_resource" "config_map_status" {
count = lookup(var.addons, "cluster-autoscaler", null) != null ? 1 : 0
- depends_on = [ibm_container_addons.addons]
+ depends_on = [null_resource.install_required_binaries, ibm_container_addons.addons]
+ triggers = {
+ cluster_autoscaler = lookup(var.addons, "cluster-autoscaler", null) != null
+ }
provisioner "local-exec" {
- command = "${path.module}/scripts/get_config_map_status.sh"
+ command = "${path.module}/scripts/get_config_map_status.sh ${local.binaries_path}"
interpreter = ["/bin/bash", "-c"]
environment = {
KUBECONFIG = data.ibm_container_cluster_config.cluster_config[0].config_file_path
@@ -759,7 +785,6 @@ resource "time_sleep" "wait_for_auth_policy" {
create_duration = "30s"
}
-
resource "ibm_container_ingress_instance" "instance" {
count = var.enable_secrets_manager_integration ? 1 : 0
depends_on = [time_sleep.wait_for_auth_policy]
diff --git a/modules/kube-audit/README.md b/modules/kube-audit/README.md
index aa48993f4..462f06060 100644
--- a/modules/kube-audit/README.md
+++ b/modules/kube-audit/README.md
@@ -70,6 +70,7 @@ No modules.
| Name | Type |
|------|------|
| [helm_release.kube_audit](https://registry.terraform.io/providers/hashicorp/helm/latest/docs/resources/release) | resource |
+| [null_resource.install_required_binaries](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource |
| [null_resource.set_audit_log_policy](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource |
| [null_resource.set_audit_webhook](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource |
| [time_sleep.wait_for_kube_audit](https://registry.terraform.io/providers/hashicorp/time/latest/docs/resources/sleep) | resource |
@@ -89,6 +90,7 @@ No modules.
| [cluster\_id](#input\_cluster\_id) | The ID of the cluster to deploy the log collection service in. | `string` | n/a | yes |
| [cluster\_resource\_group\_id](#input\_cluster\_resource\_group\_id) | The resource group ID of the cluster. | `string` | n/a | yes |
| [ibmcloud\_api\_key](#input\_ibmcloud\_api\_key) | The IBM Cloud api key to generate an IAM token. | `string` | n/a | yes |
+| [install\_required\_binaries](#input\_install\_required\_binaries) | When set to true, a script will run to check if `kubectl` and `jq` exist on the runtime and if not attempt to download them from the public internet and install them to /tmp. Set to false to skip running this script. | `bool` | `true` | no |
| [region](#input\_region) | The IBM Cloud region where the cluster is provisioned. | `string` | n/a | yes |
| [use\_private\_endpoint](#input\_use\_private\_endpoint) | Set this to true to force all api calls to use the IBM Cloud private endpoints. | `bool` | `false` | no |
| [wait\_till](#input\_wait\_till) | To avoid long wait times when you run your Terraform code, you can specify the stage when you want Terraform to mark the cluster resource creation as completed. Depending on what stage you choose, the cluster creation might not be fully completed and continues to run in the background. However, your Terraform code can continue to run without waiting for the cluster to be fully created. Supported args are `MasterNodeReady`, `OneWorkerNodeReady`, `IngressReady` and `Normal` | `string` | `"IngressReady"` | no |
diff --git a/modules/kube-audit/main.tf b/modules/kube-audit/main.tf
index a26587a38..f042e7f86 100644
--- a/modules/kube-audit/main.tf
+++ b/modules/kube-audit/main.tf
@@ -1,3 +1,22 @@
+locals {
+ binaries_path = "/tmp"
+}
+
+resource "null_resource" "install_required_binaries" {
+ count = var.install_required_binaries ? 1 : 0
+ triggers = {
+ audit_log_policy = var.audit_log_policy
+ audit_deployment_name = var.audit_deployment_name
+ audit_namespace = var.audit_namespace
+ audit_webhook_listener_image = var.audit_webhook_listener_image
+ audit_webhook_listener_image_tag_digest = var.audit_webhook_listener_image_tag_digest
+ }
+ provisioner "local-exec" {
+ command = "${path.module}/scripts/install-binaries.sh ${local.binaries_path}"
+ interpreter = ["/bin/bash", "-c"]
+ }
+}
+
data "ibm_container_cluster_config" "cluster_config" {
cluster_name_id = var.cluster_id
config_dir = "${path.module}/kubeconfig"
@@ -19,11 +38,12 @@ locals {
}
resource "null_resource" "set_audit_log_policy" {
+ depends_on = [null_resource.install_required_binaries]
triggers = {
audit_log_policy = var.audit_log_policy
}
provisioner "local-exec" {
- command = "${path.module}/scripts/set_audit_log_policy.sh ${var.audit_log_policy}"
+ command = "${path.module}/scripts/set_audit_log_policy.sh ${var.audit_log_policy} ${local.binaries_path}"
interpreter = ["/bin/bash", "-c"]
environment = {
KUBECONFIG = data.ibm_container_cluster_config.cluster_config.config_file_path
@@ -40,7 +60,7 @@ locals {
}
resource "helm_release" "kube_audit" {
- depends_on = [null_resource.set_audit_log_policy, data.ibm_container_vpc_cluster.cluster]
+ depends_on = [null_resource.install_required_binaries, null_resource.set_audit_log_policy, data.ibm_container_vpc_cluster.cluster]
name = var.audit_deployment_name
chart = local.kube_audit_chart_location
timeout = 1200
@@ -72,7 +92,7 @@ resource "helm_release" "kube_audit" {
]
provisioner "local-exec" {
- command = "${path.module}/scripts/confirm-rollout-status.sh ${var.audit_deployment_name} ${var.audit_namespace}"
+ command = "${path.module}/scripts/confirm-rollout-status.sh ${var.audit_deployment_name} ${var.audit_namespace} ${local.binaries_path}"
interpreter = ["/bin/bash", "-c"]
environment = {
KUBECONFIG = data.ibm_container_cluster_config.cluster_config.config_file_path
@@ -96,12 +116,12 @@ locals {
# }
resource "null_resource" "set_audit_webhook" {
- depends_on = [time_sleep.wait_for_kube_audit]
+ depends_on = [null_resource.install_required_binaries, time_sleep.wait_for_kube_audit]
triggers = {
audit_log_policy = var.audit_log_policy
}
provisioner "local-exec" {
- command = "${path.module}/scripts/set_webhook.sh ${var.region} ${var.use_private_endpoint} ${var.cluster_config_endpoint_type} ${var.cluster_id} ${var.cluster_resource_group_id} ${var.audit_log_policy != "default" ? "verbose" : "default"}"
+ command = "${path.module}/scripts/set_webhook.sh ${var.region} ${var.use_private_endpoint} ${var.cluster_config_endpoint_type} ${var.cluster_id} ${var.cluster_resource_group_id} ${var.audit_log_policy != "default" ? "verbose" : "default"} ${local.binaries_path}"
interpreter = ["/bin/bash", "-c"]
environment = {
IAM_API_KEY = var.ibmcloud_api_key
diff --git a/modules/kube-audit/scripts/confirm-rollout-status.sh b/modules/kube-audit/scripts/confirm-rollout-status.sh
index 9cd407dc5..8308d2986 100755
--- a/modules/kube-audit/scripts/confirm-rollout-status.sh
+++ b/modules/kube-audit/scripts/confirm-rollout-status.sh
@@ -5,4 +5,7 @@ set -e
deployment=$1
namespace=$2
+# The binaries downloaded by the install-binaries script are located in the directory passed as the third argument (defaults to /tmp).
+export PATH=$PATH:${3:-"/tmp"}
+
kubectl rollout status deploy "${deployment}" -n "${namespace}" --timeout 30m
diff --git a/modules/kube-audit/scripts/install-binaries.sh b/modules/kube-audit/scripts/install-binaries.sh
new file mode 100755
index 000000000..bb224a090
--- /dev/null
+++ b/modules/kube-audit/scripts/install-binaries.sh
@@ -0,0 +1,44 @@
+#!/bin/bash
+
+# This script is stored in the kube-audit module because modules cannot access
+# scripts placed in the root module when they are invoked individually.
+# Placing it here also avoids duplicating the install-binaries script across modules.
+
+set -o errexit
+set -o pipefail
+
+DIRECTORY=${1:-"/tmp"}
+# renovate: datasource=github-tags depName=terraform-ibm-modules/common-bash-library
+TAG=v0.2.0
+
+echo "Downloading common-bash-library version ${TAG}."
+
+# download common-bash-library
+curl --silent \
+ --connect-timeout 5 \
+ --max-time 10 \
+ --retry 3 \
+ --retry-delay 2 \
+ --retry-connrefused \
+ --fail \
+ --show-error \
+ --location \
+ --output "${DIRECTORY}/common-bash.tar.gz" \
+ "https://github.com/terraform-ibm-modules/common-bash-library/archive/refs/tags/$TAG.tar.gz"
+
+mkdir -p "${DIRECTORY}/common-bash-library"
+tar -xzf "${DIRECTORY}/common-bash.tar.gz" --strip-components=1 -C "${DIRECTORY}/common-bash-library"
+rm -f "${DIRECTORY}/common-bash.tar.gz"
+
+# The file doesn't exist at the time shellcheck runs, so this check is skipped.
+# shellcheck disable=SC1091
+source "${DIRECTORY}/common-bash-library/common/common.sh"
+
+echo "Installing jq."
+install_jq "latest" "${DIRECTORY}" "true"
+echo "Installing kubectl."
+install_kubectl "latest" "${DIRECTORY}" "true"
+
+rm -rf "${DIRECTORY}/common-bash-library"
+
+echo "Installation completed successfully"
diff --git a/modules/kube-audit/scripts/set_audit_log_policy.sh b/modules/kube-audit/scripts/set_audit_log_policy.sh
index c82d77b34..e7d2262c1 100755
--- a/modules/kube-audit/scripts/set_audit_log_policy.sh
+++ b/modules/kube-audit/scripts/set_audit_log_policy.sh
@@ -3,19 +3,21 @@
set -euo pipefail
AUDIT_POLICY="$1"
+# The binaries downloaded by the install-binaries script are located in the directory passed as the second argument (defaults to /tmp).
+export PATH=$PATH:${2:-"/tmp"}
-STORAGE_PROFILE="oc patch apiserver cluster --type='merge' -p '{\"spec\":{\"audit\":{\"profile\":\"$AUDIT_POLICY\"}}}'"
+STORAGE_PROFILE="kubectl patch apiserver cluster --type='merge' -p '{\"spec\":{\"audit\":{\"profile\":\"$AUDIT_POLICY\"}}}'"
MAX_ATTEMPTS=10
RETRY_WAIT=5
-function check_oc_cli() {
- if ! command -v oc &>/dev/null; then
- echo "Error: OpenShift CLI (oc) is not installed. Exiting."
+function check_kubectl_cli() {
+ if ! command -v kubectl &>/dev/null; then
+ echo "Error: kubectl is not installed. Exiting."
exit 1
fi
}
-function apply_oc_patch() {
+function apply_kubectl_patch() {
local attempt=0
while [ $attempt -lt $MAX_ATTEMPTS ]; do
@@ -38,7 +40,7 @@ function apply_oc_patch() {
echo "========================================="
-check_oc_cli
-apply_oc_patch
+check_kubectl_cli
+apply_kubectl_patch
sleep 30
echo "========================================="
diff --git a/modules/kube-audit/scripts/set_webhook.sh b/modules/kube-audit/scripts/set_webhook.sh
index 483567aec..6b0c6d196 100755
--- a/modules/kube-audit/scripts/set_webhook.sh
+++ b/modules/kube-audit/scripts/set_webhook.sh
@@ -9,10 +9,12 @@ CLUSTER_ID="$4"
RESOURCE_GROUP_ID="$5"
POLICY="$6"
+# The binaries downloaded by the install-binaries script are located in the directory passed as the seventh argument (defaults to /tmp).
+export PATH=$PATH:${7:-"/tmp"}
+
get_cloud_endpoint() {
iam_cloud_endpoint="${IBMCLOUD_IAM_API_ENDPOINT:-"iam.cloud.ibm.com"}"
IBMCLOUD_IAM_API_ENDPOINT=${iam_cloud_endpoint#https://}
-
cs_api_endpoint="${IBMCLOUD_CS_API_ENDPOINT:-"containers.cloud.ibm.com"}"
cs_api_endpoint=${cs_api_endpoint#https://}
IBMCLOUD_CS_API_ENDPOINT=${cs_api_endpoint%/global}
diff --git a/modules/kube-audit/variables.tf b/modules/kube-audit/variables.tf
index d8f713004..de06dd205 100644
--- a/modules/kube-audit/variables.tf
+++ b/modules/kube-audit/variables.tf
@@ -102,3 +102,10 @@ variable "audit_webhook_listener_image_tag_digest" {
error_message = "The value of the audit webhook listener image version must match the tag and sha256 image digest format"
}
}
+
+variable "install_required_binaries" {
+ type = bool
+ default = true
+ description = "When set to true, a script will run to check if `kubectl` and `jq` exist on the runtime and if not attempt to download them from the public internet and install them to /tmp. Set to false to skip running this script."
+ nullable = false
+}
diff --git a/scripts/confirm_lb_active.sh b/scripts/confirm_lb_active.sh
deleted file mode 100755
index 2fd4ae6b1..000000000
--- a/scripts/confirm_lb_active.sh
+++ /dev/null
@@ -1,52 +0,0 @@
-#!/bin/bash
-
-set -euo pipefail
-
-REGION="$1"
-LB_ID="$2"
-PRIVATE_ENV="$3"
-API_VERSION="2024-03-01"
-
-if [[ -z "${REGION}" ]]; then
- echo "Region must be passed as first input script argument" >&2
- exit 1
-fi
-
-get_cloud_endpoint() {
- cloud_endpoint="${IBMCLOUD_IS_NG_API_ENDPOINT:-"iaas.cloud.ibm.com"}"
- IBMCLOUD_IS_NG_API_ENDPOINT=${cloud_endpoint#https://}
-}
-
-get_cloud_endpoint
-
-lb_attempts=1
-if [ "$IBMCLOUD_IS_NG_API_ENDPOINT" = "iaas.cloud.ibm.com" ]; then
- if [ "$PRIVATE_ENV" = true ]; then
- URL="https://$REGION.private.$IBMCLOUD_IS_NG_API_ENDPOINT/v1/load_balancers/$LB_ID?version=$API_VERSION&generation=2"
- else
- URL="https://$REGION.$IBMCLOUD_IS_NG_API_ENDPOINT/v1/load_balancers/$LB_ID?version=$API_VERSION&generation=2"
- fi
-else
- URL="https://$IBMCLOUD_IS_NG_API_ENDPOINT/v1/load_balancers/$LB_ID?version=$API_VERSION&generation=2"
-fi
-
-while true; do
- STATUS=$(curl -H "Authorization: $IAM_TOKEN" -X GET "$URL" | jq -r '.operating_status')
- echo "Load balancer status: $STATUS"
- if [[ "$STATUS" == "online" ]]; then
- sleep 300
- STATUS=$(curl -H "Authorization: $IAM_TOKEN" -X GET "$URL" | jq -r '.operating_status')
- if [[ "$STATUS" == "online" ]]; then
- break
- fi
- else
- lb_attempts=$((lb_attempts + 1))
- if [ $lb_attempts -ge 10 ]; then
- echo "Load balancer status: $STATUS"
- break
- fi
- echo "Sleeping for 30 secs.."
- sleep 30
- fi
- STATUS=""
-done
diff --git a/scripts/confirm_network_healthy.sh b/scripts/confirm_network_healthy.sh
index 35c4bdfd4..ebdd46f53 100755
--- a/scripts/confirm_network_healthy.sh
+++ b/scripts/confirm_network_healthy.sh
@@ -2,6 +2,9 @@
set -e
+# The binaries downloaded by the install-binaries script are located in the directory passed as the first argument (defaults to /tmp).
+export PATH=$PATH:${1:-"/tmp"}
+
function run_checks() {
last_attempt=$1
diff --git a/scripts/enable_disable_ocp_console.sh b/scripts/enable_disable_ocp_console.sh
index c35a85e4a..c61a8f657 100755
--- a/scripts/enable_disable_ocp_console.sh
+++ b/scripts/enable_disable_ocp_console.sh
@@ -2,19 +2,21 @@
set -euo pipefail
+# The binaries downloaded by the install-binaries script are located in the directory passed as the first argument (defaults to /tmp).
+export PATH=$PATH:${1:-"/tmp"}
-PATCH_APPLY="oc patch consoles.operator.openshift.io cluster --patch '{\"spec\":{\"managementState\":\"Managed\"}}' --type=merge"
-PATCH_REMOVE="oc patch consoles.operator.openshift.io cluster --patch '{\"spec\":{\"managementState\":\"Removed\"}}' --type=merge"
+PATCH_APPLY="kubectl patch consoles.operator.openshift.io cluster --patch '{\"spec\":{\"managementState\":\"Managed\"}}' --type=merge"
+PATCH_REMOVE="kubectl patch consoles.operator.openshift.io cluster --patch '{\"spec\":{\"managementState\":\"Removed\"}}' --type=merge"
MAX_ATTEMPTS=10
-function check_oc_cli() {
- if ! command -v oc &> /dev/null; then
- echo "Error: OpenShift CLI (oc) is not installed. Exiting."
+function check_kubectl_cli() {
+ if ! command -v kubectl &> /dev/null; then
+ echo "Error: kubectl is not installed. Exiting."
exit 1
fi
}
-function apply_oc_patch() {
+function apply_kubectl_patch() {
local attempt=0
local retry_wait_time=5
@@ -36,7 +38,7 @@ function apply_oc_patch() {
exit 1
}
-function remove_oc_patch() {
+function remove_kubectl_patch() {
local attempt=0
local retry_wait_time=5
@@ -65,14 +67,14 @@ if [[ -z "${ENABLE_OCP_CONSOLE:-}" ]]; then
exit 1
fi
-check_oc_cli
+check_kubectl_cli
if [ "${ENABLE_OCP_CONSOLE}" == "true" ]; then
echo "Enabling the OpenShift Console"
- apply_oc_patch
+ apply_kubectl_patch
else
echo "Disabling the OpenShift Console"
- remove_oc_patch
+ remove_kubectl_patch
fi
echo "========================================="
diff --git a/scripts/get_config_map_status.sh b/scripts/get_config_map_status.sh
index 4f9362beb..8e890c76b 100755
--- a/scripts/get_config_map_status.sh
+++ b/scripts/get_config_map_status.sh
@@ -2,6 +2,9 @@
set -e
+# The binaries downloaded by the install-binaries script are located in the directory passed as the first argument (defaults to /tmp).
+export PATH=$PATH:${1:-"/tmp"}
+
CONFIGMAP_NAME="iks-ca-configmap"
NAMESPACE="kube-system"
COUNTER=0
diff --git a/tests/pr_test.go b/tests/pr_test.go
index 9b58f2a3f..bc4c7ea71 100644
--- a/tests/pr_test.go
+++ b/tests/pr_test.go
@@ -102,6 +102,7 @@ func setupQuickstartOptions(t *testing.T, prefix string) *testschematic.TestSche
"*.tf",
quickStartTerraformDir + "/*.tf", "scripts/*.sh", "kubeconfig/README.md",
"modules/worker-pool/*.tf",
+ "modules/kube-audit/scripts/*.sh",
},
TemplateFolder: quickStartTerraformDir,
Tags: []string{"test-schematic"},
diff --git a/variables.tf b/variables.tf
index 3d300c264..6aaea3c68 100644
--- a/variables.tf
+++ b/variables.tf
@@ -472,3 +472,10 @@ variable "skip_ocp_secrets_manager_iam_auth_policy" {
description = "To skip creating auth policy that allows OCP cluster 'Manager' role access in the existing Secrets Manager instance for managing ingress certificates."
default = false
}
+
+variable "install_required_binaries" {
+ type = bool
+ default = true
+ description = "When set to true, a script will run to check if `kubectl` and `jq` exist on the runtime and if not attempt to download them from the public internet and install them to /tmp. Set to false to skip running this script."
+ nullable = false
+}