diff --git a/modules/common/kubeblocks-crd/standard/1.0/main.tf b/modules/common/kubeblocks-crd/standard/1.0/main.tf
index 010668cd..d153441c 100644
--- a/modules/common/kubeblocks-crd/standard/1.0/main.tf
+++ b/modules/common/kubeblocks-crd/standard/1.0/main.tf
@@ -13,16 +13,24 @@ data "http" "kubeblocks_crds" {
 # Split the multi-document YAML into individual CRDs
 locals {
   crds_yaml = data.http.kubeblocks_crds.response_body
-  # Split by document separator and filter out empty documents
-  crd_documents = [for doc in split("\n---\n", local.crds_yaml) : trimspace(doc) if trimspace(doc) != ""]
-  crds_count = length(local.crd_documents)
+
+  crd_documents = [for doc in split("\n---\n", local.crds_yaml) : yamldecode(doc) if trimspace(doc) != ""]
+
+  # Key by CRD metadata.name (stable & unique)
+  crds = {
+    for crd in local.crd_documents :
+    crd.metadata.name => crd
+  }
+
+  crds_count = length(local.crds)
 }
 
 # Apply each CRD using kubernetes_manifest
 resource "kubernetes_manifest" "kubeblocks_crds" {
-  for_each = { for idx, doc in local.crd_documents : idx => doc }
+  for_each = local.crds
 
-  manifest = sensitive(yamldecode(each.value))
+  # Mark manifest as sensitive to hide CRD content from plan output
+  manifest = sensitive(each.value)
 
   field_manager {
     name = "terraform"
@@ -42,11 +50,16 @@ resource "kubernetes_manifest" "kubeblocks_crds" {
     "metadata.finalizers",
     "metadata.generation",
     "metadata.resourceVersion",
+    "metadata.uid",
     "status"
   ]
 
   lifecycle {
-    prevent_destroy = false # Explicitly allow destruction (this is default)
+    prevent_destroy = false
+    ignore_changes = [
+      manifest,
+      object
+    ]
   }
 
   timeouts {
@@ -55,6 +68,15 @@ resource "kubernetes_manifest" "kubeblocks_crds" {
   }
 }
 
+# Time sleep resource to ensure proper cleanup during destroy
+# This gives extra time for any remaining custom resources to be deleted before CRDs are removed
+resource "time_sleep" "wait_for_cleanup" {
+  destroy_duration = "120s"
+
+
+  # Provides a 120s buffer at the start of CRD module destruction
+}
+
 # Generate a unique release_id for dependency tracking
 # This will be used by kubeblocks-operator module to ensure CRDs are installed first
 resource "random_uuid" "release_id" {
diff --git a/modules/common/kubeblocks-operator/standard/1.0/main.tf b/modules/common/kubeblocks-operator/standard/1.0/main.tf
index fc77fc40..1de4cdf7 100644
--- a/modules/common/kubeblocks-operator/standard/1.0/main.tf
+++ b/modules/common/kubeblocks-operator/standard/1.0/main.tf
@@ -102,10 +102,21 @@ resource "helm_release" "kubeblocks" {
 }
 
 resource "time_sleep" "wait_for_kubeblocks" {
-  create_duration = "120s" # Wait 2 minutes
+  create_duration = "120s"
 
   depends_on = [helm_release.kubeblocks]
 }
+
+# Time sleep resource to ensure proper cleanup during destroy
+# This gives custom resources time to be deleted before operator is removed
+resource "time_sleep" "wait_for_cleanup" {
+  # Sleep BEFORE destroying the operator to allow custom resources to clean up
+  destroy_duration = "120s"
+
+  depends_on = [
+    helm_release.database_addons
+  ]
+}
+
 # Database Addons Installation
 # Install database addons as Terraform-managed Helm releases
 # This ensures proper lifecycle management and clean teardown
@@ -170,6 +181,18 @@ resource "helm_release" "database_addons" {
   atomic          = true # Rollback on failure
   cleanup_on_fail = true # Remove failed resources to allow retries
 
+  # CRITICAL: Disable resource retention policy to allow clean deletion
+  # This removes 'helm.sh/resource-policy: keep' annotation from ComponentDefinitions, ConfigMaps, etc.
+  # Without this, resources are kept after Helm uninstall, blocking CRD deletion
+  # Reference: https://kubeblocks.io/docs/preview/user_docs/references/install-addons
+  values = [
+    yamlencode({
+      extra = {
+        keepResource = false
+      }
+    })
+  ]
+
   # Ensure operator is fully deployed before installing addons
   depends_on = [
     helm_release.kubeblocks,