From be21a5d6af803026361650e33cd05ff44d76afde Mon Sep 17 00:00:00 2001 From: Natalia Marukovich Date: Sat, 11 Feb 2023 13:32:11 +0400 Subject: [PATCH 1/3] add new tags --- cloud/jenkins/psmdb_operator_eks_latest.groovy | 1 + cloud/jenkins/psmdb_operator_eks_version.groovy | 1 + cloud/jenkins/pxc_operator_eks_latest.groovy | 1 + cloud/jenkins/pxc_operator_eks_version.groovy | 1 + 4 files changed, 4 insertions(+) diff --git a/cloud/jenkins/psmdb_operator_eks_latest.groovy b/cloud/jenkins/psmdb_operator_eks_latest.groovy index aa583af752..3a819fef28 100644 --- a/cloud/jenkins/psmdb_operator_eks_latest.groovy +++ b/cloud/jenkins/psmdb_operator_eks_latest.groovy @@ -41,6 +41,7 @@ nodeGroups: tags: 'iit-billing-tag': 'jenkins-eks' 'delete-cluster-after-hours': '10' + 'team': 'cloud' EOF """ diff --git a/cloud/jenkins/psmdb_operator_eks_version.groovy b/cloud/jenkins/psmdb_operator_eks_version.groovy index 0dc6637a23..7dd1975b61 100644 --- a/cloud/jenkins/psmdb_operator_eks_version.groovy +++ b/cloud/jenkins/psmdb_operator_eks_version.groovy @@ -41,6 +41,7 @@ nodeGroups: tags: 'iit-billing-tag': 'jenkins-eks' 'delete-cluster-after-hours': '10' + 'team': 'cloud' EOF """ diff --git a/cloud/jenkins/pxc_operator_eks_latest.groovy b/cloud/jenkins/pxc_operator_eks_latest.groovy index 292acabbf1..17027098d4 100644 --- a/cloud/jenkins/pxc_operator_eks_latest.groovy +++ b/cloud/jenkins/pxc_operator_eks_latest.groovy @@ -41,6 +41,7 @@ nodeGroups: tags: 'iit-billing-tag': 'jenkins-eks' 'delete-cluster-after-hours': '10' + 'team': 'cloud' EOF """ diff --git a/cloud/jenkins/pxc_operator_eks_version.groovy b/cloud/jenkins/pxc_operator_eks_version.groovy index ca96f78e5d..4b1675674c 100644 --- a/cloud/jenkins/pxc_operator_eks_version.groovy +++ b/cloud/jenkins/pxc_operator_eks_version.groovy @@ -41,6 +41,7 @@ nodeGroups: tags: 'iit-billing-tag': 'jenkins-eks' 'delete-cluster-after-hours': '10' + 'team': 'cloud' EOF """ From 236c0e0d75bb5a596d6b7c6f9cfbec2d025fb78e Mon Sep 17 00:00:00 2001 From: Natalia Marukovich Date: Tue, 28 Oct 2025 13:29:55 +0100 Subject: [PATCH 2/3] fix fw rules deletion --- cloud/gcp-functions/cmd/orphanedResources.go | 136 +++++++++++++++---- 1 file changed, 112 insertions(+), 24 deletions(-) diff --git a/cloud/gcp-functions/cmd/orphanedResources.go b/cloud/gcp-functions/cmd/orphanedResources.go index 5e537382fd..1f4377c39c 100644 --- a/cloud/gcp-functions/cmd/orphanedResources.go +++ b/cloud/gcp-functions/cmd/orphanedResources.go @@ -11,69 +11,157 @@ import ( "google.golang.org/api/compute/v1" ) -func CleanOrphanedResources(w http.ResponseWriter, r *http.Request) { +// firewallNamePatterns are substrings we consider relevant for deletion candidates. +// You can adjust these patterns to your naming conventions. 
+var firewallNamePatterns = []string{"gke-", "-pxc-", "-psmdb-", "-ps-", "-pg-", "-jen-", "k8s-"} + +func matchesFirewallPattern(name string) bool { + for _, p := range firewallNamePatterns { + if strings.Contains(name, p) { + return true + } + } + return false +} +func CleanOrphanedResources(w http.ResponseWriter, r *http.Request) { ctx := context.Background() computeService, err := compute.NewService(ctx) if err != nil { - log.Fatal("Error: create service", err) + log.Fatalf("Error creating compute service: %v", err) } project := os.Getenv("GCP_DEV_PROJECT") - targetPoolAggregatedList, err := computeService.TargetPools.AggregatedList(project).Do() + if project == "" { + log.Fatalf("GCP_DEV_PROJECT is not set") + } + + firewallsList, err := computeService.Firewalls.List(project).Context(ctx).Do() + if err != nil { + log.Fatalf("Error listing firewalls: %v", err) + } + + firewallByName := make(map[string]*compute.Firewall) + for _, fw := range firewallsList.Items { + firewallByName[fw.Name] = fw + } + // Get aggregated target pools + targetPoolAggregatedList, err := computeService.TargetPools.AggregatedList(project).Context(ctx).Do() if err != nil { - log.Fatal("Error: get targetPoolAggregatedList", err) + log.Fatalf("Error getting target pool aggregated list: %v", err) } - toDelete := false for _, targetPoolList := range targetPoolAggregatedList.Items { for _, targetPoolItem := range targetPoolList.TargetPools { + // Reset toDelete per target pool + toDelete := false + region := strings.Split(targetPoolItem.Region, "/")[8] + // If pool has instances, check their existence. If instance missing -> mark toDelete. if len(targetPoolItem.Instances) > 0 { instanceName := strings.Split(targetPoolItem.Instances[0], "/")[10] zone := strings.Split(targetPoolItem.Instances[0], "/")[8] _, err := computeService.Instances.Get(project, zone, instanceName).Context(ctx).Do() - if err != nil && strings.Contains(err.Error(), "404") { - toDelete = true + if err != nil { + // If instance not found (404), mark target pool for deletion. + if strings.Contains(err.Error(), "404") { + toDelete = true + } else { + // For other errors, log and continue to next item + log.Printf("Error checking instance %s in zone %s: %v", instanceName, zone, err) + continue + } } } - if len(targetPoolItem.Instances) == 0 || toDelete == true { + // If pool is empty or flagged for deletion, find related resources to remove + if len(targetPoolItem.Instances) == 0 || toDelete { + // Collect firewall rules that are candidate to be deleted for this target pool. + // We look for firewalls that either contain the pool name or match our name patterns. 
+				candidates := []string{}
+				for name, fw := range firewallByName {
+					// Candidate if name contains targetPool name OR matches patterns
+					if strings.Contains(name, targetPoolItem.Name) || matchesFirewallPattern(name) {
+						if len(fw.TargetTags) == 0 {
+							continue
+						}
+						// If any tag contains the pool name, mark candidate
+						foundTag := false
+						for _, tag := range fw.TargetTags {
+							if strings.Contains(tag, targetPoolItem.Name) || matchesFirewallPattern(tag) {
+								foundTag = true
+								break
+							}
+						}
+						if foundTag {
+							candidates = append(candidates, name)
+						}
+					}
+				}
 
-			// Firewall-rule deleting
-			firewallRuleId := fmt.Sprintf("k8s-fw-%s", targetPoolItem.Name)
+			// For each candidate firewall rule verify it's not used by any instance
+			for _, fwName := range candidates {
+				fw := firewallByName[fwName]
+				used := false
+				// For each target tag, check if any instance uses it in the project
+				for _, tag := range fw.TargetTags {
+					// list instances filtered by tag
+					// Here we use Instances.AggregatedList and filter manually.
+					instancesAgg, err := computeService.Instances.AggregatedList(project).Filter(fmt.Sprintf("tags.items=%s", tag)).Context(ctx).Do()
+					if err != nil {
+						log.Printf("Error listing instances for tag %s: %v", tag, err)
+						used = true
+						break
+					}
+					for _, il := range instancesAgg.Items {
+						if len(il.Instances) > 0 {
+							used = true
+							break
+						}
+					}
+					if used {
+						break
+					}
+				}
 
-			respFirewall, err := computeService.Firewalls.Delete(project, firewallRuleId).Context(ctx).Do()
-			if err != nil {
-				log.Printf("We can't delete firewallRuleId: %v", err)
-			} else {
-				log.Printf("firewall-rule deleted: k8s-fw- %s was deleted with status %s\n", targetPoolItem.Name, respFirewall.Status)
+				if used {
+					log.Printf("Skipping firewall %s: target tags are in use", fwName)
+					continue
+				}
+
+				// Safe to delete firewall rule
+				_, err := computeService.Firewalls.Delete(project, fwName).Context(ctx).Do()
+				if err != nil {
+					log.Printf("Failed to delete firewall %s: %v", fwName, err)
+				} else {
+					log.Printf("Firewall %s deleted", fwName)
+				}
 			}
 
-			// Forwarding-rule deleting
+			// Delete forwarding rule (if exists)
 			respFWRule, err := computeService.ForwardingRules.Delete(project, region, targetPoolItem.Name).Context(ctx).Do()
 			if err != nil {
-				log.Printf("We can't delete Forwarding-rule: %v", err)
+				log.Printf("Can't delete Forwarding rule %s in %s: %v", targetPoolItem.Name, region, err)
 			} else {
-				log.Printf("forwarding-rule deleted: %s was deleted with status %s\n", targetPoolItem.Name, respFWRule.Status)
+				log.Printf("forwarding-rule deleted: %s status %s", targetPoolItem.Name, respFWRule.Status)
 			}
 
-			// Address deleting
+			// Delete address (if exists)
 			respAdd, err := computeService.Addresses.Delete(project, region, targetPoolItem.Name).Context(ctx).Do()
 			if err != nil {
-				log.Printf("We can't delete Address: %v", err)
+				log.Printf("Can't delete Address %s in %s: %v", targetPoolItem.Name, region, err)
 			} else {
-				log.Printf("address deleted: %s was deleted with status %s\n", targetPoolItem.Name, respAdd.Status)
+				log.Printf("address deleted: %s status %s", targetPoolItem.Name, respAdd.Status)
 			}
 
-			// Target pool deleting
+			// Delete target pool
 			respTP, err := computeService.TargetPools.Delete(project, region, targetPoolItem.Name).Context(ctx).Do()
 			if err != nil {
-				log.Printf("We can't delete target pool: %v", err)
+				log.Printf("Can't delete target pool %s in %s: %v", targetPoolItem.Name, region, err)
 			} else {
-				log.Printf("Target-pool deleted: %s was deleted with status %s\n", targetPoolItem.Name, respTP.Status)
+				log.Printf("Target-pool deleted: %s status %s", targetPoolItem.Name, respTP.Status)
 			}
 		}
 	}
 }

From b352d8fd079f10a9eff07e96898d6c3f4343680d Mon Sep 17 00:00:00 2001
From: Natalia Marukovich
Date: Thu, 30 Oct 2025 13:01:09 +0100
Subject: [PATCH 3/3] fix PR

---
 cloud/gcp-functions/cmd/orphanedResources.go | 247 +++++++++++--------
 1 file changed, 140 insertions(+), 107 deletions(-)

diff --git a/cloud/gcp-functions/cmd/orphanedResources.go b/cloud/gcp-functions/cmd/orphanedResources.go
index 1f4377c39c..1675edd477 100644
--- a/cloud/gcp-functions/cmd/orphanedResources.go
+++ b/cloud/gcp-functions/cmd/orphanedResources.go
@@ -2,7 +2,6 @@ package orphanedResources
 
 import (
 	"context"
-	"fmt"
 	"log"
 	"net/http"
 	"os"
@@ -11,19 +10,24 @@ import (
 	"google.golang.org/api/compute/v1"
 )
 
-// firewallNamePatterns are substrings we consider relevant for deletion candidates.
-// You can adjust these patterns to your naming conventions.
-var firewallNamePatterns = []string{"gke-", "-pxc-", "-psmdb-", "-ps-", "-pg-", "-jen-", "k8s-"}
-
 func matchesFirewallPattern(name string) bool {
-	for _, p := range firewallNamePatterns {
-		if strings.Contains(name, p) {
+	if strings.HasPrefix(name, "gke-jen") || strings.HasPrefix(name, "k8s-") {
+		return true
+	}
+	return false
+}
+
+func tagUsed(usedTags map[string]struct{}, tags []string) bool {
+	for _, t := range tags {
+		if _, ok := usedTags[t]; ok {
 			return true
 		}
 	}
 	return false
 }
 
+// CleanOrphanedResources deletes orphan TargetPools (+ related ForwardingRules/Addresses/firewalls)
+// and also deletes dangling firewall rules whose target tags are unused.
 func CleanOrphanedResources(w http.ResponseWriter, r *http.Request) {
 	ctx := context.Background()
 	computeService, err := compute.NewService(ctx)
@@ -36,134 +40,163 @@ func CleanOrphanedResources(w http.ResponseWriter, r *http.Request) {
 		log.Fatalf("GCP_DEV_PROJECT is not set")
 	}
 
-	firewallsList, err := computeService.Firewalls.List(project).Context(ctx).Do()
+	// 1) Fetch firewalls (index by name)
+	fwList, err := computeService.Firewalls.List(project).Context(ctx).Do()
 	if err != nil {
 		log.Fatalf("Error listing firewalls: %v", err)
 	}
-
-	firewallByName := make(map[string]*compute.Firewall)
-	for _, fw := range firewallsList.Items {
+	firewallByName := make(map[string]*compute.Firewall, len(fwList.Items))
+	for _, fw := range fwList.Items {
 		firewallByName[fw.Name] = fw
 	}
-	// Get aggregated target pools
-	targetPoolAggregatedList, err := computeService.TargetPools.AggregatedList(project).Context(ctx).Do()
+
+	// 2) Build a set of all in-use tags from all instances
+	usedTags := make(map[string]struct{}, 1024)
+	instAgg, err := computeService.Instances.AggregatedList(project).Context(ctx).Do()
+	if err != nil {
+		// Abort rather than continue: with an empty usedTags set every tag
+		// would look unused, and in-use firewalls could be deleted below.
+		log.Fatalf("Error aggregating instances (building usedTags): %v", err)
+	}
+	for _, il := range instAgg.Items {
+		for _, ins := range il.Instances {
+			// Instances without network tags have a nil Tags field.
+			if ins.Tags == nil {
+				continue
+			}
+			for _, t := range ins.Tags.Items {
+				usedTags[t] = struct{}{}
+			}
+		}
+	}
+	log.Printf("Collected %d used tags", len(usedTags))
+
+	// 3) Fetch aggregated TargetPools
+	tpAgg, err := computeService.TargetPools.AggregatedList(project).Context(ctx).Do()
 	if err != nil {
 		log.Fatalf("Error getting target pool aggregated list: %v", err)
 	}
 
+	deletedAny := false
+	deletedFW := make(map[string]struct{}) // remember deleted firewall names to avoid double-delete later
 
-	for _, targetPoolList := range targetPoolAggregatedList.Items {
-		for _, targetPoolItem := range targetPoolList.TargetPools {
-			// Reset toDelete per target pool
-			toDelete := false
-
-
region := strings.Split(targetPoolItem.Region, "/")[8] - - // If pool has instances, check their existence. If instance missing -> mark toDelete. - if len(targetPoolItem.Instances) > 0 { - instanceName := strings.Split(targetPoolItem.Instances[0], "/")[10] - zone := strings.Split(targetPoolItem.Instances[0], "/")[8] - _, err := computeService.Instances.Get(project, zone, instanceName).Context(ctx).Do() - if err != nil { - // If instance not found (404), mark target pool for deletion. - if strings.Contains(err.Error(), "404") { - toDelete = true - } else { - // For other errors, log and continue to next item - log.Printf("Error checking instance %s in zone %s: %v", instanceName, zone, err) - continue - } + // 4) Delete orphan TargetPools and their related resources and related firewalls if tags unused + for _, tpList := range tpAgg.Items { + for _, tp := range tpList.TargetPools { + region := strings.Split(tp.Region, "/")[8] + + toDelete := false + // orphan if no instances OR the first instance ref returns 404 + if len(tp.Instances) == 0 { + toDelete = true + } else { + instanceName := strings.Split(tp.Instances[0], "/")[10] + zone := strings.Split(tp.Instances[0], "/")[8] + + _, gerr := computeService.Instances.Get(project, zone, instanceName).Context(ctx).Do() + if gerr != nil && strings.Contains(gerr.Error(), "404") { + toDelete = true + } + if gerr != nil && !strings.Contains(gerr.Error(), "404") { + log.Printf("Error checking instance %s in %s: %v", instanceName, zone, gerr) + continue } } - // If pool is empty or flagged for deletion, find related resources to remove - if len(targetPoolItem.Instances) == 0 || toDelete { - // Collect firewall rules that are candidate to be deleted for this target pool. - // We look for firewalls that either contain the pool name or match our name patterns. - candidates := []string{} - for name, fw := range firewallByName { - // Candidate if name contains targetPool name OR matches patterns - if strings.Contains(name, targetPoolItem.Name) || matchesFirewallPattern(name) { - if len(fw.TargetTags) == 0 { - continue - } - // If any tag contains the pool name, mark candidate - foundTag := false - for _, tag := range fw.TargetTags { - if strings.Contains(tag, targetPoolItem.Name) || matchesFirewallPattern(tag) { - foundTag = true - break - } - } - if foundTag { - candidates = append(candidates, name) - } - } - } + if !toDelete { + continue + } - // For each candidate firewall rule verify it's not used by any instance - for _, fwName := range candidates { - fw := firewallByName[fwName] - used := false - // For each target tag, check if any instance uses it in the project + // Build candidate firewalls to delete that are "related" to this TP + var candidates []string + for name, fw := range firewallByName { + if fw == nil { + continue + } + if strings.Contains(name, tp.Name) || matchesFirewallPattern(name) { + if len(fw.TargetTags) == 0 { + candidates = append(candidates, name) + continue + } + // only consider if FW tags "look related" to this TP + related := false for _, tag := range fw.TargetTags { - // list instances filtered by tag - // Here we use Instances.AggregatedList and filter manually. 
- instancesAgg, err := computeService.Instances.AggregatedList(project).Filter(fmt.Sprintf("tags.items=%s", tag)).Context(ctx).Do() - if err != nil { - log.Printf("Error listing instances for tag %s: %v", tag, err) - used = true - break - } - for _, il := range instancesAgg.Items { - if len(il.Instances) > 0 { - used = true - break - } - } - if used { + if strings.Contains(tag, tp.Name) || matchesFirewallPattern(tag) { + related = true break } } - - if used { - log.Printf("Skipping firewall %s: target tags are in use", fwName) - continue + if related { + candidates = append(candidates, name) } + } + } - // Safe to delete firewall rule - _, err := computeService.Firewalls.Delete(project, fwName).Context(ctx).Do() - if err != nil { - log.Printf("Failed to delete firewall %s: %v", fwName, err) + // Delete candidate firewalls when tags are NOT used + for _, fname := range candidates { + fw := firewallByName[fname] + if fw == nil { + continue + } + if len(fw.TargetTags) == 0 || !tagUsed(usedTags, fw.TargetTags) { + if _, derr := computeService.Firewalls.Delete(project, fname).Context(ctx).Do(); derr != nil { + log.Printf("Failed to delete firewall %s: %v", fname, derr) } else { - log.Printf("Firewall %s deleted", fwName) + deletedAny = true + deletedFW[fname] = struct{}{} + firewallByName[fname] = nil } - } - - // Delete forwarding rule (if exists) - respFWRule, err := computeService.ForwardingRules.Delete(project, region, targetPoolItem.Name).Context(ctx).Do() - if err != nil { - log.Printf("Can't delete Forwarding rule %s in %s: %v", targetPoolItem.Name, region, err) } else { - log.Printf("forwarding-rule deleted: %s status %s", targetPoolItem.Name, respFWRule.Status) + log.Printf("Skipping firewall %s: target tags are still in use\n", fname) } + } - // Delete address (if exists) - respAdd, err := computeService.Addresses.Delete(project, region, targetPoolItem.Name).Context(ctx).Do() - if err != nil { - log.Printf("Can't delete Address %s in %s: %v", targetPoolItem.Name, region, err) - } else { - log.Printf("address deleted: %s status %s", targetPoolItem.Name, respAdd.Status) - } + // Delete ForwardingRule + if _, err := computeService.ForwardingRules.Delete(project, region, tp.Name).Context(ctx).Do(); err != nil { + log.Printf("Can't delete Forwarding rule %s in %s: %v", tp.Name, region, err) + } else { + log.Printf("Forwarding-rule deleted: %s\n", tp.Name) + deletedAny = true + } - // Delete target pool - respTP, err := computeService.TargetPools.Delete(project, region, targetPoolItem.Name).Context(ctx).Do() - if err != nil { - log.Printf("Can't delete target pool %s in %s: %v", targetPoolItem.Name, region, err) - } else { - log.Printf("Target-pool deleted: %s status %s", targetPoolItem.Name, respTP.Status) - } + // Delete Address + if _, err := computeService.Addresses.Delete(project, region, tp.Name).Context(ctx).Do(); err != nil { + log.Printf("Can't delete Address %s in %s: %v", tp.Name, region, err) + } else { + log.Printf("Address deleted: %s\n", tp.Name) + deletedAny = true + } + + // Delete the TargetPool + if _, err := computeService.TargetPools.Delete(project, region, tp.Name).Context(ctx).Do(); err != nil { + log.Printf("Can't delete target pool %s in %s: %v", tp.Name, region, err) + } else { + log.Printf("Target-pool deleted: %s\n", tp.Name) + deletedAny = true + } + } + } + + // 5) Delete any remaining dangling firewalls + // that match our patterns and have unused tags. This catches cases with no orphan TP trigger. 
+	for name, fw := range firewallByName {
+		if fw == nil {
+			continue
+		}
+		if !matchesFirewallPattern(name) {
+			continue
+		}
+		// dangling if the rule has no target tags, or none of its tags are in use
+		if len(fw.TargetTags) == 0 || !tagUsed(usedTags, fw.TargetTags) {
+			if _, derr := computeService.Firewalls.Delete(project, name).Context(ctx).Do(); derr != nil {
+				log.Printf("Failed to delete dangling firewall %s: %v", name, derr)
+			} else {
+				log.Printf("Dangling firewall %s deleted\n", name)
+				deletedAny = true
+				firewallByName[name] = nil
+			}
+		}
+	}
+
+	if !deletedAny {
+		log.Println("No orphaned resources found or deleted")
+	}
+
+	w.WriteHeader(http.StatusOK)
 }
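
A quick way to smoke-test CleanOrphanedResources before deploying it as a GCP Function is a small local HTTP harness along these lines. This is a minimal sketch: the module import path and port are illustrative assumptions, and it expects GCP_DEV_PROJECT to be set plus Application Default Credentials to be available (e.g. via `gcloud auth application-default login`).

package main

import (
	"log"
	"net/http"

	// Assumed import path; adjust to this repository's real Go module name.
	orphanedResources "github.com/percona/jenkins-pipelines/cloud/gcp-functions/cmd"
)

func main() {
	// CleanOrphanedResources reads GCP_DEV_PROJECT from the environment and
	// authenticates through Application Default Credentials.
	http.HandleFunc("/clean", orphanedResources.CleanOrphanedResources)
	log.Println("listening on :8080; request /clean to run the cleanup once")
	log.Fatal(http.ListenAndServe(":8080", nil))
}

Running it against a dev project and watching the logs makes it easy to verify that only firewalls with unused target tags are deleted before the function is scheduled for real.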