From 674ac3af02e5e4a9d47890a75266c999a2bd8438 Mon Sep 17 00:00:00 2001 From: Nupur Goyal Date: Fri, 11 Apr 2025 12:41:08 +0530 Subject: [PATCH 1/2] Fixing vni resource group placement issue and custom image builder bug --- ibm_catalog.json | 15 +- modules/bootstrap/datasource.tf | 5 - modules/bootstrap/locals.tf | 2 - modules/bootstrap/main.tf | 4 +- modules/landing_zone/main.tf | 2 +- modules/landing_zone_vsi/datasource.tf | 10 -- modules/landing_zone_vsi/locals.tf | 3 - modules/landing_zone_vsi/main.tf | 20 +-- modules/landing_zone_vsi/template_files.tf | 2 +- modules/landing_zone_vsi/variables.tf | 2 +- solutions/hpc/datasource.tf | 10 ++ solutions/hpc/locals.tf | 3 +- solutions/hpc/main.tf | 6 +- solutions/hpc/version.tf | 2 +- tests/go.mod | 75 ++++----- tests/go.sum | 154 +++++++++--------- tools/image-builder/locals.tf | 16 +- tools/image-builder/main.tf | 16 +- tools/image-builder/template_files.tf | 4 +- .../templates/packer_user_data.tpl | 26 +-- tools/image-builder/variables.tf | 8 +- tools/image-builder/version.tf | 2 +- 22 files changed, 197 insertions(+), 190 deletions(-) diff --git a/ibm_catalog.json b/ibm_catalog.json index 64527b17..a03f5e3e 100644 --- a/ibm_catalog.json +++ b/ibm_catalog.json @@ -372,7 +372,18 @@ "key": "worker_node_max_count" }, { - "key": "worker_node_instance_type" + "key": "worker_node_instance_type", + "type": "array", + "default_value": "[\n {\n \"count\": 0,\n \"instance_type\": \"bx2-4x16\"\n },\n {\n \"count\": 0,\n \"instance_type\": \"cx2-8x16\"\n }\n]", + "required": false, + "custom_config": { + "type": "json_editor", + "grouping": "deployment", + "original_grouping": "deployment", + "config_constraints": { + "type": "mixed" + } + } }, { "key": "enable_dedicated_host" @@ -396,7 +407,6 @@ "key": "custom_file_shares", "type": "array", "default_value": "[\n {\n \"mount_path\": \"/mnt/vpcstorage/tools\",\n \"size\": 100,\n \"iops\": 2000\n },\n {\n \"mount_path\": \"/mnt/vpcstorage/data\",\n \"size\": 100,\n \"iops\": 6000\n },\n {\n \"mount_path\": \"/mnt/scale/tools\",\n \"nfs_share\": \"\"\n }\n]\n", - "display_name": "JSON", "required": false, "custom_config": { "type": "json_editor", @@ -485,7 +495,6 @@ { "key": "existing_bastion_ssh_private_key", "type": "multiline_secure_value", - "display_name": "Multiline secure value", "required": false, "custom_config": { "grouping": "deployment", diff --git a/modules/bootstrap/datasource.tf b/modules/bootstrap/datasource.tf index 5b171679..3c983a58 100644 --- a/modules/bootstrap/datasource.tf +++ b/modules/bootstrap/datasource.tf @@ -2,11 +2,6 @@ data "ibm_is_image" "bastion" { name = local.bastion_image_name } -data "ibm_is_ssh_key" "bastion" { - for_each = toset(var.ssh_keys) - name = each.key -} - # Existing Bastion details data "ibm_is_instance" "bastion_instance_name" { count = var.bastion_instance_name != null ? 
1 : 0 diff --git a/modules/bootstrap/locals.tf b/modules/bootstrap/locals.tf index 30156668..9de0ff38 100644 --- a/modules/bootstrap/locals.tf +++ b/modules/bootstrap/locals.tf @@ -30,8 +30,6 @@ locals { bastion_image_id = data.ibm_is_image.bastion.id - bastion_ssh_keys = [for name in var.ssh_keys : data.ibm_is_ssh_key.bastion[name].id] - bastion_sg_variable_cidr_list = var.network_cidr # Security group rules # TODO: Fix SG rules diff --git a/modules/bootstrap/main.tf b/modules/bootstrap/main.tf index e03d0acf..088e44cf 100644 --- a/modules/bootstrap/main.tf +++ b/modules/bootstrap/main.tf @@ -45,7 +45,7 @@ module "existing_bastion_sg_update" { module "bastion_vsi" { count = var.bastion_instance_name != null ? 0 : 1 source = "terraform-ibm-modules/landing-zone-vsi/ibm" - version = "4.5.0" + version = "5.0.0" vsi_per_subnet = 1 create_security_group = false security_group = null @@ -55,7 +55,7 @@ module "bastion_vsi" { resource_group_id = var.resource_group enable_floating_ip = true security_group_ids = module.bastion_sg[*].security_group_id - ssh_key_ids = local.bastion_ssh_keys + ssh_key_ids = var.ssh_keys subnets = length(var.bastion_subnets) == 2 ? [local.bastion_subnets[1]] : [local.bastion_subnets[0]] tags = local.tags user_data = data.template_file.bastion_user_data.rendered diff --git a/modules/landing_zone/main.tf b/modules/landing_zone/main.tf index d9a9cb7c..eb759c1f 100644 --- a/modules/landing_zone/main.tf +++ b/modules/landing_zone/main.tf @@ -1,7 +1,7 @@ module "landing_zone" { count = var.enable_landing_zone ? 1 : 0 source = "terraform-ibm-modules/landing-zone/ibm" - version = "6.6.3" + version = "7.4.3" prefix = local.prefix region = local.region tags = local.tags diff --git a/modules/landing_zone_vsi/datasource.tf b/modules/landing_zone_vsi/datasource.tf index 58b0c0de..28a006a9 100644 --- a/modules/landing_zone_vsi/datasource.tf +++ b/modules/landing_zone_vsi/datasource.tf @@ -13,11 +13,6 @@ data "ibm_is_image" "login" { count = local.login_image_mapping_entry_found ? 0 : 1 } -data "ibm_is_ssh_key" "compute" { - for_each = toset(var.compute_ssh_keys) - name = each.key -} - data "ibm_is_region" "region" { name = local.region } @@ -30,11 +25,6 @@ data "ibm_is_instance_profile" "worker_node" { name = var.worker_node_instance_type[0].instance_type } -data "ibm_is_ssh_key" "bastion" { - for_each = toset(var.ssh_keys) - name = each.key -} - data "ibm_is_image" "ldap_vsi_image" { name = var.ldap_vsi_osimage_name count = var.ldap_basedns != null && var.ldap_server == "null" ? 1 : 0 diff --git a/modules/landing_zone_vsi/locals.tf b/modules/landing_zone_vsi/locals.tf index 2cb0a638..70c5e79c 100644 --- a/modules/landing_zone_vsi/locals.tf +++ b/modules/landing_zone_vsi/locals.tf @@ -12,8 +12,6 @@ locals { login_node_name = format("%s-%s", local.prefix, "login") management_node_name = format("%s-%s", local.prefix, "mgmt") worker_node_name = format("%s-%s", local.prefix, "worker") - compute_ssh_keys = [for name in var.compute_ssh_keys : data.ibm_is_ssh_key.compute[name].id] - management_ssh_keys = local.compute_ssh_keys ldap_enable = var.enable_ldap == true && var.ldap_server == "null" ? 1 : 0 # enable_worker_vsi = var.solution == "lsf" && var.worker_node_min_count >= 0 ? var.worker_node_min_count : 0 # products = var.solution == "lsf" && var.enable_app_center ? 
"lsf,lsf-app-center" : "lsf" @@ -141,7 +139,6 @@ locals { rc_profile = data.ibm_is_instance_profile.worker_node.name bastion_subnets = var.bastion_subnets - bastion_ssh_keys = [for name in var.ssh_keys : data.ibm_is_ssh_key.bastion[name].id] ldap_server = var.enable_ldap == true && var.ldap_server == "null" ? length(module.ldap_vsi) > 0 ? var.ldap_primary_ip[0] : null : var.ldap_server ldap_server_cert = var.enable_ldap == true && var.ldap_server_cert != "null" ? var.ldap_server_cert : "null" ldap_instance_image_id = var.enable_ldap == true && var.ldap_server == "null" ? data.ibm_is_image.ldap_vsi_image[0].id : "null" diff --git a/modules/landing_zone_vsi/main.tf b/modules/landing_zone_vsi/main.tf index c7d8b30c..070410ee 100644 --- a/modules/landing_zone_vsi/main.tf +++ b/modules/landing_zone_vsi/main.tf @@ -57,7 +57,7 @@ module "nfs_storage_sg" { module "management_vsi" { count = 1 source = "terraform-ibm-modules/landing-zone-vsi/ibm" - version = "4.5.0" + version = "5.0.0" vsi_per_subnet = 1 create_security_group = false security_group = null @@ -67,7 +67,7 @@ module "management_vsi" { resource_group_id = var.resource_group enable_floating_ip = false security_group_ids = module.compute_sg[*].security_group_id - ssh_key_ids = local.management_ssh_keys + ssh_key_ids = var.compute_ssh_keys subnets = [local.compute_subnets[0]] tags = local.tags user_data = "${data.template_file.management_user_data.rendered} ${file("${path.module}/templates/lsf_management.sh")}" @@ -80,12 +80,12 @@ module "management_vsi" { module "management_candidate_vsi" { count = var.management_node_count - 1 source = "terraform-ibm-modules/landing-zone-vsi/ibm" - version = "4.5.0" + version = "5.0.0" create_security_group = false security_group = null security_group_ids = module.compute_sg[*].security_group_id vpc_id = var.vpc_id - ssh_key_ids = local.management_ssh_keys + ssh_key_ids = var.compute_ssh_keys subnets = [local.compute_subnets[0]] resource_group_id = var.resource_group enable_floating_ip = false @@ -103,7 +103,7 @@ module "management_candidate_vsi" { module "worker_vsi" { count = length(local.flattened_worker_nodes) source = "terraform-ibm-modules/landing-zone-vsi/ibm" - version = "4.5.0" + version = "5.0.0" vsi_per_subnet = 1 create_security_group = false security_group = null @@ -113,7 +113,7 @@ module "worker_vsi" { resource_group_id = var.resource_group enable_floating_ip = false security_group_ids = module.compute_sg[*].security_group_id - ssh_key_ids = local.management_ssh_keys + ssh_key_ids = var.compute_ssh_keys subnets = [local.compute_subnets[0]] tags = local.tags user_data = "${data.template_file.worker_user_data.rendered} ${file("${path.module}/templates/static_worker_vsi.sh")}" @@ -129,7 +129,7 @@ module "worker_vsi" { module "login_vsi" { # count = 1 source = "terraform-ibm-modules/landing-zone-vsi/ibm" - version = "4.5.0" + version = "5.0.0" vsi_per_subnet = 1 create_security_group = false security_group = null @@ -139,7 +139,7 @@ module "login_vsi" { resource_group_id = var.resource_group enable_floating_ip = false security_group_ids = [var.bastion_security_group_id] - ssh_key_ids = local.bastion_ssh_keys + ssh_key_ids = var.bastion_ssh_keys subnets = length(var.bastion_subnets) == 2 ? 
[local.bastion_subnets[1]] : [local.bastion_subnets[0]] tags = local.tags user_data = "${data.template_file.login_user_data.rendered} ${file("${path.module}/templates/login_vsi.sh")}" @@ -153,7 +153,7 @@ module "login_vsi" { module "ldap_vsi" { count = local.ldap_enable source = "terraform-ibm-modules/landing-zone-vsi/ibm" - version = "4.5.0" + version = "5.0.0" vsi_per_subnet = 1 create_security_group = false security_group = null @@ -163,7 +163,7 @@ module "ldap_vsi" { resource_group_id = var.resource_group enable_floating_ip = false security_group_ids = module.compute_sg[*].security_group_id - ssh_key_ids = local.management_ssh_keys + ssh_key_ids = var.compute_ssh_keys subnets = [local.compute_subnets[0]] tags = local.tags user_data = var.enable_ldap == true && var.ldap_server == "null" ? "${data.template_file.ldap_user_data[0].rendered} ${file("${path.module}/templates/ldap_user_data.sh")}" : "" diff --git a/modules/landing_zone_vsi/template_files.tf b/modules/landing_zone_vsi/template_files.tf index 1b9adbca..18417a10 100644 --- a/modules/landing_zone_vsi/template_files.tf +++ b/modules/landing_zone_vsi/template_files.tf @@ -94,7 +94,7 @@ data "template_file" "management_values" { image_id = local.compute_image_from_data ? data.ibm_is_image.compute[0].id : local.new_compute_image_id subnet_id = local.compute_subnets[0].crn security_group_id = module.compute_sg[0].security_group_id - sshkey_id = join(",", local.compute_ssh_keys) + sshkey_id = join(",", var.compute_ssh_keys) region_name = data.ibm_is_region.region.name zone_name = var.zones[0] vpc_id = var.vpc_id diff --git a/modules/landing_zone_vsi/variables.tf b/modules/landing_zone_vsi/variables.tf index 1263c302..afaa04bb 100644 --- a/modules/landing_zone_vsi/variables.tf +++ b/modules/landing_zone_vsi/variables.tf @@ -261,7 +261,7 @@ variable "bastion_subnets" { description = "Subnets to launch the bastion host." } -variable "ssh_keys" { +variable "bastion_ssh_keys" { type = list(string) description = "The key pair to use to access the host." } diff --git a/solutions/hpc/datasource.tf b/solutions/hpc/datasource.tf index af01891e..6dd317f8 100644 --- a/solutions/hpc/datasource.tf +++ b/solutions/hpc/datasource.tf @@ -84,3 +84,13 @@ resource "ibm_is_subnet_public_gateway_attachment" "zone_1_attachment" { data "ibm_is_dedicated_host_profiles" "worker" { count = var.enable_dedicated_host ? 1 : 0 } + +data "ibm_is_ssh_key" "bastion" { + for_each = toset(var.bastion_ssh_keys) + name = each.key +} + +data "ibm_is_ssh_key" "compute" { + for_each = toset(var.compute_ssh_keys) + name = each.key +} diff --git a/solutions/hpc/locals.tf b/solutions/hpc/locals.tf index 043c9ec4..9695e181 100644 --- a/solutions/hpc/locals.tf +++ b/solutions/hpc/locals.tf @@ -70,7 +70,8 @@ locals { # locals needed for file-storage locals { - + compute_ssh_keys = [for name in var.compute_ssh_keys : data.ibm_is_ssh_key.compute[name].id] + bastion_ssh_keys = [for name in var.bastion_ssh_keys : data.ibm_is_ssh_key.bastion[name].id] # dependency: landing_zone_vsi -> file-share compute_subnet_id = local.compute_subnets[0].id compute_security_group_id = module.landing_zone_vsi[0].compute_sg_id diff --git a/solutions/hpc/main.tf b/solutions/hpc/main.tf index c0bcbc24..be5a3c10 100644 --- a/solutions/hpc/main.tf +++ b/solutions/hpc/main.tf @@ -34,7 +34,7 @@ module "bootstrap" { vpc_id = local.vpc_id network_cidr = var.vpc_name != null && length(var.cluster_subnet_ids) > 0 ? 
local.existing_subnet_cidrs : split(",", var.vpc_cidr) bastion_subnets = local.bastion_subnets - ssh_keys = var.bastion_ssh_keys + ssh_keys = local.bastion_ssh_keys allowed_cidr = local.allowed_cidr kms_encryption_enabled = local.kms_encryption_enabled boot_volume_encryption_key = local.boot_volume_encryption_key @@ -95,7 +95,7 @@ module "landing_zone_vsi" { compute_private_key_content = local.compute_private_key_content bastion_private_key_content = local.bastion_ssh_private_key != null ? local.bastion_ssh_private_key : local.bastion_private_key_content compute_subnets = local.compute_subnets - compute_ssh_keys = var.compute_ssh_keys + compute_ssh_keys = local.compute_ssh_keys management_image_name = var.management_image_name compute_image_name = var.compute_image_name login_image_name = var.login_image_name @@ -114,7 +114,7 @@ module "landing_zone_vsi" { mount_path = var.custom_file_shares login_node_instance_type = var.login_node_instance_type bastion_subnets = local.bastion_subnets - ssh_keys = var.bastion_ssh_keys + bastion_ssh_keys = local.bastion_ssh_keys enable_ldap = var.enable_ldap ldap_basedns = var.ldap_basedns login_private_ips = join("", local.login_private_ips) diff --git a/solutions/hpc/version.tf b/solutions/hpc/version.tf index 13719884..20309893 100644 --- a/solutions/hpc/version.tf +++ b/solutions/hpc/version.tf @@ -3,7 +3,7 @@ terraform { required_providers { ibm = { source = "IBM-Cloud/ibm" - version = "1.71.3" + version = "1.77.0" } null = { source = "hashicorp/null" diff --git a/tests/go.mod b/tests/go.mod index 938ba64d..af6420fa 100644 --- a/tests/go.mod +++ b/tests/go.mod @@ -5,31 +5,31 @@ go 1.23.4 toolchain go1.24.0 require ( - github.com/IBM/go-sdk-core/v5 v5.18.5 + github.com/IBM/go-sdk-core/v5 v5.19.0 github.com/IBM/secrets-manager-go-sdk/v2 v2.0.10 github.com/gruntwork-io/terratest v0.48.2 github.com/stretchr/testify v1.10.0 - github.com/terraform-ibm-modules/ibmcloud-terratest-wrapper v1.46.3 - golang.org/x/crypto v0.35.0 + github.com/terraform-ibm-modules/ibmcloud-terratest-wrapper v1.48.2 + golang.org/x/crypto v0.37.0 gopkg.in/yaml.v3 v3.0.1 ) require ( - dario.cat/mergo v1.0.0 // indirect - github.com/IBM-Cloud/bluemix-go v0.0.0-20240719075425-078fcb3a55be // indirect - github.com/IBM-Cloud/power-go-client v1.10.0 // indirect + dario.cat/mergo v1.0.1 // indirect + github.com/IBM-Cloud/bluemix-go v0.0.0-20250409011132-bdd4531aaa04 // indirect + github.com/IBM-Cloud/power-go-client v1.11.0 // indirect github.com/IBM/cloud-databases-go-sdk v0.7.1 // indirect - github.com/IBM/platform-services-go-sdk v0.77.0 // indirect + github.com/IBM/platform-services-go-sdk v0.79.0 // indirect github.com/IBM/project-go-sdk v0.3.6 // indirect github.com/IBM/schematics-go-sdk v0.4.0 // indirect - github.com/IBM/vpc-go-sdk v0.64.1 // indirect + github.com/IBM/vpc-go-sdk v0.67.0 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect - github.com/ProtonMail/go-crypto v1.1.5 // indirect + github.com/ProtonMail/go-crypto v1.1.6 // indirect github.com/agext/levenshtein v1.2.3 // indirect github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d // indirect - github.com/cloudflare/circl v1.6.0 // indirect + github.com/cloudflare/circl v1.6.1 // indirect github.com/cyphar/filepath-securejoin v0.4.1 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/emirpasic/gods v1.18.1 // indirect @@ -40,19 +40,19 @@ require ( 
github.com/go-git/go-git/v5 v5.14.0 // indirect github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect - github.com/go-openapi/analysis v0.21.5 // indirect - github.com/go-openapi/errors v0.22.0 // indirect - github.com/go-openapi/jsonpointer v0.20.1 // indirect - github.com/go-openapi/jsonreference v0.20.3 // indirect - github.com/go-openapi/loads v0.21.3 // indirect - github.com/go-openapi/runtime v0.26.0 // indirect - github.com/go-openapi/spec v0.20.12 // indirect + github.com/go-openapi/analysis v0.23.0 // indirect + github.com/go-openapi/errors v0.22.1 // indirect + github.com/go-openapi/jsonpointer v0.21.1 // indirect + github.com/go-openapi/jsonreference v0.21.0 // indirect + github.com/go-openapi/loads v0.22.0 // indirect + github.com/go-openapi/runtime v0.28.0 // indirect + github.com/go-openapi/spec v0.21.0 // indirect github.com/go-openapi/strfmt v0.23.0 // indirect - github.com/go-openapi/swag v0.22.5 // indirect - github.com/go-openapi/validate v0.22.4 // indirect + github.com/go-openapi/swag v0.23.1 // indirect + github.com/go-openapi/validate v0.24.0 // indirect github.com/go-playground/locales v0.14.1 // indirect github.com/go-playground/universal-translator v0.18.1 // indirect - github.com/go-playground/validator/v10 v10.24.0 // indirect + github.com/go-playground/validator/v10 v10.26.0 // indirect github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect github.com/google/go-cmp v0.7.0 // indirect github.com/google/uuid v1.6.0 // indirect @@ -63,16 +63,16 @@ require ( github.com/hashicorp/go-retryablehttp v0.7.7 // indirect github.com/hashicorp/go-safetemp v1.0.0 // indirect github.com/hashicorp/go-version v1.7.0 // indirect - github.com/hashicorp/hcl/v2 v2.22.0 // indirect + github.com/hashicorp/hcl/v2 v2.23.0 // indirect github.com/hashicorp/terraform-json v0.24.0 // indirect github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect github.com/jinzhu/copier v0.4.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/kevinburke/ssh_config v1.2.0 // indirect - github.com/klauspost/compress v1.16.7 // indirect + github.com/klauspost/compress v1.18.0 // indirect github.com/leodido/go-urn v1.4.0 // indirect - github.com/mailru/easyjson v0.7.7 // indirect - github.com/mattn/go-zglob v0.0.4 // indirect + github.com/mailru/easyjson v0.9.0 // indirect + github.com/mattn/go-zglob v0.0.6 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect github.com/mitchellh/go-testing-interface v1.14.1 // indirect github.com/mitchellh/go-wordwrap v1.0.1 // indirect @@ -83,20 +83,21 @@ require ( github.com/pmezard/go-difflib v1.0.0 // indirect github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 // indirect github.com/skeema/knownhosts v1.3.1 // indirect - github.com/tmccombs/hcl2json v0.6.4 // indirect - github.com/ulikunitz/xz v0.5.11 // indirect + github.com/tmccombs/hcl2json v0.6.7 // indirect + github.com/ulikunitz/xz v0.5.12 // indirect github.com/xanzy/ssh-agent v0.3.3 // indirect - github.com/zclconf/go-cty v1.15.1 // indirect - go.mongodb.org/mongo-driver v1.17.2 // indirect - go.opentelemetry.io/otel v1.29.0 // indirect - go.opentelemetry.io/otel/metric v1.29.0 // indirect - go.opentelemetry.io/otel/trace v1.29.0 // indirect - golang.org/x/mod v0.22.0 // indirect - golang.org/x/net v0.35.0 // indirect - golang.org/x/sync v0.11.0 // indirect - golang.org/x/sys v0.30.0 // indirect - golang.org/x/text v0.22.0 // indirect - golang.org/x/tools v0.28.0 // indirect + 
github.com/zclconf/go-cty v1.16.2 // indirect + go.mongodb.org/mongo-driver v1.17.3 // indirect + go.opentelemetry.io/auto/sdk v1.1.0 // indirect + go.opentelemetry.io/otel v1.35.0 // indirect + go.opentelemetry.io/otel/metric v1.35.0 // indirect + go.opentelemetry.io/otel/trace v1.35.0 // indirect + golang.org/x/mod v0.24.0 // indirect + golang.org/x/net v0.39.0 // indirect + golang.org/x/sync v0.13.0 // indirect + golang.org/x/sys v0.32.0 // indirect + golang.org/x/text v0.24.0 // indirect + golang.org/x/tools v0.32.0 // indirect google.golang.org/protobuf v1.36.1 // indirect gopkg.in/warnings.v0 v0.1.2 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect diff --git a/tests/go.sum b/tests/go.sum index 2b2119f7..25fbde2a 100644 --- a/tests/go.sum +++ b/tests/go.sum @@ -1,28 +1,28 @@ -dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= -dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= -github.com/IBM-Cloud/bluemix-go v0.0.0-20240719075425-078fcb3a55be h1:USOcBHkYQ4o/ccoEvoHinrba8NQthLJpFXnAoBY+MI4= -github.com/IBM-Cloud/bluemix-go v0.0.0-20240719075425-078fcb3a55be/go.mod h1:/7hMjdZA6fEpd/dQAOEABxKEwN0t72P3PlpEDu0Y7bE= -github.com/IBM-Cloud/power-go-client v1.10.0 h1:yBUHWwvNBmLkWpbZJQJEXoxBa1Dm+eJgMSbk9ljmXUU= -github.com/IBM-Cloud/power-go-client v1.10.0/go.mod h1:UDyXeIKEp6r7yWUXYu3r0ZnFSlNZ2YeQTHwM2Tmlgv0= +dario.cat/mergo v1.0.1 h1:Ra4+bf83h2ztPIQYNP99R6m+Y7KfnARDfID+a+vLl4s= +dario.cat/mergo v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= +github.com/IBM-Cloud/bluemix-go v0.0.0-20250409011132-bdd4531aaa04 h1:euG2yKR4snk8zIa5BeUKBWcIj4o1SBqX+k7CtIab6Y8= +github.com/IBM-Cloud/bluemix-go v0.0.0-20250409011132-bdd4531aaa04/go.mod h1:/7hMjdZA6fEpd/dQAOEABxKEwN0t72P3PlpEDu0Y7bE= +github.com/IBM-Cloud/power-go-client v1.11.0 h1:4xlYXF2+S3s6Crb0D2+d5c1kb6gUE7eowMXLB7Q6cWY= +github.com/IBM-Cloud/power-go-client v1.11.0/go.mod h1:UDyXeIKEp6r7yWUXYu3r0ZnFSlNZ2YeQTHwM2Tmlgv0= github.com/IBM/cloud-databases-go-sdk v0.7.1 h1:5kK4/3NUsGxZzmuUe+1ftajpOQbeDVh5VeemrPgROP4= github.com/IBM/cloud-databases-go-sdk v0.7.1/go.mod h1:JYucI1PdwqbAd8XGdDAchxzxRP7bxOh1zUnseovHKsc= -github.com/IBM/go-sdk-core/v5 v5.18.5 h1:g0JRl3sYXJczB/yuDlrN6x22LJ6jIxhp0Sa4ARNW60c= -github.com/IBM/go-sdk-core/v5 v5.18.5/go.mod h1:KonTFRR+8ZSgw5cxBSYo6E4WZoY1+7n1kfHM82VcjFU= -github.com/IBM/platform-services-go-sdk v0.77.0 h1:ifvcAYe+3/sa/ztgpqxLZVXnFyb9UzcSuxDNmmmd+iw= -github.com/IBM/platform-services-go-sdk v0.77.0/go.mod h1:sX7rPXMNqmAt6wQyjnF+oISRFwz6I6XNX9My/mzLqp4= +github.com/IBM/go-sdk-core/v5 v5.19.0 h1:YN2S5JUvq/EwYulmcNFwgyYBxZhVWl9nkY22H7Hpghw= +github.com/IBM/go-sdk-core/v5 v5.19.0/go.mod h1:deZO1J5TSlU69bCnl/YV7nPxFZA2UEaup7cq/7ZTOgw= +github.com/IBM/platform-services-go-sdk v0.79.0 h1:qCNheB3390holPcpDxdgNyi11JS6ZfsL39YgnJEOsTo= +github.com/IBM/platform-services-go-sdk v0.79.0/go.mod h1:FzCPOfbNAt0s9RwtIrbJbfDwA7mKIObtZ/18KnviKr0= github.com/IBM/project-go-sdk v0.3.6 h1:DRiANKnAePevFsIKSvR89SUaMa2xsd7YKK71Ka1eqKI= github.com/IBM/project-go-sdk v0.3.6/go.mod h1:FOJM9ihQV3EEAY6YigcWiTNfVCThtdY8bLC/nhQHFvo= github.com/IBM/schematics-go-sdk v0.4.0 h1:x01f/tPquYJYLQzJLGuxWfCbV/EdSMXRikOceNy/JLM= github.com/IBM/schematics-go-sdk v0.4.0/go.mod h1:Xe7R7xgwmXBHu09w2CbBe8lkWZaYxNQo19bS4dpLrUA= github.com/IBM/secrets-manager-go-sdk/v2 v2.0.10 h1:R9ZMCCi7yJnDIe88+UKKQf0CFBB74E6k8mOp+++kL4w= github.com/IBM/secrets-manager-go-sdk/v2 v2.0.10/go.mod h1:Bmy0woaAxxNPVHCqusarnTZVyVMnLRVwemF6gvGHcLo= -github.com/IBM/vpc-go-sdk v0.64.1 
h1:1tIeb+GqPnvw7Ty+M0BknZJIdzIIrHqxWsYjgrM6NQY= -github.com/IBM/vpc-go-sdk v0.64.1/go.mod h1:6rEWo6HGt7S0Nbw7WdJQiVcz9Z+mRDmyycK4xc4kWlw= +github.com/IBM/vpc-go-sdk v0.67.0 h1:p8G5bqTUyVheBrJpT+pLpoZoA/Yu1R2xX4xJLM4tT9w= +github.com/IBM/vpc-go-sdk v0.67.0/go.mod h1:VL7sy61ybg6tvA60SepoQx7TFe20m7JyNUt+se2tHP4= github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY= github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= -github.com/ProtonMail/go-crypto v1.1.5 h1:eoAQfK2dwL+tFSFpr7TbOaPNUbPiJj4fLYwwGE1FQO4= -github.com/ProtonMail/go-crypto v1.1.5/go.mod h1:rA3QumHc/FZ8pAHreoekgiAbzpNsfQAosU5td4SnOrE= +github.com/ProtonMail/go-crypto v1.1.6 h1:ZcV+Ropw6Qn0AX9brlQLAUXfqLBc7Bl+f/DmNxpLfdw= +github.com/ProtonMail/go-crypto v1.1.6/go.mod h1:rA3QumHc/FZ8pAHreoekgiAbzpNsfQAosU5td4SnOrE= github.com/agext/levenshtein v1.2.3 h1:YB2fHEn0UJagG8T1rrWknE3ZQzWM06O8AMAatNn7lmo= github.com/agext/levenshtein v1.2.3/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8= @@ -40,8 +40,8 @@ github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d/go.mod h1:6QX/PXZ github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/cloudflare/circl v1.6.0 h1:cr5JKic4HI+LkINy2lg3W2jF8sHCVTBncJr5gIIq7qk= -github.com/cloudflare/circl v1.6.0/go.mod h1:uddAzsPgqdMAYatqJ0lsjX1oECcQLIlRpzZh3pJrofs= +github.com/cloudflare/circl v1.6.1 h1:zqIqSPIndyBh1bjLVVDHMPpVKqp8Su/V+6MeDzzQBQ0= +github.com/cloudflare/circl v1.6.1/go.mod h1:uddAzsPgqdMAYatqJ0lsjX1oECcQLIlRpzZh3pJrofs= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/cyphar/filepath-securejoin v0.4.1 h1:JyxxyPEaktOD+GAnqIqTf9A8tHyAG22rowi7HkoSU1s= github.com/cyphar/filepath-securejoin v0.4.1/go.mod h1:Sdj7gXlvMcPZsbhwhQ33GguGLDGQL7h7bg04C/+u9jI= @@ -80,36 +80,36 @@ github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= -github.com/go-openapi/analysis v0.21.5 h1:3tHfEBh6Ia8eKc4M7khOGjPOAlWKJ10d877Cr9teujI= -github.com/go-openapi/analysis v0.21.5/go.mod h1:25YcZosX9Lwz2wBsrFrrsL8bmjjXdlyP6zsr2AMy29M= +github.com/go-openapi/analysis v0.23.0 h1:aGday7OWupfMs+LbmLZG4k0MYXIANxcuBTYUC03zFCU= +github.com/go-openapi/analysis v0.23.0/go.mod h1:9mz9ZWaSlV8TvjQHLl2mUW2PbZtemkE8yA5v22ohupo= github.com/go-openapi/errors v0.20.3/go.mod h1:Z3FlZ4I8jEGxjUK+bugx3on2mIAk4txuAOhlsB1FSgk= -github.com/go-openapi/errors v0.22.0 h1:c4xY/OLxUBSTiepAg3j/MHuAv5mJhnf53LLMWFB+u/w= -github.com/go-openapi/errors v0.22.0/go.mod h1:J3DmZScxCDufmIMsdOuDHxJbdOGC0xtUynjIx092vXE= -github.com/go-openapi/jsonpointer v0.20.1 h1:MkK4VEIEZMj4wT9PmjaUmGflVBr9nvud4Q4UVFbDoBE= -github.com/go-openapi/jsonpointer v0.20.1/go.mod h1:bHen+N0u1KEO3YlmqOjTT9Adn1RfD91Ar825/PuiRVs= -github.com/go-openapi/jsonreference v0.20.3 h1:EjGcjTW8pD1mRis6+w/gmoBdqv5+RbE9B85D1NgDOVQ= 
-github.com/go-openapi/jsonreference v0.20.3/go.mod h1:FviDZ46i9ivh810gqzFLl5NttD5q3tSlMLqLr6okedM= -github.com/go-openapi/loads v0.21.3 h1:8sSH2FIm/SnbDUGv572md4YqVMFne/a9Eubvcd3anew= -github.com/go-openapi/loads v0.21.3/go.mod h1:Y3aMR24iHbKHppOj91nQ/SHc0cuPbAr4ndY4a02xydc= -github.com/go-openapi/runtime v0.26.0 h1:HYOFtG00FM1UvqrcxbEJg/SwvDRvYLQKGhw2zaQjTcc= -github.com/go-openapi/runtime v0.26.0/go.mod h1:QgRGeZwrUcSHdeh4Ka9Glvo0ug1LC5WyE+EV88plZrQ= -github.com/go-openapi/spec v0.20.12 h1:cgSLbrsmziAP2iais+Vz7kSazwZ8rsUZd6TUzdDgkVI= -github.com/go-openapi/spec v0.20.12/go.mod h1:iSCgnBcwbMW9SfzJb8iYynXvcY6C/QFrI7otzF7xGM4= +github.com/go-openapi/errors v0.22.1 h1:kslMRRnK7NCb/CvR1q1VWuEQCEIsBGn5GgKD9e+HYhU= +github.com/go-openapi/errors v0.22.1/go.mod h1:+n/5UdIqdVnLIJ6Q9Se8HNGUXYaY6CN8ImWzfi/Gzp0= +github.com/go-openapi/jsonpointer v0.21.1 h1:whnzv/pNXtK2FbX/W9yJfRmE2gsmkfahjMKB0fZvcic= +github.com/go-openapi/jsonpointer v0.21.1/go.mod h1:50I1STOfbY1ycR8jGz8DaMeLCdXiI6aDteEdRNNzpdk= +github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ= +github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4= +github.com/go-openapi/loads v0.22.0 h1:ECPGd4jX1U6NApCGG1We+uEozOAvXvJSF4nnwHZ8Aco= +github.com/go-openapi/loads v0.22.0/go.mod h1:yLsaTCS92mnSAZX5WWoxszLj0u+Ojl+Zs5Stn1oF+rs= +github.com/go-openapi/runtime v0.28.0 h1:gpPPmWSNGo214l6n8hzdXYhPuJcGtziTOgUpvsFWGIQ= +github.com/go-openapi/runtime v0.28.0/go.mod h1:QN7OzcS+XuYmkQLw05akXk0jRH/eZ3kb18+1KwW9gyc= +github.com/go-openapi/spec v0.21.0 h1:LTVzPc3p/RzRnkQqLRndbAzjY0d0BCL72A6j3CdL9ZY= +github.com/go-openapi/spec v0.21.0/go.mod h1:78u6VdPw81XU44qEWGhtr982gJ5BWg2c0I5XwVMotYk= github.com/go-openapi/strfmt v0.21.7/go.mod h1:adeGTkxE44sPyLk0JV235VQAO/ZXUr8KAzYjclFs3ew= github.com/go-openapi/strfmt v0.23.0 h1:nlUS6BCqcnAk0pyhi9Y+kdDVZdZMHfEKQiS4HaMgO/c= github.com/go-openapi/strfmt v0.23.0/go.mod h1:NrtIpfKtWIygRkKVsxh7XQMDQW5HKQl6S5ik2elW+K4= -github.com/go-openapi/swag v0.22.5 h1:fVS63IE3M0lsuWRzuom3RLwUMVI2peDH01s6M70ugys= -github.com/go-openapi/swag v0.22.5/go.mod h1:Gl91UqO+btAM0plGGxHqJcQZ1ZTy6jbmridBTsDy8A0= -github.com/go-openapi/validate v0.22.4 h1:5v3jmMyIPKTR8Lv9syBAIRxG6lY0RqeBPB1LKEijzk8= -github.com/go-openapi/validate v0.22.4/go.mod h1:qm6O8ZIcPVdSY5219468Jv7kBdGvkiZLPOmqnqTUZ2A= +github.com/go-openapi/swag v0.23.1 h1:lpsStH0n2ittzTnbaSloVZLuB5+fvSY/+hnagBjSNZU= +github.com/go-openapi/swag v0.23.1/go.mod h1:STZs8TbRvEQQKUA+JZNAm3EWlgaOBGpyFDqQnDHMef0= +github.com/go-openapi/validate v0.24.0 h1:LdfDKwNbpB6Vn40xhTdNZAnfLECL81w+VX3BumrGD58= +github.com/go-openapi/validate v0.24.0/go.mod h1:iyeX1sEufmv3nPbBdX3ieNviWnOZaJ1+zquzJEf2BAQ= github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s= github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA= github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY= github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY= github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= -github.com/go-playground/validator/v10 v10.24.0 h1:KHQckvo8G6hlWnrPX4NJJ+aBfWNAE/HH+qdL2cBpCmg= -github.com/go-playground/validator/v10 v10.24.0/go.mod h1:GGzBIJMuE98Ic/kJsBXbz1x/7cByt++cQ+YOuDM5wus= +github.com/go-playground/validator/v10 v10.26.0 
h1:SP05Nqhjcvz81uJaRfEV0YBSSSGMc/iMaVtFbr3Sw2k= +github.com/go-playground/validator/v10 v10.26.0/go.mod h1:I5QpIEbmr8On7W0TktmJAumgzX4CA1XNl4ZmDuVHKKo= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= github.com/go-test/deep v1.0.7 h1:/VSMRlnY/JSyqxQUzQLKVMAskpY/NZKFA5j2P+0pP2M= @@ -160,8 +160,8 @@ github.com/hashicorp/go-safetemp v1.0.0 h1:2HR189eFNrjHQyENnQMMpCiBAsRxzbTMIgBhE github.com/hashicorp/go-safetemp v1.0.0/go.mod h1:oaerMy3BhqiTbVye6QuFhFtIceqFoDHxNAB65b+Rj1I= github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY= github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/hcl/v2 v2.22.0 h1:hkZ3nCtqeJsDhPRFz5EA9iwcG1hNWGePOTw6oyul12M= -github.com/hashicorp/hcl/v2 v2.22.0/go.mod h1:62ZYHrXgPoX8xBnzl8QzbWq4dyDsDtfCRgIq1rbJEvA= +github.com/hashicorp/hcl/v2 v2.23.0 h1:Fphj1/gCylPxHutVSEOf2fBOh1VE4AuLV7+kbJf3qos= +github.com/hashicorp/hcl/v2 v2.23.0/go.mod h1:62ZYHrXgPoX8xBnzl8QzbWq4dyDsDtfCRgIq1rbJEvA= github.com/hashicorp/terraform-json v0.24.0 h1:rUiyF+x1kYawXeRth6fKFm/MdfBS6+lW4NbeATsYz8Q= github.com/hashicorp/terraform-json v0.24.0/go.mod h1:Nfj5ubo9xbu9uiAoZVBsNOjvNKB66Oyrvtit74kC7ow= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= @@ -175,8 +175,8 @@ github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFF github.com/kevinburke/ssh_config v1.2.0 h1:x584FjTGwHzMwvHx18PXxbBVzfnxogHaAReU4gf13a4= github.com/kevinburke/ssh_config v1.2.0/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/klauspost/compress v1.16.7 h1:2mk3MPGNzKyxErAw8YaohYh69+pa4sIQSC0fPGCFR9I= -github.com/klauspost/compress v1.16.7/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= +github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= +github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= @@ -186,14 +186,14 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ= github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI= -github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= -github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/mailru/easyjson v0.9.0 h1:PrnmzHw7262yW8sTBwxi1PdJA3Iw/EKBa8psRf7d9a4= +github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= -github.com/mattn/go-zglob v0.0.4 
h1:LQi2iOm0/fGgu80AioIJ/1j9w9Oh+9DZ39J4VAGzHQM= -github.com/mattn/go-zglob v0.0.4/go.mod h1:MxxjyoXXnMxfIpxTK2GAkw1w8glPsQILx3N5wrKakiY= +github.com/mattn/go-zglob v0.0.6 h1:mP8RnmCgho4oaUYDIDn6GNxYk+qJGUs8fJLn+twYj2A= +github.com/mattn/go-zglob v0.0.6/go.mod h1:MxxjyoXXnMxfIpxTK2GAkw1w8glPsQILx3N5wrKakiY= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-testing-interface v1.14.1 h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU= @@ -282,13 +282,13 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -github.com/terraform-ibm-modules/ibmcloud-terratest-wrapper v1.46.3 h1:2lCXpdsJ+Bqm0cDR+4CT8IPCHZtXZA3VIjORQDWXYmo= -github.com/terraform-ibm-modules/ibmcloud-terratest-wrapper v1.46.3/go.mod h1:qyXLLCQjBfQVyVWH82lhD78XH24qs8BslXURDIRYPqc= +github.com/terraform-ibm-modules/ibmcloud-terratest-wrapper v1.48.2 h1:pc2mBIfQCflHkxTunSAvAcSvM7uhDp6oMZ3L6hBj90w= +github.com/terraform-ibm-modules/ibmcloud-terratest-wrapper v1.48.2/go.mod h1:6bjkgzKQBYsIX7+tSQjB4C1NEq3qQBKJ/0LD8OGZffg= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= -github.com/tmccombs/hcl2json v0.6.4 h1:/FWnzS9JCuyZ4MNwrG4vMrFrzRgsWEOVi+1AyYUVLGw= -github.com/tmccombs/hcl2json v0.6.4/go.mod h1:+ppKlIW3H5nsAsZddXPy2iMyvld3SHxyjswOZhavRDk= -github.com/ulikunitz/xz v0.5.11 h1:kpFauv27b6ynzBNT/Xy+1k+fK4WswhN/6PN5WhFAGw8= -github.com/ulikunitz/xz v0.5.11/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= +github.com/tmccombs/hcl2json v0.6.7 h1:RYKTs4kd/gzRsEiv7J3M2WQ7TYRYZVc+0H0pZdERkxA= +github.com/tmccombs/hcl2json v0.6.7/go.mod h1:lJgBOOGDpbhjvdG2dLaWsqB4KBzul2HytfDTS3H465o= +github.com/ulikunitz/xz v0.5.12 h1:37Nm15o69RwBkXM0J6A5OlE67RZTfzUxTj8fB3dfcsc= +github.com/ulikunitz/xz v0.5.12/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM= github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw= github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= @@ -298,21 +298,23 @@ github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7Jul github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -github.com/zclconf/go-cty v1.15.1 h1:RgQYm4j2EvoBRXOPxhUvxPzRrGDo1eCOhHXuGfrj5S0= -github.com/zclconf/go-cty v1.15.1/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE= +github.com/zclconf/go-cty v1.16.2 h1:LAJSwc3v81IRBZyUVQDUdZ7hs3SYs9jv0eZJDWHD/70= +github.com/zclconf/go-cty v1.16.2/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE= github.com/zclconf/go-cty-debug v0.0.0-20240509010212-0d6042c53940 h1:4r45xpDWB6ZMSMNJFMOjqrGHynW3DIBuR2H9j0ug+Mo= github.com/zclconf/go-cty-debug v0.0.0-20240509010212-0d6042c53940/go.mod h1:CmBdvvj3nqzfzJ6nTCIwDTPZ56aVGvDrmztiO5g3qrM= go.mongodb.org/mongo-driver v1.11.3/go.mod h1:PTSz5yu21bkT/wXpkS7WR5f0ddqw5quethTUn9WM+2g= -go.mongodb.org/mongo-driver v1.17.2 
h1:gvZyk8352qSfzyZ2UMWcpDpMSGEr1eqE4T793SqyhzM= -go.mongodb.org/mongo-driver v1.17.2/go.mod h1:Hy04i7O2kC4RS06ZrhPRqj/u4DTYkFDAAccj+rVKqgQ= -go.opentelemetry.io/otel v1.29.0 h1:PdomN/Al4q/lN6iBJEN3AwPvUiHPMlt93c8bqTG5Llw= -go.opentelemetry.io/otel v1.29.0/go.mod h1:N/WtXPs1CNCUEx+Agz5uouwCba+i+bJGFicT8SR4NP8= -go.opentelemetry.io/otel/metric v1.29.0 h1:vPf/HFWTNkPu1aYeIsc98l4ktOQaL6LeSoeV2g+8YLc= -go.opentelemetry.io/otel/metric v1.29.0/go.mod h1:auu/QWieFVWx+DmQOUMgj0F8LHWdgalxXqvp7BII/W8= +go.mongodb.org/mongo-driver v1.17.3 h1:TQyXhnsWfWtgAhMtOgtYHMTkZIfBTpMTsMnd9ZBeHxQ= +go.mongodb.org/mongo-driver v1.17.3/go.mod h1:Hy04i7O2kC4RS06ZrhPRqj/u4DTYkFDAAccj+rVKqgQ= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/otel v1.35.0 h1:xKWKPxrxB6OtMCbmMY021CqC45J+3Onta9MqjhnusiQ= +go.opentelemetry.io/otel v1.35.0/go.mod h1:UEqy8Zp11hpkUrL73gSlELM0DupHoiq72dR+Zqel/+Y= +go.opentelemetry.io/otel/metric v1.35.0 h1:0znxYu2SNyuMSQT4Y9WDWej0VpcsxkuklLa4/siN90M= +go.opentelemetry.io/otel/metric v1.35.0/go.mod h1:nKVFgxBZ2fReX6IlyW28MgZojkoAkJGaE8CpgeAU3oE= go.opentelemetry.io/otel/sdk v1.29.0 h1:vkqKjk7gwhS8VaWb0POZKmIEDimRCMsopNYnriHyryo= go.opentelemetry.io/otel/sdk v1.29.0/go.mod h1:pM8Dx5WKnvxLCb+8lG1PRNIDxu9g9b9g59Qr7hfAAok= -go.opentelemetry.io/otel/trace v1.29.0 h1:J/8ZNK4XgR7a21DZUAsbF8pZ5Jcw1VhACmnYt39JTi4= -go.opentelemetry.io/otel/trace v1.29.0/go.mod h1:eHl3w0sp3paPkYstJOmAimxhiFXPg+MMTlEh3nsQgWQ= +go.opentelemetry.io/otel/trace v1.35.0 h1:dPpEfJu1sDIqruz7BHFG3c7528f6ddfSWfFDVt/xgMs= +go.opentelemetry.io/otel/trace v1.35.0/go.mod h1:WUk7DtFp1Aw2MkvqGdwiXYDZZNvA/1J8o6xRXLrIkyc= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -324,8 +326,8 @@ golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98y golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= -golang.org/x/crypto v0.35.0 h1:b15kiHdrGCHrP6LvwaQ3c03kgNhhiMgvlhxHQhmg2Xs= -golang.org/x/crypto v0.35.0/go.mod h1:dy7dXNW32cAb/6/PRuTNsix8T+vJAqvuIy5Bli/x0YQ= +golang.org/x/crypto v0.37.0 h1:kJNSjF/Xp7kU0iB2Z+9viTPMW4EqqsrywMXLJOOsXSE= +golang.org/x/crypto v0.37.0/go.mod h1:vg+k43peMZ0pUMhYmVAWysMK35e6ioLh3wB8ZCAfbVc= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= @@ -336,8 +338,8 @@ golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4= -golang.org/x/mod v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= 
+golang.org/x/mod v0.24.0 h1:ZfthKaKaT4NrhGVZHO1/WDTwGES4De8KtWO0SIbNJMU= +golang.org/x/mod v0.24.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -363,8 +365,8 @@ golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= -golang.org/x/net v0.35.0 h1:T5GQRQb2y08kTAByq9L4/bz8cipCdA8FbRTXewonqY8= -golang.org/x/net v0.35.0/go.mod h1:EglIi67kWsHKlRzzVMUD93VMSWGFOMSZgxFjparz1Qk= +golang.org/x/net v0.39.0 h1:ZCu7HMWDxpXpaiKdhzIfaltL9Lp31x/3fCP11bc6/fY= +golang.org/x/net v0.39.0/go.mod h1:X7NRbYVEA+ewNkCNyJ513WmMdQ3BineSwVtN2zD/d+E= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -374,8 +376,8 @@ golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sync v0.11.0 h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w= -golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.13.0 h1:AauUjRAJ9OSnvULf/ARrrVywoJDy0YS2AwQ98I37610= +golang.org/x/sync v0.13.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -411,8 +413,8 @@ golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc= -golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20= +golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -427,8 +429,8 @@ golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU= golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= 
golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= -golang.org/x/term v0.29.0 h1:L6pJp37ocefwRRtYPKSWOWzOtWSxVajvz2ldH/xi3iU= -golang.org/x/term v0.29.0/go.mod h1:6bl4lRlvVuDgSf3179VpIxBF0o10JUpXWOnI7nErv7s= +golang.org/x/term v0.31.0 h1:erwDkOK1Msy6offm1mOgvspSkslFnIGsFnxOKoufg3o= +golang.org/x/term v0.31.0/go.mod h1:R4BeIy7D95HzImkxGkTW1UQTtP54tio2RyHz7PwK0aw= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= @@ -443,8 +445,8 @@ golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/text v0.22.0 h1:bofq7m3/HAFvbF51jz3Q9wLg3jkvSPuiZu/pD1XwgtM= -golang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY= +golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0= +golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= @@ -459,8 +461,8 @@ golang.org/x/tools v0.9.3/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc= golang.org/x/tools v0.12.0/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM= golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= golang.org/x/tools v0.16.1/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0= -golang.org/x/tools v0.28.0 h1:WuB6qZ4RPCQo5aP3WdKZS7i595EdWqWR8vqJTlwTVK8= -golang.org/x/tools v0.28.0/go.mod h1:dcIOrVd3mfQKTgrDVQHqCPMWy6lnhfhtX3hLXYVLfRw= +golang.org/x/tools v0.32.0 h1:Q7N1vhpkQv7ybVzLFtTjvQya2ewbwNDZzUgfXGqtMWU= +golang.org/x/tools v0.32.0/go.mod h1:ZxrU41P/wAbZD8EDa6dDCa6XfpkhJ7HFMjHJXfBDu8s= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/tools/image-builder/locals.tf b/tools/image-builder/locals.tf index 5c9dbe7b..431fc4ab 100644 --- a/tools/image-builder/locals.tf +++ b/tools/image-builder/locals.tf @@ -8,8 +8,8 @@ locals { vpc_id = var.vpc_name == null ? module.landing_zone.vpc_data[0].vpc_id : data.ibm_is_vpc.existing_vpc[0].id # Resource group calculation # If user defined then use existing else create new - create_resource_group = var.resource_group == "null" ? true : false - resource_groups = var.resource_group == "null" ? [ + create_resource_group = var.existing_resource_group == "null" ? true : false + resource_groups = var.existing_resource_group == "null" ? 
[ { name = "${local.prefix}-service-rg", create = local.create_resource_group, @@ -22,12 +22,12 @@ locals { } ] : [ { - name = var.resource_group, + name = var.existing_resource_group, create = local.create_resource_group } ] # For the variables looking for resource group names only (transit_gateway, key_management, atracker) - resource_group = var.resource_group == "null" ? "${local.prefix}-service-rg" : var.resource_group + resource_group = var.existing_resource_group == "null" ? "${local.prefix}-service-rg" : var.existing_resource_group region = join("-", slice(split("-", var.zones[0]), 0, 2)) zones = ["zone-1", "zone-2", "zone-3"] active_zones = [ @@ -93,7 +93,7 @@ locals { } ] : null prefix = local.name - resource_group = var.resource_group == "null" ? "${local.prefix}-workload-rg" : var.resource_group + resource_group = var.existing_resource_group == "null" ? "${local.prefix}-workload-rg" : var.existing_resource_group clean_default_security_group = true clean_default_acl = true # flow_logs_bucket_name = var.enable_vpc_flow_logs ? "vpc-flow-logs-bucket" : null @@ -150,8 +150,8 @@ locals { packer_floating_ip = var.enable_fip ? local.packer_vsi_data[0]["floating_ip"] : null packer_resource_groups = { - service_rg = var.resource_group == "null" ? module.landing_zone.resource_group_data["${var.prefix}-service-rg"] : one(values(module.landing_zone.resource_group_data)) - workload_rg = var.resource_group == "null" ? module.landing_zone.resource_group_data["${var.prefix}-workload-rg"] : one(values(module.landing_zone.resource_group_data)) + service_rg = var.existing_resource_group == "null" ? module.landing_zone.resource_group_data["${var.prefix}-service-rg"] : one(values(module.landing_zone.resource_group_data)) + workload_rg = var.existing_resource_group == "null" ? module.landing_zone.resource_group_data["${var.prefix}-workload-rg"] : one(values(module.landing_zone.resource_group_data)) } vsi = [] @@ -163,7 +163,7 @@ locals { vpc_name = local.name subnet_name = (var.vpc_name != null && var.subnet_id != null) ? data.ibm_is_subnet.existing_subnet[0].name : "subnet" mode = "policy" - resource_group = var.resource_group == "null" ? "${local.prefix}-service-rg" : var.resource_group + resource_group = var.existing_resource_group == "null" ? 
"${local.prefix}-service-rg" : var.existing_resource_group } ] : [] diff --git a/tools/image-builder/main.tf b/tools/image-builder/main.tf index 3fa09955..b0c5f677 100644 --- a/tools/image-builder/main.tf +++ b/tools/image-builder/main.tf @@ -1,6 +1,6 @@ module "landing_zone" { source = "terraform-ibm-modules/landing-zone/ibm" - version = "6.6.3" + version = "7.4.3" prefix = local.prefix region = local.region tags = local.tags @@ -48,11 +48,15 @@ resource "ibm_is_subnet_public_gateway_attachment" "zone_1_attachment" { resource "null_resource" "compress_and_encode_folder" { provisioner "local-exec" { command = < ./packer/hpcaas/encoded_compute.txt + fi EOT } } @@ -75,7 +79,7 @@ data "local_file" "encoded_compute_content" { module "packer_vsi" { source = "terraform-ibm-modules/landing-zone-vsi/ibm" - version = "4.5.0" + version = "5.0.0" vsi_per_subnet = 1 image_id = local.packer_image_id machine_type = local.packer_machine_type diff --git a/tools/image-builder/template_files.tf b/tools/image-builder/template_files.tf index d3cf7a19..c02639f4 100644 --- a/tools/image-builder/template_files.tf +++ b/tools/image-builder/template_files.tf @@ -13,11 +13,11 @@ data "template_file" "packer_user_data" { encoded_compute = data.local_file.encoded_compute_content.content target_dir = "/var" prefix = var.prefix - cluster_id = var.cluster_id + cluster_name = var.cluster_name reservation_id = var.reservation_id catalog_validate_ssh_key = var.ssh_keys[0] zones = join(",", var.zones) - resource_group = var.resource_group + existing_resource_group = var.existing_resource_group private_catalog_id = var.private_catalog_id solution = var.solution } diff --git a/tools/image-builder/templates/packer_user_data.tpl b/tools/image-builder/templates/packer_user_data.tpl index d6ae82b4..c2efae84 100644 --- a/tools/image-builder/templates/packer_user_data.tpl +++ b/tools/image-builder/templates/packer_user_data.tpl @@ -87,14 +87,14 @@ echo "====================== Cos Bucket mounting completed ===================== cd /var/packer/hpcaas/compute sudo -E packer init . && sudo -E packer build \ - -var "ibm_api_key=${ibm_api_key}" \ - -var "vpc_region=${vpc_region}" \ - -var "resource_group_id=${resource_group_id}" \ - -var "vpc_subnet_id=${vpc_subnet_id}" \ - -var "source_image_name=${source_image_name}" \ - -var "install_sysdig=${install_sysdig}" \ - -var "security_group_id=${security_group_id}" \ - -var "image_name=${image_name}" . + -var "ibm_api_key=${ibm_api_key}" \ + -var "vpc_region=${vpc_region}" \ + -var "resource_group_id=${resource_group_id}" \ + -var "vpc_subnet_id=${vpc_subnet_id}" \ + -var "source_image_name=${source_image_name}" \ + -var "install_sysdig=${install_sysdig}" \ + -var "security_group_id=${security_group_id}" \ + -var "image_name=${image_name}" . 
echo "========== Generating SSH key =========" mkdir -p /HPCaaS/artifacts/.ssh @@ -110,7 +110,7 @@ curl -fsSL https://clis.cloud.ibm.com/install/linux | sh ibmcloud plugin install infrastructure-service ibmcloud login --apikey ${ibm_api_key} -r ${vpc_region} echo "========== Uploading SSH key to IBM cloud =========" -ibmcloud is key-create $CICD_SSH_KEY @/HPCaaS/artifacts/.ssh/id_rsa.pub --resource-group-name ${resource_group} +ibmcloud is key-create $CICD_SSH_KEY @/HPCaaS/artifacts/.ssh/id_rsa.pub --resource-group-name ${existing_resource_group} cd /HPCaaS/terraform-ibm-hpc/tools/tests git submodule update --init @@ -132,15 +132,15 @@ export TF_VAR_ibmcloud_api_key=${ibm_api_key} if [ "${solution}" != "lsf" ]; then if [ "${private_catalog_id}" ]; then - SOLUTION=${solution} PREFIX=${prefix} CLUSTER_ID=${cluster_id} RESERVATION_ID=${reservation_id} SSH_FILE_PATH="/HPCaaS/artifacts/.ssh/id_rsa" REMOTE_ALLOWED_IPS=$PACKER_FIP SSH_KEYS=$CICD_SSH_KEY CATALOG_VALIDATE_SSH_KEY=${catalog_validate_ssh_key} ZONES=${zones} RESOURCE_GROUP=${resource_group} COMPUTE_IMAGE_NAME=${image_name} PRIVATE_CATALOG_ID=${private_catalog_id} VPC_ID=${vpc_id} SUBNET_ID=${vpc_subnet_id} SOURCE_IMAGE_NAME=${source_image_name} go test -v -timeout 900m -parallel 4 -run "TestRunHpcDeploymentForCustomImageBuilder" | tee hpc_log_$(date +%d-%m-%Y-%H-%M-%S).log + SOLUTION=${solution} PREFIX=${prefix} CLUSTER_NAME=${cluster_name} RESERVATION_ID=${reservation_id} SSH_FILE_PATH="/HPCaaS/artifacts/.ssh/id_rsa" REMOTE_ALLOWED_IPS=$PACKER_FIP SSH_KEYS=$CICD_SSH_KEY CATALOG_VALIDATE_SSH_KEY=${catalog_validate_ssh_key} ZONES=${zones} EXISTING_RESOURCE_GROUP=${existing_resource_group} COMPUTE_IMAGE_NAME=${image_name} PRIVATE_CATALOG_ID=${private_catalog_id} VPC_ID=${vpc_id} SUBNET_ID=${vpc_subnet_id} SOURCE_IMAGE_NAME=${source_image_name} go test -v -timeout 900m -parallel 4 -run "TestRunHpcDeploymentForCustomImageBuilder" | tee hpc_log_$(date +%d-%m-%Y-%H-%M-%S).log else - SOLUTION=${solution} PREFIX=${prefix} CLUSTER_ID=${cluster_id} RESERVATION_ID=${reservation_id} SSH_FILE_PATH="/HPCaaS/artifacts/.ssh/id_rsa" REMOTE_ALLOWED_IPS=$PACKER_FIP SSH_KEYS=$CICD_SSH_KEY ZONES=${zones} RESOURCE_GROUP=${resource_group} COMPUTE_IMAGE_NAME=${image_name} SOURCE_IMAGE_NAME=${source_image_name} go test -v -timeout 900m -parallel 4 -run "TestRunHpcDeploymentForCustomImageBuilder" | tee hpc_log_$(date +%d-%m-%Y-%H-%M-%S).log + SOLUTION=${solution} PREFIX=${prefix} CLUSTER_NAME=${cluster_name} RESERVATION_ID=${reservation_id} SSH_FILE_PATH="/HPCaaS/artifacts/.ssh/id_rsa" REMOTE_ALLOWED_IPS=$PACKER_FIP SSH_KEYS=$CICD_SSH_KEY ZONES=${zones} EXISTING_RESOURCE_GROUP=${existing_resource_group} COMPUTE_IMAGE_NAME=${image_name} SOURCE_IMAGE_NAME=${source_image_name} go test -v -timeout 900m -parallel 4 -run "TestRunHpcDeploymentForCustomImageBuilder" | tee hpc_log_$(date +%d-%m-%Y-%H-%M-%S).log fi else if [ "${private_catalog_id}" ]; then - SOLUTION=${solution} PREFIX=${prefix} CLUSTER_ID=${cluster_id} SSH_FILE_PATH="/HPCaaS/artifacts/.ssh/id_rsa" REMOTE_ALLOWED_IPS=$PACKER_FIP SSH_KEYS=$CICD_SSH_KEY CATALOG_VALIDATE_SSH_KEY=${catalog_validate_ssh_key} ZONES=${zones} RESOURCE_GROUP=${resource_group} COMPUTE_IMAGE_NAME=${image_name} PRIVATE_CATALOG_ID=${private_catalog_id} VPC_ID=${vpc_id} SUBNET_ID=${vpc_subnet_id} SOURCE_IMAGE_NAME=${source_image_name} go test -v -timeout 900m -parallel 4 -run "TestRunHpcDeploymentForCustomImageBuilder" | tee hpc_log_$(date +%d-%m-%Y-%H-%M-%S).log + SOLUTION=${solution} PREFIX=${prefix} CLUSTER_NAME=${cluster_name} 
SSH_FILE_PATH="/HPCaaS/artifacts/.ssh/id_rsa" REMOTE_ALLOWED_IPS=$PACKER_FIP SSH_KEYS=$CICD_SSH_KEY CATALOG_VALIDATE_SSH_KEY=${catalog_validate_ssh_key} ZONES=${zones} EXISTING_RESOURCE_GROUP=${existing_resource_group} COMPUTE_IMAGE_NAME=${image_name} PRIVATE_CATALOG_ID=${private_catalog_id} VPC_ID=${vpc_id} SUBNET_ID=${vpc_subnet_id} SOURCE_IMAGE_NAME=${source_image_name} go test -v -timeout 900m -parallel 4 -run "TestRunHpcDeploymentForCustomImageBuilder" | tee hpc_log_$(date +%d-%m-%Y-%H-%M-%S).log else - SOLUTION=${solution} PREFIX=${prefix} CLUSTER_ID=${cluster_id} SSH_FILE_PATH="/HPCaaS/artifacts/.ssh/id_rsa" REMOTE_ALLOWED_IPS=$PACKER_FIP SSH_KEYS=$CICD_SSH_KEY ZONES=${zones} RESOURCE_GROUP=${resource_group} COMPUTE_IMAGE_NAME=${image_name} SOURCE_IMAGE_NAME=${source_image_name} go test -v -timeout 900m -parallel 4 -run "TestRunHpcDeploymentForCustomImageBuilder" | tee hpc_log_$(date +%d-%m-%Y-%H-%M-%S).log + SOLUTION=${solution} PREFIX=${prefix} CLUSTER_NAME=${cluster_name} SSH_FILE_PATH="/HPCaaS/artifacts/.ssh/id_rsa" REMOTE_ALLOWED_IPS=$PACKER_FIP SSH_KEYS=$CICD_SSH_KEY ZONES=${zones} EXISTING_RESOURCE_GROUP=${existing_resource_group} COMPUTE_IMAGE_NAME=${image_name} SOURCE_IMAGE_NAME=${source_image_name} go test -v -timeout 900m -parallel 4 -run "TestRunHpcDeploymentForCustomImageBuilder" | tee hpc_log_$(date +%d-%m-%Y-%H-%M-%S).log fi fi echo "========== Deleting the SSH key =========" diff --git a/tools/image-builder/variables.tf b/tools/image-builder/variables.tf index d0029c4b..46063d04 100644 --- a/tools/image-builder/variables.tf +++ b/tools/image-builder/variables.tf @@ -16,12 +16,12 @@ variable "ibmcloud_api_key" { # Resource Groups Variables ############################################################################## -variable "resource_group" { +variable "existing_resource_group" { description = "Specify the existing resource group name from your IBM Cloud account where the VPC resources should be deployed. By default, the resource group name is set to 'Default.' Note that in some older accounts, the resource group name may be 'default,' so please validate the resource_group name before deployment. If the resource group value is set to the string \"null\", the automation will create two different resource groups named 'workload-rg' and 'service-rg.' For more information on resource groups, refer to Managing resource groups." type = string default = "Default" validation { - condition = var.resource_group != null + condition = var.existing_resource_group != null error_message = "If you want to provide null for resource_group variable, it should be within double quotes." } } @@ -188,11 +188,11 @@ variable "enable_fip" { } # tflint-ignore: terraform_unused_declarations -variable "cluster_id" { +variable "cluster_name" { type = string description = "Ensure that you have received the cluster ID from IBM technical sales. A unique identifer for HPC cluster used by IBM Cloud HPC to differentiate different HPC clusters within the same reservations. This can be up to 39 alphanumeric characters including the underscore (_), the hyphen (-), and the period (.) characters. You cannot change the cluster ID after deployment." 
validation { - condition = 0 < length(var.cluster_id) && length(var.cluster_id) < 40 && can(regex("^[a-zA-Z0-9_.-]+$", var.cluster_id)) + condition = 0 < length(var.cluster_name) && length(var.cluster_name) < 40 && can(regex("^[a-zA-Z0-9_.-]+$", var.cluster_name)) error_message = "The Cluster ID can be up to 39 alphanumeric characters including the underscore (_), the hyphen (-), and the period (.) characters. Other special characters and spaces are not allowed." } } diff --git a/tools/image-builder/version.tf b/tools/image-builder/version.tf index 51d06fd5..4d87918b 100644 --- a/tools/image-builder/version.tf +++ b/tools/image-builder/version.tf @@ -3,7 +3,7 @@ terraform { required_providers { ibm = { source = "IBM-Cloud/ibm" - version = "1.71.0" + version = "1.77.0" } null = { source = "hashicorp/null" From 98132b009be0fc4e3bd97cb2f5ad8aae9b6b10b2 Mon Sep 17 00:00:00 2001 From: Nupur Goyal Date: Sat, 12 Apr 2025 00:37:15 +0530 Subject: [PATCH 2/2] fixing pre-commit --- common-dev-assets | 2 +- tests/go.mod | 2 +- tests/go.sum | 4 +- tests/lsf/cluster_helpers.go | 49 ++++- tests/lsf/cluster_utils.go | 71 +++++-- tests/lsf/cluster_validation.go | 358 +++++++++++++++++++++++++++----- tests/other_test.go | 20 +- tests/utilities/api_utils.go | 8 +- tests/utilities/deployment.go | 69 ++++-- tests/utilities/helpers.go | 8 +- tests/utilities/report.go | 21 +- tests/utilities/ssh.go | 8 +- 12 files changed, 508 insertions(+), 112 deletions(-) diff --git a/common-dev-assets b/common-dev-assets index 8c7a97cb..2a2281ec 160000 --- a/common-dev-assets +++ b/common-dev-assets @@ -1 +1 @@ -Subproject commit 8c7a97cb00b128503d2c81380be904b6d196cc02 +Subproject commit 2a2281eca386901262a1d0c7b617dc07476d5944 diff --git a/tests/go.mod b/tests/go.mod index af6420fa..8c4147ab 100644 --- a/tests/go.mod +++ b/tests/go.mod @@ -16,7 +16,7 @@ require ( require ( dario.cat/mergo v1.0.1 // indirect - github.com/IBM-Cloud/bluemix-go v0.0.0-20250409011132-bdd4531aaa04 // indirect + github.com/IBM-Cloud/bluemix-go v0.0.0-20240719075425-078fcb3a55be // indirect github.com/IBM-Cloud/power-go-client v1.11.0 // indirect github.com/IBM/cloud-databases-go-sdk v0.7.1 // indirect github.com/IBM/platform-services-go-sdk v0.79.0 // indirect diff --git a/tests/go.sum b/tests/go.sum index 25fbde2a..93a6e0f3 100644 --- a/tests/go.sum +++ b/tests/go.sum @@ -1,7 +1,7 @@ dario.cat/mergo v1.0.1 h1:Ra4+bf83h2ztPIQYNP99R6m+Y7KfnARDfID+a+vLl4s= dario.cat/mergo v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= -github.com/IBM-Cloud/bluemix-go v0.0.0-20250409011132-bdd4531aaa04 h1:euG2yKR4snk8zIa5BeUKBWcIj4o1SBqX+k7CtIab6Y8= -github.com/IBM-Cloud/bluemix-go v0.0.0-20250409011132-bdd4531aaa04/go.mod h1:/7hMjdZA6fEpd/dQAOEABxKEwN0t72P3PlpEDu0Y7bE= +github.com/IBM-Cloud/bluemix-go v0.0.0-20240719075425-078fcb3a55be h1:USOcBHkYQ4o/ccoEvoHinrba8NQthLJpFXnAoBY+MI4= +github.com/IBM-Cloud/bluemix-go v0.0.0-20240719075425-078fcb3a55be/go.mod h1:/7hMjdZA6fEpd/dQAOEABxKEwN0t72P3PlpEDu0Y7bE= github.com/IBM-Cloud/power-go-client v1.11.0 h1:4xlYXF2+S3s6Crb0D2+d5c1kb6gUE7eowMXLB7Q6cWY= github.com/IBM-Cloud/power-go-client v1.11.0/go.mod h1:UDyXeIKEp6r7yWUXYu3r0ZnFSlNZ2YeQTHwM2Tmlgv0= github.com/IBM/cloud-databases-go-sdk v0.7.1 h1:5kK4/3NUsGxZzmuUe+1ftajpOQbeDVh5VeemrPgROP4= diff --git a/tests/lsf/cluster_helpers.go b/tests/lsf/cluster_helpers.go index aded2159..bb2d0754 100644 --- a/tests/lsf/cluster_helpers.go +++ b/tests/lsf/cluster_helpers.go @@ -135,7 +135,11 @@ func RebootInstance(t *testing.T, sshMgmtClient *ssh.Client, publicHostIP, 
publi bhostRespErr := LSFCheckBhostsResponse(t, sshClient, logger) utils.LogVerificationResult(t, bhostRespErr, "bhosts response non-empty", logger) - defer sshClient.Close() + defer func() { + if err := sshClient.Close(); err != nil { + logger.Info(t, fmt.Sprintf("failed to close sshClient: %v", err)) + } + }() } @@ -328,7 +332,12 @@ func VerifyManagementNodeLDAPConfig( utils.LogVerificationResult(t, err, "Connection to management node via SSH as LDAP User failed", logger) return } - defer sshLdapClient.Close() + + defer func() { + if err := sshLdapClient.Close(); err != nil { + logger.Info(t, fmt.Sprintf("failed to close sshLdapClient: %v", err)) + } + }() // Check file mount if err := HPCCheckFileMountAsLDAPUser(t, sshLdapClient, "management", logger); err != nil { @@ -353,7 +362,13 @@ func VerifyManagementNodeLDAPConfig( continue } logger.Info(t, fmt.Sprintf("Connected to management node %s via SSH as LDAP user", ip)) - sshLdapClientUser.Close() // Close connection immediately after usage + // Close connection immediately after usage + defer func() { + if err := sshLdapClientUser.Close(); err != nil { + logger.Info(t, fmt.Sprintf("failed to close sshLdapClientUser: %v", err)) + } + }() + } } @@ -384,7 +399,12 @@ func VerifyLoginNodeLDAPConfig( utils.LogVerificationResult(t, err, "Connection to login node via SSH as LDAP User failed", logger) return } - defer sshLdapClient.Close() + + defer func() { + if err := sshLdapClient.Close(); err != nil { + logger.Info(t, fmt.Sprintf("failed to close sshLdapClient: %v", err)) + } + }() // Check file mount if err := HPCCheckFileMountAsLDAPUser(t, sshLdapClient, "login", logger); err != nil { @@ -424,7 +444,12 @@ func VerifyComputeNodeLDAPConfig( utils.LogVerificationResult(t, connectionErr, "connect to the compute node via SSH as LDAP User failed", logger) return } - defer sshLdapClient.Close() + + defer func() { + if err := sshLdapClient.Close(); err != nil { + logger.Info(t, fmt.Sprintf("failed to close sshLdapClient: %v", err)) + } + }() // Verify LDAP configuration ldapErr := VerifyLDAPConfig(t, sshLdapClient, "compute", ldapServerIP, ldapDomainName, ldapUserName, logger) @@ -445,7 +470,12 @@ func VerifyComputeNodeLDAPConfig( logger.Info(t, fmt.Sprintf("connect to the compute node %s via SSH as LDAP User", computeNodeIPList[i])) } utils.LogVerificationResult(t, connectionErr, "connect to the compute node via SSH as LDAP User", logger) - defer sshLdapClientUser.Close() + + defer func() { + if err := sshLdapClientUser.Close(); err != nil { + logger.Info(t, fmt.Sprintf("failed to close sshLdapClientUser: %v", err)) + } + }() } } @@ -541,7 +571,12 @@ func VerifyCreateNewLdapUserAndManagementNodeLDAPConfig( utils.LogVerificationResult(t, err, "connect to the management node via SSH as the new LDAP user", logger) return } - defer sshLdapClientUser.Close() + + defer func() { + if err := sshLdapClientUser.Close(); err != nil { + logger.Info(t, fmt.Sprintf("failed to close sshLdapClientUser: %v", err)) + } + }() // Run job as the new LDAP user if err := LSFRunJobsAsLDAPUser(t, sshLdapClientUser, jobCommand, newLdapUserName, logger); err != nil { diff --git a/tests/lsf/cluster_utils.go b/tests/lsf/cluster_utils.go index b412fd90..98e71a4f 100644 --- a/tests/lsf/cluster_utils.go +++ b/tests/lsf/cluster_utils.go @@ -209,7 +209,7 @@ func LSFRestartDaemons(t *testing.T, sClient *ssh.Client, logger *utils.Aggregat time.Sleep(defaultSleepDuration) // Check if the restart was successful - if !(utils.VerifyDataContains(t, string(out), "Stopping", logger) && 
utils.VerifyDataContains(t, string(out), "Starting", logger)) { + if !utils.VerifyDataContains(t, string(out), "Stopping", logger) || !utils.VerifyDataContains(t, string(out), "Starting", logger) { return fmt.Errorf("lsf_daemons restart failed") } @@ -683,7 +683,7 @@ func LSFDaemonsStatus(t *testing.T, sClient *ssh.Client, logger *utils.Aggregate for scanner.Scan() { line := scanner.Text() if utils.VerifyDataContains(t, line, "pid", logger) { - if !(utils.VerifyDataContains(t, line, processes[i], logger) && utils.VerifyDataContains(t, line, expectedStatus, logger)) { + if !utils.VerifyDataContains(t, line, processes[i], logger) || !utils.VerifyDataContains(t, line, expectedStatus, logger) { return fmt.Errorf("%s is not running", processes[i]) } i++ @@ -801,7 +801,12 @@ func LSFCheckSSHKeyForManagementNodes(t *testing.T, publicHostName, publicHostIP if err != nil { return fmt.Errorf("failed to connect to the management node %s via SSH: %w", mgmtIP, err) } - defer mgmtSshClient.Close() + + defer func() { + if err := mgmtSshClient.Close(); err != nil { + logger.Info(t, fmt.Sprintf("failed to close mgmtSshClient: %v", err)) + } + }() logger.Info(t, fmt.Sprintf("SSH connection to the management node %s successful", mgmtIP)) @@ -1732,7 +1737,12 @@ func verifyPTRRecords(t *testing.T, sClient *ssh.Client, publicHostName, publicH if connectionErr != nil { return fmt.Errorf("failed to connect to the management node %s via SSH: %v", mgmtIP, connectionErr) } - defer mgmtSshClient.Close() + + defer func() { + if err := mgmtSshClient.Close(); err != nil { + logger.Info(t, fmt.Sprintf("failed to close mgmtSshClient: %v", err)) + } + }() // Verify PTR records on management node if err := verifyPTR(mgmtSshClient, fmt.Sprintf("management node %s", mgmtIP)); err != nil { @@ -1747,7 +1757,12 @@ func verifyPTRRecords(t *testing.T, sClient *ssh.Client, publicHostName, publicH if connectionErr != nil { return fmt.Errorf("failed to connect to the login node %s via SSH: %v", loginNodeIP, connectionErr) } - defer loginSshClient.Close() + + defer func() { + if err := loginSshClient.Close(); err != nil { + logger.Info(t, fmt.Sprintf("failed to close loginSshClient: %v", err)) + } + }() // Verify PTR records on login node if err := verifyPTR(loginSshClient, fmt.Sprintf("login node %s", loginNodeIP)); err != nil { @@ -2135,14 +2150,21 @@ func CheckSSSDServiceStatus(t *testing.T, sClient *ssh.Client, logger *utils.Agg // GetLDAPServerCert retrieves the LDAP server certificate by connecting to the LDAP server via SSH. // It requires the public host name, bastion IP, LDAP host name, and LDAP server IP as inputs. -// Returns the certificate as a string if successful or an error otherwise. +// Returns the certificate as a string if successful, or an error otherwise. 
func GetLDAPServerCert(publicHostName, bastionIP, ldapHostName, ldapServerIP string) (string, error) { // Establish SSH connection to LDAP server via bastion host sshClient, connectionErr := utils.ConnectToHost(publicHostName, bastionIP, ldapHostName, ldapServerIP) if connectionErr != nil { return "", fmt.Errorf("failed to connect to LDAP server via SSH: %w", connectionErr) } - defer sshClient.Close() + + // Ensure SSH client is closed, log any close errors + defer func() { + if err := sshClient.Close(); err != nil { + // Log the error instead of returning + fmt.Printf("warning: failed to close sshClient: %v\n", err) + } + }() // Command to retrieve LDAP server certificate const ldapServerCertCmd = `cat /etc/ssl/certs/ldap_cacert.pem` @@ -2153,7 +2175,6 @@ func GetLDAPServerCert(publicHostName, bastionIP, ldapHostName, ldapServerIP str return "", fmt.Errorf("failed to execute command '%s' via SSH: %w", ldapServerCertCmd, execErr) } - // Return the retrieved certificate return ldapServerCert, nil } @@ -2343,7 +2364,8 @@ func validateNodeLogFiles(t *testing.T, sClient *ssh.Client, node, sharedLogDir, } var logFiles []string - if nodeType == "management" { + switch nodeType { + case "management": logFiles = []string{ fmt.Sprintf("%s/sbatchd.log.%s", dirPath, node), fmt.Sprintf("%s/lim.log.%s", dirPath, node), @@ -2351,7 +2373,7 @@ func validateNodeLogFiles(t *testing.T, sClient *ssh.Client, node, sharedLogDir, fmt.Sprintf("%s/pim.log.%s", dirPath, node), fmt.Sprintf("%s/Install.log", dirPath), } - } else if nodeType == "master" { + case "master": logFiles = []string{ fmt.Sprintf("%s/mbatchd.log.%s", dirPath, node), fmt.Sprintf("%s/ebrokerd.log.%s", dirPath, node), @@ -2471,7 +2493,12 @@ func LogFilesAfterMasterReboot(t *testing.T, sClient *ssh.Client, bastionIP, man logger.Error(t, fmt.Sprintf("Failed to reconnect to the master via SSH after Management node Reboot: %s", connectionErr)) return fmt.Errorf("failed to reconnect to the master via SSH after Management node Reboot : %s", connectionErr) } - defer sClient.Close() + + defer func() { + if err := sClient.Close(); err != nil { + logger.Info(t, fmt.Sprintf("failed to close sClient: %v", err)) + } + }() // Validate the log files after reboot for _, node := range managementNodes { @@ -2535,7 +2562,12 @@ func LogFilesAfterMasterShutdown(t *testing.T, sshClient *ssh.Client, apiKey, re logger.Error(t, errorMessage) return fmt.Errorf("%s", errorMessage) } - defer sshClient.Close() + + defer func() { + if err := sshClient.Close(); err != nil { + logger.Info(t, fmt.Sprintf("failed to close sshClient: %v", err)) + } + }() // Retrieve the new master node name after shutdown newMasterNodeName, err := utils.GetMasterNodeName(t, sshClient, logger) @@ -2609,7 +2641,12 @@ func LogFilesAfterMasterShutdown(t *testing.T, sshClient *ssh.Client, apiKey, re logger.Error(t, errorMessage) return fmt.Errorf("%s", errorMessage) } - defer sshClient.Close() + + defer func() { + if err := sshClient.Close(); err != nil { + logger.Info(t, fmt.Sprintf("failed to close sshClient: %v", err)) + } + }() logger.Info(t, "Successfully switched back to the original master node after instance start") return nil @@ -2872,8 +2909,7 @@ func CheckAppCenterSetup(t *testing.T, sshClient *ssh.Client, logger *utils.Aggr } // Check for required configuration statuses in the output - if !(utils.VerifyDataContains(t, commandOutput, webguiStatus, logger) && - utils.VerifyDataContains(t, commandOutput, pncStatus, logger)) { + if !utils.VerifyDataContains(t, commandOutput, webguiStatus, 
logger) || !utils.VerifyDataContains(t, commandOutput, pncStatus, logger) { return fmt.Errorf("APP Center GUI or PNC configuration mismatch: %s", commandOutput) } @@ -3761,9 +3797,10 @@ func ValidateAtrackerRouteTarget(t *testing.T, apiKey, region, resourceGroup, cl // Normalize targetType before validation expectedTargetType := targetType - if targetType == "cloudlogs" { + switch targetType { + case "cloudlogs": expectedTargetType = "cloud_logs" - } else if targetType == "cos" { + case "cos": expectedTargetType = "cloud_object_storage" } diff --git a/tests/lsf/cluster_validation.go b/tests/lsf/cluster_validation.go index c2d7aff7..87966088 100644 --- a/tests/lsf/cluster_validation.go +++ b/tests/lsf/cluster_validation.go @@ -56,7 +56,12 @@ func ValidateClusterConfiguration(t *testing.T, options *testhelper.TestOptions, // Connect to the master node via SSH and handle connection errors sshClient, connectionErr := utils.ConnectToHost(LSF_PUBLIC_HOST_NAME, bastionIP, LSF_PRIVATE_HOST_NAME, managementNodeIPList[0]) require.NoError(t, connectionErr, "Failed to connect to the master via SSH") - defer sshClient.Close() + + defer func() { + if err := sshClient.Close(); err != nil { + testLogger.Info(t, fmt.Sprintf("failed to close sshClient: %v", err)) + } + }() testLogger.Info(t, "SSH connection to the master successful") t.Log("Validation in progress. Please wait...") @@ -82,7 +87,12 @@ func ValidateClusterConfiguration(t *testing.T, options *testhelper.TestOptions, // Reconnect to the management node after reboot sshClient, connectionErr = utils.ConnectToHost(LSF_PUBLIC_HOST_NAME, bastionIP, LSF_PRIVATE_HOST_NAME, managementNodeIPList[0]) require.NoError(t, connectionErr, "Failed to reconnect to the master via SSH") - defer sshClient.Close() + + defer func() { + if err := sshClient.Close(); err != nil { + testLogger.Info(t, fmt.Sprintf("failed to close sshClient: %v", err)) + } + }() // Wait for dynamic node disappearance and handle potential errors defer func() { @@ -112,7 +122,12 @@ func ValidateClusterConfiguration(t *testing.T, options *testhelper.TestOptions, // Verify SSH connectivity from login node and handle connection errors sshLoginNodeClient, connectionErr := utils.ConnectToHost(LSF_PUBLIC_HOST_NAME, bastionIP, LSF_PRIVATE_HOST_NAME, loginNodeIP) require.NoError(t, connectionErr, "Failed to connect to the login node via SSH") - defer sshLoginNodeClient.Close() + + defer func() { + if err := sshLoginNodeClient.Close(); err != nil { + testLogger.Info(t, fmt.Sprintf("failed to close sshLoginNodeClient: %v", err)) + } + }() // Verify login node configuration VerifyLoginNodeConfig(t, sshLoginNodeClient, expectedClusterName, expectedMasterName, expectedReservationID, expectedHyperthreadingEnabled, loginNodeIP, jobCommandLow, EXPECTED_LSF_VERSION, testLogger) @@ -181,7 +196,12 @@ func ValidateClusterConfigurationWithAPPCenter(t *testing.T, options *testhelper // Connect to the master node via SSH and handle connection errors sshClient, connectionErr := utils.ConnectToHost(LSF_PUBLIC_HOST_NAME, bastionIP, LSF_PRIVATE_HOST_NAME, managementNodeIPList[0]) require.NoError(t, connectionErr, "Failed to connect to the master via SSH") - defer sshClient.Close() + + defer func() { + if err := sshClient.Close(); err != nil { + testLogger.Info(t, fmt.Sprintf("failed to close sshClient: %v", err)) + } + }() testLogger.Info(t, "SSH connection to the master successful") t.Log("Validation in progress. 
Please wait...") @@ -213,7 +233,12 @@ func ValidateClusterConfigurationWithAPPCenter(t *testing.T, options *testhelper // Reconnect to the management node after reboot sshClient, connectionErr = utils.ConnectToHost(LSF_PUBLIC_HOST_NAME, bastionIP, LSF_PRIVATE_HOST_NAME, managementNodeIPList[0]) require.NoError(t, connectionErr, "Failed to reconnect to the master via SSH") - defer sshClient.Close() + + defer func() { + if err := sshClient.Close(); err != nil { + testLogger.Info(t, fmt.Sprintf("failed to close sshClient: %v", err)) + } + }() // Wait for dynamic node disappearance and handle potential errors defer func() { @@ -243,7 +268,12 @@ func ValidateClusterConfigurationWithAPPCenter(t *testing.T, options *testhelper // Verify SSH connectivity from login node and handle connection errors sshLoginNodeClient, connectionErr := utils.ConnectToHost(LSF_PUBLIC_HOST_NAME, bastionIP, LSF_PRIVATE_HOST_NAME, loginNodeIP) require.NoError(t, connectionErr, "Failed to connect to the login node via SSH") - defer sshLoginNodeClient.Close() + + defer func() { + if err := sshLoginNodeClient.Close(); err != nil { + testLogger.Info(t, fmt.Sprintf("failed to close sshLoginNodeClient: %v", err)) + } + }() // Verify login node configuration VerifyLoginNodeConfig(t, sshLoginNodeClient, expectedClusterName, expectedMasterName, expectedReservationID, expectedHyperthreadingEnabled, loginNodeIP, jobCommandLow, EXPECTED_LSF_VERSION, testLogger) @@ -325,7 +355,12 @@ func ValidateClusterConfigurationWithPACHA(t *testing.T, options *testhelper.Tes // Connect to the master node via SSH and handle connection errors sshClient, connectionErr := utils.ConnectToHost(LSF_PUBLIC_HOST_NAME, bastionIP, LSF_PRIVATE_HOST_NAME, managementNodeIPList[0]) require.NoError(t, connectionErr, "Failed to connect to the master via SSH") - defer sshClient.Close() + + defer func() { + if err := sshClient.Close(); err != nil { + testLogger.Info(t, fmt.Sprintf("failed to close sshClient: %v", err)) + } + }() testLogger.Info(t, "SSH connection to the master successful") t.Log("Validation in progress. 
Please wait...") @@ -360,7 +395,12 @@ func ValidateClusterConfigurationWithPACHA(t *testing.T, options *testhelper.Tes // Reconnect to the management node after reboot sshClient, connectionErr = utils.ConnectToHost(LSF_PUBLIC_HOST_NAME, bastionIP, LSF_PRIVATE_HOST_NAME, managementNodeIPList[0]) require.NoError(t, connectionErr, "Failed to reconnect to the master via SSH") - defer sshClient.Close() + + defer func() { + if err := sshClient.Close(); err != nil { + testLogger.Info(t, fmt.Sprintf("failed to close sshClient: %v", err)) + } + }() // Wait for dynamic node disappearance and handle potential errors defer func() { @@ -393,7 +433,12 @@ func ValidateClusterConfigurationWithPACHA(t *testing.T, options *testhelper.Tes // Verify SSH connectivity from login node and handle connection errors sshLoginNodeClient, connectionErr := utils.ConnectToHost(LSF_PUBLIC_HOST_NAME, bastionIP, LSF_PRIVATE_HOST_NAME, loginNodeIP) require.NoError(t, connectionErr, "Failed to connect to the login node via SSH") - defer sshLoginNodeClient.Close() + + defer func() { + if err := sshLoginNodeClient.Close(); err != nil { + testLogger.Info(t, fmt.Sprintf("failed to close sshLoginNodeClient: %v", err)) + } + }() // Verify login node configuration VerifyLoginNodeConfig(t, sshLoginNodeClient, expectedClusterName, expectedMasterName, expectedReservationID, expectedHyperthreadingEnabled, loginNodeIP, jobCommandLow, EXPECTED_LSF_VERSION, testLogger) @@ -456,7 +501,12 @@ func ValidateBasicClusterConfiguration(t *testing.T, options *testhelper.TestOpt // Connect to the master node via SSH and handle connection errors sshClient, connectionErr := utils.ConnectToHost(LSF_PUBLIC_HOST_NAME, bastionIP, LSF_PRIVATE_HOST_NAME, managementNodeIPList[0]) require.NoError(t, connectionErr, "Failed to connect to the master via SSH") - defer sshClient.Close() + + defer func() { + if err := sshClient.Close(); err != nil { + testLogger.Info(t, fmt.Sprintf("failed to close sshClient: %v", err)) + } + }() testLogger.Info(t, "SSH connection to the master successful") t.Log("Validation in progress. Please wait...") @@ -486,7 +536,12 @@ func ValidateBasicClusterConfiguration(t *testing.T, options *testhelper.TestOpt // Verify SSH connectivity from login node and handle connection errors sshLoginNodeClient, connectionErr := utils.ConnectToHost(LSF_PUBLIC_HOST_NAME, bastionIP, LSF_PRIVATE_HOST_NAME, loginNodeIP) require.NoError(t, connectionErr, "Failed to connect to the login node via SSH") - defer sshLoginNodeClient.Close() + + defer func() { + if err := sshLoginNodeClient.Close(); err != nil { + testLogger.Info(t, fmt.Sprintf("failed to close sshLoginNodeClient: %v", err)) + } + }() // Verify login node configuration VerifyLoginNodeConfig(t, sshLoginNodeClient, expectedClusterName, expectedMasterName, expectedReservationID, expectedHyperthreadingEnabled, loginNodeIP, jobCommandLow, EXPECTED_LSF_VERSION, testLogger) @@ -545,7 +600,12 @@ func ValidateBasicClusterConfigurationWithDynamicProfile(t *testing.T, options * // Connect to the master node via SSH and handle connection errors sshClient, connectionErr := utils.ConnectToHost(LSF_PUBLIC_HOST_NAME, bastionIP, LSF_PRIVATE_HOST_NAME, managementNodeIPList[0]) require.NoError(t, connectionErr, "Failed to connect to the master via SSH") - defer sshClient.Close() + + defer func() { + if err := sshClient.Close(); err != nil { + testLogger.Info(t, fmt.Sprintf("failed to close sshClient: %v", err)) + } + }() testLogger.Info(t, "SSH connection to the master successful") t.Log("Validation in progress. 
Please wait...") @@ -578,7 +638,12 @@ func ValidateBasicClusterConfigurationWithDynamicProfile(t *testing.T, options * // Verify SSH connectivity from login node and handle connection errors sshLoginNodeClient, connectionErr := utils.ConnectToHost(LSF_PUBLIC_HOST_NAME, bastionIP, LSF_PRIVATE_HOST_NAME, loginNodeIP) require.NoError(t, connectionErr, "Failed to connect to the login node via SSH") - defer sshLoginNodeClient.Close() + + defer func() { + if err := sshLoginNodeClient.Close(); err != nil { + testLogger.Info(t, fmt.Sprintf("failed to close sshLoginNodeClient: %v", err)) + } + }() // Verify login node configuration VerifyLoginNodeConfig(t, sshLoginNodeClient, expectedClusterName, expectedMasterName, expectedReservationID, expectedHyperthreadingEnabled, loginNodeIP, jobCommandLow, EXPECTED_LSF_VERSION, testLogger) @@ -636,7 +701,12 @@ func ValidateLDAPClusterConfiguration(t *testing.T, options *testhelper.TestOpti // Connect to the master node via SSH and handle connection errors sshClient, connectionErr := utils.ConnectToHost(LSF_PUBLIC_HOST_NAME, bastionIP, LSF_PRIVATE_HOST_NAME, managementNodeIPList[0]) require.NoError(t, connectionErr, "Failed to connect to the master via SSH") - defer sshClient.Close() + + defer func() { + if err := sshClient.Close(); err != nil { + testLogger.Info(t, fmt.Sprintf("failed to close sshClient: %v", err)) + } + }() testLogger.Info(t, "SSH connection to the master successful") t.Log("Validation in progress. Please wait...") @@ -662,7 +732,12 @@ func ValidateLDAPClusterConfiguration(t *testing.T, options *testhelper.TestOpti // Reconnect to the management node after reboot sshClient, connectionErr = utils.ConnectToHost(LSF_PUBLIC_HOST_NAME, bastionIP, LSF_PRIVATE_HOST_NAME, managementNodeIPList[0]) require.NoError(t, connectionErr, "Failed to reconnect to the master via SSH") - defer sshClient.Close() + + defer func() { + if err := sshClient.Close(); err != nil { + testLogger.Info(t, fmt.Sprintf("failed to close sshClient: %v", err)) + } + }() // Wait for dynamic node disappearance and handle potential errors defer func() { @@ -692,7 +767,12 @@ func ValidateLDAPClusterConfiguration(t *testing.T, options *testhelper.TestOpti // Verify SSH connectivity from login node and handle connection errors sshLoginNodeClient, connectionErr := utils.ConnectToHost(LSF_PUBLIC_HOST_NAME, bastionIP, LSF_PRIVATE_HOST_NAME, loginNodeIP) require.NoError(t, connectionErr, "Failed to connect to the login node via SSH") - defer sshLoginNodeClient.Close() + + defer func() { + if err := sshLoginNodeClient.Close(); err != nil { + testLogger.Info(t, fmt.Sprintf("failed to close sshLoginNodeClient: %v", err)) + } + }() // Verify login node configuration VerifyLoginNodeConfig(t, sshLoginNodeClient, expectedClusterName, expectedMasterName, expectedReservationID, expectedHyperthreadingEnabled, loginNodeIP, jobCommandLow, EXPECTED_LSF_VERSION, testLogger) @@ -706,7 +786,12 @@ func ValidateLDAPClusterConfiguration(t *testing.T, options *testhelper.TestOpti // Connect to the LDAP server via SSH and handle connection errors sshLdapClient, connectionErr := utils.ConnectToHost(LSF_PUBLIC_HOST_NAME, bastionIP, LSF_LDAP_HOST_NAME, ldapServerIP) require.NoError(t, connectionErr, "Failed to connect to the LDAP server via SSH") - defer sshLdapClient.Close() + + defer func() { + if err := sshLdapClient.Close(); err != nil { + testLogger.Info(t, fmt.Sprintf("failed to close sshLdapClient: %v", err)) + } + }() // Check LDAP server status CheckLDAPServerStatus(t, sshLdapClient, 
ldapAdminPassword, expectedLdapDomain, ldapUserName, testLogger) @@ -770,7 +855,12 @@ func ValidatePACANDLDAPClusterConfiguration(t *testing.T, options *testhelper.Te // Connect to the master node via SSH and handle connection errors sshClient, connectionErr := utils.ConnectToHost(LSF_PUBLIC_HOST_NAME, bastionIP, LSF_PRIVATE_HOST_NAME, managementNodeIPList[0]) require.NoError(t, connectionErr, "Failed to connect to the master via SSH") - defer sshClient.Close() + + defer func() { + if err := sshClient.Close(); err != nil { + testLogger.Info(t, fmt.Sprintf("failed to close sshClient: %v", err)) + } + }() testLogger.Info(t, "SSH connection to the master successful") t.Log("Validation in progress. Please wait...") @@ -802,7 +892,12 @@ func ValidatePACANDLDAPClusterConfiguration(t *testing.T, options *testhelper.Te // Reconnect to the management node after reboot sshClient, connectionErr = utils.ConnectToHost(LSF_PUBLIC_HOST_NAME, bastionIP, LSF_PRIVATE_HOST_NAME, managementNodeIPList[0]) require.NoError(t, connectionErr, "Failed to reconnect to the master via SSH") - defer sshClient.Close() + + defer func() { + if err := sshClient.Close(); err != nil { + testLogger.Info(t, fmt.Sprintf("failed to close sshClient: %v", err)) + } + }() // Wait for dynamic node disappearance and handle potential errors defer func() { @@ -832,7 +927,12 @@ func ValidatePACANDLDAPClusterConfiguration(t *testing.T, options *testhelper.Te // Verify SSH connectivity from login node and handle connection errors sshLoginNodeClient, connectionErr := utils.ConnectToHost(LSF_PUBLIC_HOST_NAME, bastionIP, LSF_PRIVATE_HOST_NAME, loginNodeIP) require.NoError(t, connectionErr, "Failed to connect to the login node via SSH") - defer sshLoginNodeClient.Close() + + defer func() { + if err := sshLoginNodeClient.Close(); err != nil { + testLogger.Info(t, fmt.Sprintf("failed to close sshLoginNodeClient: %v", err)) + } + }() // Verify login node configuration VerifyLoginNodeConfig(t, sshLoginNodeClient, expectedClusterName, expectedMasterName, expectedReservationID, expectedHyperthreadingEnabled, loginNodeIP, jobCommandLow, EXPECTED_LSF_VERSION, testLogger) @@ -846,7 +946,12 @@ func ValidatePACANDLDAPClusterConfiguration(t *testing.T, options *testhelper.Te // Connect to the LDAP server via SSH and handle connection errors sshLdapClient, connectionErr := utils.ConnectToHost(LSF_PUBLIC_HOST_NAME, bastionIP, LSF_LDAP_HOST_NAME, ldapServerIP) require.NoError(t, connectionErr, "Failed to connect to the LDAP server via SSH") - defer sshLdapClient.Close() + + defer func() { + if err := sshLdapClient.Close(); err != nil { + testLogger.Info(t, fmt.Sprintf("failed to close sshLdapClient: %v", err)) + } + }() // Check LDAP server status CheckLDAPServerStatus(t, sshLdapClient, ldapAdminPassword, expectedLdapDomain, ldapUserName, testLogger) @@ -912,7 +1017,12 @@ func ValidateBasicClusterConfigurationWithVPCFlowLogsAndCos(t *testing.T, option // Connect to the master node via SSH and handle connection errors sshClient, connectionErr := utils.ConnectToHost(LSF_PUBLIC_HOST_NAME, bastionIP, LSF_PRIVATE_HOST_NAME, managementNodeIPList[0]) require.NoError(t, connectionErr, "Failed to connect to the master via SSH") - defer sshClient.Close() + + defer func() { + if err := sshClient.Close(); err != nil { + testLogger.Info(t, fmt.Sprintf("failed to close sshClient: %v", err)) + } + }() testLogger.Info(t, "SSH connection to the master successful") t.Log("Validation in progress. 
Please wait...") @@ -942,7 +1052,12 @@ func ValidateBasicClusterConfigurationWithVPCFlowLogsAndCos(t *testing.T, option // Verify SSH connectivity from login node and handle connection errors sshLoginNodeClient, connectionErr := utils.ConnectToHost(LSF_PUBLIC_HOST_NAME, bastionIP, LSF_PRIVATE_HOST_NAME, loginNodeIP) require.NoError(t, connectionErr, "Failed to connect to the login node via SSH") - defer sshLoginNodeClient.Close() + + defer func() { + if err := sshLoginNodeClient.Close(); err != nil { + testLogger.Info(t, fmt.Sprintf("failed to close sshLoginNodeClient: %v", err)) + } + }() // Verify login node configuration VerifyLoginNodeConfig(t, sshLoginNodeClient, expectedClusterName, expectedMasterName, expectedReservationID, expectedHyperthreadingEnabled, loginNodeIP, jobCommandLow, EXPECTED_LSF_VERSION, testLogger) @@ -1004,8 +1119,18 @@ func ValidateClusterConfigurationWithMultipleKeys(t *testing.T, options *testhel sshClientOne, sshClientTwo, connectionErrOne, connectionErrTwo := utils.ConnectToHostsWithMultipleUsers(LSF_PUBLIC_HOST_NAME, bastionIP, LSF_PRIVATE_HOST_NAME, managementNodeIPList[0]) require.NoError(t, connectionErrOne, "Failed to connect to the master via SSH") require.NoError(t, connectionErrTwo, "Failed to connect to the master via SSH") - defer sshClientOne.Close() - defer sshClientTwo.Close() + + defer func() { + if err := sshClientOne.Close(); err != nil { + testLogger.Info(t, fmt.Sprintf("failed to close sshClientOne: %v", err)) + } + }() + + defer func() { + if err := sshClientTwo.Close(); err != nil { + testLogger.Info(t, fmt.Sprintf("failed to close sshClientTwo: %v", err)) + } + }() testLogger.Info(t, "SSH connection to the master successful") t.Log("Validation in progress. Please wait...") @@ -1029,7 +1154,12 @@ func ValidateClusterConfigurationWithMultipleKeys(t *testing.T, options *testhel // Reconnect to the management node after reboot sshClientOne, connectionErrOne = utils.ConnectToHost(LSF_PUBLIC_HOST_NAME, bastionIP, LSF_PRIVATE_HOST_NAME, managementNodeIPList[0]) require.NoError(t, connectionErrOne, "Failed to reconnect to the master via SSH: %v", connectionErrOne) - defer sshClientOne.Close() + + defer func() { + if err := sshClientOne.Close(); err != nil { + testLogger.Info(t, fmt.Sprintf("failed to close sshClientOne: %v", err)) + } + }() // Wait for dynamic node disappearance and handle potential errors defer func() { @@ -1059,7 +1189,12 @@ func ValidateClusterConfigurationWithMultipleKeys(t *testing.T, options *testhel // Verify SSH connectivity from login node sshLoginNodeClient, connectionErr := utils.ConnectToHost(LSF_PUBLIC_HOST_NAME, bastionIP, LSF_PRIVATE_HOST_NAME, loginNodeIP) require.NoError(t, connectionErr, "Failed to connect to the login node via SSH") - defer sshLoginNodeClient.Close() + + defer func() { + if err := sshLoginNodeClient.Close(); err != nil { + testLogger.Info(t, fmt.Sprintf("failed to close sshLoginNodeClient: %v", err)) + } + }() // Verify login node configuration VerifyLoginNodeConfig(t, sshLoginNodeClient, expectedClusterName, expectedMasterName, expectedReservationID, expectedHyperthreadingEnabled, loginNodeIP, jobCommandLow, EXPECTED_LSF_VERSION, testLogger) @@ -1123,7 +1258,12 @@ func ValidateExistingLDAPClusterConfig(t *testing.T, ldapServerBastionIP, ldapSe // Connect to the master node via SSH and handle connection errors sshClient, connectionErr := utils.ConnectToHost(LSF_PUBLIC_HOST_NAME, bastionIP, LSF_PRIVATE_HOST_NAME, managementNodeIPList[0]) require.NoError(t, connectionErr, "Failed to connect to the 
master via SSH") - defer sshClient.Close() + + defer func() { + if err := sshClient.Close(); err != nil { + testLogger.Info(t, fmt.Sprintf("failed to close sshClient: %v", err)) + } + }() testLogger.Info(t, "SSH connection to the master successful") t.Log("Validation in progress. Please wait...") @@ -1149,7 +1289,12 @@ func ValidateExistingLDAPClusterConfig(t *testing.T, ldapServerBastionIP, ldapSe // Reconnect to the management node after reboot sshClient, connectionErr = utils.ConnectToHost(LSF_PUBLIC_HOST_NAME, bastionIP, LSF_PRIVATE_HOST_NAME, managementNodeIPList[0]) require.NoError(t, connectionErr, "Failed to reconnect to the master via SSH") - defer sshClient.Close() + + defer func() { + if err := sshClient.Close(); err != nil { + testLogger.Info(t, fmt.Sprintf("failed to close sshClient: %v", err)) + } + }() // Wait for dynamic node disappearance and handle potential errors defer func() { @@ -1178,7 +1323,12 @@ func ValidateExistingLDAPClusterConfig(t *testing.T, ldapServerBastionIP, ldapSe // Verify SSH connectivity from login node and handle connection errors sshLoginNodeClient, connectionErr := utils.ConnectToHost(LSF_PUBLIC_HOST_NAME, bastionIP, LSF_PRIVATE_HOST_NAME, loginNodeIP) require.NoError(t, connectionErr, "Failed to connect to the login node via SSH") - defer sshLoginNodeClient.Close() + + defer func() { + if err := sshLoginNodeClient.Close(); err != nil { + testLogger.Info(t, fmt.Sprintf("failed to close sshLoginNodeClient: %v", err)) + } + }() // Verify login node configuration VerifyLoginNodeConfig(t, sshLoginNodeClient, expectedClusterName, expectedMasterName, expectedReservationID, expectedHyperthreadingEnabled, loginNodeIP, jobCommandLow, EXPECTED_LSF_VERSION, testLogger) @@ -1195,7 +1345,12 @@ func ValidateExistingLDAPClusterConfig(t *testing.T, ldapServerBastionIP, ldapSe // Connect to the LDAP server via SSH sshLdapClient, connectionErr := utils.ConnectToHost(LSF_PUBLIC_HOST_NAME, ldapServerBastionIP, LSF_LDAP_HOST_NAME, ldapServerIP) require.NoError(t, connectionErr, "Failed to connect to the LDAP server via SSH") - defer sshLdapClient.Close() + + defer func() { + if err := sshLdapClient.Close(); err != nil { + testLogger.Info(t, fmt.Sprintf("failed to close sshLdapClient: %v", err)) + } + }() // Check LDAP server status CheckLDAPServerStatus(t, sshLdapClient, ldapAdminPassword, expectedLdapDomain, ldapUserName, testLogger) @@ -1256,7 +1411,12 @@ func ValidateBasicClusterConfigurationLSFLogs(t *testing.T, options *testhelper. // Establish SSH connection to master node sshClient, connectionErr := utils.ConnectToHost(LSF_PUBLIC_HOST_NAME, bastionIP, LSF_PRIVATE_HOST_NAME, managementNodeIPList[0]) require.NoError(t, connectionErr, "Failed to connect to the master via SSH") - defer sshClient.Close() + + defer func() { + if err := sshClient.Close(); err != nil { + testLogger.Info(t, fmt.Sprintf("failed to close sshClient: %v", err)) + } + }() testLogger.Info(t, "SSH connection to the master successful") t.Log("Validation in progress. Please wait...") @@ -1270,7 +1430,12 @@ func ValidateBasicClusterConfigurationLSFLogs(t *testing.T, options *testhelper. 
// Reconnect to the master node via SSH after reboot sshClient, connectionErr = utils.ConnectToHost(LSF_PUBLIC_HOST_NAME, bastionIP, LSF_PRIVATE_HOST_NAME, managementNodeIPList[0]) require.NoError(t, connectionErr, "Failed to reconnect to the master via SSH") - defer sshClient.Close() + + defer func() { + if err := sshClient.Close(); err != nil { + testLogger.Info(t, fmt.Sprintf("failed to close sshClient: %v", err)) + } + }() // Wait for dynamic node disappearance defer func() { @@ -1294,7 +1459,12 @@ func ValidateBasicClusterConfigurationLSFLogs(t *testing.T, options *testhelper. // Verify SSH connectivity from login node sshLoginNodeClient, connectionErr := utils.ConnectToHost(LSF_PUBLIC_HOST_NAME, bastionIP, LSF_PRIVATE_HOST_NAME, loginNodeIP) require.NoError(t, connectionErr, "Failed to connect to the login node via SSH") - defer sshLoginNodeClient.Close() + + defer func() { + if err := sshLoginNodeClient.Close(); err != nil { + testLogger.Info(t, fmt.Sprintf("failed to close sshLoginNodeClient: %v", err)) + } + }() // Validate login node configuration VerifyLoginNodeConfig(t, sshLoginNodeClient, expectedClusterName, expectedMasterName, expectedReservationID, expectedHyperthreadingEnabled, loginNodeIP, jobCommandLow, EXPECTED_LSF_VERSION, testLogger) @@ -1345,7 +1515,12 @@ func ValidateBasicClusterConfigurationWithDedicatedHost(t *testing.T, options *t // Connect to the master node via SSH and handle connection errors sshClient, connectionErr := utils.ConnectToHost(LSF_PUBLIC_HOST_NAME, bastionIP, LSF_PRIVATE_HOST_NAME, managementNodeIPList[0]) require.NoError(t, connectionErr, "Failed to connect to the master via SSH") - defer sshClient.Close() + + defer func() { + if err := sshClient.Close(); err != nil { + testLogger.Info(t, fmt.Sprintf("failed to close sshClient: %v", err)) + } + }() testLogger.Info(t, "SSH connection to the master successful") t.Log("Validation in progress. Please wait...") @@ -1378,7 +1553,12 @@ func ValidateBasicClusterConfigurationWithDedicatedHost(t *testing.T, options *t // Verify SSH connectivity from login node and handle connection errors sshLoginNodeClient, connectionErr := utils.ConnectToHost(LSF_PUBLIC_HOST_NAME, bastionIP, LSF_PRIVATE_HOST_NAME, loginNodeIP) require.NoError(t, connectionErr, "Failed to connect to the login node via SSH") - defer sshLoginNodeClient.Close() + + defer func() { + if err := sshLoginNodeClient.Close(); err != nil { + testLogger.Info(t, fmt.Sprintf("failed to close sshLoginNodeClient: %v", err)) + } + }() // Verify login node configuration VerifyLoginNodeConfig(t, sshLoginNodeClient, expectedClusterName, expectedMasterName, expectedReservationID, expectedHyperthreadingEnabled, loginNodeIP, jobCommandLow, EXPECTED_LSF_VERSION, testLogger) @@ -1438,7 +1618,12 @@ func ValidateBasicClusterConfigurationWithSCC(t *testing.T, options *testhelper. // Connect to the master node via SSH and handle connection errors sshClient, connectionErr := utils.ConnectToHost(LSF_PUBLIC_HOST_NAME, bastionIP, LSF_PRIVATE_HOST_NAME, managementNodeIPList[0]) require.NoError(t, connectionErr, "Failed to connect to the master via SSH") - defer sshClient.Close() + + defer func() { + if err := sshClient.Close(); err != nil { + testLogger.Info(t, fmt.Sprintf("failed to close sshClient: %v", err)) + } + }() testLogger.Info(t, "SSH connection to the master successful") t.Log("Validation in progress. Please wait...") @@ -1471,7 +1656,12 @@ func ValidateBasicClusterConfigurationWithSCC(t *testing.T, options *testhelper. 
// Verify SSH connectivity from login node and handle connection errors sshLoginNodeClient, connectionErr := utils.ConnectToHost(LSF_PUBLIC_HOST_NAME, bastionIP, LSF_PRIVATE_HOST_NAME, loginNodeIP) require.NoError(t, connectionErr, "Failed to connect to the login node via SSH") - defer sshLoginNodeClient.Close() + + defer func() { + if err := sshLoginNodeClient.Close(); err != nil { + testLogger.Info(t, fmt.Sprintf("failed to close sshLoginNodeClient: %v", err)) + } + }() // Verify login node configuration VerifyLoginNodeConfig(t, sshLoginNodeClient, expectedClusterName, expectedMasterName, expectedReservationID, expectedHyperthreadingEnabled, loginNodeIP, jobCommandLow, EXPECTED_LSF_VERSION, testLogger) @@ -1535,7 +1725,12 @@ func ValidateBasicClusterConfigurationWithCloudLogs(t *testing.T, options *testh // Connect to the master node via SSH and handle connection errors sshClient, connectionErr := utils.ConnectToHost(LSF_PUBLIC_HOST_NAME, bastionIP, LSF_PRIVATE_HOST_NAME, managementNodeIPList[0]) require.NoError(t, connectionErr, "Failed to connect to the master via SSH") - defer sshClient.Close() + + defer func() { + if err := sshClient.Close(); err != nil { + testLogger.Info(t, fmt.Sprintf("failed to close sshClient: %v", err)) + } + }() testLogger.Info(t, "SSH connection to the master successful") t.Log("Validation in progress. Please wait...") @@ -1568,7 +1763,12 @@ func ValidateBasicClusterConfigurationWithCloudLogs(t *testing.T, options *testh // Verify SSH connectivity from login node and handle connection errors sshLoginNodeClient, connectionErr := utils.ConnectToHost(LSF_PUBLIC_HOST_NAME, bastionIP, LSF_PRIVATE_HOST_NAME, loginNodeIP) require.NoError(t, connectionErr, "Failed to connect to the login node via SSH") - defer sshLoginNodeClient.Close() + + defer func() { + if err := sshLoginNodeClient.Close(); err != nil { + testLogger.Info(t, fmt.Sprintf("failed to close sshLoginNodeClient: %v", err)) + } + }() // Verify login node configuration VerifyLoginNodeConfig(t, sshLoginNodeClient, expectedClusterName, expectedMasterName, expectedReservationID, expectedHyperthreadingEnabled, loginNodeIP, jobCommandLow, EXPECTED_LSF_VERSION, testLogger) @@ -1632,7 +1832,12 @@ func ValidateBasicClusterConfigurationWithCloudMonitoring(t *testing.T, options // Connect to the master node via SSH and handle connection errors sshClient, connectionErr := utils.ConnectToHost(LSF_PUBLIC_HOST_NAME, bastionIP, LSF_PRIVATE_HOST_NAME, managementNodeIPList[0]) require.NoError(t, connectionErr, "Failed to connect to the master via SSH") - defer sshClient.Close() + + defer func() { + if err := sshClient.Close(); err != nil { + testLogger.Info(t, fmt.Sprintf("failed to close sshClient: %v", err)) + } + }() testLogger.Info(t, "SSH connection to the master successful") t.Log("Validation in progress. 
Please wait...") @@ -1665,7 +1870,12 @@ func ValidateBasicClusterConfigurationWithCloudMonitoring(t *testing.T, options // Verify SSH connectivity from login node and handle connection errors sshLoginNodeClient, connectionErr := utils.ConnectToHost(LSF_PUBLIC_HOST_NAME, bastionIP, LSF_PRIVATE_HOST_NAME, loginNodeIP) require.NoError(t, connectionErr, "Failed to connect to the login node via SSH") - defer sshLoginNodeClient.Close() + + defer func() { + if err := sshLoginNodeClient.Close(); err != nil { + testLogger.Info(t, fmt.Sprintf("failed to close sshLoginNodeClient: %v", err)) + } + }() // Verify login node configuration VerifyLoginNodeConfig(t, sshLoginNodeClient, expectedClusterName, expectedMasterName, expectedReservationID, expectedHyperthreadingEnabled, loginNodeIP, jobCommandLow, EXPECTED_LSF_VERSION, testLogger) @@ -1722,7 +1932,13 @@ func ValidateBasicClusterConfigurationWithCloudAtracker(t *testing.T, options *t // Establish SSH connection to master node sshClient, err := utils.ConnectToHost(LSF_PUBLIC_HOST_NAME, bastionIP, LSF_PRIVATE_HOST_NAME, managementNodeIPList[0]) require.NoError(t, err, "Failed to connect to the master node via SSH") - defer sshClient.Close() + + defer func() { + if err := sshClient.Close(); err != nil { + testLogger.Info(t, fmt.Sprintf("failed to close sshClient: %v", err)) + } + }() + testLogger.Info(t, "SSH connection to master node successful") t.Log("Validation in progress. Please wait...") @@ -1754,7 +1970,12 @@ func ValidateBasicClusterConfigurationWithCloudAtracker(t *testing.T, options *t // Establish SSH connection to login node sshLoginNodeClient, err := utils.ConnectToHost(LSF_PUBLIC_HOST_NAME, bastionIP, LSF_PRIVATE_HOST_NAME, loginNodeIP) require.NoError(t, err, "Failed to connect to the login node via SSH") - defer sshLoginNodeClient.Close() + + defer func() { + if err := sshLoginNodeClient.Close(); err != nil { + testLogger.Info(t, fmt.Sprintf("failed to close sshLoginNodeClient: %v", err)) + } + }() // Verify login node configuration VerifyLoginNodeConfig(t, sshLoginNodeClient, expectedClusterName, expectedMasterName, expectedReservationID, expectedHyperthreadingEnabled, loginNodeIP, jobCommandLow, EXPECTED_LSF_VERSION, testLogger) @@ -1798,7 +2019,12 @@ func ValidateClusterConfigWithAPPCenterOnExistingEnvironment( // Connect to the master node via SSH sshClient, connectionErr := utils.ConnectToHost(LSF_PUBLIC_HOST_NAME, bastionIP, LSF_PRIVATE_HOST_NAME, managementNodeIPList[0]) require.NoError(t, connectionErr, "Failed to connect to the master via SSH") - defer sshClient.Close() + + defer func() { + if err := sshClient.Close(); err != nil { + testLogger.Info(t, fmt.Sprintf("failed to close sshClient: %v", err)) + } + }() testLogger.Info(t, "SSH connection to the master successful") t.Log("Validation in progress. 
Please wait...") @@ -1821,7 +2047,12 @@ func ValidateClusterConfigWithAPPCenterOnExistingEnvironment( // Reconnect to the master node via SSH after reboot sshClient, connectionErr = utils.ConnectToHost(LSF_PUBLIC_HOST_NAME, bastionIP, LSF_PRIVATE_HOST_NAME, managementNodeIPList[0]) require.NoError(t, connectionErr, "Failed to reconnect to the master via SSH") - defer sshClient.Close() + + defer func() { + if err := sshClient.Close(); err != nil { + testLogger.Info(t, fmt.Sprintf("failed to close sshClient: %v", err)) + } + }() // Wait for dynamic node disappearance and handle potential errors defer func() { @@ -1846,7 +2077,12 @@ func ValidateClusterConfigWithAPPCenterOnExistingEnvironment( // Verify SSH connectivity from login node sshLoginNodeClient, connectionErr := utils.ConnectToHost(LSF_PUBLIC_HOST_NAME, bastionIP, LSF_PRIVATE_HOST_NAME, loginNodeIP) require.NoError(t, connectionErr, "Failed to connect to the login node via SSH") - defer sshLoginNodeClient.Close() + + defer func() { + if err := sshLoginNodeClient.Close(); err != nil { + testLogger.Info(t, fmt.Sprintf("failed to close sshLoginNodeClient: %v", err)) + } + }() // Verify login node configuration VerifyLoginNodeConfig(t, sshLoginNodeClient, expectedClusterName, expectedMasterName, expectedReservationID, expectedHyperthreadingEnabled, loginNodeIP, jobCommandLow, EXPECTED_LSF_VERSION, testLogger) @@ -1903,7 +2139,12 @@ func ValidateClusterConfigWithAPPCenterAndLDAPOnExistingEnvironment( // Connect to the master node via SSH sshClient, connectionErr := utils.ConnectToHost(LSF_PUBLIC_HOST_NAME, bastionIP, LSF_PRIVATE_HOST_NAME, managementNodeIPList[0]) require.NoError(t, connectionErr, "Failed to connect to the master via SSH") - defer sshClient.Close() + + defer func() { + if err := sshClient.Close(); err != nil { + testLogger.Info(t, fmt.Sprintf("failed to close sshClient: %v", err)) + } + }() testLogger.Info(t, "SSH connection to the master successful") t.Log("Validation in progress. 
Please wait...") @@ -1926,7 +2167,12 @@ func ValidateClusterConfigWithAPPCenterAndLDAPOnExistingEnvironment( // Reconnect to the master node via SSH after reboot sshClient, connectionErr = utils.ConnectToHost(LSF_PUBLIC_HOST_NAME, bastionIP, LSF_PRIVATE_HOST_NAME, managementNodeIPList[0]) require.NoError(t, connectionErr, "Failed to reconnect to the master via SSH") - defer sshClient.Close() + + defer func() { + if err := sshClient.Close(); err != nil { + testLogger.Info(t, fmt.Sprintf("failed to close sshClient: %v", err)) + } + }() // Wait for dynamic node disappearance and handle potential errors defer func() { @@ -1951,7 +2197,12 @@ func ValidateClusterConfigWithAPPCenterAndLDAPOnExistingEnvironment( // Verify SSH connectivity from login node sshLoginNodeClient, connectionErr := utils.ConnectToHost(LSF_PUBLIC_HOST_NAME, bastionIP, LSF_PRIVATE_HOST_NAME, loginNodeIP) require.NoError(t, connectionErr, "Failed to connect to the login node via SSH") - defer sshLoginNodeClient.Close() + + defer func() { + if err := sshLoginNodeClient.Close(); err != nil { + testLogger.Info(t, fmt.Sprintf("failed to close sshLoginNodeClient: %v", err)) + } + }() // Verify login node configuration VerifyLoginNodeConfig(t, sshLoginNodeClient, expectedClusterName, expectedMasterName, expectedReservationID, expectedHyperthreadingEnabled, loginNodeIP, jobCommandLow, EXPECTED_LSF_VERSION, testLogger) @@ -1978,7 +2229,12 @@ func ValidateClusterConfigWithAPPCenterAndLDAPOnExistingEnvironment( // Connect to the LDAP server via SSH and handle connection errors sshLdapClient, connectionErr := utils.ConnectToHost(LSF_PUBLIC_HOST_NAME, bastionIP, LSF_LDAP_HOST_NAME, ldapServerIP) require.NoError(t, connectionErr, "Failed to connect to the LDAP server via SSH") - defer sshLdapClient.Close() + + defer func() { + if err := sshLdapClient.Close(); err != nil { + testLogger.Info(t, fmt.Sprintf("failed to close sshLdapClient: %v", err)) + } + }() // Check LDAP server status CheckLDAPServerStatus(t, sshLdapClient, ldapAdminPassword, expectedLdapDomain, ldapUserName, testLogger) diff --git a/tests/other_test.go b/tests/other_test.go index d4059a5f..71327755 100644 --- a/tests/other_test.go +++ b/tests/other_test.go @@ -332,12 +332,13 @@ func TestRunInUsEastRegion(t *testing.T) { require.NoError(t, err, "Failed to set up Terraform options.") // Assign solution-specific Terraform variables. - if envVars.Solution == "HPC" { + switch envVars.Solution { + case "HPC": options.TerraformVars["zones"] = usEastZone options.TerraformVars["reservation_id"] = usEastReservationID options.TerraformVars["cluster_name"] = usEastClusterName testLogger.Info(t, "Terraform variables configured for HPC solution.") - } else if envVars.Solution == "lsf" { + case "lsf": options.TerraformVars["zones"] = usEastZone options.TerraformVars["worker_node_instance_type"] = []map[string]interface{}{ { @@ -397,12 +398,13 @@ func TestRunInEuDeRegion(t *testing.T) { require.NoError(t, err, "Failed to set up Terraform options.") // Assign solution-specific Terraform variables. 
- if envVars.Solution == "HPC" { + switch envVars.Solution { + case "HPC": options.TerraformVars["zones"] = euDeZone options.TerraformVars["reservation_id"] = euDeReservationID options.TerraformVars["cluster_name"] = euDeClusterName testLogger.Info(t, "Terraform variables configured for HPC in Frankfurt.") - } else if envVars.Solution == "lsf" { + case "lsf": options.TerraformVars["zones"] = euDeZone options.TerraformVars["worker_node_instance_type"] = []map[string]interface{}{ { @@ -463,12 +465,13 @@ func TestRunInUSSouthRegion(t *testing.T) { require.NoError(t, err, "Failed to set up Terraform options.") // Assign solution-specific Terraform variables. - if envVars.Solution == "HPC" { + switch envVars.Solution { + case "HPC": options.TerraformVars["zones"] = usSouthZone options.TerraformVars["reservation_id"] = usSouthReservationID options.TerraformVars["cluster_name"] = usSouthClusterName testLogger.Info(t, "Terraform variables configured for HPC in US South.") - } else if envVars.Solution == "lsf" { + case "lsf": options.TerraformVars["zones"] = usSouthZone options.TerraformVars["worker_node_instance_type"] = []map[string]interface{}{ { @@ -528,12 +531,13 @@ func TestRunInJPTokyoRegion(t *testing.T) { require.NoError(t, err, "Failed to set up Terraform options.") // Assign solution-specific Terraform variables. - if envVars.Solution == "HPC" { + switch envVars.Solution { + case "HPC": options.TerraformVars["zones"] = jpTokyoZone options.TerraformVars["cluster_name"] = jpTokyoClusterName options.TerraformVars["reservation_id"] = jpTokyoReservationID testLogger.Info(t, "Terraform variables configured for HPC in JP Tokyo.") - } else if envVars.Solution == "lsf" { + case "lsf": options.TerraformVars["zones"] = jpTokyoZone options.TerraformVars["worker_node_instance_type"] = []map[string]interface{}{ { diff --git a/tests/utilities/api_utils.go b/tests/utilities/api_utils.go index 37221c63..cebbb320 100644 --- a/tests/utilities/api_utils.go +++ b/tests/utilities/api_utils.go @@ -1,6 +1,7 @@ package tests import ( + "fmt" "net/http" "time" ) @@ -15,7 +16,12 @@ func CheckAPIStatus(url string) (int, error) { if err != nil { return 0, err } - defer resp.Body.Close() + + defer func() { + if err := resp.Body.Close(); err != nil { + fmt.Printf("warning: failed to close response body: %v\n", err) + } + }() return resp.StatusCode, nil } diff --git a/tests/utilities/deployment.go b/tests/utilities/deployment.go index 167fad76..940808af 100644 --- a/tests/utilities/deployment.go +++ b/tests/utilities/deployment.go @@ -88,7 +88,12 @@ func GetConfigFromYAML(filePath string) (*Config, error) { if err != nil { return nil, fmt.Errorf("failed to open YAML file %s: %v", filePath, err) } - defer file.Close() + + defer func() { + if err := file.Close(); err != nil { + fmt.Printf("warning: failed to close file: %v\n", err) + } + }() // Decode the YAML file into the config struct if err := yaml.NewDecoder(file).Decode(&config); err != nil { @@ -217,50 +222,78 @@ func setEnvFromConfig(config *Config) error { val, ok := os.LookupEnv(key) switch { case strings.Contains(key, "KEY_MANAGEMENT") && val == "null" && ok: - os.Setenv(key, "null") + if err := os.Setenv(key, "null"); err != nil { + return fmt.Errorf("failed to set %s to 'null': %v", key, err) + } case strings.Contains(key, "REMOTE_ALLOWED_IPS") && !ok && value == "": - os.Setenv(key, ip) + if err := os.Setenv(key, ip); err != nil { + return fmt.Errorf("failed to set %s to %s: %v", key, ip, err) + } case value != "" && !ok: switch v := value.(type) { case 
string: - os.Setenv(key, v) + if err := os.Setenv(key, v); err != nil { + return fmt.Errorf("failed to set %s to %s: %v", key, v, err) + } case bool: - os.Setenv(key, fmt.Sprintf("%t", v)) + if err := os.Setenv(key, fmt.Sprintf("%t", v)); err != nil { + return fmt.Errorf("failed to set %s to %t: %v", key, v, err) + } case int: - os.Setenv(key, fmt.Sprintf("%d", v)) + if err := os.Setenv(key, fmt.Sprintf("%d", v)); err != nil { + return fmt.Errorf("failed to set %s to %d: %v", key, v, err) + } case float64: - // Optionally handle float values - os.Setenv(key, fmt.Sprintf("%f", v)) + if err := os.Setenv(key, fmt.Sprintf("%f", v)); err != nil { + return fmt.Errorf("failed to set %s to %f: %v", key, v, err) + } case []string: - // If the value is a slice of strings, you can join them into a comma-separated string - os.Setenv(key, strings.Join(v, ",")) + if err := os.Setenv(key, strings.Join(v, ",")); err != nil { + return fmt.Errorf("failed to set %s to joined string: %v", key, err) + } case []WorkerNode: - // If the value is a slice of WorkerNode, marshal it to JSON string workerNodeInstanceTypeJSON, err := json.Marshal(v) if err != nil { return fmt.Errorf("failed to marshal %s: %v", key, err) } - os.Setenv(key, string(workerNodeInstanceTypeJSON)) + if err := os.Setenv(key, string(workerNodeInstanceTypeJSON)); err != nil { + return fmt.Errorf("failed to set %s to JSON: %v", key, err) + } default: return fmt.Errorf("unsupported type for key %s", key) } } } - // Handle missing reservations IDs if necessary + // Handle missing reservation IDs if necessary for key, value := range envVars { _, ok := os.LookupEnv(key) switch { case key == "RESERVATION_ID" && !ok && value == "": - os.Setenv("RESERVATION_ID", GetValueForKey(map[string]string{"us-south": reservationIDSouth, "us-east": reservationIDEast}, strings.ToLower(GetRegion(os.Getenv("ZONE"))))) + val := GetValueForKey( + map[string]string{ + "us-south": reservationIDSouth, + "us-east": reservationIDEast, + }, + strings.ToLower(GetRegion(os.Getenv("ZONE"))), + ) + if err := os.Setenv("RESERVATION_ID", val); err != nil { + return fmt.Errorf("failed to set RESERVATION_ID: %v", err) + } case key == "US_EAST_RESERVATION_ID" && !ok && value == "": - os.Setenv("US_EAST_RESERVATION_ID", reservationIDEast) + if err := os.Setenv("US_EAST_RESERVATION_ID", reservationIDEast); err != nil { + return fmt.Errorf("failed to set US_EAST_RESERVATION_ID: %v", err) + } case key == "EU_DE_RESERVATION_ID" && !ok && value == "": - os.Setenv("EU_DE_RESERVATION_ID", reservationIDEast) + if err := os.Setenv("EU_DE_RESERVATION_ID", reservationIDEast); err != nil { + return fmt.Errorf("failed to set EU_DE_RESERVATION_ID: %v", err) + } case key == "US_SOUTH_RESERVATION_ID" && !ok && value == "": - os.Setenv("US_SOUTH_RESERVATION_ID", reservationIDSouth) - + if err := os.Setenv("US_SOUTH_RESERVATION_ID", reservationIDSouth); err != nil { + return fmt.Errorf("failed to set US_SOUTH_RESERVATION_ID: %v", err) + } } } + return nil } diff --git a/tests/utilities/helpers.go b/tests/utilities/helpers.go index 9ccb427e..3b5944d0 100644 --- a/tests/utilities/helpers.go +++ b/tests/utilities/helpers.go @@ -162,7 +162,13 @@ func FindImageNamesByCriteria(name string) (string, error) { if err != nil { return "", err } - defer readFile.Close() + + var returnErr error + defer func() { + if cerr := readFile.Close(); cerr != nil && returnErr == nil { + returnErr = fmt.Errorf("failed to close file: %w", cerr) + } + }() // Create a scanner to read lines from the file fileScanner := 
bufio.NewScanner(readFile) diff --git a/tests/utilities/report.go b/tests/utilities/report.go index 7a2cd44d..20b9072b 100644 --- a/tests/utilities/report.go +++ b/tests/utilities/report.go @@ -35,7 +35,13 @@ func ParseJSONFile(fileName string) ([]TestResult, error) { if err != nil { return nil, fmt.Errorf("error opening log file: %w", err) } - defer file.Close() + + var returnErr error + defer func() { + if cerr := file.Close(); cerr != nil && returnErr == nil { + returnErr = fmt.Errorf("failed to close file: %w", cerr) + } + }() // Regular expression to capture results reTestResult := regexp.MustCompile(`--- (PASS|FAIL): (\S+) \((\d+\.\d+)s\)`) @@ -88,9 +94,10 @@ func GenerateHTMLReport(results []TestResult) error { totalFail := 0 totalTime := 0.0 for _, result := range results { - if result.Action == "PASS" { + switch result.Action { + case "PASS": totalPass++ - } else if result.Action == "FAIL" { + case "FAIL": totalFail++ } totalTime += result.Elapsed @@ -202,7 +209,13 @@ func GenerateHTMLReport(results []TestResult) error { if err != nil { return fmt.Errorf("error creating report file: %w", err) } - defer reportFile.Close() + + var returnErr error + defer func() { + if cerr := reportFile.Close(); cerr != nil && returnErr == nil { + returnErr = fmt.Errorf("failed to close report file: %w", cerr) + } + }() // Execute the template with the data err = tmpl.Execute(reportFile, reportData) diff --git a/tests/utilities/ssh.go b/tests/utilities/ssh.go index f92fd36f..f1809dcf 100644 --- a/tests/utilities/ssh.go +++ b/tests/utilities/ssh.go @@ -46,7 +46,13 @@ func RunCommandInSSHSession(sClient *ssh.Client, cmd string) (string, error) { if err != nil { return "", fmt.Errorf("failed to create SSH session: %w", err) } - defer session.Close() + + var returnErr error + defer func() { + if cerr := session.Close(); cerr != nil && returnErr == nil { + returnErr = fmt.Errorf("failed to close session: %w", cerr) + } + }() var b bytes.Buffer session.Stdout = &b
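
Editor's note on the recurring close-with-logging pattern introduced throughout tests/lsf/cluster_helpers.go, cluster_utils.go, and cluster_validation.go above: the same five-line deferred closure is repeated at every SSH client creation site. A small helper could keep each call site to a single defer line while preserving the logged warning. The sketch below is illustrative only and is not part of this patch; the helper name deferredClose and the logSink interface are hypothetical, and the only assumption about the real logger is that it exposes Info(t, msg) as used in the hunks above.

// Hypothetical helper (not part of this patch): wraps the close-with-logging
// pattern used throughout the validation tests so call sites stay one line.
package tests

import (
	"fmt"
	"io"
	"testing"
)

// logSink is the minimal logger surface assumed here; the tests' own
// utils.AggregatedLogger is expected to satisfy it via its Info method.
type logSink interface {
	Info(t *testing.T, msg string)
}

// deferredClose closes c and logs, rather than fails on, any close error.
// Typical usage: defer deferredClose(t, sshClient, "sshClient", testLogger)
func deferredClose(t *testing.T, c io.Closer, name string, logger logSink) {
	if err := c.Close(); err != nil {
		logger.Info(t, fmt.Sprintf("failed to close %s: %v", name, err))
	}
}

With such a helper, each defer func() { ... }() block above would collapse to defer deferredClose(t, sshClient, "sshClient", testLogger); whether that indirection is preferable to the explicit closures in this patch is a style call.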
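
A second editor's note, on the var returnErr error variant added in tests/utilities/helpers.go, report.go, and ssh.go above: the enclosing functions visible in these hunks (FindImageNamesByCriteria, ParseJSONFile, GenerateHTMLReport, RunCommandInSSHSession) are declared with unnamed results, and in Go a deferred closure runs only after the return values have been set, so assigning the wrapped close error to an ordinary local variable there cannot change what the caller receives. Using a named result is the usual way to make this shape propagate the close error. A minimal sketch of the named-return form follows; readFirstBytes is a hypothetical function unrelated to this repository, shown only to illustrate the mechanics.

// Sketch only (not part of this patch): the same close-error pattern with a
// named result, so the deferred assignment actually changes what the caller
// receives when Close fails after an otherwise successful read.
package example

import (
	"fmt"
	"io"
	"os"
)

func readFirstBytes(path string) (data []byte, retErr error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, fmt.Errorf("error opening file: %w", err)
	}
	// retErr is the declared result; setting it here propagates to the caller.
	defer func() {
		if cerr := f.Close(); cerr != nil && retErr == nil {
			retErr = fmt.Errorf("failed to close file: %w", cerr)
		}
	}()

	buf := make([]byte, 512)
	n, err := f.Read(buf)
	if err != nil && err != io.EOF {
		return nil, fmt.Errorf("error reading file: %w", err)
	}
	return buf[:n], nil
}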