From 3c39665b0795a2470225aa96e9693c68debc43b9 Mon Sep 17 00:00:00 2001 From: Manikandan-Renu1 Date: Thu, 27 Feb 2025 00:16:19 +0530 Subject: [PATCH 01/20] pr-199 --- datasource.tf | 8 +- locals.tf | 70 ++--- main.tf | 184 ++++------- modules/deployer/main.tf | 10 +- modules/deployer/outputs.tf | 12 - modules/deployer/template_files.tf | 10 +- .../deployer/templates/deployer_user_data.tpl | 3 - modules/landing_zone/locals.tf | 10 +- modules/landing_zone_vsi/locals.tf | 4 + modules/landing_zone_vsi/main.tf | 26 +- modules/landing_zone_vsi/outputs.tf | 12 + modules/landing_zone_vsi/template_files.tf | 12 +- modules/null_resources/locals.tf | 26 ++ modules/null_resources/main.tf | 107 +++++++ modules/null_resources/variables.tf | 291 ++++++++++++++++++ modules/null_resources/version.tf | 22 ++ variables.tf | 2 +- 17 files changed, 588 insertions(+), 221 deletions(-) create mode 100644 modules/null_resources/locals.tf create mode 100644 modules/null_resources/main.tf create mode 100644 modules/null_resources/variables.tf create mode 100644 modules/null_resources/version.tf diff --git a/datasource.tf b/datasource.tf index 4f893a4a..008d8da4 100644 --- a/datasource.tf +++ b/datasource.tf @@ -14,6 +14,12 @@ data "ibm_is_vpc" "itself" { count = var.vpc == null ? 0 : 1 name = var.vpc } + +data "ibm_is_vpc" "existing_vpc" { + count = var.vpc != null ? 1 : 0 + name = var.vpc +} + /* data "ibm_is_subnet" "itself" { count = length(local.subnets) @@ -49,4 +55,4 @@ data "ibm_is_subnet" "existing_client_subnets" { data "ibm_is_subnet" "existing_bastion_subnets" { count = var.vpc != null && var.bastion_subnets != null ? 1 : 0 name = var.bastion_subnets[count.index] -} \ No newline at end of file +} diff --git a/locals.tf b/locals.tf index 4311679b..edaf38b3 100644 --- a/locals.tf +++ b/locals.tf @@ -15,7 +15,7 @@ locals { # locals needed for deployer locals { # dependency: landing_zone -> deployer - vpc_id = var.vpc == null ? one(module.landing_zone.vpc_id) : var.vpc_id + vpc_id = var.vpc == null ? one(module.landing_zone.vpc_id) : data.ibm_is_vpc.existing_vpc[0].id vpc = var.vpc == null ? one(module.landing_zone.vpc_name) : var.vpc bastion_subnets = module.landing_zone.bastion_subnets kms_encryption_enabled = var.key_management != null ? true : false @@ -35,10 +35,10 @@ locals { bastion_private_key_content = module.deployer.bastion_private_key_content deployer_hostname = var.enable_bastion ? flatten(module.deployer.deployer_vsi_data[*].list)[0].name : "" - deployer_ip = module.deployer.deployer_ip + deployer_ip = module.deployer.deployer_ip - compute_public_key_contents = module.deployer.compute_public_key_content - compute_private_key_contents = module.deployer.compute_private_key_content + # compute_public_key_contents = module.deployer.compute_public_key_content + # compute_private_key_contents = module.deployer.compute_private_key_content # Existing subnets details existing_compute_subnets = [ @@ -81,7 +81,7 @@ locals { } ] - existing_bastion_subnets = [ + existing_bastion_subnets = [ for subnet in data.ibm_is_subnet.existing_bastion_subnets : { cidr = subnet.ipv4_cidr_block @@ -117,7 +117,7 @@ locals { #boot_volume_encryption_key = var.key_management != null ? one(module.landing_zone.boot_volume_encryption_key)["crn"] : null # dependency: landing_zone_vsi -> file-share - compute_subnet_id = var.vpc == null && var.compute_subnets == null ? 
local.compute_subnets[0].id : [for subnet in data.ibm_is_subnet.existing_compute_subnets : subnet.id][0] + compute_subnet_id = var.vpc == null && var.compute_subnets == null ? local.compute_subnets[0].id : "" compute_security_group_id = var.enable_deployer ? [] : module.landing_zone_vsi[0].compute_sg_id management_instance_count = sum(var.management_instances[*]["count"]) default_share = local.management_instance_count > 0 ? [ @@ -149,15 +149,15 @@ locals { workload_rg = var.resource_group == null ? module.landing_zone.resource_group_id[0]["workload-rg"] : data.ibm_resource_group.resource_group[0].id } # resource_group_id = one(values(one(module.landing_zone.resource_group_id))) - vpc_crn = var.vpc == null ? one(module.landing_zone.vpc_crn) : one(data.ibm_is_vpc.itself[*].crn) + vpc_crn = var.vpc == null ? one(module.landing_zone.vpc_crn) : one(data.ibm_is_vpc.itself[*].crn) # TODO: Fix existing subnet logic #subnets_crn = var.vpc == null ? module.landing_zone.subnets_crn : ### - existing_compute_subnet_crns = [for subnet in data.ibm_is_subnet.existing_compute_subnets : subnet.crn] - existing_storage_subnet_crns = [for subnet in data.ibm_is_subnet.existing_storage_subnets : subnet.crn] - existing_protocol_subnet_crns = [for subnet in data.ibm_is_subnet.existing_protocol_subnets : subnet.crn] - existing_client_subnet_crns = [for subnet in data.ibm_is_subnet.existing_client_subnets : subnet.crn] - existing_bastion_subnet_crns = [for subnet in data.ibm_is_subnet.existing_bastion_subnets : subnet.crn] - subnets_crn = concat(local.existing_compute_subnet_crns, local.existing_storage_subnet_crns, local.existing_protocol_subnet_crns, local.existing_client_subnet_crns, local.existing_bastion_subnet_crns) + existing_compute_subnet_crns = [for subnet in local.existing_compute_subnets : subnet.crn] + existing_storage_subnet_crns = [for subnet in local.existing_storage_subnets : subnet.crn] + existing_protocol_subnet_crns = [for subnet in local.existing_protocol_subnets : subnet.crn] + existing_client_subnet_crns = [for subnet in local.existing_client_subnets : subnet.crn] + existing_bastion_subnet_crns = [for subnet in local.existing_bastion_subnets : subnet.crn] + subnets_crn = concat(local.existing_compute_subnet_crns, local.existing_storage_subnet_crns, local.existing_protocol_subnet_crns, local.existing_client_subnet_crns, local.existing_bastion_subnet_crns) # subnets_crn = var.vpc == null && var.compute_subnets == null ? module.landing_zone.subnets_crn : concat(local.existing_subnet_crns, module.landing_zone.subnets_crn) #subnets = flatten([local.compute_subnets, local.storage_subnets, local.protocol_subnets]) #subnets_crns = data.ibm_is_subnet.itself[*].crn @@ -167,17 +167,17 @@ locals { # dependency: landing_zone_vsi -> file-share } -data "external" "get_hostname" { - program = ["sh", "-c", "echo '{\"name\": \"'$(hostname)'\", \"ipv4_address\": \"'$(hostname -I | awk '{print $1}')'\"}'"] -} +# data "external" "get_hostname" { +# program = ["sh", "-c", "echo '{\"name\": \"'$(hostname)'\", \"ipv4_address\": \"'$(hostname -I | awk '{print $1}')'\"}'"] +# } # locals needed for dns-records locals { # dependency: dns -> dns-records - dns_instance_id = var.enable_deployer ? "" : module.dns[0].dns_instance_id + dns_instance_id = var.enable_deployer ? "" : module.dns[0].dns_instance_id dns_custom_resolver_id = var.enable_deployer ? "" : module.dns[0].dns_custom_resolver_id - dns_zone_map_list = var.enable_deployer ? [] : module.dns[0].dns_zone_maps + dns_zone_map_list = var.enable_deployer ? 
[] : module.dns[0].dns_zone_maps compute_dns_zone_id = one(flatten([ for dns_zone in local.dns_zone_map_list : values(dns_zone) if one(keys(dns_zone)) == var.dns_domain_names["compute"] ])) @@ -200,7 +200,7 @@ locals { ] compute_dns_records = [ - for instance in concat(local.compute_instances, local.deployer_instances): + for instance in concat(local.compute_instances, local.deployer_instances) : { name = instance["name"] rdata = instance["ipv4_address"] @@ -235,18 +235,18 @@ locals { bastion_fip = module.deployer.bastion_fip compute_private_key_path = var.enable_bastion ? "${path.root}/../../modules/ansible-roles/compute_id_rsa" : "${path.root}/modules/ansible-roles/compute_id_rsa" #checkov:skip=CKV_SECRET_6 storage_private_key_path = var.enable_bastion ? "${path.root}/../../modules/ansible-roles/storage_id_rsa" : "${path.root}/modules/ansible-roles/storage_id_rsa" #checkov:skip=CKV_SECRET_6 - compute_playbook_path = var.enable_bastion ? "${path.root}/../../modules/ansible-roles/compute_ssh.yaml" : "${path.root}/modules/ansible-roles/compute_ssh.yaml" + compute_playbook_path = var.enable_bastion ? "${path.root}/../../modules/ansible-roles/compute_ssh.yaml" : "${path.root}/modules/ansible-roles/compute_ssh.yaml" storage_playbook_path = var.enable_bastion ? "${path.root}/../../modules/ansible-roles/storage_ssh.yaml" : "${path.root}/modules/ansible-roles/storage_ssh.yaml" } # file Share OutPut locals { - fileshare_name_mount_path_map = var.enable_deployer ? {} : module.file_storage[0].name_mount_path_map + fileshare_name_mount_path_map = var.enable_deployer ? {} : module.file_storage[0].name_mount_path_map } # details needed for json file locals { - json_inventory_path = var.enable_bastion ? "${path.root}/../../modules/ansible-roles/all.json" : "${path.root}/modules/ansible-roles/all.json" + json_inventory_path = var.enable_bastion ? "${path.root}/../../modules/ansible-roles/all.json" : "${path.root}/modules/ansible-roles/all.json" management_nodes = var.enable_deployer ? [] : (flatten([module.landing_zone_vsi[0].management_vsi_data]))[*]["name"] compute_nodes = var.enable_deployer ? [] : (flatten([module.landing_zone_vsi[0].compute_vsi_data]))[*]["name"] compute_nodes_list = var.enable_deployer ? [] : (length(local.compute_nodes) > 0 ? [format("%s-[001:%s]", join("-", slice(split("-", local.compute_nodes[0]), 0, length(split("-", local.compute_nodes[0])) - 1)), split("-", local.compute_nodes[length(local.compute_nodes) - 1])[length(split("-", local.compute_nodes[length(local.compute_nodes) - 1])) - 1])] : local.compute_nodes) #(length(local.compute_nodes) >= 10 ? [format("%s-00[%d:%d]", regex("^(.*?)-\\d+$", local.compute_nodes[0])[0], 1, length(local.compute_nodes))] : local.compute_nodes) @@ -259,29 +259,3 @@ locals { lsf_deployer_hostname = var.deployer_hostname #data.external.get_hostname.result.name #var.enable_bastion ? 
"" : flatten(module.deployer.deployer_vsi_data[*].list)[0].name } -locals { - schematics_inputs_path = "/tmp/.schematics/solution_terraform.auto.tfvars.json" - remote_inputs_path = format("%s/terraform.tfvars.json", "/tmp") - deployer_path = "/opt/ibm" - remote_terraform_path = format("%s/terraform-ibm-hpc", local.deployer_path) - remote_ansible_path = format("%s/terraform-ibm-hpc", local.deployer_path) - da_hpc_repo_url = "https://github.com/terraform-ibm-modules/terraform-ibm-hpc.git" - da_hpc_repo_tag = "develop" ###### change it to main in future - zones = jsonencode(var.zones) - list_compute_ssh_keys = jsonencode(local.compute_ssh_keys) - list_storage_ssh_keys = jsonencode(local.storage_ssh_keys) - list_storage_instances = jsonencode(var.storage_instances) - list_management_instances = jsonencode(var.management_instances) - list_protocol_instances = jsonencode(var.protocol_instances) - list_compute_instances = jsonencode(var.static_compute_instances) - list_client_instances = jsonencode(var.client_instances) - allowed_cidr = jsonencode(var.allowed_cidr) - list_storage_subnets = jsonencode(length(local.storage_subnet) == 0 ? null : local.storage_subnet) - list_protocol_subnets = jsonencode(length(local.protocol_subnet) == 0 ? null : local.protocol_subnet) - list_compute_subnets = jsonencode(length(local.compute_subnet) == 0 ? null : local.compute_subnet) - list_client_subnets = jsonencode(length(local.client_subnet) == 0 ? null : local.client_subnet) - list_bastion_subnets = jsonencode(length(local.bastion_subnet) == 0 ? null : local.bastion_subnet) - dns_domain_names = jsonencode(var.dns_domain_names) - compute_public_key_content = local.compute_public_key_contents != null ? jsonencode(base64encode(local.compute_public_key_contents)) : "" - compute_private_key_content = local.compute_private_key_contents != null ? jsonencode(base64encode(local.compute_private_key_contents)) : "" -} \ No newline at end of file diff --git a/main.tf b/main.tf index e1b8c012..225c33ce 100644 --- a/main.tf +++ b/main.tf @@ -71,8 +71,8 @@ module "landing_zone_vsi" { vpc_id = local.vpc_id bastion_security_group_id = var.bastion_security_group_id bastion_public_key_content = local.bastion_public_key_content - compute_public_key_content = var.compute_public_key_content - compute_private_key_content= var.compute_private_key_content + # compute_public_key_content = var.compute_public_key_content + # compute_private_key_content = var.compute_private_key_content client_subnets = local.client_subnets client_ssh_keys = local.client_ssh_keys client_instances = var.client_instances @@ -93,120 +93,46 @@ module "landing_zone_vsi" { enable_bastion = var.enable_bastion } - -resource "local_sensitive_file" "prepare_tf_input" { - count = var.enable_deployer == true ? 1 : 0 - content = <> /home/$USER/.ssh/authorized_keys echo "StrictHostKeyChecking no" >> /home/$USER/.ssh/config echo "StrictHostKeyChecking no" >> ~/.ssh/config -echo "${compute_public_key_content}" >> ~/.ssh/authorized_keys -echo "${compute_private_key_content}" > ~/.ssh/id_rsa -chmod 600 ~/.ssh/id_rsa # # setup env # # TODO: Conditional installation (python3, terraform & ansible) diff --git a/modules/landing_zone/locals.tf b/modules/landing_zone/locals.tf index 394529e7..58951d08 100644 --- a/modules/landing_zone/locals.tf +++ b/modules/landing_zone/locals.tf @@ -308,7 +308,7 @@ locals { days = 30 enable = true rule_id = "bucket-expire-rule" - } + } } : null, var.enable_atracker ? 
{ name = "atracker-bucket" @@ -320,7 +320,7 @@ locals { days = 30 enable = true rule_id = "bucket-expire-rule" - } + } } : null, var.observability_logs_enable ? { name = "logs-data-bucket" @@ -332,7 +332,7 @@ locals { days = 30 enable = true rule_id = "bucket-expire-rule" - } + } } : null, var.observability_logs_enable ? { name = "metrics-data-bucket" @@ -344,7 +344,7 @@ locals { days = 30 enable = true rule_id = "bucket-expire-rule" - } + } } : null, var.scc_enable ? { name = "scc-bucket" @@ -409,7 +409,7 @@ locals { name = format("%s-atracker-key", var.prefix) } : null, var.scc_enable ? { - name = format("%s-scc-key", var.prefix) + name = format("%s-scc-key", var.prefix) } : null ] : [ { diff --git a/modules/landing_zone_vsi/locals.tf b/modules/landing_zone_vsi/locals.tf index ef5d382a..8708a22d 100644 --- a/modules/landing_zone_vsi/locals.tf +++ b/modules/landing_zone_vsi/locals.tf @@ -267,6 +267,10 @@ locals { storage_subnets = var.storage_subnets protocol_subnets = var.protocol_subnets + compute_public_key_content = one(module.compute_key[*].public_key_content) + compute_private_key_content = one(module.compute_key[*].private_key_content) + + # Security Groups protocol_secondary_security_group = [for subnet in local.protocol_subnets : { diff --git a/modules/landing_zone_vsi/main.tf b/modules/landing_zone_vsi/main.tf index 1239c810..3b58e398 100644 --- a/modules/landing_zone_vsi/main.tf +++ b/modules/landing_zone_vsi/main.tf @@ -1,16 +1,30 @@ -# module "compute_key" { -# count = local.enable_compute ? 1 : 0 -# source = "./../key" -# private_key_path = "./../../modules/ansible-roles/compute_id_rsa" #checkov:skip=CKV_SECRET_6 -# } +module "compute_key" { + count = local.enable_compute ? 1 : 0 + source = "./../key" + private_key_path = "./../../modules/ansible-roles/compute_id_rsa" #checkov:skip=CKV_SECRET_6 +} resource "local_sensitive_file" "write_meta_private_key" { count = local.enable_compute ? 1 : 0 - content = base64decode(var.compute_private_key_content) + content = (local.compute_private_key_content) filename = var.enable_bastion ? "${path.root}/../../modules/ansible-roles/compute_id_rsa" : "${path.root}/modules/ansible-roles/compute_id_rsa" file_permission = "0600" } +resource "local_sensitive_file" "copy_compute_private_key_content" { + count = local.enable_compute ? 1 : 0 + content = (local.compute_private_key_content) + filename = "~/.ssh/id_rsa" + file_permission = "0600" +} + +resource "local_sensitive_file" "copy_compute_public_key_content" { + count = local.enable_compute ? 1 : 0 + content = (local.compute_public_key_content) + filename = "~/.ssh/authorized_keys" + file_permission = "0600" +} + module "storage_key" { count = local.enable_storage ? 
1 : 0 source = "./../key" diff --git a/modules/landing_zone_vsi/outputs.tf b/modules/landing_zone_vsi/outputs.tf index ff13f57f..25c3bd40 100644 --- a/modules/landing_zone_vsi/outputs.tf +++ b/modules/landing_zone_vsi/outputs.tf @@ -27,3 +27,15 @@ output "compute_sg_id" { description = "Compute SG id" value = module.compute_sg[*].security_group_id } + +output "compute_public_key_content" { + description = "Compute public key content" + sensitive = true + value = one(module.compute_key[*].public_key_content) +} + +output "compute_private_key_content" { + description = "Compute private key content" + sensitive = true + value = one(module.compute_key[*].private_key_content) +} diff --git a/modules/landing_zone_vsi/template_files.tf b/modules/landing_zone_vsi/template_files.tf index ca7ebab6..77d61c14 100644 --- a/modules/landing_zone_vsi/template_files.tf +++ b/modules/landing_zone_vsi/template_files.tf @@ -2,8 +2,8 @@ data "template_file" "client_user_data" { template = file("${path.module}/templates/client_user_data.tpl") vars = { bastion_public_key_content = var.bastion_public_key_content != null ? var.bastion_public_key_content : "" - client_public_key_content = local.enable_client ? var.compute_public_key_content != null ? var.compute_public_key_content : "" : "" - client_private_key_content = local.enable_client ? var.compute_private_key_content != null ? var.compute_private_key_content : "" : "" + client_public_key_content = local.enable_client ? local.compute_public_key_content != null ? local.compute_public_key_content : "" : "" + client_private_key_content = local.enable_client ? local.compute_private_key_content != null ? local.compute_private_key_content : "" : "" client_interfaces = var.storage_type == "scratch" ? local.vsi_interfaces[0] : local.bms_interfaces[0] client_dns_domain = var.dns_domain_names["compute"] } @@ -13,8 +13,8 @@ data "template_file" "management_user_data" { template = file("${path.module}/templates/management_user_data.tpl") vars = { bastion_public_key_content = var.bastion_public_key_content != null ? var.bastion_public_key_content : "" - management_public_key_content = local.enable_management ? var.compute_public_key_content != null ? var.compute_public_key_content : "" : "" - management_private_key_content = local.enable_management ? var.compute_private_key_content != null ? var.compute_private_key_content : "" : "" + management_public_key_content = local.enable_management ? local.compute_public_key_content != null ? local.compute_public_key_content : "" : "" + management_private_key_content = local.enable_management ? local.compute_private_key_content != null ? local.compute_private_key_content : "" : "" management_interfaces = var.storage_type == "scratch" ? local.vsi_interfaces[0] : local.bms_interfaces[0] management_dns_domain = var.dns_domain_names["compute"] } @@ -24,8 +24,8 @@ data "template_file" "compute_user_data" { template = file("${path.module}/templates/compute_user_data.tpl") vars = { bastion_public_key_content = var.bastion_public_key_content != null ? var.bastion_public_key_content : "" - compute_public_key_content = local.enable_compute ? var.compute_public_key_content != null ? var.compute_public_key_content : "" : "" - compute_private_key_content = local.enable_compute ? var.compute_private_key_content != null ? var.compute_private_key_content : "" : "" + compute_public_key_content = local.enable_compute ? local.compute_public_key_content != null ? 
local.compute_public_key_content : "" : "" + compute_private_key_content = local.enable_compute ? local.compute_private_key_content != null ? local.compute_private_key_content : "" : "" compute_interfaces = var.storage_type == "scratch" ? local.vsi_interfaces[0] : local.bms_interfaces[0] compute_dns_domain = var.dns_domain_names["compute"] # TODO: Fix me diff --git a/modules/null_resources/locals.tf b/modules/null_resources/locals.tf new file mode 100644 index 00000000..7191f947 --- /dev/null +++ b/modules/null_resources/locals.tf @@ -0,0 +1,26 @@ +locals { + schematics_inputs_path = "/tmp/.schematics/solution_terraform.auto.tfvars.json" + remote_inputs_path = format("%s/terraform.tfvars.json", "/tmp") + deployer_path = "/opt/ibm" + remote_terraform_path = format("%s/terraform-ibm-hpc", local.deployer_path) + remote_ansible_path = format("%s/terraform-ibm-hpc", local.deployer_path) + da_hpc_repo_url = "https://github.com/terraform-ibm-modules/terraform-ibm-hpc.git" + da_hpc_repo_tag = "develop" ###### change it to main in future + zones = jsonencode(var.zones) + list_compute_ssh_keys = jsonencode(var.compute_ssh_keys) + list_storage_ssh_keys = jsonencode(var.storage_ssh_keys) + list_storage_instances = jsonencode(var.storage_instances) + list_management_instances = jsonencode(var.management_instances) + list_protocol_instances = jsonencode(var.protocol_instances) + list_compute_instances = jsonencode(var.static_compute_instances) + list_client_instances = jsonencode(var.client_instances) + allowed_cidr = jsonencode(var.allowed_cidr) + list_storage_subnets = jsonencode(length(var.storage_subnets) == 0 ? null : var.storage_subnets) + list_protocol_subnets = jsonencode(length(var.protocol_subnets) == 0 ? null : var.protocol_subnets) + list_compute_subnets = jsonencode(length(var.compute_subnets) == 0 ? null : var.compute_subnets) + list_client_subnets = jsonencode(length(var.client_subnets) == 0 ? null : var.client_subnets) + list_bastion_subnets = jsonencode(length(var.bastion_subnets) == 0 ? null : var.bastion_subnets) + dns_domain_names = jsonencode(var.dns_domain_names) + compute_public_key_content = var.compute_public_key_content != null ? jsonencode(base64encode(var.compute_public_key_content)) : "" + compute_private_key_content = var.compute_private_key_content != null ? jsonencode(base64encode(var.compute_private_key_content)) : "" +} diff --git a/modules/null_resources/main.tf b/modules/null_resources/main.tf new file mode 100644 index 00000000..20dfd20b --- /dev/null +++ b/modules/null_resources/main.tf @@ -0,0 +1,107 @@ +resource "local_sensitive_file" "prepare_tf_input" { + count = var.enable_deployer == true ? 1 : 0 + content = < Date: Thu, 27 Feb 2025 00:43:40 +0530 Subject: [PATCH 02/20] crn-change --- locals.tf | 10 +++++----- modules/null_resources/locals.tf | 2 +- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/locals.tf b/locals.tf index edaf38b3..f73481f1 100644 --- a/locals.tf +++ b/locals.tf @@ -152,11 +152,11 @@ locals { vpc_crn = var.vpc == null ? one(module.landing_zone.vpc_crn) : one(data.ibm_is_vpc.itself[*].crn) # TODO: Fix existing subnet logic #subnets_crn = var.vpc == null ? 
module.landing_zone.subnets_crn : ### - existing_compute_subnet_crns = [for subnet in local.existing_compute_subnets : subnet.crn] - existing_storage_subnet_crns = [for subnet in local.existing_storage_subnets : subnet.crn] - existing_protocol_subnet_crns = [for subnet in local.existing_protocol_subnets : subnet.crn] - existing_client_subnet_crns = [for subnet in local.existing_client_subnets : subnet.crn] - existing_bastion_subnet_crns = [for subnet in local.existing_bastion_subnets : subnet.crn] + existing_compute_subnet_crns = [for subnet in data.ibm_is_subnet.existing_compute_subnets : subnet.crn] + existing_storage_subnet_crns = [for subnet in data.ibm_is_subnet.existing_storage_subnets : subnet.crn] + existing_protocol_subnet_crns = [for subnet in data.ibm_is_subnet.existing_protocol_subnets : subnet.crn] + existing_client_subnet_crns = [for subnet in data.ibm_is_subnet.existing_client_subnets : subnet.crn] + existing_bastion_subnet_crns = [for subnet in data.ibm_is_subnet.existing_bastion_subnets : subnet.crn] subnets_crn = concat(local.existing_compute_subnet_crns, local.existing_storage_subnet_crns, local.existing_protocol_subnet_crns, local.existing_client_subnet_crns, local.existing_bastion_subnet_crns) # subnets_crn = var.vpc == null && var.compute_subnets == null ? module.landing_zone.subnets_crn : concat(local.existing_subnet_crns, module.landing_zone.subnets_crn) #subnets = flatten([local.compute_subnets, local.storage_subnets, local.protocol_subnets]) diff --git a/modules/null_resources/locals.tf b/modules/null_resources/locals.tf index 7191f947..741936c8 100644 --- a/modules/null_resources/locals.tf +++ b/modules/null_resources/locals.tf @@ -5,7 +5,7 @@ locals { remote_terraform_path = format("%s/terraform-ibm-hpc", local.deployer_path) remote_ansible_path = format("%s/terraform-ibm-hpc", local.deployer_path) da_hpc_repo_url = "https://github.com/terraform-ibm-modules/terraform-ibm-hpc.git" - da_hpc_repo_tag = "develop" ###### change it to main in future + da_hpc_repo_tag = "pr-199" ###### change it to main in future zones = jsonencode(var.zones) list_compute_ssh_keys = jsonencode(var.compute_ssh_keys) list_storage_ssh_keys = jsonencode(var.storage_ssh_keys) From 0a1d78505a3e6aaa9143994e5f8f4f73ba241a4d Mon Sep 17 00:00:00 2001 From: Manikandan-Renu1 Date: Thu, 27 Feb 2025 07:25:24 +0530 Subject: [PATCH 03/20] sensistive-add --- outputs.tf | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/outputs.tf b/outputs.tf index 1accc0e8..d36503e4 100644 --- a/outputs.tf +++ b/outputs.tf @@ -8,7 +8,8 @@ output "deployer" { } output "landing_zone_vsi" { - value = module.landing_zone_vsi + value = module.landing_zone_vsi + sensitive = true } output "dns" { From a3a66bd2e19f3ebd405cc985e07b13eb238668f4 Mon Sep 17 00:00:00 2001 From: Manikandan-Renu1 Date: Thu, 27 Feb 2025 08:01:27 +0530 Subject: [PATCH 04/20] subnet-id-change --- locals.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/locals.tf b/locals.tf index f73481f1..b381163c 100644 --- a/locals.tf +++ b/locals.tf @@ -117,7 +117,7 @@ locals { #boot_volume_encryption_key = var.key_management != null ? one(module.landing_zone.boot_volume_encryption_key)["crn"] : null # dependency: landing_zone_vsi -> file-share - compute_subnet_id = var.vpc == null && var.compute_subnets == null ? local.compute_subnets[0].id : "" + compute_subnet_id = var.vpc == null && var.client_subnets == null ? 
local.existing_client_subnets[*].id : module.landing_zone.client_subnets[*].id compute_security_group_id = var.enable_deployer ? [] : module.landing_zone_vsi[0].compute_sg_id management_instance_count = sum(var.management_instances[*]["count"]) default_share = local.management_instance_count > 0 ? [ From fb1bd36d337164b40f2a96086b2bcfb734fa397a Mon Sep 17 00:00:00 2001 From: Manikandan-Renu1 Date: Thu, 27 Feb 2025 08:54:55 +0530 Subject: [PATCH 05/20] subnet-id-fix --- locals.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/locals.tf b/locals.tf index b381163c..67500361 100644 --- a/locals.tf +++ b/locals.tf @@ -117,7 +117,7 @@ locals { #boot_volume_encryption_key = var.key_management != null ? one(module.landing_zone.boot_volume_encryption_key)["crn"] : null # dependency: landing_zone_vsi -> file-share - compute_subnet_id = var.vpc == null && var.client_subnets == null ? local.existing_client_subnets[*].id : module.landing_zone.client_subnets[*].id + compute_subnet_id = var.vpc == null && var.client_subnets == null ? local.existing_client_subnets[0].id : module.landing_zone.client_subnets[0].id compute_security_group_id = var.enable_deployer ? [] : module.landing_zone_vsi[0].compute_sg_id management_instance_count = sum(var.management_instances[*]["count"]) default_share = local.management_instance_count > 0 ? [ From 243fc433c9154224a7cb89a507c49492ef09346c Mon Sep 17 00:00:00 2001 From: Manikandan-Renu1 Date: Thu, 27 Feb 2025 09:04:21 +0530 Subject: [PATCH 06/20] subent-id-fix --- locals.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/locals.tf b/locals.tf index 67500361..f73481f1 100644 --- a/locals.tf +++ b/locals.tf @@ -117,7 +117,7 @@ locals { #boot_volume_encryption_key = var.key_management != null ? one(module.landing_zone.boot_volume_encryption_key)["crn"] : null # dependency: landing_zone_vsi -> file-share - compute_subnet_id = var.vpc == null && var.client_subnets == null ? local.existing_client_subnets[0].id : module.landing_zone.client_subnets[0].id + compute_subnet_id = var.vpc == null && var.compute_subnets == null ? local.compute_subnets[0].id : "" compute_security_group_id = var.enable_deployer ? [] : module.landing_zone_vsi[0].compute_sg_id management_instance_count = sum(var.management_instances[*]["count"]) default_share = local.management_instance_count > 0 ? [ From 412643e42cf7951e3cb51437ec0d8d7f29e54aa1 Mon Sep 17 00:00:00 2001 From: Manikandan-Renu1 Date: Thu, 27 Feb 2025 11:53:03 +0530 Subject: [PATCH 07/20] subnet-issue --- locals.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/locals.tf b/locals.tf index f73481f1..18b71fdd 100644 --- a/locals.tf +++ b/locals.tf @@ -117,7 +117,7 @@ locals { #boot_volume_encryption_key = var.key_management != null ? one(module.landing_zone.boot_volume_encryption_key)["crn"] : null # dependency: landing_zone_vsi -> file-share - compute_subnet_id = var.vpc == null && var.compute_subnets == null ? local.compute_subnets[0].id : "" + compute_subnet_id = var.vpc == null && var.compute_subnets == null ? local.compute_subnets[0].id : [for subnet in data.ibm_is_subnet.existing_compute_subnets : subnet.id][0] compute_security_group_id = var.enable_deployer ? [] : module.landing_zone_vsi[0].compute_sg_id management_instance_count = sum(var.management_instances[*]["count"]) default_share = local.management_instance_count > 0 ? 
[ From 1654dbac70c124e61f33bee105b0b393868eccfa Mon Sep 17 00:00:00 2001 From: Manikandan-Renu1 Date: Thu, 27 Feb 2025 11:56:29 +0530 Subject: [PATCH 08/20] subnet-issue --- modules/landing_zone_vsi/main.tf | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/landing_zone_vsi/main.tf b/modules/landing_zone_vsi/main.tf index 3b58e398..0242d73d 100644 --- a/modules/landing_zone_vsi/main.tf +++ b/modules/landing_zone_vsi/main.tf @@ -14,14 +14,14 @@ resource "local_sensitive_file" "write_meta_private_key" { resource "local_sensitive_file" "copy_compute_private_key_content" { count = local.enable_compute ? 1 : 0 content = (local.compute_private_key_content) - filename = "~/.ssh/id_rsa" + filename = "${path.root}/modules/ansible-roles/id_rsa" file_permission = "0600" } resource "local_sensitive_file" "copy_compute_public_key_content" { count = local.enable_compute ? 1 : 0 content = (local.compute_public_key_content) - filename = "~/.ssh/authorized_keys" + filename = "${path.root}/modules/ansible-roles/id_new" file_permission = "0600" } From cc6ac5d92bd70d5e56f5c06d095c36166c70c5d1 Mon Sep 17 00:00:00 2001 From: Manikandan-Renu1 Date: Thu, 27 Feb 2025 12:27:09 +0530 Subject: [PATCH 09/20] subnet-issue --- modules/landing_zone_vsi/main.tf | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/landing_zone_vsi/main.tf b/modules/landing_zone_vsi/main.tf index 0242d73d..0bb13fec 100644 --- a/modules/landing_zone_vsi/main.tf +++ b/modules/landing_zone_vsi/main.tf @@ -14,14 +14,14 @@ resource "local_sensitive_file" "write_meta_private_key" { resource "local_sensitive_file" "copy_compute_private_key_content" { count = local.enable_compute ? 1 : 0 content = (local.compute_private_key_content) - filename = "${path.root}/modules/ansible-roles/id_rsa" + filename = "/root/.ssh/id_rsa" file_permission = "0600" } resource "local_sensitive_file" "copy_compute_public_key_content" { count = local.enable_compute ? 1 : 0 content = (local.compute_public_key_content) - filename = "${path.root}/modules/ansible-roles/id_new" + filename = "/root/.ssh/authorized_keys" file_permission = "0600" } From a8b470fd40c40d7d1184fd7d48ba52c1dd572730 Mon Sep 17 00:00:00 2001 From: Manikandan-Renu1 Date: Thu, 27 Feb 2025 14:39:30 +0530 Subject: [PATCH 10/20] ssh-key-copy --- modules/landing_zone_vsi/main.tf | 26 +++++++++++++++++++++----- 1 file changed, 21 insertions(+), 5 deletions(-) diff --git a/modules/landing_zone_vsi/main.tf b/modules/landing_zone_vsi/main.tf index 0bb13fec..4e58c28b 100644 --- a/modules/landing_zone_vsi/main.tf +++ b/modules/landing_zone_vsi/main.tf @@ -18,11 +18,27 @@ resource "local_sensitive_file" "copy_compute_private_key_content" { file_permission = "0600" } -resource "local_sensitive_file" "copy_compute_public_key_content" { - count = local.enable_compute ? 1 : 0 - content = (local.compute_public_key_content) - filename = "/root/.ssh/authorized_keys" - file_permission = "0600" +# resource "local_sensitive_file" "copy_compute_public_key_content" { +# count = local.enable_compute ? 1 : 0 +# content = (local.compute_public_key_content) +# filename = "/root/.ssh/authorized_keys" +# file_permission = "0600" +# } + +resource "null_resource" "copy_compute_public_key_content" { + count = local.enable_compute ? 
1 : 0 + + provisioner "local-exec" { + interpreter = ["/bin/bash", "-c"] + command = <> /root/.ssh/config + echo "${local.compute_public_key_content}" >> /root/.ssh/authorized_keys + EOT + } + + triggers = { + build = timestamp() + } } module "storage_key" { From 2104f7931a0e422e17cfb1004f8e25b5c5c5bbe7 Mon Sep 17 00:00:00 2001 From: Manikandan-Renu1 Date: Thu, 27 Feb 2025 15:22:47 +0530 Subject: [PATCH 11/20] base64-change --- modules/landing_zone_vsi/templates/compute_user_data.tpl | 4 ++-- modules/landing_zone_vsi/templates/management_user_data.tpl | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/modules/landing_zone_vsi/templates/compute_user_data.tpl b/modules/landing_zone_vsi/templates/compute_user_data.tpl index 2fdb9bb0..2fc14b81 100644 --- a/modules/landing_zone_vsi/templates/compute_user_data.tpl +++ b/modules/landing_zone_vsi/templates/compute_user_data.tpl @@ -17,9 +17,9 @@ sed -i -e "s/^/no-port-forwarding,no-agent-forwarding,no-X11-forwarding,command= # input parameters echo "${bastion_public_key_content}" >> ~/.ssh/authorized_keys -echo "${compute_public_key_content}" | base64 --decode >> ~/.ssh/authorized_keys +echo "${compute_public_key_content}" >> ~/.ssh/authorized_keys echo "StrictHostKeyChecking no" >> ~/.ssh/config -echo "${compute_private_key_content}" | base64 --decode > ~/.ssh/id_rsa +echo "${compute_private_key_content}" > ~/.ssh/id_rsa chmod 600 ~/.ssh/id_rsa # network setup diff --git a/modules/landing_zone_vsi/templates/management_user_data.tpl b/modules/landing_zone_vsi/templates/management_user_data.tpl index 1db5be80..8488c692 100644 --- a/modules/landing_zone_vsi/templates/management_user_data.tpl +++ b/modules/landing_zone_vsi/templates/management_user_data.tpl @@ -17,9 +17,9 @@ sed -i -e "s/^/no-port-forwarding,no-agent-forwarding,no-X11-forwarding,command= # input parameters echo "${bastion_public_key_content}" >> ~/.ssh/authorized_keys -echo "${management_public_key_content}" | base64 --decode >> ~/.ssh/authorized_keys +echo "${management_public_key_content}" >> ~/.ssh/authorized_keys echo "StrictHostKeyChecking no" >> ~/.ssh/config -echo "${management_private_key_content}" | base64 --decode > ~/.ssh/id_rsa +echo "${management_private_key_content}" > ~/.ssh/id_rsa chmod 600 ~/.ssh/id_rsa # network setup From d07f70b4854a317983089cca448324e0129cf433 Mon Sep 17 00:00:00 2001 From: Manikandan-Renu1 Date: Thu, 6 Mar 2025 17:39:40 +0530 Subject: [PATCH 12/20] resource_provisioner and prepare_tf_input module add --- main.tf | 4 +- .../locals.tf | 0 modules/prepare_tf_input/main.tf | 40 +++++ .../variables.tf | 151 +++++++----------- .../version.tf | 0 modules/resource_provisioner/locals.tf | 9 ++ .../main.tf | 40 ----- modules/resource_provisioner/variables.tf | 68 ++++++++ modules/resource_provisioner/version.tf | 22 +++ 9 files changed, 199 insertions(+), 135 deletions(-) rename modules/{null_resources => prepare_tf_input}/locals.tf (100%) create mode 100644 modules/prepare_tf_input/main.tf rename modules/{null_resources => prepare_tf_input}/variables.tf (90%) rename modules/{null_resources => prepare_tf_input}/version.tf (100%) create mode 100644 modules/resource_provisioner/locals.tf rename modules/{null_resources => resource_provisioner}/main.tf (59%) create mode 100644 modules/resource_provisioner/variables.tf create mode 100644 modules/resource_provisioner/version.tf diff --git a/main.tf b/main.tf index 225c33ce..e837a1c7 100644 --- a/main.tf +++ b/main.tf @@ -93,8 +93,8 @@ module "landing_zone_vsi" { enable_bastion = 
var.enable_bastion } -module "null_resources" { - source = "./modules/null_resources" +module "resource_provisioner" { + source = "./modules/resource_provisioner" ibmcloud_api_key = var.ibmcloud_api_key resource_group = var.resource_group prefix = var.prefix diff --git a/modules/null_resources/locals.tf b/modules/prepare_tf_input/locals.tf similarity index 100% rename from modules/null_resources/locals.tf rename to modules/prepare_tf_input/locals.tf diff --git a/modules/prepare_tf_input/main.tf b/modules/prepare_tf_input/main.tf new file mode 100644 index 00000000..643ccc2d --- /dev/null +++ b/modules/prepare_tf_input/main.tf @@ -0,0 +1,40 @@ +resource "local_sensitive_file" "prepare_tf_input" { + count = var.enable_deployer == true ? 1 : 0 + content = < Date: Thu, 6 Mar 2025 18:03:33 +0530 Subject: [PATCH 13/20] module-issue-fix --- main.tf | 68 ++++++++++++----------- modules/prepare_tf_input/main.tf | 1 - modules/resource_provisioner/main.tf | 6 +- modules/resource_provisioner/variables.tf | 2 +- 4 files changed, 38 insertions(+), 39 deletions(-) diff --git a/main.tf b/main.tf index e837a1c7..f6b49fa1 100644 --- a/main.tf +++ b/main.tf @@ -71,8 +71,6 @@ module "landing_zone_vsi" { vpc_id = local.vpc_id bastion_security_group_id = var.bastion_security_group_id bastion_public_key_content = local.bastion_public_key_content - # compute_public_key_content = var.compute_public_key_content - # compute_private_key_content = var.compute_private_key_content client_subnets = local.client_subnets client_ssh_keys = local.client_ssh_keys client_instances = var.client_instances @@ -93,42 +91,46 @@ module "landing_zone_vsi" { enable_bastion = var.enable_bastion } +module "prepare_tf_input" { + source = "./modules/prepare_tf_input" + ibmcloud_api_key = var.ibmcloud_api_key + resource_group = var.resource_group + prefix = var.prefix + zones = var.zones + compute_ssh_keys = local.compute_ssh_keys + storage_ssh_keys = local.storage_ssh_keys + storage_instances = var.storage_instances + management_instances = var.management_instances + protocol_instances = var.protocol_instances + ibm_customer_number = var.ibm_customer_number + static_compute_instances = var.static_compute_instances + client_instances = var.client_instances + enable_cos_integration = var.enable_cos_integration + enable_atracker = var.enable_atracker + enable_vpc_flow_logs = var.enable_vpc_flow_logs + allowed_cidr = var.allowed_cidr + vpc = local.vpc + vpc_id = local.vpc_id + storage_subnets = local.storage_subnet + protocol_subnets = local.protocol_subnet + compute_subnets = local.compute_subnet + client_subnets = local.client_subnet + bastion_subnets = local.bastion_subnet + dns_domain_names = var.dns_domain_names + bastion_security_group_id = local.bastion_security_group_id + deployer_hostname = local.deployer_hostname + depends_on = [module.deployer] +} + module "resource_provisioner" { - source = "./modules/resource_provisioner" - ibmcloud_api_key = var.ibmcloud_api_key - resource_group = var.resource_group - prefix = var.prefix - zones = var.zones - enable_deployer = var.enable_deployer - compute_ssh_keys = local.compute_ssh_keys - storage_ssh_keys = local.storage_ssh_keys - storage_instances = var.storage_instances - management_instances = var.management_instances - protocol_instances = var.protocol_instances - ibm_customer_number = var.ibm_customer_number - static_compute_instances = var.static_compute_instances - client_instances = var.client_instances - enable_cos_integration = var.enable_cos_integration - enable_atracker = 
var.enable_atracker - enable_vpc_flow_logs = var.enable_vpc_flow_logs - allowed_cidr = var.allowed_cidr - vpc = local.vpc - vpc_id = local.vpc_id - storage_subnets = local.storage_subnet - protocol_subnets = local.protocol_subnet - compute_subnets = local.compute_subnet - client_subnets = local.client_subnet - bastion_subnets = local.bastion_subnet - dns_domain_names = var.dns_domain_names - # compute_public_key_content = local.compute_public_key_contents - # compute_private_key_content = local.compute_private_key_contents + source = "./modules/resource_provisioner" + ibmcloud_api_key = var.ibmcloud_api_key + enable_deployer = var.enable_deployer bastion_fip = local.bastion_fip bastion_public_key_content = local.bastion_public_key_content bastion_private_key_content = local.bastion_private_key_content - bastion_security_group_id = local.bastion_security_group_id - deployer_hostname = local.deployer_hostname deployer_ip = local.deployer_ip - depends_on = [module.deployer] + depends_on = [module.prepare_tf_input] } module "file_storage" { diff --git a/modules/prepare_tf_input/main.tf b/modules/prepare_tf_input/main.tf index 643ccc2d..5d515473 100644 --- a/modules/prepare_tf_input/main.tf +++ b/modules/prepare_tf_input/main.tf @@ -37,4 +37,3 @@ resource "local_sensitive_file" "prepare_tf_input" { EOT filename = local.schematics_inputs_path } - diff --git a/modules/resource_provisioner/main.tf b/modules/resource_provisioner/main.tf index 28bcbdc0..4cb8c8ae 100644 --- a/modules/resource_provisioner/main.tf +++ b/modules/resource_provisioner/main.tf @@ -1,3 +1,5 @@ + + resource "null_resource" "tf_resource_provisioner" { count = var.enable_deployer == true ? 1 : 0 connection { @@ -28,10 +30,6 @@ resource "null_resource" "tf_resource_provisioner" { triggers = { always_run = timestamp() } - - depends_on = [ - local_sensitive_file.prepare_tf_input - ] } resource "null_resource" "cluster_destroyer" { diff --git a/modules/resource_provisioner/variables.tf b/modules/resource_provisioner/variables.tf index d9c7ae63..726b39de 100644 --- a/modules/resource_provisioner/variables.tf +++ b/modules/resource_provisioner/variables.tf @@ -48,7 +48,7 @@ variable "bastion_private_key_content" { ############################################################################## # Terraform generic Variables -############################################################################## +############################################################################# variable "TF_PARALLELISM" { type = string default = "250" From 596fd175dea479ea7964c70d05369a788df0d1c3 Mon Sep 17 00:00:00 2001 From: Manikandan-Renu1 Date: Thu, 6 Mar 2025 19:34:21 +0530 Subject: [PATCH 14/20] resource-condition-handle --- main.tf | 2 +- modules/resource_provisioner/main.tf | 4 +--- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/main.tf b/main.tf index f6b49fa1..0b0165f4 100644 --- a/main.tf +++ b/main.tf @@ -130,7 +130,7 @@ module "resource_provisioner" { bastion_public_key_content = local.bastion_public_key_content bastion_private_key_content = local.bastion_private_key_content deployer_ip = local.deployer_ip - depends_on = [module.prepare_tf_input] + depends_on = [module.deployer, module.prepare_tf_input] } module "file_storage" { diff --git a/modules/resource_provisioner/main.tf b/modules/resource_provisioner/main.tf index 4cb8c8ae..ad905ecb 100644 --- a/modules/resource_provisioner/main.tf +++ b/modules/resource_provisioner/main.tf @@ -1,5 +1,3 @@ - - resource "null_resource" "tf_resource_provisioner" { count = 
var.enable_deployer == true ? 1 : 0 connection { @@ -59,7 +57,7 @@ resource "null_resource" "cluster_destroyer" { when = destroy on_failure = fail inline = [ - "export TF_LOG=${self.triggers.conn_terraform_log_level} && sudo -E terraform -chdir=${self.triggers.conn_remote_terraform_path} destroy -auto-approve" + "if [ -d ${local.remote_terraform_path} ]; then export TF_LOG=${self.triggers.conn_terraform_log_level} && sudo -E terraform -chdir=${self.triggers.conn_remote_terraform_path} destroy -auto-approve fi" ] } } From 8c86192101c94e2eff0f7c1b80d658140f9ea0b5 Mon Sep 17 00:00:00 2001 From: Manikandan-Renu1 Date: Thu, 6 Mar 2025 19:41:50 +0530 Subject: [PATCH 15/20] revoke-destroy-change --- modules/resource_provisioner/main.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/resource_provisioner/main.tf b/modules/resource_provisioner/main.tf index ad905ecb..e9ed3d9d 100644 --- a/modules/resource_provisioner/main.tf +++ b/modules/resource_provisioner/main.tf @@ -57,7 +57,7 @@ resource "null_resource" "cluster_destroyer" { when = destroy on_failure = fail inline = [ - "if [ -d ${local.remote_terraform_path} ]; then export TF_LOG=${self.triggers.conn_terraform_log_level} && sudo -E terraform -chdir=${self.triggers.conn_remote_terraform_path} destroy -auto-approve fi" + "export TF_LOG=${self.triggers.conn_terraform_log_level} && sudo -E terraform -chdir=${self.triggers.conn_remote_terraform_path} destroy -auto-approve" ] } } From cd5215e07b5b66bccb26f8d00a4d1bb9293d3066 Mon Sep 17 00:00:00 2001 From: Manikandan-Renu1 Date: Thu, 6 Mar 2025 20:43:38 +0530 Subject: [PATCH 16/20] comments-remove --- locals.tf | 9 --------- main.tf | 3 +++ modules/deployer/main.tf | 6 ------ modules/landing_zone_vsi/main.tf | 7 ------- modules/resource_provisioner/locals.tf | 2 +- 5 files changed, 4 insertions(+), 23 deletions(-) diff --git a/locals.tf b/locals.tf index 18b71fdd..bf8bfae4 100644 --- a/locals.tf +++ b/locals.tf @@ -37,9 +37,6 @@ locals { deployer_hostname = var.enable_bastion ? flatten(module.deployer.deployer_vsi_data[*].list)[0].name : "" deployer_ip = module.deployer.deployer_ip - # compute_public_key_contents = module.deployer.compute_public_key_content - # compute_private_key_contents = module.deployer.compute_private_key_content - # Existing subnets details existing_compute_subnets = [ for subnet in data.ibm_is_subnet.existing_compute_subnets : @@ -167,11 +164,6 @@ locals { # dependency: landing_zone_vsi -> file-share } -# data "external" "get_hostname" { -# program = ["sh", "-c", "echo '{\"name\": \"'$(hostname)'\", \"ipv4_address\": \"'$(hostname -I | awk '{print $1}')'\"}'"] -# } - - # locals needed for dns-records locals { # dependency: dns -> dns-records @@ -258,4 +250,3 @@ locals { Enable_Monitoring = false lsf_deployer_hostname = var.deployer_hostname #data.external.get_hostname.result.name #var.enable_bastion ? 
"" : flatten(module.deployer.deployer_vsi_data[*].list)[0].name } - diff --git a/main.tf b/main.tf index 0b0165f4..aef8a3a3 100644 --- a/main.tf +++ b/main.tf @@ -93,6 +93,9 @@ module "landing_zone_vsi" { module "prepare_tf_input" { source = "./modules/prepare_tf_input" + enable_deployer = var.enable_deployer + bastion_fip = local.bastion_fip + deployer_ip = local.deployer_ip ibmcloud_api_key = var.ibmcloud_api_key resource_group = var.resource_group prefix = var.prefix diff --git a/modules/deployer/main.tf b/modules/deployer/main.tf index 3e7f54f2..34c2a8d4 100644 --- a/modules/deployer/main.tf +++ b/modules/deployer/main.tf @@ -1,9 +1,3 @@ -# module "compute_key" { -# count = local.enable_deployer && local.enable_compute ? 1 : 0 -# source = "./../key" -# private_key_path = var.enable_bastion ? "${path.root}/../../modules/ansible-roles/compute_id_rsa" : "${path.root}/modules/ansible-roles/compute_id_rsa" #checkov:skip=CKV_SECRET_6 -# } - module "ssh_key" { count = local.enable_bastion ? 1 : 0 source = "./../key" diff --git a/modules/landing_zone_vsi/main.tf b/modules/landing_zone_vsi/main.tf index 4e58c28b..fc26a15c 100644 --- a/modules/landing_zone_vsi/main.tf +++ b/modules/landing_zone_vsi/main.tf @@ -18,13 +18,6 @@ resource "local_sensitive_file" "copy_compute_private_key_content" { file_permission = "0600" } -# resource "local_sensitive_file" "copy_compute_public_key_content" { -# count = local.enable_compute ? 1 : 0 -# content = (local.compute_public_key_content) -# filename = "/root/.ssh/authorized_keys" -# file_permission = "0600" -# } - resource "null_resource" "copy_compute_public_key_content" { count = local.enable_compute ? 1 : 0 diff --git a/modules/resource_provisioner/locals.tf b/modules/resource_provisioner/locals.tf index 41c70fc8..20b3a3d1 100644 --- a/modules/resource_provisioner/locals.tf +++ b/modules/resource_provisioner/locals.tf @@ -5,5 +5,5 @@ locals { remote_terraform_path = format("%s/terraform-ibm-hpc", local.deployer_path) remote_ansible_path = format("%s/terraform-ibm-hpc", local.deployer_path) da_hpc_repo_url = "https://github.com/terraform-ibm-modules/terraform-ibm-hpc.git" - da_hpc_repo_tag = "pr-199" ###### change it to main in future + da_hpc_repo_tag = "develop" ###### change it to main in future } From 3c1b482a62a8f683b440dc48536876a3a8b18172 Mon Sep 17 00:00:00 2001 From: Manikandan-Renu1 Date: Wed, 12 Mar 2025 21:48:48 +0530 Subject: [PATCH 17/20] pre-commit-fix --- .secrets.baseline | 65 ++- datasource.tf | 30 +- .../modules/landing_zone_vpc/datasource.tf | 4 +- .../modules/landing_zone_vpc/locals.tf | 2 +- .../modules/landing_zone_vpc/variables.tf | 2 +- examples/create_vpc/solutions/hpc/main.tf | 22 +- .../create_vpc/solutions/hpc/variables.tf | 6 +- locals.tf | 66 +-- main.tf | 26 +- .../roles/lsf/tasks/lsf_inventory.yml | 4 +- .../roles/lsf/tasks/lsf_prepare.yml | 2 +- .../ansible-roles/roles/lsf/tasks/main.yml | 2 +- .../roles/lsf/templates/lsf-all.j2 | 16 +- .../roles/lsf/templates/lsf-config.j2 | 38 +- .../roles/lsf/templates/lsf-inventory.j2 | 10 +- modules/ansible-roles/roles/lsf/vars/main.yml | 2 +- .../vpc_fileshare_configure/handlers/main.yml | 2 +- .../vpc_fileshare_configure/tasks/main.yml | 2 +- .../tasks/vpc_fileshare_configure.yml | 2 +- modules/deployer/datasource.tf | 2 +- modules/deployer/locals.tf | 22 +- modules/deployer/outputs.tf | 6 +- modules/deployer/variables.tf | 50 +- modules/dns/datasource.tf | 9 + modules/dns/locals.tf | 9 + modules/dns/main.tf | 32 +- modules/dns/outputs.tf | 2 +- modules/dns_record/datasource.tf | 3 
+ modules/dns_record/locals.tf | 5 + modules/dns_record/main.tf | 10 - modules/file_storage/locals.tf | 4 - modules/file_storage/outputs.tf | 4 +- modules/inventory/main.tf | 6 +- modules/inventory/variables.tf | 5 +- modules/key/main.tf | 4 +- modules/key/outputs.tf | 4 +- modules/landing_zone/variables.tf | 10 +- modules/landing_zone/version.tf | 24 +- modules/landing_zone_vsi/datasource.tf | 10 +- modules/landing_zone_vsi/locals.tf | 4 +- modules/landing_zone_vsi/variables.tf | 58 +- modules/landing_zone_vsi/version.tf | 18 +- modules/observability_instance/main.tf | 2 +- modules/observability_instance/variables.tf | 2 +- modules/playbook/main.tf | 4 +- modules/playbook/variables.tf | 2 +- modules/playbook/version.tf | 10 +- modules/prepare_tf_input/locals.tf | 49 +- modules/prepare_tf_input/main.tf | 2 +- modules/prepare_tf_input/outputs.tf | 0 modules/prepare_tf_input/variables.tf | 24 +- modules/prepare_tf_input/version.tf | 8 - modules/resource_provisioner/locals.tf | 6 +- modules/resource_provisioner/outputs.tf | 0 modules/resource_provisioner/variables.tf | 25 +- modules/resource_provisioner/version.tf | 14 +- modules/write_inventory/main.tf | 11 +- modules/write_inventory/outputs.tf | 5 + modules/write_inventory/variables.tf | 4 +- modules/write_inventory/version.tf | 8 - outputs.tf | 19 +- provider.tf | 2 +- solutions/custom/README.md | 83 --- .../catalogValidationValues.json.template | 2 +- solutions/custom/datasource.tf | 22 - solutions/custom/locals.tf | 8 +- solutions/custom/main.tf | 19 +- solutions/custom/outputs.tf | 3 +- solutions/custom/override.json | 228 ++++---- solutions/custom/variables.tf | 525 ++++++++--------- solutions/custom/version.tf | 4 - solutions/hpcaas/README.md | 32 +- .../catalogValidationValues.json.template | 4 +- solutions/hpcaas/datasource.tf | 22 - solutions/hpcaas/locals.tf | 8 +- solutions/hpcaas/main.tf | 19 +- solutions/hpcaas/outputs.tf | 3 +- solutions/hpcaas/override.json | 126 ++-- solutions/hpcaas/variables.tf | 6 +- solutions/hpcaas/version.tf | 4 - solutions/lsf/README.md | 55 +- .../lsf/catalogValidationValues.json.template | 4 +- solutions/lsf/datasource.tf | 22 - solutions/lsf/locals.tf | 32 +- solutions/lsf/main.tf | 142 ++--- solutions/lsf/outputs.tf | 5 +- solutions/lsf/override.json | 224 ++++---- solutions/lsf/variables.tf | 104 ++-- solutions/lsf/version.tf | 4 - solutions/scale/README.md | 44 +- .../catalogValidationValues.json.template | 4 +- solutions/scale/datasource.tf | 22 - solutions/scale/locals.tf | 8 +- solutions/scale/main.tf | 19 +- solutions/scale/outputs.tf | 3 +- solutions/scale/override.json | 164 +++--- solutions/scale/variables.tf | 172 +++--- solutions/scale/version.tf | 4 - solutions/slurm/README.md | 32 +- .../catalogValidationValues.json.template | 4 +- solutions/slurm/datasource.tf | 22 - solutions/slurm/locals.tf | 8 +- solutions/slurm/main.tf | 19 +- solutions/slurm/outputs.tf | 3 +- solutions/slurm/override.json | 126 ++-- solutions/slurm/variables.tf | 4 +- solutions/slurm/version.tf | 4 - solutions/symphony/README.md | 32 +- .../catalogValidationValues.json.template | 4 +- solutions/symphony/datasource.tf | 22 - solutions/symphony/locals.tf | 8 +- solutions/symphony/main.tf | 19 +- solutions/symphony/outputs.tf | 3 +- solutions/symphony/override.json | 126 ++-- solutions/symphony/variables.tf | 4 +- solutions/symphony/version.tf | 4 - variables.tf | 539 +++++++++--------- version.tf | 16 +- 118 files changed, 1879 insertions(+), 2071 deletions(-) create mode 100644 modules/dns/datasource.tf create 
mode 100644 modules/dns/locals.tf create mode 100644 modules/dns_record/datasource.tf create mode 100644 modules/dns_record/locals.tf delete mode 100644 modules/file_storage/locals.tf create mode 100644 modules/prepare_tf_input/outputs.tf create mode 100644 modules/resource_provisioner/outputs.tf delete mode 100644 solutions/custom/datasource.tf delete mode 100644 solutions/hpcaas/datasource.tf delete mode 100644 solutions/lsf/datasource.tf delete mode 100644 solutions/scale/datasource.tf delete mode 100644 solutions/slurm/datasource.tf delete mode 100644 solutions/symphony/datasource.tf diff --git a/.secrets.baseline b/.secrets.baseline index c7cfd369..913b818a 100644 --- a/.secrets.baseline +++ b/.secrets.baseline @@ -3,7 +3,7 @@ "files": "go.sum|^.secrets.baseline$", "lines": null }, - "generated_at": "2024-04-08T15:16:34Z", + "generated_at": "2025-03-12T08:49:38Z", "plugins_used": [ { "name": "AWSKeyDetector" @@ -76,7 +76,68 @@ "name": "TwilioKeyDetector" } ], - "results": {}, + "results": { + "solutions/custom/override.json": [ + { + "hashed_secret": "850264135744c21e30d6336ed7bf047d2e82af8b", + "is_secret": true, + "is_verified": false, + "line_number": 85, + "type": "Secret Keyword", + "verified_result": null + } + ], + "solutions/hpcaas/override.json": [ + { + "hashed_secret": "850264135744c21e30d6336ed7bf047d2e82af8b", + "is_secret": true, + "is_verified": false, + "line_number": 69, + "type": "Secret Keyword", + "verified_result": null + } + ], + "solutions/lsf/override.json": [ + { + "hashed_secret": "850264135744c21e30d6336ed7bf047d2e82af8b", + "is_secret": true, + "is_verified": false, + "line_number": 69, + "type": "Secret Keyword", + "verified_result": null + } + ], + "solutions/scale/override.json": [ + { + "hashed_secret": "850264135744c21e30d6336ed7bf047d2e82af8b", + "is_secret": true, + "is_verified": false, + "line_number": 63, + "type": "Secret Keyword", + "verified_result": null + } + ], + "solutions/slurm/override.json": [ + { + "hashed_secret": "850264135744c21e30d6336ed7bf047d2e82af8b", + "is_secret": true, + "is_verified": false, + "line_number": 69, + "type": "Secret Keyword", + "verified_result": null + } + ], + "solutions/symphony/override.json": [ + { + "hashed_secret": "850264135744c21e30d6336ed7bf047d2e82af8b", + "is_secret": true, + "is_verified": false, + "line_number": 69, + "type": "Secret Keyword", + "verified_result": null + } + ] + }, "version": "0.13.1+ibm.62.dss", "word_list": { "file": null, diff --git a/datasource.tf b/datasource.tf index 008d8da4..ef7d3825 100644 --- a/datasource.tf +++ b/datasource.tf @@ -1,58 +1,54 @@ # Future use /* -data "ibm_is_region" "itself" { +data "ibm_is_region" "region" { name = local.region } -data "ibm_is_zone" "itself" { +data "ibm_is_zone" "zone" { name = var.zones[0] - region = data.ibm_is_region.itself.name + region = data.ibm_is_region.region.name } */ -data "ibm_is_vpc" "itself" { - count = var.vpc == null ? 0 : 1 - name = var.vpc -} data "ibm_is_vpc" "existing_vpc" { - count = var.vpc != null ? 1 : 0 - name = var.vpc + count = var.vpc_name != null ? 1 : 0 + name = var.vpc_name } /* -data "ibm_is_subnet" "itself" { +data "ibm_is_subnet" "subnet" { count = length(local.subnets) identifier = local.subnets[count.index]["id"] } */ data "ibm_resource_group" "resource_group" { - count = var.resource_group == null ? 0 : 1 - name = var.resource_group + count = var.existing_resource_group == null ? 
0 : 1 + name = var.existing_resource_group } data "ibm_is_subnet" "existing_compute_subnets" { - count = var.vpc != null && var.compute_subnets != null ? 1 : 0 + count = var.vpc_name != null && var.compute_subnets != null ? 1 : 0 name = var.compute_subnets[count.index] } data "ibm_is_subnet" "existing_storage_subnets" { - count = var.vpc != null && var.storage_subnets != null ? 1 : 0 + count = var.vpc_name != null && var.storage_subnets != null ? 1 : 0 name = var.storage_subnets[count.index] } data "ibm_is_subnet" "existing_protocol_subnets" { - count = var.vpc != null && var.protocol_subnets != null ? 1 : 0 + count = var.vpc_name != null && var.protocol_subnets != null ? 1 : 0 name = var.protocol_subnets[count.index] } data "ibm_is_subnet" "existing_client_subnets" { - count = var.vpc != null && var.client_subnets != null ? 1 : 0 + count = var.vpc_name != null && var.client_subnets != null ? 1 : 0 name = var.client_subnets[count.index] } data "ibm_is_subnet" "existing_bastion_subnets" { - count = var.vpc != null && var.bastion_subnets != null ? 1 : 0 + count = var.vpc_name != null && var.bastion_subnets != null ? 1 : 0 name = var.bastion_subnets[count.index] } diff --git a/examples/create_vpc/modules/landing_zone_vpc/datasource.tf b/examples/create_vpc/modules/landing_zone_vpc/datasource.tf index 130d351e..ad4e30ff 100644 --- a/examples/create_vpc/modules/landing_zone_vpc/datasource.tf +++ b/examples/create_vpc/modules/landing_zone_vpc/datasource.tf @@ -1,3 +1,3 @@ -data "ibm_resource_group" "itself" { - name = var.resource_group +data "ibm_resource_group" "resource_group" { + name = var.existing_resource_group } diff --git a/examples/create_vpc/modules/landing_zone_vpc/locals.tf b/examples/create_vpc/modules/landing_zone_vpc/locals.tf index a896e598..b34ad164 100644 --- a/examples/create_vpc/modules/landing_zone_vpc/locals.tf +++ b/examples/create_vpc/modules/landing_zone_vpc/locals.tf @@ -21,7 +21,7 @@ locals { var.allowed_cidr # var.network_cidr ]) - resource_group_id = var.resource_group != null ? data.ibm_resource_group.itself.id : "" + resource_group_id = var.existing_resource_group != null ? 
data.ibm_resource_group.resource_group.id : "" # Region and Zone calculations region = join("-", slice(split("-", var.zones[0]), 0, 2)) diff --git a/examples/create_vpc/modules/landing_zone_vpc/variables.tf b/examples/create_vpc/modules/landing_zone_vpc/variables.tf index 207f24a5..954feac4 100644 --- a/examples/create_vpc/modules/landing_zone_vpc/variables.tf +++ b/examples/create_vpc/modules/landing_zone_vpc/variables.tf @@ -13,7 +13,7 @@ variable "ibmcloud_api_key" { # Resource Groups Variables ############################################################################## -variable "resource_group" { +variable "existing_resource_group" { description = "String describing resource groups to create or reference" type = string default = null diff --git a/examples/create_vpc/solutions/hpc/main.tf b/examples/create_vpc/solutions/hpc/main.tf index 8dae8965..7045346c 100644 --- a/examples/create_vpc/solutions/hpc/main.tf +++ b/examples/create_vpc/solutions/hpc/main.tf @@ -1,13 +1,13 @@ module "create_vpc" { - source = "../../modules/landing_zone_vpc" - allowed_cidr = var.remote_allowed_ips - ibmcloud_api_key = var.ibmcloud_api_key - ssh_keys = var.bastion_ssh_keys - prefix = var.cluster_prefix - resource_group = var.resource_group - zones = var.zones - network_cidr = var.vpc_cidr - bastion_subnets_cidr = var.vpc_cluster_login_private_subnets_cidr_blocks - compute_subnets_cidr = var.vpc_cluster_private_subnets_cidr_blocks - enable_hub = var.enable_hub + source = "../../modules/landing_zone_vpc" + allowed_cidr = var.remote_allowed_ips + ibmcloud_api_key = var.ibmcloud_api_key + ssh_keys = var.bastion_ssh_keys + prefix = var.cluster_prefix + existing_resource_group = var.existing_resource_group + zones = var.zones + network_cidr = var.vpc_cidr + bastion_subnets_cidr = var.vpc_cluster_login_private_subnets_cidr_blocks + compute_subnets_cidr = var.vpc_cluster_private_subnets_cidr_blocks + enable_hub = var.enable_hub } diff --git a/examples/create_vpc/solutions/hpc/variables.tf b/examples/create_vpc/solutions/hpc/variables.tf index ed625bb1..be6e257f 100644 --- a/examples/create_vpc/solutions/hpc/variables.tf +++ b/examples/create_vpc/solutions/hpc/variables.tf @@ -16,13 +16,13 @@ variable "ibmcloud_api_key" { # Resource Groups Variables ############################################################################## -variable "resource_group" { +variable "existing_resource_group" { description = "Resource group name from your IBM Cloud account where the VPC resources should be deployed. Note. If the resource group value is set as null, automation creates two different RG with the name (workload-rg and service-rg). For additional information on resource groups, see [Managing resource groups](https://cloud.ibm.com/docs/account?topic=account-rgs)." type = string default = "Default" validation { - condition = var.resource_group != null - error_message = "If you want to provide null for resource_group variable, it should be within double quotes." + condition = var.existing_resource_group != null + error_message = "If you want to provide null for existing_resource_group variable, it should be within double quotes." } } diff --git a/locals.tf b/locals.tf index bf8bfae4..3263e82e 100644 --- a/locals.tf +++ b/locals.tf @@ -15,8 +15,8 @@ locals { # locals needed for deployer locals { # dependency: landing_zone -> deployer - vpc_id = var.vpc == null ? one(module.landing_zone.vpc_id) : data.ibm_is_vpc.existing_vpc[0].id - vpc = var.vpc == null ? 
one(module.landing_zone.vpc_name) : var.vpc + vpc_id = var.vpc_name == null ? one(module.landing_zone.vpc_id) : data.ibm_is_vpc.existing_vpc[0].id + vpc = var.vpc_name == null ? one(module.landing_zone.vpc_name) : var.vpc_name bastion_subnets = module.landing_zone.bastion_subnets kms_encryption_enabled = var.key_management != null ? true : false boot_volume_encryption_key = var.key_management != null ? one(module.landing_zone.boot_volume_encryption_key)["crn"] : null @@ -78,21 +78,21 @@ locals { } ] - existing_bastion_subnets = [ - for subnet in data.ibm_is_subnet.existing_bastion_subnets : - { - cidr = subnet.ipv4_cidr_block - id = subnet.id - name = subnet.name - zone = subnet.zone - } - ] + # existing_bastion_subnets = [ + # for subnet in data.ibm_is_subnet.existing_bastion_subnets : + # { + # cidr = subnet.ipv4_cidr_block + # id = subnet.id + # name = subnet.name + # zone = subnet.zone + # } + # ] # dependency: landing_zone -> landing_zone_vsi - client_subnets = var.vpc != null && var.client_subnets != null ? local.existing_client_subnets : module.landing_zone.client_subnets - compute_subnets = var.vpc != null && var.compute_subnets != null ? local.existing_compute_subnets : module.landing_zone.compute_subnets - storage_subnets = var.vpc != null && var.storage_subnets != null ? local.existing_storage_subnets : module.landing_zone.storage_subnets - protocol_subnets = var.vpc != null && var.protocol_subnets != null ? local.existing_protocol_subnets : module.landing_zone.protocol_subnets + client_subnets = var.vpc_name != null && var.client_subnets != null ? local.existing_client_subnets : module.landing_zone.client_subnets + compute_subnets = var.vpc_name != null && var.compute_subnets != null ? local.existing_compute_subnets : module.landing_zone.compute_subnets + storage_subnets = var.vpc_name != null && var.storage_subnets != null ? local.existing_storage_subnets : module.landing_zone.storage_subnets + protocol_subnets = var.vpc_name != null && var.protocol_subnets != null ? local.existing_protocol_subnets : module.landing_zone.protocol_subnets storage_subnet = [for subnet in local.storage_subnets : subnet.name] protocol_subnet = [for subnet in local.protocol_subnets : subnet.name] @@ -103,18 +103,18 @@ locals { #boot_volume_encryption_key = var.key_management != null ? one(module.landing_zone.boot_volume_encryption_key)["crn"] : null #skip_iam_authorization_policy = true #resource_group_id = data.ibm_resource_group.itself.id - #vpc_id = var.vpc == null ? module.landing_zone.vpc_id[0] : data.ibm_is_vpc.itself[0].id - #vpc_crn = var.vpc == null ? module.landing_zone.vpc_crn[0] : data.ibm_is_vpc.itself[0].crn + #vpc_id = var.vpc_name == null ? module.landing_zone.vpc_id[0] : data.ibm_is_vpc.existing_vpc[0].id + #vpc_crn = var.vpc_name == null ? module.landing_zone.vpc_crn[0] : data.ibm_is_vpc.existing_vpc[0].crn } # locals needed for file-storage locals { # dependency: landing_zone -> file-storage - #vpc_id = var.vpc == null ? one(module.landing_zone.vpc_id) : var.vpc + #vpc_id = var.vpc_name == null ? one(module.landing_zone.vpc_id) : var.vpc_name #boot_volume_encryption_key = var.key_management != null ? one(module.landing_zone.boot_volume_encryption_key)["crn"] : null # dependency: landing_zone_vsi -> file-share - compute_subnet_id = var.vpc == null && var.compute_subnets == null ? local.compute_subnets[0].id : [for subnet in data.ibm_is_subnet.existing_compute_subnets : subnet.id][0] + compute_subnet_id = var.vpc_name == null && var.compute_subnets == null ? 
local.compute_subnets[0].id : [for subnet in data.ibm_is_subnet.existing_compute_subnets : subnet.id][0] compute_security_group_id = var.enable_deployer ? [] : module.landing_zone_vsi[0].compute_sg_id management_instance_count = sum(var.management_instances[*]["count"]) default_share = local.management_instance_count > 0 ? [ @@ -139,23 +139,23 @@ locals { # locals needed for DNS locals { # dependency: landing_zone -> DNS - resource_group = var.resource_group == null ? "workload-rg" : var.resource_group + # resource_group = var.existing_resource_group == null ? "workload-rg" : var.existing_resource_group resource_group_ids = { - # management_rg = var.resource_group == null ? module.landing_zone.resource_group_id[0]["management-rg"] : one(values(one(module.landing_zone.resource_group_id))) - service_rg = var.resource_group == null ? module.landing_zone.resource_group_id[0]["service-rg"] : data.ibm_resource_group.resource_group[0].id - workload_rg = var.resource_group == null ? module.landing_zone.resource_group_id[0]["workload-rg"] : data.ibm_resource_group.resource_group[0].id + # management_rg = var.existing_resource_group == null ? module.landing_zone.resource_group_id[0]["management-rg"] : one(values(one(module.landing_zone.resource_group_id))) + service_rg = var.existing_resource_group == null ? module.landing_zone.resource_group_id[0]["service-rg"] : data.ibm_resource_group.resource_group[0].id + workload_rg = var.existing_resource_group == null ? module.landing_zone.resource_group_id[0]["workload-rg"] : data.ibm_resource_group.resource_group[0].id } # resource_group_id = one(values(one(module.landing_zone.resource_group_id))) - vpc_crn = var.vpc == null ? one(module.landing_zone.vpc_crn) : one(data.ibm_is_vpc.itself[*].crn) + vpc_crn = var.vpc_name == null ? one(module.landing_zone.vpc_crn) : one(data.ibm_is_vpc.existing_vpc[*].crn) # TODO: Fix existing subnet logic - #subnets_crn = var.vpc == null ? module.landing_zone.subnets_crn : ### + #subnets_crn = var.vpc_name == null ? module.landing_zone.subnets_crn : ### existing_compute_subnet_crns = [for subnet in data.ibm_is_subnet.existing_compute_subnets : subnet.crn] existing_storage_subnet_crns = [for subnet in data.ibm_is_subnet.existing_storage_subnets : subnet.crn] existing_protocol_subnet_crns = [for subnet in data.ibm_is_subnet.existing_protocol_subnets : subnet.crn] existing_client_subnet_crns = [for subnet in data.ibm_is_subnet.existing_client_subnets : subnet.crn] existing_bastion_subnet_crns = [for subnet in data.ibm_is_subnet.existing_bastion_subnets : subnet.crn] subnets_crn = concat(local.existing_compute_subnet_crns, local.existing_storage_subnet_crns, local.existing_protocol_subnet_crns, local.existing_client_subnet_crns, local.existing_bastion_subnet_crns) - # subnets_crn = var.vpc == null && var.compute_subnets == null ? module.landing_zone.subnets_crn : concat(local.existing_subnet_crns, module.landing_zone.subnets_crn) + # subnets_crn = var.vpc_name == null && var.compute_subnets == null ? module.landing_zone.subnets_crn : concat(local.existing_subnet_crns, module.landing_zone.subnets_crn) #subnets = flatten([local.compute_subnets, local.storage_subnets, local.protocol_subnets]) #subnets_crns = data.ibm_is_subnet.itself[*].crn # subnets_crn = module.landing_zone.subnets_crn @@ -167,9 +167,9 @@ locals { # locals needed for dns-records locals { # dependency: dns -> dns-records - dns_instance_id = var.enable_deployer ? "" : module.dns[0].dns_instance_id - dns_custom_resolver_id = var.enable_deployer ? 
"" : module.dns[0].dns_custom_resolver_id - dns_zone_map_list = var.enable_deployer ? [] : module.dns[0].dns_zone_maps + dns_instance_id = var.enable_deployer ? "" : module.dns[0].dns_instance_id + # dns_custom_resolver_id = var.enable_deployer ? "" : module.dns[0].dns_custom_resolver_id + dns_zone_map_list = var.enable_deployer ? [] : module.dns[0].dns_zone_maps compute_dns_zone_id = one(flatten([ for dns_zone in local.dns_zone_map_list : values(dns_zone) if one(keys(dns_zone)) == var.dns_domain_names["compute"] ])) @@ -226,9 +226,9 @@ locals { locals { bastion_fip = module.deployer.bastion_fip compute_private_key_path = var.enable_bastion ? "${path.root}/../../modules/ansible-roles/compute_id_rsa" : "${path.root}/modules/ansible-roles/compute_id_rsa" #checkov:skip=CKV_SECRET_6 - storage_private_key_path = var.enable_bastion ? "${path.root}/../../modules/ansible-roles/storage_id_rsa" : "${path.root}/modules/ansible-roles/storage_id_rsa" #checkov:skip=CKV_SECRET_6 - compute_playbook_path = var.enable_bastion ? "${path.root}/../../modules/ansible-roles/compute_ssh.yaml" : "${path.root}/modules/ansible-roles/compute_ssh.yaml" - storage_playbook_path = var.enable_bastion ? "${path.root}/../../modules/ansible-roles/storage_ssh.yaml" : "${path.root}/modules/ansible-roles/storage_ssh.yaml" + # storage_private_key_path = var.enable_bastion ? "${path.root}/../../modules/ansible-roles/storage_id_rsa" : "${path.root}/modules/ansible-roles/storage_id_rsa" #checkov:skip=CKV_SECRET_6 + compute_playbook_path = var.enable_bastion ? "${path.root}/../../modules/ansible-roles/compute_ssh.yaml" : "${path.root}/modules/ansible-roles/compute_ssh.yaml" + # storage_playbook_path = var.enable_bastion ? "${path.root}/../../modules/ansible-roles/storage_ssh.yaml" : "${path.root}/modules/ansible-roles/storage_ssh.yaml" } # file Share OutPut @@ -247,6 +247,6 @@ locals { db_hosts = var.enable_deployer ? [] : [local.management_nodes[0]] # Without Pac HA ha_shared_dir = "/mnt/lsf/shared" nfs_install_dir = "none" - Enable_Monitoring = false + enable_monitoring = false lsf_deployer_hostname = var.deployer_hostname #data.external.get_hostname.result.name #var.enable_bastion ? "" : flatten(module.deployer.deployer_vsi_data[*].list)[0].name } diff --git a/main.tf b/main.tf index aef8a3a3..5e2da7a6 100644 --- a/main.tf +++ b/main.tf @@ -3,13 +3,11 @@ module "landing_zone" { enable_landing_zone = var.enable_landing_zone allowed_cidr = var.allowed_cidr compute_subnets_cidr = var.compute_subnets_cidr - clusters = var.clusters cos_instance_name = var.cos_instance_name enable_atracker = var.observability_atracker_enable && (var.observability_atracker_target_type == "cos") ? 
true : false enable_cos_integration = var.enable_cos_integration enable_vpc_flow_logs = var.enable_vpc_flow_logs enable_vpn = var.enable_vpn - hpcs_instance_name = var.hpcs_instance_name key_management = var.key_management kms_instance_name = var.kms_instance_name kms_key_name = var.kms_key_name @@ -22,10 +20,10 @@ module "landing_zone" { prefix = var.prefix protocol_instances = var.protocol_instances protocol_subnets_cidr = var.protocol_subnets_cidr - resource_group = var.resource_group + resource_group = var.existing_resource_group storage_instances = var.storage_instances storage_subnets_cidr = var.storage_subnets_cidr - vpc = var.vpc + vpc = var.vpc_name vpn_peer_address = var.vpn_peer_address vpn_peer_cidr = var.vpn_peer_cidr vpn_preshared_key = var.vpn_preshared_key @@ -34,13 +32,14 @@ module "landing_zone" { skip_flowlogs_s2s_auth_policy = var.skip_flowlogs_s2s_auth_policy skip_kms_s2s_auth_policy = var.skip_kms_s2s_auth_policy observability_logs_enable = var.observability_logs_enable_for_management || var.observability_logs_enable_for_compute || (var.observability_atracker_enable && var.observability_atracker_target_type == "cloudlogs") ? true : false + # hpcs_instance_name = var.hpcs_instance_name + # clusters = var.clusters } module "deployer" { source = "./modules/deployer" - resource_group = var.resource_group + resource_group = var.existing_resource_group prefix = var.prefix - zones = var.zones vpc_id = local.vpc_id network_cidr = var.network_cidr enable_bastion = var.enable_bastion @@ -56,18 +55,18 @@ module "deployer" { boot_volume_encryption_key = local.boot_volume_encryption_key existing_kms_instance_guid = local.existing_kms_instance_guid skip_iam_authorization_policy = var.skip_iam_authorization_policy - static_compute_instances = var.static_compute_instances - management_instances = var.management_instances dns_domain_names = var.dns_domain_names + # zones = var.zones + # static_compute_instances = var.static_compute_instances + # management_instances = var.management_instances } module "landing_zone_vsi" { count = var.enable_deployer == false ? 
1 : 0 source = "./modules/landing_zone_vsi" - resource_group = var.resource_group + resource_group = var.existing_resource_group prefix = var.prefix - zones = var.zones vpc_id = local.vpc_id bastion_security_group_id = var.bastion_security_group_id bastion_public_key_content = local.bastion_public_key_content @@ -97,7 +96,7 @@ module "prepare_tf_input" { bastion_fip = local.bastion_fip deployer_ip = local.deployer_ip ibmcloud_api_key = var.ibmcloud_api_key - resource_group = var.resource_group + resource_group = var.existing_resource_group prefix = var.prefix zones = var.zones compute_ssh_keys = local.compute_ssh_keys @@ -130,7 +129,6 @@ module "resource_provisioner" { ibmcloud_api_key = var.ibmcloud_api_key enable_deployer = var.enable_deployer bastion_fip = local.bastion_fip - bastion_public_key_content = local.bastion_public_key_content bastion_private_key_content = local.bastion_private_key_content deployer_ip = local.deployer_ip depends_on = [module.deployer, module.prepare_tf_input] @@ -203,7 +201,7 @@ module "write_compute_cluster_inventory" { my_cluster_name = var.prefix ha_shared_dir = local.ha_shared_dir nfs_install_dir = local.nfs_install_dir - Enable_Monitoring = local.Enable_Monitoring + enable_monitoring = local.enable_monitoring lsf_deployer_hostname = local.lsf_deployer_hostname depends_on = [time_sleep.wait_60_seconds] } @@ -220,7 +218,7 @@ module "write_storage_cluster_inventory" { my_cluster_name = var.prefix ha_shared_dir = local.ha_shared_dir nfs_install_dir = local.nfs_install_dir - Enable_Monitoring = local.Enable_Monitoring + enable_monitoring = local.enable_monitoring lsf_deployer_hostname = local.lsf_deployer_hostname depends_on = [time_sleep.wait_60_seconds] } diff --git a/modules/ansible-roles/roles/lsf/tasks/lsf_inventory.yml b/modules/ansible-roles/roles/lsf/tasks/lsf_inventory.yml index d6d94cf5..f17a0aef 100644 --- a/modules/ansible-roles/roles/lsf/tasks/lsf_inventory.yml +++ b/modules/ansible-roles/roles/lsf/tasks/lsf_inventory.yml @@ -19,7 +19,7 @@ run_once: true # Update deployer_hostname in group_vars/all - + - name: Update deployer_hostname in group_vars/all ansible.builtin.lineinfile: path: "{{ inventory_path }}/group_vars/all" @@ -34,4 +34,4 @@ # src: lsf-all.j2 # dest: "{{ inventory_path }}/group_vars/all" # # delegate_to: localhost -# delegate_to: "{{ lsf_deployer_hostname }}.comp.com" \ No newline at end of file +# delegate_to: "{{ lsf_deployer_hostname }}.comp.com" diff --git a/modules/ansible-roles/roles/lsf/tasks/lsf_prepare.yml b/modules/ansible-roles/roles/lsf/tasks/lsf_prepare.yml index 97b704b5..b335dc27 100644 --- a/modules/ansible-roles/roles/lsf/tasks/lsf_prepare.yml +++ b/modules/ansible-roles/roles/lsf/tasks/lsf_prepare.yml @@ -35,4 +35,4 @@ file: path: "/mnt/lsf/{{ lsf_dir }}" state: directory - mode: '0777' \ No newline at end of file + mode: '0777' diff --git a/modules/ansible-roles/roles/lsf/tasks/main.yml b/modules/ansible-roles/roles/lsf/tasks/main.yml index 7d712769..e5c22e41 100644 --- a/modules/ansible-roles/roles/lsf/tasks/main.yml +++ b/modules/ansible-roles/roles/lsf/tasks/main.yml @@ -4,4 +4,4 @@ - import_tasks: lsf_prepare.yml # tasks file for template tasks -- import_tasks: lsf_inventory.yml \ No newline at end of file +- import_tasks: lsf_inventory.yml diff --git a/modules/ansible-roles/roles/lsf/templates/lsf-all.j2 b/modules/ansible-roles/roles/lsf/templates/lsf-all.j2 index 14c581a9..fefa4536 100644 --- a/modules/ansible-roles/roles/lsf/templates/lsf-all.j2 +++ b/modules/ansible-roles/roles/lsf/templates/lsf-all.j2 @@ 
-25,7 +25,7 @@ ENABLE_FLOATING_CLIENTS: false ENABLE_STATIC_CLIENTS: true PACKAGES: - GUI: + GUI: es: [ ] kibana: [ ] gpfsio: [ ] @@ -38,13 +38,13 @@ PACKAGES: LSF_Master: base: [ lsf-ego-master, lsf-master, lsf-ls-server, lsf-release, lsf-resource-connector ] dm: [ lsf-data-mgr ] - LSF_Server: + LSF_Server: base: [ lsf-ego-server, lsf-server, lsf-integrations, lsf-devel, lsf-man-pages ] smpi: [] smpi_misc: [] pmpi: [] nfs: [ lsf-nfs-support ] - LSF_Client: + LSF_Client: base: [ lsf-conf, lsf-client, lsf-ls-client ] pm: [ lsf-pm-client ] dm: [ lsf-data-mgr ] @@ -53,10 +53,10 @@ PACKAGES: current: [ MariaDB-server, MariaDB-client ] previous: [ mariadb-server, mariadb ] -# NOTE: The packages to remove are not the same as what you need to +# NOTE: The packages to remove are not the same as what you need to # install, because they are dependencies of what was installed UNPACKAGES: - GUI: + GUI: explorer: [ lsf-monitor, lsf-explorer-server ] pac: [ lsf-appcenter ] base: [ lsf-perf, lsf-gui, logstash-for-lsf, elasticsearch-for-lsf, lsf-gui ,lsf-pm-server ,lsf-pm-client ] @@ -68,7 +68,7 @@ UNPACKAGES: LSF_Server: base: [ lsf-conf, lsf-ego-server, lsf-server, lsf-integrations, lsf-devel, lsf-man-pages ,ibm_smpi_lic_s, ibm_smpi ,lsf-pmpi-hpc, lsf-nfs-support ] smpi_misc: [ ibm_smpi-devel, ibm_smpi_gpusupport, ibm_smpi-libgpump, ibm_smpi-pami_devel, ibm_spindle, ibm_smpi_mpipsupport ] - LSF_Client: + LSF_Client: base: [ lsf-ls-client, lsf-man-pages ] pm: [ lsf-pm-client ] dm: [ lsf-data-mgr ] @@ -102,7 +102,7 @@ PERF_WORK_DIR: "{{ EXT_TOP }}/{{ PERF_TOP_FOLDER }}/work" GUI_TOP: "{{ EXT_TOP }}/{{ GUI_TOP_FOLDER }}" GUI_CONF_DIR: "{{ EXT_TOP }}/{{ GUI_TOP_FOLDER }}/conf" GUI_WORK_DIR: "{{ EXT_TOP }}/{{ GUI_TOP_FOLDER }}/work" -# Elasticsearch +# Elasticsearch ELK_TOP: {{ ELK_TOP }} ES_TOP: "{{ ELK_TOP }}/elasticsearch" # Logstash @@ -138,4 +138,4 @@ ls_entitlement_file : [ ] # swidtag files lsf_master_swidtag_file : [ ibm.com_IBM_Spectrum_LSF_Suite_for_HPC-10.2.0.swidtag ] -lsf_server_swidtag_file : ibm.com_IBM_Spectrum_LSF_Suite_Servers_Capacity-10.2.0.swidtag \ No newline at end of file +lsf_server_swidtag_file : ibm.com_IBM_Spectrum_LSF_Suite_Servers_Capacity-10.2.0.swidtag diff --git a/modules/ansible-roles/roles/lsf/templates/lsf-config.j2 b/modules/ansible-roles/roles/lsf/templates/lsf-config.j2 index 1aee163c..48ff571c 100644 --- a/modules/ansible-roles/roles/lsf/templates/lsf-config.j2 +++ b/modules/ansible-roles/roles/lsf/templates/lsf-config.j2 @@ -36,7 +36,7 @@ LSF: # to permit upgrading of LSF Masters independently \ # of the rest of the cluster. This is controlled \ # by the LSF_MASTERS_ON_LOCAL parameter in: \ - # /opt/ibm/lsf_installer/playbook/group_vars/all + # /opt/ibm/lsf_installer/playbook/group_vars/all NFS_install_dir: {{ NFS_install_dir }} # JDBC_string is the connection string for the @@ -45,14 +45,14 @@ LSF: # lsf-inventory file, the specified value is set # automatically. If you do not specify a host in the DB_Host role # in the lsf-inventory file, you must define a JDBC_string. 
- # The external database must be created + # The external database must be created # by sourcing the database schema files: # - Copy the /opt/ibm/lsf_installer/DBschema/MySQL/*.sql files on the deployer machine to the remote database host # - Create a database that is named pac - # - Create a database user who is named pacuser + # - Create a database user who is named pacuser # - Grant this user all privileges on the pac database # - Run the schema files for the database - # For more information, see + # For more information, see # "https://www.ibm.com/support/knowledgecenter/SSZRJV_10.2.0/install_guide/pac_install_config.html". # Set the following environment variables on the deployer machine: # - JDBC_USER @@ -92,15 +92,15 @@ LSF: # # LSF has the following default port number values listed below. # All are used for TCP, except LSF_LIM_PORT which also uses UDP. - #LSF_LIM_PORT: 7869 - #LSF_RES_PORT: 6878 - #LSB_SBD_PORT: 6882 - #LSB_MBD_PORT: 6881 - #LSB_QUERY_PORT: 6891 + #LSF_LIM_PORT: 7869 + #LSF_RES_PORT: 6878 + #LSB_SBD_PORT: 6882 + #LSB_MBD_PORT: 6881 + #LSB_QUERY_PORT: 6891 #LSF_DATA_PORT: 9998 - #EGO_KD_PORT: 7870 - #EGO_PEM_PORT: 7871 - #ESC_PORT: 7872 + #EGO_KD_PORT: 7870 + #EGO_PEM_PORT: 7871 + #ESC_PORT: 7872 # # Specify either nine individual port numbers or # a range with the starting port number followed by '-'. @@ -116,11 +116,11 @@ LSF: # (Optional) Private_IPv4_Range allows you to specify a range of private IPv4 # addresses used by LSF hosts. - # This parameter can be used in scenarios where the LSF master host has both + # This parameter can be used in scenarios where the LSF master host has both # public and private IP addresses, but the compute nodes have only private IP - # addresses. - # Specify a range of IPv4 addresses in the form of a Classless Inter-Domain - # Routing (CIDR) block. + # addresses. + # Specify a range of IPv4 addresses in the form of a Classless Inter-Domain + # Routing (CIDR) block. # For example, Private_IPv4_Range: 10.10.99.0/8 #Private_IPv4_Range: none @@ -151,12 +151,12 @@ LSF: # # The directory path must point to the top directory of Logstash on a host in the GUI_Hosts role. # For example, LOGSTASH_TOP: /path/to/logstash/top/directory - # + # # If the Logstash path.settings is not set to LOGSTASH_TOP/config, # make a symbolic link for LOGSTASH_TOP/config to the Logstash path.settings directory. # For example, # ln -s /etc/logstash LOGSTASH_TOP/config # #LOGSTASH_TOP: none - -# END OF LSF-CONFIG.YML \ No newline at end of file + +# END OF LSF-CONFIG.YML diff --git a/modules/ansible-roles/roles/lsf/templates/lsf-inventory.j2 b/modules/ansible-roles/roles/lsf/templates/lsf-inventory.j2 index 51039957..557b8e7b 100644 --- a/modules/ansible-roles/roles/lsf/templates/lsf-inventory.j2 +++ b/modules/ansible-roles/roles/lsf/templates/lsf-inventory.j2 @@ -44,13 +44,13 @@ localhost {{ host }} {% endfor %} -# DB_HOST is optional, and is the machine that hosts the database -# used by the Application Center component in LSF Suite. -# However, this database is not configured for High Availability (HA). +# DB_HOST is optional, and is the machine that hosts the database +# used by the Application Center component in LSF Suite. +# However, this database is not configured for High Availability (HA). 
# To enable HA for this database, manually create the database using MariaDB -# and configure it to be HA-ready, then set the JDBC_string parameter +# and configure it to be HA-ready, then set the JDBC_string parameter # in the /opt/ibm/lsf_installer/playbook/lsf-config.yml file to specify the database connection. [DB_Host] {% for host in db_hosts %} {{ host }} -{% endfor %} \ No newline at end of file +{% endfor %} diff --git a/modules/ansible-roles/roles/lsf/vars/main.yml b/modules/ansible-roles/roles/lsf/vars/main.yml index 1927972f..360951cf 100644 --- a/modules/ansible-roles/roles/lsf/vars/main.yml +++ b/modules/ansible-roles/roles/lsf/vars/main.yml @@ -3,4 +3,4 @@ # Static Variables inventory_path: "/opt/ibm/lsf_installer/playbook/" -lsf_dir: "{{ HA_shared_dir | basename }}" \ No newline at end of file +lsf_dir: "{{ HA_shared_dir | basename }}" diff --git a/modules/ansible-roles/roles/vpc_fileshare_configure/handlers/main.yml b/modules/ansible-roles/roles/vpc_fileshare_configure/handlers/main.yml index b8a75d74..85aa54c4 100644 --- a/modules/ansible-roles/roles/vpc_fileshare_configure/handlers/main.yml +++ b/modules/ansible-roles/roles/vpc_fileshare_configure/handlers/main.yml @@ -1,3 +1,3 @@ --- - name: Mount NFS - command: mount -a \ No newline at end of file + command: mount -a diff --git a/modules/ansible-roles/roles/vpc_fileshare_configure/tasks/main.yml b/modules/ansible-roles/roles/vpc_fileshare_configure/tasks/main.yml index f8f3dbde..3d999267 100644 --- a/modules/ansible-roles/roles/vpc_fileshare_configure/tasks/main.yml +++ b/modules/ansible-roles/roles/vpc_fileshare_configure/tasks/main.yml @@ -1,4 +1,4 @@ --- # tasks file for fileshare mount -- import_tasks: vpc_fileshare_configure.yml \ No newline at end of file +- import_tasks: vpc_fileshare_configure.yml diff --git a/modules/ansible-roles/roles/vpc_fileshare_configure/tasks/vpc_fileshare_configure.yml b/modules/ansible-roles/roles/vpc_fileshare_configure/tasks/vpc_fileshare_configure.yml index aa991f49..d4753e52 100644 --- a/modules/ansible-roles/roles/vpc_fileshare_configure/tasks/vpc_fileshare_configure.yml +++ b/modules/ansible-roles/roles/vpc_fileshare_configure/tasks/vpc_fileshare_configure.yml @@ -33,4 +33,4 @@ command: ls -ltr /mnt/lsf register: ls_output changed_when: false - failed_when: ls_output.rc != 0 \ No newline at end of file + failed_when: ls_output.rc != 0 diff --git a/modules/deployer/datasource.tf b/modules/deployer/datasource.tf index 20bd67a7..e55961d3 100644 --- a/modules/deployer/datasource.tf +++ b/modules/deployer/datasource.tf @@ -1,4 +1,4 @@ -data "ibm_resource_group" "itself" { +data "ibm_resource_group" "resource_group" { name = var.resource_group } diff --git a/modules/deployer/locals.tf b/modules/deployer/locals.tf index 2768edcc..ec720df1 100644 --- a/modules/deployer/locals.tf +++ b/modules/deployer/locals.tf @@ -35,16 +35,16 @@ locals { bastion_ssh_keys = [for name in var.ssh_keys : data.ibm_is_ssh_key.bastion[name].id] # Scale static configs - scale_cloud_deployer_path = "/opt/IBM/ibm-spectrumscale-cloud-deploy" - scale_cloud_infra_repo_url = "https://github.com/IBM/ibm-spectrum-scale-install-infra" - scale_cloud_infra_repo_name = "ibm-spectrum-scale-install-infra" - scale_cloud_infra_repo_tag = "ibmcloud_v2.6.0" + # scale_cloud_deployer_path = "/opt/IBM/ibm-spectrumscale-cloud-deploy" + # scale_cloud_infra_repo_url = "https://github.com/IBM/ibm-spectrum-scale-install-infra" + # scale_cloud_infra_repo_name = "ibm-spectrum-scale-install-infra" + # scale_cloud_infra_repo_tag = 
"ibmcloud_v2.6.0" # LSF static configs - lsf_cloud_deployer_path = "/opt/ibm/lsf" + # lsf_cloud_deployer_path = "/opt/ibm/lsf" # Region and Zone calculations - region = join("-", slice(split("-", var.zones[0]), 0, 2)) + # region = join("-", slice(split("-", var.zones[0]), 0, 2)) # Security group rules # TODO: Fix SG rules @@ -68,7 +68,7 @@ locals { # Derived configs # VPC - resource_group_id = data.ibm_resource_group.itself.id + resource_group_id = data.ibm_resource_group.resource_group.id # Subnets bastion_subnets = var.bastion_subnets @@ -79,7 +79,7 @@ locals { compute_interfaces = local.vsi_interfaces[0] compute_dns_domain = var.dns_domain_names["compute"] - management_instance_count = sum(var.management_instances[*]["count"]) - static_compute_instance_count = sum(var.static_compute_instances[*]["count"]) - enable_compute = local.management_instance_count > 0 || local.static_compute_instance_count > 0 -} \ No newline at end of file + # management_instance_count = sum(var.management_instances[*]["count"]) + # static_compute_instance_count = sum(var.static_compute_instances[*]["count"]) + # enable_compute = local.management_instance_count > 0 || local.static_compute_instance_count > 0 +} diff --git a/modules/deployer/outputs.tf b/modules/deployer/outputs.tf index 423bc77e..18c4d484 100644 --- a/modules/deployer/outputs.tf +++ b/modules/deployer/outputs.tf @@ -1,9 +1,11 @@ output "bastion_vsi_data" { - value = module.bastion_vsi[*] + description = "Bastion VSI data" + value = module.bastion_vsi[*] } output "deployer_vsi_data" { - value = module.deployer_vsi[*] + description = "Deployer VSI data" + value = module.deployer_vsi[*] } output "bastion_fip" { diff --git a/modules/deployer/variables.tf b/modules/deployer/variables.tf index 1b89ae38..bd2ac970 100644 --- a/modules/deployer/variables.tf +++ b/modules/deployer/variables.tf @@ -22,10 +22,10 @@ variable "prefix" { } } -variable "zones" { - description = "Region where VPC will be created. To find your VPC region, use `ibmcloud is regions` command to find available regions." - type = list(string) -} +# variable "zones" { +# description = "Region where VPC will be created. To find your VPC region, use `ibmcloud is regions` command to find available regions." +# type = list(string) +# } ############################################################################## # VPC Variables @@ -131,27 +131,27 @@ variable "skip_iam_authorization_policy" { description = "Set to false if authorization policy is required for VPC block storage volumes to access kms. This can be set to true if authorization policy already exists. For more information on how to create authorization policy manually, see [creating authorization policies for block storage volume](https://cloud.ibm.com/docs/vpc?topic=vpc-block-s2s-auth&interface=ui)." } -variable "management_instances" { - type = list( - object({ - profile = string - count = number - image = string - }) - ) - description = "Number of instances to be launched for management." -} - -variable "static_compute_instances" { - type = list( - object({ - profile = string - count = number - image = string - }) - ) - description = "Total Number of instances to be launched for compute cluster." -} +# variable "management_instances" { +# type = list( +# object({ +# profile = string +# count = number +# image = string +# }) +# ) +# description = "Number of instances to be launched for management." 
+# } + +# variable "static_compute_instances" { +# type = list( +# object({ +# profile = string +# count = number +# image = string +# }) +# ) +# description = "Total Number of instances to be launched for compute cluster." +# } variable "dns_domain_names" { type = object({ diff --git a/modules/dns/datasource.tf b/modules/dns/datasource.tf new file mode 100644 index 00000000..a1511959 --- /dev/null +++ b/modules/dns/datasource.tf @@ -0,0 +1,9 @@ +data "ibm_dns_zones" "conditional" { + count = var.dns_instance_id != null ? 1 : 0 + instance_id = var.dns_instance_id +} + +data "ibm_dns_zones" "dns_zones" { + instance_id = local.dns_instance_id + depends_on = [ibm_dns_zone.dns_zone] +} diff --git a/modules/dns/locals.tf b/modules/dns/locals.tf new file mode 100644 index 00000000..880cc870 --- /dev/null +++ b/modules/dns/locals.tf @@ -0,0 +1,9 @@ +locals { + dns_domain_names = flatten([setsubtract(var.dns_domain_names == null ? [] : var.dns_domain_names, flatten(data.ibm_dns_zones.conditional[*].dns_zones[*]["name"]))]) + + dns_zone_maps = [for zone in data.ibm_dns_zones.dns_zones.dns_zones : { + (zone["name"]) = zone["zone_id"] + } if contains(var.dns_domain_names, zone["name"])] + + dns_instance_id = var.dns_instance_id == null ? ibm_resource_instance.resource_instance[0].guid : var.dns_instance_id +} diff --git a/modules/dns/main.tf b/modules/dns/main.tf index ba5fcd73..9f331174 100644 --- a/modules/dns/main.tf +++ b/modules/dns/main.tf @@ -1,4 +1,4 @@ -resource "ibm_resource_instance" "itself" { +resource "ibm_resource_instance" "resource_instance" { count = var.dns_instance_id == null ? 1 : 0 name = format("%s-dns-instance", var.prefix) resource_group_id = var.resource_group_id @@ -7,11 +7,7 @@ resource "ibm_resource_instance" "itself" { plan = "standard-dns" } -locals { - dns_instance_id = var.dns_instance_id == null ? ibm_resource_instance.itself[0].guid : var.dns_instance_id -} - -resource "ibm_dns_custom_resolver" "itself" { +resource "ibm_dns_custom_resolver" "dns_custom_resolver" { count = var.dns_custom_resolver_id == null ? 1 : 0 name = format("%s-custom-resolver", var.prefix) instance_id = local.dns_instance_id @@ -26,33 +22,13 @@ resource "ibm_dns_custom_resolver" "itself" { } } -data "ibm_dns_zones" "conditional" { - count = var.dns_instance_id != null ? 1 : 0 - instance_id = var.dns_instance_id -} - -locals { - dns_domain_names = flatten([setsubtract(var.dns_domain_names == null ? [] : var.dns_domain_names, flatten(data.ibm_dns_zones.conditional[*].dns_zones[*]["name"]))]) -} - -resource "ibm_dns_zone" "itself" { +resource "ibm_dns_zone" "dns_zone" { count = length(local.dns_domain_names) instance_id = local.dns_instance_id name = local.dns_domain_names[count.index] } -data "ibm_dns_zones" "itself" { - instance_id = local.dns_instance_id - depends_on = [ibm_dns_zone.itself] -} - -locals { - dns_zone_maps = [for zone in data.ibm_dns_zones.itself.dns_zones : { - (zone["name"]) = zone["zone_id"] - } if contains(var.dns_domain_names, zone["name"])] -} - -resource "ibm_dns_permitted_network" "itself" { +resource "ibm_dns_permitted_network" "dns_permitted_network" { count = length(var.dns_domain_names) instance_id = local.dns_instance_id vpc_crn = var.vpc_crn diff --git a/modules/dns/outputs.tf b/modules/dns/outputs.tf index 39022f0c..a137bb52 100644 --- a/modules/dns/outputs.tf +++ b/modules/dns/outputs.tf @@ -5,7 +5,7 @@ output "dns_instance_id" { output "dns_custom_resolver_id" { description = "DNS custom resolver ID" - value = var.dns_custom_resolver_id == null ? 
one(ibm_dns_custom_resolver.itself[*].id) : var.dns_custom_resolver_id + value = var.dns_custom_resolver_id == null ? one(ibm_dns_custom_resolver.dns_custom_resolver[*].id) : var.dns_custom_resolver_id } output "dns_zone_maps" { diff --git a/modules/dns_record/datasource.tf b/modules/dns_record/datasource.tf new file mode 100644 index 00000000..748dbfb8 --- /dev/null +++ b/modules/dns_record/datasource.tf @@ -0,0 +1,3 @@ +data "ibm_dns_zones" "dns_zones" { + instance_id = var.dns_instance_id +} diff --git a/modules/dns_record/locals.tf b/modules/dns_record/locals.tf new file mode 100644 index 00000000..ac7ff13c --- /dev/null +++ b/modules/dns_record/locals.tf @@ -0,0 +1,5 @@ +locals { + dns_domain_name = [ + for zone in data.ibm_dns_zones.dns_zones.dns_zones : zone["name"] if zone["zone_id"] == var.dns_zone_id + ] +} diff --git a/modules/dns_record/main.tf b/modules/dns_record/main.tf index aa84e997..8c9c89e2 100644 --- a/modules/dns_record/main.tf +++ b/modules/dns_record/main.tf @@ -1,13 +1,3 @@ -data "ibm_dns_zones" "itself" { - instance_id = var.dns_instance_id -} - -locals { - dns_domain_name = [ - for zone in data.ibm_dns_zones.itself.dns_zones : zone["name"] if zone["zone_id"] == var.dns_zone_id - ] -} - resource "ibm_dns_resource_record" "a" { count = length(var.dns_records) instance_id = var.dns_instance_id diff --git a/modules/file_storage/locals.tf b/modules/file_storage/locals.tf deleted file mode 100644 index c31aeaa7..00000000 --- a/modules/file_storage/locals.tf +++ /dev/null @@ -1,4 +0,0 @@ -locals { - # Region and Zone calculations - region = join("-", slice(split("-", var.zone), 0, 2)) -} diff --git a/modules/file_storage/outputs.tf b/modules/file_storage/outputs.tf index 7f265e54..ddf923c5 100644 --- a/modules/file_storage/outputs.tf +++ b/modules/file_storage/outputs.tf @@ -8,5 +8,5 @@ output "mount_path" { output "name_mount_path_map" { description = "Mount path name and its path map" - value = { for mount_details in flatten([ibm_is_share_mount_target.share_target_vpc, ibm_is_share_mount_target.share_target_sg]) : split("-", mount_details.name)[length(split("-", mount_details.name)) - 4] => mount_details.mount_path } -} \ No newline at end of file + value = { for mount_details in flatten([ibm_is_share_mount_target.share_target_vpc, ibm_is_share_mount_target.share_target_sg]) : split("-", mount_details.name)[length(split("-", mount_details.name)) - 4] => mount_details.mount_path } +} diff --git a/modules/inventory/main.tf b/modules/inventory/main.tf index b63bf85d..bf30438a 100644 --- a/modules/inventory/main.tf +++ b/modules/inventory/main.tf @@ -1,9 +1,9 @@ -# resource "local_sensitive_file" "itself" { +# resource "local_sensitive_file" "mount_path_file" { # content = join("\n", var.hosts,) # filename = var.inventory_path # } -resource "local_sensitive_file" "itself" { +resource "local_sensitive_file" "mount_path_file" { content = < [terraform](#requirement\_terraform) | >= 1.3 | -| [ansible](#requirement\_ansible) | ~> 1.3.0 | -| [ibm](#requirement\_ibm) | >= 1.68.1, < 2.0.0 | - -## Providers - -| Name | Version | -|------|---------| -| [ibm](#provider\_ibm) | 1.70.1 | - -## Modules - -| Name | Source | Version | -|------|--------|---------| -| [lsf](#module\_lsf) | ./../.. 
| n/a |
-
-## Resources
-
-| Name | Type |
-|------|------|
-| [ibm_is_vpc.itself](https://registry.terraform.io/providers/IBM-Cloud/ibm/latest/docs/data-sources/is_vpc) | data source |
-
-## Inputs
-
-| Name | Description | Type | Default | Required |
-|------|-------------|------|---------|:--------:|
-| [allowed\_cidr](#input\_allowed\_cidr) | Network CIDR to access the VPC. This is used to manage network ACL rules for accessing the cluster. | `list(string)` | n/a | yes |
-| [bastion\_ssh\_keys](#input\_bastion\_ssh\_keys) | The key pair to use to access the bastion host. | `list(string)` | `null` | no |
-| [bastion\_subnets\_cidr](#input\_bastion\_subnets\_cidr) | Subnet CIDR block to launch the bastion host. | `list(string)` | <pre>[<br>  "10.0.0.0/24"<br>]</pre> | no |
-| [client\_instances](#input\_client\_instances) | Number of instances to be launched for client. | <pre>list(<br>  object({<br>    profile = string<br>    count = number<br>    image = string<br>  })<br>)</pre> | <pre>[<br>  {<br>    "count": 2,<br>    "image": "ibm-redhat-8-10-minimal-amd64-2",<br>    "profile": "cx2-2x4"<br>  }<br>]</pre> | no |
-| [client\_ssh\_keys](#input\_client\_ssh\_keys) | The key pair to use to launch the client host. | `list(string)` | `null` | no |
-| [client\_subnets\_cidr](#input\_client\_subnets\_cidr) | Subnet CIDR block to launch the client host. | `list(string)` | <pre>[<br>  "10.10.10.0/24",<br>  "10.20.10.0/24",<br>  "10.30.10.0/24"<br>]</pre> | no |
-| [compute\_gui\_password](#input\_compute\_gui\_password) | Password for compute cluster GUI | `string` | `"hpc@IBMCloud"` | no |
-| [compute\_gui\_username](#input\_compute\_gui\_username) | GUI user to perform system management and monitoring tasks on compute cluster. | `string` | `"admin"` | no |
-| [compute\_ssh\_keys](#input\_compute\_ssh\_keys) | The key pair to use to launch the compute host. | `list(string)` | `null` | no |
-| [compute\_subnets\_cidr](#input\_compute\_subnets\_cidr) | Subnet CIDR block to launch the compute cluster host. | `list(string)` | <pre>[<br>  "10.10.20.0/24",<br>  "10.20.20.0/24",<br>  "10.30.20.0/24"<br>]</pre> | no |
-| [cos\_instance\_name](#input\_cos\_instance\_name) | Exiting COS instance name | `string` | `null` | no |
-| [deployer\_instance\_profile](#input\_deployer\_instance\_profile) | Deployer should be only used for better deployment performance | `string` | `"mx2-4x32"` | no |
-| [dns\_custom\_resolver\_id](#input\_dns\_custom\_resolver\_id) | IBM Cloud DNS custom resolver id. | `string` | `null` | no |
-| [dns\_domain\_names](#input\_dns\_domain\_names) | IBM Cloud HPC DNS domain names. | <pre>object({<br>  compute = string<br>  storage = string<br>  protocol = string<br>})</pre> | <pre>{<br>  "compute": "comp.com",<br>  "protocol": "ces.com",<br>  "storage": "strg.com"<br>}</pre> | no |
-| [dns\_instance\_id](#input\_dns\_instance\_id) | IBM Cloud HPC DNS service instance id. | `string` | `null` | no |
-| [dynamic\_compute\_instances](#input\_dynamic\_compute\_instances) | MaxNumber of instances to be launched for compute cluster. | <pre>list(<br>  object({<br>    profile = string<br>    count = number<br>    image = string<br>  })<br>)</pre> | <pre>[<br>  {<br>    "count": 1024,<br>    "image": "ibm-redhat-8-10-minimal-amd64-2",<br>    "profile": "cx2-2x4"<br>  }<br>]</pre> | no |
-| [enable\_atracker](#input\_enable\_atracker) | Enable Activity tracker | `bool` | `true` | no |
-| [enable\_bastion](#input\_enable\_bastion) | The solution supports multiple ways to connect to your HPC cluster for example, using bastion node, via VPN or direct connection. If connecting to the HPC cluster via VPN or direct connection, set this value to false. | `bool` | `true` | no |
-| [enable\_cos\_integration](#input\_enable\_cos\_integration) | Integrate COS with HPC solution | `bool` | `true` | no |
-| [enable\_deployer](#input\_enable\_deployer) | Deployer should be only used for better deployment performance | `bool` | `false` | no |
-| [enable\_vpc\_flow\_logs](#input\_enable\_vpc\_flow\_logs) | Enable Activity tracker | `bool` | `true` | no |
-| [enable\_vpn](#input\_enable\_vpn) | The solution supports multiple ways to connect to your HPC cluster for example, using bastion node, via VPN or direct connection. If connecting to the HPC cluster via VPN, set this value to true. | `bool` | `false` | no |
-| [file\_shares](#input\_file\_shares) | Custom file shares to access shared storage | <pre>list(<br>  object({<br>    mount_path = string,<br>    size = number,<br>    iops = number<br>  })<br>)</pre> | <pre>[<br>  {<br>    "iops": 1000,<br>    "mount_path": "/mnt/binaries",<br>    "size": 100<br>  },<br>  {<br>    "iops": 1000,<br>    "mount_path": "/mnt/data",<br>    "size": 100<br>  }<br>]</pre> | no |
-| [hpcs\_instance\_name](#input\_hpcs\_instance\_name) | Hyper Protect Crypto Service instance | `string` | `null` | no |
-| [ibm\_customer\_number](#input\_ibm\_customer\_number) | Comma-separated list of the IBM Customer Number(s) (ICN) that is used for the Bring Your Own License (BYOL) entitlement check. For more information on how to find your ICN, see [What is my IBM Customer Number (ICN)?](https://www.ibm.com/support/pages/what-my-ibm-customer-number-icn). | `string` | n/a | yes |
-| [ibmcloud\_api\_key](#input\_ibmcloud\_api\_key) | IBM Cloud API Key that will be used for authentication in scripts run in this module. Only required if certain options are required. | `string` | n/a | yes |
-| [key\_management](#input\_key\_management) | null/key\_protect/hs\_crypto | `string` | `"key_protect"` | no |
-| [management\_instances](#input\_management\_instances) | Number of instances to be launched for management. | <pre>list(<br>  object({<br>    profile = string<br>    count = number<br>    image = string<br>  })<br>)</pre> | <pre>[<br>  {<br>    "count": 2,<br>    "image": "ibm-redhat-8-10-minimal-amd64-2",<br>    "profile": "cx2-2x4"<br>  }<br>]</pre> | no |
-| [network\_cidr](#input\_network\_cidr) | Network CIDR for the VPC. This is used to manage network ACL rules for cluster provisioning. | `string` | `"10.0.0.0/8"` | no |
-| [nsd\_details](#input\_nsd\_details) | Storage scale NSD details | <pre>list(<br>  object({<br>    profile = string<br>    capacity = optional(number)<br>    iops = optional(number)<br>  })<br>)</pre> | <pre>[<br>  {<br>    "capacity": 100,<br>    "iops": 1000,<br>    "profile": "custom"<br>  }<br>]</pre> | no |
-| [override](#input\_override) | Override default values with custom JSON template. This uses the file `override.json` to allow users to create a fully customized environment. | `bool` | `false` | no |
-| [override\_json\_string](#input\_override\_json\_string) | Override default values with a JSON object. Any JSON other than an empty string overrides other configuration changes. | `string` | `null` | no |
-| [placement\_strategy](#input\_placement\_strategy) | VPC placement groups to create (null / host\_spread / power\_spread) | `string` | `null` | no |
-| [prefix](#input\_prefix) | A unique identifier for resources. Must begin with a letter and end with a letter or number. This prefix will be prepended to any resources provisioned by this template. Prefixes must be 16 or fewer characters. | `string` | `"lsf"` | no |
-| [protocol\_instances](#input\_protocol\_instances) | Number of instances to be launched for protocol hosts. | <pre>list(<br>  object({<br>    profile = string<br>    count = number<br>    image = string<br>  })<br>)</pre> | <pre>[<br>  {<br>    "count": 2,<br>    "image": "ibm-redhat-8-10-minimal-amd64-2",<br>    "profile": "bx2-2x8"<br>  }<br>]</pre> | no |
-| [protocol\_subnets\_cidr](#input\_protocol\_subnets\_cidr) | Subnet CIDR block to launch the storage cluster host. | `list(string)` | <pre>[<br>  "10.10.40.0/24",<br>  "10.20.40.0/24",<br>  "10.30.40.0/24"<br>]</pre> | no |
-| [resource\_group](#input\_resource\_group) | String describing resource groups to create or reference | `string` | `"Default"` | no |
-| [ssh\_keys](#input\_ssh\_keys) | The key pair to use to access the HPC cluster. | `list(string)` | `null` | no |
-| [static\_compute\_instances](#input\_static\_compute\_instances) | Min Number of instances to be launched for compute cluster. | <pre>list(<br>  object({<br>    profile = string<br>    count = number<br>    image = string<br>  })<br>)</pre> | <pre>[<br>  {<br>    "count": 1,<br>    "image": "ibm-redhat-8-10-minimal-amd64-2",<br>    "profile": "cx2-2x4"<br>  }<br>]</pre> | no |
-| [storage\_gui\_password](#input\_storage\_gui\_password) | Password for storage cluster GUI | `string` | `"hpc@IBMCloud"` | no |
-| [storage\_gui\_username](#input\_storage\_gui\_username) | GUI user to perform system management and monitoring tasks on storage cluster. | `string` | `"admin"` | no |
-| [storage\_instances](#input\_storage\_instances) | Number of instances to be launched for storage cluster. | <pre>list(<br>  object({<br>    profile = string<br>    count = number<br>    image = string<br>    filesystem_name = optional(string)<br>  })<br>)</pre> | <pre>[<br>  {<br>    "count": 2,<br>    "filesystem_name": "fs1",<br>    "image": "ibm-redhat-8-10-minimal-amd64-2",<br>    "profile": "bx2-2x8"<br>  }<br>]</pre> | no |
-| [storage\_ssh\_keys](#input\_storage\_ssh\_keys) | The key pair to use to launch the storage cluster host. | `list(string)` | `null` | no |
-| [storage\_subnets\_cidr](#input\_storage\_subnets\_cidr) | Subnet CIDR block to launch the storage cluster host. | `list(string)` | <pre>[<br>  "10.10.30.0/24",<br>  "10.20.30.0/24",<br>  "10.30.30.0/24"<br>]</pre>
| no | -| [vpc](#input\_vpc) | Name of an existing VPC in which the cluster resources will be deployed. If no value is given, then a new VPC will be provisioned for the cluster. [Learn more](https://cloud.ibm.com/docs/vpc) | `string` | `null` | no | -| [vpn\_peer\_address](#input\_vpn\_peer\_address) | The peer public IP address to which the VPN will be connected. | `string` | `null` | no | -| [vpn\_peer\_cidr](#input\_vpn\_peer\_cidr) | The peer CIDRs (e.g., 192.168.0.0/24) to which the VPN will be connected. | `list(string)` | `null` | no | -| [vpn\_preshared\_key](#input\_vpn\_preshared\_key) | The pre-shared key for the VPN. | `string` | `null` | no | -| [zones](#input\_zones) | Region where VPC will be created. To find your VPC region, use `ibmcloud is regions` command to find available regions. | `list(string)` | n/a | yes | - -## Outputs - -No outputs. diff --git a/solutions/custom/catalogValidationValues.json.template b/solutions/custom/catalogValidationValues.json.template index a5642a41..2aea8d9e 100644 --- a/solutions/custom/catalogValidationValues.json.template +++ b/solutions/custom/catalogValidationValues.json.template @@ -2,6 +2,6 @@ "ibmcloud_api_key": $VALIDATION_APIKEY, "prefix": $PREFIX, "zones": "[\"ca-tor-1\"]", - "resource_group": "geretain-hpc-rg", + "existing_resource_group": "geretain-hpc-rg", "ssh_keys": "[\"geretain-hpc-ssh-key\"]" } diff --git a/solutions/custom/datasource.tf b/solutions/custom/datasource.tf deleted file mode 100644 index f6fada1e..00000000 --- a/solutions/custom/datasource.tf +++ /dev/null @@ -1,22 +0,0 @@ -# Future use -/* -data "ibm_is_region" "itself" { - name = local.region -} - -data "ibm_is_zone" "itself" { - name = var.zones[0] - region = data.ibm_is_region.itself.name -} -*/ - -data "ibm_is_vpc" "itself" { - count = var.vpc == null ? 
0 : 1 - name = var.vpc -} -/* -data "ibm_is_subnet" "itself" { - count = length(local.subnets) - identifier = local.subnets[count.index]["id"] -} -*/ diff --git a/solutions/custom/locals.tf b/solutions/custom/locals.tf index dbb3b8ac..bbbd0782 100644 --- a/solutions/custom/locals.tf +++ b/solutions/custom/locals.tf @@ -20,7 +20,7 @@ locals { locals { config = { - resource_group = var.resource_group + existing_resource_group = var.existing_resource_group allowed_cidr = var.allowed_cidr deployer_instance_profile = var.deployer_instance_profile ssh_keys = var.ssh_keys @@ -60,7 +60,7 @@ locals { storage_instances = var.storage_instances storage_ssh_keys = var.storage_ssh_keys storage_subnets_cidr = var.storage_subnets_cidr - vpc = var.vpc + vpc_name = var.vpc_name vpn_peer_address = var.vpn_peer_address vpn_peer_cidr = var.vpn_peer_cidr vpn_preshared_key = var.vpn_preshared_key @@ -71,7 +71,7 @@ locals { # Compile Environment for Config output locals { env = { - resource_group = lookup(local.override[local.override_type], "resource_group", local.config.resource_group) + existing_resource_group = lookup(local.override[local.override_type], "existing_resource_group", local.config.existing_resource_group) allowed_cidr = lookup(local.override[local.override_type], "allowed_cidr", local.config.allowed_cidr) deployer_instance_profile = lookup(local.override[local.override_type], "deployer_instance_profile", local.config.deployer_instance_profile) ssh_keys = lookup(local.override[local.override_type], "ssh_keys", local.config.ssh_keys) @@ -111,7 +111,7 @@ locals { storage_instances = lookup(local.override[local.override_type], "storage_instances", local.config.storage_instances) storage_ssh_keys = lookup(local.override[local.override_type], "storage_ssh_keys", local.config.storage_ssh_keys) storage_subnets_cidr = lookup(local.override[local.override_type], "storage_subnets_cidr", local.config.storage_subnets_cidr) - vpc = lookup(local.override[local.override_type], "vpc", local.config.vpc) + vpc_name = lookup(local.override[local.override_type], "vpc_name", local.config.vpc_name) vpn_peer_address = lookup(local.override[local.override_type], "vpn_peer_address", local.config.vpn_peer_address) vpn_peer_cidr = lookup(local.override[local.override_type], "vpn_peer_cidr", local.config.vpn_peer_cidr) vpn_preshared_key = lookup(local.override[local.override_type], "vpn_preshared_key", local.config.vpn_preshared_key) diff --git a/solutions/custom/main.tf b/solutions/custom/main.tf index 65524926..867ea215 100644 --- a/solutions/custom/main.tf +++ b/solutions/custom/main.tf @@ -1,17 +1,14 @@ module "custom" { source = "./../.." 
- scheduler = var.scheduler ibm_customer_number = var.ibm_customer_number zones = var.zones allowed_cidr = var.allowed_cidr prefix = local.env.prefix ssh_keys = local.env.ssh_keys - resource_group = local.env.resource_group + existing_resource_group = local.env.existing_resource_group deployer_instance_profile = local.env.deployer_instance_profile bastion_ssh_keys = local.env.bastion_ssh_keys bastion_subnets_cidr = local.env.bastion_subnets_cidr - compute_gui_password = local.env.compute_gui_password - compute_gui_username = local.env.compute_gui_username compute_ssh_keys = local.env.compute_ssh_keys compute_subnets_cidr = local.env.compute_subnets_cidr cos_instance_name = local.env.cos_instance_name @@ -26,11 +23,9 @@ module "custom" { enable_vpc_flow_logs = local.env.enable_vpc_flow_logs enable_vpn = local.env.enable_vpn file_shares = local.env.file_shares - hpcs_instance_name = local.env.hpcs_instance_name key_management = local.env.key_management client_instances = local.env.client_instances client_ssh_keys = local.env.client_ssh_keys - client_subnets_cidr = local.env.client_subnets_cidr management_instances = local.env.management_instances network_cidr = local.env.network_cidr nsd_details = local.env.nsd_details @@ -38,13 +33,19 @@ module "custom" { protocol_instances = local.env.protocol_instances protocol_subnets_cidr = local.env.protocol_subnets_cidr static_compute_instances = local.env.static_compute_instances - storage_gui_password = local.env.storage_gui_password - storage_gui_username = local.env.storage_gui_username storage_instances = local.env.storage_instances storage_ssh_keys = local.env.storage_ssh_keys storage_subnets_cidr = local.env.storage_subnets_cidr - vpc = local.env.vpc + vpc_name = local.env.vpc_name vpn_peer_address = local.env.vpn_peer_address vpn_peer_cidr = local.env.vpn_peer_cidr vpn_preshared_key = local.env.vpn_preshared_key + + # scheduler = var.scheduler + # compute_gui_password = local.env.compute_gui_password + # compute_gui_username = local.env.compute_gui_username + # client_subnets_cidr = local.env.client_subnets_cidr + # hpcs_instance_name = local.env.hpcs_instance_name + # storage_gui_password = local.env.storage_gui_password + # storage_gui_username = local.env.storage_gui_username } diff --git a/solutions/custom/outputs.tf b/solutions/custom/outputs.tf index 36ac8c82..f178ca4b 100644 --- a/solutions/custom/outputs.tf +++ b/solutions/custom/outputs.tf @@ -1,3 +1,4 @@ output "custom" { - value = module.custom + description = "Custom details" + value = module.custom } diff --git a/solutions/custom/override.json b/solutions/custom/override.json index 87173e8c..53e0527d 100644 --- a/solutions/custom/override.json +++ b/solutions/custom/override.json @@ -1,118 +1,118 @@ { - "prefix":"lsf", - "resource_group": "Default", - "vpc": null, - "network_cidr":"10.0.0.0/8", - "placement_strategy":null, - "enable_bastion":true, - "enable_deployer":false, - "deployer_instance_profile":"mx2-4x32", - "bastion_ssh_keys": null, - "bastion_subnets_cidr":[ - "10.0.0.0/24" - ], - "enable_vpn":false, - "vpn_peer_cidr":null, - "vpn_peer_address":null, - "vpn_preshared_key":null, - "client_subnets_cidr":[ - "10.10.10.0/24" - ], - "client_ssh_keys": null, - "client_instances":[ - { - "profile":"cx2-2x4", - "count":2, - "image":"ibm-redhat-8-10-minimal-amd64-2" - } - ], - "compute_subnets_cidr":[ - "10.10.20.0/24", - "10.20.20.0/24", - "10.30.20.0/24" - ], - "compute_ssh_keys": null, - "management_instances":[ - { - "profile":"cx2-2x4", - "count":3, - 
"image":"ibm-redhat-8-10-minimal-amd64-2" - } - ], - "static_compute_instances":[ - { - "profile":"cx2-2x4", - "count":0, - "image":"ibm-redhat-8-10-minimal-amd64-2" - } - ], - "dynamic_compute_instances":[ - { - "profile":"cx2-2x4", - "count":5000, - "image":"ibm-redhat-8-10-minimal-amd64-2" - } - ], - "compute_gui_username":"admin", - "compute_gui_password": "hpc@IBMCloud", - "storage_subnets_cidr":[ - "10.10.30.0/24", - "10.20.30.0/24", - "10.30.30.0/24" - ], - "storage_ssh_keys": null, - "storage_instances":[ - { - "profile":"cx2-2x4", - "count":2, - "image":"ibm-redhat-8-10-minimal-amd64-2" - } - ], - "protocol_subnets_cidr":[ - "10.10.40.0/24", - "10.20.40.0/24", - "10.30.40.0/24" - ], - "protocol_instances":[ - { - "profile":"cx2-2x4", - "count":2, - "image":"ibm-redhat-8-10-minimal-amd64-2" - } - ], - "colocate_protocol_instances":false, - "storage_gui_username":"admin", - "storage_gui_password": "hpc@IBMCloud", - "nsd_details":[ + "prefix": "lsf", + "existing_resource_group": "Default", + "vpc_name": null, + "network_cidr": "10.0.0.0/8", + "placement_strategy": null, + "enable_bastion": true, + "enable_deployer": false, + "deployer_instance_profile": "mx2-4x32", + "bastion_ssh_keys": null, + "bastion_subnets_cidr": [ + "10.0.0.0/24" + ], + "enable_vpn": false, + "vpn_peer_cidr": null, + "vpn_peer_address": null, + "vpn_preshared_key": null, + "client_subnets_cidr": [ + "10.10.10.0/24" + ], + "client_ssh_keys": null, + "client_instances": [ + { + "profile": "cx2-2x4", + "count": 2, + "image": "ibm-redhat-8-10-minimal-amd64-2" + } + ], + "compute_subnets_cidr": [ + "10.10.20.0/24", + "10.20.20.0/24", + "10.30.20.0/24" + ], + "compute_ssh_keys": null, + "management_instances": [ + { + "profile": "cx2-2x4", + "count": 3, + "image": "ibm-redhat-8-10-minimal-amd64-2" + } + ], + "static_compute_instances": [ + { + "profile": "cx2-2x4", + "count": 0, + "image": "ibm-redhat-8-10-minimal-amd64-2" + } + ], + "dynamic_compute_instances": [ + { + "profile": "cx2-2x4", + "count": 5000, + "image": "ibm-redhat-8-10-minimal-amd64-2" + } + ], + "compute_gui_username": "admin", + "compute_gui_password": "hpc@IBMCloud", + "storage_subnets_cidr": [ + "10.10.30.0/24", + "10.20.30.0/24", + "10.30.30.0/24" + ], + "storage_ssh_keys": null, + "storage_instances": [ + { + "profile": "cx2-2x4", + "count": 2, + "image": "ibm-redhat-8-10-minimal-amd64-2" + } + ], + "protocol_subnets_cidr": [ + "10.10.40.0/24", + "10.20.40.0/24", + "10.30.40.0/24" + ], + "protocol_instances": [ + { + "profile": "cx2-2x4", + "count": 2, + "image": "ibm-redhat-8-10-minimal-amd64-2" + } + ], + "colocate_protocol_instances": false, + "storage_gui_username": "admin", + "storage_gui_password": "hpc@IBMCloud", + "nsd_details": [ + { + "capacity": 100, + "iops": 1000, + "profile": "custom" + } + ], + "file_shares": [ + { + "mount_path": "/mnt/binaries", + "size": 100, + "iops": 1000 + }, { - "capacity":100, - "iops":1000, - "profile":"custom" + "mount_path": "/mnt/data", + "size": 100, + "iops": 1000 } ], - "file_shares":[ - { - "mount_path":"/mnt/binaries", - "size":100, - "iops":1000 - }, - { - "mount_path":"/mnt/data", - "size":100, - "iops":1000 - } - ], - "dns_instance_id":null, - "dns_custom_resolver_id":null, - "dns_domain_names":{ - "compute":"comp.com", - "storage":"strg.com", - "protocol":"ces.com" - }, - "enable_cos_integration":true, - "cos_instance_name":null, - "enable_atracker":true, - "enable_vpc_flow_logs":true, - "key_management":"key_protect", - "hpcs_instance_name":null - } + "dns_instance_id": null, + 
"dns_custom_resolver_id": null, + "dns_domain_names": { + "compute": "comp.com", + "storage": "strg.com", + "protocol": "ces.com" + }, + "enable_cos_integration": true, + "cos_instance_name": null, + "enable_atracker": true, + "enable_vpc_flow_logs": true, + "key_management": "key_protect", + "hpcs_instance_name": null +} diff --git a/solutions/custom/variables.tf b/solutions/custom/variables.tf index fdc200a1..499f4af8 100644 --- a/solutions/custom/variables.tf +++ b/solutions/custom/variables.tf @@ -1,11 +1,11 @@ ############################################################################## # Offering Variations ############################################################################## -variable "scheduler" { - type = string - default = "LSF" - description = "Select one of the scheduler (LSF/Symphony/Slurm/null)" -} +# variable "scheduler" { +# type = string +# default = "LSF" +# description = "Select one of the scheduler (LSF/Symphony/Slurm/null)" +# } variable "ibm_customer_number" { type = string @@ -58,7 +58,7 @@ variable "prefix" { ############################################################################## # Resource Groups Variables ############################################################################## -variable "resource_group" { +variable "existing_resource_group" { type = string default = "Default" description = "String describing resource groups to create or reference" @@ -68,7 +68,7 @@ variable "resource_group" { ############################################################################## # VPC Variables ############################################################################## -variable "vpc" { +variable "vpc_name" { type = string default = null description = "Name of an existing VPC in which the cluster resources will be deployed. If no value is given, then a new VPC will be provisioned for the cluster. [Learn more](https://cloud.ibm.com/docs/vpc)" @@ -303,11 +303,11 @@ variable "protocol_instances" { description = "Number of instances to be launched for protocol hosts." } -variable "colocate_protocol_instances" { - type = bool - default = true - description = "Enable it to use storage instances as protocol instances" -} +# variable "colocate_protocol_instances" { +# type = bool +# default = true +# description = "Enable it to use storage instances as protocol instances" +# } variable "storage_gui_username" { type = string @@ -392,65 +392,65 @@ variable "dns_domain_names" { ############################################################################## # Auth Variables ############################################################################## -variable "enable_ldap" { - type = bool - default = false - description = "Set this option to true to enable LDAP for IBM Cloud HPC, with the default value set to false." -} - -variable "ldap_basedns" { - type = string - default = "ldapscale.com" - description = "The dns domain name is used for configuring the LDAP server. If an LDAP server is already in existence, ensure to provide the associated DNS domain name." -} - -variable "ldap_server" { - type = string - default = null - description = "Provide the IP address for the existing LDAP server. If no address is given, a new LDAP server will be created." -} - -variable "ldap_admin_password" { - type = string - sensitive = true - default = "hpc@IBMCloud" - description = "The LDAP administrative password should be 8 to 20 characters long, with a mix of at least three alphabetic characters." 
-} - -variable "ldap_user_name" { - type = string - default = "admin" - description = "Custom LDAP User for performing cluster operations. Note: Username should be between 4 to 32 characters." -} - -variable "ldap_user_password" { - type = string - sensitive = true - default = "hpc@IBMCloud" - description = "The LDAP user password should be 8 to 20 characters long, with a mix of at least three alphabetic character." -} - -variable "ldap_ssh_keys" { - type = list(string) - default = null - description = "Name of the SSH key configured in your IBM Cloud account that is used to establish a connection to the LDAP Server." -} - -variable "ldap_instances" { - type = list( - object({ - profile = string - count = number - image = string - }) - ) - default = [{ - profile = "bx2-2x8" - count = 0 - image = "ibm-redhat-8-10-minimal-amd64-2" - }] - description = "Number of instances to be launched for ldap hosts." -} +# variable "enable_ldap" { +# type = bool +# default = false +# description = "Set this option to true to enable LDAP for IBM Cloud HPC, with the default value set to false." +# } + +# variable "ldap_basedns" { +# type = string +# default = "ldapscale.com" +# description = "The dns domain name is used for configuring the LDAP server. If an LDAP server is already in existence, ensure to provide the associated DNS domain name." +# } + +# variable "ldap_server" { +# type = string +# default = null +# description = "Provide the IP address for the existing LDAP server. If no address is given, a new LDAP server will be created." +# } + +# variable "ldap_admin_password" { +# type = string +# sensitive = true +# default = "hpc@IBMCloud" +# description = "The LDAP administrative password should be 8 to 20 characters long, with a mix of at least three alphabetic characters." +# } + +# variable "ldap_user_name" { +# type = string +# default = "admin" +# description = "Custom LDAP User for performing cluster operations. Note: Username should be between 4 to 32 characters." +# } + +# variable "ldap_user_password" { +# type = string +# sensitive = true +# default = "hpc@IBMCloud" +# description = "The LDAP user password should be 8 to 20 characters long, with a mix of at least three alphabetic character." +# } + +# variable "ldap_ssh_keys" { +# type = list(string) +# default = null +# description = "Name of the SSH key configured in your IBM Cloud account that is used to establish a connection to the LDAP Server." +# } + +# variable "ldap_instances" { +# type = list( +# object({ +# profile = string +# count = number +# image = string +# }) +# ) +# default = [{ +# profile = "bx2-2x8" +# count = 0 +# image = "ibm-redhat-8-10-minimal-amd64-2" +# }] +# description = "Number of instances to be launched for ldap hosts." +# } ############################################################################## # Encryption Variables @@ -501,117 +501,117 @@ variable "enable_vpc_flow_logs" { ############################################################################## # Scale specific Variables ############################################################################## -variable "filesystem_config" { - type = list(object({ - filesystem = string - block_size = string - default_data_replica = number - default_metadata_replica = number - max_data_replica = number - max_metadata_replica = number - mount_point = string - })) - default = null - description = "File system configurations." 
-} - -variable "filesets_config" { - type = list(object({ - fileset = string - filesystem = string - junction_path = string - client_mount_path = string - quota = number - })) - default = null - description = "Fileset configurations." -} - -variable "afm_instances" { - type = list( - object({ - profile = string - count = number - image = string - }) - ) - default = [{ - profile = "bx2-2x8" - count = 0 - image = "ibm-redhat-8-10-minimal-amd64-2" - }] - description = "Number of instances to be launched for afm hosts." -} - -variable "afm_cos_config" { - type = list(object({ - afm_fileset = string, - mode = string, - cos_instance = string, - bucket_name = string, - bucket_region = string, - cos_service_cred_key = string, - bucket_type = string, - bucket_storage_class = string - })) - default = null - description = "AFM configurations." -} +# variable "filesystem_config" { +# type = list(object({ +# filesystem = string +# block_size = string +# default_data_replica = number +# default_metadata_replica = number +# max_data_replica = number +# max_metadata_replica = number +# mount_point = string +# })) +# default = null +# description = "File system configurations." +# } + +# variable "filesets_config" { +# type = list(object({ +# fileset = string +# filesystem = string +# junction_path = string +# client_mount_path = string +# quota = number +# })) +# default = null +# description = "Fileset configurations." +# } + +# variable "afm_instances" { +# type = list( +# object({ +# profile = string +# count = number +# image = string +# }) +# ) +# default = [{ +# profile = "bx2-2x8" +# count = 0 +# image = "ibm-redhat-8-10-minimal-amd64-2" +# }] +# description = "Number of instances to be launched for afm hosts." +# } + +# variable "afm_cos_config" { +# type = list(object({ +# afm_fileset = string, +# mode = string, +# cos_instance = string, +# bucket_name = string, +# bucket_region = string, +# cos_service_cred_key = string, +# bucket_type = string, +# bucket_storage_class = string +# })) +# default = null +# description = "AFM configurations." +# } ############################################################################## # LSF specific Variables ############################################################################## -variable "cluster_id" { - type = string - default = "HPCCluster" - description = "Unique ID of the cluster used by LSF for configuration of resources. This can be up to 39 alphanumeric characters." - validation { - condition = 0 < length(var.cluster_id) && length(var.cluster_id) < 40 && can(regex("^[a-zA-Z0-9_.-]+$", var.cluster_id)) - error_message = "The ID can be up to 39 alphanumeric characters including the underscore (_), the hyphen (-), and the period (.) characters." - } -} - -variable "enable_hyperthreading" { - type = bool - default = true - description = "Setting this to true will enable hyper-threading in the worker nodes of the cluster (default). Otherwise, hyper-threading will be disabled." -} - -variable "enable_dedicated_host" { - type = bool - default = false - description = "Set to true to use dedicated hosts for compute hosts (default: false)." -} - -variable "dedicated_host_placement" { - type = string - default = "spread" - description = "Specify 'pack' or 'spread'. The 'pack' option will deploy VSIs on one dedicated host until full before moving on to the next dedicated host." 
- validation { - condition = var.dedicated_host_placement == "spread" || var.dedicated_host_placement == "pack" - error_message = "Supported values for dedicated_host_placement: spread or pack." - } -} - -variable "enable_app_center" { - type = bool - default = false - description = "Set to true to install and enable use of the IBM Spectrum LSF Application Center GUI." -} - -variable "app_center_gui_password" { - type = string - default = "hpc@IBMCloud" - sensitive = true - description = "Password for IBM Spectrum LSF Application Center GUI." -} - -variable "app_center_db_password" { - type = string - default = "hpc@IBMCloud" - sensitive = true - description = "Password for IBM Spectrum LSF Application Center database GUI." -} +# variable "cluster_name" { +# type = string +# default = "HPCCluster" +# description = "Unique ID of the cluster used by LSF for configuration of resources. This can be up to 39 alphanumeric characters." +# validation { +# condition = 0 < length(var.cluster_name) && length(var.cluster_name) < 40 && can(regex("^[a-zA-Z0-9_.-]+$", var.cluster_name)) +# error_message = "The ID can be up to 39 alphanumeric characters including the underscore (_), the hyphen (-), and the period (.) characters." +# } +# } + +# variable "enable_hyperthreading" { +# type = bool +# default = true +# description = "Setting this to true will enable hyper-threading in the worker nodes of the cluster (default). Otherwise, hyper-threading will be disabled." +# } + +# variable "enable_dedicated_host" { +# type = bool +# default = false +# description = "Set to true to use dedicated hosts for compute hosts (default: false)." +# } + +# variable "dedicated_host_placement" { +# type = string +# default = "spread" +# description = "Specify 'pack' or 'spread'. The 'pack' option will deploy VSIs on one dedicated host until full before moving on to the next dedicated host." +# validation { +# condition = var.dedicated_host_placement == "spread" || var.dedicated_host_placement == "pack" +# error_message = "Supported values for dedicated_host_placement: spread or pack." +# } +# } + +# variable "enable_app_center" { +# type = bool +# default = false +# description = "Set to true to install and enable use of the IBM Spectrum LSF Application Center GUI." +# } + +# variable "app_center_gui_password" { +# type = string +# default = "hpc@IBMCloud" +# sensitive = true +# description = "Password for IBM Spectrum LSF Application Center GUI." +# } + +# variable "app_center_db_password" { +# type = string +# default = "hpc@IBMCloud" +# sensitive = true +# description = "Password for IBM Spectrum LSF Application Center database GUI." +# } ############################################################################## # Symphony specific Variables @@ -624,93 +624,96 @@ variable "app_center_db_password" { ############################################################################## # Landing Zone Variables ############################################################################## -variable "clusters" { - default = null - description = "A list describing clusters workloads to create" - type = list( - object({ - name = string # Name of Cluster - vpc_name = string # Name of VPC - subnet_names = list(string) # List of vpc subnets for cluster - workers_per_subnet = number # Worker nodes per subnet. 
- machine_type = string # Worker node flavor - kube_type = string # iks or openshift - kube_version = optional(string) # Can be a version from `ibmcloud ks versions` or `default` - entitlement = optional(string) # entitlement option for openshift - secondary_storage = optional(string) # Secondary storage type - pod_subnet = optional(string) # Portable subnet for pods - service_subnet = optional(string) # Portable subnet for services - resource_group = string # Resource Group used for cluster - cos_name = optional(string) # Name of COS instance Required only for OpenShift clusters - access_tags = optional(list(string), []) - boot_volume_crk_name = optional(string) # Boot volume encryption key name - disable_public_endpoint = optional(bool, true) # disable cluster public, leaving only private endpoint - disable_outbound_traffic_protection = optional(bool, false) # public outbound access from the cluster workers - cluster_force_delete_storage = optional(bool, false) # force the removal of persistent storage associated with the cluster during cluster deletion - operating_system = string # The operating system of the workers in the default worker pool. See https://cloud.ibm.com/docs/openshift?topic=openshift-openshift_versions#openshift_versions_available . - kms_wait_for_apply = optional(bool, true) # make terraform wait until KMS is applied to master and it is ready and deployed - verify_cluster_network_readiness = optional(bool, true) # Flag to run a script will run kubectl commands to verify that all worker nodes can communicate successfully with the master. If the runtime does not have access to the kube cluster to run kubectl commands, this should be set to false. - use_ibm_cloud_private_api_endpoints = optional(bool, true) # Flag to force all cluster related api calls to use the IBM Cloud private endpoints. - import_default_worker_pool_on_create = optional(bool) # (Advanced users) Whether to handle the default worker pool as a stand-alone ibm_container_vpc_worker_pool resource on cluster creation. Only set to false if you understand the implications of managing the default worker pool as part of the cluster resource. Set to true to import the default worker pool as a separate resource. Set to false to manage the default worker pool as part of the cluster resource. - allow_default_worker_pool_replacement = optional(bool) # (Advanced users) Set to true to allow the module to recreate a default worker pool. Only use in the case where you are getting an error indicating that the default worker pool cannot be replaced on apply. Once the default worker pool is handled as a stand-alone ibm_container_vpc_worker_pool, if you wish to make any change to the default worker pool which requires the re-creation of the default pool set this variable to true - labels = optional(map(string)) # A list of labels that you want to add to the default worker pool. - addons = optional(object({ # Map of OCP cluster add-on versions to install - debug-tool = optional(string) - image-key-synchronizer = optional(string) - openshift-data-foundation = optional(string) - vpc-file-csi-driver = optional(string) - static-route = optional(string) - cluster-autoscaler = optional(string) - vpc-block-csi-driver = optional(string) - ibm-storage-operator = optional(string) - }), {}) - manage_all_addons = optional(bool, false) # Instructs Terraform to manage all cluster addons, even if addons were installed outside of the module. If set to 'true' this module will destroy any addons that were installed by other sources. 
- kms_config = optional( - object({ - crk_name = string # Name of key - private_endpoint = optional(bool) # Private endpoint - }) - ) - worker_pools = optional( - list( - object({ - name = string # Worker pool name - vpc_name = string # VPC name - workers_per_subnet = number # Worker nodes per subnet - flavor = string # Worker node flavor - subnet_names = list(string) # List of vpc subnets for worker pool - entitlement = optional(string) # entitlement option for openshift - secondary_storage = optional(string) # Secondary storage type - boot_volume_crk_name = optional(string) # Boot volume encryption key name - operating_system = string # The operating system of the workers in the worker pool. See https://cloud.ibm.com/docs/openshift?topic=openshift-openshift_versions#openshift_versions_available . - labels = optional(map(string)) # A list of labels that you want to add to all the worker nodes in the worker pool. - }) - ) - ) - }) - ) -} +# variable "clusters" { +# default = null +# description = "A list describing clusters workloads to create" +# type = list( +# object({ +# name = string # Name of Cluster +# vpc_name = string # Name of VPC +# subnet_names = list(string) # List of vpc subnets for cluster +# workers_per_subnet = number # Worker nodes per subnet. +# machine_type = string # Worker node flavor +# kube_type = string # iks or openshift +# kube_version = optional(string) # Can be a version from `ibmcloud ks versions` or `default` +# entitlement = optional(string) # entitlement option for openshift +# secondary_storage = optional(string) # Secondary storage type +# pod_subnet = optional(string) # Portable subnet for pods +# service_subnet = optional(string) # Portable subnet for services +# existing_resource_group = string # Resource Group used for cluster +# cos_name = optional(string) # Name of COS instance Required only for OpenShift clusters +# access_tags = optional(list(string), []) +# boot_volume_crk_name = optional(string) # Boot volume encryption key name +# disable_public_endpoint = optional(bool, true) # disable cluster public, leaving only private endpoint +# disable_outbound_traffic_protection = optional(bool, false) # public outbound access from the cluster workers +# cluster_force_delete_storage = optional(bool, false) # force the removal of persistent storage associated with the cluster during cluster deletion +# operating_system = string # The operating system of the workers in the default worker pool. See https://cloud.ibm.com/docs/openshift?topic=openshift-openshift_versions#openshift_versions_available . +# kms_wait_for_apply = optional(bool, true) # make terraform wait until KMS is applied to master and it is ready and deployed +# verify_cluster_network_readiness = optional(bool, true) # Flag to run a script will run kubectl commands to verify that all worker nodes can communicate successfully with the master. If the runtime does not have access to the kube cluster to run kubectl commands, this should be set to false. +# use_ibm_cloud_private_api_endpoints = optional(bool, true) # Flag to force all cluster related api calls to use the IBM Cloud private endpoints. +# import_default_worker_pool_on_create = optional(bool) # (Advanced users) Whether to handle the default worker pool as a stand-alone ibm_container_vpc_worker_pool resource on cluster creation. Only set to false if you understand the implications of managing the default worker pool as part of the cluster resource. Set to true to import the default worker pool as a separate resource. 
Set to false to manage the default worker pool as part of the cluster resource. +# allow_default_worker_pool_replacement = optional(bool) # (Advanced users) Set to true to allow the module to recreate a default worker pool. Only use in the case where you are getting an error indicating that the default worker pool cannot be replaced on apply. Once the default worker pool is handled as a stand-alone ibm_container_vpc_worker_pool, if you wish to make any change to the default worker pool which requires the re-creation of the default pool set this variable to true +# labels = optional(map(string)) # A list of labels that you want to add to the default worker pool. +# addons = optional(object({ # Map of OCP cluster add-on versions to install +# debug-tool = optional(string) +# image-key-synchronizer = optional(string) +# openshift-data-foundation = optional(string) +# vpc-file-csi-driver = optional(string) +# static-route = optional(string) +# cluster-autoscaler = optional(string) +# vpc-block-csi-driver = optional(string) +# ibm-storage-operator = optional(string) +# }), {}) +# manage_all_addons = optional(bool, false) # Instructs Terraform to manage all cluster addons, even if addons were installed outside of the module. If set to 'true' this module will destroy any addons that were installed by other sources. +# kms_config = optional( +# object({ +# crk_name = string # Name of key +# private_endpoint = optional(bool) # Private endpoint +# }) +# ) +# worker_pools = optional( +# list( +# object({ +# name = string # Worker pool name +# vpc_name = string # VPC name +# workers_per_subnet = number # Worker nodes per subnet +# flavor = string # Worker node flavor +# subnet_names = list(string) # List of vpc subnets for worker pool +# entitlement = optional(string) # entitlement option for openshift +# secondary_storage = optional(string) # Secondary storage type +# boot_volume_crk_name = optional(string) # Boot volume encryption key name +# operating_system = string # The operating system of the workers in the worker pool. See https://cloud.ibm.com/docs/openshift?topic=openshift-openshift_versions#openshift_versions_available . +# labels = optional(map(string)) # A list of labels that you want to add to all the worker nodes in the worker pool. +# }) +# ) +# ) +# }) +# ) +# } ############################################################################## # Terraform generic Variables ############################################################################## -variable "TF_PARALLELISM" { - type = string - default = "250" - description = "Limit the number of concurrent operation." -} - -variable "TF_VERSION" { - type = string - default = "1.9" - description = "The version of the Terraform engine that's used in the Schematics workspace." -} - -variable "TF_LOG" { - type = string - default = "ERROR" - description = "The Terraform log level used for output in the Schematics workspace." -} +# tflint-ignore: all +# variable "TF_PARALLELISM" { +# type = string +# default = "250" +# description = "Limit the number of concurrent operation." +# } + +# tflint-ignore: all +# variable "TF_VERSION" { +# type = string +# default = "1.9" +# description = "The version of the Terraform engine that's used in the Schematics workspace." +# } + +# tflint-ignore: all +# variable "TF_LOG" { +# type = string +# default = "ERROR" +# description = "The Terraform log level used for output in the Schematics workspace." 
+# }
##############################################################################
# Override JSON
diff --git a/solutions/custom/version.tf b/solutions/custom/version.tf
index e081bcc1..d465bd59 100644
--- a/solutions/custom/version.tf
+++ b/solutions/custom/version.tf
@@ -5,10 +5,6 @@ terraform {
source = "IBM-Cloud/ibm"
version = ">= 1.68.1, < 2.0.0"
}
- ansible = {
- source = "ansible/ansible"
- version = "~> 1.3.0"
- }
}
}
diff --git a/solutions/hpcaas/README.md b/solutions/hpcaas/README.md
index 6ee238a5..31b1a927 100644
--- a/solutions/hpcaas/README.md
+++ b/solutions/hpcaas/README.md
@@ -3,26 +3,21 @@
| Name | Version |
|------|---------|
| [terraform](#requirement\_terraform) | >= 1.3 |
-| [ansible](#requirement\_ansible) | ~> 1.3.0 |
| [ibm](#requirement\_ibm) | >= 1.68.1, < 2.0.0 |
## Providers
-| Name | Version |
-|------|---------|
-| [ibm](#provider\_ibm) | 1.70.1 |
+No providers.
## Modules
| Name | Source | Version |
|------|--------|---------|
-| [lsf](#module\_lsf) | ./../.. | n/a |
+| [hpcaas](#module\_hpcaas) | ./../.. | n/a |
## Resources
-| Name | Type |
-|------|------|
-| [ibm_is_vpc.itself](https://registry.terraform.io/providers/IBM-Cloud/ibm/latest/docs/data-sources/is_vpc) | data source |
+No resources.
## Inputs
| Name | Description | Type | Default | Required |
|------|-------------|------|---------|:--------:|
| [allowed\_cidr](#input\_allowed\_cidr) | Network CIDR to access the VPC. This is used to manage network ACL rules for accessing the cluster. | `list(string)` | n/a | yes |
| [bastion\_ssh\_keys](#input\_bastion\_ssh\_keys) | The key pair to use to access the bastion host. | `list(string)` | `null` | no |
-| [bastion\_subnets\_cidr](#input\_bastion\_subnets\_cidr) | Subnet CIDR block to launch the bastion host. | `list(string)` |
[
"10.0.0.0/24"
]
| no | +| [bastion\_subnets\_cidr](#input\_bastion\_subnets\_cidr) | Subnet CIDR block to launch the bastion host. | `string` | `"10.0.0.0/24"` | no | | [client\_instances](#input\_client\_instances) | Number of instances to be launched for client. |
list(
object({
profile = string
count = number
image = string
})
)
|
[
{
"count": 2,
"image": "ibm-redhat-8-10-minimal-amd64-2",
"profile": "cx2-2x4"
}
]
| no | | [client\_ssh\_keys](#input\_client\_ssh\_keys) | The key pair to use to launch the client host. | `list(string)` | `null` | no | -| [client\_subnets\_cidr](#input\_client\_subnets\_cidr) | Subnet CIDR block to launch the client host. | `list(string)` |
[
"10.10.10.0/24",
"10.20.10.0/24",
"10.30.10.0/24"
]
| no | +| [client\_subnets\_cidr](#input\_client\_subnets\_cidr) | Subnet CIDR block to launch the client host. | `string` | `"10.10.10.0/24"` | no | | [compute\_gui\_password](#input\_compute\_gui\_password) | Password for compute cluster GUI | `string` | `"hpc@IBMCloud"` | no | | [compute\_gui\_username](#input\_compute\_gui\_username) | GUI user to perform system management and monitoring tasks on compute cluster. | `string` | `"admin"` | no | | [compute\_ssh\_keys](#input\_compute\_ssh\_keys) | The key pair to use to launch the compute host. | `list(string)` | `null` | no | -| [compute\_subnets\_cidr](#input\_compute\_subnets\_cidr) | Subnet CIDR block to launch the compute cluster host. | `list(string)` |
[
"10.10.20.0/24",
"10.20.20.0/24",
"10.30.20.0/24"
]
| no | +| [compute\_subnets\_cidr](#input\_compute\_subnets\_cidr) | Subnet CIDR block to launch the compute cluster host. | `string` | `"10.10.20.0/24"` | no | | [cos\_instance\_name](#input\_cos\_instance\_name) | Exiting COS instance name | `string` | `null` | no | | [deployer\_instance\_profile](#input\_deployer\_instance\_profile) | Deployer should be only used for better deployment performance | `string` | `"mx2-4x32"` | no | | [dns\_custom\_resolver\_id](#input\_dns\_custom\_resolver\_id) | IBM Cloud DNS custom resolver id. | `string` | `null` | no | @@ -50,34 +45,35 @@ | [enable\_deployer](#input\_enable\_deployer) | Deployer should be only used for better deployment performance | `bool` | `false` | no | | [enable\_vpc\_flow\_logs](#input\_enable\_vpc\_flow\_logs) | Enable Activity tracker | `bool` | `true` | no | | [enable\_vpn](#input\_enable\_vpn) | The solution supports multiple ways to connect to your HPC cluster for example, using bastion node, via VPN or direct connection. If connecting to the HPC cluster via VPN, set this value to true. | `bool` | `false` | no | +| [existing\_resource\_group](#input\_existing\_resource\_group) | String describing resource groups to create or reference | `string` | `"Default"` | no | | [file\_shares](#input\_file\_shares) | Custom file shares to access shared storage |
list(
object({
mount_path = string,
size = number,
iops = number
})
)
|
[
{
"iops": 1000,
"mount_path": "/mnt/binaries",
"size": 100
},
{
"iops": 1000,
"mount_path": "/mnt/data",
"size": 100
}
]
| no | | [hpcs\_instance\_name](#input\_hpcs\_instance\_name) | Hyper Protect Crypto Service instance | `string` | `null` | no | | [ibm\_customer\_number](#input\_ibm\_customer\_number) | Comma-separated list of the IBM Customer Number(s) (ICN) that is used for the Bring Your Own License (BYOL) entitlement check. For more information on how to find your ICN, see [What is my IBM Customer Number (ICN)?](https://www.ibm.com/support/pages/what-my-ibm-customer-number-icn). | `string` | n/a | yes | | [ibmcloud\_api\_key](#input\_ibmcloud\_api\_key) | IBM Cloud API Key that will be used for authentication in scripts run in this module. Only required if certain options are required. | `string` | n/a | yes | -| [key\_management](#input\_key\_management) | null/key\_protect/hs\_crypto | `string` | `"key_protect"` | no | +| [key\_management](#input\_key\_management) | Set the value as key\_protect to enable customer managed encryption for boot volume and file share. If the key\_management is set as null, IBM Cloud resources will be always be encrypted through provider managed. | `string` | `"key_protect"` | no | | [management\_instances](#input\_management\_instances) | Number of instances to be launched for management. |
list(
object({
profile = string
count = number
image = string
})
)
|
[
{
"count": 2,
"image": "ibm-redhat-8-10-minimal-amd64-2",
"profile": "cx2-2x4"
}
]
| no | | [network\_cidr](#input\_network\_cidr) | Network CIDR for the VPC. This is used to manage network ACL rules for cluster provisioning. | `string` | `"10.0.0.0/8"` | no | -| [nsd\_details](#input\_nsd\_details) | Storage scale NSD details |
list(
object({
profile = string
capacity = optional(number)
iops = optional(number)
})
)
|
[
{
"capacity": 100,
"iops": 1000,
"profile": "custom"
}
]
| no | | [override](#input\_override) | Override default values with custom JSON template. This uses the file `override.json` to allow users to create a fully customized environment. | `bool` | `false` | no | | [override\_json\_string](#input\_override\_json\_string) | Override default values with a JSON object. Any JSON other than an empty string overrides other configuration changes. | `string` | `null` | no | | [placement\_strategy](#input\_placement\_strategy) | VPC placement groups to create (null / host\_spread / power\_spread) | `string` | `null` | no | | [prefix](#input\_prefix) | A unique identifier for resources. Must begin with a letter and end with a letter or number. This prefix will be prepended to any resources provisioned by this template. Prefixes must be 16 or fewer characters. | `string` | `"lsf"` | no | | [protocol\_instances](#input\_protocol\_instances) | Number of instances to be launched for protocol hosts. |
list(
object({
profile = string
count = number
image = string
})
)
|
[
{
"count": 2,
"image": "ibm-redhat-8-10-minimal-amd64-2",
"profile": "bx2-2x8"
}
]
| no | -| [protocol\_subnets\_cidr](#input\_protocol\_subnets\_cidr) | Subnet CIDR block to launch the storage cluster host. | `list(string)` |
[
"10.10.40.0/24",
"10.20.40.0/24",
"10.30.40.0/24"
]
| no | -| [resource\_group](#input\_resource\_group) | String describing resource groups to create or reference | `string` | `"Default"` | no | +| [protocol\_subnets\_cidr](#input\_protocol\_subnets\_cidr) | Subnet CIDR block to launch the storage cluster host. | `string` | `"10.10.40.0/24"` | no | | [ssh\_keys](#input\_ssh\_keys) | The key pair to use to access the HPC cluster. | `list(string)` | `null` | no | | [static\_compute\_instances](#input\_static\_compute\_instances) | Min Number of instances to be launched for compute cluster. |
list(
object({
profile = string
count = number
image = string
})
)
|
[
{
"count": 1,
"image": "ibm-redhat-8-10-minimal-amd64-2",
"profile": "cx2-2x4"
}
]
| no | | [storage\_gui\_password](#input\_storage\_gui\_password) | Password for storage cluster GUI | `string` | `"hpc@IBMCloud"` | no | | [storage\_gui\_username](#input\_storage\_gui\_username) | GUI user to perform system management and monitoring tasks on storage cluster. | `string` | `"admin"` | no | | [storage\_instances](#input\_storage\_instances) | Number of instances to be launched for storage cluster. |
list(
object({
profile = string
count = number
image = string
filesystem_name = optional(string)
})
)
|
[
{
"count": 2,
"filesystem_name": "fs1",
"image": "ibm-redhat-8-10-minimal-amd64-2",
"profile": "bx2-2x8"
}
]
| no | | [storage\_ssh\_keys](#input\_storage\_ssh\_keys) | The key pair to use to launch the storage cluster host. | `list(string)` | `null` | no | -| [storage\_subnets\_cidr](#input\_storage\_subnets\_cidr) | Subnet CIDR block to launch the storage cluster host. | `list(string)` |
[
"10.10.30.0/24",
"10.20.30.0/24",
"10.30.30.0/24"
]
| no | +| [storage\_subnets\_cidr](#input\_storage\_subnets\_cidr) | Subnet CIDR block to launch the storage cluster host. | `string` | `"10.10.30.0/24"` | no | | [vpc](#input\_vpc) | Name of an existing VPC in which the cluster resources will be deployed. If no value is given, then a new VPC will be provisioned for the cluster. [Learn more](https://cloud.ibm.com/docs/vpc) | `string` | `null` | no | | [vpn\_peer\_address](#input\_vpn\_peer\_address) | The peer public IP address to which the VPN will be connected. | `string` | `null` | no | | [vpn\_peer\_cidr](#input\_vpn\_peer\_cidr) | The peer CIDRs (e.g., 192.168.0.0/24) to which the VPN will be connected. | `list(string)` | `null` | no | | [vpn\_preshared\_key](#input\_vpn\_preshared\_key) | The pre-shared key for the VPN. | `string` | `null` | no | -| [zones](#input\_zones) | Region where VPC will be created. To find your VPC region, use `ibmcloud is regions` command to find available regions. | `list(string)` | n/a | yes | +| [zone](#input\_zone) | Zone where VPC will be created. | `string` | n/a | yes | ## Outputs -No outputs. +| Name | Description | +|------|-------------| +| [hpcaas](#output\_hpcaas) | HPCaaS details | diff --git a/solutions/hpcaas/catalogValidationValues.json.template b/solutions/hpcaas/catalogValidationValues.json.template index a5642a41..829063ab 100644 --- a/solutions/hpcaas/catalogValidationValues.json.template +++ b/solutions/hpcaas/catalogValidationValues.json.template @@ -1,7 +1,7 @@ { "ibmcloud_api_key": $VALIDATION_APIKEY, "prefix": $PREFIX, - "zones": "[\"ca-tor-1\"]", - "resource_group": "geretain-hpc-rg", + "zone": "ca-tor-1", + "existing_resource_group": "geretain-hpc-rg", "ssh_keys": "[\"geretain-hpc-ssh-key\"]" } diff --git a/solutions/hpcaas/datasource.tf b/solutions/hpcaas/datasource.tf deleted file mode 100644 index f6fada1e..00000000 --- a/solutions/hpcaas/datasource.tf +++ /dev/null @@ -1,22 +0,0 @@ -# Future use -/* -data "ibm_is_region" "itself" { - name = local.region -} - -data "ibm_is_zone" "itself" { - name = var.zones[0] - region = data.ibm_is_region.itself.name -} -*/ - -data "ibm_is_vpc" "itself" { - count = var.vpc == null ? 
0 : 1 - name = var.vpc -} -/* -data "ibm_is_subnet" "itself" { - count = length(local.subnets) - identifier = local.subnets[count.index]["id"] -} -*/ diff --git a/solutions/hpcaas/locals.tf b/solutions/hpcaas/locals.tf index f61d69eb..f86ca6b4 100644 --- a/solutions/hpcaas/locals.tf +++ b/solutions/hpcaas/locals.tf @@ -20,7 +20,7 @@ locals { locals { config = { - resource_group = var.resource_group + existing_resource_group = var.existing_resource_group allowed_cidr = var.allowed_cidr deployer_instance_profile = var.deployer_instance_profile ssh_keys = var.ssh_keys @@ -59,7 +59,7 @@ locals { storage_instances = var.storage_instances storage_ssh_keys = var.storage_ssh_keys storage_subnets_cidr = var.storage_subnets_cidr - vpc = var.vpc + vpc_name = var.vpc_name vpn_peer_address = var.vpn_peer_address vpn_peer_cidr = var.vpn_peer_cidr vpn_preshared_key = var.vpn_preshared_key @@ -70,7 +70,7 @@ locals { # Compile Environment for Config output locals { env = { - resource_group = lookup(local.override[local.override_type], "resource_group", local.config.resource_group) + existing_resource_group = lookup(local.override[local.override_type], "existing_resource_group", local.config.existing_resource_group) allowed_cidr = lookup(local.override[local.override_type], "allowed_cidr", local.config.allowed_cidr) deployer_instance_profile = lookup(local.override[local.override_type], "deployer_instance_profile", local.config.deployer_instance_profile) ssh_keys = lookup(local.override[local.override_type], "ssh_keys", local.config.ssh_keys) @@ -109,7 +109,7 @@ locals { storage_instances = lookup(local.override[local.override_type], "storage_instances", local.config.storage_instances) storage_ssh_keys = lookup(local.override[local.override_type], "storage_ssh_keys", local.config.storage_ssh_keys) storage_subnets_cidr = lookup(local.override[local.override_type], "storage_subnets_cidr", local.config.storage_subnets_cidr) - vpc = lookup(local.override[local.override_type], "vpc", local.config.vpc) + vpc_name = lookup(local.override[local.override_type], "vpc_name", local.config.vpc_name) vpn_peer_address = lookup(local.override[local.override_type], "vpn_peer_address", local.config.vpn_peer_address) vpn_peer_cidr = lookup(local.override[local.override_type], "vpn_peer_cidr", local.config.vpn_peer_cidr) vpn_preshared_key = lookup(local.override[local.override_type], "vpn_preshared_key", local.config.vpn_preshared_key) diff --git a/solutions/hpcaas/main.tf b/solutions/hpcaas/main.tf index f20ffee8..ea26d08f 100644 --- a/solutions/hpcaas/main.tf +++ b/solutions/hpcaas/main.tf @@ -1,17 +1,14 @@ module "hpcaas" { source = "./../.." 
- scheduler = "HPCaaS" ibm_customer_number = var.ibm_customer_number zones = [var.zone] allowed_cidr = var.allowed_cidr prefix = local.env.prefix ssh_keys = local.env.ssh_keys - resource_group = local.env.resource_group + existing_resource_group = local.env.existing_resource_group deployer_instance_profile = local.env.deployer_instance_profile bastion_ssh_keys = local.env.bastion_ssh_keys bastion_subnets_cidr = [local.env.bastion_subnets_cidr] - compute_gui_password = local.env.compute_gui_password - compute_gui_username = local.env.compute_gui_username compute_ssh_keys = local.env.compute_ssh_keys compute_subnets_cidr = [local.env.compute_subnets_cidr] cos_instance_name = local.env.cos_instance_name @@ -26,24 +23,28 @@ module "hpcaas" { enable_vpc_flow_logs = local.env.enable_vpc_flow_logs enable_vpn = local.env.enable_vpn file_shares = local.env.file_shares - hpcs_instance_name = local.env.hpcs_instance_name key_management = local.env.key_management client_instances = local.env.client_instances client_ssh_keys = local.env.client_ssh_keys - client_subnets_cidr = [local.env.client_subnets_cidr] management_instances = local.env.management_instances network_cidr = local.env.network_cidr placement_strategy = local.env.placement_strategy protocol_instances = local.env.protocol_instances protocol_subnets_cidr = [local.env.protocol_subnets_cidr] static_compute_instances = local.env.static_compute_instances - storage_gui_password = local.env.storage_gui_password - storage_gui_username = local.env.storage_gui_username storage_instances = local.env.storage_instances storage_ssh_keys = local.env.storage_ssh_keys storage_subnets_cidr = [local.env.storage_subnets_cidr] - vpc = local.env.vpc + vpc_name = local.env.vpc_name vpn_peer_address = local.env.vpn_peer_address vpn_peer_cidr = local.env.vpn_peer_cidr vpn_preshared_key = local.env.vpn_preshared_key + + # scheduler = "HPCaaS" + # compute_gui_password = local.env.compute_gui_password + # compute_gui_username = local.env.compute_gui_username + # hpcs_instance_name = local.env.hpcs_instance_name + # client_subnets_cidr = [local.env.client_subnets_cidr] + # storage_gui_password = local.env.storage_gui_password + # storage_gui_username = local.env.storage_gui_username } diff --git a/solutions/hpcaas/outputs.tf b/solutions/hpcaas/outputs.tf index 43448084..54fa4c3d 100644 --- a/solutions/hpcaas/outputs.tf +++ b/solutions/hpcaas/outputs.tf @@ -1,3 +1,4 @@ output "hpcaas" { - value = module.hpcaas + description = "HPCaaS details" + value = module.hpcaas } diff --git a/solutions/hpcaas/override.json b/solutions/hpcaas/override.json index 95ac2e44..2d53374c 100644 --- a/solutions/hpcaas/override.json +++ b/solutions/hpcaas/override.json @@ -1,95 +1,95 @@ { - "prefix":"hpcaas", - "resource_group": "Default", - "vpc": null, - "network_cidr":"10.0.0.0/8", - "placement_strategy":null, - "ssh_keys":null, - "enable_bastion":true, - "enable_deployer":false, - "deployer_instance_profile":"mx2-4x32", + "prefix": "hpcaas", + "existing_resource_group": "Default", + "vpc_name": null, + "network_cidr": "10.0.0.0/8", + "placement_strategy": null, + "ssh_keys": null, + "enable_bastion": true, + "enable_deployer": false, + "deployer_instance_profile": "mx2-4x32", "bastion_ssh_keys": null, - "bastion_subnets_cidr":"10.0.0.0/24", - "enable_vpn":false, - "vpn_peer_cidr":null, - "vpn_peer_address":null, - "vpn_preshared_key":null, - "client_subnets_cidr":"10.10.10.0/24", + "bastion_subnets_cidr": "10.0.0.0/24", + "enable_vpn": false, + "vpn_peer_cidr": null, + 
"vpn_peer_address": null, + "vpn_preshared_key": null, + "client_subnets_cidr": "10.10.10.0/24", "client_ssh_keys": null, - "client_instances":[ + "client_instances": [ { - "profile":"cx2-2x4", - "count":2, - "image":"ibm-redhat-8-10-minimal-amd64-2" + "profile": "cx2-2x4", + "count": 2, + "image": "ibm-redhat-8-10-minimal-amd64-2" } ], - "compute_subnets_cidr":"10.10.20.0/24", + "compute_subnets_cidr": "10.10.20.0/24", "compute_ssh_keys": null, - "management_instances":[ + "management_instances": [ { - "profile":"cx2-2x4", - "count":3, - "image":"ibm-redhat-8-10-minimal-amd64-2" + "profile": "cx2-2x4", + "count": 3, + "image": "ibm-redhat-8-10-minimal-amd64-2" } ], - "static_compute_instances":[ + "static_compute_instances": [ { - "profile":"cx2-2x4", - "count":0, - "image":"ibm-redhat-8-10-minimal-amd64-2" + "profile": "cx2-2x4", + "count": 0, + "image": "ibm-redhat-8-10-minimal-amd64-2" } ], - "dynamic_compute_instances":[ + "dynamic_compute_instances": [ { - "profile":"cx2-2x4", - "count":5000, - "image":"ibm-redhat-8-10-minimal-amd64-2" + "profile": "cx2-2x4", + "count": 5000, + "image": "ibm-redhat-8-10-minimal-amd64-2" } ], - "compute_gui_username":"admin", + "compute_gui_username": "admin", "compute_gui_password": "hpc@IBMCloud", - "storage_subnets_cidr":"10.10.30.0/24", + "storage_subnets_cidr": "10.10.30.0/24", "storage_ssh_keys": null, - "storage_instances":[ + "storage_instances": [ { - "profile":"cx2-2x4", - "count":2, - "image":"ibm-redhat-8-10-minimal-amd64-2" + "profile": "cx2-2x4", + "count": 2, + "image": "ibm-redhat-8-10-minimal-amd64-2" } ], - "protocol_subnets_cidr":"10.10.40.0/24", - "protocol_instances":[ + "protocol_subnets_cidr": "10.10.40.0/24", + "protocol_instances": [ { - "profile":"cx2-2x4", - "count":2, - "image":"ibm-redhat-8-10-minimal-amd64-2" + "profile": "cx2-2x4", + "count": 2, + "image": "ibm-redhat-8-10-minimal-amd64-2" } ], - "storage_gui_username":"admin", + "storage_gui_username": "admin", "storage_gui_password": "hpc@IBMCloud", - "file_shares":[ + "file_shares": [ { - "mount_path":"/mnt/binaries", - "size":100, - "iops":1000 + "mount_path": "/mnt/binaries", + "size": 100, + "iops": 1000 }, { - "mount_path":"/mnt/data", - "size":100, - "iops":1000 + "mount_path": "/mnt/data", + "size": 100, + "iops": 1000 } ], - "dns_instance_id":null, - "dns_custom_resolver_id":null, - "dns_domain_names":{ - "compute":"comp.com", - "storage":"strg.com", - "protocol":"ces.com" + "dns_instance_id": null, + "dns_custom_resolver_id": null, + "dns_domain_names": { + "compute": "comp.com", + "storage": "strg.com", + "protocol": "ces.com" }, - "enable_cos_integration":true, - "cos_instance_name":null, - "enable_atracker":true, - "enable_vpc_flow_logs":true, - "key_management":"key_protect", - "hpcs_instance_name":null + "enable_cos_integration": true, + "cos_instance_name": null, + "enable_atracker": true, + "enable_vpc_flow_logs": true, + "key_management": "key_protect", + "hpcs_instance_name": null } diff --git a/solutions/hpcaas/variables.tf b/solutions/hpcaas/variables.tf index f7fe9167..c06fa667 100644 --- a/solutions/hpcaas/variables.tf +++ b/solutions/hpcaas/variables.tf @@ -52,7 +52,7 @@ variable "prefix" { ############################################################################## # Resource Groups Variables ############################################################################## -variable "resource_group" { +variable "existing_resource_group" { type = string default = "Default" description = "String describing resource groups to create or reference" @@ 
-62,10 +62,10 @@ variable "resource_group" {
##############################################################################
# VPC Variables
##############################################################################
-variable "vpc" {
+variable "vpc_name" {
type = string
default = null
- description = "Name of an existing VPC in which the cluster resources will be deployed. If no value is given, then a new VPC will be provisioned for the cluster. [Learn more](https://cloud.ibm.com/docs/vpc)"
+ description = "Name of an existing VPC in which the cluster resources will be deployed. If no value is given, then a new VPC will be provisioned for the cluster. [Learn more](https://cloud.ibm.com/docs/vpc)"
}
variable "network_cidr" {
diff --git a/solutions/hpcaas/version.tf b/solutions/hpcaas/version.tf
index e081bcc1..d465bd59 100644
--- a/solutions/hpcaas/version.tf
+++ b/solutions/hpcaas/version.tf
@@ -5,10 +5,6 @@ terraform {
source = "IBM-Cloud/ibm"
version = ">= 1.68.1, < 2.0.0"
}
- ansible = {
- source = "ansible/ansible"
- version = "~> 1.3.0"
- }
}
}
diff --git a/solutions/lsf/README.md b/solutions/lsf/README.md
index 6ee238a5..056eddaf 100644
--- a/solutions/lsf/README.md
+++ b/solutions/lsf/README.md
@@ -3,14 +3,11 @@
| Name | Version |
|------|---------|
| [terraform](#requirement\_terraform) | >= 1.3 |
-| [ansible](#requirement\_ansible) | ~> 1.3.0 |
| [ibm](#requirement\_ibm) | >= 1.68.1, < 2.0.0 |
## Providers
-| Name | Version |
-|------|---------|
-| [ibm](#provider\_ibm) | 1.70.1 |
+No providers.
## Modules
@@ -20,64 +17,84 @@
## Resources
-| Name | Type |
-|------|------|
-| [ibm_is_vpc.itself](https://registry.terraform.io/providers/IBM-Cloud/ibm/latest/docs/data-sources/is_vpc) | data source |
+No resources.
## Inputs
| Name | Description | Type | Default | Required |
|------|-------------|------|---------|:--------:|
| [allowed\_cidr](#input\_allowed\_cidr) | Network CIDR to access the VPC. This is used to manage network ACL rules for accessing the cluster. | `list(string)` | n/a | yes |
+| [bastion\_image](#input\_bastion\_image) | The image to use to deploy the bastion host. | `string` | `"ibm-ubuntu-22-04-3-minimal-amd64-1"` | no |
+| [bastion\_instance\_profile](#input\_bastion\_instance\_profile) | Deployer should be only used for better deployment performance | `string` | `"cx2-4x8"` | no |
| [bastion\_ssh\_keys](#input\_bastion\_ssh\_keys) | The key pair to use to access the bastion host. | `list(string)` | `null` | no |
-| [bastion\_subnets\_cidr](#input\_bastion\_subnets\_cidr) | Subnet CIDR block to launch the bastion host. | `list(string)` |
[
"10.0.0.0/24"
]
| no | +| [bastion\_subnets\_cidr](#input\_bastion\_subnets\_cidr) | Subnet CIDR block to launch the bastion host. | `string` | `"10.0.0.0/24"` | no | | [client\_instances](#input\_client\_instances) | Number of instances to be launched for client. |
list(
object({
profile = string
count = number
image = string
})
)
|
[
{
"count": 2,
"image": "ibm-redhat-8-10-minimal-amd64-2",
"profile": "cx2-2x4"
}
]
| no | | [client\_ssh\_keys](#input\_client\_ssh\_keys) | The key pair to use to launch the client host. | `list(string)` | `null` | no | -| [client\_subnets\_cidr](#input\_client\_subnets\_cidr) | Subnet CIDR block to launch the client host. | `list(string)` |
[
"10.10.10.0/24",
"10.20.10.0/24",
"10.30.10.0/24"
]
| no | +| [client\_subnets\_cidr](#input\_client\_subnets\_cidr) | Subnet CIDR block to launch the client host. | `string` | `"10.10.10.0/24"` | no | | [compute\_gui\_password](#input\_compute\_gui\_password) | Password for compute cluster GUI | `string` | `"hpc@IBMCloud"` | no | | [compute\_gui\_username](#input\_compute\_gui\_username) | GUI user to perform system management and monitoring tasks on compute cluster. | `string` | `"admin"` | no | | [compute\_ssh\_keys](#input\_compute\_ssh\_keys) | The key pair to use to launch the compute host. | `list(string)` | `null` | no | -| [compute\_subnets\_cidr](#input\_compute\_subnets\_cidr) | Subnet CIDR block to launch the compute cluster host. | `list(string)` |
[
"10.10.20.0/24",
"10.20.20.0/24",
"10.30.20.0/24"
]
| no | +| [compute\_subnets\_cidr](#input\_compute\_subnets\_cidr) | Subnet CIDR block to launch the compute cluster host. | `string` | `"10.10.20.0/24"` | no | | [cos\_instance\_name](#input\_cos\_instance\_name) | Exiting COS instance name | `string` | `null` | no | -| [deployer\_instance\_profile](#input\_deployer\_instance\_profile) | Deployer should be only used for better deployment performance | `string` | `"mx2-4x32"` | no | +| [deployer\_image](#input\_deployer\_image) | The image to use to deploy the deployer host. | `string` | `"ibm-redhat-8-10-minimal-amd64-2"` | no | +| [deployer\_instance\_profile](#input\_deployer\_instance\_profile) | Deployer should be only used for better deployment performance | `string` | `"bx2-8x32"` | no | | [dns\_custom\_resolver\_id](#input\_dns\_custom\_resolver\_id) | IBM Cloud DNS custom resolver id. | `string` | `null` | no | | [dns\_domain\_names](#input\_dns\_domain\_names) | IBM Cloud HPC DNS domain names. |
object({
compute = string
storage = string
protocol = string
})
|
{
"compute": "comp.com",
"protocol": "ces.com",
"storage": "strg.com"
}
| no | | [dns\_instance\_id](#input\_dns\_instance\_id) | IBM Cloud HPC DNS service instance id. | `string` | `null` | no | | [dynamic\_compute\_instances](#input\_dynamic\_compute\_instances) | MaxNumber of instances to be launched for compute cluster. |
list(
object({
profile = string
count = number
image = string
})
)
|
[
{
"count": 1024,
"image": "ibm-redhat-8-10-minimal-amd64-2",
"profile": "cx2-2x4"
}
]
| no | -| [enable\_atracker](#input\_enable\_atracker) | Enable Activity tracker | `bool` | `true` | no | | [enable\_bastion](#input\_enable\_bastion) | The solution supports multiple ways to connect to your HPC cluster for example, using bastion node, via VPN or direct connection. If connecting to the HPC cluster via VPN or direct connection, set this value to false. | `bool` | `true` | no | | [enable\_cos\_integration](#input\_enable\_cos\_integration) | Integrate COS with HPC solution | `bool` | `true` | no | | [enable\_deployer](#input\_enable\_deployer) | Deployer should be only used for better deployment performance | `bool` | `false` | no | | [enable\_vpc\_flow\_logs](#input\_enable\_vpc\_flow\_logs) | Enable Activity tracker | `bool` | `true` | no | | [enable\_vpn](#input\_enable\_vpn) | The solution supports multiple ways to connect to your HPC cluster for example, using bastion node, via VPN or direct connection. If connecting to the HPC cluster via VPN, set this value to true. | `bool` | `false` | no | +| [existing\_resource\_group](#input\_existing\_resource\_group) | String describing resource groups to create or reference | `string` | `"Default"` | no | | [file\_shares](#input\_file\_shares) | Custom file shares to access shared storage |
list(
object({
mount_path = string,
size = number,
iops = number
})
)
|
[
{
"iops": 1000,
"mount_path": "/mnt/binaries",
"size": 100
},
{
"iops": 1000,
"mount_path": "/mnt/data",
"size": 100
}
]
| no | | [hpcs\_instance\_name](#input\_hpcs\_instance\_name) | Hyper Protect Crypto Service instance | `string` | `null` | no | | [ibm\_customer\_number](#input\_ibm\_customer\_number) | Comma-separated list of the IBM Customer Number(s) (ICN) that is used for the Bring Your Own License (BYOL) entitlement check. For more information on how to find your ICN, see [What is my IBM Customer Number (ICN)?](https://www.ibm.com/support/pages/what-my-ibm-customer-number-icn). | `string` | n/a | yes | | [ibmcloud\_api\_key](#input\_ibmcloud\_api\_key) | IBM Cloud API Key that will be used for authentication in scripts run in this module. Only required if certain options are required. | `string` | n/a | yes | -| [key\_management](#input\_key\_management) | null/key\_protect/hs\_crypto | `string` | `"key_protect"` | no | +| [key\_management](#input\_key\_management) | Set the value as key\_protect to enable customer managed encryption for boot volume and file share. If the key\_management is set as null, IBM Cloud resources will be always be encrypted through provider managed. | `string` | `"key_protect"` | no | +| [kms\_instance\_name](#input\_kms\_instance\_name) | Provide the name of the existing Key Protect instance associated with the Key Management Service. Note: To use existing kms\_instance\_name set key\_management as key\_protect. The name can be found under the details of the KMS, see [View key-protect ID](https://cloud.ibm.com/docs/key-protect?topic=key-protect-retrieve-instance-ID&interface=ui). | `string` | `null` | no | +| [kms\_key\_name](#input\_kms\_key\_name) | Provide the existing kms key name that you want to use for the IBM Cloud HPC cluster. Note: kms\_key\_name to be considered only if key\_management value is set as key\_protect.(for example kms\_key\_name: my-encryption-key). | `string` | `null` | no | | [management\_instances](#input\_management\_instances) | Number of instances to be launched for management. |
list(
object({
profile = string
count = number
image = string
})
)
|
[
{
"count": 2,
"image": "ibm-redhat-8-10-minimal-amd64-2",
"profile": "cx2-2x4"
}
]
| no | | [network\_cidr](#input\_network\_cidr) | Network CIDR for the VPC. This is used to manage network ACL rules for cluster provisioning. | `string` | `"10.0.0.0/8"` | no | -| [nsd\_details](#input\_nsd\_details) | Storage scale NSD details |
list(
object({
profile = string
capacity = optional(number)
iops = optional(number)
})
)
|
[
{
"capacity": 100,
"iops": 1000,
"profile": "custom"
}
]
| no |
+| [observability\_atracker\_enable](#input\_observability\_atracker\_enable) | Activity Tracker Event Routing to configure how to route auditing events. While multiple Activity Tracker instances can be created, only one tracker is needed to capture all events. Creating additional trackers is unnecessary if an existing Activity Tracker is already integrated with a COS bucket. In such cases, set the value to false, as all events can be monitored and accessed through the existing Activity Tracker. | `bool` | `true` | no |
+| [observability\_atracker\_target\_type](#input\_observability\_atracker\_target\_type) | All the events will be stored in either COS bucket or Cloud Logs on the basis of user input, so customers can retrieve or ingest them in their system. | `string` | `"cloudlogs"` | no |
+| [observability\_enable\_metrics\_routing](#input\_observability\_enable\_metrics\_routing) | Enable metrics routing to manage metrics at the account-level by configuring targets and routes that define where data points are routed. | `bool` | `false` | no |
+| [observability\_enable\_platform\_logs](#input\_observability\_enable\_platform\_logs) | Setting this to true will create a tenant in the same region that the Cloud Logs instance is provisioned to enable platform logs for that region. NOTE: You can only have 1 tenant per region in an account. | `bool` | `false` | no |
+| [observability\_logs\_enable\_for\_compute](#input\_observability\_logs\_enable\_for\_compute) | Set false to disable IBM Cloud Logs integration. If enabled, infrastructure and LSF application logs from Compute Nodes will be ingested. | `bool` | `false` | no |
+| [observability\_logs\_enable\_for\_management](#input\_observability\_logs\_enable\_for\_management) | Set false to disable IBM Cloud Logs integration. If enabled, infrastructure and LSF application logs from Management Nodes will be ingested. | `bool` | `false` | no |
+| [observability\_logs\_retention\_period](#input\_observability\_logs\_retention\_period) | The number of days IBM Cloud Logs will retain the logs data in Priority insights. Allowed values: 7, 14, 30, 60, 90. | `number` | `7` | no |
+| [observability\_monitoring\_enable](#input\_observability\_monitoring\_enable) | Set false to disable IBM Cloud Monitoring integration. If enabled, infrastructure and LSF application metrics from Management Nodes will be ingested. | `bool` | `true` | no |
+| [observability\_monitoring\_on\_compute\_nodes\_enable](#input\_observability\_monitoring\_on\_compute\_nodes\_enable) | Set false to disable IBM Cloud Monitoring integration. If enabled, infrastructure metrics from Compute Nodes will be ingested. | `bool` | `false` | no |
+| [observability\_monitoring\_plan](#input\_observability\_monitoring\_plan) | Type of service plan for IBM Cloud Monitoring instance. You can choose one of the following: lite, graduated-tier. For all details visit [IBM Cloud Monitoring Service Plans](https://cloud.ibm.com/docs/monitoring?topic=monitoring-service_plans). | `string` | `"graduated-tier"` | no |
| [override](#input\_override) | Override default values with custom JSON template. This uses the file `override.json` to allow users to create a fully customized environment. | `bool` | `false` | no |
| [override\_json\_string](#input\_override\_json\_string) | Override default values with a JSON object. Any JSON other than an empty string overrides other configuration changes. | `string` | `null` | no |
| [placement\_strategy](#input\_placement\_strategy) | VPC placement groups to create (null / host\_spread / power\_spread) | `string` | `null` | no |
| [prefix](#input\_prefix) | A unique identifier for resources. Must begin with a letter and end with a letter or number. This prefix will be prepended to any resources provisioned by this template. Prefixes must be 16 or fewer characters. | `string` | `"lsf"` | no |
| [protocol\_instances](#input\_protocol\_instances) | Number of instances to be launched for protocol hosts. |
list(
object({
profile = string
count = number
image = string
})
)
|
[
{
"count": 2,
"image": "ibm-redhat-8-10-minimal-amd64-2",
"profile": "bx2-2x8"
}
]
| no | -| [protocol\_subnets\_cidr](#input\_protocol\_subnets\_cidr) | Subnet CIDR block to launch the storage cluster host. | `list(string)` |
[
"10.10.40.0/24",
"10.20.40.0/24",
"10.30.40.0/24"
]
| no | -| [resource\_group](#input\_resource\_group) | String describing resource groups to create or reference | `string` | `"Default"` | no | +| [protocol\_subnets\_cidr](#input\_protocol\_subnets\_cidr) | Subnet CIDR block to launch the storage cluster host. | `string` | `"10.10.40.0/24"` | no | +| [scc\_enable](#input\_scc\_enable) | Flag to enable SCC instance creation. If true, an instance of SCC (Security and Compliance Center) will be created. | `bool` | `true` | no | +| [scc\_event\_notification\_plan](#input\_scc\_event\_notification\_plan) | Event Notifications Instance plan to be used (it's used with S.C.C. instance), possible values 'lite' and 'standard'. | `string` | `"lite"` | no | +| [scc\_location](#input\_scc\_location) | Location where the SCC instance is provisioned (possible choices 'us-south', 'eu-de', 'ca-tor', 'eu-es') | `string` | `"us-south"` | no | +| [scc\_profile](#input\_scc\_profile) | Profile to be set on the SCC Instance (accepting empty, 'CIS IBM Cloud Foundations Benchmark' and 'IBM Cloud Framework for Financial Services') | `string` | `"CIS IBM Cloud Foundations Benchmark v1.1.0"` | no | +| [skip\_flowlogs\_s2s\_auth\_policy](#input\_skip\_flowlogs\_s2s\_auth\_policy) | Skip auth policy between flow logs service and COS instance, set to true if this policy is already in place on account. | `bool` | `false` | no | +| [skip\_iam\_authorization\_policy](#input\_skip\_iam\_authorization\_policy) | Set to false if authorization policy is required for VPC block storage volumes to access kms. This can be set to true if authorization policy already exists. For more information on how to create authorization policy manually, see [creating authorization policies for block storage volume](https://cloud.ibm.com/docs/vpc?topic=vpc-block-s2s-auth&interface=ui). | `bool` | `false` | no | +| [skip\_kms\_s2s\_auth\_policy](#input\_skip\_kms\_s2s\_auth\_policy) | Skip auth policy between KMS service and COS instance, set to true if this policy is already in place on account. | `bool` | `false` | no | | [ssh\_keys](#input\_ssh\_keys) | The key pair to use to access the HPC cluster. | `list(string)` | `null` | no | | [static\_compute\_instances](#input\_static\_compute\_instances) | Min Number of instances to be launched for compute cluster. |
list(
object({
profile = string
count = number
image = string
})
)
|
[
{
"count": 1,
"image": "ibm-redhat-8-10-minimal-amd64-2",
"profile": "cx2-2x4"
}
]
| no | | [storage\_gui\_password](#input\_storage\_gui\_password) | Password for storage cluster GUI | `string` | `"hpc@IBMCloud"` | no | | [storage\_gui\_username](#input\_storage\_gui\_username) | GUI user to perform system management and monitoring tasks on storage cluster. | `string` | `"admin"` | no | | [storage\_instances](#input\_storage\_instances) | Number of instances to be launched for storage cluster. |
list(
object({
profile = string
count = number
image = string
filesystem_name = optional(string)
})
)
|
[
{
"count": 2,
"filesystem_name": "fs1",
"image": "ibm-redhat-8-10-minimal-amd64-2",
"profile": "bx2-2x8"
}
]
| no | | [storage\_ssh\_keys](#input\_storage\_ssh\_keys) | The key pair to use to launch the storage cluster host. | `list(string)` | `null` | no | -| [storage\_subnets\_cidr](#input\_storage\_subnets\_cidr) | Subnet CIDR block to launch the storage cluster host. | `list(string)` |
[
"10.10.30.0/24",
"10.20.30.0/24",
"10.30.30.0/24"
]
| no | +| [storage\_subnets\_cidr](#input\_storage\_subnets\_cidr) | Subnet CIDR block to launch the storage cluster host. | `string` | `"10.10.30.0/24"` | no | | [vpc](#input\_vpc) | Name of an existing VPC in which the cluster resources will be deployed. If no value is given, then a new VPC will be provisioned for the cluster. [Learn more](https://cloud.ibm.com/docs/vpc) | `string` | `null` | no | | [vpn\_peer\_address](#input\_vpn\_peer\_address) | The peer public IP address to which the VPN will be connected. | `string` | `null` | no | | [vpn\_peer\_cidr](#input\_vpn\_peer\_cidr) | The peer CIDRs (e.g., 192.168.0.0/24) to which the VPN will be connected. | `list(string)` | `null` | no | | [vpn\_preshared\_key](#input\_vpn\_preshared\_key) | The pre-shared key for the VPN. | `string` | `null` | no | -| [zones](#input\_zones) | Region where VPC will be created. To find your VPC region, use `ibmcloud is regions` command to find available regions. | `list(string)` | n/a | yes | +| [zone](#input\_zone) | Zone where VPC will be created. | `string` | n/a | yes | ## Outputs -No outputs. +| Name | Description | +|------|-------------| +| [lsf](#output\_lsf) | LSF details | diff --git a/solutions/lsf/catalogValidationValues.json.template b/solutions/lsf/catalogValidationValues.json.template index a5642a41..829063ab 100644 --- a/solutions/lsf/catalogValidationValues.json.template +++ b/solutions/lsf/catalogValidationValues.json.template @@ -1,7 +1,7 @@ { "ibmcloud_api_key": $VALIDATION_APIKEY, "prefix": $PREFIX, - "zones": "[\"ca-tor-1\"]", - "resource_group": "geretain-hpc-rg", + "zone": "ca-tor-1", + "existing_resource_group": "geretain-hpc-rg", "ssh_keys": "[\"geretain-hpc-ssh-key\"]" } diff --git a/solutions/lsf/datasource.tf b/solutions/lsf/datasource.tf deleted file mode 100644 index f6fada1e..00000000 --- a/solutions/lsf/datasource.tf +++ /dev/null @@ -1,22 +0,0 @@ -# Future use -/* -data "ibm_is_region" "itself" { - name = local.region -} - -data "ibm_is_zone" "itself" { - name = var.zones[0] - region = data.ibm_is_region.itself.name -} -*/ - -data "ibm_is_vpc" "itself" { - count = var.vpc == null ? 
0 : 1 - name = var.vpc -} -/* -data "ibm_is_subnet" "itself" { - count = length(local.subnets) - identifier = local.subnets[count.index]["id"] -} -*/ diff --git a/solutions/lsf/locals.tf b/solutions/lsf/locals.tf index e25da8b4..d0874f4a 100644 --- a/solutions/lsf/locals.tf +++ b/solutions/lsf/locals.tf @@ -20,7 +20,7 @@ locals { locals { config = { - resource_group = var.resource_group + existing_resource_group = var.existing_resource_group allowed_cidr = var.allowed_cidr deployer_instance_profile = var.deployer_instance_profile ssh_keys = var.ssh_keys @@ -62,7 +62,7 @@ locals { storage_instances = var.storage_instances storage_ssh_keys = var.storage_ssh_keys storage_subnets_cidr = var.storage_subnets_cidr - vpc = var.vpc + vpc_name = var.vpc_name vpn_peer_address = var.vpn_peer_address vpn_peer_cidr = var.vpn_peer_cidr vpn_preshared_key = var.vpn_preshared_key @@ -81,14 +81,14 @@ locals { scc_enable = var.scc_enable scc_profile = var.scc_profile # scc_profile_version = var.scc_profile_version - scc_location = var.scc_location - scc_event_notification_plan = var.scc_event_notification_plan - skip_flowlogs_s2s_auth_policy = var.skip_flowlogs_s2s_auth_policy - skip_iam_authorization_policy = var.skip_iam_authorization_policy - skip_kms_s2s_auth_policy = var.skip_kms_s2s_auth_policy + scc_location = var.scc_location + scc_event_notification_plan = var.scc_event_notification_plan + skip_flowlogs_s2s_auth_policy = var.skip_flowlogs_s2s_auth_policy + skip_iam_authorization_policy = var.skip_iam_authorization_policy + skip_kms_s2s_auth_policy = var.skip_kms_s2s_auth_policy # New Variables - ibmcloud_api_key = var.ibmcloud_api_key + ibmcloud_api_key = var.ibmcloud_api_key } } @@ -96,7 +96,7 @@ locals { # Compile Environment for Config output locals { env = { - resource_group = lookup(local.override[local.override_type], "resource_group", local.config.resource_group) + existing_resource_group = lookup(local.override[local.override_type], "existing_resource_group", local.config.existing_resource_group) allowed_cidr = lookup(local.override[local.override_type], "allowed_cidr", local.config.allowed_cidr) deployer_instance_profile = lookup(local.override[local.override_type], "deployer_instance_profile", local.config.deployer_instance_profile) ssh_keys = lookup(local.override[local.override_type], "ssh_keys", local.config.ssh_keys) @@ -138,7 +138,7 @@ locals { storage_instances = lookup(local.override[local.override_type], "storage_instances", local.config.storage_instances) storage_ssh_keys = lookup(local.override[local.override_type], "storage_ssh_keys", local.config.storage_ssh_keys) storage_subnets_cidr = lookup(local.override[local.override_type], "storage_subnets_cidr", local.config.storage_subnets_cidr) - vpc = lookup(local.override[local.override_type], "vpc", local.config.vpc) + vpc_name = lookup(local.override[local.override_type], "vpc_name", local.config.vpc_name) vpn_peer_address = lookup(local.override[local.override_type], "vpn_peer_address", local.config.vpn_peer_address) vpn_peer_cidr = lookup(local.override[local.override_type], "vpn_peer_cidr", local.config.vpn_peer_cidr) vpn_preshared_key = lookup(local.override[local.override_type], "vpn_preshared_key", local.config.vpn_preshared_key) @@ -157,12 +157,12 @@ locals { scc_enable = lookup(local.override[local.override_type], "scc_enable", local.config.scc_enable) scc_profile = lookup(local.override[local.override_type], "scc_profile", local.config.scc_profile) # scc_profile_version = lookup(local.override[local.override_type], 
"scc_profile_version", local.config.scc_profile_version) - scc_location = lookup(local.override[local.override_type], "scc_location", local.config.scc_location) - scc_event_notification_plan = lookup(local.override[local.override_type], "scc_event_notification_plan", local.config.scc_event_notification_plan) - skip_flowlogs_s2s_auth_policy = lookup(local.override[local.override_type], "skip_flowlogs_s2s_auth_policy", local.config.skip_flowlogs_s2s_auth_policy) - skip_iam_authorization_policy = lookup(local.override[local.override_type], "skip_iam_authorization_policy", local.config.skip_iam_authorization_policy) - skip_kms_s2s_auth_policy = lookup(local.override[local.override_type], "skip_kms_s2s_auth_policy", local.config.skip_kms_s2s_auth_policy) + scc_location = lookup(local.override[local.override_type], "scc_location", local.config.scc_location) + scc_event_notification_plan = lookup(local.override[local.override_type], "scc_event_notification_plan", local.config.scc_event_notification_plan) + skip_flowlogs_s2s_auth_policy = lookup(local.override[local.override_type], "skip_flowlogs_s2s_auth_policy", local.config.skip_flowlogs_s2s_auth_policy) + skip_iam_authorization_policy = lookup(local.override[local.override_type], "skip_iam_authorization_policy", local.config.skip_iam_authorization_policy) + skip_kms_s2s_auth_policy = lookup(local.override[local.override_type], "skip_kms_s2s_auth_policy", local.config.skip_kms_s2s_auth_policy) # New Variables - ibmcloud_api_key = lookup(local.override[local.override_type], "ibmcloud_api_key", local.config.ibmcloud_api_key) + ibmcloud_api_key = lookup(local.override[local.override_type], "ibmcloud_api_key", local.config.ibmcloud_api_key) } } diff --git a/solutions/lsf/main.tf b/solutions/lsf/main.tf index 99750186..be07657e 100644 --- a/solutions/lsf/main.tf +++ b/solutions/lsf/main.tf @@ -1,74 +1,76 @@ module "lsf" { - source = "./../.." 
- scheduler = "LSF" - ibm_customer_number = var.ibm_customer_number - zones = [var.zone] - allowed_cidr = var.allowed_cidr - prefix = local.env.prefix - ssh_keys = local.env.ssh_keys - resource_group = local.env.resource_group - bastion_ssh_keys = local.env.bastion_ssh_keys - bastion_subnets_cidr = [local.env.bastion_subnets_cidr] - compute_gui_password = local.env.compute_gui_password - compute_gui_username = local.env.compute_gui_username - compute_ssh_keys = local.env.compute_ssh_keys - compute_subnets_cidr = [local.env.compute_subnets_cidr] - cos_instance_name = local.env.cos_instance_name - dns_custom_resolver_id = local.env.dns_custom_resolver_id - dns_instance_id = local.env.dns_instance_id - dns_domain_names = local.env.dns_domain_names - dynamic_compute_instances = local.env.dynamic_compute_instances - enable_bastion = local.env.enable_bastion - bastion_image = local.env.bastion_image - bastion_instance_profile = local.env.bastion_instance_profile - enable_deployer = local.env.enable_deployer - deployer_image = local.env.deployer_image - deployer_instance_profile = local.env.deployer_instance_profile - enable_cos_integration = local.env.enable_cos_integration - enable_vpc_flow_logs = local.env.enable_vpc_flow_logs - enable_vpn = local.env.enable_vpn - file_shares = local.env.file_shares - hpcs_instance_name = local.env.hpcs_instance_name - key_management = local.env.key_management - client_instances = local.env.client_instances - client_ssh_keys = local.env.client_ssh_keys - client_subnets_cidr = [local.env.client_subnets_cidr] - management_instances = local.env.management_instances - network_cidr = local.env.network_cidr - placement_strategy = local.env.placement_strategy - protocol_instances = local.env.protocol_instances - protocol_subnets_cidr = [local.env.protocol_subnets_cidr] - static_compute_instances = local.env.static_compute_instances - storage_gui_password = local.env.storage_gui_password - storage_gui_username = local.env.storage_gui_username - storage_instances = local.env.storage_instances - storage_ssh_keys = local.env.storage_ssh_keys - storage_subnets_cidr = [local.env.storage_subnets_cidr] - vpc = local.env.vpc - vpn_peer_address = local.env.vpn_peer_address - vpn_peer_cidr = local.env.vpn_peer_cidr - vpn_preshared_key = local.env.vpn_preshared_key - kms_instance_name = local.env.kms_instance_name - kms_key_name = local.env.kms_key_name - observability_atracker_enable = local.env.observability_atracker_enable - observability_atracker_target_type = local.env.observability_atracker_target_type - observability_monitoring_enable = local.env.observability_monitoring_enable - observability_logs_enable_for_management = local.env.observability_logs_enable_for_management - observability_logs_enable_for_compute = local.env.observability_logs_enable_for_compute - observability_enable_platform_logs = local.env.observability_enable_platform_logs - observability_enable_metrics_routing = local.env.observability_enable_metrics_routing - observability_logs_retention_period = local.env.observability_logs_retention_period - observability_monitoring_on_compute_nodes_enable = local.env.observability_monitoring_on_compute_nodes_enable - observability_monitoring_plan = local.env.observability_monitoring_plan - scc_enable = local.env.scc_enable - scc_profile = local.env.scc_profile - # scc_profile_version = local.env.scc_profile_version - scc_location = local.env.scc_location - scc_event_notification_plan = local.env.scc_event_notification_plan - skip_flowlogs_s2s_auth_policy = 
local.env.skip_flowlogs_s2s_auth_policy - skip_iam_authorization_policy = local.env.skip_iam_authorization_policy - skip_kms_s2s_auth_policy = local.env.skip_kms_s2s_auth_policy + source = "./../.." + ibm_customer_number = var.ibm_customer_number + zones = [var.zone] + allowed_cidr = var.allowed_cidr + prefix = local.env.prefix + ssh_keys = local.env.ssh_keys + existing_resource_group = local.env.existing_resource_group + bastion_ssh_keys = local.env.bastion_ssh_keys + bastion_subnets_cidr = [local.env.bastion_subnets_cidr] + compute_ssh_keys = local.env.compute_ssh_keys + compute_subnets_cidr = [local.env.compute_subnets_cidr] + cos_instance_name = local.env.cos_instance_name + dns_custom_resolver_id = local.env.dns_custom_resolver_id + dns_instance_id = local.env.dns_instance_id + dns_domain_names = local.env.dns_domain_names + dynamic_compute_instances = local.env.dynamic_compute_instances + enable_bastion = local.env.enable_bastion + bastion_image = local.env.bastion_image + bastion_instance_profile = local.env.bastion_instance_profile + enable_deployer = local.env.enable_deployer + deployer_image = local.env.deployer_image + deployer_instance_profile = local.env.deployer_instance_profile + enable_cos_integration = local.env.enable_cos_integration + enable_vpc_flow_logs = local.env.enable_vpc_flow_logs + enable_vpn = local.env.enable_vpn + file_shares = local.env.file_shares + key_management = local.env.key_management + client_instances = local.env.client_instances + client_ssh_keys = local.env.client_ssh_keys + management_instances = local.env.management_instances + network_cidr = local.env.network_cidr + placement_strategy = local.env.placement_strategy + protocol_instances = local.env.protocol_instances + protocol_subnets_cidr = [local.env.protocol_subnets_cidr] + static_compute_instances = local.env.static_compute_instances + storage_instances = local.env.storage_instances + storage_ssh_keys = local.env.storage_ssh_keys + storage_subnets_cidr = [local.env.storage_subnets_cidr] + vpc_name = local.env.vpc_name + vpn_peer_address = local.env.vpn_peer_address + vpn_peer_cidr = local.env.vpn_peer_cidr + vpn_preshared_key = local.env.vpn_preshared_key + kms_instance_name = local.env.kms_instance_name + kms_key_name = local.env.kms_key_name + observability_atracker_enable = local.env.observability_atracker_enable + observability_atracker_target_type = local.env.observability_atracker_target_type + observability_monitoring_enable = local.env.observability_monitoring_enable + observability_logs_enable_for_management = local.env.observability_logs_enable_for_management + observability_logs_enable_for_compute = local.env.observability_logs_enable_for_compute + observability_enable_platform_logs = local.env.observability_enable_platform_logs + observability_enable_metrics_routing = local.env.observability_enable_metrics_routing + observability_logs_retention_period = local.env.observability_logs_retention_period + observability_monitoring_plan = local.env.observability_monitoring_plan + scc_enable = local.env.scc_enable + scc_profile = local.env.scc_profile + scc_location = local.env.scc_location + scc_event_notification_plan = local.env.scc_event_notification_plan + skip_flowlogs_s2s_auth_policy = local.env.skip_flowlogs_s2s_auth_policy + skip_iam_authorization_policy = local.env.skip_iam_authorization_policy + skip_kms_s2s_auth_policy = local.env.skip_kms_s2s_auth_policy # New Variables - ibmcloud_api_key = local.env.ibmcloud_api_key + ibmcloud_api_key = local.env.ibmcloud_api_key + + # 
scheduler = "LSF" + # compute_gui_password = local.env.compute_gui_password + # compute_gui_username = local.env.compute_gui_username + # hpcs_instance_name = local.env.hpcs_instance_name + # client_subnets_cidr = [local.env.client_subnets_cidr] + # storage_gui_password = local.env.storage_gui_password + # storage_gui_username = local.env.storage_gui_username + # observability_monitoring_on_compute_nodes_enable = local.env.observability_monitoring_on_compute_nodes_enable + # scc_profile_version = local.env.scc_profile_version + } diff --git a/solutions/lsf/outputs.tf b/solutions/lsf/outputs.tf index aac8ff51..cc61dcf9 100644 --- a/solutions/lsf/outputs.tf +++ b/solutions/lsf/outputs.tf @@ -1,5 +1,6 @@ output "lsf" { - value = module.lsf.file_storage + description = "LSF details" + value = module.lsf.file_storage #sensitive = true } @@ -10,4 +11,4 @@ output "lsf" { # output "lsf" { # value = module.lsf -# } \ No newline at end of file +# } diff --git a/solutions/lsf/override.json b/solutions/lsf/override.json index 3b749691..1638e104 100644 --- a/solutions/lsf/override.json +++ b/solutions/lsf/override.json @@ -1,113 +1,113 @@ { - "prefix":"lsf", - "resource_group": "Default", - "vpc": null, - "network_cidr":"10.0.0.0/8", - "placement_strategy":null, - "ssh_keys":null, - "enable_bastion":true, - "enable_deployer":false, - "deployer_instance_profile":"mx2-4x32", - "bastion_ssh_keys": null, - "bastion_subnets_cidr":"10.0.0.0/24", - "enable_vpn":false, - "vpn_peer_cidr":null, - "vpn_peer_address":null, - "vpn_preshared_key":null, - "client_subnets_cidr":"10.10.10.0/24", - "client_ssh_keys": null, - "client_instances":[ - { - "profile":"cx2-2x4", - "count":2, - "image":"ibm-redhat-8-10-minimal-amd64-2" - } - ], - "compute_subnets_cidr":"10.10.20.0/24", - "compute_ssh_keys": null, - "management_instances":[ - { - "profile":"cx2-2x4", - "count":3, - "image":"ibm-redhat-8-10-minimal-amd64-2" - } - ], - "static_compute_instances":[ - { - "profile":"cx2-2x4", - "count":0, - "image":"ibm-redhat-8-10-minimal-amd64-2" - } - ], - "dynamic_compute_instances":[ - { - "profile":"cx2-2x4", - "count":5000, - "image":"ibm-redhat-8-10-minimal-amd64-2" - } - ], - "compute_gui_username":"admin", - "compute_gui_password": "hpc@IBMCloud", - "storage_subnets_cidr":"10.10.30.0/24", - "storage_ssh_keys": null, - "storage_instances":[ - { - "profile":"cx2-2x4", - "count":2, - "image":"ibm-redhat-8-10-minimal-amd64-2" - } - ], - "protocol_subnets_cidr":"10.10.40.0/24", - "protocol_instances":[ - { - "profile":"cx2-2x4", - "count":2, - "image":"ibm-redhat-8-10-minimal-amd64-2" - } - ], - "storage_gui_username":"admin", - "storage_gui_password": "hpc@IBMCloud", - "file_shares":[ - { - "mount_path":"/mnt/binaries", - "size":100, - "iops":1000 - }, - { - "mount_path":"/mnt/data", - "size":100, - "iops":1000 - } - ], - "dns_instance_id":null, - "dns_custom_resolver_id":null, - "dns_domain_names":{ - "compute":"comp.com", - "storage":"strg.com", - "protocol":"ces.com" - }, - "enable_cos_integration":true, - "cos_instance_name":null, - "enable_vpc_flow_logs":true, - "key_management":"key_protect", - "hpcs_instance_name":null, - "kms_instance_name":null, - "kms_key_name":null, - "observability_atracker_enable":true, - "observability_atracker_target_type":"cloudlogs", - "observability_monitoring_enable":true, - "observability_logs_enable_for_management":false, - "observability_logs_enable_for_compute":false, - "observability_enable_platform_logs":false, - "observability_enable_metrics_routing":false, - 
"observability_logs_retention_period":7, - "observability_monitoring_on_compute_nodes_enable":false, - "observability_monitoring_plan":"graduated-tier", - "scc_enable":true, - "scc_profile":"CIS IBM Cloud Foundations Benchmark v1.1.0", - "scc_location":"us-south", - "scc_event_notification_plan":"lite", - "skip_flowlogs_s2s_auth_policy":false, - "skip_kms_s2s_auth_policy":false, - "skip_iam_authorization_policy":false - } + "prefix": "lsf", + "existing_resource_group": "Default", + "vpc_name": null, + "network_cidr": "10.0.0.0/8", + "placement_strategy": null, + "ssh_keys": null, + "enable_bastion": true, + "enable_deployer": false, + "deployer_instance_profile": "mx2-4x32", + "bastion_ssh_keys": null, + "bastion_subnets_cidr": "10.0.0.0/24", + "enable_vpn": false, + "vpn_peer_cidr": null, + "vpn_peer_address": null, + "vpn_preshared_key": null, + "client_subnets_cidr": "10.10.10.0/24", + "client_ssh_keys": null, + "client_instances": [ + { + "profile": "cx2-2x4", + "count": 2, + "image": "ibm-redhat-8-10-minimal-amd64-2" + } + ], + "compute_subnets_cidr": "10.10.20.0/24", + "compute_ssh_keys": null, + "management_instances": [ + { + "profile": "cx2-2x4", + "count": 3, + "image": "ibm-redhat-8-10-minimal-amd64-2" + } + ], + "static_compute_instances": [ + { + "profile": "cx2-2x4", + "count": 0, + "image": "ibm-redhat-8-10-minimal-amd64-2" + } + ], + "dynamic_compute_instances": [ + { + "profile": "cx2-2x4", + "count": 5000, + "image": "ibm-redhat-8-10-minimal-amd64-2" + } + ], + "compute_gui_username": "admin", + "compute_gui_password": "hpc@IBMCloud", + "storage_subnets_cidr": "10.10.30.0/24", + "storage_ssh_keys": null, + "storage_instances": [ + { + "profile": "cx2-2x4", + "count": 2, + "image": "ibm-redhat-8-10-minimal-amd64-2" + } + ], + "protocol_subnets_cidr": "10.10.40.0/24", + "protocol_instances": [ + { + "profile": "cx2-2x4", + "count": 2, + "image": "ibm-redhat-8-10-minimal-amd64-2" + } + ], + "storage_gui_username": "admin", + "storage_gui_password": "hpc@IBMCloud", + "file_shares": [ + { + "mount_path": "/mnt/binaries", + "size": 100, + "iops": 1000 + }, + { + "mount_path": "/mnt/data", + "size": 100, + "iops": 1000 + } + ], + "dns_instance_id": null, + "dns_custom_resolver_id": null, + "dns_domain_names": { + "compute": "comp.com", + "storage": "strg.com", + "protocol": "ces.com" + }, + "enable_cos_integration": true, + "cos_instance_name": null, + "enable_vpc_flow_logs": true, + "key_management": "key_protect", + "hpcs_instance_name": null, + "kms_instance_name": null, + "kms_key_name": null, + "observability_atracker_enable": true, + "observability_atracker_target_type": "cloudlogs", + "observability_monitoring_enable": true, + "observability_logs_enable_for_management": false, + "observability_logs_enable_for_compute": false, + "observability_enable_platform_logs": false, + "observability_enable_metrics_routing": false, + "observability_logs_retention_period": 7, + "observability_monitoring_on_compute_nodes_enable": false, + "observability_monitoring_plan": "graduated-tier", + "scc_enable": true, + "scc_profile": "CIS IBM Cloud Foundations Benchmark v1.1.0", + "scc_location": "us-south", + "scc_event_notification_plan": "lite", + "skip_flowlogs_s2s_auth_policy": false, + "skip_kms_s2s_auth_policy": false, + "skip_iam_authorization_policy": false +} diff --git a/solutions/lsf/variables.tf b/solutions/lsf/variables.tf index cff1228d..5dbefad9 100644 --- a/solutions/lsf/variables.tf +++ b/solutions/lsf/variables.tf @@ -52,7 +52,7 @@ variable "prefix" { 
############################################################################## # Resource Groups Variables ############################################################################## -variable "resource_group" { +variable "existing_resource_group" { type = string default = "Default" description = "String describing resource groups to create or reference" @@ -62,7 +62,7 @@ variable "resource_group" { ############################################################################## # VPC Variables ############################################################################## -variable "vpc" { +variable "vpc_name" { type = string default = null description = "Name of an existing VPC in which the cluster resources will be deployed. If no value is given, then a new VPC will be provisioned for the cluster. [Learn more](https://cloud.ibm.com/docs/vpc)" @@ -263,57 +263,57 @@ variable "compute_gui_password" { description = "Password for compute cluster GUI" } -variable "cluster_id" { - type = string - default = "HPCCluster" - description = "Unique ID of the cluster used by LSF for configuration of resources. This can be up to 39 alphanumeric characters." - validation { - condition = 0 < length(var.cluster_id) && length(var.cluster_id) < 40 && can(regex("^[a-zA-Z0-9_.-]+$", var.cluster_id)) - error_message = "The ID can be up to 39 alphanumeric characters including the underscore (_), the hyphen (-), and the period (.) characters." - } -} +# variable "cluster_name" { +# type = string +# default = "HPCCluster" +# description = "Unique ID of the cluster used by LSF for configuration of resources. This can be up to 39 alphanumeric characters." +# validation { +# condition = 0 < length(var.cluster_name) && length(var.cluster_name) < 40 && can(regex("^[a-zA-Z0-9_.-]+$", var.cluster_name)) +# error_message = "The ID can be up to 39 alphanumeric characters including the underscore (_), the hyphen (-), and the period (.) characters." +# } +# } -variable "enable_hyperthreading" { - type = bool - default = true - description = "Setting this to true will enable hyper-threading in the worker nodes of the cluster (default). Otherwise, hyper-threading will be disabled." -} +# variable "enable_hyperthreading" { +# type = bool +# default = true +# description = "Setting this to true will enable hyper-threading in the worker nodes of the cluster (default). Otherwise, hyper-threading will be disabled." +# } -variable "enable_dedicated_host" { - type = bool - default = false - description = "Set to true to use dedicated hosts for compute hosts (default: false)." -} +# variable "enable_dedicated_host" { +# type = bool +# default = false +# description = "Set to true to use dedicated hosts for compute hosts (default: false)." +# } -variable "dedicated_host_placement" { - type = string - default = "spread" - description = "Specify 'pack' or 'spread'. The 'pack' option will deploy VSIs on one dedicated host until full before moving on to the next dedicated host." - validation { - condition = var.dedicated_host_placement == "spread" || var.dedicated_host_placement == "pack" - error_message = "Supported values for dedicated_host_placement: spread or pack." - } -} +# variable "dedicated_host_placement" { +# type = string +# default = "spread" +# description = "Specify 'pack' or 'spread'. The 'pack' option will deploy VSIs on one dedicated host until full before moving on to the next dedicated host." 
+# validation { +# condition = var.dedicated_host_placement == "spread" || var.dedicated_host_placement == "pack" +# error_message = "Supported values for dedicated_host_placement: spread or pack." +# } +# } -variable "enable_app_center" { - type = bool - default = false - description = "Set to true to install and enable use of the IBM Spectrum LSF Application Center GUI." -} +# variable "enable_app_center" { +# type = bool +# default = false +# description = "Set to true to install and enable use of the IBM Spectrum LSF Application Center GUI." +# } -variable "app_center_gui_password" { - type = string - default = "hpc@IBMCloud" - sensitive = true - description = "Password for IBM Spectrum LSF Application Center GUI." -} +# variable "app_center_gui_password" { +# type = string +# default = "hpc@IBMCloud" +# sensitive = true +# description = "Password for IBM Spectrum LSF Application Center GUI." +# } -variable "app_center_db_password" { - type = string - default = "hpc@IBMCloud" - sensitive = true - description = "Password for IBM Spectrum LSF Application Center database GUI." -} +# variable "app_center_db_password" { +# type = string +# default = "hpc@IBMCloud" +# sensitive = true +# description = "Password for IBM Spectrum LSF Application Center database GUI." +# } ############################################################################## # Storage Scale Variables @@ -480,11 +480,11 @@ variable "cos_instance_name" { description = "Exiting COS instance name" } -variable "enable_atracker" { - type = bool - default = true - description = "Enable Activity tracker" -} +# variable "enable_atracker" { +# type = bool +# default = true +# description = "Enable Activity tracker" +# } variable "enable_vpc_flow_logs" { type = bool diff --git a/solutions/lsf/version.tf b/solutions/lsf/version.tf index e081bcc1..d465bd59 100644 --- a/solutions/lsf/version.tf +++ b/solutions/lsf/version.tf @@ -5,10 +5,6 @@ terraform { source = "IBM-Cloud/ibm" version = ">= 1.68.1, < 2.0.0" } - ansible = { - source = "ansible/ansible" - version = "~> 1.3.0" - } } } diff --git a/solutions/scale/README.md b/solutions/scale/README.md index 6ee238a5..7561b14c 100644 --- a/solutions/scale/README.md +++ b/solutions/scale/README.md @@ -3,81 +3,77 @@ | Name | Version | |------|---------| | [terraform](#requirement\_terraform) | >= 1.3 | -| [ansible](#requirement\_ansible) | ~> 1.3.0 | | [ibm](#requirement\_ibm) | >= 1.68.1, < 2.0.0 | ## Providers -| Name | Version | -|------|---------| -| [ibm](#provider\_ibm) | 1.70.1 | +No providers. ## Modules | Name | Source | Version | |------|--------|---------| -| [lsf](#module\_lsf) | ./../.. | n/a | +| [scale](#module\_scale) | ./../.. | n/a | ## Resources -| Name | Type | -|------|------| -| [ibm_is_vpc.itself](https://registry.terraform.io/providers/IBM-Cloud/ibm/latest/docs/data-sources/is_vpc) | data source | +No resources. ## Inputs | Name | Description | Type | Default | Required | |------|-------------|------|---------|:--------:| | [allowed\_cidr](#input\_allowed\_cidr) | Network CIDR to access the VPC. This is used to manage network ACL rules for accessing the cluster. | `list(string)` | n/a | yes | +| [bastion\_image](#input\_bastion\_image) | The image to use to deploy the bastion host. 
| `string` | `"ibm-ubuntu-22-04-3-minimal-amd64-1"` | no | +| [bastion\_instance\_profile](#input\_bastion\_instance\_profile) | Deployer should be only used for better deployment performance | `string` | `"cx2-4x8"` | no | | [bastion\_ssh\_keys](#input\_bastion\_ssh\_keys) | The key pair to use to access the bastion host. | `list(string)` | `null` | no | -| [bastion\_subnets\_cidr](#input\_bastion\_subnets\_cidr) | Subnet CIDR block to launch the bastion host. | `list(string)` |
[
"10.0.0.0/24"
]
| no | +| [bastion\_subnets\_cidr](#input\_bastion\_subnets\_cidr) | Subnet CIDR block to launch the bastion host. | `string` | `"10.0.0.0/24"` | no | | [client\_instances](#input\_client\_instances) | Number of instances to be launched for client. |
list(
object({
profile = string
count = number
image = string
})
)
|
[
{
"count": 2,
"image": "ibm-redhat-8-10-minimal-amd64-2",
"profile": "cx2-2x4"
}
]
| no | | [client\_ssh\_keys](#input\_client\_ssh\_keys) | The key pair to use to launch the client host. | `list(string)` | `null` | no | -| [client\_subnets\_cidr](#input\_client\_subnets\_cidr) | Subnet CIDR block to launch the client host. | `list(string)` |
[
"10.10.10.0/24",
"10.20.10.0/24",
"10.30.10.0/24"
]
| no | +| [client\_subnets\_cidr](#input\_client\_subnets\_cidr) | Subnet CIDR block to launch the client host. | `string` | `"10.10.10.0/24"` | no | | [compute\_gui\_password](#input\_compute\_gui\_password) | Password for compute cluster GUI | `string` | `"hpc@IBMCloud"` | no | | [compute\_gui\_username](#input\_compute\_gui\_username) | GUI user to perform system management and monitoring tasks on compute cluster. | `string` | `"admin"` | no | +| [compute\_instances](#input\_compute\_instances) | Total Number of instances to be launched for compute cluster. |
list(
object({
profile = string
count = number
image = string
})
)
|
[
{
"count": 3,
"image": "ibm-redhat-8-10-minimal-amd64-2",
"profile": "cx2-2x4"
}
]
| no | | [compute\_ssh\_keys](#input\_compute\_ssh\_keys) | The key pair to use to launch the compute host. | `list(string)` | `null` | no | -| [compute\_subnets\_cidr](#input\_compute\_subnets\_cidr) | Subnet CIDR block to launch the compute cluster host. | `list(string)` |
[
"10.10.20.0/24",
"10.20.20.0/24",
"10.30.20.0/24"
]
| no |
+| [compute\_subnets\_cidr](#input\_compute\_subnets\_cidr) | Subnet CIDR block to launch the compute cluster host. | `string` | `"10.10.20.0/24"` | no |
| [cos\_instance\_name](#input\_cos\_instance\_name) | Existing COS instance name | `string` | `null` | no |
+| [deployer\_image](#input\_deployer\_image) | The image to use to deploy the deployer host. | `string` | `"ibm-redhat-8-10-minimal-amd64-2"` | no |
| [deployer\_instance\_profile](#input\_deployer\_instance\_profile) | Instance profile for the deployer host; the deployer is used only to improve deployment performance. | `string` | `"mx2-4x32"` | no |
| [dns\_custom\_resolver\_id](#input\_dns\_custom\_resolver\_id) | IBM Cloud DNS custom resolver id. | `string` | `null` | no |
| [dns\_domain\_names](#input\_dns\_domain\_names) | IBM Cloud HPC DNS domain names. |
object({
compute = string
storage = string
protocol = string
})
|
{
"compute": "comp.com",
"protocol": "ces.com",
"storage": "strg.com"
}
| no | | [dns\_instance\_id](#input\_dns\_instance\_id) | IBM Cloud HPC DNS service instance id. | `string` | `null` | no | -| [dynamic\_compute\_instances](#input\_dynamic\_compute\_instances) | MaxNumber of instances to be launched for compute cluster. |
list(
object({
profile = string
count = number
image = string
})
)
|
[
{
"count": 1024,
"image": "ibm-redhat-8-10-minimal-amd64-2",
"profile": "cx2-2x4"
}
]
| no |
| [enable\_atracker](#input\_enable\_atracker) | Enable Activity tracker | `bool` | `true` | no |
| [enable\_bastion](#input\_enable\_bastion) | The solution supports multiple ways to connect to your HPC cluster, for example using a bastion node, via VPN, or via direct connection. If connecting to the HPC cluster via VPN or direct connection, set this value to false. | `bool` | `true` | no |
| [enable\_cos\_integration](#input\_enable\_cos\_integration) | Integrate COS with HPC solution | `bool` | `true` | no |
| [enable\_deployer](#input\_enable\_deployer) | Enable the deployer host, which is used only to improve deployment performance. | `bool` | `false` | no |
| [enable\_vpc\_flow\_logs](#input\_enable\_vpc\_flow\_logs) | Enable VPC flow logs | `bool` | `true` | no |
| [enable\_vpn](#input\_enable\_vpn) | The solution supports multiple ways to connect to your HPC cluster, for example using a bastion node, via VPN, or via direct connection. If connecting to the HPC cluster via VPN, set this value to true. | `bool` | `false` | no |
-| [file\_shares](#input\_file\_shares) | Custom file shares to access shared storage |
list(
object({
mount_path = string,
size = number,
iops = number
})
)
|
[
{
"iops": 1000,
"mount_path": "/mnt/binaries",
"size": 100
},
{
"iops": 1000,
"mount_path": "/mnt/data",
"size": 100
}
]
| no | +| [existing\_resource\_group](#input\_existing\_resource\_group) | String describing resource groups to create or reference | `string` | `"Default"` | no | | [hpcs\_instance\_name](#input\_hpcs\_instance\_name) | Hyper Protect Crypto Service instance | `string` | `null` | no | | [ibm\_customer\_number](#input\_ibm\_customer\_number) | Comma-separated list of the IBM Customer Number(s) (ICN) that is used for the Bring Your Own License (BYOL) entitlement check. For more information on how to find your ICN, see [What is my IBM Customer Number (ICN)?](https://www.ibm.com/support/pages/what-my-ibm-customer-number-icn). | `string` | n/a | yes | | [ibmcloud\_api\_key](#input\_ibmcloud\_api\_key) | IBM Cloud API Key that will be used for authentication in scripts run in this module. Only required if certain options are required. | `string` | n/a | yes | -| [key\_management](#input\_key\_management) | null/key\_protect/hs\_crypto | `string` | `"key_protect"` | no | -| [management\_instances](#input\_management\_instances) | Number of instances to be launched for management. |
list(
object({
profile = string
count = number
image = string
})
)
|
[
{
"count": 2,
"image": "ibm-redhat-8-10-minimal-amd64-2",
"profile": "cx2-2x4"
}
]
| no | +| [key\_management](#input\_key\_management) | Set the value as key\_protect to enable customer managed encryption for boot volume and file share. If the key\_management is set as null, IBM Cloud resources will be always be encrypted through provider managed. | `string` | `"key_protect"` | no | | [network\_cidr](#input\_network\_cidr) | Network CIDR for the VPC. This is used to manage network ACL rules for cluster provisioning. | `string` | `"10.0.0.0/8"` | no | -| [nsd\_details](#input\_nsd\_details) | Storage scale NSD details |
list(
object({
profile = string
capacity = optional(number)
iops = optional(number)
})
)
|
[
{
"capacity": 100,
"iops": 1000,
"profile": "custom"
}
]
| no | | [override](#input\_override) | Override default values with custom JSON template. This uses the file `override.json` to allow users to create a fully customized environment. | `bool` | `false` | no | | [override\_json\_string](#input\_override\_json\_string) | Override default values with a JSON object. Any JSON other than an empty string overrides other configuration changes. | `string` | `null` | no | | [placement\_strategy](#input\_placement\_strategy) | VPC placement groups to create (null / host\_spread / power\_spread) | `string` | `null` | no | -| [prefix](#input\_prefix) | A unique identifier for resources. Must begin with a letter and end with a letter or number. This prefix will be prepended to any resources provisioned by this template. Prefixes must be 16 or fewer characters. | `string` | `"lsf"` | no | +| [prefix](#input\_prefix) | A unique identifier for resources. Must begin with a letter and end with a letter or number. This prefix will be prepended to any resources provisioned by this template. Prefixes must be 16 or fewer characters. | `string` | `"scale"` | no | | [protocol\_instances](#input\_protocol\_instances) | Number of instances to be launched for protocol hosts. |
list(
object({
profile = string
count = number
image = string
})
)
|
[
{
"count": 2,
"image": "ibm-redhat-8-10-minimal-amd64-2",
"profile": "bx2-2x8"
}
]
| no | -| [protocol\_subnets\_cidr](#input\_protocol\_subnets\_cidr) | Subnet CIDR block to launch the storage cluster host. | `list(string)` |
[
"10.10.40.0/24",
"10.20.40.0/24",
"10.30.40.0/24"
]
| no | -| [resource\_group](#input\_resource\_group) | String describing resource groups to create or reference | `string` | `"Default"` | no | +| [protocol\_subnets\_cidr](#input\_protocol\_subnets\_cidr) | Subnet CIDR block to launch the storage cluster host. | `string` | `"10.10.40.0/24"` | no | | [ssh\_keys](#input\_ssh\_keys) | The key pair to use to access the HPC cluster. | `list(string)` | `null` | no | -| [static\_compute\_instances](#input\_static\_compute\_instances) | Min Number of instances to be launched for compute cluster. |
list(
object({
profile = string
count = number
image = string
})
)
|
[
{
"count": 1,
"image": "ibm-redhat-8-10-minimal-amd64-2",
"profile": "cx2-2x4"
}
]
| no | | [storage\_gui\_password](#input\_storage\_gui\_password) | Password for storage cluster GUI | `string` | `"hpc@IBMCloud"` | no | | [storage\_gui\_username](#input\_storage\_gui\_username) | GUI user to perform system management and monitoring tasks on storage cluster. | `string` | `"admin"` | no | -| [storage\_instances](#input\_storage\_instances) | Number of instances to be launched for storage cluster. |
list(
object({
profile = string
count = number
image = string
filesystem_name = optional(string)
})
)
|
[
{
"count": 2,
"filesystem_name": "fs1",
"image": "ibm-redhat-8-10-minimal-amd64-2",
"profile": "bx2-2x8"
}
]
| no | +| [storage\_instances](#input\_storage\_instances) | Number of instances to be launched for storage cluster. |
list(
object({
profile = string
count = number
image = string
filesystem = optional(string)
})
)
|
[
{
"count": 2,
"filesystem": "fs1",
"image": "ibm-redhat-8-10-minimal-amd64-2",
"profile": "bx2-2x8"
}
]
| no | | [storage\_ssh\_keys](#input\_storage\_ssh\_keys) | The key pair to use to launch the storage cluster host. | `list(string)` | `null` | no | -| [storage\_subnets\_cidr](#input\_storage\_subnets\_cidr) | Subnet CIDR block to launch the storage cluster host. | `list(string)` |
[
"10.10.30.0/24",
"10.20.30.0/24",
"10.30.30.0/24"
]
| no | +| [storage\_subnets\_cidr](#input\_storage\_subnets\_cidr) | Subnet CIDR block to launch the storage cluster host. | `string` | `"10.10.30.0/24"` | no | | [vpc](#input\_vpc) | Name of an existing VPC in which the cluster resources will be deployed. If no value is given, then a new VPC will be provisioned for the cluster. [Learn more](https://cloud.ibm.com/docs/vpc) | `string` | `null` | no | | [vpn\_peer\_address](#input\_vpn\_peer\_address) | The peer public IP address to which the VPN will be connected. | `string` | `null` | no | | [vpn\_peer\_cidr](#input\_vpn\_peer\_cidr) | The peer CIDRs (e.g., 192.168.0.0/24) to which the VPN will be connected. | `list(string)` | `null` | no | | [vpn\_preshared\_key](#input\_vpn\_preshared\_key) | The pre-shared key for the VPN. | `string` | `null` | no | -| [zones](#input\_zones) | Region where VPC will be created. To find your VPC region, use `ibmcloud is regions` command to find available regions. | `list(string)` | n/a | yes | +| [zone](#input\_zone) | Zone where VPC will be created. | `string` | n/a | yes | ## Outputs -No outputs. +| Name | Description | +|------|-------------| +| [scale](#output\_scale) | Scale details | diff --git a/solutions/scale/catalogValidationValues.json.template b/solutions/scale/catalogValidationValues.json.template index a5642a41..829063ab 100644 --- a/solutions/scale/catalogValidationValues.json.template +++ b/solutions/scale/catalogValidationValues.json.template @@ -1,7 +1,7 @@ { "ibmcloud_api_key": $VALIDATION_APIKEY, "prefix": $PREFIX, - "zones": "[\"ca-tor-1\"]", - "resource_group": "geretain-hpc-rg", + "zone": "ca-tor-1", + "existing_resource_group": "geretain-hpc-rg", "ssh_keys": "[\"geretain-hpc-ssh-key\"]" } diff --git a/solutions/scale/datasource.tf b/solutions/scale/datasource.tf deleted file mode 100644 index f6fada1e..00000000 --- a/solutions/scale/datasource.tf +++ /dev/null @@ -1,22 +0,0 @@ -# Future use -/* -data "ibm_is_region" "itself" { - name = local.region -} - -data "ibm_is_zone" "itself" { - name = var.zones[0] - region = data.ibm_is_region.itself.name -} -*/ - -data "ibm_is_vpc" "itself" { - count = var.vpc == null ? 
0 : 1 - name = var.vpc -} -/* -data "ibm_is_subnet" "itself" { - count = length(local.subnets) - identifier = local.subnets[count.index]["id"] -} -*/ diff --git a/solutions/scale/locals.tf b/solutions/scale/locals.tf index 7ef13079..e20bee26 100644 --- a/solutions/scale/locals.tf +++ b/solutions/scale/locals.tf @@ -20,7 +20,7 @@ locals { locals { config = { - resource_group = var.resource_group + existing_resource_group = var.existing_resource_group allowed_cidr = var.allowed_cidr deployer_instance_profile = var.deployer_instance_profile ssh_keys = var.ssh_keys @@ -60,7 +60,7 @@ locals { storage_instances = var.storage_instances storage_ssh_keys = var.storage_ssh_keys storage_subnets_cidr = var.storage_subnets_cidr - vpc = var.vpc + vpc_name = var.vpc_name vpn_peer_address = var.vpn_peer_address vpn_peer_cidr = var.vpn_peer_cidr vpn_preshared_key = var.vpn_preshared_key @@ -71,7 +71,7 @@ locals { # Compile Environment for Config output locals { env = { - resource_group = lookup(local.override[local.override_type], "resource_group", local.config.resource_group) + existing_resource_group = lookup(local.override[local.override_type], "existing_resource_group", local.config.existing_resource_group) allowed_cidr = lookup(local.override[local.override_type], "allowed_cidr", local.config.allowed_cidr) ssh_keys = lookup(local.override[local.override_type], "ssh_keys", local.config.ssh_keys) bastion_ssh_keys = lookup(local.override[local.override_type], "bastion_ssh_keys", local.config.bastion_ssh_keys) @@ -110,7 +110,7 @@ locals { storage_instances = lookup(local.override[local.override_type], "storage_instances", local.config.storage_instances) storage_ssh_keys = lookup(local.override[local.override_type], "storage_ssh_keys", local.config.storage_ssh_keys) storage_subnets_cidr = lookup(local.override[local.override_type], "storage_subnets_cidr", local.config.storage_subnets_cidr) - vpc = lookup(local.override[local.override_type], "vpc", local.config.vpc) + vpc_name = lookup(local.override[local.override_type], "vpc_name", local.config.vpc_name) vpn_peer_address = lookup(local.override[local.override_type], "vpn_peer_address", local.config.vpn_peer_address) vpn_peer_cidr = lookup(local.override[local.override_type], "vpn_peer_cidr", local.config.vpn_peer_cidr) vpn_preshared_key = lookup(local.override[local.override_type], "vpn_preshared_key", local.config.vpn_preshared_key) diff --git a/solutions/scale/main.tf b/solutions/scale/main.tf index dbc45c6a..9dbe1125 100644 --- a/solutions/scale/main.tf +++ b/solutions/scale/main.tf @@ -1,16 +1,13 @@ module "scale" { source = "./../.." 
- scheduler = null ibm_customer_number = var.ibm_customer_number zones = [var.zone] allowed_cidr = var.allowed_cidr prefix = local.env.prefix ssh_keys = local.env.ssh_keys - resource_group = local.env.resource_group + existing_resource_group = local.env.existing_resource_group bastion_ssh_keys = local.env.bastion_ssh_keys bastion_subnets_cidr = [local.env.bastion_subnets_cidr] - compute_gui_password = local.env.compute_gui_password - compute_gui_username = local.env.compute_gui_username compute_ssh_keys = local.env.compute_ssh_keys compute_subnets_cidr = [local.env.compute_subnets_cidr] cos_instance_name = local.env.cos_instance_name @@ -27,23 +24,27 @@ module "scale" { enable_cos_integration = local.env.enable_cos_integration enable_vpc_flow_logs = local.env.enable_vpc_flow_logs enable_vpn = local.env.enable_vpn - hpcs_instance_name = local.env.hpcs_instance_name key_management = local.env.key_management client_instances = local.env.client_instances client_ssh_keys = local.env.client_ssh_keys - client_subnets_cidr = [local.env.client_subnets_cidr] network_cidr = local.env.network_cidr placement_strategy = local.env.placement_strategy protocol_instances = local.env.protocol_instances protocol_subnets_cidr = [local.env.protocol_subnets_cidr] static_compute_instances = local.env.compute_instances - storage_gui_password = local.env.storage_gui_password - storage_gui_username = local.env.storage_gui_username storage_instances = local.env.storage_instances storage_ssh_keys = local.env.storage_ssh_keys storage_subnets_cidr = [local.env.storage_subnets_cidr] - vpc = local.env.vpc + vpc_name = local.env.vpc_name vpn_peer_address = local.env.vpn_peer_address vpn_peer_cidr = local.env.vpn_peer_cidr vpn_preshared_key = local.env.vpn_preshared_key + + # scheduler = null + # compute_gui_password = local.env.compute_gui_password + # compute_gui_username = local.env.compute_gui_username + # hpcs_instance_name = local.env.hpcs_instance_name + # client_subnets_cidr = [local.env.client_subnets_cidr] + # storage_gui_password = local.env.storage_gui_password + # storage_gui_username = local.env.storage_gui_username } diff --git a/solutions/scale/outputs.tf b/solutions/scale/outputs.tf index 0756d967..18baf74d 100644 --- a/solutions/scale/outputs.tf +++ b/solutions/scale/outputs.tf @@ -1,3 +1,4 @@ output "scale" { - value = module.scale + description = "Scale details" + value = module.scale } diff --git a/solutions/scale/override.json b/solutions/scale/override.json index b30d5ab1..c55761d5 100644 --- a/solutions/scale/override.json +++ b/solutions/scale/override.json @@ -1,125 +1,125 @@ { - "prefix":"scale", - "resource_group": "Default", - "vpc": null, - "network_cidr":"10.0.0.0/8", - "placement_strategy":null, - "ssh_keys":null, - "enable_bastion":true, - "enable_deployer":true, - "deployer_instance_profile":"mx2-4x32", + "prefix": "scale", + "existing_resource_group": "Default", + "vpc_name": null, + "network_cidr": "10.0.0.0/8", + "placement_strategy": null, + "ssh_keys": null, + "enable_bastion": true, + "enable_deployer": true, + "deployer_instance_profile": "mx2-4x32", "bastion_ssh_keys": null, - "bastion_subnets_cidr":"10.0.0.0/24", - "enable_vpn":false, - "vpn_peer_cidr":null, - "vpn_peer_address":null, - "vpn_preshared_key":null, - "client_subnets_cidr":"10.10.10.0/24", + "bastion_subnets_cidr": "10.0.0.0/24", + "enable_vpn": false, + "vpn_peer_cidr": null, + "vpn_peer_address": null, + "vpn_preshared_key": null, + "client_subnets_cidr": "10.10.10.0/24", "client_ssh_keys": null, - 
"client_instances":[ + "client_instances": [ { - "profile":"cx2-2x4", - "count":2, - "image":"ibm-redhat-8-10-minimal-amd64-2" + "profile": "cx2-2x4", + "count": 2, + "image": "ibm-redhat-8-10-minimal-amd64-2" } ], - "compute_subnets_cidr":"10.10.20.0/24", + "compute_subnets_cidr": "10.10.20.0/24", "compute_ssh_keys": null, - "compute_instances":[ + "compute_instances": [ { - "profile":"cx2-2x4", - "count":3, - "image":"ibm-redhat-8-10-minimal-amd64-2" + "profile": "cx2-2x4", + "count": 3, + "image": "ibm-redhat-8-10-minimal-amd64-2" } ], - "compute_gui_username":"admin", + "compute_gui_username": "admin", "compute_gui_password": "hpc@IBMCloud", - "storage_subnets_cidr":"10.10.30.0/24", + "storage_subnets_cidr": "10.10.30.0/24", "storage_ssh_keys": null, - "storage_instances":[ + "storage_instances": [ { - "profile":"cx2-2x4", - "count":10, - "image":"ibm-redhat-8-10-minimal-amd64-2", + "profile": "cx2-2x4", + "count": 10, + "image": "ibm-redhat-8-10-minimal-amd64-2", "filesystem": "fs1" }, { - "profile":"cx2-2x4", - "count":10, - "image":"ibm-redhat-8-10-minimal-amd64-2", + "profile": "cx2-2x4", + "count": 10, + "image": "ibm-redhat-8-10-minimal-amd64-2", "filesystem": "fs2" } ], - "protocol_subnets_cidr":"10.10.40.0/24", - "protocol_instances":[ + "protocol_subnets_cidr": "10.10.40.0/24", + "protocol_instances": [ { - "profile":"cx2-2x4", - "count":2, - "image":"ibm-redhat-8-10-minimal-amd64-2" + "profile": "cx2-2x4", + "count": 2, + "image": "ibm-redhat-8-10-minimal-amd64-2" } ], "colocate_protocol_instances": true, - "storage_gui_username":"admin", + "storage_gui_username": "admin", "storage_gui_password": "hpc@IBMCloud", "filesystem_config": [ { - "filesystem":"fs1", - "block_size":"4M", - "default_data_replica":2, - "default_metadata_replica":2, - "max_data_replica":3, - "max_metadata_replica":3, - "mount_point":"/ibm/fs1" + "filesystem": "fs1", + "block_size": "4M", + "default_data_replica": 2, + "default_metadata_replica": 2, + "max_data_replica": 3, + "max_metadata_replica": 3, + "mount_point": "/ibm/fs1" } ], "filesets_config": [ { - "fileset":"fileset1", - "filesystem":"fs1", - "junction_path":"/ibm/fs1/fileset1", - "client_mount_path":"/mnt", - "quota" :100 + "fileset": "fileset1", + "filesystem": "fs1", + "junction_path": "/ibm/fs1/fileset1", + "client_mount_path": "/mnt", + "quota": 100 }, { - "fileset":"fileset2", - "filesystem":"fs1", - "junction_path":"/ibm/fs1/fileset1", - "client_mount_path":"/mnt", - "quota" :0 + "fileset": "fileset2", + "filesystem": "fs1", + "junction_path": "/ibm/fs1/fileset1", + "client_mount_path": "/mnt", + "quota": 0 } ], - "afm_instances":[ + "afm_instances": [ { - "profile":"cx2-2x4", - "count":2, - "image":"ibm-redhat-8-10-minimal-amd64-2" + "profile": "cx2-2x4", + "count": 2, + "image": "ibm-redhat-8-10-minimal-amd64-2" } ], - "afm_cos_config":[ + "afm_cos_config": [ { - "afm_fileset":"afm_fileset", - "mode":"iw", - "cos_instance":null, - "bucket_name":null, - "bucket_region":"us-south", - "cos_service_cred_key":"", - "bucket_storage_class":"smart", - "bucket_type":"region_location" + "afm_fileset": "afm_fileset", + "mode": "iw", + "cos_instance": null, + "bucket_name": null, + "bucket_region": "us-south", + "cos_service_cred_key": "", + "bucket_storage_class": "smart", + "bucket_type": "region_location" } ], - "nsd_details":null, - "dns_instance_id":null, - "dns_custom_resolver_id":null, - "dns_domain_names":{ - "compute":"comp.com", - "storage":"strg.com", - "protocol":"ces.com" + "nsd_details": null, + "dns_instance_id": null, + 
"dns_custom_resolver_id": null, + "dns_domain_names": { + "compute": "comp.com", + "storage": "strg.com", + "protocol": "ces.com" }, - "enable_cos_integration":true, - "cos_instance_name":null, - "enable_atracker":true, - "enable_vpc_flow_logs":true, - "key_management":"key_protect", - "hpcs_instance_name":null, + "enable_cos_integration": true, + "cos_instance_name": null, + "enable_atracker": true, + "enable_vpc_flow_logs": true, + "key_management": "key_protect", + "hpcs_instance_name": null, "clusters": null } diff --git a/solutions/scale/variables.tf b/solutions/scale/variables.tf index b197f2d0..3abf0a17 100644 --- a/solutions/scale/variables.tf +++ b/solutions/scale/variables.tf @@ -52,7 +52,7 @@ variable "prefix" { ############################################################################## # Resource Groups Variables ############################################################################## -variable "resource_group" { +variable "existing_resource_group" { type = string default = "Default" description = "String describing resource groups to create or reference" @@ -62,7 +62,7 @@ variable "resource_group" { ############################################################################## # VPC Variables ############################################################################## -variable "vpc" { +variable "vpc_name" { type = string default = null description = "Name of an existing VPC in which the cluster resources will be deployed. If no value is given, then a new VPC will be provisioned for the cluster. [Learn more](https://cloud.ibm.com/docs/vpc)" @@ -286,11 +286,11 @@ variable "protocol_instances" { description = "Number of instances to be launched for protocol hosts." } -variable "colocate_protocol_instances" { - type = bool - default = true - description = "Enable it to use storage instances as protocol instances" -} +# variable "colocate_protocol_instances" { +# type = bool +# default = true +# description = "Enable it to use storage instances as protocol instances" +# } variable "storage_gui_username" { type = string @@ -306,85 +306,85 @@ variable "storage_gui_password" { description = "Password for storage cluster GUI" } -variable "filesystem_config" { - type = list(object({ - filesystem = string - block_size = string - default_data_replica = number - default_metadata_replica = number - max_data_replica = number - max_metadata_replica = number - mount_point = string - })) - default = [{ - filesystem = "fs1" - block_size = "4M" - default_data_replica = 2 - default_metadata_replica = 2 - max_data_replica = 3 - max_metadata_replica = 3 - mount_point = "/ibm/fs1" - }] - description = "File system configurations." -} - -variable "filesets_config" { - type = list(object({ - fileset = string - filesystem = string - junction_path = string - client_mount_path = string - quota = number - })) - default = [{ - fileset = "fileset1" - filesystem = "fs1" - junction_path = "/ibm/fs1/fileset1" - client_mount_path = "/mnt" - quota = 100 - }] - description = "Fileset configurations." -} - -variable "afm_instances" { - type = list( - object({ - profile = string - count = number - image = string - }) - ) - default = [{ - profile = "bx2-2x8" - count = 0 - image = "ibm-redhat-8-10-minimal-amd64-2" - }] - description = "Number of instances to be launched for afm hosts." 
-} - -variable "afm_cos_config" { - type = list(object({ - afm_fileset = string, - mode = string, - cos_instance = string, - bucket_name = string, - bucket_region = string, - cos_service_cred_key = string, - bucket_type = string, - bucket_storage_class = string - })) - default = [{ - afm_fileset = "afm_fileset" - mode = "iw" - cos_instance = null - bucket_name = null - bucket_region = "us-south" - cos_service_cred_key = "" - bucket_storage_class = "smart" - bucket_type = "region_location" - }] - description = "AFM configurations." -} +# variable "filesystem_config" { +# type = list(object({ +# filesystem = string +# block_size = string +# default_data_replica = number +# default_metadata_replica = number +# max_data_replica = number +# max_metadata_replica = number +# mount_point = string +# })) +# default = [{ +# filesystem = "fs1" +# block_size = "4M" +# default_data_replica = 2 +# default_metadata_replica = 2 +# max_data_replica = 3 +# max_metadata_replica = 3 +# mount_point = "/ibm/fs1" +# }] +# description = "File system configurations." +# } + +# variable "filesets_config" { +# type = list(object({ +# fileset = string +# filesystem = string +# junction_path = string +# client_mount_path = string +# quota = number +# })) +# default = [{ +# fileset = "fileset1" +# filesystem = "fs1" +# junction_path = "/ibm/fs1/fileset1" +# client_mount_path = "/mnt" +# quota = 100 +# }] +# description = "Fileset configurations." +# } + +# variable "afm_instances" { +# type = list( +# object({ +# profile = string +# count = number +# image = string +# }) +# ) +# default = [{ +# profile = "bx2-2x8" +# count = 0 +# image = "ibm-redhat-8-10-minimal-amd64-2" +# }] +# description = "Number of instances to be launched for afm hosts." +# } + +# variable "afm_cos_config" { +# type = list(object({ +# afm_fileset = string, +# mode = string, +# cos_instance = string, +# bucket_name = string, +# bucket_region = string, +# cos_service_cred_key = string, +# bucket_type = string, +# bucket_storage_class = string +# })) +# default = [{ +# afm_fileset = "afm_fileset" +# mode = "iw" +# cos_instance = null +# bucket_name = null +# bucket_region = "us-south" +# cos_service_cred_key = "" +# bucket_storage_class = "smart" +# bucket_type = "region_location" +# }] +# description = "AFM configurations." +# } ############################################################################## # DNS Variables diff --git a/solutions/scale/version.tf b/solutions/scale/version.tf index e081bcc1..d465bd59 100644 --- a/solutions/scale/version.tf +++ b/solutions/scale/version.tf @@ -5,10 +5,6 @@ terraform { source = "IBM-Cloud/ibm" version = ">= 1.68.1, < 2.0.0" } - ansible = { - source = "ansible/ansible" - version = "~> 1.3.0" - } } } diff --git a/solutions/slurm/README.md b/solutions/slurm/README.md index 6ee238a5..c024d9b1 100644 --- a/solutions/slurm/README.md +++ b/solutions/slurm/README.md @@ -3,26 +3,21 @@ | Name | Version | |------|---------| | [terraform](#requirement\_terraform) | >= 1.3 | -| [ansible](#requirement\_ansible) | ~> 1.3.0 | | [ibm](#requirement\_ibm) | >= 1.68.1, < 2.0.0 | ## Providers -| Name | Version | -|------|---------| -| [ibm](#provider\_ibm) | 1.70.1 | +No providers. ## Modules | Name | Source | Version | |------|--------|---------| -| [lsf](#module\_lsf) | ./../.. | n/a | +| [slurm](#module\_slurm) | ./../.. | n/a | ## Resources -| Name | Type | -|------|------| -| [ibm_is_vpc.itself](https://registry.terraform.io/providers/IBM-Cloud/ibm/latest/docs/data-sources/is_vpc) | data source | +No resources. 
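For reference, a minimal `terraform.tfvars` sketch for this solution after the renames in this patch. Only input names visible in the table below (or in the updated `variables.tf`) are used; the API key, ICN, SSH key, and CIDR values are placeholders rather than defaults.

```hcl
# Illustrative values only: replace the placeholders before use.
ibmcloud_api_key        = "<ibmcloud-api-key>"
ibm_customer_number     = "<icn>"
prefix                  = "slurm"
zone                    = "ca-tor-1"        # single zone string, replacing the list-typed `zones`
existing_resource_group = "Default"         # formerly `resource_group`
vpc_name                = null              # formerly `vpc`; null provisions a new VPC
ssh_keys                = ["<vpc-ssh-key-name>"]
allowed_cidr            = ["<trusted-cidr>"]
```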
## Inputs @@ -30,14 +25,14 @@ |------|-------------|------|---------|:--------:| | [allowed\_cidr](#input\_allowed\_cidr) | Network CIDR to access the VPC. This is used to manage network ACL rules for accessing the cluster. | `list(string)` | n/a | yes | | [bastion\_ssh\_keys](#input\_bastion\_ssh\_keys) | The key pair to use to access the bastion host. | `list(string)` | `null` | no | -| [bastion\_subnets\_cidr](#input\_bastion\_subnets\_cidr) | Subnet CIDR block to launch the bastion host. | `list(string)` |
<pre>[<br>  "10.0.0.0/24"<br>]</pre>
| no | +| [bastion\_subnets\_cidr](#input\_bastion\_subnets\_cidr) | Subnet CIDR block to launch the bastion host. | `string` | `"10.0.0.0/24"` | no | | [client\_instances](#input\_client\_instances) | Number of instances to be launched for client. |
<pre>list(<br>  object({<br>    profile = string<br>    count = number<br>    image = string<br>  })<br>)</pre> | <pre>[<br>  {<br>    "count": 2,<br>    "image": "ibm-redhat-8-10-minimal-amd64-2",<br>    "profile": "cx2-2x4"<br>  }<br>]</pre>
| no | | [client\_ssh\_keys](#input\_client\_ssh\_keys) | The key pair to use to launch the client host. | `list(string)` | `null` | no | -| [client\_subnets\_cidr](#input\_client\_subnets\_cidr) | Subnet CIDR block to launch the client host. | `list(string)` |
<pre>[<br>  "10.10.10.0/24",<br>  "10.20.10.0/24",<br>  "10.30.10.0/24"<br>]</pre>
| no | +| [client\_subnets\_cidr](#input\_client\_subnets\_cidr) | Subnet CIDR block to launch the client host. | `string` | `"10.10.10.0/24"` | no | | [compute\_gui\_password](#input\_compute\_gui\_password) | Password for compute cluster GUI | `string` | `"hpc@IBMCloud"` | no | | [compute\_gui\_username](#input\_compute\_gui\_username) | GUI user to perform system management and monitoring tasks on compute cluster. | `string` | `"admin"` | no | | [compute\_ssh\_keys](#input\_compute\_ssh\_keys) | The key pair to use to launch the compute host. | `list(string)` | `null` | no | -| [compute\_subnets\_cidr](#input\_compute\_subnets\_cidr) | Subnet CIDR block to launch the compute cluster host. | `list(string)` |
<pre>[<br>  "10.10.20.0/24",<br>  "10.20.20.0/24",<br>  "10.30.20.0/24"<br>]</pre>
| no | +| [compute\_subnets\_cidr](#input\_compute\_subnets\_cidr) | Subnet CIDR block to launch the compute cluster host. | `string` | `"10.10.20.0/24"` | no | | [cos\_instance\_name](#input\_cos\_instance\_name) | Exiting COS instance name | `string` | `null` | no | | [deployer\_instance\_profile](#input\_deployer\_instance\_profile) | Deployer should be only used for better deployment performance | `string` | `"mx2-4x32"` | no | | [dns\_custom\_resolver\_id](#input\_dns\_custom\_resolver\_id) | IBM Cloud DNS custom resolver id. | `string` | `null` | no | @@ -50,34 +45,35 @@ | [enable\_deployer](#input\_enable\_deployer) | Deployer should be only used for better deployment performance | `bool` | `false` | no | | [enable\_vpc\_flow\_logs](#input\_enable\_vpc\_flow\_logs) | Enable Activity tracker | `bool` | `true` | no | | [enable\_vpn](#input\_enable\_vpn) | The solution supports multiple ways to connect to your HPC cluster for example, using bastion node, via VPN or direct connection. If connecting to the HPC cluster via VPN, set this value to true. | `bool` | `false` | no | +| [existing\_resource\_group](#input\_existing\_resource\_group) | String describing resource groups to create or reference | `string` | `"Default"` | no | | [file\_shares](#input\_file\_shares) | Custom file shares to access shared storage |
<pre>list(<br>  object({<br>    mount_path = string,<br>    size = number,<br>    iops = number<br>  })<br>)</pre> | <pre>[<br>  {<br>    "iops": 1000,<br>    "mount_path": "/mnt/binaries",<br>    "size": 100<br>  },<br>  {<br>    "iops": 1000,<br>    "mount_path": "/mnt/data",<br>    "size": 100<br>  }<br>]</pre>
| no | | [hpcs\_instance\_name](#input\_hpcs\_instance\_name) | Hyper Protect Crypto Service instance | `string` | `null` | no | | [ibm\_customer\_number](#input\_ibm\_customer\_number) | Comma-separated list of the IBM Customer Number(s) (ICN) that is used for the Bring Your Own License (BYOL) entitlement check. For more information on how to find your ICN, see [What is my IBM Customer Number (ICN)?](https://www.ibm.com/support/pages/what-my-ibm-customer-number-icn). | `string` | n/a | yes | | [ibmcloud\_api\_key](#input\_ibmcloud\_api\_key) | IBM Cloud API Key that will be used for authentication in scripts run in this module. Only required if certain options are required. | `string` | n/a | yes | -| [key\_management](#input\_key\_management) | null/key\_protect/hs\_crypto | `string` | `"key_protect"` | no | +| [key\_management](#input\_key\_management) | Set the value as key\_protect to enable customer managed encryption for boot volume and file share. If the key\_management is set as null, IBM Cloud resources will be always be encrypted through provider managed. | `string` | `"key_protect"` | no | | [management\_instances](#input\_management\_instances) | Number of instances to be launched for management. |
<pre>list(<br>  object({<br>    profile = string<br>    count = number<br>    image = string<br>  })<br>)</pre> | <pre>[<br>  {<br>    "count": 2,<br>    "image": "ibm-redhat-8-10-minimal-amd64-2",<br>    "profile": "cx2-2x4"<br>  }<br>]</pre>
| no | | [network\_cidr](#input\_network\_cidr) | Network CIDR for the VPC. This is used to manage network ACL rules for cluster provisioning. | `string` | `"10.0.0.0/8"` | no | -| [nsd\_details](#input\_nsd\_details) | Storage scale NSD details |
<pre>list(<br>  object({<br>    profile = string<br>    capacity = optional(number)<br>    iops = optional(number)<br>  })<br>)</pre> | <pre>[<br>  {<br>    "capacity": 100,<br>    "iops": 1000,<br>    "profile": "custom"<br>  }<br>]</pre>
| no | | [override](#input\_override) | Override default values with custom JSON template. This uses the file `override.json` to allow users to create a fully customized environment. | `bool` | `false` | no | | [override\_json\_string](#input\_override\_json\_string) | Override default values with a JSON object. Any JSON other than an empty string overrides other configuration changes. | `string` | `null` | no | | [placement\_strategy](#input\_placement\_strategy) | VPC placement groups to create (null / host\_spread / power\_spread) | `string` | `null` | no | | [prefix](#input\_prefix) | A unique identifier for resources. Must begin with a letter and end with a letter or number. This prefix will be prepended to any resources provisioned by this template. Prefixes must be 16 or fewer characters. | `string` | `"lsf"` | no | | [protocol\_instances](#input\_protocol\_instances) | Number of instances to be launched for protocol hosts. |
<pre>list(<br>  object({<br>    profile = string<br>    count = number<br>    image = string<br>  })<br>)</pre> | <pre>[<br>  {<br>    "count": 2,<br>    "image": "ibm-redhat-8-10-minimal-amd64-2",<br>    "profile": "bx2-2x8"<br>  }<br>]</pre>
| no | -| [protocol\_subnets\_cidr](#input\_protocol\_subnets\_cidr) | Subnet CIDR block to launch the storage cluster host. | `list(string)` |
<pre>[<br>  "10.10.40.0/24",<br>  "10.20.40.0/24",<br>  "10.30.40.0/24"<br>]</pre>
| no | -| [resource\_group](#input\_resource\_group) | String describing resource groups to create or reference | `string` | `"Default"` | no | +| [protocol\_subnets\_cidr](#input\_protocol\_subnets\_cidr) | Subnet CIDR block to launch the storage cluster host. | `string` | `"10.10.40.0/24"` | no | | [ssh\_keys](#input\_ssh\_keys) | The key pair to use to access the HPC cluster. | `list(string)` | `null` | no | | [static\_compute\_instances](#input\_static\_compute\_instances) | Min Number of instances to be launched for compute cluster. |
<pre>list(<br>  object({<br>    profile = string<br>    count = number<br>    image = string<br>  })<br>)</pre> | <pre>[<br>  {<br>    "count": 1,<br>    "image": "ibm-redhat-8-10-minimal-amd64-2",<br>    "profile": "cx2-2x4"<br>  }<br>]</pre>
| no | | [storage\_gui\_password](#input\_storage\_gui\_password) | Password for storage cluster GUI | `string` | `"hpc@IBMCloud"` | no | | [storage\_gui\_username](#input\_storage\_gui\_username) | GUI user to perform system management and monitoring tasks on storage cluster. | `string` | `"admin"` | no | | [storage\_instances](#input\_storage\_instances) | Number of instances to be launched for storage cluster. |
<pre>list(<br>  object({<br>    profile = string<br>    count = number<br>    image = string<br>    filesystem_name = optional(string)<br>  })<br>)</pre> | <pre>[<br>  {<br>    "count": 2,<br>    "filesystem_name": "fs1",<br>    "image": "ibm-redhat-8-10-minimal-amd64-2",<br>    "profile": "bx2-2x8"<br>  }<br>]</pre>
| no | | [storage\_ssh\_keys](#input\_storage\_ssh\_keys) | The key pair to use to launch the storage cluster host. | `list(string)` | `null` | no | -| [storage\_subnets\_cidr](#input\_storage\_subnets\_cidr) | Subnet CIDR block to launch the storage cluster host. | `list(string)` |
<pre>[<br>  "10.10.30.0/24",<br>  "10.20.30.0/24",<br>  "10.30.30.0/24"<br>]</pre>
| no | +| [storage\_subnets\_cidr](#input\_storage\_subnets\_cidr) | Subnet CIDR block to launch the storage cluster host. | `string` | `"10.10.30.0/24"` | no | | [vpc](#input\_vpc) | Name of an existing VPC in which the cluster resources will be deployed. If no value is given, then a new VPC will be provisioned for the cluster. [Learn more](https://cloud.ibm.com/docs/vpc) | `string` | `null` | no | | [vpn\_peer\_address](#input\_vpn\_peer\_address) | The peer public IP address to which the VPN will be connected. | `string` | `null` | no | | [vpn\_peer\_cidr](#input\_vpn\_peer\_cidr) | The peer CIDRs (e.g., 192.168.0.0/24) to which the VPN will be connected. | `list(string)` | `null` | no | | [vpn\_preshared\_key](#input\_vpn\_preshared\_key) | The pre-shared key for the VPN. | `string` | `null` | no | -| [zones](#input\_zones) | Region where VPC will be created. To find your VPC region, use `ibmcloud is regions` command to find available regions. | `list(string)` | n/a | yes | +| [zone](#input\_zone) | Zone where VPC will be created. | `string` | n/a | yes | ## Outputs -No outputs. +| Name | Description | +|------|-------------| +| [slurm](#output\_slurm) | Slurm details | diff --git a/solutions/slurm/catalogValidationValues.json.template b/solutions/slurm/catalogValidationValues.json.template index a5642a41..829063ab 100644 --- a/solutions/slurm/catalogValidationValues.json.template +++ b/solutions/slurm/catalogValidationValues.json.template @@ -1,7 +1,7 @@ { "ibmcloud_api_key": $VALIDATION_APIKEY, "prefix": $PREFIX, - "zones": "[\"ca-tor-1\"]", - "resource_group": "geretain-hpc-rg", + "zone": "ca-tor-1", + "existing_resource_group": "geretain-hpc-rg", "ssh_keys": "[\"geretain-hpc-ssh-key\"]" } diff --git a/solutions/slurm/datasource.tf b/solutions/slurm/datasource.tf deleted file mode 100644 index f6fada1e..00000000 --- a/solutions/slurm/datasource.tf +++ /dev/null @@ -1,22 +0,0 @@ -# Future use -/* -data "ibm_is_region" "itself" { - name = local.region -} - -data "ibm_is_zone" "itself" { - name = var.zones[0] - region = data.ibm_is_region.itself.name -} -*/ - -data "ibm_is_vpc" "itself" { - count = var.vpc == null ? 
0 : 1 - name = var.vpc -} -/* -data "ibm_is_subnet" "itself" { - count = length(local.subnets) - identifier = local.subnets[count.index]["id"] -} -*/ diff --git a/solutions/slurm/locals.tf b/solutions/slurm/locals.tf index f61d69eb..f86ca6b4 100644 --- a/solutions/slurm/locals.tf +++ b/solutions/slurm/locals.tf @@ -20,7 +20,7 @@ locals { locals { config = { - resource_group = var.resource_group + existing_resource_group = var.existing_resource_group allowed_cidr = var.allowed_cidr deployer_instance_profile = var.deployer_instance_profile ssh_keys = var.ssh_keys @@ -59,7 +59,7 @@ locals { storage_instances = var.storage_instances storage_ssh_keys = var.storage_ssh_keys storage_subnets_cidr = var.storage_subnets_cidr - vpc = var.vpc + vpc_name = var.vpc_name vpn_peer_address = var.vpn_peer_address vpn_peer_cidr = var.vpn_peer_cidr vpn_preshared_key = var.vpn_preshared_key @@ -70,7 +70,7 @@ locals { # Compile Environment for Config output locals { env = { - resource_group = lookup(local.override[local.override_type], "resource_group", local.config.resource_group) + existing_resource_group = lookup(local.override[local.override_type], "existing_resource_group", local.config.existing_resource_group) allowed_cidr = lookup(local.override[local.override_type], "allowed_cidr", local.config.allowed_cidr) deployer_instance_profile = lookup(local.override[local.override_type], "deployer_instance_profile", local.config.deployer_instance_profile) ssh_keys = lookup(local.override[local.override_type], "ssh_keys", local.config.ssh_keys) @@ -109,7 +109,7 @@ locals { storage_instances = lookup(local.override[local.override_type], "storage_instances", local.config.storage_instances) storage_ssh_keys = lookup(local.override[local.override_type], "storage_ssh_keys", local.config.storage_ssh_keys) storage_subnets_cidr = lookup(local.override[local.override_type], "storage_subnets_cidr", local.config.storage_subnets_cidr) - vpc = lookup(local.override[local.override_type], "vpc", local.config.vpc) + vpc_name = lookup(local.override[local.override_type], "vpc_name", local.config.vpc_name) vpn_peer_address = lookup(local.override[local.override_type], "vpn_peer_address", local.config.vpn_peer_address) vpn_peer_cidr = lookup(local.override[local.override_type], "vpn_peer_cidr", local.config.vpn_peer_cidr) vpn_preshared_key = lookup(local.override[local.override_type], "vpn_preshared_key", local.config.vpn_preshared_key) diff --git a/solutions/slurm/main.tf b/solutions/slurm/main.tf index b2d8e2a0..456242f0 100644 --- a/solutions/slurm/main.tf +++ b/solutions/slurm/main.tf @@ -1,17 +1,14 @@ module "slurm" { source = "./../.." 
- scheduler = "slurm" ibm_customer_number = var.ibm_customer_number zones = [var.zone] allowed_cidr = var.allowed_cidr prefix = local.env.prefix ssh_keys = local.env.ssh_keys - resource_group = local.env.resource_group + existing_resource_group = local.env.existing_resource_group deployer_instance_profile = local.env.deployer_instance_profile bastion_ssh_keys = local.env.bastion_ssh_keys bastion_subnets_cidr = [local.env.bastion_subnets_cidr] - compute_gui_password = local.env.compute_gui_password - compute_gui_username = local.env.compute_gui_username compute_ssh_keys = local.env.compute_ssh_keys compute_subnets_cidr = [local.env.compute_subnets_cidr] cos_instance_name = local.env.cos_instance_name @@ -26,24 +23,28 @@ module "slurm" { enable_vpc_flow_logs = local.env.enable_vpc_flow_logs enable_vpn = local.env.enable_vpn file_shares = local.env.file_shares - hpcs_instance_name = local.env.hpcs_instance_name key_management = local.env.key_management client_instances = local.env.client_instances client_ssh_keys = local.env.client_ssh_keys - client_subnets_cidr = [local.env.client_subnets_cidr] management_instances = local.env.management_instances network_cidr = local.env.network_cidr placement_strategy = local.env.placement_strategy protocol_instances = local.env.protocol_instances protocol_subnets_cidr = [local.env.protocol_subnets_cidr] static_compute_instances = local.env.static_compute_instances - storage_gui_password = local.env.storage_gui_password - storage_gui_username = local.env.storage_gui_username storage_instances = local.env.storage_instances storage_ssh_keys = local.env.storage_ssh_keys storage_subnets_cidr = [local.env.storage_subnets_cidr] - vpc = local.env.vpc + vpc_name = local.env.vpc_name vpn_peer_address = local.env.vpn_peer_address vpn_peer_cidr = local.env.vpn_peer_cidr vpn_preshared_key = local.env.vpn_preshared_key + + # scheduler = "slurm" + # compute_gui_password = local.env.compute_gui_password + # compute_gui_username = local.env.compute_gui_username + # hpcs_instance_name = local.env.hpcs_instance_name + # client_subnets_cidr = [local.env.client_subnets_cidr] + # storage_gui_password = local.env.storage_gui_password + # storage_gui_username = local.env.storage_gui_username } diff --git a/solutions/slurm/outputs.tf b/solutions/slurm/outputs.tf index ced66306..ae50c7ab 100644 --- a/solutions/slurm/outputs.tf +++ b/solutions/slurm/outputs.tf @@ -1,3 +1,4 @@ output "slurm" { - value = module.slurm + description = "Slurm details" + value = module.slurm } diff --git a/solutions/slurm/override.json b/solutions/slurm/override.json index d20fb7fc..548f258f 100644 --- a/solutions/slurm/override.json +++ b/solutions/slurm/override.json @@ -1,95 +1,95 @@ { - "prefix":"slurm", - "resource_group": "Default", - "vpc": null, - "network_cidr":"10.0.0.0/8", - "placement_strategy":null, - "ssh_keys":null, - "enable_bastion":true, - "enable_deployer":false, - "deployer_instance_profile":"mx2-4x32", + "prefix": "slurm", + "existing_resource_group": "Default", + "vpc_name": null, + "network_cidr": "10.0.0.0/8", + "placement_strategy": null, + "ssh_keys": null, + "enable_bastion": true, + "enable_deployer": false, + "deployer_instance_profile": "mx2-4x32", "bastion_ssh_keys": null, - "bastion_subnets_cidr":"10.0.0.0/24", - "enable_vpn":false, - "vpn_peer_cidr":null, - "vpn_peer_address":null, - "vpn_preshared_key":null, - "client_subnets_cidr":"10.10.10.0/24", + "bastion_subnets_cidr": "10.0.0.0/24", + "enable_vpn": false, + "vpn_peer_cidr": null, + "vpn_peer_address": null, + 
"vpn_preshared_key": null, + "client_subnets_cidr": "10.10.10.0/24", "client_ssh_keys": null, - "client_instances":[ + "client_instances": [ { - "profile":"cx2-2x4", - "count":2, - "image":"ibm-redhat-8-10-minimal-amd64-2" + "profile": "cx2-2x4", + "count": 2, + "image": "ibm-redhat-8-10-minimal-amd64-2" } ], - "compute_subnets_cidr":"10.10.20.0/24", + "compute_subnets_cidr": "10.10.20.0/24", "compute_ssh_keys": null, - "management_instances":[ + "management_instances": [ { - "profile":"cx2-2x4", - "count":3, - "image":"ibm-redhat-8-10-minimal-amd64-2" + "profile": "cx2-2x4", + "count": 3, + "image": "ibm-redhat-8-10-minimal-amd64-2" } ], - "static_compute_instances":[ + "static_compute_instances": [ { - "profile":"cx2-2x4", - "count":0, - "image":"ibm-redhat-8-10-minimal-amd64-2" + "profile": "cx2-2x4", + "count": 0, + "image": "ibm-redhat-8-10-minimal-amd64-2" } ], - "dynamic_compute_instances":[ + "dynamic_compute_instances": [ { - "profile":"cx2-2x4", - "count":5000, - "image":"ibm-redhat-8-10-minimal-amd64-2" + "profile": "cx2-2x4", + "count": 5000, + "image": "ibm-redhat-8-10-minimal-amd64-2" } ], - "compute_gui_username":"admin", + "compute_gui_username": "admin", "compute_gui_password": "hpc@IBMCloud", - "storage_subnets_cidr":"10.10.30.0/24", + "storage_subnets_cidr": "10.10.30.0/24", "storage_ssh_keys": null, - "storage_instances":[ + "storage_instances": [ { - "profile":"cx2-2x4", - "count":2, - "image":"ibm-redhat-8-10-minimal-amd64-2" + "profile": "cx2-2x4", + "count": 2, + "image": "ibm-redhat-8-10-minimal-amd64-2" } ], - "protocol_subnets_cidr":"10.10.40.0/24", - "protocol_instances":[ + "protocol_subnets_cidr": "10.10.40.0/24", + "protocol_instances": [ { - "profile":"cx2-2x4", - "count":2, - "image":"ibm-redhat-8-10-minimal-amd64-2" + "profile": "cx2-2x4", + "count": 2, + "image": "ibm-redhat-8-10-minimal-amd64-2" } ], - "storage_gui_username":"admin", + "storage_gui_username": "admin", "storage_gui_password": "hpc@IBMCloud", - "file_shares":[ + "file_shares": [ { - "mount_path":"/mnt/binaries", - "size":100, - "iops":1000 + "mount_path": "/mnt/binaries", + "size": 100, + "iops": 1000 }, { - "mount_path":"/mnt/data", - "size":100, - "iops":1000 + "mount_path": "/mnt/data", + "size": 100, + "iops": 1000 } ], - "dns_instance_id":null, - "dns_custom_resolver_id":null, - "dns_domain_names":{ - "compute":"comp.com", - "storage":"strg.com", - "protocol":"ces.com" + "dns_instance_id": null, + "dns_custom_resolver_id": null, + "dns_domain_names": { + "compute": "comp.com", + "storage": "strg.com", + "protocol": "ces.com" }, - "enable_cos_integration":true, - "cos_instance_name":null, - "enable_atracker":true, - "enable_vpc_flow_logs":true, - "key_management":"key_protect", - "hpcs_instance_name":null + "enable_cos_integration": true, + "cos_instance_name": null, + "enable_atracker": true, + "enable_vpc_flow_logs": true, + "key_management": "key_protect", + "hpcs_instance_name": null } diff --git a/solutions/slurm/variables.tf b/solutions/slurm/variables.tf index f7fe9167..a7a040c5 100644 --- a/solutions/slurm/variables.tf +++ b/solutions/slurm/variables.tf @@ -52,7 +52,7 @@ variable "prefix" { ############################################################################## # Resource Groups Variables ############################################################################## -variable "resource_group" { +variable "existing_resource_group" { type = string default = "Default" description = "String describing resource groups to create or reference" @@ -62,7 +62,7 @@ variable 
"resource_group" { ############################################################################## # VPC Variables ############################################################################## -variable "vpc" { +variable "vpc_name" { type = string default = null description = "Name of an existing VPC in which the cluster resources will be deployed. If no value is given, then a new VPC will be provisioned for the cluster. [Learn more](https://cloud.ibm.com/docs/vpc)" diff --git a/solutions/slurm/version.tf b/solutions/slurm/version.tf index e081bcc1..d465bd59 100644 --- a/solutions/slurm/version.tf +++ b/solutions/slurm/version.tf @@ -5,10 +5,6 @@ terraform { source = "IBM-Cloud/ibm" version = ">= 1.68.1, < 2.0.0" } - ansible = { - source = "ansible/ansible" - version = "~> 1.3.0" - } } } diff --git a/solutions/symphony/README.md b/solutions/symphony/README.md index 6ee238a5..5628cd7c 100644 --- a/solutions/symphony/README.md +++ b/solutions/symphony/README.md @@ -3,26 +3,21 @@ | Name | Version | |------|---------| | [terraform](#requirement\_terraform) | >= 1.3 | -| [ansible](#requirement\_ansible) | ~> 1.3.0 | | [ibm](#requirement\_ibm) | >= 1.68.1, < 2.0.0 | ## Providers -| Name | Version | -|------|---------| -| [ibm](#provider\_ibm) | 1.70.1 | +No providers. ## Modules | Name | Source | Version | |------|--------|---------| -| [lsf](#module\_lsf) | ./../.. | n/a | +| [symphony](#module\_symphony) | ./../.. | n/a | ## Resources -| Name | Type | -|------|------| -| [ibm_is_vpc.itself](https://registry.terraform.io/providers/IBM-Cloud/ibm/latest/docs/data-sources/is_vpc) | data source | +No resources. ## Inputs @@ -30,14 +25,14 @@ |------|-------------|------|---------|:--------:| | [allowed\_cidr](#input\_allowed\_cidr) | Network CIDR to access the VPC. This is used to manage network ACL rules for accessing the cluster. | `list(string)` | n/a | yes | | [bastion\_ssh\_keys](#input\_bastion\_ssh\_keys) | The key pair to use to access the bastion host. | `list(string)` | `null` | no | -| [bastion\_subnets\_cidr](#input\_bastion\_subnets\_cidr) | Subnet CIDR block to launch the bastion host. | `list(string)` |
<pre>[<br>  "10.0.0.0/24"<br>]</pre>
| no | +| [bastion\_subnets\_cidr](#input\_bastion\_subnets\_cidr) | Subnet CIDR block to launch the bastion host. | `string` | `"10.0.0.0/24"` | no | | [client\_instances](#input\_client\_instances) | Number of instances to be launched for client. |
<pre>list(<br>  object({<br>    profile = string<br>    count = number<br>    image = string<br>  })<br>)</pre> | <pre>[<br>  {<br>    "count": 2,<br>    "image": "ibm-redhat-8-10-minimal-amd64-2",<br>    "profile": "cx2-2x4"<br>  }<br>]</pre>
| no | | [client\_ssh\_keys](#input\_client\_ssh\_keys) | The key pair to use to launch the client host. | `list(string)` | `null` | no | -| [client\_subnets\_cidr](#input\_client\_subnets\_cidr) | Subnet CIDR block to launch the client host. | `list(string)` |
<pre>[<br>  "10.10.10.0/24",<br>  "10.20.10.0/24",<br>  "10.30.10.0/24"<br>]</pre>
| no | +| [client\_subnets\_cidr](#input\_client\_subnets\_cidr) | Subnet CIDR block to launch the client host. | `string` | `"10.10.10.0/24"` | no | | [compute\_gui\_password](#input\_compute\_gui\_password) | Password for compute cluster GUI | `string` | `"hpc@IBMCloud"` | no | | [compute\_gui\_username](#input\_compute\_gui\_username) | GUI user to perform system management and monitoring tasks on compute cluster. | `string` | `"admin"` | no | | [compute\_ssh\_keys](#input\_compute\_ssh\_keys) | The key pair to use to launch the compute host. | `list(string)` | `null` | no | -| [compute\_subnets\_cidr](#input\_compute\_subnets\_cidr) | Subnet CIDR block to launch the compute cluster host. | `list(string)` |
<pre>[<br>  "10.10.20.0/24",<br>  "10.20.20.0/24",<br>  "10.30.20.0/24"<br>]</pre>
| no | +| [compute\_subnets\_cidr](#input\_compute\_subnets\_cidr) | Subnet CIDR block to launch the compute cluster host. | `string` | `"10.10.20.0/24"` | no | | [cos\_instance\_name](#input\_cos\_instance\_name) | Exiting COS instance name | `string` | `null` | no | | [deployer\_instance\_profile](#input\_deployer\_instance\_profile) | Deployer should be only used for better deployment performance | `string` | `"mx2-4x32"` | no | | [dns\_custom\_resolver\_id](#input\_dns\_custom\_resolver\_id) | IBM Cloud DNS custom resolver id. | `string` | `null` | no | @@ -50,34 +45,35 @@ | [enable\_deployer](#input\_enable\_deployer) | Deployer should be only used for better deployment performance | `bool` | `false` | no | | [enable\_vpc\_flow\_logs](#input\_enable\_vpc\_flow\_logs) | Enable Activity tracker | `bool` | `true` | no | | [enable\_vpn](#input\_enable\_vpn) | The solution supports multiple ways to connect to your HPC cluster for example, using bastion node, via VPN or direct connection. If connecting to the HPC cluster via VPN, set this value to true. | `bool` | `false` | no | +| [existing\_resource\_group](#input\_existing\_resource\_group) | String describing resource groups to create or reference | `string` | `"Default"` | no | | [file\_shares](#input\_file\_shares) | Custom file shares to access shared storage |
<pre>list(<br>  object({<br>    mount_path = string,<br>    size = number,<br>    iops = number<br>  })<br>)</pre> | <pre>[<br>  {<br>    "iops": 1000,<br>    "mount_path": "/mnt/binaries",<br>    "size": 100<br>  },<br>  {<br>    "iops": 1000,<br>    "mount_path": "/mnt/data",<br>    "size": 100<br>  }<br>]</pre>
| no | | [hpcs\_instance\_name](#input\_hpcs\_instance\_name) | Hyper Protect Crypto Service instance | `string` | `null` | no | | [ibm\_customer\_number](#input\_ibm\_customer\_number) | Comma-separated list of the IBM Customer Number(s) (ICN) that is used for the Bring Your Own License (BYOL) entitlement check. For more information on how to find your ICN, see [What is my IBM Customer Number (ICN)?](https://www.ibm.com/support/pages/what-my-ibm-customer-number-icn). | `string` | n/a | yes | | [ibmcloud\_api\_key](#input\_ibmcloud\_api\_key) | IBM Cloud API Key that will be used for authentication in scripts run in this module. Only required if certain options are required. | `string` | n/a | yes | -| [key\_management](#input\_key\_management) | null/key\_protect/hs\_crypto | `string` | `"key_protect"` | no | +| [key\_management](#input\_key\_management) | Set the value as key\_protect to enable customer managed encryption for boot volume and file share. If the key\_management is set as null, IBM Cloud resources will be always be encrypted through provider managed. | `string` | `"key_protect"` | no | | [management\_instances](#input\_management\_instances) | Number of instances to be launched for management. |
<pre>list(<br>  object({<br>    profile = string<br>    count = number<br>    image = string<br>  })<br>)</pre> | <pre>[<br>  {<br>    "count": 2,<br>    "image": "ibm-redhat-8-10-minimal-amd64-2",<br>    "profile": "cx2-2x4"<br>  }<br>]</pre>
| no | | [network\_cidr](#input\_network\_cidr) | Network CIDR for the VPC. This is used to manage network ACL rules for cluster provisioning. | `string` | `"10.0.0.0/8"` | no | -| [nsd\_details](#input\_nsd\_details) | Storage scale NSD details |
<pre>list(<br>  object({<br>    profile = string<br>    capacity = optional(number)<br>    iops = optional(number)<br>  })<br>)</pre> | <pre>[<br>  {<br>    "capacity": 100,<br>    "iops": 1000,<br>    "profile": "custom"<br>  }<br>]</pre>
| no | | [override](#input\_override) | Override default values with custom JSON template. This uses the file `override.json` to allow users to create a fully customized environment. | `bool` | `false` | no | | [override\_json\_string](#input\_override\_json\_string) | Override default values with a JSON object. Any JSON other than an empty string overrides other configuration changes. | `string` | `null` | no | | [placement\_strategy](#input\_placement\_strategy) | VPC placement groups to create (null / host\_spread / power\_spread) | `string` | `null` | no | | [prefix](#input\_prefix) | A unique identifier for resources. Must begin with a letter and end with a letter or number. This prefix will be prepended to any resources provisioned by this template. Prefixes must be 16 or fewer characters. | `string` | `"lsf"` | no | | [protocol\_instances](#input\_protocol\_instances) | Number of instances to be launched for protocol hosts. |
<pre>list(<br>  object({<br>    profile = string<br>    count = number<br>    image = string<br>  })<br>)</pre> | <pre>[<br>  {<br>    "count": 2,<br>    "image": "ibm-redhat-8-10-minimal-amd64-2",<br>    "profile": "bx2-2x8"<br>  }<br>]</pre>
| no | -| [protocol\_subnets\_cidr](#input\_protocol\_subnets\_cidr) | Subnet CIDR block to launch the storage cluster host. | `list(string)` |
<pre>[<br>  "10.10.40.0/24",<br>  "10.20.40.0/24",<br>  "10.30.40.0/24"<br>]</pre>
| no | -| [resource\_group](#input\_resource\_group) | String describing resource groups to create or reference | `string` | `"Default"` | no | +| [protocol\_subnets\_cidr](#input\_protocol\_subnets\_cidr) | Subnet CIDR block to launch the storage cluster host. | `string` | `"10.10.40.0/24"` | no | | [ssh\_keys](#input\_ssh\_keys) | The key pair to use to access the HPC cluster. | `list(string)` | `null` | no | | [static\_compute\_instances](#input\_static\_compute\_instances) | Min Number of instances to be launched for compute cluster. |
<pre>list(<br>  object({<br>    profile = string<br>    count = number<br>    image = string<br>  })<br>)</pre> | <pre>[<br>  {<br>    "count": 1,<br>    "image": "ibm-redhat-8-10-minimal-amd64-2",<br>    "profile": "cx2-2x4"<br>  }<br>]</pre>
| no | | [storage\_gui\_password](#input\_storage\_gui\_password) | Password for storage cluster GUI | `string` | `"hpc@IBMCloud"` | no | | [storage\_gui\_username](#input\_storage\_gui\_username) | GUI user to perform system management and monitoring tasks on storage cluster. | `string` | `"admin"` | no | | [storage\_instances](#input\_storage\_instances) | Number of instances to be launched for storage cluster. |
<pre>list(<br>  object({<br>    profile = string<br>    count = number<br>    image = string<br>    filesystem_name = optional(string)<br>  })<br>)</pre> | <pre>[<br>  {<br>    "count": 2,<br>    "filesystem_name": "fs1",<br>    "image": "ibm-redhat-8-10-minimal-amd64-2",<br>    "profile": "bx2-2x8"<br>  }<br>]</pre>
| no | | [storage\_ssh\_keys](#input\_storage\_ssh\_keys) | The key pair to use to launch the storage cluster host. | `list(string)` | `null` | no | -| [storage\_subnets\_cidr](#input\_storage\_subnets\_cidr) | Subnet CIDR block to launch the storage cluster host. | `list(string)` |
<pre>[<br>  "10.10.30.0/24",<br>  "10.20.30.0/24",<br>  "10.30.30.0/24"<br>]</pre>
| no | +| [storage\_subnets\_cidr](#input\_storage\_subnets\_cidr) | Subnet CIDR block to launch the storage cluster host. | `string` | `"10.10.30.0/24"` | no | | [vpc](#input\_vpc) | Name of an existing VPC in which the cluster resources will be deployed. If no value is given, then a new VPC will be provisioned for the cluster. [Learn more](https://cloud.ibm.com/docs/vpc) | `string` | `null` | no | | [vpn\_peer\_address](#input\_vpn\_peer\_address) | The peer public IP address to which the VPN will be connected. | `string` | `null` | no | | [vpn\_peer\_cidr](#input\_vpn\_peer\_cidr) | The peer CIDRs (e.g., 192.168.0.0/24) to which the VPN will be connected. | `list(string)` | `null` | no | | [vpn\_preshared\_key](#input\_vpn\_preshared\_key) | The pre-shared key for the VPN. | `string` | `null` | no | -| [zones](#input\_zones) | Region where VPC will be created. To find your VPC region, use `ibmcloud is regions` command to find available regions. | `list(string)` | n/a | yes | +| [zone](#input\_zone) | Zone where VPC will be created. | `string` | n/a | yes | ## Outputs -No outputs. +| Name | Description | +|------|-------------| +| [symphony](#output\_symphony) | Symphony details | diff --git a/solutions/symphony/catalogValidationValues.json.template b/solutions/symphony/catalogValidationValues.json.template index a5642a41..829063ab 100644 --- a/solutions/symphony/catalogValidationValues.json.template +++ b/solutions/symphony/catalogValidationValues.json.template @@ -1,7 +1,7 @@ { "ibmcloud_api_key": $VALIDATION_APIKEY, "prefix": $PREFIX, - "zones": "[\"ca-tor-1\"]", - "resource_group": "geretain-hpc-rg", + "zone": "ca-tor-1", + "existing_resource_group": "geretain-hpc-rg", "ssh_keys": "[\"geretain-hpc-ssh-key\"]" } diff --git a/solutions/symphony/datasource.tf b/solutions/symphony/datasource.tf deleted file mode 100644 index f6fada1e..00000000 --- a/solutions/symphony/datasource.tf +++ /dev/null @@ -1,22 +0,0 @@ -# Future use -/* -data "ibm_is_region" "itself" { - name = local.region -} - -data "ibm_is_zone" "itself" { - name = var.zones[0] - region = data.ibm_is_region.itself.name -} -*/ - -data "ibm_is_vpc" "itself" { - count = var.vpc == null ? 
0 : 1 - name = var.vpc -} -/* -data "ibm_is_subnet" "itself" { - count = length(local.subnets) - identifier = local.subnets[count.index]["id"] -} -*/ diff --git a/solutions/symphony/locals.tf b/solutions/symphony/locals.tf index f61d69eb..f86ca6b4 100644 --- a/solutions/symphony/locals.tf +++ b/solutions/symphony/locals.tf @@ -20,7 +20,7 @@ locals { locals { config = { - resource_group = var.resource_group + existing_resource_group = var.existing_resource_group allowed_cidr = var.allowed_cidr deployer_instance_profile = var.deployer_instance_profile ssh_keys = var.ssh_keys @@ -59,7 +59,7 @@ locals { storage_instances = var.storage_instances storage_ssh_keys = var.storage_ssh_keys storage_subnets_cidr = var.storage_subnets_cidr - vpc = var.vpc + vpc_name = var.vpc_name vpn_peer_address = var.vpn_peer_address vpn_peer_cidr = var.vpn_peer_cidr vpn_preshared_key = var.vpn_preshared_key @@ -70,7 +70,7 @@ locals { # Compile Environment for Config output locals { env = { - resource_group = lookup(local.override[local.override_type], "resource_group", local.config.resource_group) + existing_resource_group = lookup(local.override[local.override_type], "existing_resource_group", local.config.existing_resource_group) allowed_cidr = lookup(local.override[local.override_type], "allowed_cidr", local.config.allowed_cidr) deployer_instance_profile = lookup(local.override[local.override_type], "deployer_instance_profile", local.config.deployer_instance_profile) ssh_keys = lookup(local.override[local.override_type], "ssh_keys", local.config.ssh_keys) @@ -109,7 +109,7 @@ locals { storage_instances = lookup(local.override[local.override_type], "storage_instances", local.config.storage_instances) storage_ssh_keys = lookup(local.override[local.override_type], "storage_ssh_keys", local.config.storage_ssh_keys) storage_subnets_cidr = lookup(local.override[local.override_type], "storage_subnets_cidr", local.config.storage_subnets_cidr) - vpc = lookup(local.override[local.override_type], "vpc", local.config.vpc) + vpc_name = lookup(local.override[local.override_type], "vpc_name", local.config.vpc_name) vpn_peer_address = lookup(local.override[local.override_type], "vpn_peer_address", local.config.vpn_peer_address) vpn_peer_cidr = lookup(local.override[local.override_type], "vpn_peer_cidr", local.config.vpn_peer_cidr) vpn_preshared_key = lookup(local.override[local.override_type], "vpn_preshared_key", local.config.vpn_preshared_key) diff --git a/solutions/symphony/main.tf b/solutions/symphony/main.tf index d2ba6cea..a072ebc6 100644 --- a/solutions/symphony/main.tf +++ b/solutions/symphony/main.tf @@ -1,17 +1,14 @@ module "symphony" { source = "./../.." 
- scheduler = "symphony" ibm_customer_number = var.ibm_customer_number zones = [var.zone] allowed_cidr = var.allowed_cidr prefix = local.env.prefix ssh_keys = local.env.ssh_keys - resource_group = local.env.resource_group + existing_resource_group = local.env.existing_resource_group deployer_instance_profile = local.env.deployer_instance_profile bastion_ssh_keys = local.env.bastion_ssh_keys bastion_subnets_cidr = [local.env.bastion_subnets_cidr] - compute_gui_password = local.env.compute_gui_password - compute_gui_username = local.env.compute_gui_username compute_ssh_keys = local.env.compute_ssh_keys compute_subnets_cidr = [local.env.compute_subnets_cidr] cos_instance_name = local.env.cos_instance_name @@ -26,24 +23,28 @@ module "symphony" { enable_vpc_flow_logs = local.env.enable_vpc_flow_logs enable_vpn = local.env.enable_vpn file_shares = local.env.file_shares - hpcs_instance_name = local.env.hpcs_instance_name key_management = local.env.key_management client_instances = local.env.client_instances client_ssh_keys = local.env.client_ssh_keys - client_subnets_cidr = [local.env.client_subnets_cidr] management_instances = local.env.management_instances network_cidr = local.env.network_cidr placement_strategy = local.env.placement_strategy protocol_instances = local.env.protocol_instances protocol_subnets_cidr = [local.env.protocol_subnets_cidr] static_compute_instances = local.env.static_compute_instances - storage_gui_password = local.env.storage_gui_password - storage_gui_username = local.env.storage_gui_username storage_instances = local.env.storage_instances storage_ssh_keys = local.env.storage_ssh_keys storage_subnets_cidr = [local.env.storage_subnets_cidr] - vpc = local.env.vpc + vpc_name = local.env.vpc_name vpn_peer_address = local.env.vpn_peer_address vpn_peer_cidr = local.env.vpn_peer_cidr vpn_preshared_key = local.env.vpn_preshared_key + + # scheduler = "symphony" + # compute_gui_password = local.env.compute_gui_password + # compute_gui_username = local.env.compute_gui_username + # hpcs_instance_name = local.env.hpcs_instance_name + # client_subnets_cidr = [local.env.client_subnets_cidr] + # storage_gui_password = local.env.storage_gui_password + # storage_gui_username = local.env.storage_gui_username } diff --git a/solutions/symphony/outputs.tf b/solutions/symphony/outputs.tf index de3bde19..7affaab0 100644 --- a/solutions/symphony/outputs.tf +++ b/solutions/symphony/outputs.tf @@ -1,3 +1,4 @@ output "symphony" { - value = module.symphony + description = "Symphony details" + value = module.symphony } diff --git a/solutions/symphony/override.json b/solutions/symphony/override.json index 585d1a8e..78c15f67 100644 --- a/solutions/symphony/override.json +++ b/solutions/symphony/override.json @@ -1,95 +1,95 @@ { - "prefix":"symphony", - "resource_group": "Default", - "vpc": null, - "network_cidr":"10.0.0.0/8", - "placement_strategy":null, - "ssh_keys":null, - "enable_bastion":true, - "enable_deployer":false, - "deployer_instance_profile":"mx2-4x32", + "prefix": "symphony", + "existing_resource_group": "Default", + "vpc_name": null, + "network_cidr": "10.0.0.0/8", + "placement_strategy": null, + "ssh_keys": null, + "enable_bastion": true, + "enable_deployer": false, + "deployer_instance_profile": "mx2-4x32", "bastion_ssh_keys": null, - "bastion_subnets_cidr":"10.0.0.0/24", - "enable_vpn":false, - "vpn_peer_cidr":null, - "vpn_peer_address":null, - "vpn_preshared_key":null, - "client_subnets_cidr":"10.10.10.0/24", + "bastion_subnets_cidr": "10.0.0.0/24", + "enable_vpn": false, + 
"vpn_peer_cidr": null, + "vpn_peer_address": null, + "vpn_preshared_key": null, + "client_subnets_cidr": "10.10.10.0/24", "client_ssh_keys": null, - "client_instances":[ + "client_instances": [ { - "profile":"cx2-2x4", - "count":2, - "image":"ibm-redhat-8-10-minimal-amd64-2" + "profile": "cx2-2x4", + "count": 2, + "image": "ibm-redhat-8-10-minimal-amd64-2" } ], - "compute_subnets_cidr":"10.10.20.0/24", + "compute_subnets_cidr": "10.10.20.0/24", "compute_ssh_keys": null, - "management_instances":[ + "management_instances": [ { - "profile":"cx2-2x4", - "count":3, - "image":"ibm-redhat-8-10-minimal-amd64-2" + "profile": "cx2-2x4", + "count": 3, + "image": "ibm-redhat-8-10-minimal-amd64-2" } ], - "static_compute_instances":[ + "static_compute_instances": [ { - "profile":"cx2-2x4", - "count":0, - "image":"ibm-redhat-8-10-minimal-amd64-2" + "profile": "cx2-2x4", + "count": 0, + "image": "ibm-redhat-8-10-minimal-amd64-2" } ], - "dynamic_compute_instances":[ + "dynamic_compute_instances": [ { - "profile":"cx2-2x4", - "count":5000, - "image":"ibm-redhat-8-10-minimal-amd64-2" + "profile": "cx2-2x4", + "count": 5000, + "image": "ibm-redhat-8-10-minimal-amd64-2" } ], - "compute_gui_username":"admin", + "compute_gui_username": "admin", "compute_gui_password": "hpc@IBMCloud", - "storage_subnets_cidr":"10.10.30.0/24", + "storage_subnets_cidr": "10.10.30.0/24", "storage_ssh_keys": null, - "storage_instances":[ + "storage_instances": [ { - "profile":"cx2-2x4", - "count":2, - "image":"ibm-redhat-8-10-minimal-amd64-2" + "profile": "cx2-2x4", + "count": 2, + "image": "ibm-redhat-8-10-minimal-amd64-2" } ], - "protocol_subnets_cidr":"10.10.40.0/24", - "protocol_instances":[ + "protocol_subnets_cidr": "10.10.40.0/24", + "protocol_instances": [ { - "profile":"cx2-2x4", - "count":2, - "image":"ibm-redhat-8-10-minimal-amd64-2" + "profile": "cx2-2x4", + "count": 2, + "image": "ibm-redhat-8-10-minimal-amd64-2" } ], - "storage_gui_username":"admin", + "storage_gui_username": "admin", "storage_gui_password": "hpc@IBMCloud", - "file_shares":[ + "file_shares": [ { - "mount_path":"/mnt/binaries", - "size":100, - "iops":1000 + "mount_path": "/mnt/binaries", + "size": 100, + "iops": 1000 }, { - "mount_path":"/mnt/data", - "size":100, - "iops":1000 + "mount_path": "/mnt/data", + "size": 100, + "iops": 1000 } ], - "dns_instance_id":null, - "dns_custom_resolver_id":null, - "dns_domain_names":{ - "compute":"comp.com", - "storage":"strg.com", - "protocol":"ces.com" + "dns_instance_id": null, + "dns_custom_resolver_id": null, + "dns_domain_names": { + "compute": "comp.com", + "storage": "strg.com", + "protocol": "ces.com" }, - "enable_cos_integration":true, - "cos_instance_name":null, - "enable_atracker":true, - "enable_vpc_flow_logs":true, - "key_management":"key_protect", - "hpcs_instance_name":null + "enable_cos_integration": true, + "cos_instance_name": null, + "enable_atracker": true, + "enable_vpc_flow_logs": true, + "key_management": "key_protect", + "hpcs_instance_name": null } diff --git a/solutions/symphony/variables.tf b/solutions/symphony/variables.tf index f7fe9167..a7a040c5 100644 --- a/solutions/symphony/variables.tf +++ b/solutions/symphony/variables.tf @@ -52,7 +52,7 @@ variable "prefix" { ############################################################################## # Resource Groups Variables ############################################################################## -variable "resource_group" { +variable "existing_resource_group" { type = string default = "Default" description = "String describing resource 
groups to create or reference" @@ -62,7 +62,7 @@ variable "resource_group" { ############################################################################## # VPC Variables ############################################################################## -variable "vpc" { +variable "vpc_name" { type = string default = null description = "Name of an existing VPC in which the cluster resources will be deployed. If no value is given, then a new VPC will be provisioned for the cluster. [Learn more](https://cloud.ibm.com/docs/vpc)" diff --git a/solutions/symphony/version.tf b/solutions/symphony/version.tf index e081bcc1..d465bd59 100644 --- a/solutions/symphony/version.tf +++ b/solutions/symphony/version.tf @@ -5,10 +5,6 @@ terraform { source = "IBM-Cloud/ibm" version = ">= 1.68.1, < 2.0.0" } - ansible = { - source = "ansible/ansible" - version = "~> 1.3.0" - } } } diff --git a/variables.tf b/variables.tf index 2a3d7147..1dae47f3 100644 --- a/variables.tf +++ b/variables.tf @@ -11,11 +11,11 @@ variable "ibmcloud_api_key" { ############################################################################## # Offering Variations ############################################################################## -variable "scheduler" { - type = string - default = null - description = "Select one of the scheduler (LSF/Symphony/Slurm/null)" -} +# variable "scheduler" { +# type = string +# default = null +# description = "Select one of the scheduler (LSF/Symphony/Slurm/null)" +# } variable "ibm_customer_number" { type = string @@ -55,7 +55,7 @@ variable "allowed_cidr" { description = "Network CIDR to access the VPC. This is used to manage network ACL rules for accessing the cluster." } -variable "resource_group" { +variable "existing_resource_group" { type = string default = "Default" description = "String describing resource groups to create or reference" @@ -65,7 +65,7 @@ variable "resource_group" { ############################################################################## # VPC Variables ############################################################################## -variable "vpc" { +variable "vpc_name" { type = string default = null description = "Name of an existing VPC in which the cluster resources will be deployed. If no value is given, then a new VPC will be provisioned for the cluster. [Learn more](https://cloud.ibm.com/docs/vpc)" @@ -177,11 +177,11 @@ variable "client_subnets" { description = "Name of an existing subnets in which the cluster resources will be deployed. If no value is given, then new subnet(s) will be provisioned for the cluster. [Learn more](https://cloud.ibm.com/docs/vpc)" } -variable "client_subnets_cidr" { - type = list(string) - default = ["10.10.10.0/24", "10.20.10.0/24", "10.30.10.0/24"] - description = "Subnet CIDR block to launch the client host." -} +# variable "client_subnets_cidr" { +# type = list(string) +# default = ["10.10.10.0/24", "10.20.10.0/24", "10.30.10.0/24"] +# description = "Subnet CIDR block to launch the client host." +# } variable "client_ssh_keys" { type = list(string) @@ -271,19 +271,19 @@ variable "dynamic_compute_instances" { description = "MaxNumber of instances to be launched for compute cluster." } -variable "compute_gui_username" { - type = string - default = "admin" - sensitive = true - description = "GUI user to perform system management and monitoring tasks on compute cluster." 
-} +# variable "compute_gui_username" { +# type = string +# default = "admin" +# sensitive = true +# description = "GUI user to perform system management and monitoring tasks on compute cluster." +# } -variable "compute_gui_password" { - type = string - default = "hpc@IBMCloud" - sensitive = true - description = "Password for compute cluster GUI" -} +# variable "compute_gui_password" { +# type = string +# default = "hpc@IBMCloud" +# sensitive = true +# description = "Password for compute cluster GUI" +# } ############################################################################## # Storage Variables @@ -352,25 +352,25 @@ variable "protocol_instances" { description = "Number of instances to be launched for protocol hosts." } -variable "colocate_protocol_instances" { - type = bool - default = true - description = "Enable it to use storage instances as protocol instances" -} +# variable "colocate_protocol_instances" { +# type = bool +# default = true +# description = "Enable it to use storage instances as protocol instances" +# } -variable "storage_gui_username" { - type = string - default = "admin" - sensitive = true - description = "GUI user to perform system management and monitoring tasks on storage cluster." -} +# variable "storage_gui_username" { +# type = string +# default = "admin" +# sensitive = true +# description = "GUI user to perform system management and monitoring tasks on storage cluster." +# } -variable "storage_gui_password" { - type = string - default = "hpc@IBMCloud" - sensitive = true - description = "Password for storage cluster GUI" -} +# variable "storage_gui_password" { +# type = string +# default = "hpc@IBMCloud" +# sensitive = true +# description = "Password for storage cluster GUI" +# } variable "nsd_details" { type = list( @@ -458,11 +458,11 @@ variable "kms_key_name" { description = "Provide the existing kms key name that you want to use for the IBM Cloud HPC cluster. Note: kms_key_name to be considered only if key_management value is set as key_protect.(for example kms_key_name: my-encryption-key)." } -variable "hpcs_instance_name" { - type = string - default = null - description = "Hyper Protect Crypto Service instance" -} +# variable "hpcs_instance_name" { +# type = string +# default = null +# description = "Hyper Protect Crypto Service instance" +# } variable "skip_flowlogs_s2s_auth_policy" { type = bool @@ -506,123 +506,123 @@ variable "enable_vpc_flow_logs" { ############################################################################## # Scale specific Variables ############################################################################## -variable "filesystem_config" { - type = list( - object({ - filesystem = string - block_size = string - default_data_replica = number - default_metadata_replica = number - max_data_replica = number - max_metadata_replica = number - mount_point = string - }) - ) - default = null - description = "File system configurations." -} +# variable "filesystem_config" { +# type = list( +# object({ +# filesystem = string +# block_size = string +# default_data_replica = number +# default_metadata_replica = number +# max_data_replica = number +# max_metadata_replica = number +# mount_point = string +# }) +# ) +# default = null +# description = "File system configurations." +# } -variable "filesets_config" { - type = list( - object({ - fileset = string - filesystem = string - junction_path = string - client_mount_path = string - quota = number - }) - ) - default = null - description = "Fileset configurations." 
-} +# variable "filesets_config" { +# type = list( +# object({ +# fileset = string +# filesystem = string +# junction_path = string +# client_mount_path = string +# quota = number +# }) +# ) +# default = null +# description = "Fileset configurations." +# } -variable "afm_instances" { - type = list( - object({ - profile = string - count = number - image = string - }) - ) - default = [{ - profile = "bx2-2x8" - count = 0 - image = "ibm-redhat-8-10-minimal-amd64-2" - }] - description = "Number of instances to be launched for afm hosts." -} +# variable "afm_instances" { +# type = list( +# object({ +# profile = string +# count = number +# image = string +# }) +# ) +# default = [{ +# profile = "bx2-2x8" +# count = 0 +# image = "ibm-redhat-8-10-minimal-amd64-2" +# }] +# description = "Number of instances to be launched for afm hosts." +# } -variable "afm_cos_config" { - type = list( - object({ - afm_fileset = string, - mode = string, - cos_instance = string, - bucket_name = string, - bucket_region = string, - cos_service_cred_key = string, - bucket_type = string, - bucket_storage_class = string - }) - ) - default = null - description = "AFM configurations." -} +# variable "afm_cos_config" { +# type = list( +# object({ +# afm_fileset = string, +# mode = string, +# cos_instance = string, +# bucket_name = string, +# bucket_region = string, +# cos_service_cred_key = string, +# bucket_type = string, +# bucket_storage_class = string +# }) +# ) +# default = null +# description = "AFM configurations." +# } ############################################################################## # LSF specific Variables ############################################################################## -variable "cluster_id" { - type = string - default = "HPCCluster" - description = "Unique ID of the cluster used by LSF for configuration of resources. This can be up to 39 alphanumeric characters." - validation { - condition = 0 < length(var.cluster_id) && length(var.cluster_id) < 40 && can(regex("^[a-zA-Z0-9_.-]+$", var.cluster_id)) - error_message = "The ID can be up to 39 alphanumeric characters including the underscore (_), the hyphen (-), and the period (.) characters." - } -} +# variable "cluster_id" { +# type = string +# default = "HPCCluster" +# description = "Unique ID of the cluster used by LSF for configuration of resources. This can be up to 39 alphanumeric characters." +# validation { +# condition = 0 < length(var.cluster_id) && length(var.cluster_id) < 40 && can(regex("^[a-zA-Z0-9_.-]+$", var.cluster_id)) +# error_message = "The ID can be up to 39 alphanumeric characters including the underscore (_), the hyphen (-), and the period (.) characters." +# } +# } -variable "enable_hyperthreading" { - type = bool - default = true - description = "Setting this to true will enable hyper-threading in the worker nodes of the cluster (default). Otherwise, hyper-threading will be disabled." -} +# variable "enable_hyperthreading" { +# type = bool +# default = true +# description = "Setting this to true will enable hyper-threading in the worker nodes of the cluster (default). Otherwise, hyper-threading will be disabled." +# } -variable "enable_dedicated_host" { - type = bool - default = false - description = "Set to true to use dedicated hosts for compute hosts (default: false)." -} +# variable "enable_dedicated_host" { +# type = bool +# default = false +# description = "Set to true to use dedicated hosts for compute hosts (default: false)." 
+# } -variable "dedicated_host_placement" { - type = string - default = "spread" - description = "Specify 'pack' or 'spread'. The 'pack' option will deploy VSIs on one dedicated host until full before moving on to the next dedicated host." - validation { - condition = var.dedicated_host_placement == "spread" || var.dedicated_host_placement == "pack" - error_message = "Supported values for dedicated_host_placement: spread or pack." - } -} +# variable "dedicated_host_placement" { +# type = string +# default = "spread" +# description = "Specify 'pack' or 'spread'. The 'pack' option will deploy VSIs on one dedicated host until full before moving on to the next dedicated host." +# validation { +# condition = var.dedicated_host_placement == "spread" || var.dedicated_host_placement == "pack" +# error_message = "Supported values for dedicated_host_placement: spread or pack." +# } +# } -variable "enable_app_center" { - type = bool - default = false - description = "Set to true to install and enable use of the IBM Spectrum LSF Application Center GUI." -} +# variable "enable_app_center" { +# type = bool +# default = false +# description = "Set to true to install and enable use of the IBM Spectrum LSF Application Center GUI." +# } -variable "app_center_gui_password" { - type = string - default = "hpc@IBMCloud" - sensitive = true - description = "Password for IBM Spectrum LSF Application Center GUI." -} +# variable "app_center_gui_password" { +# type = string +# default = "hpc@IBMCloud" +# sensitive = true +# description = "Password for IBM Spectrum LSF Application Center GUI." +# } -variable "app_center_db_password" { - type = string - default = "hpc@IBMCloud" - sensitive = true - description = "Password for IBM Spectrum LSF Application Center database GUI." -} +# variable "app_center_db_password" { +# type = string +# default = "hpc@IBMCloud" +# sensitive = true +# description = "Password for IBM Spectrum LSF Application Center database GUI." +# } ############################################################################## # Symphony specific Variables @@ -635,93 +635,96 @@ variable "app_center_db_password" { ############################################################################## # Landing Zone Variables ############################################################################## -variable "clusters" { - default = null - description = "A list describing clusters workloads to create" - type = list( - object({ - name = string # Name of Cluster - vpc_name = string # Name of VPC - subnet_names = list(string) # List of vpc subnets for cluster - workers_per_subnet = number # Worker nodes per subnet. 
- machine_type = string # Worker node flavor - kube_type = string # iks or openshift - kube_version = optional(string) # Can be a version from `ibmcloud ks versions` or `default` - entitlement = optional(string) # entitlement option for openshift - secondary_storage = optional(string) # Secondary storage type - pod_subnet = optional(string) # Portable subnet for pods - service_subnet = optional(string) # Portable subnet for services - resource_group = string # Resource Group used for cluster - cos_name = optional(string) # Name of COS instance Required only for OpenShift clusters - access_tags = optional(list(string), []) - boot_volume_crk_name = optional(string) # Boot volume encryption key name - disable_public_endpoint = optional(bool, true) # disable cluster public, leaving only private endpoint - disable_outbound_traffic_protection = optional(bool, false) # public outbound access from the cluster workers - cluster_force_delete_storage = optional(bool, false) # force the removal of persistent storage associated with the cluster during cluster deletion - operating_system = string # The operating system of the workers in the default worker pool. See https://cloud.ibm.com/docs/openshift?topic=openshift-openshift_versions#openshift_versions_available . - kms_wait_for_apply = optional(bool, true) # make terraform wait until KMS is applied to master and it is ready and deployed - verify_cluster_network_readiness = optional(bool, true) # Flag to run a script will run kubectl commands to verify that all worker nodes can communicate successfully with the master. If the runtime does not have access to the kube cluster to run kubectl commands, this should be set to false. - use_ibm_cloud_private_api_endpoints = optional(bool, true) # Flag to force all cluster related api calls to use the IBM Cloud private endpoints. - import_default_worker_pool_on_create = optional(bool) # (Advanced users) Whether to handle the default worker pool as a stand-alone ibm_container_vpc_worker_pool resource on cluster creation. Only set to false if you understand the implications of managing the default worker pool as part of the cluster resource. Set to true to import the default worker pool as a separate resource. Set to false to manage the default worker pool as part of the cluster resource. - allow_default_worker_pool_replacement = optional(bool) # (Advanced users) Set to true to allow the module to recreate a default worker pool. Only use in the case where you are getting an error indicating that the default worker pool cannot be replaced on apply. Once the default worker pool is handled as a stand-alone ibm_container_vpc_worker_pool, if you wish to make any change to the default worker pool which requires the re-creation of the default pool set this variable to true - labels = optional(map(string)) # A list of labels that you want to add to the default worker pool. - addons = optional(object({ # Map of OCP cluster add-on versions to install - debug-tool = optional(string) - image-key-synchronizer = optional(string) - openshift-data-foundation = optional(string) - vpc-file-csi-driver = optional(string) - static-route = optional(string) - cluster-autoscaler = optional(string) - vpc-block-csi-driver = optional(string) - ibm-storage-operator = optional(string) - }), {}) - manage_all_addons = optional(bool, false) # Instructs Terraform to manage all cluster addons, even if addons were installed outside of the module. If set to 'true' this module will destroy any addons that were installed by other sources. 
- kms_config = optional( - object({ - crk_name = string # Name of key - private_endpoint = optional(bool) # Private endpoint - }) - ) - worker_pools = optional( - list( - object({ - name = string # Worker pool name - vpc_name = string # VPC name - workers_per_subnet = number # Worker nodes per subnet - flavor = string # Worker node flavor - subnet_names = list(string) # List of vpc subnets for worker pool - entitlement = optional(string) # entitlement option for openshift - secondary_storage = optional(string) # Secondary storage type - boot_volume_crk_name = optional(string) # Boot volume encryption key name - operating_system = string # The operating system of the workers in the worker pool. See https://cloud.ibm.com/docs/openshift?topic=openshift-openshift_versions#openshift_versions_available . - labels = optional(map(string)) # A list of labels that you want to add to all the worker nodes in the worker pool. - }) - ) - ) - }) - ) -} +# variable "clusters" { +# default = null +# description = "A list describing clusters workloads to create" +# type = list( +# object({ +# name = string # Name of Cluster +# vpc_name = string # Name of VPC +# subnet_names = list(string) # List of vpc subnets for cluster +# workers_per_subnet = number # Worker nodes per subnet. +# machine_type = string # Worker node flavor +# kube_type = string # iks or openshift +# kube_version = optional(string) # Can be a version from `ibmcloud ks versions` or `default` +# entitlement = optional(string) # entitlement option for openshift +# secondary_storage = optional(string) # Secondary storage type +# pod_subnet = optional(string) # Portable subnet for pods +# service_subnet = optional(string) # Portable subnet for services +# existing_resource_group = string # Resource Group used for cluster +# cos_name = optional(string) # Name of COS instance Required only for OpenShift clusters +# access_tags = optional(list(string), []) +# boot_volume_crk_name = optional(string) # Boot volume encryption key name +# disable_public_endpoint = optional(bool, true) # disable cluster public, leaving only private endpoint +# disable_outbound_traffic_protection = optional(bool, false) # public outbound access from the cluster workers +# cluster_force_delete_storage = optional(bool, false) # force the removal of persistent storage associated with the cluster during cluster deletion +# operating_system = string # The operating system of the workers in the default worker pool. See https://cloud.ibm.com/docs/openshift?topic=openshift-openshift_versions#openshift_versions_available . +# kms_wait_for_apply = optional(bool, true) # make terraform wait until KMS is applied to master and it is ready and deployed +# verify_cluster_network_readiness = optional(bool, true) # Flag to run a script will run kubectl commands to verify that all worker nodes can communicate successfully with the master. If the runtime does not have access to the kube cluster to run kubectl commands, this should be set to false. +# use_ibm_cloud_private_api_endpoints = optional(bool, true) # Flag to force all cluster related api calls to use the IBM Cloud private endpoints. +# import_default_worker_pool_on_create = optional(bool) # (Advanced users) Whether to handle the default worker pool as a stand-alone ibm_container_vpc_worker_pool resource on cluster creation. Only set to false if you understand the implications of managing the default worker pool as part of the cluster resource. Set to true to import the default worker pool as a separate resource. 
Set to false to manage the default worker pool as part of the cluster resource. +# allow_default_worker_pool_replacement = optional(bool) # (Advanced users) Set to true to allow the module to recreate a default worker pool. Only use in the case where you are getting an error indicating that the default worker pool cannot be replaced on apply. Once the default worker pool is handled as a stand-alone ibm_container_vpc_worker_pool, if you wish to make any change to the default worker pool which requires the re-creation of the default pool set this variable to true +# labels = optional(map(string)) # A list of labels that you want to add to the default worker pool. +# addons = optional(object({ # Map of OCP cluster add-on versions to install +# debug-tool = optional(string) +# image-key-synchronizer = optional(string) +# openshift-data-foundation = optional(string) +# vpc-file-csi-driver = optional(string) +# static-route = optional(string) +# cluster-autoscaler = optional(string) +# vpc-block-csi-driver = optional(string) +# ibm-storage-operator = optional(string) +# }), {}) +# manage_all_addons = optional(bool, false) # Instructs Terraform to manage all cluster addons, even if addons were installed outside of the module. If set to 'true' this module will destroy any addons that were installed by other sources. +# kms_config = optional( +# object({ +# crk_name = string # Name of key +# private_endpoint = optional(bool) # Private endpoint +# }) +# ) +# worker_pools = optional( +# list( +# object({ +# name = string # Worker pool name +# vpc_name = string # VPC name +# workers_per_subnet = number # Worker nodes per subnet +# flavor = string # Worker node flavor +# subnet_names = list(string) # List of vpc subnets for worker pool +# entitlement = optional(string) # entitlement option for openshift +# secondary_storage = optional(string) # Secondary storage type +# boot_volume_crk_name = optional(string) # Boot volume encryption key name +# operating_system = string # The operating system of the workers in the worker pool. See https://cloud.ibm.com/docs/openshift?topic=openshift-openshift_versions#openshift_versions_available . +# labels = optional(map(string)) # A list of labels that you want to add to all the worker nodes in the worker pool. +# }) +# ) +# ) +# }) +# ) +# } ############################################################################## # Terraform generic Variables ############################################################################## -variable "TF_PARALLELISM" { - type = string - default = "250" - description = "Limit the number of concurrent operation." -} +# # tflint-ignore: all +# variable "TF_PARALLELISM" { +# type = string +# default = "250" +# description = "Limit the number of concurrent operation." +# } -variable "TF_VERSION" { - type = string - default = "1.9" - description = "The version of the Terraform engine that's used in the Schematics workspace." -} +# # tflint-ignore: all +# variable "TF_VERSION" { +# type = string +# default = "1.9" +# description = "The version of the Terraform engine that's used in the Schematics workspace." +# } -variable "TF_LOG" { - type = string - default = "ERROR" - description = "The Terraform log level used for output in the Schematics workspace." -} +# # tflint-ignore: all +# variable "TF_LOG" { +# type = string +# default = "ERROR" +# description = "The Terraform log level used for output in the Schematics workspace." 
+# } ############################################################################## # Observability Variables @@ -783,11 +786,11 @@ variable "observability_logs_retention_period" { } } -variable "observability_monitoring_on_compute_nodes_enable" { - description = "Set false to disable IBM Cloud Monitoring integration. If enabled, infrastructure metrics from Compute Nodes will be ingested." - type = bool - default = false -} +# variable "observability_monitoring_on_compute_nodes_enable" { +# description = "Set false to disable IBM Cloud Monitoring integration. If enabled, infrastructure metrics from Compute Nodes will be ingested." +# type = bool +# default = false +# } variable "observability_monitoring_plan" { description = "Type of service plan for IBM Cloud Monitoring instance. You can choose one of the following: lite, graduated-tier. For all details visit [IBM Cloud Monitoring Service Plans](https://cloud.ibm.com/docs/monitoring?topic=monitoring-service_plans)." @@ -855,31 +858,31 @@ variable "enable_landing_zone" { description = "Run landing zone module." } -variable "vpc_id" { - type = string - default = null - description = "ID of an existing VPC in which the cluster resources will be deployed." -} +# variable "vpc_id" { +# type = string +# default = null +# description = "ID of an existing VPC in which the cluster resources will be deployed." +# } -variable "bastion_fip" { - type = string - default = null - description = "bastion fip" -} +# variable "bastion_fip" { +# type = string +# default = null +# description = "bastion fip" +# } -variable "compute_public_key_content" { - type = string - sensitive = true - default = null - description = "Compute security key content." -} +# variable "compute_public_key_content" { +# type = string +# sensitive = true +# default = null +# description = "Compute security key content." +# } -variable "compute_private_key_content" { - type = string - sensitive = true - default = null - description = "Compute security key content." -} +# variable "compute_private_key_content" { +# type = string +# sensitive = true +# default = null +# description = "Compute security key content." 
+# } variable "enable_atracker" { type = bool @@ -887,17 +890,17 @@ variable "enable_atracker" { description = "Enable Activity tracker" } -variable "cloud_logs_data_bucket" { - type = list(string) - default = [] - description = "" -} +# variable "cloud_logs_data_bucket" { +# type = list(string) +# default = [] +# description = "" +# } -variable "cloud_metrics_data_bucket" { - type = list(string) - default = [] - description = "" -} +# variable "cloud_metrics_data_bucket" { +# type = list(string) +# default = [] +# description = "" +# } variable "bastion_security_group_id" { type = string diff --git a/version.tf b/version.tf index 834c7c72..5c31d39d 100644 --- a/version.tf +++ b/version.tf @@ -10,18 +10,18 @@ terraform { source = "IBM-Cloud/ibm" version = ">= 1.68.1, < 2.0.0" } - random = { - source = "hashicorp/random" - version = ">= 3.4.3, < 4.0.0" - } + # random = { + # source = "hashicorp/random" + # version = ">= 3.4.3, < 4.0.0" + # } time = { source = "hashicorp/time" version = ">= 0.9.1, < 1.0.0" } - ansible = { - source = "ansible/ansible" - version = "~> 1.3.0" - } + # ansible = { + # source = "ansible/ansible" + # version = "~> 1.3.0" + # } } } From 3d049510ef09e35ae59d0610b32c8d7100d009c5 Mon Sep 17 00:00:00 2001 From: Manikandan-Renu1 Date: Wed, 12 Mar 2025 22:30:27 +0530 Subject: [PATCH 18/20] var-name-change-in-prepare-tf-file --- locals.tf | 2 +- main.tf | 2 +- modules/prepare_tf_input/main.tf | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/locals.tf b/locals.tf index 3263e82e..e808efbf 100644 --- a/locals.tf +++ b/locals.tf @@ -16,7 +16,7 @@ locals { locals { # dependency: landing_zone -> deployer vpc_id = var.vpc_name == null ? one(module.landing_zone.vpc_id) : data.ibm_is_vpc.existing_vpc[0].id - vpc = var.vpc_name == null ? one(module.landing_zone.vpc_name) : var.vpc_name + vpc_name = var.vpc_name == null ? one(module.landing_zone.vpc_name) : var.vpc_name bastion_subnets = module.landing_zone.bastion_subnets kms_encryption_enabled = var.key_management != null ? true : false boot_volume_encryption_key = var.key_management != null ? 
one(module.landing_zone.boot_volume_encryption_key)["crn"] : null diff --git a/main.tf b/main.tf index 5e2da7a6..61f96c12 100644 --- a/main.tf +++ b/main.tf @@ -111,7 +111,7 @@ module "prepare_tf_input" { enable_atracker = var.enable_atracker enable_vpc_flow_logs = var.enable_vpc_flow_logs allowed_cidr = var.allowed_cidr - vpc = local.vpc + vpc = local.vpc_name vpc_id = local.vpc_id storage_subnets = local.storage_subnet protocol_subnets = local.protocol_subnet diff --git a/modules/prepare_tf_input/main.tf b/modules/prepare_tf_input/main.tf index b6aafda7..b087fdbc 100644 --- a/modules/prepare_tf_input/main.tf +++ b/modules/prepare_tf_input/main.tf @@ -3,7 +3,7 @@ resource "local_sensitive_file" "prepare_tf_input" { content = < Date: Thu, 13 Mar 2025 10:35:49 +0530 Subject: [PATCH 19/20] var-name-change-in-prepare-tf-file --- main.tf | 2 -- modules/prepare_tf_input/main.tf | 2 -- modules/prepare_tf_input/variables.tf | 18 +++++++++--------- 3 files changed, 9 insertions(+), 13 deletions(-) diff --git a/main.tf b/main.tf index 61f96c12..2b61a428 100644 --- a/main.tf +++ b/main.tf @@ -93,7 +93,6 @@ module "landing_zone_vsi" { module "prepare_tf_input" { source = "./modules/prepare_tf_input" enable_deployer = var.enable_deployer - bastion_fip = local.bastion_fip deployer_ip = local.deployer_ip ibmcloud_api_key = var.ibmcloud_api_key resource_group = var.existing_resource_group @@ -112,7 +111,6 @@ module "prepare_tf_input" { enable_vpc_flow_logs = var.enable_vpc_flow_logs allowed_cidr = var.allowed_cidr vpc = local.vpc_name - vpc_id = local.vpc_id storage_subnets = local.storage_subnet protocol_subnets = local.protocol_subnet compute_subnets = local.compute_subnet diff --git a/modules/prepare_tf_input/main.tf b/modules/prepare_tf_input/main.tf index b087fdbc..b243abbb 100644 --- a/modules/prepare_tf_input/main.tf +++ b/modules/prepare_tf_input/main.tf @@ -9,7 +9,6 @@ resource "local_sensitive_file" "prepare_tf_input" { "enable_landing_zone": false, "enable_deployer": false, "enable_bastion": false, - "bastion_fip": "${var.bastion_fip}", "compute_ssh_keys": ${local.list_compute_ssh_keys}, "storage_ssh_keys": ${local.list_storage_ssh_keys}, "storage_instances": ${local.list_storage_instances}, @@ -23,7 +22,6 @@ resource "local_sensitive_file" "prepare_tf_input" { "enable_vpc_flow_logs": ${var.enable_vpc_flow_logs}, "allowed_cidr": ${local.allowed_cidr}, "vpc_name": "${var.vpc}", - "vpc_id": "${var.vpc_id}", "storage_subnets": ${local.list_storage_subnets}, "protocol_subnets": ${local.list_protocol_subnets}, "compute_subnets": ${local.list_compute_subnets}, diff --git a/modules/prepare_tf_input/variables.tf b/modules/prepare_tf_input/variables.tf index af315ed7..b8d0e45f 100644 --- a/modules/prepare_tf_input/variables.tf +++ b/modules/prepare_tf_input/variables.tf @@ -155,10 +155,10 @@ variable "vpc" { description = "Name of an existing VPC in which the cluster resources will be deployed. If no value is given, then a new VPC will be provisioned for the cluster. [Learn more](https://cloud.ibm.com/docs/vpc)" } -variable "vpc_id" { - type = string - description = "ID of an existing VPC in which the cluster resources will be deployed." -} +# variable "vpc_id" { +# type = string +# description = "ID of an existing VPC in which the cluster resources will be deployed." 
+# } ############################################################################## # DNS Variables @@ -184,11 +184,11 @@ variable "enable_deployer" { ############################################################################## # Bastion Variables ############################################################################## -variable "bastion_fip" { - type = string - default = null - description = "bastion fip" -} +# variable "bastion_fip" { +# type = string +# default = null +# description = "bastion fip" +# } ############################################################################## # Offering Variations From b73d1cd96aa78919b3b9ad9b42aaa056e5044771 Mon Sep 17 00:00:00 2001 From: Manikandan-Renu1 Date: Fri, 14 Mar 2025 18:07:48 +0530 Subject: [PATCH 20/20] removed-unwanted-commented-lines-and-variables --- .secrets.baseline | 28 +++- datasource.tf | 3 +- .../modules/landing_zone_vpc/datasource.tf | 2 +- .../modules/landing_zone_vpc/locals.tf | 2 +- .../modules/landing_zone_vpc/version.tf | 2 +- examples/create_vpc/solutions/hpc/version.tf | 2 +- locals.tf | 24 ++-- main.tf | 30 ++-- modules/deployer/datasource.tf | 4 +- modules/deployer/locals.tf | 12 +- modules/deployer/main.tf | 5 +- modules/deployer/template_files.tf | 3 - modules/deployer/variables.tf | 38 ++--- modules/deployer/version.tf | 2 +- modules/dns/version.tf | 2 +- modules/dns_record/version.tf | 2 +- modules/file_storage/version.tf | 2 +- modules/inventory/main.tf | 5 - modules/inventory/version.tf | 2 +- modules/key/version.tf | 2 +- modules/landing_zone/locals.tf | 111 ++------------- modules/landing_zone/main.tf | 2 +- modules/landing_zone/outputs.tf | 20 --- modules/landing_zone/variables.tf | 4 +- modules/landing_zone/version.tf | 15 +- modules/landing_zone_vsi/datasource.tf | 4 +- modules/landing_zone_vsi/locals.tf | 3 +- modules/landing_zone_vsi/main.tf | 10 +- modules/landing_zone_vsi/variables.tf | 2 +- modules/landing_zone_vsi/version.tf | 2 +- modules/observability_instance/versions.tf | 2 +- modules/playbook/main.tf | 1 - modules/playbook/version.tf | 3 +- modules/prepare_tf_input/locals.tf | 11 +- modules/prepare_tf_input/main.tf | 4 +- modules/prepare_tf_input/variables.tf | 32 +---- modules/prepare_tf_input/version.tf | 3 +- modules/resource_provisioner/locals.tf | 5 +- modules/resource_provisioner/variables.tf | 14 -- modules/resource_provisioner/version.tf | 3 +- modules/security/scc/variables.tf | 4 +- modules/security/scc/versions.tf | 4 +- modules/write_inventory/main.tf | 5 +- modules/write_inventory/version.tf | 3 +- samples/configs/hpc_catalog_values.json | 130 ++++++++--------- samples/configs/hpc_schematics_values.json | 38 ++--- solutions/custom/locals.tf | 3 - solutions/custom/main.tf | 2 +- solutions/custom/variables.tf | 10 +- solutions/custom/version.tf | 2 +- .../catalogValidationValues.json.template | 2 +- solutions/hpcaas/locals.tf | 5 +- solutions/hpcaas/main.tf | 4 +- solutions/hpcaas/variables.tf | 7 +- solutions/hpcaas/version.tf | 2 +- .../lsf/catalogValidationValues.json.template | 2 +- solutions/lsf/locals.tf | 30 ++-- solutions/lsf/main.tf | 5 +- solutions/lsf/outputs.tf | 1 - solutions/lsf/variables.tf | 9 +- solutions/lsf/version.tf | 2 +- .../catalogValidationValues.json.template | 2 +- solutions/scale/locals.tf | 4 +- solutions/scale/main.tf | 4 +- solutions/scale/variables.tf | 9 +- solutions/scale/version.tf | 2 +- .../catalogValidationValues.json.template | 2 +- solutions/slurm/locals.tf | 5 +- solutions/slurm/main.tf | 4 +- solutions/slurm/variables.tf | 7 +- 
solutions/slurm/version.tf | 2 +- .../catalogValidationValues.json.template | 2 +- solutions/symphony/locals.tf | 2 +- solutions/symphony/main.tf | 4 +- solutions/symphony/variables.tf | 7 +- solutions/symphony/version.tf | 2 +- tools/image-builder/locals.tf | 16 +-- tools/image-builder/main.tf | 4 +- tools/image-builder/template_files.tf | 4 +- .../templates/packer_user_data.tpl | 6 +- tools/image-builder/variables.tf | 6 +- tools/image-builder/version.tf | 2 +- variables.tf | 131 +----------------- version.tf | 11 +- 84 files changed, 299 insertions(+), 632 deletions(-) diff --git a/.secrets.baseline b/.secrets.baseline index 913b818a..c19782fb 100644 --- a/.secrets.baseline +++ b/.secrets.baseline @@ -3,7 +3,7 @@ "files": "go.sum|^.secrets.baseline$", "lines": null }, - "generated_at": "2025-03-12T08:49:38Z", + "generated_at": "2025-03-14T12:32:22Z", "plugins_used": [ { "name": "AWSKeyDetector" @@ -77,6 +77,32 @@ } ], "results": { + "samples/configs/hpc_catalog_values.json": [ + { + "hashed_secret": "5073c7ac17500ef0678aebc7138a996b4f75d623", + "is_secret": true, + "is_verified": false, + "line_number": 2, + "type": "Secret Keyword", + "verified_result": null + }, + { + "hashed_secret": "1f5e25be9b575e9f5d39c82dfd1d9f4d73f1975c", + "is_secret": true, + "is_verified": false, + "line_number": 50, + "type": "Secret Keyword", + "verified_result": null + }, + { + "hashed_secret": "b295b04949a98dc50ba65adcddd588077b93ab3c", + "is_secret": true, + "is_verified": false, + "line_number": 66, + "type": "Secret Keyword", + "verified_result": null + } + ], "solutions/custom/override.json": [ { "hashed_secret": "850264135744c21e30d6336ed7bf047d2e82af8b", diff --git a/datasource.tf b/datasource.tf index ef7d3825..dff8ad33 100644 --- a/datasource.tf +++ b/datasource.tf @@ -10,7 +10,6 @@ data "ibm_is_zone" "zone" { } */ - data "ibm_is_vpc" "existing_vpc" { count = var.vpc_name != null ? 1 : 0 name = var.vpc_name @@ -23,7 +22,7 @@ data "ibm_is_subnet" "subnet" { } */ -data "ibm_resource_group" "resource_group" { +data "ibm_resource_group" "existing_resource_group" { count = var.existing_resource_group == null ? 0 : 1 name = var.existing_resource_group } diff --git a/examples/create_vpc/modules/landing_zone_vpc/datasource.tf b/examples/create_vpc/modules/landing_zone_vpc/datasource.tf index ad4e30ff..32f4c883 100644 --- a/examples/create_vpc/modules/landing_zone_vpc/datasource.tf +++ b/examples/create_vpc/modules/landing_zone_vpc/datasource.tf @@ -1,3 +1,3 @@ -data "ibm_resource_group" "resource_group" { +data "ibm_resource_group" "existing_resource_group" { name = var.existing_resource_group } diff --git a/examples/create_vpc/modules/landing_zone_vpc/locals.tf b/examples/create_vpc/modules/landing_zone_vpc/locals.tf index b34ad164..ad8d9715 100644 --- a/examples/create_vpc/modules/landing_zone_vpc/locals.tf +++ b/examples/create_vpc/modules/landing_zone_vpc/locals.tf @@ -21,7 +21,7 @@ locals { var.allowed_cidr # var.network_cidr ]) - resource_group_id = var.existing_resource_group != null ? data.ibm_resource_group.resource_group.id : "" + resource_group_id = var.existing_resource_group != null ? 
data.ibm_resource_group.existing_resource_group.id : "" # Region and Zone calculations region = join("-", slice(split("-", var.zones[0]), 0, 2)) diff --git a/examples/create_vpc/modules/landing_zone_vpc/version.tf b/examples/create_vpc/modules/landing_zone_vpc/version.tf index 87b0e3f2..a1697e97 100644 --- a/examples/create_vpc/modules/landing_zone_vpc/version.tf +++ b/examples/create_vpc/modules/landing_zone_vpc/version.tf @@ -1,5 +1,5 @@ terraform { - required_version = ">= 1.3" + required_version = ">= 1.9.0" required_providers { ibm = { source = "IBM-Cloud/ibm" diff --git a/examples/create_vpc/solutions/hpc/version.tf b/examples/create_vpc/solutions/hpc/version.tf index 87b0e3f2..a1697e97 100644 --- a/examples/create_vpc/solutions/hpc/version.tf +++ b/examples/create_vpc/solutions/hpc/version.tf @@ -1,5 +1,5 @@ terraform { - required_version = ">= 1.3" + required_version = ">= 1.9.0" required_providers { ibm = { source = "IBM-Cloud/ibm" diff --git a/locals.tf b/locals.tf index e808efbf..be1ed0a7 100644 --- a/locals.tf +++ b/locals.tf @@ -23,10 +23,11 @@ locals { existing_kms_instance_guid = var.key_management != null ? module.landing_zone.key_management_guid : null cos_data = var.enable_bastion ? [] : module.landing_zone.cos_buckets_data # Future use + # When we implement the existing bastion concept we need the changes to implemented like below. Which is already there on our LSF DA # skip_iam_authorization_policy = true + # skip_iam_authorization_policy = var.bastion_instance_name != null ? false : local.skip_iam_authorization_policy } - # locals needed for landing_zone_vsi locals { # dependency: landing_zone -> deployer -> landing_zone_vsi @@ -102,17 +103,10 @@ locals { #boot_volume_encryption_key = var.key_management != null ? one(module.landing_zone.boot_volume_encryption_key)["crn"] : null #skip_iam_authorization_policy = true - #resource_group_id = data.ibm_resource_group.itself.id - #vpc_id = var.vpc_name == null ? module.landing_zone.vpc_id[0] : data.ibm_is_vpc.existing_vpc[0].id - #vpc_crn = var.vpc_name == null ? module.landing_zone.vpc_crn[0] : data.ibm_is_vpc.existing_vpc[0].crn } # locals needed for file-storage locals { - # dependency: landing_zone -> file-storage - #vpc_id = var.vpc_name == null ? one(module.landing_zone.vpc_id) : var.vpc_name - #boot_volume_encryption_key = var.key_management != null ? one(module.landing_zone.boot_volume_encryption_key)["crn"] : null - # dependency: landing_zone_vsi -> file-share compute_subnet_id = var.vpc_name == null && var.compute_subnets == null ? local.compute_subnets[0].id : [for subnet in data.ibm_is_subnet.existing_compute_subnets : subnet.id][0] compute_security_group_id = var.enable_deployer ? [] : module.landing_zone_vsi[0].compute_sg_id @@ -142,8 +136,8 @@ locals { # resource_group = var.existing_resource_group == null ? "workload-rg" : var.existing_resource_group resource_group_ids = { # management_rg = var.existing_resource_group == null ? module.landing_zone.resource_group_id[0]["management-rg"] : one(values(one(module.landing_zone.resource_group_id))) - service_rg = var.existing_resource_group == null ? module.landing_zone.resource_group_id[0]["service-rg"] : data.ibm_resource_group.resource_group[0].id - workload_rg = var.existing_resource_group == null ? module.landing_zone.resource_group_id[0]["workload-rg"] : data.ibm_resource_group.resource_group[0].id + service_rg = var.existing_resource_group == null ? 
module.landing_zone.resource_group_id[0]["service-rg"] : data.ibm_resource_group.existing_resource_group[0].id + workload_rg = var.existing_resource_group == null ? module.landing_zone.resource_group_id[0]["workload-rg"] : data.ibm_resource_group.existing_resource_group[0].id } # resource_group_id = one(values(one(module.landing_zone.resource_group_id))) vpc_crn = var.vpc_name == null ? one(module.landing_zone.vpc_crn) : one(data.ibm_is_vpc.existing_vpc[*].crn) @@ -155,11 +149,11 @@ locals { existing_client_subnet_crns = [for subnet in data.ibm_is_subnet.existing_client_subnets : subnet.crn] existing_bastion_subnet_crns = [for subnet in data.ibm_is_subnet.existing_bastion_subnets : subnet.crn] subnets_crn = concat(local.existing_compute_subnet_crns, local.existing_storage_subnet_crns, local.existing_protocol_subnet_crns, local.existing_client_subnet_crns, local.existing_bastion_subnet_crns) - # subnets_crn = var.vpc_name == null && var.compute_subnets == null ? module.landing_zone.subnets_crn : concat(local.existing_subnet_crns, module.landing_zone.subnets_crn) - #subnets = flatten([local.compute_subnets, local.storage_subnets, local.protocol_subnets]) - #subnets_crns = data.ibm_is_subnet.itself[*].crn - # subnets_crn = module.landing_zone.subnets_crn - #boot_volume_encryption_key = var.key_management != null ? one(module.landing_zone.boot_volume_encryption_key)["crn"] : null + # subnets_crn = var.vpc_name == null && var.compute_subnets == null ? module.landing_zone.subnets_crn : concat(local.existing_subnet_crns, module.landing_zone.subnets_crn) + # subnets = flatten([local.compute_subnets, local.storage_subnets, local.protocol_subnets]) + # subnets_crns = data.ibm_is_subnet.itself[*].crn + # subnets_crn = module.landing_zone.subnets_crn + # boot_volume_encryption_key = var.key_management != null ? one(module.landing_zone.boot_volume_encryption_key)["crn"] : null # dependency: landing_zone_vsi -> file-share } diff --git a/main.tf b/main.tf index 2b61a428..39b784b0 100644 --- a/main.tf +++ b/main.tf @@ -20,10 +20,10 @@ module "landing_zone" { prefix = var.prefix protocol_instances = var.protocol_instances protocol_subnets_cidr = var.protocol_subnets_cidr - resource_group = var.existing_resource_group + existing_resource_group = var.existing_resource_group storage_instances = var.storage_instances storage_subnets_cidr = var.storage_subnets_cidr - vpc = var.vpc_name + vpc_name = var.vpc_name vpn_peer_address = var.vpn_peer_address vpn_peer_cidr = var.vpn_peer_cidr vpn_preshared_key = var.vpn_preshared_key @@ -38,7 +38,8 @@ module "landing_zone" { module "deployer" { source = "./modules/deployer" - resource_group = var.existing_resource_group + scheduler = var.scheduler + existing_resource_group = var.existing_resource_group prefix = var.prefix vpc_id = local.vpc_id network_cidr = var.network_cidr @@ -56,16 +57,12 @@ module "deployer" { existing_kms_instance_guid = local.existing_kms_instance_guid skip_iam_authorization_policy = var.skip_iam_authorization_policy dns_domain_names = var.dns_domain_names - # zones = var.zones - # static_compute_instances = var.static_compute_instances - # management_instances = var.management_instances - } module "landing_zone_vsi" { count = var.enable_deployer == false ? 
1 : 0 source = "./modules/landing_zone_vsi" - resource_group = var.existing_resource_group + existing_resource_group = var.existing_resource_group prefix = var.prefix vpc_id = local.vpc_id bastion_security_group_id = var.bastion_security_group_id @@ -95,7 +92,7 @@ module "prepare_tf_input" { enable_deployer = var.enable_deployer deployer_ip = local.deployer_ip ibmcloud_api_key = var.ibmcloud_api_key - resource_group = var.existing_resource_group + existing_resource_group = var.existing_resource_group prefix = var.prefix zones = var.zones compute_ssh_keys = local.compute_ssh_keys @@ -110,7 +107,7 @@ module "prepare_tf_input" { enable_atracker = var.enable_atracker enable_vpc_flow_logs = var.enable_vpc_flow_logs allowed_cidr = var.allowed_cidr - vpc = local.vpc_name + vpc_name = local.vpc_name storage_subnets = local.storage_subnet protocol_subnets = local.protocol_subnet compute_subnets = local.compute_subnet @@ -287,15 +284,16 @@ module "cloud_monitoring_instance_creation" { # Code for SCC Instance module "scc_instance_and_profile" { - count = var.enable_deployer == true && var.scc_enable ? 1 : 0 - source = "./modules/security/scc" - location = var.scc_location != "" ? var.scc_location : "us-south" - rg = local.resource_group_ids["service_rg"] - scc_profile = var.scc_enable ? var.scc_profile : "" - # scc_profile_version = var.scc_profile != "" && var.scc_profile != null ? var.scc_profile_version : "" + count = var.enable_deployer == true && var.scc_enable ? 1 : 0 + source = "./modules/security/scc" + location = var.scc_location != "" ? var.scc_location : "us-south" + rg = local.resource_group_ids["service_rg"] + scc_profile = var.scc_enable ? var.scc_profile : "" event_notification_plan = var.scc_event_notification_plan tags = ["hpc", var.prefix] prefix = var.prefix cos_bucket = [for name in module.landing_zone.cos_buckets_names : name if strcontains(name, "scc-bucket")][0] cos_instance_crn = module.landing_zone.cos_instance_crns[0] + # scc_profile_version = var.scc_profile != "" && var.scc_profile != null ? var.scc_profile_version : "" + } diff --git a/modules/deployer/datasource.tf b/modules/deployer/datasource.tf index e55961d3..bca113b1 100644 --- a/modules/deployer/datasource.tf +++ b/modules/deployer/datasource.tf @@ -1,5 +1,5 @@ -data "ibm_resource_group" "resource_group" { - name = var.resource_group +data "ibm_resource_group" "existing_resource_group" { + name = var.existing_resource_group } data "ibm_is_image" "bastion" { diff --git a/modules/deployer/locals.tf b/modules/deployer/locals.tf index ec720df1..09153a14 100644 --- a/modules/deployer/locals.tf +++ b/modules/deployer/locals.tf @@ -1,7 +1,6 @@ # define variables locals { - #products = "scale" - name = "hpc" + name = var.scheduler == "LSF" ? "LSF" : (var.scheduler == null ? "Scale" : (var.scheduler == "HPCaaS" ? "HPCaaS" : (var.scheduler == "Symphony" ? "Symphony" : (var.scheduler == "Slurm" ? 
"Slurm" : "")))) prefix = var.prefix tags = [local.prefix, local.name] @@ -43,9 +42,6 @@ locals { # LSF static configs # lsf_cloud_deployer_path = "/opt/ibm/lsf" - # Region and Zone calculations - # region = join("-", slice(split("-", var.zones[0]), 0, 2)) - # Security group rules # TODO: Fix SG rules bastion_security_group_rules = flatten([ @@ -68,7 +64,7 @@ locals { # Derived configs # VPC - resource_group_id = data.ibm_resource_group.resource_group.id + resource_group_id = data.ibm_resource_group.existing_resource_group.id # Subnets bastion_subnets = var.bastion_subnets @@ -78,8 +74,4 @@ locals { vsi_interfaces = ["eth0", "eth1"] compute_interfaces = local.vsi_interfaces[0] compute_dns_domain = var.dns_domain_names["compute"] - - # management_instance_count = sum(var.management_instances[*]["count"]) - # static_compute_instance_count = sum(var.static_compute_instances[*]["count"]) - # enable_compute = local.management_instance_count > 0 || local.static_compute_instance_count > 0 } diff --git a/modules/deployer/main.tf b/modules/deployer/main.tf index 34c2a8d4..5f1e26ec 100644 --- a/modules/deployer/main.tf +++ b/modules/deployer/main.tf @@ -15,11 +15,10 @@ module "bastion_sg" { vpc_id = var.vpc_id } - module "bastion_vsi" { count = var.enable_bastion ? 1 : 0 source = "terraform-ibm-modules/landing-zone-vsi/ibm" - version = "4.2.0" + version = "4.6.0" vsi_per_subnet = 1 create_security_group = false security_group = null @@ -43,7 +42,7 @@ module "bastion_vsi" { module "deployer_vsi" { count = local.enable_deployer ? 1 : 0 source = "terraform-ibm-modules/landing-zone-vsi/ibm" - version = "4.2.0" + version = "4.6.0" vsi_per_subnet = 1 create_security_group = false security_group = null diff --git a/modules/deployer/template_files.tf b/modules/deployer/template_files.tf index 5fc5f27a..467a047d 100644 --- a/modules/deployer/template_files.tf +++ b/modules/deployer/template_files.tf @@ -11,8 +11,5 @@ data "template_file" "deployer_user_data" { bastion_public_key_content = local.enable_bastion ? module.ssh_key[0].public_key_content : "" compute_dns_domain = local.enable_bastion ? local.compute_dns_domain : "" compute_interfaces = local.enable_bastion ? local.compute_interfaces : "" - # compute_public_key_content = local.enable_bastion ? module.compute_key[0].public_key_content : "" - # compute_private_key_content = local.enable_bastion ? module.compute_key[0].private_key_content : "" - } } diff --git a/modules/deployer/variables.tf b/modules/deployer/variables.tf index bd2ac970..6a026851 100644 --- a/modules/deployer/variables.tf +++ b/modules/deployer/variables.tf @@ -2,7 +2,7 @@ # Resource Groups Variables ############################################################################## -variable "resource_group" { +variable "existing_resource_group" { description = "String describing resource groups to create or reference" type = string default = null @@ -22,11 +22,6 @@ variable "prefix" { } } -# variable "zones" { -# description = "Region where VPC will be created. To find your VPC region, use `ibmcloud is regions` command to find available regions." 
-# type = list(string) -# } - ############################################################################## # VPC Variables ############################################################################## @@ -42,6 +37,15 @@ variable "network_cidr" { default = "10.0.0.0/8" } +############################################################################## +# Offering Variations +############################################################################## +variable "scheduler" { + type = string + default = null + description = "Select one of the scheduler (LSF/Symphony/Slurm/null)" +} + ############################################################################## # Access Variables ############################################################################## @@ -131,28 +135,6 @@ variable "skip_iam_authorization_policy" { description = "Set to false if authorization policy is required for VPC block storage volumes to access kms. This can be set to true if authorization policy already exists. For more information on how to create authorization policy manually, see [creating authorization policies for block storage volume](https://cloud.ibm.com/docs/vpc?topic=vpc-block-s2s-auth&interface=ui)." } -# variable "management_instances" { -# type = list( -# object({ -# profile = string -# count = number -# image = string -# }) -# ) -# description = "Number of instances to be launched for management." -# } - -# variable "static_compute_instances" { -# type = list( -# object({ -# profile = string -# count = number -# image = string -# }) -# ) -# description = "Total Number of instances to be launched for compute cluster." -# } - variable "dns_domain_names" { type = object({ compute = string diff --git a/modules/deployer/version.tf b/modules/deployer/version.tf index cde6b069..e44fa0a2 100644 --- a/modules/deployer/version.tf +++ b/modules/deployer/version.tf @@ -1,5 +1,5 @@ terraform { - required_version = ">= 1.3" + required_version = ">= 1.9.0" required_providers { ibm = { source = "IBM-Cloud/ibm" diff --git a/modules/dns/version.tf b/modules/dns/version.tf index e9f640ef..3ce71d7a 100644 --- a/modules/dns/version.tf +++ b/modules/dns/version.tf @@ -1,5 +1,5 @@ terraform { - required_version = ">= 1.3" + required_version = ">= 1.9.0" required_providers { ibm = { source = "IBM-Cloud/ibm" diff --git a/modules/dns_record/version.tf b/modules/dns_record/version.tf index e9f640ef..3ce71d7a 100644 --- a/modules/dns_record/version.tf +++ b/modules/dns_record/version.tf @@ -1,5 +1,5 @@ terraform { - required_version = ">= 1.3" + required_version = ">= 1.9.0" required_providers { ibm = { source = "IBM-Cloud/ibm" diff --git a/modules/file_storage/version.tf b/modules/file_storage/version.tf index e9f640ef..3ce71d7a 100644 --- a/modules/file_storage/version.tf +++ b/modules/file_storage/version.tf @@ -1,5 +1,5 @@ terraform { - required_version = ">= 1.3" + required_version = ">= 1.9.0" required_providers { ibm = { source = "IBM-Cloud/ibm" diff --git a/modules/inventory/main.tf b/modules/inventory/main.tf index bf30438a..04340def 100644 --- a/modules/inventory/main.tf +++ b/modules/inventory/main.tf @@ -1,8 +1,3 @@ -# resource "local_sensitive_file" "mount_path_file" { -# content = join("\n", var.hosts,) -# filename = var.inventory_path -# } - resource "local_sensitive_file" "mount_path_file" { content = <" + "VAR1": "" }, { - "VAR2":"" + "VAR2": "" } ], "variablestore": [ @@ -55,12 +55,11 @@ "description": "IBM Cloud API key for the IBM Cloud account where the IBM Cloud HPC cluster needs to be deployed. 
For more information on how to create an API key, see [Managing user API keys](https://cloud.ibm.com/docs/account?topic=account-userapikey)." }, { - "name": "resource_group", + "name": "existing_resource_group", "value": "Default", "type": "string", "secure": false, "description": "Specify the existing resource group name from your IBM Cloud account where the VPC resources should be deployed. By default, the resource group name is set to 'Default.' Note that in some older accounts, the resource group name may be 'default,' so please validate the resource_group name before deployment. If the resource group value is set to the string \"null\", the automation will create two different resource groups named 'workload-rg' and 'service-rg.' For more information on resource groups, refer to Managing resource groups." - }, { "name": "zones", @@ -77,7 +76,7 @@ "description": "Prefix that is used to name the IBM Cloud HPC cluster and IBM Cloud resources that are provisioned to build the IBM Cloud HPC cluster instance. You cannot create more than one instance of the IBM Cloud HPC cluster with the same name. Ensure that the name is unique. Prefix must start with a lowercase letter and contain only lowercase letters, digits, and hyphens in between. Hyphens must be followed by at least one lowercase letter or digit. There are no leading, trailing, or consecutive hyphens.Character length for cluster_prefix should be less than 16." }, { - "name": "cluster_id", + "name": "cluster_name", "value": "Please fill here", "type": "string", "secure": false, @@ -208,7 +207,6 @@ "type": "string", "secure": false, "description": "Name of the custom image that you want to use to create virtual server instances in your IBM Cloud account to deploy the IBM Cloud HPC cluster dynamic compute nodes. By default, the solution uses a RHEL 8-8 OS image with additional software packages mentioned [here](https://cloud.ibm.com/docs/ibm-spectrum-lsf#create-custom-image). The solution also offers, Ubuntu 22-04 OS base image (hpcaas-lsf10-ubuntu2204-compute-v7). If you would like to include your application-specific binary files, follow the instructions in [ Planning for custom images ](https://cloud.ibm.com/docs/vpc?topic=vpc-planning-custom-images) to create your own custom image and use that to build the IBM Cloud HPC cluster through this offering." - }, { "name": "login_image_name", @@ -229,7 +227,7 @@ "value": "bx2-16x64", "type": "string", "secure": false, - "description" : "Specify the virtual server instance profile type to be used to create the management nodes for the IBM Cloud HPC cluster. For choices on profile types, see [Instance profiles](https://cloud.ibm.com/docs/vpc?topic=vpc-profiles)." + "description": "Specify the virtual server instance profile type to be used to create the management nodes for the IBM Cloud HPC cluster. For choices on profile types, see [Instance profiles](https://cloud.ibm.com/docs/vpc?topic=vpc-profiles)." }, { "name": "management_node_count", @@ -245,11 +243,12 @@ "secure": false, "description": "Mount points and sizes in GB and IOPS range of file shares that can be used to customize shared file storage layout. Provide the details for up to 5 shares. Each file share size in GB supports different range of IOPS. 
For more information, see [file share IOPS value](https://cloud.ibm.com/docs/vpc?topic=vpc-file-storage-profiles&interface=ui)" }, - { "name": "storage_security_group_id", + { + "name": "storage_security_group_id", "value": "__NULL__", "type": "string", "secure": false, - "description" : "Provide the security group id that is created from Scale storage, if the nfs_share is not equal to null from cluster_file_share variable." + "description": "Provide the security group id that is created from Scale storage, if the nfs_share is not equal to null from cluster_file_share variable." }, { "name": "hyperthreading_enabled", @@ -300,13 +299,15 @@ "secure": true, "description": "Password for IBM Spectrum LSF Application Center GUI. Note: Password should be at least 8 characters, must have one number, one lowercase letter, one uppercase letter, and at least one special character." }, - { "name": "app_center_high_availability", + { + "name": "app_center_high_availability", "value": "true", "type": "bool", "secure": false, "description": "Set to false to disable the IBM Spectrum LSF Application Center GUI High Availability (default: true)." }, - { "name": "existing_certificate_instance", + { + "name": "existing_certificate_instance", "value": "", "type": "string", "secure": false, @@ -438,7 +439,8 @@ "secure": false, "description": "Event Notifications Instance plan to be used (it's used with S.C.C. instance), possible values 'lite' and 'standard'" }, - { "name": "observability_monitoring_enable", + { + "name": "observability_monitoring_enable", "value": "false", "type": "bool", "secure": false, @@ -456,35 +458,35 @@ "value": "graduated-tier", "type": "string", "secure": false, - "description" : "Type of service plan for IBM Cloud Monitoring instance. You can choose one of the following: lite, graduated-tier. For all details visit [IBM Cloud Monitoring Service Plans](https://cloud.ibm.com/docs/monitoring?topic=monitoring-service_plans)." + "description": "Type of service plan for IBM Cloud Monitoring instance. You can choose one of the following: lite, graduated-tier. For all details visit [IBM Cloud Monitoring Service Plans](https://cloud.ibm.com/docs/monitoring?topic=monitoring-service_plans)." }, { "name": "bastion_instance_name", "value": "__NULL__", "type": "string", "secure": false, - "description" : "Provide the name of the bastion instance. If none given then new bastion will be created." + "description": "Provide the name of the bastion instance. If none given then new bastion will be created." }, { "name": "bastion_instance_public_ip", "value": "__NULL__", "type": "string", "secure": false, - "description" : "Provide the public ip address of the bastion instance to establish the remote connection." + "description": "Provide the public ip address of the bastion instance to establish the remote connection." }, { "name": "bastion_security_group_id", "value": "__NULL__", "type": "string", "secure": false, - "description" : "Provide the security group ID of the bastion server. This security group ID will be added as an allowlist rule on the HPC cluster nodes to establish an SSH connection through the bastion node." + "description": "Provide the security group ID of the bastion server. This security group ID will be added as an allowlist rule on the HPC cluster nodes to establish an SSH connection through the bastion node." 
}, { "name": "bastion_ssh_private_key", "value": "__NULL__", "type": "string", "secure": false, - "description" : "Provide the private SSH key (named id_rsa) used during the creation and configuration of the bastion server to securely authenticate and connect to the bastion server. This allows access to internal network resources from a secure entry point. Note: The corresponding public SSH key (named id_rsa.pub) must already be available in the ~/.ssh/authorized_keys file on the bastion host to establish authentication." + "description": "Provide the private SSH key (named id_rsa) used during the creation and configuration of the bastion server to securely authenticate and connect to the bastion server. This allows access to internal network resources from a secure entry point. Note: The corresponding public SSH key (named id_rsa.pub) must already be available in the ~/.ssh/authorized_keys file on the bastion host to establish authentication." } ] } diff --git a/solutions/custom/locals.tf b/solutions/custom/locals.tf index bbbd0782..7139423a 100644 --- a/solutions/custom/locals.tf +++ b/solutions/custom/locals.tf @@ -4,7 +4,6 @@ locals { region = join("-", slice(split("-", var.zones[0]), 0, 2)) } - locals { override_json_path = abspath("./override.json") override = { @@ -17,7 +16,6 @@ locals { override_type = var.override_json_string == null ? "override" : "override_json_string" } - locals { config = { existing_resource_group = var.existing_resource_group @@ -67,7 +65,6 @@ locals { } } - # Compile Environment for Config output locals { env = { diff --git a/solutions/custom/main.tf b/solutions/custom/main.tf index 867ea215..977baf10 100644 --- a/solutions/custom/main.tf +++ b/solutions/custom/main.tf @@ -1,5 +1,6 @@ module "custom" { source = "./../.." 
+ scheduler = var.scheduler ibm_customer_number = var.ibm_customer_number zones = var.zones allowed_cidr = var.allowed_cidr @@ -41,7 +42,6 @@ module "custom" { vpn_peer_cidr = local.env.vpn_peer_cidr vpn_preshared_key = local.env.vpn_preshared_key - # scheduler = var.scheduler # compute_gui_password = local.env.compute_gui_password # compute_gui_username = local.env.compute_gui_username # client_subnets_cidr = local.env.client_subnets_cidr diff --git a/solutions/custom/variables.tf b/solutions/custom/variables.tf index 499f4af8..0d5c235b 100644 --- a/solutions/custom/variables.tf +++ b/solutions/custom/variables.tf @@ -1,11 +1,11 @@ ############################################################################## # Offering Variations ############################################################################## -# variable "scheduler" { -# type = string -# default = "LSF" -# description = "Select one of the scheduler (LSF/Symphony/Slurm/null)" -# } +variable "scheduler" { + type = string + default = "LSF" + description = "Select one of the scheduler (LSF/Symphony/Slurm/null)" +} variable "ibm_customer_number" { type = string diff --git a/solutions/custom/version.tf b/solutions/custom/version.tf index d465bd59..93f82bed 100644 --- a/solutions/custom/version.tf +++ b/solutions/custom/version.tf @@ -1,5 +1,5 @@ terraform { - required_version = ">= 1.3" + required_version = ">= 1.9.0" required_providers { ibm = { source = "IBM-Cloud/ibm" diff --git a/solutions/hpcaas/catalogValidationValues.json.template b/solutions/hpcaas/catalogValidationValues.json.template index 829063ab..a50f2c75 100644 --- a/solutions/hpcaas/catalogValidationValues.json.template +++ b/solutions/hpcaas/catalogValidationValues.json.template @@ -1,7 +1,7 @@ { "ibmcloud_api_key": $VALIDATION_APIKEY, "prefix": $PREFIX, - "zone": "ca-tor-1", + "zones": "[\"ca-tor-1\"]", "existing_resource_group": "geretain-hpc-rg", "ssh_keys": "[\"geretain-hpc-ssh-key\"]" } diff --git a/solutions/hpcaas/locals.tf b/solutions/hpcaas/locals.tf index f86ca6b4..4d7d2766 100644 --- a/solutions/hpcaas/locals.tf +++ b/solutions/hpcaas/locals.tf @@ -1,10 +1,9 @@ # locals needed for ibm provider locals { # Region and Zone calculations - region = join("-", slice(split("-", var.zone), 0, 2)) + region = join("-", slice(split("-", var.zones[0]), 0, 2)) } - locals { override_json_path = abspath("./override.json") override = { @@ -17,7 +16,6 @@ locals { override_type = var.override_json_string == null ? "override" : "override_json_string" } - locals { config = { existing_resource_group = var.existing_resource_group @@ -66,7 +64,6 @@ locals { } } - # Compile Environment for Config output locals { env = { diff --git a/solutions/hpcaas/main.tf b/solutions/hpcaas/main.tf index ea26d08f..7409fd08 100644 --- a/solutions/hpcaas/main.tf +++ b/solutions/hpcaas/main.tf @@ -1,7 +1,8 @@ module "hpcaas" { source = "./../.." 
+ scheduler = "HPCaaS" ibm_customer_number = var.ibm_customer_number - zones = [var.zone] + zones = var.zones allowed_cidr = var.allowed_cidr prefix = local.env.prefix ssh_keys = local.env.ssh_keys @@ -40,7 +41,6 @@ module "hpcaas" { vpn_peer_cidr = local.env.vpn_peer_cidr vpn_preshared_key = local.env.vpn_preshared_key - # scheduler = "HPCaaS" # compute_gui_password = local.env.compute_gui_password # compute_gui_username = local.env.compute_gui_username # hpcs_instance_name = local.env.hpcs_instance_name diff --git a/solutions/hpcaas/variables.tf b/solutions/hpcaas/variables.tf index c06fa667..ec411dfb 100644 --- a/solutions/hpcaas/variables.tf +++ b/solutions/hpcaas/variables.tf @@ -23,9 +23,10 @@ variable "ibmcloud_api_key" { ############################################################################## # Cluster Level Variables ############################################################################## -variable "zone" { - type = string - description = "Zone where VPC will be created." +variable "zones" { + type = list(string) + default = ["us-south-1", "us-south-2", "us-south-3"] + description = "Region where VPC will be created. To find your VPC region, use `ibmcloud is regions` command to find available regions." } variable "ssh_keys" { diff --git a/solutions/hpcaas/version.tf b/solutions/hpcaas/version.tf index d465bd59..93f82bed 100644 --- a/solutions/hpcaas/version.tf +++ b/solutions/hpcaas/version.tf @@ -1,5 +1,5 @@ terraform { - required_version = ">= 1.3" + required_version = ">= 1.9.0" required_providers { ibm = { source = "IBM-Cloud/ibm" diff --git a/solutions/lsf/catalogValidationValues.json.template b/solutions/lsf/catalogValidationValues.json.template index 829063ab..2aea8d9e 100644 --- a/solutions/lsf/catalogValidationValues.json.template +++ b/solutions/lsf/catalogValidationValues.json.template @@ -1,7 +1,7 @@ { "ibmcloud_api_key": $VALIDATION_APIKEY, "prefix": $PREFIX, - "zone": "ca-tor-1", + "zones": "[\"ca-tor-1\"]", "existing_resource_group": "geretain-hpc-rg", "ssh_keys": "[\"geretain-hpc-ssh-key\"]" } diff --git a/solutions/lsf/locals.tf b/solutions/lsf/locals.tf index d0874f4a..495b27b0 100644 --- a/solutions/lsf/locals.tf +++ b/solutions/lsf/locals.tf @@ -1,10 +1,9 @@ # locals needed for ibm provider locals { # Region and Zone calculations - region = join("-", slice(split("-", var.zone), 0, 2)) + region = join("-", slice(split("-", var.zones[0]), 0, 2)) } - locals { override_json_path = abspath("./override.json") override = { @@ -17,7 +16,6 @@ locals { override_type = var.override_json_string == null ? 
"override" : "override_json_string" } - locals { config = { existing_resource_group = var.existing_resource_group @@ -80,19 +78,18 @@ locals { observability_monitoring_plan = var.observability_monitoring_plan scc_enable = var.scc_enable scc_profile = var.scc_profile - # scc_profile_version = var.scc_profile_version - scc_location = var.scc_location - scc_event_notification_plan = var.scc_event_notification_plan - skip_flowlogs_s2s_auth_policy = var.skip_flowlogs_s2s_auth_policy - skip_iam_authorization_policy = var.skip_iam_authorization_policy - skip_kms_s2s_auth_policy = var.skip_kms_s2s_auth_policy + scc_location = var.scc_location + scc_event_notification_plan = var.scc_event_notification_plan + skip_flowlogs_s2s_auth_policy = var.skip_flowlogs_s2s_auth_policy + skip_iam_authorization_policy = var.skip_iam_authorization_policy + skip_kms_s2s_auth_policy = var.skip_kms_s2s_auth_policy # New Variables ibmcloud_api_key = var.ibmcloud_api_key + # scc_profile_version = var.scc_profile_version } } - # Compile Environment for Config output locals { env = { @@ -156,13 +153,14 @@ locals { observability_monitoring_plan = lookup(local.override[local.override_type], "observability_monitoring_plan", local.config.observability_monitoring_plan) scc_enable = lookup(local.override[local.override_type], "scc_enable", local.config.scc_enable) scc_profile = lookup(local.override[local.override_type], "scc_profile", local.config.scc_profile) - # scc_profile_version = lookup(local.override[local.override_type], "scc_profile_version", local.config.scc_profile_version) - scc_location = lookup(local.override[local.override_type], "scc_location", local.config.scc_location) - scc_event_notification_plan = lookup(local.override[local.override_type], "scc_event_notification_plan", local.config.scc_event_notification_plan) - skip_flowlogs_s2s_auth_policy = lookup(local.override[local.override_type], "skip_flowlogs_s2s_auth_policy", local.config.skip_flowlogs_s2s_auth_policy) - skip_iam_authorization_policy = lookup(local.override[local.override_type], "skip_iam_authorization_policy", local.config.skip_iam_authorization_policy) - skip_kms_s2s_auth_policy = lookup(local.override[local.override_type], "skip_kms_s2s_auth_policy", local.config.skip_kms_s2s_auth_policy) + scc_location = lookup(local.override[local.override_type], "scc_location", local.config.scc_location) + scc_event_notification_plan = lookup(local.override[local.override_type], "scc_event_notification_plan", local.config.scc_event_notification_plan) + skip_flowlogs_s2s_auth_policy = lookup(local.override[local.override_type], "skip_flowlogs_s2s_auth_policy", local.config.skip_flowlogs_s2s_auth_policy) + skip_iam_authorization_policy = lookup(local.override[local.override_type], "skip_iam_authorization_policy", local.config.skip_iam_authorization_policy) + skip_kms_s2s_auth_policy = lookup(local.override[local.override_type], "skip_kms_s2s_auth_policy", local.config.skip_kms_s2s_auth_policy) # New Variables ibmcloud_api_key = lookup(local.override[local.override_type], "ibmcloud_api_key", local.config.ibmcloud_api_key) + # scc_profile_version = lookup(local.override[local.override_type], "scc_profile_version", local.config.scc_profile_version) + } } diff --git a/solutions/lsf/main.tf b/solutions/lsf/main.tf index be07657e..443a8023 100644 --- a/solutions/lsf/main.tf +++ b/solutions/lsf/main.tf @@ -1,7 +1,8 @@ module "lsf" { source = "./../.." 
+ scheduler = "LSF" ibm_customer_number = var.ibm_customer_number - zones = [var.zone] + zones = var.zones allowed_cidr = var.allowed_cidr prefix = local.env.prefix ssh_keys = local.env.ssh_keys @@ -63,7 +64,6 @@ module "lsf" { # New Variables ibmcloud_api_key = local.env.ibmcloud_api_key - # scheduler = "LSF" # compute_gui_password = local.env.compute_gui_password # compute_gui_username = local.env.compute_gui_username # hpcs_instance_name = local.env.hpcs_instance_name @@ -72,5 +72,4 @@ module "lsf" { # storage_gui_username = local.env.storage_gui_username # observability_monitoring_on_compute_nodes_enable = local.env.observability_monitoring_on_compute_nodes_enable # scc_profile_version = local.env.scc_profile_version - } diff --git a/solutions/lsf/outputs.tf b/solutions/lsf/outputs.tf index cc61dcf9..67bc44e4 100644 --- a/solutions/lsf/outputs.tf +++ b/solutions/lsf/outputs.tf @@ -1,7 +1,6 @@ output "lsf" { description = "LSF details" value = module.lsf.file_storage - #sensitive = true } # output "ssh_to_compute" { diff --git a/solutions/lsf/variables.tf b/solutions/lsf/variables.tf index 5dbefad9..83287bae 100644 --- a/solutions/lsf/variables.tf +++ b/solutions/lsf/variables.tf @@ -23,9 +23,10 @@ variable "ibmcloud_api_key" { ############################################################################## # Cluster Level Variables ############################################################################## -variable "zone" { - type = string - description = "Zone where VPC will be created." +variable "zones" { + type = list(string) + default = ["us-south-1", "us-south-2", "us-south-3"] + description = "Region where VPC will be created. To find your VPC region, use `ibmcloud is regions` command to find available regions." } variable "ssh_keys" { @@ -148,7 +149,7 @@ variable "enable_deployer" { variable "deployer_image" { type = string - default = "ibm-redhat-8-10-minimal-amd64-2" + default = "jay-lsf-new-image" description = "The image to use to deploy the deployer host." } diff --git a/solutions/lsf/version.tf b/solutions/lsf/version.tf index d465bd59..93f82bed 100644 --- a/solutions/lsf/version.tf +++ b/solutions/lsf/version.tf @@ -1,5 +1,5 @@ terraform { - required_version = ">= 1.3" + required_version = ">= 1.9.0" required_providers { ibm = { source = "IBM-Cloud/ibm" diff --git a/solutions/scale/catalogValidationValues.json.template b/solutions/scale/catalogValidationValues.json.template index 829063ab..2aea8d9e 100644 --- a/solutions/scale/catalogValidationValues.json.template +++ b/solutions/scale/catalogValidationValues.json.template @@ -1,7 +1,7 @@ { "ibmcloud_api_key": $VALIDATION_APIKEY, "prefix": $PREFIX, - "zone": "ca-tor-1", + "zones": "[\"ca-tor-1\"]", "existing_resource_group": "geretain-hpc-rg", "ssh_keys": "[\"geretain-hpc-ssh-key\"]" } diff --git a/solutions/scale/locals.tf b/solutions/scale/locals.tf index e20bee26..b9664d1b 100644 --- a/solutions/scale/locals.tf +++ b/solutions/scale/locals.tf @@ -1,10 +1,9 @@ # locals needed for ibm provider locals { # Region and Zone calculations - region = join("-", slice(split("-", var.zone), 0, 2)) + region = join("-", slice(split("-", var.zones[0]), 0, 2)) } - locals { override_json_path = abspath("./override.json") override = { @@ -17,7 +16,6 @@ locals { override_type = var.override_json_string == null ? 
"override" : "override_json_string" } - locals { config = { existing_resource_group = var.existing_resource_group diff --git a/solutions/scale/main.tf b/solutions/scale/main.tf index 9dbe1125..b3a8f20c 100644 --- a/solutions/scale/main.tf +++ b/solutions/scale/main.tf @@ -1,7 +1,8 @@ module "scale" { source = "./../.." + scheduler = null ibm_customer_number = var.ibm_customer_number - zones = [var.zone] + zones = var.zones allowed_cidr = var.allowed_cidr prefix = local.env.prefix ssh_keys = local.env.ssh_keys @@ -40,7 +41,6 @@ module "scale" { vpn_peer_cidr = local.env.vpn_peer_cidr vpn_preshared_key = local.env.vpn_preshared_key - # scheduler = null # compute_gui_password = local.env.compute_gui_password # compute_gui_username = local.env.compute_gui_username # hpcs_instance_name = local.env.hpcs_instance_name diff --git a/solutions/scale/variables.tf b/solutions/scale/variables.tf index 3abf0a17..0ea09079 100644 --- a/solutions/scale/variables.tf +++ b/solutions/scale/variables.tf @@ -23,9 +23,10 @@ variable "ibmcloud_api_key" { ############################################################################## # Cluster Level Variables ############################################################################## -variable "zone" { - type = string - description = "Zone where VPC will be created." +variable "zones" { + type = list(string) + default = ["us-south-1", "us-south-2", "us-south-3"] + description = "Region where VPC will be created. To find your VPC region, use `ibmcloud is regions` command to find available regions." } variable "ssh_keys" { @@ -148,7 +149,7 @@ variable "enable_deployer" { variable "deployer_image" { type = string - default = "ibm-redhat-8-10-minimal-amd64-2" + default = "jay-lsf-new-image" description = "The image to use to deploy the deployer host." } diff --git a/solutions/scale/version.tf b/solutions/scale/version.tf index d465bd59..93f82bed 100644 --- a/solutions/scale/version.tf +++ b/solutions/scale/version.tf @@ -1,5 +1,5 @@ terraform { - required_version = ">= 1.3" + required_version = ">= 1.9.0" required_providers { ibm = { source = "IBM-Cloud/ibm" diff --git a/solutions/slurm/catalogValidationValues.json.template b/solutions/slurm/catalogValidationValues.json.template index 829063ab..2aea8d9e 100644 --- a/solutions/slurm/catalogValidationValues.json.template +++ b/solutions/slurm/catalogValidationValues.json.template @@ -1,7 +1,7 @@ { "ibmcloud_api_key": $VALIDATION_APIKEY, "prefix": $PREFIX, - "zone": "ca-tor-1", + "zones": "[\"ca-tor-1\"]", "existing_resource_group": "geretain-hpc-rg", "ssh_keys": "[\"geretain-hpc-ssh-key\"]" } diff --git a/solutions/slurm/locals.tf b/solutions/slurm/locals.tf index f86ca6b4..4d7d2766 100644 --- a/solutions/slurm/locals.tf +++ b/solutions/slurm/locals.tf @@ -1,10 +1,9 @@ # locals needed for ibm provider locals { # Region and Zone calculations - region = join("-", slice(split("-", var.zone), 0, 2)) + region = join("-", slice(split("-", var.zones[0]), 0, 2)) } - locals { override_json_path = abspath("./override.json") override = { @@ -17,7 +16,6 @@ locals { override_type = var.override_json_string == null ? "override" : "override_json_string" } - locals { config = { existing_resource_group = var.existing_resource_group @@ -66,7 +64,6 @@ locals { } } - # Compile Environment for Config output locals { env = { diff --git a/solutions/slurm/main.tf b/solutions/slurm/main.tf index 456242f0..a379e1b6 100644 --- a/solutions/slurm/main.tf +++ b/solutions/slurm/main.tf @@ -1,7 +1,8 @@ module "slurm" { source = "./../.." 
+ scheduler = "Slurm" ibm_customer_number = var.ibm_customer_number - zones = [var.zone] + zones = var.zones allowed_cidr = var.allowed_cidr prefix = local.env.prefix ssh_keys = local.env.ssh_keys @@ -40,7 +41,6 @@ module "slurm" { vpn_peer_cidr = local.env.vpn_peer_cidr vpn_preshared_key = local.env.vpn_preshared_key - # scheduler = "slurm" # compute_gui_password = local.env.compute_gui_password # compute_gui_username = local.env.compute_gui_username # hpcs_instance_name = local.env.hpcs_instance_name diff --git a/solutions/slurm/variables.tf b/solutions/slurm/variables.tf index a7a040c5..a455c903 100644 --- a/solutions/slurm/variables.tf +++ b/solutions/slurm/variables.tf @@ -23,9 +23,10 @@ variable "ibmcloud_api_key" { ############################################################################## # Cluster Level Variables ############################################################################## -variable "zone" { - type = string - description = "Zone where VPC will be created." +variable "zones" { + type = list(string) + default = ["us-south-1", "us-south-2", "us-south-3"] + description = "Region where VPC will be created. To find your VPC region, use `ibmcloud is regions` command to find available regions." } variable "ssh_keys" { diff --git a/solutions/slurm/version.tf b/solutions/slurm/version.tf index d465bd59..93f82bed 100644 --- a/solutions/slurm/version.tf +++ b/solutions/slurm/version.tf @@ -1,5 +1,5 @@ terraform { - required_version = ">= 1.3" + required_version = ">= 1.9.0" required_providers { ibm = { source = "IBM-Cloud/ibm" diff --git a/solutions/symphony/catalogValidationValues.json.template b/solutions/symphony/catalogValidationValues.json.template index 829063ab..2aea8d9e 100644 --- a/solutions/symphony/catalogValidationValues.json.template +++ b/solutions/symphony/catalogValidationValues.json.template @@ -1,7 +1,7 @@ { "ibmcloud_api_key": $VALIDATION_APIKEY, "prefix": $PREFIX, - "zone": "ca-tor-1", + "zones": "[\"ca-tor-1\"]", "existing_resource_group": "geretain-hpc-rg", "ssh_keys": "[\"geretain-hpc-ssh-key\"]" } diff --git a/solutions/symphony/locals.tf b/solutions/symphony/locals.tf index f86ca6b4..67d953d9 100644 --- a/solutions/symphony/locals.tf +++ b/solutions/symphony/locals.tf @@ -1,7 +1,7 @@ # locals needed for ibm provider locals { # Region and Zone calculations - region = join("-", slice(split("-", var.zone), 0, 2)) + region = join("-", slice(split("-", var.zones[0]), 0, 2)) } diff --git a/solutions/symphony/main.tf b/solutions/symphony/main.tf index a072ebc6..b93d7c11 100644 --- a/solutions/symphony/main.tf +++ b/solutions/symphony/main.tf @@ -1,7 +1,8 @@ module "symphony" { source = "./../.." 
+ scheduler = "Symphony" ibm_customer_number = var.ibm_customer_number - zones = [var.zone] + zones = var.zones allowed_cidr = var.allowed_cidr prefix = local.env.prefix ssh_keys = local.env.ssh_keys @@ -40,7 +41,6 @@ module "symphony" { vpn_peer_cidr = local.env.vpn_peer_cidr vpn_preshared_key = local.env.vpn_preshared_key - # scheduler = "symphony" # compute_gui_password = local.env.compute_gui_password # compute_gui_username = local.env.compute_gui_username # hpcs_instance_name = local.env.hpcs_instance_name diff --git a/solutions/symphony/variables.tf b/solutions/symphony/variables.tf index a7a040c5..a455c903 100644 --- a/solutions/symphony/variables.tf +++ b/solutions/symphony/variables.tf @@ -23,9 +23,10 @@ variable "ibmcloud_api_key" { ############################################################################## # Cluster Level Variables ############################################################################## -variable "zone" { - type = string - description = "Zone where VPC will be created." +variable "zones" { + type = list(string) + default = ["us-south-1", "us-south-2", "us-south-3"] + description = "Region where VPC will be created. To find your VPC region, use `ibmcloud is regions` command to find available regions." } variable "ssh_keys" { diff --git a/solutions/symphony/version.tf b/solutions/symphony/version.tf index d465bd59..93f82bed 100644 --- a/solutions/symphony/version.tf +++ b/solutions/symphony/version.tf @@ -1,5 +1,5 @@ terraform { - required_version = ">= 1.3" + required_version = ">= 1.9.0" required_providers { ibm = { source = "IBM-Cloud/ibm" diff --git a/tools/image-builder/locals.tf b/tools/image-builder/locals.tf index 5c9dbe7b..431fc4ab 100644 --- a/tools/image-builder/locals.tf +++ b/tools/image-builder/locals.tf @@ -8,8 +8,8 @@ locals { vpc_id = var.vpc_name == null ? module.landing_zone.vpc_data[0].vpc_id : data.ibm_is_vpc.existing_vpc[0].id # Resource group calculation # If user defined then use existing else create new - create_resource_group = var.resource_group == "null" ? true : false - resource_groups = var.resource_group == "null" ? [ + create_resource_group = var.existing_resource_group == "null" ? true : false + resource_groups = var.existing_resource_group == "null" ? [ { name = "${local.prefix}-service-rg", create = local.create_resource_group, @@ -22,12 +22,12 @@ locals { } ] : [ { - name = var.resource_group, + name = var.existing_resource_group, create = local.create_resource_group } ] # For the variables looking for resource group names only (transit_gateway, key_management, atracker) - resource_group = var.resource_group == "null" ? "${local.prefix}-service-rg" : var.resource_group + resource_group = var.existing_resource_group == "null" ? "${local.prefix}-service-rg" : var.existing_resource_group region = join("-", slice(split("-", var.zones[0]), 0, 2)) zones = ["zone-1", "zone-2", "zone-3"] active_zones = [ @@ -93,7 +93,7 @@ locals { } ] : null prefix = local.name - resource_group = var.resource_group == "null" ? "${local.prefix}-workload-rg" : var.resource_group + resource_group = var.existing_resource_group == "null" ? "${local.prefix}-workload-rg" : var.existing_resource_group clean_default_security_group = true clean_default_acl = true # flow_logs_bucket_name = var.enable_vpc_flow_logs ? "vpc-flow-logs-bucket" : null @@ -150,8 +150,8 @@ locals { packer_floating_ip = var.enable_fip ? local.packer_vsi_data[0]["floating_ip"] : null packer_resource_groups = { - service_rg = var.resource_group == "null" ? 
diff --git a/tools/image-builder/main.tf b/tools/image-builder/main.tf
index 65396615..3c1433d2 100644
--- a/tools/image-builder/main.tf
+++ b/tools/image-builder/main.tf
@@ -1,6 +1,6 @@
 module "landing_zone" {
   source  = "terraform-ibm-modules/landing-zone/ibm"
-  version = "5.27.0"
+  version = "7.3.1"
   prefix  = local.prefix
   region  = local.region
   tags    = local.tags
@@ -75,7 +75,7 @@ data "local_file" "encoded_compute_content" {
 
 module "packer_vsi" {
   source         = "terraform-ibm-modules/landing-zone-vsi/ibm"
-  version        = "4.0.1"
+  version        = "4.6.0"
   vsi_per_subnet = 1
   image_id       = local.packer_image_id
   machine_type   = local.packer_machine_type
diff --git a/tools/image-builder/template_files.tf b/tools/image-builder/template_files.tf
index 30874ad7..c112055c 100644
--- a/tools/image-builder/template_files.tf
+++ b/tools/image-builder/template_files.tf
@@ -13,11 +13,11 @@ data "template_file" "packer_user_data" {
     encoded_compute          = data.local_file.encoded_compute_content.content
     target_dir               = "/var"
     prefix                   = var.prefix
-    cluster_id               = var.cluster_id
+    cluster_name             = var.cluster_name
     reservation_id           = var.reservation_id
     catalog_validate_ssh_key = var.ssh_keys[0]
     zones                    = join(",", var.zones)
-    resource_group           = var.resource_group
+    existing_resource_group  = var.existing_resource_group
     private_catalog_id       = var.private_catalog_id
   }
 }
diff --git a/tools/image-builder/templates/packer_user_data.tpl b/tools/image-builder/templates/packer_user_data.tpl
index 9ed0385a..cd0e8bd0 100644
--- a/tools/image-builder/templates/packer_user_data.tpl
+++ b/tools/image-builder/templates/packer_user_data.tpl
@@ -109,7 +109,7 @@ curl -fsSL https://clis.cloud.ibm.com/install/linux | sh
 ibmcloud plugin install infrastructure-service
 ibmcloud login --apikey ${ibm_api_key} -r ${vpc_region}
 echo "========== Uploading SSH key to IBM cloud ========="
-ibmcloud is key-create $CICD_SSH_KEY @/HPCaaS/artifacts/.ssh/id_rsa.pub --resource-group-name ${resource_group}
+ibmcloud is key-create $CICD_SSH_KEY @/HPCaaS/artifacts/.ssh/id_rsa.pub --resource-group-name ${existing_resource_group}
 
 cd /HPCaaS/terraform-ibm-hpc/tools/tests
 git submodule update --init
@@ -130,9 +130,9 @@ echo "========== Executing Go function to validate the image through HPC deploym
 export TF_VAR_ibmcloud_api_key=${ibm_api_key}
 
 if [ "${private_catalog_id}" ]; then
-  PREFIX=${prefix} CLUSTER_ID=${cluster_id} RESERVATION_ID=${reservation_id} SSH_FILE_PATH="/HPCaaS/artifacts/.ssh/id_rsa" REMOTE_ALLOWED_IPS=$PACKER_FIP SSH_KEYS=$CICD_SSH_KEY CATALOG_VALIDATE_SSH_KEY=${catalog_validate_ssh_key} ZONES=${zones} RESOURCE_GROUP=${resource_group} COMPUTE_IMAGE_NAME=${image_name} PRIVATE_CATALOG_ID=${private_catalog_id} VPC_ID=${vpc_id} SUBNET_ID=${vpc_subnet_id} SOURCE_IMAGE_NAME=${source_image_name} go test -v -timeout 900m -parallel 4 -run "TestRunHpcDeploymentForCustomImageBuilder" | tee hpc_log_$(date +%d-%m-%Y-%H-%M-%S).log
+  PREFIX=${prefix} CLUSTER_NAME=${cluster_name} RESERVATION_ID=${reservation_id} SSH_FILE_PATH="/HPCaaS/artifacts/.ssh/id_rsa" REMOTE_ALLOWED_IPS=$PACKER_FIP SSH_KEYS=$CICD_SSH_KEY CATALOG_VALIDATE_SSH_KEY=${catalog_validate_ssh_key} ZONES=${zones} EXISTING_RESOURCE_GROUP=${existing_resource_group} COMPUTE_IMAGE_NAME=${image_name} PRIVATE_CATALOG_ID=${private_catalog_id} VPC_ID=${vpc_id} SUBNET_ID=${vpc_subnet_id} SOURCE_IMAGE_NAME=${source_image_name} go test -v -timeout 900m -parallel 4 -run "TestRunHpcDeploymentForCustomImageBuilder" | tee hpc_log_$(date +%d-%m-%Y-%H-%M-%S).log
 else
-  PREFIX=${prefix} CLUSTER_ID=${cluster_id} RESERVATION_ID=${reservation_id} SSH_FILE_PATH="/HPCaaS/artifacts/.ssh/id_rsa" REMOTE_ALLOWED_IPS=$PACKER_FIP SSH_KEYS=$CICD_SSH_KEY ZONES=${zones} RESOURCE_GROUP=${resource_group} COMPUTE_IMAGE_NAME=${image_name} SOURCE_IMAGE_NAME=${source_image_name} go test -v -timeout 900m -parallel 4 -run "TestRunHpcDeploymentForCustomImageBuilder" | tee hpc_log_$(date +%d-%m-%Y-%H-%M-%S).log
+  PREFIX=${prefix} CLUSTER_NAME=${cluster_name} RESERVATION_ID=${reservation_id} SSH_FILE_PATH="/HPCaaS/artifacts/.ssh/id_rsa" REMOTE_ALLOWED_IPS=$PACKER_FIP SSH_KEYS=$CICD_SSH_KEY ZONES=${zones} EXISTING_RESOURCE_GROUP=${existing_resource_group} COMPUTE_IMAGE_NAME=${image_name} SOURCE_IMAGE_NAME=${source_image_name} go test -v -timeout 900m -parallel 4 -run "TestRunHpcDeploymentForCustomImageBuilder" | tee hpc_log_$(date +%d-%m-%Y-%H-%M-%S).log
 fi
 
 echo "========== Deleting the SSH key ========="
diff --git a/tools/image-builder/variables.tf b/tools/image-builder/variables.tf
index b4a63fe5..e044ede0 100644
--- a/tools/image-builder/variables.tf
+++ b/tools/image-builder/variables.tf
@@ -16,7 +16,7 @@ variable "ibmcloud_api_key" {
 # Resource Groups Variables
 ##############################################################################
 
-variable "resource_group" {
+variable "existing_resource_group" {
   description = "Specify the existing resource group name from your IBM Cloud account where the VPC resources should be deployed. By default, the resource group name is set to 'Default.' Note that in some older accounts, the resource group name may be 'default,' so please validate the resource_group name before deployment. If the resource group value is set to the string \"null\", the automation will create two different resource groups named 'workload-rg' and 'service-rg.' For more information on resource groups, refer to Managing resource groups."
   type        = string
   default     = "Default"
@@ -188,11 +188,11 @@ variable "enable_fip" {
 }
 
 # tflint-ignore: terraform_unused_declarations
-variable "cluster_id" {
+variable "cluster_name" {
   type        = string
   description = "Ensure that you have received the cluster ID from IBM technical sales. A unique identifer for HPC cluster used by IBM Cloud HPC to differentiate different HPC clusters within the same reservations. This can be up to 39 alphanumeric characters including the underscore (_), the hyphen (-), and the period (.) characters. You cannot change the cluster ID after deployment."
   validation {
-    condition     = 0 < length(var.cluster_id) && length(var.cluster_id) < 40 && can(regex("^[a-zA-Z0-9_.-]+$", var.cluster_id))
+    condition     = 0 < length(var.cluster_name) && length(var.cluster_name) < 40 && can(regex("^[a-zA-Z0-9_.-]+$", var.cluster_name))
     error_message = "The Cluster ID can be up to 39 alphanumeric characters including the underscore (_), the hyphen (-), and the period (.) characters. Other special characters and spaces are not allowed."
   }
 }
diff --git a/tools/image-builder/version.tf b/tools/image-builder/version.tf
index c02c9667..0fa51187 100644
--- a/tools/image-builder/version.tf
+++ b/tools/image-builder/version.tf
@@ -1,5 +1,5 @@
 terraform {
-  required_version = ">= 1.3"
+  required_version = ">= 1.9.0"
   required_providers {
     ibm = {
       source = "IBM-Cloud/ibm"
diff --git a/variables.tf b/variables.tf
index 1dae47f3..2b1de5c2 100644
--- a/variables.tf
+++ b/variables.tf
@@ -11,11 +11,11 @@ variable "ibmcloud_api_key" {
 ##############################################################################
 # Offering Variations
 ##############################################################################
-# variable "scheduler" {
-#   type        = string
-#   default     = null
-#   description = "Select one of the scheduler (LSF/Symphony/Slurm/null)"
-# }
+variable "scheduler" {
+  type        = string
+  default     = null
+  description = "Select one of the supported schedulers (LSF/Symphony/Slurm/HPCaaS), or leave null."
+}
 
 variable "ibm_customer_number" {
   type        = string
@@ -167,7 +167,6 @@ variable "deployer_instance_profile" {
   description = "Deployer should be only used for better deployment performance"
 }
 
-
 ##############################################################################
 # Compute Variables
 ##############################################################################
@@ -632,100 +631,6 @@ variable "enable_vpc_flow_logs" {
 # Slurm specific Variables
 ##############################################################################
 
-##############################################################################
-# Landing Zone Variables
-##############################################################################
-# variable "clusters" {
-#   default     = null
-#   description = "A list describing clusters workloads to create"
-#   type = list(
-#     object({
-#       name = string # Name of Cluster
-#       vpc_name = string # Name of VPC
-#       subnet_names = list(string) # List of vpc subnets for cluster
-#       workers_per_subnet = number # Worker nodes per subnet.
-#       machine_type = string # Worker node flavor
-#       kube_type = string # iks or openshift
-#       kube_version = optional(string) # Can be a version from `ibmcloud ks versions` or `default`
-#       entitlement = optional(string) # entitlement option for openshift
-#       secondary_storage = optional(string) # Secondary storage type
-#       pod_subnet = optional(string) # Portable subnet for pods
-#       service_subnet = optional(string) # Portable subnet for services
-#       existing_resource_group = string # Resource Group used for cluster
-#       cos_name = optional(string) # Name of COS instance Required only for OpenShift clusters
-#       access_tags = optional(list(string), [])
-#       boot_volume_crk_name = optional(string) # Boot volume encryption key name
-#       disable_public_endpoint = optional(bool, true) # disable cluster public, leaving only private endpoint
-#       disable_outbound_traffic_protection = optional(bool, false) # public outbound access from the cluster workers
-#       cluster_force_delete_storage = optional(bool, false) # force the removal of persistent storage associated with the cluster during cluster deletion
-#       operating_system = string # The operating system of the workers in the default worker pool. See https://cloud.ibm.com/docs/openshift?topic=openshift-openshift_versions#openshift_versions_available .
-#       kms_wait_for_apply = optional(bool, true) # make terraform wait until KMS is applied to master and it is ready and deployed
-#       verify_cluster_network_readiness = optional(bool, true) # Flag to run a script will run kubectl commands to verify that all worker nodes can communicate successfully with the master. If the runtime does not have access to the kube cluster to run kubectl commands, this should be set to false.
-#       use_ibm_cloud_private_api_endpoints = optional(bool, true) # Flag to force all cluster related api calls to use the IBM Cloud private endpoints.
-#       import_default_worker_pool_on_create = optional(bool) # (Advanced users) Whether to handle the default worker pool as a stand-alone ibm_container_vpc_worker_pool resource on cluster creation. Only set to false if you understand the implications of managing the default worker pool as part of the cluster resource. Set to true to import the default worker pool as a separate resource. Set to false to manage the default worker pool as part of the cluster resource.
-#       allow_default_worker_pool_replacement = optional(bool) # (Advanced users) Set to true to allow the module to recreate a default worker pool. Only use in the case where you are getting an error indicating that the default worker pool cannot be replaced on apply. Once the default worker pool is handled as a stand-alone ibm_container_vpc_worker_pool, if you wish to make any change to the default worker pool which requires the re-creation of the default pool set this variable to true
-#       labels = optional(map(string)) # A list of labels that you want to add to the default worker pool.
-#       addons = optional(object({ # Map of OCP cluster add-on versions to install
-#         debug-tool = optional(string)
-#         image-key-synchronizer = optional(string)
-#         openshift-data-foundation = optional(string)
-#         vpc-file-csi-driver = optional(string)
-#         static-route = optional(string)
-#         cluster-autoscaler = optional(string)
-#         vpc-block-csi-driver = optional(string)
-#         ibm-storage-operator = optional(string)
-#       }), {})
-#       manage_all_addons = optional(bool, false) # Instructs Terraform to manage all cluster addons, even if addons were installed outside of the module. If set to 'true' this module will destroy any addons that were installed by other sources.
-#       kms_config = optional(
-#         object({
-#           crk_name = string # Name of key
-#           private_endpoint = optional(bool) # Private endpoint
-#         })
-#       )
-#       worker_pools = optional(
-#         list(
-#           object({
-#             name = string # Worker pool name
-#             vpc_name = string # VPC name
-#             workers_per_subnet = number # Worker nodes per subnet
-#             flavor = string # Worker node flavor
-#             subnet_names = list(string) # List of vpc subnets for worker pool
-#             entitlement = optional(string) # entitlement option for openshift
-#             secondary_storage = optional(string) # Secondary storage type
-#             boot_volume_crk_name = optional(string) # Boot volume encryption key name
-#             operating_system = string # The operating system of the workers in the worker pool. See https://cloud.ibm.com/docs/openshift?topic=openshift-openshift_versions#openshift_versions_available .
-#             labels = optional(map(string)) # A list of labels that you want to add to all the worker nodes in the worker pool.
-#           })
-#         )
-#       )
-#     })
-#   )
-# }
-##############################################################################
-# Terraform generic Variables
-##############################################################################
-# # tflint-ignore: all
-# variable "TF_PARALLELISM" {
-#   type        = string
-#   default     = "250"
-#   description = "Limit the number of concurrent operation."
-# }
-# # tflint-ignore: all
-# variable "TF_VERSION" {
-#   type        = string
-#   default     = "1.9"
-#   description = "The version of the Terraform engine that's used in the Schematics workspace."
-# }
-# # tflint-ignore: all
-# variable "TF_LOG" {
-#   type        = string
-#   default     = "ERROR"
-#   description = "The Terraform log level used for output in the Schematics workspace."
-# }
-
 ##############################################################################
 # Observability Variables
 ##############################################################################
@@ -858,32 +763,6 @@ variable "enable_landing_zone" {
   description = "Run landing zone module."
 }
 
-# variable "vpc_id" {
-#   type        = string
-#   default     = null
-#   description = "ID of an existing VPC in which the cluster resources will be deployed."
-# }
-
-# variable "bastion_fip" {
-#   type        = string
-#   default     = null
-#   description = "bastion fip"
-# }
-
-# variable "compute_public_key_content" {
-#   type        = string
-#   sensitive   = true
-#   default     = null
-#   description = "Compute security key content."
-# }
-
-# variable "compute_private_key_content" {
-#   type        = string
-#   sensitive   = true
-#   default     = null
-#   description = "Compute security key content."
-# }
-
 variable "enable_atracker" {
   type    = bool
   default = false
diff --git a/version.tf b/version.tf
index 5c31d39d..7615eacf 100644
--- a/version.tf
+++ b/version.tf
@@ -3,25 +3,16 @@
 ##############################################################################
 
 terraform {
-  required_version = ">= 1.3"
-  # Use "greater than or equal to" range for root level modules
+  required_version = ">= 1.9.0"
   required_providers {
     ibm = {
       source  = "IBM-Cloud/ibm"
       version = ">= 1.68.1, < 2.0.0"
     }
-    # random = {
-    #   source  = "hashicorp/random"
-    #   version = ">= 3.4.3, < 4.0.0"
-    # }
     time = {
       source  = "hashicorp/time"
       version = ">= 0.9.1, < 1.0.0"
     }
-    # ansible = {
-    #   source  = "ansible/ansible"
-    #   version = "~> 1.3.0"
-    # }
   }
 }
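The root `scheduler` variable activated in variables.tf above is an unvalidated string, while the solution wrappers pass "HPCaaS", "LSF", "Slurm", "Symphony", or null. As an illustrative sketch only, not part of this patch, a validation block along these lines could catch typos at plan time; the accepted values are inferred from the solution main.tf files shown earlier:

variable "scheduler" {
  type        = string
  default     = null
  description = "Select one of the supported schedulers (LSF/Symphony/Slurm/HPCaaS), or leave null."

  validation {
    # coalesce() maps a null scheduler onto the sentinel "null", which is also accepted.
    condition     = contains(["LSF", "Symphony", "Slurm", "HPCaaS", "null"], coalesce(var.scheduler, "null"))
    error_message = "The scheduler value must be one of LSF, Symphony, Slurm, HPCaaS, or null."
  }
}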