diff --git a/.catalog-onboard-pipeline.yaml b/.catalog-onboard-pipeline.yaml index 78ac1103..4c9003c4 100644 --- a/.catalog-onboard-pipeline.yaml +++ b/.catalog-onboard-pipeline.yaml @@ -22,23 +22,3 @@ offerings: region: us-south # pre_validation: tests/scripts/pre-validation.sh # optionally run a command before validation runs # post_validation: tests/scripts/post-validation.sh # optionally run a command after validation completes -- name: deploy-arch-ibm-storage-scale # must match the offering name in the ibm_catalog.json - kind: solution - catalog_id: 90717ada-be34-4b82-a0d9-0f225f8dbd76 - offering_id: 33105573-84df-4279-9efa-48887456fa6d - # list all of the variations (flavors) you have included in the ibm_catalog.json - variations: - - name: Cluster-with-Scale - mark_ready: false # have pipeline mark as visible if validation passes - install_type: fullstack # ensure value matches what is in ibm_catalog.json (fullstack or extension) - destroy_resources_on_failure: false # defaults to false if not specified so resources can be inspected to debug failures during validation - destroy_workspace_on_failure: false # defaults to false if not specified so schematics workspace can be inspected to debug failures during validation - import_only: false # defaults to false - set to true if you do not want to do any validation, but be aware offering can't be publish if not validated - validation_rg: validation # the resource group in which to do validation in. Will be created if does not exist. 
If not specified, default value is 'validation' - # scc details needed if your offering is claiming any compliance controls - scc: - # must be an instance in the same account the validation is being done in - instance_id: 1c7d5f78-9262-44c3-b779-b28fe4d88c37 - region: us-south - # pre_validation: tests/scripts/pre-validation.sh # optionally run a command before validation runs - # post_validation: tests/scripts/post-validation.sh # optionally run a command after validation completes diff --git a/.secrets.baseline b/.secrets.baseline index feeef2d3..d5df7ebe 100644 --- a/.secrets.baseline +++ b/.secrets.baseline @@ -3,7 +3,7 @@ "files": "^.secrets.baseline$", "lines": null }, - "generated_at": "2025-11-07T08:08:33Z", + "generated_at": "2025-10-29T15:24:27Z", "plugins_used": [ { "name": "AWSKeyDetector" diff --git a/ci 2 b/ci 2 new file mode 120000 index 00000000..f9d799a5 --- /dev/null +++ b/ci 2 @@ -0,0 +1 @@ +common-dev-assets/module-assets/ci \ No newline at end of file diff --git a/ibm_catalog.json b/ibm_catalog.json index 0a36d3df..8f1f4f5f 100644 --- a/ibm_catalog.json +++ b/ibm_catalog.json @@ -66,6 +66,10 @@ "key": "existing_resource_group", "required": true }, + { + "key": "lsf_pay_per_use", + "required": true + }, { "key": "ssh_keys", "required": true @@ -669,7 +673,7 @@ "service_name": "dns-svcs" }, { - "service_name": "kms", + "service_name": "Key Protect", "role_crns": [ "crn:v1:bluemix:public:iam::::serviceRole:Manager", "crn:v1:bluemix:public:iam::::role:ConfigReader" @@ -720,6 +724,12 @@ "crn:v1:bluemix:public:iam::::serviceRole:Manager", "crn:v1:bluemix:public:iam::::role:Administrator" ] + }, + { + "service_name": "All Account Management services", + "role_crns": [ + "crn:v1:bluemix:public:iam::::role:Administrator" + ] } ], "architecture": { diff --git a/main.tf b/main.tf index 64e9aa9e..7514f331 100644 --- a/main.tf +++ b/main.tf @@ -134,6 +134,7 @@ module "landing_zone_vsi" { client_security_group_name = var.client_security_group_name 
gklm_security_group_name = var.gklm_security_group_name ldap_security_group_name = var.ldap_security_group_name + lsf_pay_per_use = var.lsf_pay_per_use } module "prepare_tf_input" { @@ -237,6 +238,7 @@ module "prepare_tf_input" { bms_boot_drive_encryption = var.bms_boot_drive_encryption scale_afm_bucket_config_details = local.scale_afm_bucket_config_details scale_afm_cos_hmac_key_params = local.scale_afm_cos_hmac_key_params + lsf_pay_per_use = var.lsf_pay_per_use depends_on = [module.deployer] } @@ -378,6 +380,7 @@ module "write_compute_cluster_inventory" { compute_subnet_crn = local.compute_subnet_crn kms_encryption_enabled = local.kms_encryption_enabled boot_volume_encryption_key = var.boot_volume_encryption_key + lsf_pay_per_use = var.lsf_pay_per_use depends_on = [time_sleep.wait_for_vsi_syncup, module.landing_zone_vsi] } diff --git a/modules/ansible-roles/roles/lsf_mgmt_config/templates/ibmcloudgen2_templates.json.j2 b/modules/ansible-roles/roles/lsf_mgmt_config/templates/ibmcloudgen2_templates.json.j2 index 358f5b11..f710f299 100644 --- a/modules/ansible-roles/roles/lsf_mgmt_config/templates/ibmcloudgen2_templates.json.j2 +++ b/modules/ansible-roles/roles/lsf_mgmt_config/templates/ibmcloudgen2_templates.json.j2 @@ -1,25 +1,33 @@ { - "templates": [ - { - "templateId": "Template-1", - "maxNumber": "{{ rc_max_num }}", - "attributes": { - "type": ["String", "X86_64"], - "ncores": ["Numeric", "{{ rc_ncores }}"], - "ncpus": ["Numeric", "{{ rc_ncpus }}"], - "mem": ["Numeric", "{{ rc_mem_in_mb }}"], - "icgen2host": ["Boolean", "1"] - }, - "crn": "{{ boot_volume_encryption_key }}", - "imageId": "{{ image_id }}", - "subnetId": "{{ compute_subnet_crn }}", - "vpcId": "{{ vpc_id }}", - "vmType": "{{ rc_profile }}", - "securityGroupIds": ["{{ compute_security_group_id | first }}"], - "resourceGroupId": "{{ resource_group_id }}", - "sshkey_id": "{{ compute_ssh_keys_ids | first }}", - "region": "{{ region_name }}", - "zone": "{{ zone_name | first }}" - } - ] + "templates": [ 
+ { + "templateId": "Template-1", + "maxNumber": "{{ rc_max_num }}", + "attributes": { + "type": ["String", "X86_64"], + "ncores": ["Numeric", "{{ rc_ncores }}"], + "ncpus": ["Numeric", "{{ rc_ncpus }}"], + "mem": ["Numeric", "{{ rc_mem_in_mb }}"], + "icgen2host": ["Boolean", "1"] + }, + "volumeProfile": "general-purpose", + "encryptionKey": "{{ boot_volume_encryption_key }}", +{% if lsf_pay_per_use %} + "catalogOffering": { + "version_crn": "{{ catalog_offering.version_crn }}", + "plan_crn": "{{ catalog_offering.plan_crn }}" + }, +{% else %} + "imageId": "{{ image_id }}", +{% endif %} + "subnetId": "{{ compute_subnet_crn }}", + "vpcId": "{{ vpc_id }}", + "vmType": "{{ rc_profile }}", + "securityGroupIds": ["{{ compute_security_group_id | first }}"], + "resourceGroupId": "{{ resource_group_id }}", + "sshkeyIds": {{ compute_ssh_keys_ids | to_json }}, + "region": "{{ region_name }}", + "zone": "{{ zone_name | first }}" + } + ] } diff --git a/modules/deployer/image_map.tf b/modules/deployer/image_map.tf index d4d29018..727f681a 100644 --- a/modules/deployer/image_map.tf +++ b/modules/deployer/image_map.tf @@ -1,17 +1,17 @@ locals { image_region_map = { - "hpc-lsf-fp15-deployer-rhel810-v1" = { - "eu-es" = "r050-e7b874c1-f370-41c4-8ee6-50efb07aa340" - "eu-gb" = "r018-eb14c522-cb0f-4b72-948f-2c029957665a" - "eu-de" = "r010-00629ef3-324c-4651-a7a7-76830d2ad660" - "us-east" = "r014-ac586488-de00-490e-8962-5e2a7fcab076" - "us-south" = "r006-f2b7871c-54c9-4b02-837c-1d28294f0842" - "jp-tok" = "r022-dd715ea3-d2dc-4936-bff0-51c9cd63b3a9" - "jp-osa" = "r034-82d648ed-fd3e-4248-955c-6009c973aa5f" - "au-syd" = "r026-b47e4863-f5e7-440c-8734-c058f6b8ce33" - "br-sao" = "r042-8b5ac031-3e65-4afb-9679-b7e2b907a2ad" - "ca-tor" = "r038-c55b1ab4-500f-4842-9e78-dc64a16a746a" - "ca-mon" = "r058-fc93c3f9-f97c-4d9b-b8d6-dd40db891913" + "hpc-lsf-fp15-deployer-rhel810-v2" = { + "eu-es" = "r050-92ce36d5-20c3-4c7d-8d38-a5d16e553d6a" + "eu-gb" = "r018-da0c8368-8b87-428b-b63e-1d4acd3764f0" + "eu-de" 
= "r010-4625705f-a2af-46fc-9694-864fb8a4b02f" + "us-east" = "r014-79497f2e-d71c-4390-93d6-8ccdb19f594a" + "us-south" = "r006-390958ed-07cc-4489-98d3-8e3350174cf9" + "jp-tok" = "r022-f4d7ac54-1707-42e7-acf9-a74f46b4aa66" + "jp-osa" = "r034-e5363d7e-f4e3-4fdc-a642-3a89c1c86b6b" + "au-syd" = "r026-a52653e5-2d96-4470-bb28-5903d562099b" + "br-sao" = "r042-61f7226d-5ebc-40fa-ae81-1554344b9640" + "ca-tor" = "r038-4ccdcbb5-5d1a-4a86-9fdf-847f57dd1588" + "ca-mon" = "r058-9a37637f-07a1-4048-9a6c-6ab75441f74a" }, "hpc-lsf-fp14-deployer-rhel810-v1" = { "eu-es" = "r050-a530edc3-d053-41cd-899b-2c61d53d5efd" diff --git a/modules/landing_zone_vsi/image_map.tf b/modules/landing_zone_vsi/image_map.tf index 4ee680d8..24b43b54 100644 --- a/modules/landing_zone_vsi/image_map.tf +++ b/modules/landing_zone_vsi/image_map.tf @@ -1,28 +1,30 @@ locals { image_region_map = { - "hpc-lsf-fp15-rhel810-v1" = { - "eu-es" = "r050-deeeb734-2523-4aff-96e3-2be8d2b0d634" - "eu-gb" = "r018-8edcd9a1-dbca-462f-bf74-017c15ca4b71" - "eu-de" = "r010-394c5295-1704-4066-b57e-ae9bca1968de" - "us-east" = "r014-1777cdcb-8a68-4ef0-becf-84ec0d2e9a26" - "us-south" = "r006-40caf671-28a8-42c5-b83e-b2ba3ceb86af" - "jp-tok" = "r022-01531301-d100-44ba-b1a3-12e7c8d65469" - "jp-osa" = "r034-ac455775-c667-4d3e-b281-9ef845080599" - "au-syd" = "r026-eff4d59c-5006-46cc-8b03-60514f763a87" - "br-sao" = "r042-1e1bbeeb-3ef7-4f7a-a44c-9f50609bb538" - "ca-tor" = "r038-bb9fcdb7-d200-4cdd-af04-6848007c9cb2" + "hpc-lsf-fp15-rhel810-v2" = { + "eu-es" = "r050-bb9be81c-7026-4b53-9768-b46fe6ff35af" + "eu-gb" = "r018-d85fbab9-5573-4a25-8cd9-b584e0266ed3" + "eu-de" = "r010-b5259da3-11f9-434d-87f9-0eed1030f593" + "us-east" = "r014-1dffabd0-bb20-4c97-b73a-3a745ccfa53d" + "us-south" = "r006-829c9fbc-ecb6-4f3d-be37-1f652d26ec58" + "jp-tok" = "r022-1c956e0e-17e0-4ce3-833b-d79173d68fe0" + "jp-osa" = "r034-7a3733eb-c2eb-4e8e-8b10-4b5bc97331c3" + "au-syd" = "r026-4d4d012d-a023-4a32-9a58-fe3b0903be7a" + "br-sao" = 
"r042-7d242646-c928-4eae-8176-b6a4c6aad06b" + "ca-tor" = "r038-023f8697-5b44-469e-a021-6898b46ea0a5" + "ca-mon" = "r058-e952898e-71cf-4921-8e3c-1e2b00382f07" }, - "hpc-lsf-fp15-compute-rhel810-v1" = { - "eu-es" = "r050-f0608e39-9dcf-4aca-9e92-7719474b3e86" - "eu-gb" = "r018-db8b97a8-6f87-4cf7-a044-847da6ab5c59" - "eu-de" = "r010-957efd6b-e7b3-4249-8644-6184f1531915" - "us-east" = "r014-5fdd6a25-5943-4084-9c57-b900a80579a3" - "us-south" = "r006-5c0e462a-679c-4a18-81a5-0fe036f483a3" - "jp-tok" = "r022-8087a984-8912-42ff-9576-c5cab8edda3a" - "jp-osa" = "r034-728d1f12-7842-412c-97a0-9deb66c23962" - "au-syd" = "r026-f957ed22-9565-441c-bce6-f716360e02ea" - "br-sao" = "r042-7bf7d508-a7b1-4434-ae6a-6986f7042d4e" - "ca-tor" = "r038-a658da44-f1b4-4e02-826a-38b16e6ae98a" + "hpc-lsf-fp15-compute-rhel810-v2" = { + "eu-es" = "r050-91d88518-bc52-42f4-a794-f64e9d0e9fac" + "eu-gb" = "r018-923c06c7-f077-44b7-9ed2-7d9817d9df26" + "eu-de" = "r010-2dd07456-e9ad-4b39-a131-ad786fb1f725" + "us-east" = "r014-f464db9b-5951-48ab-908d-8d36614ac086" + "us-south" = "r006-cb59a6b6-7a58-489b-905c-47ca13f2e60b" + "jp-tok" = "r022-1026bca9-163d-4852-a071-7481ebc19255" + "jp-osa" = "r034-5551a235-92eb-4316-98e3-5b100b7563c8" + "au-syd" = "r026-30a9c1d9-1803-4cf2-9175-bab4f7866f77" + "br-sao" = "r042-1f4b2fa5-eb39-472c-acd9-96cba25d46ab" + "ca-tor" = "r038-bebb2cdc-530a-4d37-ada7-f8f0fbb17a5f" + "ca-mon" = "r058-12b6c1f4-1377-478d-ba39-bd4b38a94e8b" }, "hpc-lsf-fp14-rhel810-v1" = { "eu-es" = "r050-12a3533c-5fa1-4bcc-8765-7150a06e122e" diff --git a/modules/landing_zone_vsi/locals.tf b/modules/landing_zone_vsi/locals.tf index bd9888d7..cb340dfc 100644 --- a/modules/landing_zone_vsi/locals.tf +++ b/modules/landing_zone_vsi/locals.tf @@ -482,3 +482,10 @@ locals { ldap_security_group_name_id = var.ldap_security_group_name != null ? data.ibm_is_security_group.ldap_security_group[*].id : [] compute_security_group_name_id = var.compute_security_group_name != null ? 
data.ibm_is_security_group.compute_security_group[*].id : [] } + +locals { + catalog_offering = { + version_crn = "crn:v1:bluemix:public:globalcatalog-collection:global:a/77efe1030c00b5c89cfd08648d3480bf:0d89ec0d-d39a-494d-ac5b-9d940d8cc65f:version:61e655c5-40b6-4b68-a6ab-e6c77a457fce/e08b9ca5-699c-4779-8369-1a0c1ed54b30" + plan_crn = "crn:v1:bluemix:public:globalcatalog-collection:global:a/77efe1030c00b5c89cfd08648d3480bf:0d89ec0d-d39a-494d-ac5b-9d940d8cc65f:plan:sw.0d89ec0d-d39a-494d-ac5b-9d940d8cc65f.d114e7ab-4f7e-40c4-98cc-f0c000cbf3a7" + } +} diff --git a/modules/landing_zone_vsi/main.tf b/modules/landing_zone_vsi/main.tf index 1e867ad2..452fe537 100644 --- a/modules/landing_zone_vsi/main.tf +++ b/modules/landing_zone_vsi/main.tf @@ -136,7 +136,7 @@ module "storage_sg" { module "login_vsi" { count = var.scheduler == "LSF" ? 1 : 0 source = "terraform-ibm-modules/landing-zone-vsi/ibm" - version = "5.4.6" + version = "5.11.0" vsi_per_subnet = 1 create_security_group = false security_group = null @@ -159,13 +159,15 @@ module "login_vsi" { } module "management_vsi" { - count = length(var.management_instances) - source = "terraform-ibm-modules/landing-zone-vsi/ibm" - version = "5.4.16" - vsi_per_subnet = var.management_instances[count.index]["count"] - create_security_group = false - security_group = null - image_id = local.image_mapping_entry_found ? local.new_image_id : data.ibm_is_image.management_stock_image[0].id + count = length(var.management_instances) + source = "terraform-ibm-modules/landing-zone-vsi/ibm" + version = "5.11.0" + vsi_per_subnet = var.management_instances[count.index]["count"] + create_security_group = false + security_group = null + catalog_offering = var.lsf_pay_per_use ? local.catalog_offering : null + image_id = var.lsf_pay_per_use ? null : (local.image_mapping_entry_found ? local.new_image_id : data.ibm_is_image.management_stock_image[0].id) + #image_id = local.image_mapping_entry_found ? 
local.new_image_id : data.ibm_is_image.management_stock_image[0].id machine_type = var.management_instances[count.index]["profile"] prefix = format("%s-%s", local.management_node_name, count.index + 1) resource_group_id = var.resource_group @@ -183,13 +185,15 @@ module "management_vsi" { } module "compute_vsi" { - count = length(var.static_compute_instances) - source = "terraform-ibm-modules/landing-zone-vsi/ibm" - version = "5.4.16" - vsi_per_subnet = var.static_compute_instances[count.index]["count"] - create_security_group = false - security_group = null - image_id = var.scheduler == "LSF" ? (local.compute_image_found_in_map ? local.new_compute_image_id : data.ibm_is_image.compute_stock_image[0].id) : (var.storage_type != "evaluation" ? (local.scale_compute_image_found_in_map ? local.scale_compute_image_id : data.ibm_is_image.scale_compute_stock_image[0].id) : local.evaluation_image_id) + count = length(var.static_compute_instances) + source = "terraform-ibm-modules/landing-zone-vsi/ibm" + version = "5.11.0" + vsi_per_subnet = var.static_compute_instances[count.index]["count"] + create_security_group = false + security_group = null + catalog_offering = var.lsf_pay_per_use ? local.catalog_offering : null + image_id = var.lsf_pay_per_use ? null : (var.scheduler == "LSF" ? (local.compute_image_found_in_map ? local.new_compute_image_id : data.ibm_is_image.compute_stock_image[0].id) : (var.storage_type != "evaluation" ? (local.scale_compute_image_found_in_map ? local.scale_compute_image_id : data.ibm_is_image.scale_compute_stock_image[0].id) : local.evaluation_image_id)) + #image_id = var.scheduler == "LSF" ? (local.compute_image_found_in_map ? local.new_compute_image_id : data.ibm_is_image.compute_stock_image[0].id) : (var.storage_type != "evaluation" ? (local.scale_compute_image_found_in_map ? 
local.scale_compute_image_id : data.ibm_is_image.scale_compute_stock_image[0].id) : local.evaluation_image_id) machine_type = var.static_compute_instances[count.index]["profile"] prefix = format("%s-%s", local.compute_node_name, count.index + 1) resource_group_id = var.resource_group @@ -215,7 +219,7 @@ module "compute_vsi" { module "compute_cluster_management_vsi" { count = var.scheduler == "Scale" && local.enable_compute ? 1 : 0 source = "terraform-ibm-modules/landing-zone-vsi/ibm" - version = "5.4.16" + version = "5.11.0" vsi_per_subnet = 1 create_security_group = false security_group = null @@ -242,7 +246,7 @@ module "compute_cluster_management_vsi" { module "storage_vsi" { count = var.scheduler == "Scale" ? (length(var.storage_instances) > 0 && var.storage_type != "persistent" ? 1 : 0) : 0 source = "terraform-ibm-modules/landing-zone-vsi/ibm" - version = "5.4.16" + version = "5.11.0" vsi_per_subnet = var.storage_instances[count.index]["count"] create_security_group = false security_group = null @@ -273,7 +277,7 @@ module "storage_vsi" { module "storage_cluster_management_vsi" { count = var.scheduler == "Scale" ? length(var.storage_instances) : 0 source = "terraform-ibm-modules/landing-zone-vsi/ibm" - version = "5.4.16" + version = "5.11.0" vsi_per_subnet = 1 create_security_group = false security_group = null @@ -301,7 +305,7 @@ module "storage_cluster_management_vsi" { module "storage_cluster_tie_breaker_vsi" { count = var.scheduler == "Scale" ? (var.storage_type != "persistent" ? 1 : 0) : 0 source = "terraform-ibm-modules/landing-zone-vsi/ibm" - version = "5.4.16" + version = "5.11.0" vsi_per_subnet = 1 create_security_group = false security_group = null @@ -330,7 +334,7 @@ module "storage_cluster_tie_breaker_vsi" { module "client_vsi" { count = var.scheduler == "Scale" ? 
length(var.client_instances) : 0 source = "terraform-ibm-modules/landing-zone-vsi/ibm" - version = "5.4.6" + version = "5.11.0" vsi_per_subnet = var.client_instances[count.index]["count"] create_security_group = false security_group = null @@ -353,7 +357,7 @@ module "client_vsi" { module "protocol_vsi" { count = var.scheduler == "Scale" ? ((local.enable_protocol && var.colocate_protocol_instances == false && local.ces_server_type == false) ? 1 : 0) : 0 source = "terraform-ibm-modules/landing-zone-vsi/ibm" - version = "5.4.16" + version = "5.11.0" vsi_per_subnet = var.protocol_instances[count.index]["count"] create_security_group = false security_group = null @@ -383,7 +387,7 @@ module "protocol_vsi" { module "afm_vsi" { count = var.scheduler == "Scale" ? ((local.afm_server_type == false && local.enable_afm) ? 1 : 0) : 0 source = "terraform-ibm-modules/landing-zone-vsi/ibm" - version = "5.4.6" + version = "5.11.0" vsi_per_subnet = var.afm_instances[count.index]["count"] create_security_group = false security_group = null @@ -407,7 +411,7 @@ module "afm_vsi" { module "gklm_vsi" { count = var.scheduler == "Scale" ? (var.scale_encryption_enabled == true && var.scale_encryption_type == "gklm" ? 1 : 0) : 0 source = "terraform-ibm-modules/landing-zone-vsi/ibm" - version = "5.4.6" + version = "5.11.0" vsi_per_subnet = var.gklm_instances[count.index]["count"] create_security_group = false security_group = null @@ -430,7 +434,7 @@ module "gklm_vsi" { module "ldap_vsi" { count = var.enable_ldap == true && var.ldap_server == "null" ? 
1 : 0 source = "terraform-ibm-modules/landing-zone-vsi/ibm" - version = "5.4.6" + version = "5.11.0" vsi_per_subnet = 1 create_security_group = false security_group = null diff --git a/modules/landing_zone_vsi/variables.tf b/modules/landing_zone_vsi/variables.tf index a2a52378..203fff59 100644 --- a/modules/landing_zone_vsi/variables.tf +++ b/modules/landing_zone_vsi/variables.tf @@ -536,3 +536,9 @@ variable "ldap_security_group_name" { default = null description = "Provide the security group name to provision the ldap nodes. If set to null, the solution will automatically create the necessary security group and rules. If you choose to use an existing security group, ensure it has the appropriate rules configured for the ldap nodes to function properly." } + +variable "lsf_pay_per_use" { + type = bool + default = true + description = "When lsf_pay_per_use is set to true, the LSF cluster nodes are provisioned using predefined custom images under a pay-per-use pricing plan, where billing is based on vCPU usage per hour. In this mode, providing custom images for the nodes is not required, and Bring Your Own Image (BYOI) is not supported. The pay-per-use option is available only for FP15 images. If you set the variable to false, the automation uses default images for all cluster nodes and enables support for BYOI, with no pay-per-use billing applied." 
+} diff --git a/modules/prepare_tf_input/main.tf b/modules/prepare_tf_input/main.tf index b3020f10..63691879 100644 --- a/modules/prepare_tf_input/main.tf +++ b/modules/prepare_tf_input/main.tf @@ -6,6 +6,7 @@ resource "local_sensitive_file" "prepare_tf_input" { "ibmcloud_api_key": "${var.ibmcloud_api_key}", "app_center_gui_password": "${var.app_center_gui_password}", "lsf_version": "${var.lsf_version}", + "lsf_pay_per_use": "${var.lsf_pay_per_use}", "resource_group_ids": ${local.resource_group_ids}, "cluster_prefix": "${var.cluster_prefix}", "zones": ${local.zones}, diff --git a/modules/prepare_tf_input/variables.tf b/modules/prepare_tf_input/variables.tf index d439db95..0c622a34 100644 --- a/modules/prepare_tf_input/variables.tf +++ b/modules/prepare_tf_input/variables.tf @@ -14,6 +14,12 @@ variable "lsf_version" { description = "Select the LSF version to deploy: 'fixpack_14' or 'fixpack_15'. Use null to skip LSF deployment." } +variable "lsf_pay_per_use" { + type = bool + default = true + description = "When lsf_pay_per_use is set to true, the LSF cluster nodes are provisioned using predefined custom images under a pay-per-use pricing plan, where billing is based on vCPU usage per hour. In this mode, providing custom images for the nodes is not required, and Bring Your Own Image (BYOI) is not supported. The pay-per-use option is available only for FP15 images. If you set the variable to false, the automation uses default images for all cluster nodes and enables support for BYOI, with no pay-per-use billing applied." 
+} + ############################################################################## # Cluster Level Variables ############################################################################## diff --git a/modules/resource_provisioner/locals.tf b/modules/resource_provisioner/locals.tf index fed183ce..caa46e20 100644 --- a/modules/resource_provisioner/locals.tf +++ b/modules/resource_provisioner/locals.tf @@ -3,9 +3,9 @@ locals { remote_inputs_path = format("%s/terraform.tfvars.json", "/tmp") deployer_path = "/opt/ibm" remote_terraform_path = format("%s/terraform-ibm-hpc", local.deployer_path) - # da_hpc_repo_url = "github.ibm.com/workload-eng-services/HPCaaS.git" + # da_hpc_repo_url = "github.ibm.com/workload-eng-services/HPCaaS.git" da_hpc_repo_url = "github.com/terraform-ibm-modules/terraform-ibm-hpc.git" - da_hpc_lsf_repo_tag = "v3.1.0" + da_hpc_lsf_repo_tag = "v3.2.0" da_hpc_scale_repo_tag = "v3.1.0" da_hpc_repo_tag = var.scheduler == "Scale" ? local.da_hpc_scale_repo_tag : local.da_hpc_lsf_repo_tag remote_ansible_path = format("%s/ibm-spectrumscale-cloud-deploy", local.deployer_path) diff --git a/modules/write_inventory/image_map.tf b/modules/write_inventory/image_map.tf index f58ee9d7..ec48d18a 100644 --- a/modules/write_inventory/image_map.tf +++ b/modules/write_inventory/image_map.tf @@ -1,16 +1,17 @@ locals { image_region_map = { - "hpc-lsf-fp15-rhel810-v1" = { - "eu-es" = "r050-deeeb734-2523-4aff-96e3-2be8d2b0d634" - "eu-gb" = "r018-8edcd9a1-dbca-462f-bf74-017c15ca4b71" - "eu-de" = "r010-394c5295-1704-4066-b57e-ae9bca1968de" - "us-east" = "r014-1777cdcb-8a68-4ef0-becf-84ec0d2e9a26" - "us-south" = "r006-40caf671-28a8-42c5-b83e-b2ba3ceb86af" - "jp-tok" = "r022-01531301-d100-44ba-b1a3-12e7c8d65469" - "jp-osa" = "r034-ac455775-c667-4d3e-b281-9ef845080599" - "au-syd" = "r026-eff4d59c-5006-46cc-8b03-60514f763a87" - "br-sao" = "r042-1e1bbeeb-3ef7-4f7a-a44c-9f50609bb538" - "ca-tor" = "r038-bb9fcdb7-d200-4cdd-af04-6848007c9cb2" + "hpc-lsf-fp15-rhel810-v2" = { + 
"eu-es" = "r050-bb9be81c-7026-4b53-9768-b46fe6ff35af" + "eu-gb" = "r018-d85fbab9-5573-4a25-8cd9-b584e0266ed3" + "eu-de" = "r010-b5259da3-11f9-434d-87f9-0eed1030f593" + "us-east" = "r014-1dffabd0-bb20-4c97-b73a-3a745ccfa53d" + "us-south" = "r006-829c9fbc-ecb6-4f3d-be37-1f652d26ec58" + "jp-tok" = "r022-1c956e0e-17e0-4ce3-833b-d79173d68fe0" + "jp-osa" = "r034-7a3733eb-c2eb-4e8e-8b10-4b5bc97331c3" + "au-syd" = "r026-4d4d012d-a023-4a32-9a58-fe3b0903be7a" + "br-sao" = "r042-7d242646-c928-4eae-8176-b6a4c6aad06b" + "ca-tor" = "r038-023f8697-5b44-469e-a021-6898b46ea0a5" + "ca-mon" = "r058-e952898e-71cf-4921-8e3c-1e2b00382f07" }, "hpc-lsf-fp15-compute-rhel810-v1" = { "eu-es" = "r050-f0608e39-9dcf-4aca-9e92-7719474b3e86" @@ -36,17 +37,18 @@ locals { "br-sao" = "r042-bb407137-93cf-4ec7-aa77-4702896fff97" "ca-tor" = "r038-6683403d-1cf5-4f39-a96f-c8cbb2314ad5" }, - "hpc-lsf-fp14-compute-rhel810-v1" = { - "eu-es" = "r050-d2ad9625-1668-4b2c-a8bb-6ef14678d3ed" - "eu-gb" = "r018-f1059503-27ec-44d4-a981-21be6225520a" - "eu-de" = "r010-8115b1f6-912e-4b55-89f1-e448c397115e" - "us-east" = "r014-5108884c-011b-4473-b585-0d43309c37e3" - "us-south" = "r006-68c6af72-1abf-4d13-bca1-4f42be5d2c70" - "jp-tok" = "r022-1932c5ec-b5a6-4262-aa56-6c6257c8297f" - "jp-osa" = "r034-50be9bd9-9623-4ffc-8ce7-aab66f674137" - "au-syd" = "r026-11aee148-c938-4524-91e6-8e6da5933a42" - "br-sao" = "r042-5cb62448-e771-4caf-a556-28fdf88acab9" - "ca-tor" = "r038-fa815ec1-d52e-42b2-8221-5b8c2145a248" + "hpc-lsf-fp15-compute-rhel810-v2" = { + "eu-es" = "r050-91d88518-bc52-42f4-a794-f64e9d0e9fac" + "eu-gb" = "r018-923c06c7-f077-44b7-9ed2-7d9817d9df26" + "eu-de" = "r010-2dd07456-e9ad-4b39-a131-ad786fb1f725" + "us-east" = "r014-f464db9b-5951-48ab-908d-8d36614ac086" + "us-south" = "r006-cb59a6b6-7a58-489b-905c-47ca13f2e60b" + "jp-tok" = "r022-1026bca9-163d-4852-a071-7481ebc19255" + "jp-osa" = "r034-5551a235-92eb-4316-98e3-5b100b7563c8" + "au-syd" = "r026-30a9c1d9-1803-4cf2-9175-bab4f7866f77" + "br-sao" = 
"r042-1f4b2fa5-eb39-472c-acd9-96cba25d46ab" + "ca-tor" = "r038-bebb2cdc-530a-4d37-ada7-f8f0fbb17a5f" + "ca-mon" = "r058-12b6c1f4-1377-478d-ba39-bd4b38a94e8b" } } } diff --git a/modules/write_inventory/locals.tf b/modules/write_inventory/locals.tf index 97285d13..2e34e0cd 100644 --- a/modules/write_inventory/locals.tf +++ b/modules/write_inventory/locals.tf @@ -10,4 +10,8 @@ locals { compute_image_found_in_map = contains(keys(local.image_region_map), var.dynamic_compute_instances[0]["image"]) new_compute_image_id = local.compute_image_found_in_map ? local.image_region_map[var.dynamic_compute_instances[0]["image"]][local.region] : "Image not found with the given name" image_id = local.compute_image_found_in_map ? local.new_compute_image_id : data.ibm_is_image.dynamic_compute[0].id + catalog_offering = { + version_crn = "crn:v1:bluemix:public:globalcatalog-collection:global:a/77efe1030c00b5c89cfd08648d3480bf:0d89ec0d-d39a-494d-ac5b-9d940d8cc65f:version:61e655c5-40b6-4b68-a6ab-e6c77a457fce/e08b9ca5-699c-4779-8369-1a0c1ed54b30" + plan_crn = "crn:v1:bluemix:public:globalcatalog-collection:global:a/77efe1030c00b5c89cfd08648d3480bf:0d89ec0d-d39a-494d-ac5b-9d940d8cc65f:plan:sw.0d89ec0d-d39a-494d-ac5b-9d940d8cc65f.d114e7ab-4f7e-40c4-98cc-f0c000cbf3a7" + } } diff --git a/modules/write_inventory/main.tf b/modules/write_inventory/main.tf index 72f87020..b30e8906 100644 --- a/modules/write_inventory/main.tf +++ b/modules/write_inventory/main.tf @@ -37,7 +37,9 @@ resource "local_sensitive_file" "infra_details_to_json" { "compute_subnets_cidr": ${jsonencode(var.compute_subnets_cidr)}, "compute_security_group_id": ${jsonencode(var.compute_security_group_id)}, "compute_subnet_crn": "${var.compute_subnet_crn}", - "boot_volume_encryption_key": ${local.boot_volume_encryption_key} + "boot_volume_encryption_key": ${local.boot_volume_encryption_key}, + "lsf_pay_per_use": ${var.lsf_pay_per_use}, + "catalog_offering": ${jsonencode(local.catalog_offering)} } EOT filename = 
var.json_inventory_path diff --git a/modules/write_inventory/variables.tf b/modules/write_inventory/variables.tf index e037c407..8b71190f 100644 --- a/modules/write_inventory/variables.tf +++ b/modules/write_inventory/variables.tf @@ -206,3 +206,9 @@ variable "login_host" { default = null description = "list of lsf Login node" } + +variable "lsf_pay_per_use" { + type = bool + default = true + description = "When lsf_pay_per_use is set to true, the LSF cluster nodes are provisioned using predefined custom images under a pay-per-use pricing plan, where billing is based on vCPU usage per hour. In this mode, providing custom images for the nodes is not required, and Bring Your Own Image (BYOI) is not supported. The pay-per-use option is available only for FP15 images. If you set the variable to false, the automation uses default images for all cluster nodes and enables support for BYOI, with no pay-per-use billing applied." +} diff --git a/samples/configs/hpc_catalog_values.json b/samples/configs/hpc_catalog_values.json index 54e19b10..06be1814 100644 --- a/samples/configs/hpc_catalog_values.json +++ b/samples/configs/hpc_catalog_values.json @@ -64,5 +64,6 @@ "enable_hyperthreading": "true", "vpn_enabled": "false", "TF_VERSION": "1.9", - "TF_PARALLELISM": "250" + "TF_PARALLELISM": "250", + "lsf_pay_per_use": "true" } diff --git a/samples/configs/hpc_schematics_values.json b/samples/configs/hpc_schematics_values.json index fd6a9245..1c48e4a6 100644 --- a/samples/configs/hpc_schematics_values.json +++ b/samples/configs/hpc_schematics_values.json @@ -75,6 +75,13 @@ "secure": false, "description": "Select the desired version of IBM Spectrum LSF to deploy either fixpack_15 or fixpack_14. By default, the solution uses the latest available version, which is Fix Pack 15. If you need to deploy an earlier version such as Fix Pack 14, update the lsf_version field to fixpack_14. 
When changing the LSF version, ensure that all custom images used for management, compute, and login nodes correspond to the same version. This is essential to maintain compatibility across the cluster and to prevent deployment issues." }, + { + "name": "lsf_pay_per_use", + "value": "true", + "type": "string", + "secure": false, + "description": "When lsf_pay_per_use is set to true, the LSF cluster nodes are provisioned using predefined custom images under a pay-per-use pricing plan, where billing is based on vCPU usage per hour. In this mode, providing custom images for the nodes is not required, and Bring Your Own Image (BYOI) is not supported. The pay-per-use option is available only for FP15 images. If you set the variable to false, the automation uses default images for all cluster nodes and enables support for BYOI, with no pay-per-use billing applied." + }, { "name": "ssh_keys", "value": "[\"Please fill here\"]", diff --git a/solutions/lsf/locals.tf b/solutions/lsf/locals.tf index 7a85747e..01df4711 100644 --- a/solutions/lsf/locals.tf +++ b/solutions/lsf/locals.tf @@ -82,6 +82,7 @@ locals { sccwp_enable = var.sccwp_enable cspm_enabled = var.cspm_enabled app_config_plan = var.app_config_plan + lsf_pay_per_use = var.lsf_pay_per_use } } @@ -153,6 +154,7 @@ locals { cspm_enable = lookup(local.override[local.override_type], "cspm_enable", local.config.cspm_enabled) sccwp_service_plan = lookup(local.override[local.override_type], "scc_wp_service_plan", local.config.sccwp_service_plan) app_config_plan = lookup(local.override[local.override_type], "app_config_plan", local.config.app_config_plan) + lsf_pay_per_use = lookup(local.override[local.override_type], "lsf_pay_per_use", local.config.lsf_pay_per_use) # client_instances = lookup(local.override[local.override_type], "client_instances", local.config.client_instances) # client_subnets_cidr = lookup(local.override[local.override_type], "client_subnets_cidr", local.config.client_subnets_cidr) } diff --git 
a/solutions/lsf/main.tf b/solutions/lsf/main.tf index c711c07c..ba42edb4 100644 --- a/solutions/lsf/main.tf +++ b/solutions/lsf/main.tf @@ -65,4 +65,5 @@ module "lsf" { sccwp_service_plan = local.env.sccwp_service_plan cspm_enabled = var.cspm_enabled app_config_plan = var.app_config_plan + lsf_pay_per_use = local.env.lsf_pay_per_use } diff --git a/solutions/lsf/variables.tf b/solutions/lsf/variables.tf index 2abcde56..9dac471c 100644 --- a/solutions/lsf/variables.tf +++ b/solutions/lsf/variables.tf @@ -22,6 +22,12 @@ variable "lsf_version" { } } +variable "lsf_pay_per_use" { + type = bool + default = true + description = "When lsf_pay_per_use is set to true, the LSF cluster nodes are provisioned using predefined custom images under a pay-per-use pricing plan, where billing is based on vCPU usage per hour. In this mode, providing custom images for the nodes is not required, and Bring Your Own Image (BYOI) is not supported. The pay-per-use option is available only for FP15 images. If you set the variable to false, the automation uses default images for all cluster nodes and enables support for BYOI, with no pay-per-use billing applied." +} + variable "app_center_gui_password" { type = string default = "" @@ -192,16 +198,16 @@ variable "deployer_instance" { profile = string }) default = { - image = "hpc-lsf-fp15-deployer-rhel810-v1" + image = "hpc-lsf-fp15-deployer-rhel810-v2" profile = "bx2-8x32" } - description = "Configuration for the deployer node, including the custom image and instance profile. By default, deployer node is created using Fix Pack 15. If deploying with Fix Pack 14, set lsf_version to fixpack_14 and use the corresponding image hpc-lsf-fp15-deployer-rhel810-v1. The selected image must align with the specified lsf_version, any mismatch may lead to deployment failures." + description = "Configuration for the deployer node, including the custom image and instance profile. By default, deployer node is created using Fix Pack 15. 
If deploying with Fix Pack 14, set lsf_version to fixpack_14 and use the corresponding image hpc-lsf-fp14-deployer-rhel810-v1. The selected image must align with the specified lsf_version, any mismatch may lead to deployment failures." validation { condition = contains([ - "hpc-lsf-fp15-deployer-rhel810-v1", + "hpc-lsf-fp15-deployer-rhel810-v2", "hpc-lsf-fp14-deployer-rhel810-v1" ], var.deployer_instance.image) - error_message = "Invalid deployer image. Allowed values for fixpack_15 is 'hpc-lsf-fp15-deployer-rhel810-v1' and for fixpack_14 is 'hpc-lsf-fp14-deployer-rhel810-v1'." + error_message = "Invalid deployer image. Allowed values for fixpack_15 is 'hpc-lsf-fp15-deployer-rhel810-v2' and for fixpack_14 is 'hpc-lsf-fp14-deployer-rhel810-v1'." } validation { condition = ( @@ -229,7 +235,7 @@ variable "login_instance" { ) default = [{ profile = "bx2-2x8" - image = "hpc-lsf-fp15-compute-rhel810-v1" + image = "hpc-lsf-fp15-compute-rhel810-v2" }] description = "Specify the list of login node configurations, including instance profile, image name. By default, login node is created using Fix Pack 15. If deploying with Fix Pack 14, set lsf_version to fixpack_14 and use the corresponding image hpc-lsf-fp14-compute-rhel810-v1. The selected image must align with the specified lsf_version, any mismatch may lead to deployment failures." validation { @@ -260,7 +266,7 @@ variable "management_instances" { default = [{ profile = "bx2-16x64" count = 2 - image = "hpc-lsf-fp15-rhel810-v1" + image = "hpc-lsf-fp15-rhel810-v2" }] description = "Specify the list of management node configurations, including instance profile, image name, and count. By default, all management nodes are created using Fix Pack 15. If deploying with Fix Pack 14, set lsf_version to fixpack_14 and use the corresponding image hpc-lsf-fp14-rhel810-v1. The selected image must align with the specified lsf_version, any mismatch may lead to deployment failures. 
The solution allows customization of instance profiles and counts, but mixing custom images and IBM stock images across instances is not supported. If using IBM stock images, only Red Hat-based images are allowed. Management nodes must have a minimum of 9 GB RAM. Select a profile with 9 GB or higher." validation { @@ -306,7 +312,7 @@ variable "static_compute_instances" { default = [{ profile = "bx2-4x16" count = 0 - image = "hpc-lsf-fp15-compute-rhel810-v1" + image = "hpc-lsf-fp15-compute-rhel810-v2" }] description = "Specify the list of static compute node configurations, including instance profile, image name, and count. By default, all compute nodes are created using Fix Pack 15. If deploying with Fix Pack 14, set lsf_version to fixpack_14 and use the corresponding image hpc-lsf-fp14-compute-rhel810-v1. The selected image must align with the specified lsf_version, any mismatch may lead to deployment failures. The solution allows customization of instance profiles and counts, but mixing custom images and IBM stock images across instances is not supported. If using IBM stock images, only Red Hat-based images are allowed." validation { @@ -345,7 +351,7 @@ variable "dynamic_compute_instances" { default = [{ profile = "bx2-4x16" count = 500 - image = "hpc-lsf-fp15-compute-rhel810-v1" + image = "hpc-lsf-fp15-compute-rhel810-v2" }] description = "Specify the list of dynamic compute node configurations, including instance profile, image name, and count. By default, all dynamic compute nodes are created using Fix Pack 15. If deploying with Fix Pack 14, set lsf_version to fixpack_14 and use the corresponding image hpc-lsf-fp14-compute-rhel810-v1. The selected image must align with the specified lsf_version, any mismatch may lead to deployment failures. Currently, only a single instance profile is supported for dynamic compute nodes—multiple profiles are not yet supported.." 
validation { diff --git a/tests/data/lsf_fp15_config.yml b/tests/data/lsf_fp15_config.yml index e6bd9585..e8d50adb 100644 --- a/tests/data/lsf_fp15_config.yml +++ b/tests/data/lsf_fp15_config.yml @@ -13,7 +13,7 @@ bastion_instance: image: ibm-ubuntu-22-04-5-minimal-amd64-3 deployer_instance: profile: bx2-8x32 - image: hpc-lsf-fp15-deployer-rhel810-v1 + image: hpc-lsf-fp15-deployer-rhel810-v2 enable_cos_integration: false enable_vpc_flow_logs: false custom_file_shares: @@ -26,19 +26,19 @@ custom_file_shares: key_management: key_protect login_instance: - profile: bx2-2x8 - image: hpc-lsf-fp15-compute-rhel810-v1 + image: hpc-lsf-fp15-compute-rhel810-v2 management_instances: - profile: bx2-4x16 count: 2 - image: hpc-lsf-fp15-rhel810-v1 + image: hpc-lsf-fp15-rhel810-v2 static_compute_instances: - profile: bx2-2x8 count: 2 - image: hpc-lsf-fp15-compute-rhel810-v1 + image: hpc-lsf-fp15-compute-rhel810-v2 dynamic_compute_instances: - profile: bx2-2x8 count: 1024 - image: hpc-lsf-fp15-compute-rhel810-v1 + image: hpc-lsf-fp15-compute-rhel810-v2 placement_strategy: spread kms_instance_name: cicd-lsf-dnd-kms-instance kms_key_name: cicd-lsf-dnd-kms-key @@ -76,8 +76,8 @@ us_south_cluster_name: HPC-LSF-2 jp_tok_zone: jp-tok-1 jp_tok_cluster_name: HPC-LSF-2 attracker_test_zone: eu-de-1 #added for testing purpose -management_instances_image: hpc-lsf-fp15-rhel810-v1 #added for testing purpose -static_compute_instances_image: hpc-lsf-fp15-compute-rhel810-v1 #added for testing purpose -dynamic_compute_instances_image: hpc-lsf-fp15-compute-rhel810-v1 #added for testing purpose +management_instances_image: hpc-lsf-fp15-rhel810-v2 #added for testing purpose +static_compute_instances_image: hpc-lsf-fp15-compute-rhel810-v2 #added for testing purpose +dynamic_compute_instances_image: hpc-lsf-fp15-compute-rhel810-v2 #added for testing purpose ssh_file_path: /artifacts/.ssh/id_rsa ssh_file_path_two: /artifacts/.ssh/id_rsa diff --git a/variables.tf b/variables.tf index 746569fc..d49424b6 
100644 --- a/variables.tf +++ b/variables.tf @@ -1198,3 +1198,9 @@ variable "login_security_group_name" { error_message = "If the login_security_group_name are provided, the user should also provide the vpc_name." } } + +variable "lsf_pay_per_use" { + type = bool + default = true + description = "When lsf_pay_per_use is set to true, the LSF cluster nodes are provisioned using predefined custom images under a pay-per-use pricing plan, where billing is based on vCPU usage per hour. In this mode, providing custom images for the nodes is not required, and Bring Your Own Image (BYOI) is not supported. The pay-per-use option is available only for FP15 images. If you set the variable to false, the automation uses default images for all cluster nodes and enables support for BYOI, with no pay-per-use billing applied." +}