diff --git a/.catalog-onboard-pipeline.yaml b/.catalog-onboard-pipeline.yaml
index 4c9003c4..78ac1103 100644
--- a/.catalog-onboard-pipeline.yaml
+++ b/.catalog-onboard-pipeline.yaml
@@ -22,3 +22,23 @@ offerings:
region: us-south
# pre_validation: tests/scripts/pre-validation.sh # optionally run a command before validation runs
# post_validation: tests/scripts/post-validation.sh # optionally run a command after validation completes
+- name: deploy-arch-ibm-storage-scale # must match the offering name in the ibm_catalog.json
+ kind: solution
+ catalog_id: 90717ada-be34-4b82-a0d9-0f225f8dbd76
+ offering_id: 33105573-84df-4279-9efa-48887456fa6d
+ # list all of the variations (flavors) you have included in the ibm_catalog.json
+ variations:
+ - name: Cluster-with-Scale
+    mark_ready: false # have the pipeline mark the offering as visible if validation passes
+ install_type: fullstack # ensure value matches what is in ibm_catalog.json (fullstack or extension)
+    destroy_resources_on_failure: false # defaults to false if not specified, so resources can be inspected to debug validation failures
+    destroy_workspace_on_failure: false # defaults to false if not specified, so the Schematics workspace can be inspected to debug validation failures
+    import_only: false # defaults to false - set to true to skip validation entirely, but be aware the offering can't be published if it is not validated
+    validation_rg: validation # the resource group in which to run validation; created if it does not exist. Defaults to 'validation' if not specified
+ # scc details needed if your offering is claiming any compliance controls
+ scc:
+      # must be an instance in the same account where validation runs
+ instance_id: 1c7d5f78-9262-44c3-b779-b28fe4d88c37
+ region: us-south
+ # pre_validation: tests/scripts/pre-validation.sh # optionally run a command before validation runs
+ # post_validation: tests/scripts/post-validation.sh # optionally run a command after validation completes
diff --git a/.secrets.baseline b/.secrets.baseline
index e72e0cdb..23762826 100644
--- a/.secrets.baseline
+++ b/.secrets.baseline
@@ -3,7 +3,7 @@
"files": "^.secrets.baseline$",
"lines": null
},
- "generated_at": "2025-06-19T07:38:57Z",
+ "generated_at": "2025-08-19T13:31:49Z",
"plugins_used": [
{
"name": "AWSKeyDetector"
@@ -102,6 +102,32 @@
"type": "Secret Keyword",
"verified_result": null
}
+ ],
+ "samples/configs/scale_catalog_values.json": [
+ {
+ "hashed_secret": "2e7ec5f9611439242fd8e30e3e13bc36baff526c",
+ "is_secret": true,
+ "is_verified": false,
+ "line_number": 10,
+ "type": "Secret Keyword",
+ "verified_result": null
+ },
+ {
+ "hashed_secret": "1f5e25be9b575e9f5d39c82dfd1d9f4d73f1975c",
+ "is_secret": true,
+ "is_verified": false,
+ "line_number": 45,
+ "type": "Secret Keyword",
+ "verified_result": null
+ },
+ {
+ "hashed_secret": "b295b04949a98dc50ba65adcddd588077b93ab3c",
+ "is_secret": true,
+ "is_verified": false,
+ "line_number": 62,
+ "type": "Secret Keyword",
+ "verified_result": null
+ }
]
},
"version": "0.13.1+ibm.62.dss",
diff --git a/DA_Scale_24July.drawio.svg b/DA_Scale_24July.drawio.svg
new file mode 100644
index 00000000..132d533f
--- /dev/null
+++ b/DA_Scale_24July.drawio.svg
@@ -0,0 +1,4 @@
+
+
+
+
\ No newline at end of file
diff --git a/common-dev-assets b/common-dev-assets
index 6739b3a0..abf631a1 160000
--- a/common-dev-assets
+++ b/common-dev-assets
@@ -1 +1 @@
-Subproject commit 6739b3a089aa08a072dd83c8b594311e42fc96d4
+Subproject commit abf631a16a48a308e609896937e1eed16b4aae4e
diff --git a/cra-config.yaml b/cra-config.yaml
index 3e617358..757e7945 100644
--- a/cra-config.yaml
+++ b/cra-config.yaml
@@ -8,3 +8,12 @@ CRA_TARGETS:
TF_VAR_ssh_keys: "[\"geretain-hpc\"]"
TF_VAR_remote_allowed_ips: "[\"49.207.216.50\"]"
TF_VAR_app_center_gui_password: "Craconfig@123" #pragma: allowlist secret
+ - CRA_TARGET: "solutions/scale"
+ CRA_IGNORE_RULES_FILE: "cra-tf-validate-ignore-rules.json"
+    PROFILE_ID: "48279384-3d29-4089-8259-8ed354774b4a" # SCC profile ID (currently set to the 'CIS IBM Cloud Foundations Benchmark' profile, version 1.1.0).
+ CRA_ENVIRONMENT_VARIABLES:
+ TF_VAR_ssh_keys: "[\"geretain-hpc\"]"
+ TF_VAR_remote_allowed_ips: "[\"49.207.216.50\"]"
+ TF_VAR_storage_gui_username: "storagegui"
+ TF_VAR_storage_type: "evaluation"
+ TF_VAR_storage_gui_password: "Stor@ge1234" #pragma: allowlist secret
diff --git a/datasource.tf b/datasource.tf
index 7953cf00..23de1886 100644
--- a/datasource.tf
+++ b/datasource.tf
@@ -33,24 +33,25 @@ data "ibm_is_subnet" "subnet" {
# name = var.existing_resource_group
# }
-data "ibm_is_subnet" "existing_cluster_subnets" {
- count = var.vpc_name != null && var.cluster_subnet_id != null ? 1 : 0
- identifier = var.cluster_subnet_id
+data "ibm_is_subnet" "existing_compute_subnets" {
+ count = var.vpc_name != null && var.compute_subnet_id != null ? 1 : 0
+ identifier = var.compute_subnet_id
}
+
data "ibm_is_subnet" "existing_storage_subnets" {
- count = var.vpc_name != null && var.storage_subnets != null ? 1 : 0
- name = var.storage_subnets[count.index]
+ count = var.vpc_name != null && var.storage_subnet_id != null ? 1 : 0
+ identifier = var.storage_subnet_id
}
data "ibm_is_subnet" "existing_protocol_subnets" {
- count = var.vpc_name != null && var.protocol_subnets != null ? 1 : 0
- name = var.protocol_subnets[count.index]
+ count = var.vpc_name != null && var.protocol_subnet_id != null ? 1 : 0
+ identifier = var.protocol_subnet_id
}
data "ibm_is_subnet" "existing_client_subnets" {
- count = var.vpc_name != null && var.client_subnets != null ? 1 : 0
- name = var.client_subnets[count.index]
+ count = var.vpc_name != null && var.client_subnet_id != null ? 1 : 0
+ identifier = var.client_subnet_id
}
data "ibm_is_subnet" "existing_login_subnets" {
@@ -64,8 +65,8 @@ data "ibm_is_ssh_key" "ssh_keys" {
}
data "ibm_is_subnet" "compute_subnet_crn" {
- count = var.vpc_name != null && var.cluster_subnet_id != null ? 1 : 0
- identifier = local.compute_subnet_id
+ count = var.vpc_name != null && var.compute_subnet_id != null ? 1 : 0
+ identifier = local.compute_subnet
}
data "ibm_is_instance_profile" "compute_profile" {
@@ -77,7 +78,7 @@ data "ibm_is_instance_profile" "storage_profile" {
}
data "ibm_is_bare_metal_server_profile" "storage_bms_profile" {
- count = var.scheduler == "Scale" ? 1 : 0
+ count = var.scheduler == "Scale" && var.storage_type == "persistent" ? 1 : 0
name = local.storage_bms_profile[0]
}
@@ -90,12 +91,27 @@ data "ibm_is_instance_profile" "protocol_profile" {
name = local.protocol_vsi_profile[0]
}
+data "ibm_is_bare_metal_server_profile" "protocol_bm_profile" {
+ count = local.ces_server_type == true && (local.scale_ces_enabled == true && var.colocate_protocol_instances == false) ? 1 : 0
+ name = local.protocol_vsi_profile[0]
+}
+
data "ibm_is_subnet_reserved_ips" "protocol_subnet_reserved_ips" {
- count = local.scale_ces_enabled == true ? 1 : 0
- subnet = local.protocol_subnet_id
+ count = var.enable_deployer == false && local.scale_ces_enabled == true ? 1 : 0
+ subnet = local.protocol_subnet
}
data "ibm_is_instance_profile" "afm_server_profile" {
count = local.afm_server_type == false ? 1 : 0
name = local.afm_vsi_profile[0]
}
+
+data "ibm_is_bare_metal_server_profile" "afm_bm_profile" {
+ count = local.afm_server_type == true ? 1 : 0
+ name = local.afm_vsi_profile[0]
+}
+
+data "ibm_is_security_group" "login_security_group" {
+ count = var.login_security_group_name != null ? 1 : 0
+ name = var.login_security_group_name
+}
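
Every lookup in datasource.tf follows the same guard: the data source is created with count = 1 only when the caller supplies an existing name or ID, and consumers read it through a splat expression so the reference stays valid when the count is 0. A minimal sketch of the pattern, with hypothetical variable and local names:

data "ibm_is_security_group" "example" {
  count = var.example_security_group_name != null ? 1 : 0
  name  = var.example_security_group_name
}

locals {
  # The splat yields [] when count is 0, so indexing is guarded by length().
  example_sg_ids = data.ibm_is_security_group.example[*].id
  example_sg_id  = length(local.example_sg_ids) > 0 ? local.example_sg_ids[0] : null
}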
diff --git a/ibm_catalog.json b/ibm_catalog.json
index 8bd5af3e..51db9a49 100644
--- a/ibm_catalog.json
+++ b/ibm_catalog.json
@@ -5,16 +5,15 @@
"label": "IBM Spectrum LSF",
"product_kind": "solution",
"tags": [
- "ibm_created",
- "target_terraform",
- "terraform",
- "solution",
"Deployable Architecture",
"DA",
"LSF",
"Spectrum LSF",
+ "ibm_created",
+ "target_terraform",
+ "terraform",
"reference_architecture",
- "converged_infra"
+ "solution"
],
"keywords": [
"LSF",
@@ -48,6 +47,7 @@
"name": "Cluster-with-LSF",
"install_type": "fullstack",
"working_directory": "solutions/lsf",
+ "terraform_version": "1.10.5",
"compliance": {
"authority": "scc-v3",
"profiles": [
@@ -216,7 +216,7 @@
]
},
{
- "displayname": "Syndney 3",
+ "displayname": "Sydney 3",
"value": [
"au-syd-3"
]
@@ -256,6 +256,42 @@
"value": [
"br-sao-3"
]
+ },
+ {
+ "displayname": "Montreal 1",
+ "value": [
+ "ca-mon-1"
+ ]
+ },
+ {
+ "displayname": "Montreal 2",
+ "value": [
+ "ca-mon-2"
+ ]
+ },
+ {
+ "displayname": "Montreal 3",
+ "value": [
+ "ca-mon-3"
+ ]
+ },
+ {
+ "displayname": "Madrid 1",
+ "value": [
+ "eu-es-1"
+ ]
+ },
+ {
+ "displayname": "Madrid 2",
+ "value": [
+ "eu-es-2"
+ ]
+ },
+ {
+ "displayname": "Madrid 3",
+ "value": [
+ "eu-es-3"
+ ]
}
]
},
@@ -378,10 +414,6 @@
"displayname": "basic",
"value": "basic"
},
- {
- "displayname": "lite",
- "value": "lite"
- },
{
"displayname": "standardv2",
"value": "standardv2"
@@ -433,7 +465,7 @@
"key": "vpc_name"
},
{
- "key": "cluster_subnet_id"
+ "key": "compute_subnet_id"
},
{
"key": "login_subnet_id"
@@ -571,7 +603,7 @@
{
"key": "ldap_instance",
"type": "array",
- "default_value": "[\n {\n \"profile\": \"cx2-2x4\",\n \"image\": \"ibm-ubuntu-22-04-5-minimal-amd64-3\"\n }\n]",
+ "default_value": "[\n {\n \"profile\": \"cx2-2x4\",\n \"image\": \"ibm-ubuntu-22-04-5-minimal-amd64-5\"\n }\n]",
"required": false,
"custom_config": {
"type": "json_editor",
@@ -656,12 +688,6 @@
"crn:v1:bluemix:public:iam::::role:Administrator"
]
},
- {
- "service_name": "is.share",
- "role_crns": [
- "crn:v1:bluemix:public:iam::::role:Editor"
- ]
- },
{
"service_name": "All Identity and Access enabled services",
"role_crns": [
@@ -686,13 +712,7 @@
"role_crns": [
"crn:v1:bluemix:public:iam::::role:Editor"
],
- "service_name": "is.vpc"
- },
- {
- "service_name": "is.flow-log-collector",
- "role_crns": [
- "crn:v1:bluemix:public:iam::::role:Editor"
- ]
+ "service_name": "VPC Infrastructure Services"
},
{
"service_name": "sysdig-monitor",
@@ -751,6 +771,853 @@
}
}
]
+ },
+ {
+ "name": "deploy-arch-ibm-storage-scale",
+ "label": "IBM Storage Scale",
+ "product_kind": "solution",
+ "tags": [
+ "Deployable Architecture",
+ "DA",
+ "HPC",
+ "IBM Storage Scale",
+ "ibm_created",
+ "target_terraform",
+ "terraform",
+ "reference_architecture",
+ "solution"
+ ],
+ "keywords": [
+ "scale",
+ "vpc",
+ "DA",
+ "Deployable Architecture",
+ "terraform",
+ "solution"
+ ],
+      "short_description": "Deploy your HPC cluster with IBM Storage Scale for a high-performance, highly available, clustered file system.",
+      "long_description": "**Before you begin deploying IBM Storage Scale, make sure that you meet the prerequisites listed in [the step-by-step guide](https://cloud.ibm.com/docs/allowlist/hpc-service?topic=hpc-service-before-you-begin-deploying).**\n\nIBM Storage Scale is a deployable architecture where you can deploy a dedicated Storage Scale cluster. Storage Scale supports the configuration of both compute and storage nodes, allowing you to build a complete, end-to-end storage cluster.",
+ "provider_name": "IBM",
+ "offering_docs_url": "https://cloud.ibm.com/docs/allowlist/hpc-service?topic=hpc-service-overview",
+ "features": [
+ {
+ "title": "Cluster file system",
+          "description": "IBM Storage Scale is a clustered file system that provides concurrent access to a single file system or a set of file systems from multiple nodes. It is designed for high-performance, scalable storage solutions, particularly for I/O-intensive applications and large-scale data environments."
+ },
+ {
+ "title": "Data intensive workflows",
+          "description": "With its high-performance, distributed file system, IBM Storage Scale accelerates data-intensive workflows by providing fast, concurrent access to massive datasets."
+ },
+ {
+ "title": "Unified storage for files and objects",
+ "description": "Unified storage allows simultaneous access to the same data as both files and objects, simplifying data management and enabling seamless workflows across traditional and cloud-native applications."
+ }
+ ],
+ "flavors": [
+ {
+ "label": "IBM Storage Scale",
+ "name": "Cluster-with-Scale",
+ "install_type": "fullstack",
+ "working_directory": "solutions/scale",
+ "terraform_version": "1.10.5",
+ "compliance": {
+ "authority": "scc-v3",
+ "profiles": [
+ {
+ "profile_name": "IBM Cloud Framework for Financial Services",
+ "profile_version": "1.5.0"
+ }
+ ]
+ },
+ "release_notes_url": "https://cloud.ibm.com/docs/allowlist/hpc-service?topic=hpc-service-release-notes",
+ "configuration": [
+ {
+ "key": "ibmcloud_api_key",
+ "required": true
+ },
+ {
+ "key": "zones",
+ "required": true,
+ "default_value": [
+ "us-east-1"
+ ],
+ "options": [
+ {
+ "displayname": "Washington DC 1",
+ "value": [
+ "us-east-1"
+ ]
+ },
+ {
+ "displayname": "Washington DC 2",
+ "value": [
+ "us-east-2"
+ ]
+ },
+ {
+ "displayname": "Washington DC 3",
+ "value": [
+ "us-east-3"
+ ]
+ },
+ {
+ "displayname": "Frankfurt 1",
+ "value": [
+ "eu-de-1"
+ ]
+ },
+ {
+ "displayname": "Frankfurt 2",
+ "value": [
+ "eu-de-2"
+ ]
+ },
+ {
+ "displayname": "Frankfurt 3",
+ "value": [
+ "eu-de-3"
+ ]
+ },
+ {
+ "displayname": "Dallas 1",
+ "value": [
+ "us-south-1"
+ ]
+ },
+ {
+ "displayname": "Dallas 2",
+ "value": [
+ "us-south-2"
+ ]
+ },
+ {
+ "displayname": "Dallas 3",
+ "value": [
+ "us-south-3"
+ ]
+ },
+ {
+ "displayname": "Toronto 1",
+ "value": [
+ "ca-tor-1"
+ ]
+ },
+ {
+ "displayname": "Toronto 2",
+ "value": [
+ "ca-tor-2"
+ ]
+ },
+ {
+ "displayname": "Toronto 3",
+ "value": [
+ "ca-tor-3"
+ ]
+ },
+ {
+ "displayname": "Tokyo 1",
+ "value": [
+ "jp-tok-1"
+ ]
+ },
+ {
+ "displayname": "Tokyo 2",
+ "value": [
+ "jp-tok-2"
+ ]
+ },
+ {
+ "displayname": "Tokyo 3",
+ "value": [
+ "jp-tok-3"
+ ]
+ },
+ {
+ "displayname": "London 1",
+ "value": [
+ "eu-gb-1"
+ ]
+ },
+ {
+ "displayname": "London 2",
+ "value": [
+ "eu-gb-2"
+ ]
+ },
+ {
+ "displayname": "London 3",
+ "value": [
+ "eu-gb-3"
+ ]
+ },
+ {
+ "displayname": "Sydney 1",
+ "value": [
+ "au-syd-1"
+ ]
+ },
+ {
+ "displayname": "Sydney 2",
+ "value": [
+ "au-syd-2"
+ ]
+ },
+ {
+ "displayname": "Sydney 3",
+ "value": [
+ "au-syd-3"
+ ]
+ },
+ {
+ "displayname": "Osaka 1",
+ "value": [
+ "jp-osa-1"
+ ]
+ },
+ {
+ "displayname": "Osaka 2",
+ "value": [
+ "jp-osa-2"
+ ]
+ },
+ {
+ "displayname": "Osaka 3",
+ "value": [
+ "jp-osa-3"
+ ]
+ },
+ {
+ "displayname": "Sao Paulo 1",
+ "value": [
+ "br-sao-1"
+ ]
+ },
+ {
+ "displayname": "Sao Paulo 2",
+ "value": [
+ "br-sao-2"
+ ]
+ },
+ {
+ "displayname": "Sao Paulo 3",
+ "value": [
+ "br-sao-3"
+ ]
+ },
+ {
+ "displayname": "Montreal 1",
+ "value": [
+ "ca-mon-1"
+ ]
+ },
+ {
+ "displayname": "Montreal 2",
+ "value": [
+ "ca-mon-2"
+ ]
+ },
+ {
+ "displayname": "Montreal 3",
+ "value": [
+ "ca-mon-3"
+ ]
+ },
+ {
+ "displayname": "Madrid 1",
+ "value": [
+ "eu-es-1"
+ ]
+ },
+ {
+ "displayname": "Madrid 2",
+ "value": [
+ "eu-es-2"
+ ]
+ },
+ {
+ "displayname": "Madrid 3",
+ "value": [
+ "eu-es-3"
+ ]
+ }
+ ]
+ },
+ {
+ "key": "ssh_keys",
+ "type": "array",
+ "required": true,
+ "custom_config": {
+ "type": "vpc_ssh_key",
+ "grouping": "deployment",
+ "original_grouping": "deployment",
+ "config_constraints": {
+ "selection": "multi_select",
+ "valueType": "name"
+ }
+ }
+ },
+ {
+ "key": "remote_allowed_ips",
+ "required": true
+ },
+ {
+ "key": "storage_gui_username",
+ "required": true
+ },
+ {
+ "key": "storage_gui_password",
+ "required": true
+ },
+ {
+ "key": "existing_resource_group",
+ "required": true
+ },
+ {
+ "key": "cluster_prefix"
+ },
+ {
+ "key": "storage_type",
+ "default_value": "scratch",
+ "options": [
+ {
+ "displayname": "scratch",
+ "value": "scratch"
+ },
+ {
+ "displayname": "persistent",
+ "value": "persistent"
+ },
+ {
+ "displayname": "evaluation",
+ "value": "evaluation"
+ }
+ ]
+ },
+ {
+ "key": "ibm_customer_number"
+ },
+ {
+ "key": "vpc_cidr"
+ },
+ {
+ "key": "login_subnets_cidr"
+ },
+ {
+ "key": "compute_subnets_cidr"
+ },
+ {
+ "key": "storage_subnets_cidr"
+ },
+ {
+ "key": "client_subnets_cidr"
+ },
+ {
+ "key": "protocol_subnets_cidr"
+ },
+ {
+ "key": "enable_vpc_flow_logs"
+ },
+ {
+ "key": "skip_flowlogs_s2s_auth_policy"
+ },
+ {
+ "key": "vpc_name"
+ },
+ {
+ "key": "login_subnet_id"
+ },
+ {
+ "key": "compute_subnet_id"
+ },
+ {
+ "key": "storage_subnet_id"
+ },
+ {
+ "key": "protocol_subnet_id"
+ },
+ {
+ "key": "client_subnet_id"
+ },
+ {
+ "key": "enable_sg_validation"
+ },
+ {
+ "key": "login_security_group_name"
+ },
+ {
+ "key": "storage_security_group_name"
+ },
+ {
+ "key": "compute_security_group_name"
+ },
+ {
+ "key": "client_security_group_name"
+ },
+ {
+ "key": "gklm_security_group_name"
+ },
+ {
+ "key": "ldap_security_group_name"
+ },
+ {
+ "key": "dns_domain_names"
+ },
+ {
+ "key": "dns_instance_id"
+ },
+ {
+ "key": "dns_custom_resolver_id"
+ },
+ {
+ "key": "scale_management_vsi_profile"
+ },
+ {
+ "key": "bastion_instance",
+ "type": "object",
+ "default_value": "{\n \"image\": \"ibm-ubuntu-22-04-5-minimal-amd64-5\",\n \"profile\": \"cx2-4x8\"\n}",
+ "required": false,
+ "custom_config": {
+ "type": "json_editor",
+ "grouping": "deployment",
+ "original_grouping": "deployment",
+ "config_constraints": {
+ "type": "mixed"
+ }
+ }
+ },
+ {
+ "key": "deployer_instance",
+ "type": "object",
+ "default_value": "{\n \"image\": \"hpcc-scale-deployer-v1\",\n \"profile\": \"bx2-8x32\"\n}",
+ "required": false,
+ "custom_config": {
+ "type": "json_editor",
+ "grouping": "deployment",
+ "original_grouping": "deployment",
+ "config_constraints": {
+ "type": "mixed"
+ }
+ }
+ },
+ {
+ "key": "client_instances",
+ "type": "object",
+ "default_value": "[\n {\n \"count\": 2,\n \"profile\": \"cx2-2x4\",\n \"image\": \"ibm-redhat-8-10-minimal-amd64-6\"\n }\n]",
+ "required": false,
+ "custom_config": {
+ "type": "json_editor",
+ "grouping": "deployment",
+ "original_grouping": "deployment",
+ "config_constraints": {
+ "type": "mixed"
+ }
+ }
+ },
+ {
+ "key": "compute_gui_username"
+ },
+ {
+ "key": "compute_gui_password"
+ },
+ {
+ "key": "compute_instances",
+ "type": "object",
+ "default_value": "[\n {\n \"count\": 0,\n \"profile\": \"bx2-2x8\",\n \"filesystem\": \"/gpfs/fs1\",\n \"image\": \"hpcc-scale5232-rhel810-v1\"\n }\n]",
+ "required": false,
+ "custom_config": {
+ "type": "json_editor",
+ "grouping": "deployment",
+ "original_grouping": "deployment",
+ "config_constraints": {
+ "type": "mixed"
+ }
+ }
+ },
+ {
+ "key": "storage_instances",
+ "type": "object",
+ "default_value": "[\n {\n \"count\": 2,\n \"profile\": \"bx2d-32x128\",\n \"filesystem\": \"/gpfs/fs1\",\n \"image\": \"hpcc-scale5232-rhel810-v1\"\n }\n]",
+ "required": false,
+ "custom_config": {
+ "type": "json_editor",
+ "grouping": "deployment",
+ "original_grouping": "deployment",
+ "config_constraints": {
+ "type": "mixed"
+ }
+ }
+ },
+ {
+ "key": "storage_baremetal_server",
+ "type": "object",
+ "default_value": "[\n {\n \"count\": 0,\n \"profile\": \"cx2d-metal-96x192\",\n \"filesystem\": \"/gpfs/fs1\",\n \"image\": \"hpcc-scale5232-rhel810-v1\"\n }\n]",
+ "required": false,
+ "custom_config": {
+ "type": "json_editor",
+ "grouping": "deployment",
+ "original_grouping": "deployment",
+ "config_constraints": {
+ "type": "mixed"
+ }
+ }
+ },
+ {
+ "key": "bms_boot_drive_encryption"
+ },
+ {
+ "key": "tie_breaker_baremetal_server_profile"
+ },
+ {
+ "key": "afm_instances",
+ "type": "object",
+ "default_value": "[\n {\n \"count\": 0,\n \"profile\": \"bx2-32x128\"\n }\n]",
+ "required": false,
+ "custom_config": {
+ "type": "json_editor",
+ "grouping": "deployment",
+ "original_grouping": "deployment",
+ "config_constraints": {
+ "type": "mixed"
+ }
+ }
+ },
+ {
+ "key": "afm_cos_config",
+ "type": "object",
+ "default_value": "[\n {\n \"afm_fileset\": \"afm_fileset\",\n \"mode\": \"iw\",\n \"cos_instance\": \"\",\n \"bucket_name\": \"\",\n \"bucket_region\": \"us-south\",\n \"cos_service_cred_key\": \"\",\n \"bucket_storage_class\": \"smart\",\n \"bucket_type\": \"region_location\"\n }\n]",
+ "required": false,
+ "custom_config": {
+ "type": "json_editor",
+ "grouping": "deployment",
+ "original_grouping": "deployment",
+ "config_constraints": {
+ "type": "mixed"
+ }
+ }
+ },
+ {
+ "key": "protocol_instances",
+ "type": "object",
+ "default_value": "[\n {\n \"count\": 2,\n \"profile\": \"cx2-32x64\"\n }\n]",
+ "required": false,
+ "custom_config": {
+ "type": "json_editor",
+ "grouping": "deployment",
+ "original_grouping": "deployment",
+ "config_constraints": {
+ "type": "mixed"
+ }
+ }
+ },
+ {
+ "key": "colocate_protocol_instances"
+ },
+ {
+ "key": "scale_encryption_enabled"
+ },
+ {
+ "key": "key_protect_instance_id"
+ },
+ {
+ "key": "scale_encryption_type",
+ "default_value": "null",
+ "options": [
+ {
+ "displayname": "null",
+ "value": "null"
+ },
+ {
+ "displayname": "key_protect",
+ "value": "key_protect"
+ },
+ {
+ "displayname": "gklm",
+ "value": "gklm"
+ }
+ ]
+ },
+ {
+ "key": "gklm_instances",
+ "type": "object",
+ "default_value": "[\n {\n \"count\": 2,\n \"profile\": \"bx2-2x8\",\n \"image\": \"hpcc-scale-gklm4202-v2-5-3\"\n }\n]",
+ "required": false,
+ "custom_config": {
+ "type": "json_editor",
+ "grouping": "deployment",
+ "original_grouping": "deployment",
+ "config_constraints": {
+ "type": "mixed"
+ }
+ }
+ },
+ {
+ "key": "scale_encryption_admin_password"
+ },
+ {
+ "key": "filesystem_config",
+ "type": "object",
+ "default_value": "[\n {\n \"filesystem\": \"/gpfs/fs1\",\n \"block_size\": \"4M\",\n \"default_data_replica\": 2,\n \"default_metadata_replica\": 2,\n \"max_data_replica\": 3,\n \"max_metadata_replica\": 3 }\n]",
+ "required": false,
+ "custom_config": {
+ "type": "json_editor",
+ "grouping": "deployment",
+ "original_grouping": "deployment",
+ "config_constraints": {
+ "type": "mixed"
+ }
+ }
+ },
+ {
+ "key": "filesets_config",
+ "type": "object",
+ "default_value": "[\n {\n \"client_mount_path\": \"/mnt/scale/tools\",\n \"quota\": 0\n },\n {\n \"client_mount_path\": \"/mnt/scale/data\",\n \"quota\": 0\n }\n]",
+ "required": false,
+ "custom_config": {
+ "type": "json_editor",
+ "grouping": "deployment",
+ "original_grouping": "deployment",
+ "config_constraints": {
+ "type": "mixed"
+ }
+ }
+ },
+ {
+ "key": "enable_cos_integration"
+ },
+ {
+ "key": "cos_instance_name"
+ },
+ {
+ "key": "enable_ldap"
+ },
+ {
+ "key": "ldap_basedns"
+ },
+ {
+ "key": "ldap_server"
+ },
+ {
+ "key": "ldap_admin_password"
+ },
+ {
+ "key": "ldap_user_name"
+ },
+ {
+ "key": "ldap_user_password"
+ },
+ {
+ "key": "ldap_instance",
+ "type": "array",
+ "default_value": "[\n {\n \"profile\": \"cx2-2x4\",\n \"image\": \"ibm-ubuntu-22-04-5-minimal-amd64-5\"\n }\n]",
+ "required": false,
+ "custom_config": {
+ "type": "json_editor",
+ "grouping": "deployment",
+ "original_grouping": "deployment",
+ "config_constraints": {
+ "type": "mixed"
+ }
+ }
+ },
+ {
+ "key": "ldap_server_cert"
+ },
+ {
+ "key": "observability_atracker_enable"
+ },
+ {
+ "key": "observability_atracker_target_type"
+ },
+ {
+ "key": "sccwp_enable"
+ },
+ {
+ "key": "sccwp_service_plan",
+ "default_value": "free-trial",
+ "options": [
+ {
+ "displayname": "free-trial",
+ "value": "free-trial"
+ },
+ {
+ "displayname": "graduated-tier",
+ "value": "graduated-tier"
+ }
+ ]
+ },
+ {
+ "key": "cspm_enabled"
+ },
+ {
+ "key": "app_config_plan",
+ "default_value": "basic",
+ "options": [
+ {
+ "displayname": "basic",
+ "value": "basic"
+ },
+ {
+ "displayname": "standardv2",
+ "value": "standardv2"
+ },
+ {
+ "displayname": "enterprise",
+ "value": "enterprise"
+ }
+ ]
+ },
+ {
+ "key": "existing_bastion_instance_name"
+ },
+ {
+ "key": "existing_bastion_instance_public_ip"
+ },
+ {
+ "key": "existing_bastion_security_group_id"
+ },
+ {
+ "key": "existing_bastion_ssh_private_key",
+ "type": "multiline_secure_value",
+ "required": false,
+ "custom_config": {
+ "grouping": "deployment",
+ "original_grouping": "deployment",
+ "type": "multiline_secure_value"
+ }
+ },
+ {
+ "hidden": true,
+ "key": "TF_VERSION"
+ },
+ {
+ "hidden": true,
+ "key": "TF_PARALLELISM"
+ },
+ {
+ "hidden": true,
+ "key": "override"
+ },
+ {
+ "hidden": true,
+ "key": "override_json_string"
+ }
+ ],
+ "iam_permissions": [
+ {
+ "role_crns": [
+ "crn:v1:bluemix:public:iam::::serviceRole:Manager"
+ ],
+ "service_name": "cloud-object-storage"
+ },
+ {
+ "role_crns": [
+ "crn:v1:bluemix:public:iam::::serviceRole:Manager",
+ "crn:v1:bluemix:public:iam::::role:Editor"
+ ],
+ "service_name": "dns-svcs"
+ },
+ {
+ "role_crns": [
+ "crn:v1:bluemix:public:iam::::role:Administrator"
+ ],
+ "service_name": "iam-identity"
+ },
+ {
+ "service_name": "kms",
+ "role_crns": [
+ "crn:v1:bluemix:public:iam::::serviceRole:Manager",
+ "crn:v1:bluemix:public:iam::::role:ConfigReader"
+ ]
+ },
+ {
+ "service_name": "Security and Compliance Center Workload Protection",
+ "role_crns": [
+ "crn:v1:bluemix:public:iam::::role:Administrator"
+ ]
+ },
+ {
+ "role_crns": [
+ "crn:v1:bluemix:public:iam::::role:Administrator"
+ ],
+ "service_name": "VPC Infrastructure Services"
+ },
+ {
+ "service_name": "All Identity and Access enabled services",
+ "role_crns": [
+ "crn:v1:bluemix:public:iam::::serviceRole:Manager",
+ "crn:v1:bluemix:public:iam::::role:Administrator"
+ ]
+ },
+ {
+ "service_name": "apprapp",
+ "role_crns": [
+ "crn:v1:bluemix:public:iam::::serviceRole:Manager",
+ "crn:v1:bluemix:public:iam::::role:Administrator"
+ ]
+ },
+ {
+ "service_name": "iam-identity",
+ "role_crns": [
+ "crn:v1:bluemix:public:iam::::role:Administrator"
+ ]
+ },
+ {
+ "service_name": "secrets-manager",
+ "role_crns": [
+ "crn:v1:bluemix:public:iam::::serviceRole:Manager",
+ "crn:v1:bluemix:public:iam::::role:Administrator"
+ ]
+ }
+ ],
+ "architecture": {
+ "descriptions": "",
+ "features": [
+ {
+ "title": "Separate VPC for HPC workloads",
+ "description": "Yes"
+ },
+ {
+ "title": "Virtual Server Instances for every subnet",
+ "description": "Yes"
+ },
+ {
+ "title": "Increases security with Key Protect",
+ "description": "Yes"
+ },
+ {
+ "title": "Reduces failure events by using multizone regions",
+ "description": "No"
+ },
+ {
+ "title": "Collects and stores Internet Protocol (IP) traffic information with Activity Tracker and Flow Logs",
+ "description": "Yes"
+ },
+ {
+ "title": "Securely connects to multiple networks with a site-to-site virtual private network",
+ "description": "Yes"
+ },
+ {
+ "title": "Simplifies risk management and demonstrates regulatory compliance with Financial Services",
+ "description": "Yes"
+ },
+ {
+ "title": "Uses Floating IP address for access through the public internet",
+ "description": "No"
+ }
+ ],
+ "diagrams": [
+ {
+ "diagram": {
+ "caption": "IBM Storage Scale",
+ "url": "https://raw.githubusercontent.com/terraform-ibm-modules/terraform-ibm-hpc/refs/heads/26-aug-scale/DA_Scale_24July.drawio.svg",
+ "type": "image/svg+xml"
+ },
+ "description": "The HPC variation of the deployable architecture is based on the IBM Cloud for Financial Services reference architecture. The architecture creates a customizable and secure infrastructure, with virtual servers, to run your workloads with a Virtual Private Cloud (VPC) in multizone regions."
+ }
+ ]
+ }
+ }
+ ]
}
]
}
diff --git a/locals.tf b/locals.tf
index ce3bc585..d6aded48 100644
--- a/locals.tf
+++ b/locals.tf
@@ -3,15 +3,17 @@ locals {
# Region and Zone calculations
region = join("-", slice(split("-", var.zones[0]), 0, 2))
+  # Scheduler name converted to lowercase (used to build per-scheduler paths)
+ scheduler_lowcase = var.scheduler != null ? lower(var.scheduler) : "null"
+
# SSH key calculations
# Combining the common ssh keys with host specific ssh keys
- gklm_instance_key_pair = distinct(concat(coalesce(var.gklm_instance_key_pair, []), coalesce(var.ssh_keys, [])))
- ldap_instance_key_pair = distinct(concat(coalesce(var.ldap_instance_key_pair, []), coalesce(var.ssh_keys, [])))
- ssh_keys = distinct(coalesce(var.ssh_keys, []))
- key_management = var.key_management == "null" ? null : var.key_management
- ldap_server = var.ldap_server == null ? "null" : var.ldap_server
- ldap_admin_password = var.ldap_admin_password == null ? "" : var.ldap_admin_password
- ldap_server_cert = var.ldap_server_cert == null ? "null" : var.ldap_server_cert
+ # ldap_instance_key_pair = distinct(concat(coalesce(var.ldap_instance_key_pair, []), coalesce(var.ssh_keys, [])))
+ ssh_keys = distinct(coalesce(var.ssh_keys, []))
+ key_management = var.key_management == "null" ? null : var.key_management
+ ldap_server = var.ldap_server == null ? "null" : var.ldap_server
+ ldap_admin_password = var.ldap_admin_password == null ? "" : var.ldap_admin_password
+ ldap_server_cert = var.ldap_server_cert == null ? "null" : var.ldap_server_cert
}
# locals needed for deployer
@@ -21,47 +23,76 @@ locals {
vpc_name = var.vpc_name == null ? one(module.landing_zone.vpc_name) : var.vpc_name
kms_encryption_enabled = local.key_management != null ? true : false
boot_volume_encryption_key = local.key_management != null && var.enable_deployer ? one(module.landing_zone.boot_volume_encryption_key)["crn"] : null
- existing_kms_instance_guid = local.key_management != null ? module.landing_zone.key_management_guid : null
- cos_data = module.landing_zone.cos_buckets_data
+ existing_kms_instance_guid = local.key_management != null || (var.scale_encryption_enabled && var.scale_encryption_type == "key_protect" && var.key_protect_instance_id == null) ? module.landing_zone.key_management_guid : null
+
+ encryption_filesystem_mountpoint = jsonencode(
+ var.scale_encryption_type == "key_protect" ? (
+ try(var.storage_instances[0].filesystem, "") != "" ?
+ element(
+ split("/", var.storage_instances[0].filesystem),
+ length(split("/", var.storage_instances[0].filesystem)) - 1
+ ) :
+ try(var.storage_servers[0].filesystem, "") != "" ?
+ element(
+ split("/", var.storage_servers[0].filesystem),
+ length(split("/", var.storage_servers[0].filesystem)) - 1
+ ) :
+ element(
+ split("/", var.filesystem_config[0].filesystem),
+ length(split("/", var.filesystem_config[0].filesystem)) - 1
+ )
+ ) : ""
+ )
+
+  filesystem_mountpoint = var.storage_type == "persistent" ? (
+    var.storage_servers[0]["filesystem"] != "" && var.storage_servers[0]["filesystem"] != null ? var.storage_servers[0]["filesystem"] : var.filesystem_config[0]["filesystem"]
+  ) : (
+    var.storage_instances[0]["filesystem"] != "" && var.storage_instances[0]["filesystem"] != null ? var.storage_instances[0]["filesystem"] : var.filesystem_config[0]["filesystem"]
+  )
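+  # encryption_filesystem_mountpoint keeps only the last "/" segment of the first
+  # configured filesystem (e.g. "/gpfs/fs1" -> "fs1"), preferring storage_instances,
+  # then storage_servers, then filesystem_config; filesystem_mountpoint keeps the
+  # full path, taken from storage_servers for persistent storage and from
+  # storage_instances otherwise. A hedged sketch of the segment extraction using
+  # Terraform's built-in basename() (fs_leaf is a hypothetical name):
+  #   fs_leaf = try(basename(var.storage_instances[0].filesystem), "")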
+
+ cos_data = module.landing_zone.cos_buckets_data
# Future use
  # When we implement the existing bastion concept, we need to implement the changes like below, which are already there in our LSF DA
# skip_iam_authorization_policy = true
# skip_iam_authorization_policy = var.bastion_instance_name != null ? false : local.skip_iam_authorization_policy
# Cluster node details:
- compute_instances = var.enable_deployer ? [] : flatten([module.landing_zone_vsi[0].management_vsi_data, module.landing_zone_vsi[0].compute_vsi_data])
- comp_mgmt_instances = var.enable_deployer ? [] : flatten([module.landing_zone_vsi[0].compute_management_vsi_data])
- storage_instances = var.enable_deployer ? [] : flatten([module.landing_zone_vsi[0].storage_vsi_data])
- storage_servers = var.enable_deployer ? [] : flatten([module.landing_zone_vsi[0].storage_bms_data])
- protocol_instances = var.enable_deployer ? [] : flatten([module.landing_zone_vsi[0].protocol_vsi_data])
- gklm_instances = var.enable_deployer ? [] : flatten([module.landing_zone_vsi[0].gklm_vsi_data])
- client_instances = var.enable_deployer ? [] : flatten([module.landing_zone_vsi[0].client_vsi_data])
- afm_instances = var.enable_deployer ? [] : flatten([module.landing_zone_vsi[0].afm_vsi_data])
- ldap_instances = var.enable_deployer ? [] : flatten([module.landing_zone_vsi[0].ldap_vsi_data])
- tie_brkr_instances = var.enable_deployer ? [] : flatten(module.landing_zone_vsi[0].storage_cluster_tie_breaker_vsi_data)
- strg_mgmt_instances = var.enable_deployer ? [] : flatten([module.landing_zone_vsi[0].storage_cluster_management_vsi])
- login_instance = var.enable_deployer ? [] : flatten(module.landing_zone_vsi[0].login_vsi_data)
+ compute_instances = var.enable_deployer ? [] : flatten([module.landing_zone_vsi[0].management_vsi_data, module.landing_zone_vsi[0].compute_vsi_data])
+ comp_mgmt_instances = var.enable_deployer ? [] : flatten([module.landing_zone_vsi[0].compute_management_vsi_data])
+ storage_instances = var.enable_deployer ? [] : flatten([module.landing_zone_vsi[0].storage_vsi_data])
+ storage_servers = var.enable_deployer ? [] : flatten([module.landing_zone_vsi[0].storage_bms_data])
+ storage_tie_brkr_bm = var.enable_deployer ? [] : flatten([module.landing_zone_vsi[0].storage_tie_breaker_bms_data])
+ protocol_instances = var.enable_deployer ? [] : flatten([module.landing_zone_vsi[0].protocol_vsi_data])
+ protocol_bm_instances = var.enable_deployer ? [] : flatten([module.landing_zone_vsi[0].protocol_bms_data])
+ gklm_instances = var.enable_deployer ? [] : flatten([module.landing_zone_vsi[0].gklm_vsi_data])
+ client_instances = var.enable_deployer ? [] : flatten([module.landing_zone_vsi[0].client_vsi_data])
+ afm_instances = var.enable_deployer ? [] : flatten([module.landing_zone_vsi[0].afm_vsi_data])
+ afm_bm_instances = var.enable_deployer ? [] : flatten([module.landing_zone_vsi[0].afm_bms_data])
+ ldap_instances = var.enable_deployer ? [] : flatten([module.landing_zone_vsi[0].ldap_vsi_data])
+ tie_brkr_instances = var.enable_deployer ? [] : flatten(module.landing_zone_vsi[0].storage_cluster_tie_breaker_vsi_data)
+ strg_mgmt_instances = var.enable_deployer ? [] : flatten([module.landing_zone_vsi[0].storage_cluster_management_vsi])
+ login_instance = var.enable_deployer ? [] : flatten(module.landing_zone_vsi[0].login_vsi_data)
+
+ storage_bm_name_with_vol_mapping = var.enable_deployer ? [] : flatten([module.landing_zone_vsi[0].storage_bm_name_with_vol_mapping])
+ storage_tie_breaker_bms_name_with_vol_mapping = var.enable_deployer ? [] : flatten([module.landing_zone_vsi[0].storage_tie_breaker_bms_name_with_vol_mapping])
management_instance_count = sum(var.management_instances[*]["count"])
storage_instance_count = var.storage_type == "persistent" ? sum(var.storage_servers[*]["count"]) : sum(var.storage_instances[*]["count"])
client_instance_count = sum(var.client_instances[*]["count"])
protocol_instance_count = sum(var.protocol_instances[*]["count"])
static_compute_instance_count = sum(var.static_compute_instances[*]["count"])
- # afm_instance_count = sum(var.afm_instances[*]["count"])
+ afm_instance_count = sum(var.afm_instances[*]["count"])
}
# locals needed for landing_zone_vsi
locals {
# dependency: landing_zone -> deployer -> landing_zone_vsi
- bastion_security_group_id = module.deployer.bastion_security_group_id
- bastion_public_key_content = module.deployer.bastion_public_key_content
- bastion_private_key_content = module.deployer.bastion_private_key_content
+ login_security_group_name_id = var.login_security_group_name != null ? data.ibm_is_security_group.login_security_group[*].id : []
+ bastion_security_group_id = var.login_security_group_name == null ? module.deployer.bastion_security_group_id : local.login_security_group_name_id[0]
+ bastion_public_key_content = module.deployer.bastion_public_key_content
+ bastion_private_key_content = module.deployer.bastion_private_key_content
deployer_hostname = var.enable_deployer ? flatten(module.deployer.deployer_vsi_data[*].list)[0].name : ""
deployer_ip = module.deployer.deployer_ip
# Existing subnets details
- existing_cluster_subnets = [
- for subnet in data.ibm_is_subnet.existing_cluster_subnets :
+ existing_compute_subnets = [
+ for subnet in data.ibm_is_subnet.existing_compute_subnets :
{
cidr = subnet.ipv4_cidr_block
id = subnet.id
@@ -111,18 +142,33 @@ locals {
]
# dependency: landing_zone -> landing_zone_vsi
- client_subnets = var.vpc_name != null && var.client_subnets != null ? local.existing_client_subnets : module.landing_zone.client_subnets
- cluster_subnets = var.vpc_name != null && var.cluster_subnet_id != null ? local.existing_cluster_subnets : module.landing_zone.compute_subnets
- storage_subnets = var.vpc_name != null && var.storage_subnets != null ? local.existing_storage_subnets : module.landing_zone.storage_subnets
- protocol_subnets = var.vpc_name != null && var.protocol_subnets != null ? local.existing_protocol_subnets : module.landing_zone.protocol_subnets
- login_subnets = var.vpc_name != null && var.login_subnet_id != null ? local.existing_login_subnets : module.landing_zone.bastion_subnets
-
- storage_subnet = [for subnet in local.storage_subnets : subnet.name]
- protocol_subnet = [for subnet in local.protocol_subnets : subnet.name]
- protocol_subnet_id = local.protocol_instance_count > 0 ? [for subnet in local.protocol_subnets : subnet.id][0] : ""
- cluster_subnet = [for subnet in local.cluster_subnets : subnet.id][0]
- client_subnet = [for subnet in local.client_subnets : subnet.name]
- login_subnet = [for subnet in local.login_subnets : subnet.id][0]
+ use_existing_client_subnets = var.vpc_name != null && var.client_subnet_id != null
+ use_existing_compute_subnets = var.vpc_name != null && var.compute_subnet_id != null
+ use_existing_storage_subnets = var.vpc_name != null && var.storage_subnet_id != null
+ use_existing_protocol_subnets = var.vpc_name != null && var.protocol_subnet_id != null
+ use_existing_login_subnets = var.vpc_name != null && var.login_subnet_id != null
+
+ client_subnets = (var.vpc_name == null ? module.landing_zone.client_subnets :
+ (local.use_existing_client_subnets ? local.existing_client_subnets : module.landing_zone.client_subnets))
+
+ compute_subnets = (var.vpc_name == null ? module.landing_zone.compute_subnets :
+ (local.use_existing_compute_subnets ? local.existing_compute_subnets : module.landing_zone.compute_subnets))
+
+ storage_subnets = (var.vpc_name == null ? module.landing_zone.storage_subnets :
+ (local.use_existing_storage_subnets ? local.existing_storage_subnets : module.landing_zone.storage_subnets))
+
+ protocol_subnets = (var.vpc_name == null ? module.landing_zone.protocol_subnets :
+ (local.use_existing_protocol_subnets ? local.existing_protocol_subnets : module.landing_zone.protocol_subnets))
+
+ login_subnets = (var.vpc_name == null ? module.landing_zone.bastion_subnets :
+ (local.use_existing_login_subnets ? local.existing_login_subnets : module.landing_zone.bastion_subnets))
+
+  # Resolve the first subnet ID for each tier ("" when the list is empty)
+ storage_subnet = length(local.storage_subnets) > 0 ? [for subnet in local.storage_subnets : subnet.id][0] : ""
+ protocol_subnet = length(local.protocol_subnets) > 0 ? [for subnet in local.protocol_subnets : subnet.id][0] : ""
+ compute_subnet = length(local.compute_subnets) > 0 ? [for subnet in local.compute_subnets : subnet.id][0] : ""
+ client_subnet = length(local.client_subnets) > 0 ? [for subnet in local.client_subnets : subnet.id][0] : ""
+ login_subnet = length(local.login_subnets) > 0 ? [for subnet in local.login_subnets : subnet.id][0] : ""
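+  # Selection order per tier: with no existing VPC, use the subnets created by
+  # landing_zone; with an existing VPC and an existing subnet ID, use the
+  # looked-up subnet; otherwise fall back to the landing_zone subnets. The
+  # *_subnet locals above then take the first ID, or "" when the list is empty.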
#boot_volume_encryption_key = local.key_management != null ? one(module.landing_zone.boot_volume_encryption_key)["crn"] : null
#skip_iam_authorization_policy = true
@@ -131,9 +177,11 @@ locals {
# locals needed for file-storage
locals {
# dependency: landing_zone_vsi -> file-share
- compute_subnet_id = (var.vpc_name == null && var.cluster_subnet_id == null ? local.cluster_subnets[0].id : (var.vpc_name != null && var.cluster_subnet_id != null ? [for subnet in data.ibm_is_subnet.existing_cluster_subnets : subnet.id][0] : (var.vpc_name != null && var.cluster_subnet_id == null ? local.cluster_subnets[0].id : "")))
+ compute_subnet_id = (var.enable_deployer && var.vpc_name != null && var.compute_subnet_id != null) ? local.existing_compute_subnets[0].id : ""
bastion_subnet_id = (var.enable_deployer && var.vpc_name != null && var.login_subnet_id != null) ? local.existing_login_subnets[0].id : ""
- subnet_id = (var.enable_deployer && var.vpc_name != null && var.cluster_subnet_id != null) ? local.existing_cluster_subnets[0].id : ""
+ protocol_subnet_id = (var.enable_deployer && var.vpc_name != null && var.protocol_subnet_id != null) ? local.existing_protocol_subnets[0].id : ""
+ client_subnet_id = (var.enable_deployer && var.vpc_name != null && var.client_subnet_id != null) ? local.existing_client_subnets[0].id : ""
+ storage_subnet_id = (var.enable_deployer && var.vpc_name != null && var.storage_subnet_id != null) ? local.existing_storage_subnets[0].id : ""
compute_security_group_id = var.enable_deployer ? [] : module.landing_zone_vsi[0].compute_sg_id
nfs_shares_map = {
@@ -142,10 +190,10 @@ locals {
if share.nfs_share != "" && share.nfs_share != null
}
- fileset_size_map = try({ for details in var.custom_file_shares : details.mount_path => details.size }, {})
+ fileset_size_map = try({ for details in var.filesets_config : details.client_mount_path => details.quota }, {})
# Original file share map from module
- original_map = var.enable_deployer ? {} : module.file_storage[0].name_mount_path_map
+ original_map = var.enable_deployer ? {} : var.scheduler == "LSF" ? module.file_storage[0].name_mount_path_map : {}
# Extract keyword-to-target mapping from file share names
keyword_to_target_map = var.enable_deployer ? {} : {
@@ -242,14 +290,14 @@ locals {
vpc_crn = var.vpc_name == null ? one(module.landing_zone.vpc_crn) : one(data.ibm_is_vpc.existing_vpc[*].crn)
# TODO: Fix existing subnet logic
#subnets_crn = var.vpc_name == null ? module.landing_zone.subnets_crn : ###
- existing_compute_subnet_crns = [for subnet in data.ibm_is_subnet.existing_cluster_subnets : subnet.crn]
+ existing_compute_subnet_crns = [for subnet in data.ibm_is_subnet.existing_compute_subnets : subnet.crn]
existing_storage_subnet_crns = [for subnet in data.ibm_is_subnet.existing_storage_subnets : subnet.crn]
existing_protocol_subnet_crns = [for subnet in data.ibm_is_subnet.existing_protocol_subnets : subnet.crn]
existing_client_subnet_crns = [for subnet in data.ibm_is_subnet.existing_client_subnets : subnet.crn]
existing_bastion_subnet_crns = [for subnet in data.ibm_is_subnet.existing_login_subnets : subnet.crn]
subnets_crn = concat(local.existing_compute_subnet_crns, local.existing_storage_subnet_crns, local.existing_protocol_subnet_crns, local.existing_client_subnet_crns, local.existing_bastion_subnet_crns)
- # subnets_crn = var.vpc_name == null && var.cluster_subnet_id == null ? module.landing_zone.subnets_crn : concat(local.existing_subnet_crns, module.landing_zone.subnets_crn)
- # subnets = flatten([local.cluster_subnets, local.storage_subnets, local.protocol_subnets])
+ # subnets_crn = var.vpc_name == null && var.compute_subnet_id == null ? module.landing_zone.subnets_crn : concat(local.existing_subnet_crns, module.landing_zone.subnets_crn)
+ # subnets = flatten([local.compute_subnets, local.storage_subnets, local.protocol_subnets])
# subnets_crns = data.ibm_is_subnet.itself[*].crn
# subnets_crn = module.landing_zone.subnets_crn
# boot_volume_encryption_key = local.key_management != null ? one(module.landing_zone.boot_volume_encryption_key)["crn"] : null
@@ -287,6 +335,56 @@ locals {
}
]
+ raw_bm_storage_servers_dns_record_details = [
+ for server in local.storage_servers : {
+ id = server.id
+ ipv4_address = server.ipv4_address
+ name = server.name
+ vni_id = server.bms_primary_vni_id
+ }
+ ]
+
+ raw_bm_tie_breaker_dns_record_details = [
+ for server in local.storage_tie_brkr_bm : {
+ id = server.id
+ ipv4_address = server.ipv4_address
+ name = server.name
+ vni_id = server.bms_primary_vni_id
+ }
+ ]
+
+ raw_bm_protocol_dns_record_details = [
+ for server in local.protocol_bm_instances : {
+ id = server.id
+ ipv4_address = server.ipv4_address
+ name = server.name
+ vni_id = server.bms_primary_vni_id
+ }
+ ]
+
+ raw_bm_afm_dns_record_details = [
+ for server in local.afm_bm_instances : {
+ id = server.id
+ ipv4_address = server.ipv4_address
+ name = server.name
+ vni_id = server.bms_primary_vni_id
+ }
+ ]
+
+ raw_compute_sec_vnic_dns_record_details = local.enable_sec_interface_compute ? [
+ for record in flatten([for details in local.compute_instances : details.secondary_network_interface_detail]) : {
+ ipv4_address = record.primary_ipv4_address
+ name = record.name
+ }
+ ] : []
+
+ raw_compute_mgmt_sec_vnic_dns_record_details = local.enable_sec_interface_compute ? [
+ for record in flatten([for details in local.comp_mgmt_instances : details.secondary_network_interface_detail]) : {
+ ipv4_address = record.primary_ipv4_address
+ name = record.name
+ }
+ ] : []
+
compute_dns_records = [
for instance in concat(local.compute_instances, local.comp_mgmt_instances, local.deployer_instances, local.login_instance) :
{
@@ -295,7 +393,7 @@ locals {
}
]
storage_dns_records = [
- for instance in concat(local.storage_instances, local.protocol_instances, local.afm_instances, local.tie_brkr_instances, local.strg_mgmt_instances, local.storage_servers) :
+ for instance in concat(local.storage_instances, local.protocol_instances, local.raw_bm_protocol_dns_record_details, local.afm_instances, local.raw_bm_afm_dns_record_details, local.tie_brkr_instances, local.strg_mgmt_instances, local.raw_bm_storage_servers_dns_record_details, local.raw_bm_tie_breaker_dns_record_details, local.raw_compute_sec_vnic_dns_record_details, local.raw_compute_mgmt_sec_vnic_dns_record_details) :
{
name = instance["name"]
rdata = instance["ipv4_address"]
@@ -322,14 +420,26 @@ locals {
compute_hosts = try([for name in local.compute_instances[*]["name"] : "${name}.${var.dns_domain_names["compute"]}"], [])
# storage_hosts = try([for name in local.storage_instances[*]["name"] : "${name}.${var.dns_domain_names["storage"]}"], [])
ldap_hosts = try([for instance in local.ldap_instances : instance["ipv4_address"]], [])
+ client_hosts = try([for instance in local.client_instances : instance["ipv4_address"]], [])
+ afm_hosts = try([for instance in local.afm_instances : instance["ipv4_address"]], [])
+ gklm_hosts = try([for instance in local.gklm_instances : instance["ipv4_address"]], [])
+ storage_hosts = try([for instance in local.storage_instances : instance["ipv4_address"]], [])
+ strg_mgmt_hosts = try([for instance in local.strg_mgmt_instances : instance["ipv4_address"]], [])
+ all_storage_hosts = concat(local.storage_hosts, local.strg_mgmt_hosts)
+ protocol_hosts = try([for instance in local.protocol_instances : instance["ipv4_address"]], [])
login_host_ip = try([for instance in local.login_instance : instance["ipv4_address"]], [])
compute_inventory_path = var.enable_deployer ? "${path.root}/../../modules/ansible-roles/compute.ini" : "${path.root}/modules/ansible-roles/compute.ini"
- compute_hosts_inventory_path = var.enable_deployer ? "${path.root}/../../solutions/lsf/compute_hosts.ini" : "${path.root}/solutions/lsf/compute_hosts.ini"
- mgmt_hosts_inventory_path = var.enable_deployer ? "${path.root}/../../solutions/lsf/mgmt_hosts.ini" : "${path.root}/solutions/lsf/mgmt_hosts.ini"
- bastion_hosts_inventory_path = var.enable_deployer ? "${path.root}/../../solutions/lsf/bastion_hosts.ini" : "${path.root}/solutions/lsf/bastion_hosts.ini"
- deployer_hosts_inventory_path = var.enable_deployer ? "${path.root}/../../solutions/lsf/deployer_hosts.ini" : "${path.root}/solutions/lsf/deployer_hosts.ini"
- ldap_hosts_inventory_path = var.enable_deployer ? "${path.root}/../../solutions/lsf/ldap_hosts.ini" : "${path.root}/solutions/lsf/ldap_hosts.ini"
- login_host_inventory_path = var.enable_deployer ? "${path.root}/../../solutions/lsf/login_host.ini" : "${path.root}/solutions/lsf/login_host.ini"
+ compute_hosts_inventory_path = var.enable_deployer ? "${path.root}/../../solutions/${local.scheduler_lowcase}/compute_hosts.ini" : "${path.root}/solutions/${local.scheduler_lowcase}/compute_hosts.ini"
+ mgmt_hosts_inventory_path = var.enable_deployer ? "${path.root}/../../solutions/${local.scheduler_lowcase}/mgmt_hosts.ini" : "${path.root}/solutions/${local.scheduler_lowcase}/mgmt_hosts.ini"
+ bastion_hosts_inventory_path = var.enable_deployer ? "${path.root}/../../solutions/${local.scheduler_lowcase}/bastion_hosts.ini" : "${path.root}/solutions/${local.scheduler_lowcase}/bastion_hosts.ini"
+ deployer_hosts_inventory_path = var.enable_deployer ? "${path.root}/../../solutions/${local.scheduler_lowcase}/deployer_hosts.ini" : "${path.root}/solutions/${local.scheduler_lowcase}/deployer_hosts.ini"
+ ldap_hosts_inventory_path = var.enable_deployer ? "${path.root}/../../solutions/${local.scheduler_lowcase}/ldap_hosts.ini" : "${path.root}/solutions/${local.scheduler_lowcase}/ldap_hosts.ini"
+ client_hosts_inventory_path = var.enable_deployer ? "${path.root}/../../solutions/${local.scheduler_lowcase}/client_hosts.ini" : "${path.root}/solutions/${local.scheduler_lowcase}/client_hosts.ini"
+ storage_hosts_inventory_path = var.enable_deployer ? "${path.root}/../../solutions/${local.scheduler_lowcase}/storage_hosts.ini" : "${path.root}/solutions/${local.scheduler_lowcase}/storage_hosts.ini"
+ afm_hosts_inventory_path = var.enable_deployer ? "${path.root}/../../solutions/${local.scheduler_lowcase}/afm_hosts.ini" : "${path.root}/solutions/${local.scheduler_lowcase}/afm_hosts.ini"
+ gklm_hosts_inventory_path = var.enable_deployer ? "${path.root}/../../solutions/${local.scheduler_lowcase}/gklm_hosts.ini" : "${path.root}/solutions/${local.scheduler_lowcase}/gklm_hosts.ini"
+ protocol_hosts_inventory_path = var.enable_deployer ? "${path.root}/../../solutions/${local.scheduler_lowcase}/protocol_hosts.ini" : "${path.root}/solutions/${local.scheduler_lowcase}/protocol_hosts.ini"
+ login_host_inventory_path = var.enable_deployer ? "${path.root}/../../solutions/${local.scheduler_lowcase}/login_host.ini" : "${path.root}/solutions/${local.scheduler_lowcase}/login_host.ini"
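+  # Inventory files live under solutions/<scheduler>/, so LSF and Scale runs keep
+  # separate host inventories; local.scheduler_lowcase selects the directory.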
# storage_inventory_path = var.enable_deployer ? "${path.root}/../../modules/ansible-roles/storage.ini" : "${path.root}/modules/ansible-roles/storage.ini"
}
@@ -351,17 +461,19 @@ locals {
# details needed for json file
locals {
- compute_instances_data = var.scheduler == "LSF" ? var.enable_deployer ? [] : flatten([module.landing_zone_vsi[0].compute_vsi_data]) : []
- compute_hosts_ips = var.scheduler == "LSF" ? var.enable_deployer ? [] : local.compute_instances_data[*]["ipv4_address"] : []
- # bastion_instances_data = var.scheduler == "LSF" ? var.enable_deployer ? flatten([module.deployer.bastion_vsi_data]) : [] : []
- bastion_hosts_ips = var.scheduler == "LSF" ? var.enable_deployer ? [module.deployer.bastion_fip] : [] : []
- deployer_hosts_ips = var.scheduler == "LSF" ? var.enable_deployer ? [module.deployer.deployer_ip] : [] : []
- mgmt_instances_data = var.scheduler == "LSF" ? var.enable_deployer ? [] : flatten([module.landing_zone_vsi[0].management_vsi_data]) : []
- mgmt_hosts_ips = var.scheduler == "LSF" ? var.enable_deployer ? [] : local.mgmt_instances_data[*]["ipv4_address"] : []
- ldap_hosts_ips = var.scheduler == "LSF" ? var.enable_deployer ? [] : (var.enable_ldap == true ? (var.ldap_server == "null" ? local.ldap_instances[*]["ipv4_address"] : [var.ldap_server]) : []) : []
- json_inventory_path = var.enable_deployer ? "${path.root}/../../modules/ansible-roles/all.json" : "${path.root}/modules/ansible-roles/all.json"
- management_nodes = var.scheduler == "LSF" ? var.enable_deployer ? [] : (flatten([module.landing_zone_vsi[0].management_vsi_data]))[*]["name"] : []
- login_host = var.scheduler == "LSF" ? var.enable_deployer ? [] : (flatten([module.landing_zone_vsi[0].login_vsi_data]))[*]["name"] : []
+ compute_instances_data = var.enable_deployer ? [] : flatten([module.landing_zone_vsi[0].compute_vsi_data])
+ compute_hosts_ips = var.enable_deployer ? [] : local.compute_instances_data[*]["ipv4_address"]
+ compute_mgmt_instances_data = var.scheduler == "Scale" ? var.enable_deployer ? [] : flatten([module.landing_zone_vsi[0].compute_management_vsi_data]) : []
+ compute_mgmt_hosts_ips = var.scheduler == "Scale" ? var.enable_deployer ? [] : local.compute_mgmt_instances_data[*]["ipv4_address"] : []
+ all_compute_hosts = concat(local.compute_hosts_ips, local.compute_mgmt_hosts_ips)
+ bastion_hosts_ips = var.enable_deployer ? [module.deployer.bastion_fip] : []
+ deployer_hosts_ips = var.enable_deployer ? [module.deployer.deployer_ip] : []
+ mgmt_instances_data = var.scheduler == "LSF" ? var.enable_deployer ? [] : flatten([module.landing_zone_vsi[0].management_vsi_data]) : []
+ mgmt_hosts_ips = var.scheduler == "LSF" ? var.enable_deployer ? [] : local.mgmt_instances_data[*]["ipv4_address"] : []
+ ldap_hosts_ips = var.scheduler == "LSF" ? var.enable_deployer ? [] : (var.enable_ldap == true ? (var.ldap_server == "null" ? local.ldap_instances[*]["ipv4_address"] : [var.ldap_server]) : []) : []
+ json_inventory_path = var.enable_deployer ? "${path.root}/../../modules/ansible-roles/all.json" : "${path.root}/modules/ansible-roles/all.json"
+ management_nodes = var.scheduler == "LSF" ? var.enable_deployer ? [] : (flatten([module.landing_zone_vsi[0].management_vsi_data]))[*]["name"] : []
+ login_host = var.scheduler == "LSF" ? var.enable_deployer ? [] : (flatten([module.landing_zone_vsi[0].login_vsi_data]))[*]["name"] : []
compute_nodes = var.scheduler == "LSF" ? (
var.enable_deployer ? [] : flatten([module.landing_zone_vsi[0].compute_vsi_data])[*]["name"]
) : []
@@ -401,24 +513,64 @@ locals {
bucket_crn = local.cloud_metrics_bucket.crn
bucket_endpoint = local.cloud_metrics_bucket.s3_endpoint_direct
} : null)
- # scc_cos_bucket = length(module.landing_zone.cos_buckets_names) > 0 && var.scc_enable ? [for name in module.landing_zone.cos_buckets_names : name if strcontains(name, "scc-bucket")][0] : ""
- # scc_cos_instance_crn = length(module.landing_zone.cos_instance_crns) > 0 && var.scc_enable ? module.landing_zone.cos_instance_crns[0] : ""
- compute_subnet_crn = var.enable_deployer ? "" : data.ibm_is_subnet.compute_subnet_crn[0].crn
+ compute_subnet_crn = var.enable_deployer ? "" : (length(local.compute_subnets) > 0 ? data.ibm_is_subnet.compute_subnet_crn[0].crn : "")
ssh_keys_ids = var.enable_deployer ? [] : [for name in var.ssh_keys : data.ibm_is_ssh_key.ssh_keys[name].id]
- compute_public_key_content = var.enable_deployer ? "" : jsonencode(base64encode(join("", flatten([module.landing_zone_vsi[0].compute_public_key_content]))))
- compute_private_key_content = var.enable_deployer ? "" : jsonencode(base64encode(join("", flatten([module.landing_zone_vsi[0].compute_private_key_content]))))
+ compute_public_key_content = var.enable_deployer ? "" : (local.static_compute_instance_count > 0 || local.management_instance_count > 0 ? jsonencode(base64encode(join("", flatten([module.landing_zone_vsi[0].compute_public_key_content])))) : "")
+ compute_private_key_content = var.enable_deployer ? "" : (local.static_compute_instance_count > 0 || local.management_instance_count > 0 ? jsonencode(base64encode(join("", flatten([module.landing_zone_vsi[0].compute_private_key_content])))) : "")
+  # For LSF
mgmnt_host_entry = var.scheduler == "LSF" ? { for vsi in flatten([module.landing_zone_vsi[*].management_vsi_data]) : vsi.ipv4_address => vsi.name } : {}
comp_host_entry = var.scheduler == "LSF" ? { for vsi in flatten([module.landing_zone_vsi[*].compute_vsi_data]) : vsi.ipv4_address => vsi.name } : {}
login_host_entry = var.scheduler == "LSF" ? { for vsi in flatten([module.landing_zone_vsi[*].login_vsi_data]) : vsi.ipv4_address => vsi.name } : {}
deployer_host_entry = var.scheduler == "LSF" ? { for inst in local.deployer_instances : inst.ipv4_address => inst.name if inst.ipv4_address != null } : {}
+
+
+  # For Scale
+ storage_host_entry = var.scheduler == "Scale" ? { for vsi in flatten([module.landing_zone_vsi[*].storage_vsi_data]) : vsi.ipv4_address => vsi.name } : {}
+ storage_mgmnt_host_entry = var.scheduler == "Scale" ? { for vsi in flatten([module.landing_zone_vsi[*].storage_cluster_management_vsi]) : vsi.ipv4_address => vsi.name } : {}
+ storage_tb_host_entry = var.scheduler == "Scale" ? { for vsi in flatten([module.landing_zone_vsi[*].storage_cluster_tie_breaker_vsi_data]) : vsi.ipv4_address => vsi.name } : {}
+ compute_host_entry = var.scheduler == "Scale" ? (local.enable_sec_interface_compute ? { for vsi in flatten([module.landing_zone_vsi[*].compute_vsi_data]) : vsi.secondary_ipv4_address => vsi.secondary_network_interface_detail.name } : { for vsi in flatten([module.landing_zone_vsi[*].compute_vsi_data]) : vsi.ipv4_address => vsi.name }) : {}
+ compute_mgmnt_host_entry = var.scheduler == "Scale" ? (local.enable_sec_interface_compute ? { for vsi in flatten([module.landing_zone_vsi[*].compute_management_vsi_data]) : vsi.secondary_ipv4_address => vsi.secondary_network_interface_detail.name } : { for vsi in flatten([module.landing_zone_vsi[*].compute_management_vsi_data]) : vsi.ipv4_address => vsi.name }) : {}
+ client_host_entry = var.scheduler == "Scale" ? { for vsi in flatten([module.landing_zone_vsi[*].client_vsi_data]) : vsi.ipv4_address => vsi.name } : {}
+ protocol_host_entry = var.scheduler == "Scale" ? { for vsi in flatten([module.landing_zone_vsi[*].protocol_vsi_data]) : vsi.ipv4_address => vsi.name } : {}
+ gklm_host_entry = var.scheduler == "Scale" ? { for vsi in flatten([module.landing_zone_vsi[*].gklm_vsi_data]) : vsi.ipv4_address => vsi.name } : {}
+ afm_host_entry = var.scheduler == "Scale" ? { for vsi in flatten([module.landing_zone_vsi[*].afm_vsi_data]) : vsi.ipv4_address => vsi.name } : {}
+
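+ # Bare metal host entries keep both the server name and id; illustrative shape:
+ #   "10.241.0.5" = { name = "<prefix>-strg-bms-001", id = "0717_xxxx" }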
+ storage_bms_host_entry = var.scheduler == "Scale" ? {
+ for server in local.raw_bm_storage_servers_dns_record_details : server.ipv4_address =>
+ {
+ name = server.name
+ id = server.id
+ }
+ } : {}
+ storage_tb_bms_host_entry = var.scheduler == "Scale" ? {
+ for server in local.raw_bm_tie_breaker_dns_record_details : server.ipv4_address =>
+ {
+ name = server.name
+ id = server.id
+ }
+ } : {}
+ protocol_bms_host_entry = var.scheduler == "Scale" ? {
+ for server in local.raw_bm_protocol_dns_record_details : server.ipv4_address =>
+ {
+ name = server.name
+ id = server.id
+ }
+ } : {}
+ afm_bms_host_entry = var.scheduler == "Scale" ? {
+ for server in local.raw_bm_afm_dns_record_details : server.ipv4_address =>
+ {
+ name = server.name
+ id = server.id
+ }
+ } : {}
}
locals {
- # gpfs_base_rpm_path = fileset(var.spectrumscale_rpms_path, "gpfs.base-*")
- # scale_org_version = regex("gpfs.base-(.*).x86_64.rpm", tolist(local.gpfs_base_rpm_path)[0])[0]
- scale_version = "5.2.2.1" #replace(local.scale_org_version, "-", ".")
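+ # Derive the Scale version from the bundled RPM name, e.g. "gpfs.base-5.2.2-1.x86_64.rpm"
+ # yields scale_org_version "5.2.2-1" and scale_version "5.2.2.1".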
+ gpfs_base_rpm_path = var.scheduler == "Scale" ? (var.enable_deployer ? [] : fileset(var.spectrumscale_rpms_path, "gpfs.base-*")) : []
+ scale_org_version = var.scheduler == "Scale" ? (var.enable_deployer ? "" : regex("gpfs.base-(.*).x86_64.rpm", tolist(local.gpfs_base_rpm_path)[0])[0]) : ""
+ scale_version = var.scheduler == "Scale" ? (var.enable_deployer ? "" : replace(local.scale_org_version, "-", ".")) : ""
compute_vsi_profile = var.static_compute_instances[*]["profile"]
storage_vsi_profile = var.storage_instances[*]["profile"]
@@ -429,20 +581,22 @@ locals {
afm_server_type = strcontains(local.afm_vsi_profile[0], "metal")
ces_server_type = strcontains(local.protocol_vsi_profile[0], "metal")
- scale_ces_enabled = local.protocol_instance_count > 0 ? true : false
- is_colocate_protocol_subset = local.scale_ces_enabled && var.colocate_protocol_instances ? local.protocol_instance_count < local.storage_instance_count ? true : false : false
- enable_sec_interface_compute = local.scale_ces_enabled == false && data.ibm_is_instance_profile.compute_profile.bandwidth[0].value >= 64000 ? true : false
- enable_sec_interface_storage = local.scale_ces_enabled == false && var.storage_type != "persistent" && data.ibm_is_instance_profile.storage_profile.bandwidth[0].value >= 64000 ? true : false
- enable_mrot_conf = local.enable_sec_interface_compute && local.enable_sec_interface_storage ? true : false
- enable_afm = sum(var.afm_instances[*]["count"]) > 0 ? true : false
+ scale_ces_enabled = local.protocol_instance_count > 0 ? true : false
+ is_colocate_protocol_subset = local.scale_ces_enabled && var.colocate_protocol_instances ? local.protocol_instance_count < local.storage_instance_count ? true : false : false
+ enable_sec_interface_compute = local.scale_ces_enabled == false && data.ibm_is_instance_profile.compute_profile.bandwidth[0].value >= 64000 ? true : false
+ enable_sec_interface_storage = local.scale_ces_enabled == false && var.storage_type != "persistent" && data.ibm_is_instance_profile.storage_profile.bandwidth[0].value >= 64000 ? true : false
+ enable_mrot_conf = local.enable_sec_interface_compute && local.enable_sec_interface_storage ? true : false
+ enable_afm = local.afm_instance_count > 0 ? true : false
+ scale_afm_bucket_config_details = module.landing_zone.scale_afm_bucket_config_details
+ scale_afm_cos_hmac_key_params = module.landing_zone.scale_afm_cos_hmac_key_params
- compute_instance_private_ips = flatten(local.compute_instances[*]["ipv4_address"])
- compute_instance_ids = flatten(local.compute_instances[*]["id"])
- compute_instance_names = try(tolist([for name_details in flatten(local.compute_instances[*]["name"]) : "${name_details}.${var.dns_domain_names["compute"]}"]), [])
+ compute_instance_private_ips = local.enable_sec_interface_compute ? flatten([for ip in local.compute_instances : ip.secondary_network_interface_detail[*]["primary_ipv4_address"]]) : flatten(local.compute_instances[*]["ipv4_address"])
+ compute_instance_ids = local.enable_sec_interface_compute ? flatten([for id in local.compute_instances : id.secondary_network_interface_detail[*]["id"]]) : flatten(local.compute_instances[*]["id"])
+ compute_instance_names = local.enable_sec_interface_compute ? flatten([for name in local.compute_instances : [for nic in name.secondary_network_interface_detail[*]["name"] : "${nic}.${var.dns_domain_names["storage"]}"]]) : try(tolist([for name_details in flatten(local.compute_instances[*]["name"]) : "${name_details}.${var.dns_domain_names["compute"]}"]), [])
- compute_mgmt_instance_private_ips = flatten(local.comp_mgmt_instances[*]["ipv4_address"])
- compute_mgmt_instance_ids = flatten(local.comp_mgmt_instances[*]["id"])
- compute_mgmt_instance_names = try(tolist([for name_details in flatten(local.comp_mgmt_instances[*]["name"]) : "${name_details}.${var.dns_domain_names["compute"]}"]), [])
+ compute_mgmt_instance_private_ips = local.enable_sec_interface_compute ? flatten([for ip in local.comp_mgmt_instances : ip.secondary_network_interface_detail[*]["primary_ipv4_address"]]) : flatten(local.comp_mgmt_instances[*]["ipv4_address"])
+ compute_mgmt_instance_ids = local.enable_sec_interface_compute ? flatten([for id in local.comp_mgmt_instances : id.secondary_network_interface_detail[*]["id"]]) : flatten(local.comp_mgmt_instances[*]["id"])
+ compute_mgmt_instance_names = local.enable_sec_interface_compute ? flatten([for name in local.comp_mgmt_instances : [for nic in name.secondary_network_interface_detail[*]["name"] : "${nic}.${var.dns_domain_names["storage"]}"]]) : try(tolist([for name_details in flatten(local.comp_mgmt_instances[*]["name"]) : "${name_details}.${var.dns_domain_names["compute"]}"]), [])
strg_instance_private_ips = flatten(local.storage_instances[*]["ipv4_address"])
strg_instance_ids = flatten(local.storage_instances[*]["id"])
@@ -452,6 +606,10 @@ locals {
strg_servers_ids = flatten(local.storage_servers[*]["id"])
strg_servers_names = try(tolist([for name_details in flatten(local.storage_servers[*]["name"]) : "${name_details}.${var.dns_domain_names["storage"]}"]), [])
+ bm_tie_breaker_private_ips = flatten(local.storage_tie_brkr_bm[*]["ipv4_address"])
+ bm_tie_breaker_ids = flatten(local.storage_tie_brkr_bm[*]["id"])
+ bm_tie_breaker_names = try(tolist([for name_details in flatten(local.storage_tie_brkr_bm[*]["name"]) : "${name_details}.${var.dns_domain_names["storage"]}"]), [])
+
strg_mgmt_instance_private_ips = flatten(local.strg_mgmt_instances[*]["ipv4_address"])
strg_mgmtt_instance_ids = flatten(local.strg_mgmt_instances[*]["id"])
strg_mgmt_instance_names = try(tolist([for name_details in flatten(local.strg_mgmt_instances[*]["name"]) : "${name_details}.${var.dns_domain_names["storage"]}"]), [])
@@ -460,14 +618,30 @@ locals {
strg_tie_breaker_instance_ids = flatten(local.tie_brkr_instances[*]["id"])
strg_tie_breaker_instance_names = try(tolist([for name_details in flatten(local.tie_brkr_instances[*]["name"]) : "${name_details}.${var.dns_domain_names["storage"]}"]), [])
- secondary_compute_instance_private_ips = flatten(local.compute_instances[*]["secondary_ipv4_address"])
+ # secondary_compute_instance_private_ips = flatten(local.compute_instances[*]["secondary_ipv4_address"])
# secondary_storage_instance_private_ips = flatten(local.storage_instances[*]["secondary_ipv4_address"])
protocol_instance_private_ips = flatten(local.protocol_instances[*]["ipv4_address"])
protocol_instance_ids = flatten(local.protocol_instances[*]["id"])
protocol_instance_names = try(tolist([for name_details in flatten(local.protocol_instances[*]["name"]) : "${name_details}.${var.dns_domain_names["storage"]}"]), [])
- protocol_cluster_instance_names = var.enable_deployer ? [] : slice((concat(local.protocol_instance_names, (var.storage_type == "persistent" ? [] : local.strg_instance_names))), 0, local.protocol_instance_count)
+ protocol_bm_instance_private_ips = flatten(local.protocol_bm_instances[*]["ipv4_address"])
+ protocol_bm_instance_ids = flatten(local.protocol_bm_instances[*]["id"])
+ protocol_bm_instance_names = try(tolist([for name_details in flatten(local.protocol_bm_instances[*]["name"]) : "${name_details}.${var.dns_domain_names["storage"]}"]), [])
+
+ protocol_cluster_instance_names = var.enable_deployer ? [] : slice((concat(local.protocol_instance_names, local.protocol_bm_instance_names, (var.storage_type == "persistent" ? local.strg_servers_names : local.strg_instance_names))), 0, local.protocol_instance_count)
+
+ afm_instance_private_ips = flatten(local.afm_instances[*]["ipv4_address"])
+ afm_instance_ids = flatten(local.afm_instances[*]["id"])
+ afm_instance_names = try(tolist([for name_details in flatten(local.afm_instances[*]["name"]) : "${name_details}.${var.dns_domain_names["storage"]}"]), [])
+
+ afm_bm_private_ips = flatten(local.afm_bm_instances[*]["ipv4_address"])
+ afm_bm_ids = flatten(local.afm_bm_instances[*]["id"])
+ afm_bm_names = try(tolist([for name_details in flatten(local.afm_bm_instances[*]["name"]) : "${name_details}.${var.dns_domain_names["storage"]}"]), [])
+
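+ # Merge VSI-backed and bare-metal-backed AFM nodes so downstream locals can treat them uniformly.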
+ afm_private_ips_final = concat(local.afm_instance_private_ips, local.afm_bm_private_ips)
+ afm_ids_final = concat(local.afm_instance_ids, local.afm_bm_ids)
+ afm_names_final = concat(local.afm_instance_names, local.afm_bm_names)
# client_instance_private_ips = flatten(local.client_instances[*]["ipv4_address"])
# client_instance_ids = flatten(local.client_instances[*]["id"])
@@ -482,49 +656,33 @@ locals {
# ldap_instance_names = flatten(local.ldap_instances[*]["name"])
}
-locals {
- afm_instance_private_ips = flatten(local.afm_instances[*]["ipv4_address"])
- afm_instance_ids = flatten(local.afm_instances[*]["id"])
- afm_instance_names = try(tolist([for name_details in flatten(local.afm_instances[*]["name"]) : "${name_details}.${var.dns_domain_names["storage"]}"]), [])
-
- new_instance_bucket_hmac = [for details in var.afm_cos_config : details if(details.cos_instance == "" && details.bucket_name == "" && details.cos_service_cred_key == "")]
- exstng_instance_new_bucket_hmac = [for details in var.afm_cos_config : details if(details.cos_instance != "" && details.bucket_name == "" && details.cos_service_cred_key == "")]
- exstng_instance_bucket_new_hmac = [for details in var.afm_cos_config : details if(details.cos_instance != "" && details.bucket_name != "" && details.cos_service_cred_key == "")]
- exstng_instance_hmac_new_bucket = [for details in var.afm_cos_config : details if(details.cos_instance != "" && details.bucket_name == "" && details.cos_service_cred_key != "")]
- exstng_instance_bucket_hmac = [for details in var.afm_cos_config : details if(details.cos_instance != "" && details.bucket_name != "" && details.cos_service_cred_key != "")]
-
- afm_cos_bucket_details = local.enable_afm == true ? flatten(module.cos[*].afm_cos_bucket_details) : []
- afm_cos_config = local.enable_afm == true ? flatten(module.cos[*].afm_config_details) : []
-}
-
-
locals {
- storage_instance_private_ips = var.storage_type != "persistent" ? local.enable_afm == true ? concat(local.strg_instance_private_ips, local.afm_instance_private_ips) : local.strg_instance_private_ips : []
- storage_instance_ids = var.storage_type != "persistent" ? local.enable_afm == true ? concat(local.strg_instance_ids, local.afm_instance_ids) : local.strg_instance_ids : []
- storage_instance_names = var.storage_type != "persistent" ? local.enable_afm == true ? concat(local.strg_instance_names, local.afm_instance_names) : local.strg_instance_names : []
- storage_ips_with_vol_mapping = module.landing_zone_vsi[*].instance_ips_with_vol_mapping
+ storage_instance_private_ips = var.storage_type != "persistent" ? local.enable_afm == true ? concat(local.strg_instance_private_ips, local.afm_private_ips_final) : local.strg_instance_private_ips : []
+ storage_instance_ids = var.storage_type != "persistent" ? local.enable_afm == true ? concat(local.strg_instance_ids, local.afm_ids_final) : local.strg_instance_ids : []
+ storage_instance_names = var.storage_type != "persistent" ? local.enable_afm == true ? concat(local.strg_instance_names, local.afm_names_final) : local.strg_instance_names : []
+ storage_ips_with_vol_mapping = var.storage_type != "persistent" ? module.landing_zone_vsi[*].instance_ips_with_vol_mapping : local.storage_bm_name_with_vol_mapping
storage_cluster_instance_private_ips = local.scale_ces_enabled == false ? local.storage_instance_private_ips : concat(local.storage_instance_private_ips, local.protocol_instance_private_ips)
storage_cluster_instance_ids = local.scale_ces_enabled == false ? local.storage_instance_ids : concat(local.storage_instance_ids, local.protocol_instance_ids)
storage_cluster_instance_names = local.scale_ces_enabled == false ? local.storage_instance_names : concat(local.storage_instance_names, local.protocol_instance_names)
- baremetal_instance_private_ips = var.storage_type == "persistent" ? local.enable_afm == true ? concat(local.strg_servers_private_ips, local.afm_instance_private_ips) : local.strg_servers_private_ips : []
- baremetal_instance_ids = var.storage_type == "persistent" ? local.enable_afm == true ? concat(local.strg_servers_ids, local.afm_instance_ids) : local.strg_servers_ids : []
- baremetal_instance_names = var.storage_type == "persistent" ? local.enable_afm == true ? concat(local.strg_servers_names, local.afm_instance_names) : local.strg_servers_names : []
+ baremetal_instance_private_ips = var.storage_type == "persistent" ? local.enable_afm == true ? concat(local.strg_servers_private_ips, local.afm_private_ips_final) : local.strg_servers_private_ips : []
+ baremetal_instance_ids = var.storage_type == "persistent" ? local.enable_afm == true ? concat(local.strg_servers_ids, local.afm_ids_final) : local.strg_servers_ids : []
+ baremetal_instance_names = var.storage_type == "persistent" ? local.enable_afm == true ? concat(local.strg_servers_names, local.afm_names_final) : local.strg_servers_names : []
- baremetal_cluster_instance_private_ips = var.storage_type == "persistent" && local.scale_ces_enabled == false ? local.baremetal_instance_private_ips : concat(local.baremetal_instance_private_ips, local.protocol_instance_private_ips)
- baremetal_cluster_instance_ids = var.storage_type == "persistent" && local.scale_ces_enabled == false ? local.baremetal_instance_ids : concat(local.baremetal_instance_ids, local.protocol_instance_ids)
- baremetal_cluster_instance_names = var.storage_type == "persistent" && local.scale_ces_enabled == false ? local.baremetal_instance_names : concat(local.baremetal_instance_names, local.protocol_instance_names)
+ baremetal_cluster_instance_private_ips = var.storage_type == "persistent" && local.scale_ces_enabled == false ? local.baremetal_instance_private_ips : concat(local.baremetal_instance_private_ips, local.protocol_instance_private_ips, local.protocol_bm_instance_private_ips)
+ baremetal_cluster_instance_ids = var.storage_type == "persistent" && local.scale_ces_enabled == false ? local.baremetal_instance_ids : concat(local.baremetal_instance_ids, local.protocol_instance_ids, local.protocol_bm_instance_ids)
+ baremetal_cluster_instance_names = var.storage_type == "persistent" && local.scale_ces_enabled == false ? local.baremetal_instance_names : concat(local.baremetal_instance_names, local.protocol_instance_names, local.protocol_bm_instance_names)
- tie_breaker_storage_instance_private_ips = var.storage_type != "persistent" ? local.strg_tie_breaker_private_ips : local.baremetal_instance_private_ips
- tie_breaker_storage_instance_ids = var.storage_type != "persistent" ? local.strg_tie_breaker_instance_ids : local.baremetal_instance_ids
- tie_breaker_storage_instance_names = var.storage_type != "persistent" ? local.strg_tie_breaker_instance_names : local.baremetal_instance_names
- tie_breaker_ips_with_vol_mapping = module.landing_zone_vsi[*].instance_ips_with_vol_mapping_tie_breaker
+ tie_breaker_storage_instance_private_ips = var.storage_type != "persistent" ? local.strg_tie_breaker_private_ips : local.bm_tie_breaker_private_ips
+ tie_breaker_storage_instance_ids = var.storage_type != "persistent" ? local.strg_tie_breaker_instance_ids : local.bm_tie_breaker_ids
+ tie_breaker_storage_instance_names = var.storage_type != "persistent" ? local.strg_tie_breaker_instance_names : local.bm_tie_breaker_names
+ tie_breaker_ips_with_vol_mapping = var.storage_type != "persistent" ? module.landing_zone_vsi[*].instance_ips_with_vol_mapping_tie_breaker : local.storage_tie_breaker_bms_name_with_vol_mapping
- storage_subnet_cidr = var.enable_deployer ? "" : local.storage_instance_count > 0 ? jsonencode((data.ibm_is_subnet.existing_storage_subnets[*].ipv4_cidr_block)[0]) : ""
- cluster_subnet_cidr = var.enable_deployer ? "" : jsonencode((data.ibm_is_subnet.existing_cluster_subnets[*].ipv4_cidr_block)[0])
- client_subnet_cidr = var.enable_deployer ? "" : local.client_instance_count > 0 ? jsonencode((data.ibm_is_subnet.existing_client_subnets[*].ipv4_cidr_block)[0]) : ""
+ storage_subnet_cidr = local.storage_instance_count > 0 && var.storage_subnet_id != null ? jsonencode((data.ibm_is_subnet.existing_storage_subnets[*].ipv4_cidr_block)[0]) : ""
+ compute_subnet_cidr = local.static_compute_instance_count > 0 && var.compute_subnet_id != null ? jsonencode((data.ibm_is_subnet.existing_compute_subnets[*].ipv4_cidr_block)[0]) : ""
+ client_subnet_cidr = local.client_instance_count > 0 && var.client_subnet_id != null ? jsonencode((data.ibm_is_subnet.existing_client_subnets[*].ipv4_cidr_block)[0]) : ""
compute_memory = data.ibm_is_instance_profile.compute_profile.memory[0].value
compute_vcpus_count = data.ibm_is_instance_profile.compute_profile.vcpu_count[0].value
@@ -537,25 +695,29 @@ locals {
storage_desc_bandwidth = data.ibm_is_instance_profile.storage_profile.bandwidth[0].value
storage_memory = var.storage_type == "persistent" ? data.ibm_is_bare_metal_server_profile.storage_bms_profile[0].memory[0].value : data.ibm_is_instance_profile.storage_profile.memory[0].value
storage_vcpus_count = var.storage_type == "persistent" ? data.ibm_is_bare_metal_server_profile.storage_bms_profile[0].cpu_core_count[0].value : data.ibm_is_instance_profile.storage_profile.vcpu_count[0].value
- storage_bandwidth = var.storage_type == "persistent" ? data.ibm_is_bare_metal_server_profile.storage_bms_profile[0].bandwidth[0].value : data.ibm_is_instance_profile.storage_profile.bandwidth[0].value
- protocol_memory = (local.scale_ces_enabled == true && var.colocate_protocol_instances == false) ? local.ces_server_type == false ? data.ibm_is_instance_profile.protocol_profile[0].memory[0].value : jsonencode(0) : jsonencode(0)
- protocol_vcpus_count = (local.scale_ces_enabled == true && var.colocate_protocol_instances == false) ? local.ces_server_type == false ? data.ibm_is_instance_profile.protocol_profile[0].vcpu_count[0].value : jsonencode(0) : jsonencode(0)
- protocol_bandwidth = (local.scale_ces_enabled == true && var.colocate_protocol_instances == false) ? local.ces_server_type == false ? data.ibm_is_instance_profile.protocol_profile[0].bandwidth[0].value : jsonencode(0) : jsonencode(0)
+ storage_bandwidth = var.storage_type == "persistent" ? (local.sapphire_rapids_profile_check == true ? 200000 : 100000) : data.ibm_is_instance_profile.storage_profile.bandwidth[0].value
+ protocol_memory = (local.scale_ces_enabled == true && var.colocate_protocol_instances == false) ? (local.ces_server_type == false ? data.ibm_is_instance_profile.protocol_profile[0].memory[0].value : data.ibm_is_bare_metal_server_profile.protocol_bm_profile[0].memory[0].value) : jsonencode(0)
+ protocol_vcpus_count = (local.scale_ces_enabled == true && var.colocate_protocol_instances == false) ? (local.ces_server_type == false ? data.ibm_is_instance_profile.protocol_profile[0].vcpu_count[0].value : data.ibm_is_bare_metal_server_profile.protocol_bm_profile[0].cpu_core_count[0].value) : jsonencode(0)
+ protocol_bandwidth = (local.scale_ces_enabled == true && var.colocate_protocol_instances == false) ? (local.ces_server_type == false ? data.ibm_is_instance_profile.protocol_profile[0].bandwidth[0].value : (local.sapphire_rapids_profile_check == true ? 200000 : 100000)) : jsonencode(0)
storage_protocol_memory = var.storage_type == "persistent" ? data.ibm_is_bare_metal_server_profile.storage_bms_profile[0].memory[0].value : data.ibm_is_instance_profile.storage_profile.memory[0].value
storage_protocol_vcpus_count = var.storage_type == "persistent" ? data.ibm_is_bare_metal_server_profile.storage_bms_profile[0].cpu_core_count[0].value : data.ibm_is_instance_profile.storage_profile.vcpu_count[0].value
- storage_protocol_bandwidth = var.storage_type == "persistent" ? data.ibm_is_bare_metal_server_profile.storage_bms_profile[0].bandwidth[0].value : data.ibm_is_instance_profile.storage_profile.bandwidth[0].value
- afm_memory = local.afm_server_type == true ? jsonencode("") : data.ibm_is_instance_profile.afm_server_profile[0].memory[0].value
- afm_vcpus_count = local.afm_server_type == true ? jsonencode("") : data.ibm_is_instance_profile.afm_server_profile[0].vcpu_count[0].value
- afm_bandwidth = local.afm_server_type == true ? jsonencode("") : data.ibm_is_instance_profile.afm_server_profile[0].bandwidth[0].value
+ storage_protocol_bandwidth = var.storage_type == "persistent" ? (local.sapphire_rapids_profile_check == true ? 200000 : 100000) : data.ibm_is_instance_profile.storage_profile.bandwidth[0].value
+ afm_memory = local.afm_server_type == true ? data.ibm_is_bare_metal_server_profile.afm_bm_profile[0].memory[0].value : data.ibm_is_instance_profile.afm_server_profile[0].memory[0].value
+ afm_vcpus_count = local.afm_server_type == true ? data.ibm_is_bare_metal_server_profile.afm_bm_profile[0].cpu_core_count[0].value : data.ibm_is_instance_profile.afm_server_profile[0].vcpu_count[0].value
+ afm_bandwidth = local.afm_server_type == true ? (local.sapphire_rapids_profile_check == true ? 200000 : 100000) : data.ibm_is_instance_profile.afm_server_profile[0].bandwidth[0].value
protocol_reserved_name_ips_map = try({ for details in data.ibm_is_subnet_reserved_ips.protocol_subnet_reserved_ips[0].reserved_ips : details.name => details.address }, {})
- protocol_subnet_gateway_ip = local.scale_ces_enabled == true ? local.protocol_reserved_name_ips_map.ibm-default-gateway : ""
+ protocol_subnet_gateway_ip = var.enable_deployer ? "" : local.scale_ces_enabled == true ? local.protocol_reserved_name_ips_map.ibm-default-gateway : ""
}
# Existing bastion Variables
locals {
bastion_instance_public_ip = var.existing_bastion_instance_name != null ? var.existing_bastion_instance_public_ip : null
bastion_ssh_private_key = var.existing_bastion_instance_name != null ? var.existing_bastion_ssh_private_key : null
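+ # True when any storage server uses a gen-3 ("3-metal"/"3d-metal", presumably Sapphire Rapids) profile, which is assumed to provide 200 Gbps rather than 100 Gbps.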
+ sapphire_rapids_profile_check = anytrue([
+ for server in var.storage_servers :
+ strcontains(server.profile, "3-metal") || strcontains(server.profile, "3d-metal")
+ ])
}
locals {
@@ -565,11 +727,22 @@ locals {
# locals needed for ssh connection
locals {
- ssh_forward_host = var.enable_deployer ? "" : local.mgmt_hosts_ips[0]
- ssh_forwards = var.enable_deployer ? "" : "-L 8443:${local.ssh_forward_host}:8443 -L 6080:${local.ssh_forward_host}:6080 -L 8444:${local.ssh_forward_host}:8444"
- ssh_jump_host = var.enable_deployer ? "" : local.bastion_instance_public_ip != null ? local.bastion_instance_public_ip : var.bastion_fip
- ssh_jump_option = var.enable_deployer ? "" : "-J ubuntu@${local.ssh_jump_host}"
- ssh_cmd = var.enable_deployer ? "" : "ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o ServerAliveInterval=5 -o ServerAliveCountMax=1 ${local.ssh_forwards} ${local.ssh_jump_option} lsfadmin@${join(",", local.login_host_ip)}"
+ ssh_forward_host = var.enable_deployer ? "" : var.scheduler == "LSF" ? local.mgmt_hosts_ips[0] : ""
+ ssh_forwards = var.enable_deployer ? "" : var.scheduler == "LSF" ? "-L 8443:${local.ssh_forward_host}:8443 -L 6080:${local.ssh_forward_host}:6080 -L 8444:${local.ssh_forward_host}:8444" : ""
+ ssh_jump_host = var.enable_deployer ? "" : var.scheduler == "LSF" ? local.bastion_instance_public_ip != null ? local.bastion_instance_public_ip : var.bastion_fip : ""
+ ssh_jump_option = var.enable_deployer ? "" : var.scheduler == "LSF" ? "-J ubuntu@${local.ssh_jump_host}" : ""
+ ssh_cmd = var.enable_deployer ? "" : var.scheduler == "LSF" ? "ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o ServerAliveInterval=5 -o ServerAliveCountMax=1 ${local.ssh_forwards} ${local.ssh_jump_option} lsfadmin@${join(",", local.login_host_ip)}" : ""
+}
+
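+# Factory-default SKLM/GKLM admin credentials; the encryption_configuration module
+# presumably rotates this default password to var.scale_encryption_admin_password.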
+locals {
+ scale_encryption_admin_username = "SKLMAdmin" # pragma: allowlist secret
+ scale_encryption_admin_default_password = "SKLM@dmin123" # pragma: allowlist secret
+}
+
+# For bare metal user data
+locals {
+ enable_protocol = local.storage_instance_count > 0 && local.protocol_instance_count > 0
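+ # eth0 carries the storage network and eth1 the protocol network on bare metal servers; consumed by host_resolution_add below.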
+ bms_interfaces = ["eth0", "eth1"]
}
#locals {
diff --git a/main.tf b/main.tf
index 6dbfd1a6..19322407 100644
--- a/main.tf
+++ b/main.tf
@@ -1,10 +1,14 @@
module "landing_zone" {
source = "./modules/landing_zone"
enable_landing_zone = var.enable_landing_zone
+ scheduler = var.scheduler
vpc_cluster_private_subnets_cidr_blocks = [var.vpc_cluster_private_subnets_cidr_blocks]
cos_instance_name = var.cos_instance_name
bastion_subnet_id = local.bastion_subnet_id
- compute_subnet_id = local.subnet_id
+ compute_subnet_id = local.compute_subnet_id
+ protocol_subnet_id = local.protocol_subnet_id
+ client_subnet_id = local.client_subnet_id
+ storage_subnet_id = local.storage_subnet_id
enable_atracker = var.observability_atracker_enable && (var.observability_atracker_target_type == "cos") ? true : false
enable_cos_integration = var.enable_cos_integration
enable_vpc_flow_logs = var.enable_vpc_flow_logs
@@ -33,6 +37,12 @@ module "landing_zone" {
skip_flowlogs_s2s_auth_policy = var.skip_flowlogs_s2s_auth_policy
skip_kms_s2s_auth_policy = var.skip_kms_s2s_auth_policy
observability_logs_enable = var.observability_logs_enable_for_management || var.observability_logs_enable_for_compute || (var.observability_atracker_enable && var.observability_atracker_target_type == "cloudlogs") ? true : false
+ scale_encryption_type = var.scale_encryption_type
+ scale_encryption_enabled = var.scale_encryption_enabled
+ key_protect_instance_id = var.key_protect_instance_id
+ afm_instances = var.afm_instances
+ afm_cos_config = var.afm_cos_config
+ filesystem_config = var.filesystem_config
# hpcs_instance_name = var.hpcs_instance_name
# clusters = var.clusters
}
@@ -47,8 +57,11 @@ module "deployer" {
cluster_cidr = local.cluster_cidr
ext_login_subnet_id = var.login_subnet_id
bastion_subnets = local.login_subnets
- ext_cluster_subnet_id = var.cluster_subnet_id
- cluster_subnets = local.cluster_subnets
+ ext_compute_subnet_id = var.compute_subnet_id
+ compute_subnets = local.compute_subnets
+ client_subnets = local.client_subnets
+ storage_subnets = local.storage_subnets
+ protocol_subnets = local.protocol_subnets
bastion_instance = var.bastion_instance
enable_deployer = var.enable_deployer
deployer_instance = var.deployer_instance
@@ -56,61 +69,71 @@ module "deployer" {
allowed_cidr = var.remote_allowed_ips
kms_encryption_enabled = local.kms_encryption_enabled
boot_volume_encryption_key = local.boot_volume_encryption_key
- existing_kms_instance_guid = local.existing_kms_instance_guid
dns_domain_names = var.dns_domain_names
skip_iam_authorization_policy = var.skip_iam_block_storage_authorization_policy
ext_vpc_name = var.vpc_name
bastion_instance_name = var.existing_bastion_instance_name
bastion_instance_public_ip = local.bastion_instance_public_ip
existing_bastion_security_group_id = var.existing_bastion_instance_name != null ? var.existing_bastion_security_group_id : null
+ ext_client_subnet_id = var.client_subnet_id
+ ext_storage_subnet_id = var.storage_subnet_id
+ ext_protocol_subnet_id = var.protocol_subnet_id
+ login_security_group_name = var.login_security_group_name
+ enable_sec_interface_compute = local.enable_sec_interface_compute
}
module "landing_zone_vsi" {
- count = var.enable_deployer == false ? 1 : 0
- source = "./modules/landing_zone_vsi"
- resource_group = var.resource_group_ids["workload_rg"]
- prefix = var.cluster_prefix
- vpc_id = local.vpc_id
- zones = var.zones
- bastion_security_group_id = var.bastion_security_group_id
- bastion_public_key_content = local.bastion_public_key_content
- ssh_keys = var.ssh_keys
- client_subnets = local.client_subnets
- client_instances = var.client_instances
- cluster_subnet_id = local.cluster_subnets
- management_instances = var.management_instances
- static_compute_instances = var.static_compute_instances
- dynamic_compute_instances = var.dynamic_compute_instances
- storage_subnets = local.storage_subnets
- storage_instances = var.storage_instances
- storage_servers = var.storage_servers
- storage_type = var.storage_type
- protocol_subnets = local.protocol_subnets
- protocol_instances = var.protocol_instances
- nsd_details = var.nsd_details
- dns_domain_names = var.dns_domain_names
- kms_encryption_enabled = local.kms_encryption_enabled
- boot_volume_encryption_key = var.boot_volume_encryption_key
- existing_kms_instance_guid = var.existing_kms_instance_guid
- enable_deployer = var.enable_deployer
- afm_instances = var.afm_instances
- enable_dedicated_host = var.enable_dedicated_host
- enable_ldap = var.enable_ldap
- ldap_instances = var.ldap_instance
- ldap_server = local.ldap_server
- ldap_instance_key_pair = local.ldap_instance_key_pair
- scale_encryption_enabled = var.scale_encryption_enabled
- scale_encryption_type = var.scale_encryption_type
- gklm_instance_key_pair = local.gklm_instance_key_pair
- gklm_instances = var.gklm_instances
- vpc_region = local.region
- scheduler = var.scheduler
- ibm_customer_number = var.ibm_customer_number
- colocate_protocol_instances = var.colocate_protocol_instances
- storage_security_group_id = var.storage_security_group_id
- login_instance = var.login_instance
- bastion_subnets = local.login_subnets
- cluster_cidr = local.cluster_cidr
+ count = var.enable_deployer == false ? 1 : 0
+ source = "./modules/landing_zone_vsi"
+ resource_group = var.resource_group_ids["workload_rg"]
+ prefix = var.cluster_prefix
+ vpc_id = local.vpc_id
+ zones = var.zones
+ bastion_security_group_id = var.bastion_security_group_id
+ bastion_public_key_content = local.bastion_public_key_content
+ ssh_keys = var.ssh_keys
+ client_subnets = local.client_subnets
+ client_instances = var.client_instances
+ compute_subnet_id = local.compute_subnets
+ management_instances = var.management_instances
+ static_compute_instances = var.static_compute_instances
+ dynamic_compute_instances = var.dynamic_compute_instances
+ storage_subnets = local.storage_subnets
+ storage_instances = var.storage_instances
+ storage_servers = var.storage_servers
+ storage_type = var.storage_type
+ protocol_subnets = local.protocol_subnets
+ protocol_instances = var.protocol_instances
+ nsd_details = var.nsd_details
+ dns_domain_names = var.dns_domain_names
+ kms_encryption_enabled = local.kms_encryption_enabled
+ boot_volume_encryption_key = var.boot_volume_encryption_key
+ enable_deployer = var.enable_deployer
+ afm_instances = var.afm_instances
+ enable_dedicated_host = var.enable_dedicated_host
+ enable_ldap = var.enable_ldap
+ ldap_instances = var.ldap_instance
+ ldap_server = local.ldap_server
+ scale_encryption_enabled = var.scale_encryption_enabled
+ scale_encryption_type = var.scale_encryption_type
+ gklm_instances = var.gklm_instances
+ vpc_region = local.region
+ scheduler = var.scheduler
+ ibm_customer_number = var.ibm_customer_number
+ colocate_protocol_instances = var.colocate_protocol_instances
+ storage_security_group_id = var.storage_security_group_id
+ login_instance = var.login_instance
+ bastion_subnets = local.login_subnets
+ cluster_cidr = local.cluster_cidr
+ bms_boot_drive_encryption = var.bms_boot_drive_encryption
+ tie_breaker_bm_server_profile = var.tie_breaker_bm_server_profile
+ scale_management_vsi_profile = var.scale_management_vsi_profile
+ login_security_group_name = var.login_security_group_name
+ storage_security_group_name = var.storage_security_group_name
+ compute_security_group_name = var.compute_security_group_name
+ client_security_group_name = var.client_security_group_name
+ gklm_security_group_name = var.gklm_security_group_name
+ ldap_security_group_name = var.ldap_security_group_name
}
module "prepare_tf_input" {
@@ -142,11 +165,11 @@ module "prepare_tf_input" {
enable_dedicated_host = var.enable_dedicated_host
remote_allowed_ips = var.remote_allowed_ips
vpc_name = local.vpc_name
- storage_subnets = local.storage_subnet
- protocol_subnets = local.protocol_subnet
- cluster_subnet_id = local.cluster_subnet
- client_subnets = local.client_subnet
+ compute_subnet_id = local.compute_subnet
login_subnet_id = local.login_subnet
+ client_subnet_id = local.client_subnet
+ storage_subnet_id = local.storage_subnet
+ protocol_subnet_id = local.protocol_subnet
login_instance = var.login_instance
dns_domain_names = var.dns_domain_names
key_management = local.key_management
@@ -178,17 +201,21 @@ module "prepare_tf_input" {
ldap_basedns = var.ldap_basedns
ldap_server_cert = local.ldap_server_cert
ldap_admin_password = local.ldap_admin_password
- ldap_instance_key_pair = local.ldap_instance_key_pair
ldap_user_password = var.ldap_user_password
ldap_user_name = var.ldap_user_name
afm_instances = var.afm_instances
afm_cos_config = var.afm_cos_config
- gklm_instance_key_pair = local.gklm_instance_key_pair
gklm_instances = var.gklm_instances
scale_encryption_type = var.scale_encryption_type
filesystem_config = var.filesystem_config
+ filesets_config = var.filesets_config
+ storage_gui_username = var.storage_gui_username
+ storage_gui_password = var.storage_gui_password
+ compute_gui_username = var.compute_gui_username
+ compute_gui_password = var.compute_gui_password
scale_encryption_admin_password = var.scale_encryption_admin_password
scale_encryption_enabled = var.scale_encryption_enabled
+ key_protect_instance_id = var.key_protect_instance_id
storage_security_group_id = var.storage_security_group_id
custom_file_shares = var.custom_file_shares
existing_bastion_instance_name = var.existing_bastion_instance_name
@@ -199,6 +226,17 @@ module "prepare_tf_input" {
cspm_enabled = var.cspm_enabled
app_config_plan = var.app_config_plan
existing_resource_group = var.existing_resource_group
+ tie_breaker_bm_server_profile = var.tie_breaker_bm_server_profile
+ scale_management_vsi_profile = var.scale_management_vsi_profile
+ login_security_group_name = var.login_security_group_name
+ storage_security_group_name = var.storage_security_group_name
+ compute_security_group_name = var.compute_security_group_name
+ client_security_group_name = var.client_security_group_name
+ gklm_security_group_name = var.gklm_security_group_name
+ ldap_security_group_name = var.ldap_security_group_name
+ bms_boot_drive_encryption = var.bms_boot_drive_encryption
+ scale_afm_bucket_config_details = local.scale_afm_bucket_config_details
+ scale_afm_cos_hmac_key_params = local.scale_afm_cos_hmac_key_params
depends_on = [module.deployer]
}
@@ -226,33 +264,15 @@ module "resource_provisioner" {
depends_on = [module.deployer, module.prepare_tf_input, module.validate_ldap_server_connection]
}
-module "cos" {
- count = var.scheduler == "Scale" && local.enable_afm == true ? 1 : 0
- source = "./modules/cos"
- prefix = "${var.cluster_prefix}-"
- resource_group_id = local.resource_group_ids["service_rg"]
- cos_instance_plan = "standard"
- cos_instance_location = "global"
- cos_instance_service = "cloud-object-storage"
- cos_hmac_role = "Manager"
- new_instance_bucket_hmac = local.new_instance_bucket_hmac
- exstng_instance_new_bucket_hmac = local.exstng_instance_new_bucket_hmac
- exstng_instance_bucket_new_hmac = local.exstng_instance_bucket_new_hmac
- exstng_instance_hmac_new_bucket = local.exstng_instance_hmac_new_bucket
- exstng_instance_bucket_hmac = local.exstng_instance_bucket_hmac
- filesystem = var.storage_instances[*]["filesystem"] != "" ? var.storage_instances[0]["filesystem"] : var.filesystem_config[0]["filesystem"]
- depends_on = [module.landing_zone_vsi]
-}
-
module "file_storage" {
- count = var.enable_deployer == false ? 1 : 0
+ count = var.enable_deployer == false && var.scheduler == "LSF" ? 1 : 0
source = "./modules/file_storage"
zone = var.zones[0] # always the first zone
resource_group_id = var.resource_group_ids["workload_rg"]
file_shares = local.file_shares
encryption_key_crn = local.boot_volume_encryption_key
security_group_ids = local.compute_security_group_id
- subnet_id = local.compute_subnet_id
+ subnet_id = local.compute_subnet
existing_kms_instance_guid = var.existing_kms_instance_guid
skip_iam_share_authorization_policy = var.skip_iam_share_authorization_policy
kms_encryption_enabled = local.kms_encryption_enabled
@@ -289,7 +309,7 @@ module "storage_dns_records" {
}
module "protocol_reserved_ip" {
- count = var.scheduler == "Scale" && var.enable_deployer == false && var.protocol_subnets != null ? 1 : 0
+ count = var.scheduler == "Scale" && var.enable_deployer == false && var.protocol_subnet_id != null ? 1 : 0
source = "./modules/protocol_reserved_ip"
total_reserved_ips = local.protocol_instance_count
subnet_id = [local.protocol_subnets[0].id]
@@ -318,9 +338,10 @@ module "gklm_dns_records" {
depends_on = [module.dns]
}
-resource "time_sleep" "wait_60_seconds" {
- create_duration = "60s"
- depends_on = [module.storage_dns_records, module.protocol_reserved_ip, module.compute_dns_records]
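+# Allow VPC time to finish secondary-NIC attachment and DNS sync-up before inventories are written; the durations are presumably empirical.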
+resource "time_sleep" "wait_for_vsi_syncup" {
+ count = var.enable_deployer == false && var.scheduler == "Scale" && var.storage_type != "persistent" && (can(regex("^ibm-redhat-8-10-minimal-amd64-.*$", (var.storage_instances[*]["image"])[0])) || local.enable_sec_interface_compute || local.enable_sec_interface_storage) ? 1 : 0
+ create_duration = local.enable_sec_interface_compute || local.enable_sec_interface_storage ? "180s" : "300s"
+ depends_on = [module.storage_dns_records, module.protocol_reserved_ip, module.compute_dns_records, module.landing_zone_vsi]
}
module "write_compute_cluster_inventory" {
@@ -357,7 +378,7 @@ module "write_compute_cluster_inventory" {
compute_subnet_crn = local.compute_subnet_crn
kms_encryption_enabled = local.kms_encryption_enabled
boot_volume_encryption_key = var.boot_volume_encryption_key
- depends_on = [time_sleep.wait_60_seconds, module.landing_zone_vsi]
+ depends_on = [time_sleep.wait_for_vsi_syncup, module.landing_zone_vsi]
}
module "write_compute_scale_cluster_inventory" {
@@ -372,12 +393,12 @@ module "write_compute_scale_cluster_inventory" {
vpc_region = jsonencode(local.region)
vpc_availability_zones = var.zones
scale_version = jsonencode(local.scale_version)
- compute_cluster_filesystem_mountpoint = jsonencode(var.scale_compute_cluster_filesystem_mountpoint)
+ compute_cluster_filesystem_mountpoint = jsonencode((var.static_compute_instances[*].filesystem)[0])
storage_cluster_filesystem_mountpoint = jsonencode("None")
filesystem_block_size = jsonencode("None")
- compute_cluster_instance_private_ips = concat((local.enable_sec_interface_compute ? local.secondary_compute_instance_private_ips : local.compute_instance_private_ips), local.compute_mgmt_instance_private_ips)
- compute_cluster_instance_ids = concat((local.enable_sec_interface_compute ? local.secondary_compute_instance_private_ips : local.compute_instance_ids), local.compute_mgmt_instance_ids)
- compute_cluster_instance_names = concat((local.enable_sec_interface_compute ? local.secondary_compute_instance_private_ips : local.compute_instance_names), local.compute_mgmt_instance_names)
+ compute_cluster_instance_private_ips = concat(local.compute_instance_private_ips, local.compute_mgmt_instance_private_ips)
+ compute_cluster_instance_ids = concat(local.compute_instance_ids, local.compute_mgmt_instance_ids)
+ compute_cluster_instance_names = concat(local.compute_instance_names, local.compute_mgmt_instance_names)
compute_cluster_instance_private_dns_ip_map = {}
storage_cluster_instance_ids = []
storage_cluster_instance_private_ips = []
@@ -389,8 +410,8 @@ module "write_compute_scale_cluster_inventory" {
storage_cluster_desc_instance_private_dns_ip_map = {}
storage_cluster_instance_names = []
storage_subnet_cidr = local.enable_mrot_conf ? local.storage_subnet_cidr : jsonencode("")
- compute_subnet_cidr = local.enable_mrot_conf ? local.cluster_subnet_cidr : jsonencode("")
- scale_remote_cluster_clustername = local.enable_mrot_conf ? jsonencode(format("%s.%s", var.cluster_prefix, var.cluster_prefix, var.dns_domain_names["storage"])) : jsonencode("")
+ compute_subnet_cidr = local.enable_mrot_conf ? local.compute_subnet_cidr : jsonencode("")
+ scale_remote_cluster_clustername = local.enable_mrot_conf ? jsonencode(format("%s.%s", var.cluster_prefix, var.dns_domain_names["storage"])) : jsonencode("")
protocol_cluster_instance_names = []
client_cluster_instance_names = []
protocol_cluster_reserved_names = ""
@@ -402,12 +423,12 @@ module "write_compute_scale_cluster_inventory" {
filesystem = jsonencode("")
mountpoint = jsonencode("")
protocol_gateway_ip = jsonencode("")
- filesets = local.fileset_size_map #{}
+ filesets = local.fileset_size_map
afm_cos_bucket_details = []
afm_config_details = []
afm_cluster_instance_names = []
- filesystem_mountpoint = var.scale_encryption_type == "key_protect" ? (var.storage_instances[*]["filesystem"] != "" ? var.storage_instances[*]["filesystem"] : jsonencode(var.filesystem_config[0]["filesystem"])) : jsonencode("")
- depends_on = [time_sleep.wait_60_seconds]
+ filesystem_mountpoint = local.encryption_filesystem_mountpoint
+ depends_on = [time_sleep.wait_for_vsi_syncup, module.landing_zone_vsi]
}
module "write_storage_scale_cluster_inventory" {
@@ -423,15 +444,15 @@ module "write_storage_scale_cluster_inventory" {
vpc_availability_zones = var.zones
scale_version = jsonencode(local.scale_version)
compute_cluster_filesystem_mountpoint = jsonencode("None")
- storage_cluster_filesystem_mountpoint = jsonencode(var.filesystem_config[0]["mount_point"]) #jsonencode(var.storage_instances[count.index].filesystem)
+ storage_cluster_filesystem_mountpoint = jsonencode(local.filesystem_mountpoint)
filesystem_block_size = jsonencode(var.filesystem_config[0]["block_size"])
compute_cluster_instance_ids = []
compute_cluster_instance_private_ips = []
compute_cluster_instance_private_dns_ip_map = {}
compute_cluster_instance_names = []
- storage_cluster_instance_ids = var.storage_type == "persistent" ? concat(local.baremetal_cluster_instance_ids, local.strg_mgmtt_instance_ids, local.tie_breaker_storage_instance_ids) : concat(local.storage_cluster_instance_ids, local.strg_mgmtt_instance_ids, local.tie_breaker_storage_instance_ids)
- storage_cluster_instance_private_ips = var.storage_type == "persistent" ? concat(local.baremetal_cluster_instance_private_ips, local.strg_mgmt_instance_private_ips, local.tie_breaker_storage_instance_private_ips) : concat(local.storage_cluster_instance_private_ips, local.strg_mgmt_instance_private_ips, local.tie_breaker_storage_instance_private_ips)
- storage_cluster_instance_names = var.storage_type == "persistent" ? concat(local.baremetal_cluster_instance_names, local.strg_mgmt_instance_names, local.tie_breaker_storage_instance_names) : concat(local.storage_cluster_instance_names, local.strg_mgmt_instance_names, local.tie_breaker_storage_instance_names)
+ storage_cluster_instance_ids = var.storage_type == "persistent" ? concat(local.baremetal_cluster_instance_ids, local.strg_mgmtt_instance_ids, local.bm_tie_breaker_ids) : concat(local.storage_cluster_instance_ids, local.strg_mgmtt_instance_ids, local.tie_breaker_storage_instance_ids)
+ storage_cluster_instance_private_ips = var.storage_type == "persistent" ? concat(local.baremetal_cluster_instance_private_ips, local.strg_mgmt_instance_private_ips, local.bm_tie_breaker_private_ips) : concat(local.storage_cluster_instance_private_ips, local.strg_mgmt_instance_private_ips, local.tie_breaker_storage_instance_private_ips)
+ storage_cluster_instance_names = var.storage_type == "persistent" ? concat(local.baremetal_cluster_instance_names, local.strg_mgmt_instance_names, local.bm_tie_breaker_names) : concat(local.storage_cluster_instance_names, local.strg_mgmt_instance_names, local.tie_breaker_storage_instance_names)
storage_cluster_with_data_volume_mapping = local.storage_ips_with_vol_mapping[0]
storage_cluster_instance_private_dns_ip_map = {}
storage_cluster_desc_instance_private_ips = local.strg_tie_breaker_private_ips
@@ -439,7 +460,7 @@ module "write_storage_scale_cluster_inventory" {
storage_cluster_desc_data_volume_mapping = local.tie_breaker_ips_with_vol_mapping[0]
storage_cluster_desc_instance_private_dns_ip_map = {}
storage_subnet_cidr = local.enable_mrot_conf ? local.storage_subnet_cidr : jsonencode("")
- compute_subnet_cidr = local.enable_mrot_conf ? local.cluster_subnet_cidr : local.scale_ces_enabled == true ? local.client_subnet_cidr : jsonencode("")
+ compute_subnet_cidr = local.enable_mrot_conf ? local.compute_subnet_cidr : local.scale_ces_enabled == true && local.client_instance_count > 0 ? local.client_subnet_cidr : jsonencode("")
scale_remote_cluster_clustername = local.enable_mrot_conf ? jsonencode(format("%s.%s", var.cluster_prefix, var.dns_domain_names["compute"])) : jsonencode("")
protocol_cluster_instance_names = local.scale_ces_enabled == true ? local.protocol_cluster_instance_names : []
client_cluster_instance_names = []
@@ -450,14 +471,14 @@ module "write_storage_scale_cluster_inventory" {
interface = []
export_ip_pool = local.scale_ces_enabled == true ? values(one(module.protocol_reserved_ip[*].instance_name_ip_map)) : []
filesystem = local.scale_ces_enabled == true ? jsonencode("cesSharedRoot") : jsonencode("")
- mountpoint = local.scale_ces_enabled == true ? jsonencode(var.filesystem_config[0]["mount_point"]) : jsonencode("")
+ mountpoint = local.scale_ces_enabled == true ? jsonencode(local.filesystem_mountpoint) : jsonencode("")
protocol_gateway_ip = jsonencode(local.protocol_subnet_gateway_ip)
filesets = local.fileset_size_map
- afm_cos_bucket_details = local.enable_afm == true ? local.afm_cos_bucket_details : []
- afm_config_details = local.enable_afm == true ? local.afm_cos_config : []
- afm_cluster_instance_names = local.afm_instance_names
- filesystem_mountpoint = var.scale_encryption_type == "key_protect" ? (var.storage_instances[*]["filesystem"] != "" ? var.storage_instances[*]["filesystem"] : jsonencode(var.filesystem_config[0]["filesystem"])) : jsonencode("")
- depends_on = [time_sleep.wait_60_seconds]
+ afm_config_details = var.scale_afm_bucket_config_details
+ afm_cos_bucket_details = var.scale_afm_cos_hmac_key_params
+ afm_cluster_instance_names = local.afm_names_final
+ filesystem_mountpoint = local.encryption_filesystem_mountpoint
+ depends_on = [time_sleep.wait_for_vsi_syncup, module.landing_zone_vsi]
}
module "write_client_scale_cluster_inventory" {
@@ -477,7 +498,7 @@ module "write_client_scale_cluster_inventory" {
compute_cluster_instance_ids = []
compute_cluster_instance_private_ips = []
compute_cluster_instance_private_dns_ip_map = {}
- storage_cluster_filesystem_mountpoint = local.scale_ces_enabled == true ? jsonencode(var.filesystem_config[0]["mount_point"]) : jsonencode("")
+ storage_cluster_filesystem_mountpoint = local.scale_ces_enabled == true ? jsonencode(local.filesystem_mountpoint) : jsonencode("")
storage_cluster_instance_ids = []
storage_cluster_instance_private_ips = []
storage_cluster_with_data_volume_mapping = {}
@@ -507,6 +528,70 @@ module "write_client_scale_cluster_inventory" {
afm_config_details = []
afm_cluster_instance_names = []
filesystem_mountpoint = jsonencode("")
+ depends_on = [time_sleep.wait_for_vsi_syncup, module.landing_zone_vsi]
+}
+
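+# Presumably provisions Key Protect keys and writes the client configuration under "<scale_config_path>/key_protect" for Scale encryption.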
+module "key_protect_scale" {
+ count = var.scale_encryption_enabled == true && var.scale_encryption_type == "key_protect" && var.enable_deployer == false ? 1 : 0
+ source = "./modules/key_protect"
+ key_protect_instance_id = var.key_protect_instance_id != null ? var.key_protect_instance_id : var.existing_kms_instance_guid
+ resource_prefix = var.cluster_prefix
+ vpc_region = local.region
+ scale_config_path = format("%s/key_protect", var.scale_config_path)
+ vpc_storage_cluster_dns_domain = var.dns_domain_names["storage"]
+}
+
+module "ldap_configuration" {
+ count = var.scheduler == "Scale" && var.enable_deployer == false && var.enable_ldap && var.ldap_server == "null" ? 1 : 0
+ source = "./modules/common/ldap_configuration"
+ turn_on = var.enable_ldap
+ clone_path = var.scale_ansible_repo_clone_path
+ create_scale_cluster = var.create_scale_cluster
+ bastion_user = jsonencode(var.bastion_user)
+ write_inventory_complete = module.write_storage_scale_cluster_inventory[0].write_scale_inventory_complete
+ ldap_cluster_prefix = var.cluster_prefix
+ using_jumphost_connection = var.using_jumphost_connection
+ bastion_instance_public_ip = local.bastion_fip
+ bastion_ssh_private_key = local.bastion_ssh_private_key != null ? local.bastion_ssh_private_key : local.bastion_private_key_content
+ ldap_basedns = var.ldap_basedns
+ ldap_admin_password = var.ldap_admin_password
+ ldap_user_name = var.ldap_user_name
+ ldap_user_password = var.ldap_user_password
+ ldap_server = var.enable_ldap ? (var.ldap_server != "null" ? var.ldap_server : local.ldap_instance_private_ips[0]) : null
+ meta_private_key = module.landing_zone_vsi[0].storage_private_key_content
+ depends_on = [module.validate_ldap_server_connection, module.landing_zone_vsi]
+}
+
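+# Presumably renders the host-entry maps from locals into /etc/hosts-style records on every Scale node (VSI and bare metal).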
+module "host_resolution_add" {
+ count = var.scheduler == "Scale" && var.enable_deployer == false ? 1 : 0
+ source = "./modules/host_resolution_add"
+ scheduler = var.scheduler
+ clone_path = var.scale_ansible_repo_clone_path
+ storage_hosts = local.storage_host_entry
+ storage_mgmnt_hosts = local.storage_mgmnt_host_entry
+ storage_tb_hosts = local.storage_tb_host_entry
+ compute_hosts = local.compute_host_entry
+ compute_mgmnt_hosts = local.compute_mgmnt_host_entry
+ client_hosts = local.client_host_entry
+ protocol_hosts = local.protocol_host_entry
+ gklm_hosts = local.gklm_host_entry
+ afm_hosts = local.afm_host_entry
+ storage_bms_hosts = local.storage_bms_host_entry
+ storage_tb_bms_hosts = local.storage_tb_bms_host_entry
+ protocol_bms_hosts = local.protocol_bms_host_entry
+ afm_bms_hosts = local.afm_bms_host_entry
+ domain_names = var.dns_domain_names
+ storage_type = var.storage_type
+ storage_interface = local.bms_interfaces[0]
+ protocol_interface = local.bms_interfaces[1]
+ enable_protocol = local.enable_protocol && var.colocate_protocol_instances ? true : false
+ vpc_region = local.region
+ resource_group = var.resource_group_ids["service_rg"]
+ protocol_subnets = local.enable_protocol ? (length(local.protocol_subnets) > 0 ? local.protocol_subnets[0].id : "") : ""
+ bms_boot_drive_encryption = var.bms_boot_drive_encryption
+ ibmcloud_api_key = var.ibmcloud_api_key
+ scale_encryption_type = var.scale_encryption_type
+ depends_on = [module.landing_zone_vsi]
}
module "compute_cluster_configuration" {
@@ -535,15 +620,16 @@ module "compute_cluster_configuration" {
enable_mrot_conf = local.enable_mrot_conf
enable_ces = false
enable_afm = false
+ scale_encryption_type = var.scale_encryption_type
scale_encryption_enabled = var.scale_encryption_enabled
- scale_encryption_admin_password = var.scale_encryption_admin_password
+ scale_encryption_admin_password = var.scale_encryption_admin_password == null ? "null" : var.scale_encryption_admin_password
scale_encryption_servers = var.scale_encryption_enabled && var.scale_encryption_type == "gklm" ? local.gklm_instance_private_ips : []
enable_ldap = var.enable_ldap
ldap_basedns = var.ldap_basedns
- ldap_server = var.enable_ldap ? local.ldap_instance_private_ips[0] : null
+ ldap_server = var.enable_ldap ? (var.ldap_server != "null" ? var.ldap_server : local.ldap_instance_private_ips[0]) : null
ldap_admin_password = local.ldap_admin_password == "" ? jsonencode(null) : local.ldap_admin_password
- enable_key_protect = var.scale_encryption_type
- depends_on = [module.write_compute_scale_cluster_inventory]
+ enable_key_protect = var.scale_encryption_type == "key_protect" ? "True" : "False"
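+ # The "True"/"False" strings are presumably parsed as Python booleans on the Ansible side.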
+ depends_on = [module.write_compute_scale_cluster_inventory, module.key_protect_scale, module.ldap_configuration, module.host_resolution_add]
}
module "storage_cluster_configuration" {
@@ -555,6 +641,7 @@ module "storage_cluster_configuration" {
inventory_format = var.inventory_format
create_scale_cluster = var.create_scale_cluster
clone_path = var.scale_ansible_repo_clone_path
+ scale_config_path = var.scale_config_path
inventory_path = format("%s/storage_cluster_inventory.json", var.scale_ansible_repo_clone_path)
using_packer_image = var.using_packer_image
using_jumphost_connection = var.using_jumphost_connection
@@ -563,6 +650,7 @@ module "storage_cluster_configuration" {
storage_cluster_gui_password = var.storage_gui_password
colocate_protocol_instances = var.colocate_protocol_instances
is_colocate_protocol_subset = local.is_colocate_protocol_subset
+ bms_boot_drive_encryption = var.bms_boot_drive_encryption
mgmt_memory = local.management_memory
mgmt_vcpus_count = local.management_vcpus_count
mgmt_bandwidth = local.management_bandwidth
@@ -596,20 +684,21 @@ module "storage_cluster_configuration" {
enable_afm = local.enable_afm
scale_encryption_enabled = var.scale_encryption_enabled
scale_encryption_type = var.scale_encryption_type != null ? var.scale_encryption_type : null
- scale_encryption_admin_password = var.scale_encryption_admin_password
+ scale_encryption_admin_password = var.scale_encryption_admin_password == null ? "null" : var.scale_encryption_admin_password
scale_encryption_servers = var.scale_encryption_enabled && var.scale_encryption_type == "gklm" ? local.gklm_instance_private_ips : []
enable_ldap = var.enable_ldap
ldap_basedns = var.ldap_basedns
- ldap_server = var.enable_ldap ? local.ldap_instance_private_ips[0] : null
+ ldap_server = var.enable_ldap ? (var.ldap_server != "null" ? var.ldap_server : local.ldap_instance_private_ips[0]) : null
ldap_admin_password = local.ldap_admin_password == "" ? jsonencode(null) : local.ldap_admin_password
ldap_server_cert = local.ldap_server_cert
- enable_key_protect = var.scale_encryption_type
- depends_on = [module.write_storage_scale_cluster_inventory]
+ enable_key_protect = var.scale_encryption_type == "key_protect" ? "True" : "False"
+ storage_type = var.storage_type
+ depends_on = [module.write_storage_scale_cluster_inventory, module.key_protect_scale, module.ldap_configuration, module.host_resolution_add]
}
module "client_configuration" {
count = var.scheduler == "Scale" && var.enable_deployer == false ? 1 : 0
- source = "./modules/common//client_configuration"
+ source = "./modules/common/client_configuration"
turn_on = (local.client_instance_count > 0 && var.create_separate_namespaces == true && local.scale_ces_enabled == true) ? true : false
create_scale_cluster = var.create_scale_cluster
storage_cluster_create_complete = module.storage_cluster_configuration[0].storage_cluster_create_complete
@@ -619,13 +708,13 @@ module "client_configuration" {
bastion_user = jsonencode(var.bastion_user)
bastion_instance_public_ip = jsonencode(local.bastion_fip)
bastion_ssh_private_key = var.bastion_ssh_private_key
- client_meta_private_key = module.landing_zone_vsi[0].compute_private_key_content
+ client_meta_private_key = module.landing_zone_vsi[0].client_private_key_content
write_inventory_complete = module.write_storage_scale_cluster_inventory[0].write_scale_inventory_complete
enable_ldap = var.enable_ldap
ldap_basedns = var.ldap_basedns
- ldap_server = var.enable_ldap ? jsonencode(local.ldap_instance_private_ips[0]) : jsonencode(null)
+ ldap_server = var.enable_ldap ? (var.ldap_server != "null" ? var.ldap_server : local.ldap_instance_private_ips[0]) : null
ldap_admin_password = local.ldap_admin_password == "" ? jsonencode(null) : local.ldap_admin_password
- depends_on = [module.compute_cluster_configuration, module.storage_cluster_configuration]
+ depends_on = [module.storage_cluster_configuration, module.ldap_configuration, module.host_resolution_add]
}
module "remote_mount_configuration" {
@@ -652,6 +741,67 @@ module "remote_mount_configuration" {
depends_on = [module.compute_cluster_configuration, module.storage_cluster_configuration]
}
+module "invoke_compute_network_playbook" {
+ count = var.scheduler == "Scale" ? 1 : 0
+ source = "./modules/common/network_playbook"
+ turn_on = (var.create_separate_namespaces == true && local.static_compute_instance_count > 0) ? true : false
+ create_scale_cluster = var.create_scale_cluster
+ compute_cluster_create_complete = var.enable_deployer ? false : module.compute_cluster_configuration[0].compute_cluster_create_complete
+ storage_cluster_create_complete = var.enable_deployer ? false : module.storage_cluster_configuration[0].storage_cluster_create_complete
+ inventory_path = format("%s/%s/compute_inventory.ini", var.scale_ansible_repo_clone_path, "ibm-spectrum-scale-install-infra")
+ network_playbook_path = format("%s/%s/collections/ansible_collections/ibm/spectrum_scale/samples/playbook_cloud_network_config.yaml", var.scale_ansible_repo_clone_path, "ibm-spectrum-scale-install-infra")
+ depends_on = [module.compute_cluster_configuration, module.storage_cluster_configuration]
+}
+
+module "invoke_storage_network_playbook" {
+ count = var.scheduler == "Scale" ? 1 : 0
+ source = "./modules/common/network_playbook"
+ turn_on = (var.create_separate_namespaces == true && local.storage_instance_count > 0) ? true : false
+ create_scale_cluster = var.create_scale_cluster
+ compute_cluster_create_complete = var.enable_deployer ? false : module.compute_cluster_configuration[0].compute_cluster_create_complete
+ storage_cluster_create_complete = var.enable_deployer ? false : module.storage_cluster_configuration[0].storage_cluster_create_complete
+ inventory_path = format("%s/%s/storage_inventory.ini", var.scale_ansible_repo_clone_path, "ibm-spectrum-scale-install-infra")
+ network_playbook_path = format("%s/%s/collections/ansible_collections/ibm/spectrum_scale/samples/playbook_cloud_network_config.yaml", var.scale_ansible_repo_clone_path, "ibm-spectrum-scale-install-infra")
+ depends_on = [module.compute_cluster_configuration, module.storage_cluster_configuration]
+}
+
+module "encryption_configuration" {
+ source = "./modules/common/encryption_configuration"
+ count = var.scheduler == "Scale" && var.enable_deployer == false && var.scale_encryption_enabled && var.scale_encryption_type == "gklm" ? 1 : 0
+ turn_on = (var.create_separate_namespaces == true && local.storage_instance_count > 0) ? true : false
+ clone_path = var.scale_ansible_repo_clone_path
+ create_scale_cluster = var.create_scale_cluster
+ meta_private_key = module.landing_zone_vsi[0].storage_private_key_content
+ scale_encryption_type = var.scale_encryption_type != null ? var.scale_encryption_type : null
+ scale_encryption_admin_password = var.scale_encryption_admin_password
+ scale_encryption_servers = var.scale_encryption_enabled && var.scale_encryption_type == "gklm" ? local.gklm_instance_private_ips : []
+ scale_encryption_servers_dns = var.scale_encryption_type == "gklm" ? [for instance in local.gklm_instances : "${instance.name}.${var.dns_domain_names["gklm"]}"] : []
+ scale_cluster_clustername = var.cluster_prefix
+ scale_encryption_admin_default_password = local.scale_encryption_admin_default_password
+ scale_encryption_admin_username = local.scale_encryption_admin_username
+ compute_cluster_create_complete = module.compute_cluster_configuration[0].compute_cluster_create_complete
+ storage_cluster_create_complete = module.storage_cluster_configuration[0].storage_cluster_create_complete
+ remote_mount_create_complete = module.remote_mount_configuration[0].remote_mount_create_complete
+ compute_cluster_encryption = (var.create_separate_namespaces == true && local.static_compute_instance_count > 0) ? true : false
+ storage_cluster_encryption = (var.create_separate_namespaces == true && local.storage_instance_count > 0) ? true : false
+ depends_on = [module.client_configuration, module.compute_cluster_configuration, module.storage_cluster_configuration]
+}
+
+module "key_protect_encryption_configuration" {
+ source = "./modules/common/key_protect_configuration"
+ count = var.scheduler == "Scale" && var.enable_deployer == false && var.scale_encryption_enabled && var.scale_encryption_type == "key_protect" ? 1 : 0
+ turn_on = (var.create_separate_namespaces == true && local.storage_instance_count > 0) ? true : false
+ clone_path = var.scale_ansible_repo_clone_path
+ create_scale_cluster = var.create_scale_cluster
+ scale_encryption_type = var.scale_encryption_type != null ? var.scale_encryption_type : null
+ compute_cluster_create_complete = module.compute_cluster_configuration[0].compute_cluster_create_complete
+ storage_cluster_create_complete = module.storage_cluster_configuration[0].storage_cluster_create_complete
+ remote_mount_create_complete = module.remote_mount_configuration[0].remote_mount_create_complete
+ compute_cluster_encryption = (var.create_separate_namespaces == true && local.static_compute_instance_count > 0) ? true : false
+ storage_cluster_encryption = (var.create_separate_namespaces == true && local.storage_instance_count > 0) ? true : false
+ depends_on = [module.client_configuration, module.compute_cluster_configuration, module.storage_cluster_configuration]
+}
+
module "compute_inventory" {
count = var.enable_deployer == false ? 1 : 0
source = "./modules/inventory"
@@ -684,7 +834,7 @@ module "compute_inventory" {
}
module "ldap_inventory" {
- count = var.enable_deployer == false && var.enable_ldap && local.ldap_server == "null" ? 1 : 0
+ count = var.enable_deployer == false && var.scheduler == "LSF" && var.enable_ldap && local.ldap_server == "null" ? 1 : 0
source = "./modules/inventory"
prefix = var.cluster_prefix
name_mount_path_map = local.fileshare_name_mount_path_map
@@ -700,7 +850,7 @@ module "ldap_inventory" {
}
module "mgmt_inventory_hosts" {
- count = var.enable_deployer == false ? 1 : 0
+ count = var.enable_deployer == false && var.scheduler == "LSF" ? 1 : 0
source = "./modules/inventory_hosts"
hosts = local.mgmt_hosts_ips
inventory_path = local.mgmt_hosts_inventory_path
@@ -709,12 +859,12 @@ module "mgmt_inventory_hosts" {
module "compute_inventory_hosts" {
count = var.enable_deployer == false ? 1 : 0
source = "./modules/inventory_hosts"
- hosts = local.compute_hosts_ips
+ hosts = var.scheduler == "Scale" ? local.all_compute_hosts : local.compute_hosts_ips
inventory_path = local.compute_hosts_inventory_path
}
module "login_inventory_host" {
- count = var.enable_deployer == false ? 1 : 0
+ count = var.enable_deployer == false && var.scheduler == "LSF" ? 1 : 0
source = "./modules/inventory_hosts"
hosts = local.login_host_ip
inventory_path = local.login_host_inventory_path
@@ -741,6 +891,50 @@ module "ldap_inventory_hosts" {
inventory_path = local.ldap_hosts_inventory_path
}
+module "client_inventory_hosts" {
+ count = var.enable_deployer == false && var.scheduler == "Scale" ? 1 : 0
+ source = "./modules/inventory_hosts"
+ hosts = local.client_hosts
+ inventory_path = local.client_hosts_inventory_path
+}
+
+module "protocol_inventory_hosts" {
+ count = var.enable_deployer == false && var.scheduler == "Scale" ? 1 : 0
+ source = "./modules/inventory_hosts"
+ hosts = local.protocol_hosts
+ inventory_path = local.protocol_hosts_inventory_path
+}
+
+module "afm_inventory_hosts" {
+ count = var.enable_deployer == false && var.scheduler == "Scale" ? 1 : 0
+ source = "./modules/inventory_hosts"
+ hosts = local.afm_hosts
+ inventory_path = local.afm_hosts_inventory_path
+}
+
+module "gklm_inventory_hosts" {
+ count = var.enable_deployer == false && var.scheduler == "Scale" ? 1 : 0
+ source = "./modules/inventory_hosts"
+ hosts = local.gklm_hosts
+ inventory_path = local.gklm_hosts_inventory_path
+}
+
+module "storage_inventory_hosts" {
+ count = var.enable_deployer == false && var.scheduler == "Scale" ? 1 : 0
+ source = "./modules/inventory_hosts"
+ hosts = local.all_storage_hosts
+ inventory_path = local.storage_hosts_inventory_path
+}
+
+module "host_resolution_remove" {
+ count = var.scheduler == "Scale" && var.enable_deployer == false ? 1 : 0
+ turn_on = var.create_separate_namespaces
+ source = "./modules/host_resolution_remove"
+ create_scale_cluster = var.create_scale_cluster
+ clone_path = var.scale_ansible_repo_clone_path
+ depends_on = [module.compute_cluster_configuration, module.storage_cluster_configuration, module.remote_mount_configuration, module.encryption_configuration, module.invoke_compute_network_playbook, module.invoke_storage_network_playbook]
+}
+
module "compute_playbook" {
count = var.enable_deployer == false ? 1 : 0
source = "./modules/playbook"
@@ -787,16 +981,17 @@ module "cloud_monitoring_instance_creation" {
cloud_logs_as_atracker_target = var.observability_atracker_enable && (var.observability_atracker_target_type == "cloudlogs") ? true : false
cloud_logs_data_bucket = var.cloud_logs_data_bucket
cloud_metrics_data_bucket = var.cloud_metrics_data_bucket
- tags = ["lsf", var.cluster_prefix]
+ tags = [local.scheduler_lowcase, var.cluster_prefix]
}
+# SCC Workload Protection instance
module "scc_workload_protection" {
source = "./modules/security/sccwp"
resource_group_name = var.existing_resource_group != "null" ? var.existing_resource_group : "${var.cluster_prefix}-service-rg"
prefix = var.cluster_prefix
region = local.region
sccwp_service_plan = var.sccwp_service_plan
- resource_tags = ["lsf", var.cluster_prefix]
+ resource_tags = [local.scheduler_lowcase, var.cluster_prefix]
enable_deployer = var.enable_deployer
sccwp_enable = var.sccwp_enable
cspm_enabled = var.cspm_enabled
diff --git a/modules/ansible-roles/roles/lsf_mgmt_config/templates/user_data.sh b/modules/ansible-roles/roles/lsf_mgmt_config/templates/user_data.sh
index 4d5336e1..b20e516e 100644
--- a/modules/ansible-roles/roles/lsf_mgmt_config/templates/user_data.sh
+++ b/modules/ansible-roles/roles/lsf_mgmt_config/templates/user_data.sh
@@ -209,6 +209,21 @@ LSF_GPU_AUTOCONFIG=Y
LSB_GPU_NEW_SYNTAX=extend
EOF
+# Support rc_account resource to enable RC_ACCOUNT policy
+sed -i '$ a LSF_LOCAL_RESOURCES=\"[resource icgen2host]\"' "$LSF_CONF_FILE"
+
+# shellcheck disable=SC2154
+sed -i "s/\(LSF_LOCAL_RESOURCES=.*\)\"/\1 [resourcemap ${rc_account}*rc_account]\"/" "$LSF_CONF_FILE"
+
+# Add additional local resources if needed
+instance_id=$(dmidecode | grep Family | cut -d ' ' -f 2 | head -1)
+if [ -n "$instance_id" ]; then
+  sed -i "s/\(LSF_LOCAL_RESOURCES=.*\)\"/\1 [resourcemap ${instance_id}\*instanceID]\"/" "$LSF_CONF_FILE"
+  echo "Updated LSF_LOCAL_RESOURCES in $LSF_CONF_FILE successfully, added [resourcemap ${instance_id}*instanceID]" >> "$logfile"
+else
+  echo "Cannot get instance ID" >> "$logfile"
+fi
+
# source profile.lsf
echo "source ${LSF_CONF}/profile.lsf" >>~/.bashrc
echo "source ${LSF_CONF}/profile.lsf" >>"$LDAP_DIR"/.bashrc
diff --git a/modules/ansible-roles/roles/lsf_post_config/tasks/cluster_validation.yml b/modules/ansible-roles/roles/lsf_post_config/tasks/cluster_validation.yml
index f16b9361..23649f60 100644
--- a/modules/ansible-roles/roles/lsf_post_config/tasks/cluster_validation.yml
+++ b/modules/ansible-roles/roles/lsf_post_config/tasks/cluster_validation.yml
@@ -41,3 +41,12 @@
register: lsfd_restart_result
changed_when: "'lsfd restarted' in lsfd_restart_result.stdout"
when: inventory_hostname == groups['management_nodes'][0]
+
+- name: Duplicate Logs | Remove leftover logs from the LSF log directory if any remain
+ shell: |
+ rm -rf /opt/ibm/lsflogs/* /opt/ibm/lsflogs/.* 2>/dev/null || true
+ args:
+ warn: false
+ ignore_errors: true
+ become: true
+ when: inventory_hostname in groups['management_nodes']
diff --git a/modules/ansible-roles/roles/lsf_post_config/tasks/configure_shared_folders.yml b/modules/ansible-roles/roles/lsf_post_config/tasks/configure_shared_folders.yml
index fde333e0..db9dc697 100644
--- a/modules/ansible-roles/roles/lsf_post_config/tasks/configure_shared_folders.yml
+++ b/modules/ansible-roles/roles/lsf_post_config/tasks/configure_shared_folders.yml
@@ -1,11 +1,5 @@
---
-# - name: Log directories | Remove duplicate logs
-# ansible.builtin.shell: >
-# find /opt/ibm/lsflogs -type f ! -name "*.{{ dns_domain_names }}" ! -name "ibmcloudgen2*" -delete
-# become: true
-# when: inventory_hostname in groups['management_nodes']
-
- name: Log directories | Setup shared base directories
file:
path: "{{ item.path }}"
@@ -114,3 +108,12 @@
group: root
become: yes
when: inventory_hostname == groups['login_node'][0]
+
+- name: Duplicate Logs | Remove duplicate logs from LSF log directory
+ shell: |
+ rm -rf /opt/ibm/lsflogs/* /opt/ibm/lsflogs/.* 2>/dev/null || true
+ args:
+ warn: false
+ ignore_errors: true
+ become: true
+ when: inventory_hostname in groups['management_nodes']
diff --git a/modules/ansible-roles/roles/lsf_post_config/tasks/reload_services.yml b/modules/ansible-roles/roles/lsf_post_config/tasks/reload_services.yml
index 53d5712e..a5200d1c 100644
--- a/modules/ansible-roles/roles/lsf_post_config/tasks/reload_services.yml
+++ b/modules/ansible-roles/roles/lsf_post_config/tasks/reload_services.yml
@@ -5,7 +5,7 @@
service:
name: lsfd
state: restarted
- when: inventory_hostname == groups['management_nodes'][0]
+ when: inventory_hostname in groups['mgmt_compute_nodes']
# Restart the NetworkManager service on all nodes
- name: Restart NetworkManager
diff --git a/modules/baremetal/datasource.tf b/modules/baremetal/datasource.tf
index 71e65999..358eae54 100644
--- a/modules/baremetal/datasource.tf
+++ b/modules/baremetal/datasource.tf
@@ -1,8 +1,13 @@
-data "ibm_resource_group" "existing_resource_group" {
- name = var.existing_resource_group
-}
+# data "ibm_resource_group" "existing_resource_group" {
+# name = var.existing_resource_group
+# }
+
+# data "ibm_is_image" "storage" {
+# count = length(var.storage_servers)
+# name = var.storage_servers[count.index]["image"]
+# }
-data "ibm_is_image" "storage" {
+data "ibm_is_bare_metal_server_profile" "itself" {
count = length(var.storage_servers)
- name = var.storage_servers[count.index]["image"]
+ name = var.storage_servers[count.index]["profile"]
}
diff --git a/modules/baremetal/locals.tf b/modules/baremetal/locals.tf
index 6fba7bc8..39150624 100644
--- a/modules/baremetal/locals.tf
+++ b/modules/baremetal/locals.tf
@@ -1,14 +1,49 @@
# define variables
locals {
- prefix = var.prefix
- storage_image_id = data.ibm_is_image.storage[*].id
- storage_node_name = format("%s-%s", local.prefix, "strg")
- resource_group_id = data.ibm_resource_group.existing_resource_group.id
- bms_interfaces = ["ens1", "ens2"]
- #storage_ssh_keys = [for name in var.storage_ssh_keys : data.ibm_is_ssh_key.storage[name].id]
+ # storage_image_id = data.ibm_is_image.storage[*].id
+ # resource_group_id = data.ibm_resource_group.existing_resource_group.id
+ # bms_interfaces = ["ens1", "ens2"]
+ # bms_interfaces = ["eth0", "eth1"]
+ # storage_ssh_keys = [for name in var.storage_ssh_keys : data.ibm_is_ssh_key.storage[name].id]
# TODO: explore (DA always keep it true)
#skip_iam_authorization_policy = true
- storage_server_count = sum(var.storage_servers[*]["count"])
- enable_storage = local.storage_server_count > 0
+ #storage_server_count = sum(var.storage_servers[*]["count"])
+ #enable_storage = local.storage_server_count > 0
+
+ raw_bm_details = flatten([
+ for module_instance in module.storage_baremetal : [
+ for server_key, server_details in module_instance.baremetal_servers :
+ {
+ id = server_details.bms_server_id
+ name = server_details.bms_server_name
+ ipv4_address = try(server_details.bms_server_primary_ip, null)
+ bms_primary_vni_id = try(server_details.bms_primary_vni_id, null)
+ bms_server_secondary_ip = try(server_details.bms_server_secondary_ip, null)
+ bms_secondary_vni_id = try(server_details.bms_secondary_vni_id, null)
+ }
+ ]
+ ])
+
+ bm_server_name = flatten(local.raw_bm_details[*].name)
+ # bm_serve_ips = flatten([for server in local.raw_bm_details : server[*].ipv4_address])
+
+ disk0_interface_type = (data.ibm_is_bare_metal_server_profile.itself[*].disks[0].supported_interface_types[0].default)[0]
+ disk_count = (data.ibm_is_bare_metal_server_profile.itself[*].disks[1].quantity[0].value)[0]
+
+ # Determine starting disk based on disk0 interface type
+  nvme_start_disk = local.disk0_interface_type == "sata" ? 0 : 1
+
+ # Generate NVMe device list up to 36 disks
+ all_disks = [
+ "/dev/nvme0n1", "/dev/nvme1n1", "/dev/nvme2n1", "/dev/nvme3n1", "/dev/nvme4n1", "/dev/nvme5n1",
+ "/dev/nvme6n1", "/dev/nvme7n1", "/dev/nvme8n1", "/dev/nvme9n1", "/dev/nvme10n1", "/dev/nvme11n1",
+ "/dev/nvme12n1", "/dev/nvme13n1", "/dev/nvme14n1", "/dev/nvme15n1", "/dev/nvme16n1", "/dev/nvme17n1",
+ "/dev/nvme18n1", "/dev/nvme19n1", "/dev/nvme20n1", "/dev/nvme21n1", "/dev/nvme22n1", "/dev/nvme23n1",
+ "/dev/nvme24n1", "/dev/nvme25n1", "/dev/nvme26n1", "/dev/nvme27n1", "/dev/nvme28n1", "/dev/nvme29n1",
+ "/dev/nvme30n1", "/dev/nvme31n1", "/dev/nvme32n1", "/dev/nvme33n1", "/dev/nvme34n1", "/dev/nvme35n1"
+ ]
+
+ # Select only the required number of disks
+ selected_disks = slice(local.all_disks, local.nvme_start_disk, local.disk_count + local.nvme_start_disk)
}
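Note: the 36-entry device list above could also be generated rather than hardcoded; a hedged alternative sketch (the *_gen names are hypothetical, not part of this change):

  locals {
    # Same devices as the literal list: /dev/nvme0n1 .. /dev/nvme35n1
    all_disks_gen      = [for i in range(36) : format("/dev/nvme%dn1", i)]
    selected_disks_gen = slice(local.all_disks_gen, local.nvme_start_disk, local.disk_count + local.nvme_start_disk)
  }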
diff --git a/modules/baremetal/main.tf b/modules/baremetal/main.tf
index 9f77bbb3..6767d4a6 100644
--- a/modules/baremetal/main.tf
+++ b/modules/baremetal/main.tf
@@ -1,23 +1,27 @@
-module "storage_key" {
- count = local.enable_storage ? 1 : 0
- source = "./../key"
-}
+# module "storage_key" {
+# count = local.enable_storage ? 1 : 0
+# source = "./../key"
+# }
module "storage_baremetal" {
- source = "terraform-ibm-modules/bare-metal-vpc/ibm"
- version = "1.1.0"
- count = length(var.storage_servers)
- server_count = var.storage_servers[count.index]["count"]
- prefix = count.index == 0 ? local.storage_node_name : format("%s-%s", local.storage_node_name, count.index)
- profile = var.storage_servers[count.index]["profile"]
- image_id = local.storage_image_id[count.index]
- create_security_group = false
- subnet_ids = var.storage_subnets
- ssh_key_ids = var.storage_ssh_keys
- bandwidth = var.bandwidth
- allowed_vlan_ids = var.allowed_vlan_ids
- access_tags = null
- resource_group_id = local.resource_group_id
- security_group_ids = var.security_group_ids
- user_data = data.template_file.storage_user_data.rendered
+ source = "terraform-ibm-modules/bare-metal-vpc/ibm"
+ version = "1.3.0"
+ count = length(var.storage_servers)
+ server_count = var.storage_servers[count.index]["count"]
+ prefix = var.prefix
+ profile = var.storage_servers[count.index]["profile"]
+ image_id = var.image_id
+ create_security_group = false
+ subnet_ids = var.storage_subnets
+ ssh_key_ids = var.storage_ssh_keys
+ bandwidth = var.sapphire_rapids_profile_check == true ? 200000 : 100000
+ allowed_vlan_ids = var.allowed_vlan_ids
+ access_tags = null
+ resource_group_id = var.existing_resource_group
+ security_group_ids = var.security_group_ids
+ user_data = var.user_data
+ secondary_vni_enabled = var.secondary_vni_enabled
+ secondary_subnet_ids = length(var.protocol_subnets) == 0 ? [] : [var.protocol_subnets[0].id]
+ secondary_security_group_ids = var.secondary_security_group_ids
+ tpm_mode = "tpm_2"
}
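Note: the bandwidth ternary above relies on the caller setting var.sapphire_rapids_profile_check correctly. If IBM's profile naming can be assumed (x3 families indicating Sapphire Rapids), the flag could instead be derived from the profile string; a hedged sketch, with the naming assumption stated in the comment:

  locals {
    # Assumption: x3-generation profiles (e.g. "bx3d-metal-...") use Sapphire Rapids,
    # while x2 profiles use Cascade Lake. Verify against the IBM Cloud profile catalog.
    is_sapphire_rapids = anytrue([
      for s in var.storage_servers : can(regex("^[a-z]+3", s.profile))
    ])
  }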
diff --git a/modules/baremetal/outputs.tf b/modules/baremetal/outputs.tf
index 1f429c38..b3446c89 100644
--- a/modules/baremetal/outputs.tf
+++ b/modules/baremetal/outputs.tf
@@ -4,11 +4,19 @@ output "list" {
for module_instance in module.storage_baremetal : [
for server_key, server_details in module_instance.baremetal_servers :
{
- id = server_details.bms_server_id
- name = server_details.bms_server_name
- ipv4_address = try(server_details.bms_server_ip, "")
- vni_id = server_details.bms_vni_id
+ id = server_details.bms_server_id
+ name = server_details.bms_server_name
+ ipv4_address = try(server_details.bms_server_primary_ip, null)
+ bms_primary_vni_id = try(server_details.bms_primary_vni_id, null)
+ bms_server_secondary_ip = try(server_details.bms_server_secondary_ip, null)
+ bms_secondary_vni_id = try(server_details.bms_secondary_vni_id, null)
}
]
])
+ depends_on = [module.storage_baremetal]
+}
+
+output "instance_ips_with_vol_mapping" {
+ value = { for instance_details in local.bm_server_name : instance_details => local.selected_disks }
+  description = "Map of bare metal server names to their selected NVMe disk device paths."
}
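A hedged usage sketch for the new instance_ips_with_vol_mapping output; module.storage_bm is a hypothetical caller-side name:

  locals {
    # Render one "server: /dev/nvme...,..." line per bare metal server,
    # e.g. for templating into a Scale inventory file.
    disk_lines = [
      for name, disks in module.storage_bm.instance_ips_with_vol_mapping :
      format("%s: %s", name, join(",", disks))
    ]
  }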
diff --git a/modules/baremetal/template_files.tf b/modules/baremetal/template_files.tf
deleted file mode 100644
index a7117b26..00000000
--- a/modules/baremetal/template_files.tf
+++ /dev/null
@@ -1,10 +0,0 @@
-data "template_file" "storage_user_data" {
- template = file("${path.module}/templates/storage_user_data.tpl")
- vars = {
- bastion_public_key_content = var.bastion_public_key_content != null ? var.bastion_public_key_content : ""
- storage_public_key_content = local.enable_storage ? module.storage_key[0].public_key_content : ""
- storage_private_key_content = local.enable_storage ? module.storage_key[0].private_key_content : ""
- storage_interfaces = local.bms_interfaces[0]
- storage_dns_domain = var.dns_domain_names["storage"]
- }
-}
diff --git a/modules/baremetal/templates/storage_user_data.tpl b/modules/baremetal/templates/storage_user_data.tpl
deleted file mode 100644
index 31f15e6b..00000000
--- a/modules/baremetal/templates/storage_user_data.tpl
+++ /dev/null
@@ -1,120 +0,0 @@
-#!/usr/bin/bash
-
-###################################################
-# Copyright (C) IBM Corp. 2023 All Rights Reserved.
-# Licensed under the Apache License v2.0
-###################################################
-
-#!/usr/bin/env bash
-exec > >(tee /var/log/ibm_spectrumscale_user-data.log)
-
-if grep -E -q "CentOS|Red Hat" /etc/os-release
-then
- USER=vpcuser
-elif grep -q "Ubuntu" /etc/os-release
-then
- USER=ubuntu
-fi
-sed -i -e "s/^/no-port-forwarding,no-agent-forwarding,no-X11-forwarding,command=\"echo \'Please client as the user \\\\\"$USER\\\\\" rather than the user \\\\\"root\\\\\".\';echo;sleep 5; exit 142\" /" /root/.ssh/authorized_keys
-
-# input parameters
-echo "${bastion_public_key_content}" >> ~/.ssh/authorized_keys
-echo "${storage_public_key_content}" >> ~/.ssh/authorized_keys
-echo "StrictHostKeyChecking no" >> ~/.ssh/config
-echo "${storage_private_key_content}" > ~/.ssh/id_rsa
-chmod 600 ~/.ssh/id_rsa
-
-# if grep -q "Red Hat" /etc/os-release
-if grep -q "CentOS|Red Hat" /etc/os-release
-then
- USER=vpcuser
- REQ_PKG_INSTALLED=0
- if grep -q "platform:el9" /etc/os-release
- then
- PACKAGE_MGR=dnf
- package_list="python3 kernel-devel-$(uname -r) kernel-headers-$(uname -r) firewalld numactl make gcc-c++ elfutils-libelf-devel bind-utils iptables-nft nfs-utils elfutils elfutils-devel python3-dnf-plugin-versionlock"
- elif grep -q "platform:el8" /etc/os-release
- then
- PACKAGE_MGR=dnf
- package_list="python38 kernel-devel-$(uname -r) kernel-headers-$(uname -r) firewalld numactl jq make gcc-c++ elfutils-libelf-devel bind-utils iptables nfs-utils elfutils elfutils-devel python3-dnf-plugin-versionlock"
- else
- PACKAGE_MGR=yum
- package_list="python3 kernel-devel-$(uname -r) kernel-headers-$(uname -r) firewalld numactl make gcc-c++ elfutils-libelf-devel bind-utils iptables nfs-utils elfutils elfutils-devel yum-plugin-versionlock"
- fi
-
- RETRY_LIMIT=5
- retry_count=0
- all_pkg_installed=1
-
- while [[ $all_pkg_installed -ne 0 && $retry_count -lt $RETRY_LIMIT ]]
- do
- # Install all required packages
- echo "INFO: Attempting to install packages"
- $PACKAGE_MGR install -y $package_list
-
- # Check to ensure packages are installed
- pkg_installed=0
- for pkg in $package_list
- do
- pkg_query=$($PACKAGE_MGR list installed $pkg)
- pkg_installed=$(($? + $pkg_installed))
- done
- if [[ $pkg_installed -ne 0 ]]
- then
- # The minimum required packages have not been installed.
- echo "WARN: Required packages not installed. Sleeping for 60 seconds and retrying..."
- touch /var/log/scale-rerun-package-install
- echo "INFO: Cleaning and repopulating repository data"
- $PACKAGE_MGR clean all
- $PACKAGE_MGR makecache
- sleep 60
- else
- all_pkg_installed=0
- fi
- retry_count=$(( $retry_count+1 ))
- done
-
-elif grep -q "Ubuntu" /etc/os-release
-then
- USER=ubuntu
-fi
-
-yum update --security -y
-yum versionlock $package_list
-yum versionlock list
-echo 'export PATH=$PATH:/usr/lpp/mmfs/bin' >> /root/.bashrc
-
-echo "###########################################################################################" >> /etc/motd
-echo "# You have logged in to Storage BareMetal Server. #" >> /etc/motd
-echo "# - Server storage is temporary storage that's available only while your Baremetal #" >> /etc/motd
-echo "# server is running. #" >> /etc/motd
-echo "# - Data on the drive is unrecoverable after server shutdown, disruptive maintenance, #" >> /etc/motd
-echo "# or hardware failure. #" >> /etc/motd
-echo "# #" >> /etc/motd
-echo "# Refer: https://cloud.ibm.com/docs/vpc?topic=vpc-bare-metal-servers-storage #" >> /etc/motd
-echo "###########################################################################################" >> /etc/motd
-
-echo "DOMAIN=${storage_dns_domain}" >> "/etc/sysconfig/network-scripts/ifcfg-${storage_interfaces}"
-echo "MTU=9000" >> "/etc/sysconfig/network-scripts/ifcfg-${storage_interfaces}"
-chage -I -1 -m 0 -M 99999 -E -1 -W 14 vpcuser
-sleep 120
-systemctl restart NetworkManager
-
-systemctl stop firewalld
-firewall-offline-cmd --zone=public --add-port=1191/tcp
-firewall-offline-cmd --zone=public --add-port=4444/tcp
-firewall-offline-cmd --zone=public --add-port=4444/udp
-firewall-offline-cmd --zone=public --add-port=4739/udp
-firewall-offline-cmd --zone=public --add-port=4739/tcp
-firewall-offline-cmd --zone=public --add-port=9084/tcp
-firewall-offline-cmd --zone=public --add-port=9085/tcp
-firewall-offline-cmd --zone=public --add-service=http
-firewall-offline-cmd --zone=public --add-service=https
-firewall-offline-cmd --zone=public --add-port=2049/tcp
-firewall-offline-cmd --zone=public --add-port=2049/udp
-firewall-offline-cmd --zone=public --add-port=111/tcp
-firewall-offline-cmd --zone=public --add-port=111/udp
-firewall-offline-cmd --zone=public --add-port=30000-61000/tcp
-firewall-offline-cmd --zone=public --add-port=30000-61000/udp
-systemctl start firewalld
-systemctl enable firewalld
diff --git a/modules/baremetal/variables.tf b/modules/baremetal/variables.tf
index f24d57c6..a6c8241b 100644
--- a/modules/baremetal/variables.tf
+++ b/modules/baremetal/variables.tf
@@ -18,6 +18,10 @@ variable "prefix" {
}
}
+variable "image_id" {
+  description = "Image ID used to provision the bare metal storage servers."
+ type = string
+}
##############################################################################
# Scale Storage Variables
##############################################################################
@@ -48,23 +52,21 @@ variable "storage_servers" {
object({
profile = string
count = number
- image = string
- filesystem = string
+ filesystem = optional(string)
})
)
default = [{
profile = "cx2d-metal-96x192"
count = 0
- image = "ibm-redhat-8-10-minimal-amd64-4"
filesystem = "/gpfs/fs1"
}]
description = "Number of BareMetal Servers to be launched for storage cluster."
}
-variable "bandwidth" {
- description = "The allocated bandwidth (in Mbps) for the bare metal server to manage network traffic. If unset, default values apply."
- type = number
- default = 100000
+variable "sapphire_rapids_profile_check" {
+ type = bool
+ default = false
+  description = "Set to true when the profile uses Intel Sapphire Rapids processors (x3); leave false for Cascade Lake (x2). Drives the bandwidth selected for the bare metal servers."
}
variable "allowed_vlan_ids" {
@@ -79,31 +81,31 @@ variable "security_group_ids" {
default = []
}
-##############################################################################
-# Access Variables
-##############################################################################
+variable "secondary_security_group_ids" {
+  description = "A list of secondary security group IDs."
+ type = list(string)
+ default = []
+}
-variable "bastion_public_key_content" {
+variable "secondary_vni_enabled" {
+ description = "Whether to enable a secondary virtual network interface"
+ type = bool
+ default = false
+}
+
+variable "user_data" {
+  description = "User data script content passed to the bare metal servers."
type = string
- sensitive = true
default = null
- description = "Bastion security group id."
}
-##############################################################################
-# DNS Template Variables
-##############################################################################
-
-variable "dns_domain_names" {
- type = object({
- compute = string
- storage = string
- protocol = string
- })
- default = {
- compute = "comp.com"
- storage = "strg.com"
- protocol = "ces.com"
- }
- description = "IBM Cloud HPC DNS domain names."
+variable "protocol_subnets" {
+ type = list(object({
+ name = string
+ id = string
+ zone = string
+ cidr = string
+ }))
+ default = []
+  description = "Protocol subnets; the first subnet is used for the secondary virtual network interface."
}
diff --git a/modules/baremetal/version.tf b/modules/baremetal/version.tf
index b87bee94..886be456 100644
--- a/modules/baremetal/version.tf
+++ b/modules/baremetal/version.tf
@@ -10,9 +10,5 @@ terraform {
source = "IBM-Cloud/ibm"
version = ">= 1.68.1, < 2.0.0"
}
- template = {
- source = "hashicorp/template"
- version = "~> 2"
- }
}
}
diff --git a/modules/common/client_configuration/client_configuration.tf b/modules/common/client_configuration/client_configuration.tf
index fe3ad0bb..7142341a 100644
--- a/modules/common/client_configuration/client_configuration.tf
+++ b/modules/common/client_configuration/client_configuration.tf
@@ -5,11 +5,23 @@ resource "local_sensitive_file" "write_client_meta_private_key" {
file_permission = "0600"
}
+resource "null_resource" "scale_host_play" {
+ count = (tobool(var.turn_on) == true && tobool(var.storage_cluster_create_complete) == true && tobool(var.create_scale_cluster) == true) ? 1 : 0
+ provisioner "local-exec" {
+ interpreter = ["/bin/bash", "-c"]
+ command = "sudo ansible-playbook -f 50 -i ${local.scale_all_inventory} -l 'client' -e @${local.scale_cluster_hosts} -e @${local.domain_name_file} ${local.scale_hostentry_playbook_path}"
+ }
+
+ triggers = {
+ build = timestamp()
+ }
+}
+
resource "null_resource" "prepare_client_inventory_using_jumphost_connection" {
count = (tobool(var.turn_on) == true && tobool(var.storage_cluster_create_complete) == true && tobool(var.using_jumphost_connection) == true && tobool(var.create_scale_cluster) == true) ? 1 : 0
provisioner "local-exec" {
interpreter = ["/bin/bash", "-c"]
- command = "python3 ${local.ansible_inv_script_path} --client_tf_inv_path ${var.client_inventory_path} --install_infra_path ${var.clone_path} --instance_private_key ${local.client_private_key} --bastion_user ${var.bastion_user} --bastion_ip ${var.bastion_instance_public_ip} --bastion_ssh_private_key ${var.bastion_ssh_private_key} --enable_ldap ${var.enable_ldap} --ldap_basedns ${var.ldap_basedns} --ldap_server ${var.ldap_server} --ldap_admin_password ${var.ldap_admin_password}"
+ command = "python3 ${local.ansible_inv_script_path} --client_tf_inv_path ${var.client_inventory_path} --install_infra_path ${var.clone_path} --instance_private_key ${local.client_private_key} --bastion_user ${var.bastion_user} --bastion_ip ${var.bastion_instance_public_ip} --bastion_ssh_private_key ${var.bastion_ssh_private_key} --enable_ldap ${var.enable_ldap} --ldap_basedns ${var.ldap_basedns} --ldap_server ${local.ldap_server} --ldap_admin_password ${var.ldap_admin_password}"
}
triggers = {
build = timestamp()
@@ -21,7 +33,7 @@ resource "null_resource" "prepare_client_inventory" {
count = (tobool(var.turn_on) == true && tobool(var.storage_cluster_create_complete) == true && tobool(var.using_jumphost_connection) == false && tobool(var.create_scale_cluster) == true) ? 1 : 0
provisioner "local-exec" {
interpreter = ["/bin/bash", "-c"]
- command = "python3 ${local.ansible_inv_script_path} --client_tf_inv_path ${var.client_inventory_path} --install_infra_path ${var.clone_path} --instance_private_key ${local.client_private_key} --enable_ldap ${var.enable_ldap} --ldap_basedns ${var.ldap_basedns} --ldap_server ${var.ldap_server} --ldap_admin_password ${var.ldap_admin_password}"
+ command = "python3 ${local.ansible_inv_script_path} --client_tf_inv_path ${var.client_inventory_path} --install_infra_path ${var.clone_path} --instance_private_key ${local.client_private_key} --enable_ldap ${var.enable_ldap} --ldap_basedns ${var.ldap_basedns} --ldap_server ${local.ldap_server} --ldap_admin_password ${var.ldap_admin_password}"
}
triggers = {
build = timestamp()
@@ -33,7 +45,7 @@ resource "null_resource" "perform_client_configuration" {
count = (tobool(var.turn_on) == true && tobool(var.storage_cluster_create_complete) == true && tobool(var.create_scale_cluster) == true) ? 1 : 0
provisioner "local-exec" {
interpreter = ["/bin/bash", "-c"]
- command = "ansible-playbook -i ${local.client_inventory_path} ${local.client_playbook}"
+ command = "sudo ansible-playbook -i ${local.client_inventory_path} ${local.client_playbook}"
}
triggers = {
build = timestamp()
diff --git a/modules/common/client_configuration/locals.tf b/modules/common/client_configuration/locals.tf
index 58b2457d..e3bb1c64 100644
--- a/modules/common/client_configuration/locals.tf
+++ b/modules/common/client_configuration/locals.tf
@@ -1,8 +1,13 @@
locals {
- client_inventory_path = format("%s/%s/client_inventory.ini", var.clone_path, "ibm-spectrum-scale-install-infra")
- client_playbook = format("%s/%s/client_cloud_playbook.yaml", var.clone_path, "ibm-spectrum-scale-install-infra")
- scripts_path = replace(path.module, "client_configuration", "scripts")
- ansible_inv_script_path = format("%s/prepare_client_inv.py", local.scripts_path)
- client_private_key = format("%s/client_key/id_rsa", var.clone_path)
+ client_inventory_path = format("%s/%s/client_inventory.ini", var.clone_path, "ibm-spectrum-scale-install-infra")
+ client_playbook = format("%s/%s/client_cloud_playbook.yaml", var.clone_path, "ibm-spectrum-scale-install-infra")
+ scripts_path = replace(path.module, "client_configuration", "scripts")
+ ansible_inv_script_path = format("%s/prepare_client_inv.py", local.scripts_path)
+ client_private_key = format("%s/client_key/id_rsa", var.clone_path)
+ ldap_server = jsonencode(var.ldap_server)
+ scale_hostentry_playbook_path = format("%s/%s/scale_host_entry_play.yml", var.clone_path, "ibm-spectrum-scale-install-infra")
+ scale_cluster_hosts = format("%s/%s/scale_cluster_hosts.yml", var.clone_path, "ibm-spectrum-scale-install-infra")
+ scale_all_inventory = format("%s/%s/scale_all_inventory.ini", var.clone_path, "ibm-spectrum-scale-install-infra")
+ domain_name_file = format("%s/%s/domain_names.yml", var.clone_path, "ibm-spectrum-scale-install-infra")
}
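Note: jsonencode(var.ldap_server) above keeps the value safe when interpolated into the ansible-playbook command line: a real hostname renders as a quoted token, and a null renders as the literal null. A minimal illustration (the local name is hypothetical):

  locals {
    # jsonencode("10.241.0.5") => "\"10.241.0.5\""; jsonencode(null) => "null"
    ldap_server_arg = jsonencode(var.ldap_server)
  }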
diff --git a/modules/common/compute_configuration/compute_configuration.tf b/modules/common/compute_configuration/compute_configuration.tf
index 49a273a4..ccae57e4 100644
--- a/modules/common/compute_configuration/compute_configuration.tf
+++ b/modules/common/compute_configuration/compute_configuration.tf
@@ -26,6 +26,18 @@ resource "local_sensitive_file" "write_meta_private_key" {
file_permission = "0600"
}
+resource "null_resource" "scale_host_play" {
+ count = (tobool(var.turn_on) == true && tobool(var.write_inventory_complete) == true && tobool(var.create_scale_cluster) == true) ? 1 : 0
+ provisioner "local-exec" {
+ interpreter = ["/bin/bash", "-c"]
+ command = "sudo ansible-playbook -f 50 -i ${local.scale_all_inventory} -l 'compute' -e @${local.scale_cluster_hosts} -e @${local.domain_name_file} ${local.scale_hostentry_playbook_path}"
+ }
+
+ triggers = {
+ build = timestamp()
+ }
+}
+
resource "null_resource" "prepare_ansible_inventory_using_jumphost_connection" {
count = (tobool(var.turn_on) == true && tobool(var.write_inventory_complete) == true && tobool(var.using_jumphost_connection) == true && tobool(var.scale_encryption_enabled) == false) ? 1 : 0
provisioner "local-exec" {
@@ -42,7 +54,7 @@ resource "null_resource" "prepare_ansible_inventory_using_jumphost_connection_en
count = (tobool(var.turn_on) == true && tobool(var.write_inventory_complete) == true && tobool(var.using_jumphost_connection) == true && tobool(var.scale_encryption_enabled) == true) ? 1 : 0
provisioner "local-exec" {
interpreter = ["/bin/bash", "-c"]
- command = "python3 ${local.ansible_inv_script_path} --tf_inv_path ${var.inventory_path} --install_infra_path ${var.clone_path} --instance_private_key ${local.compute_private_key} --bastion_user ${var.bastion_user} --bastion_ip ${var.bastion_instance_public_ip} --bastion_ssh_private_key ${var.bastion_ssh_private_key} --using_packer_image ${var.using_packer_image} --using_rest_initialization ${var.using_rest_initialization} --gui_username ${var.compute_cluster_gui_username} --gui_password ${var.compute_cluster_gui_password} --enable_mrot_conf ${local.enable_mrot_conf} --enable_ces ${local.enable_ces} --scale_encryption_enabled ${var.scale_encryption_enabled} --scale_encryption_servers ${local.scale_encryption_servers} --scale_encryption_admin_password ${var.scale_encryption_admin_password} --enable_ldap ${var.enable_ldap} --ldap_basedns ${var.ldap_basedns} --ldap_server ${local.ldap_server} --ldap_admin_password ${var.ldap_admin_password} --comp_memory ${var.comp_memory} --comp_vcpus_count ${var.comp_vcpus_count} --comp_bandwidth ${var.comp_bandwidth} --enable_afm ${local.enable_afm} --enable_key_protect ${local.enable_key_protect}"
+ command = "python3 ${local.ansible_inv_script_path} --tf_inv_path ${var.inventory_path} --install_infra_path ${var.clone_path} --instance_private_key ${local.compute_private_key} --bastion_user ${var.bastion_user} --bastion_ip ${var.bastion_instance_public_ip} --bastion_ssh_private_key ${var.bastion_ssh_private_key} --using_packer_image ${var.using_packer_image} --using_rest_initialization ${var.using_rest_initialization} --gui_username ${var.compute_cluster_gui_username} --gui_password ${var.compute_cluster_gui_password} --enable_mrot_conf ${local.enable_mrot_conf} --enable_ces ${local.enable_ces} --scale_encryption_enabled ${var.scale_encryption_enabled} --scale_encryption_servers ${local.scale_encryption_servers} --scale_encryption_admin_password ${var.scale_encryption_admin_password} --scale_encryption_type ${var.scale_encryption_type} --enable_ldap ${var.enable_ldap} --ldap_basedns ${var.ldap_basedns} --ldap_server ${local.ldap_server} --ldap_admin_password ${var.ldap_admin_password} --comp_memory ${var.comp_memory} --comp_vcpus_count ${var.comp_vcpus_count} --comp_bandwidth ${var.comp_bandwidth} --enable_afm ${local.enable_afm} --enable_key_protect ${local.enable_key_protect}"
}
depends_on = [local_file.create_compute_tuning_parameters, local_sensitive_file.write_meta_private_key]
triggers = {
@@ -66,7 +78,7 @@ resource "null_resource" "prepare_ansible_inventory_encryption" {
count = (tobool(var.turn_on) == true && tobool(var.write_inventory_complete) == true && tobool(var.using_jumphost_connection) == false && tobool(var.scale_encryption_enabled) == true) ? 1 : 0
provisioner "local-exec" {
interpreter = ["/bin/bash", "-c"]
- command = "python3 ${local.ansible_inv_script_path} --tf_inv_path ${var.inventory_path} --install_infra_path ${var.clone_path} --instance_private_key ${local.compute_private_key} --using_packer_image ${var.using_packer_image} --using_rest_initialization ${var.using_rest_initialization} --gui_username ${var.compute_cluster_gui_username} --gui_password ${var.compute_cluster_gui_password} --enable_mrot_conf ${local.enable_mrot_conf} --enable_ces ${local.enable_ces} --scale_encryption_enabled ${var.scale_encryption_enabled} --scale_encryption_servers ${local.scale_encryption_servers} --scale_encryption_admin_password ${var.scale_encryption_admin_password} --enable_ldap ${var.enable_ldap} --ldap_basedns ${var.ldap_basedns} --ldap_server ${local.ldap_server} --ldap_admin_password ${var.ldap_admin_password} --comp_memory ${var.comp_memory} --comp_vcpus_count ${var.comp_vcpus_count} --comp_bandwidth ${var.comp_bandwidth} --enable_afm ${local.enable_afm} --enable_key_protect ${local.enable_key_protect}"
+ command = "python3 ${local.ansible_inv_script_path} --tf_inv_path ${var.inventory_path} --install_infra_path ${var.clone_path} --instance_private_key ${local.compute_private_key} --using_packer_image ${var.using_packer_image} --using_rest_initialization ${var.using_rest_initialization} --gui_username ${var.compute_cluster_gui_username} --gui_password ${var.compute_cluster_gui_password} --enable_mrot_conf ${local.enable_mrot_conf} --enable_ces ${local.enable_ces} --scale_encryption_enabled ${var.scale_encryption_enabled} --scale_encryption_servers ${local.scale_encryption_servers} --scale_encryption_admin_password ${var.scale_encryption_admin_password} --scale_encryption_type ${var.scale_encryption_type} --enable_ldap ${var.enable_ldap} --ldap_basedns ${var.ldap_basedns} --ldap_server ${local.ldap_server} --ldap_admin_password ${var.ldap_admin_password} --comp_memory ${var.comp_memory} --comp_vcpus_count ${var.comp_vcpus_count} --comp_bandwidth ${var.comp_bandwidth} --enable_afm ${local.enable_afm} --enable_key_protect ${local.enable_key_protect}"
}
depends_on = [local_file.create_compute_tuning_parameters, local_sensitive_file.write_meta_private_key]
triggers = {
@@ -96,7 +108,7 @@ resource "null_resource" "perform_scale_deployment" {
count = (tobool(var.turn_on) == true && tobool(var.write_inventory_complete) == true && tobool(var.create_scale_cluster) == true) ? 1 : 0
provisioner "local-exec" {
interpreter = ["/bin/bash", "-c"]
- command = "ansible-playbook -f 32 -i ${local.compute_inventory_path} ${local.compute_playbook_path} --extra-vars \"scale_version=${var.scale_version}\" --extra-vars \"scale_install_directory_pkg_path=${var.spectrumscale_rpms_path}\""
+ command = "sudo ansible-playbook -f 32 -i ${local.compute_inventory_path} ${local.compute_playbook_path} --extra-vars \"scale_version=${var.scale_version}\" --extra-vars \"scale_install_directory_pkg_path=${var.spectrumscale_rpms_path}\""
}
depends_on = [time_sleep.wait_60_seconds, null_resource.wait_for_ssh_availability, null_resource.prepare_ansible_inventory, null_resource.prepare_ansible_inventory_using_jumphost_connection, null_resource.prepare_ansible_inventory_encryption, null_resource.prepare_ansible_inventory_using_jumphost_connection_encryption]
triggers = {
diff --git a/modules/common/compute_configuration/locals.tf b/modules/common/compute_configuration/locals.tf
index c58a6db3..d70691b7 100644
--- a/modules/common/compute_configuration/locals.tf
+++ b/modules/common/compute_configuration/locals.tf
@@ -1,15 +1,19 @@
locals {
- scripts_path = replace(path.module, "compute_configuration", "scripts")
- ansible_inv_script_path = var.inventory_format == "ini" ? format("%s/prepare_scale_inv_ini.py", local.scripts_path) : format("%s/prepare_scale_inv_json.py", local.scripts_path)
- wait_for_ssh_script_path = format("%s/wait_for_ssh_availability.py", local.scripts_path)
- scale_tuning_config_path = format("%s/%s", var.clone_path, "computesncparams.profile")
- compute_private_key = format("%s/compute_key/id_rsa", var.clone_path) #tfsec:ignore:GEN002
- compute_inventory_path = format("%s/%s/compute_inventory.ini", var.clone_path, "ibm-spectrum-scale-install-infra")
- compute_playbook_path = format("%s/%s/compute_cloud_playbook.yaml", var.clone_path, "ibm-spectrum-scale-install-infra")
- scale_encryption_servers = jsonencode(var.scale_encryption_servers)
- enable_mrot_conf = var.enable_mrot_conf ? "True" : "False"
- enable_ces = var.enable_ces ? "True" : "False"
- enable_afm = var.enable_afm ? "True" : "False"
- enable_key_protect = var.enable_key_protect == "key_protect" ? "True" : "False"
- ldap_server = jsonencode(var.ldap_server)
+ scripts_path = replace(path.module, "compute_configuration", "scripts")
+ ansible_inv_script_path = var.inventory_format == "ini" ? format("%s/prepare_scale_inv_ini.py", local.scripts_path) : format("%s/prepare_scale_inv_json.py", local.scripts_path)
+ wait_for_ssh_script_path = format("%s/wait_for_ssh_availability.py", local.scripts_path)
+ scale_tuning_config_path = format("%s/%s", var.clone_path, "computesncparams.profile")
+ compute_private_key = format("%s/compute_key/id_rsa", var.clone_path) #tfsec:ignore:GEN002
+ compute_inventory_path = format("%s/%s/compute_inventory.ini", var.clone_path, "ibm-spectrum-scale-install-infra")
+ compute_playbook_path = format("%s/%s/compute_cloud_playbook.yaml", var.clone_path, "ibm-spectrum-scale-install-infra")
+ scale_encryption_servers = jsonencode(var.scale_encryption_servers)
+ enable_mrot_conf = var.enable_mrot_conf ? "True" : "False"
+ enable_ces = var.enable_ces ? "True" : "False"
+ enable_afm = var.enable_afm ? "True" : "False"
+ enable_key_protect = var.scale_encryption_enabled && var.enable_key_protect == "True" ? "True" : "False"
+ ldap_server = jsonencode(var.ldap_server)
+ scale_hostentry_playbook_path = format("%s/%s/scale_host_entry_play.yml", var.clone_path, "ibm-spectrum-scale-install-infra")
+ scale_cluster_hosts = format("%s/%s/scale_cluster_hosts.yml", var.clone_path, "ibm-spectrum-scale-install-infra")
+ scale_all_inventory = format("%s/%s/scale_all_inventory.ini", var.clone_path, "ibm-spectrum-scale-install-infra")
+ domain_name_file = format("%s/%s/domain_names.yml", var.clone_path, "ibm-spectrum-scale-install-infra")
}
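Note: enable_key_protect above compares a string flag against "True" because the value is ultimately forwarded to a Python inventory script. If the upstream casing ever varies, normalizing first keeps the check robust; a hedged sketch (the _norm local is hypothetical):

  locals {
    enable_key_protect_norm = (var.scale_encryption_enabled && lower(tostring(var.enable_key_protect)) == "true") ? "True" : "False"
  }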
diff --git a/modules/common/compute_configuration/variables.tf b/modules/common/compute_configuration/variables.tf
index 13c9fd1e..ea7abb58 100644
--- a/modules/common/compute_configuration/variables.tf
+++ b/modules/common/compute_configuration/variables.tf
@@ -110,6 +110,11 @@ variable "scale_encryption_enabled" {
description = "To enable the encryption for the filesystem. Select true or false"
}
+variable "scale_encryption_type" {
+ type = string
+ description = "To enable filesystem encryption, specify either 'key_protect' or 'gklm'. If neither is specified, the default value will be 'null' and encryption is disabled"
+}
+
variable "scale_encryption_admin_password" {
type = string
description = "Password that is used for performing administrative operations for the GKLM.The password must contain at least 8 characters and at most 20 characters. For a strong password, at least three alphabetic characters are required, with at least one uppercase and one lowercase letter. Two numbers, and at least one special character from this(~@_+:). Make sure that the password doesn't include the username. Visit this [page](https://www.ibm.com/docs/en/gklm/3.0.1?topic=roles-password-policy) to know more about password policy of GKLM. "
diff --git a/modules/common/encryption_configuration/locals.tf b/modules/common/encryption_configuration/locals.tf
new file mode 100644
index 00000000..ad1d4bc2
--- /dev/null
+++ b/modules/common/encryption_configuration/locals.tf
@@ -0,0 +1,13 @@
+locals {
+ gklm_private_key = format("%s/gklm_key/id_rsa", var.clone_path)
+ scale_encryption_servers = jsonencode(var.scale_encryption_servers)
+ scale_encryption_servers_dns = jsonencode(var.scale_encryption_servers_dns)
+ compute_inventory_path = format("%s/%s/compute_inventory.ini", var.clone_path, "ibm-spectrum-scale-install-infra")
+ storage_inventory_path = format("%s/%s/storage_inventory.ini", var.clone_path, "ibm-spectrum-scale-install-infra")
+ encryption_gklm_playbook = format("%s/%s/encryption_gklm_playbook.yaml", var.clone_path, "ibm-spectrum-scale-install-infra")
+ encryption_cluster_playbook = format("%s/%s/encryption_cluster_playbook.yaml", var.clone_path, "ibm-spectrum-scale-install-infra")
+ scale_hostentry_playbook_path = format("%s/%s/scale_host_entry_play.yml", var.clone_path, "ibm-spectrum-scale-install-infra")
+ scale_cluster_hosts = format("%s/%s/scale_cluster_hosts.yml", var.clone_path, "ibm-spectrum-scale-install-infra")
+ scale_all_inventory = format("%s/%s/scale_all_inventory.ini", var.clone_path, "ibm-spectrum-scale-install-infra")
+ domain_name_file = format("%s/%s/domain_names.yml", var.clone_path, "ibm-spectrum-scale-install-infra")
+}
diff --git a/modules/common/encryption_configuration/main.tf b/modules/common/encryption_configuration/main.tf
new file mode 100644
index 00000000..cc042827
--- /dev/null
+++ b/modules/common/encryption_configuration/main.tf
@@ -0,0 +1,55 @@
+
+resource "local_sensitive_file" "write_meta_private_key" {
+ count = (tobool(var.turn_on) == true && var.scale_encryption_type == "gklm") ? 1 : 0
+ content = var.meta_private_key
+ filename = local.gklm_private_key
+ file_permission = "0600"
+}
+
+resource "null_resource" "scale_host_play" {
+ count = (tobool(var.turn_on) == true && tobool(var.create_scale_cluster) == true && var.scale_encryption_type == "gklm") ? 1 : 0
+ provisioner "local-exec" {
+ interpreter = ["/bin/bash", "-c"]
+ command = "sudo ansible-playbook -f 50 -i ${local.scale_all_inventory} -l 'gklm' -e @${local.scale_cluster_hosts} -e @${local.domain_name_file} ${local.scale_hostentry_playbook_path}"
+ }
+
+ triggers = {
+ build = timestamp()
+ }
+}
+
+resource "null_resource" "perform_encryption_prepare" {
+ count = (tobool(var.turn_on) == true && tobool(var.create_scale_cluster) == true && var.scale_encryption_type == "gklm") ? 1 : 0
+ provisioner "local-exec" {
+ interpreter = ["/bin/bash", "-c"]
+ command = "sudo /usr/local/bin/ansible-playbook -f 32 ${local.encryption_gklm_playbook} -e scale_cluster_clustername=${var.scale_cluster_clustername} -e ansible_ssh_private_key_file=${local.gklm_private_key} -e scale_encryption_admin_default_password=${var.scale_encryption_admin_default_password} -e scale_encryption_admin_password=${var.scale_encryption_admin_password} -e scale_encryption_admin_user=${var.scale_encryption_admin_username} -e '{\"scale_encryption_servers_list\": ${local.scale_encryption_servers}}'"
+ }
+ depends_on = [local_sensitive_file.write_meta_private_key]
+ triggers = {
+ build = timestamp()
+ }
+}
+
+resource "null_resource" "perform_encryption_storage" {
+ count = (tobool(var.turn_on) == true && tobool(var.storage_cluster_encryption) == true && tobool(var.storage_cluster_create_complete) == true && tobool(var.remote_mount_create_complete) == true && tobool(var.create_scale_cluster) == true && var.scale_encryption_type == "gklm") ? 1 : 0
+ provisioner "local-exec" {
+ interpreter = ["/bin/bash", "-c"]
+ command = "sudo /usr/local/bin/ansible-playbook -f 32 -i ${local.storage_inventory_path} ${local.encryption_cluster_playbook} -e '{\"scale_encryption_servers_dns\": ${local.scale_encryption_servers_dns}}'"
+ }
+ depends_on = [null_resource.perform_encryption_prepare]
+ triggers = {
+ build = timestamp()
+ }
+}
+
+resource "null_resource" "perform_encryption_compute" {
+ count = (tobool(var.turn_on) == true && tobool(var.compute_cluster_encryption) == true && tobool(var.compute_cluster_create_complete) == true && tobool(var.remote_mount_create_complete) == true && tobool(var.create_scale_cluster) == true && var.scale_encryption_type == "gklm") ? 1 : 0
+ provisioner "local-exec" {
+ interpreter = ["/bin/bash", "-c"]
+ command = "sudo /usr/local/bin/ansible-playbook -f 32 -i ${local.compute_inventory_path} ${local.encryption_cluster_playbook} -e '{\"scale_encryption_servers_dns\": ${local.scale_encryption_servers_dns}}'"
+ }
+ depends_on = [null_resource.perform_encryption_prepare, null_resource.perform_encryption_storage]
+ triggers = {
+ build = timestamp()
+ }
+}
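Note: the triggers = { build = timestamp() } blocks above deliberately re-run the provisioners on every apply. Where a re-run should happen only when inputs change, a content-derived trigger is the alternative; a hedged sketch (the resource name is hypothetical):

  resource "null_resource" "perform_encryption_prepare_alt" {
    triggers = {
      servers = jsonencode(var.scale_encryption_servers)
      cluster = var.scale_cluster_clustername
    }
    # provisioner omitted for brevity; same local-exec command as perform_encryption_prepare
  }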
diff --git a/modules/cos/main.tf b/modules/common/encryption_configuration/outputs.tf
similarity index 100%
rename from modules/cos/main.tf
rename to modules/common/encryption_configuration/outputs.tf
diff --git a/modules/common/encryption_configuration/variables.tf b/modules/common/encryption_configuration/variables.tf
new file mode 100644
index 00000000..36223165
--- /dev/null
+++ b/modules/common/encryption_configuration/variables.tf
@@ -0,0 +1,78 @@
+variable "turn_on" {
+ type = string
+ description = "To turn on the null resources based on conditions."
+}
+
+variable "clone_path" {
+ type = string
+ description = "Scale repo clone path"
+}
+
+variable "create_scale_cluster" {
+ type = string
+  description = "Enables scale cluster configuration."
+}
+
+variable "meta_private_key" {
+ type = string
+ description = "Meta private key."
+}
+
+variable "scale_cluster_clustername" {
+ type = string
+ description = "Name of the cluster."
+}
+
+variable "scale_encryption_servers" {
+ type = list(string)
+  description = "GKLM encryption server IPs."
+}
+
+variable "scale_encryption_servers_dns" {
+ type = list(string)
+ description = "GKLM encryption server hostnames."
+}
+
+variable "scale_encryption_admin_default_password" {
+ type = string
+  description = "The default GKLM administrator password configured during GKLM installation; it is reset to the user-supplied admin password."
+}
+
+variable "scale_encryption_admin_username" {
+ type = string
+ description = "The default Admin username for Security Key Lifecycle Manager(GKLM)."
+}
+
+variable "scale_encryption_admin_password" {
+ type = string
+  description = "Password used for performing administrative operations for GKLM. It must contain at least 8 and at most 20 characters: at least three alphabetic characters (including one uppercase and one lowercase letter), two numbers, and at least one special character from ~@_+:. The password must not include the username. See this [page](https://www.ibm.com/docs/en/gklm/3.0.1?topic=roles-password-policy) for the GKLM password policy."
+}
+
+variable "scale_encryption_type" {
+ type = string
+ description = "To enable filesystem encryption, specify either 'key_protect' or 'gklm'. If neither is specified, the default value will be 'null' and encryption is disabled"
+}
+
+variable "compute_cluster_create_complete" {
+ type = bool
+ description = "Status of the compute cluster complete"
+}
+
+variable "storage_cluster_create_complete" {
+ type = bool
+ description = "Status of the compute cluster complete"
+}
+variable "remote_mount_create_complete" {
+ type = bool
+ description = "Status of the compute cluster complete"
+}
+
+variable "compute_cluster_encryption" {
+ type = bool
+ description = "Status of the compute cluster complete"
+}
+
+variable "storage_cluster_encryption" {
+ type = bool
+ description = "Status of the compute cluster complete"
+}
diff --git a/modules/common/encryption_configuration/version.tf b/modules/common/encryption_configuration/version.tf
new file mode 100644
index 00000000..4ba00afc
--- /dev/null
+++ b/modules/common/encryption_configuration/version.tf
@@ -0,0 +1,18 @@
+##############################################################################
+# Terraform Providers
+##############################################################################
+
+terraform {
+ required_version = ">= 1.9.0"
+ # Use "greater than or equal to" range for root level modules
+ required_providers {
+ local = {
+ source = "hashicorp/local"
+ version = "~> 2"
+ }
+ null = {
+ source = "hashicorp/null"
+ version = ">= 3.0.0"
+ }
+ }
+}
diff --git a/modules/common/key_protect_configuration/locals.tf b/modules/common/key_protect_configuration/locals.tf
new file mode 100644
index 00000000..695bb5a4
--- /dev/null
+++ b/modules/common/key_protect_configuration/locals.tf
@@ -0,0 +1,7 @@
+locals {
+ compute_inventory_path = format("%s/%s/compute_inventory.ini", var.clone_path, "ibm-spectrum-scale-install-infra")
+ storage_inventory_path = format("%s/%s/storage_inventory.ini", var.clone_path, "ibm-spectrum-scale-install-infra")
+ compute_kp_encryption_playbook = format("%s/%s/compute_kp_encryption_playbook.yaml", var.clone_path, "ibm-spectrum-scale-install-infra")
+ storage_kp_encryption_playbook = format("%s/%s/storage_kp_encryption_playbook.yaml", var.clone_path, "ibm-spectrum-scale-install-infra")
+ gpfs_restart_playbook_path = format("%s/%s/scale_gpfs_restart.yml", var.clone_path, "ibm-spectrum-scale-install-infra")
+}
diff --git a/modules/common/key_protect_configuration/main.tf b/modules/common/key_protect_configuration/main.tf
new file mode 100644
index 00000000..81ecf4ed
--- /dev/null
+++ b/modules/common/key_protect_configuration/main.tf
@@ -0,0 +1,24 @@
+resource "null_resource" "perform_encryption_storage" {
+ count = (tobool(var.turn_on) == true && tobool(var.storage_cluster_encryption) == true && tobool(var.storage_cluster_create_complete) == true && tobool(var.remote_mount_create_complete) == true && tobool(var.create_scale_cluster) == true && var.scale_encryption_type == "key_protect") ? 1 : 0
+ provisioner "local-exec" {
+ interpreter = ["/bin/bash", "-c"]
+ command = "sudo /usr/local/bin/ansible-playbook -f 32 -i ${local.storage_inventory_path} ${local.storage_kp_encryption_playbook}"
+ }
+}
+
+resource "null_resource" "perform_encryption_compute" {
+ count = (tobool(var.turn_on) == true && tobool(var.compute_cluster_encryption) == true && tobool(var.compute_cluster_create_complete) == true && tobool(var.remote_mount_create_complete) == true && tobool(var.create_scale_cluster) == true && var.scale_encryption_type == "key_protect") ? 1 : 0
+ provisioner "local-exec" {
+ interpreter = ["/bin/bash", "-c"]
+ command = "sudo /usr/local/bin/ansible-playbook -f 32 -i ${local.compute_inventory_path} ${local.compute_kp_encryption_playbook}"
+ }
+}
+
+resource "null_resource" "perform_encryption_gpfs_restart" {
+ count = (tobool(var.turn_on) == true && tobool(var.compute_cluster_encryption) == true && tobool(var.compute_cluster_create_complete) == true && tobool(var.remote_mount_create_complete) == true && tobool(var.create_scale_cluster) == true && var.scale_encryption_type == "key_protect") ? 1 : 0
+ provisioner "local-exec" {
+ interpreter = ["/bin/bash", "-c"]
+ command = "sudo /usr/local/bin/ansible-playbook -f 32 -i ${local.compute_inventory_path} ${local.gpfs_restart_playbook_path}"
+ }
+ depends_on = [null_resource.perform_encryption_compute]
+}
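
All three count expressions above repeat the same base condition. A possible factoring, shown only as a behavior-preserving sketch (not part of this diff):

    locals {
      # Gate shared by every Key Protect encryption play in this module.
      kp_ready = (tobool(var.turn_on) && tobool(var.remote_mount_create_complete)
        && tobool(var.create_scale_cluster) && var.scale_encryption_type == "key_protect")
    }

    # The storage play's count would then shrink to:
    #   count = local.kp_ready && tobool(var.storage_cluster_encryption) && tobool(var.storage_cluster_create_complete) ? 1 : 0
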
diff --git a/modules/common/key_protect_configuration/outputs.tf b/modules/common/key_protect_configuration/outputs.tf
new file mode 100644
index 00000000..e69de29b
diff --git a/modules/common/key_protect_configuration/variables.tf b/modules/common/key_protect_configuration/variables.tf
new file mode 100644
index 00000000..16c4ed87
--- /dev/null
+++ b/modules/common/key_protect_configuration/variables.tf
@@ -0,0 +1,43 @@
+variable "turn_on" {
+ type = string
+ description = "To turn on the null resources based on conditions."
+}
+
+variable "clone_path" {
+ type = string
+ description = "Scale repo clone path"
+}
+
+variable "create_scale_cluster" {
+ type = string
+ description = "Eenables scale cluster configuration."
+}
+
+variable "scale_encryption_type" {
+ type = string
+ description = "To enable filesystem encryption, specify either 'key_protect' or 'gklm'. If neither is specified, the default value will be 'null' and encryption is disabled"
+}
+
+variable "compute_cluster_create_complete" {
+ type = bool
+ description = "Status of the compute cluster complete"
+}
+
+variable "storage_cluster_create_complete" {
+ type = bool
+ description = "Status of the compute cluster complete"
+}
+variable "remote_mount_create_complete" {
+ type = bool
+ description = "Status of the compute cluster complete"
+}
+
+variable "compute_cluster_encryption" {
+ type = bool
+ description = "Status of the compute cluster complete"
+}
+
+variable "storage_cluster_encryption" {
+ type = bool
+ description = "Status of the compute cluster complete"
+}
diff --git a/modules/common/key_protect_configuration/version.tf b/modules/common/key_protect_configuration/version.tf
new file mode 100644
index 00000000..8f7d5d9c
--- /dev/null
+++ b/modules/common/key_protect_configuration/version.tf
@@ -0,0 +1,14 @@
+##############################################################################
+# Terraform Providers
+##############################################################################
+
+terraform {
+ required_version = ">= 1.9.0"
+ # Use "greater than or equal to" range for root level modules
+ required_providers {
+ null = {
+ source = "hashicorp/null"
+ version = ">= 3.0.0"
+ }
+ }
+}
diff --git a/modules/common/ldap_configuration/ldap_configuration.tf b/modules/common/ldap_configuration/ldap_configuration.tf
deleted file mode 100644
index d315ca41..00000000
--- a/modules/common/ldap_configuration/ldap_configuration.tf
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- Ansible playbook to enable scnryption using ldap.
-*/
-
-resource "local_sensitive_file" "write_meta_private_key" {
- count = (tobool(var.turn_on) == true) ? 1 : 0
- content = var.meta_private_key
- filename = local.ldap_private_key
- file_permission = "0600"
-}
-
-resource "null_resource" "prepare_ansible_inventory" {
- count = (tobool(var.turn_on) == true && tobool(var.write_inventory_complete) == true && tobool(var.using_jumphost_connection) == false) ? 1 : 0
- provisioner "local-exec" {
- interpreter = ["/bin/bash", "-c"]
- command = "python3 ${var.script_path} --install_infra_path ${var.clone_path} --instance_private_key ${local.ldap_private_key} --ldap_nodes ${local.ldap_server} --ldap_basedns ${var.ldap_basedns} --ldap_admin_password ${var.ldap_admin_password} --ldap_user_name ${var.ldap_user_name} --ldap_user_password ${var.ldap_user_password} --resource_prefix ${var.ldap_cluster_prefix}"
- }
- depends_on = [local_sensitive_file.write_meta_private_key]
- triggers = {
- build = timestamp()
- }
-}
-
-resource "null_resource" "prepare_ansible_inventory_using_jumphost_connection" {
- count = (tobool(var.turn_on) == true && tobool(var.write_inventory_complete) == true && tobool(var.using_jumphost_connection) == true) ? 1 : 0
- provisioner "local-exec" {
- interpreter = ["/bin/bash", "-c"]
- command = "python3 ${var.script_path} --install_infra_path ${var.clone_path} --bastion_user ${var.bastion_user} --bastion_ip ${var.bastion_instance_public_ip} --bastion_ssh_private_key ${var.bastion_ssh_private_key} --instance_private_key ${local.ldap_private_key} --ldap_nodes ${local.ldap_server} --ldap_basedns ${var.ldap_basedns} --ldap_admin_password ${var.ldap_admin_password} --ldap_user_name ${var.ldap_user_name} --ldap_user_password ${var.ldap_user_password} --resource_prefix ${var.ldap_cluster_prefix}"
- }
- depends_on = [local_sensitive_file.write_meta_private_key]
- triggers = {
- build = timestamp()
- }
-}
-
-resource "null_resource" "perform_ldap_prepare" {
- count = (tobool(var.turn_on) == true && tobool(var.create_scale_cluster) == true) ? 1 : 0
- provisioner "local-exec" {
- interpreter = ["/bin/bash", "-c"]
- command = "/usr/local/bin/ansible-playbook -f 32 -i ${local.ldap_inventory_path} ${local.ldap_configure_playbook} -e ldap_server=${local.ldap_server}"
- }
- depends_on = [local_sensitive_file.write_meta_private_key, null_resource.prepare_ansible_inventory, null_resource.prepare_ansible_inventory_using_jumphost_connection]
- triggers = {
- build = timestamp()
- }
-}
diff --git a/modules/common/ldap_configuration/locals.tf b/modules/common/ldap_configuration/locals.tf
index 2f78aafc..8cf21d0a 100644
--- a/modules/common/ldap_configuration/locals.tf
+++ b/modules/common/ldap_configuration/locals.tf
@@ -1,4 +1,6 @@
locals {
+ scripts_path = replace(path.module, "ldap_configuration", "scripts")
+ ansible_inv_script_path = format("%s/prepare_ldap_inv.py", local.scripts_path)
ldap_private_key = format("%s/ldap_key/id_rsa", var.clone_path)
ldap_server = jsonencode(var.ldap_server)
ldap_inventory_path = format("%s/%s/ldap_inventory.ini", var.clone_path, "ibm-spectrum-scale-install-infra")
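
For reference, how the two new locals resolve at plan time (a sketch, assuming the module lives at modules/common/ldap_configuration as the repo layout suggests):

    # path.module                   => "modules/common/ldap_configuration"
    # local.scripts_path            => "modules/common/scripts"
    # local.ansible_inv_script_path => "modules/common/scripts/prepare_ldap_inv.py"

This derives the inventory script's location from the module's own path, replacing the removed script_path variable (see the variables.tf hunk below).
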
diff --git a/modules/common/ldap_configuration/main.tf b/modules/common/ldap_configuration/main.tf
index e69de29b..2976a831 100644
--- a/modules/common/ldap_configuration/main.tf
+++ b/modules/common/ldap_configuration/main.tf
@@ -0,0 +1,46 @@
+/*
+ LDAP Configurations and Ansible Plays
+*/
+
+resource "local_sensitive_file" "write_meta_private_key" {
+ count = (tobool(var.turn_on) == true) ? 1 : 0
+ content = var.meta_private_key
+ filename = local.ldap_private_key
+ file_permission = "0600"
+}
+
+resource "null_resource" "prepare_ansible_inventory" {
+ count = (tobool(var.turn_on) == true && tobool(var.write_inventory_complete) == true && tobool(var.using_jumphost_connection) == false) ? 1 : 0
+ provisioner "local-exec" {
+ interpreter = ["/bin/bash", "-c"]
+ command = "python3 ${local.ansible_inv_script_path} --install_infra_path ${var.clone_path} --instance_private_key ${local.ldap_private_key} --ldap_nodes ${local.ldap_server} --ldap_basedns ${var.ldap_basedns} --ldap_admin_password ${var.ldap_admin_password} --ldap_user_name ${var.ldap_user_name} --ldap_user_password ${var.ldap_user_password} --resource_prefix ${var.ldap_cluster_prefix}"
+ }
+ depends_on = [local_sensitive_file.write_meta_private_key]
+ triggers = {
+ build = timestamp()
+ }
+}
+
+resource "null_resource" "prepare_ansible_inventory_using_jumphost_connection" {
+ count = (tobool(var.turn_on) == true && tobool(var.write_inventory_complete) == true && tobool(var.using_jumphost_connection) == true) ? 1 : 0
+ provisioner "local-exec" {
+ interpreter = ["/bin/bash", "-c"]
+ command = "python3 ${local.ansible_inv_script_path} --install_infra_path ${var.clone_path} --bastion_user ${var.bastion_user} --bastion_ip ${var.bastion_instance_public_ip} --bastion_ssh_private_key ${var.bastion_ssh_private_key} --instance_private_key ${local.ldap_private_key} --ldap_nodes ${local.ldap_server} --ldap_basedns ${var.ldap_basedns} --ldap_admin_password ${var.ldap_admin_password} --ldap_user_name ${var.ldap_user_name} --ldap_user_password ${var.ldap_user_password} --resource_prefix ${var.ldap_cluster_prefix}"
+ }
+ depends_on = [local_sensitive_file.write_meta_private_key]
+ triggers = {
+ build = timestamp()
+ }
+}
+
+resource "null_resource" "perform_ldap_prepare" {
+ count = (tobool(var.turn_on) == true && tobool(var.create_scale_cluster) == true) ? 1 : 0
+ provisioner "local-exec" {
+ interpreter = ["/bin/bash", "-c"]
+ command = "sudo /usr/local/bin/ansible-playbook -f 32 -i ${local.ldap_inventory_path} ${local.ldap_configure_playbook} -e ldap_server=${local.ldap_server}"
+ }
+ depends_on = [local_sensitive_file.write_meta_private_key, null_resource.prepare_ansible_inventory, null_resource.prepare_ansible_inventory_using_jumphost_connection]
+ triggers = {
+ build = timestamp()
+ }
+}
diff --git a/modules/common/ldap_configuration/variables.tf b/modules/common/ldap_configuration/variables.tf
index 89cb86d6..56c35601 100644
--- a/modules/common/ldap_configuration/variables.tf
+++ b/modules/common/ldap_configuration/variables.tf
@@ -8,11 +8,6 @@ variable "clone_path" {
description = "Scale repo clone path"
}
-variable "script_path" {
- type = string
- description = "Python script path"
-}
-
variable "create_scale_cluster" {
type = string
description = "It enables scale cluster configuration."
diff --git a/modules/common/network_playbook/main.tf b/modules/common/network_playbook/main.tf
new file mode 100644
index 00000000..e69de29b
diff --git a/modules/common/network_playbook/network_playbook.tf b/modules/common/network_playbook/network_playbook.tf
new file mode 100644
index 00000000..9f7b6f4e
--- /dev/null
+++ b/modules/common/network_playbook/network_playbook.tf
@@ -0,0 +1,14 @@
+/*
+ Executes the network playbook.
+*/
+
+resource "null_resource" "perform_scale_deployment" {
+ count = (tobool(var.turn_on) == true && tobool(var.compute_cluster_create_complete) == true && tobool(var.storage_cluster_create_complete) == true && tobool(var.create_scale_cluster) == true) ? 1 : 0
+ provisioner "local-exec" {
+ interpreter = ["/bin/bash", "-c"]
+ command = "sudo ansible-playbook -i ${var.inventory_path} ${var.network_playbook_path}"
+ }
+ triggers = {
+ build = timestamp()
+ }
+}
diff --git a/modules/common/network_playbook/outputs.tf b/modules/common/network_playbook/outputs.tf
new file mode 100644
index 00000000..6809db99
--- /dev/null
+++ b/modules/common/network_playbook/outputs.tf
@@ -0,0 +1 @@
+# Placeholder for outputs
diff --git a/modules/common/network_playbook/variables.tf b/modules/common/network_playbook/variables.tf
new file mode 100644
index 00000000..dcde1211
--- /dev/null
+++ b/modules/common/network_playbook/variables.tf
@@ -0,0 +1,29 @@
+variable "compute_cluster_create_complete" {
+ type = bool
+ description = "Compute cluster creation completed."
+}
+
+variable "storage_cluster_create_complete" {
+ type = bool
+ description = "Storage cluster creation completed."
+}
+
+variable "network_playbook_path" {
+ type = string
+ description = "Path for network playbook."
+}
+
+variable "turn_on" {
+ type = string
+ description = "It is used to turn on the null resources based on conditions."
+}
+
+variable "create_scale_cluster" {
+ type = string
+ description = "It enables scale cluster configuration."
+}
+
+variable "inventory_path" {
+ type = string
+ description = "Scale JSON inventory path"
+}
diff --git a/modules/common/network_playbook/version.tf b/modules/common/network_playbook/version.tf
new file mode 100644
index 00000000..cb7b1cd8
--- /dev/null
+++ b/modules/common/network_playbook/version.tf
@@ -0,0 +1,14 @@
+##############################################################################
+# Terraform Providers
+##############################################################################
+
+terraform {
+ required_version = ">= 1.3"
+ # Use "greater than or equal to" range for root level modules
+ required_providers {
+ null = {
+ source = "hashicorp/null"
+ version = ">= 3.0.0"
+ }
+ }
+}
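
Taken together, the new module's input surface is small. A hypothetical invocation (caller-side values, paths, and output names below are assumptions, not part of this diff):

    module "network_playbook" {
      source                          = "./modules/common/network_playbook"
      turn_on                         = "true"
      create_scale_cluster            = "true"
      compute_cluster_create_complete = module.compute_cluster.create_complete # assumed output name
      storage_cluster_create_complete = module.storage_cluster.create_complete # assumed output name
      inventory_path                  = "/tmp/ibm-spectrum-scale-install-infra/inventory.json"       # assumed path
      network_playbook_path           = "/tmp/ibm-spectrum-scale-install-infra/network_playbook.yml" # assumed path
    }
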
diff --git a/modules/common/remote_mount_configuration/remote_mount_configuration.tf b/modules/common/remote_mount_configuration/remote_mount_configuration.tf
index 7d242470..dcbabe9f 100644
--- a/modules/common/remote_mount_configuration/remote_mount_configuration.tf
+++ b/modules/common/remote_mount_configuration/remote_mount_configuration.tf
@@ -34,7 +34,7 @@ resource "null_resource" "perform_scale_deployment" {
count = (tobool(var.turn_on) == true && tobool(var.compute_cluster_create_complete) == true && tobool(var.storage_cluster_create_complete) == true && tobool(var.create_scale_cluster) == true) ? 1 : 0
provisioner "local-exec" {
interpreter = ["/bin/bash", "-c"]
- command = "ansible-playbook -i ${local.remote_mnt_inventory_path} ${local.remote_mnt_playbook_path}"
+ command = "sudo ansible-playbook -i ${local.remote_mnt_inventory_path} ${local.remote_mnt_playbook_path}"
}
depends_on = [time_sleep.wait_for_gui_db_initializion, null_resource.prepare_remote_mnt_inventory, null_resource.prepare_remote_mnt_inventory_using_jumphost_connection]
triggers = {
diff --git a/modules/common/scripts/prepare_scale_inv_ini.py b/modules/common/scripts/prepare_scale_inv_ini.py
index 1057b62e..b5a61bd2 100755
--- a/modules/common/scripts/prepare_scale_inv_ini.py
+++ b/modules/common/scripts/prepare_scale_inv_ini.py
@@ -241,9 +241,6 @@ def prepare_ansible_playbook(hosts_config, cluster_config, cluster_key_file):
- {{ role: afm_cos_prepare, when: enable_afm }}
- {{ role: afm_cos_install, when: "enable_afm and scale_packages_installed is false" }}
- {{ role: afm_cos_configure, when: enable_afm }}
- - {{ role: kp_encryption_prepare, when: "enable_key_protect and scale_cluster_type == 'storage'" }}
- - {{ role: kp_encryption_configure, when: enable_key_protect }}
- - {{ role: kp_encryption_apply, when: "enable_key_protect and scale_cluster_type == 'storage'" }}
""".format(
hosts_config=hosts_config,
cluster_config=cluster_config,
@@ -342,6 +339,28 @@ def prepare_ansible_playbook_encryption_cluster(hosts_config):
return content.format(hosts_config=hosts_config)
+def prepare_ansible_playbook_key_protect_encryption(hosts_config, cluster_config):
+ # Build the Key Protect encryption playbook content
+ content = """---
+# Configure Key Protect encryption on IBM Spectrum Scale nodes
+- hosts: {hosts_config}
+ collections:
+ - ibm.spectrum_scale
+ any_errors_fatal: true
+ vars:
+ - scale_node_update_check: false
+ pre_tasks:
+ - include_vars: group_vars/{cluster_config}
+ roles:
+ - {{ role: kp_encryption_prepare, when: "enable_key_protect and scale_cluster_type == 'storage'" }}
+ - {{ role: kp_encryption_configure, when: enable_key_protect }}
+ - {{ role: kp_encryption_apply, when: "enable_key_protect and scale_cluster_type == 'storage'" }}
+""".format(
+ hosts_config=hosts_config, cluster_config=cluster_config
+ )
+ return content
+
+
def initialize_cluster_details(
scale_version,
cluster_name,
@@ -1607,6 +1626,33 @@ def initialize_scale_ces_details(
"Content of ansible playbook for encryption:\n", encryption_playbook_content
)
+ # Step-4.2: Create Key Protect Encryption playbook
+ if (
+ ARGUMENTS.scale_encryption_enabled == "true"
+ and ARGUMENTS.scale_encryption_type == "key_protect"
+ and ARGUMENTS.enable_key_protect == "True"
+ ):
+
+ kp_encryption_playbook_content = (
+ prepare_ansible_playbook_key_protect_encryption(
+ "scale_nodes", "%s_cluster_config.yaml" % cluster_type
+ )
+ )
+ write_to_file(
+ "/%s/%s/%s_kp_encryption_playbook.yaml"
+ % (
+ ARGUMENTS.install_infra_path,
+ "ibm-spectrum-scale-install-infra",
+ cluster_type,
+ ),
+ kp_encryption_playbook_content,
+ )
+ if ARGUMENTS.verbose:
+ print(
+ "Content of ansible playbook for key protect encryption:\n",
+ kp_encryption_playbook_content,
+ )
+
# Step-5: Create hosts
config = configparser.ConfigParser(allow_no_value=True)
node_details = initialize_node_details(
diff --git a/modules/common/storage_configuration/locals.tf b/modules/common/storage_configuration/locals.tf
index 6780560f..19cba1dc 100644
--- a/modules/common/storage_configuration/locals.tf
+++ b/modules/common/storage_configuration/locals.tf
@@ -1,21 +1,29 @@
locals {
- scripts_path = replace(path.module, "storage_configuration", "scripts")
- ansible_inv_script_path = var.inventory_format == "ini" ? format("%s/prepare_scale_inv_ini.py", local.scripts_path) : format("%s/prepare_scale_inv_json.py", local.scripts_path)
- wait_for_ssh_script_path = format("%s/wait_for_ssh_availability.py", local.scripts_path)
- scale_tuning_config_path = format("%s/%s", var.clone_path, "storagesncparams.profile")
- storage_private_key = format("%s/storage_key/id_rsa", var.clone_path) #tfsec:ignore:GEN002
- default_metadata_replicas = var.default_metadata_replicas == null ? jsonencode("None") : jsonencode(var.default_metadata_replicas)
- default_data_replicas = var.default_data_replicas == null ? jsonencode("None") : jsonencode(var.default_data_replicas)
- storage_inventory_path = format("%s/%s/storage_inventory.ini", var.clone_path, "ibm-spectrum-scale-install-infra")
- storage_playbook_path = format("%s/%s/storage_cloud_playbook.yaml", var.clone_path, "ibm-spectrum-scale-install-infra")
- scale_encryption_servers = var.scale_encryption_enabled && var.scale_encryption_type == "gklm" ? jsonencode(var.scale_encryption_servers) : jsonencode("None")
- scale_encryption_admin_password = var.scale_encryption_enabled ? var.scale_encryption_admin_password : "None"
- ldap_server_cert_path = format("%s/ldap_key/ldap_cacert.pem", var.clone_path)
- colocate_protocol_instances = var.colocate_protocol_instances ? "True" : "False"
- is_colocate_protocol_subset = var.is_colocate_protocol_subset ? "True" : "False"
- enable_mrot_conf = var.enable_mrot_conf ? "True" : "False"
- enable_ces = var.enable_ces ? "True" : "False"
- enable_afm = var.enable_afm ? "True" : "False"
- enable_key_protect = var.enable_key_protect == "key_protect" ? "True" : "False"
- ldap_server = jsonencode(var.ldap_server)
+ scripts_path = replace(path.module, "storage_configuration", "scripts")
+ ansible_inv_script_path = var.inventory_format == "ini" ? format("%s/prepare_scale_inv_ini.py", local.scripts_path) : format("%s/prepare_scale_inv_json.py", local.scripts_path)
+ wait_for_ssh_script_path = format("%s/wait_for_ssh_availability.py", local.scripts_path)
+ scale_tuning_config_path = format("%s/%s", var.clone_path, "storagesncparams.profile")
+ storage_private_key = format("%s/storage_key/id_rsa", var.clone_path) #tfsec:ignore:GEN002
+ default_metadata_replicas = var.default_metadata_replicas == null ? jsonencode("None") : jsonencode(var.default_metadata_replicas)
+ default_data_replicas = var.default_data_replicas == null ? jsonencode("None") : jsonencode(var.default_data_replicas)
+ storage_inventory_path = format("%s/%s/storage_inventory.ini", var.clone_path, "ibm-spectrum-scale-install-infra")
+ storage_playbook_path = format("%s/%s/storage_cloud_playbook.yaml", var.clone_path, "ibm-spectrum-scale-install-infra")
+ scale_encryption_servers = var.scale_encryption_enabled && var.scale_encryption_type == "gklm" ? jsonencode(var.scale_encryption_servers) : jsonencode("None")
+ scale_encryption_admin_password = var.scale_encryption_enabled ? var.scale_encryption_admin_password : "None"
+ ldap_server_cert_path = format("%s/ldap_key/ldap_cacert.pem", var.scale_config_path)
+ colocate_protocol_instances = var.colocate_protocol_instances ? "True" : "False"
+ is_colocate_protocol_subset = var.is_colocate_protocol_subset ? "True" : "False"
+ enable_mrot_conf = var.enable_mrot_conf ? "True" : "False"
+ enable_ces = var.enable_ces ? "True" : "False"
+ enable_afm = var.enable_afm ? "True" : "False"
+ enable_key_protect = var.scale_encryption_enabled && var.enable_key_protect == "True" ? "True" : "False"
+ ldap_server = jsonencode(var.ldap_server)
+ scale_baremetal_ssh_check_playbook_path = format("%s/%s/scale_baremetal_ssh_check_playbook.yml", var.clone_path, "ibm-spectrum-scale-install-infra")
+ scale_baremetal_bootdrive_playbook_path = format("%s/%s/scale_baremetal_bootdrive.yml", var.clone_path, "ibm-spectrum-scale-install-infra")
+ scale_baremetal_prerequesite_vars = format("%s/%s/scale_baremetal_vars.yml", var.clone_path, "ibm-spectrum-scale-install-infra")
+ scale_baremetal_prerequesite_playbook_path = format("%s/%s/scale_baremetal_prerequesite.yml", var.clone_path, "ibm-spectrum-scale-install-infra")
+ scale_hostentry_playbook_path = format("%s/%s/scale_host_entry_play.yml", var.clone_path, "ibm-spectrum-scale-install-infra")
+ scale_cluster_hosts = format("%s/%s/scale_cluster_hosts.yml", var.clone_path, "ibm-spectrum-scale-install-infra")
+ scale_all_inventory = format("%s/%s/scale_all_inventory.ini", var.clone_path, "ibm-spectrum-scale-install-infra")
+ domain_name_file = format("%s/%s/domain_names.yml", var.clone_path, "ibm-spectrum-scale-install-infra")
}
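
Note the semantics of enable_key_protect changed here: it now requires both the cluster-wide encryption toggle and an explicit "True" flag, rather than matching the encryption type string. With assumed sample inputs:

    # scale_encryption_enabled = true,  enable_key_protect = "True"        => "True"
    # scale_encryption_enabled = false, enable_key_protect = "True"        => "False"
    # scale_encryption_enabled = true,  enable_key_protect = "key_protect" => "False" (old-style value no longer matches)
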
diff --git a/modules/common/storage_configuration/storage_configuration.tf b/modules/common/storage_configuration/storage_configuration.tf
index 162d5d68..a30e3ebf 100644
--- a/modules/common/storage_configuration/storage_configuration.tf
+++ b/modules/common/storage_configuration/storage_configuration.tf
@@ -41,6 +41,60 @@ resource "local_sensitive_file" "write_existing_ldap_cert" {
file_permission = "0600"
}
+resource "time_sleep" "wait_300_seconds" {
+ count = (tobool(var.turn_on) == true && tobool(var.write_inventory_complete) == true) && var.storage_type == "persistent" ? 1 : 0
+ create_duration = "300s"
+ depends_on = [local_sensitive_file.write_meta_private_key]
+}
+
+resource "null_resource" "scale_baremetal_ssh_check_play" {
+ count = (tobool(var.turn_on) == true && tobool(var.write_inventory_complete) == true && tobool(var.create_scale_cluster) == true) && var.storage_type == "persistent" ? 1 : 0
+ provisioner "local-exec" {
+ interpreter = ["/bin/bash", "-c"]
+ command = "sudo ansible-playbook -f 50 -i ${local.scale_all_inventory} -l 'storage' -e @${local.scale_baremetal_prerequesite_vars} ${local.scale_baremetal_ssh_check_playbook_path}"
+ }
+ depends_on = [local_sensitive_file.write_meta_private_key, time_sleep.wait_300_seconds]
+ triggers = {
+ build = timestamp()
+ }
+}
+
+resource "null_resource" "scale_host_play" {
+ count = (tobool(var.turn_on) == true && tobool(var.write_inventory_complete) == true && tobool(var.create_scale_cluster) == true) ? 1 : 0
+ provisioner "local-exec" {
+ interpreter = ["/bin/bash", "-c"]
+ command = "sudo ansible-playbook -f 50 -i ${local.scale_all_inventory} -l 'storage' -e @${local.scale_cluster_hosts} -e @${local.domain_name_file} ${local.scale_hostentry_playbook_path}"
+ }
+ depends_on = [null_resource.scale_baremetal_ssh_check_play, time_sleep.wait_300_seconds]
+ triggers = {
+ build = timestamp()
+ }
+}
+
+resource "null_resource" "scale_baremetal_bootdrive_play" {
+ count = (tobool(var.turn_on) == true && tobool(var.write_inventory_complete) == true && tobool(var.create_scale_cluster) == true) && var.storage_type == "persistent" && var.bms_boot_drive_encryption == true ? 1 : 0
+ provisioner "local-exec" {
+ interpreter = ["/bin/bash", "-c"]
+ command = "sudo ansible-playbook -f 50 -i ${local.scale_all_inventory} -l 'storage' -e @${local.scale_baremetal_prerequesite_vars} ${local.scale_baremetal_bootdrive_playbook_path}"
+ }
+ depends_on = [null_resource.scale_baremetal_ssh_check_play, null_resource.scale_host_play]
+ triggers = {
+ build = timestamp()
+ }
+}
+
+resource "null_resource" "scale_baremetal_prerequesite_play" {
+ count = (tobool(var.turn_on) == true && tobool(var.write_inventory_complete) == true && tobool(var.create_scale_cluster) == true) && var.storage_type == "persistent" ? 1 : 0
+ provisioner "local-exec" {
+ interpreter = ["/bin/bash", "-c"]
+ command = "sudo ansible-playbook -f 50 -i ${local.scale_all_inventory} -l 'storage' -e @${local.scale_baremetal_prerequesite_vars} ${local.scale_baremetal_prerequesite_playbook_path}"
+ }
+ depends_on = [null_resource.scale_baremetal_ssh_check_play, null_resource.scale_host_play]
+ triggers = {
+ build = timestamp()
+ }
+}
+
resource "null_resource" "prepare_ansible_inventory_using_jumphost_connection" {
count = (tobool(var.turn_on) == true && tobool(var.write_inventory_complete) == true && tobool(var.using_jumphost_connection) == true && tobool(var.scale_encryption_enabled) == false) && var.bastion_instance_public_ip != null && var.bastion_ssh_private_key != null ? 1 : 0
provisioner "local-exec" {
@@ -111,9 +165,9 @@ resource "null_resource" "perform_scale_deployment" {
count = (tobool(var.turn_on) == true && tobool(var.write_inventory_complete) == true && tobool(var.create_scale_cluster) == true) ? 1 : 0
provisioner "local-exec" {
interpreter = ["/bin/bash", "-c"]
- command = "ansible-playbook -f 32 -i ${local.storage_inventory_path} ${local.storage_playbook_path} --extra-vars \"scale_version=${var.scale_version}\" --extra-vars \"scale_install_directory_pkg_path=${var.spectrumscale_rpms_path}\""
+ command = "sudo ansible-playbook -f 32 -i ${local.storage_inventory_path} ${local.storage_playbook_path} --extra-vars \"scale_version=${var.scale_version}\" --extra-vars \"scale_install_directory_pkg_path=${var.spectrumscale_rpms_path}\""
}
- depends_on = [time_sleep.wait_60_seconds, null_resource.wait_for_ssh_availability, null_resource.prepare_ansible_inventory, null_resource.prepare_ansible_inventory_using_jumphost_connection, null_resource.prepare_ansible_inventory, null_resource.prepare_ansible_inventory_using_jumphost_connection]
+ depends_on = [null_resource.scale_host_play, null_resource.scale_baremetal_prerequesite_play, null_resource.scale_baremetal_bootdrive_play, time_sleep.wait_60_seconds, null_resource.wait_for_ssh_availability, null_resource.prepare_ansible_inventory, null_resource.prepare_ansible_inventory_using_jumphost_connection, null_resource.prepare_ansible_inventory, null_resource.prepare_ansible_inventory_using_jumphost_connection]
triggers = {
build = timestamp()
}
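
For persistent (bare metal) storage, the depends_on edges added above impose the following ordering; the bootdrive and prerequisite plays can run in parallel once host entries are in place:

    # time_sleep.wait_300_seconds
    #   -> null_resource.scale_baremetal_ssh_check_play
    #   -> null_resource.scale_host_play
    #   -> null_resource.scale_baremetal_bootdrive_play    (only when bms_boot_drive_encryption = true)
    #      null_resource.scale_baremetal_prerequesite_play (parallel with the bootdrive play)
    #   -> null_resource.perform_scale_deployment          (waits on all of the above)
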
diff --git a/modules/common/storage_configuration/variables.tf b/modules/common/storage_configuration/variables.tf
index 953761e2..a91e4639 100644
--- a/modules/common/storage_configuration/variables.tf
+++ b/modules/common/storage_configuration/variables.tf
@@ -24,6 +24,11 @@ variable "inventory_path" {
description = "Scale JSON inventory path"
}
+variable "scale_config_path" {
+ type = string
+ description = "Path to clone github.com/IBM/ibm-spectrum-scale-install-infra."
+}
+
variable "inventory_format" {
type = string
description = "Scale inventory format"
@@ -276,3 +281,15 @@ variable "afm_bandwidth" {
type = string
description = "AFM node bandwidth"
}
+
+variable "storage_type" {
+ type = string
+ default = "scratch"
+ description = "Select the required storage type(scratch/persistent/eval)."
+}
+
+variable "bms_boot_drive_encryption" {
+ type = bool
+ default = false
+ description = "To enable the encryption for the boot drive of bare metal server. Select true or false"
+}
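
Since storage_type accepts only three values, a validation block would fail fast on typos; a possible hardening, not part of this diff:

    variable "storage_type" {
      type        = string
      default     = "scratch"
      description = "Select the required storage type (scratch/persistent/eval)."
      validation {
        condition     = contains(["scratch", "persistent", "eval"], var.storage_type)
        error_message = "storage_type must be one of: scratch, persistent, eval."
      }
    }
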
diff --git a/modules/cos/cos.tf b/modules/cos/cos.tf
deleted file mode 100644
index 8268f57c..00000000
--- a/modules/cos/cos.tf
+++ /dev/null
@@ -1,553 +0,0 @@
-#############################################################################################################
-# 1. It creates new COS instance, Bucket and Hmac Key
-#############################################################################################################
-
-locals {
- path_elements = split("/", var.filesystem)
- filesystem = element(local.path_elements, length(local.path_elements) - 1)
-
- new_cos_instance = distinct([for instance in var.new_instance_bucket_hmac : instance.cos_instance])
- # New bucket single Site
- new_bucket_single_site_region = [for region in var.new_instance_bucket_hmac : region.bucket_region if region.bucket_type == "single_site_location"]
- storage_class_single_site = [for class in var.new_instance_bucket_hmac : class.bucket_storage_class if class.bucket_type == "single_site_location"]
- mode_single_site = [for mode in var.new_instance_bucket_hmac : mode.mode if mode.bucket_type == "single_site_location"]
- afm_fileset_single_site = [for fileset in var.new_instance_bucket_hmac : fileset.afm_fileset if fileset.bucket_type == "single_site_location"]
- # New bucket regional
- new_bucket_regional_region = [for region in var.new_instance_bucket_hmac : region.bucket_region if region.bucket_type == "region_location" || region.bucket_type == ""]
- storage_class_regional = [for class in var.new_instance_bucket_hmac : class.bucket_storage_class if class.bucket_type == "region_location" || class.bucket_type == ""]
- mode_regional = [for mode in var.new_instance_bucket_hmac : mode.mode if mode.bucket_type == "region_location" || mode.bucket_type == ""]
- afm_fileset_regional = [for fileset in var.new_instance_bucket_hmac : fileset.afm_fileset if fileset.bucket_type == "region_location" || fileset.bucket_type == ""]
- # New bucket cross region
- new_bucket_cross_region = [for region in var.new_instance_bucket_hmac : region.bucket_region if region.bucket_type == "cross_region_location"]
- storage_class_cross_regional = [for class in var.new_instance_bucket_hmac : class.bucket_storage_class if class.bucket_type == "cross_region_location"]
- mode_cross_regional = [for mode in var.new_instance_bucket_hmac : mode.mode if mode.bucket_type == "cross_region_location"]
- fileset_cross_regional = [for fileset in var.new_instance_bucket_hmac : fileset.afm_fileset if fileset.bucket_type == "cross_region_location"]
-}
-
-resource "ibm_resource_instance" "cos_instance" {
- for_each = {
- for idx, count_number in range(1, length(local.new_cos_instance) + 1) : idx => {
- sequence_string = tostring(count_number)
- }
- }
- name = format("%s-%03s", "${var.prefix}instance", each.value.sequence_string)
- resource_group_id = var.resource_group_id
- plan = var.cos_instance_plan
- location = var.cos_instance_location
- service = var.cos_instance_service
-}
-
-resource "ibm_cos_bucket" "cos_bucket_single_site" {
- for_each = {
- for idx, count_number in range(1, length(local.new_bucket_single_site_region) + 1) : idx => {
- sequence_string = tostring(count_number)
- cos_instance = element(flatten([for instance_id in ibm_resource_instance.cos_instance : instance_id[*].id]), idx)
- region_location = element(local.new_bucket_single_site_region, idx)
- storage_class = element(local.storage_class_single_site, idx)
- }
- }
- bucket_name = format("%s-%03s", "${var.prefix}bucket-new", each.value.sequence_string)
- resource_instance_id = each.value.cos_instance
- single_site_location = each.value.region_location
- storage_class = each.value.storage_class == "" ? "smart" : each.value.storage_class
- depends_on = [ibm_resource_instance.cos_instance]
-}
-
-resource "ibm_cos_bucket" "cos_bucket_regional" {
- for_each = {
- for idx, count_number in range(1, length(local.new_bucket_regional_region) + 1) : idx => {
- sequence_string = tostring(count_number)
- cos_instance = element(flatten([for instance_id in ibm_resource_instance.cos_instance : instance_id[*].id]), idx)
- region_location = element(local.new_bucket_regional_region, idx)
- storage_class = element(local.storage_class_regional, idx)
- }
- }
- bucket_name = format("%s-%03s", "${var.prefix}bucket-new", (each.value.sequence_string + length(local.new_bucket_single_site_region)))
- resource_instance_id = each.value.cos_instance
- region_location = each.value.region_location
- storage_class = each.value.storage_class == "" ? "smart" : each.value.storage_class
- depends_on = [ibm_resource_instance.cos_instance]
-}
-
-resource "ibm_cos_bucket" "cos_bucket_cross_region" {
- for_each = {
- for idx, count_number in range(1, length(local.new_bucket_cross_region) + 1) : idx => {
- sequence_string = tostring(count_number)
- cos_instance = element(flatten([for instance_id in ibm_resource_instance.cos_instance : instance_id[*].id]), idx)
- region_location = element(local.new_bucket_cross_region, idx)
- storage_class = element(local.storage_class_cross_regional, idx)
- }
- }
- bucket_name = format("%s-%03s", "${var.prefix}bucket-new", (each.value.sequence_string + (length(local.new_bucket_single_site_region) + length(local.new_bucket_regional_region))))
- resource_instance_id = each.value.cos_instance
- cross_region_location = each.value.region_location
- storage_class = each.value.storage_class == "" ? "smart" : each.value.storage_class
- depends_on = [ibm_resource_instance.cos_instance]
-}
-
-resource "ibm_resource_key" "hmac_key" {
- for_each = {
- for idx, count_number in range(1, length(local.new_cos_instance) + 1) : idx => {
- sequence_string = tostring(count_number)
- cos_instance = element(flatten([for instance_id in ibm_resource_instance.cos_instance : instance_id[*].id]), idx)
- }
- }
- name = format("%s-%03s", "${var.prefix}hmac-key-new", each.value.sequence_string)
- resource_instance_id = each.value.cos_instance
- parameters = { "HMAC" = true }
- role = var.cos_hmac_role
-}
-
-locals {
- buckets = concat((flatten([for bucket in ibm_cos_bucket.cos_bucket_single_site : bucket[*].bucket_name])), (flatten([for bucket in ibm_cos_bucket.cos_bucket_regional : bucket[*].bucket_name])), (flatten([for bucket in ibm_cos_bucket.cos_bucket_cross_region : bucket[*].bucket_name])))
- endpoints = concat((flatten([for endpoint in ibm_cos_bucket.cos_bucket_single_site : endpoint[*].s3_endpoint_direct])), (flatten([for endpoint in ibm_cos_bucket.cos_bucket_regional : endpoint[*].s3_endpoint_direct])), (flatten([for endpoint in ibm_cos_bucket.cos_bucket_cross_region : endpoint[*].s3_endpoint_direct])))
- modes = concat(local.mode_single_site, local.mode_regional, local.mode_cross_regional)
- filesets = concat(local.afm_fileset_single_site, local.afm_fileset_regional, local.fileset_cross_regional)
-
-
- afm_cos_bucket_details_1 = [for idx, config in var.new_instance_bucket_hmac : {
- akey = ibm_resource_key.hmac_key[0].credentials["cos_hmac_keys.access_key_id"]
- bucket = (local.buckets)[idx]
- skey = ibm_resource_key.hmac_key[0].credentials["cos_hmac_keys.secret_access_key"]
- }]
-
- afm_config_details_1 = [for idx, config in var.new_instance_bucket_hmac : {
- bucket = (local.buckets)[idx]
- filesystem = local.filesystem
- fileset = (local.filesets)[idx]
- mode = (local.modes)[idx]
- endpoint = "https://${(local.endpoints)[idx]}"
- }]
-}
-
-#############################################################################################################
-# 2. It uses existing COS instance and creates new COS Bucket and Hmac Key in that instance.
-#############################################################################################################
-
-locals {
- exstng_instance_new_bkt_hmac = [for instance in var.exstng_instance_new_bucket_hmac : instance.cos_instance]
- # New bucket single Site
- exstng_instance_new_bkt_hmac_single_site = [for instance in var.exstng_instance_new_bucket_hmac : instance.cos_instance if instance.bucket_type == "single_site_location"]
- exstng_instance_single_site_region = [for region in var.exstng_instance_new_bucket_hmac : region.bucket_region if region.bucket_type == "single_site_location"]
- exstng_instance_storage_class_single_site = [for class in var.exstng_instance_new_bucket_hmac : class.bucket_storage_class if class.bucket_type == "single_site_location"]
- exstng_instance_mode_single_site = [for mode in var.exstng_instance_new_bucket_hmac : mode.mode if mode.bucket_type == "single_site_location"]
- exstng_instance_fileset_single_site = [for fileset in var.exstng_instance_new_bucket_hmac : fileset.afm_fileset if fileset.bucket_type == "single_site_location"]
- # New bucket regional
- exstng_instance_new_bkt_hmac_regional = [for instance in var.exstng_instance_new_bucket_hmac : instance.cos_instance if instance.bucket_type == "region_location" || instance.bucket_type == ""]
- exstng_instance_regional_region = [for region in var.exstng_instance_new_bucket_hmac : region.bucket_region if region.bucket_type == "region_location" || region.bucket_type == ""]
- exstng_instance_storage_class_regional = [for class in var.exstng_instance_new_bucket_hmac : class.bucket_storage_class if class.bucket_type == "region_location" || class.bucket_type == ""]
- exstng_instance_mode_regional = [for mode in var.exstng_instance_new_bucket_hmac : mode.mode if mode.bucket_type == "region_location" || mode.bucket_type == ""]
- exstng_instance_fileset_regional = [for fileset in var.exstng_instance_new_bucket_hmac : fileset.afm_fileset if fileset.bucket_type == "region_location" || fileset.bucket_type == ""]
- # New bucket cross region
- exstng_instance_new_bkt_hmac_cross_regional = [for instance in var.exstng_instance_new_bucket_hmac : instance.cos_instance if instance.bucket_type == "cross_region_location"]
- exstng_instance_cross_regional = [for region in var.exstng_instance_new_bucket_hmac : region.bucket_region if region.bucket_type == "cross_region_location"]
- exstng_instance_storage_class_cross_regional = [for class in var.exstng_instance_new_bucket_hmac : class.bucket_storage_class if class.bucket_type == "cross_region_location"]
- exstng_instance_mode_cross_regional = [for mode in var.exstng_instance_new_bucket_hmac : mode.mode if mode.bucket_type == "cross_region_location"]
- exstng_instance_fileset_cross_regional = [for fileset in var.exstng_instance_new_bucket_hmac : fileset.afm_fileset if fileset.bucket_type == "cross_region_location"]
-}
-
-data "ibm_resource_instance" "existing_cos_instance_single_site" {
- for_each = {
- for idx, value in local.exstng_instance_new_bkt_hmac_single_site : idx => {
- cos_instance = element(local.exstng_instance_new_bkt_hmac_single_site, idx)
- }
- }
- name = each.value.cos_instance
- service = var.cos_instance_service
-}
-
-resource "ibm_cos_bucket" "existing_instance_new_cos_bucket_single_site" {
- for_each = {
- for idx, count_number in range(1, length(local.exstng_instance_single_site_region) + 1) : idx => {
- sequence_string = tostring(count_number)
- cos_instance = element(flatten([for instance_id in data.ibm_resource_instance.existing_cos_instance_single_site : instance_id[*].id]), idx)
- region_location = element(local.exstng_instance_single_site_region, idx)
- storage_class = element(local.exstng_instance_storage_class_single_site, idx)
- }
- }
- bucket_name = format("%s-%03s", "${var.prefix}bucket", each.value.sequence_string)
- resource_instance_id = each.value.cos_instance
- single_site_location = each.value.region_location
- storage_class = each.value.storage_class == "" ? "smart" : each.value.storage_class
- depends_on = [data.ibm_resource_instance.existing_cos_instance_single_site]
-}
-
-data "ibm_resource_instance" "existing_cos_instance_bucket_regional" {
- for_each = {
- for idx, value in local.exstng_instance_new_bkt_hmac_regional : idx => {
- cos_instance = element(local.exstng_instance_new_bkt_hmac_regional, idx)
- }
- }
- name = each.value.cos_instance
- service = var.cos_instance_service
-}
-
-resource "ibm_cos_bucket" "existing_instance_new_cos_bucket_regional" {
- for_each = {
- for idx, count_number in range(1, length(local.exstng_instance_regional_region) + 1) : idx => {
- sequence_string = tostring(count_number)
- cos_instance = element(flatten([for instance_id in data.ibm_resource_instance.existing_cos_instance_bucket_regional : instance_id[*].id]), idx)
- region_location = element(local.exstng_instance_regional_region, idx)
- storage_class = element(local.exstng_instance_storage_class_regional, idx)
- }
- }
- bucket_name = format("%s-%03s", "${var.prefix}bucket", (each.value.sequence_string + length(local.exstng_instance_single_site_region)))
- resource_instance_id = each.value.cos_instance
- region_location = each.value.region_location
- storage_class = each.value.storage_class == "" ? "smart" : each.value.storage_class
- depends_on = [data.ibm_resource_instance.existing_cos_instance_bucket_regional]
-}
-
-data "ibm_resource_instance" "existing_cos_instancecross_regional" {
- for_each = {
- for idx, value in local.exstng_instance_new_bkt_hmac_cross_regional : idx => {
- cos_instance = element(local.exstng_instance_new_bkt_hmac_cross_regional, idx)
- }
- }
- name = each.value.cos_instance
- service = var.cos_instance_service
-}
-
-resource "ibm_cos_bucket" "existing_instance_new_cos_bucket_cross_regional" {
- for_each = {
- for idx, count_number in range(1, length(local.exstng_instance_cross_regional) + 1) : idx => {
- sequence_string = tostring(count_number)
- cos_instance = element(flatten([for instance_id in data.ibm_resource_instance.existing_cos_instancecross_regional : instance_id[*].id]), idx)
- region_location = element(local.exstng_instance_cross_regional, idx)
- storage_class = element(local.exstng_instance_storage_class_cross_regional, idx)
- }
- }
- bucket_name = format("%s-%03s", "${var.prefix}bucket", (each.value.sequence_string + (length(local.exstng_instance_single_site_region) + length(local.exstng_instance_regional_region))))
- resource_instance_id = each.value.cos_instance
- cross_region_location = each.value.region_location
- storage_class = each.value.storage_class == "" ? "smart" : each.value.storage_class
- depends_on = [data.ibm_resource_instance.existing_cos_instancecross_regional]
-}
-
-data "ibm_resource_instance" "existing_cos_instance" {
- for_each = {
- for idx, value in local.exstng_instance_new_bkt_hmac : idx => {
- cos_instance = element(local.exstng_instance_new_bkt_hmac, idx)
- }
- }
- name = each.value.cos_instance
- service = var.cos_instance_service
-}
-
-resource "ibm_resource_key" "existing_instance_new_hmac_keys" {
- for_each = {
- for idx, count_number in range(1, length(local.exstng_instance_new_bkt_hmac) + 1) : idx => {
- sequence_string = tostring(count_number)
- cos_instance = element(flatten([for instance_id in data.ibm_resource_instance.existing_cos_instance : instance_id[*].id]), idx)
- }
- }
- name = format("%s-%03s", "${var.prefix}hmac-key", each.value.sequence_string)
- resource_instance_id = each.value.cos_instance
- parameters = { "HMAC" = true }
- role = var.cos_hmac_role
- depends_on = [data.ibm_resource_instance.existing_cos_instance]
-}
-
-locals {
- exstng_instance_buckets = concat((flatten([for bucket in ibm_cos_bucket.existing_instance_new_cos_bucket_single_site : bucket[*].bucket_name])), (flatten([for bucket in ibm_cos_bucket.existing_instance_new_cos_bucket_regional : bucket[*].bucket_name])), (flatten([for bucket in ibm_cos_bucket.existing_instance_new_cos_bucket_cross_regional : bucket[*].bucket_name])))
- exstng_instance_endpoints = concat((flatten([for endpoint in ibm_cos_bucket.existing_instance_new_cos_bucket_single_site : endpoint[*].s3_endpoint_direct])), (flatten([for endpoint in ibm_cos_bucket.existing_instance_new_cos_bucket_regional : endpoint[*].s3_endpoint_direct])), (flatten([for endpoint in ibm_cos_bucket.existing_instance_new_cos_bucket_cross_regional : endpoint[*].s3_endpoint_direct])))
- exstng_instance_modes = concat(local.exstng_instance_mode_single_site, local.exstng_instance_mode_regional, local.exstng_instance_mode_cross_regional)
- exstng_instance_filesets = concat(local.exstng_instance_fileset_single_site, local.exstng_instance_fileset_regional, local.exstng_instance_fileset_cross_regional)
-
- afm_cos_bucket_details_2 = [for idx, config in var.exstng_instance_new_bucket_hmac : {
- akey = (flatten([for access_key in ibm_resource_key.existing_instance_new_hmac_keys : access_key[*].credentials["cos_hmac_keys.access_key_id"]]))[idx]
- bucket = (local.exstng_instance_buckets)[idx]
- skey = (flatten([for secret_access_key in ibm_resource_key.existing_instance_new_hmac_keys : secret_access_key[*].credentials["cos_hmac_keys.secret_access_key"]]))[idx]
- }]
-
- afm_config_details_2 = [for idx, config in var.exstng_instance_new_bucket_hmac : {
- bucket = (local.exstng_instance_buckets)[idx]
- filesystem = local.filesystem
- fileset = (local.exstng_instance_filesets)[idx]
- mode = (local.exstng_instance_modes)[idx]
- endpoint = "https://${(local.exstng_instance_endpoints)[idx]}"
- }]
-}
-
-#############################################################################################################
-# 3. It uses existing COS instance and existing Bucket and creates new Hmac Key in that instance.
-#############################################################################################################
-
-locals {
- exstng_instance_bkt_new_hmac = [for instance in var.exstng_instance_bucket_new_hmac : instance.cos_instance]
- exstng_instance_exstng_bucket = [for bucket in var.exstng_instance_bucket_new_hmac : bucket.bucket_name]
- region_exstng_instance_bucket_new_hmac = [for region in var.exstng_instance_bucket_new_hmac : region.bucket_region]
- exstng_instance_exstng_bucket_type = [for type in var.exstng_instance_bucket_new_hmac : type.bucket_type]
-}
-
-data "ibm_resource_instance" "existing_cos_instance_bucket_new_hmac" {
- for_each = {
- for idx, value in var.exstng_instance_bucket_new_hmac : idx => {
- cos_instance = element(local.exstng_instance_bkt_new_hmac, idx)
- }
- }
- name = each.value.cos_instance
- service = var.cos_instance_service
-}
-
-data "ibm_cos_bucket" "existing_cos_instance_bucket" {
- for_each = {
- for idx, value in var.exstng_instance_bucket_new_hmac : idx => {
- bucket_name = element(local.exstng_instance_exstng_bucket, idx)
- resource_instance_id = element(flatten([for instance in data.ibm_resource_instance.existing_cos_instance_bucket_new_hmac : instance[*].id]), idx)
- bucket_region = element(local.region_exstng_instance_bucket_new_hmac, idx)
- bucket_type = element(local.exstng_instance_exstng_bucket_type, idx)
- }
- }
- bucket_name = each.value.bucket_name
- resource_instance_id = each.value.resource_instance_id
- bucket_region = each.value.bucket_region
- bucket_type = each.value.bucket_type
- depends_on = [data.ibm_resource_instance.existing_cos_instance_bucket_new_hmac]
-}
-
-resource "ibm_resource_key" "existing_instance_bkt_new_hmac_keys" {
- for_each = {
- for idx, count_number in range(1, length(var.exstng_instance_bucket_new_hmac) + 1) : idx => {
- sequence_string = tostring(count_number)
- cos_instance = element(flatten([for instance_id in data.ibm_resource_instance.existing_cos_instance_bucket_new_hmac : instance_id[*].id]), idx)
- }
- }
- name = format("%s-%03s", "${var.prefix}new-hmac-key", each.value.sequence_string)
- resource_instance_id = each.value.cos_instance
- parameters = { "HMAC" = true }
- role = var.cos_hmac_role
- depends_on = [data.ibm_resource_instance.existing_cos_instance_bucket_new_hmac]
-}
-
-locals {
- afm_cos_bucket_details_3 = [for idx, config in var.exstng_instance_bucket_new_hmac : {
- akey = (flatten([for access_key in ibm_resource_key.existing_instance_bkt_new_hmac_keys : access_key[*].credentials["cos_hmac_keys.access_key_id"]]))[idx]
- bucket = (flatten([for bucket in data.ibm_cos_bucket.existing_cos_instance_bucket : bucket[*].bucket_name]))[idx]
- skey = (flatten([for secret_access_key in ibm_resource_key.existing_instance_bkt_new_hmac_keys : secret_access_key[*].credentials["cos_hmac_keys.secret_access_key"]]))[idx]
- }]
-
- afm_config_details_3 = [for idx, config in var.exstng_instance_bucket_new_hmac : {
- bucket = (flatten([for bucket in data.ibm_cos_bucket.existing_cos_instance_bucket : bucket[*].bucket_name]))[idx]
- filesystem = local.filesystem
- fileset = ([for fileset in var.exstng_instance_bucket_new_hmac : fileset.afm_fileset])[idx]
- mode = ([for mode in var.exstng_instance_bucket_new_hmac : mode.mode])[idx]
- endpoint = "https://${(flatten([for endpoint in data.ibm_cos_bucket.existing_cos_instance_bucket : endpoint[*].s3_endpoint_direct]))[idx]}"
- }]
-}
-
-#############################################################################################################
-# 4. It uses existing COS instance and existing Hmac Key and creates new Bucket in that instance.
-#############################################################################################################
-
-locals {
- exstng_instance_hmac_new_bkt = [for instance in var.exstng_instance_hmac_new_bucket : instance.cos_instance]
- exstng_instance_exstng_hmac = [for hmac in var.exstng_instance_hmac_new_bucket : hmac.cos_service_cred_key]
-
- # New bucket single Site
- exstng_instance_hmac_new_bkt_single_site = [for instance in var.exstng_instance_hmac_new_bucket : instance.cos_instance if instance.bucket_type == "single_site_location"]
- exstng_instance_hmac_single_site_region = [for region in var.exstng_instance_hmac_new_bucket : region.bucket_region if region.bucket_type == "single_site_location"]
- exstng_instance_hmac_storage_class_single_site = [for class in var.exstng_instance_hmac_new_bucket : class.bucket_storage_class if class.bucket_type == "single_site_location"]
- exstng_instance_hmac_mode_single_site = [for mode in var.exstng_instance_hmac_new_bucket : mode.mode if mode.bucket_type == "single_site_location"]
- exstng_instance_hmac_fileset_single_site = [for fileset in var.exstng_instance_hmac_new_bucket : fileset.afm_fileset if fileset.bucket_type == "single_site_location"]
- # New bucket regional
- exstng_instance_hmac_new_bkt_regional = [for instance in var.exstng_instance_hmac_new_bucket : instance.cos_instance if instance.bucket_type == "region_location" || instance.bucket_type == ""]
- exstng_instance_hmac_regional_region = [for region in var.exstng_instance_hmac_new_bucket : region.bucket_region if region.bucket_type == "region_location" || region.bucket_type == ""]
- exstng_instance_hmac_storage_class_regional = [for class in var.exstng_instance_hmac_new_bucket : class.bucket_storage_class if class.bucket_type == "region_location" || class.bucket_type == ""]
- exstng_instance_hmac_mode_regional = [for mode in var.exstng_instance_hmac_new_bucket : mode.mode if mode.bucket_type == "region_location" || mode.bucket_type == ""]
- exstng_instance_hmac_fileset_regional = [for fileset in var.exstng_instance_hmac_new_bucket : fileset.afm_fileset if fileset.bucket_type == "region_location" || fileset.bucket_type == ""]
- # New bucket cross region
- exstng_instance_hmac_new_bkt_cross_region = [for instance in var.exstng_instance_hmac_new_bucket : instance.cos_instance if instance.bucket_type == "cross_region_location"]
- exstng_instance_hmac_cross_region = [for region in var.exstng_instance_hmac_new_bucket : region.bucket_region if region.bucket_type == "cross_region_location"]
- exstng_instance_hmac_storage_class_cross_regional = [for class in var.exstng_instance_hmac_new_bucket : class.bucket_storage_class if class.bucket_type == "cross_region_location"]
- exstng_instance_hmac_mode_cross_regional = [for mode in var.exstng_instance_hmac_new_bucket : mode.mode if mode.bucket_type == "cross_region_location"]
- exstng_instance_hmac_fileset_cross_regional = [for fileset in var.exstng_instance_hmac_new_bucket : fileset.afm_fileset if fileset.bucket_type == "cross_region_location"]
-}
-
-data "ibm_resource_instance" "exstng_cos_instance_hmac_new_bucket_single_site" {
- for_each = length(local.exstng_instance_hmac_new_bkt_single_site) == 0 ? {} : {
- for idx, value in local.exstng_instance_hmac_new_bkt_single_site : idx => {
- cos_instance = element(local.exstng_instance_hmac_new_bkt_single_site, idx)
- }
- }
- name = each.value.cos_instance
- service = var.cos_instance_service
-}
-
-resource "ibm_cos_bucket" "existing_cos_instance_hmac_new_cos_bucket_single_site" {
- for_each = {
- for idx, count_number in range(1, length(local.exstng_instance_hmac_single_site_region) + 1) : idx => {
- sequence_string = tostring(count_number)
- cos_instance = element(flatten([for instance_id in data.ibm_resource_instance.exstng_cos_instance_hmac_new_bucket_single_site : instance_id[*].id]), idx)
- region_location = element(local.exstng_instance_hmac_single_site_region, idx)
- storage_class = element(local.exstng_instance_hmac_storage_class_single_site, idx)
- }
- }
- bucket_name = format("%s-%03s", "${var.prefix}new-bucket", each.value.sequence_string)
- resource_instance_id = each.value.cos_instance
- single_site_location = each.value.region_location
- storage_class = each.value.storage_class == "" ? "smart" : each.value.storage_class
- depends_on = [data.ibm_resource_instance.exstng_cos_instance_hmac_new_bucket_single_site]
-}
-
-data "ibm_resource_instance" "exstng_cos_instance_hmac_new_bucket_regional" {
- for_each = length(local.exstng_instance_hmac_new_bkt_regional) == 0 ? {} : {
- for idx, value in local.exstng_instance_hmac_new_bkt_regional : idx => {
- cos_instance = element(local.exstng_instance_hmac_new_bkt_regional, idx)
- }
- }
- name = each.value.cos_instance
- service = var.cos_instance_service
-}
-
-resource "ibm_cos_bucket" "existing_cos_instance_hmac_new_cos_bucket_regional" {
- for_each = {
- for idx, count_number in range(1, length(local.exstng_instance_hmac_regional_region) + 1) : idx => {
- sequence_string = tostring(count_number)
- cos_instance = element(flatten([for instance_id in data.ibm_resource_instance.exstng_cos_instance_hmac_new_bucket_regional : instance_id[*].id]), idx)
- region_location = element(local.exstng_instance_hmac_regional_region, idx)
- storage_class = element(local.exstng_instance_hmac_storage_class_regional, idx)
- }
- }
- bucket_name = format("%s-%03s", "${var.prefix}new-bucket", (each.value.sequence_string + length(local.exstng_instance_hmac_single_site_region)))
- resource_instance_id = each.value.cos_instance
- region_location = each.value.region_location
- storage_class = each.value.storage_class == "" ? "smart" : each.value.storage_class
- depends_on = [data.ibm_resource_instance.exstng_cos_instance_hmac_new_bucket_regional]
-}
-
-data "ibm_resource_instance" "exstng_cos_instance_hmac_new_bucket_cross_region" {
- for_each = length(local.exstng_instance_hmac_new_bkt_cross_region) == 0 ? {} : {
- for idx, value in local.exstng_instance_hmac_new_bkt_cross_region : idx => {
- cos_instance = element(local.exstng_instance_hmac_new_bkt_cross_region, idx)
- }
- }
- name = each.value.cos_instance
- service = var.cos_instance_service
-}
-
-resource "ibm_cos_bucket" "existing_cos_instance_hmac_new_cos_bucket_cross_region" {
- for_each = {
- for idx, count_number in range(1, length(local.exstng_instance_hmac_cross_region) + 1) : idx => {
- sequence_string = tostring(count_number)
- cos_instance = element(flatten([for instance_id in data.ibm_resource_instance.exstng_cos_instance_hmac_new_bucket_cross_region : instance_id[*].id]), idx)
- region_location = element(local.exstng_instance_hmac_cross_region, idx)
- storage_class = element(local.exstng_instance_hmac_storage_class_cross_regional, idx)
- }
- }
- bucket_name = format("%s-%03s", "${var.prefix}new-bucket", (each.value.sequence_string + (length(local.exstng_instance_hmac_single_site_region) + length(local.exstng_instance_hmac_regional_region))))
- resource_instance_id = each.value.cos_instance
- cross_region_location = each.value.region_location
- storage_class = each.value.storage_class == "" ? "smart" : each.value.storage_class
- depends_on = [data.ibm_resource_instance.exstng_cos_instance_hmac_new_bucket_cross_region]
-}
-
-data "ibm_resource_instance" "exstng_cos_instance_hmac_new_bucket" {
- for_each = {
- for idx, value in local.exstng_instance_hmac_new_bkt : idx => {
- cos_instance = element(local.exstng_instance_hmac_new_bkt, idx)
- }
- }
- name = each.value.cos_instance
- service = var.cos_instance_service
-}
-
-data "ibm_resource_key" "existing_hmac_key" {
- for_each = {
- for idx, value in local.exstng_instance_exstng_hmac : idx => {
- hmac_key = element(local.exstng_instance_exstng_hmac, idx)
- resource_instance_id = element(flatten([for instance in data.ibm_resource_instance.exstng_cos_instance_hmac_new_bucket : instance[*].id]), idx)
- }
- }
- name = each.value.hmac_key
- resource_instance_id = each.value.resource_instance_id
- depends_on = [data.ibm_resource_instance.exstng_cos_instance_hmac_new_bucket]
-}
-
-locals {
- exstng_instance_hmac_buckets = concat((flatten([for bucket in ibm_cos_bucket.existing_cos_instance_hmac_new_cos_bucket_single_site : bucket[*].bucket_name])), (flatten([for bucket in ibm_cos_bucket.existing_cos_instance_hmac_new_cos_bucket_regional : bucket[*].bucket_name])), (flatten([for bucket in ibm_cos_bucket.existing_cos_instance_hmac_new_cos_bucket_cross_region : bucket[*].bucket_name])))
- exstng_instance_hmac_endpoints = concat((flatten([for endpoint in ibm_cos_bucket.existing_cos_instance_hmac_new_cos_bucket_single_site : endpoint[*].s3_endpoint_direct])), (flatten([for endpoint in ibm_cos_bucket.existing_cos_instance_hmac_new_cos_bucket_regional : endpoint[*].s3_endpoint_direct])), (flatten([for endpoint in ibm_cos_bucket.existing_cos_instance_hmac_new_cos_bucket_cross_region : endpoint[*].s3_endpoint_direct])))
- exstng_instance_hmac_modes = concat(local.exstng_instance_hmac_mode_single_site, local.exstng_instance_hmac_mode_regional, local.exstng_instance_hmac_mode_cross_regional)
- exstng_instance_hmac_filesets = concat(local.exstng_instance_hmac_fileset_single_site, local.exstng_instance_hmac_fileset_regional, local.exstng_instance_hmac_fileset_cross_regional)
-
- afm_cos_bucket_details_4 = [for idx, config in var.exstng_instance_hmac_new_bucket : {
- akey = (flatten([for access_key in data.ibm_resource_key.existing_hmac_key : access_key[*].credentials["cos_hmac_keys.access_key_id"]]))[idx]
- bucket = (local.exstng_instance_hmac_buckets)[idx]
- skey = (flatten([for secret_access_key in data.ibm_resource_key.existing_hmac_key : secret_access_key[*].credentials["cos_hmac_keys.secret_access_key"]]))[idx]
- }]
-
- afm_config_details_4 = [for idx, config in var.exstng_instance_hmac_new_bucket : {
- bucket = (local.exstng_instance_hmac_buckets)[idx]
- filesystem = local.filesystem
- fileset = (local.exstng_instance_hmac_filesets)[idx]
- mode = (local.exstng_instance_hmac_modes)[idx]
- endpoint = "https://${(local.exstng_instance_hmac_endpoints)[idx]}"
- }]
-}
-
-#############################################################################################################
-# 5. It uses existing COS instance, Bucket and Hmac Key
-#############################################################################################################
-
-locals {
- exstng_instance_bkt_hmac = [for instance in var.exstng_instance_bucket_hmac : instance.cos_instance]
- exstng_instance_exstng_bkt = [for bucket in var.exstng_instance_bucket_hmac : bucket.bucket_name]
- exstng_instance_hmac_bkt = [for hmac in var.exstng_instance_bucket_hmac : hmac.cos_service_cred_key]
- region_exstng_instance_bucket_hmac = [for region in var.exstng_instance_bucket_hmac : region.bucket_region]
- exstng_instance_bkt_type = [for type in var.exstng_instance_bucket_hmac : type.bucket_type]
-}
-
-
-data "ibm_resource_instance" "exstng_cos_instance_bucket_hmac" {
- for_each = {
- for idx, value in var.exstng_instance_bucket_hmac : idx => {
- cos_instance = element(local.exstng_instance_bkt_hmac, idx)
- }
- }
- name = each.value.cos_instance
- service = var.cos_instance_service
-}
-
-data "ibm_cos_bucket" "exstng_cos_instance_bucket" {
- for_each = {
- for idx, value in var.exstng_instance_bucket_hmac : idx => {
- bucket_name = element(local.exstng_instance_exstng_bkt, idx)
- resource_instance_id = element(flatten([for instance in data.ibm_resource_instance.exstng_cos_instance_bucket_hmac : instance[*].id]), idx)
- bucket_region = element(local.region_exstng_instance_bucket_hmac, idx)
- bucket_type = element(local.exstng_instance_bkt_type, idx)
- }
- }
- bucket_name = each.value.bucket_name
- resource_instance_id = each.value.resource_instance_id
- bucket_region = each.value.bucket_region
- bucket_type = each.value.bucket_type
- depends_on = [data.ibm_resource_instance.exstng_cos_instance_bucket_hmac]
-}
-
-data "ibm_resource_key" "exstng_cos_instance_hmac" {
- for_each = {
- for idx, value in var.exstng_instance_bucket_hmac : idx => {
- hmac_key = element(local.exstng_instance_hmac_bkt, idx)
- resource_instance_id = element(flatten([for instance in data.ibm_resource_instance.exstng_cos_instance_bucket_hmac : instance[*].id]), idx)
- }
- }
- name = each.value.hmac_key
- resource_instance_id = each.value.resource_instance_id
- depends_on = [data.ibm_resource_instance.exstng_cos_instance_bucket_hmac]
-}
-
-locals {
- afm_cos_bucket_details_5 = [for idx, config in var.exstng_instance_bucket_hmac : {
- akey = (flatten([for access_key in data.ibm_resource_key.exstng_cos_instance_hmac : access_key[*].credentials["cos_hmac_keys.access_key_id"]]))[idx]
- bucket = (flatten([for bucket in data.ibm_cos_bucket.exstng_cos_instance_bucket : bucket[*].bucket_name]))[idx]
- skey = (flatten([for secret_access_key in data.ibm_resource_key.exstng_cos_instance_hmac : secret_access_key[*].credentials["cos_hmac_keys.secret_access_key"]]))[idx]
- }]
-
- afm_config_details_5 = [for idx, config in var.exstng_instance_bucket_hmac : {
- bucket = (flatten([for bucket in data.ibm_cos_bucket.exstng_cos_instance_bucket : bucket[*].bucket_name]))[idx]
- filesystem = local.filesystem
- fileset = ([for fileset in var.exstng_instance_bucket_hmac : fileset.afm_fileset])[idx]
- mode = ([for mode in var.exstng_instance_bucket_hmac : mode.mode])[idx]
- endpoint = "https://${(flatten([for endpoint in data.ibm_cos_bucket.exstng_cos_instance_bucket : endpoint[*].s3_endpoint_direct]))[idx]}"
- }]
-}
diff --git a/modules/cos/outputs.tf b/modules/cos/outputs.tf
deleted file mode 100644
index 44aa1d99..00000000
--- a/modules/cos/outputs.tf
+++ /dev/null
@@ -1,9 +0,0 @@
-output "afm_cos_bucket_details" {
- value = concat(local.afm_cos_bucket_details_1, local.afm_cos_bucket_details_2, local.afm_cos_bucket_details_3, local.afm_cos_bucket_details_4, local.afm_cos_bucket_details_5)
- description = "AFM cos bucket details"
-}
-
-output "afm_config_details" {
- value = concat(local.afm_config_details_1, local.afm_config_details_2, local.afm_config_details_3, local.afm_config_details_4, local.afm_config_details_5)
- description = "AFM configuration details"
-}
diff --git a/modules/cos/variables.tf b/modules/cos/variables.tf
deleted file mode 100644
index 5008046d..00000000
--- a/modules/cos/variables.tf
+++ /dev/null
@@ -1,109 +0,0 @@
-variable "prefix" {
- type = string
- description = "A unique identifier for resources. Must begin with a letter and end with a letter or number. This prefix will be prepended to any resources provisioned by this template. Prefixes must be 16 or fewer characters."
-}
-
-variable "resource_group_id" {
- type = string
- description = "Resource group id."
-}
-
-variable "cos_instance_plan" {
- type = string
- description = "COS instance plan."
-}
-variable "cos_instance_location" {
- type = string
- description = "COS instance location."
-}
-
-variable "cos_instance_service" {
- type = string
- description = "COS instance service."
-}
-
-variable "cos_hmac_role" {
- type = string
- description = "HMAC key role."
-}
-
-variable "new_instance_bucket_hmac" {
- type = list(
- object({
- afm_fileset = string,
- mode = string,
- cos_instance = string,
- bucket_name = string,
- bucket_region = string,
- cos_service_cred_key = string,
- bucket_type = string,
- bucket_storage_class = string
- })
- )
- description = "It creates new COS instance, Bucket and Hmac Key"
-}
-variable "exstng_instance_new_bucket_hmac" {
- type = list(
- object({
- afm_fileset = string,
- mode = string,
- cos_instance = string,
- bucket_name = string,
- bucket_region = string,
- cos_service_cred_key = string,
- bucket_type = string,
- bucket_storage_class = string
- })
- )
- description = "It creates new COS instance, Bucket and Hmac Key"
-}
-variable "exstng_instance_bucket_new_hmac" {
- type = list(
- object({
- afm_fileset = string,
- mode = string,
- cos_instance = string,
- bucket_name = string,
- bucket_region = string,
- cos_service_cred_key = string,
- bucket_type = string,
- bucket_storage_class = string
- })
- )
- description = "It creates new COS instance, Bucket and Hmac Key"
-}
-variable "exstng_instance_hmac_new_bucket" {
- type = list(
- object({
- afm_fileset = string,
- mode = string,
- cos_instance = string,
- bucket_name = string,
- bucket_region = string,
- cos_service_cred_key = string,
- bucket_type = string,
- bucket_storage_class = string
- })
- )
- description = "It creates new COS instance, Bucket and Hmac Key"
-}
-variable "exstng_instance_bucket_hmac" {
- type = list(
- object({
- afm_fileset = string,
- mode = string,
- cos_instance = string,
- bucket_name = string,
- bucket_region = string,
- cos_service_cred_key = string,
- bucket_type = string,
- bucket_storage_class = string
- })
- )
- description = "It creates new COS instance, Bucket and Hmac Key"
-}
-
-variable "filesystem" {
- type = string
- description = "Storage filesystem name."
-}
diff --git a/modules/deployer/datasource.tf b/modules/deployer/datasource.tf
index fc391e8a..a993c051 100644
--- a/modules/deployer/datasource.tf
+++ b/modules/deployer/datasource.tf
@@ -26,3 +26,8 @@ data "ibm_is_instance" "bastion_instance_name" {
data "ibm_is_public_gateways" "public_gateways" {
count = var.ext_vpc_name != null ? 1 : 0
}
+
+data "ibm_is_security_group" "login_security_group" {
+ count = var.login_security_group_name != null ? 1 : 0
+ name = var.login_security_group_name
+}
diff --git a/modules/deployer/image_map.tf b/modules/deployer/image_map.tf
index 292e2b49..d4d29018 100644
--- a/modules/deployer/image_map.tf
+++ b/modules/deployer/image_map.tf
@@ -25,6 +25,18 @@ locals {
"br-sao" = "r042-93c1a769-c138-4765-91d2-5796965b6a98"
"ca-tor" = "r038-9448213f-22ce-4a6a-b6b0-22dd6ed9fbb3"
"ca-mon" = "r058-b3211406-9eec-4148-aafb-d6ab7c26a6eb"
+ },
+ "hpcc-scale-deployer-v1" = {
+ "eu-es" = "r050-9ae3a0b4-6353-4c8e-84eb-c3b1cd4255fa"
+ "eu-gb" = "r018-73480732-827e-440f-82aa-9cd2221b71ee"
+ "eu-de" = "r010-0a275d00-6d18-49b1-a961-3082e5376864"
+ "us-east" = "r014-7304cbc7-61f8-43cf-9098-ebdef3287b81"
+ "us-south" = "r006-be93bb57-4226-49c1-8089-d6ed95df071d"
+ "jp-tok" = "r022-e84d39a4-0726-467e-aa1c-d482665ecc6f"
+ "jp-osa" = "r034-6900c41a-b3f8-4c57-ae1a-fc54de86668f"
+ "au-syd" = "r026-0f3084e9-53eb-4a32-8b7f-2e222ce843cd"
+ "br-sao" = "r042-a97879cb-1e8a-4b9a-ba80-adcf7b8b37e7"
+ "ca-tor" = "r038-73dfb66d-b9c3-4fbf-a6fe-8cd7c81325c6"
}
}
}
diff --git a/modules/deployer/locals.tf b/modules/deployer/locals.tf
index 140240bc..312d185a 100644
--- a/modules/deployer/locals.tf
+++ b/modules/deployer/locals.tf
@@ -75,7 +75,8 @@ locals {
# resource_group_id = data.ibm_resource_group.existing_resource_group.id
# Subnets
- bastion_subnets = var.bastion_subnets
+ bastion_subnets = var.bastion_subnets
+ login_security_group_name_id = var.login_security_group_name != null ? data.ibm_is_security_group.login_security_group[*].id : []
}
locals {
@@ -88,3 +89,12 @@ locals {
public_gateways_list = var.ext_vpc_name != null ? data.ibm_is_public_gateways.public_gateways[0].public_gateways : []
zone_1_pgw_ids = var.ext_vpc_name != null ? [for gateway in local.public_gateways_list : gateway.id if gateway.vpc == var.vpc_id && gateway.zone == var.zones[0]] : []
}
+
+locals {
+ storage_secondary_security_group = [
+ for i, subnet in var.compute_subnets : {
+ security_group_id = one(module.bastion_sg[*].security_group_id)
+ interface_name = subnet.name
+ }
+ ]
+}
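+
+# Each entry above pairs the bastion security group with one compute subnet so
+# the bastion/deployer VSIs can attach a secondary network interface per
+# subnet; illustratively, one element looks like
+# { security_group_id = "r006-1234...", interface_name = "compute-subnet-us-south-1" }.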
diff --git a/modules/deployer/main.tf b/modules/deployer/main.tf
index adf15701..18b8914e 100644
--- a/modules/deployer/main.tf
+++ b/modules/deployer/main.tf
@@ -1,13 +1,31 @@
resource "ibm_is_subnet_public_gateway_attachment" "zone_1_attachment" {
- count = (var.ext_vpc_name != null && var.ext_cluster_subnet_id == null) ? 1 : 0
- subnet = var.cluster_subnets[0].id
- public_gateway = length(local.zone_1_pgw_ids) > 0 ? local.zone_1_pgw_ids[0] : ""
+ count = (var.ext_vpc_name != null && var.ext_compute_subnet_id == null && length(var.compute_subnets) > 0 && length(local.zone_1_pgw_ids) > 0) ? 1 : 0
+ subnet = var.compute_subnets[0].id
+ public_gateway = local.zone_1_pgw_ids[0]
}
resource "ibm_is_subnet_public_gateway_attachment" "bastion_attachment" {
- count = (var.ext_vpc_name != null && var.ext_login_subnet_id == null) ? 1 : 0
+ count = (var.ext_vpc_name != null && var.ext_login_subnet_id == null && length(var.bastion_subnets) > 0 && length(local.zone_1_pgw_ids) > 0) ? 1 : 0
subnet = local.bastion_subnets[0].id
- public_gateway = length(local.zone_1_pgw_ids) > 0 ? local.zone_1_pgw_ids[0] : ""
+ public_gateway = local.zone_1_pgw_ids[0]
+}
+
+resource "ibm_is_subnet_public_gateway_attachment" "storage_attachment" {
+ count = (var.ext_vpc_name != null && var.ext_storage_subnet_id == null && length(var.storage_subnets) > 0 && length(local.zone_1_pgw_ids) > 0) ? 1 : 0
+ subnet = var.storage_subnets[0].id
+ public_gateway = local.zone_1_pgw_ids[0]
+}
+
+resource "ibm_is_subnet_public_gateway_attachment" "client_attachment" {
+ count = (var.ext_vpc_name != null && var.ext_client_subnet_id == null && length(var.client_subnets) > 0 && length(local.zone_1_pgw_ids) > 0) ? 1 : 0
+ subnet = var.client_subnets[0].id
+ public_gateway = local.zone_1_pgw_ids[0]
+}
+
+resource "ibm_is_subnet_public_gateway_attachment" "protocol_attachment" {
+ count = (var.ext_vpc_name != null && var.ext_protocol_subnet_id == null && length(var.protocol_subnets) > 0 && length(local.zone_1_pgw_ids) > 0) ? 1 : 0
+ subnet = var.protocol_subnets[0].id
+ public_gateway = local.zone_1_pgw_ids[0]
}
module "ssh_key" {
@@ -17,7 +35,7 @@ module "ssh_key" {
}
module "bastion_sg" {
- count = var.enable_deployer ? 1 : 0
+ count = var.enable_deployer && var.login_security_group_name == null ? 1 : 0
source = "terraform-ibm-modules/security-group/ibm"
version = "2.6.2"
add_ibm_cloud_internal_rules = true
@@ -30,7 +48,7 @@ module "bastion_sg" {
module "bastion_vsi" {
count = (var.enable_deployer && var.bastion_instance_name == null) ? 1 : 0
source = "terraform-ibm-modules/landing-zone-vsi/ibm"
- version = "5.0.0"
+ version = "5.4.16"
vsi_per_subnet = 1
create_security_group = false
security_group = null
@@ -39,22 +57,24 @@ module "bastion_vsi" {
prefix = local.bastion_node_name
resource_group_id = var.resource_group
enable_floating_ip = true
- security_group_ids = module.bastion_sg[*].security_group_id
+ security_group_ids = var.login_security_group_name == null ? module.bastion_sg[*].security_group_id : local.login_security_group_name_id
ssh_key_ids = local.bastion_ssh_keys
- subnets = local.bastion_subnets
+ subnets = var.scheduler == "Scale" && var.enable_sec_interface_compute ? var.storage_subnets : local.bastion_subnets
tags = local.tags
user_data = data.template_file.bastion_user_data.rendered
vpc_id = var.vpc_id
kms_encryption_enabled = var.kms_encryption_enabled
skip_iam_authorization_policy = true
boot_volume_encryption_key = var.boot_volume_encryption_key
- existing_kms_instance_guid = var.existing_kms_instance_guid
+ secondary_security_groups = var.scheduler == "Scale" && var.enable_sec_interface_compute ? local.storage_secondary_security_group : []
+ secondary_subnets = var.scheduler == "Scale" && var.enable_sec_interface_compute ? var.compute_subnets : []
+ manage_reserved_ips = var.scheduler == "Scale" && var.enable_sec_interface_compute ? true : false
}
module "deployer_vsi" {
count = local.enable_deployer ? 1 : 0
source = "terraform-ibm-modules/landing-zone-vsi/ibm"
- version = "5.0.0"
+ version = "5.4.6"
vsi_per_subnet = 1
create_security_group = false
security_group = null
@@ -63,14 +83,13 @@ module "deployer_vsi" {
prefix = local.deployer_node_name
resource_group_id = var.resource_group
enable_floating_ip = false
- security_group_ids = module.bastion_sg[*].security_group_id
+ security_group_ids = var.login_security_group_name == null ? module.bastion_sg[*].security_group_id : local.login_security_group_name_id
ssh_key_ids = local.bastion_ssh_keys
- subnets = local.bastion_subnets
+ subnets = var.scheduler == "Scale" && var.enable_sec_interface_compute ? var.storage_subnets : local.bastion_subnets
tags = local.tags
user_data = data.template_file.deployer_user_data.rendered
vpc_id = var.vpc_id
kms_encryption_enabled = var.kms_encryption_enabled
skip_iam_authorization_policy = var.skip_iam_authorization_policy
boot_volume_encryption_key = var.boot_volume_encryption_key
- existing_kms_instance_guid = var.existing_kms_instance_guid
}
diff --git a/modules/deployer/templates/deployer_user_data.tpl b/modules/deployer/templates/deployer_user_data.tpl
index ebaacb88..24bc6895 100644
--- a/modules/deployer/templates/deployer_user_data.tpl
+++ b/modules/deployer/templates/deployer_user_data.tpl
@@ -6,13 +6,36 @@
###################################################
#!/usr/bin/env bash
-if grep -E -q "CentOS|Red Hat" /etc/os-release
-then
+set -e
+
+# Detect OS and set user
+if grep -E -q "CentOS|Red Hat" /etc/os-release; then
USER=vpcuser
-elif grep -q "Ubuntu" /etc/os-release
-then
+ yum install -y nc curl unzip jq
+elif grep -q "Ubuntu" /etc/os-release; then
USER=ubuntu
+ apt-get update -y
+ apt-get install -y netcat curl unzip jq
fi
+
+# Install IBM Cloud CLI
+echo "Installing IBM Cloud CLI..."
+curl -fsSL https://clis.cloud.ibm.com/install/linux | sh
+
+# Add CLI to PATH for immediate use
+export PATH=$PATH:/usr/local/bin
+
+# Install infrastructure service plugin (is)
+echo "Installing IBM Cloud plugins..."
+ibmcloud plugin install infrastructure-service -f
+
+# Verify installation
+echo "Verifying installation..."
+ibmcloud --version
+ibmcloud plugin list | grep infrastructure-service || echo "plugin not found!"
+
+echo "IBM Cloud CLI and IS plugin installed successfully."
+
sed -i -e "s/^/no-port-forwarding,no-agent-forwarding,no-X11-forwarding,command=\"echo \'Please login as the user \\\\\"$USER\\\\\" rather than the user \\\\\"root\\\\\".\';echo;sleep 5; exit 142\" /" /root/.ssh/authorized_keys
echo "DOMAIN=${compute_dns_domain}" >> "/etc/sysconfig/network-scripts/ifcfg-${compute_interfaces}"
echo "MTU=9000" >> "/etc/sysconfig/network-scripts/ifcfg-${compute_interfaces}"
diff --git a/modules/deployer/variables.tf b/modules/deployer/variables.tf
index e1c9fb2e..21446863 100644
--- a/modules/deployer/variables.tf
+++ b/modules/deployer/variables.tf
@@ -43,7 +43,7 @@ variable "cluster_cidr" {
default = "10.241.0.0/18"
}
-variable "cluster_subnets" {
+variable "compute_subnets" {
type = list(object({
name = string
id = string
@@ -60,7 +60,25 @@ variable "ext_login_subnet_id" {
description = "Name of an existing subnets in which the bastion and cluster resources will be deployed. If no value is given, then new subnet(s) will be provisioned for the cluster. [Learn more](https://cloud.ibm.com/docs/vpc)"
}
-variable "ext_cluster_subnet_id" {
+variable "ext_compute_subnet_id" {
+ type = string
+ default = null
+ description = "Name of an existing subnets in which the bastion and cluster resources will be deployed. If no value is given, then new subnet(s) will be provisioned for the cluster. [Learn more](https://cloud.ibm.com/docs/vpc)"
+}
+
+variable "ext_client_subnet_id" {
+ type = string
+ default = null
+ description = "Name of an existing subnets in which the bastion and cluster resources will be deployed. If no value is given, then new subnet(s) will be provisioned for the cluster. [Learn more](https://cloud.ibm.com/docs/vpc)"
+}
+
+variable "ext_storage_subnet_id" {
+ type = string
+ default = null
+ description = "Name of an existing subnets in which the bastion and cluster resources will be deployed. If no value is given, then new subnet(s) will be provisioned for the cluster. [Learn more](https://cloud.ibm.com/docs/vpc)"
+}
+
+variable "ext_protocol_subnet_id" {
type = string
default = null
description = "Name of an existing subnets in which the bastion and cluster resources will be deployed. If no value is given, then new subnet(s) will be provisioned for the cluster. [Learn more](https://cloud.ibm.com/docs/vpc)"
@@ -72,7 +90,7 @@ variable "ext_cluster_subnet_id" {
variable "scheduler" {
type = string
default = null
- description = "Select one of the scheduler (LSF/Symphony/Slurm/null)"
+ description = "Select one of the scheduler (Scale/LSF/Symphony/Slurm/null)"
}
##############################################################################
@@ -147,12 +165,6 @@ variable "boot_volume_encryption_key" {
description = "CRN of boot volume encryption key"
}
-variable "existing_kms_instance_guid" {
- type = string
- default = null
- description = "GUID of boot volume encryption key"
-}
-
variable "skip_iam_authorization_policy" {
type = bool
default = true
@@ -194,3 +206,48 @@ variable "zones" {
description = "Region where VPC will be created. To find your VPC region, use `ibmcloud is regions` command to find available regions."
type = list(string)
}
+
+variable "storage_subnets" {
+ type = list(object({
+ name = string
+ id = string
+ zone = string
+ cidr = string
+ }))
+ default = []
+ description = "Subnets to launch the storage host."
+}
+
+variable "client_subnets" {
+ type = list(object({
+ name = string
+ id = string
+ zone = string
+ cidr = string
+ }))
+ default = []
+ description = "Subnets to launch the client host."
+}
+
+variable "protocol_subnets" {
+ type = list(object({
+ name = string
+ id = string
+ zone = string
+ cidr = string
+ }))
+ default = []
+ description = "Subnets to launch the protocol host."
+}
+
+variable "login_security_group_name" {
+ type = string
+ default = null
+ description = "Provide the security group name to provision the bastion node. If set to null, the solution will automatically create the necessary security group and rules. If you choose to use an existing security group, ensure it has the appropriate rules configured for the bastion node to function properly."
+}
+
+variable "enable_sec_interface_compute" {
+ type = bool
+ default = false
+ description = "Specifies whether the secondary interface is enabled for the Scale compute cluster."
+}
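+
+# Illustrative usage, assuming an existing security group named
+# "my-bastion-sg" that already carries the rules the bastion needs:
+#
+#   login_security_group_name    = "my-bastion-sg"
+#   enable_sec_interface_compute = true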
diff --git a/modules/host_resolution_add/locals.tf b/modules/host_resolution_add/locals.tf
new file mode 100644
index 00000000..a9aad7c3
--- /dev/null
+++ b/modules/host_resolution_add/locals.tf
@@ -0,0 +1,40 @@
+locals {
+ deployer_hostentry_playbook_path = format("%s/%s/deployer_host_entry_play.yml", var.clone_path, "ibm-spectrum-scale-install-infra")
+ scale_hostentry_playbook_path = format("%s/%s/scale_host_entry_play.yml", var.clone_path, "ibm-spectrum-scale-install-infra")
+ scale_cluster_hosts = format("%s/%s/scale_cluster_hosts.yml", var.clone_path, "ibm-spectrum-scale-install-infra")
+  scale_baremetal_prerequisite_vars = format("%s/%s/scale_baremetal_vars.yml", var.clone_path, "ibm-spectrum-scale-install-infra")
+ scale_all_inventory = format("%s/%s/scale_all_inventory.ini", var.clone_path, "ibm-spectrum-scale-install-infra")
+ remove_hostentry_playbooks_path = format("%s/%s/remove_host_entry_play.yml", var.clone_path, "ibm-spectrum-scale-install-infra")
+ scale_baremetal_ssh_check_playbook_path = format("%s/%s/scale_baremetal_ssh_check_playbook.yml", var.clone_path, "ibm-spectrum-scale-install-infra")
+ scale_baremetal_bootdrive_playbook_path = format("%s/%s/scale_baremetal_bootdrive.yml", var.clone_path, "ibm-spectrum-scale-install-infra")
+  scale_baremetal_prerequisite_playbook_path = format("%s/%s/scale_baremetal_prerequisite.yml", var.clone_path, "ibm-spectrum-scale-install-infra")
+ gpfs_restart_playbook_path = format("%s/%s/scale_gpfs_restart.yml", var.clone_path, "ibm-spectrum-scale-install-infra")
+ domain_name_file = format("%s/%s/domain_names.yml", var.clone_path, "ibm-spectrum-scale-install-infra")
+ storage_domain = try(var.domain_names.storage, null)
+ protocol_domain = try(var.domain_names.protocol, null)
+ client_private_key = format("%s/client_key/id_rsa", var.clone_path) #tfsec:ignore:GEN002
+ compute_private_key = format("%s/compute_key/id_rsa", var.clone_path) #tfsec:ignore:GEN002
+ storage_private_key = format("%s/storage_key/id_rsa", var.clone_path) #tfsec:ignore:GEN002
+ gklm_private_key = format("%s/gklm_key/id_rsa", var.clone_path) #tfsec:ignore:GEN002
+}
+
+locals {
+ normalize_hosts = {
+ # groups with string values → wrap into {name=...}
+ compute_hosts = { for k, v in try(var.compute_hosts, {}) : k => { name = v, id = null } }
+ compute_mgmnt_hosts = { for k, v in try(var.compute_mgmnt_hosts, {}) : k => { name = v, id = null } }
+ client_hosts = { for k, v in try(var.client_hosts, {}) : k => { name = v, id = null } }
+ gklm_hosts = { for k, v in try(var.gklm_hosts, {}) : k => { name = v, id = null } }
+ afm_hosts = { for k, v in try(var.afm_hosts, {}) : k => { name = v, id = null } }
+ protocol_hosts = { for k, v in try(var.protocol_hosts, {}) : k => { name = v, id = null } }
+ storage_hosts = { for k, v in try(var.storage_hosts, {}) : k => { name = v, id = null } }
+ storage_tb_hosts = { for k, v in try(var.storage_tb_hosts, {}) : k => { name = v, id = null } }
+ storage_mgmnt_hosts = { for k, v in try(var.storage_mgmnt_hosts, {}) : k => { name = v, id = null } }
+
+ # groups that already have {id,name}
+ storage_bms_hosts = try(var.storage_bms_hosts, {})
+ storage_tb_bms_hosts = try(var.storage_tb_bms_hosts, {})
+ afm_bms_hosts = try(var.afm_bms_hosts, {})
+ protocol_bms_hosts = try(var.protocol_bms_hosts, {})
+ }
+}
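+
+# normalize_hosts gives every group a uniform {name, id} shape; illustratively,
+# a plain entry such as { "host-0" = "10.241.0.4" } becomes
+# { "host-0" = { name = "10.241.0.4", id = null } }, while the BMS groups that
+# already carry {id, name} pass through unchanged.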
diff --git a/modules/host_resolution_add/main.tf b/modules/host_resolution_add/main.tf
new file mode 100644
index 00000000..0278d4d5
--- /dev/null
+++ b/modules/host_resolution_add/main.tf
@@ -0,0 +1,805 @@
+resource "local_file" "scale_cluster_hosts" {
+ filename = local.scale_cluster_hosts
+ content = yamlencode({
+ storage_hosts = var.storage_hosts
+ storage_mgmnt_hosts = var.storage_mgmnt_hosts
+ storage_tb_hosts = var.storage_tb_hosts
+ compute_hosts = var.compute_hosts
+ compute_mgmnt_hosts = var.compute_mgmnt_hosts
+ client_hosts = var.client_hosts
+ protocol_hosts = var.protocol_hosts
+ gklm_hosts = var.gklm_hosts
+ afm_hosts = var.afm_hosts
+ storage_bms_hosts = var.storage_bms_hosts
+ storage_tb_bms_hosts = var.storage_tb_bms_hosts
+ protocol_bms_hosts = var.protocol_bms_hosts
+ afm_bms_hosts = var.afm_bms_hosts
+ })
+}
+
+resource "local_file" "domain_file" {
+ filename = local.domain_name_file
+
+ content = yamlencode({
+ domain_names = {
+ compute = try(var.domain_names.compute, null)
+ storage = try(var.domain_names.storage, null)
+ protocol = try(var.domain_names.protocol, null)
+ client = try(var.domain_names.client, null)
+ gklm = try(var.domain_names.gklm, null)
+ }
+ })
+}
+
+resource "local_file" "deployer_host_entry_playbook" {
+ count = var.scheduler == "Scale" ? 1 : 0
+ content = < 0 ? [
+ "[storage]",
+ join("\n", flatten([
+ # Non-persistent storage hosts
+ [
+ for host in flatten([
+ values(local.normalize_hosts.storage_hosts),
+ values(local.normalize_hosts.storage_tb_hosts),
+ values(local.normalize_hosts.storage_mgmnt_hosts)
+ ]) : "${host.name} ansible_ssh_private_key_file=${local.storage_private_key} storage_type=scratch colocate_protocol_instances=${var.colocate_protocol_instances} scale_protocol_node=${var.enable_protocol}"
+ ],
+ # Persistent storage hosts
+ [
+ for host in flatten([
+ values(local.normalize_hosts.storage_bms_hosts),
+ values(local.normalize_hosts.storage_tb_bms_hosts)
+ ]) : "${host.name} id=${host.id} ansible_ssh_private_key_file=${local.storage_private_key} storage_type=persistent scale_protocol_node=${var.enable_protocol} colocate_protocol_instances=${var.colocate_protocol_instances} bms_boot_drive_encryption=${var.bms_boot_drive_encryption}"
+ ],
+ # AFM hosts
+ [
+ for host in values(local.normalize_hosts.afm_hosts) :
+ "${host.name} ansible_ssh_private_key_file=${local.storage_private_key} storage_type=scratch scale_protocol_node=false"
+ ],
+ # AFM BMS hosts
+ [
+ for host in values(local.normalize_hosts.afm_bms_hosts) :
+ "${host.name} id=${host.id} ansible_ssh_private_key_file=${local.storage_private_key} storage_type=persistent scale_protocol_node=false bms_boot_drive_encryption=${var.bms_boot_drive_encryption}"
+ ],
+ # Protocol hosts
+ [
+ for host in values(local.normalize_hosts.protocol_hosts) :
+ "${host.name} ansible_ssh_private_key_file=${local.storage_private_key} storage_type=scratch scale_protocol_node=true colocate_protocol_instances=false"
+ ],
+ # Protocol BMS hosts
+ [
+ for host in values(local.normalize_hosts.protocol_bms_hosts) :
+ "${host.name} id=${host.id} ansible_ssh_private_key_file=${local.storage_private_key} storage_type=persistent scale_protocol_node=true colocate_protocol_instances=false bms_boot_drive_encryption=${var.bms_boot_drive_encryption}"
+ ]
+ ])),
+ ""
+ ] : [],
+
+ # COMPUTE
+ length(flatten([
+ values(local.normalize_hosts.compute_hosts),
+ values(local.normalize_hosts.compute_mgmnt_hosts)
+ ])) > 0 ? [
+ "[compute]",
+ join("\n", [
+ for host in flatten([
+ values(local.normalize_hosts.compute_hosts),
+ values(local.normalize_hosts.compute_mgmnt_hosts)
+ ]) : "${host.name} ansible_ssh_private_key_file=${local.compute_private_key}"
+ ]),
+ ""
+ ] : [],
+
+ # CLIENT
+ length(values(local.normalize_hosts.client_hosts)) > 0 ? [
+ "[client]",
+ join("\n", [
+ for host in values(local.normalize_hosts.client_hosts) :
+ "${host.name} ansible_ssh_private_key_file=${local.client_private_key}"
+ ]),
+ ""
+ ] : [],
+
+ # GKLM
+ length(values(local.normalize_hosts.gklm_hosts)) > 0 ? [
+ "[gklm]",
+ join("\n", [
+ for host in values(local.normalize_hosts.gklm_hosts) :
+ "${host.name} ansible_ssh_private_key_file=${local.gklm_private_key}"
+ ]),
+ ""
+ ] : []
+
+ ])))
+  filename = local.scale_all_inventory
+}
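+
+# An illustrative fragment of the rendered inventory, assuming one scratch
+# storage node and one compute node:
+#
+#   [storage]
+#   10.241.1.4 ansible_ssh_private_key_file=.../storage_key/id_rsa storage_type=scratch colocate_protocol_instances=false scale_protocol_node=false
+#
+#   [compute]
+#   10.241.0.4 ansible_ssh_private_key_file=.../compute_key/id_rsa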
+
+resource "local_file" "scale_host_entry_playbook" {
+ count = var.scheduler == "Scale" ? 1 : 0
+ content = <-
+ -o ConnectTimeout=20
+ -o StrictHostKeyChecking=accept-new
+ -o UserKnownHostsFile=/dev/null
+ -o ServerAliveInterval=15
+ -o ServerAliveCountMax=3
+
+ tasks:
+ # Verify required variables are set
+ - name: Validate required variables
+ block:
+ - name: Check for IBM Cloud API key
+ ansible.builtin.fail:
+ msg: "ibmcloud_api_key is not defined"
+ when: ibmcloud_api_key is not defined
+
+ - name: Check for resource group
+ ansible.builtin.fail:
+ msg: "resource_group is not defined"
+ when: resource_group is not defined
+
+ - name: Check for VPC region
+ ansible.builtin.fail:
+ msg: "vpc_region is not defined"
+ when: vpc_region is not defined
+
+ # Connection verification
+ - name: Attempt SSH connection
+ ansible.builtin.wait_for:
+ port: 22
+ host: "{{ inventory_hostname }}"
+ timeout: 20
+ delay: 5
+ connect_timeout: 20
+ register: ssh_check
+ until: ssh_check is success
+ retries: "{{ max_ssh_attempts }}"
+ delay: "{{ ssh_retry_delay }}"
+ ignore_errors: true
+ delegate_to: localhost
+ changed_when: false
+
+ - name: Check SSH port status
+ ansible.builtin.shell: |
+ nc -zv -w 5 "{{ inventory_hostname }}" 22 && echo "OPEN" || echo "CLOSED"
+ register: port_check
+ ignore_errors: true
+ changed_when: false
+ delegate_to: localhost
+ when: ssh_check is failed
+
+ - name: Debug connection status
+ ansible.builtin.debug:
+ msg: |
+ Server: {{ inventory_hostname }}
+ SSH Status: {{ ssh_check | default('undefined') }}
+ Port Status: {{ port_check.stdout | default('undefined') }}
+ Server ID: {{ id | default('undefined') }}
+ when: ssh_check is failed
+
+ # Server recovery for unresponsive systems
+ - name: Recover unresponsive server (via IBM Cloud CLI)
+ block:
+ - name: Login to IBM Cloud (local)
+ ansible.builtin.shell: |
+ /usr/local/bin/ibmcloud logout || true
+ /usr/local/bin/ibmcloud login --apikey "{{ ibmcloud_api_key }}" -q
+ /usr/local/bin/ibmcloud target -g "{{ resource_group }}" -r "{{ vpc_region }}"
+ args:
+ executable: /bin/bash
+ delegate_to: localhost
+ changed_when: false
+
+ - name: Get current server status (local)
+ ansible.builtin.shell: |
+ /usr/local/bin/ibmcloud is bm {{ id }} --output JSON | jq -r '.status'
+ args:
+ executable: /bin/bash
+ register: current_status
+ delegate_to: localhost
+ changed_when: false
+
+ - name: Stop server if not already stopped (local)
+ ansible.builtin.shell: |
+ status=$(/usr/local/bin/ibmcloud is bm {{ id }} --output JSON | jq -r '.status')
+ if [ "$status" != "stopped" ]; then
+ /usr/local/bin/ibmcloud is bm-stop {{ id }} --type hard --force --quiet
+ fi
+ args:
+ executable: /bin/bash
+ async: 300
+ poll: 0
+ delegate_to: localhost
+
+ - name: Wait for server to stop
+ ansible.builtin.shell: |
+ # Set timeout to 15 minutes (900 seconds)
+ end_time=$(( $(date +%s) + 900 ))
+ while [ $(date +%s) -lt $end_time ]; do
+ # Get status with full path and proper error handling
+ status=$(/usr/local/bin/ibmcloud is bm {{ id }} --output JSON 2>/dev/null | jq -r '.status' || echo "ERROR")
+
+ # Exit immediately if stopped
+ if [ "$status" == "stopped" ]; then
+ exit 0
+ fi
+
+ # Log current status
+ echo "Current status: $status"
+ sleep 30
+ done
+
+ # If we get here, timeout was reached
+ echo "Timeout waiting for server to stop"
+ exit 1
+ args:
+ executable: /bin/bash
+ register: stop_wait
+ delegate_to: localhost
+ changed_when: false
+ until: stop_wait.rc == 0
+ retries: 10
+ delay: 30
+
+ - name: Show stop wait debug info
+ ansible.builtin.debug:
+ var: stop_wait.stdout_lines
+ when: stop_wait is defined
+
+ - name: Start server (local)
+ ansible.builtin.shell: |
+ /usr/local/bin/ibmcloud is bm-start {{ id }} --quiet
+ args:
+ executable: /bin/bash
+ async: 300
+ poll: 0
+ delegate_to: localhost
+
+ - name: Wait for server to come online
+ ansible.builtin.wait_for:
+ port: 22
+ host: "{{ inventory_hostname }}"
+ timeout: 900
+ delay: 30
+ connect_timeout: 30
+ delegate_to: localhost
+
+ when:
+ - ssh_check is failed
+ - port_check.stdout is defined
+ - "'CLOSED' in port_check.stdout"
+
+ - name: Fail if still unresponsive
+ ansible.builtin.fail:
+ msg: |
+ Server {{ inventory_hostname }} remains unresponsive after recovery attempts
+ Last SSH Status: {{ ssh_check | default('undefined') }}
+ Last Port Status: {{ port_check.stdout | default('undefined') }}
+ Server Status: {{ current_status.stdout | default('undefined') }}
+ when:
+ - ssh_check is failed
+ - port_check.stdout is defined
+ - "'OPEN' in port_check.stdout"
+EOT
+ filename = local.scale_baremetal_ssh_check_playbook_path
+}
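+
+# Illustrative invocation of the generated playbook (values are placeholders):
+#
+#   ansible-playbook -i scale_all_inventory.ini scale_baremetal_ssh_check_playbook.yml \
+#     -e "ibmcloud_api_key=<api-key> resource_group=<rg> vpc_region=<region>"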
+
+resource "local_file" "bms_bootdrive_playbook" {
+ count = var.scheduler == "Scale" ? 1 : 0
+  content = <<EOT
+---
+- hosts: all
+  gather_facts: false
+  vars:
+    ansible_ssh_common_args: >-
+ -o ConnectTimeout=20
+ -o StrictHostKeyChecking=accept-new
+ -o UserKnownHostsFile=/dev/null
+ -o ServerAliveInterval=15
+ -o ServerAliveCountMax=3
+
+ tasks:
+ # Main boot drive encryption tasks
+ - name: Handle boot drive encryption for persistent storage
+ when:
+ - bms_boot_drive_encryption | default(false)
+ - storage_type | default("") == "persistent"
+ - "'mgmt' not in inventory_hostname"
+ block:
+ # Post-recovery verification
+ - name: Verify encryption setup
+ block:
+ - name: Check for encrypted drives
+ ansible.builtin.command: lsblk -o NAME,FSTYPE,MOUNTPOINT
+ register: lsblk_output
+ changed_when: false
+
+ - name: Debug storage configuration
+ ansible.builtin.debug:
+ var: lsblk_output.stdout_lines
+
+ - name: Restart NetworkManager
+ ansible.builtin.service:
+ name: NetworkManager
+ state: restarted
+ async: 60
+ poll: 0
+
+ - name: Verify NetworkManager status
+ ansible.builtin.service:
+ name: NetworkManager
+ state: started
+ changed_when: false
+EOT
+ filename = local.scale_baremetal_bootdrive_playbook_path
+}
+
+resource "local_file" "scale_baremetal_prerequesite_playbook" {
+ count = var.scheduler == "Scale" && var.storage_type == "persistent" ? 1 : 0
+ content = </dev/null || true
+ echo "Installing consumer RPM..."
+ rpm -Uvh "http://$${capsule}/pub/katello-ca-consumer-latest.noarch.rpm" || true
+ subscription-manager config --server.hostname="$${capsule}" || true
+ subscription-manager config --rhsm.baseurl="https://$${capsule}/pulp/repos" || true
+ if [ -f /etc/rhsm/facts/katello.facts ]; then
+ mv /etc/rhsm/facts/katello.facts "/etc/rhsm/facts/katello.facts.bak.$(date +%s)"
+ fi
+ echo "{\"network.hostname-override\":\"$${profileName}\"}" > /etc/rhsm/facts/katello.facts
+ echo "Registering system..."
+ subscription-manager register --org="$${organization}" --activationkey="$${activationKey}" --force
+ dest: /tmp/register_rhel.sh
+ mode: '0755'
+ when:
+ - ansible_os_family == "RedHat"
+ - subscription_status.rc != 0 or "not registered" in subscription_status.stderr
+
+ - name: Execute subscription registration script
+ command: /bin/bash /tmp/register_rhel.sh
+ args:
+ warn: false
+ register: registration_result
+ failed_when: registration_result.rc != 0 and "This system is already registered" not in registration_result.stderr and "is already registered" not in registration_result.stderr
+ when:
+ - ansible_os_family == "RedHat"
+ - subscription_status.rc != 0 or "not registered" in subscription_status.stderr
+
+ - name: Clean up registration script
+ file:
+ path: /tmp/register_rhel.sh
+ state: absent
+ when: ansible_os_family == "RedHat"
+
+ # --- OS detection and package installation ---
+ - name: Gather OS facts
+ ansible.builtin.setup:
+ filter: "ansible_distribution*"
+
+ - name: Set RHEL vars
+ set_fact:
+ package_mgr: "dnf"
+ package_list: >-
+ {% if 'RedHat' in ansible_distribution %}
+ {% if '9' in ansible_distribution_version %}
+ python3 kernel-devel-{{ ansible_kernel }} kernel-headers-{{ ansible_kernel }} firewalld numactl make gcc-c++ elfutils-libelf-devel bind-utils iptables-nft nfs-utils elfutils elfutils-devel python3-dnf-plugin-versionlock
+ {% else %}
+ python38 kernel-devel-{{ ansible_kernel }} kernel-headers-{{ ansible_kernel }} firewalld numactl jq make gcc-c++ elfutils-libelf-devel bind-utils iptables nfs-utils elfutils elfutils-devel python3-dnf-plugin-versionlock
+ {% endif %}
+ {% else %}
+ ""
+ {% endif %}
+ when: ansible_os_family == "RedHat"
+
+ - name: Enable RHEL 9 supplementary repo
+ command: "subscription-manager repos --enable=rhel-9-for-x86_64-supplementary-eus-rpms"
+ ignore_errors: yes
+ when: ansible_distribution_major_version == "9" and ansible_os_family == "RedHat"
+
+ - name: Install required packages
+ yum:
+ name: "{{ package_list.split() }}"
+ state: present
+ register: package_install
+ until: package_install is succeeded
+ retries: 3
+ delay: 10
+ when: package_list != ""
+
+ - name: Security update
+ yum:
+ name: "*"
+ security: yes
+ state: latest
+ ignore_errors: yes
+ when: ansible_os_family == "RedHat"
+
+ - name: Version lock packages
+ command: "yum versionlock add {{ package_list }}"
+ ignore_errors: yes
+ when: ansible_os_family == "RedHat"
+
+ - name: Add GPFS bin path to root bashrc
+ lineinfile:
+ path: "/root/.bashrc"
+ line: "export PATH=$PATH:/usr/lpp/mmfs/bin"
+
+ # --- Firewall ---
+ - name: Stop firewalld
+ service:
+ name: "firewalld"
+ state: stopped
+
+ - name: Configure firewall ports and services (permanent)
+ firewalld:
+ port: "{{ item.port }}/{{ item.proto }}"
+ permanent: true
+ state: enabled
+ loop:
+ - { port: 1191, proto: tcp }
+ - { port: 4444, proto: tcp }
+ - { port: 4444, proto: udp }
+ - { port: 4739, proto: udp }
+ - { port: 4739, proto: tcp }
+ - { port: 9084, proto: tcp }
+ - { port: 9085, proto: tcp }
+ - { port: 2049, proto: tcp }
+ - { port: 2049, proto: udp }
+ - { port: 111, proto: tcp }
+ - { port: 111, proto: udp }
+ - { port: 30000-61000, proto: tcp }
+ - { port: 30000-61000, proto: udp }
+
+ - name: Enable HTTP/HTTPS services (permanent)
+ firewalld:
+ service: "{{ item }}"
+ permanent: true
+ state: enabled
+ loop:
+ - "http"
+ - "https"
+
+ - name: Start and enable firewalld
+ service:
+ name: "firewalld"
+ state: started
+ enabled: true
+
+ when:
+ - storage_type | default("") == "persistent"
+ - "'mgmt' not in inventory_hostname"
+
+ # Protocol-specific configuration
+ - block:
+ # --- Hostname ---
+ - name: Configure hostname with DNS domain
+ hostname:
+ name: "{{ ansible_hostname }}.{{ protocol_domain }}"
+
+ - name: Remove existing eth1 connection
+ shell: |
+ sec_interface=$(nmcli -t con show --active | grep eth1 | cut -d ':' -f 1)
+ nmcli conn del "$sec_interface"
+ ignore_errors: yes
+
+ - name: Add eth1 ethernet connection
+ command: nmcli con add type ethernet con-name eth1 ifname eth1
+
+ - name: Add DOMAIN to protocol interface config
+ lineinfile:
+ path: "/etc/sysconfig/network-scripts/ifcfg-{{ protocol_interface }}"
+ line: "DOMAIN={{ protocol_domain }}"
+ create: yes
+
+ - name: Set MTU to 9000 for protocol interface
+ lineinfile:
+ path: "/etc/sysconfig/network-scripts/ifcfg-{{ protocol_interface }}"
+ line: "MTU=9000"
+ create: yes
+
+ - name: Add IC_REGION to root bashrc
+ lineinfile:
+ path: "/root/.bashrc"
+ line: "export IC_REGION={{ vpc_region }}"
+
+ - name: Add IC_SUBNET to root bashrc
+ lineinfile:
+ path: "/root/.bashrc"
+ line: "export IC_SUBNET={{ protocol_subnet }}"
+
+ - name: Add IC_RG to root bashrc
+ lineinfile:
+ path: "/root/.bashrc"
+ line: "export IC_RG={{ resource_group }}"
+ when:
+ - storage_type | default("") == "persistent"
+ - scale_protocol_node | default(false) | bool
+EOT
+  filename = local.scale_baremetal_prerequisite_playbook_path
+}
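+
+# The prerequisite playbook registers RHEL subscriptions, installs the build
+# dependencies Scale needs, and opens the firewall ports listed above (1191 is
+# the GPFS daemon port; 111 and 2049 cover NFS; 30000-61000 is the dynamic
+# range reserved here for Scale services).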
+
+resource "local_file" "scale_gpfs_restart_playbook" {
+ count = var.scheduler == "Scale" && var.scale_encryption_type == "key_protect" ? 1 : 0
+ content = < "${local.key_protect_path}/Key_Protect_Server.cert"
+ # Create a Key Protect Server Root and CA certs
+ [ -f "${local.key_protect_path}/Key_Protect_Server.cert" ] && awk '/-----BEGIN CERTIFICATE-----/,/-----END CERTIFICATE-----/' "${local.key_protect_path}/Key_Protect_Server.cert" > "${local.key_protect_path}/Key_Protect_Server_CA.cert"
+ [ -f "${local.key_protect_path}/Key_Protect_Server_CA.cert" ] && awk '/-----BEGIN CERTIFICATE-----/{x="${local.key_protect_path}/Key_Protect_Server.chain"i".cert"; i++} {print > x}' "${local.key_protect_path}/Key_Protect_Server_CA.cert"
+ [ -f "${local.key_protect_path}/Key_Protect_Server.chain.cert" ] && mv "${local.key_protect_path}/Key_Protect_Server.chain.cert" "${local.key_protect_path}/Key_Protect_Server.chain0.cert"
+ # Create a Self Signed Certificates
+ [ ! -f "${local.key_protect_path}/${var.resource_prefix}.key" ] && openssl genpkey -algorithm RSA -out "${local.key_protect_path}/${var.resource_prefix}.key"
+ [ ! -f "${local.key_protect_path}/${var.resource_prefix}.csr" ] && openssl req -new -key "${local.key_protect_path}/${var.resource_prefix}.key" -out "${local.key_protect_path}/${var.resource_prefix}.csr" -subj "/CN=${var.vpc_storage_cluster_dns_domain}"
+ [ ! -f "${local.key_protect_path}/${var.resource_prefix}.cert" ] && openssl x509 -req -days 3650 -in "${local.key_protect_path}/${var.resource_prefix}.csr" -signkey "${local.key_protect_path}/${var.resource_prefix}.key" -out "${local.key_protect_path}/${var.resource_prefix}.cert"
+ EOT
+ }
+}
+
+resource "ibm_kms_key" "scale_key" {
+ instance_id = var.key_protect_instance_id
+ key_name = "key"
+ standard_key = false
+}
+
+resource "ibm_kms_kmip_adapter" "sclae_kmip_adapter" {
+ instance_id = var.key_protect_instance_id
+ profile = "native_1.0"
+ profile_data = {
+ "crk_id" = ibm_kms_key.scale_key.key_id
+ }
+ description = "Key Protect adapter"
+ name = format("%s-kp-adapter", var.resource_prefix)
+}
+
+resource "ibm_kms_kmip_client_cert" "mycert" {
+ instance_id = var.key_protect_instance_id
+  adapter_id  = ibm_kms_kmip_adapter.scale_kmip_adapter.adapter_id
+ certificate = data.local_file.kpclient_cert.content
+ name = format("%s-kp-cert", var.resource_prefix)
+ depends_on = [data.local_file.kpclient_cert]
+}
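+
+# Taken together: a root key (standard_key = false) backs the native KMIP
+# adapter, and the self-signed certificate generated above is registered as
+# the KMIP client credential that Scale nodes present to Key Protect when
+# fetching encryption keys.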
diff --git a/modules/key_protect/outputs.tf b/modules/key_protect/outputs.tf
new file mode 100644
index 00000000..e69de29b
diff --git a/modules/key_protect/variables.tf b/modules/key_protect/variables.tf
new file mode 100644
index 00000000..884dbf37
--- /dev/null
+++ b/modules/key_protect/variables.tf
@@ -0,0 +1,29 @@
+variable "key_protect_instance_id" {
+ type = string
+ default = null
+ description = "An existing Key Protect instance used for filesystem encryption"
+}
+
+variable "resource_prefix" {
+ type = string
+ default = "scale"
+ description = "A unique identifier for resources. Must begin with a letter and end with a letter or number. This prefix will be prepended to any resources provisioned by this template. Prefixes must be 16 or fewer characters."
+}
+
+variable "vpc_region" {
+ type = string
+ default = null
+ description = "vpc region"
+}
+
+variable "scale_config_path" {
+ type = string
+ default = "/opt/IBM/ibm-spectrumscale-cloud-deploy"
+ description = "Path to clone github.com/IBM/ibm-spectrum-scale-install-infra."
+}
+
+variable "vpc_storage_cluster_dns_domain" {
+ type = string
+ default = "ldap.com"
+ description = "Base domain for the LDAP Server"
+}
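+
+# Illustrative module call (the instance GUID is a placeholder):
+#
+#   module "key_protect" {
+#     source                         = "./modules/key_protect"
+#     key_protect_instance_id        = "<key-protect-instance-guid>"
+#     resource_prefix                = "scale"
+#     vpc_region                     = "us-south"
+#     vpc_storage_cluster_dns_domain = "scale.example.com"
+#   }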
diff --git a/modules/cos/version.tf b/modules/key_protect/version.tf
similarity index 64%
rename from modules/cos/version.tf
rename to modules/key_protect/version.tf
index 913bf325..4edd14fc 100644
--- a/modules/cos/version.tf
+++ b/modules/key_protect/version.tf
@@ -3,20 +3,20 @@
##############################################################################
terraform {
- required_version = ">= 1.3"
+ required_version = ">= 1.9.0"
# Use "greater than or equal to" range for root level modules
required_providers {
ibm = {
source = "IBM-Cloud/ibm"
version = ">= 1.68.1, < 2.0.0"
}
- # local = {
- # source = "hashicorp/local"
- # version = "~> 2"
- # }
- # ansible = {
- # source = "ansible/ansible"
- # version = "~> 1.3.0"
- # }
+ local = {
+ source = "hashicorp/local"
+ version = "~> 2"
+ }
+ null = {
+ source = "hashicorp/null"
+ version = ">= 3.0.0"
+ }
}
}
diff --git a/modules/landing_zone/datasource.tf b/modules/landing_zone/datasource.tf
index 028278ec..25e1b569 100644
--- a/modules/landing_zone/datasource.tf
+++ b/modules/landing_zone/datasource.tf
@@ -19,3 +19,208 @@ data "ibm_is_subnet" "subnet" {
count = (var.vpc_name != null && length(var.compute_subnet_id) > 0) ? 1 : 0
identifier = var.compute_subnet_id
}
+
+#############################################################################################################
+
+#############################################################################################################
+
+locals {
+  exstng_cos_instance_bkt_hmac_key = var.scheduler == "Scale" ? [for details in var.afm_cos_config : details.cos_instance if(details.cos_instance != "" && details.bucket_name != "" && details.cos_service_cred_key != "")] : []
+ # exstng_cos_instance = var.scheduler == "Scale" ? [for details in var.afm_cos_config : details.cos_instance if(details.cos_instance != "")] : []
+}
+
+data "ibm_resource_instance" "afm_cos_instances" {
+ for_each = {
+    for idx, value in local.exstng_cos_instance_bkt_hmac_key : idx => {
+      total_cos_instance = element(local.exstng_cos_instance_bkt_hmac_key, idx)
+ }
+ }
+ name = each.value.total_cos_instance
+ service = "cloud-object-storage"
+}
+
+locals {
+  instance_data     = [for key, value in data.ibm_resource_instance.afm_cos_instances : value]
+  cos_instance_data = concat(flatten(module.landing_zone[*].cos_data), local.instance_data)
+ total_instance = [
+ for item in local.cos_instance_data : {
+ name = item.resource_name
+ resource_instance_id = item.guid
+ }
+ ]
+}
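+
+# cos_instance_data merges the COS instances created by the landing zone with
+# the existing instances looked up above; total_instance then normalises each
+# one to a {name, resource_instance_id} pair, where the ID is the GUID.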
+
+# data "ibm_resource_instance" "exstng_cos_instances" {
+# for_each = {
+# for idx, value in local.exstng_cos_instance : idx => {
+# total_cos_instance = element(local.exstng_cos_instance, idx)
+# }
+# }
+# name = each.value.total_cos_instance
+# service = "cloud-object-storage"
+# }
+
+locals {
+ # existing_instnace_data = [for key, value in data.ibm_resource_instance.exstng_cos_instances : value]
+ # total_existing_instances = setsubtract(([for item in local.cos_instance_data : item]), ([for item in local.existing_instnace_data : item]))
+
+ total_new_instance = [
+ for item in local.cos_instance_data : {
+ name = item.resource_name
+ resource_instance_id = item.guid
+ }
+ ]
+ newly_created_instance_afm = [for instance in local.total_new_instance : instance.resource_instance_id if(join("-", slice(split("-", instance.name), 0, length(split("-", instance.name)) - 1))) != "${local.prefix}-hpc-cos"]
+
+ config_details = flatten([
+ for instance in local.total_instance : [
+ for config in var.afm_cos_config : {
+ afm_fileset = config.afm_fileset
+ mode = config.mode
+ resource_instance_id = instance.resource_instance_id
+ } if config.cos_instance == instance.name
+ ]
+ ])
+}
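+
+# config_details associates every afm_cos_config entry with the GUID of its
+# COS instance by matching on the instance name, so buckets and HMAC keys can
+# later be paired with the right instance.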
+
+# Existing Bucket Data
+
+locals {
+ total_exstng_bucket_instance = var.scheduler == "Scale" ? [for bucket in var.afm_cos_config : bucket.cos_instance if(bucket.bucket_name != "")] : []
+
+ total_exstng_bucket_name = var.scheduler == "Scale" ? [for bucket in var.afm_cos_config : bucket.bucket_name if(bucket.bucket_name != "")] : []
+
+ total_exstng_bucket_region = var.scheduler == "Scale" ? [for bucket in var.afm_cos_config : bucket.bucket_region if(bucket.bucket_name != "")] : []
+
+ total_exstng_bucket_type = var.scheduler == "Scale" ? [for bucket in var.afm_cos_config : bucket.bucket_type if(bucket.bucket_name != "")] : []
+}
+
+data "ibm_resource_instance" "afm_exstng_bucket_cos_instance" {
+ for_each = {
+ for idx, value in local.total_exstng_bucket_instance : idx => {
+ total_cos_instance = element(local.total_exstng_bucket_instance, idx)
+ }
+ }
+ name = each.value.total_cos_instance
+ service = "cloud-object-storage"
+}
+
+data "ibm_cos_bucket" "afm_exstng_cos_buckets" {
+ for_each = {
+ for idx, value in local.total_exstng_bucket_instance : idx => {
+ bucket_name = element(local.total_exstng_bucket_name, idx)
+ resource_instance_id = element(flatten([for instance in data.ibm_resource_instance.afm_exstng_bucket_cos_instance : instance[*].id]), idx)
+ bucket_region = element(local.total_exstng_bucket_region, idx)
+ bucket_type = element(local.total_exstng_bucket_type, idx)
+ }
+ }
+ bucket_name = each.value.bucket_name
+ resource_instance_id = each.value.resource_instance_id
+ bucket_region = each.value.bucket_region
+ bucket_type = each.value.bucket_type
+ depends_on = [data.ibm_resource_instance.afm_exstng_bucket_cos_instance]
+}
+
+# Existing Hmac Key Data
+
+locals {
+ total_exstng_hmac_key_instance = var.scheduler == "Scale" ? [for key in var.afm_cos_config : key.cos_instance if(key.cos_service_cred_key != "")] : []
+ total_exstng_hmac_key_name = var.scheduler == "Scale" ? [for key in var.afm_cos_config : key.cos_service_cred_key if(key.cos_service_cred_key != "")] : []
+}
+
+data "ibm_resource_instance" "afm_exstng_hmac_key_cos_instance" {
+ for_each = {
+ for idx, value in local.total_exstng_hmac_key_instance : idx => {
+ total_cos_instance = element(local.total_exstng_hmac_key_instance, idx)
+ }
+ }
+ name = each.value.total_cos_instance
+ service = "cloud-object-storage"
+}
+
+data "ibm_resource_key" "afm_exstng_cos_hmac_keys" {
+ for_each = {
+ for idx, value in local.total_exstng_hmac_key_instance : idx => {
+ hmac_key = element(local.total_exstng_hmac_key_name, idx)
+ resource_instance_id = element(flatten([for instance in data.ibm_resource_instance.afm_exstng_hmac_key_cos_instance : instance[*].id]), idx)
+ }
+ }
+ name = each.value.hmac_key
+ resource_instance_id = each.value.resource_instance_id
+ depends_on = [data.ibm_resource_instance.afm_exstng_hmac_key_cos_instance]
+}
+
+locals {
+ # Final Bucket Data
+ existing_buckets = [for num, bucket in data.ibm_cos_bucket.afm_exstng_cos_buckets : bucket]
+ total_buckets_data = concat(local.existing_buckets, flatten(module.landing_zone[*].cos_bucket_data))
+ total_buckets = [
+ for item in local.total_buckets_data : {
+ endpoint = item.s3_endpoint_direct
+ bucket = item.bucket_name
+ resource_instance_id = split(":", item.resource_instance_id)[7]
+ }
+ ]
+
+ newly_created_instance_bucket = [
+ for item in local.total_buckets : {
+ endpoint = item.endpoint
+ bucket = item.bucket
+ resource_instance_id = item.resource_instance_id
+ } if item.resource_instance_id == (var.enable_landing_zone && local.enable_afm ? local.newly_created_instance_afm[0] : "")
+ ]
+
+ afm_config_details_0 = flatten([
+ for bucket in local.total_buckets : [
+ for config in local.config_details : {
+ bucket = bucket.bucket
+ fileset = config.afm_fileset
+ filesystem = local.filesystem
+ mode = config.mode
+ endpoint = "https://${bucket.endpoint}"
+ } if bucket.resource_instance_id == config.resource_instance_id
+ ]
+ ])
+
+ afm_config_details_1 = [
+ for i in range(length(local.newly_created_instance_bucket)) : {
+ bucket = local.newly_created_instance_bucket[i].bucket
+ endpoint = "https://${local.newly_created_instance_bucket[i].endpoint}"
+ fileset = local.new_instance_bucket_hmac[i].afm_fileset
+ filesystem = local.filesystem
+ mode = local.new_instance_bucket_hmac[i].mode
+ }
+ ]
+
+ scale_afm_bucket_config_details = concat(local.afm_config_details_0, local.afm_config_details_1)
+
+ # Final Hmac Key Data
+  existing_hmac_keys = [
+    for item in values(data.ibm_resource_key.afm_exstng_cos_hmac_keys) : {
+      credentials          = item.credentials
+      credentials_json     = item.credentials_json
+      resource_instance_id = split(":", item.id)[7]
+      name                 = item.name
+    }
+  ]
+
+  new_hmac_keys = [
+    for item in (var.enable_landing_zone ? [for key in flatten(module.landing_zone[*].cos_key_credentials_map)[0] : key] : []) : {
+      credentials          = item.credentials
+      credentials_json     = item.credentials_json
+      resource_instance_id = split(":", item.id)[7]
+      name                 = item.name
+    }
+  ]
+ total_hmac_keys = concat(local.existing_hmac_keys, local.new_hmac_keys)
+
+ scale_afm_cos_hmac_key_params = flatten([
+ for key in local.total_hmac_keys : [
+ for bucket in local.total_buckets : {
+ akey = key.credentials["cos_hmac_keys.access_key_id"]
+ bucket = bucket.bucket
+ skey = key.credentials["cos_hmac_keys.secret_access_key"]
+ } if key.resource_instance_id == bucket.resource_instance_id
+ ]
+ ])
+}
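+
+# scale_afm_cos_hmac_key_params yields one {akey, bucket, skey} tuple per
+# bucket by matching each HMAC key to its bucket through the shared COS
+# instance GUID.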
diff --git a/modules/landing_zone/locals.tf b/modules/landing_zone/locals.tf
index 0c045ef9..74b6b29c 100644
--- a/modules/landing_zone/locals.tf
+++ b/modules/landing_zone/locals.tf
@@ -1,6 +1,6 @@
locals {
# Defined values
- name = "lsf"
+ name = lower(var.scheduler)
prefix = var.prefix
tags = [local.prefix, local.name]
@@ -62,6 +62,7 @@ locals {
public_gateway = true
no_addr_prefix = true
} : null,
+    # The compute subnet is always created; no conditions apply
{
name = "compute-subnet-${zone}"
acl_name = "hpc-acl"
@@ -84,7 +85,7 @@ locals {
no_addr_prefix = true
} : null,
zone == local.active_zones[0] ? {
- name = "bastion-subnet"
+ name = "bastion-subnet-${zone}"
acl_name = "hpc-acl"
cidr = var.vpc_cluster_login_private_subnets_cidr_blocks
public_gateway = true
@@ -133,16 +134,28 @@ locals {
vpcs = [
{
existing_vpc_id = var.vpc_name == null ? null : data.ibm_is_vpc.existing_vpc[0].id
- existing_subnets = (var.vpc_name != null && length(var.compute_subnet_id) > 0) ? [
- {
+ existing_subnets = var.vpc_name != null ? flatten([
+ var.compute_subnet_id != "" && var.compute_subnet_id != null ? [{
id = var.compute_subnet_id
public_gateway = false
- },
- {
+ }] : [],
+ var.bastion_subnet_id != "" && var.bastion_subnet_id != null ? [{
id = var.bastion_subnet_id
public_gateway = false
- }
- ] : null
+ }] : [],
+ var.storage_subnet_id != "" && var.storage_subnet_id != null ? [{
+ id = var.storage_subnet_id
+ public_gateway = false
+ }] : [],
+ var.protocol_subnet_id != "" && var.protocol_subnet_id != null ? [{
+ id = var.protocol_subnet_id
+ public_gateway = false
+ }] : [],
+ var.client_subnet_id != "" && var.client_subnet_id != null ? [{
+ id = var.client_subnet_id
+ public_gateway = false
+ }] : []
+ ]) : null
prefix = local.name
resource_group = var.existing_resource_group == "null" ? "${local.prefix}-workload-rg" : var.existing_resource_group
clean_default_security_group = true
@@ -187,10 +200,86 @@ locals {
transit_gateway_resource_group = local.service_resource_group
transit_gateway_connections = [var.vpc_name]
- active_cos = [
- (
- var.enable_cos_integration || var.enable_vpc_flow_logs || var.enable_atracker || var.observability_logs_enable
- ) ? {
+ ##############################################################################################################
+ # AFM Related Calculation
+ ##############################################################################################################
+
+  enable_afm = sum(var.afm_instances[*]["count"]) > 0
+ new_instance_bucket_hmac = var.scheduler == "Scale" ? [for details in var.afm_cos_config : details if(details.cos_instance == "" && details.bucket_name == "" && details.cos_service_cred_key == "")] : []
+ exstng_instance_new_bucket_hmac = var.scheduler == "Scale" ? [for details in var.afm_cos_config : details if(details.cos_instance != "" && details.bucket_name == "" && details.cos_service_cred_key == "")] : []
+ exstng_instance_bucket_new_hmac = var.scheduler == "Scale" ? [for details in var.afm_cos_config : details if(details.cos_instance != "" && details.bucket_name != "" && details.cos_service_cred_key == "")] : []
+ exstng_instance_hmac_new_bucket = var.scheduler == "Scale" ? [for details in var.afm_cos_config : details if(details.cos_instance != "" && details.bucket_name == "" && details.cos_service_cred_key != "")] : []
+
+ path_elements = split("/", var.storage_instances[0]["filesystem"] != "" ? var.storage_instances[0]["filesystem"] : var.filesystem_config[0]["filesystem"])
+ filesystem = element(local.path_elements, length(local.path_elements) - 1)
+ total = concat(local.exstng_instance_new_bucket_hmac, local.exstng_instance_bucket_new_hmac, local.exstng_instance_hmac_new_bucket)
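+
+  # The lists above classify each afm_cos_config entry by which pieces already
+  # exist (COS instance / bucket / HMAC key). Entries with all three blank get
+  # a brand-new instance, bucket and key; "total" collects the mixed cases
+  # that reuse an existing instance.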
+
+ total_new_data = local.enable_afm && length(local.new_instance_bucket_hmac) > 0 ? [{
+ name = "hpc-instance"
+ resource_group = local.service_resource_group
+ plan = "standard"
+ random_suffix = true
+ use_data = false
+ skip_flowlogs_s2s_auth_policy = var.skip_flowlogs_s2s_auth_policy
+ skip_kms_s2s_auth_policy = var.skip_kms_s2s_auth_policy
+ buckets = [
+ for idx, all in local.new_instance_bucket_hmac : {
+ name = all.bucket_name == "" ? format("hpcc-bucket%d", idx) : all.bucket_name
+ storage_class = all.bucket_storage_class
+ endpoint_type = "public"
+ force_delete = true
+ kms_key = null
+ expire_rule = null
+ single_site_location = all.bucket_type == "single_site_location" ? all.bucket_region : null
+ region_location = all.bucket_type == "region_location" ? all.bucket_region : null
+ cross_region_location = all.bucket_type == "cross_region_location" ? all.bucket_region : null
+ }
+ ]
+ keys = [{
+ name = "hpcc-key"
+ role = "Manager"
+ enable_HMAC = true
+ }]
+ }
+ ] : []
+
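+ # Cases 2-4: an existing instance was supplied - create a bucket and/or HMAC key
+ # only where the corresponding field was left empty.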
+ total_existing_data = [for idx, all in tolist(local.total) : {
+ name = all.cos_instance == "" ? format("hpcc-instance%d", idx) : all.cos_instance
+ resource_group = local.service_resource_group
+ plan = "standard"
+ random_suffix = true
+ use_data = all.cos_instance == "" ? false : true
+ skip_flowlogs_s2s_auth_policy = var.skip_flowlogs_s2s_auth_policy
+ skip_kms_s2s_auth_policy = var.skip_kms_s2s_auth_policy
+ buckets = all.bucket_name == "" ? [
+ {
+ name = format("hpc-bucket%d", idx)
+ storage_class = all.bucket_storage_class
+ endpoint_type = "public"
+ force_delete = true
+ kms_key = null
+ expire_rule = null
+ single_site_location = all.bucket_type == "single_site_location" ? all.bucket_region : null
+ region_location = all.bucket_type == "region_location" ? all.bucket_region : null
+ cross_region_location = all.bucket_type == "cross_region_location" ? all.bucket_region : null
+ },
+ ] : []
+ keys = all.cos_service_cred_key == "" ? [{
+ name = format("hpc-key%d", idx)
+ role = "Manager"
+ enable_HMAC = true
+ }] : []
+ } if local.enable_afm && length(local.total) > 0
+ ]
+
+ ##############################################################################################################
+
+ ##############################################################################################################
+ final_instance_bucket_hmac_creation = concat(local.total_new_data, local.total_existing_data)
+
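+ # active_cos combines the AFM COS instances with the (optional) observability COS instance below.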
+ active_cos = concat(local.final_instance_bucket_hmac_creation, [
+
+ (var.enable_cos_integration || var.enable_vpc_flow_logs || var.enable_atracker || var.observability_logs_enable) ? {
name = var.cos_instance_name == null ? "hpc-cos" : var.cos_instance_name
resource_group = local.service_resource_group
plan = "standard"
@@ -203,19 +292,25 @@ locals {
# Extra bucket for solution specific object storage
buckets = [
var.enable_cos_integration ? {
- name = "hpc-bucket"
- storage_class = "standard"
- endpoint_type = "public"
- force_delete = true
- kms_key = var.key_management == "key_protect" ? (var.kms_key_name == null ? format("%s-key", var.prefix) : var.kms_key_name) : null
- expire_rule = null
+ name = "hpc-bucket"
+ storage_class = "standard"
+ endpoint_type = "public"
+ force_delete = true
+ single_site_location = null
+ region_location = null
+ cross_region_location = null
+ kms_key = var.key_management == "key_protect" ? (var.kms_key_name == null ? format("%s-key", var.prefix) : var.kms_key_name) : null
+ expire_rule = null
} : null,
var.enable_vpc_flow_logs ? {
- name = "vpc-flow-logs-bucket"
- storage_class = "standard"
- endpoint_type = "public"
- force_delete = true
- kms_key = var.key_management == "key_protect" ? (var.kms_key_name == null ? format("%s-slz-key", var.prefix) : var.kms_key_name) : null
+ name = "vpc-flow-logs-bucket"
+ storage_class = "standard"
+ endpoint_type = "public"
+ force_delete = true
+ single_site_location = null
+ region_location = null
+ cross_region_location = null
+ kms_key = var.key_management == "key_protect" ? (var.kms_key_name == null ? format("%s-slz-key", var.prefix) : var.kms_key_name) : null
expire_rule = {
days = 30
enable = true
@@ -223,11 +318,14 @@ locals {
}
} : null,
var.enable_atracker ? {
- name = "atracker-bucket"
- storage_class = "standard"
- endpoint_type = "public"
- force_delete = true
- kms_key = var.key_management == "key_protect" ? (var.kms_key_name == null ? format("%s-atracker-key", var.prefix) : var.kms_key_name) : null
+ name = "atracker-bucket"
+ storage_class = "standard"
+ endpoint_type = "public"
+ force_delete = true
+ single_site_location = null
+ region_location = null
+ cross_region_location = null
+ kms_key = var.key_management == "key_protect" ? (var.kms_key_name == null ? format("%s-atracker-key", var.prefix) : var.kms_key_name) : null
expire_rule = {
days = 30
enable = true
@@ -235,11 +333,14 @@ locals {
}
} : null,
var.observability_logs_enable ? {
- name = "logs-data-bucket"
- storage_class = "standard"
- endpoint_type = "public"
- force_delete = true
- kms_key = var.key_management == "key_protect" ? (var.kms_key_name == null ? format("%s-logs-data-key", var.prefix) : var.kms_key_name) : null
+ name = "logs-data-bucket"
+ storage_class = "standard"
+ endpoint_type = "public"
+ force_delete = true
+ single_site_location = null
+ region_location = null
+ cross_region_location = null
+ kms_key = var.key_management == "key_protect" ? (var.kms_key_name == null ? format("%s-logs-data-key", var.prefix) : var.kms_key_name) : null
expire_rule = {
days = 30
enable = true
@@ -247,11 +348,14 @@ locals {
}
} : null,
var.observability_logs_enable ? {
- name = "metrics-data-bucket"
- storage_class = "standard"
- endpoint_type = "public"
- force_delete = true
- kms_key = var.key_management == "key_protect" ? (var.kms_key_name == null ? format("%s-metrics-data-key", var.prefix) : var.kms_key_name) : null
+ name = "metrics-data-bucket"
+ storage_class = "standard"
+ endpoint_type = "public"
+ force_delete = true
+ single_site_location = null
+ region_location = null
+ cross_region_location = null
+ kms_key = var.key_management == "key_protect" ? (var.kms_key_name == null ? format("%s-metrics-data-key", var.prefix) : var.kms_key_name) : null
expire_rule = {
days = 30
enable = true
@@ -260,7 +364,8 @@ locals {
} : null
]
} : null
- ]
+ ]
+ )
cos = [
for instance in local.active_cos :
@@ -276,12 +381,15 @@ locals {
buckets = [
for bucket in instance.buckets :
{
- name = bucket.name
- storage_class = bucket.storage_class
- endpoint_type = bucket.endpoint_type
- force_delete = bucket.force_delete
- kms_key = bucket.kms_key
- expire_rule = bucket.expire_rule
+ name = bucket.name
+ storage_class = bucket.storage_class
+ endpoint_type = bucket.endpoint_type
+ force_delete = bucket.force_delete
+ kms_key = bucket.kms_key
+ expire_rule = bucket.expire_rule
+ single_site_location = bucket.single_site_location
+ region_location = bucket.region_location
+ cross_region_location = bucket.cross_region_location
}
if bucket != null
]
@@ -315,11 +423,11 @@ locals {
}
]) : null
- key_management = var.key_management == "key_protect" ? {
+ key_management = var.key_management == "key_protect" || (var.scale_encryption_enabled && var.scale_encryption_type == "key_protect" && var.key_protect_instance_id == null) ? {
name = var.kms_instance_name != null ? var.kms_instance_name : format("%s-kms", var.prefix) # var.key_management == "hs_crypto" ? var.hpcs_instance_name : format("%s-kms", var.prefix)
resource_group = local.service_resource_group
use_hs_crypto = false
- keys = [for each in local.active_keys : each if each != null]
+ keys = [for each in coalesce(local.active_keys, []) : each if each != null]
use_data = var.kms_instance_name != null ? true : false
} : {
name = null
@@ -401,5 +509,6 @@ locals {
f5_vsi = local.f5_vsi
f5_template_data = local.f5_template_data
skip_kms_block_storage_s2s_auth_policy = local.skip_kms_block_storage_s2s_auth_policy
+
}
}
diff --git a/modules/landing_zone/main.tf b/modules/landing_zone/main.tf
index 80688431..54a15b7b 100644
--- a/modules/landing_zone/main.tf
+++ b/modules/landing_zone/main.tf
@@ -1,7 +1,7 @@
module "landing_zone" {
count = var.enable_landing_zone ? 1 : 0
source = "terraform-ibm-modules/landing-zone/ibm"
- version = "8.2.0"
+ version = "8.4.3"
prefix = local.prefix
region = local.region
tags = local.tags
diff --git a/modules/landing_zone/outputs.tf b/modules/landing_zone/outputs.tf
index 37d17a3b..0f6afeb8 100644
--- a/modules/landing_zone/outputs.tf
+++ b/modules/landing_zone/outputs.tf
@@ -30,7 +30,7 @@ output "bastion_subnets" {
id = subnet["id"]
zone = subnet["zone"]
cidr = subnet["cidr"]
- } if strcontains(subnet["name"], "-lsf-bastion-subnet")
+ } if strcontains(subnet["name"], "-${local.name}-bastion-subnet")
]
}
@@ -41,7 +41,7 @@ output "client_subnets" {
id = subnet["id"]
zone = subnet["zone"]
cidr = subnet["cidr"]
- } if strcontains(subnet["name"], "-lsf-client-subnet")
+ } if strcontains(subnet["name"], "-${local.name}-client-subnet")
]
}
@@ -52,7 +52,7 @@ output "compute_subnets" {
id = subnet["id"]
zone = subnet["zone"]
cidr = subnet["cidr"]
- } if strcontains(subnet["name"], "-lsf-compute-subnet-zone-")
+ } if strcontains(subnet["name"], "-${local.name}-compute-subnet-zone-")
]
}
@@ -63,7 +63,7 @@ output "storage_subnets" {
id = subnet["id"]
zone = subnet["zone"]
cidr = subnet["cidr"]
- } if strcontains(subnet["name"], "-lsf-storage-subnet-zone-")
+ } if strcontains(subnet["name"], "-${local.name}-storage-subnet-zone-")
]
}
@@ -74,7 +74,7 @@ output "protocol_subnets" {
id = subnet["id"]
zone = subnet["zone"]
cidr = subnet["cidr"]
- } if strcontains(subnet["name"], "-lsf-protocol-subnet-zone-")
+ } if strcontains(subnet["name"], "-${local.name}-protocol-subnet-zone-")
]
}
@@ -91,7 +91,7 @@ output "boot_volume_encryption_key" {
output "key_management_guid" {
description = "GUID for KMS instance"
- value = var.enable_landing_zone ? var.key_management != null ? module.landing_zone[0].key_management_guid : null : null
+ value = var.enable_landing_zone ? ((var.key_management != null || (var.scale_encryption_enabled && var.scale_encryption_type == "key_protect" && var.key_protect_instance_id == null)) ? module.landing_zone[0].key_management_guid : null) : null
}
output "cos_buckets_data" {
@@ -105,8 +105,33 @@ output "cos_instance_crns" {
}
output "cos_buckets_names" {
- description = "Name of the COS Bucket created for SCC Instance"
+ description = "List of names for COS buckets created"
value = flatten(module.landing_zone[*].cos_bucket_names)
}
+output "cos_data" {
+ description = "COS buckets data"
+ value = flatten(module.landing_zone[*].cos_data)
+}
+
+output "hmac_key_data" {
+ description = "COS hmac data"
+ value = var.enable_landing_zone ? [for key in flatten(module.landing_zone[*].cos_key_credentials_map)[0] : key] : []
+}
+
+output "cos_names" {
+ description = "List of Cloud Object Storage instance names"
+ value = flatten(module.landing_zone[*].cos_names)
+}
+
+output "scale_afm_bucket_config_details" {
+ description = "Scale AFM COS Bucket and Configuration Details"
+ value = local.scale_afm_bucket_config_details
+}
+
+output "scale_afm_cos_hmac_key_params" {
+ description = "Scale AFM COS HMAC Key Details"
+ value = local.scale_afm_cos_hmac_key_params
+}
+
# TODO: Observability data
diff --git a/modules/landing_zone/variables.tf b/modules/landing_zone/variables.tf
index 2cfc7a32..92ac981d 100644
--- a/modules/landing_zone/variables.tf
+++ b/modules/landing_zone/variables.tf
@@ -8,6 +8,15 @@ variable "enable_landing_zone" {
description = "Run landing zone module."
}
+##############################################################################
+# Offering Variations
+##############################################################################
+variable "scheduler" {
+ type = string
+ default = null
+ description = "Select one of the scheduler (Scale/LSF/Symphony/Slurm/null)"
+}
+
##############################################################################
# Resource Groups Variables
##############################################################################
@@ -132,13 +141,17 @@ variable "management_instances" {
variable "compute_instances" {
type = list(
object({
- profile = string
- count = number
+ profile = string
+ count = number
+ image = string
+ filesystem = optional(string)
})
)
default = [{
- profile = "cx2-2x4"
- count = 0
+ profile = "cx2-2x4"
+ count = 0
+ image = "ibm-redhat-8-10-minimal-amd64-4"
+ filesystem = "/ibm/fs1"
}]
description = "Min Number of instances to be launched for compute cluster."
}
@@ -162,13 +175,17 @@ variable "storage_subnets_cidr" {
variable "storage_instances" {
type = list(
object({
- profile = string
- count = number
+ profile = string
+ count = number
+ image = string
+ filesystem = optional(string)
})
)
default = [{
- profile = "bx2-2x8"
- count = 3
+ profile = "bx2d-32x128"
+ count = 0
+ image = "ibm-redhat-8-10-minimal-amd64-4"
+ filesystem = "/ibm/fs1"
}]
description = "Number of instances to be launched for storage cluster."
}
@@ -176,15 +193,19 @@ variable "storage_instances" {
variable "storage_servers" {
type = list(
object({
- profile = string
- count = number
+ profile = string
+ count = number
+ image = string
+ filesystem = optional(string)
})
)
default = [{
- profile = "cx2d-metal-96x192"
- count = 2
+ profile = "cx2d-metal-96x192"
+ count = 0
+ image = "ibm-redhat-8-10-minimal-amd64-4"
+ filesystem = "/ibm/fs1"
}]
- description = "Number of Bareemetal servers to be launched for storage cluster."
+ description = "Number of BareMetal Servers to be launched for storage cluster."
}
variable "protocol_subnets_cidr" {
@@ -207,6 +228,51 @@ variable "protocol_instances" {
description = "Number of instances to be launched for protocol hosts."
}
+variable "afm_instances" {
+ type = list(
+ object({
+ profile = string
+ count = number
+ })
+ )
+ default = [{
+ profile = "bx2-32x128"
+ count = 1
+ }]
+ description = "Number of instances to be launched for afm hosts."
+}
+
+variable "filesystem_config" {
+ type = list(
+ object({
+ filesystem = string
+ block_size = string
+ default_data_replica = number
+ default_metadata_replica = number
+ max_data_replica = number
+ max_metadata_replica = number
+ })
+ )
+ default = null
+ description = "File system configurations."
+}
+
+variable "afm_cos_config" {
+ type = list(
+ object({
+ afm_fileset = string,
+ mode = string,
+ cos_instance = string,
+ bucket_name = string,
+ bucket_region = string,
+ cos_service_cred_key = string,
+ bucket_type = string,
+ bucket_storage_class = string
+ })
+ )
+ nullable = false
+ description = "AFM configurations."
+}
##############################################################################
# Observability Variables
##############################################################################
@@ -257,6 +323,27 @@ variable "kms_key_name" {
description = "Provide the existing KMS encryption key name that you want to use for the IBM Cloud HPC cluster. (for example kms_key_name: my-encryption-key)."
}
+
+##Scale Encryption Variables
+
+variable "scale_encryption_enabled" {
+ type = bool
+ default = false
+ description = "To enable the encryption for the filesystem. Select true or false"
+}
+
+variable "scale_encryption_type" {
+ type = string
+ default = null
+ description = "To enable filesystem encryption, specify either 'key_protect' or 'gklm'. If neither is specified, the default value will be 'null' and encryption is disabled"
+}
+
+variable "key_protect_instance_id" {
+ type = string
+ default = null
+ description = "An existing Key Protect instance used for filesystem encryption"
+}
+
# variable "hpcs_instance_name" {
# type = string
# default = null
@@ -290,6 +377,27 @@ variable "enable_vpn" {
default = false
description = "The solution supports multiple ways to connect to your HPC cluster for example, using bastion node, via VPN or direct connection. If connecting to the HPC cluster via VPN, set this value to true."
}
+
+##############################################################################
+# Subnet_id Variables
+##############################################################################
+variable "client_subnet_id" {
+ type = string
+ description = "Name of an existing subnet for protocol nodes. If no value is given, a new subnet will be created"
+ default = null
+}
+
+variable "storage_subnet_id" {
+ type = string
+ description = "Name of an existing subnet for storage nodes. If no value is given, a new subnet will be created"
+ default = null
+}
+
+variable "protocol_subnet_id" {
+ type = string
+ description = "Name of an existing subnet for protocol nodes. If no value is given, a new subnet will be created"
+ default = null
+}
##############################################################################
# Landing Zone Variables
##############################################################################
diff --git a/modules/landing_zone_vsi/datasource.tf b/modules/landing_zone_vsi/datasource.tf
index 55b62f4b..6f4d52b9 100644
--- a/modules/landing_zone_vsi/datasource.tf
+++ b/modules/landing_zone_vsi/datasource.tf
@@ -1,22 +1,8 @@
-# data "ibm_resource_group" "existing_resource_group" {
-# name = var.existing_resource_group
-# }
-
data "ibm_is_image" "management_stock_image" {
count = local.image_mapping_entry_found ? 0 : length(var.management_instances)
name = var.management_instances[count.index]["image"]
}
-# data "ibm_is_image" "management" {
-# name = var.management_instances[0]["image"]
-# count = local.image_mapping_entry_found ? 0 : 1
-# }
-
-# data "ibm_is_image" "compute" {
-# name = var.static_compute_instances[0]["image"]
-# count = local.compute_image_found_in_map ? 1 : 0
-# }
-
# TODO: Verify distinct profiles
/*
data "ibm_is_instance_profile" "management" {
@@ -33,25 +19,48 @@ data "ibm_is_instance_profile" "protocol" {
*/
data "ibm_is_image" "client" {
- count = length(var.client_instances)
+ count = var.scheduler == "Scale" ? length(var.client_instances) : 0
name = var.client_instances[count.index]["image"]
}
+# data "ibm_is_image" "compute_stock_image" {
+# count = local.compute_image_found_in_map ? 0 : length(var.static_compute_instances)
+# name = var.static_compute_instances[count.index]["image"]
+# }
+
data "ibm_is_image" "compute_stock_image" {
- count = local.compute_image_found_in_map ? 0 : length(var.static_compute_instances)
+ count = var.scheduler == "LSF" && !local.compute_image_found_in_map ? length(var.static_compute_instances) : 0
name = var.static_compute_instances[count.index]["image"]
}
-data "ibm_is_image" "storage" {
- count = length(var.storage_instances)
- name = var.storage_instances[count.index]["image"]
+data "ibm_is_image" "scale_compute_stock_image" {
+ count = (
+ var.scheduler == "Scale" &&
+ !local.scale_compute_image_found_in_map
+ ) ? length(var.static_compute_instances) : 0
+ name = var.static_compute_instances[count.index]["image"]
}
-# data "ibm_is_image" "protocol" {
-# count = length(var.protocol_instances)
-# name = var.protocol_instances[count.index]["image"]
-# }
+data "ibm_is_instance_profile" "compute_profile" {
+ count = length(var.static_compute_instances)
+ name = var.static_compute_instances[count.index]["profile"]
+}
+data "ibm_is_image" "storage_vsi" {
+ count = (
+ var.scheduler == "Scale" &&
+ !local.scale_storage_image_found_in_map
+ ) ? length(var.storage_instances) : 0
+ name = var.storage_instances[count.index]["image"]
+}
+
+data "ibm_is_image" "baremetal_storage" {
+ count = (
+ var.scheduler == "Scale" &&
+ !local.storage_bare_metal_image_mapping_entry_found
+ ) ? length(var.storage_servers) : 0
+ name = var.storage_servers[count.index]["image"]
+}
data "ibm_is_ssh_key" "ssh_keys" {
for_each = toset(var.ssh_keys)
@@ -68,36 +77,82 @@ data "ibm_is_instance_profile" "storage_tie_instance" {
name = var.storage_instances[count.index]["profile"]
}
-data "ibm_is_ssh_key" "gklm" {
- for_each = toset(var.gklm_instance_key_pair)
- name = each.key
-}
-
-data "ibm_is_ssh_key" "ldap" {
- for_each = toset(var.ldap_instance_key_pair)
- name = each.key
-}
-
data "ibm_is_image" "ldap_vsi_image" {
count = var.enable_ldap != null && var.ldap_server == "null" ? 1 : 0
name = var.ldap_instances[count.index]["image"]
}
-data "ibm_is_image" "afm" {
- count = length(var.afm_instances)
- name = var.afm_instances[count.index]["image"]
-}
+# data "ibm_is_image" "afm_vsi" {
+# count = var.scheduler == "Scale" ? (
+# (!local.scale_storage_image_found_in_map)
+# ? length(var.afm_instances)
+# : 0
+# ) : 0
+# name = var.afm_instances[count.index]["image"]
+# }
+
+# data "ibm_is_image" "baremetal_afm" {
+# count = var.scheduler == "Scale" ? (
+# (!local.storage_bare_metal_image_mapping_entry_found)
+# ? length(var.afm_instances)
+# : 0
+# ) : 0
+# name = var.afm_instances[count.index]["image"]
+# }
+
+# data "ibm_is_image" "protocol_vsi" {
+# count = var.scheduler == "Scale" ? (
+# (!local.scale_storage_image_found_in_map)
+# ? length(var.protocol_instances)
+# : 0
+# ) : 0
+# name = var.protocol_instances[count.index]["image"]
+# }
+
+# data "ibm_is_image" "baremetal_protocol" {
+# count = var.scheduler == "Scale" ? (
+# (!local.storage_bare_metal_image_mapping_entry_found)
+# ? length(var.protocol_instances)
+# : 0
+# ) : 0
+# name = var.protocol_instances[count.index]["image"]
+# }
data "ibm_is_image" "gklm" {
- count = length(var.gklm_instances)
+ count = var.scheduler == "Scale" ? (var.scale_encryption_enabled && var.scale_encryption_type == "gklm" && length(var.gklm_instances) > 0 && !local.scale_encryption_image_mapping_entry_found ? 1 : 0) : 0
name = var.gklm_instances[count.index]["image"]
}
data "ibm_is_image" "login_vsi_image" {
- count = local.login_image_found_in_map ? 0 : 1
+ count = var.scheduler == "LSF" ? (local.login_image_found_in_map ? 0 : 1) : 0
name = var.login_instance[count.index]["image"]
}
data "ibm_is_dedicated_host_profiles" "profiles" {
count = var.enable_dedicated_host ? 1 : 0
}
+
+data "ibm_is_security_group" "storage_security_group" {
+ count = var.storage_security_group_name != null ? 1 : 0
+ name = var.storage_security_group_name
+}
+
+data "ibm_is_security_group" "compute_security_group" {
+ count = var.compute_security_group_name != null ? 1 : 0
+ name = var.compute_security_group_name
+}
+
+data "ibm_is_security_group" "gklm_security_group" {
+ count = var.gklm_security_group_name != null ? 1 : 0
+ name = var.gklm_security_group_name
+}
+
+data "ibm_is_security_group" "ldap_security_group" {
+ count = var.ldap_security_group_name != null ? 1 : 0
+ name = var.ldap_security_group_name
+}
+
+data "ibm_is_security_group" "client_security_group" {
+ count = var.client_security_group_name != null ? 1 : 0
+ name = var.client_security_group_name
+}
diff --git a/modules/landing_zone_vsi/image_map.tf b/modules/landing_zone_vsi/image_map.tf
index f58ee9d7..6783e351 100644
--- a/modules/landing_zone_vsi/image_map.tf
+++ b/modules/landing_zone_vsi/image_map.tf
@@ -47,6 +47,48 @@ locals {
"au-syd" = "r026-11aee148-c938-4524-91e6-8e6da5933a42"
"br-sao" = "r042-5cb62448-e771-4caf-a556-28fdf88acab9"
"ca-tor" = "r038-fa815ec1-d52e-42b2-8221-5b8c2145a248"
+ },
+ }
+ storage_image_region_map = {
+ "hpcc-scale5232-rhel810-v1" = {
+ "eu-es" = "r050-7f28959f-74a4-4ad7-be30-8107da85406f"
+ "eu-gb" = "r018-5286d07b-527f-49a2-b0a7-2c88278349e8"
+ "eu-de" = "r010-1e558d55-bc2e-4e96-9164-b4b1139ba06b"
+ "us-east" = "r014-8befe151-c36d-4056-9955-3480210adf98"
+ "us-south" = "r006-7ab41080-5af0-47e5-ad44-abc18589197a"
+ "jp-tok" = "r022-d60e9e5f-264d-4e37-9fc0-9ad6270a054e"
+ "jp-osa" = "r034-eac88b73-0978-4340-9188-e28e99aeae2a"
+ "au-syd" = "r026-221f1bb0-1ba3-40c3-a83f-59334a2fda4b"
+ "br-sao" = "r042-e3d377a0-69f6-4079-9cbe-021021fb4a84"
+ "ca-tor" = "r038-73809daf-d414-4319-bc46-1bdd26a8e85d"
+ }
+ }
+ evaluation_image_region_map = {
+ "hpcc-scale5232-dev-rhel810" = {
+ "eu-es" = "r050-eb14661e-8290-4c03-a198-3e65a1b17a6b"
+ "eu-gb" = "r018-46ec71d2-2137-48c1-b348-a2ff0a671d91"
+ "eu-de" = "r010-cf5e0560-cbbf-43a6-9ba7-39fb4d4e82ff"
+ "us-east" = "r014-27ceeecc-c5bc-461e-a687-11e5b843274d"
+ "us-south" = "r006-12668685-f580-4cc8-86c5-335f1a979278"
+ "jp-tok" = "r022-bfe30f3f-c68f-4f61-ba90-7fbaa1a29665"
+ "jp-osa" = "r034-320617e2-b565-4843-bd8d-9f4bd2dd4641"
+ "au-syd" = "r026-ad179ec6-37a0-4d0c-9816-d065768414cf"
+ "br-sao" = "r042-ed759187-cd74-4d13-b475-bd0ed443197b"
+ "ca-tor" = "r038-90ca620e-5bf9-494e-a6ba-7e5ee663a54b"
+ }
+ }
+ encryption_image_region_map = {
+ "hpcc-scale-gklm4202-v2-5-3" = {
+ "eu-es" = "r050-fda24f7a-f395-487f-8179-d3c505d7fa8b"
+ "eu-gb" = "r018-74d533de-03b6-43ea-9f3f-dcd0d76ebb94"
+ "eu-de" = "r010-a5ff7b80-8ccc-451d-b384-e14bc119200f"
+ "us-east" = "r014-23d9f6b8-5c3f-43c5-8953-6e4cbbc01b47"
+ "us-south" = "r006-e12a939e-cd76-4394-bc38-4166d4df5818"
+ "jp-tok" = "r022-e27ef40e-82b2-481c-86c6-53032d8bda38"
+ "jp-osa" = "r034-a42046c2-60c3-4a43-9234-c06edd27dd84"
+ "au-syd" = "r026-5f90526b-5da6-4fae-ad16-33bbb5448cfc"
+ "br-sao" = "r042-a9a29acf-6810-4749-9c4e-757c7abb7c59"
+ "ca-tor" = "r038-95be651c-35a4-4b41-a629-dc46efe38442"
}
}
}
diff --git a/modules/landing_zone_vsi/locals.tf b/modules/landing_zone_vsi/locals.tf
index 99f1755d..bd9888d7 100644
--- a/modules/landing_zone_vsi/locals.tf
+++ b/modules/landing_zone_vsi/locals.tf
@@ -1,12 +1,12 @@
# define variables
locals {
# Future use
- # products = "scale"
- name = "lsf"
+ name = lower(var.scheduler)
prefix = var.prefix
tags = [local.prefix, local.name]
vsi_interfaces = ["eth0", "eth1"]
- bms_interfaces = ["ens1", "ens2"]
+ bms_interfaces = ["eth0", "eth1"]
+ # bms_interfaces = ["ens1", "ens2"]
# TODO: explore (DA always keep it true)
skip_iam_authorization_policy = true
# Region and Zone calculations
@@ -29,6 +29,19 @@ locals {
# If not found, assume the name is the id already (customer provided image)
new_login_image_id = local.login_image_found_in_map ? local.image_region_map[var.login_instance[0]["image"]][local.region] : "Image not found with the given name"
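+ # Scale image resolution: prefer the curated region maps; the "Image not found"
+ # sentinel is only assigned when the name is missing from the map, in which case
+ # main.tf falls back to a data-source lookup by name (or uses the evaluation
+ # image when storage_type is "evaluation").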
+ scale_storage_image_found_in_map = contains(keys(local.storage_image_region_map), var.storage_instances[0]["image"])
+ evaluation_image_id = local.evaluation_image_region_map[one(keys(local.evaluation_image_region_map))][local.region]
+ new_storage_image_id = var.storage_type != "evaluation" ? (local.scale_storage_image_found_in_map ? local.storage_image_region_map[var.storage_instances[0]["image"]][local.region] : "Image not found with the given name") : local.evaluation_image_id
+
+ storage_bare_metal_image_mapping_entry_found = contains(keys(local.storage_image_region_map), var.storage_servers[0]["image"])
+ storage_bare_metal_image_id = local.storage_bare_metal_image_mapping_entry_found ? local.storage_image_region_map[var.storage_servers[0]["image"]][local.region] : "Image not found with the given name"
+
+ scale_encryption_image_mapping_entry_found = contains(keys(local.encryption_image_region_map), var.gklm_instances[0]["image"])
+ scale_encryption_image_id = (var.scale_encryption_enabled == true && var.scale_encryption_type == "gklm") ? (local.scale_encryption_image_mapping_entry_found ? local.encryption_image_region_map[var.gklm_instances[0]["image"]][local.region] : "Image not found with the given name") : "Either encryption is not enabled or encryption type is not gklm"
+
+ scale_compute_image_found_in_map = contains(keys(local.storage_image_region_map), var.static_compute_instances[0]["image"])
+ scale_compute_image_id = local.scale_compute_image_found_in_map ? local.storage_image_region_map[var.static_compute_instances[0]["image"]][local.region] : "Image not found with the given name"
+
products = var.scheduler == "Scale" ? "scale" : "lsf"
block_storage_volumes = [for volume in coalesce(var.nsd_details, []) : {
name = format("nsd-%s", index(var.nsd_details, volume) + 1)
@@ -63,12 +76,14 @@ locals {
storage_instance_count = var.storage_type == "persistent" ? sum(var.storage_servers[*]["count"]) : sum(var.storage_instances[*]["count"])
protocol_instance_count = sum(var.protocol_instances[*]["count"])
static_compute_instance_count = sum(var.static_compute_instances[*]["count"])
+ afm_instances_count = sum(var.afm_instances[*]["count"])
enable_client = local.client_instance_count > 0
enable_management = local.management_instance_count > 0
enable_compute = local.management_instance_count > 0 || local.static_compute_instance_count > 0
enable_storage = local.storage_instance_count > 0
enable_protocol = local.storage_instance_count > 0 && local.protocol_instance_count > 0
+ enable_afm = local.afm_instances_count > 0
# TODO: Fix the logic
enable_block_storage = var.storage_type == "scratch" ? true : false
@@ -76,17 +91,18 @@ locals {
# TODO: Fix the logic
# enable_load_balancer = false
- client_node_name = format("%s-%s", local.prefix, "client")
- management_node_name = format("%s-%s", local.prefix, "mgmt")
- compute_node_name = format("%s-%s", local.prefix, "comp")
- storage_node_name = format("%s-%s", local.prefix, "strg")
- protocol_node_name = format("%s-%s", local.prefix, "proto")
- storage_management_node_name = format("%s-%s", local.prefix, "strg-mgmt")
- ldap_node_name = format("%s-%s", local.prefix, "ldap")
- afm_node_name = format("%s-%s", local.prefix, "afm")
- gklm_node_name = format("%s-%s", local.prefix, "gklm")
- cpmoute_management_node_name = format("%s-%s", local.prefix, "comp-mgmt")
- login_node_name = format("%s-%s", local.prefix, "login")
+ client_node_name = format("%s-%s", local.prefix, "client")
+ management_node_name = format("%s-%s", local.prefix, "mgmt")
+ compute_node_name = format("%s-%s", local.prefix, "comp")
+ storage_node_name = format("%s-%s", local.prefix, "strg")
+ storage_tie_breaker_node_name = format("%s-%s", local.prefix, "strg-tie")
+ protocol_node_name = format("%s-%s", local.prefix, "proto")
+ storage_management_node_name = format("%s-%s", local.prefix, "strg-mgmt")
+ ldap_node_name = format("%s-%s", local.prefix, "ldap")
+ afm_node_name = format("%s-%s", local.prefix, "afm")
+ gklm_node_name = format("%s-%s", local.prefix, "gklm")
+ compute_management_node_name = format("%s-%s", local.prefix, "comp-mgmt")
+ login_node_name = format("%s-%s", local.prefix, "login")
# Future use
/*
@@ -108,16 +124,16 @@ locals {
protocol_image_name = var.storage_image_name
*/
- client_image_id = data.ibm_is_image.client[*].id
- storage_image_id = data.ibm_is_image.storage[*].id
- protocol_image_id = data.ibm_is_image.storage[*].id
- ldap_image_id = data.ibm_is_image.ldap_vsi_image[*].id
- afm_image_id = data.ibm_is_image.afm[*].id
- gklm_image_id = data.ibm_is_image.gklm[*].id
+ # client_image_id = data.ibm_is_image.client[*].id
+ # storage_image_id = data.ibm_is_image.storage[*].id
+ # protocol_image_id = data.ibm_is_image.storage[*].id
+ ldap_image_id = data.ibm_is_image.ldap_vsi_image[*].id
+ # afm_image_id = data.ibm_is_image.afm[*].id
+ # gklm_image_id = data.ibm_is_image.gklm[*].id
- ssh_keys = [for name in var.ssh_keys : data.ibm_is_ssh_key.ssh_keys[name].id]
- ldap_ssh_keys = [for name in var.ldap_instance_key_pair : data.ibm_is_ssh_key.ldap[name].id]
- gklm_ssh_keys = [for name in var.gklm_instance_key_pair : data.ibm_is_ssh_key.gklm[name].id]
+ ssh_keys = [for name in var.ssh_keys : data.ibm_is_ssh_key.ssh_keys[name].id]
+ #ldap_ssh_keys = [for name in var.ldap_instance_key_pair : data.ibm_is_ssh_key.ldap[name].id]
+ # gklm_ssh_keys = [for name in var.gklm_instance_key_pair : data.ibm_is_ssh_key.gklm[name].id]
# Future use
/*
@@ -152,22 +168,66 @@ locals {
# TODO: Multi-zone multi-vNIC VSIs deployment support (bug #https://github.ibm.com/GoldenEye/issues/issues/5830)
# Findings: Single zone multi-vNICs VSIs deployment & multi-zone single vNIC VSIs deployment are supported.
client_subnets = var.client_subnets
- cluster_subnet_id = var.cluster_subnet_id
+ compute_subnet_id = var.compute_subnet_id
storage_subnets = var.storage_subnets
protocol_subnets = var.protocol_subnets
compute_public_key_content = one(module.compute_key[*].public_key_content)
compute_private_key_content = one(module.compute_key[*].private_key_content)
+ storage_public_key_content = one(module.storage_key[*].public_key_content)
+ storage_private_key_content = one(module.storage_key[*].private_key_content)
+
+ client_public_key_content = one(module.client_key[*].public_key_content)
+ client_private_key_content = one(module.client_key[*].private_key_content)
+
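+ # "metal" in the profile name indicates a bare metal server type for the
+ # CES (protocol) and AFM checks below.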
+ protocol_vsi_profile = var.protocol_instances[*]["profile"]
+ ces_server_type = strcontains(local.protocol_vsi_profile[0], "metal")
+ afm_vsi_profile = var.afm_instances[*]["profile"]
+ afm_server_type = strcontains(local.afm_vsi_profile[0], "metal")
+
+ sapphire_rapids_profile_check = strcontains(local.protocol_vsi_profile[0], "3-metal") || strcontains(local.protocol_vsi_profile[0], "3d-metal")
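+ # "3-metal"/"3d-metal" profiles are third-generation (Sapphire Rapids) bare metal.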
+
+ tie_breaker_bm_server = [{
+ profile = var.tie_breaker_bm_server_profile == null ? (var.storage_servers[*]["profile"])[0] : var.tie_breaker_bm_server_profile
+ count = 1
+ image = (var.storage_servers[*]["image"])[0]
+ filesystem = (var.storage_servers[*]["filesystem"])[0]
+ }]
+
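+ # Values rendered into the storage node user_data template; key material is
+ # base64-encoded before templating (storage keys are only generated for Scale).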
+ user_data_vars = {
+ dns_domain = var.dns_domain_names["storage"],
+ enable_protocol = local.enable_protocol,
+ protocol_domain = var.dns_domain_names["protocol"],
+ vpc_region = var.vpc_region,
+ protocol_subnet_id = length(var.protocol_subnets) == 0 ? "" : var.protocol_subnets[0].id,
+ resource_group_id = var.resource_group,
+ bastion_public_key_content = base64encode(var.bastion_public_key_content != null ? var.bastion_public_key_content : ""),
+ storage_private_key_content = var.scheduler == "Scale" ? base64encode(module.storage_key[0].private_key_content) : "",
+ storage_public_key_content = var.scheduler == "Scale" ? base64encode(module.storage_key[0].public_key_content) : ""
+ }
+
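+ # Attach a secondary (storage) network interface only on high-bandwidth
+ # profiles (>= 64 Gbps) when no protocol subnet is in use.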
+ enable_sec_interface_compute = !local.enable_protocol && data.ibm_is_instance_profile.compute_profile[0].bandwidth[0].value >= 64000
+ enable_sec_interface_storage = !local.enable_protocol && var.storage_type != "persistent" && data.ibm_is_instance_profile.storage[0].bandwidth[0].value >= 64000
+
# Security Groups
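+ # distinct() collapses duplicate (security group, subnet) pairs now that the
+ # per-instance index is no longer part of interface_name.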
- protocol_secondary_security_group = flatten([
+ protocol_secondary_security_group = distinct(flatten([
for subnet_index, subnet in local.protocol_subnets : [
for i in range(var.protocol_instances[subnet_index]["count"]) : {
+ security_group_id = one(var.storage_security_group_name == null ? module.storage_sg[*].security_group_id : local.storage_security_group_name_id)
+ interface_name = subnet["name"]
+ }
+ ]
+ ]))
+
+ storage_secondary_security_group = distinct(flatten([
+ for subnet_index, subnet in local.storage_subnets : [
+ for i in range(var.static_compute_instances[subnet_index]["count"]) : {
security_group_id = one(module.storage_sg[*].security_group_id)
- interface_name = "${subnet["name"]}-${i}"
+ interface_name = subnet["name"]
}
]
- ])
+ ]))
# ldap_instance_image_id = var.enable_ldap == true && var.ldap_server == "null" ? data.ibm_is_image.ldap_vsi_image[0].id : "null"
}
@@ -268,32 +328,39 @@ locals {
bastion_security_group = var.bastion_security_group_id
# Security group id
- client_security_group = local.enable_client ? module.client_sg[0].security_group_id_for_ref : null
- compute_security_group = local.enable_compute ? module.compute_sg[0].security_group_id_for_ref : null
- storage_security_group = local.enable_storage ? module.storage_sg[0].security_group_id_for_ref : null
+ client_security_group = local.client_instance_count > 0 ? (local.enable_client && var.client_security_group_name == null ? module.client_sg[0].security_group_id_for_ref : local.client_security_group_name_id[0]) : ""
+ compute_security_group = local.static_compute_instance_count > 0 ? (local.enable_compute && var.compute_security_group_name == null ? module.compute_sg[0].security_group_id_for_ref : local.compute_security_group_name_id[0]) : ""
+ storage_security_group = local.storage_instance_count > 0 ? (local.enable_storage && var.storage_security_group_name == null ? module.storage_sg[0].security_group_id_for_ref : local.storage_security_group_name_id[0]) : ""
client_security_group_rules = local.enable_client ? (local.enable_compute ?
[
{ name = "client-allow-bastionsg-inbound", direction = "inbound", remote = local.bastion_security_group },
{ name = "client-allow-clientsg-inbound", direction = "inbound", remote = local.client_security_group },
{ name = "client-allow-computesg-inbound", direction = "inbound", remote = local.compute_security_group },
- { name = "client-allow-network-inbound", direction = "inbound", remote = var.cluster_cidr }
+ { name = "client-allow-network-inbound", direction = "inbound", remote = var.cluster_cidr },
+ { name = "storage-allow-storagesg-inbound", direction = "inbound", remote = local.storage_security_group },
+ { name = "client-allow-all-outbound", direction = "outbound", remote = "0.0.0.0/0" }
] :
[
{ name = "client-allow-bastionsg-inbound", direction = "inbound", remote = local.bastion_security_group },
{ name = "client-allow-clientsg-inbound", direction = "inbound", remote = local.client_security_group },
- { name = "client-allow-network-inbound", direction = "inbound", remote = var.cluster_cidr }
+ { name = "client-allow-network-inbound", direction = "inbound", remote = var.cluster_cidr },
+ { name = "storage-allow-storagesg-inbound", direction = "inbound", remote = local.storage_security_group },
+ { name = "client-allow-all-outbound", direction = "outbound", remote = "0.0.0.0/0" }
]
) : (local.enable_compute ?
[
{ name = "client-allow-bastionsg-inbound", direction = "inbound", remote = local.bastion_security_group },
{ name = "client-allow-computesg-inbound", direction = "inbound", remote = local.compute_security_group },
- { name = "client-allow-network-inbound", direction = "inbound", remote = var.cluster_cidr }
+ { name = "client-allow-network-inbound", direction = "inbound", remote = var.cluster_cidr },
+ { name = "client-allow-all-outbound", direction = "outbound", remote = "0.0.0.0/0" }
]
:
[
{ name = "client-allow-bastionsg-inbound", direction = "inbound", remote = local.bastion_security_group },
- { name = "client-allow-network-inbound", direction = "inbound", remote = var.cluster_cidr }
+ { name = "client-allow-network-inbound", direction = "inbound", remote = var.cluster_cidr },
+ { name = "storage-allow-storagesg-inbound", direction = "inbound", remote = local.storage_security_group },
+ { name = "client-allow-all-outbound", direction = "outbound", remote = "0.0.0.0/0" }
]
)
@@ -311,7 +378,7 @@ locals {
{ name = "compute-allow-clientsg-inbound", direction = "inbound", remote = local.client_security_group },
{ name = "compute-allow-computesg-inbound", direction = "inbound", remote = local.compute_security_group },
{ name = "client-allow-network-inbound", direction = "inbound", remote = var.cluster_cidr },
- { name = "compute-allow-all-outbound", direction = "outbound", remote = "0.0.0.0/0" }
+ { name = "compute-allow-all-outbound", direction = "outbound", remote = "0.0.0.0/0" },
]
) : (local.enable_storage ?
[
@@ -362,22 +429,32 @@ locals {
{ name = "storage-allow-bastionsg-inbound", direction = "inbound", remote = local.bastion_security_group },
{ name = "storage-allow-computesg-inbound", direction = "inbound", remote = local.compute_security_group },
{ name = "storage-allow-storagesg-inbound", direction = "inbound", remote = local.storage_security_group },
- { name = "client-allow-network-inbound", direction = "inbound", remote = var.cluster_cidr }
+ { name = "client-allow-network-inbound", direction = "inbound", remote = var.cluster_cidr },
+ { name = "client-allow-clientsg-inbound", direction = "inbound", remote = local.client_security_group },
+ { name = "client-allow-all-outbound", direction = "outbound", remote = "0.0.0.0/0" }
] :
[
{ name = "storage-allow-bastionsg-inbound", direction = "inbound", remote = local.bastion_security_group },
{ name = "storage-allow-computesg-inbound", direction = "inbound", remote = local.compute_security_group },
- { name = "client-allow-network-inbound", direction = "inbound", remote = var.cluster_cidr }
+ { name = "client-allow-network-inbound", direction = "inbound", remote = var.cluster_cidr },
+ { name = "client-allow-all-outbound", direction = "outbound", remote = "0.0.0.0/0" },
+ { name = "compute-allow-all-outbound", direction = "outbound", remote = "0.0.0.0/0" }
+
]
) : (local.enable_storage ?
[
{ name = "storage-allow-bastionsg-inbound", direction = "inbound", remote = local.bastion_security_group },
{ name = "storage-allow-storagesg-inbound", direction = "inbound", remote = local.storage_security_group },
- { name = "client-allow-network-inbound", direction = "inbound", remote = var.cluster_cidr }
+ { name = "client-allow-network-inbound", direction = "inbound", remote = var.cluster_cidr },
+ { name = "client-allow-clientsg-inbound", direction = "inbound", remote = local.client_security_group },
+ { name = "client-allow-all-outbound", direction = "outbound", remote = "0.0.0.0/0" }
] :
[
{ name = "storage-allow-bastionsg-inbound", direction = "inbound", remote = local.bastion_security_group },
- { name = "client-allow-network-inbound", direction = "inbound", remote = var.cluster_cidr }
+ { name = "client-allow-network-inbound", direction = "inbound", remote = var.cluster_cidr },
+ { name = "client-allow-all-outbound", direction = "outbound", remote = "0.0.0.0/0" },
+ { name = "compute-allow-all-outbound", direction = "outbound", remote = "0.0.0.0/0" }
+
]
)
@@ -397,3 +474,11 @@ locals {
{ name = "bastion-allow-client-sg", direction = "inbound", remote = local.client_security_group }] : []
))
}
+
+locals {
+ storage_security_group_name_id = var.storage_security_group_name != null ? data.ibm_is_security_group.storage_security_group[*].id : []
+ client_security_group_name_id = var.client_security_group_name != null ? data.ibm_is_security_group.client_security_group[*].id : []
+ gklm_security_group_name_id = var.gklm_security_group_name != null ? data.ibm_is_security_group.gklm_security_group[*].id : []
+ ldap_security_group_name_id = var.ldap_security_group_name != null ? data.ibm_is_security_group.ldap_security_group[*].id : []
+ compute_security_group_name_id = var.compute_security_group_name != null ? data.ibm_is_security_group.compute_security_group[*].id : []
+}
diff --git a/modules/landing_zone_vsi/main.tf b/modules/landing_zone_vsi/main.tf
index e0b3930f..1e867ad2 100644
--- a/modules/landing_zone_vsi/main.tf
+++ b/modules/landing_zone_vsi/main.tf
@@ -4,6 +4,12 @@ module "compute_key" {
# private_key_path = "./../../modules/ansible-roles/compute_id_rsa" #checkov:skip=CKV_SECRET_6
}
+module "client_key" {
+ count = local.enable_client ? 1 : 0
+ source = "./../key"
+ # private_key_path = "./../../modules/ansible-roles/compute_id_rsa" #checkov:skip=CKV_SECRET_6
+}
+
resource "null_resource" "entitlement_check" {
count = var.scheduler == "Scale" && var.storage_type != "evaluation" ? 1 : 0
provisioner "local-exec" {
@@ -71,7 +77,7 @@ module "storage_key" {
}
module "client_sg" {
- count = local.enable_client ? 1 : 0
+ count = local.enable_client && var.client_security_group_name == null ? 1 : 0
source = "terraform-ibm-modules/security-group/ibm"
version = "2.6.2"
add_ibm_cloud_internal_rules = true
@@ -82,7 +88,7 @@ module "client_sg" {
}
module "compute_sg" {
- count = local.enable_compute ? 1 : 0
+ count = local.enable_compute && var.compute_security_group_name == null ? 1 : 0
source = "terraform-ibm-modules/security-group/ibm"
version = "2.6.2"
add_ibm_cloud_internal_rules = true
@@ -93,6 +99,7 @@ module "compute_sg" {
}
module "bastion_sg_existing" {
+ count = var.login_security_group_name == null ? 1 : 0
source = "terraform-ibm-modules/security-group/ibm"
version = "2.6.2"
resource_group = var.resource_group
@@ -116,7 +123,7 @@ module "nfs_storage_sg" {
}
module "storage_sg" {
- count = local.enable_storage ? 1 : 0
+ count = local.enable_storage && var.storage_security_group_name == null ? 1 : 0
source = "terraform-ibm-modules/security-group/ibm"
version = "2.6.2"
add_ibm_cloud_internal_rules = true
@@ -129,7 +136,7 @@ module "storage_sg" {
module "login_vsi" {
count = var.scheduler == "LSF" ? 1 : 0
source = "terraform-ibm-modules/landing-zone-vsi/ibm"
- version = "5.0.0"
+ version = "5.4.6"
vsi_per_subnet = 1
create_security_group = false
security_group = null
@@ -147,7 +154,6 @@ module "login_vsi" {
kms_encryption_enabled = var.kms_encryption_enabled
skip_iam_authorization_policy = local.skip_iam_authorization_policy
boot_volume_encryption_key = var.boot_volume_encryption_key
- existing_kms_instance_guid = var.existing_kms_instance_guid
placement_group_id = var.placement_group_ids
#placement_group_id = var.placement_group_ids[(var.management_instances[count.index]["count"])%(length(var.placement_group_ids))]
}
@@ -155,7 +161,7 @@ module "login_vsi" {
module "management_vsi" {
count = length(var.management_instances)
source = "terraform-ibm-modules/landing-zone-vsi/ibm"
- version = "5.0.0"
+ version = "5.4.16"
vsi_per_subnet = var.management_instances[count.index]["count"]
create_security_group = false
security_group = null
@@ -166,119 +172,117 @@ module "management_vsi" {
enable_floating_ip = false
security_group_ids = module.compute_sg[*].security_group_id
ssh_key_ids = local.ssh_keys
- subnets = local.cluster_subnet_id
+ subnets = local.compute_subnet_id
tags = local.tags
user_data = data.template_file.management_user_data.rendered
vpc_id = var.vpc_id
kms_encryption_enabled = var.kms_encryption_enabled
skip_iam_authorization_policy = local.skip_iam_authorization_policy
boot_volume_encryption_key = var.boot_volume_encryption_key
- existing_kms_instance_guid = var.existing_kms_instance_guid
placement_group_id = var.placement_group_ids
}
module "compute_vsi" {
count = length(var.static_compute_instances)
source = "terraform-ibm-modules/landing-zone-vsi/ibm"
- version = "5.0.0"
+ version = "5.4.16"
vsi_per_subnet = var.static_compute_instances[count.index]["count"]
create_security_group = false
security_group = null
- image_id = local.compute_image_found_in_map ? local.new_compute_image_id : data.ibm_is_image.compute_stock_image[0].id
+ image_id = var.scheduler == "LSF" ? (local.compute_image_found_in_map ? local.new_compute_image_id : data.ibm_is_image.compute_stock_image[0].id) : (var.storage_type != "evaluation" ? (local.scale_compute_image_found_in_map ? local.scale_compute_image_id : data.ibm_is_image.scale_compute_stock_image[0].id) : local.evaluation_image_id)
machine_type = var.static_compute_instances[count.index]["profile"]
prefix = format("%s-%s", local.compute_node_name, count.index + 1)
resource_group_id = var.resource_group
enable_floating_ip = false
- security_group_ids = module.compute_sg[*].security_group_id
+ security_group_ids = var.compute_security_group_name == null ? module.compute_sg[*].security_group_id : local.compute_security_group_name_id
ssh_key_ids = local.ssh_keys
- subnets = local.cluster_subnet_id
+ subnets = local.compute_subnet_id
tags = local.tags
user_data = var.scheduler == "Scale" ? data.template_file.scale_compute_user_data.rendered : data.template_file.lsf_compute_user_data.rendered
vpc_id = var.vpc_id
kms_encryption_enabled = var.kms_encryption_enabled
skip_iam_authorization_policy = local.skip_iam_authorization_policy
boot_volume_encryption_key = var.boot_volume_encryption_key
- existing_kms_instance_guid = var.existing_kms_instance_guid
placement_group_id = var.enable_dedicated_host ? null : var.placement_group_ids
enable_dedicated_host = var.enable_dedicated_host
dedicated_host_id = var.enable_dedicated_host && length(var.static_compute_instances) > 0 ? local.dedicated_host_map[var.static_compute_instances[count.index]["profile"]] : null
+ secondary_security_groups = local.enable_sec_interface_compute ? local.storage_secondary_security_group : []
+ secondary_subnets = local.enable_sec_interface_compute ? local.storage_subnets : []
+ manage_reserved_ips = local.enable_sec_interface_compute ? true : false
depends_on = [module.dedicated_host, null_resource.dedicated_host_validation]
}
module "compute_cluster_management_vsi" {
count = var.scheduler == "Scale" && local.enable_compute ? 1 : 0
source = "terraform-ibm-modules/landing-zone-vsi/ibm"
- version = "5.0.0"
+ version = "5.4.16"
vsi_per_subnet = 1
create_security_group = false
security_group = null
- image_id = data.ibm_is_image.compute_stock_image[0].id
- machine_type = var.static_compute_instances[count.index]["profile"]
- prefix = count.index == 0 ? local.cpmoute_management_node_name : format("%s-%s", local.cpmoute_management_node_name, count.index)
+ image_id = var.storage_type != "evaluation" ? (local.scale_compute_image_found_in_map ? local.scale_compute_image_id : data.ibm_is_image.scale_compute_stock_image[0].id) : local.evaluation_image_id
+ machine_type = var.scale_management_vsi_profile
+ prefix = count.index == 0 ? local.compute_management_node_name : format("%s-%s", local.compute_management_node_name, count.index)
resource_group_id = var.resource_group
enable_floating_ip = false
- security_group_ids = module.compute_sg[*].security_group_id
+ security_group_ids = var.compute_security_group_name == null ? module.compute_sg[*].security_group_id : local.compute_security_group_name_id
ssh_key_ids = local.ssh_keys
- subnets = local.cluster_subnet_id
+ subnets = local.compute_subnet_id
tags = local.tags
user_data = data.template_file.scale_compute_user_data.rendered
vpc_id = var.vpc_id
kms_encryption_enabled = var.kms_encryption_enabled
skip_iam_authorization_policy = local.skip_iam_authorization_policy
boot_volume_encryption_key = var.boot_volume_encryption_key
- existing_kms_instance_guid = var.existing_kms_instance_guid
placement_group_id = var.placement_group_ids
+ secondary_security_groups = local.enable_sec_interface_compute ? local.storage_secondary_security_group : []
+ secondary_subnets = local.enable_sec_interface_compute ? local.storage_subnets : []
+ manage_reserved_ips = local.enable_sec_interface_compute ? true : false
}
module "storage_vsi" {
- count = length(var.storage_instances) > 0 && var.storage_type != "persistent" ? 1 : 0
- source = "terraform-ibm-modules/landing-zone-vsi/ibm"
- version = "5.0.0"
- vsi_per_subnet = var.storage_instances[count.index]["count"]
- create_security_group = false
- security_group = null
- image_id = local.storage_image_id[count.index]
- machine_type = var.storage_instances[count.index]["profile"]
- prefix = count.index == 0 ? local.storage_node_name : format("%s-%s", local.storage_node_name, count.index)
- resource_group_id = var.resource_group
- enable_floating_ip = false
- security_group_ids = module.storage_sg[*].security_group_id
- ssh_key_ids = local.ssh_keys
- subnets = local.storage_subnets
- tags = local.tags
- user_data = data.template_file.storage_user_data.rendered
- vpc_id = var.vpc_id
- block_storage_volumes = local.enable_block_storage ? local.block_storage_volumes : []
- kms_encryption_enabled = var.kms_encryption_enabled
- skip_iam_authorization_policy = local.skip_iam_authorization_policy
- boot_volume_encryption_key = var.boot_volume_encryption_key
- existing_kms_instance_guid = var.existing_kms_instance_guid
- placement_group_id = var.placement_group_ids
- secondary_allow_ip_spoofing = local.enable_protocol && var.colocate_protocol_instances ? true : false
- secondary_security_groups = local.protocol_secondary_security_group
- secondary_subnets = local.enable_protocol && var.colocate_protocol_instances ? local.protocol_subnets : []
- manage_reserved_ips = local.enable_protocol && var.colocate_protocol_instances ? true : false
- primary_vni_additional_ip_count = local.enable_protocol && var.colocate_protocol_instances ? var.protocol_instances[count.index]["count"] : 0
- depends_on = [resource.null_resource.entitlement_check]
- # manage_reserved_ips = true
- # primary_vni_additional_ip_count = var.storage_instances[count.index]["count"]
- # placement_group_id = var.placement_group_ids[(var.storage_instances[count.index]["count"])%(length(var.placement_group_ids))]
+ count = var.scheduler == "Scale" ? (length(var.storage_instances) > 0 && var.storage_type != "persistent" ? 1 : 0) : 0
+ source = "terraform-ibm-modules/landing-zone-vsi/ibm"
+ version = "5.4.16"
+ vsi_per_subnet = var.storage_instances[count.index]["count"]
+ create_security_group = false
+ security_group = null
+ image_id = var.storage_type != "evaluation" ? (local.scale_storage_image_found_in_map ? local.new_storage_image_id : data.ibm_is_image.storage_vsi[0].id) : local.evaluation_image_id
+ machine_type = var.storage_instances[count.index]["profile"]
+ prefix = count.index == 0 ? local.storage_node_name : format("%s-%s", local.storage_node_name, count.index)
+ resource_group_id = var.resource_group
+ enable_floating_ip = false
+ security_group_ids = var.storage_security_group_name == null ? module.storage_sg[*].security_group_id : local.storage_security_group_name_id
+ ssh_key_ids = local.ssh_keys
+ subnets = local.storage_subnets
+ tags = local.tags
+ user_data = data.template_file.storage_user_data.rendered
+ vpc_id = var.vpc_id
+ block_storage_volumes = local.enable_block_storage ? local.block_storage_volumes : []
+ kms_encryption_enabled = var.kms_encryption_enabled
+ skip_iam_authorization_policy = local.skip_iam_authorization_policy
+ boot_volume_encryption_key = var.boot_volume_encryption_key
+ placement_group_id = var.placement_group_ids
+ secondary_allow_ip_spoofing = local.enable_protocol && var.colocate_protocol_instances ? true : false
+ secondary_security_groups = local.enable_sec_interface_storage ? local.storage_secondary_security_group : (local.enable_protocol && var.colocate_protocol_instances) ? local.protocol_secondary_security_group : []
+ secondary_subnets = local.enable_sec_interface_storage ? local.storage_subnets : local.enable_protocol && var.colocate_protocol_instances ? local.protocol_subnets : []
+ manage_reserved_ips = local.enable_sec_interface_storage || (local.enable_protocol && var.colocate_protocol_instances) ? true : false
+ depends_on = [resource.null_resource.entitlement_check]
}
module "storage_cluster_management_vsi" {
- count = length(var.storage_instances)
+ count = var.scheduler == "Scale" ? length(var.storage_instances) : 0
source = "terraform-ibm-modules/landing-zone-vsi/ibm"
- version = "5.0.0"
+ version = "5.4.16"
vsi_per_subnet = 1
create_security_group = false
security_group = null
- image_id = local.storage_image_id[count.index]
- machine_type = var.management_instances[count.index]["profile"]
+ image_id = var.storage_type != "evaluation" ? (local.scale_storage_image_found_in_map ? local.new_storage_image_id : data.ibm_is_image.storage_vsi[0].id) : local.evaluation_image_id
+ machine_type = var.scale_management_vsi_profile
prefix = count.index == 0 ? local.storage_management_node_name : format("%s-%s", local.storage_management_node_name, count.index)
resource_group_id = var.resource_group
enable_floating_ip = false
- security_group_ids = module.storage_sg[*].security_group_id
+ security_group_ids = var.storage_security_group_name == null ? module.storage_sg[*].security_group_id : local.storage_security_group_name_id
ssh_key_ids = local.ssh_keys
subnets = local.storage_subnets
tags = local.tags
@@ -288,27 +292,27 @@ module "storage_cluster_management_vsi" {
kms_encryption_enabled = var.kms_encryption_enabled
skip_iam_authorization_policy = local.skip_iam_authorization_policy
boot_volume_encryption_key = var.boot_volume_encryption_key
- existing_kms_instance_guid = var.existing_kms_instance_guid
placement_group_id = var.placement_group_ids
- depends_on = [resource.null_resource.entitlement_check]
- #placement_group_id = var.placement_group_ids[(var.storage_instances[count.index]["count"])%(length(var.placement_group_ids))]
+ secondary_security_groups = local.enable_sec_interface_storage ? local.storage_secondary_security_group : []
+ secondary_subnets = local.enable_sec_interface_storage ? local.storage_subnets : []
+ manage_reserved_ips = local.enable_sec_interface_storage ? true : false
}
module "storage_cluster_tie_breaker_vsi" {
- count = var.storage_type != "persistent" ? 1 : 0
+ count = var.scheduler == "Scale" ? (var.storage_type != "persistent" ? 1 : 0) : 0
source = "terraform-ibm-modules/landing-zone-vsi/ibm"
- version = "5.0.0"
+ version = "5.4.16"
vsi_per_subnet = 1
create_security_group = false
security_group = null
- image_id = local.storage_image_id[count.index]
+ image_id = var.storage_type != "evaluation" ? (local.scale_storage_image_found_in_map ? local.new_storage_image_id : data.ibm_is_image.storage_vsi[0].id) : local.evaluation_image_id
machine_type = var.storage_instances[count.index]["profile"]
prefix = format("%s-strg-tie", local.prefix)
resource_group_id = var.resource_group
enable_floating_ip = false
- security_group_ids = module.storage_sg[*].security_group_id
+ security_group_ids = var.storage_security_group_name == null ? module.storage_sg[*].security_group_id : local.storage_security_group_name_id
ssh_key_ids = local.ssh_keys
- subnets = local.storage_subnets #[local.storage_subnets[0]]
+ subnets = local.storage_subnets
tags = local.tags
user_data = data.template_file.storage_user_data.rendered
vpc_id = var.vpc_id
@@ -316,26 +320,26 @@ module "storage_cluster_tie_breaker_vsi" {
kms_encryption_enabled = var.kms_encryption_enabled
skip_iam_authorization_policy = local.skip_iam_authorization_policy
boot_volume_encryption_key = var.boot_volume_encryption_key
- existing_kms_instance_guid = var.existing_kms_instance_guid
placement_group_id = var.placement_group_ids
- # manage_reserved_ips = true
- # primary_vni_additional_ip_count = var.storage_instances[count.index]["count"]
- # placement_group_id = var.placement_group_ids[(var.storage_instances[count.index]["count"])%(length(var.placement_group_ids))]
+ secondary_security_groups = local.enable_sec_interface_storage ? local.storage_secondary_security_group : []
+ secondary_subnets = local.enable_sec_interface_storage ? local.storage_subnets : []
+ manage_reserved_ips = local.enable_sec_interface_storage ? true : false
+ depends_on = [resource.null_resource.entitlement_check]
}
module "client_vsi" {
- count = length(var.client_instances)
+ count = var.scheduler == "Scale" ? length(var.client_instances) : 0
source = "terraform-ibm-modules/landing-zone-vsi/ibm"
- version = "5.0.0"
+ version = "5.4.6"
vsi_per_subnet = var.client_instances[count.index]["count"]
create_security_group = false
security_group = null
- image_id = local.client_image_id[count.index]
+ image_id = data.ibm_is_image.client[0].id
machine_type = var.client_instances[count.index]["profile"]
prefix = count.index == 0 ? local.client_node_name : format("%s-%s", local.client_node_name, count.index)
resource_group_id = var.resource_group
enable_floating_ip = false
- security_group_ids = module.client_sg[*].security_group_id
+ security_group_ids = var.client_security_group_name == null ? module.client_sg[*].security_group_id : local.client_security_group_name_id
ssh_key_ids = local.ssh_keys
subnets = local.client_subnets
tags = local.tags
@@ -344,23 +348,21 @@ module "client_vsi" {
kms_encryption_enabled = var.kms_encryption_enabled
skip_iam_authorization_policy = local.skip_iam_authorization_policy
boot_volume_encryption_key = var.boot_volume_encryption_key
- existing_kms_instance_guid = var.existing_kms_instance_guid
- depends_on = [resource.null_resource.entitlement_check]
}
module "protocol_vsi" {
- count = var.colocate_protocol_instances == true ? 0 : length(var.protocol_instances)
+ count = var.scheduler == "Scale" ? ((local.enable_protocol && var.colocate_protocol_instances == false && local.ces_server_type == false) ? 1 : 0) : 0
source = "terraform-ibm-modules/landing-zone-vsi/ibm"
- version = "5.0.0"
+ version = "5.4.16"
vsi_per_subnet = var.protocol_instances[count.index]["count"]
create_security_group = false
security_group = null
- image_id = local.protocol_image_id[count.index]
+ image_id = var.storage_type != "evaluation" ? (local.scale_storage_image_found_in_map ? local.new_storage_image_id : data.ibm_is_image.storage_vsi[0].id) : local.evaluation_image_id
machine_type = var.protocol_instances[count.index]["profile"]
prefix = count.index == 0 ? local.protocol_node_name : format("%s-%s", local.protocol_node_name, count.index)
resource_group_id = var.resource_group
enable_floating_ip = false
- security_group_ids = module.storage_sg[*].security_group_id
+ security_group_ids = var.storage_security_group_name == null ? module.storage_sg[*].security_group_id : local.storage_security_group_name_id
ssh_key_ids = local.ssh_keys
subnets = local.storage_subnets
tags = local.tags
@@ -369,32 +371,28 @@ module "protocol_vsi" {
kms_encryption_enabled = var.kms_encryption_enabled
skip_iam_authorization_policy = local.skip_iam_authorization_policy
boot_volume_encryption_key = var.boot_volume_encryption_key
- existing_kms_instance_guid = var.existing_kms_instance_guid
# Bug: 5847 - LB profile & subnets are not configurable
# load_balancers = local.enable_load_balancer ? local.load_balancers : []
- secondary_allow_ip_spoofing = true
- secondary_security_groups = local.protocol_secondary_security_group
- secondary_subnets = local.protocol_subnets
- placement_group_id = var.placement_group_ids
- manage_reserved_ips = true
- primary_vni_additional_ip_count = var.protocol_instances[count.index]["count"]
- depends_on = [resource.null_resource.entitlement_check]
- # placement_group_id = var.placement_group_ids[(var.protocol_instances[count.index]["count"])%(length(var.placement_group_ids))]
+ secondary_allow_ip_spoofing = true
+ secondary_security_groups = local.protocol_secondary_security_group
+ secondary_subnets = local.protocol_subnets
+ manage_reserved_ips = true
+ depends_on = [resource.null_resource.entitlement_check]
}
module "afm_vsi" {
- count = length(var.afm_instances)
+ count = var.scheduler == "Scale" ? ((local.afm_server_type == false && local.enable_afm) ? 1 : 0) : 0
source = "terraform-ibm-modules/landing-zone-vsi/ibm"
- version = "5.0.0"
+ version = "5.4.6"
vsi_per_subnet = var.afm_instances[count.index]["count"]
create_security_group = false
security_group = null
- image_id = local.afm_image_id[count.index]
+ image_id = var.storage_type != "evaluation" ? (local.scale_storage_image_found_in_map ? local.new_storage_image_id : data.ibm_is_image.storage_vsi[0].id) : local.evaluation_image_id
machine_type = var.afm_instances[count.index]["profile"]
prefix = count.index == 0 ? local.afm_node_name : format("%s-%s", local.afm_node_name, count.index)
resource_group_id = var.resource_group
enable_floating_ip = false
- security_group_ids = module.storage_sg[*].security_group_id
+ security_group_ids = var.storage_security_group_name == null ? module.storage_sg[*].security_group_id : local.storage_security_group_name_id
ssh_key_ids = local.ssh_keys
subnets = local.storage_subnets
tags = local.tags
@@ -403,25 +401,23 @@ module "afm_vsi" {
kms_encryption_enabled = var.kms_encryption_enabled
skip_iam_authorization_policy = local.skip_iam_authorization_policy
boot_volume_encryption_key = var.boot_volume_encryption_key
- existing_kms_instance_guid = var.existing_kms_instance_guid
- # manage_reserved_ips = true
- # primary_vni_additional_ip_count = var.afm_instances[count.index]["count"]
+ depends_on = [resource.null_resource.entitlement_check]
}
module "gklm_vsi" {
- count = var.scale_encryption_enabled == true && var.scale_encryption_type == "gklm" ? 1 : 0
+ count = var.scheduler == "Scale" ? (var.scale_encryption_enabled == true && var.scale_encryption_type == "gklm" ? 1 : 0) : 0
source = "terraform-ibm-modules/landing-zone-vsi/ibm"
- version = "5.0.0"
+ version = "5.4.6"
vsi_per_subnet = var.gklm_instances[count.index]["count"]
create_security_group = false
security_group = null
- image_id = local.gklm_image_id[count.index]
+ image_id = local.scale_encryption_image_mapping_entry_found ? local.scale_encryption_image_id : data.ibm_is_image.gklm[0].id
machine_type = var.gklm_instances[count.index]["profile"]
prefix = count.index == 0 ? local.gklm_node_name : format("%s-%s", local.gklm_node_name, count.index)
resource_group_id = var.resource_group
enable_floating_ip = false
- security_group_ids = module.storage_sg[*].security_group_id
- ssh_key_ids = local.gklm_ssh_keys
+ security_group_ids = var.gklm_security_group_name == null ? module.storage_sg[*].security_group_id : local.gklm_security_group_name_id
+ ssh_key_ids = local.ssh_keys
subnets = local.storage_subnets
tags = local.tags
user_data = data.template_file.gklm_user_data.rendered
@@ -429,13 +425,12 @@ module "gklm_vsi" {
kms_encryption_enabled = var.kms_encryption_enabled
skip_iam_authorization_policy = local.skip_iam_authorization_policy
boot_volume_encryption_key = var.boot_volume_encryption_key
- existing_kms_instance_guid = var.existing_kms_instance_guid
}
module "ldap_vsi" {
count = var.enable_ldap == true && var.ldap_server == "null" ? 1 : 0
source = "terraform-ibm-modules/landing-zone-vsi/ibm"
- version = "5.0.0"
+ version = "5.4.6"
vsi_per_subnet = 1
create_security_group = false
security_group = null
@@ -444,9 +439,9 @@ module "ldap_vsi" {
prefix = local.ldap_node_name
resource_group_id = var.resource_group
enable_floating_ip = false
- security_group_ids = local.products == "lsf" ? module.compute_sg[*].security_group_id : module.storage_sg[*].security_group_id
- ssh_key_ids = local.products == "lsf" ? local.ssh_keys : local.ldap_ssh_keys
- subnets = local.products == "lsf" ? local.cluster_subnet_id : [local.storage_subnets[0]]
+ security_group_ids = local.products == "lsf" ? module.compute_sg[*].security_group_id : (var.ldap_security_group_name == null ? module.storage_sg[*].security_group_id : local.ldap_security_group_name_id)
+ ssh_key_ids = local.ssh_keys
+ subnets = local.products == "lsf" ? local.compute_subnet_id : [local.storage_subnets[0]]
tags = local.tags
user_data = data.template_file.ldap_user_data.rendered
vpc_id = var.vpc_id
@@ -454,7 +449,6 @@ module "ldap_vsi" {
kms_encryption_enabled = var.kms_encryption_enabled
skip_iam_authorization_policy = local.skip_iam_authorization_policy
boot_volume_encryption_key = var.boot_volume_encryption_key
- existing_kms_instance_guid = var.existing_kms_instance_guid
placement_group_id = var.placement_group_ids
#placement_group_id = var.placement_group_ids[(var.storage_instances[count.index]["count"])%(length(var.placement_group_ids))]
}
@@ -480,14 +474,70 @@ module "dedicated_host" {
########################################################################
module "storage_baremetal" {
+ count = length(var.storage_servers) > 0 && var.storage_type == "persistent" ? 1 : 0
+ source = "../baremetal"
+ existing_resource_group = var.resource_group
+ image_id = local.storage_bare_metal_image_mapping_entry_found ? local.storage_bare_metal_image_id : data.ibm_is_image.baremetal_storage[0].id
+ prefix = format("%s-%s", local.storage_node_name, substr(local.storage_subnets[count.index].id, length(local.storage_subnets[count.index].id) - 4, 4))
+ storage_subnets = [for subnet in local.storage_subnets : subnet.id]
+ storage_ssh_keys = local.ssh_keys
+ storage_servers = var.storage_servers
+ security_group_ids = module.storage_sg[*].security_group_id
+ user_data = var.bms_boot_drive_encryption == false ? data.template_file.storage_bm_user_data.rendered : templatefile("${path.module}/templates/storage_bootdrive_user_data/cloud_init.yml", local.user_data_vars)
+ secondary_vni_enabled = local.enable_protocol && var.colocate_protocol_instances ? true : false
+ protocol_subnets = local.enable_protocol && var.colocate_protocol_instances ? local.protocol_subnets : []
+ secondary_security_group_ids = local.enable_protocol && var.colocate_protocol_instances ? module.storage_sg[*].security_group_id : []
+ # manage_reserved_ips = local.enable_protocol && var.colocate_protocol_instances ? true : false
+ sapphire_rapids_profile_check = local.sapphire_rapids_profile_check
+}
- count = length(var.storage_servers) > 0 && var.storage_type == "persistent" ? 1 : 0
- source = "../baremetal"
- existing_resource_group = var.resource_group
- prefix = var.prefix
- storage_subnets = [for subnet in local.storage_subnets : subnet.id]
- storage_ssh_keys = local.ssh_keys
- storage_servers = var.storage_servers
- security_group_ids = module.storage_sg[*].security_group_id
- bastion_public_key_content = var.bastion_public_key_content
+module "storage_baremetal_tie_breaker" {
+ count = length(var.storage_servers) > 0 && var.storage_type == "persistent" ? 1 : 0
+ source = "../baremetal"
+ existing_resource_group = var.resource_group
+ image_id = local.storage_bare_metal_image_mapping_entry_found ? local.storage_bare_metal_image_id : data.ibm_is_image.baremetal_storage[0].id
+ prefix = format("%s-%s", local.storage_tie_breaker_node_name, substr(local.storage_subnets[count.index].id, length(local.storage_subnets[count.index].id) - 4, 4))
+ storage_subnets = [for subnet in local.storage_subnets : subnet.id]
+ storage_ssh_keys = local.ssh_keys
+ storage_servers = local.tie_breaker_bm_server
+ security_group_ids = module.storage_sg[*].security_group_id
+ user_data = var.bms_boot_drive_encryption == false ? data.template_file.storage_bmtb_user_data.rendered : templatefile("${path.module}/templates/storage_tb_bootdrive_user_data/cloud_init.yml", local.user_data_vars)
+ secondary_vni_enabled = false
+ protocol_subnets = local.protocol_subnets
+ secondary_security_group_ids = []
+ sapphire_rapids_profile_check = local.sapphire_rapids_profile_check
+}
+
+module "protocol_baremetal_server" {
+ count = (var.colocate_protocol_instances == false && local.ces_server_type == true && local.enable_protocol) ? 1 : 0
+ source = "../baremetal"
+ existing_resource_group = var.resource_group
+ image_id = local.storage_bare_metal_image_mapping_entry_found ? local.storage_bare_metal_image_id : data.ibm_is_image.baremetal_storage[0].id
+ prefix = format("%s-%s", local.protocol_node_name, substr(local.protocol_subnets[count.index].id, length(local.protocol_subnets[count.index].id) - 4, 4))
+ storage_subnets = [for subnet in local.storage_subnets : subnet.id]
+ storage_ssh_keys = local.ssh_keys
+ storage_servers = var.protocol_instances
+ security_group_ids = module.storage_sg[*].security_group_id
+ user_data = var.bms_boot_drive_encryption == false ? data.template_file.protocol_bm_user_data.rendered : templatefile("${path.module}/templates/protocol_bootdrive_user_data/cloud_init.yml", local.user_data_vars)
+ secondary_vni_enabled = true
+ protocol_subnets = local.protocol_subnets
+ secondary_security_group_ids = module.storage_sg[*].security_group_id
+ sapphire_rapids_profile_check = local.sapphire_rapids_profile_check
+}
+
+module "afm_baremetal_server" {
+ count = (local.afm_server_type == true && local.enable_afm) ? 1 : 0
+ source = "../baremetal"
+ existing_resource_group = var.resource_group
+ image_id = local.storage_bare_metal_image_mapping_entry_found ? local.storage_bare_metal_image_id : data.ibm_is_image.baremetal_storage[0].id
+ prefix = format("%s-%s", local.afm_node_name, substr(local.storage_subnets[count.index].id, length(local.storage_subnets[count.index].id) - 4, 4))
+ storage_subnets = [for subnet in local.storage_subnets : subnet.id]
+ storage_ssh_keys = local.ssh_keys
+ storage_servers = var.afm_instances
+ security_group_ids = module.storage_sg[*].security_group_id
+ user_data = var.bms_boot_drive_encryption == false ? data.template_file.afm_bm_user_data.rendered : templatefile("${path.module}/templates/afm_bootdrive_user_data/cloud_init.yml", local.user_data_vars)
+ secondary_vni_enabled = false
+ protocol_subnets = local.protocol_subnets
+ secondary_security_group_ids = []
+ sapphire_rapids_profile_check = local.sapphire_rapids_profile_check
}
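
Two idioms recur across the module blocks above: every Scale-side module is now count-gated on the scheduler, and every security_group_ids argument prefers a caller-named group over the one the stack creates itself. Restated as a minimal sketch (the local names below are illustrative, not real code from the module):

locals {
  # Gate on the scheduler: any value other than "Scale" yields count = 0,
  # so the gated module and all of its resources are skipped entirely.
  storage_mgmt_vsi_count = var.scheduler == "Scale" ? length(var.storage_instances) : 0

  # Prefer an existing, caller-named security group when one is supplied;
  # otherwise fall back to the security group created by this deployment.
  effective_storage_sg_ids = var.storage_security_group_name == null ? module.storage_sg[*].security_group_id : local.storage_security_group_name_id
}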
diff --git a/modules/landing_zone_vsi/outputs.tf b/modules/landing_zone_vsi/outputs.tf
index c8d3523d..7d31ef84 100644
--- a/modules/landing_zone_vsi/outputs.tf
+++ b/modules/landing_zone_vsi/outputs.tf
@@ -31,6 +31,35 @@ output "storage_vsi_data" {
output "storage_bms_data" {
description = "Storage BareMetal Server data"
value = flatten(module.storage_baremetal[*].list)
+ depends_on = [module.storage_baremetal]
+}
+
+output "storage_bm_name_with_vol_mapping" {
+ description = "Storage BareMetal Server data"
+ value = flatten(module.storage_baremetal[*].instance_ips_with_vol_mapping)
+}
+
+output "storage_tie_breaker_bms_data" {
+ description = "Storage Tie- Breaker BareMetal Server data"
+ value = flatten(module.storage_baremetal_tie_breaker[*].list)
+ depends_on = [module.storage_baremetal_tie_breaker]
+}
+
+output "storage_tie_breaker_bms_name_with_vol_mapping" {
+ description = "Storage BareMetal Server data"
+ value = flatten(module.storage_baremetal_tie_breaker[*].instance_ips_with_vol_mapping)
+}
+
+output "protocol_bms_data" {
+ description = "Protocol BareMetal Server data"
+ value = flatten(module.protocol_baremetal_server[*].list)
+ depends_on = [module.protocol_baremetal_server]
+}
+
+output "afm_bms_data" {
+ description = "AFM BareMetal Server data"
+ value = flatten(module.afm_baremetal_server[*].list)
+ depends_on = [module.afm_baremetal_server]
}
output "storage_cluster_management_vsi" {
@@ -60,6 +89,18 @@ output "compute_private_key_content" {
value = one(module.compute_key[*].private_key_content)
}
+output "client_public_key_content" {
+ description = "Client public key content"
+ sensitive = true
+ value = one(module.client_key[*].public_key_content)
+}
+
+output "client_private_key_content" {
+ description = "Client private key content"
+ sensitive = true
+ value = one(module.client_key[*].private_key_content)
+}
+
output "afm_vsi_data" {
description = "AFM VSI data"
value = module.afm_vsi[*]["list"]
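
The new client key outputs mirror the existing compute ones: the key module is count-gated, so its splat is a zero-or-one element list and one() collapses it safely. A minimal sketch of the semantics being relied on (the local name is illustrative):

locals {
  # one() over the splat of a count-gated module:
  #   count = 0  ->  one([])       ->  null
  #   count = 1  ->  one([value])  ->  value
  # (a longer list would be an error, which count = 0/1 rules out)
  client_private_key = one(module.client_key[*].private_key_content)
}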
diff --git a/modules/landing_zone_vsi/template_files.tf b/modules/landing_zone_vsi/template_files.tf
index 6b1cc54f..911f27c0 100644
--- a/modules/landing_zone_vsi/template_files.tf
+++ b/modules/landing_zone_vsi/template_files.tf
@@ -1,11 +1,22 @@
data "template_file" "ldap_user_data" {
template = file("${path.module}/templates/ldap_user_data.tpl")
vars = {
- bastion_public_key_content = var.bastion_public_key_content != null ? var.bastion_public_key_content : ""
- compute_public_key_content = local.enable_compute ? local.compute_public_key_content != null ? local.compute_public_key_content : "" : ""
- compute_private_key_content = local.enable_compute ? local.compute_private_key_content != null ? local.compute_private_key_content : "" : ""
- compute_interfaces = var.storage_type == "scratch" ? local.vsi_interfaces[0] : local.bms_interfaces[0]
- compute_dns_domain = var.dns_domain_names["compute"]
+ bastion_public_key_content = var.bastion_public_key_content != null ? var.bastion_public_key_content : ""
+
+ cluster_public_key_content = (
+ var.scheduler == "LSF" && local.enable_compute ? try(local.compute_public_key_content, "") :
+ var.scheduler == "Scale" && local.enable_storage ? try(local.storage_public_key_content, "") :
+ ""
+ )
+
+ cluster_private_key_content = (
+ var.scheduler == "LSF" && local.enable_compute ? try(local.compute_private_key_content, "") :
+ var.scheduler == "Scale" && local.enable_storage ? try(local.storage_private_key_content, "") :
+ ""
+ )
+
+ compute_interfaces = var.storage_type == "scratch" ? local.vsi_interfaces[0] : local.bms_interfaces[0]
+ cluster_dns_domain = var.scheduler == "LSF" && local.enable_compute ? var.dns_domain_names["compute"] : "ldap.com"
}
}
@@ -13,10 +24,10 @@ data "template_file" "client_user_data" {
template = file("${path.module}/templates/client_user_data.tpl")
vars = {
bastion_public_key_content = var.bastion_public_key_content != null ? var.bastion_public_key_content : ""
- client_public_key_content = local.enable_client ? local.compute_public_key_content != null ? local.compute_public_key_content : "" : ""
- client_private_key_content = local.enable_client ? local.compute_private_key_content != null ? local.compute_private_key_content : "" : ""
- client_interfaces = var.storage_type == "scratch" ? local.vsi_interfaces[0] : local.bms_interfaces[0]
- client_dns_domain = var.dns_domain_names["compute"]
+ client_public_key_content = local.enable_client ? local.client_public_key_content != null ? local.client_public_key_content : "" : ""
+ client_private_key_content = local.enable_client ? local.client_private_key_content != null ? local.client_private_key_content : "" : ""
+ client_interfaces = local.vsi_interfaces[0]
+ client_dns_domain = local.enable_client ? var.dns_domain_names["client"] : ""
}
}
@@ -59,28 +70,33 @@ data "template_file" "login_user_data" {
data "template_file" "scale_compute_user_data" {
template = file("${path.module}/templates/scale_compute_user_data.tpl")
vars = {
- bastion_public_key_content = var.bastion_public_key_content != null ? var.bastion_public_key_content : ""
- compute_public_key_content = local.enable_compute ? local.compute_public_key_content != null ? local.compute_public_key_content : "" : ""
- compute_private_key_content = local.enable_compute ? local.compute_private_key_content != null ? local.compute_private_key_content : "" : ""
- compute_interfaces = var.storage_type == "scratch" ? local.vsi_interfaces[0] : local.bms_interfaces[0]
- compute_dns_domain = var.dns_domain_names["compute"]
+ bastion_public_key_content = var.bastion_public_key_content != null ? var.bastion_public_key_content : ""
+ compute_public_key_content = local.enable_compute ? local.compute_public_key_content != null ? local.compute_public_key_content : "" : ""
+ compute_private_key_content = local.enable_compute ? local.compute_private_key_content != null ? local.compute_private_key_content : "" : ""
+ compute_interfaces = local.vsi_interfaces[0]
+ compute_dns_domain = local.enable_compute ? var.dns_domain_names["compute"] : ""
+ storage_dns_domain = local.enable_storage && local.enable_sec_interface_compute ? var.dns_domain_names["storage"] : ""
+ protocol_interfaces = local.vsi_interfaces[1]
+ enable_sec_interface_compute = local.enable_sec_interface_compute
}
}
data "template_file" "storage_user_data" {
template = file("${path.module}/templates/storage_user_data.tpl")
vars = {
- bastion_public_key_content = var.bastion_public_key_content != null ? var.bastion_public_key_content : ""
- storage_public_key_content = local.enable_storage ? module.storage_key[0].public_key_content : ""
- storage_private_key_content = local.enable_storage ? module.storage_key[0].private_key_content : ""
- storage_interfaces = var.storage_type == "scratch" ? local.vsi_interfaces[0] : local.bms_interfaces[0]
- storage_dns_domain = local.enable_storage ? var.dns_domain_names["storage"] : ""
- storage_disk_type = var.storage_type == "scratch" ? data.ibm_is_instance_profile.storage[0].disks[0].quantity[0].type : ""
- protocol_dns_domain = local.enable_protocol ? var.dns_domain_names["protocol"] : ""
- enable_protocol = local.enable_protocol
- vpc_region = var.vpc_region
- resource_group_id = var.resource_group
- protocol_subnets = local.enable_protocol ? local.protocol_subnets[0].id : ""
+ bastion_public_key_content = var.bastion_public_key_content != null ? var.bastion_public_key_content : ""
+ storage_public_key_content = local.enable_storage ? module.storage_key[0].public_key_content : ""
+ storage_private_key_content = local.enable_storage ? module.storage_key[0].private_key_content : ""
+ storage_interfaces = local.vsi_interfaces[0]
+ protocol_interfaces = local.vsi_interfaces[1]
+ storage_dns_domain = local.enable_storage ? var.dns_domain_names["storage"] : ""
+ storage_disk_type = var.storage_type == "scratch" ? data.ibm_is_instance_profile.storage[0].disks[0].quantity[0].type : ""
+ protocol_dns_domain = local.enable_protocol && var.colocate_protocol_instances ? var.dns_domain_names["protocol"] : ""
+ enable_protocol = local.enable_protocol && var.colocate_protocol_instances ? true : false
+ vpc_region = local.enable_protocol && var.colocate_protocol_instances ? var.vpc_region : ""
+ resource_group_id = local.enable_protocol && var.colocate_protocol_instances ? var.resource_group : ""
+ protocol_subnets = local.enable_protocol && var.colocate_protocol_instances ? (length(local.protocol_subnets) > 0 ? local.protocol_subnets[0].id : "") : ""
+ enable_sec_interface_storage = local.enable_sec_interface_storage
}
}
@@ -90,13 +106,13 @@ data "template_file" "protocol_user_data" {
bastion_public_key_content = var.bastion_public_key_content != null ? var.bastion_public_key_content : ""
storage_public_key_content = local.enable_protocol ? module.storage_key[0].public_key_content : ""
storage_private_key_content = local.enable_protocol ? module.storage_key[0].private_key_content : ""
- storage_interfaces = var.storage_type == "scratch" ? local.vsi_interfaces[0] : local.bms_interfaces[0]
- protocol_interfaces = var.storage_type == "scratch" ? local.vsi_interfaces[1] : local.bms_interfaces[1]
+ storage_interfaces = local.vsi_interfaces[0]
+ protocol_interfaces = local.vsi_interfaces[1]
storage_dns_domain = local.enable_storage ? var.dns_domain_names["storage"] : ""
protocol_dns_domain = local.enable_protocol ? var.dns_domain_names["protocol"] : ""
vpc_region = var.vpc_region
resource_group_id = var.resource_group
- protocol_subnets = local.enable_protocol ? local.protocol_subnets[0].id : ""
+ protocol_subnets = local.enable_protocol ? (length(local.protocol_subnets) > 0 ? local.protocol_subnets[0].id : "") : ""
}
}
@@ -106,7 +122,7 @@ data "template_file" "afm_user_data" {
bastion_public_key_content = var.bastion_public_key_content != null ? var.bastion_public_key_content : ""
storage_public_key_content = local.enable_storage ? module.storage_key[0].public_key_content : ""
storage_private_key_content = local.enable_storage ? module.storage_key[0].private_key_content : ""
- storage_interfaces = var.storage_type == "scratch" ? local.vsi_interfaces[0] : local.bms_interfaces[0]
+ storage_interfaces = local.vsi_interfaces[0]
storage_dns_domain = local.enable_storage ? var.dns_domain_names["storage"] : ""
}
}
@@ -117,5 +133,67 @@ data "template_file" "gklm_user_data" {
bastion_public_key_content = var.bastion_public_key_content != null ? var.bastion_public_key_content : ""
storage_public_key_content = local.enable_storage ? module.storage_key[0].public_key_content : ""
storage_private_key_content = local.enable_storage ? module.storage_key[0].private_key_content : ""
+ domain_name = local.enable_storage ? var.dns_domain_names["gklm"] : ""
+ }
+}
+
+data "template_file" "storage_bm_user_data" {
+ template = file("${path.module}/templates/storage_bm_user_data.tpl")
+ vars = {
+ bastion_public_key_content = var.bastion_public_key_content != null ? var.bastion_public_key_content : ""
+ storage_public_key_content = local.enable_storage ? module.storage_key[0].public_key_content : ""
+ storage_private_key_content = local.enable_storage ? module.storage_key[0].private_key_content : ""
+ storage_interfaces = local.bms_interfaces[0]
+ protocol_interfaces = local.bms_interfaces[1]
+ storage_dns_domain = local.enable_storage ? var.dns_domain_names["storage"] : ""
+ protocol_dns_domain = local.enable_protocol && var.colocate_protocol_instances ? var.dns_domain_names["protocol"] : ""
+ enable_protocol = local.enable_protocol && var.colocate_protocol_instances ? true : false
+ vpc_region = local.enable_protocol && var.colocate_protocol_instances ? var.vpc_region : ""
+ resource_group_id = local.enable_protocol && var.colocate_protocol_instances ? var.resource_group : ""
+ protocol_subnets = local.enable_protocol && var.colocate_protocol_instances ? (length(local.protocol_subnets) > 0 ? local.protocol_subnets[0].id : "") : ""
+ }
+}
+
+data "template_file" "storage_bmtb_user_data" {
+ template = file("${path.module}/templates/storage_bmtb_user_data.tpl")
+ vars = {
+ bastion_public_key_content = var.bastion_public_key_content != null ? var.bastion_public_key_content : ""
+ storage_public_key_content = local.enable_storage ? module.storage_key[0].public_key_content : ""
+ storage_private_key_content = local.enable_storage ? module.storage_key[0].private_key_content : ""
+ storage_interfaces = local.bms_interfaces[0]
+ protocol_interfaces = local.bms_interfaces[1]
+ storage_dns_domain = local.enable_storage ? var.dns_domain_names["storage"] : ""
+ protocol_dns_domain = local.enable_protocol && var.colocate_protocol_instances ? var.dns_domain_names["protocol"] : ""
+ enable_protocol = local.enable_protocol && var.colocate_protocol_instances ? true : false
+ vpc_region = local.enable_protocol && var.colocate_protocol_instances ? var.vpc_region : ""
+ resource_group_id = local.enable_protocol && var.colocate_protocol_instances ? var.resource_group : ""
+ protocol_subnets = local.enable_protocol && var.colocate_protocol_instances ? (length(local.protocol_subnets) > 0 ? local.protocol_subnets[0].id : "") : ""
+ }
+}
+
+data "template_file" "protocol_bm_user_data" {
+ template = file("${path.module}/templates/protocol_bm_user_data.tpl")
+ vars = {
+ bastion_public_key_content = var.bastion_public_key_content != null ? var.bastion_public_key_content : ""
+ storage_public_key_content = local.enable_protocol ? module.storage_key[0].public_key_content : ""
+ storage_private_key_content = local.enable_protocol ? module.storage_key[0].private_key_content : ""
+ storage_interfaces = local.bms_interfaces[0]
+ protocol_interfaces = local.bms_interfaces[1]
+ storage_dns_domain = local.enable_storage ? var.dns_domain_names["storage"] : ""
+ protocol_dns_domain = local.enable_protocol ? var.dns_domain_names["protocol"] : ""
+ vpc_region = var.vpc_region
+ resource_group_id = var.resource_group
+ protocol_subnets = local.enable_protocol ? (length(local.protocol_subnets) > 0 ? local.protocol_subnets[0].id : "") : ""
+ }
+}
+
+data "template_file" "afm_bm_user_data" {
+ template = file("${path.module}/templates/afm_bm_user_data.tpl")
+ vars = {
+ bastion_public_key_content = var.bastion_public_key_content != null ? var.bastion_public_key_content : ""
+ storage_public_key_content = local.enable_afm ? module.storage_key[0].public_key_content : ""
+ storage_private_key_content = local.enable_afm ? module.storage_key[0].private_key_content : ""
+ storage_interfaces = local.bms_interfaces[0]
+ storage_dns_domain = local.enable_afm ? var.dns_domain_names["storage"] : ""
}
}
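
A pattern worth noting in the reworked template wiring above: every feature-dependent variable collapses to "" when its feature is off, so the template always renders and the scripts can treat an empty string as "disabled"; subnet lookups are additionally length-guarded before indexing. A condensed sketch, assuming a helper local protocol_colocated that this diff does not define, and showing only two of the real vars:

locals {
  protocol_colocated = local.enable_protocol && var.colocate_protocol_instances
}

data "template_file" "example_user_data" {
  template = file("${path.module}/templates/storage_bm_user_data.tpl")
  vars = {
    # "" is the disabled sentinel: rendering never fails, and the shell
    # side tests for an empty value instead of a missing variable.
    vpc_region       = local.protocol_colocated ? var.vpc_region : ""
    # guard the [0] index so an empty subnet list cannot break the plan
    protocol_subnets = local.protocol_colocated ? (length(local.protocol_subnets) > 0 ? local.protocol_subnets[0].id : "") : ""
  }
}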
diff --git a/modules/landing_zone_vsi/templates/afm_bm_user_data.tpl b/modules/landing_zone_vsi/templates/afm_bm_user_data.tpl
new file mode 100644
index 00000000..cccf8fc2
--- /dev/null
+++ b/modules/landing_zone_vsi/templates/afm_bm_user_data.tpl
@@ -0,0 +1,32 @@
+#!/bin/bash
+
+###################################################
+# Copyright (C) IBM Corp. 2023 All Rights Reserved.
+# Licensed under the Apache License v2.0
+###################################################
+
+# Setup logging
+exec > >(tee /var/log/ibm_spectrumscale_user-data.log)
+exec 2>&1
+set -e
+
+# Configure SSH
+mkdir -p ~/.ssh
+chmod 700 ~/.ssh
+echo "${storage_public_key_content}" >> ~/.ssh/authorized_keys
+echo "${bastion_public_key_content}" >> ~/.ssh/authorized_keys
+echo "StrictHostKeyChecking no" >> ~/.ssh/config
+echo "${storage_private_key_content}" > ~/.ssh/id_rsa
+chmod 600 ~/.ssh/id_rsa ~/.ssh/authorized_keys
+ethtool -L eth0 combined 16
+
+# Banner configuration
+echo "###########################################################################################" >> /etc/motd
+echo "# You have logged in to AFM BareMetal Server. #" >> /etc/motd
+echo "# #" >> /etc/motd
+echo "# Refer: https://cloud.ibm.com/docs/vpc?topic=vpc-bare-metal-servers-storage #" >> /etc/motd
+echo "###########################################################################################" >> /etc/motd
+
+# Create completion marker
+touch /var/user_data_complete
+echo "User data script completed successfully at $(date)"
diff --git a/modules/landing_zone_vsi/templates/afm_bootdrive_user_data/cloud_init.yml b/modules/landing_zone_vsi/templates/afm_bootdrive_user_data/cloud_init.yml
new file mode 100644
index 00000000..44902e2d
--- /dev/null
+++ b/modules/landing_zone_vsi/templates/afm_bootdrive_user_data/cloud_init.yml
@@ -0,0 +1,123 @@
+#cloud-config
+growpart:
+ mode: off
+ devices: ['/']
+resize_rootfs: false
+write_files:
+ - content: |
+ #!/usr/bin/env bash
+ # Setup logging
+ exec > >(tee /var/log/ibm_spectrumscale_user-data.log)
+ exec 2>&1
+ set -e
+
+ # Configure SSH
+ mkdir -p ~/.ssh
+ chmod 700 ~/.ssh
+ echo "${storage_public_key_content}" | base64 --decode >> /root/.ssh/authorized_keys
+ echo "${bastion_public_key_content}" | base64 --decode >> /root/.ssh/authorized_keys
+ echo "StrictHostKeyChecking no" >> ~/.ssh/config
+ echo "${storage_private_key_content}" | base64 --decode >> ~/.ssh/id_rsa
+ chmod 600 ~/.ssh/id_rsa ~/.ssh/authorized_keys
+ ethtool -L eth0 combined 16
+
+ # Banner configuration
+ echo "###########################################################################################" >> /etc/motd
+ echo "# You have logged in to Storage BareMetal Server. #" >> /etc/motd
+ echo "# #" >> /etc/motd
+ echo "# Refer: https://cloud.ibm.com/docs/vpc?topic=vpc-bare-metal-servers-storage #" >> /etc/motd
+ echo "###########################################################################################" >> /etc/motd
+ USER=vpcuser
+ PACKAGE_MGR=dnf
+ package_list="python38 kernel-devel-$(uname -r) kernel-headers-$(uname -r) firewalld numactl jq make gcc-c++ elfutils-libelf-devel bind-utils iptables nfs-utils elfutils elfutils-devel python3-dnf-plugin-versionlock cryptsetup clevis clevis-luks clevis-dracut tpm2-tools"
+ $PACKAGE_MGR install -y $package_list
+ yum update --security -y
+ yum versionlock add $package_list
+ yum versionlock list
+ echo 'export PATH=$PATH:/usr/lpp/mmfs/bin' >> /root/.bashrc
+ # Create completion marker
+ touch /var/user_data_complete
+ echo "User data script completed successfully at $(date)"
+ path: /usr/local/bin/scale_user_data.sh
+ permissions: '0755'
+ - content: |
+ #!/bin/bash
+ # This script encrypts the root partition of a Red Hat 8/9 stock IBM Cloud
+ # image using the TPM to encrypt the LUKS keys. It assumes there is plenty
+ # of unpartitioned space on the drive, and leaves the current root partition
+ # for rescue boot (but this could be deleted on a subsequent boot).
+ #
+ # * Create a new partition on the drive using all free space
+ # * Encrypt the new partition using LUKS with a known passphrase
+ # * Use 'clevis' to create an additional LUKS passphrase that is bound to the TPM
+ # * Re-generate initramfs via dracut to ensure the root drive is auto-unlocked on boot
+ # * Copy the current root filesystem to the new drive
+ # * Update fstab and crypttab for auto-mounting
+ # * Update grub to boot using the newly encrypted root drive
+ #
+ echo "Encrypt my boot drive"
+ # Determine the boot device (minus partition name)
+ # Assumes 'sdaX' or 'nvmeXnYpZ'
+ device=$(mount | grep "on / type" | awk '{print $1}')
+ if [[ "$device" =~ "nvme" ]]; then
+ device=$${device%??}
+ else
+ device=$${device%?}
+ fi
+ echo $device
+ # Create a root partition filling up the rest of the drive
+ echo -e 'n\np\n\n\n\nw' | fdisk $${device}
+ partition=$(fdisk -l $device | grep $device | tail -1 | awk '{print $1}')
+ echo $partition
+ # Setup encryption on the drive with a well known passphrase, and format the filesystem
+ echo -n n0tsecret | cryptsetup luksFormat --type luks2 -q --force-password $partition
+ echo -n n0tsecret | cryptsetup open $partition root
+ mkfs.xfs /dev/mapper/root
+ # Add the TPM key to the LUKS encrypted drive.
+ # For additional security, you can bind it to specific TPM PCR banks, but this will cause the TPM unlock
+ # to fail when the bank changes (e.g. firmware is updated). If you want to bind it to a PCR:
+ # ,"pcr_bank":"sha256","pcr_ids":"7"
+ echo -n n0tsecret | clevis luks bind -y -k - -d $partition tpm2 '{"hash":"sha256","key":"rsa"}'
+ # Regenerate dracut initramfs to allow unlock on boot
+ dracut -fv --regenerate-all
+ # Copy the OS into the encrypted partition
+ mkdir /mnt/encryptedroot
+ mount /dev/mapper/root /mnt/encryptedroot
+ rsync -a --exclude='/proc/*' --exclude='/sys/*' --exclude='/boot' --exclude='/mnt/encryptedroot' / /mnt/encryptedroot
+ # Grab the UUID for the encrypted partition and setup the crypttab
+ uuid=$(lsblk -lfi -o NAME,FSTYPE,UUID | grep crypto_LUKS | awk '{print $3}')
+ echo "root UUID=$${uuid} none luks" > /mnt/encryptedroot/etc/crypttab
+ # Replace root with '/dev/mapper/root / xfs defaults 0 1' in fstab
+ sed -i "/\t\/\t/c/dev/mapper/root\t/\txfs\tdefaults\t0\t1" /mnt/encryptedroot/etc/fstab
+ # Setup grub
+ # Grab default cmdline args
+ args=$(grep CMDLINE_LINUX /etc/default/grub | sed 's/.*GRUB_CMDLINE_LINUX=//' | sed 's/\"//g')
+ # Update grub and set the new entry to be the default.
+ grubby --add-kernel="/boot/vmlinuz-$(uname -r)" \
+ --title="Boot from encrypted root" \
+ --initrd="/boot/initramfs-$(uname -r).img" \
+ --args "$${args} root=/dev/mapper/root rd.luks.name=$${uuid}=root" \
+ --make-default
+ # Since we use EFI, copy the grubenv over (note the \cp is not a typo,
+ # it ensures that the 'cp' alias isn't used.)
+ efidir=$(ls /boot/efi/EFI/ | grep -v BOOT)
+ \cp -f /boot/grub2/grubenv /boot/efi/EFI/$${efidir}/
+ # We MUST have a separate /boot partition to host the kernel and initramfs unencrypted
+ # as these are needed to unlock the root drive. The IBM Cloud RHEL 9.x images have
+ # a separate boot partition, but 8.x do not.
+ # If we don't have a separate /boot partition, we'll use the current root partition
+ # as /boot. So copy the current /boot content into the root of the filesystem.
+ if ! lsblk -l | grep /boot$; then
+ rsync -a --exclude='/efi*' /boot/ /
+ # Current root device UUID - it will become boot device uuid
+ curr_root_uuid=$(lsblk -fl | grep /$ | awk '{print $4}')
+ # Add the new /boot partition to fstab for auto-mounting.
+ echo -e "UUID=$${curr_root_uuid}\t/boot\txfs\tdefaults\t0\t0" >> /mnt/encryptedroot/etc/fstab
+ fi
+ # Reboot the system
+ shutdown -r now
+ path: /usr/local/bin/boot_drive_encryption.sh
+ permissions: '0755'
+runcmd:
+ - /usr/local/bin/scale_user_data.sh
+ - /usr/local/bin/boot_drive_encryption.sh
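
Note that this cloud-init variant pipes each key through base64 --decode, whereas the plain .tpl user-data files embed key material verbatim; the templatefile() calls that render these cloud_init.yml files are therefore expected to pass base64-encoded strings. local.user_data_vars is not shown in this diff, but a shape consistent with the decode calls would be:

locals {
  user_data_vars = {
    # base64-encoded because cloud_init.yml decodes with `base64 --decode`
    bastion_public_key_content  = base64encode(var.bastion_public_key_content)
    storage_public_key_content  = base64encode(module.storage_key[0].public_key_content)
    storage_private_key_content = base64encode(module.storage_key[0].private_key_content)
  }
}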
diff --git a/modules/landing_zone_vsi/templates/afm_user_data.tpl b/modules/landing_zone_vsi/templates/afm_user_data.tpl
index c2f936af..acb947fd 100644
--- a/modules/landing_zone_vsi/templates/afm_user_data.tpl
+++ b/modules/landing_zone_vsi/templates/afm_user_data.tpl
@@ -8,13 +8,6 @@
#!/usr/bin/env bash
exec > >(tee /var/log/ibm_spectrumscale_user-data.log)
-if grep -E -q "CentOS|Red Hat" /etc/os-release
-then
- USER=vpcuser
-elif grep -q "Ubuntu" /etc/os-release
-then
- USER=ubuntu
-fi
sed -i -e "s/^/no-port-forwarding,no-agent-forwarding,no-X11-forwarding,command=\"echo \'Please client as the user \\\\\"$USER\\\\\" rather than the user \\\\\"root\\\\\".\';echo;sleep 5; exit 142\" /" /root/.ssh/authorized_keys
# input parameters
@@ -24,8 +17,7 @@ echo "StrictHostKeyChecking no" >> ~/.ssh/config
echo "${storage_private_key_content}" > ~/.ssh/id_rsa
chmod 600 ~/.ssh/id_rsa
-# if grep -q "Red Hat" /etc/os-release
-if grep -q "CentOS|Red Hat" /etc/os-release
+if grep -q "Red Hat" /etc/os-release
then
USER=vpcuser
REQ_PKG_INSTALLED=0
@@ -87,8 +79,8 @@ echo 'export PATH=$PATH:/usr/lpp/mmfs/bin' >> /root/.bashrc
echo "DOMAIN=${storage_dns_domain}" >> "/etc/sysconfig/network-scripts/ifcfg-${storage_interfaces}"
echo "MTU=9000" >> "/etc/sysconfig/network-scripts/ifcfg-${storage_interfaces}"
chage -I -1 -m 0 -M 99999 -E -1 -W 14 vpcuser
-sleep 120
systemctl restart NetworkManager
+hostnamectl set-hostname "$(hostname).${storage_dns_domain}"
systemctl stop firewalld
firewall-offline-cmd --zone=public --add-port=1191/tcp
diff --git a/modules/landing_zone_vsi/templates/client_user_data.tpl b/modules/landing_zone_vsi/templates/client_user_data.tpl
index 24abf3d3..e85c082d 100644
--- a/modules/landing_zone_vsi/templates/client_user_data.tpl
+++ b/modules/landing_zone_vsi/templates/client_user_data.tpl
@@ -9,13 +9,14 @@
exec > >(tee /var/log/ibm_spectrumscale_user-data.log)
-if grep -E -q "CentOS|Red Hat" /etc/os-release
-then
- USER=vpcuser
-elif grep -q "Ubuntu" /etc/os-release
-then
- USER=ubuntu
-fi
+sed -i -e "s/^/no-port-forwarding,no-agent-forwarding,no-X11-forwarding,command=\"echo \'Please login as the user \\\\\"$USER\\\\\" rather than the user \\\\\"root\\\\\".\';echo;sleep 5; exit 142\" /" /root/.ssh/authorized_keys
+
+# input parameters
+echo "${bastion_public_key_content}" >> ~/.ssh/authorized_keys
+echo "${client_public_key_content}" >> ~/.ssh/authorized_keys
+echo "StrictHostKeyChecking no" >> ~/.ssh/config
+echo "${client_private_key_content}" > ~/.ssh/id_rsa
+chmod 600 ~/.ssh/id_rsa
if grep -q "Red Hat" /etc/os-release
then
@@ -76,15 +77,8 @@ yum versionlock add $package_list
yum versionlock list
echo 'export PATH=$PATH:/usr/lpp/mmfs/bin' >> /root/.bashrc
-sed -i -e "s/^/no-port-forwarding,no-agent-forwarding,no-X11-forwarding,command=\"echo \'Please login as the user \\\\\"$USER\\\\\" rather than the user \\\\\"root\\\\\".\';echo;sleep 10; exit 142\" /" ~/.ssh/authorized_keys
-echo "${bastion_public_key_content}" >> /~/.ssh/authorized_keys
-echo "${client_public_key_content}" >> ~/.ssh/authorized_keys
-echo "StrictHostKeyChecking no" >> ~/.ssh/config
-echo "${client_private_key_content}" > ~/.ssh/id_rsa
-chmod 600 ~/.ssh/id_rsa
-
echo "DOMAIN=${client_dns_domain}" >> "/etc/sysconfig/network-scripts/ifcfg-${client_interfaces}"
echo "MTU=9000" >> "/etc/sysconfig/network-scripts/ifcfg-${client_interfaces}"
chage -I -1 -m 0 -M 99999 -E -1 -W 14 vpcuser
-sleep 120
systemctl restart NetworkManager
+hostnamectl set-hostname "$(hostname).${client_dns_domain}"
diff --git a/modules/landing_zone_vsi/templates/gklm_user_data.tpl b/modules/landing_zone_vsi/templates/gklm_user_data.tpl
index cb14c0eb..124288d3 100644
--- a/modules/landing_zone_vsi/templates/gklm_user_data.tpl
+++ b/modules/landing_zone_vsi/templates/gklm_user_data.tpl
@@ -1,17 +1,21 @@
+#!/bin/bash
+
###################################################
# Copyright (C) IBM Corp. 2023 All Rights Reserved.
# Licensed under the Apache License v2.0
###################################################
#!/bin/bash
-echo "0 $(hostname) 0" > /home/klmdb42/sqllib/db2nodes.cfg
+echo "0 $(hostname).${domain_name} 0" > /home/klmdb42/sqllib/db2nodes.cfg
systemctl start db2c_klmdb42.service
sleep 10
systemctl status db2c_klmdb42.service
sleep 10
#Copying SSH for passwordless authentication
-echo "${storage_private_key_content}" > ~/.ssh/id_rsa
-chmod 600 ~/.ssh/id_rsa
echo "${bastion_public_key_content}" >> ~/.ssh/authorized_keys
+echo "${storage_public_key_content}" >> ~/.ssh/authorized_keys
echo "StrictHostKeyChecking no" >> ~/.ssh/config
+echo "${storage_private_key_content}" > ~/.ssh/id_rsa
+chmod 600 ~/.ssh/id_rsa
+hostnamectl set-hostname "$(hostname).${domain_name}"
reboot
diff --git a/modules/landing_zone_vsi/templates/ldap_user_data.tpl b/modules/landing_zone_vsi/templates/ldap_user_data.tpl
index 1ffc145f..4e9cd337 100644
--- a/modules/landing_zone_vsi/templates/ldap_user_data.tpl
+++ b/modules/landing_zone_vsi/templates/ldap_user_data.tpl
@@ -18,7 +18,8 @@ sed -i -e "s/^/no-port-forwarding,no-agent-forwarding,no-X11-forwarding,command=
#input parameters
# input parameters
echo "${bastion_public_key_content}" >> ~/.ssh/authorized_keys
-echo "${compute_public_key_content}" >> ~/.ssh/authorized_keys
+echo "${cluster_public_key_content}" >> ~/.ssh/authorized_keys
echo "StrictHostKeyChecking no" >> ~/.ssh/config
-echo "${compute_private_key_content}" > ~/.ssh/id_rsa
+echo "${cluster_private_key_content}" > ~/.ssh/id_rsa
+hostnamectl set-hostname "$(hostname).${cluster_dns_domain}"
chmod 600 ~/.ssh/id_rsa
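
Between them, the updated templates now consume five entries of var.dns_domain_names: compute, storage, protocol, client, and gklm. Only the key names are implied by these hunks; a declaration consistent with that usage (the default values here are placeholders) would look like:

variable "dns_domain_names" {
  description = "DNS domain name per cluster tier"
  type        = map(string)
  default = {
    compute  = "comp.example.com"
    storage  = "strg.example.com"
    protocol = "ces.example.com"
    client   = "clnt.example.com"
    gklm     = "gklm.example.com"
  }
}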
diff --git a/modules/landing_zone_vsi/templates/protocol_bm_user_data.tpl b/modules/landing_zone_vsi/templates/protocol_bm_user_data.tpl
new file mode 100644
index 00000000..18ad5558
--- /dev/null
+++ b/modules/landing_zone_vsi/templates/protocol_bm_user_data.tpl
@@ -0,0 +1,32 @@
+#!/bin/bash
+
+###################################################
+# Copyright (C) IBM Corp. 2023 All Rights Reserved.
+# Licensed under the Apache License v2.0
+###################################################
+
+# Setup logging
+exec > >(tee /var/log/ibm_spectrumscale_user-data.log)
+exec 2>&1
+set -e
+
+# Configure SSH
+mkdir -p ~/.ssh
+chmod 700 ~/.ssh
+echo "${storage_public_key_content}" >> ~/.ssh/authorized_keys
+echo "${bastion_public_key_content}" >> ~/.ssh/authorized_keys
+echo "StrictHostKeyChecking no" >> ~/.ssh/config
+echo "${storage_private_key_content}" > ~/.ssh/id_rsa
+chmod 600 ~/.ssh/id_rsa ~/.ssh/authorized_keys
+ethtool -L eth0 combined 16
+
+# Banner configuration
+echo "###########################################################################################" >> /etc/motd
+echo "# You have logged in to Protocol BareMetal Server. #" >> /etc/motd
+echo "# #" >> /etc/motd
+echo "# Refer: https://cloud.ibm.com/docs/vpc?topic=vpc-bare-metal-servers-storage #" >> /etc/motd
+echo "###########################################################################################" >> /etc/motd
+
+# Create completion marker
+touch /var/user_data_complete
+echo "User data script completed successfully at $(date)"
diff --git a/modules/landing_zone_vsi/templates/protocol_bootdrive_user_data/cloud_init.yml b/modules/landing_zone_vsi/templates/protocol_bootdrive_user_data/cloud_init.yml
new file mode 100644
index 00000000..44902e2d
--- /dev/null
+++ b/modules/landing_zone_vsi/templates/protocol_bootdrive_user_data/cloud_init.yml
@@ -0,0 +1,123 @@
+#cloud-config
+growpart:
+ mode: off
+ devices: ['/']
+resize_rootfs: false
+write_files:
+ - content: |
+ #!/usr/bin/env bash
+ # Setup logging
+ exec > >(tee /var/log/ibm_spectrumscale_user-data.log)
+ exec 2>&1
+ set -e
+
+ # Configure SSH
+ mkdir -p ~/.ssh
+ chmod 700 ~/.ssh
+ echo "${storage_public_key_content}" | base64 --decode >> /root/.ssh/authorized_keys
+ echo "${bastion_public_key_content}" | base64 --decode >> /root/.ssh/authorized_keys
+ echo "StrictHostKeyChecking no" >> ~/.ssh/config
+ echo "${storage_private_key_content}" | base64 --decode >> ~/.ssh/id_rsa
+ chmod 600 ~/.ssh/id_rsa ~/.ssh/authorized_keys
+ ethtool -L eth0 combined 16
+
+ # Banner configuration
+ echo "###########################################################################################" >> /etc/motd
+ echo "# You have logged in to Storage BareMetal Server. #" >> /etc/motd
+ echo "# #" >> /etc/motd
+ echo "# Refer: https://cloud.ibm.com/docs/vpc?topic=vpc-bare-metal-servers-storage #" >> /etc/motd
+ echo "###########################################################################################" >> /etc/motd
+ USER=vpcuser
+ PACKAGE_MGR=dnf
+ package_list="python38 kernel-devel-$(uname -r) kernel-headers-$(uname -r) firewalld numactl jq make gcc-c++ elfutils-libelf-devel bind-utils iptables nfs-utils elfutils elfutils-devel python3-dnf-plugin-versionlock cryptsetup clevis clevis-luks clevis-dracut tpm2-tools"
+ $PACKAGE_MGR install -y $package_list
+ yum update --security -y
+ yum versionlock add $package_list
+ yum versionlock list
+ echo 'export PATH=$PATH:/usr/lpp/mmfs/bin' >> /root/.bashrc
+ # Create completion marker
+ touch /var/user_data_complete
+ echo "User data script completed successfully at $(date)"
+ path: /usr/local/bin/scale_user_data.sh
+ permissions: '0755'
+ - content: |
+ #!/bin/bash
+ # This script encrypts the root partition of a Red Hat 8/9 stock IBM Cloud
+ # image using the TPM to encrypt the LUKS keys. It assumes there is plenty
+ # of unpartitioned space on the drive, and leaves the current root partition
+ # for rescue boot (but this could be deleted on a subsequent boot).
+ #
+ # * Create a new partition on the drive using all free space
+ # * Encrypt the new partition using LUKS with a known passphrase
+ # * Use 'clevis' to create an additional LUKS passphrase that is bound to the TPM
+ # * Re-generate initramfs via dracut to ensure the root drive is auto-unlocked on boot
+ # * Copy the current root filesystem to the new drive
+ # * Update fstab and crypttab for auto-mounting
+ # * Update grub to boot using the newly encrypted root drive
+ #
+ echo "Encrypt my boot drive"
+ # Determine the boot device (minus partition name)
+ # Assumes 'sdaX' or 'nvmeXnYpZ'
+ device=$(mount | grep "on / type" | awk '{print $1}')
+ if [[ "$device" =~ "nvme" ]]; then
+ device=$${device%??}
+ else
+ device=$${device%?}
+ fi
+ echo $device
+ # Create a root partition filling up the rest of the drive
+ echo -e 'n\np\n\n\n\nw' | fdisk $${device}
+ partition=$(fdisk -l $device | grep $device | tail -1 | awk '{print $1}')
+ echo $partition
+ # Setup encryption on the drive with a well known passphrase, and format the filesystem
+ echo -n n0tsecret | cryptsetup luksFormat --type luks2 -q --force-password $partition
+ echo -n n0tsecret | cryptsetup open $partition root
+ mkfs.xfs /dev/mapper/root
+ # Add the TPM key to the LUKS encrypted drive.
+ # For additional security, you can bind it to specific TPM PCR banks, but this will cause the TPM unlock
+ # to fail when the bank changes (e.g. firmware is updated). If you want to bind it to a PCR:
+ # ,"pcr_bank":"sha256","pcr_ids":"7"
+ echo -n n0tsecret | clevis luks bind -y -k - -d $partition tpm2 '{"hash":"sha256","key":"rsa"}'
+ # Regenerate dracut initramfs to allow unlock on boot
+ dracut -fv --regenerate-all
+ # Copy the OS into the encrypted partition
+ mkdir /mnt/encryptedroot
+ mount /dev/mapper/root /mnt/encryptedroot
+ rsync -a --exclude='/proc/*' --exclude='/sys/*' --exclude='/boot' --exclude='/mnt/encryptedroot' / /mnt/encryptedroot
+ # Grab the UUID for the encrypted partition and setup the crypttab
+ uuid=$(lsblk -lfi -o NAME,FSTYPE,UUID | grep crypto_LUKS | awk '{print $3}')
+ echo "root UUID=$${uuid} none luks" > /mnt/encryptedroot/etc/crypttab
+ # Replace root with '/dev/mapper/root / xfs defaults 0 1' in fstab
+ sed -i "/\t\/\t/c/dev/mapper/root\t/\txfs\tdefaults\t0\t1" /mnt/encryptedroot/etc/fstab
+ # Setup grub
+ # Grab default cmdline args
+ args=$(grep CMDLINE_LINUX /etc/default/grub | sed 's/.*GRUB_CMDLINE_LINUX=//' | sed 's/\"//g')
+ # Update grub and set the new entry to be the default.
+ grubby --add-kernel="/boot/vmlinuz-$(uname -r)" \
+ --title="Boot from encrypted root" \
+ --initrd="/boot/initramfs-$(uname -r).img" \
+ --args "$${args} root=/dev/mapper/root rd.luks.name=$${uuid}=root" \
+ --make-default
+ # Since we use EFI, copy the grubenv over (note the \cp is not a typo,
+ # it ensures that the 'cp' alias isn't used.)
+ efidir=$(ls /boot/efi/EFI/ | grep -v BOOT)
+ \cp -f /boot/grub2/grubenv /boot/efi/EFI/$${efidir}/
+ # We MUST have a separate /boot partition to host the kernel and initramfs unencrypted
+ # as these are needed to unlock the root drive. The IBM Cloud RHEL 9.x images have
+ # a separate boot partition, but 8.x do not.
+ # If we don't have a separate /boot partition, we'll use the current root partition
+ # as /boot. So copy the current /boot content into the root of the filesystem.
+ if ! lsblk -l | grep /boot$; then
+ rsync -a --exclude='/efi*' /boot/ /
+ # Current root device UUID - it will become boot device uuid
+ curr_root_uuid=$(lsblk -fl | grep /$ | awk '{print $4}')
+ # Add the new /boot partition to fstab for auto-mounting.
+ echo -e "UUID=$${curr_root_uuid}\t/boot\txfs\tdefaults\t0\t0" >> /mnt/encryptedroot/etc/fstab
+ fi
+ # Reboot the system
+ shutdown -r now
+ path: /usr/local/bin/boot_drive_encryption.sh
+ permissions: '0755'
+runcmd:
+ - /usr/local/bin/scale_user_data.sh
+ - /usr/local/bin/boot_drive_encryption.sh
diff --git a/modules/landing_zone_vsi/templates/protocol_user_data.tpl b/modules/landing_zone_vsi/templates/protocol_user_data.tpl
index 4fafedc1..952600a2 100644
--- a/modules/landing_zone_vsi/templates/protocol_user_data.tpl
+++ b/modules/landing_zone_vsi/templates/protocol_user_data.tpl
@@ -8,13 +8,6 @@
#!/usr/bin/env bash
exec > >(tee /var/log/ibm_spectrumscale_user-data.log)
-if grep -E -q "CentOS|Red Hat" /etc/os-release
-then
- USER=vpcuser
-elif grep -q "Ubuntu" /etc/os-release
-then
- USER=ubuntu
-fi
sed -i -e "s/^/no-port-forwarding,no-agent-forwarding,no-X11-forwarding,command=\"echo \'Please client as the user \\\\\"$USER\\\\\" rather than the user \\\\\"root\\\\\".\';echo;sleep 5; exit 142\" /" /root/.ssh/authorized_keys
# input parameters
@@ -87,8 +80,8 @@ echo 'export PATH=$PATH:/usr/lpp/mmfs/bin' >> /root/.bashrc
echo "DOMAIN=${storage_dns_domain}" >> "/etc/sysconfig/network-scripts/ifcfg-${storage_interfaces}"
echo "MTU=9000" >> "/etc/sysconfig/network-scripts/ifcfg-${storage_interfaces}"
chage -I -1 -m 0 -M 99999 -E -1 -W 14 vpcuser
-sleep 120
systemctl restart NetworkManager
+hostnamectl set-hostname "$(hostname).${storage_dns_domain}"
systemctl stop firewalld
firewall-offline-cmd --zone=public --add-port=1191/tcp
diff --git a/modules/landing_zone_vsi/templates/scale_compute_user_data.tpl b/modules/landing_zone_vsi/templates/scale_compute_user_data.tpl
index 605ea6f8..27cd26ac 100644
--- a/modules/landing_zone_vsi/templates/scale_compute_user_data.tpl
+++ b/modules/landing_zone_vsi/templates/scale_compute_user_data.tpl
@@ -1,25 +1,17 @@
+#!/usr/bin/bash
+
###################################################
# Copyright (C) IBM Corp. 2023 All Rights Reserved.
# Licensed under the Apache License v2.0
###################################################
-##################################################################################################################
-# Scale Compute Cluter User Data
-##################################################################################################################
-
#!/usr/bin/env bash
exec > >(tee /var/log/ibm_spectrumscale_user-data.log)
-if grep -E -q "CentOS|Red Hat" /etc/os-release
-then
- USER=vpcuser
-elif grep -q "Ubuntu" /etc/os-release
-then
- USER=ubuntu
-fi
+sed -i -e "s/^/no-port-forwarding,no-agent-forwarding,no-X11-forwarding,command=\"echo \'Please login as the user \\\\\"$USER\\\\\" rather than the user \\\\\"root\\\\\".\';echo;sleep 5; exit 142\" /" /root/.ssh/authorized_keys
-sed -i -e "s/^/no-port-forwarding,no-agent-forwarding,no-X11-forwarding,command=\"echo \'Please login as the user \\\\\"$USER\\\\\" rather than the user \\\\\"root\\\\\".\';echo;sleep 10; exit 142\" /" ~/.ssh/authorized_keys
+# input parameters
echo "${bastion_public_key_content}" >> ~/.ssh/authorized_keys
echo "${compute_public_key_content}" >> ~/.ssh/authorized_keys
echo "StrictHostKeyChecking no" >> ~/.ssh/config
@@ -88,8 +80,8 @@ echo 'export PATH=$PATH:/usr/lpp/mmfs/bin' >> /root/.bashrc
echo "DOMAIN=${compute_dns_domain}" >> "/etc/sysconfig/network-scripts/ifcfg-${compute_interfaces}"
echo "MTU=9000" >> "/etc/sysconfig/network-scripts/ifcfg-${compute_interfaces}"
chage -I -1 -m 0 -M 99999 -E -1 -W 14 vpcuser
-sleep 120
systemctl restart NetworkManager
+hostnamectl set-hostname "$(hostname).${compute_dns_domain}"
systemctl stop firewalld
firewall-offline-cmd --zone=public --add-port=1191/tcp
@@ -109,3 +101,12 @@ firewall-offline-cmd --zone=public --add-service=https
systemctl start firewalld
systemctl enable firewalld
+
+if [ "${enable_sec_interface_compute}" == true ]; then
+ sec_interface=$(nmcli -t con show --active | grep eth1 | cut -d ':' -f 1)
+ nmcli conn del "$sec_interface"
+ nmcli con add type ethernet con-name eth1 ifname eth1
+ echo "DOMAIN=\"${storage_dns_domain}\"" >> "/etc/sysconfig/network-scripts/ifcfg-${protocol_interfaces}"
+ echo "MTU=9000" >> "/etc/sysconfig/network-scripts/ifcfg-${protocol_interfaces}"
+ systemctl restart NetworkManager
+fi
diff --git a/modules/landing_zone_vsi/templates/storage_bm_user_data.tpl b/modules/landing_zone_vsi/templates/storage_bm_user_data.tpl
new file mode 100644
index 00000000..bfdad10a
--- /dev/null
+++ b/modules/landing_zone_vsi/templates/storage_bm_user_data.tpl
@@ -0,0 +1,32 @@
+#!/bin/bash
+
+###################################################
+# Copyright (C) IBM Corp. 2023 All Rights Reserved.
+# Licensed under the Apache License v2.0
+###################################################
+
+# Setup logging
+exec > >(tee /var/log/ibm_spectrumscale_user-data.log)
+exec 2>&1
+set -e
+
+# Configure SSH
+mkdir -p ~/.ssh
+chmod 700 ~/.ssh
+echo "${storage_public_key_content}" >> ~/.ssh/authorized_keys
+echo "${bastion_public_key_content}" >> ~/.ssh/authorized_keys
+echo "StrictHostKeyChecking no" >> ~/.ssh/config
+echo "${storage_private_key_content}" > ~/.ssh/id_rsa
+chmod 600 ~/.ssh/id_rsa ~/.ssh/authorized_keys
+ethtool -L eth0 combined 16
+
+# Banner configuration
+echo "###########################################################################################" >> /etc/motd
+echo "# You have logged in to Storage BareMetal Server. #" >> /etc/motd
+echo "# #" >> /etc/motd
+echo "# Refer: https://cloud.ibm.com/docs/vpc?topic=vpc-bare-metal-servers-storage #" >> /etc/motd
+echo "###########################################################################################" >> /etc/motd
+
+# Create completion marker
+touch /var/user_data_complete
+echo "User data script completed successfully at $(date)"
diff --git a/modules/landing_zone_vsi/templates/storage_bmtb_user_data.tpl b/modules/landing_zone_vsi/templates/storage_bmtb_user_data.tpl
new file mode 100644
index 00000000..bfdad10a
--- /dev/null
+++ b/modules/landing_zone_vsi/templates/storage_bmtb_user_data.tpl
@@ -0,0 +1,32 @@
+#!/bin/bash
+
+###################################################
+# Copyright (C) IBM Corp. 2023 All Rights Reserved.
+# Licensed under the Apache License v2.0
+###################################################
+
+# Setup logging
+exec > >(tee /var/log/ibm_spectrumscale_user-data.log)
+exec 2>&1
+set -e
+
+# Configure SSH
+mkdir -p ~/.ssh
+chmod 700 ~/.ssh
+echo "${storage_public_key_content}" >> ~/.ssh/authorized_keys
+echo "${bastion_public_key_content}" >> ~/.ssh/authorized_keys
+echo "StrictHostKeyChecking no" >> ~/.ssh/config
+echo "${storage_private_key_content}" > ~/.ssh/id_rsa
+chmod 600 ~/.ssh/id_rsa ~/.ssh/authorized_keys
+ethtool -L eth0 combined 16
+
+# Banner configuration
+echo "###########################################################################################" >> /etc/motd
+echo "# You have logged in to Storage BareMetal Server. #" >> /etc/motd
+echo "# #" >> /etc/motd
+echo "# Refer: https://cloud.ibm.com/docs/vpc?topic=vpc-bare-metal-servers-storage #" >> /etc/motd
+echo "###########################################################################################" >> /etc/motd
+
+# Create completion marker
+touch /var/user_data_complete
+echo "User data script completed successfully at $(date)"
diff --git a/modules/landing_zone_vsi/templates/storage_bootdrive_user_data/cloud_init.yml b/modules/landing_zone_vsi/templates/storage_bootdrive_user_data/cloud_init.yml
new file mode 100644
index 00000000..44902e2d
--- /dev/null
+++ b/modules/landing_zone_vsi/templates/storage_bootdrive_user_data/cloud_init.yml
@@ -0,0 +1,123 @@
+#cloud-config
+growpart:
+ mode: off
+ devices: ['/']
+resize_rootfs: false
+write_files:
+ - content: |
+ #!/usr/bin/env bash
+ # Setup logging
+ exec > >(tee /var/log/ibm_spectrumscale_user-data.log)
+ exec 2>&1
+ set -e
+
+ # Configure SSH
+ mkdir -p ~/.ssh
+ chmod 700 ~/.ssh
+ echo "${storage_public_key_content}" | base64 --decode >> /root/.ssh/authorized_keys
+ echo "${bastion_public_key_content}" | base64 --decode >> /root/.ssh/authorized_keys
+ echo "StrictHostKeyChecking no" >> ~/.ssh/config
+ echo "${storage_private_key_content}" | base64 --decode >> ~/.ssh/id_rsa
+ chmod 600 ~/.ssh/id_rsa ~/.ssh/authorized_keys
+ ethtool -L eth0 combined 16
+
+ # Banner configuration
+ echo "###########################################################################################" >> /etc/motd
+ echo "# You have logged in to Storage BareMetal Server. #" >> /etc/motd
+ echo "# #" >> /etc/motd
+ echo "# Refer: https://cloud.ibm.com/docs/vpc?topic=vpc-bare-metal-servers-storage #" >> /etc/motd
+ echo "###########################################################################################" >> /etc/motd
+ USER=vpcuser
+ PACKAGE_MGR=dnf
+ package_list="python38 kernel-devel-$(uname -r) kernel-headers-$(uname -r) firewalld numactl jq make gcc-c++ elfutils-libelf-devel bind-utils iptables nfs-utils elfutils elfutils-devel python3-dnf-plugin-versionlock cryptsetup clevis clevis-luks clevis-dracut tpm2-tools"
+ $PACKAGE_MGR install -y $package_list
+ yum update --security -y
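+      # Lock the installed package versions so later updates cannot replace the pinned kernel packages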
+ yum versionlock add $package_list
+ yum versionlock list
+ echo 'export PATH=$PATH:/usr/lpp/mmfs/bin' >> /root/.bashrc
+ # Create completion marker
+ touch /var/user_data_complete
+ echo "User data script completed successfully at $(date)"
+ path: /usr/local/bin/scale_user_data.sh
+ permissions: '0755'
+ - content: |
+ #!/bin/bash
+      # This script encrypts the root partition of a Red Hat 8/9 stock IBM Cloud
+      # image using the TPM to encrypt the LUKS keys. It assumes there is plenty
+      # of unpartitioned space on the drive, and leaves the current root partition
+ # for rescue boot (but this could be deleted on a subsequent boot).
+ #
+ # * Create a new partition on the drive using all free space
+ # * Encrypt the new partition using LUKS with a known passphrase
+ # * Use 'clevis' to create an additional LUKS passphrase that is bound to the TPM
+ # * Re-generate initramfs via dracut to ensure the root drive is auto-unlocked on boot
+ # * Copy the current root filesystem to the new drive
+ # * Update fstab and crypttab for auto-mounting
+ # * Update grub to boot using the newly encrypted root drive
+ #
+ echo "Encrypt my boot drive"
+ # Determine the boot device (minus partition name)
+ # Assumes 'sdaX' or 'nvmeXnYpZ'
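+      # ('%??' strips a trailing 'pN' from nvme device names; '%?' strips the partition digit from 'sdaN')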
+ device=$(mount | grep "on / type" | awk '{print $1}')
+ if [[ "$device" =~ "nvme" ]]; then
+ device=$${device%??}
+ else
+ device=$${device%?}
+ fi
+ echo $device
+ # Create a root partition filling up the rest of the drive
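+      # fdisk answers: n (new), p (primary), defaults for partition number/first/last sector, w (write)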
+ echo -e 'n\np\n\n\n\nw' | fdisk $${device}
+ partition=$(fdisk -l $device | grep $device | tail -1 | awk '{print $1}')
+ echo $partition
+ # Setup encryption on the drive with a well known passphrase, and format the filesystem
+ echo -n n0tsecret | cryptsetup luksFormat --type luks2 -q --force-password $partition
+ echo -n n0tsecret | cryptsetup open $partition root
+ mkfs.xfs /dev/mapper/root
+ # Add the TPM key to the LUKS encrypted drive.
+ # For additional security, you can bind it to specific TPM PCR banks, but this will cause the TPM unlock
+      # to fail when the bank changes (e.g. firmware is updated). If you want to bind it to a PCR:
+ # ,"pcr_bank":"sha256","pcr_ids":"7"
+ echo -n n0tsecret | clevis luks bind -y -k - -d $partition tpm2 '{"hash":"sha256","key":"rsa"}'
+ # Regenerate dracut initramfs to allow unlock on boot
+ dracut -fv --regenerate-all
+ # Copy the OS into the encrypted partition
+ mkdir /mnt/encryptedroot
+ mount /dev/mapper/root /mnt/encryptedroot
+ rsync -a --exclude='/proc/*' --exclude='/sys/*' --exclude='/boot' --exclude='/mnt/encryptedroot' / /mnt/encryptedroot
+ # Grab the UUID for the encrypted partition and setup the crypttab
+ uuid=$(lsblk -lfi -o NAME,FSTYPE,UUID | grep crypto_LUKS | awk '{print $3}')
+ echo "root UUID=$${uuid} none luks" > /mnt/encryptedroot/etc/crypttab
+ # Replace root with '/dev/mapper/root / xfs defaults 0 1' in fstab
+ sed -i "/\t\/\t/c/dev/mapper/root\t/\txfs\tdefaults\t0\t1" /mnt/encryptedroot/etc/fstab
+ # Setup grub
+ # Grab default cmdline args
+ args=$(grep CMDLINE_LINUX /etc/default/grub | sed 's/.*GRUB_CMDLINE_LINUX=//' | sed 's/\"//g')
+ # Update grub and set the new entry to be the default.
+ grubby --add-kernel="/boot/vmlinuz-$(uname -r)" \
+ --title="Boot from encrypted root" \
+ --initrd="/boot/initramfs-$(uname -r).img" \
+ --args "$${args} root=/dev/mapper/root rd.luks.name=$${uuid}=root" \
+ --make-default
+ # Since we use EFI, copy the grubenv over (note the \cp is not a typo,
+ # it ensures that the 'cp' alias isn't used.)
+ efidir=$(ls /boot/efi/EFI/ | grep -v BOOT)
+ \cp -f /boot/grub2/grubenv /boot/efi/EFI/$${efidir}/
+      # We MUST have a separate /boot partition to host the kernel and initramfs unencrypted,
+      # as these are needed to unlock the root drive. The IBM Cloud RHEL 9.x images have
+      # a separate boot partition, but 8.x do not.
+      # If we don't have a separate /boot partition, we'll use the current root partition
+      # as /boot. So copy the current /boot content into the root of the filesystem.
+ if ! lsblk -l | grep /boot$; then
+ rsync -a --exclude='/efi*' /boot/ /
+ # Current root device UUID - it will become boot device uuid
+ curr_root_uuid=$(lsblk -fl | grep /$ | awk '{print $4}')
+ # Add the new /boot partition to fstab for auto-mounting.
+ echo -e "UUID=$${curr_root_uuid}\t/boot\txfs\tdefaults\t0\t0" >> /mnt/encryptedroot/etc/fstab
+ fi
+ # Reboot the system
+ shutdown -r now
+ path: /usr/local/bin/boot_drive_encryption.sh
+ permissions: '0755'
+runcmd:
+ - /usr/local/bin/scale_user_data.sh
+ - /usr/local/bin/boot_drive_encryption.sh
diff --git a/modules/landing_zone_vsi/templates/storage_tb_bootdrive_user_data/cloud_init.yml b/modules/landing_zone_vsi/templates/storage_tb_bootdrive_user_data/cloud_init.yml
new file mode 100644
index 00000000..44902e2d
--- /dev/null
+++ b/modules/landing_zone_vsi/templates/storage_tb_bootdrive_user_data/cloud_init.yml
@@ -0,0 +1,123 @@
+#cloud-config
+growpart:
+ mode: off
+ devices: ['/']
+resize_rootfs: false
+write_files:
+ - content: |
+ #!/usr/bin/env bash
+ # Setup logging
+ exec > >(tee /var/log/ibm_spectrumscale_user-data.log)
+ exec 2>&1
+ set -e
+
+ # Configure SSH
+ mkdir -p ~/.ssh
+ chmod 700 ~/.ssh
+ echo "${storage_public_key_content}" | base64 --decode >> /root/.ssh/authorized_keys
+ echo "${bastion_public_key_content}" | base64 --decode >> /root/.ssh/authorized_keys
+ echo "StrictHostKeyChecking no" >> ~/.ssh/config
+ echo "${storage_private_key_content}" | base64 --decode >> ~/.ssh/id_rsa
+ chmod 600 ~/.ssh/id_rsa ~/.ssh/authorized_keys
+ ethtool -L eth0 combined 16
+
+ # Banner configuration
+ echo "###########################################################################################" >> /etc/motd
+ echo "# You have logged in to Storage BareMetal Server. #" >> /etc/motd
+ echo "# #" >> /etc/motd
+ echo "# Refer: https://cloud.ibm.com/docs/vpc?topic=vpc-bare-metal-servers-storage #" >> /etc/motd
+ echo "###########################################################################################" >> /etc/motd
+ USER=vpcuser
+ PACKAGE_MGR=dnf
+ package_list="python38 kernel-devel-$(uname -r) kernel-headers-$(uname -r) firewalld numactl jq make gcc-c++ elfutils-libelf-devel bind-utils iptables nfs-utils elfutils elfutils-devel python3-dnf-plugin-versionlock cryptsetup clevis clevis-luks clevis-dracut tpm2-tools"
+ $PACKAGE_MGR install -y $package_list
+ yum update --security -y
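+      # Lock the installed package versions so later updates cannot replace the pinned kernel packages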
+ yum versionlock add $package_list
+ yum versionlock list
+ echo 'export PATH=$PATH:/usr/lpp/mmfs/bin' >> /root/.bashrc
+ # Create completion marker
+ touch /var/user_data_complete
+ echo "User data script completed successfully at $(date)"
+ path: /usr/local/bin/scale_user_data.sh
+ permissions: '0755'
+ - content: |
+ #!/bin/bash
+      # This script encrypts the root partition of a Red Hat 8/9 stock IBM Cloud
+      # image using the TPM to encrypt the LUKS keys. It assumes there is plenty
+      # of unpartitioned space on the drive, and leaves the current root partition
+ # for rescue boot (but this could be deleted on a subsequent boot).
+ #
+ # * Create a new partition on the drive using all free space
+ # * Encrypt the new partition using LUKS with a known passphrase
+ # * Use 'clevis' to create an additional LUKS passphrase that is bound to the TPM
+ # * Re-generate initramfs via dracut to ensure the root drive is auto-unlocked on boot
+ # * Copy the current root filesystem to the new drive
+ # * Update fstab and crypttab for auto-mounting
+ # * Update grub to boot using the newly encrypted root drive
+ #
+ echo "Encrypt my boot drive"
+ # Determine the boot device (minus partition name)
+ # Assumes 'sdaX' or 'nvmeXnYpZ'
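+      # ('%??' strips a trailing 'pN' from nvme device names; '%?' strips the partition digit from 'sdaN')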
+ device=$(mount | grep "on / type" | awk '{print $1}')
+ if [[ "$device" =~ "nvme" ]]; then
+ device=$${device%??}
+ else
+ device=$${device%?}
+ fi
+ echo $device
+ # Create a root partition filling up the rest of the drive
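+      # fdisk answers: n (new), p (primary), defaults for partition number/first/last sector, w (write)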
+ echo -e 'n\np\n\n\n\nw' | fdisk $${device}
+ partition=$(fdisk -l $device | grep $device | tail -1 | awk '{print $1}')
+ echo $partition
+ # Setup encryption on the drive with a well known passphrase, and format the filesystem
+ echo -n n0tsecret | cryptsetup luksFormat --type luks2 -q --force-password $partition
+ echo -n n0tsecret | cryptsetup open $partition root
+ mkfs.xfs /dev/mapper/root
+ # Add the TPM key to the LUKS encrypted drive.
+ # For additional security, you can bind it to specific TPM PCR banks, but this will cause the TPM unlock
+      # to fail when the bank changes (e.g. firmware is updated). If you want to bind it to a PCR:
+ # ,"pcr_bank":"sha256","pcr_ids":"7"
+ echo -n n0tsecret | clevis luks bind -y -k - -d $partition tpm2 '{"hash":"sha256","key":"rsa"}'
+ # Regenerate dracut initramfs to allow unlock on boot
+ dracut -fv --regenerate-all
+ # Copy the OS into the encrypted partition
+ mkdir /mnt/encryptedroot
+ mount /dev/mapper/root /mnt/encryptedroot
+ rsync -a --exclude='/proc/*' --exclude='/sys/*' --exclude='/boot' --exclude='/mnt/encryptedroot' / /mnt/encryptedroot
+ # Grab the UUID for the encrypted partition and setup the crypttab
+ uuid=$(lsblk -lfi -o NAME,FSTYPE,UUID | grep crypto_LUKS | awk '{print $3}')
+ echo "root UUID=$${uuid} none luks" > /mnt/encryptedroot/etc/crypttab
+ # Replace root with '/dev/mapper/root / xfs defaults 0 1' in fstab
+ sed -i "/\t\/\t/c/dev/mapper/root\t/\txfs\tdefaults\t0\t1" /mnt/encryptedroot/etc/fstab
+ # Setup grub
+ # Grab default cmdline args
+ args=$(grep CMDLINE_LINUX /etc/default/grub | sed 's/.*GRUB_CMDLINE_LINUX=//' | sed 's/\"//g')
+ # Update grub and set the new entry to be the default.
+ grubby --add-kernel="/boot/vmlinuz-$(uname -r)" \
+ --title="Boot from encrypted root" \
+ --initrd="/boot/initramfs-$(uname -r).img" \
+ --args "$${args} root=/dev/mapper/root rd.luks.name=$${uuid}=root" \
+ --make-default
+ # Since we use EFI, copy the grubenv over (note the \cp is not a typo,
+ # it ensures that the 'cp' alias isn't used.)
+ efidir=$(ls /boot/efi/EFI/ | grep -v BOOT)
+ \cp -f /boot/grub2/grubenv /boot/efi/EFI/$${efidir}/
+      # We MUST have a separate /boot partition to host the kernel and initramfs unencrypted,
+      # as these are needed to unlock the root drive. The IBM Cloud RHEL 9.x images have
+      # a separate boot partition, but 8.x do not.
+      # If we don't have a separate /boot partition, we'll use the current root partition
+      # as /boot. So copy the current /boot content into the root of the filesystem.
+ if ! lsblk -l | grep /boot$; then
+ rsync -a --exclude='/efi*' /boot/ /
+ # Current root device UUID - it will become boot device uuid
+ curr_root_uuid=$(lsblk -fl | grep /$ | awk '{print $4}')
+ # Add the new /boot partition to fstab for auto-mounting.
+ echo -e "UUID=$${curr_root_uuid}\t/boot\txfs\tdefaults\t0\t0" >> /mnt/encryptedroot/etc/fstab
+ fi
+ # Reboot the system
+ shutdown -r now
+ path: /usr/local/bin/boot_drive_encryption.sh
+ permissions: '0755'
+runcmd:
+ - /usr/local/bin/scale_user_data.sh
+ - /usr/local/bin/boot_drive_encryption.sh
diff --git a/modules/landing_zone_vsi/templates/storage_user_data.tpl b/modules/landing_zone_vsi/templates/storage_user_data.tpl
index 3b33285e..8dfcd250 100644
--- a/modules/landing_zone_vsi/templates/storage_user_data.tpl
+++ b/modules/landing_zone_vsi/templates/storage_user_data.tpl
@@ -8,13 +8,6 @@
#!/usr/bin/env bash
exec > >(tee /var/log/ibm_spectrumscale_user-data.log)
-if grep -E -q "CentOS|Red Hat" /etc/os-release
-then
- USER=vpcuser
-elif grep -q "Ubuntu" /etc/os-release
-then
- USER=ubuntu
-fi
sed -i -e "s/^/no-port-forwarding,no-agent-forwarding,no-X11-forwarding,command=\"echo \'Please login as the user \\\\\"$USER\\\\\" rather than the user \\\\\"root\\\\\".\';echo;sleep 5; exit 142\" /" /root/.ssh/authorized_keys
# input parameters
@@ -24,8 +17,7 @@ echo "StrictHostKeyChecking no" >> ~/.ssh/config
echo "${storage_private_key_content}" > ~/.ssh/id_rsa
chmod 600 ~/.ssh/id_rsa
-# if grep -q "Red Hat" /etc/os-release
-if grep -q "CentOS|Red Hat" /etc/os-release
+if grep -q "Red Hat" /etc/os-release
then
USER=vpcuser
REQ_PKG_INSTALLED=0
@@ -100,8 +92,8 @@ fi
echo "DOMAIN=${storage_dns_domain}" >> "/etc/sysconfig/network-scripts/ifcfg-${storage_interfaces}"
echo "MTU=9000" >> "/etc/sysconfig/network-scripts/ifcfg-${storage_interfaces}"
chage -I -1 -m 0 -M 99999 -E -1 -W 14 vpcuser
-sleep 120
systemctl restart NetworkManager
+hostnamectl set-hostname "$(hostname).${storage_dns_domain}"
systemctl stop firewalld
firewall-offline-cmd --zone=public --add-port=1191/tcp
@@ -122,12 +114,21 @@ firewall-offline-cmd --zone=public --add-port=30000-61000/udp
systemctl start firewalld
systemctl enable firewalld
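+# Rebuild the secondary (eth1) connection so NetworkManager manages it, then set
+# the DNS search domain and jumbo-frame MTU on the protocol interface config file.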
+if [ "${enable_sec_interface_storage}" == true ]; then
+ sec_interface=$(nmcli -t con show --active | grep eth1 | cut -d ':' -f 1)
+ nmcli conn del "$sec_interface"
+ nmcli con add type ethernet con-name eth1 ifname eth1
+ echo "DOMAIN=\"${storage_dns_domain}\"" >> "/etc/sysconfig/network-scripts/ifcfg-${protocol_interfaces}"
+ echo "MTU=9000" >> "/etc/sysconfig/network-scripts/ifcfg-${protocol_interfaces}"
+ systemctl restart NetworkManager
+fi
+
if [ "${enable_protocol}" == true ]; then
sec_interface=$(nmcli -t con show --active | grep eth1 | cut -d ':' -f 1)
nmcli conn del "$sec_interface"
nmcli con add type ethernet con-name eth1 ifname eth1
- echo "DOMAIN=\"${protocol_dns_domain}\"" >> "/etc/sysconfig/network-scripts/ifcfg-eth1"
- echo "MTU=9000" >> "/etc/sysconfig/network-scripts/ifcfg-eth1"
+ echo "DOMAIN=${protocol_dns_domain}" >> "/etc/sysconfig/network-scripts/ifcfg-${protocol_interfaces}"
+ echo "MTU=9000" >> "/etc/sysconfig/network-scripts/ifcfg-${protocol_interfaces}"
systemctl restart NetworkManager
###### TODO: Fix Me ######
echo 'export IC_REGION=${vpc_region}' >> /root/.bashrc
diff --git a/modules/landing_zone_vsi/variables.tf b/modules/landing_zone_vsi/variables.tf
index b22f7e97..a2a52378 100644
--- a/modules/landing_zone_vsi/variables.tf
+++ b/modules/landing_zone_vsi/variables.tf
@@ -111,7 +111,7 @@ variable "client_instances" {
description = "Number of instances to be launched for client."
}
-variable "cluster_subnet_id" {
+variable "compute_subnet_id" {
type = list(object({
name = string
id = string
@@ -215,8 +215,8 @@ variable "storage_instances" {
})
)
default = [{
- profile = "bx2-2x8"
- count = 2
+ profile = "bx2d-32x128"
+ count = 0
image = "ibm-redhat-8-10-minimal-amd64-4"
filesystem_name = "fs1"
}]
@@ -229,7 +229,7 @@ variable "storage_servers" {
profile = string
count = number
image = string
- filesystem = string
+ filesystem = optional(string)
})
)
default = [{
@@ -241,6 +241,17 @@ variable "storage_servers" {
description = "Number of BareMetal Servers to be launched for storage cluster."
}
+variable "tie_breaker_bm_server_profile" {
+ type = string
+ default = null
+ description = "Specify the bare metal server profile type name to be used for creating the bare metal Tie breaker node. If no value is provided, the storage bare metal server profile will be used as the default. For more information, see [bare metal server profiles](https://cloud.ibm.com/docs/vpc?topic=vpc-bare-metal-servers-profile&interface=ui). [Tie Breaker Node](https://www.ibm.com/docs/en/storage-scale/5.2.2?topic=quorum-node-tiebreaker-disks)"
+}
+
+variable "scale_management_vsi_profile" {
+ type = string
+ description = "The virtual server instance profile type name to be used to create the Management node. For more information, see [Instance Profiles](https://cloud.ibm.com/docs/vpc?topic=vpc-profiles&interface=ui)."
+}
+
variable "protocol_subnets" {
type = list(object({
name = string
@@ -257,13 +268,11 @@ variable "protocol_instances" {
object({
profile = string
count = number
- image = string
})
)
default = [{
profile = "bx2-2x8"
count = 2
- image = "ibm-redhat-8-10-minimal-amd64-4"
}]
description = "Number of instances to be launched for protocol hosts."
}
@@ -325,12 +334,6 @@ variable "boot_volume_encryption_key" {
description = "CRN of boot volume encryption key"
}
-variable "existing_kms_instance_guid" {
- type = string
- default = null
- description = "The existing KMS instance guid."
-}
-
##############################################################################
# TODO: Auth Server (LDAP/AD) Variables
##############################################################################
@@ -370,11 +373,11 @@ variable "ldap_server" {
description = "Provide the IP address for the existing LDAP server. If no address is given, a new LDAP server will be created."
}
-variable "ldap_instance_key_pair" {
- type = list(string)
- default = null
- description = "Name of the SSH key configured in your IBM Cloud account that is used to establish a connection to the LDAP Server. Make sure that the SSH key is present in the same resource group and region where the LDAP Servers are provisioned. If you do not have an SSH key in your IBM Cloud account, create one by using the [SSH keys](https://cloud.ibm.com/docs/vpc?topic=vpc-ssh-keys) instructions."
-}
+# variable "ldap_instance_key_pair" {
+# type = list(string)
+# default = null
+# description = "Name of the SSH key configured in your IBM Cloud account that is used to establish a connection to the LDAP Server. Make sure that the SSH key is present in the same resource group and region where the LDAP Servers are provisioned. If you do not have an SSH key in your IBM Cloud account, create one by using the [SSH keys](https://cloud.ibm.com/docs/vpc?topic=vpc-ssh-keys) instructions."
+# }
variable "ldap_instances" {
type = list(
@@ -395,13 +398,11 @@ variable "afm_instances" {
object({
profile = string
count = number
- image = string
})
)
default = [{
profile = "bx2-32x128"
count = 1
- image = "ibm-redhat-8-10-minimal-amd64-4"
}]
description = "Number of instances to be launched for afm hosts."
}
@@ -421,12 +422,6 @@ variable "scale_encryption_type" {
description = "To enable filesystem encryption, specify either 'key_protect' or 'gklm'. If neither is specified, the default value will be 'null' and encryption is disabled"
}
-variable "gklm_instance_key_pair" {
- type = list(string)
- default = null
- description = "The key pair to use to launch the GKLM host."
-}
-
variable "gklm_instances" {
type = list(
object({
@@ -452,7 +447,7 @@ variable "vpc_region" {
variable "scheduler" {
type = string
default = null
- description = "Select one of the scheduler (LSF/Symphony/Slurm/null)"
+ description = "Select one of the scheduler (Scale/LSF/Symphony/Slurm/null)"
}
variable "ibm_customer_number" {
@@ -499,3 +494,45 @@ variable "bastion_subnets" {
default = []
description = "Subnets to launch the bastion host."
}
+
+variable "bms_boot_drive_encryption" {
+ type = bool
+ default = false
+ description = "To enable the encryption for the boot drive of bare metal server. Select true or false"
+}
+
+variable "login_security_group_name" {
+ type = string
+ default = null
+ description = "Provide the security group name to provision the bastion node. If set to null, the solution will automatically create the necessary security group and rules. If you choose to use an existing security group, ensure it has the appropriate rules configured for the bastion node to function properly."
+}
+
+variable "storage_security_group_name" {
+ type = string
+ default = null
+ description = "Provide the security group name to provision the storage nodes. If set to null, the solution will automatically create the necessary security group and rules. If you choose to use an existing security group, ensure it has the appropriate rules configured for the storage nodes to function properly."
+}
+
+variable "compute_security_group_name" {
+ type = string
+ default = null
+ description = "Provide the security group name to provision the compute nodes. If set to null, the solution will automatically create the necessary security group and rules. If you choose to use an existing security group, ensure it has the appropriate rules configured for the compute nodes to function properly."
+}
+
+variable "client_security_group_name" {
+ type = string
+ default = null
+ description = "Provide the security group name to provision the gklm nodes. If set to null, the solution will automatically create the necessary security group and rules. If you choose to use an existing security group, ensure it has the appropriate rules configured for the gklm nodes to function properly."
+}
+
+variable "gklm_security_group_name" {
+ type = string
+ default = null
+ description = "Provide the security group name to provision the gklm nodes. If set to null, the solution will automatically create the necessary security group and rules. If you choose to use an existing security group, ensure it has the appropriate rules configured for the gklm nodes to function properly."
+}
+
+variable "ldap_security_group_name" {
+ type = string
+ default = null
+ description = "Provide the security group name to provision the ldap nodes. If set to null, the solution will automatically create the necessary security group and rules. If you choose to use an existing security group, ensure it has the appropriate rules configured for the ldap nodes to function properly."
+}
diff --git a/modules/playbook/variables.tf b/modules/playbook/variables.tf
index 0407fe44..6dbe4153 100644
--- a/modules/playbook/variables.tf
+++ b/modules/playbook/variables.tf
@@ -73,7 +73,7 @@ variable "cloudlogs_provision" {
variable "scheduler" {
default = null
type = string
- description = "Select one of the scheduler (LSF/Symphony/Slurm/null)"
+ description = "Select one of the scheduler (Scale/LSF/Symphony/Slurm/null)"
}
variable "mgmnt_hosts" {
diff --git a/modules/prepare_tf_input/locals.tf b/modules/prepare_tf_input/locals.tf
index 9ae9dcce..885d41be 100644
--- a/modules/prepare_tf_input/locals.tf
+++ b/modules/prepare_tf_input/locals.tf
@@ -7,15 +7,17 @@ locals {
list_ssh_keys = jsonencode(var.ssh_keys)
list_storage_instances = jsonencode(var.storage_instances)
list_storage_servers = jsonencode(var.storage_servers)
+ list_tie_breaker_bm_server = jsonencode(var.tie_breaker_bm_server_profile)
+ list_scale_management_vsi_profile = jsonencode(var.scale_management_vsi_profile)
list_management_instances = jsonencode(var.management_instances)
list_protocol_instances = jsonencode(var.protocol_instances)
list_compute_instances = jsonencode(var.static_compute_instances)
list_client_instances = jsonencode(var.client_instances)
remote_allowed_ips = jsonencode(var.remote_allowed_ips)
- list_storage_subnets = jsonencode(length(var.storage_subnets) == 0 ? null : var.storage_subnets)
- list_protocol_subnets = jsonencode(length(var.protocol_subnets) == 0 ? null : var.protocol_subnets)
- list_cluster_subnet_id = jsonencode(length(var.cluster_subnet_id) == 0 ? null : var.cluster_subnet_id)
- list_client_subnets = jsonencode(length(var.client_subnets) == 0 ? null : var.client_subnets)
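+  # Subnet IDs are JSON-encoded so that empty values collapse to JSON null in the generated tfvars file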
+ list_storage_subnet_id = jsonencode(length(var.storage_subnet_id) == 0 ? null : var.storage_subnet_id)
+ list_client_subnet_id = jsonencode(length(var.client_subnet_id) == 0 ? null : var.client_subnet_id)
+ list_protocol_subnet_id = jsonencode(length(var.protocol_subnet_id) == 0 ? null : var.protocol_subnet_id)
+ list_compute_subnet_id = jsonencode(length(var.compute_subnet_id) == 0 ? null : var.compute_subnet_id)
list_login_subnet_ids = jsonencode(length(var.login_subnet_id) == 0 ? null : var.login_subnet_id)
dns_domain_names = jsonencode(var.dns_domain_names)
dynamic_compute_instances = jsonencode(var.dynamic_compute_instances)
@@ -29,18 +31,25 @@ locals {
list_ldap_instances = jsonencode(var.ldap_instance)
ldap_server = jsonencode(var.ldap_server)
ldap_basedns = jsonencode(var.ldap_basedns)
- list_ldap_ssh_keys = jsonencode(var.ldap_instance_key_pair)
list_afm_instances = jsonencode(var.afm_instances)
afm_cos_config_details = jsonencode(var.afm_cos_config)
- list_gklm_ssh_keys = jsonencode(var.gklm_instance_key_pair)
list_gklm_instances = jsonencode(var.gklm_instances)
scale_encryption_type = jsonencode(var.scale_encryption_type)
filesystem_config = jsonencode(var.filesystem_config)
scale_encryption_admin_password = jsonencode(var.scale_encryption_admin_password)
+ key_protect_instance_id = jsonencode(var.key_protect_instance_id)
custom_file_shares = jsonencode(var.custom_file_shares)
resource_group_ids = jsonencode(var.resource_group_ids)
existing_bastion_instance_name = jsonencode(var.existing_bastion_instance_name == null ? null : var.existing_bastion_instance_name)
existing_bastion_security_group_id = jsonencode(var.existing_bastion_security_group_id == null ? null : var.existing_bastion_security_group_id)
login_instance = jsonencode(var.login_instance)
-
+ filesets_config = jsonencode(var.filesets_config)
+ login_security_group_name = jsonencode(var.login_security_group_name == null ? null : var.login_security_group_name)
+ storage_security_group_name = jsonencode(var.storage_security_group_name == null ? null : var.storage_security_group_name)
+ compute_security_group_name = jsonencode(var.compute_security_group_name == null ? null : var.compute_security_group_name)
+ client_security_group_name = jsonencode(var.client_security_group_name == null ? null : var.client_security_group_name)
+ gklm_security_group_name = jsonencode(var.gklm_security_group_name == null ? null : var.gklm_security_group_name)
+ ldap_security_group_name = jsonencode(var.ldap_security_group_name == null ? null : var.ldap_security_group_name)
+ scale_afm_bucket_config_details = jsonencode(var.scale_afm_bucket_config_details)
+ scale_afm_cos_hmac_key_params = jsonencode(var.scale_afm_cos_hmac_key_params)
}
diff --git a/modules/prepare_tf_input/main.tf b/modules/prepare_tf_input/main.tf
index 2f45d195..b3020f10 100644
--- a/modules/prepare_tf_input/main.tf
+++ b/modules/prepare_tf_input/main.tf
@@ -15,6 +15,8 @@ resource "local_sensitive_file" "prepare_tf_input" {
"ssh_keys": ${local.list_ssh_keys},
"storage_instances": ${local.list_storage_instances},
"storage_servers": ${local.list_storage_servers},
+ "tie_breaker_bm_server_profile": ${local.list_tie_breaker_bm_server},
+ "scale_management_vsi_profile": ${local.list_scale_management_vsi_profile},
"storage_type": "${var.storage_type}",
"management_instances": ${local.list_management_instances},
"protocol_instances": ${local.list_protocol_instances},
@@ -23,16 +25,22 @@ resource "local_sensitive_file" "prepare_tf_input" {
"static_compute_instances": ${local.list_compute_instances},
"dynamic_compute_instances": ${local.dynamic_compute_instances},
"client_instances": ${local.list_client_instances},
+ "login_security_group_name": ${local.login_security_group_name},
+ "storage_security_group_name": ${local.storage_security_group_name},
+ "compute_security_group_name": ${local.compute_security_group_name},
+ "client_security_group_name": ${local.client_security_group_name},
+ "gklm_security_group_name": ${local.gklm_security_group_name},
+ "ldap_security_group_name": ${local.ldap_security_group_name},
"enable_cos_integration": ${var.enable_cos_integration},
"enable_atracker": ${var.enable_atracker},
"enable_vpc_flow_logs": ${var.enable_vpc_flow_logs},
"remote_allowed_ips": ${local.remote_allowed_ips},
"vpc_name": "${var.vpc_name}",
- "storage_subnets": ${local.list_storage_subnets},
- "protocol_subnets": ${local.list_protocol_subnets},
- "cluster_subnet_id": ${local.list_cluster_subnet_id},
- "client_subnets": ${local.list_client_subnets},
+ "compute_subnet_id": ${local.list_compute_subnet_id},
"login_subnet_id": ${local.list_login_subnet_ids},
+ "protocol_subnet_id": ${local.list_protocol_subnet_id},
+ "storage_subnet_id": ${local.list_storage_subnet_id},
+ "client_subnet_id": ${local.list_client_subnet_id},
"dns_domain_names": ${local.dns_domain_names},
"key_management": ${local.key_management},
"kms_instance_name" : ${local.kms_instance_name},
@@ -51,7 +59,6 @@ resource "local_sensitive_file" "prepare_tf_input" {
"enable_ldap": ${var.enable_ldap},
"ldap_server": ${local.ldap_server},
"ldap_basedns": ${local.ldap_basedns},
- "ldap_instance_key_pair": ${local.list_ldap_ssh_keys},
"ldap_admin_password": "${var.ldap_admin_password}",
"ldap_user_name": "${var.ldap_user_name}",
"ldap_user_password": "${var.ldap_user_password}",
@@ -60,10 +67,15 @@ resource "local_sensitive_file" "prepare_tf_input" {
"afm_cos_config": ${local.afm_cos_config_details},
"scale_encryption_enabled": ${var.scale_encryption_enabled},
"scale_encryption_type": ${local.scale_encryption_type},
- "gklm_instance_key_pair": ${local.list_gklm_ssh_keys},
"gklm_instances": ${local.list_gklm_instances},
- "scale_encryption_admin_password": "${local.scale_encryption_admin_password}",
+ "scale_encryption_admin_password": ${local.scale_encryption_admin_password},
+ "key_protect_instance_id": ${local.key_protect_instance_id},
"filesystem_config": ${local.filesystem_config},
+ "filesets_config": ${local.filesets_config},
+ "storage_gui_username": "${var.storage_gui_username}",
+ "storage_gui_password": "${var.storage_gui_password}",
+ "compute_gui_username": "${var.compute_gui_username}",
+ "compute_gui_password": "${var.compute_gui_password}",
"enable_hyperthreading": ${var.enable_hyperthreading},
"cloud_logs_data_bucket": ${var.cloud_logs_data_bucket},
"cloud_metrics_data_bucket": ${var.cloud_metrics_data_bucket},
@@ -82,11 +94,14 @@ resource "local_sensitive_file" "prepare_tf_input" {
"custom_file_shares": ${local.custom_file_shares},
"login_instance": ${local.login_instance},
"vpc_cluster_private_subnets_cidr_blocks": "${var.vpc_cluster_private_subnets_cidr_blocks}",
+ "bms_boot_drive_encryption": ${var.bms_boot_drive_encryption},
"existing_resource_group": "${var.existing_resource_group}",
"sccwp_service_plan": "${var.sccwp_service_plan}",
"sccwp_enable": ${var.sccwp_enable},
"cspm_enabled": ${var.cspm_enabled},
- "app_config_plan": "${var.app_config_plan}"
+ "app_config_plan": "${var.app_config_plan}",
+ "scale_afm_bucket_config_details": ${local.scale_afm_bucket_config_details},
+ "scale_afm_cos_hmac_key_params": ${local.scale_afm_cos_hmac_key_params}
}
EOT
filename = local.schematics_inputs_path
diff --git a/modules/prepare_tf_input/variables.tf b/modules/prepare_tf_input/variables.tf
index 45b9b377..d439db95 100644
--- a/modules/prepare_tf_input/variables.tf
+++ b/modules/prepare_tf_input/variables.tf
@@ -47,11 +47,6 @@ variable "resource_group_ids" {
##############################################################################
# Compute Variables
##############################################################################
-variable "client_subnets" {
- type = list(string)
- default = null
- description = "Name of an existing subnets in which the cluster resources will be deployed. If no value is given, then new subnet(s) will be provisioned for the cluster. [Learn more](https://cloud.ibm.com/docs/vpc)"
-}
variable "ssh_keys" {
type = list(string)
@@ -70,7 +65,7 @@ variable "client_instances" {
description = "Number of instances to be launched for client."
}
-variable "cluster_subnet_id" {
+variable "compute_subnet_id" {
type = string
default = null
description = "Name of an existing subnets in which the cluster resources will be deployed. If no value is given, then new subnet(s) will be provisioned for the cluster. [Learn more](https://cloud.ibm.com/docs/vpc)"
@@ -90,9 +85,10 @@ variable "management_instances" {
variable "static_compute_instances" {
type = list(
object({
- profile = string
- count = number
- image = string
+ profile = string
+ count = number
+ image = string
+ filesystem = optional(string)
})
)
description = "Min Number of instances to be launched for compute cluster."
@@ -121,11 +117,6 @@ variable "login_subnet_id" {
##############################################################################
# Storage Variables
##############################################################################
-variable "storage_subnets" {
- type = list(string)
- default = null
- description = "Name of an existing subnets in which the cluster resources will be deployed. If no value is given, then new subnet(s) will be provisioned for the cluster. [Learn more](https://cloud.ibm.com/docs/vpc)"
-}
variable "storage_instances" {
type = list(
@@ -136,6 +127,12 @@ variable "storage_instances" {
filesystem = optional(string)
})
)
+ default = [{
+ profile = "bx2d-32x128"
+ count = 0
+ image = "ibm-redhat-8-10-minimal-amd64-4"
+    filesystem = "fs1"
+ }]
description = "Number of instances to be launched for storage cluster."
}
@@ -145,7 +142,7 @@ variable "storage_servers" {
profile = string
count = number
image = string
- filesystem = string
+ filesystem = optional(string)
})
)
default = [{
@@ -157,21 +154,57 @@ variable "storage_servers" {
description = "Number of BareMetal Servers to be launched for storage cluster."
}
+variable "tie_breaker_bm_server_profile" {
+ type = string
+ default = null
+ description = "Specify the bare metal server profile type name to be used for creating the bare metal Tie breaker node. If no value is provided, the storage bare metal server profile will be used as the default. For more information, see [bare metal server profiles](https://cloud.ibm.com/docs/vpc?topic=vpc-bare-metal-servers-profile&interface=ui). [Tie Breaker Node](https://www.ibm.com/docs/en/storage-scale/5.2.2?topic=quorum-node-tiebreaker-disks)"
+}
+
+variable "scale_management_vsi_profile" {
+ type = string
+ description = "The virtual server instance profile type name to be used to create the Management node. For more information, see [Instance Profiles](https://cloud.ibm.com/docs/vpc?topic=vpc-profiles&interface=ui)."
+}
+
variable "protocol_instances" {
type = list(
object({
profile = string
count = number
- image = string
})
)
description = "Number of instances to be launched for protocol hosts."
}
-variable "protocol_subnets" {
- type = list(string)
- default = null
- description = "Name of an existing subnets in which the cluster resources will be deployed. If no value is given, then new subnet(s) will be provisioned for the cluster. [Learn more](https://cloud.ibm.com/docs/vpc)"
+##############################################################################
+# Scale GUI Variables
+##############################################################################
+
+variable "storage_gui_username" {
+ type = string
+ default = "null"
+ sensitive = true
+ description = "GUI user to perform system management and monitoring tasks on storage cluster."
+}
+
+variable "storage_gui_password" {
+ type = string
+ default = "null"
+ sensitive = true
+ description = "Password for storage cluster GUI"
+}
+
+variable "compute_gui_username" {
+ type = string
+ default = "null"
+ sensitive = true
+ description = "GUI user to perform system management and monitoring tasks on compute cluster."
+}
+
+variable "compute_gui_password" {
+ type = string
+ default = "null"
+ sensitive = true
+ description = "Password for compute cluster GUI"
}
##############################################################################
@@ -248,6 +281,9 @@ variable "enable_vpc_flow_logs" {
description = "Enable Activity tracker"
}
+##############################################################################
+# SCC Variables
+##############################################################################
variable "enable_atracker" {
type = bool
default = false
@@ -284,6 +320,10 @@ variable "bastion_fip" {
description = "bastion node fip"
}
+##############################################################################
+# SCC Variables
+##############################################################################
+
variable "cloud_logs_data_bucket" {
type = any
default = null
@@ -442,11 +482,11 @@ variable "ldap_user_password" {
description = "The LDAP user password should be 8 to 20 characters long, with a mix of at least three alphabetic characters, including one uppercase and one lowercase letter. It must also include two numerical digits and at least one special character from (~@_+:) are required.It is important to avoid including the username in the password for enhanced security.[This value is ignored for an existing LDAP server]."
}
-variable "ldap_instance_key_pair" {
- type = list(string)
- default = null
- description = "Name of the SSH key configured in your IBM Cloud account that is used to establish a connection to the LDAP Server. Make sure that the SSH key is present in the same resource group and region where the LDAP Servers are provisioned. If you do not have an SSH key in your IBM Cloud account, create one by using the [SSH keys](https://cloud.ibm.com/docs/vpc?topic=vpc-ssh-keys) instructions."
-}
+# variable "ldap_instance_key_pair" {
+# type = list(string)
+# default = null
+# description = "Name of the SSH key configured in your IBM Cloud account that is used to establish a connection to the LDAP Server. Make sure that the SSH key is present in the same resource group and region where the LDAP Servers are provisioned. If you do not have an SSH key in your IBM Cloud account, create one by using the [SSH keys](https://cloud.ibm.com/docs/vpc?topic=vpc-ssh-keys) instructions."
+# }
variable "ldap_instance" {
type = list(
@@ -477,12 +517,6 @@ variable "scale_encryption_type" {
description = "To enable filesystem encryption, specify either 'key_protect' or 'gklm'. If neither is specified, the default value will be 'null' and encryption is disabled"
}
-variable "gklm_instance_key_pair" {
- type = list(string)
- default = null
- description = "The key pair to use to launch the GKLM host."
-}
-
variable "gklm_instances" {
type = list(
object({
@@ -499,24 +533,18 @@ variable "gklm_instances" {
description = "Number of instances to be launched for client."
}
-# variable "scale_encryption_admin_default_password" {
-# type = string
-# default = null
-# description = "The default administrator password used for resetting the admin password based on the user input. The password has to be updated which was configured during the GKLM installation."
-# }
-
-# variable "scale_encryption_admin_username" {
-# type = string
-# default = null
-# description = "The default Admin username for Security Key Lifecycle Manager(GKLM)."
-# }
-
variable "scale_encryption_admin_password" {
type = string
default = null
description = "Password that is used for performing administrative operations for the GKLM.The password must contain at least 8 characters and at most 20 characters. For a strong password, at least three alphabetic characters are required, with at least one uppercase and one lowercase letter. Two numbers, and at least one special character from this(~@_+:). Make sure that the password doesn't include the username. Visit this [page](https://www.ibm.com/docs/en/gklm/3.0.1?topic=roles-password-policy) to know more about password policy of GKLM. "
}
+variable "key_protect_instance_id" {
+ type = string
+ default = null
+ description = "An existing Key Protect instance used for filesystem encryption"
+}
+
variable "storage_type" {
type = string
default = "scratch"
@@ -534,7 +562,6 @@ variable "afm_instances" {
object({
profile = string
count = number
- image = string
})
)
description = "Number of instances to be launched for afm hosts."
@@ -553,10 +580,32 @@ variable "afm_cos_config" {
bucket_storage_class = string
})
)
- default = null
+ nullable = false
description = "AFM configurations."
}
+variable "scale_afm_bucket_config_details" {
+ description = "Scale AFM COS Bucket and Configuration Details"
+ type = list(object({
+ bucket = string
+ endpoint = string
+ fileset = string
+ filesystem = string
+ mode = string
+ }))
+ default = null
+}
+
+variable "scale_afm_cos_hmac_key_params" {
+ description = "Scale AFM COS HMAC Key Details"
+ type = list(object({
+ akey = string
+ bucket = string
+ skey = string
+ }))
+ default = null
+}
+
variable "filesystem_config" {
type = list(
object({
@@ -566,13 +615,23 @@ variable "filesystem_config" {
default_metadata_replica = number
max_data_replica = number
max_metadata_replica = number
- mount_point = string
})
)
default = null
description = "File system configurations."
}
+variable "filesets_config" {
+ type = list(
+ object({
+ client_mount_path = string
+ quota = number
+ })
+ )
+ default = null
+ description = "Fileset configurations."
+}
+
variable "scheduler" {
type = string
default = null
@@ -649,6 +708,13 @@ variable "custom_file_shares" {
description = "Provide details for customizing your shared file storage layout, including mount points, sizes (in GB), and IOPS ranges for up to five file shares if using VPC file storage as the storage option.If using IBM Storage Scale as an NFS mount, update the appropriate mount path and nfs_share values created from the Storage Scale cluster. Note that VPC file storage supports attachment to a maximum of 256 nodes. Exceeding this limit may result in mount point failures due to attachment restrictions.For more information, see [Storage options](https://test.cloud.ibm.com/docs/hpc-ibm-spectrumlsf?topic=hpc-ibm-spectrumlsf-integrating-scale#integrate-scale-and-hpc)."
}
+
+variable "bms_boot_drive_encryption" {
+ type = bool
+ default = false
+ description = "To enable the encryption for the boot drive of bare metal server. Select true or false"
+}
+
###########################################################################
# Existing Bastion Support variables
###########################################################################
@@ -735,3 +801,57 @@ variable "app_config_plan" {
)
}
}
+
+variable "protocol_subnet_id" {
+ type = string
+ description = "Name of an existing subnet for protocol nodes. If no value is given, a new subnet will be created"
+ default = null
+}
+
+variable "client_subnet_id" {
+ type = string
+ description = "Name of an existing subnet for client nodes. If no value is given, a new subnet will be created"
+ default = null
+}
+
+variable "storage_subnet_id" {
+ type = string
+ description = "Name of an existing subnet for storage nodes. If no value is given, a new subnet will be created"
+ default = null
+}
+
+variable "login_security_group_name" {
+ type = string
+ default = null
+ description = "Provide the security group name to provision the bastion node. If set to null, the solution will automatically create the necessary security group and rules. If you choose to use an existing security group, ensure it has the appropriate rules configured for the bastion node to function properly."
+}
+
+variable "storage_security_group_name" {
+ type = string
+ default = null
+ description = "Provide the security group name to provision the storage nodes. If set to null, the solution will automatically create the necessary security group and rules. If you choose to use an existing security group, ensure it has the appropriate rules configured for the storage nodes to function properly."
+}
+
+variable "compute_security_group_name" {
+ type = string
+ default = null
+ description = "Provide the security group name to provision the compute nodes. If set to null, the solution will automatically create the necessary security group and rules. If you choose to use an existing security group, ensure it has the appropriate rules configured for the compute nodes to function properly."
+}
+
+variable "client_security_group_name" {
+ type = string
+ default = null
+ description = "Provide the security group name to provision the gklm nodes. If set to null, the solution will automatically create the necessary security group and rules. If you choose to use an existing security group, ensure it has the appropriate rules configured for the gklm nodes to function properly."
+}
+
+variable "gklm_security_group_name" {
+ type = string
+ default = null
+ description = "Provide the security group name to provision the gklm nodes. If set to null, the solution will automatically create the necessary security group and rules. If you choose to use an existing security group, ensure it has the appropriate rules configured for the gklm nodes to function properly."
+}
+
+variable "ldap_security_group_name" {
+ type = string
+ default = null
+ description = "Provide the security group name to provision the ldap nodes. If set to null, the solution will automatically create the necessary security group and rules. If you choose to use an existing security group, ensure it has the appropriate rules configured for the ldap nodes to function properly."
+}
diff --git a/modules/resource_provisioner/locals.tf b/modules/resource_provisioner/locals.tf
index c7a75cc5..31a5a41f 100644
--- a/modules/resource_provisioner/locals.tf
+++ b/modules/resource_provisioner/locals.tf
@@ -1,14 +1,15 @@
locals {
- schematics_inputs_path = format("/tmp/.schematics/%s/solution_terraform.auto.tfvars.json", var.cluster_prefix)
- remote_inputs_path = format("%s/terraform.tfvars.json", "/tmp")
- deployer_path = "/opt/ibm"
- remote_terraform_path = format("%s/terraform-ibm-hpc", local.deployer_path)
+ schematics_inputs_path = format("/tmp/.schematics/%s/solution_terraform.auto.tfvars.json", var.cluster_prefix)
+ remote_inputs_path = format("%s/terraform.tfvars.json", "/tmp")
+ deployer_path = "/opt/ibm"
+ remote_terraform_path = format("%s/terraform-ibm-hpc", local.deployer_path)
+ # da_hpc_repo_url = "github.ibm.com/workload-eng-services/HPCaaS.git"
da_hpc_repo_url = "github.com/terraform-ibm-modules/terraform-ibm-hpc.git"
- da_hpc_repo_tag = "main"
+  da_hpc_repo_tag             = "2-sept" # TODO: change back to "main" in future
remote_ansible_path = format("%s/ibm-spectrumscale-cloud-deploy", local.deployer_path)
scale_cloud_infra_repo_url = "https://github.com/jayeshh123/ibm-spectrum-scale-install-infra"
scale_cloud_infra_repo_name = "ibm-spectrum-scale-install-infra"
- scale_cloud_infra_repo_tag = "jay_scale_da_api"
+ scale_cloud_infra_repo_tag = "jay_new_scale_da_infra"
products = var.scheduler == "Scale" ? "scale" : "lsf"
ssh_key_file = "${path.root}/../../solutions/${local.products}/bastion_id_rsa"
bastion_public_key_content = var.existing_bastion_instance_name != null ? var.bastion_public_key_content : ""
diff --git a/modules/resource_provisioner/main.tf b/modules/resource_provisioner/main.tf
index 67a491cd..087dc3d9 100644
--- a/modules/resource_provisioner/main.tf
+++ b/modules/resource_provisioner/main.tf
@@ -18,13 +18,11 @@ resource "null_resource" "tf_resource_provisioner" {
provisioner "remote-exec" {
inline = [
- # Remove and re-clone the remote terraform path repo
- # "if [ -d ${local.remote_terraform_path} ]; then echo 'Removing existing repository at ${local.remote_terraform_path}' && sudo rm -rf ${local.remote_terraform_path}; fi",
- # "echo 'Cloning repository with tag: ${local.da_hpc_repo_tag}' && sudo git clone -b ${local.da_hpc_repo_tag} https://${var.github_token}@${local.da_hpc_repo_url} ${local.remote_terraform_path}",
- "if [ ! -d ${local.remote_terraform_path} ]; then echo 'Cloning repository with tag: ${local.da_hpc_repo_tag}' && sudo git clone -b ${local.da_hpc_repo_tag} https://${local.da_hpc_repo_url} ${local.remote_terraform_path}; fi",
+      # Conditionally clone the "terraform-ibm-hpc" repository from terraform-ibm-modules (TIM)
+ "if [ -f ${local.remote_terraform_path} ]; then sudo rm -f ${local.remote_terraform_path}; fi && if [ ! -d ${local.remote_terraform_path} ]; then echo 'Cloning repository with tag: ${local.da_hpc_repo_tag}' && sudo git clone -b ${local.da_hpc_repo_tag} https://${local.da_hpc_repo_url} ${local.remote_terraform_path}; fi",
# Clone Spectrum Scale collection if it doesn't exist
- "if [ ! -d ${local.remote_ansible_path}/${local.scale_cloud_infra_repo_name}/collections/ansible_collections/ibm/spectrum_scale ]; then sudo git clone -b ${local.scale_cloud_infra_repo_tag} ${local.scale_cloud_infra_repo_url} ${local.remote_ansible_path}/${local.scale_cloud_infra_repo_name}/collections/ansible_collections/ibm/spectrum_scale; fi",
+ "if [ \"${var.scheduler}\" = \"Scale\" ]; then if [ ! -d ${local.remote_ansible_path}/${local.scale_cloud_infra_repo_name}/collections/ansible_collections/ibm/spectrum_scale ]; then sudo git clone -b ${local.scale_cloud_infra_repo_tag} ${local.scale_cloud_infra_repo_url} ${local.remote_ansible_path}/${local.scale_cloud_infra_repo_name}/collections/ansible_collections/ibm/spectrum_scale; fi; fi",
# Ensure ansible-playbook is available
"sudo ln -fs /usr/local/bin/ansible-playbook /usr/bin/ansible-playbook",
@@ -62,7 +60,7 @@ resource "null_resource" "ext_bastion_access" {
}
resource "null_resource" "fetch_host_details_from_deployer" {
- count = var.enable_deployer == true && var.scheduler == "LSF" ? 1 : 0
+ count = var.enable_deployer == true ? 1 : 0
provisioner "local-exec" {
    command = <<EOT
-    value = var.scheduler == "LSF" && (var.enable_deployer == false) && length(local.mgmt_hosts_ips) > 0 ? "ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -J ubuntu@${var.bastion_fip} lsfadmin@${local.mgmt_hosts_ips[0]}" : null
+ value = var.scheduler == "LSF" && (var.enable_deployer == false) && length(local.mgmt_hosts_ips) > 0 ? "ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -J ubuntu@${var.bastion_fip} vpcuser@${local.mgmt_hosts_ips[0]}" : null
}
output "ssh_to_login_node" {
@@ -33,27 +36,46 @@ output "ssh_to_login_node" {
value = var.scheduler == "LSF" && (var.enable_deployer == false) ? "ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -J ubuntu@${var.bastion_fip} lsfadmin@${local.login_host_ip[0]}" : null
}
-output "ssh_to_ldap_node" {
- description = "SSH command to connect to LDAP node"
- value = (var.scheduler == "LSF" && var.enable_deployer == false && var.enable_ldap && length(local.ldap_hosts_ips) > 0) ? "ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o ServerAliveInterval=5 -o ServerAliveCountMax=1 -J ubuntu@${var.bastion_fip} ubuntu@${local.ldap_hosts_ips[0]}" : null
-}
output "cloud_monitoring_url" {
- value = var.observability_monitoring_enable && (var.enable_deployer == false) ? module.cloud_monitoring_instance_creation[0].cloud_monitoring_url : null
+ value = var.scheduler == "LSF" && var.observability_monitoring_enable && (var.enable_deployer == false) ? module.cloud_monitoring_instance_creation[0].cloud_monitoring_url : null
description = "IBM Cloud Monitoring URL"
}
output "cloud_logs_url" {
- value = (var.enable_deployer == false) && (var.observability_logs_enable_for_management || var.observability_logs_enable_for_compute) ? module.cloud_monitoring_instance_creation[0].cloud_logs_url : null
+ value = var.scheduler == "LSF" && (var.enable_deployer == false) && (var.observability_logs_enable_for_management || var.observability_logs_enable_for_compute) ? module.cloud_monitoring_instance_creation[0].cloud_logs_url : null
description = "IBM Cloud Logs URL"
}
output "application_center_tunnel" {
description = "Available if IBM Spectrum LSF Application Center GUI is installed"
- value = var.enable_deployer ? "" : local.ssh_cmd
+ value = var.scheduler == "LSF" && (var.enable_deployer == false) ? local.ssh_cmd : null
}
output "application_center_url" {
description = "Available if IBM Spectrum LSF Application Center GUI is installed"
- value = "https://localhost:8443"
+ value = var.scheduler == "LSF" ? "https://localhost:8443" : null
+}
+
+#############################################
+### Scale Outputs ###
+#############################################
+
+output "scale_version" {
+ description = "Version of Scale"
+ value = var.scheduler == "Scale" ? local.scale_version : null
+}
+
+#############################################
+### Common Outputs ###
+#############################################
+
+output "ssh_to_deployer" {
+ description = "SSH command to connect to the deployer"
+ value = (var.enable_deployer == false) ? "ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -J ubuntu@${var.bastion_fip} vpcuser@${var.deployer_ip}" : null
+}
+
+output "ssh_to_ldap_node" {
+ description = "SSH command to connect to LDAP node"
+ value = (var.enable_deployer == false && var.enable_ldap && length(local.ldap_hosts_ips) > 0) ? "ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o ServerAliveInterval=5 -o ServerAliveCountMax=1 -J ubuntu@${var.bastion_fip} ubuntu@${local.ldap_hosts_ips[0]}" : null
}
diff --git a/samples/configs/hpc_catalog_values.json b/samples/configs/hpc_catalog_values.json
index f9ad780b..d3e75d3a 100644
--- a/samples/configs/hpc_catalog_values.json
+++ b/samples/configs/hpc_catalog_values.json
@@ -13,7 +13,7 @@
"vpc_cidr": "10.241.0.0/18",
"vpc_cluster_private_subnets_cidr_blocks": "10.241.0.0/20",
"vpc_cluster_login_private_subnets_cidr_blocks": "10.241.16.0/28",
- "dns_domain_name": "{compute = \"lsf.com\"}",
+ "dns_domain_name": "{compute = \"hpc.local\"}",
"dns_instance_id": "__NULL__",
"dns_custom_resolver_id": "__NULL__",
"bastion_instance": "{ image = \"ibm-ubuntu-22-04-5-minimal-amd64-3\", profile = \"cx2-4x8\" }",
@@ -29,7 +29,7 @@
"kms_key_name": "__NULL__",
"enable_vpc_flow_logs": "false",
"enable_ldap": "false",
- "ldap_basedns": "lsf.com",
+ "ldap_basedns": "hpc.local",
"ldap_server": "null",
"ldap_server_cert": "null",
"ldap_admin_password": "",
diff --git a/samples/configs/hpc_schematics_values.json b/samples/configs/hpc_schematics_values.json
index 89273f7c..c041fca1 100644
--- a/samples/configs/hpc_schematics_values.json
+++ b/samples/configs/hpc_schematics_values.json
@@ -147,7 +147,7 @@
},
{
"name": "dns_domain_name",
- "value": "{compute= \"lsf.com\"}",
+ "value": "{compute= \"hpc.local\"}",
"type": "object({compute = string})",
"secure": false,
"description": "IBM Cloud DNS Services domain name to be used for the IBM Cloud HPC cluster."
@@ -287,7 +287,7 @@
},
{
"name": "ldap_basedns",
- "value": "lsf.com",
+ "value": "hpc.local",
"type": "string",
"secure": false,
"description": "The dns domain name is used for configuring the LDAP server. If an LDAP server is already in existence, ensure to provide the associated DNS domain name."
diff --git a/samples/configs/scale_catalog_values.json b/samples/configs/scale_catalog_values.json
new file mode 100644
index 00000000..92c1a45c
--- /dev/null
+++ b/samples/configs/scale_catalog_values.json
@@ -0,0 +1,80 @@
+{
+ "ibmcloud_api_key": "Fill the value here",
+ "ibm_customer_number": "Fill the value here",
+ "existing_resource_group": "Default",
+ "zones": "[\"us-east-1\"]",
+ "cluster_prefix": "scale",
+ "ssh_keys": "[\"Fill the value here\"]",
+ "remote_allowed_ips": "[\"Fill the value here\"]",
+ "storage_gui_username": "Fill the value here",
+ "storage_gui_password": "Fill the value here",
+ "vpc_name": "__NULL__",
+ "vpc_cidr": "10.241.0.0/18",
+ "login_subnets_cidr": "10.241.16.0/28",
+ "compute_subnets_cidr": "10.241.0.0/20",
+ "storage_subnets_cidr": "10.241.30.0/24",
+ "protocol_subnets_cidr": "10.241.40.0/24",
+ "client_subnets_cidr": "10.241.50.0/24",
+ "compute_gui_username": "",
+ "compute_gui_password": "",
+ "bastion_instance": "{ image = \"ibm-ubuntu-22-04-5-minimal-amd64-3\", profile = \"cx2-4x8\" }",
+ "deployer_instance": "{ image = \"hpcc-scale-deployer-v1\", profile = \"mx2-4x32\" }",
+ "compute_instances": "[{ profile = \"cx2-2x4\", count = 0, image = \"hpcc-scale5232-rhel810-v1\", filesystem = \"/gpfs/fs1\" }]",
+ "client_instances": "[{ profile = \"cx2-2x4\", count = 0, image = \"ibm-redhat-8-10-minimal-amd64-6\" }]",
+ "storage_instances": "[{ profile = \"bx2d-32x128\", count =2, image = \"ibm-redhat-8-10-minimal-amd64-6\", filesystem = \"/gpfs/fs1\" }]",
+ "storage_baremetal_server": "[{ profile = \"cx2d-metal-96x192\", count = 2, image = \"hpcc-scale5232-rhel810-v1\", filesystem = \"/gpfs/fs1\" }]",
+ "tie_breaker_baremetal_server_profile": "__NULL__",
+ "scale_management_vsi_profile": "\"bx2-8x32\"",
+ "afm_instances": "[{ profile = \"bx2d-32x128\", count = 0, image = \"hpcc-scale5232-rhel810-v1\" }]",
+ "protocol_instances": "[{ profile = \"cx2-32x64\", count = 2, image = \"hpcc-scale5232-rhel810-v1\" }]",
+ "colocate_protocol_instances": "true",
+ "filesystem_config": "[{ filesystem = \"/gpfs/fs1\", block_size = \"4M\", default_data_replica = 2, default_metadata_replica = 2, max_data_replica = 3, max_metadata_replica = 3 }]",
+ "filesets_config": "[{ client_mount_path = \"/mnt/scale/tools\", quota = 0 } , { client_mount_path = \"/mnt/scale/data\", quota = 0 }]",
+ "afm_cos_config": "[{ afm_fileset = \"afm_fileset\", mode = \"iw\", cos_instance = \"\", bucket_name = \"\", bucket_region = \"us-south\", cos_service_cred_key = \"\", bucket_storage_class = \"smart\", bucket_type = \"region_location\" }]",
+ "dns_domain_name": "{ compute = \"comp.com\", storage = \"strg.com\", protocol = \"ces.com\", client = \"clnt.com\", gklm = \"gklm.com\"}",
+ "dns_instance_id": "__NULL__",
+ "dns_custom_resolver_id": "__NULL__",
+ "enable_vpc_flow_logs": "true",
+ "skip_flowlogs_s2s_auth_policy": "false",
+ "enable_ldap": "false",
+ "ldap_basedns": "ldapscale.com",
+ "ldap_server": "null",
+ "ldap_server_cert": "null",
+ "ldap_admin_password": "",
+ "ldap_user_name": "",
+ "ldap_user_password": "",
+ "ldap_instance": "[{ profile = \"cx2-2x4\", image = \"ibm-ubuntu-22-04-5-minimal-amd64-5\" }]",
+ "scale_encryption_enabled": "false",
+ "scale_encryption_type": "null",
+ "gklm_instances": "[{ profile = \"bx2-2x8\", count = 2, image = \"hpcc-scale-gklm4202-v2-5-3\" }]",
+ "scale_encryption_admin_password": "__NULL__",
+ "key_protect_instance_id": "__NULL__",
+ "storage_type": "scratch",
+ "observability_atracker_enable": "true",
+ "observability_atracker_target_type": "cloudlogs",
+ "sccwp_enable": "true",
+ "app_config_plan": "basic",
+ "cspm_enabled": "true",
+ "sccwp_service_plan": "free-trial",
+ "existing_bastion_instance_name": "__NULL__",
+ "existing_bastion_instance_public_ip": "__NULL__",
+ "existing_bastion_security_group_id": "__NULL__",
+ "existing_bastion_ssh_private_key": "__NULL__",
+ "enable_cos_integration": "false",
+ "cos_instance_name": "__NULL__",
+ "bms_boot_drive_encryption": "false",
+ "enable_sg_validation": "true",
+ "login_security_group_name": "__NULL__",
+ "storage_security_group_name": "__NULL__",
+ "compute_security_group_name": "__NULL__",
+ "client_security_group_name": "__NULL__",
+ "gklm_security_group_name": "__NULL__",
+ "login_subnet_id": "__NULL__",
+ "compute_subnet_id": "__NULL__",
+ "storage_subnet_id": "__NULL__",
+ "protocol_subnet_id": "__NULL__",
+ "client_subnet_id": "__NULL__",
+ "ldap_security_group_name": "__NULL__",
+ "TF_VERSION": "1.9",
+ "TF_PARALLELISM": "250"
+}
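
Every value in this catalog sample is a JSON string: lists and objects are HCL literals with escaped quotes, and the __NULL__ sentinel presumably decodes to null, matching the convention in the existing hpc_catalog_values.json sample. As a sketch, the entries above correspond to native Terraform values such as:

    zones = ["us-east-1"]
    storage_instances = [{
      profile    = "bx2d-32x128"
      count      = 2
      image      = "ibm-redhat-8-10-minimal-amd64-6"
      filesystem = "/gpfs/fs1"
    }]
    dns_domain_name = {
      compute  = "comp.com"
      storage  = "strg.com"
      protocol = "ces.com"
      client   = "clnt.com"
      gklm     = "gklm.com"
    }
    tie_breaker_baremetal_server_profile = null # "__NULL__" in the sample
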
diff --git a/samples/configs/scale_schematics_values.json b/samples/configs/scale_schematics_values.json
new file mode 100644
index 00000000..86ac1263
--- /dev/null
+++ b/samples/configs/scale_schematics_values.json
@@ -0,0 +1,599 @@
+{
+ "name": "scale-test",
+ "type": [
+ "terraform_v1.9"
+ ],
+ "location": "eu-de",
+ "resource_group": "Default",
+ "description": "",
+ "tags": [],
+ "template_repo": {
+ "url": "https://github.com/terraform-ibm-modules/terraform-ibm-hpc",
+ "branch": "11-aug-scale"
+ },
+ "template_data": [
+ {
+ "folder": "solutions/scale",
+ "type": "terraform_v1.9",
+ "env_values": [
+ {
+ "TF_CLI_ARGS_apply": "-parallelism=250"
+ },
+ {
+ "TF_CLI_ARGS_plan": "-parallelism=250"
+ },
+ {
+ "TF_CLI_ARGS_destroy": "-parallelism=100"
+ },
+ {
+ "VAR1": ""
+ },
+ {
+ "VAR2": ""
+ }
+ ],
+ "variablestore": [
+ {
+ "name": "ibm_customer_number",
+ "value": "Please fill here",
+ "type": "string",
+ "secure": true,
+ "description": "IBM Customer Number (ICN) used for Bring Your Own License (BYOL) entitlement check and not required if storage_type is evaluation, but must be provided if storage_type is scratch or persistent. Failing to provide an ICN will cause the deployment to fail to decrypt the packages. For more information on how to find your ICN, see [What is my IBM Customer Number (ICN)?](https://www.ibm.com/support/pages/what-my-ibm-customer-number-icn)."
+ },
+ {
+ "name": "ibmcloud_api_key",
+ "value": "Please fill here",
+ "type": "string",
+ "secure": true,
+ "description": "Provide the IBM Cloud API key for the account where the IBM Storage Scale cluster will be deployed, this is a required value that must be provided as it is used to authenticate and authorize access during the deployment. For instructions on creating an API key, see [Managing user API keys](https://cloud.ibm.com/docs/account?topic=account-userapikey&interface=ui)."
+ },
+ {
+ "name": "zones",
+ "value": "[\"us-east-1\"]",
+ "type": "list(string)",
+ "secure": false,
+ "description": "Specify the IBM Cloud zone within the chosen region where the IBM Storage scale cluster will be deployed. A single zone input is required, (for example, [\"us-east-1\"]) all the cluster nodes will all be provisioned in this zone.[Learn more](https://cloud.ibm.com/docs/vpc?topic=vpc-creating-a-vpc-in-a-different-region#get-zones-using-the-cli)."
+ },
+ {
+ "name": "ssh_keys",
+ "value": "[\"Please fill here\"]",
+ "type": "list(string)",
+ "secure": false,
+ "description": "Provide the names of the SSH keys already configured in your IBM Cloud account to enable access to the Storage Scale nodes. The solution does not create new SSH keys, so ensure you provide existing ones. These keys must reside in the same resource group and region as the cluster being provisioned.To provide multiple SSH keys, use a comma-separated list in the format: [\"key-name-1\", \"key-name-2\"]. If you do not have an SSH key in your IBM Cloud account, you can create one by following the instructions [SSH Keys](https://cloud.ibm.com/docs/vpc?topic=vpc-ssh-keys)."
+ },
+ {
+ "name": "remote_allowed_ips",
+ "value": "[\"Please fill here\"]",
+ "type": "list(string)",
+ "secure": false,
+ "description": "To ensure secure access to the IBM Storage Scale cluster via SSH, you must specify the public IP addresses of the devices that are permitted to connect. These IPs will be used to configure access restrictions and protect the environment from unauthorized connections. To allow access from multiple devices, provide the IP addresses as a comma-separated list in the format: [\"169.45.117.34\", \"203.0.113.25\"]. Identify your current public IP address, you can visit: https://ipv4.icanhazip.com."
+ },
+ {
+ "name": "cluster_prefix",
+ "value": "scale",
+ "type": "string",
+ "secure": false,
+ "description": "Prefix that is used to name the IBM Cloud resources that are provisioned to build the Storage Scale cluster. Make sure that the prefix is unique, since you cannot create multiple resources with the same name. The maximum length of supported characters is 64. Preifx must begin with a letter and end with a letter or number."
+ },
+ {
+ "name": "existing_resource_group",
+ "value": "Default",
+ "type": "string",
+ "secure": false,
+ "description": "Specify the name of the existing resource group in your IBM Cloud account where cluster resources will be deployed. By default, the resource group is set to 'Default.' In some older accounts, it may be 'default,' so please verify the resource group name before proceeding. If the value is set to \"null\", the automation will create two separate resource groups: 'workload-rg' and 'service-rg.' For more details, see [Managing resource groups](https://cloud.ibm.com/docs/account?topic=account-rgs&interface=ui)."
+ },
+ {
+ "name": "vpc_name",
+ "value": "__NULL__",
+ "type": "string",
+ "secure": false,
+ "description": "Provide the name of an existing VPC in which the cluster resources will be deployed. If no value is given, the solution provisions a new VPC. [Learn more](https://cloud.ibm.com/docs/vpc). You can also choose to use existing subnets under this VPC or let the solution create new subnets as part of the deployment. If a custom DNS resolver is already configured for your VPC, specify its ID under the dns_custom_resolver_id input value."
+ },
+ {
+ "name": "vpc_cidr",
+ "value": "10.241.0.0/18",
+ "type": "string",
+ "secure": false,
+ "description": "Provide an address prefix to create a new VPC when the vpc_name variable is set to null. VPC will be created using this address prefix, and subnets can then be defined within it using the specified subnet CIDR blocks. For more information on address prefix, see [Setting IP ranges](https://cloud.ibm.com/docs/vpc?topic=vpc-vpc-addressing-plan-design)."
+ },
+ {
+ "name": "bastion_instance",
+ "value": "{image = \"ibm-ubuntu-22-04-5-minimal-amd64-5\", profile = \"cx2-4x8\"}",
+ "type": "object({ image = string, profile = string })",
+ "secure": false,
+ "description": "Bastion node functions as a jump server to enable secure SSH access to cluster nodes, ensuring controlled connectivity within the private network. Specify the configuration details for the bastion node, including the image and instance profile. Only Ubuntu 22.04 stock images are supported."
+ },
+ {
+ "name": "deployer_instance",
+ "value": "{image = \"hpcc-scale-deployer-v1\", profile = \"mx2-4x32\"}",
+ "type": "object({ image = string, profile = string })",
+ "secure": false,
+ "description": "A deployer node is a dedicated virtual machine or server instance used to automate the deployment and configuration of infrastructure and applications for HPC cluster components. Specify the configuration for the deployer node, including the custom image and virtual server instance profile."
+ },
+ {
+ "name": "login_subnets_cidr",
+ "value": "10.241.16.0/28",
+ "type": "string",
+ "secure": false,
+ "description": "Provide the CIDR block required for the creation of the login cluster private subnet. Single CIDR block is required. If using a hybrid environment, modify the CIDR block to avoid conflicts with any on-premises CIDR blocks. Since the login subnet is used only for the creation of login virtual server instances, provide a CIDR range of /28."
+ },
+ {
+ "name": "compute_subnets_cidr",
+ "value": "10.241.0.0/20",
+ "type": "string",
+ "secure": false,
+ "description": "Provide the CIDR block required for the creation of the compute private subnet. Single CIDR block is required. If using a hybrid environment, modify the CIDR block to avoid conflicts with any on-premises CIDR blocks. Ensure the selected CIDR block size can accommodate the maximum number of scale compute nodes expected in your cluster. For more information on CIDR block size selection, refer to the documentation, see [Choosing IP ranges for your VPC](https://cloud.ibm.com/docs/vpc?topic=vpc-choosing-ip-ranges-for-your-vpc)."
+ },
+ {
+ "name": "storage_subnets_cidr",
+ "value": "10.241.30.0/24",
+ "type": "string",
+ "secure": false,
+ "description": "Provide the CIDR block required for the creation of the storage private subnet. Single CIDR block is required. If using a hybrid environment, modify the CIDR block to avoid conflicts with any on-premises CIDR blocks. Ensure the selected CIDR block size can accommodate the maximum number of scale storage nodes expected in your cluster. For more information on CIDR block size selection, refer to the documentation, see [Choosing IP ranges for your VPC](https://cloud.ibm.com/docs/vpc?topic=vpc-choosing-ip-ranges-for-your-vpc)."
+ },
+ {
+ "name": "protocol_subnets_cidr",
+ "value": "10.241.40.0/24",
+ "type": "string",
+ "secure": false,
+ "description": "Provide the CIDR block required for the creation of the protocol private subnet. Single CIDR block is required. If using a hybrid environment, modify the CIDR block to avoid conflicts with any on-premises CIDR blocks. Ensure the selected CIDR block size can accommodate the maximum number of protocol nodes expected in your cluster. For more information on CIDR block size selection, refer to the documentation, see [Choosing IP ranges for your VPC](https://cloud.ibm.com/docs/vpc?topic=vpc-choosing-ip-ranges-for-your-vpc)."
+ },
+ {
+ "name": "client_subnets_cidr",
+ "value": "10.241.50.0/24",
+ "type": "string",
+ "secure": false,
+ "description": "Provide the CIDR block required for the creation of the client private subnet. Single CIDR block is required. If using a hybrid environment, modify the CIDR block to avoid conflicts with any on-premises CIDR blocks. Ensure the selected CIDR block size can accommodate the maximum number of scale client nodes expected in your cluster. For more information on CIDR block size selection, refer to the documentation, see [Choosing IP ranges for your VPC](https://cloud.ibm.com/docs/vpc?topic=vpc-choosing-ip-ranges-for-your-vpc)."
+ },
+ {
+ "name": "compute_gui_username",
+ "value": "",
+ "type": "string",
+ "secure": true,
+ "description": "GUI username to perform system management and monitoring tasks on the compute cluster. The Username should be at least 4 characters, (any combination of lowercase and uppercase letters)."
+ },
+ {
+ "name": "compute_gui_password",
+ "value": "",
+ "type": "string",
+ "secure": true,
+ "description": "Password for logging in to the compute cluster GUI. Must be at least 8 characters long and include a combination of uppercase and lowercase letters, a number, and a special character. It must not contain the username or start with a special character."
+ },
+ {
+ "name": "compute_instances",
+ "value": "[{ profile = \"cx2-2x4\", count = 0, image = \"hpcc-scale5232-rhel810-v1\", filesystem = \"/gpfs/fs1\" }]",
+ "type": "list(object({ profile = string, count = number, image = string, filesystem = optional(string) }))",
+ "secure": false,
+ "description": "Specify the list of virtual server instances to be provisioned as compute nodes in the cluster. Each object includes the instance profile (machine type), number of instances (count), OS image to use, and an optional filesystem mount path. This configuration allows customization of the compute tier to suit specific performance and workload requirements. The profile must match a valid IBM Cloud VPC Gen2 instance profile format. A minimum of 3 compute nodes is required to form a cluster, and a maximum of 64 nodes is supported. For more details, refer[Instance Profiles](https://cloud.ibm.com/docs/vpc?topic=vpc-profiles&interface=ui)."
+ },
+ {
+ "name": "client_instances",
+ "value": "[{ profile = \"cx2-2x4\", count = 0, image = \"ibm-redhat-8-10-minimal-amd64-6\" }]",
+ "type": "list(object({ profile = string, count = number, image = string }))",
+ "secure": false,
+ "description": "Specify the list of virtual server instances to be provisioned as client nodes in the cluster. Each object includes the instance profile (machine type), number of instances (count), OS image to use. This configuration allows customization of the compute tier to suit specific performance and workload requirements. The profile must match a valid IBM Cloud VPC Gen2 instance profile format. For more details, refer [Instance Profiles](https://cloud.ibm.com/docs/vpc?topic=vpc-profiles&interface=ui)."
+ },
+ {
+ "name": "storage_instances",
+ "value": "[{ profile = \"bx2d-32x128\", count = 2, image = \"hpcc-scale5232-rhel810-v1\", filesystem = \"/gpfs/fs1\" }]",
+ "type": "list(object({ profile = string, count = number, image = string, filesystem = optional(string) }))",
+ "secure": false,
+ "description": "Specify the list of virtual server instances to be provisioned as storage nodes in the cluster. Each object includes the instance profile (machine type), number of instances (count), OS image to use, and an optional filesystem mount path. This configuration allows customization of the storage tier to suit specific storage performance cluster. The profile must match a valid IBM Cloud VPC Gen2 instance profile format. A minimum of 2 storage nodes is required to form a cluster, and a maximum of 64 nodes is supported. For more details, refer[Instance Profiles](https://cloud.ibm.com/docs/vpc?topic=vpc-profiles&interface=ui)."
+ },
+ {
+ "name": "storage_baremetal_server",
+ "value": "[{ profile = \"cx2d-metal-96x192\", count = 2, image = \"hpcc-scale5232-rhel810-v1\", filesystem = \"/gpfs/fs1\" }]",
+ "type": "list(object({ profile = string, count = number, image = string, filesystem = optional(string) }))",
+ "secure": false,
+ "description": "Specify the list of bare metal servers to be provisioned for the storage cluster. Each object in the list specifies the server profile (hardware configuration), the count (number of servers), the image (OS image to use), and an optional filesystem mount path. This configuration allows flexibility in scaling and customizing the storage cluster based on performance and capacity requirements. Only valid bare metal profiles supported in IBM Cloud VPC should be used. A minimum of 2 baremetal storage nodes is required to form a cluster, and a maximum of 64 nodes is supported For available bare metal profiles, refer to the [Baremetal Profiles](https://cloud.ibm.com/docs/vpc?topic=vpc-bare-metal-servers-profile&interface=ui)."
+ },
+ {
+ "name": "tie_breaker_baremetal_server_profile",
+ "value": "__NULL__",
+ "type": "string",
+ "secure": false,
+ "description": "Specify the bare metal server profile type name to be used for creating the bare metal Tie breaker node. If no value is provided, the storage bare metal server profile will be used as the default. For more information, see [bare metal server profiles](https://cloud.ibm.com/docs/vpc?topic=vpc-bare-metal-servers-profile&interface=ui). [Tie Breaker Node](https://www.ibm.com/docs/en/storage-scale/5.2.2?topic=quorum-node-tiebreaker-disks)"
+ },
+ {
+ "name": "scale_management_vsi_profile",
+ "value": "bx2-8x32",
+ "type": "string",
+ "secure": false,
+ "description": "The virtual server instance profile type name to be used to create the Management node. For more information, see [Instance Profiles](https://cloud.ibm.com/docs/vpc?topic=vpc-profiles&interface=ui)."
+ },
+ {
+ "name": "afm_instances",
+ "value": "[{ profile = \"bx2d-32x128\", count = 0}]",
+ "type": "list(object({ profile = string, count = number}))",
+ "secure": false,
+ "description": "Specify the list of virtual server instances to be provisioned as AFM nodes in the cluster. Each object in the list includes the instance profile (machine type), the count (number of instances), the image (OS image to use). This configuration allows you to access remote data and high-performance computing needs.This input can be used to provision virtual server instances (VSI). If persistent, high-throughput storage is required, consider using bare metal instances instead. Ensure you provide valid instance profiles. Maximum of 16 afm nodes is supported. For more details, refer to [Instance Profiles](https://cloud.ibm.com/docs/vpc?topic=vpc-profiles&interface=ui)."
+ },
+ {
+ "name": "protocol_instances",
+ "value": "[{ profile = \"cx2-32x64\", count = 2}]",
+ "type": "list(object({ profile = string, count = number}))",
+ "secure": false,
+ "description": "Specify the list of virtual server instances to be provisioned as protocol nodes in the cluster. Each object in the list includes the instance profile (machine type), the count (number of instances), the image (OS image to use). This configuration allows allows for a unified data management solution, enabling different clients to access the same data using NFS protocol.This input can be used to provision virtual server instances (VSI). If persistent, high-throughput storage is required, consider using bare metal instances instead. Ensure you provide valid instance profiles. Maximum of 32 VSI or baremetal nodes are supported. For more details, refer to [Instance Profiles](https://cloud.ibm.com/docs/vpc?topic=vpc-profiles&interface=ui)."
+ },
+ {
+ "name": "colocate_protocol_instances",
+ "value": "true",
+ "type": "bool",
+ "secure": false,
+ "description": "Enable this option to colocate protocol services on the same virtual server instances used for storage. When set to true, the storage nodes will also act as protocol nodes for reducing the need for separate infrastructure. This can optimize resource usage and simplify the cluster setup, especially for smaller environments or cost-sensitive deployments. For larger or performance-intensive workloads, consider deploying dedicated protocol instances instead."
+ },
+ {
+ "name": "storage_gui_username",
+ "value": "",
+ "type": "string",
+ "secure": true,
+ "description": "GUI username to perform system management and monitoring tasks on the storage cluster. Note: Username should be at least 4 characters, (any combination of lowercase and uppercase letters)."
+ },
+ {
+ "name": "storage_gui_password",
+ "value": "",
+ "type": "string",
+ "secure": true,
+ "description": "The storage cluster GUI password is used for logging in to the storage cluster through the GUI. The password should contain a minimum of 8 characters. For a strong password, use a combination of uppercase and lowercase letters, one number, and a special character. Make sure that the password doesn't contain the username and it should not start with a special character."
+ },
+ {
+ "name": "filesystem_config",
+ "value": "[{ filesystem = \"/gpfs/fs1\", block_size = \"4M\", default_data_replica = 2, default_metadata_replica = 2, max_data_replica = 3, max_metadata_replica = 3 }]",
+ "type": "list(object({ filesystem = string, block_size = string, default_data_replica = number, default_metadata_replica = number, max_data_replica = number, max_metadata_replica = number }))",
+ "secure": false,
+ "description": "Specify the configuration parameters for one or more IBM Storage Scale (GPFS) filesystems. Each object in the list includes the filesystem mount point, block size, and replica settings for both data and metadata. These settings determine how data is distributed and replicated across the cluster for performance and fault tolerance."
+ },
+ {
+ "name": "filesets_config",
+ "value": "[{ client_mount_path = \"/mnt/scale/tools\", quota = 0 } , { client_mount_path = \"/mnt/scale/data\", quota = 0 }]",
+ "type": "list(object({ client_mount_path = string, quota = number}))",
+ "secure": false,
+ "description": "Specify a list of filesets with client mount paths and optional storage quotas (0 means no quota) to be created within the IBM Storage Scale filesystem.."
+ },
+ {
+ "name": "afm_cos_config",
+ "value": "[{ afm_fileset = \"afm_fileset\", mode = \"iw\", cos_instance = \"\", bucket_name = \"\", bucket_region = \"us-south\", cos_service_cred_key = \"\", bucket_storage_class = \"smart\", bucket_type = \"region_location\" }]",
+ "type": "list(object({afm_fileset = string, mode = string, cos_instance = string, bucket_name = string, bucket_region = string, cos_service_cred_key = string, bucket_type = string, bucket_storage_class = string }))",
+ "secure": false,
+ "description": "Please provide details for the Cloud Object Storage (COS) instance, including information about the COS bucket, service credentials (HMAC key), AFM fileset, mode (such as Read-only (RO), Single writer (SW), Local updates (LU), and Independent writer (IW)), storage class (standard, vault, cold, or smart), and bucket type (single_site_location, region_location, cross_region_location). Note : The 'afm_cos_config' can contain up to 5 entries. For further details on COS bucket locations, refer to the relevant documentation https://cloud.ibm.com/docs/cloud-object-storage/basics?topic=cloud-object-storage-endpoints."
+ },
+ {
+ "name": "dns_instance_id",
+ "value": "__NULL__",
+ "type": "string",
+ "secure": false,
+ "description": "Specify the ID of an existing IBM Cloud DNS service instance. When provided, domain names are created within the specified instance. If set to null, a new DNS service instance is created, and the required DNS zones are associated with it."
+ },
+ {
+ "name": "dns_custom_resolver_id",
+ "value": "__NULL__",
+ "type": "string",
+ "secure": false,
+ "description": "Specify the ID of an existing IBM Cloud DNS custom resolver to avoid creating a new one. If set to null, a new custom resolver will be created and associated with the VPC. Note: A VPC can be associated with only one custom resolver. When using an existing VPC, if a custom resolver is already associated and this ID is not provided, the deployment will fail."
+ },
+ {
+ "name": "dns_domain_names",
+ "value": "{ compute = \"comp.com\", storage = \"strg.com\", protocol = \"ces.com\", client = \"clnt.com\", gklm = \"gklm.com\"}",
+ "type": "object({ compute = string, storage = string, protocol = string, client = string, gklm = string })",
+ "secure": false,
+ "description": "DNS domain names are user-friendly addresses that map to systems within a network, making them easier to identify and access. Provide the DNS domain names for IBM Cloud HPC components: compute, storage, protocol, client, and GKLM. These domains will be assigned to the respective nodes that are part of the scale cluster."
+ },
+ {
+ "name": "enable_cos_integration",
+ "value": "true",
+ "type": "bool",
+ "secure": false,
+ "description": "Set to true to create an extra cos bucket to integrate with scale cluster deployment."
+ },
+ {
+ "name": "cos_instance_name",
+ "value": "__NULL__",
+ "type": "string",
+ "secure": false,
+ "description": "Provide the name of the existing COS instance where the logs for the enabled functionalities will be stored."
+ },
+ {
+ "name": "enable_vpc_flow_logs",
+ "value": "true",
+ "type": "bool",
+ "secure": false,
+ "description": "This flag determines whether VPC flow logs are enabled. When set to true, a flow log collector will be created to capture and monitor network traffic data within the VPC. Enabling flow logs provides valuable insights for troubleshooting, performance monitoring, and security auditing by recording information about the traffic passing through your VPC. Consider enabling this feature to enhance visibility and maintain robust network management practices."
+ },
+ {
+ "name": "override",
+ "value": "false",
+ "type": "bool",
+ "secure": false,
+ "description": "Override default values with custom JSON template. This uses the file `override.json` to allow users to create a fully customized environment."
+ },
+ {
+ "name": "override_json_string",
+ "value": "__NULL__",
+ "type": "string",
+ "secure": false,
+ "description": "Override default values with a JSON object. Any JSON other than an empty string overrides other configuration changes."
+ },
+ {
+ "name": "enable_ldap",
+ "value": "false",
+ "type": "bool",
+ "secure": false,
+ "description": "Set this option to true to enable LDAP for IBM Spectrum Scale (GPFS), with the default value set to false."
+ },
+ {
+ "name": "ldap_basedns",
+ "value": "ldapscale.com",
+ "type": "string",
+ "secure": false,
+ "description": "The dns domain name is used for configuring the LDAP server. If an LDAP server is already in existence, ensure to provide the associated DNS domain name."
+ },
+ {
+ "name": "ldap_server",
+ "value": "__NULL__",
+ "type": "string",
+ "secure": false,
+ "description": "Provide the IP address for the existing LDAP server. If no address is given, a new LDAP server will be created."
+ },
+ {
+ "name": "ldap_server_cert",
+ "value": "__NULL__",
+ "type": "string",
+ "secure": true,
+ "description": "Provide the existing LDAP server certificate. This value is required if the 'ldap_server' variable is not set to null. If the certificate is not provided or is invalid, the LDAP configuration may fail. For more information on how to create or obtain the certificate, please refer [existing LDAP server certificate](https://cloud.ibm.com/docs/allowlist/hpc-service?topic=hpc-service-integrating-openldap)."
+ },
+ {
+ "name": "ldap_admin_password",
+ "value": "__NULL__",
+ "type": "string",
+ "secure": true,
+ "description": "The LDAP admin password must be 8 to 20 characters long and include at least two alphabetic characters (with one uppercase and one lowercase), one number, and one special character from the set (!@#$%^&*()_+=-). The password must not contain the username or any spaces. [This value is ignored for an existing LDAP server]."
+ },
+ {
+ "name": "ldap_user_name",
+ "value": "",
+ "type": "string",
+ "secure": false,
+ "description": "Custom LDAP User for performing cluster operations. Note: Username should be between 4 to 32 characters, (any combination of lowercase and uppercase letters).[This value is ignored for an existing LDAP server]"
+ },
+ {
+ "name": "ldap_user_password",
+ "value": "",
+ "type": "string",
+ "secure": true,
+ "description": "The LDAP user password must be 8 to 20 characters long and include at least two alphabetic characters (with one uppercase and one lowercase), one numeric digit, and at least one special character from the set (!@#$%^&*()_+=-). Spaces are not allowed. The password must not contain the username for enhanced security. [This value is ignored for an existing LDAP server]."
+ },
+ {
+ "name": "ldap_instance",
+ "value": "[{ profile = \"cx2-2x4\", image = \"ibm-ubuntu-22-04-5-minimal-amd64-1\" }]",
+ "type": "list(object({ profile = string, image = string }))",
+ "secure": false,
+ "description": "Specify the list of virtual server instances to be provisioned as ldap nodes in the cluster. Each object in the list defines the instance profile (machine type), the count (number of instances), the image (OS image to use). This configuration allows you to customize the server for setting up ldap server. The profile must match a valid IBM Cloud VPC Gen2 instance profile format. For more details, refer [Instance Profiles](https://cloud.ibm.com/docs/vpc?topic=vpc-profiles&interface=ui)."
+ },
+ {
+ "name": "scale_encryption_enabled",
+ "value": "false",
+ "type": "bool",
+ "secure": false,
+ "description": "Encryption ensures that data stored in the filesystem is protected from unauthorized access and secures sensitive information at rest. To enable the encryption for the filesystem. Select true or false"
+ },
+ {
+ "name": "scale_encryption_type",
+ "value": "null",
+ "type": "string",
+ "secure": false,
+ "description": "To enable filesystem encryption, specify either 'key_protect' or 'gklm'. If neither is specified, the default value will be 'null' and encryption is disabled"
+ },
+ {
+ "name": "gklm_instances",
+ "value": "[{ profile = \"bx2-2x8\", count = 2, image = \"hpcc-scale-gklm4202-v2-5-3\" }]",
+ "type": "list(object({ profile = string, count = number, image = string }))",
+ "secure": false,
+ "description": "Specify the list of virtual server instances to be provisioned as GKLM (Guardium Key Lifecycle Manager) nodes in the cluster. Each object in the list includes the instance profile (machine type), the count (number of instances), and the image (OS image to use). This configuration allows you to manage and securely store encryption keys used across the cluster components. The profile must match a valid IBM Cloud VPC Gen2 instance profile format. A minimum of 2 and maximum of 5 gklm nodes are supported. For more details, refer[Instance Profiles](https://cloud.ibm.com/docs/vpc?topic=vpc-profiles&interface=ui)."
+ },
+ {
+ "name": "scale_encryption_admin_password",
+ "value": "__NULL__",
+ "type": "string",
+ "secure": true,
+ "description": "Specifies the administrator password for GKLM-based encryption. This is required when encryption is enabled for IBM Spectrum Scale (GPFS) and the encryption type is set to 'gklm'. The password is used to authenticate administrative access to the Guardium Key Lifecycle Manager (GKLM) for managing encryption keys. Ensure the password meets your organization's security standards."
+ },
+ {
+ "name": "key_protect_instance_id",
+ "value": "__NULL__",
+ "type": "string",
+ "secure": false,
+ "description": "Provide the ID of an existing IBM Key Protect instance to be used for filesystem encryption in IBM Storage Scale. If this value is provided, the automation will use the existing Key Protect instance and create a new encryption key within it. If not provided, a new Key Protect instance will be created automatically during deployment."
+ },
+ {
+ "name": "storage_type",
+ "value": "scratch",
+ "type": "string",
+ "secure": false,
+ "description": "Select the Storage Scale file system deployment method. Note: The Storage Scale scratch and evaluation type deploys the Storage Scale file system on virtual server instances, and the persistent type deploys the Storage Scale file system on bare metal servers."
+ },
+ {
+ "name": "observability_atracker_enable",
+ "value": "false",
+ "type": "bool",
+ "secure": false,
+ "description": "Activity Tracker Event Routing to configure how to route auditing events. While multiple Activity Tracker instances can be created, only one tracker is needed to capture all events. Creating additional trackers is unnecessary if an existing Activity Tracker is already integrated with a COS bucket. In such cases, set the value to false, as all events can be monitored and accessed through the existing Activity Tracker."
+ },
+ {
+ "name": "observability_atracker_target_type",
+ "value": "cloudlogs",
+ "type": "string",
+ "secure": false,
+ "description": "All the events will be stored in either COS bucket or Cloud Logs on the basis of user input, so customers can retrieve or ingest them in their system."
+ },
+ {
+ "name": "sccwp_service_plan",
+ "value": "free-trial",
+ "type": "string",
+ "secure": false,
+ "description": "Specify the plan type for the Security and Compliance Center (SCC) Workload Protection instance. Valid values are free-trial and graduated-tier only."
+ },
+ {
+ "name": "sccwp_enable",
+ "value": "false",
+ "type": "bool",
+ "secure": false,
+ "description": "Set this flag to true to create an instance of IBM Security and Compliance Center (SCC) Workload Protection. When enabled, it provides tools to discover and prioritize vulnerabilities, monitor for security threats, and enforce configuration, permission, and compliance policies across the full lifecycle of your workloads. To view the data on the dashboard, enable the cspm to create the app configuration and required trusted profile policies.[Learn more](https://cloud.ibm.com/docs/workload-protection?topic=workload-protection-about)."
+ },
+ {
+ "name": "cspm_enabled",
+ "value": "true",
+ "type": "bool",
+ "secure": false,
+ "description": "CSPM (Cloud Security Posture Management) is a set of tools and practices that continuously monitor and secure cloud infrastructure. When enabled, it creates a trusted profile with viewer access to the App Configuration and Enterprise services for the SCC Workload Protection instance. Make sure the required IAM permissions are in place, as missing permissions will cause deployment to fail. If CSPM is disabled, dashboard data will not be available.[Learn more](https://cloud.ibm.com/docs/workload-protection?topic=workload-protection-about)."
+ },
+ {
+ "name": "app_config_plan",
+ "value": "basic",
+ "type": "string",
+ "secure": false,
+ "description": "Specify the IBM service pricing plan for the app configuration. Allowed values are 'basic', 'lite', 'standardv2', 'enterprise'."
+ },
+ {
+ "name": "skip_flowlogs_s2s_auth_policy",
+ "value": "false",
+ "type": "bool",
+ "secure": false,
+ "description": "Skip auth policy between flow logs service and COS instance, set to true if this policy is already in place on account."
+ },
+ {
+ "name": "existing_bastion_instance_name",
+ "value": "__NULL__",
+ "type": "string",
+ "secure": false,
+ "description": "Provide the name of the bastion instance. If none given then new bastion will be created."
+ },
+ {
+ "name": "existing_bastion_instance_public_ip",
+ "value": "__NULL__",
+ "type": "string",
+ "secure": false,
+ "description": "Provide the public ip address of the bastion instance to establish the remote connection."
+ },
+ {
+ "name": "existing_bastion_security_group_id",
+ "value": "__NULL__",
+ "type": "string",
+ "secure": false,
+ "description": "Specify the security group ID for the bastion server. This ID will be added as an allowlist rule on the HPC cluster nodes to facilitate secure SSH connections through the bastion node. By restricting access through a bastion server, this setup enhances security by controlling and monitoring entry points into the cluster environment. Ensure that the specified security group is correctly configured to permit only authorized traffic for secure and efficient management of cluster resources."
+ },
+ {
+ "name": "existing_bastion_ssh_private_key",
+ "value": "__NULL__",
+ "type": "string",
+ "secure": true,
+ "description": "Provide the private SSH key (named id_rsa) used during the creation and configuration of the bastion server to securely authenticate and connect to the bastion server. This allows access to internal network resources from a secure entry point. Note: The corresponding public SSH key (named id_rsa.pub) must already be available in the ~/.ssh/authorized_keys file on the bastion host to establish authentication."
+ },
+ {
+ "name": "bms_boot_drive_encryption",
+ "value": "false",
+ "type": "bool",
+ "secure": false,
+ "description": "Enable or disable encryption for the boot drive of bare metal servers. When set to true, the boot drive will be encrypted to enhance data security, protecting the operating system and any sensitive information stored on the root volume. This is especially recommended for workloads with strict compliance or security requirements. Set to false to disable boot drive encryption."
+ },
+ {
+ "name": "enable_sg_validation",
+ "value": "true",
+ "type": "bool",
+ "secure": false,
+ "description": "Enable or disable security group validation. Security group validation ensures that the specified security groups are properly assigned"
+ },
+ {
+ "name": "login_security_group_name",
+ "value": "__NULL__",
+ "type": "string",
+ "secure": false,
+ "description": "Provide the existing security group name to provision the bastion node. If set to null, the solution will automatically create the necessary security group and rules. If you choose to use an existing security group, ensure it has the appropriate rules configured for the bastion node to function properly."
+ },
+ {
+ "name": "storage_security_group_name",
+ "value": "__NULL__",
+ "type": "string",
+ "secure": false,
+ "description": "Provide the security group name to provision the storage nodes. If set to null, the solution will automatically create the necessary security group and rules. If you choose to use an existing security group, ensure it has the appropriate rules configured for the storage nodes to function properly. When using existing security groups, you must provide the corresponding group names for all other associated components as well."
+ },
+ {
+ "name": "compute_security_group_name",
+ "value": "__NULL__",
+ "type": "string",
+ "secure": false,
+ "description": "Provide the security group name to provision the compute nodes. If set to null, the solution will automatically create the necessary security group and rules. If you choose to use an existing security group, ensure it has the appropriate rules configured for the storage nodes to function properly. When using existing security groups, you must provide the corresponding group names for all other associated components as well"
+ },
+ {
+ "name": "client_security_group_name",
+ "value": "__NULL__",
+ "type": "string",
+ "secure": false,
+ "description": "Provide the security group name to provision the client nodes. If set to null, the solution will automatically create the necessary security group and rules. If you choose to use an existing security group, ensure it has the appropriate rules configured for the storage nodes to function properly. When using existing security groups, you must provide the corresponding group names for all other associated components as well"
+ },
+ {
+ "name": "gklm_security_group_name",
+ "value": "__NULL__",
+ "type": "string",
+ "secure": false,
+ "description": "Provide the security group name to provision the gklm nodes. If set to null, the solution will automatically create the necessary security group and rules. If you choose to use an existing security group, ensure it has the appropriate rules configured for the storage nodes to function properly. When using existing security groups, you must provide the corresponding group names for all other associated components as well"
+ },
+ {
+ "name": "ldap_security_group_name",
+ "value": "__NULL__",
+ "type": "string",
+ "secure": false,
+ "description": "Provide the security group name to provision the ldap nodes. If set to null, the solution will automatically create the necessary security group and rules. If you choose to use an existing security group, ensure it has the appropriate rules configured for the storage nodes to function properly. When using existing security groups, you must provide the corresponding group names for all other associated components as well"
+ },
+ {
+ "name": "login_subnet_id",
+ "value": "__NULL__",
+ "type": "string",
+ "secure": false,
+ "description": "Provide ID of an existing subnet to be used for provisioning bastion/deployer node. This is required only when deploying into an existing VPC (i.e., when a value is provided for `vpc_name`). When specifying an existing subnet, ensure that a public gateway is attached to the subnet to enable outbound internet access if required. Additionally, if this subnet ID is provided, you must also provide subnet IDs for all other applicable components (e.g., storage , compute, client, protocol, gklm) to maintain consistency across the deployment."
+ },
+ {
+ "name": "compute_subnet_id",
+ "value": "__NULL__",
+ "type": "string",
+ "secure": false,
+ "description": "Provide ID of an existing subnet to be used for provisioning compute nodes. This is required only when deploying into an existing VPC (i.e., when a value is provided for `vpc_name`). When specifying an existing subnet, ensure that a public gateway is attached to the subnet to enable outbound internet access if required. Additionally, if this subnet ID is provided, you must also provide subnet IDs for all other applicable components (e.g., storage , protocol, client, login, gklm) to maintain consistency across the deployment."
+ },
+ {
+ "name": "storage_subnet_id",
+ "value": "__NULL__",
+ "type": "string",
+ "secure": false,
+ "description": "Provide ID of an existing subnet to be used for storage nodes. This is required only when deploying into an existing VPC (i.e., when a value is provided for `vpc_name`). When specifying an existing subnet, ensure that a public gateway is attached to the subnet to enable outbound internet access if required. Additionally, if this subnet ID is provided, you must also provide subnet IDs for all other applicable components (e.g., compute , protocol, client, login, gklm) to maintain consistency across the deployment."
+ },
+ {
+ "name": "protocol_subnet_id",
+ "value": "__NULL__",
+ "type": "string",
+ "secure": false,
+ "description": "Provide ID of an existing subnet to be used for protocol nodes. This is required only when deploying into an existing VPC (i.e., when a value is provided for `vpc_name`). When specifying an existing subnet, ensure that a public gateway is attached to the subnet to enable outbound internet access if required. Additionally, if this subnet ID is provided, you must also provide subnet IDs for all other applicable components (e.g., storage , compute, client, login, gklm) to maintain consistency across the deployment."
+ },
+ {
+ "name": "client_subnet_id",
+ "value": "__NULL__",
+ "type": "string",
+ "secure": false,
+ "description": "Provide ID of an existing subnet to be used for client nodes. This is required only when deploying into an existing VPC (i.e., when a value is provided for `vpc_name`). When specifying an existing subnet, ensure that a public gateway is attached to the subnet to enable outbound internet access if required. Additionally, if this subnet ID is provided, you must also provide subnet IDs for all other applicable components (e.g., storage , compute, protocol, login, gklm) to maintain consistency across the deployment."
+ },
+ {
+ "name": "TF_VERSION",
+ "value": "1.9",
+ "type": "string",
+ "secure": false,
+ "description": "The version of the Terraform engine that's used in the Schematics workspace."
+ },
+ {
+ "name": "TF_PARALLELISM",
+ "value": "250",
+ "type": "string",
+ "secure": false,
+ "description": "Parallelism/ concurrent operations limit. Valid values are between 1 and 256, both inclusive. [Learn more](https://www.terraform.io/docs/internals/graph.html#walking-the-graph)."
+ }
+ ]
+ }
+ ]
+}
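
Each variablestore entry in this workspace payload mirrors a variable declaration in solutions/scale (name, type, value, and a secure flag for sensitive inputs), while the env_values block passes TF_CLI_ARGS_* through to the Terraform runs, so plan and apply execute with -parallelism=250 and destroy with -parallelism=100. A sketch of how one entry above maps back to a variable block; the actual declaration in solutions/scale may differ:

    variable "scale_management_vsi_profile" {
      type        = string
      default     = "bx2-8x32" # the "value" field of the variablestore entry
      description = "The virtual server instance profile type name to be used to create the Management node."
      # "secure": true entries presumably correspond to sensitive = true here.
    }
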
diff --git a/solutions/custom/variables.tf b/solutions/custom/variables.tf
index a9bb616f..149845ac 100644
--- a/solutions/custom/variables.tf
+++ b/solutions/custom/variables.tf
@@ -4,7 +4,7 @@
variable "scheduler" {
type = string
default = "LSF"
- description = "Select one of the scheduler (LSF/Symphony/Slurm/null)"
+ description = "Select one of the scheduler (Scale/LSF/Symphony/Slurm/null)"
}
variable "ibm_customer_number" {
@@ -247,7 +247,7 @@ variable "storage_instances" {
profile = string
count = number
image = string
- filesystem = string
+ filesystem = optional(string)
})
)
default = [{
@@ -480,66 +480,6 @@ variable "enable_vpc_flow_logs" {
description = "Enable Activity tracker"
}
-##############################################################################
-# Scale specific Variables
-##############################################################################
-# variable "filesystem_config" {
-# type = list(object({
-# filesystem = string
-# block_size = string
-# default_data_replica = number
-# default_metadata_replica = number
-# max_data_replica = number
-# max_metadata_replica = number
-# mount_point = string
-# }))
-# default = null
-# description = "File system configurations."
-# }
-
-# variable "filesets_config" {
-# type = list(object({
-# fileset = string
-# filesystem = string
-# junction_path = string
-# client_mount_path = string
-# quota = number
-# }))
-# default = null
-# description = "Fileset configurations."
-# }
-
-# variable "afm_instances" {
-# type = list(
-# object({
-# profile = string
-# count = number
-# image = string
-# })
-# )
-# default = [{
-# profile = "bx2-2x8"
-# count = 0
-# image = "ibm-redhat-8-10-minimal-amd64-2"
-# }]
-# description = "Number of instances to be launched for afm hosts."
-# }
-
-# variable "afm_cos_config" {
-# type = list(object({
-# afm_fileset = string,
-# mode = string,
-# cos_instance = string,
-# bucket_name = string,
-# bucket_region = string,
-# cos_service_cred_key = string,
-# bucket_type = string,
-# bucket_storage_class = string
-# }))
-# default = null
-# description = "AFM configurations."
-# }
-
##############################################################################
# LSF specific Variables
##############################################################################
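
The filesystem = optional(string) change in this file (and mirrored below in solutions/hpcaas) loosens the object type constraint: since Terraform 1.3, optional() lets callers omit an attribute, which then defaults to null (or to a fallback with the two-argument form). A sketch under those semantics:

    variable "storage_instances" {
      type = list(object({
        profile    = string
        count      = number
        image      = string
        filesystem = optional(string) # omitted entries become null
      }))
      # Valid now that filesystem is optional; this default would have failed
      # type-checking under the previous constraint.
      default = [{
        profile = "bx2d-32x128"
        count   = 2
        image   = "ibm-redhat-8-10-minimal-amd64-6"
      }]
    }
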
diff --git a/solutions/hpcaas/variables.tf b/solutions/hpcaas/variables.tf
index eededab6..015f07fb 100644
--- a/solutions/hpcaas/variables.tf
+++ b/solutions/hpcaas/variables.tf
@@ -240,7 +240,7 @@ variable "storage_instances" {
profile = string
count = number
image = string
- filesystem = string
+ filesystem = optional(string)
})
)
default = [{
diff --git a/solutions/lsf/datasource.tf b/solutions/lsf/datasource.tf
index afdf5435..cac16716 100644
--- a/solutions/lsf/datasource.tf
+++ b/solutions/lsf/datasource.tf
@@ -8,11 +8,11 @@ data "ibm_is_vpc" "existing_vpc" {
name = var.vpc_name
}
-data "ibm_is_subnet" "existing_cluster_subnets" {
- count = var.vpc_name != null && var.cluster_subnet_id != null ? 1 : 0
- identifier = var.cluster_subnet_id
+data "ibm_is_subnet" "existing_compute_subnets" {
+ count = var.vpc_name != null && var.compute_subnet_id != null ? 1 : 0
+ identifier = var.compute_subnet_id
}
data "ibm_is_public_gateways" "public_gateways" {
- count = var.vpc_name != null && var.cluster_subnet_id == null && var.login_subnet_id == null ? 1 : 0
+ count = var.vpc_name != null && var.compute_subnet_id == null && var.login_subnet_id == null ? 1 : 0
}
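
The renamed data source keeps the count-gating idiom used throughout these solutions: a data source with a conditional count of 0 or 1 is read only when its inputs are set, and is then addressed as a one-element list. A sketch with a hypothetical consumer:

    data "ibm_is_subnet" "example" {
      count      = var.vpc_name != null && var.compute_subnet_id != null ? 1 : 0
      identifier = var.compute_subnet_id
    }

    locals {
      # Index the data source only when it was actually created.
      example_subnet_zone = length(data.ibm_is_subnet.example) > 0 ? data.ibm_is_subnet.example[0].zone : null
    }
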
diff --git a/solutions/lsf/input_validation.tf b/solutions/lsf/input_validation.tf
index 64e6f141..5c8ddeae 100644
--- a/solutions/lsf/input_validation.tf
+++ b/solutions/lsf/input_validation.tf
@@ -22,36 +22,36 @@ locals {
(local.validate_login_subnet_id_vpc ? local.validate_login_subnet_id_vpc_msg : ""))
# Validate existing subnet public gateways
- validate_subnet_name_pg_msg = "Provided existing cluster_subnet_id should have public gateway attached."
- validate_subnet_name_pg = anytrue([var.cluster_subnet_id == null, var.cluster_subnet_id != null && var.vpc_name != null ? (data.ibm_is_subnet.existing_cluster_subnets[0].public_gateway != "") : false])
+ validate_subnet_name_pg_msg = "Provided existing compute_subnet_id should have public gateway attached."
+ validate_subnet_name_pg = anytrue([var.compute_subnet_id == null, var.compute_subnet_id != null && var.vpc_name != null ? (data.ibm_is_subnet.existing_compute_subnets[0].public_gateway != "") : false])
# tflint-ignore: terraform_unused_declarations
validate_subnet_name_pg_chk = regex("^${local.validate_subnet_name_pg_msg}$",
(local.validate_subnet_name_pg ? local.validate_subnet_name_pg_msg : ""))
# Validate existing cluster subnet should be in the appropriate zone.
validate_subnet_id_zone_msg = "Provided cluster subnets should be in appropriate zone."
- validate_subnet_id_zone = anytrue([var.cluster_subnet_id == null, var.cluster_subnet_id != null && var.vpc_name != null ? alltrue([data.ibm_is_subnet.existing_cluster_subnets[0].zone == var.zones[0]]) : false])
+ validate_subnet_id_zone = anytrue([var.compute_subnet_id == null, var.compute_subnet_id != null && var.vpc_name != null ? alltrue([data.ibm_is_subnet.existing_compute_subnets[0].zone == var.zones[0]]) : false])
# tflint-ignore: terraform_unused_declarations
validate_subnet_id_zone_chk = regex("^${local.validate_subnet_id_zone_msg}$",
(local.validate_subnet_id_zone ? local.validate_subnet_id_zone_msg : ""))
# Validate existing cluster subnet should be the subset of vpc_name entered
- validate_cluster_subnet_id_vpc_msg = "Provided cluster subnet should be within the vpc entered."
- validate_cluster_subnet_id_vpc = anytrue([var.cluster_subnet_id == null, var.cluster_subnet_id != null && var.vpc_name != null ? alltrue([for subnet_id in [var.cluster_subnet_id] : contains(data.ibm_is_vpc.existing_vpc[0].subnets[*].id, subnet_id)]) : false])
+ validate_compute_subnet_id_vpc_msg = "Provided cluster subnet should be within the vpc entered."
+ validate_compute_subnet_id_vpc = anytrue([var.compute_subnet_id == null, var.compute_subnet_id != null && var.vpc_name != null ? alltrue([for subnet_id in [var.compute_subnet_id] : contains(data.ibm_is_vpc.existing_vpc[0].subnets[*].id, subnet_id)]) : false])
# tflint-ignore: terraform_unused_declarations
- validate_subnet_id_vpc_chk = regex("^${local.validate_cluster_subnet_id_vpc_msg}$",
- (local.validate_cluster_subnet_id_vpc ? local.validate_cluster_subnet_id_vpc_msg : ""))
+ validate_subnet_id_vpc_chk = regex("^${local.validate_compute_subnet_id_vpc_msg}$",
+ (local.validate_compute_subnet_id_vpc ? local.validate_compute_subnet_id_vpc_msg : ""))
# Validate existing vpc public gateways
validate_existing_vpc_pgw_msg = "Provided existing vpc should have the public gateways created in the provided zones."
- validate_existing_vpc_pgw = anytrue([(var.vpc_name == null), alltrue([var.vpc_name != null, var.cluster_subnet_id != null]), alltrue([var.vpc_name != null, var.cluster_subnet_id == null, var.login_subnet_id == null, length(local.zone_1_pgw_ids) > 0])])
+ validate_existing_vpc_pgw = anytrue([(var.vpc_name == null), alltrue([var.vpc_name != null, var.compute_subnet_id != null]), alltrue([var.vpc_name != null, var.compute_subnet_id == null, var.login_subnet_id == null, length(local.zone_1_pgw_ids) > 0])])
# tflint-ignore: terraform_unused_declarations
validate_existing_vpc_pgw_chk = regex("^${local.validate_existing_vpc_pgw_msg}$",
(local.validate_existing_vpc_pgw ? local.validate_existing_vpc_pgw_msg : ""))
}
locals {
- vpc_id = var.vpc_name != null && var.cluster_subnet_id == null && var.login_subnet_id == null ? data.ibm_is_vpc.existing_vpc[0].id : null
- public_gateways_list = var.vpc_name != null && var.cluster_subnet_id == null && var.login_subnet_id == null ? data.ibm_is_public_gateways.public_gateways[0].public_gateways : []
- zone_1_pgw_ids = var.vpc_name != null && var.cluster_subnet_id == null && var.login_subnet_id == null ? [for gateway in local.public_gateways_list : gateway.id if gateway.vpc == local.vpc_id && gateway.zone == var.zones[0]] : []
+ vpc_id = var.vpc_name != null && var.compute_subnet_id == null && var.login_subnet_id == null ? data.ibm_is_vpc.existing_vpc[0].id : null
+ public_gateways_list = var.vpc_name != null && var.compute_subnet_id == null && var.login_subnet_id == null ? data.ibm_is_public_gateways.public_gateways[0].public_gateways : []
+ zone_1_pgw_ids = var.vpc_name != null && var.compute_subnet_id == null && var.login_subnet_id == null ? [for gateway in local.public_gateways_list : gateway.id if gateway.vpc == local.vpc_id && gateway.zone == var.zones[0]] : []
}
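
These locals rely on a Terraform validation idiom: regex() raises an error when its pattern does not match, so regex("^msg$", condition ? msg : "") succeeds when the condition holds and aborts the plan, surfacing the message text, when it does not. A stripped-down sketch of the trick with a hypothetical check:

    locals {
      validate_zone_msg = "Exactly one zone must be provided."
      validate_zone     = length(var.zones) == 1
      # Fails the plan with an error containing the message when the check is false.
      # tflint-ignore: terraform_unused_declarations
      validate_zone_chk = regex("^${local.validate_zone_msg}$",
        (local.validate_zone ? local.validate_zone_msg : ""))
    }
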
diff --git a/solutions/lsf/locals.tf b/solutions/lsf/locals.tf
index ce4d1c41..94470939 100644
--- a/solutions/lsf/locals.tf
+++ b/solutions/lsf/locals.tf
@@ -23,7 +23,7 @@ locals {
ssh_keys = var.ssh_keys
vpc_cluster_login_private_subnets_cidr_blocks = var.vpc_cluster_login_private_subnets_cidr_blocks
vpc_cluster_private_subnets_cidr_blocks = var.vpc_cluster_private_subnets_cidr_blocks
- cluster_subnet_id = var.cluster_subnet_id
+ compute_subnet_id = var.compute_subnet_id
cos_instance_name = var.cos_instance_name
dns_custom_resolver_id = var.dns_custom_resolver_id
dns_instance_id = var.dns_instance_id
@@ -82,7 +82,6 @@ locals {
sccwp_enable = var.sccwp_enable
cspm_enabled = var.cspm_enabled
app_config_plan = var.app_config_plan
-
}
}
@@ -95,7 +94,7 @@ locals {
vpc_cluster_login_private_subnets_cidr_blocks = lookup(local.override[local.override_type], "vpc_cluster_login_private_subnets_cidr_blocks", local.config.vpc_cluster_login_private_subnets_cidr_blocks)
login_subnet_id = lookup(local.override[local.override_type], "login_subnet_id", local.config.login_subnet_id)
vpc_cluster_private_subnets_cidr_blocks = lookup(local.override[local.override_type], "vpc_cluster_private_subnets_cidr_blocks", local.config.vpc_cluster_private_subnets_cidr_blocks)
- cluster_subnet_id = lookup(local.override[local.override_type], "cluster_subnet_id", local.config.cluster_subnet_id)
+ compute_subnet_id = lookup(local.override[local.override_type], "compute_subnet_id", local.config.compute_subnet_id)
cos_instance_name = lookup(local.override[local.override_type], "cos_instance_name", local.config.cos_instance_name)
dns_custom_resolver_id = lookup(local.override[local.override_type], "dns_custom_resolver_id", local.config.dns_custom_resolver_id)
dns_instance_id = lookup(local.override[local.override_type], "dns_instance_id", local.config.dns_instance_id)
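
These locals implement the override mechanism behind the override and override_json_string variables: lookup() returns the value from the parsed override.json when the key is present and otherwise falls back to the computed default, so one key in the override file can replace a single setting without restating the rest. A toy illustration of the fallback semantics (hypothetical values):

    locals {
      defaults  = { compute_subnet_id = null }
      overrides = { compute_subnet_id = "0717-subnet-id" } # as if read from override.json
      effective = lookup(local.overrides, "compute_subnet_id", local.defaults.compute_subnet_id)
      # effective == "0717-subnet-id"; with an empty overrides map it would fall back to null.
    }
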
diff --git a/solutions/lsf/main.tf b/solutions/lsf/main.tf
index 774e2dec..c711c07c 100644
--- a/solutions/lsf/main.tf
+++ b/solutions/lsf/main.tf
@@ -9,7 +9,7 @@ module "lsf" {
vpc_cluster_login_private_subnets_cidr_blocks = local.env.vpc_cluster_login_private_subnets_cidr_blocks
login_subnet_id = local.env.login_subnet_id
vpc_cluster_private_subnets_cidr_blocks = local.env.vpc_cluster_private_subnets_cidr_blocks
- cluster_subnet_id = local.env.cluster_subnet_id
+ compute_subnet_id = local.env.compute_subnet_id
cos_instance_name = local.env.cos_instance_name
dns_custom_resolver_id = local.env.dns_custom_resolver_id
dns_instance_id = local.env.dns_instance_id
diff --git a/solutions/lsf/override.json b/solutions/lsf/override.json
index ddfc48ca..f20df220 100644
--- a/solutions/lsf/override.json
+++ b/solutions/lsf/override.json
@@ -98,10 +98,6 @@
"observability_logs_retention_period": 7,
"observability_monitoring_on_compute_nodes_enable": false,
"observability_monitoring_plan": "graduated-tier",
- "scc_enable": true,
- "scc_profile": "CIS IBM Cloud Foundations Benchmark v1.1.0",
- "scc_location": "us-south",
- "scc_event_notification_plan": "lite",
"skip_flowlogs_s2s_auth_policy": false,
"skip_kms_s2s_auth_policy": false,
"skip_iam_authorization_policy": false
diff --git a/solutions/lsf/variables.tf b/solutions/lsf/variables.tf
index 59d750e2..000c3adc 100644
--- a/solutions/lsf/variables.tf
+++ b/solutions/lsf/variables.tf
@@ -146,18 +146,18 @@ variable "login_subnet_id" {
default = null
description = "Provide the ID of an existing subnet to deploy cluster resources, this is used only for provisioning bastion, deployer, and login nodes. If not provided, new subnet will be created.When providing an existing subnet ID, make sure that the subnet has an associated public gateway..[Learn more](https://cloud.ibm.com/docs/vpc)."
validation {
- condition = (var.cluster_subnet_id == null && var.login_subnet_id == null) || (var.cluster_subnet_id != null && var.login_subnet_id != null)
- error_message = "In case of existing subnets, provide both login_subnet_id and cluster_subnet_id."
+ condition = (var.compute_subnet_id == null && var.login_subnet_id == null) || (var.compute_subnet_id != null && var.login_subnet_id != null)
+ error_message = "In case of existing subnets, provide both login_subnet_id and compute_subnet_id."
}
}
-variable "cluster_subnet_id" {
+variable "compute_subnet_id" {
type = string
default = null
description = "Provide the ID of an existing subnet to deploy cluster resources; this is used only for provisioning VPC file storage shares, management, and compute nodes. If not provided, a new subnet will be created. Ensure that a public gateway is attached to enable VPC API communication. [Learn more](https://cloud.ibm.com/docs/vpc)."
validation {
- condition = anytrue([var.vpc_name != null && var.cluster_subnet_id != null, var.cluster_subnet_id == null])
- error_message = "If the cluster_subnet_id are provided, the user should also provide the vpc_name."
+ condition = anytrue([var.vpc_name != null && var.compute_subnet_id != null, var.compute_subnet_id == null])
+ error_message = "If the compute_subnet_id are provided, the user should also provide the vpc_name."
}
}
##############################################################################
@@ -435,12 +435,12 @@ variable "dns_domain_name" {
compute = string
})
default = {
- compute = "lsf.com"
+ compute = "hpc.local"
}
description = "IBM Cloud DNS Services domain name to be used for the IBM Spectrum LSF cluster."
validation {
- condition = can(regex("^[a-zA-Z0-9]([a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?\\.com$", var.dns_domain_name.compute))
- error_message = "The compute domain name must be a valid FQDN ending in '.com'. It may include letters, digits, hyphens, and must start and end with an alphanumeric character."
+ condition = can(regex("^[a-zA-Z0-9]([a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(\\.[a-zA-Z]{2,})+$", var.dns_domain_name.compute))
+ error_message = "The compute domain name must be a valid FQDN. It may include letters, digits, hyphens, and must start and end with an alphanumeric character."
}
}
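Note (illustrative, not part of the diff): the relaxed pattern accepts any multi-label FQDN rather than only '.com' names. A few hedged checks of the same pattern in isolation, with hypothetical inputs:

locals {
  fqdn_pattern = "^[a-zA-Z0-9]([a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(\\.[a-zA-Z]{2,})+$"

  # true: the new default and the old default both pass
  ok_local = can(regex(local.fqdn_pattern, "hpc.local"))
  ok_com   = can(regex(local.fqdn_pattern, "lsf.com"))

  # false: a leading hyphen and a bare label are still rejected
  bad_hyphen = can(regex(local.fqdn_pattern, "-bad.local"))
  bad_bare   = can(regex(local.fqdn_pattern, "nodots"))
}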
@@ -499,7 +499,7 @@ variable "enable_ldap" {
variable "ldap_basedns" {
type = string
- default = "lsf.com"
+ default = "hpc.local"
description = "The dns domain name is used for configuring the LDAP server. If an LDAP server is already in existence, ensure to provide the associated DNS domain name."
validation {
condition = var.enable_ldap == false || (var.ldap_basedns != null ? (length(trimspace(var.ldap_basedns)) > 0 && var.ldap_basedns != "null") : false)
@@ -831,9 +831,9 @@ variable "app_config_plan" {
type = string
default = "basic"
validation {
- error_message = "Plan for App configuration can only be basic, lite, standardv2, enterprise.."
+ error_message = "Plan for App configuration can only be basic, standardv2, enterprise.."
condition = contains(
- ["basic", "lite", "standardv2", "enterprise"],
+ ["basic", "standardv2", "enterprise"],
var.app_config_plan
)
}
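Note (illustrative, not part of the diff): with "lite" removed from the allowed list, the contains() guard now rejects it at plan time. The same check in isolation:

locals {
  allowed_app_config_plans = ["basic", "standardv2", "enterprise"]

  plan_ok  = contains(local.allowed_app_config_plans, "basic") # true
  plan_bad = contains(local.allowed_app_config_plans, "lite")  # false -> validation error
}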
diff --git a/solutions/scale/catalogValidationValues.json.template b/solutions/scale/catalogValidationValues.json.template
index bb5298d4..f4ed6d44 100644
--- a/solutions/scale/catalogValidationValues.json.template
+++ b/solutions/scale/catalogValidationValues.json.template
@@ -3,5 +3,8 @@
"cluster_prefix": $PREFIX,
"zones": "[\"ca-tor-1\"]",
"existing_resource_group": "geretain-hpc-rg",
- "ssh_keys": "[\"geretain-hpc-ssh-key\"]"
+ "ssh_keys": "[\"geretain-hpc-ssh-key\"]",
+ "ibm_customer_number": $HPC_IBM_CUSTOMER_NUMBER,
+ "storage_gui_username": "storagegui",
+ "storage_gui_password": $RAND_PASSWORD
}
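Note (illustrative, not part of the diff): once the pipeline substitutes $PREFIX, $HPC_IBM_CUSTOMER_NUMBER, and $RAND_PASSWORD, the rendered values file looks roughly like the sketch below; every substituted value here is a placeholder, not a real credential.

{
  "cluster_prefix": "my-scale-test",
  "zones": "[\"ca-tor-1\"]",
  "existing_resource_group": "geretain-hpc-rg",
  "ssh_keys": "[\"geretain-hpc-ssh-key\"]",
  "ibm_customer_number": "<your-ICN>",
  "storage_gui_username": "storagegui",
  "storage_gui_password": "<generated-password>"
}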
diff --git a/solutions/scale/datasource.tf b/solutions/scale/datasource.tf
new file mode 100644
index 00000000..f320a653
--- /dev/null
+++ b/solutions/scale/datasource.tf
@@ -0,0 +1,29 @@
+data "ibm_is_security_group" "storage_security_group" {
+ count = var.vpc_name != null && var.storage_security_group_name != null ? 1 : 0
+ name = var.storage_security_group_name
+}
+
+data "ibm_is_security_group" "compute_security_group" {
+ count = var.vpc_name != null && var.compute_security_group_name != null ? 1 : 0
+ name = var.compute_security_group_name
+}
+
+data "ibm_is_security_group" "gklm_security_group" {
+ count = var.vpc_name != null && var.gklm_security_group_name != null ? 1 : 0
+ name = var.gklm_security_group_name
+}
+
+data "ibm_is_security_group" "ldap_security_group" {
+ count = var.vpc_name != null && var.ldap_security_group_name != null ? 1 : 0
+ name = var.ldap_security_group_name
+}
+
+data "ibm_is_security_group" "client_security_group" {
+ count = var.vpc_name != null && var.client_security_group_name != null ? 1 : 0
+ name = var.client_security_group_name
+}
+
+data "ibm_is_security_group" "login_security_group" {
+ count = var.vpc_name != null && var.login_security_group_name != null ? 1 : 0
+ name = var.login_security_group_name
+}
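Note (illustrative, not part of the diff): each data source above resolves a user-supplied security group name to its full object only when an existing VPC is in play, so downstream code indexes element 0 (count is at most 1). A minimal usage sketch, assuming the storage group name was supplied:

locals {
  # ID of the looked-up storage security group, or null when not supplied
  storage_sg_id = var.storage_security_group_name != null ? data.ibm_is_security_group.storage_security_group[0].id : null

  # Remotes of every rule on that group, using the same splat shape as
  # input_validation.tf below
  storage_sg_remotes = flatten([for remote in data.ibm_is_security_group.storage_security_group[*].rules[*] : remote[*].remote])
}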
diff --git a/solutions/scale/input_validation.tf b/solutions/scale/input_validation.tf
index 066abd17..64a141f4 100644
--- a/solutions/scale/input_validation.tf
+++ b/solutions/scale/input_validation.tf
@@ -11,3 +11,199 @@ locals {
# tflint-ignore: terraform_unused_declarations
icn_chk = regex("^${local.icn_msg}$", (local.icn_cnd ? local.icn_msg : ""))
}
+
+locals {
+ total_compute_instance_count = sum(var.compute_instances[*]["count"])
+ total_storage_instance_count = var.storage_type == "persistent" ? sum(var.storage_baremetal_server[*]["count"]) : sum(var.storage_instances[*]["count"])
+ total_client_instance_count = sum(var.client_instances[*]["count"])
+ total_gklm_instance_count = sum(var.gklm_instances[*]["count"])
+ total_protocol_instance_count = sum(var.protocol_instances[*]["count"])
+
+ storage_sg_rules = flatten([for remote in data.ibm_is_security_group.storage_security_group[*].rules[*] : remote[*].remote])
+ compute_sg_rules = flatten([for remote in data.ibm_is_security_group.compute_security_group[*].rules[*] : remote[*].remote])
+ gklm_sg_rules = flatten([for remote in data.ibm_is_security_group.gklm_security_group[*].rules[*] : remote[*].remote])
+ ldap_sg_rules = flatten([for remote in data.ibm_is_security_group.ldap_security_group[*].rules[*] : remote[*].remote])
+ client_sg_rules = flatten([for remote in data.ibm_is_security_group.client_security_group[*].rules[*] : remote[*].remote])
+ # bastion_sg_rules = flatten([for remote in data.ibm_is_security_group.login_security_group[*].rules[*] : remote[*].remote])
+
+ gklm_condition = var.enable_sg_validation == true && local.total_gklm_instance_count > 0 && var.scale_encryption_enabled == true && var.scale_encryption_type == "gklm" && var.gklm_security_group_name != null
+ strg_condition = var.enable_sg_validation == true && local.total_storage_instance_count > 0 && var.storage_security_group_name != null
+ clnt_condition = var.enable_sg_validation == true && local.total_client_instance_count > 0 && var.client_security_group_name != null
+ comp_condition = var.enable_sg_validation == true && local.total_compute_instance_count > 0 && var.compute_security_group_name != null
+ ldap_condition = var.enable_sg_validation == true && var.enable_ldap == true && var.ldap_security_group_name != null
+ bastion_condition = var.enable_sg_validation == true && var.login_security_group_name != null
+
+ # Storage Security group validation
+ validate_strg_sg_in_strg_sg = local.strg_condition ? contains(local.storage_sg_rules, tolist(data.ibm_is_security_group.storage_security_group[*].id)[0]) : true
+ strg_sg_in_strg_sg_msg = "The Storage security group does not include the storage security group as a rule."
+ # tflint-ignore: terraform_unused_declarations
+ validate_strg_sg_in_strg_sg_chk = var.storage_security_group_name != null ? regex("^${local.strg_sg_in_strg_sg_msg}$", (local.validate_strg_sg_in_strg_sg ? local.strg_sg_in_strg_sg_msg : "")) : true
+
+ validate_comp_sg_in_strg_sg = local.comp_condition ? contains(local.storage_sg_rules, tolist(data.ibm_is_security_group.compute_security_group[*].id)[0]) : true
+ comp_sg_in_strg_sg_msg = "The Storage security group does not include the compute security group as a rule."
+ # tflint-ignore: terraform_unused_declarations
+ validate_comp_sg_in_strg_sg_chk = var.storage_security_group_name != null ? regex("^${local.comp_sg_in_strg_sg_msg}$", (local.validate_comp_sg_in_strg_sg ? local.comp_sg_in_strg_sg_msg : "")) : true
+
+ validate_client_sg_in_strg_sg = local.clnt_condition ? contains(local.storage_sg_rules, tolist(data.ibm_is_security_group.client_security_group[*].id)[0]) : true
+ client_sg_in_strg_sg_msg = "The Storage security group does not include the client security group as a rule."
+ # tflint-ignore: terraform_unused_declarations
+ validate_client_sg_in_strg_sg_chk = var.storage_security_group_name != null ? regex("^${local.client_sg_in_strg_sg_msg}$", (local.validate_client_sg_in_strg_sg ? local.client_sg_in_strg_sg_msg : "")) : true
+
+ validate_gklm_sg_in_strg_sg = local.gklm_condition ? contains(local.storage_sg_rules, tolist(data.ibm_is_security_group.gklm_security_group[*].id)[0]) : true
+  gklm_sg_in_strg_sg_msg = "The Storage security group does not include the GKLM security group as a rule."
+ # tflint-ignore: terraform_unused_declarations
+ validate_gklm_sg_in_strg_sg_chk = var.storage_security_group_name != null ? regex("^${local.gklm_sg_in_strg_sg_msg}$", (local.validate_gklm_sg_in_strg_sg ? local.gklm_sg_in_strg_sg_msg : "")) : true
+
+ validate_ldap_sg_in_strg_sg = local.ldap_condition ? contains(local.storage_sg_rules, tolist(data.ibm_is_security_group.ldap_security_group[*].id)[0]) : true
+  ldap_sg_in_strg_sg_msg = "The Storage security group does not include the LDAP security group as a rule."
+ # tflint-ignore: terraform_unused_declarations
+ validate_ldap_sg_in_strg_sg_chk = var.storage_security_group_name != null ? regex("^${local.ldap_sg_in_strg_sg_msg}$", (local.validate_ldap_sg_in_strg_sg ? local.ldap_sg_in_strg_sg_msg : "")) : true
+
+ validate_bastion_in_strg_sg = local.bastion_condition ? contains(local.storage_sg_rules, tolist(data.ibm_is_security_group.login_security_group[*].id)[0]) : true
+ bastion_sg_in_strg_sg_msg = "The Storage security group does not include the bastion security group as a rule."
+ # tflint-ignore: terraform_unused_declarations
+ validate_bastion_in_strg_sg_chk = var.storage_security_group_name != null ? regex("^${local.bastion_sg_in_strg_sg_msg}$", (local.validate_bastion_in_strg_sg ? local.bastion_sg_in_strg_sg_msg : "")) : true
+
+
+ # Compute Security group validation
+ validate_strg_sg_in_comp_sg = local.strg_condition ? contains(local.compute_sg_rules, tolist(data.ibm_is_security_group.storage_security_group[*].id)[0]) : true
+ strg_sg_in_comp_sg_msg = "The Compute security group does not include the storage security group as a rule."
+ # tflint-ignore: terraform_unused_declarations
+ validate_strg_sg_in_comp_sg_chk = var.compute_security_group_name != null ? regex("^${local.strg_sg_in_comp_sg_msg}$", (local.validate_strg_sg_in_comp_sg ? local.strg_sg_in_comp_sg_msg : "")) : true
+
+ validate_comp_sg_in_comp_sg = local.comp_condition ? contains(local.compute_sg_rules, tolist(data.ibm_is_security_group.compute_security_group[*].id)[0]) : true
+ comp_sg_in_comp_sg_msg = "The Compute security group does not include the compute security group as a rule."
+ # tflint-ignore: terraform_unused_declarations
+ validate_comp_sg_in_comp_sg_chk = var.compute_security_group_name != null ? regex("^${local.comp_sg_in_comp_sg_msg}$", (local.validate_comp_sg_in_comp_sg ? local.comp_sg_in_comp_sg_msg : "")) : true
+
+ validate_client_sg_in_comp_sg = local.clnt_condition ? contains(local.compute_sg_rules, tolist(data.ibm_is_security_group.client_security_group[*].id)[0]) : true
+ client_sg_in_comp_sg_msg = "The Compute security group does not include the client security group as a rule."
+ # tflint-ignore: terraform_unused_declarations
+ validate_client_sg_in_comp_sg_chk = var.compute_security_group_name != null ? regex("^${local.client_sg_in_comp_sg_msg}$", (local.validate_client_sg_in_comp_sg ? local.client_sg_in_comp_sg_msg : "")) : true
+
+ validate_gklm_sg_in_comp_sg = local.gklm_condition ? contains(local.compute_sg_rules, tolist(data.ibm_is_security_group.gklm_security_group[*].id)[0]) : true
+  gklm_sg_in_comp_sg_msg = "The Compute security group does not include the GKLM security group as a rule."
+ # tflint-ignore: terraform_unused_declarations
+ validate_gklm_sg_in_comp_sg_chk = var.compute_security_group_name != null ? regex("^${local.gklm_sg_in_comp_sg_msg}$", (local.validate_gklm_sg_in_comp_sg ? local.gklm_sg_in_comp_sg_msg : "")) : true
+
+ validate_ldap_sg_in_comp_sg = local.ldap_condition ? contains(local.compute_sg_rules, tolist(data.ibm_is_security_group.ldap_security_group[*].id)[0]) : true
+  ldap_sg_in_comp_sg_msg = "The Compute security group does not include the LDAP security group as a rule."
+ # tflint-ignore: terraform_unused_declarations
+ validate_ldap_sg_in_comp_sg_chk = var.compute_security_group_name != null ? regex("^${local.ldap_sg_in_comp_sg_msg}$", (local.validate_ldap_sg_in_comp_sg ? local.ldap_sg_in_comp_sg_msg : "")) : true
+
+ validate_bastion_sg_in_comp_sg = local.bastion_condition ? contains(local.compute_sg_rules, tolist(data.ibm_is_security_group.login_security_group[*].id)[0]) : true
+ bastion_sg_in_comp_sg_msg = "The Compute security group does not include the bastion security group as a rule."
+ # tflint-ignore: terraform_unused_declarations
+ validate_bastion_sg_in_comp_sg_chk = var.compute_security_group_name != null ? regex("^${local.bastion_sg_in_comp_sg_msg}$", (local.validate_bastion_sg_in_comp_sg ? local.bastion_sg_in_comp_sg_msg : "")) : true
+
+
+ # GKLM Security group validation
+ validate_strg_sg_in_gklm_sg = local.strg_condition ? contains(local.gklm_sg_rules, tolist(data.ibm_is_security_group.storage_security_group[*].id)[0]) : true
+ strg_sg_in_gklm_sg_msg = "The GKLM security group does not include the storage security group as a rule."
+ # tflint-ignore: terraform_unused_declarations
+ validate_strg_sg_in_gklm_sg_chk = var.gklm_security_group_name != null ? regex("^${local.strg_sg_in_gklm_sg_msg}$", (local.validate_strg_sg_in_gklm_sg ? local.strg_sg_in_gklm_sg_msg : "")) : true
+
+ validate_comp_sg_in_gklm_sg = local.comp_condition ? contains(local.gklm_sg_rules, tolist(data.ibm_is_security_group.compute_security_group[*].id)[0]) : true
+ comp_sg_in_gklm_sg_msg = "The GKLM security group does not include the compute security group as a rule."
+ # tflint-ignore: terraform_unused_declarations
+ validate_comp_sg_in_gklm_sg_chk = var.gklm_security_group_name != null ? regex("^${local.comp_sg_in_gklm_sg_msg}$", (local.validate_comp_sg_in_gklm_sg ? local.comp_sg_in_gklm_sg_msg : "")) : true
+
+ validate_gklm_sg_in_gklm_sg = local.gklm_condition ? contains(local.gklm_sg_rules, tolist(data.ibm_is_security_group.gklm_security_group[*].id)[0]) : true
+ gklm_sg_in_gklm_sg_msg = "The GKLM security group does not include the GKLM security group as a rule."
+ # tflint-ignore: terraform_unused_declarations
+ validate_gklm_sg_in_gklm_sg_chk = var.gklm_security_group_name != null ? regex("^${local.gklm_sg_in_gklm_sg_msg}$", (local.validate_gklm_sg_in_gklm_sg ? local.gklm_sg_in_gklm_sg_msg : "")) : true
+
+ validate_client_sg_in_gklm_sg = local.clnt_condition ? contains(local.gklm_sg_rules, tolist(data.ibm_is_security_group.client_security_group[*].id)[0]) : true
+ client_sg_in_gklm_sg_msg = "The GKLM security group does not include the client security group as a rule."
+ # tflint-ignore: terraform_unused_declarations
+ validate_client_sg_in_gklm_sg_chk = var.gklm_security_group_name != null ? regex("^${local.client_sg_in_gklm_sg_msg}$", (local.validate_client_sg_in_gklm_sg ? local.client_sg_in_gklm_sg_msg : "")) : true
+
+ validate_ldap_sg_in_gklm_sg = local.ldap_condition ? contains(local.gklm_sg_rules, tolist(data.ibm_is_security_group.ldap_security_group[*].id)[0]) : true
+  ldap_sg_in_gklm_sg_msg = "The GKLM security group does not include the LDAP security group as a rule."
+ # tflint-ignore: terraform_unused_declarations
+ validate_ldap_sg_in_gklm_sg_chk = var.gklm_security_group_name != null ? regex("^${local.ldap_sg_in_gklm_sg_msg}$", (local.validate_ldap_sg_in_gklm_sg ? local.ldap_sg_in_gklm_sg_msg : "")) : true
+
+ validate_bastion_sg_in_gklm_sg = local.bastion_condition ? contains(local.gklm_sg_rules, tolist(data.ibm_is_security_group.login_security_group[*].id)[0]) : true
+ bastion_sg_in_gklm_sg_msg = "The GKLM security group does not include the bastion security group as a rule."
+ # tflint-ignore: terraform_unused_declarations
+ validate_bastion_sg_in_gklm_sg_chk = var.gklm_security_group_name != null ? regex("^${local.bastion_sg_in_gklm_sg_msg}$", (local.validate_bastion_sg_in_gklm_sg ? local.bastion_sg_in_gklm_sg_msg : "")) : true
+
+
+ # LDAP Security group validation
+ validate_strg_sg_in_ldap_sg = local.strg_condition ? contains(local.ldap_sg_rules, tolist(data.ibm_is_security_group.storage_security_group[*].id)[0]) : true
+ strg_sg_in_ldap_sg_msg = "The LDAP security group does not include the storage security group as a rule."
+ # tflint-ignore: terraform_unused_declarations
+ validate_strg_sg_in_ldap_sg_chk = var.ldap_security_group_name != null ? regex("^${local.strg_sg_in_ldap_sg_msg}$", (local.validate_strg_sg_in_ldap_sg ? local.strg_sg_in_ldap_sg_msg : "")) : true
+
+ validate_comp_sg_in_ldap_sg = local.comp_condition ? contains(local.ldap_sg_rules, tolist(data.ibm_is_security_group.compute_security_group[*].id)[0]) : true
+ comp_sg_in_ldap_sg_msg = "The LDAP security group does not include the compute security group as a rule."
+ # tflint-ignore: terraform_unused_declarations
+ validate_comp_sg_in_ldap_sg_chk = var.ldap_security_group_name != null ? regex("^${local.comp_sg_in_ldap_sg_msg}$", (local.validate_comp_sg_in_ldap_sg ? local.comp_sg_in_ldap_sg_msg : "")) : true
+
+ validate_ldap_sg_in_ldap_sg = local.ldap_condition ? contains(local.ldap_sg_rules, tolist(data.ibm_is_security_group.ldap_security_group[*].id)[0]) : true
+ ldap_sg_in_ldap_sg_msg = "The LDAP security group does not include the LDAP security group as a rule."
+ # tflint-ignore: terraform_unused_declarations
+ validate_ldap_sg_in_ldap_sg_chk = var.ldap_security_group_name != null ? regex("^${local.ldap_sg_in_ldap_sg_msg}$", (local.validate_ldap_sg_in_ldap_sg ? local.ldap_sg_in_ldap_sg_msg : "")) : true
+
+ validate_gklm_sg_in_ldap_sg = local.gklm_condition ? contains(local.ldap_sg_rules, tolist(data.ibm_is_security_group.gklm_security_group[*].id)[0]) : true
+ gklm_sg_in_ldap_sg_msg = "The LDAP security group does not include the GKLM security group as a rule."
+ # tflint-ignore: terraform_unused_declarations
+ validate_gklm_sg_in_ldap_sg_chk = var.ldap_security_group_name != null ? regex("^${local.gklm_sg_in_ldap_sg_msg}$", (local.validate_gklm_sg_in_ldap_sg ? local.gklm_sg_in_ldap_sg_msg : "")) : true
+
+ validate_client_sg_in_ldap_sg = local.clnt_condition ? contains(local.ldap_sg_rules, tolist(data.ibm_is_security_group.client_security_group[*].id)[0]) : true
+ client_sg_in_ldap_sg_msg = "The LDAP security group does not include the client security group as a rule."
+ # tflint-ignore: terraform_unused_declarations
+ validate_client_sg_in_ldap_sg_chk = var.ldap_security_group_name != null ? regex("^${local.client_sg_in_ldap_sg_msg}$", (local.validate_client_sg_in_ldap_sg ? local.client_sg_in_ldap_sg_msg : "")) : true
+
+ validate_bastion_sg_in_ldap_sg = local.bastion_condition ? contains(local.ldap_sg_rules, tolist(data.ibm_is_security_group.login_security_group[*].id)[0]) : true
+ bastion_sg_in_ldap_sg_msg = "The LDAP security group does not include the bastion security group as a rule."
+ # tflint-ignore: terraform_unused_declarations
+ validate_bastion_sg_in_ldap_sg_chk = var.ldap_security_group_name != null ? regex("^${local.bastion_sg_in_ldap_sg_msg}$", (local.validate_bastion_sg_in_ldap_sg ? local.bastion_sg_in_ldap_sg_msg : "")) : true
+
+ # Client Security group validation
+ validate_strg_sg_in_client_sg = local.strg_condition ? contains(local.client_sg_rules, tolist(data.ibm_is_security_group.storage_security_group[*].id)[0]) : true
+ strg_sg_in_client_sg_msg = "The Client security group does not include the storage security group as a rule."
+ # tflint-ignore: terraform_unused_declarations
+ validate_strg_sg_in_client_sg_chk = var.client_security_group_name != null ? regex("^${local.strg_sg_in_client_sg_msg}$", (local.validate_strg_sg_in_client_sg ? local.strg_sg_in_client_sg_msg : "")) : true
+
+ validate_comp_sg_in_client_sg = local.comp_condition ? contains(local.client_sg_rules, tolist(data.ibm_is_security_group.compute_security_group[*].id)[0]) : true
+ comp_sg_in_client_sg_msg = "The Client security group does not include the compute security group as a rule."
+ # tflint-ignore: terraform_unused_declarations
+ validate_comp_sg_in_client_sg_chk = var.client_security_group_name != null ? regex("^${local.comp_sg_in_client_sg_msg}$", (local.validate_comp_sg_in_client_sg ? local.comp_sg_in_client_sg_msg : "")) : true
+
+ validate_ldap_sg_in_client_sg = local.ldap_condition ? contains(local.client_sg_rules, tolist(data.ibm_is_security_group.ldap_security_group[*].id)[0]) : true
+ ldap_sg_in_client_sg_msg = "The Client security group does not include the LDAP security group as a rule."
+ # tflint-ignore: terraform_unused_declarations
+ validate_ldap_sg_in_client_sg_chk = var.client_security_group_name != null ? regex("^${local.ldap_sg_in_client_sg_msg}$", (local.validate_ldap_sg_in_client_sg ? local.ldap_sg_in_client_sg_msg : "")) : true
+
+ validate_gklm_sg_in_client_sg = local.gklm_condition ? contains(local.client_sg_rules, tolist(data.ibm_is_security_group.gklm_security_group[*].id)[0]) : true
+ gklm_sg_in_client_sg_msg = "The Client security group does not include the GKLM security group as a rule."
+ # tflint-ignore: terraform_unused_declarations
+ validate_gklm_sg_in_client_sg_chk = var.client_security_group_name != null ? regex("^${local.gklm_sg_in_client_sg_msg}$", (local.validate_gklm_sg_in_client_sg ? local.gklm_sg_in_client_sg_msg : "")) : true
+
+ validate_client_sg_in_client_sg = local.clnt_condition ? contains(local.client_sg_rules, tolist(data.ibm_is_security_group.client_security_group[*].id)[0]) : true
+ client_sg_in_client_sg_msg = "The Client security group does not include the client security group as a rule."
+ # tflint-ignore: terraform_unused_declarations
+ validate_client_sg_in_client_sg_chk = var.client_security_group_name != null ? regex("^${local.client_sg_in_client_sg_msg}$", (local.validate_client_sg_in_client_sg ? local.client_sg_in_client_sg_msg : "")) : true
+
+ validate_bastion_sg_in_client_sg = local.bastion_condition ? contains(local.client_sg_rules, tolist(data.ibm_is_security_group.login_security_group[*].id)[0]) : true
+ bastion_sg_in_client_sg_msg = "The Client security group does not include the bastion security group as a rule."
+ # tflint-ignore: terraform_unused_declarations
+ validate_bastion_sg_in_client_sg_chk = var.client_security_group_name != null ? regex("^${local.bastion_sg_in_client_sg_msg}$", (local.validate_bastion_sg_in_client_sg ? local.bastion_sg_in_client_sg_msg : "")) : true
+}
+
+locals {
+ # Subnet ID validation for existing VPC with instances count greater than 0
+  validate_subnet_id_ext_vpc_msg = "When deploying into an existing VPC with any existing subnet IDs, provide the respective 'subnet_id' for every instance type whose count is greater than 0 (or set that count to 0), and always provide 'login_subnet_id'."
+ validate_subnet_id_ext_vpc = alltrue([
+ var.vpc_name != null && (var.storage_subnet_id != null || var.compute_subnet_id != null || var.protocol_subnet_id != null || var.client_subnet_id != null || var.login_subnet_id != null) ?
+    ((local.total_storage_instance_count > 0) == (var.storage_subnet_id != null)) &&
+    ((local.total_client_instance_count > 0) == (var.client_subnet_id != null)) &&
+    ((local.total_protocol_instance_count > 0) == (var.protocol_subnet_id != null)) &&
+    ((local.total_compute_instance_count > 0) == (var.compute_subnet_id != null)) &&
+    (var.login_subnet_id != null)
+ : true])
+ # tflint-ignore: terraform_unused_declarations
+ validate_subnet_id_ext_vpc_chk = regex("^${local.validate_subnet_id_ext_vpc_msg}$",
+ (local.validate_subnet_id_ext_vpc ? local.validate_subnet_id_ext_vpc_msg : ""))
+}
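Note (illustrative, not part of the diff): every *_chk local above relies on the same Terraform trick: regex() raises an error when the pattern does not match, so forcing the subject to an empty string on a failed condition aborts the plan with the message embedded in the pattern. Stripped to its essence, with a hypothetical condition and message:

locals {
  cond = false # the predicate being asserted
  msg  = "Example: the condition did not hold."

  # Matches (and is a no-op) when cond is true; errors out during plan with
  # msg in the diagnostic when cond is false, because regex("^...$", "")
  # cannot match.
  # tflint-ignore: terraform_unused_declarations
  chk = regex("^${local.msg}$", (local.cond ? local.msg : ""))
}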
diff --git a/solutions/scale/locals.tf b/solutions/scale/locals.tf
index 90f43d9c..82d4bb65 100644
--- a/solutions/scale/locals.tf
+++ b/solutions/scale/locals.tf
@@ -18,153 +18,158 @@ locals {
locals {
config = {
- existing_resource_group = var.existing_resource_group
- remote_allowed_ips = var.remote_allowed_ips
- ssh_keys = var.ssh_keys
- vpc_cluster_login_private_subnets_cidr_blocks = var.vpc_cluster_login_private_subnets_cidr_blocks
- compute_gui_password = var.compute_gui_password
- compute_gui_username = var.compute_gui_username
- vpc_cluster_private_subnets_cidr_blocks = var.vpc_cluster_private_subnets_cidr_blocks
- cos_instance_name = var.cos_instance_name
- dns_custom_resolver_id = var.dns_custom_resolver_id
- dns_instance_id = var.dns_instance_id
- dns_domain_names = var.dns_domain_names
- enable_atracker = var.enable_atracker
- # enable_bastion = var.enable_bastion
- bastion_instance = var.bastion_instance
- deployer_instance = var.deployer_instance
- enable_cos_integration = var.enable_cos_integration
- enable_vpc_flow_logs = var.enable_vpc_flow_logs
- hpcs_instance_name = var.hpcs_instance_name
- key_management = var.key_management
- client_instances = var.client_instances
- client_subnets_cidr = var.client_subnets_cidr
- vpc_cidr = var.vpc_cidr
- placement_strategy = var.placement_strategy
- cluster_prefix = var.cluster_prefix
- protocol_instances = var.protocol_instances
- protocol_subnets_cidr = var.protocol_subnets_cidr
- compute_instances = var.compute_instances
- storage_gui_password = var.storage_gui_password
- storage_gui_username = var.storage_gui_username
- storage_instances = var.storage_instances
- storage_servers = var.storage_servers
- storage_subnets_cidr = var.storage_subnets_cidr
- vpc_name = var.vpc_name
- observability_atracker_enable = var.observability_atracker_enable
- observability_atracker_target_type = var.observability_atracker_target_type
- observability_monitoring_enable = var.observability_monitoring_enable
- observability_logs_enable_for_management = var.observability_logs_enable_for_management
- observability_logs_enable_for_compute = var.observability_logs_enable_for_compute
- observability_enable_platform_logs = var.observability_enable_platform_logs
- observability_enable_metrics_routing = var.observability_enable_metrics_routing
- observability_logs_retention_period = var.observability_logs_retention_period
- observability_monitoring_on_compute_nodes_enable = var.observability_monitoring_on_compute_nodes_enable
- observability_monitoring_plan = var.observability_monitoring_plan
- skip_flowlogs_s2s_auth_policy = var.skip_flowlogs_s2s_auth_policy
- skip_kms_s2s_auth_policy = var.skip_kms_s2s_auth_policy
- skip_iam_block_storage_authorization_policy = var.skip_iam_block_storage_authorization_policy
- ibmcloud_api_key = var.ibmcloud_api_key
- afm_instances = var.afm_instances
- afm_cos_config = var.afm_cos_config
- enable_ldap = var.enable_ldap
- ldap_basedns = var.ldap_basedns
- ldap_admin_password = var.ldap_admin_password
- ldap_user_name = var.ldap_user_name
- ldap_user_password = var.ldap_user_password
- ldap_server = var.ldap_server
- ldap_server_cert = var.ldap_server_cert
- ldap_instance = var.ldap_instance
- scale_encryption_enabled = var.scale_encryption_enabled
- scale_encryption_type = var.scale_encryption_type
- gklm_instance_key_pair = var.gklm_instance_key_pair
- gklm_instances = var.gklm_instances
- storage_type = var.storage_type
- colocate_protocol_instances = var.colocate_protocol_instances
- scale_encryption_admin_default_password = var.scale_encryption_admin_default_password
- scale_encryption_admin_password = var.scale_encryption_admin_password
- scale_encryption_admin_username = var.scale_encryption_admin_username
- filesystem_config = var.filesystem_config
- existing_bastion_instance_name = var.existing_bastion_instance_name
- existing_bastion_instance_public_ip = var.existing_bastion_instance_public_ip
- existing_bastion_security_group_id = var.existing_bastion_security_group_id
- existing_bastion_ssh_private_key = var.existing_bastion_ssh_private_key
+ existing_resource_group = var.existing_resource_group
+ remote_allowed_ips = var.remote_allowed_ips
+ ssh_keys = var.ssh_keys
+ login_subnets_cidr = var.login_subnets_cidr
+ compute_gui_password = var.compute_gui_password
+ compute_gui_username = var.compute_gui_username
+ compute_subnets_cidr = var.compute_subnets_cidr
+ cos_instance_name = var.cos_instance_name
+ dns_custom_resolver_id = var.dns_custom_resolver_id
+ dns_instance_id = var.dns_instance_id
+ dns_domain_names = var.dns_domain_names
+ bastion_instance = var.bastion_instance
+ deployer_instance = var.deployer_instance
+ enable_cos_integration = var.enable_cos_integration
+ enable_vpc_flow_logs = var.enable_vpc_flow_logs
+ client_instances = var.client_instances
+ client_subnets_cidr = var.client_subnets_cidr
+ vpc_cidr = var.vpc_cidr
+ cluster_prefix = var.cluster_prefix
+ protocol_instances = var.protocol_instances
+ protocol_subnets_cidr = var.protocol_subnets_cidr
+ compute_instances = var.compute_instances
+ storage_gui_password = var.storage_gui_password
+ storage_gui_username = var.storage_gui_username
+ storage_instances = var.storage_instances
+ storage_baremetal_server = var.storage_baremetal_server
+ storage_subnets_cidr = var.storage_subnets_cidr
+ vpc_name = var.vpc_name
+ observability_atracker_enable = var.observability_atracker_enable
+ observability_atracker_target_type = var.observability_atracker_target_type
+ sccwp_service_plan = var.sccwp_service_plan
+ sccwp_enable = var.sccwp_enable
+ cspm_enabled = var.cspm_enabled
+ app_config_plan = var.app_config_plan
+ skip_flowlogs_s2s_auth_policy = var.skip_flowlogs_s2s_auth_policy
+ ibmcloud_api_key = var.ibmcloud_api_key
+ afm_instances = var.afm_instances
+ afm_cos_config = var.afm_cos_config
+ enable_ldap = var.enable_ldap
+ ldap_basedns = var.ldap_basedns
+ ldap_admin_password = var.ldap_admin_password
+ ldap_user_name = var.ldap_user_name
+ ldap_user_password = var.ldap_user_password
+ ldap_server = var.ldap_server
+ ldap_server_cert = var.ldap_server_cert
+ ldap_instance = var.ldap_instance
+ scale_encryption_enabled = var.scale_encryption_enabled
+ scale_encryption_type = var.scale_encryption_type
+ gklm_instances = var.gklm_instances
+ storage_type = var.storage_type
+ colocate_protocol_instances = var.colocate_protocol_instances
+ scale_encryption_admin_password = var.scale_encryption_admin_password
+ key_protect_instance_id = var.key_protect_instance_id
+ filesystem_config = var.filesystem_config
+ existing_bastion_instance_name = var.existing_bastion_instance_name
+ existing_bastion_instance_public_ip = var.existing_bastion_instance_public_ip
+ existing_bastion_security_group_id = var.existing_bastion_security_group_id
+ existing_bastion_ssh_private_key = var.existing_bastion_ssh_private_key
+ bms_boot_drive_encryption = var.bms_boot_drive_encryption
+ tie_breaker_baremetal_server_profile = var.tie_breaker_baremetal_server_profile
+ filesets_config = var.filesets_config
+ login_security_group_name = var.login_security_group_name
+ storage_security_group_name = var.storage_security_group_name
+ compute_security_group_name = var.compute_security_group_name
+ client_security_group_name = var.client_security_group_name
+ gklm_security_group_name = var.gklm_security_group_name
+ ldap_security_group_name = var.ldap_security_group_name
+ login_subnet_id = var.login_subnet_id
+ compute_subnet_id = var.compute_subnet_id
+ storage_subnet_id = var.storage_subnet_id
+ protocol_subnet_id = var.protocol_subnet_id
+ client_subnet_id = var.client_subnet_id
+ scale_management_vsi_profile = var.scale_management_vsi_profile
}
}
+
# Compile Environment for Config output
locals {
env = {
- existing_resource_group = lookup(local.override[local.override_type], "existing_resource_group", local.config.existing_resource_group)
- remote_allowed_ips = lookup(local.override[local.override_type], "remote_allowed_ips", local.config.remote_allowed_ips)
- ssh_keys = lookup(local.override[local.override_type], "ssh_keys", local.config.ssh_keys)
- vpc_cluster_login_private_subnets_cidr_blocks = lookup(local.override[local.override_type], "vpc_cluster_login_private_subnets_cidr_blocks", local.config.vpc_cluster_login_private_subnets_cidr_blocks)
- compute_gui_password = lookup(local.override[local.override_type], "compute_gui_password", local.config.compute_gui_password)
- compute_gui_username = lookup(local.override[local.override_type], "compute_gui_username", local.config.compute_gui_username)
- vpc_cluster_private_subnets_cidr_blocks = lookup(local.override[local.override_type], "vpc_cluster_private_subnets_cidr_blocks", local.config.vpc_cluster_private_subnets_cidr_blocks)
- cos_instance_name = lookup(local.override[local.override_type], "cos_instance_name", local.config.cos_instance_name)
- dns_custom_resolver_id = lookup(local.override[local.override_type], "dns_custom_resolver_id", local.config.dns_custom_resolver_id)
- dns_instance_id = lookup(local.override[local.override_type], "dns_instance_id", local.config.dns_instance_id)
- dns_domain_names = lookup(local.override[local.override_type], "dns_domain_names", local.config.dns_domain_names)
- enable_atracker = lookup(local.override[local.override_type], "enable_atracker", local.config.enable_atracker)
- # enable_bastion = lookup(local.override[local.override_type], "enable_bastion", local.config.enable_bastion)
- bastion_instance = lookup(local.override[local.override_type], "bastion_instance", local.config.bastion_instance)
- deployer_instance = lookup(local.override[local.override_type], "deployer_instance", local.config.deployer_instance)
- enable_cos_integration = lookup(local.override[local.override_type], "enable_cos_integration", local.config.enable_cos_integration)
- enable_vpc_flow_logs = lookup(local.override[local.override_type], "enable_vpc_flow_logs", local.config.enable_vpc_flow_logs)
- hpcs_instance_name = lookup(local.override[local.override_type], "hpcs_instance_name", local.config.hpcs_instance_name)
- key_management = lookup(local.override[local.override_type], "key_management", local.config.key_management)
- client_instances = lookup(local.override[local.override_type], "client_instances", local.config.client_instances)
- client_subnets_cidr = lookup(local.override[local.override_type], "client_subnets_cidr", local.config.client_subnets_cidr)
- vpc_cidr = lookup(local.override[local.override_type], "vpc_cidr", local.config.vpc_cidr)
- placement_strategy = lookup(local.override[local.override_type], "placement_strategy", local.config.placement_strategy)
- cluster_prefix = lookup(local.override[local.override_type], "cluster_prefix", local.config.cluster_prefix)
- protocol_instances = lookup(local.override[local.override_type], "protocol_instances", local.config.protocol_instances)
- protocol_subnets_cidr = lookup(local.override[local.override_type], "protocol_subnets_cidr", local.config.protocol_subnets_cidr)
- compute_instances = lookup(local.override[local.override_type], "compute_instances", local.config.compute_instances)
- storage_gui_password = lookup(local.override[local.override_type], "storage_gui_password", local.config.storage_gui_password)
- storage_gui_username = lookup(local.override[local.override_type], "storage_gui_username", local.config.storage_gui_username)
- storage_instances = lookup(local.override[local.override_type], "storage_instances", local.config.storage_instances)
- storage_servers = lookup(local.override[local.override_type], "storage_servers", local.config.storage_servers)
- storage_subnets_cidr = lookup(local.override[local.override_type], "storage_subnets_cidr", local.config.storage_subnets_cidr)
- vpc_name = lookup(local.override[local.override_type], "vpc_name", local.config.vpc_name)
- observability_atracker_enable = lookup(local.override[local.override_type], "observability_atracker_enable", local.config.observability_atracker_enable)
- observability_atracker_target_type = lookup(local.override[local.override_type], "observability_atracker_target_type", local.config.observability_atracker_target_type)
- observability_monitoring_enable = lookup(local.override[local.override_type], "observability_monitoring_enable", local.config.observability_monitoring_enable)
- observability_logs_enable_for_management = lookup(local.override[local.override_type], "observability_logs_enable_for_management", local.config.observability_logs_enable_for_management)
- observability_logs_enable_for_compute = lookup(local.override[local.override_type], "observability_logs_enable_for_compute", local.config.observability_logs_enable_for_compute)
- observability_enable_platform_logs = lookup(local.override[local.override_type], "observability_enable_platform_logs", local.config.observability_enable_platform_logs)
- observability_enable_metrics_routing = lookup(local.override[local.override_type], "observability_enable_metrics_routing", local.config.observability_enable_metrics_routing)
- observability_logs_retention_period = lookup(local.override[local.override_type], "observability_logs_retention_period", local.config.observability_logs_retention_period)
- observability_monitoring_on_compute_nodes_enable = lookup(local.override[local.override_type], "observability_monitoring_on_compute_nodes_enable", local.config.observability_monitoring_on_compute_nodes_enable)
- observability_monitoring_plan = lookup(local.override[local.override_type], "observability_monitoring_plan", local.config.observability_monitoring_plan)
- skip_flowlogs_s2s_auth_policy = lookup(local.override[local.override_type], "skip_flowlogs_s2s_auth_policy", local.config.skip_flowlogs_s2s_auth_policy)
- skip_kms_s2s_auth_policy = lookup(local.override[local.override_type], "skip_kms_s2s_auth_policy", local.config.skip_kms_s2s_auth_policy)
- skip_iam_block_storage_authorization_policy = lookup(local.override[local.override_type], "skip_iam_block_storage_authorization_policy", local.config.skip_iam_block_storage_authorization_policy)
- ibmcloud_api_key = lookup(local.override[local.override_type], "ibmcloud_api_key", local.config.ibmcloud_api_key)
- afm_instances = lookup(local.override[local.override_type], "afm_instances", local.config.afm_instances)
- afm_cos_config = lookup(local.override[local.override_type], "afm_cos_config", local.config.afm_cos_config)
- enable_ldap = lookup(local.override[local.override_type], "enable_ldap", local.config.enable_ldap)
- ldap_basedns = lookup(local.override[local.override_type], "ldap_basedns", local.config.ldap_basedns)
- ldap_admin_password = lookup(local.override[local.override_type], "ldap_admin_password", local.config.ldap_admin_password)
- ldap_user_name = lookup(local.override[local.override_type], "ldap_user_name", local.config.ldap_user_name)
- ldap_user_password = lookup(local.override[local.override_type], "ldap_user_password", local.config.ldap_user_password)
- ldap_server = lookup(local.override[local.override_type], "ldap_server", local.config.ldap_server)
- ldap_server_cert = lookup(local.override[local.override_type], "ldap_server_cert", local.config.ldap_server_cert)
- ldap_instance = lookup(local.override[local.override_type], "ldap_instance", local.config.ldap_instance)
- scale_encryption_enabled = lookup(local.override[local.override_type], "scale_encryption_enabled", local.config.scale_encryption_enabled)
- scale_encryption_type = lookup(local.override[local.override_type], "scale_encryption_type", local.config.scale_encryption_type)
- gklm_instance_key_pair = lookup(local.override[local.override_type], "gklm_instance_key_pair", local.config.gklm_instance_key_pair)
- gklm_instances = lookup(local.override[local.override_type], "gklm_instances", local.config.gklm_instances)
- storage_type = lookup(local.override[local.override_type], "storage_type", local.config.storage_type)
- colocate_protocol_instances = lookup(local.override[local.override_type], "colocate_protocol_instances", local.config.colocate_protocol_instances)
- scale_encryption_admin_default_password = lookup(local.override[local.override_type], "scale_encryption_admin_default_password", local.config.scale_encryption_admin_default_password)
- scale_encryption_admin_password = lookup(local.override[local.override_type], "scale_encryption_admin_password", local.config.scale_encryption_admin_password)
- scale_encryption_admin_username = lookup(local.override[local.override_type], "scale_encryption_admin_username", local.config.scale_encryption_admin_username)
- filesystem_config = lookup(local.override[local.override_type], "filesystem_config", local.config.filesystem_config)
- existing_bastion_instance_name = lookup(local.override[local.override_type], "existing_bastion_instance_name", local.config.existing_bastion_instance_name)
- existing_bastion_instance_public_ip = lookup(local.override[local.override_type], "existing_bastion_instance_public_ip", local.config.existing_bastion_instance_public_ip)
- existing_bastion_security_group_id = lookup(local.override[local.override_type], "existing_bastion_security_group_id", local.config.existing_bastion_security_group_id)
- existing_bastion_ssh_private_key = lookup(local.override[local.override_type], "existing_bastion_ssh_private_key", local.config.existing_bastion_ssh_private_key)
+ existing_resource_group = lookup(local.override[local.override_type], "existing_resource_group", local.config.existing_resource_group)
+ remote_allowed_ips = lookup(local.override[local.override_type], "remote_allowed_ips", local.config.remote_allowed_ips)
+ ssh_keys = lookup(local.override[local.override_type], "ssh_keys", local.config.ssh_keys)
+ login_subnets_cidr = lookup(local.override[local.override_type], "login_subnets_cidr", local.config.login_subnets_cidr)
+ compute_gui_password = lookup(local.override[local.override_type], "compute_gui_password", local.config.compute_gui_password)
+ compute_gui_username = lookup(local.override[local.override_type], "compute_gui_username", local.config.compute_gui_username)
+ compute_subnets_cidr = lookup(local.override[local.override_type], "compute_subnets_cidr", local.config.compute_subnets_cidr)
+ cos_instance_name = lookup(local.override[local.override_type], "cos_instance_name", local.config.cos_instance_name)
+ dns_custom_resolver_id = lookup(local.override[local.override_type], "dns_custom_resolver_id", local.config.dns_custom_resolver_id)
+ dns_instance_id = lookup(local.override[local.override_type], "dns_instance_id", local.config.dns_instance_id)
+ dns_domain_names = lookup(local.override[local.override_type], "dns_domain_names", local.config.dns_domain_names)
+ bastion_instance = lookup(local.override[local.override_type], "bastion_instance", local.config.bastion_instance)
+ deployer_instance = lookup(local.override[local.override_type], "deployer_instance", local.config.deployer_instance)
+ enable_cos_integration = lookup(local.override[local.override_type], "enable_cos_integration", local.config.enable_cos_integration)
+ enable_vpc_flow_logs = lookup(local.override[local.override_type], "enable_vpc_flow_logs", local.config.enable_vpc_flow_logs)
+ client_instances = lookup(local.override[local.override_type], "client_instances", local.config.client_instances)
+ client_subnets_cidr = lookup(local.override[local.override_type], "client_subnets_cidr", local.config.client_subnets_cidr)
+ vpc_cidr = lookup(local.override[local.override_type], "vpc_cidr", local.config.vpc_cidr)
+ cluster_prefix = lookup(local.override[local.override_type], "cluster_prefix", local.config.cluster_prefix)
+ protocol_instances = lookup(local.override[local.override_type], "protocol_instances", local.config.protocol_instances)
+ protocol_subnets_cidr = lookup(local.override[local.override_type], "protocol_subnets_cidr", local.config.protocol_subnets_cidr)
+ compute_instances = lookup(local.override[local.override_type], "compute_instances", local.config.compute_instances)
+ storage_gui_password = lookup(local.override[local.override_type], "storage_gui_password", local.config.storage_gui_password)
+ storage_gui_username = lookup(local.override[local.override_type], "storage_gui_username", local.config.storage_gui_username)
+ storage_instances = lookup(local.override[local.override_type], "storage_instances", local.config.storage_instances)
+ storage_baremetal_server = lookup(local.override[local.override_type], "storage_baremetal_server", local.config.storage_baremetal_server)
+ storage_subnets_cidr = lookup(local.override[local.override_type], "storage_subnets_cidr", local.config.storage_subnets_cidr)
+ vpc_name = lookup(local.override[local.override_type], "vpc_name", local.config.vpc_name)
+ observability_atracker_enable = lookup(local.override[local.override_type], "observability_atracker_enable", local.config.observability_atracker_enable)
+ observability_atracker_target_type = lookup(local.override[local.override_type], "observability_atracker_target_type", local.config.observability_atracker_target_type)
+    sccwp_enable = lookup(local.override[local.override_type], "sccwp_enable", local.config.sccwp_enable)
+    cspm_enabled = lookup(local.override[local.override_type], "cspm_enabled", local.config.cspm_enabled)
+    sccwp_service_plan = lookup(local.override[local.override_type], "sccwp_service_plan", local.config.sccwp_service_plan)
+ app_config_plan = lookup(local.override[local.override_type], "app_config_plan", local.config.app_config_plan)
+ skip_flowlogs_s2s_auth_policy = lookup(local.override[local.override_type], "skip_flowlogs_s2s_auth_policy", local.config.skip_flowlogs_s2s_auth_policy)
+ ibmcloud_api_key = lookup(local.override[local.override_type], "ibmcloud_api_key", local.config.ibmcloud_api_key)
+ afm_instances = lookup(local.override[local.override_type], "afm_instances", local.config.afm_instances)
+ afm_cos_config = lookup(local.override[local.override_type], "afm_cos_config", local.config.afm_cos_config)
+ enable_ldap = lookup(local.override[local.override_type], "enable_ldap", local.config.enable_ldap)
+ ldap_basedns = lookup(local.override[local.override_type], "ldap_basedns", local.config.ldap_basedns)
+ ldap_admin_password = lookup(local.override[local.override_type], "ldap_admin_password", local.config.ldap_admin_password)
+ ldap_user_name = lookup(local.override[local.override_type], "ldap_user_name", local.config.ldap_user_name)
+ ldap_user_password = lookup(local.override[local.override_type], "ldap_user_password", local.config.ldap_user_password)
+ ldap_server = lookup(local.override[local.override_type], "ldap_server", local.config.ldap_server)
+ ldap_server_cert = lookup(local.override[local.override_type], "ldap_server_cert", local.config.ldap_server_cert)
+ ldap_instance = lookup(local.override[local.override_type], "ldap_instance", local.config.ldap_instance)
+ scale_encryption_enabled = lookup(local.override[local.override_type], "scale_encryption_enabled", local.config.scale_encryption_enabled)
+ scale_encryption_type = lookup(local.override[local.override_type], "scale_encryption_type", local.config.scale_encryption_type)
+ gklm_instances = lookup(local.override[local.override_type], "gklm_instances", local.config.gklm_instances)
+ key_protect_instance_id = lookup(local.override[local.override_type], "key_protect_instance_id", local.config.key_protect_instance_id)
+ storage_type = lookup(local.override[local.override_type], "storage_type", local.config.storage_type)
+ colocate_protocol_instances = lookup(local.override[local.override_type], "colocate_protocol_instances", local.config.colocate_protocol_instances)
+ scale_encryption_admin_password = lookup(local.override[local.override_type], "scale_encryption_admin_password", local.config.scale_encryption_admin_password)
+ filesystem_config = lookup(local.override[local.override_type], "filesystem_config", local.config.filesystem_config)
+ existing_bastion_instance_name = lookup(local.override[local.override_type], "existing_bastion_instance_name", local.config.existing_bastion_instance_name)
+ existing_bastion_instance_public_ip = lookup(local.override[local.override_type], "existing_bastion_instance_public_ip", local.config.existing_bastion_instance_public_ip)
+ existing_bastion_security_group_id = lookup(local.override[local.override_type], "existing_bastion_security_group_id", local.config.existing_bastion_security_group_id)
+ existing_bastion_ssh_private_key = lookup(local.override[local.override_type], "existing_bastion_ssh_private_key", local.config.existing_bastion_ssh_private_key)
+ bms_boot_drive_encryption = lookup(local.override[local.override_type], "bms_boot_drive_encryption", local.config.bms_boot_drive_encryption)
+ tie_breaker_baremetal_server_profile = lookup(local.override[local.override_type], "tie_breaker_baremetal_server_profile", local.config.tie_breaker_baremetal_server_profile)
+ filesets_config = lookup(local.override[local.override_type], "filesets_config", local.config.filesets_config)
+ login_security_group_name = lookup(local.override[local.override_type], "login_security_group_name", local.config.login_security_group_name)
+ storage_security_group_name = lookup(local.override[local.override_type], "storage_security_group_name", local.config.storage_security_group_name)
+ compute_security_group_name = lookup(local.override[local.override_type], "compute_security_group_name", local.config.compute_security_group_name)
+ client_security_group_name = lookup(local.override[local.override_type], "client_security_group_name", local.config.client_security_group_name)
+ gklm_security_group_name = lookup(local.override[local.override_type], "gklm_security_group_name", local.config.gklm_security_group_name)
+ ldap_security_group_name = lookup(local.override[local.override_type], "ldap_security_group_name", local.config.ldap_security_group_name)
+ login_subnet_id = lookup(local.override[local.override_type], "login_subnet_id", local.config.login_subnet_id)
+ compute_subnet_id = lookup(local.override[local.override_type], "compute_subnet_id", local.config.compute_subnet_id)
+ storage_subnet_id = lookup(local.override[local.override_type], "storage_subnet_id", local.config.storage_subnet_id)
+ protocol_subnet_id = lookup(local.override[local.override_type], "protocol_subnet_id", local.config.protocol_subnet_id)
+ client_subnet_id = lookup(local.override[local.override_type], "client_subnet_id", local.config.client_subnet_id)
+ scale_management_vsi_profile = lookup(local.override[local.override_type], "scale_management_vsi_profile", local.config.scale_management_vsi_profile)
}
}
diff --git a/solutions/scale/main.tf b/solutions/scale/main.tf
index 5c86c9ed..a4981057 100644
--- a/solutions/scale/main.tf
+++ b/solutions/scale/main.tf
@@ -7,71 +7,73 @@ module "scale" {
cluster_prefix = local.env.cluster_prefix
ssh_keys = local.env.ssh_keys
existing_resource_group = local.env.existing_resource_group
- vpc_cluster_login_private_subnets_cidr_blocks = local.env.vpc_cluster_login_private_subnets_cidr_blocks
- vpc_cluster_private_subnets_cidr_blocks = local.env.vpc_cluster_private_subnets_cidr_blocks
+ vpc_cluster_login_private_subnets_cidr_blocks = local.env.login_subnets_cidr
+ vpc_cluster_private_subnets_cidr_blocks = local.env.compute_subnets_cidr
cos_instance_name = local.env.cos_instance_name
dns_custom_resolver_id = local.env.dns_custom_resolver_id
dns_instance_id = local.env.dns_instance_id
dns_domain_names = local.env.dns_domain_names
- enable_atracker = local.env.enable_atracker
- # enable_bastion = local.env.enable_bastion
- bastion_instance = local.env.bastion_instance
- deployer_instance = local.env.deployer_instance
- enable_cos_integration = local.env.enable_cos_integration
- enable_vpc_flow_logs = local.env.enable_vpc_flow_logs
- key_management = local.env.key_management
- client_instances = local.env.client_instances
- vpc_cidr = local.env.vpc_cidr
- placement_strategy = local.env.placement_strategy
- protocol_instances = local.env.protocol_instances
- protocol_subnets_cidr = [local.env.protocol_subnets_cidr]
- colocate_protocol_instances = local.env.colocate_protocol_instances
- static_compute_instances = local.env.compute_instances
- storage_instances = local.env.storage_instances
- storage_servers = local.env.storage_servers
- storage_subnets_cidr = [local.env.storage_subnets_cidr]
- vpc_name = local.env.vpc_name
- compute_gui_password = local.env.compute_gui_password
- compute_gui_username = local.env.compute_gui_username
- storage_gui_password = local.env.storage_gui_password
- storage_gui_username = local.env.storage_gui_username
- observability_atracker_enable = local.env.observability_atracker_enable
- observability_atracker_target_type = local.env.observability_atracker_target_type
- observability_monitoring_enable = local.env.observability_monitoring_enable
- observability_logs_enable_for_management = local.env.observability_logs_enable_for_management
- observability_logs_enable_for_compute = local.env.observability_logs_enable_for_compute
- observability_enable_platform_logs = local.env.observability_enable_platform_logs
- observability_enable_metrics_routing = local.env.observability_enable_metrics_routing
- observability_logs_retention_period = local.env.observability_logs_retention_period
- observability_monitoring_on_compute_nodes_enable = local.env.observability_monitoring_on_compute_nodes_enable
- observability_monitoring_plan = local.env.observability_monitoring_plan
- skip_flowlogs_s2s_auth_policy = local.env.skip_flowlogs_s2s_auth_policy
- skip_kms_s2s_auth_policy = local.env.skip_kms_s2s_auth_policy
- skip_iam_block_storage_authorization_policy = local.env.skip_iam_block_storage_authorization_policy
- ibmcloud_api_key = local.env.ibmcloud_api_key
- afm_instances = local.env.afm_instances
- afm_cos_config = local.env.afm_cos_config
- enable_ldap = local.env.enable_ldap
- ldap_basedns = local.env.ldap_basedns
- ldap_admin_password = local.env.ldap_admin_password
- ldap_user_name = local.env.ldap_user_name
- ldap_user_password = local.env.ldap_user_password
- ldap_server = local.env.ldap_server
- ldap_server_cert = local.env.ldap_server_cert
- ldap_instance = local.env.ldap_instance
- scale_encryption_enabled = local.env.scale_encryption_enabled
- scale_encryption_type = local.env.scale_encryption_type
- gklm_instance_key_pair = local.env.gklm_instance_key_pair
- gklm_instances = local.env.gklm_instances
- storage_type = local.env.storage_type
- scale_encryption_admin_password = local.env.scale_encryption_admin_password
- filesystem_config = local.env.filesystem_config
- existing_bastion_instance_name = local.env.existing_bastion_instance_name
- existing_bastion_instance_public_ip = local.env.existing_bastion_instance_public_ip
- existing_bastion_security_group_id = local.env.existing_bastion_security_group_id
- existing_bastion_ssh_private_key = local.env.existing_bastion_ssh_private_key
- client_subnets_cidr = [local.env.client_subnets_cidr]
- # hpcs_instance_name = local.env.hpcs_instance_name
- # scale_encryption_admin_username = local.env.scale_encryption_admin_username
- # scale_encryption_admin_default_password = local.env.scale_encryption_admin_default_password
+ bastion_instance = local.env.bastion_instance
+ deployer_instance = local.env.deployer_instance
+ enable_cos_integration = local.env.enable_cos_integration
+ enable_vpc_flow_logs = local.env.enable_vpc_flow_logs
+ client_instances = local.env.client_instances
+ vpc_cidr = local.env.vpc_cidr
+ protocol_instances = local.env.protocol_instances
+ protocol_subnets_cidr = [local.env.protocol_subnets_cidr]
+ colocate_protocol_instances = local.env.colocate_protocol_instances
+ static_compute_instances = local.env.compute_instances
+ storage_instances = local.env.storage_instances
+ storage_servers = local.env.storage_baremetal_server
+ storage_subnets_cidr = [local.env.storage_subnets_cidr]
+ vpc_name = local.env.vpc_name
+ compute_gui_password = local.env.compute_gui_password
+ compute_gui_username = local.env.compute_gui_username
+ storage_gui_password = local.env.storage_gui_password
+ storage_gui_username = local.env.storage_gui_username
+ observability_atracker_enable = local.env.observability_atracker_enable
+ observability_atracker_target_type = local.env.observability_atracker_target_type
+ sccwp_enable = local.env.sccwp_enable
+ sccwp_service_plan = local.env.sccwp_service_plan
+ cspm_enabled = local.env.cspm_enable
+ app_config_plan = local.env.app_config_plan
+ skip_flowlogs_s2s_auth_policy = local.env.skip_flowlogs_s2s_auth_policy
+ ibmcloud_api_key = local.env.ibmcloud_api_key
+ afm_instances = local.env.afm_instances
+ afm_cos_config = local.env.afm_cos_config
+ enable_ldap = local.env.enable_ldap
+ ldap_basedns = local.env.ldap_basedns
+ ldap_admin_password = local.env.ldap_admin_password
+ ldap_user_name = local.env.ldap_user_name
+ ldap_user_password = local.env.ldap_user_password
+ ldap_server = local.env.ldap_server
+ ldap_server_cert = local.env.ldap_server_cert
+ ldap_instance = local.env.ldap_instance
+ scale_encryption_enabled = local.env.scale_encryption_enabled
+ scale_encryption_type = local.env.scale_encryption_type
+ gklm_instances = local.env.gklm_instances
+ storage_type = local.env.storage_type
+ scale_encryption_admin_password = local.env.scale_encryption_admin_password
+ key_protect_instance_id = local.env.key_protect_instance_id
+ filesystem_config = local.env.filesystem_config
+ existing_bastion_instance_name = local.env.existing_bastion_instance_name
+ existing_bastion_instance_public_ip = local.env.existing_bastion_instance_public_ip
+ existing_bastion_security_group_id = local.env.existing_bastion_security_group_id
+ existing_bastion_ssh_private_key = local.env.existing_bastion_ssh_private_key
+ client_subnets_cidr = [local.env.client_subnets_cidr]
+ bms_boot_drive_encryption = local.env.bms_boot_drive_encryption
+ tie_breaker_bm_server_profile = local.env.tie_breaker_baremetal_server_profile
+ filesets_config = local.env.filesets_config
+ login_security_group_name = local.env.login_security_group_name
+ storage_security_group_name = local.env.storage_security_group_name
+ compute_security_group_name = local.env.compute_security_group_name
+ client_security_group_name = local.env.client_security_group_name
+ gklm_security_group_name = local.env.gklm_security_group_name
+ ldap_security_group_name = local.env.ldap_security_group_name
+ login_subnet_id = local.env.login_subnet_id
+ compute_subnet_id = local.env.compute_subnet_id
+ storage_subnet_id = local.env.storage_subnet_id
+ protocol_subnet_id = local.env.protocol_subnet_id
+ client_subnet_id = local.env.client_subnet_id
+ scale_management_vsi_profile = local.env.scale_management_vsi_profile
}
diff --git a/solutions/scale/override.json b/solutions/scale/override.json
index 894e99b2..8d5d5398 100644
--- a/solutions/scale/override.json
+++ b/solutions/scale/override.json
@@ -3,12 +3,11 @@
"existing_resource_group": "Default",
"vpc_name": null,
"vpc_cidr": "10.0.0.0/8",
- "placement_strategy": null,
"ssh_keys": null,
"enable_bastion": true,
"enable_deployer": true,
"deployer_instance_profile": "mx2-4x32",
- "vpc_cluster_login_private_subnets_cidr_blocks": "10.0.0.0/24",
+ "login_subnets_cidr": "10.0.0.0/24",
"client_subnets_cidr": "10.10.10.0/24",
"client_instances": [
{
@@ -17,7 +16,7 @@
"image": "ibm-redhat-8-10-minimal-amd64-2"
}
],
- "vpc_cluster_private_subnets_cidr_blocks": "10.10.20.0/24",
+ "compute_subnets_cidr": "10.10.20.0/24",
"compute_instances": [
{
"profile": "cx2-2x4",
@@ -116,7 +115,5 @@
"cos_instance_name": null,
"enable_atracker": true,
"enable_vpc_flow_logs": true,
- "key_management": "key_protect",
- "hpcs_instance_name": null,
"clusters": null
}
diff --git a/solutions/scale/variables.tf b/solutions/scale/variables.tf
index dfa93ff8..8d01a639 100644
--- a/solutions/scale/variables.tf
+++ b/solutions/scale/variables.tf
@@ -1,51 +1,66 @@
##############################################################################
# Offering Variations
##############################################################################
+
variable "ibm_customer_number" {
type = string
sensitive = true
default = null
- description = "Comma-separated list of the IBM Customer Number(s) (ICN) that is used for the Bring Your Own License (BYOL) entitlement check. For more information on how to find your ICN, see [What is my IBM Customer Number (ICN)?](https://www.ibm.com/support/pages/what-my-ibm-customer-number-icn)."
+ description = "IBM Customer Number (ICN) used for Bring Your Own License (BYOL) entitlement check and not required if storage_type is evaluation, but must be provided if storage_type is scratch or persistent. Failing to provide an ICN will cause the deployment to fail to decrypt the packages. For more information on how to find your ICN, see [What is my IBM Customer Number (ICN)?](https://www.ibm.com/support/pages/what-my-ibm-customer-number-icn)."
+ # Format validation - Only if value is not null
validation {
condition = (
var.ibm_customer_number == null ||
- can(regex("^[0-9A-Za-z]+(,[0-9A-Za-z]+)*$", var.ibm_customer_number))
+ can(regex("^[0-9]+(,[0-9]+)*$", var.ibm_customer_number))
)
- error_message = "The IBM customer number input value cannot have special characters."
+ error_message = "The IBM customer number must be a comma-separated list of numeric values with no alphabets and special characters."
+ }
+
+ # Presence validation - Must be set when storage_type is not evaluation
+ validation {
+ condition = (
+ var.storage_type == "evaluation" || var.ibm_customer_number != null
+ )
+ error_message = "The IBM customer number cannot be null when storage_type is 'scratch' or 'persistent'."
}
}
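+ # Illustrative values (hypothetical ICNs) against the checks above:
+ #   ibm_customer_number = "1234567"           # single ICN passes
+ #   ibm_customer_number = "1234567,7654321"   # comma-separated list passes
+ #   ibm_customer_number = "ABC123"            # fails: letters are not allowed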
+
##############################################################################
# Account Variables
##############################################################################
variable "ibmcloud_api_key" {
type = string
sensitive = true
- description = "IBM Cloud API Key that will be used for authentication in scripts run in this module. Only required if certain options are required."
+ description = "Provide the IBM Cloud API key for the account where the IBM Storage Scale cluster will be deployed, this is a required value that must be provided as it is used to authenticate and authorize access during the deployment. For instructions on creating an API key, see [Managing user API keys](https://cloud.ibm.com/docs/account?topic=account-userapikey&interface=ui)."
}
##############################################################################
# Cluster Level Variables
##############################################################################
variable "zones" {
- description = "Specify the IBM Cloud zone within the chosen region where the IBM Spectrum LSF cluster will be deployed. A single zone input is required, and the management nodes, file storage shares, and compute nodes will all be provisioned in this zone.[Learn more](https://cloud.ibm.com/docs/vpc?topic=vpc-creating-a-vpc-in-a-different-region#get-zones-using-the-cli)."
+ description = "Specify the IBM Cloud zone within the chosen region where the IBM Storage scale cluster will be deployed. A single zone input is required, (for example, [\"us-east-1\"]) all the cluster nodes will all be provisioned in this zone.[Learn more](https://cloud.ibm.com/docs/vpc?topic=vpc-creating-a-vpc-in-a-different-region#get-zones-using-the-cli)."
type = list(string)
default = ["us-east-1"]
validation {
condition = length(var.zones) == 1
- error_message = "HPC product deployment supports only a single zone. Provide a value for a single zone from the supported regions: eu-de-2 or eu-de-3 for eu-de, us-east-1 or us-east-3 for us-east, and us-south-1 for us-south."
+ error_message = "Provide a value for a single zone from the supported regions."
+ }
+ validation {
+ condition = can(regex("^[a-z]{2}-[a-z]+-[1-3]$", var.zones[0]))
+ error_message = "Provide a value from the supported regions."
}
}
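+ # Example against the checks above: zones = ["us-east-1"] passes; a multi-zone
+ # input such as ["us-east-1", "us-east-3"] fails the single-zone check.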
variable "ssh_keys" {
type = list(string)
default = null
- description = "The key pair to use to access the HPC cluster."
+ description = "Provide the names of the SSH keys already configured in your IBM Cloud account to enable access to the Storage Scale nodes. The solution does not create new SSH keys, so ensure you provide existing ones. These keys must reside in the same resource group and region as the cluster being provisioned.To provide multiple SSH keys, use a comma-separated list in the format: [\"key-name-1\", \"key-name-2\"]. If you do not have an SSH key in your IBM Cloud account, you can create one by following the instructions [SSH Keys](https://cloud.ibm.com/docs/vpc?topic=vpc-ssh-keys)."
}
variable "remote_allowed_ips" {
type = list(string)
- description = "Comma-separated list of IP addresses that can access the IBM Spectrum LSF cluster instance through an SSH interface. For security purposes, provide the public IP addresses assigned to the devices that are authorized to establish SSH connections (for example, [\"169.45.117.34\"]). To fetch the IP address of the device, use [https://ipv4.icanhazip.com/](https://ipv4.icanhazip.com/)."
+ description = "To ensure secure access to the IBM Storage Scale cluster via SSH, you must specify the public IP addresses of the devices that are permitted to connect. These IPs will be used to configure access restrictions and protect the environment from unauthorized connections. To allow access from multiple devices, provide the IP addresses as a comma-separated list in the format: [\"169.45.117.34\", \"203.0.113.25\"]. Identify your current public IP address, you can visit: https://ipv4.icanhazip.com."
validation {
condition = alltrue([
for o in var.remote_allowed_ips : !contains(["0.0.0.0/0", "0.0.0.0"], o)
@@ -54,7 +69,8 @@ variable "remote_allowed_ips" {
}
validation {
condition = alltrue([
- for a in var.remote_allowed_ips : can(regex("^(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)(/(3[0-2]|2[0-9]|1[0-9]|[0-9]))?$", a))
+ for a in var.remote_allowed_ips : can(regex("^((25[0-5]|2[0-4][0-9]|1[0-9]{2}|[1-9][0-9]|[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|1[0-9]{2}|[1-9][0-9]|[0-9])(/(3[0-2]|2[0-9]|1[0-9]|[0-9]))?$", a))
])
error_message = "The provided IP address format is not valid. Check if the IP address contains a comma instead of a dot, and ensure there are double quotation marks between each IP address range if using multiple IP ranges. For multiple IP address, use the format [\"169.45.117.34\",\"128.122.144.145\"]."
}
@@ -63,14 +79,14 @@ variable "remote_allowed_ips" {
variable "cluster_prefix" {
type = string
default = "scale"
- description = "A unique identifier for resources. Must begin with a letter and end with a letter or number. This cluster_prefix will be prepended to any resources provisioned by this template. Prefixes must be 16 or fewer characters."
+ description = "Prefix that is used to name the IBM Cloud resources that are provisioned to build the Storage Scale cluster. Make sure that the prefix is unique, since you cannot create multiple resources with the same name. The maximum length of supported characters is 64. Preifx must begin with a letter and end with a letter or number."
validation {
- error_message = "Prefix must begin and end with a letter and contain only letters, numbers, and - characters."
- condition = can(regex("^([A-z]|[a-z][-a-z0-9]*[a-z0-9])$", var.cluster_prefix))
+ error_message = "Prefix must begin with a lower case letter, should not end with '-' and contain only lower case letters, numbers, and '-' characters."
+ condition = can(regex("^[a-z](?:[a-z0-9]*(-[a-z0-9]+)*)?$", var.cluster_prefix))
}
validation {
- condition = length(var.cluster_prefix) <= 16
- error_message = "The cluster_prefix must be 16 characters or fewer."
+ condition = length(trimspace(var.cluster_prefix)) > 0 && length(var.cluster_prefix) <= 16
+ error_message = "The cluster_prefix must be 64 characters or fewer. No spaces allowed. "
}
}
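+ # Examples against the checks above (hypothetical prefixes): "scale-demo1" passes;
+ # "Scale1" (uppercase), "scale-" (trailing hyphen), and values over 16 characters fail.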
@@ -80,8 +96,11 @@ variable "cluster_prefix" {
variable "existing_resource_group" {
type = string
default = "Default"
- description = "String describing resource groups to create or reference"
-
+ description = "Specify the name of the existing resource group in your IBM Cloud account where cluster resources will be deployed. By default, the resource group is set to 'Default.' In some older accounts, it may be 'default,' so please verify the resource group name before proceeding. If the value is set to \"null\", the automation will create two separate resource groups: 'workload-rg' and 'service-rg.' For more details, see [Managing resource groups](https://cloud.ibm.com/docs/account?topic=account-rgs&interface=ui)."
+ validation {
+ condition = var.existing_resource_group != null && length(trimspace(var.existing_resource_group)) > 0 && var.existing_resource_group == trimspace(var.existing_resource_group)
+ error_message = "If you want to provide null for resource_group variable, it should be within double quotes and must not be null, empty, or contain leading/trailing spaces"
+ }
}
##############################################################################
@@ -90,52 +109,39 @@ variable "existing_resource_group" {
variable "vpc_name" {
type = string
default = null
- description = "Name of an existing VPC in which the cluster resources will be deployed. If no value is given, then a new VPC will be provisioned for the cluster. [Learn more](https://cloud.ibm.com/docs/vpc)"
+ description = "Provide the name of an existing VPC in which the cluster resources will be deployed. If no value is given, the solution provisions a new VPC. [Learn more](https://cloud.ibm.com/docs/vpc). You can also choose to use existing subnets under this VPC or let the solution create new subnets as part of the deployment. If a custom DNS resolver is already configured for your VPC, specify its ID under the dns_custom_resolver_id input value."
}
variable "vpc_cidr" {
type = string
default = "10.241.0.0/18"
- description = "Network CIDR for the VPC. This is used to manage network ACL rules for cluster provisioning."
-}
-
-variable "placement_strategy" {
- type = string
- default = null
- description = "VPC placement groups to create (null / host_spread / power_spread)"
+ description = "Provide an address prefix to create a new VPC when the vpc_name variable is set to null. VPC will be created using this address prefix, and subnets can then be defined within it using the specified subnet CIDR blocks. For more information on address prefix, see [Setting IP ranges](https://cloud.ibm.com/docs/vpc?topic=vpc-vpc-addressing-plan-design)."
}
##############################################################################
# Access Variables
##############################################################################
-# variable "enable_bastion" {
-# type = bool
-# default = true
-# description = "The solution supports multiple ways to connect to your HPC cluster for example, using bastion node, via VPN or direct connection. If connecting to the HPC cluster via VPN or direct connection, set this value to false."
-# }
-
variable "bastion_instance" {
type = object({
image = string
profile = string
})
default = {
- image = "ibm-ubuntu-22-04-5-minimal-amd64-3"
+ image = "ibm-ubuntu-22-04-5-minimal-amd64-5"
profile = "cx2-4x8"
}
- description = "Configuration for the Bastion node, including the image and instance profile. Only Ubuntu stock images are supported."
-}
-
-variable "vpc_cluster_login_private_subnets_cidr_blocks" {
- type = string
- default = "10.241.16.0/28"
- description = "Provide the CIDR block required for the creation of the login cluster's private subnet. Only one CIDR block is needed. If using a hybrid environment, modify the CIDR block to avoid conflicts with any on-premises CIDR blocks. Since the login subnet is used only for the creation of login virtual server instances, provide a CIDR range of /28."
validation {
- condition = tonumber(regex("^.*?/(\\d+)$", var.vpc_cluster_login_private_subnets_cidr_blocks)[0]) <= 28
- error_message = "This subnet is used to create only a login virtual server instance. Providing a larger CIDR size will waste the usage of available IPs. A CIDR range of /28 is sufficient for the creation of the login subnet."
+ condition = can(regex("^ibm-ubuntu", var.bastion_instance.image))
+ error_message = "Only IBM Ubuntu stock images are supported for the Bastion node."
+ }
+ validation {
+ condition = can(regex("^[^\\s]+-[0-9]+x[0-9]+", var.bastion_instance.profile))
+ error_message = "The profile must be a valid virtual server instance profile."
}
+ description = "Bastion node functions as a jump server to enable secure SSH access to cluster nodes, ensuring controlled connectivity within the private network. Specify the configuration details for the bastion node, including the image and instance profile. Only Ubuntu 22.04 stock images are supported."
}
+
##############################################################################
# Deployer Variables
##############################################################################
@@ -146,82 +152,158 @@ variable "deployer_instance" {
profile = string
})
default = {
- image = "jay-lsf-new-image"
- profile = "mx2-4x32"
+ image = "hpcc-scale-deployer-v1"
+ profile = "bx2-8x32"
+ }
+ validation {
+ condition = can(regex("^(b|c|m)x[0-9]+d?-[0-9]+x[0-9]+$", var.deployer_instance.profile))
+ error_message = "The profile must be a valid virtual server instance profile and must be from the Balanced, Compute, Memory Categories"
}
- description = "Configuration for the deployer node, including the custom image and instance profile. By default, uses fixpack_15 image and a bx2-8x32 profile."
+ description = "A deployer node is a dedicated virtual machine or server instance used to automate the deployment and configuration of infrastructure and applications for HPC cluster components. Specify the configuration for the deployer node, including the custom image and virtual server instance profile."
}
##############################################################################
# Compute Variables
##############################################################################
-variable "client_subnets_cidr" {
+variable "login_subnets_cidr" {
type = string
- default = "10.241.50.0/24"
- description = "Subnet CIDR block to launch the client host."
-}
+ default = "10.241.16.0/28"
+ description = "Provide the CIDR block required for the creation of the login cluster private subnet. Single CIDR block is required. If using a hybrid environment, modify the CIDR block to avoid conflicts with any on-premises CIDR blocks. Since the login subnet is used only for the creation of login virtual server instances, provide a CIDR range of /28."
+ validation {
+ condition = (
+ can(
+ regex(
+ "^((25[0-5]|2[0-4][0-9]|1[0-9]{2}|[1-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|1[0-9]{2}|[1-9]?[0-9])/(3[0-2]|[12]?[0-9])$", trimspace(var.login_subnets_cidr)
+ )
+ )
+ )
+ error_message = "login_node_cidr must be a valid IPv4 CIDR (e.g., 192.168.1.0/28)."
+ }
-variable "client_instances" {
- type = list(
- object({
- profile = string
- count = number
- image = string
- })
- )
- default = [{
- profile = "cx2-2x4"
- count = 2
- image = "ibm-redhat-8-10-minimal-amd64-4"
- }]
- description = "Number of instances to be launched for client."
+ validation {
+ condition = can(
+ regex(
+ "^((25[0-5]|2[0-4][0-9]|1[0-9]{2}|[1-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|1[0-9]{2}|[1-9]?[0-9])/(2[8-9]|3[0-2])$", trimspace(var.login_subnets_cidr)
+ )
+ )
+ error_message = "This subnet is used to create only a login virtual server instance. Providing a larger CIDR size will waste the usage of available IPs. A CIDR range of /28 is sufficient for the creation of the login subnet."
+ }
}
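+ # Example: login_subnets_cidr = "10.241.16.0/28" (16 addresses) satisfies both checks,
+ # while "10.241.16.0/24" fails the /28-or-smaller size check.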
-variable "vpc_cluster_private_subnets_cidr_blocks" {
+variable "compute_subnets_cidr" {
type = string
default = "10.241.0.0/20"
- description = "Provide the CIDR block required for the creation of the compute cluster's private subnet. One CIDR block is required. If using a hybrid environment, modify the CIDR block to avoid conflicts with any on-premises CIDR blocks. Ensure the selected CIDR block size can accommodate the maximum number of management and dynamic compute nodes expected in your cluster. For more information on CIDR block size selection, refer to the documentation, see [Choosing IP ranges for your VPC](https://cloud.ibm.com/docs/vpc?topic=vpc-choosing-ip-ranges-for-your-vpc)."
+ description = "Provide the CIDR block required for the creation of the compute private subnet. Single CIDR block is required. If using a hybrid environment, modify the CIDR block to avoid conflicts with any on-premises CIDR blocks. Ensure the selected CIDR block size can accommodate the maximum number of scale compute nodes expected in your cluster. For more information on CIDR block size selection, refer to the documentation, see [Choosing IP ranges for your VPC](https://cloud.ibm.com/docs/vpc?topic=vpc-choosing-ip-ranges-for-your-vpc)."
}
-variable "compute_instances" {
- type = list(
- object({
- profile = string
- count = number
- image = string
- filesystem = string
- })
- )
- default = [{
- profile = "cx2-2x4"
- count = 3
- image = "ibm-redhat-8-10-minimal-amd64-4"
- filesystem = "/ibm/fs1"
- }]
- description = "Total Number of instances to be launched for compute cluster."
+variable "storage_subnets_cidr" {
+ type = string
+ default = "10.241.30.0/24"
+ description = "Provide the CIDR block required for the creation of the storage private subnet. Single CIDR block is required. If using a hybrid environment, modify the CIDR block to avoid conflicts with any on-premises CIDR blocks. Ensure the selected CIDR block size can accommodate the maximum number of scale storage nodes expected in your cluster. For more information on CIDR block size selection, refer to the documentation, see [Choosing IP ranges for your VPC](https://cloud.ibm.com/docs/vpc?topic=vpc-choosing-ip-ranges-for-your-vpc)."
+}
+
+variable "protocol_subnets_cidr" {
+ type = string
+ default = "10.241.40.0/24"
+ description = "Provide the CIDR block required for the creation of the protocol private subnet. Single CIDR block is required. If using a hybrid environment, modify the CIDR block to avoid conflicts with any on-premises CIDR blocks. Ensure the selected CIDR block size can accommodate the maximum number of protocol nodes expected in your cluster. For more information on CIDR block size selection, refer to the documentation, see [Choosing IP ranges for your VPC](https://cloud.ibm.com/docs/vpc?topic=vpc-choosing-ip-ranges-for-your-vpc)."
+}
+
+variable "client_subnets_cidr" {
+ type = string
+ default = "10.241.50.0/24"
+ description = "Provide the CIDR block required for the creation of the client private subnet. Single CIDR block is required. If using a hybrid environment, modify the CIDR block to avoid conflicts with any on-premises CIDR blocks. Ensure the selected CIDR block size can accommodate the maximum number of scale client nodes expected in your cluster. For more information on CIDR block size selection, refer to the documentation, see [Choosing IP ranges for your VPC](https://cloud.ibm.com/docs/vpc?topic=vpc-choosing-ip-ranges-for-your-vpc)."
}
variable "compute_gui_username" {
type = string
- default = "admin"
+ default = ""
sensitive = true
- description = "GUI user to perform system management and monitoring tasks on compute cluster."
+ description = "GUI username to perform system management and monitoring tasks on the compute cluster. The Username should be at least 4 characters, (any combination of lowercase and uppercase letters)."
+ validation {
+ condition = sum([for inst in var.compute_instances : inst.count]) == 0 || (length(var.compute_gui_username) >= 4 && length(var.compute_gui_username) <= 30 && trimspace(var.compute_gui_username) != "")
+ error_message = "Specified input for \"compute_gui_username\" is not valid. Username should be greater or equal to 4 letters and less than equal to 30."
+ }
+ validation {
+ # Structural check
+ condition = sum([for inst in var.compute_instances : inst.count]) == 0 || can(regex("^[A-Za-z0-9]([._]?[A-Za-z0-9])*$", var.compute_gui_username))
+
+ error_message = "Specified input for \"compute_gui_username\" is not valid. Username should only have alphanumerics, dot(.) and underscore(_). No consecutive dots or underscores"
+ }
}
variable "compute_gui_password" {
type = string
- default = "hpc@IBMCloud"
+ default = ""
sensitive = true
- description = "Password for compute cluster GUI"
+ description = "Password for logging in to the compute cluster GUI. Must be at least 8 characters long and include a combination of uppercase and lowercase letters, a number, and a special character. It must not contain the username or start with a special character."
+ validation {
+ condition = (
+ sum([for inst in var.compute_instances : inst.count]) == 0 || (
+ can(regex("^.{8,}$", var.compute_gui_password)) &&
+ can(regex("[0-9]", var.compute_gui_password)) &&
+ can(regex("[a-z]", var.compute_gui_password)) &&
+ can(regex("[A-Z]", var.compute_gui_password)) &&
+ can(regex("[!@#$%^&*()_+=-]", var.compute_gui_password)) &&
+ trimspace(var.compute_gui_password) != "" &&
+ !can(regex("^[!@#$%^&*()_+=-]", var.compute_gui_password)) &&
+ replace(lower(var.compute_gui_password), lower(var.compute_gui_username), "") == lower(var.compute_gui_password)
+ )
+ )
+ error_message = "If compute instances are used, the GUI password must be at least 8 characters long, include upper/lowercase letters, a number, a special character, must not start with a special character, and must not contain the username."
+ }
}
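+ # The username-containment check above works by substring removal: if stripping the
+ # lowercased username out of the lowercased password leaves it unchanged, the username
+ # is absent. E.g. (hypothetical values) password "MyAdmin@123" with username "admin"
+ # fails, since replace("myadmin@123", "admin", "") yields "my@123".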
##############################################################################
# Storage Scale Variables
##############################################################################
-variable "storage_subnets_cidr" {
- type = string
- default = "10.241.30.0/24"
- description = "Subnet CIDR block to launch the storage cluster host."
+variable "compute_instances" {
+ type = list(
+ object({
+ profile = string
+ count = number
+ image = string
+ filesystem = optional(string)
+ })
+ )
+ default = [{
+ profile = "bx2-2x8"
+ count = 0
+ image = "hpcc-scale5232-rhel810-v1"
+ filesystem = "/gpfs/fs1"
+ }]
+ validation {
+ condition = alltrue([
+ for inst in var.compute_instances : can(regex("^(b|c|m)x[0-9]+d?-[0-9]+x[0-9]+$", inst.profile))
+ ])
+ error_message = "Specified profile must be a valid IBM Cloud VPC GEN2 profile name and must be from the Balanced, Compute, Memory Categories [Learn more](https://cloud.ibm.com/docs/vpc?topic=vpc-profiles)."
+ }
+ validation {
+ condition = alltrue([
+ for inst in var.compute_instances : inst.count == 0 || (inst.count >= 3 && inst.count <= 64)
+ ])
+ error_message = "Specified count must be 0 or in range 3 to 64"
+ }
+ description = "Specify the list of virtual server instances to be provisioned as compute nodes in the cluster. Each object includes the instance profile (machine type), number of instances (count), OS image to use, and an optional filesystem mount path. This configuration allows customization of the compute tier to suit specific performance and workload requirements. The profile must match a valid IBM Cloud VPC Gen2 instance profile format. A minimum of 3 compute nodes is required to form a cluster, and a maximum of 64 nodes is supported. For more details, refer[Instance Profiles](https://cloud.ibm.com/docs/vpc?topic=vpc-profiles&interface=ui)."
+}
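+ # Illustrative tfvars entry (image name is a placeholder) that satisfies both checks:
+ # compute_instances = [{ profile = "bx2-4x16", count = 3, image = "hpcc-scale5232-rhel810-v1", filesystem = "/gpfs/fs1" }]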
+
+variable "client_instances" {
+ type = list(
+ object({
+ profile = string
+ count = number
+ image = string
+ })
+ )
+ default = [{
+ profile = "cx2-2x4"
+ count = 0
+ image = "ibm-redhat-8-10-minimal-amd64-6"
+ }]
+ validation {
+ condition = alltrue([
+ for inst in var.client_instances : can(regex("^(b|c|m)x[0-9]+d?-[0-9]+x[0-9]+$", inst.profile))
+ ])
+ error_message = "Specified profile must be a valid IBM Cloud VPC GEN2 profile name and must be from the Balanced, Compute, Memory Categories (e.g., bx2-4x16, cx2d-16x64). [Learn more](https://cloud.ibm.com/docs/vpc?topic=vpc-profiles)"
+ }
+ validation {
+ condition = alltrue([
+ for inst in var.client_instances : inst.count >= 0 && inst.count <= 2000
+ ])
+ error_message = "client_instances 'count' value must be between 0 and 2000."
+ }
+
+ description = "Specify the list of virtual server instances to be provisioned as client nodes in the cluster. Each object includes the instance profile (machine type), number of instances (count), OS image to use. This configuration allows customization of the compute tier to suit specific performance and workload requirements. The profile must match a valid IBM Cloud VPC Gen2 instance profile format. For more details, refer [Instance Profiles](https://cloud.ibm.com/docs/vpc?topic=vpc-profiles&interface=ui)."
}
variable "storage_instances" {
@@ -230,76 +312,175 @@ variable "storage_instances" {
profile = string
count = number
image = string
- filesystem = string
+ filesystem = optional(string)
})
)
default = [{
- profile = "bx2-2x8"
- count = 0
- image = "ibm-redhat-8-10-minimal-amd64-4"
- filesystem = "/ibm/fs1"
+ profile = "bx2d-32x128"
+ count = 2
+ image = "hpcc-scale5232-rhel810-v1"
+ filesystem = "/gpfs/fs1"
}]
- description = "Number of instances to be launched for storage cluster."
+ validation {
+ condition = alltrue([
+ for inst in var.storage_instances : can(regex("^(b|c|m)x[0-9]+d-[0-9]+x[0-9]+$", inst.profile))
+ ])
+ error_message = "Specified profile must be a valid IBM Cloud VPC GEN2 profile name and must be from the Balanced, Compute, Memory Categories (e.g., bx2d-4x16, cx2d-16x64). [Learn more](https://cloud.ibm.com/docs/vpc?topic=vpc-profiles)"
+ }
+ validation {
+ condition = alltrue([
+ for inst in var.storage_instances : inst.count % 2 == 0
+ ])
+ error_message = "Storage count should always be an even number."
+ }
+ validation {
+ condition = alltrue([
+ for inst in var.storage_instances : inst.count >= 2 && inst.count <= 64
+ ])
+ error_message = "storage_instances 'count' value must be in range 2 to 64."
+ }
+ description = "Specify the list of virtual server instances to be provisioned as storage nodes in the cluster. Each object includes the instance profile (machine type), number of instances (count), OS image to use, and an optional filesystem mount path. This configuration allows customization of the storage tier to suit specific storage performance cluster. The profile must match a valid IBM Cloud VPC Gen2 instance profile format. A minimum of 2 storage nodes is required to form a cluster, and a maximum of 64 nodes is supported. For more details, refer[Instance Profiles](https://cloud.ibm.com/docs/vpc?topic=vpc-profiles&interface=ui)."
}
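+ # Example against the checks above: count = 2 or 4 passes (even, within 2..64);
+ # count = 3 fails the even-number check.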
-variable "storage_servers" {
+variable "storage_baremetal_server" {
type = list(
object({
profile = string
count = number
image = string
- filesystem = string
+ filesystem = optional(string)
})
)
+
default = [{
profile = "cx2d-metal-96x192"
- count = 0
- image = "ibm-redhat-8-10-minimal-amd64-4"
+ count = 2
+ image = "hpcc-scale5232-rhel810-v1"
filesystem = "/gpfs/fs1"
}]
- description = "Number of BareMetal Servers to be launched for storage cluster."
+
+ validation {
+ condition = var.storage_type == "persistent" ? alltrue([
+ for inst in var.storage_baremetal_server : can(regex("^[b|c|m]x[0-9]+d?-[a-z]+-[0-9]+x[0-9]+", inst.profile))
+ ]) : true
+ error_message = "Specified profile must be a valid IBM Cloud VPC GEN2 profile name [Learn more](https://cloud.ibm.com/docs/vpc?topic=vpc-profiles)."
+ }
+
+ validation {
+ condition = var.storage_type == "persistent" ? alltrue([
+ for inst in var.storage_baremetal_server : inst.count >= 2 && inst.count <= 64
+ ]) : true
+ error_message = "Each storage_baremetal_server 'count' value must be between 2 and 64."
+ }
+
+ description = "Specify the list of bare metal servers to be provisioned for the storage cluster. Each object in the list specifies the server profile (hardware configuration), the count (number of servers), the image (OS image to use), and an optional filesystem mount path. This configuration allows flexibility in scaling and customizing the storage cluster based on performance and capacity requirements. Only valid bare metal profiles supported in IBM Cloud VPC should be used. A minimum of 2 baremetal storage nodes is required to form a cluster, and a maximum of 64 nodes is supported For available bare metal profiles, refer to the [Baremetal Profiles](https://cloud.ibm.com/docs/vpc?topic=vpc-bare-metal-servers-profile&interface=ui)."
}
-variable "protocol_subnets_cidr" {
+variable "tie_breaker_baremetal_server_profile" {
type = string
- default = "10.241.40.0/24"
- description = "Subnet CIDR block to launch the storage cluster host."
+ default = null
+ description = "Specify the bare metal server profile type name to be used for creating the bare metal Tie breaker node. If no value is provided, the storage bare metal server profile will be used as the default. For more information, see [bare metal server profiles](https://cloud.ibm.com/docs/vpc?topic=vpc-bare-metal-servers-profile&interface=ui). [Tie Breaker Node](https://www.ibm.com/docs/en/storage-scale/5.2.2?topic=quorum-node-tiebreaker-disks)"
+}
+
+variable "scale_management_vsi_profile" {
+ type = string
+ default = "bx2-8x32"
+ description = "The virtual server instance profile type name to be used to create the Management node. For more information, see [Instance Profiles](https://cloud.ibm.com/docs/vpc?topic=vpc-profiles&interface=ui)."
+ validation {
+ condition = can(regex("^[b|c|m]x[0-9]+d?-[0-9]+x[0-9]+", var.scale_management_vsi_profile))
+ error_message = "Specified profile must be a valid IBM Cloud VPC GEN2 Instance Storage profile name [Learn more](https://cloud.ibm.com/docs/vpc?topic=vpc-profiles)."
+ }
}
+variable "afm_instances" {
+ type = list(
+ object({
+ profile = string
+ count = number
+ })
+ )
+ default = [{
+ profile = "bx2-32x128"
+ count = 0
+ }]
+ validation {
+ condition = alltrue([
+ for inst in var.afm_instances : can(regex("^[bcm]x[0-9]+d?-[0-9]+x[0-9]+$", inst.profile))
+ ])
+ error_message = "Specified profile must be a valid IBM Cloud VPC GEN2 profile name and must be from the Balanced, Compute, Memory Categories [Learn more](https://cloud.ibm.com/docs/vpc?topic=vpc-profiles)."
+ }
+ validation {
+ condition = alltrue([
+ for inst in var.afm_instances : inst.count >= 0 && inst.count <= 16
+ ])
+ error_message = "afm_instances 'count' value must be between 0 and 16."
+ }
+ description = "Specify the list of virtual server instances to be provisioned as AFM nodes in the cluster. Each object in the list includes the instance profile (machine type), the count (number of instances), the image (OS image to use). This configuration allows you to access remote data and high-performance computing needs.This input can be used to provision virtual server instances (VSI). If persistent, high-throughput storage is required, consider using bare metal instances instead. Ensure you provide valid instance profiles. Maximum of 16 afm nodes is supported. For more details, refer to [Instance Profiles](https://cloud.ibm.com/docs/vpc?topic=vpc-profiles&interface=ui)."
+}
+
+
variable "protocol_instances" {
type = list(
object({
profile = string
count = number
- image = string
})
)
default = [{
- profile = "bx2-2x8"
+ profile = "cx2-32x64"
count = 2
- image = "ibm-redhat-8-10-minimal-amd64-4"
}]
- description = "Number of instances to be launched for protocol hosts."
+ validation {
+ condition = alltrue([
+ for inst in var.protocol_instances : can(regex("^[bcm]x[0-9]+d?(-[a-z]+)?-[0-9]+x[0-9]+$", inst.profile))
+ ])
+ error_message = "Specified profile must be a valid IBM Cloud VPC GEN2 profile name [Learn more](https://cloud.ibm.com/docs/vpc?topic=vpc-profiles)."
+ }
+ validation {
+ condition = alltrue([
+ for inst in var.protocol_instances : inst.count >= 0 && inst.count <= 32
+ ])
+ error_message = "protocol_instances 'count' value must be between 0 and 32."
+ }
+ description = "Specify the list of virtual server instances to be provisioned as protocol nodes in the cluster. Each object in the list includes the instance profile (machine type), the count (number of instances), the image (OS image to use). This configuration allows allows for a unified data management solution, enabling different clients to access the same data using NFS protocol.This input can be used to provision virtual server instances (VSI). If persistent, high-throughput storage is required, consider using bare metal instances instead. Ensure you provide valid instance profiles. Maximum of 32 VSI or baremetal nodes are supported. For more details, refer to [Instance Profiles](https://cloud.ibm.com/docs/vpc?topic=vpc-profiles&interface=ui)."
}
variable "colocate_protocol_instances" {
type = bool
default = true
- description = "Enable it to use storage instances as protocol instances"
+ description = "Enable this option to colocate protocol services on the same virtual server instances used for storage. When set to true, the storage nodes will also act as protocol nodes for reducing the need for separate infrastructure. This can optimize resource usage and simplify the cluster setup, especially for smaller environments or cost-sensitive deployments. For larger or performance-intensive workloads, consider deploying dedicated protocol instances instead."
+ validation {
+ condition = anytrue([
+ var.colocate_protocol_instances == true && var.storage_type != "persistent" && sum(var.protocol_instances[*]["count"]) <= sum(var.storage_instances[*]["count"]),
+ var.colocate_protocol_instances == true && var.storage_type == "persistent" && sum(var.protocol_instances[*]["count"]) <= sum(var.storage_baremetal_server[*]["count"]),
+ var.colocate_protocol_instances == false
+ ])
+ error_message = "When colocation is enabled, the protocol instance count must be less than or equal to the storage instance count."
+ }
}
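+ # Example (assuming storage_type is not "persistent"): with storage_instances
+ # count = 2 and protocol_instances count = 2, colocate_protocol_instances = true
+ # passes; raising the protocol count to 3 would fail the protocol <= storage check.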
variable "storage_gui_username" {
type = string
- default = "admin"
+ default = ""
sensitive = true
- description = "GUI user to perform system management and monitoring tasks on storage cluster."
+ description = "GUI username to perform system management and monitoring tasks on the storage cluster. Note: Username should be at least 4 characters, (any combination of lowercase and uppercase letters)."
+ validation {
+ condition = length(var.storage_gui_username) >= 4 && length(var.storage_gui_username) <= 30 && trimspace(var.storage_gui_username) != ""
+ error_message = "Specified input for \"storage_gui_username\" is not valid. Username should be greater or equal to 4 letters and less than equal to 30."
+ }
+ validation {
+ # Structural check
+ condition = can(regex("^[A-Za-z0-9]([._]?[A-Za-z0-9])*$", var.storage_gui_username))
+
+ error_message = "Specified input for \"storage_gui_username\" is not valid. Username should only have alphanumerics, dot(.) and underscore(_). No consecutive dots or underscores"
+ }
}
variable "storage_gui_password" {
type = string
- default = "hpc@IBMCloud"
+ default = ""
sensitive = true
- description = "Password for storage cluster GUI"
+ description = "The storage cluster GUI password is used for logging in to the storage cluster through the GUI. The password should contain a minimum of 8 characters. For a strong password, use a combination of uppercase and lowercase letters, one number, and a special character. Make sure that the password doesn't contain the username and it should not start with a special character."
+ validation {
+ condition = can(regex("^.{8,20}$", var.storage_gui_password) != "") && can(regex("[0-9]", var.storage_gui_password) != "") && can(regex("[a-z]", var.storage_gui_password) != "") && can(regex("[A-Z]", var.storage_gui_password) != "") && can(regex("[!@#$%^&*()_+=-]", var.storage_gui_password) != "") && trimspace(var.storage_gui_password) != "" && can(regex("^[!@#$%^&*()_+=-]", var.storage_gui_password)) == false && can(regex(lower(var.storage_gui_username), lower(var.storage_gui_password))) == false
+ error_message = "The Storage GUI password must be 8 to 20 characters long and include at least two alphabetic characters (with one uppercase and one lowercase), one number, and one special character from the set (!@#$%^&*()_+=-). The password must not contain the username or any spaces."
+ }
}
variable "filesystem_config" {
@@ -310,52 +491,34 @@ variable "filesystem_config" {
default_metadata_replica = number
max_data_replica = number
max_metadata_replica = number
- mount_point = string
}))
default = [{
- filesystem = "fs1"
+ filesystem = "/gpfs/fs1"
block_size = "4M"
default_data_replica = 2
default_metadata_replica = 2
max_data_replica = 3
max_metadata_replica = 3
- mount_point = "/ibm/fs1"
}]
- description = "File system configurations."
-}
-
-# variable "filesets_config" {
-# type = list(object({
-# fileset = string
-# filesystem = string
-# junction_path = string
-# client_mount_path = string
-# quota = number
-# }))
-# default = [{
-# fileset = "fileset1"
-# filesystem = "fs1"
-# junction_path = "/ibm/fs1/fileset1"
-# client_mount_path = "/mnt"
-# quota = 100
-# }]
-# description = "Fileset configurations."
-# }
+ description = "Specify the configuration parameters for one or more IBM Storage Scale (GPFS) filesystems. Each object in the list includes the filesystem mount point, block size, and replica settings for both data and metadata. These settings determine how data is distributed and replicated across the cluster for performance and fault tolerance."
+}
-variable "afm_instances" {
- type = list(
- object({
- profile = string
- count = number
- image = string
- })
- )
- default = [{
- profile = "bx2-2x8"
- count = 0
- image = "ibm-redhat-8-10-minimal-amd64-4"
- }]
- description = "Number of instances to be launched for afm hosts."
+variable "filesets_config" {
+ type = list(object({
+ client_mount_path = string
+ quota = number
+ }))
+ default = [
+ {
+ client_mount_path = "/mnt/scale/tools"
+ quota = 0
+ },
+ {
+ client_mount_path = "/mnt/scale/data"
+ quota = 0
+ }
+ ]
+ description = "Specify a list of filesets with client mount paths and optional storage quotas (0 means no quota) to be created within the IBM Storage Scale filesystem.."
}
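+ # Illustrative override (hypothetical mount paths; quota units are interpreted by
+ # the solution, 0 meaning no quota):
+ # filesets_config = [
+ #   { client_mount_path = "/mnt/scale/projects", quota = 100 },
+ #   { client_mount_path = "/mnt/scale/scratch", quota = 0 }
+ # ]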
variable "afm_cos_config" {
@@ -379,17 +542,48 @@ variable "afm_cos_config" {
bucket_storage_class = "smart"
bucket_type = "region_location"
}]
- # default = [{
- # afm_fileset = "afm_fileset"
- # mode = "iw"
- # cos_instance = null
- # bucket_name = null
- # bucket_region = "us-south"
- # cos_service_cred_key = ""
- # bucket_storage_class = "smart"
- # bucket_type = "region_location"
- # }]
- description = "AFM configurations."
+ nullable = false
+ description = "Please provide details for the Cloud Object Storage (COS) instance, including information about the COS bucket, service credentials (HMAC key), AFM fileset, mode (such as Read-only (RO), Single writer (SW), Local updates (LU), and Independent writer (IW)), storage class (standard, vault, cold, or smart), and bucket type (single_site_location, region_location, cross_region_location). Note : The 'afm_cos_config' can contain up to 5 entries. For further details on COS bucket locations, refer to the relevant documentation https://cloud.ibm.com/docs/cloud-object-storage/basics?topic=cloud-object-storage-endpoints."
+ validation {
+ condition = length(var.afm_cos_config) >= 1 && length(var.afm_cos_config) <= 5
+ error_message = "The length of \"afm_cos_config\" must be greater than or equal to 1 and less than or equal to 5."
+ }
+ validation {
+ condition = alltrue([for item in var.afm_cos_config : trimspace(item.mode) != "" && item.mode != null])
+ error_message = "The \"mode\" field must not be empty or null."
+ }
+ validation {
+ condition = length(distinct([for item in var.afm_cos_config : item.afm_fileset])) == length(var.afm_cos_config)
+ error_message = "The \"afm_fileset\" name should be unique for each AFM COS bucket relation."
+ }
+ validation {
+ condition = alltrue([for item in var.afm_cos_config : trimspace(item.afm_fileset) != "" && item.afm_fileset != null])
+ error_message = "The \"afm_fileset\" field must not be empty or null."
+ }
+ validation {
+ condition = alltrue([for config in var.afm_cos_config : !(config.bucket_type == "single_site_location") || contains(["ams03", "che01", "mil01", "mon01", "par01", "sjc04", "sng01"], config.bucket_region)])
+ error_message = "When 'bucket_type' is 'single_site_location', 'bucket_region' must be one of ['ams03', 'che01', 'mil01', 'mon01', 'par01', 'sjc04', 'sng01']."
+ }
+ validation {
+ condition = alltrue([for config in var.afm_cos_config : !(config.bucket_type == "cross_region_location") || contains(["us", "eu", "ap"], config.bucket_region)])
+ error_message = "When 'bucket_type' is 'cross_region_location', 'bucket_region' must be one of ['us', 'eu', 'ap']."
+ }
+ validation {
+ condition = alltrue([for config in var.afm_cos_config : !(config.bucket_type == "region_location") || contains(["us-south", "us-east", "eu-gb", "eu-de", "jp-tok", "au-syd", "jp-osa", "ca-tor", "br-sao", "eu-es"], config.bucket_region)])
+ error_message = "When 'bucket_type' is 'region_location', 'bucket_region' must be one of ['us-south', 'us-east', 'eu-gb', 'eu-de', 'jp-tok', 'au-syd', 'jp-osa', 'ca-tor', 'br-sao', 'eu-es']."
+ }
+ validation {
+ condition = alltrue([for item in var.afm_cos_config : (item.bucket_type == "" || contains(["cross_region_location", "single_site_location", "region_location"], item.bucket_type))])
+ error_message = "Each 'bucket_type' must be either empty or one of 'region_location', 'single_site_location', 'cross_region_location'."
+ }
+ validation {
+ condition = alltrue([for item in var.afm_cos_config : (item.bucket_storage_class == "" || (can(regex("^[a-z]+$", item.bucket_storage_class)) && contains(["smart", "standard", "cold", "vault"], item.bucket_storage_class)))])
+ error_message = "Each 'bucket_storage_class' must be either empty or one of 'smart', 'standard', 'cold', or 'vault', and all in lowercase."
+ }
+ validation {
+ condition = alltrue([for item in var.afm_cos_config : trimspace(item.bucket_region) != "" && item.bucket_region != null])
+ error_message = "The \"bucket_region\" field must not be empty or null."
+ }
}
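+ # Illustrative entry (field values mirror the default above; names are placeholders)
+ # that satisfies the region/type pairing checks:
+ # afm_cos_config = [{
+ #   afm_fileset = "afm_fileset_1"
+ #   mode = "iw"
+ #   cos_instance = null
+ #   bucket_name = null
+ #   bucket_region = "us-south"
+ #   cos_service_cred_key = ""
+ #   bucket_storage_class = "smart"
+ #   bucket_type = "region_location"
+ # }]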
##############################################################################
@@ -399,13 +593,17 @@ variable "afm_cos_config" {
variable "dns_instance_id" {
type = string
default = null
- description = "IBM Cloud HPC DNS service instance id."
+ description = "Specify the ID of an existing IBM Cloud DNS service instance. When provided, domain names are created within the specified instance. If set to null, a new DNS service instance is created, and the required DNS zones are associated with it."
}
variable "dns_custom_resolver_id" {
type = string
default = null
- description = "IBM Cloud DNS custom resolver id."
+ description = "Specify the ID of an existing IBM Cloud DNS custom resolver to avoid creating a new one. If set to null, a new custom resolver will be created and associated with the VPC. Note: A VPC can be associated with only one custom resolver. When using an existing VPC, if a custom resolver is already associated and this ID is not provided, the deployment will fail."
+ validation {
+ condition = var.vpc_name != null || var.dns_custom_resolver_id == null
+ error_message = "If this is a new VPC deployment (vpc_name is null), do not provide dns_custom_resolver_id, as it may impact name resolution."
+ }
}
variable "dns_domain_names" {
@@ -423,26 +621,7 @@ variable "dns_domain_names" {
client = "clnt.com"
gklm = "gklm.com"
}
- description = "IBM Cloud HPC DNS domain names."
-}
-
-##############################################################################
-# Encryption Variables
-##############################################################################
-variable "key_management" {
- type = string
- default = "key_protect"
- description = "Set the value as key_protect to enable customer managed encryption for boot volume and file share. If the key_management is set as null, IBM Cloud resources will be always be encrypted through provider managed."
- validation {
- condition = var.key_management == "null" || var.key_management == null || var.key_management == "key_protect"
- error_message = "key_management must be either 'null' or 'key_protect'."
- }
-}
-
-variable "hpcs_instance_name" {
- type = string
- default = null
- description = "Hyper Protect Crypto Service instance"
+ description = "DNS domain names are user-friendly addresses that map to systems within a network, making them easier to identify and access. Provide the DNS domain names for IBM Cloud HPC components: compute, storage, protocol, client, and GKLM. These domains will be assigned to the respective nodes that are part of the scale cluster."
}
##############################################################################
@@ -451,25 +630,19 @@ variable "hpcs_instance_name" {
variable "enable_cos_integration" {
type = bool
default = true
- description = "Integrate COS with HPC solution"
+ description = "Set to true to create an extra cos bucket to integrate with scale cluster deployment."
}
variable "cos_instance_name" {
type = string
default = null
- description = "Exiting COS instance name"
-}
-
-variable "enable_atracker" {
- type = bool
- default = true
- description = "Enable Activity tracker"
+ description = "Provide the name of the existing COS instance where the logs for the enabled functionalities will be stored."
}
variable "enable_vpc_flow_logs" {
type = bool
default = true
- description = "Enable Activity tracker"
+ description = "This flag determines whether VPC flow logs are enabled. When set to true, a flow log collector will be created to capture and monitor network traffic data within the VPC. Enabling flow logs provides valuable insights for troubleshooting, performance monitoring, and security auditing by recording information about the traffic passing through your VPC. Consider enabling this feature to enhance visibility and maintain robust network management practices."
}
##############################################################################
@@ -494,54 +667,86 @@ variable "override_json_string" {
variable "enable_ldap" {
type = bool
default = false
- description = "Set this option to true to enable LDAP for IBM Cloud HPC, with the default value set to false."
+ description = "Set this option to true to enable LDAP for IBM Spectrum Scale (GPFS), with the default value set to false."
}
variable "ldap_basedns" {
type = string
default = "ldapscale.com"
description = "The dns domain name is used for configuring the LDAP server. If an LDAP server is already in existence, ensure to provide the associated DNS domain name."
+ validation {
+ condition = var.enable_ldap == false || (var.ldap_basedns != null ? (length(trimspace(var.ldap_basedns)) > 3 && length(var.ldap_basedns) <= 253 && var.ldap_basedns != "null" && !startswith(trimspace(var.ldap_basedns), "www.") && can(regex("^[a-zA-Z0-9]+([a-zA-Z0-9.]*[a-zA-Z0-9]+)*([a-zA-Z0-9]+[a-zA-Z0-9-]*[a-zA-Z0-9]+)*\\.[a-zA-Z]{2,63}$", trimspace(var.ldap_basedns)))) : false)
+ error_message = "If LDAP is enabled, then the base DNS should not be empty or null. Furthermore, DNS provided should be a properly formatted domain name (without www. prefix), between 4-253 characters, and matches standard DNS naming rules."
+ }
}
variable "ldap_server" {
type = string
default = null
description = "Provide the IP address for the existing LDAP server. If no address is given, a new LDAP server will be created."
+ validation {
+ condition = var.enable_ldap == false || var.ldap_server == null || (var.ldap_server != null ? (length(trimspace(var.ldap_server)) > 0 && var.ldap_server != "null") : true)
+ error_message = "If LDAP is enabled, an existing LDAP server IP should be provided."
+ }
}
variable "ldap_server_cert" {
type = string
sensitive = true
default = null
- description = "Provide the existing LDAP server certificate. This value is required if the 'ldap_server' variable is not set to null. If the certificate is not provided or is invalid, the LDAP configuration may fail."
+ description = "Provide the existing LDAP server certificate. This value is required if the 'ldap_server' variable is not set to null. If the certificate is not provided or is invalid, the LDAP configuration may fail. For more information on how to create or obtain the certificate, please refer [existing LDAP server certificate](https://cloud.ibm.com/docs/allowlist/hpc-service?topic=hpc-service-integrating-openldap)."
+ validation {
+ condition = var.enable_ldap == false || var.ldap_server == null || (var.ldap_server_cert != null ? (length(trimspace(var.ldap_server_cert)) > 0 && var.ldap_server_cert != "null") : false)
+ error_message = "Provide the current LDAP server certificate. This is required if 'ldap_server' is set; otherwise, the LDAP configuration will not succeed."
+ }
}
variable "ldap_admin_password" {
type = string
sensitive = true
default = null
- description = "The LDAP administrative password should be 8 to 20 characters long, with a mix of at least three alphabetic characters, including one uppercase and one lowercase letter. It must also include two numerical digits and at least one special character from (~@_+:) are required. It is important to avoid including the username in the password for enhanced security."
+ description = "The LDAP admin password must be 8 to 20 characters long and include at least two alphabetic characters (with one uppercase and one lowercase), one number, and one special character from the set (!@#$%^&*()_+=-). The password must not contain the username or any spaces. [This value is ignored for an existing LDAP server]."
+ validation {
+ condition = (
+ var.enable_ldap ? (
+ var.ldap_server == null ? (
+ var.ldap_admin_password != null ? (
+ try(length(var.ldap_admin_password)) >= 8 &&
+ try(length(var.ldap_admin_password)) <= 20 &&
+ try(can(regex(".*[0-9].*", var.ldap_admin_password)), false) &&
+ try(can(regex(".*[A-Z].*", var.ldap_admin_password)), false) &&
+ try(can(regex(".*[a-z].*", var.ldap_admin_password)), false) &&
+ try(can(regex(".*[!@#$%^&*()_+=-].*", var.ldap_admin_password)), false) &&
+ !try(can(regex(".*\\s.*", var.ldap_admin_password)), false)
+ ) : false
+ ) : true
+ ) : true
+ )
+ error_message = "The LDAP admin password must be 8 to 20 characters long and include at least two alphabetic characters (with one uppercase and one lowercase), one number, and one special character from the set (!@#$%^&*()_+=-). The password must not contain any spaces."
+ }
}
variable "ldap_user_name" {
type = string
default = ""
description = "Custom LDAP User for performing cluster operations. Note: Username should be between 4 to 32 characters, (any combination of lowercase and uppercase letters).[This value is ignored for an existing LDAP server]"
+ validation {
+ condition = var.enable_ldap == false || var.ldap_server != null || (length(var.ldap_user_name) >= 4 && length(var.ldap_user_name) <= 32 && var.ldap_user_name != "" && can(regex("^[a-zA-Z0-9_-]*$", var.ldap_user_name)) && trimspace(var.ldap_user_name) != "")
+ error_message = "LDAP username must be between 4-32 characters long and can only contain letters, numbers, hyphens, and underscores. Spaces are not permitted."
+ }
}
variable "ldap_user_password" {
type = string
sensitive = true
default = ""
- description = "The LDAP user password should be 8 to 20 characters long, with a mix of at least three alphabetic characters, including one uppercase and one lowercase letter. It must also include two numerical digits and at least one special character from (~@_+:) are required.It is important to avoid including the username in the password for enhanced security.[This value is ignored for an existing LDAP server]."
+ description = "The LDAP user password must be 8 to 20 characters long and include at least two alphabetic characters (with one uppercase and one lowercase), one numeric digit, and at least one special character from the set (!@#$%^&*()_+=-). Spaces are not allowed. The password must not contain the username for enhanced security. [This value is ignored for an existing LDAP server]."
+ validation {
+ condition = !var.enable_ldap || var.ldap_server != null || (
+ (replace(lower(var.ldap_user_password), lower(var.ldap_user_name), "") == lower(var.ldap_user_password)) &&
+ length(var.ldap_user_password) >= 8 &&
+ length(var.ldap_user_password) <= 20 &&
+ can(regex(".*[0-9].*", var.ldap_user_password)) &&
+ can(regex(".*[A-Z].*", var.ldap_user_password)) &&
+ can(regex(".*[a-z].*", var.ldap_user_password)) &&
+ can(regex(".*[!@#$%^&*()_+=-].*", var.ldap_user_password)) &&
+ !can(regex(".*\\s.*", var.ldap_user_password))
+ )
+ error_message = "The LDAP user password must be 8 to 20 characters long and include at least two alphabetic characters (with one uppercase and one lowercase), one number, and one special character from the set (!@#$%^&*()_+=-). The password must not contain the username or any spaces."
+ }
}
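+# A minimal sketch (illustrative values only, not defaults of this module) of
+# inputs that satisfy the LDAP validations above when a new LDAP server is
+# provisioned:
+#   enable_ldap         = true
+#   ldap_server         = null
+#   ldap_admin_password = "Adm1nPass!"  # pragma: allowlist secret
+#   ldap_user_name      = "scaleuser"
+#   ldap_user_password  = "Scale@User1" # pragma: allowlist secret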
-# variable "ldap_instance_key_pair" {
-# type = list(string)
-# default = null
-# description = "Name of the SSH key configured in your IBM Cloud account that is used to establish a connection to the LDAP Server. Make sure that the SSH key is present in the same resource group and region where the LDAP Servers are provisioned. If you do not have an SSH key in your IBM Cloud account, create one by using the [SSH keys](https://cloud.ibm.com/docs/vpc?topic=vpc-ssh-keys) instructions."
-# }
-
variable "ldap_instance" {
type = list(
object({
@@ -551,30 +756,47 @@ variable "ldap_instance" {
)
default = [{
profile = "cx2-2x4"
- image = "ibm-ubuntu-22-04-5-minimal-amd64-1"
+ image = "ibm-ubuntu-22-04-5-minimal-amd64-5"
}]
- description = "Profile and Image name to be used for provisioning the LDAP instances. Note: Debian based OS are only supported for the LDAP feature"
+ description = "Specify the list of virtual server instances to be provisioned as ldap nodes in the cluster. Each object in the list defines the instance profile (machine type), the count (number of instances), the image (OS image to use). This configuration allows you to customize the server for setting up ldap server. The profile must match a valid IBM Cloud VPC Gen2 instance profile format. For more details, refer [Instance Profiles](https://cloud.ibm.com/docs/vpc?topic=vpc-profiles&interface=ui)."
+ validation {
+ condition = alltrue([
+ for inst in var.ldap_instance : can(regex("^(b|c|m)x[0-9]+d?-[0-9]+x[0-9]+$", inst.profile))
+ ])
+ error_message = "Specified profile must be a valid IBM Cloud VPC GEN2 profile name [Learn more](https://cloud.ibm.com/docs/vpc?topic=vpc-profiles)."
+ }
+ validation {
+ condition = alltrue([
+ for inst in var.ldap_instance : can(regex("^ibm-ubuntu", inst.image))
+ ])
+ error_message = "Specified image should necessarily be an IBM Ubuntu image [Learn more](https://cloud.ibm.com/docs/vpc?group=stock-images)."
+ }
}
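+# For reference (illustrative): profiles such as "cx2-2x4", "bx2d-16x64", or
+# "mx2-4x32" match the profile regex above, while bare metal profiles such as
+# "cx2d-metal-96x192" do not; images must begin with "ibm-ubuntu", for example
+# "ibm-ubuntu-22-04-5-minimal-amd64-5".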
+
##############################################################################
# GKLM variables
##############################################################################
variable "scale_encryption_enabled" {
type = bool
default = false
- description = "To enable the encryption for the filesystem. Select true or false"
+ description = "Encryption ensures that data stored in the filesystem is protected from unauthorized access and secures sensitive information at rest. To enable the encryption for the filesystem. Select true or false"
}
variable "scale_encryption_type" {
type = string
- default = null
+ default = "null"
description = "To enable filesystem encryption, specify either 'key_protect' or 'gklm'. If neither is specified, the default value will be 'null' and encryption is disabled"
-}
-variable "gklm_instance_key_pair" {
- type = list(string)
- default = null
- description = "The key pair to use to launch the GKLM host."
+ validation {
+ condition = can(regex("^(key_protect|gklm|null)$", var.scale_encryption_type)) && (var.scale_encryption_type == "null" || var.scale_encryption_enabled) && (!var.scale_encryption_enabled || var.scale_encryption_type != "null")
+ error_message = "Allowed values for scale_encryption_type are 'key_protect', 'gklm', or 'null'. A value other than 'null' requires scale_encryption_enabled to be set to true, and enabling encryption requires a value other than 'null'."
+ }
+}
+variable "gklm_instances" {
+ type = list(
+ object({
+ profile = string
+ count = number
+ image = string
+ })
+ )
+ default = [{
+ profile = "bx2-2x8"
+ count = 2
+ image = "hpcc-scale-gklm4202-v2-5-3"
+ }]
+ validation {
+ condition = alltrue([
+ for inst in var.gklm_instances : can(regex("^(b|c|m)x[0-9]+d?-[0-9]+x[0-9]+$", inst.profile))
+ ])
+ error_message = "Specified profile must be a valid IBM Cloud VPC GEN2 profile name [Learn more](https://cloud.ibm.com/docs/vpc?topic=vpc-profiles)."
+ }
+ validation {
+ condition = (var.scale_encryption_type != "gklm" || (sum([for inst in var.gklm_instances : inst.count]) >= 2 && sum([for inst in var.gklm_instances : inst.count]) <= 5))
+ error_message = "For high availability, the total GKLM instance count must be between 2 and 5."
+ }
+ description = "Specify the list of virtual server instances to be provisioned as GKLM (Guardium Key Lifecycle Manager) nodes in the cluster. Each object in the list includes the instance profile (machine type), the count (number of instances), and the image (OS image to use). This configuration allows you to manage and securely store encryption keys used across the cluster components. The profile must match a valid IBM Cloud VPC Gen2 instance profile format. A minimum of 2 and a maximum of 5 GKLM nodes are supported. For more details, refer to [Instance Profiles](https://cloud.ibm.com/docs/vpc?topic=vpc-profiles&interface=ui)."
}
-variable "scale_encryption_admin_default_password" {
+variable "scale_encryption_admin_password" {
type = string
+ sensitive = true
default = null
- description = "The default administrator password used for resetting the admin password based on the user input. The password has to be updated which was configured during the GKLM installation."
-}
+ description = "Specifies the administrator password for GKLM-based encryption. This is required when encryption is enabled for IBM Spectrum Scale (GPFS) and the encryption type is set to 'gklm'. The password is used to authenticate administrative access to the Guardium Key Lifecycle Manager (GKLM) for managing encryption keys. Ensure the password meets your organization's security standards."
-variable "scale_encryption_admin_username" {
- type = string
- default = "SKLMAdmin"
- description = "The default Admin username for Security Key Lifecycle Manager(GKLM)."
+ validation {
+ condition = (
+ var.scale_encryption_enabled && var.scale_encryption_type == "gklm"
+ ? var.scale_encryption_admin_password != null &&
+ length(var.scale_encryption_admin_password) >= 8 &&
+ length(var.scale_encryption_admin_password) <= 20 &&
+ can(regex(".*[0-9].*", var.scale_encryption_admin_password)) &&
+ can(regex(".*[A-Z].*", var.scale_encryption_admin_password)) &&
+ can(regex(".*[a-z].*", var.scale_encryption_admin_password)) &&
+ can(regex(".*[!@#$%^&*()_+=-].*", var.scale_encryption_admin_password)) &&
+ !can(regex(".*\\s.*", var.scale_encryption_admin_password))
+ : true
+ )
+ error_message = "You must provide scale_encryption_admin_password when scale_encryption_enabled is true and scale_encryption_type is 'gklm'. The scale encryption admin password must be 8 to 20 characters long and include at least two alphabetic characters (with one uppercase and one lowercase), one number, and one special character from the set (!@#$%^&*()_+=-). The password must not contain any spaces."
+ }
}
-variable "scale_encryption_admin_password" {
+# Existing Key Protect Instance Details
+
+variable "key_protect_instance_id" {
type = string
default = null
- description = "Password that is used for performing administrative operations for the GKLM.The password must contain at least 8 characters and at most 20 characters. For a strong password, at least three alphabetic characters are required, with at least one uppercase and one lowercase letter. Two numbers, and at least one special character from this(~@_+:). Make sure that the password doesn't include the username. Visit this [page](https://www.ibm.com/docs/en/gklm/3.0.1?topic=roles-password-policy) to know more about password policy of GKLM. "
+ description = "Provide the ID of an existing IBM Key Protect instance to be used for filesystem encryption in IBM Storage Scale. If this value is provided, the automation will use the existing Key Protect instance and create a new encryption key within it. If not provided, a new Key Protect instance will be created automatically during deployment."
}
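+# A minimal sketch (illustrative; the instance ID is hypothetical) of enabling
+# Key Protect based encryption with an existing instance:
+#   scale_encryption_enabled = true
+#   scale_encryption_type    = "key_protect"
+#   key_protect_instance_id  = "1c7d5f78-0000-0000-0000-000000000000"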
variable "storage_type" {
type = string
default = "scratch"
- description = "Select the required storage type(scratch/persistent/evaluation)."
-}
-
-# variable "custom_file_shares" {
-# type = list(
-# object({
-# mount_path = string,
-# size = number,
-# iops = number
-# })
-# )
-# default = [{
-# mount_path = "/mnt/binaries"
-# size = 100
-# iops = 1000
-# }, {
-# mount_path = "/mnt/data"
-# size = 100
-# iops = 1000
-# }]
-# description = "Custom file shares to access shared storage"
-# }
+ description = "Select the Storage Scale file system deployment method. Note: The Storage Scale scratch and evaluation type deploys the Storage Scale file system on virtual server instances, and the persistent type deploys the Storage Scale file system on bare metal servers."
+ validation {
+ condition = can(regex("^(scratch|persistent|evaluation)$", lower(var.storage_type)))
+ error_message = "The solution only support scratch, evaluation, and persistent; provide any one of the value."
+ }
+ validation {
+ condition = var.storage_type == "persistent" ? contains(["us-south-1", "us-south-2", "us-south-3", "us-east-1", "us-east-2", "eu-de-1", "eu-de-2", "eu-de-3", "eu-gb-1", "eu-es-3", "eu-es-1", "jp-tok-2", "jp-tok-3", "ca-tor-2", "ca-tor-3"], join(",", var.zones)) : true
+ error_message = "The solution supports bare metal server creation in only given availability zones i.e. us-south-1, us-south-3, us-south-2, eu-de-1, eu-de-2, eu-de-3, jp-tok-2, eu-gb-1, us-east-1, us-east-2, eu-es-3, eu-es-1, jp-tok-3, jp-tok-2, ca-tor-2 and ca-tor-3. To deploy persistent storage provide any one of the supported availability zones."
+ }
+}
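+# A minimal sketch (illustrative) of a persistent deployment, which must target
+# one of the supported availability zones listed above:
+#   storage_type = "persistent"
+#   zones        = ["us-south-1"]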
##############################################################################
# Observability Variables
@@ -643,7 +874,7 @@ variable "storage_type" {
variable "observability_atracker_enable" {
type = bool
- default = true
+ default = false
description = "Activity Tracker Event Routing to configure how to route auditing events. While multiple Activity Tracker instances can be created, only one tracker is needed to capture all events. Creating additional trackers is unnecessary if an existing Activity Tracker is already integrated with a COS bucket. In such cases, set the value to false, as all events can be monitored and accessed through the existing Activity Tracker."
}
@@ -657,105 +888,231 @@ variable "observability_atracker_target_type" {
}
}
-variable "observability_monitoring_enable" {
- description = "Set false to disable IBM Cloud Monitoring integration. If enabled, infrastructure and LSF application metrics from Management Nodes will be ingested."
- type = bool
- default = true
+##############################################################################
+# SCC Workload Protection Variables
+##############################################################################
+
+variable "sccwp_service_plan" {
+ description = "Specify the plan type for the Security and Compliance Center (SCC) Workload Protection instance. Valid values are free-trial and graduated-tier only."
+ type = string
+ default = "free-trial"
+ validation {
+ error_message = "Plan for SCC Workload Protection instances can only be `free-trial` or `graduated-tier`."
+ condition = contains(
+ ["free-trial", "graduated-tier"],
+ var.sccwp_service_plan
+ )
+ }
}
-variable "observability_logs_enable_for_management" {
- description = "Set false to disable IBM Cloud Logs integration. If enabled, infrastructure and LSF application logs from Management Nodes will be ingested."
+variable "sccwp_enable" {
type = bool
default = false
+ description = "Set this flag to true to create an instance of IBM Security and Compliance Center (SCC) Workload Protection. When enabled, it provides tools to discover and prioritize vulnerabilities, monitor for security threats, and enforce configuration, permission, and compliance policies across the full lifecycle of your workloads. To view the data on the dashboard, enable the cspm to create the app configuration and required trusted profile policies.[Learn more](https://cloud.ibm.com/docs/workload-protection?topic=workload-protection-about)."
}
-variable "observability_logs_enable_for_compute" {
- description = "Set false to disable IBM Cloud Logs integration. If enabled, infrastructure and LSF application logs from Compute Nodes will be ingested."
+variable "cspm_enabled" {
+ description = "CSPM (Cloud Security Posture Management) is a set of tools and practices that continuously monitor and secure cloud infrastructure. When enabled, it creates a trusted profile with viewer access to the App Configuration and Enterprise services for the SCC Workload Protection instance. Make sure the required IAM permissions are in place, as missing permissions will cause deployment to fail. If CSPM is disabled, dashboard data will not be available.[Learn more](https://cloud.ibm.com/docs/workload-protection?topic=workload-protection-about)."
type = bool
- default = false
+ default = true
+ nullable = false
+}
+
+variable "app_config_plan" {
+ description = "Specify the IBM service pricing plan for the app configuration. Allowed values are 'basic', 'lite', 'standardv2', 'enterprise'."
+ type = string
+ default = "basic"
+ validation {
+ error_message = "Plan for App configuration can only be basic, standardv2, enterprise.."
+ condition = contains(
+ ["basic", "standardv2", "enterprise"],
+ var.app_config_plan
+ )
+ }
}
-variable "observability_enable_platform_logs" {
- description = "Setting this to true will create a tenant in the same region that the Cloud Logs instance is provisioned to enable platform logs for that region. NOTE: You can only have 1 tenant per region in an account."
+variable "skip_flowlogs_s2s_auth_policy" {
type = bool
default = false
+ description = "Skip auth policy between flow logs service and COS instance, set to true if this policy is already in place on account."
+}
+
+###########################################################################
+# Existing Bastion Support variables
+###########################################################################
+
+variable "existing_bastion_instance_name" {
+ type = string
+ default = null
+ description = "Provide the name of the bastion instance. If none given then new bastion will be created."
+}
+
+variable "existing_bastion_instance_public_ip" {
+ type = string
+ default = null
+ description = "Provide the public ip address of the bastion instance to establish the remote connection."
+}
+
+variable "existing_bastion_security_group_id" {
+ type = string
+ default = null
+ description = "Specify the security group ID for the bastion server. This ID will be added as an allowlist rule on the HPC cluster nodes to facilitate secure SSH connections through the bastion node. By restricting access through a bastion server, this setup enhances security by controlling and monitoring entry points into the cluster environment. Ensure that the specified security group is correctly configured to permit only authorized traffic for secure and efficient management of cluster resources."
+}
+
+variable "existing_bastion_ssh_private_key" {
+ type = string
+ sensitive = true
+ default = null
+ description = "Provide the private SSH key (named id_rsa) used during the creation and configuration of the bastion server to securely authenticate and connect to the bastion server. This allows access to internal network resources from a secure entry point. Note: The corresponding public SSH key (named id_rsa.pub) must already be available in the ~/.ssh/authorized_keys file on the bastion host to establish authentication."
}
-variable "observability_enable_metrics_routing" {
- description = "Enable metrics routing to manage metrics at the account-level by configuring targets and routes that define where data points are routed."
+variable "bms_boot_drive_encryption" {
type = bool
default = false
+ description = "Enable or disable encryption for the boot drive of bare metal servers. When set to true, the boot drive will be encrypted to enhance data security, protecting the operating system and any sensitive information stored on the root volume. This is especially recommended for workloads with strict compliance or security requirements. Set to false to disable boot drive encryption."
}
-variable "observability_logs_retention_period" {
- description = "The number of days IBM Cloud Logs will retain the logs data in Priority insights. Allowed values: 7, 14, 30, 60, 90."
- type = number
- default = 7
+##############################################################################
+# Existing VPC Storage Security Variables
+##############################################################################
+variable "enable_sg_validation" {
+ type = bool
+ default = true
+ description = "Enable or disable security group validation. Security group validation ensures that the specified security groups are properly assigned"
+}
+
+variable "login_security_group_name" {
+ type = string
+ default = null
+ description = "Provide the existing security group name to provision the bastion node. If set to null, the solution will automatically create the necessary security group and rules. If you choose to use an existing security group, ensure it has the appropriate rules configured for the bastion node to function properly."
validation {
- condition = contains([7, 14, 30, 60, 90], var.observability_logs_retention_period)
- error_message = "Allowed values for cloud logs retention period is 7, 14, 30, 60, 90."
+ condition = anytrue([var.vpc_name != null && var.login_security_group_name != null, var.login_security_group_name == null])
+ error_message = "If the login_security_group_name are provided, the user should also provide the vpc_name."
}
}
-variable "observability_monitoring_on_compute_nodes_enable" {
- description = "Set false to disable IBM Cloud Monitoring integration. If enabled, infrastructure metrics from Compute Nodes will be ingested."
- type = bool
- default = false
+variable "storage_security_group_name" {
+ type = string
+ default = null
+ description = "Provide the security group name to provision the storage nodes. If set to null, the solution will automatically create the necessary security group and rules. If you choose to use an existing security group, ensure it has the appropriate rules configured for the storage nodes to function properly. When using existing security groups, you must provide the corresponding group names for all other associated components as well."
+ validation {
+ condition = anytrue([var.vpc_name != null && var.storage_security_group_name != null, var.storage_security_group_name == null])
+ error_message = "If the storage_security_group_name are provided, the user should also provide the vpc_name."
+ }
}
-variable "observability_monitoring_plan" {
- description = "Type of service plan for IBM Cloud Monitoring instance. You can choose one of the following: lite, graduated-tier. For all details visit [IBM Cloud Monitoring Service Plans](https://cloud.ibm.com/docs/monitoring?topic=monitoring-service_plans)."
+variable "compute_security_group_name" {
type = string
- default = "graduated-tier"
+ default = null
+ description = "Provide the security group name to provision the compute nodes. If set to null, the solution will automatically create the necessary security group and rules. If you choose to use an existing security group, ensure it has the appropriate rules configured for the storage nodes to function properly. When using existing security groups, you must provide the corresponding group names for all other associated components as well"
validation {
- condition = can(regex("lite|graduated-tier", var.observability_monitoring_plan))
- error_message = "Please enter a valid plan for IBM Cloud Monitoring, for all details visit https://cloud.ibm.com/docs/monitoring?topic=monitoring-service_plans."
+ condition = anytrue([var.vpc_name != null && var.compute_security_group_name != null, var.compute_security_group_name == null])
+ error_message = "If the compute_security_group_name are provided, the user should also provide the vpc_name."
}
}
-variable "skip_flowlogs_s2s_auth_policy" {
- type = bool
- default = false
- description = "Skip auth policy between flow logs service and COS instance, set to true if this policy is already in place on account."
+variable "client_security_group_name" {
+ type = string
+ default = null
+ description = "Provide the security group name to provision the client nodes. If set to null, the solution will automatically create the necessary security group and rules. If you choose to use an existing security group, ensure it has the appropriate rules configured for the storage nodes to function properly. When using existing security groups, you must provide the corresponding group names for all other associated components as well"
+ validation {
+ condition = anytrue([var.vpc_name != null && var.client_security_group_name != null, var.client_security_group_name == null])
+ error_message = "If the client_security_group_name are provided, the user should also provide the vpc_name."
+ }
}
-variable "skip_kms_s2s_auth_policy" {
- type = bool
- default = false
- description = "Skip auth policy between KMS service and COS instance, set to true if this policy is already in place on account."
+variable "gklm_security_group_name" {
+ type = string
+ default = null
+ description = "Provide the security group name to provision the gklm nodes. If set to null, the solution will automatically create the necessary security group and rules. If you choose to use an existing security group, ensure it has the appropriate rules configured for the storage nodes to function properly. When using existing security groups, you must provide the corresponding group names for all other associated components as well"
+ validation {
+ condition = anytrue([var.vpc_name != null && var.gklm_security_group_name != null, var.gklm_security_group_name == null])
+ error_message = "If the gklm_security_group_name are provided, the user should also provide the vpc_name."
+ }
+ validation {
+ condition = anytrue([var.vpc_name != null && var.storage_security_group_name != null && sum(var.gklm_instances[*]["count"]) >= 2 ? (var.gklm_security_group_name != null ? true : false) : true])
+ error_message = "If the storage_security_group_name are provided with gklm_instances count more than or equal to 2, the user should also provide the gklm_security_group_name along with vpc_name. Note: Pass the value for gklm_security_group_name as storage_security_group_name."
+ }
}
-variable "skip_iam_block_storage_authorization_policy" {
- type = bool
- default = false
- description = "When using an existing KMS instance name, set this value to true if authorization is already enabled between KMS instance and the block storage volume. Otherwise, default is set to false. Ensuring proper authorization avoids access issues during deployment.For more information on how to create authorization policy manually, see [creating authorization policies for block storage volume](https://cloud.ibm.com/docs/vpc?topic=vpc-block-s2s-auth&interface=ui)."
+variable "ldap_security_group_name" {
+ type = string
+ default = null
+ description = "Provide the security group name to provision the ldap nodes. If set to null, the solution will automatically create the necessary security group and rules. If you choose to use an existing security group, ensure it has the appropriate rules configured for the storage nodes to function properly. When using existing security groups, you must provide the corresponding group names for all other associated components as well"
+ validation {
+ condition = anytrue([var.vpc_name != null && var.ldap_security_group_name != null, var.ldap_security_group_name == null])
+ error_message = "If the ldap_security_group_name are provided, the user should also provide the vpc_name."
+ }
+ validation {
+ condition = anytrue([var.vpc_name != null && var.storage_security_group_name != null && var.enable_ldap ? (var.ldap_security_group_name != null ? true : false) : true])
+ error_message = "If the storage_security_group_name are provided with enable_ldap as true, the user should also provide the ldap_security_group_name along with vpc_name. Note: Pass the value for ldap_security_group_name as storage_security_group_name."
+ }
}
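+# A minimal sketch (illustrative names) of reusing existing security groups in
+# an existing VPC; all associated component group names must be supplied
+# together with vpc_name:
+#   vpc_name                    = "my-existing-vpc"
+#   login_security_group_name   = "my-login-sg"
+#   storage_security_group_name = "my-storage-sg"
+#   compute_security_group_name = "my-compute-sg"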
-###########################################################################
-# Existing Bastion Support variables
-###########################################################################
+variable "login_subnet_id" {
+ type = string
+ default = null
+ description = "Provide ID of an existing subnet to be used for provisioning bastion/deployer node. This is required only when deploying into an existing VPC (i.e., when a value is provided for `vpc_name`). When specifying an existing subnet, ensure that a public gateway is attached to the subnet to enable outbound internet access if required. Additionally, if this subnet ID is provided, you must also provide subnet IDs for all other applicable components (e.g., storage , compute, client, protocol, gklm) to maintain consistency across the deployment."
+ validation {
+ condition = anytrue([var.vpc_name != null && var.login_subnet_id != null, var.login_subnet_id == null])
+ error_message = "If the login_subnet_id are provided, the user should also provide the vpc_name."
+ }
+}
-variable "existing_bastion_instance_name" {
+variable "compute_subnet_id" {
type = string
default = null
- description = "Provide the name of the bastion instance. If none given then new bastion will be created."
+ description = "Provide ID of an existing subnet to be used for provisioning compute nodes. This is required only when deploying into an existing VPC (i.e., when a value is provided for `vpc_name`). When specifying an existing subnet, ensure that a public gateway is attached to the subnet to enable outbound internet access if required. Additionally, if this subnet ID is provided, you must also provide subnet IDs for all other applicable components (e.g., storage , protocol, client, login, gklm) to maintain consistency across the deployment."
+ validation {
+ condition = anytrue([var.vpc_name != null && var.compute_subnet_id != null, var.compute_subnet_id == null])
+ error_message = "If the compute_subnet_id are provided, the user should also provide the vpc_name."
+ }
}
-variable "existing_bastion_instance_public_ip" {
+variable "storage_subnet_id" {
type = string
+ description = "Provide ID of an existing subnet to be used for storage nodes. This is required only when deploying into an existing VPC (i.e., when a value is provided for `vpc_name`). When specifying an existing subnet, ensure that a public gateway is attached to the subnet to enable outbound internet access if required. Additionally, if this subnet ID is provided, you must also provide subnet IDs for all other applicable components (e.g., compute , protocol, client, login, gklm) to maintain consistency across the deployment."
default = null
- description = "Provide the public ip address of the bastion instance to establish the remote connection."
+ validation {
+ condition = anytrue([var.vpc_name != null && var.storage_subnet_id != null, var.storage_subnet_id == null])
+ error_message = "If the storage_subnet_id are provided, the user should also provide the vpc_name."
+ }
}
-variable "existing_bastion_security_group_id" {
+variable "protocol_subnet_id" {
type = string
+ description = "Provide ID of an existing subnet to be used for protocol nodes. This is required only when deploying into an existing VPC (i.e., when a value is provided for `vpc_name`). When specifying an existing subnet, ensure that a public gateway is attached to the subnet to enable outbound internet access if required. Additionally, if this subnet ID is provided, you must also provide subnet IDs for all other applicable components (e.g., storage , compute, client, login, gklm) to maintain consistency across the deployment."
default = null
- description = "Specify the security group ID for the bastion server. This ID will be added as an allowlist rule on the HPC cluster nodes to facilitate secure SSH connections through the bastion node. By restricting access through a bastion server, this setup enhances security by controlling and monitoring entry points into the cluster environment. Ensure that the specified security group is correctly configured to permit only authorized traffic for secure and efficient management of cluster resources."
+ validation {
+ condition = anytrue([var.vpc_name != null && var.protocol_subnet_id != null, var.protocol_subnet_id == null])
+ error_message = "If the protocol_subnet_id are provided, the user should also provide the vpc_name."
+ }
}
-variable "existing_bastion_ssh_private_key" {
+variable "client_subnet_id" {
type = string
- sensitive = true
+ description = "Provide ID of an existing subnet to be used for client nodes. This is required only when deploying into an existing VPC (i.e., when a value is provided for `vpc_name`). When specifying an existing subnet, ensure that a public gateway is attached to the subnet to enable outbound internet access if required. Additionally, if this subnet ID is provided, you must also provide subnet IDs for all other applicable components (e.g., storage , compute, protocol, login, gklm) to maintain consistency across the deployment."
default = null
- description = "Provide the private SSH key (named id_rsa) used during the creation and configuration of the bastion server to securely authenticate and connect to the bastion server. This allows access to internal network resources from a secure entry point. Note: The corresponding public SSH key (named id_rsa.pub) must already be available in the ~/.ssh/authorized_keys file on the bastion host to establish authentication."
+ validation {
+ condition = anytrue([var.vpc_name != null && var.client_subnet_id != null, var.client_subnet_id == null])
+ error_message = "If the client_subnet_id are provided, the user should also provide the vpc_name."
+ }
+}
+
+# tflint-ignore: all
+variable "TF_VERSION" {
+ type = string
+ default = "1.9"
+ description = "The version of the Terraform engine that's used in the Schematics workspace."
+}
+
+# tflint-ignore: all
+variable "TF_PARALLELISM" {
+ type = string
+ default = "250"
+ description = "Parallelism/ concurrent operations limit. Valid values are between 1 and 256, both inclusive. [Learn more](https://www.terraform.io/docs/internals/graph.html#walking-the-graph)."
+ validation {
+ condition = 1 <= var.TF_PARALLELISM && var.TF_PARALLELISM <= 256
+ error_message = "Input \"TF_PARALLELISM\" must be greater than or equal to 1 and less than or equal to 256."
+ }
}
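+# Note: TF_PARALLELISM is declared as a string for Schematics compatibility;
+# Terraform implicitly converts it to a number in the numeric comparison used
+# by the validation above.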
diff --git a/solutions/slurm/variables.tf b/solutions/slurm/variables.tf
index 852efaaa..cef4ee57 100644
--- a/solutions/slurm/variables.tf
+++ b/solutions/slurm/variables.tf
@@ -240,11 +240,11 @@ variable "storage_instances" {
profile = string
count = number
image = string
- filesystem = string
+ filesystem = optional(string)
})
)
default = [{
- profile = "bx2-2x8"
+ profile = "bx2d-32x128"
count = 0
image = "ibm-redhat-8-10-minimal-amd64-4"
filesystem = "/ibm/fs1"
diff --git a/solutions/symphony/variables.tf b/solutions/symphony/variables.tf
index 852efaaa..cef4ee57 100644
--- a/solutions/symphony/variables.tf
+++ b/solutions/symphony/variables.tf
@@ -240,11 +240,11 @@ variable "storage_instances" {
profile = string
count = number
image = string
- filesystem = string
+ filesystem = optional(string)
})
)
default = [{
- profile = "bx2-2x8"
+ profile = "bx2d-32x128"
count = 0
image = "ibm-redhat-8-10-minimal-amd64-4"
filesystem = "/ibm/fs1"
diff --git a/tests/data/scale_config.yml b/tests/data/scale_config.yml
new file mode 100644
index 00000000..a638297f
--- /dev/null
+++ b/tests/data/scale_config.yml
@@ -0,0 +1,128 @@
+# IBM Storage Scale Configuration
+scale_version: 5.2.2
+zones: ["jp-tok-1"]
+remote_allowed_ips:
+existing_resource_group: "Default"
+vpc_name: null
+ibm_customer_number: "051700"
+
+# Storage Type
+storage_type: "scratch"
+
+# SSH Configuration
+ssh_keys: geretain-hpc
+ssh_file_path: /artifacts/.ssh/id_rsa
+
+# Bastion Configuration
+bastion_instance:
+ image: "ibm-ubuntu-22-04-5-minimal-amd64-6"
+ profile: "cx2-4x8"
+
+# Deployer Configuration
+deployer_instance:
+ profile: "mx2-4x32"
+ image: "hpcc-scale-deployer-v1"
+
+
+# Compute Configuration
+compute_gui_username: "computeUsername"
+compute_gui_password: "Pass@1234" # pragma: allowlist secret
+
+# Storage Configuration
+storage_gui_username: "storageUsername"
+storage_gui_password: "Pass@1234" # pragma: allowlist secret
+
+# Instance Configurations
+compute_instances:
+ - profile: "cx2-2x4"
+ count: 3
+ image: "hpcc-scale5232-rhel810-v1"
+ filesystem: "/comp/fs1"
+
+client_instances:
+ - profile: "cx2-2x4"
+ count: 2
+ image: "ibm-redhat-8-10-minimal-amd64-5"
+
+storage_instances:
+ - profile: "bx2d-16x64"
+ count: 2
+ image: "hpcc-scale5232-rhel810-v1"
+ filesystem: "/storage/fs1"
+
+storage_servers:
+ - profile: "cx2d-metal-96x192"
+ count: 0
+ image: "hpcc-scale5232-rhel810-v1"
+ filesystem: "/gpfs/fs1"
+
+protocol_instances:
+ - profile: "bx2d-16x64"
+ count: 0
+ image: "hpcc-scale5232-rhel810-v1"
+
+# AFM
+afm_instances:
+ - profile: "bx2d-32x128"
+ count: 1
+ image: "hpcc-scale5232-rhel810-v1"
+
+# Filesystem Configuration
+filesystem_config:
+ - filesystem: "/ibm/fs1"
+ block_size: "4M"
+ default_data_replica: 2
+ default_metadata_replica: 2
+ max_data_replica: 3
+ max_metadata_replica: 3
+
+filesets_config:
+ - client_mount_path: "/mnt/scale/tools"
+ quota: 100
+ - client_mount_path: "/mnt/scale/data"
+ quota: 100
+
+# DNS Configuration
+dns_domain_names:
+ compute: "comp.com"
+ storage: "strg.com"
+ protocol: "ces.com"
+ client: "clnt.com"
+ gklm: "gklm.com"
+
+# Observability
+enable_cos_integration: false
+enable_vpc_flow_logs: false
+
+# LDAP Configuration
+enable_ldap: false
+ldap_basedns: "ldapscale.com"
+ldap_server: null
+ldap_admin_password: "Pass@123" # pragma: allowlist secret
+ldap_user_name: "tester"
+ldap_user_password: "Pass@123" # pragma: allowlist secret
+ldap_instance:
+ - profile: "cx2-2x4"
+ image: "ibm-ubuntu-22-04-5-minimal-amd64-1"
+
+# Encryption Configuration
+scale_encryption_enabled: true
+scale_encryption_type: "key_protect"
+gklm_instances:
+ - profile: "bx2-2x8"
+ count: 2
+ image: "hpcc-scale-gklm4202-v2-5-3"
+scale_encryption_admin_password: "Pass@1234" # pragma: allowlist secret
+
+# Security and Compliance
+sccwp_enable: false
+cspm_enabled: false
+sccwp_service_plan: "graduated-tier"
+app_config_plan: "basic"
+
+# Observability Configuration
+observability_atracker_enable: false
+observability_atracker_target_type: "cloudlogs"
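+
+# Note (illustrative summary): this file is consumed by GetScaleConfigFromYAML
+# in tests/deployment/scale_deployment.go, which exports each field as an
+# environment variable (for example STORAGE_TYPE or SCALE_ENCRYPTION_TYPE)
+# unless that variable is already set in the environment.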
diff --git a/tests/deployment/lsf_deployment.go b/tests/deployment/lsf_deployment.go
index 046ae0de..e2478084 100644
--- a/tests/deployment/lsf_deployment.go
+++ b/tests/deployment/lsf_deployment.go
@@ -135,8 +135,8 @@ type Config struct {
AttrackerTestZone string `yaml:"attracker_test_zone"`
}
-// GetConfigFromYAML reads a YAML file and populates the Config struct.
-func GetConfigFromYAML(filePath string) (*Config, error) {
+// GetLSFConfigFromYAML reads a YAML file and populates the Config struct.
+func GetLSFConfigFromYAML(filePath string) (*Config, error) {
file, err := os.Open(filePath)
if err != nil {
return nil, fmt.Errorf("failed to open YAML file %s: %w", filePath, err)
diff --git a/tests/deployment/scale_deployment.go b/tests/deployment/scale_deployment.go
new file mode 100644
index 00000000..58a44f8d
--- /dev/null
+++ b/tests/deployment/scale_deployment.go
@@ -0,0 +1,317 @@
+package tests
+
+import (
+ "encoding/json"
+ "fmt"
+ "log"
+ "os"
+ "strconv"
+ "strings"
+
+ "github.com/terraform-ibm-modules/ibmcloud-terratest-wrapper/common"
+ utils "github.com/terraform-ibm-modules/terraform-ibm-hpc/utilities"
+ "gopkg.in/yaml.v3"
+)
+
+var scaleGlobalIP string
+var IbmCustomerNumberValue string
+
+const yamlLocation = "../common-dev-assets/common-go-assets/common-permanent-resources.yaml"
+
+type ClientInstance struct {
+ Profile string `yaml:"profile" json:"profile"`
+ Count int `yaml:"count" json:"count"`
+ Image string `yaml:"image" json:"image"`
+}
+
+type ProtocolInstance struct {
+ Profile string `yaml:"profile" json:"profile"`
+ Count int `yaml:"count" json:"count"`
+ Image string `yaml:"image" json:"image"`
+}
+
+type ComputeInstance struct {
+ Profile string `yaml:"profile" json:"profile"`
+ Count int `yaml:"count" json:"count"`
+ Image string `yaml:"image" json:"image"`
+ Filesystem string `yaml:"filesystem" json:"filesystem"`
+}
+
+type StorageInstance struct {
+ Profile string `yaml:"profile" json:"profile"`
+ Count int `yaml:"count" json:"count"`
+ Image string `yaml:"image" json:"image"`
+ Filesystem string `yaml:"filesystem" json:"filesystem"`
+}
+
+type ScaleDeployerInstance struct {
+ Profile string `yaml:"profile" json:"profile"`
+ Image string `yaml:"image" json:"image"`
+}
+
+// GKLMInstance represents GKLM node configuration
+type GKLMInstance struct {
+ Profile string `yaml:"profile" json:"profile"`
+ Count int `yaml:"count" json:"count"`
+ Image string `yaml:"image" json:"image"`
+}
+
+// FilesystemConfig represents filesystem configuration
+type FilesystemConfig struct {
+ Filesystem string `yaml:"filesystem" json:"filesystem"`
+ BlockSize string `yaml:"block_size" json:"block_size"`
+ DefaultDataReplica int `yaml:"default_data_replica" json:"default_data_replica"`
+ DefaultMetadataReplica int `yaml:"default_metadata_replica" json:"default_metadata_replica"`
+ MaxDataReplica int `yaml:"max_data_replica" json:"max_data_replica"`
+ MaxMetadataReplica int `yaml:"max_metadata_replica" json:"max_metadata_replica"`
+}
+
+// FilesetConfig represents fileset configuration
+type FilesetConfig struct {
+ ClientMountPath string `yaml:"client_mount_path" json:"client_mount_path"`
+ Quota int `yaml:"quota" json:"quota"`
+}
+
+// DNSDomainNames represents DNS configuration
+type DNSDomainNames struct {
+ Compute string `yaml:"compute" json:"compute"`
+ Storage string `yaml:"storage" json:"storage"`
+ Protocol string `yaml:"protocol" json:"protocol"`
+ Client string `yaml:"client" json:"client"`
+ GKLM string `yaml:"gklm" json:"gklm"`
+}
+
+type AfmInstance struct {
+ Profile string `yaml:"profile" json:"profile"`
+ Count int `yaml:"count" json:"count"`
+ Image string `yaml:"image" json:"image"`
+}
+
+type ScaleConfig struct {
+ ScaleVersion string `yaml:"scale_version" json:"scale_version"`
+ IbmCustomerNumber string `yaml:"ibm_customer_number" json:"ibm_customer_number"`
+ Zones []string `yaml:"zones" json:"zones"`
+ RemoteAllowedIPs []string `yaml:"remote_allowed_ips" json:"remote_allowed_ips"`
+ ExistingResourceGroup string `yaml:"existing_resource_group" json:"existing_resource_group"`
+ StorageType string `yaml:"storage_type" json:"storage_type"`
+ SSHKeys string `yaml:"ssh_keys" json:"ssh_keys"`
+ ScaleDeployerInstance ScaleDeployerInstance `yaml:"deployer_instance" json:"deployer_instance"`
+ ComputeGUIUsername string `yaml:"compute_gui_username" json:"compute_gui_username"`
+ ComputeGUIPassword string `yaml:"compute_gui_password" json:"compute_gui_password"`
+ StorageGUIUsername string `yaml:"storage_gui_username" json:"storage_gui_username"`
+ StorageGUIPassword string `yaml:"storage_gui_password" json:"storage_gui_password"`
+ ComputeInstances []ComputeInstance `yaml:"compute_instances" json:"compute_instances"`
+ ClientInstances []ClientInstance `yaml:"client_instances" json:"client_instances"`
+ StorageInstances []StorageInstance `yaml:"storage_instances" json:"storage_instances"`
+ ScaleEncryptionEnabled bool `yaml:"scale_encryption_enabled" json:"scale_encryption_enabled"`
+ ScaleEncryptionType string `yaml:"scale_encryption_type" json:"scale_encryption_type"`
+ ScaleObservabilityAtrackerEnable bool `yaml:"observability_atracker_enable" json:"observability_atracker_enable"`
+ ScaleObservabilityAtrackerTargetType string `yaml:"observability_atracker_target_type" json:"observability_atracker_target_type"`
+ ScaleSCCWPEnable bool `yaml:"sccwp_enable" json:"sccwp_enable"`
+ ScaleCSPMEnabled bool `yaml:"cspm_enabled" json:"cspm_enabled"`
+ ScaleSCCWPServicePlan string `yaml:"sccwp_service_plan" json:"sccwp_service_plan"`
+ GKLMInstances []GKLMInstance `yaml:"gklm_instances" json:"gklm_instances"`
+ ScaleEncryptionAdminPassword string `yaml:"scale_encryption_admin_password" json:"scale_encryption_admin_password"` // pragma: allowlist secret
+ ScaleFilesystemConfig []FilesystemConfig `yaml:"filesystem_config" json:"filesystem_config"`
+ ScaleFilesetsConfig []FilesetConfig `yaml:"filesets_config" json:"filesets_config"`
+ ScaleDNSDomainNames DNSDomainNames `yaml:"dns_domain_names" json:"dns_domain_names"`
+ ScaleEnableCOSIntegration bool `yaml:"enable_cos_integration" json:"enable_cos_integration"`
+ ScaleEnableVPCFlowLogs bool `yaml:"enable_vpc_flow_logs" json:"enable_vpc_flow_logs"`
+ AfmInstances []AfmInstance `yaml:"afm_instances" json:"afm_instances"`
+ ProtocolInstances []ProtocolInstance `yaml:"protocol_instances" json:"protocol_instances"`
+}
+
+func GetScaleConfigFromYAML(filePath string) (*ScaleConfig, error) {
+ file, err := os.Open(filePath)
+ if err != nil {
+ return nil, fmt.Errorf("failed to open YAML file: %w", err)
+ }
+
+ defer func() {
+ if closeErr := file.Close(); closeErr != nil {
+ log.Printf("Warning: failed to close file %s: %v", filePath, closeErr)
+ }
+ }()
+
+ var config ScaleConfig
+ if err := yaml.NewDecoder(file).Decode(&config); err != nil {
+ return nil, fmt.Errorf("failed to decode YAML: %w", err)
+ }
+
+ scaleGlobalIP, err = utils.GetPublicIP()
+ if err != nil {
+ return nil, fmt.Errorf("failed to get public IP: %w", err)
+ }
+
+ // Load permanent resources from YAML
+ permanentResources, err := common.LoadMapFromYaml(yamlLocation)
+ if err != nil {
+ return nil, fmt.Errorf("failed to load permanent resources from YAML: %v", err)
+ }
+
+ // Retrieve ibmCustomerNumberSecretID from Secrets Manager // pragma: allowlist secret
+ ibmCustomerNumberSecretID, ok := permanentResources["hpc_ibm_customer_number_secret_id"].(string)
+ if !ok {
+ fmt.Println("Invalid type or nil value for hpc_ibm_customer_number_secret_id")
+ } else {
+ ibmCustomerNumberValue, err := utils.GetSecretsManagerKey(
+ permanentResources["secretsManagerGuid"].(string),
+ permanentResources["secretsManagerRegion"].(string),
+ ibmCustomerNumberSecretID, // Safely extracted value
+ )
+
+ if err != nil {
+ fmt.Printf("WARN : Retrieving ibmCustomerNumberSecretID from Secrets Manager") // pragma: allowlist secret
+ } else if ibmCustomerNumberValue != nil {
+ IbmCustomerNumberValue = *ibmCustomerNumberValue
+ }
+ }
+
+ if err := scaleSetEnvFromConfig(&config); err != nil {
+ return nil, fmt.Errorf("failed to set environment variables: %w", err)
+ }
+
+ return &config, nil
+}
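+
+// Example usage (illustrative; the relative path is an assumption based on the
+// test data layout in this repository):
+//
+//	cfg, err := GetScaleConfigFromYAML("../data/scale_config.yml")
+//	if err != nil {
+//		log.Fatalf("failed to load scale config: %v", err)
+//	}
+//	fmt.Println(cfg.ScaleVersion, cfg.StorageType)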
+
+func scaleSetEnvFromConfig(config *ScaleConfig) error {
+ envVars := map[string]interface{}{
+ "SCALE_VERSION": config.ScaleVersion,
+ "IBM_CUSTOMER_NUMBER": config.IbmCustomerNumber,
+ "ZONES": strings.Join(config.Zones, ","),
+ "REMOTE_ALLOWED_IPS": strings.Join(config.RemoteAllowedIPs, ","),
+ "EXISTING_RESOURCE_GROUP": config.ExistingResourceGroup,
+ "STORAGE_TYPE": config.StorageType,
+ "SSH_KEYS": config.SSHKeys,
+ "SCALE_DEPLOYER_INSTANCE": config.ScaleDeployerInstance,
+ "COMPUTE_GUI_USERNAME": config.ComputeGUIUsername,
+ "COMPUTE_GUI_PASSWORD": config.ComputeGUIPassword, // # pragma: allowlist secret
+ "STORAGE_GUI_USERNAME": config.StorageGUIUsername,
+ "STORAGE_GUI_PASSWORD": config.StorageGUIPassword, // # pragma: allowlist secret
+ "COMPUTE_INSTANCES": config.ComputeInstances,
+ "CLIENT_INSTANCES": config.ClientInstances,
+ "STORAGE_INSTANCES": config.StorageInstances,
+ "SCALE_ENCRYPTION_ENABLED": config.ScaleEncryptionEnabled,
+ "SCALE_ENCRYPTION_TYPE": config.ScaleEncryptionType,
+ "SCALE_OBSERVABILITY_ATRACKER_ENABLE": config.ScaleObservabilityAtrackerEnable,
+ "SCALE_OBSERVABILITY_ATRACKER_TARGET_TYPE": config.ScaleObservabilityAtrackerTargetType,
+ "SCALE_SCCWP_ENABLE": config.ScaleSCCWPEnable,
+ "SCALE_CSPM_ENABLED": config.ScaleCSPMEnabled,
+ "SCALE_SCCWP_SERVICE_PLAN": config.ScaleSCCWPServicePlan,
+ "GKLM_INSTANCES": config.GKLMInstances,
+ "SCALE_ENCRYPTION_ADMIN_PASSWORD": config.ScaleEncryptionAdminPassword, // # pragma: allowlist secret
+ "SCALE_FILESYSTEM_CONFIG": config.ScaleFilesystemConfig,
+ "SCALE_FILESETS_CONFIG": config.ScaleFilesetsConfig,
+ "SCALE_DNS_DOMAIN_NAMES": config.ScaleDNSDomainNames,
+ "SCALE_ENABLE_COS_INTEGRATION": config.ScaleEnableCOSIntegration,
+ "SCALE_ENABLE_VPC_FLOW_LOGS": config.ScaleEnableVPCFlowLogs,
+ "AFM_INSTANCES": config.AfmInstances,
+ "PROTOCOL_INSTANCES": config.ProtocolInstances,
+ }
+
+ if config.ScaleEncryptionType == "null" {
+ delete(envVars, "SCALE_ENCRYPTION_TYPE")
+ }
+
+ if err := processScaleSliceConfigs(config, envVars); err != nil {
+ return fmt.Errorf("error processing slice configurations: %w", err)
+ }
+
+ for key, value := range envVars {
+ if err := scaleSetEnvironmentVariable(key, value); err != nil {
+ return fmt.Errorf("failed to set %s: %w", key, err)
+ }
+ }
+
+ return nil
+}
+
+func processScaleSliceConfigs(config *ScaleConfig, envVars map[string]interface{}) error {
+ sliceProcessors := []struct {
+ name string
+ instances interface{}
+ }{
+ {"COMPUTE_INSTANCES", config.ComputeInstances},
+ {"CLIENT_INSTANCES", config.ClientInstances},
+ {"STORAGE_INSTANCES", config.StorageInstances},
+ {"AFM_INSTANCES", config.AfmInstances},
+ {"PROTOCOL_INSTANCES", config.ProtocolInstances},
+ }
+
+ for _, processor := range sliceProcessors {
+ if err := scaleMarshalToEnv(processor.name, processor.instances, envVars); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func scaleMarshalToEnv(key string, data interface{}, envVars map[string]interface{}) error {
+ jsonBytes, err := json.Marshal(data)
+ if err != nil {
+ return fmt.Errorf("failed to marshal %s: %w", key, err)
+ }
+ envVars[key] = string(jsonBytes)
+ return nil
+}
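+
+// For example (illustrative), a single storage instance entry marshals to a
+// JSON string like:
+// [{"profile":"bx2d-16x64","count":2,"image":"hpcc-scale5232-rhel810-v1","filesystem":"/storage/fs1"}]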
+
+func scaleSetEnvironmentVariable(key string, value interface{}) error {
+ if value == nil {
+ return nil
+ }
+
+ if existing := os.Getenv(key); existing != "" {
+ log.Printf("Environment variable %s is already set. Skipping overwrite.", key)
+ return nil
+ }
+
+ if key == "REMOTE_ALLOWED_IPS" {
+ return scaleHandleRemoteAllowedIPs(value)
+ }
+
+ if key == "IBM_CUSTOMER_NUMBER" && IbmCustomerNumberValue != "" {
+ return os.Setenv(key, IbmCustomerNumberValue)
+ }
+
+ switch v := value.(type) {
+ case string:
+ if v != "" {
+ return os.Setenv(key, v)
+ }
+ case bool:
+ return os.Setenv(key, strconv.FormatBool(v))
+ case int:
+ return os.Setenv(key, strconv.Itoa(v))
+ case float64:
+ return os.Setenv(key, strconv.FormatFloat(v, 'f', -1, 64))
+ case []string:
+ if len(v) > 0 {
+ return os.Setenv(key, strings.Join(v, ","))
+ }
+ default:
+ jsonBytes, err := json.Marshal(value)
+ if err != nil {
+ return fmt.Errorf("failed to marshal %s: %w", key, err)
+ }
+ return os.Setenv(key, string(jsonBytes))
+ }
+
+ return nil
+}
+
+func scaleHandleRemoteAllowedIPs(value interface{}) error {
+ cidr, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("remote_allowed_ips must be a string")
+ }
+
+ if cidr == "" || cidr == "0.0.0.0/0" {
+ if scaleGlobalIP == "" {
+ return fmt.Errorf("scaleGlobalIP is empty, cannot set REMOTE_ALLOWED_IPS")
+ }
+ return os.Setenv("REMOTE_ALLOWED_IPS", scaleGlobalIP)
+ }
+
+ return os.Setenv("REMOTE_ALLOWED_IPS", cidr)
+}
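+
+// Behavior sketch (illustrative, with a hypothetical public IP): an empty or
+// wide-open CIDR falls back to the caller's detected public IP, while any
+// other value is passed through verbatim:
+//
+//	scaleGlobalIP = "203.0.113.10"
+//	_ = scaleHandleRemoteAllowedIPs("")                // REMOTE_ALLOWED_IPS=203.0.113.10
+//	_ = scaleHandleRemoteAllowedIPs("198.51.100.0/24") // REMOTE_ALLOWED_IPS=198.51.100.0/24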
diff --git a/tests/go.mod b/tests/go.mod
index eb935b78..6101355b 100644
--- a/tests/go.mod
+++ b/tests/go.mod
@@ -5,26 +5,26 @@ go 1.24.2
toolchain go1.24.3
require (
- github.com/IBM/go-sdk-core/v5 v5.20.0
- github.com/IBM/secrets-manager-go-sdk/v2 v2.0.11
- github.com/gruntwork-io/terratest v0.49.0
+ github.com/IBM/go-sdk-core/v5 v5.21.0
+ github.com/IBM/secrets-manager-go-sdk/v2 v2.0.14
+ github.com/gruntwork-io/terratest v0.50.0
github.com/stretchr/testify v1.10.0
- github.com/terraform-ibm-modules/ibmcloud-terratest-wrapper v1.50.1
- golang.org/x/crypto v0.39.0
+ github.com/terraform-ibm-modules/ibmcloud-terratest-wrapper v1.58.12
+ golang.org/x/crypto v0.41.0
gopkg.in/yaml.v3 v3.0.1
)
require (
- dario.cat/mergo v1.0.0 // indirect
- github.com/IBM-Cloud/bluemix-go v0.0.0-20240719075425-078fcb3a55be // indirect
- github.com/IBM-Cloud/power-go-client v1.11.0 // indirect
- github.com/IBM/cloud-databases-go-sdk v0.7.1 // indirect
- github.com/IBM/platform-services-go-sdk v0.81.1 // indirect
+ dario.cat/mergo v1.0.2 // indirect
+ github.com/IBM-Cloud/bluemix-go v0.0.0-20250818082648-8ebc393b4b26 // indirect
+ github.com/IBM-Cloud/power-go-client v1.12.0 // indirect
+ github.com/IBM/cloud-databases-go-sdk v0.8.0 // indirect
+ github.com/IBM/platform-services-go-sdk v0.85.1 // indirect
github.com/IBM/project-go-sdk v0.3.6 // indirect
github.com/IBM/schematics-go-sdk v0.4.0 // indirect
- github.com/IBM/vpc-go-sdk v0.68.0 // indirect
+ github.com/IBM/vpc-go-sdk v0.70.1 // indirect
github.com/Microsoft/go-winio v0.6.2 // indirect
- github.com/ProtonMail/go-crypto v1.1.6 // indirect
+ github.com/ProtonMail/go-crypto v1.3.0 // indirect
github.com/agext/levenshtein v1.2.3 // indirect
github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
@@ -33,26 +33,28 @@ require (
github.com/cyphar/filepath-securejoin v0.4.1 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/emirpasic/gods v1.18.1 // indirect
- github.com/gabriel-vasile/mimetype v1.4.8 // indirect
+ github.com/fxamacker/cbor/v2 v2.9.0 // indirect
+ github.com/gabriel-vasile/mimetype v1.4.9 // indirect
github.com/ghodss/yaml v1.0.0 // indirect
github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect
github.com/go-git/go-billy/v5 v5.6.2 // indirect
- github.com/go-git/go-git/v5 v5.16.0 // indirect
- github.com/go-logr/logr v1.4.2 // indirect
+ github.com/go-git/go-git/v5 v5.16.2 // indirect
+ github.com/go-logr/logr v1.4.3 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
- github.com/go-openapi/analysis v0.21.5 // indirect
- github.com/go-openapi/errors v0.22.1 // indirect
- github.com/go-openapi/jsonpointer v0.20.1 // indirect
- github.com/go-openapi/jsonreference v0.20.3 // indirect
- github.com/go-openapi/loads v0.21.3 // indirect
- github.com/go-openapi/runtime v0.26.0 // indirect
- github.com/go-openapi/spec v0.20.12 // indirect
+ github.com/go-openapi/analysis v0.23.0 // indirect
+ github.com/go-openapi/errors v0.22.2 // indirect
+ github.com/go-openapi/jsonpointer v0.21.2 // indirect
+ github.com/go-openapi/jsonreference v0.21.0 // indirect
+ github.com/go-openapi/loads v0.22.0 // indirect
+ github.com/go-openapi/runtime v0.28.0 // indirect
+ github.com/go-openapi/spec v0.21.0 // indirect
github.com/go-openapi/strfmt v0.23.0 // indirect
- github.com/go-openapi/swag v0.22.5 // indirect
- github.com/go-openapi/validate v0.22.4 // indirect
+ github.com/go-openapi/swag v0.23.1 // indirect
+ github.com/go-openapi/validate v0.24.0 // indirect
github.com/go-playground/locales v0.14.1 // indirect
github.com/go-playground/universal-translator v0.18.1 // indirect
- github.com/go-playground/validator/v10 v10.26.0 // indirect
+ github.com/go-playground/validator/v10 v10.27.0 // indirect
+ github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect
github.com/google/go-cmp v0.7.0 // indirect
github.com/google/uuid v1.6.0 // indirect
@@ -60,44 +62,56 @@ require (
github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
github.com/hashicorp/go-getter/v2 v2.2.3 // indirect
github.com/hashicorp/go-multierror v1.1.1 // indirect
- github.com/hashicorp/go-retryablehttp v0.7.7 // indirect
+ github.com/hashicorp/go-retryablehttp v0.7.8 // indirect
github.com/hashicorp/go-safetemp v1.0.0 // indirect
github.com/hashicorp/go-version v1.7.0 // indirect
- github.com/hashicorp/hcl/v2 v2.22.0 // indirect
- github.com/hashicorp/terraform-json v0.25.0 // indirect
+ github.com/hashicorp/hcl/v2 v2.24.0 // indirect
+ github.com/hashicorp/terraform-json v0.26.0 // indirect
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect
github.com/jinzhu/copier v0.4.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
- github.com/kevinburke/ssh_config v1.2.0 // indirect
- github.com/klauspost/compress v1.16.7 // indirect
+ github.com/json-iterator/go v1.1.12 // indirect
+ github.com/kevinburke/ssh_config v1.4.0 // indirect
+ github.com/klauspost/compress v1.18.0 // indirect
github.com/leodido/go-urn v1.4.0 // indirect
- github.com/mailru/easyjson v0.7.7 // indirect
- github.com/mattn/go-zglob v0.0.4 // indirect
+ github.com/mailru/easyjson v0.9.0 // indirect
+ github.com/mattn/go-zglob v0.0.6 // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
github.com/mitchellh/go-testing-interface v1.14.1 // indirect
github.com/mitchellh/go-wordwrap v1.0.1 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
+ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
+ github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/oklog/ulid v1.3.1 // indirect
github.com/opentracing/opentracing-go v1.2.0 // indirect
- github.com/pjbgf/sha1cd v0.3.2 // indirect
+ github.com/pjbgf/sha1cd v0.4.0 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
- github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 // indirect
+ github.com/sergi/go-diff v1.4.0 // indirect
github.com/skeema/knownhosts v1.3.1 // indirect
- github.com/tmccombs/hcl2json v0.6.4 // indirect
- github.com/ulikunitz/xz v0.5.11 // indirect
+ github.com/stretchr/objx v0.5.2 // indirect
+ github.com/tmccombs/hcl2json v0.6.7 // indirect
+ github.com/ulikunitz/xz v0.5.12 // indirect
+ github.com/x448/float16 v0.8.4 // indirect
github.com/xanzy/ssh-agent v0.3.3 // indirect
- github.com/zclconf/go-cty v1.16.2 // indirect
- go.mongodb.org/mongo-driver v1.17.3 // indirect
- go.opentelemetry.io/otel v1.29.0 // indirect
- go.opentelemetry.io/otel/metric v1.29.0 // indirect
- go.opentelemetry.io/otel/trace v1.29.0 // indirect
- golang.org/x/mod v0.25.0 // indirect
- golang.org/x/net v0.40.0 // indirect
- golang.org/x/sync v0.15.0 // indirect
- golang.org/x/sys v0.33.0 // indirect
- golang.org/x/text v0.26.0 // indirect
- golang.org/x/tools v0.33.0 // indirect
+ github.com/zclconf/go-cty v1.16.3 // indirect
+ go.mongodb.org/mongo-driver v1.17.4 // indirect
+ go.opentelemetry.io/auto/sdk v1.1.0 // indirect
+ go.opentelemetry.io/otel v1.37.0 // indirect
+ go.opentelemetry.io/otel/metric v1.37.0 // indirect
+ go.opentelemetry.io/otel/trace v1.37.0 // indirect
+ go.yaml.in/yaml/v2 v2.4.2 // indirect
+ golang.org/x/mod v0.27.0 // indirect
+ golang.org/x/net v0.43.0 // indirect
+ golang.org/x/sync v0.16.0 // indirect
+ golang.org/x/sys v0.35.0 // indirect
+ golang.org/x/text v0.28.0 // indirect
+ golang.org/x/tools v0.36.0 // indirect
gopkg.in/warnings.v0 v0.1.2 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
- sigs.k8s.io/yaml v1.4.0 // indirect
+ k8s.io/apimachinery v0.33.4 // indirect
+ k8s.io/client-go v0.33.4 // indirect
+ k8s.io/klog/v2 v2.130.1 // indirect
+ sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 // indirect
+ sigs.k8s.io/structured-merge-diff/v4 v4.7.0 // indirect
+ sigs.k8s.io/yaml v1.6.0 // indirect
)
diff --git a/tests/go.sum b/tests/go.sum
index 80416288..c90c82f8 100644
--- a/tests/go.sum
+++ b/tests/go.sum
@@ -1,28 +1,28 @@
-dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk=
-dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
-github.com/IBM-Cloud/bluemix-go v0.0.0-20240719075425-078fcb3a55be h1:USOcBHkYQ4o/ccoEvoHinrba8NQthLJpFXnAoBY+MI4=
-github.com/IBM-Cloud/bluemix-go v0.0.0-20240719075425-078fcb3a55be/go.mod h1:/7hMjdZA6fEpd/dQAOEABxKEwN0t72P3PlpEDu0Y7bE=
-github.com/IBM-Cloud/power-go-client v1.11.0 h1:4xlYXF2+S3s6Crb0D2+d5c1kb6gUE7eowMXLB7Q6cWY=
-github.com/IBM-Cloud/power-go-client v1.11.0/go.mod h1:UDyXeIKEp6r7yWUXYu3r0ZnFSlNZ2YeQTHwM2Tmlgv0=
-github.com/IBM/cloud-databases-go-sdk v0.7.1 h1:5kK4/3NUsGxZzmuUe+1ftajpOQbeDVh5VeemrPgROP4=
-github.com/IBM/cloud-databases-go-sdk v0.7.1/go.mod h1:JYucI1PdwqbAd8XGdDAchxzxRP7bxOh1zUnseovHKsc=
-github.com/IBM/go-sdk-core/v5 v5.20.0 h1:rG1fn5GmJfFzVtpDKndsk6MgcarluG8YIWf89rVqLP8=
-github.com/IBM/go-sdk-core/v5 v5.20.0/go.mod h1:Q3BYO6iDA2zweQPDGbNTtqft5tDcEpm6RTuqMlPcvbw=
-github.com/IBM/platform-services-go-sdk v0.81.1 h1:Ch9wUIigyA3HzW7MQnA1WTHAw+QA6W4bSP3ThgzDpx0=
-github.com/IBM/platform-services-go-sdk v0.81.1/go.mod h1:XOowH+JnIih3FA7uilLVM/9VH7XgCmJ4T/i6eZi7gkw=
+dario.cat/mergo v1.0.2 h1:85+piFYR1tMbRrLcDwR18y4UKJ3aH1Tbzi24VRW1TK8=
+dario.cat/mergo v1.0.2/go.mod h1:E/hbnu0NxMFBjpMIE34DRGLWqDy0g5FuKDhCb31ngxA=
+github.com/IBM-Cloud/bluemix-go v0.0.0-20250818082648-8ebc393b4b26 h1:Gauwtw47rvv79uAgjah63G0zwmB4uzEEAHqthcqITnU=
+github.com/IBM-Cloud/bluemix-go v0.0.0-20250818082648-8ebc393b4b26/go.mod h1:PVD407jrZx0i/TW5GaTRI12ouzUfrFlZshbnjs9aQvg=
+github.com/IBM-Cloud/power-go-client v1.12.0 h1:tF9Mq5GLYHebpzQT6IYB89lIxEST1E9teuchjxSAaw0=
+github.com/IBM-Cloud/power-go-client v1.12.0/go.mod h1:SpTK1ttW8bfMNUVQS8qOEuWn2KOkzaCLyzfze8MG1JE=
+github.com/IBM/cloud-databases-go-sdk v0.8.0 h1:uMFqhnc/roVTzfCaUsJ23eaHKjChhGpM1F7Mpxik0bo=
+github.com/IBM/cloud-databases-go-sdk v0.8.0/go.mod h1:JYucI1PdwqbAd8XGdDAchxzxRP7bxOh1zUnseovHKsc=
+github.com/IBM/go-sdk-core/v5 v5.21.0 h1:DUnYhvC4SoC8T84rx5omnhY3+xcQg/Whyoa3mDPIMkk=
+github.com/IBM/go-sdk-core/v5 v5.21.0/go.mod h1:Q3BYO6iDA2zweQPDGbNTtqft5tDcEpm6RTuqMlPcvbw=
+github.com/IBM/platform-services-go-sdk v0.85.1 h1:lrBEeGaIajhSPMB6cPVAx53XTtVGrKOeA36gIXh2FYI=
+github.com/IBM/platform-services-go-sdk v0.85.1/go.mod h1:aGD045m6I8pfcB77wft8w2cHqWOJjcM3YSSV55BX0Js=
github.com/IBM/project-go-sdk v0.3.6 h1:DRiANKnAePevFsIKSvR89SUaMa2xsd7YKK71Ka1eqKI=
github.com/IBM/project-go-sdk v0.3.6/go.mod h1:FOJM9ihQV3EEAY6YigcWiTNfVCThtdY8bLC/nhQHFvo=
github.com/IBM/schematics-go-sdk v0.4.0 h1:x01f/tPquYJYLQzJLGuxWfCbV/EdSMXRikOceNy/JLM=
github.com/IBM/schematics-go-sdk v0.4.0/go.mod h1:Xe7R7xgwmXBHu09w2CbBe8lkWZaYxNQo19bS4dpLrUA=
-github.com/IBM/secrets-manager-go-sdk/v2 v2.0.11 h1:RG/hnKvKSMrG3X5Jm/P/itg+y/FGPY7+B5N3XYQDbmQ=
-github.com/IBM/secrets-manager-go-sdk/v2 v2.0.11/go.mod h1:7r0LOxg+K/y2fVbh2Uopu5r+VE76p1VTk/3gHAs5MQk=
-github.com/IBM/vpc-go-sdk v0.68.0 h1:Zs65PWeWBG5IwafAJV0RdPVsi3hCjIkhFZkqr1sLt5g=
-github.com/IBM/vpc-go-sdk v0.68.0/go.mod h1:VL7sy61ybg6tvA60SepoQx7TFe20m7JyNUt+se2tHP4=
+github.com/IBM/secrets-manager-go-sdk/v2 v2.0.14 h1:xKcplIoyh6UknnZSM+xUZVmmAqJckN4CdLT6c6VxoXc=
+github.com/IBM/secrets-manager-go-sdk/v2 v2.0.14/go.mod h1:B1RtnQGpMt9uU1GimIPv1f0amUSM0WAx8UKLOzKCt/c=
+github.com/IBM/vpc-go-sdk v0.70.1 h1:6NsbRkiA5gDNxe7cjNx8Pi1j9s0PlhwNQj29wsKZxAo=
+github.com/IBM/vpc-go-sdk v0.70.1/go.mod h1:K3vVlje72PYE3ZRt1iouE+jSIq+vCyYzT1HiFC06hUA=
github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY=
github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
-github.com/ProtonMail/go-crypto v1.1.6 h1:ZcV+Ropw6Qn0AX9brlQLAUXfqLBc7Bl+f/DmNxpLfdw=
-github.com/ProtonMail/go-crypto v1.1.6/go.mod h1:rA3QumHc/FZ8pAHreoekgiAbzpNsfQAosU5td4SnOrE=
+github.com/ProtonMail/go-crypto v1.3.0 h1:ILq8+Sf5If5DCpHQp4PbZdS1J7HDFRXz/+xKBiRGFrw=
+github.com/ProtonMail/go-crypto v1.3.0/go.mod h1:9whxjD8Rbs29b4XWbB8irEcE8KHMqaR2e7GWU1R+/PE=
github.com/agext/levenshtein v1.2.3 h1:YB2fHEn0UJagG8T1rrWknE3ZQzWM06O8AMAatNn7lmo=
github.com/agext/levenshtein v1.2.3/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558=
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8=
@@ -37,12 +37,8 @@ github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3d
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d h1:xDfNPAt8lFiC1UJrqV3uuy861HCTo708pDMbjHHdCas=
github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d/go.mod h1:6QX/PXZ00z/TKoufEY6K/a0k6AhaJrQKdFe6OfVXsa4=
-github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
-github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
-github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/cloudflare/circl v1.6.1 h1:zqIqSPIndyBh1bjLVVDHMPpVKqp8Su/V+6MeDzzQBQ0=
github.com/cloudflare/circl v1.6.1/go.mod h1:uddAzsPgqdMAYatqJ0lsjX1oECcQLIlRpzZh3pJrofs=
-github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/cyphar/filepath-securejoin v0.4.1 h1:JyxxyPEaktOD+GAnqIqTf9A8tHyAG22rowi7HkoSU1s=
github.com/cyphar/filepath-securejoin v0.4.1/go.mod h1:Sdj7gXlvMcPZsbhwhQ33GguGLDGQL7h7bg04C/+u9jI=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -50,16 +46,18 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/elazarl/goproxy v1.7.2 h1:Y2o6urb7Eule09PjlhQRGNsqRfPmYI3KKQLFpCAV3+o=
github.com/elazarl/goproxy v1.7.2/go.mod h1:82vkLNir0ALaW14Rc399OTTjyNREgmdL2cVoIbS6XaE=
+github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g=
+github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc=
github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ=
github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM=
github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE=
-github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
-github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/8M=
github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
-github.com/gabriel-vasile/mimetype v1.4.8 h1:FfZ3gj38NjllZIeJAmMhr+qKL8Wu+nOoI3GqacKw1NM=
-github.com/gabriel-vasile/mimetype v1.4.8/go.mod h1:ByKUIKGjh1ODkGM1asKUbQZOLGrPjydw3hYPU2YU9t8=
+github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM=
+github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ=
+github.com/gabriel-vasile/mimetype v1.4.9 h1:5k+WDwEsD9eTLL8Tz3L0VnmVh9QxGjRmjBvAG7U/oYY=
+github.com/gabriel-vasile/mimetype v1.4.9/go.mod h1:WnSQhFKJuBlRyLiKohA/2DtIlPFAbguNaG7QCHcyGok=
github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/gliderlabs/ssh v0.3.8 h1:a4YXD1V7xMF9g5nTkdfnja3Sxy1PVDCj1Zg4Wb8vY6c=
@@ -70,79 +68,57 @@ github.com/go-git/go-billy/v5 v5.6.2 h1:6Q86EsPXMa7c3YZ3aLAQsMA0VlWmy43r6FHqa/UN
github.com/go-git/go-billy/v5 v5.6.2/go.mod h1:rcFC2rAsp/erv7CMz9GczHcuD0D32fWzH+MJAU+jaUU=
github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399 h1:eMje31YglSBqCdIqdhKBW8lokaMrL3uTkpGYlE2OOT4=
github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399/go.mod h1:1OCfN199q1Jm3HZlxleg+Dw/mwps2Wbk9frAWm+4FII=
-github.com/go-git/go-git/v5 v5.16.0 h1:k3kuOEpkc0DeY7xlL6NaaNg39xdgQbtH5mwCafHO9AQ=
-github.com/go-git/go-git/v5 v5.16.0/go.mod h1:4Ge4alE/5gPs30F2H1esi2gPd69R0C39lolkucHBOp8=
+github.com/go-git/go-git/v5 v5.16.2 h1:fT6ZIOjE5iEnkzKyxTHK1W4HGAsPhqEqiSAssSO77hM=
+github.com/go-git/go-git/v5 v5.16.2/go.mod h1:4Ge4alE/5gPs30F2H1esi2gPd69R0C39lolkucHBOp8=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
-github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
-github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
+github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
-github.com/go-openapi/analysis v0.21.5 h1:3tHfEBh6Ia8eKc4M7khOGjPOAlWKJ10d877Cr9teujI=
-github.com/go-openapi/analysis v0.21.5/go.mod h1:25YcZosX9Lwz2wBsrFrrsL8bmjjXdlyP6zsr2AMy29M=
-github.com/go-openapi/errors v0.20.3/go.mod h1:Z3FlZ4I8jEGxjUK+bugx3on2mIAk4txuAOhlsB1FSgk=
-github.com/go-openapi/errors v0.22.1 h1:kslMRRnK7NCb/CvR1q1VWuEQCEIsBGn5GgKD9e+HYhU=
-github.com/go-openapi/errors v0.22.1/go.mod h1:+n/5UdIqdVnLIJ6Q9Se8HNGUXYaY6CN8ImWzfi/Gzp0=
-github.com/go-openapi/jsonpointer v0.20.1 h1:MkK4VEIEZMj4wT9PmjaUmGflVBr9nvud4Q4UVFbDoBE=
-github.com/go-openapi/jsonpointer v0.20.1/go.mod h1:bHen+N0u1KEO3YlmqOjTT9Adn1RfD91Ar825/PuiRVs=
-github.com/go-openapi/jsonreference v0.20.3 h1:EjGcjTW8pD1mRis6+w/gmoBdqv5+RbE9B85D1NgDOVQ=
-github.com/go-openapi/jsonreference v0.20.3/go.mod h1:FviDZ46i9ivh810gqzFLl5NttD5q3tSlMLqLr6okedM=
-github.com/go-openapi/loads v0.21.3 h1:8sSH2FIm/SnbDUGv572md4YqVMFne/a9Eubvcd3anew=
-github.com/go-openapi/loads v0.21.3/go.mod h1:Y3aMR24iHbKHppOj91nQ/SHc0cuPbAr4ndY4a02xydc=
-github.com/go-openapi/runtime v0.26.0 h1:HYOFtG00FM1UvqrcxbEJg/SwvDRvYLQKGhw2zaQjTcc=
-github.com/go-openapi/runtime v0.26.0/go.mod h1:QgRGeZwrUcSHdeh4Ka9Glvo0ug1LC5WyE+EV88plZrQ=
-github.com/go-openapi/spec v0.20.12 h1:cgSLbrsmziAP2iais+Vz7kSazwZ8rsUZd6TUzdDgkVI=
-github.com/go-openapi/spec v0.20.12/go.mod h1:iSCgnBcwbMW9SfzJb8iYynXvcY6C/QFrI7otzF7xGM4=
-github.com/go-openapi/strfmt v0.21.7/go.mod h1:adeGTkxE44sPyLk0JV235VQAO/ZXUr8KAzYjclFs3ew=
+github.com/go-openapi/analysis v0.23.0 h1:aGday7OWupfMs+LbmLZG4k0MYXIANxcuBTYUC03zFCU=
+github.com/go-openapi/analysis v0.23.0/go.mod h1:9mz9ZWaSlV8TvjQHLl2mUW2PbZtemkE8yA5v22ohupo=
+github.com/go-openapi/errors v0.22.2 h1:rdxhzcBUazEcGccKqbY1Y7NS8FDcMyIRr0934jrYnZg=
+github.com/go-openapi/errors v0.22.2/go.mod h1:+n/5UdIqdVnLIJ6Q9Se8HNGUXYaY6CN8ImWzfi/Gzp0=
+github.com/go-openapi/jsonpointer v0.21.2 h1:AqQaNADVwq/VnkCmQg6ogE+M3FOsKTytwges0JdwVuA=
+github.com/go-openapi/jsonpointer v0.21.2/go.mod h1:50I1STOfbY1ycR8jGz8DaMeLCdXiI6aDteEdRNNzpdk=
+github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ=
+github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4=
+github.com/go-openapi/loads v0.22.0 h1:ECPGd4jX1U6NApCGG1We+uEozOAvXvJSF4nnwHZ8Aco=
+github.com/go-openapi/loads v0.22.0/go.mod h1:yLsaTCS92mnSAZX5WWoxszLj0u+Ojl+Zs5Stn1oF+rs=
+github.com/go-openapi/runtime v0.28.0 h1:gpPPmWSNGo214l6n8hzdXYhPuJcGtziTOgUpvsFWGIQ=
+github.com/go-openapi/runtime v0.28.0/go.mod h1:QN7OzcS+XuYmkQLw05akXk0jRH/eZ3kb18+1KwW9gyc=
+github.com/go-openapi/spec v0.21.0 h1:LTVzPc3p/RzRnkQqLRndbAzjY0d0BCL72A6j3CdL9ZY=
+github.com/go-openapi/spec v0.21.0/go.mod h1:78u6VdPw81XU44qEWGhtr982gJ5BWg2c0I5XwVMotYk=
github.com/go-openapi/strfmt v0.23.0 h1:nlUS6BCqcnAk0pyhi9Y+kdDVZdZMHfEKQiS4HaMgO/c=
github.com/go-openapi/strfmt v0.23.0/go.mod h1:NrtIpfKtWIygRkKVsxh7XQMDQW5HKQl6S5ik2elW+K4=
-github.com/go-openapi/swag v0.22.5 h1:fVS63IE3M0lsuWRzuom3RLwUMVI2peDH01s6M70ugys=
-github.com/go-openapi/swag v0.22.5/go.mod h1:Gl91UqO+btAM0plGGxHqJcQZ1ZTy6jbmridBTsDy8A0=
-github.com/go-openapi/validate v0.22.4 h1:5v3jmMyIPKTR8Lv9syBAIRxG6lY0RqeBPB1LKEijzk8=
-github.com/go-openapi/validate v0.22.4/go.mod h1:qm6O8ZIcPVdSY5219468Jv7kBdGvkiZLPOmqnqTUZ2A=
+github.com/go-openapi/swag v0.23.1 h1:lpsStH0n2ittzTnbaSloVZLuB5+fvSY/+hnagBjSNZU=
+github.com/go-openapi/swag v0.23.1/go.mod h1:STZs8TbRvEQQKUA+JZNAm3EWlgaOBGpyFDqQnDHMef0=
+github.com/go-openapi/validate v0.24.0 h1:LdfDKwNbpB6Vn40xhTdNZAnfLECL81w+VX3BumrGD58=
+github.com/go-openapi/validate v0.24.0/go.mod h1:iyeX1sEufmv3nPbBdX3ieNviWnOZaJ1+zquzJEf2BAQ=
github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s=
github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA=
github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY=
github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY=
github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY=
-github.com/go-playground/validator/v10 v10.26.0 h1:SP05Nqhjcvz81uJaRfEV0YBSSSGMc/iMaVtFbr3Sw2k=
-github.com/go-playground/validator/v10 v10.26.0/go.mod h1:I5QpIEbmr8On7W0TktmJAumgzX4CA1XNl4ZmDuVHKKo=
-github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
-github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=
+github.com/go-playground/validator/v10 v10.27.0 h1:w8+XrWVMhGkxOaaowyKH35gFydVHOvC0/uWoy2Fzwn4=
+github.com/go-playground/validator/v10 v10.27.0/go.mod h1:I5QpIEbmr8On7W0TktmJAumgzX4CA1XNl4ZmDuVHKKo=
github.com/go-test/deep v1.0.7 h1:/VSMRlnY/JSyqxQUzQLKVMAskpY/NZKFA5j2P+0pP2M=
github.com/go-test/deep v1.0.7/go.mod h1:QV8Hv/iy04NyLBxAdO9njL0iVPN1S4d/A3NVv1V36o8=
+github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
+github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 h1:f+oWsMOmNPc8JmEHVZIycC7hBoQxHH9pNKQORJNozsQ=
github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8/go.mod h1:wcDNUvekVysuuOpQKo3191zZyTpiI6se1N1ULghS0sw=
-github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
-github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
-github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
-github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
-github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
-github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
-github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
-github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
-github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
-github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
-github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/gnostic-models v0.6.9 h1:MU/8wDLif2qCXZmzncUQ/BOfxWfthHi63KqpoNbWqVw=
+github.com/google/gnostic-models v0.6.9/go.mod h1:CiWsm0s6BSQd1hRn8/QmxqB6BesYcbSZxsz9b0KuDBw=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
-github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
-github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/gruntwork-io/terratest v0.49.0 h1:GurfpHEOEr8vntB77QcxDh+P7aiQRUgPFdgb6q9PuWI=
-github.com/gruntwork-io/terratest v0.49.0/go.mod h1:/+dfGio9NqUpvvukuPo29B8zy6U5FYJn9PdmvwztK4A=
+github.com/gruntwork-io/terratest v0.50.0 h1:AbBJ7IRCpLZ9H4HBrjeoWESITv8nLjN6/f1riMNcAsw=
+github.com/gruntwork-io/terratest v0.50.0/go.mod h1:see0lbKvAqz6rvzvN2wyfuFQQG4PWcAb2yHulF6B2q4=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
@@ -154,29 +130,30 @@ github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB1
github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M=
github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
-github.com/hashicorp/go-retryablehttp v0.7.7 h1:C8hUCYzor8PIfXHa4UrZkU4VvK8o9ISHxT2Q8+VepXU=
-github.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk=
+github.com/hashicorp/go-retryablehttp v0.7.8 h1:ylXZWnqa7Lhqpk0L1P1LzDtGcCR0rPVUrx/c8Unxc48=
+github.com/hashicorp/go-retryablehttp v0.7.8/go.mod h1:rjiScheydd+CxvumBsIrFKlx3iS0jrZ7LvzFGFmuKbw=
github.com/hashicorp/go-safetemp v1.0.0 h1:2HR189eFNrjHQyENnQMMpCiBAsRxzbTMIgBhEyExpmo=
github.com/hashicorp/go-safetemp v1.0.0/go.mod h1:oaerMy3BhqiTbVye6QuFhFtIceqFoDHxNAB65b+Rj1I=
github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY=
github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
-github.com/hashicorp/hcl/v2 v2.22.0 h1:hkZ3nCtqeJsDhPRFz5EA9iwcG1hNWGePOTw6oyul12M=
-github.com/hashicorp/hcl/v2 v2.22.0/go.mod h1:62ZYHrXgPoX8xBnzl8QzbWq4dyDsDtfCRgIq1rbJEvA=
-github.com/hashicorp/terraform-json v0.25.0 h1:rmNqc/CIfcWawGiwXmRuiXJKEiJu1ntGoxseG1hLhoQ=
-github.com/hashicorp/terraform-json v0.25.0/go.mod h1:sMKS8fiRDX4rVlR6EJUMudg1WcanxCMoWwTLkgZP/vc=
-github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
-github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+github.com/hashicorp/hcl/v2 v2.24.0 h1:2QJdZ454DSsYGoaE6QheQZjtKZSUs9Nh2izTWiwQxvE=
+github.com/hashicorp/hcl/v2 v2.24.0/go.mod h1:oGoO1FIQYfn/AgyOhlg9qLC6/nOJPX3qGbkZpYAcqfM=
+github.com/hashicorp/terraform-json v0.26.0 h1:+BnJavhRH+oyNWPnfzrfQwVWCZBFMvjdiH2Vi38Udz4=
+github.com/hashicorp/terraform-json v0.26.0/go.mod h1:eyWCeC3nrZamyrKLFnrvwpc3LQPIJsx8hWHQ/nu2/v4=
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A=
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo=
github.com/jinzhu/copier v0.4.0 h1:w3ciUoD19shMCRargcpm0cm91ytaBhDvuRpz1ODO/U8=
github.com/jinzhu/copier v0.4.0/go.mod h1:DfbEm0FYsaqBcKcFuvmOZb218JkPGtvSHsKg8S8hyyg=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
-github.com/kevinburke/ssh_config v1.2.0 h1:x584FjTGwHzMwvHx18PXxbBVzfnxogHaAReU4gf13a4=
-github.com/kevinburke/ssh_config v1.2.0/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM=
-github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
-github.com/klauspost/compress v1.16.7 h1:2mk3MPGNzKyxErAw8YaohYh69+pa4sIQSC0fPGCFR9I=
-github.com/klauspost/compress v1.16.7/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
+github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
+github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
+github.com/kevinburke/ssh_config v1.4.0 h1:6xxtP5bZ2E4NF5tuQulISpTO2z8XbtH8cg1PWkxoFkQ=
+github.com/kevinburke/ssh_config v1.4.0/go.mod h1:q2RIzfka+BXARoNexmF9gkxEX7DmvbW9P4hIVx2Kg4M=
+github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
+github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
+github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
@@ -186,14 +163,14 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ=
github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI=
-github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
-github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
+github.com/mailru/easyjson v0.9.0 h1:PrnmzHw7262yW8sTBwxi1PdJA3Iw/EKBa8psRf7d9a4=
+github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
-github.com/mattn/go-zglob v0.0.4 h1:LQi2iOm0/fGgu80AioIJ/1j9w9Oh+9DZ39J4VAGzHQM=
-github.com/mattn/go-zglob v0.0.4/go.mod h1:MxxjyoXXnMxfIpxTK2GAkw1w8glPsQILx3N5wrKakiY=
+github.com/mattn/go-zglob v0.0.6 h1:mP8RnmCgho4oaUYDIDn6GNxYk+qJGUs8fJLn+twYj2A=
+github.com/mattn/go-zglob v0.0.6/go.mod h1:MxxjyoXXnMxfIpxTK2GAkw1w8glPsQILx3N5wrKakiY=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/go-testing-interface v1.14.1 h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU=
@@ -202,298 +179,161 @@ github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQ
github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
-github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc=
-github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
-github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
-github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
+github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
+github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
+github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
+github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/nxadm/tail v1.4.11 h1:8feyoE3OzPrcshW5/MJ4sGESc5cqmGkGCWlco4l0bqY=
github.com/nxadm/tail v1.4.11/go.mod h1:OTaG3NK980DZzxbRq6lEuzgU+mug70nY11sMd4JXXHc=
github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
-github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
-github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
-github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c=
-github.com/onsi/ginkgo/v2 v2.1.4/go.mod h1:um6tUpWM/cxCK3/FK8BXqEiUMUwRgSM4JXG47RKZmLU=
-github.com/onsi/ginkgo/v2 v2.1.6/go.mod h1:MEH45j8TBi6u9BMogfbp0stKC5cdGjumZj5Y7AG4VIk=
-github.com/onsi/ginkgo/v2 v2.3.0/go.mod h1:Eew0uilEqZmIEZr8JrvYlvOM7Rr6xzTmMV8AyFNU9d0=
-github.com/onsi/ginkgo/v2 v2.4.0/go.mod h1:iHkDK1fKGcBoEHT5W7YBq4RFWaQulw+caOMkAt4OrFo=
-github.com/onsi/ginkgo/v2 v2.5.0/go.mod h1:Luc4sArBICYCS8THh8v3i3i5CuSZO+RaQRaJoeNwomw=
-github.com/onsi/ginkgo/v2 v2.7.0/go.mod h1:yjiuMwPokqY1XauOgju45q3sJt6VzQ/Fict1LFVcsAo=
-github.com/onsi/ginkgo/v2 v2.8.1/go.mod h1:N1/NbDngAFcSLdyZ+/aYTYGSlq9qMCS/cNKGJjy+csc=
-github.com/onsi/ginkgo/v2 v2.9.0/go.mod h1:4xkjoL/tZv4SMWeww56BU5kAt19mVB47gTWxmrTcxyk=
-github.com/onsi/ginkgo/v2 v2.9.1/go.mod h1:FEcmzVcCHl+4o9bQZVab+4dC9+j+91t2FHSzmGAPfuo=
-github.com/onsi/ginkgo/v2 v2.9.2/go.mod h1:WHcJJG2dIlcCqVfBAwUCrJxSPFb6v4azBwgxeMeDuts=
-github.com/onsi/ginkgo/v2 v2.9.5/go.mod h1:tvAoo1QUJwNEU2ITftXTpR7R1RbCzoZUOs3RonqW57k=
-github.com/onsi/ginkgo/v2 v2.9.7/go.mod h1:cxrmXWykAwTwhQsJOPfdIDiJ+l2RYq7U8hFU+M/1uw0=
-github.com/onsi/ginkgo/v2 v2.11.0/go.mod h1:ZhrRA5XmEE3x3rhlzamx/JJvujdZoJ2uvgI7kR0iZvM=
-github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o=
-github.com/onsi/ginkgo/v2 v2.15.0/go.mod h1:HlxMHtYF57y6Dpf+mc5529KKmSq9h2FpCF+/ZkwUxKM=
-github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
-github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
-github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
-github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro=
-github.com/onsi/gomega v1.20.1/go.mod h1:DtrZpjmvpn2mPm4YWQa0/ALMDj9v4YxLgojwPeREyVo=
-github.com/onsi/gomega v1.21.1/go.mod h1:iYAIXgPSaDHak0LCMA+AWBpIKBr8WZicMxnE8luStNc=
-github.com/onsi/gomega v1.22.1/go.mod h1:x6n7VNe4hw0vkyYUM4mjIXx3JbLiPaBPNgB7PRQ1tuM=
-github.com/onsi/gomega v1.24.0/go.mod h1:Z/NWtiqwBrwUt4/2loMmHL63EDLnYHmVbuBpDr2vQAg=
-github.com/onsi/gomega v1.24.1/go.mod h1:3AOiACssS3/MajrniINInwbfOOtfZvplPzuRSmvt1jM=
-github.com/onsi/gomega v1.26.0/go.mod h1:r+zV744Re+DiYCIPRlYOTxn0YkOLcAnW8k1xXdMPGhM=
-github.com/onsi/gomega v1.27.1/go.mod h1:aHX5xOykVYzWOV4WqQy0sy8BQptgukenXpCXfadcIAw=
-github.com/onsi/gomega v1.27.3/go.mod h1:5vG284IBtfDAmDyrK+eGyZmUgUlmi+Wngqo557cZ6Gw=
-github.com/onsi/gomega v1.27.4/go.mod h1:riYq/GJKh8hhoM01HN6Vmuy93AarCXCBGpvFDK3q3fQ=
-github.com/onsi/gomega v1.27.6/go.mod h1:PIQNjfQwkP3aQAH7lf7j87O/5FiNr+ZR8+ipb+qQlhg=
-github.com/onsi/gomega v1.27.7/go.mod h1:1p8OOlwo2iUUDsHnOrjE5UKYJ+e3W8eQ3qSlRahPmr4=
-github.com/onsi/gomega v1.27.8/go.mod h1:2J8vzI/s+2shY9XHRApDkdgPo1TKT7P2u6fXeJKFnNQ=
-github.com/onsi/gomega v1.27.10/go.mod h1:RsS8tutOdbdgzbPtzzATp12yT7kM5I5aElG3evPbQ0M=
-github.com/onsi/gomega v1.30.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ=
-github.com/onsi/gomega v1.31.1/go.mod h1:y40C95dwAD1Nz36SsEnxvfFe8FFfNxzI5eJ0EYGyAy0=
-github.com/onsi/gomega v1.37.0 h1:CdEG8g0S133B4OswTDC/5XPSzE1OeP29QOioj2PID2Y=
-github.com/onsi/gomega v1.37.0/go.mod h1:8D9+Txp43QWKhM24yyOBEdpkzN8FvJyAwecBgsU4KU0=
+github.com/onsi/gomega v1.38.0 h1:c/WX+w8SLAinvuKKQFh77WEucCnPk4j2OTUr7lt7BeY=
+github.com/onsi/gomega v1.38.0/go.mod h1:OcXcwId0b9QsE7Y49u+BTrL4IdKOBOKnD6VQNTJEB6o=
github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs=
github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc=
-github.com/pjbgf/sha1cd v0.3.2 h1:a9wb0bp1oC2TGwStyn0Umc/IGKQnEgF0vVaZ8QF8eo4=
-github.com/pjbgf/sha1cd v0.3.2/go.mod h1:zQWigSxVmsHEZow5qaLtPYxpcKMMQpa09ixqBxuCS6A=
+github.com/pjbgf/sha1cd v0.4.0 h1:NXzbL1RvjTUi6kgYZCX3fPwwl27Q1LJndxtUDVfJGRY=
+github.com/pjbgf/sha1cd v0.4.0/go.mod h1:zQWigSxVmsHEZow5qaLtPYxpcKMMQpa09ixqBxuCS6A=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
-github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 h1:n661drycOFuPLCN3Uc8sB6B/s6Z4t2xvBgU1htSHuq8=
-github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4=
+github.com/sergi/go-diff v1.4.0 h1:n/SP9D5ad1fORl+llWyN+D6qoUETXNZARKjyY2/KVCw=
+github.com/sergi/go-diff v1.4.0/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4=
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/skeema/knownhosts v1.3.1 h1:X2osQ+RAjK76shCbvhHHHVl3ZlgDm8apHEHFqRjnBY8=
github.com/skeema/knownhosts v1.3.1/go.mod h1:r7KTdC8l4uxWRyK2TpQZ/1o5HaSzh06ePQNxPwTcfiY=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
-github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
-github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
-github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
-github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
-github.com/terraform-ibm-modules/ibmcloud-terratest-wrapper v1.50.1 h1:5t2x8tkTeEeLrVy141bLVTWfd8zC9pvidByXJxUH6k8=
-github.com/terraform-ibm-modules/ibmcloud-terratest-wrapper v1.50.1/go.mod h1:DPxpxzMr8GCuuUzNlNWdAFAHfHRv1mETuEs2G47+7+M=
-github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
-github.com/tmccombs/hcl2json v0.6.4 h1:/FWnzS9JCuyZ4MNwrG4vMrFrzRgsWEOVi+1AyYUVLGw=
-github.com/tmccombs/hcl2json v0.6.4/go.mod h1:+ppKlIW3H5nsAsZddXPy2iMyvld3SHxyjswOZhavRDk=
-github.com/ulikunitz/xz v0.5.11 h1:kpFauv27b6ynzBNT/Xy+1k+fK4WswhN/6PN5WhFAGw8=
-github.com/ulikunitz/xz v0.5.11/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
+github.com/terraform-ibm-modules/ibmcloud-terratest-wrapper v1.58.12 h1:c6/my1qhlnD7twSjZ66/1xsKQHu2OC9EF4rRQmsDKMU=
+github.com/terraform-ibm-modules/ibmcloud-terratest-wrapper v1.58.12/go.mod h1:6Wz8vnBelmRZxD5qjm5K4MpvPPWpoCWRPzG76j0B36g=
+github.com/tmccombs/hcl2json v0.6.7 h1:RYKTs4kd/gzRsEiv7J3M2WQ7TYRYZVc+0H0pZdERkxA=
+github.com/tmccombs/hcl2json v0.6.7/go.mod h1:lJgBOOGDpbhjvdG2dLaWsqB4KBzul2HytfDTS3H465o=
+github.com/ulikunitz/xz v0.5.12 h1:37Nm15o69RwBkXM0J6A5OlE67RZTfzUxTj8fB3dfcsc=
+github.com/ulikunitz/xz v0.5.12/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
+github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
+github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM=
github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw=
-github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI=
-github.com/xdg-go/scram v1.1.1/go.mod h1:RaEWvsqvNKKvBPvcKeFjrG2cJqOkHTiyTpzz23ni57g=
-github.com/xdg-go/stringprep v1.0.3/go.mod h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgklLGvcBnW8=
-github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA=
+github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
-github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
-github.com/zclconf/go-cty v1.16.2 h1:LAJSwc3v81IRBZyUVQDUdZ7hs3SYs9jv0eZJDWHD/70=
-github.com/zclconf/go-cty v1.16.2/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE=
+github.com/zclconf/go-cty v1.16.3 h1:osr++gw2T61A8KVYHoQiFbFd1Lh3JOCXc/jFLJXKTxk=
+github.com/zclconf/go-cty v1.16.3/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE=
github.com/zclconf/go-cty-debug v0.0.0-20240509010212-0d6042c53940 h1:4r45xpDWB6ZMSMNJFMOjqrGHynW3DIBuR2H9j0ug+Mo=
github.com/zclconf/go-cty-debug v0.0.0-20240509010212-0d6042c53940/go.mod h1:CmBdvvj3nqzfzJ6nTCIwDTPZ56aVGvDrmztiO5g3qrM=
-go.mongodb.org/mongo-driver v1.11.3/go.mod h1:PTSz5yu21bkT/wXpkS7WR5f0ddqw5quethTUn9WM+2g=
-go.mongodb.org/mongo-driver v1.17.3 h1:TQyXhnsWfWtgAhMtOgtYHMTkZIfBTpMTsMnd9ZBeHxQ=
-go.mongodb.org/mongo-driver v1.17.3/go.mod h1:Hy04i7O2kC4RS06ZrhPRqj/u4DTYkFDAAccj+rVKqgQ=
-go.opentelemetry.io/otel v1.29.0 h1:PdomN/Al4q/lN6iBJEN3AwPvUiHPMlt93c8bqTG5Llw=
-go.opentelemetry.io/otel v1.29.0/go.mod h1:N/WtXPs1CNCUEx+Agz5uouwCba+i+bJGFicT8SR4NP8=
-go.opentelemetry.io/otel/metric v1.29.0 h1:vPf/HFWTNkPu1aYeIsc98l4ktOQaL6LeSoeV2g+8YLc=
-go.opentelemetry.io/otel/metric v1.29.0/go.mod h1:auu/QWieFVWx+DmQOUMgj0F8LHWdgalxXqvp7BII/W8=
+go.mongodb.org/mongo-driver v1.17.4 h1:jUorfmVzljjr0FLzYQsGP8cgN/qzzxlY9Vh0C9KFXVw=
+go.mongodb.org/mongo-driver v1.17.4/go.mod h1:Hy04i7O2kC4RS06ZrhPRqj/u4DTYkFDAAccj+rVKqgQ=
+go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
+go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
+go.opentelemetry.io/otel v1.37.0 h1:9zhNfelUvx0KBfu/gb+ZgeAfAgtWrfHJZcAqFC228wQ=
+go.opentelemetry.io/otel v1.37.0/go.mod h1:ehE/umFRLnuLa/vSccNq9oS1ErUlkkK71gMcN34UG8I=
+go.opentelemetry.io/otel/metric v1.37.0 h1:mvwbQS5m0tbmqML4NqK+e3aDiO02vsf/WgbsdpcPoZE=
+go.opentelemetry.io/otel/metric v1.37.0/go.mod h1:04wGrZurHYKOc+RKeye86GwKiTb9FKm1WHtO+4EVr2E=
go.opentelemetry.io/otel/sdk v1.29.0 h1:vkqKjk7gwhS8VaWb0POZKmIEDimRCMsopNYnriHyryo=
go.opentelemetry.io/otel/sdk v1.29.0/go.mod h1:pM8Dx5WKnvxLCb+8lG1PRNIDxu9g9b9g59Qr7hfAAok=
-go.opentelemetry.io/otel/trace v1.29.0 h1:J/8ZNK4XgR7a21DZUAsbF8pZ5Jcw1VhACmnYt39JTi4=
-go.opentelemetry.io/otel/trace v1.29.0/go.mod h1:eHl3w0sp3paPkYstJOmAimxhiFXPg+MMTlEh3nsQgWQ=
+go.opentelemetry.io/otel/trace v1.37.0 h1:HLdcFNbRQBE2imdSEgm/kwqmQj1Or1l/7bW6mxVK7z4=
+go.opentelemetry.io/otel/trace v1.37.0/go.mod h1:TlgrlQ+PtQO5XFerSPUYG0JSgGyryXewPGyayAWSBS0=
+go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI=
+go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU=
+go.yaml.in/yaml/v3 v3.0.3 h1:bXOww4E/J3f66rav3pX3m8w6jDE4knZjGOw8b5Y6iNE=
+go.yaml.in/yaml/v3 v3.0.3/go.mod h1:tBHosrYAkRZjRAOREWbDnBXUf08JOwYq++0QNwQiWzI=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
-golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
-golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio=
-golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw=
-golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
-golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4=
-golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
-golang.org/x/crypto v0.39.0 h1:SHs+kF4LP+f+p14esP5jAoDpHU8Gu/v9lFRK6IT5imM=
-golang.org/x/crypto v0.39.0/go.mod h1:L+Xg3Wf6HoL4Bn4238Z6ft6KfEpN0tJGo53AAPC632U=
+golang.org/x/crypto v0.41.0 h1:WKYxWedPGCTVVl5+WHSSrOBT0O8lx32+zxmHxijgXp4=
+golang.org/x/crypto v0.41.0/go.mod h1:pO5AFd7FA68rFak7rOAGVuygIISepHftHnr8dr6+sUc=
+golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
-golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
-golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI=
-golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
-golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
-golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
-golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
-golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
-golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
-golang.org/x/mod v0.25.0 h1:n7a+ZbQKQA/Ysbyb0/6IbB1H/X41mKgbhfv7AfG/44w=
-golang.org/x/mod v0.25.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww=
-golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/mod v0.27.0 h1:kb+q2PyFnEADO2IEF935ehFUXlWiNjJWtRNgBLSfbxQ=
+golang.org/x/mod v0.27.0/go.mod h1:rWI627Fq0DEoudcK+MBkNkCe0EetEaDSwJJkCcjpazc=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
-golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
-golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
-golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
-golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
-golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
-golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
-golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
-golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws=
-golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
-golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
-golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
-golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
-golang.org/x/net v0.12.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA=
-golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI=
-golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
-golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
-golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U=
-golang.org/x/net v0.40.0 h1:79Xs7wF06Gbdcg4kdCCIQArK11Z1hr5POQ6+fIYHNuY=
-golang.org/x/net v0.40.0/go.mod h1:y0hY0exeL2Pku80/zKK7tpntoX23cqL3Oa6njdgRtds=
-golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/net v0.43.0 h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE=
+golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
-golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
-golang.org/x/sync v0.15.0 h1:KWH3jNZsfyT6xfAfKiz6MRNmd46ByHDYaZ7KSkCtdW8=
-golang.org/x/sync v0.15.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
-golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw=
+golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
-golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw=
-golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
+golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI=
+golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
-golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
-golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
-golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
-golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA=
-golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ=
-golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
-golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
-golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
-golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o=
-golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU=
-golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU=
-golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U=
-golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0=
-golang.org/x/term v0.32.0 h1:DR4lr0TjUs3epypdhTOkMmuF5CDFJ/8pOnbzMZPQ7bg=
-golang.org/x/term v0.32.0/go.mod h1:uZG1FhGx848Sqfsq4/DlJr3xGGsYMu/L5GW4abiaEPQ=
+golang.org/x/term v0.34.0 h1:O/2T7POpk0ZZ7MAzMeWFSg6S5IpWd/RXDlM9hgM3DR4=
+golang.org/x/term v0.34.0/go.mod h1:5jC53AEywhIVebHgPVeg0mj8OD3VO9OzclacVrqpaAw=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
-golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
-golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
-golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
-golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
-golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
-golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
-golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
-golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
-golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
-golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
-golang.org/x/text v0.26.0 h1:P42AVeLghgTYr4+xUnTRKDMqpar+PtX7KWuNQL21L8M=
-golang.org/x/text v0.26.0/go.mod h1:QK15LZJUUQVJxhz7wXgxSy/CJaTFjd0G+YLonydOVQA=
+golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng=
+golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E=
-golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
-golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA=
-golang.org/x/tools v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ=
-golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
-golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s=
-golang.org/x/tools v0.9.1/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc=
-golang.org/x/tools v0.9.3/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc=
-golang.org/x/tools v0.12.0/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM=
-golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=
-golang.org/x/tools v0.16.1/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0=
-golang.org/x/tools v0.33.0 h1:4qz2S3zmRxbGIhDIAgjxvFutSvH5EfnsYrRBj0UI0bc=
-golang.org/x/tools v0.33.0/go.mod h1:CIJMaWEY88juyUfo7UbgPqbC8rU2OqfAV1h2Qp0oMYI=
+golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.36.0 h1:kWS0uv/zsvHEle1LbV5LE8QujrxB3wfQyxHfhOk0Qkg=
+golang.org/x/tools v0.36.0/go.mod h1:WBDiHKJK8YgLHlcQPYQzNCkUxUypCaa5ZegCVutKm+s=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
-google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
-google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
-google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
-google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
-google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
-google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
-google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA=
-google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
+google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
+google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
-gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
+gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
+gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME=
gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
-gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
+k8s.io/apimachinery v0.33.4 h1:SOf/JW33TP0eppJMkIgQ+L6atlDiP/090oaX0y9pd9s=
+k8s.io/apimachinery v0.33.4/go.mod h1:BHW0YOu7n22fFv/JkYOEfkUYNRN0fj0BlvMFWA7b+SM=
+k8s.io/client-go v0.33.4 h1:TNH+CSu8EmXfitntjUPwaKVPN0AYMbc9F1bBS8/ABpw=
+k8s.io/client-go v0.33.4/go.mod h1:LsA0+hBG2DPwovjd931L/AoaezMPX9CmBgyVyBZmbCY=
+k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
+k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
+k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff h1:/usPimJzUKKu+m+TE36gUyGcf03XZEP0ZIKgKj35LS4=
+k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff/go.mod h1:5jIi+8yX4RIb8wk3XwBo5Pq2ccx4FP10ohkbSKCZoK8=
+k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro=
+k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
+sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 h1:IpInykpT6ceI+QxKBbEflcR5EXP7sU1kvOlxwZh5txg=
+sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg=
+sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
+sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU=
+sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
+sigs.k8s.io/structured-merge-diff/v4 v4.7.0 h1:qPeWmscJcXP0snki5IYF79Z8xrl8ETFxgMd7wez1XkI=
+sigs.k8s.io/structured-merge-diff/v4 v4.7.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps=
sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
+sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs=
+sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4=
diff --git a/tests/lsf_tests/lsf_e2e_test.go b/tests/lsf_tests/lsf_e2e_test.go
index a99f07d6..0608636d 100644
--- a/tests/lsf_tests/lsf_e2e_test.go
+++ b/tests/lsf_tests/lsf_e2e_test.go
@@ -42,7 +42,7 @@ func TestMain(m *testing.M) {
log.Fatalf("❌ Config file not accessible: %v", err)
}
- if _, err := deploy.GetConfigFromYAML(configFilePath); err != nil {
+ if _, err := deploy.GetLSFConfigFromYAML(configFilePath); err != nil {
log.Fatalf("❌ Config load failed: %v", err)
}
log.Printf("✅ Configuration loaded successfully from %s", filepath.Base(configFilePath))
@@ -1981,7 +1981,7 @@ func RunCreateClusterWithExistingVpcSubnetsNoDns(t *testing.T, vpcName string, b
options, err := setupOptions(t, clusterNamePrefix, terraformDir, envVars.DefaultExistingResourceGroup)
options.TerraformVars["vpc_name"] = vpcName
options.TerraformVars["login_subnet_id"] = bastionsubnetId
- options.TerraformVars["cluster_subnet_id"] = computesubnetIds
+ options.TerraformVars["compute_subnet_id"] = computesubnetIds
require.NoError(t, err, "Error setting up test options: %v", err)
// Skip test teardown for further inspection
@@ -2022,7 +2022,7 @@ func TestRunCreateVpcWithCustomDns(t *testing.T) {
// Set up the test options with the relevant parameters, including environment variables and resource group, set up test environment
options, err := setupOptionsVPC(t, clusterNamePrefix, createVpcTerraformDir, envVars.DefaultExistingResourceGroup)
options.TerraformVars["enable_hub"] = true
- options.TerraformVars["dns_zone_name"] = "lsf.com"
+ options.TerraformVars["dns_zone_name"] = "hpc.local"
require.NoError(t, err, "Error setting up test options: %v", err)
@@ -2075,7 +2075,7 @@ func RunCreateClusterWithDnsAndResolver(t *testing.T, vpcName string, bastionsub
options, err := setupOptions(t, clusterNamePrefix, terraformDir, envVars.DefaultExistingResourceGroup)
options.TerraformVars["vpc_name"] = vpcName
options.TerraformVars["login_subnet_id"] = bastionsubnetId
- options.TerraformVars["cluster_subnet_id"] = computesubnetIds
+ options.TerraformVars["compute_subnet_id"] = computesubnetIds
options.TerraformVars["dns_instance_id"] = instanceId
options.TerraformVars["dns_custom_resolver_id"] = customResolverId
@@ -2119,7 +2119,7 @@ func RunCreateClusterWithOnlyResolver(t *testing.T, vpcName string, bastionsubne
options, err := setupOptions(t, clusterNamePrefix, terraformDir, envVars.DefaultExistingResourceGroup)
options.TerraformVars["vpc_name"] = vpcName
options.TerraformVars["login_subnet_id"] = bastionsubnetId
- options.TerraformVars["cluster_subnet_id"] = computesubnetIds
+ options.TerraformVars["compute_subnet_id"] = computesubnetIds
options.TerraformVars["dns_custom_resolver_id"] = customResolverId
require.NoError(t, err, "Error setting up test options: %v", err)
@@ -2162,7 +2162,7 @@ func TestRunCreateVpcWithCustomDnsOnlyDNS(t *testing.T) {
// Set up the test options with the relevant parameters, including environment variables and resource group, set up test environment
options, err := setupOptionsVPC(t, clusterNamePrefix, createVpcTerraformDir, envVars.DefaultExistingResourceGroup)
options.TerraformVars["enable_hub"] = true
- options.TerraformVars["dns_zone_name"] = "lsf.com"
+ options.TerraformVars["dns_zone_name"] = "hpc.local"
require.NoError(t, err, "Error setting up test options: %v", err)
diff --git a/tests/lsf_tests/resource_exemptions.go b/tests/lsf_tests/lsf_resource_exemptions.go
similarity index 100%
rename from tests/lsf_tests/resource_exemptions.go
rename to tests/lsf_tests/lsf_resource_exemptions.go
diff --git a/tests/pr_test.go b/tests/pr_test.go
index e1f1780e..32ff30b5 100644
--- a/tests/pr_test.go
+++ b/tests/pr_test.go
@@ -9,10 +9,11 @@ import (
"github.com/stretchr/testify/require"
deploy "github.com/terraform-ibm-modules/terraform-ibm-hpc/deployment"
lsf_tests "github.com/terraform-ibm-modules/terraform-ibm-hpc/lsf_tests"
+ scale_tests "github.com/terraform-ibm-modules/terraform-ibm-hpc/scale_tests"
utils "github.com/terraform-ibm-modules/terraform-ibm-hpc/utilities"
)
-func TestRunDefault(t *testing.T) {
+func TestRunLSFDefault(t *testing.T) {
t.Parallel()
require.NoError(t, os.Setenv("ZONES", "us-east-3"), "Failed to set ZONES env variable")
@@ -22,29 +23,60 @@ func TestRunDefault(t *testing.T) {
lsf_tests.DefaultTest(t)
}
+func TestRunScaleDefault(t *testing.T) {
+ t.Parallel()
+
+ require.NoError(t, os.Setenv("ZONES", "us-east-3"), "Failed to set ZONES env variable")
+ require.NoError(t, os.Setenv("DEFAULT_EXISTING_RESOURCE_GROUP", "Default"), "Failed to set DEFAULT_EXISTING_RESOURCE_GROUP")
+
+	t.Log("Running default Scale cluster test for region us-east-3")
+ scale_tests.DefaultTest(t)
+}
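+
+// To run only this test locally (a sketch; assumes TF_VAR_ibmcloud_api_key,
+// SSH_KEYS, REMOTE_ALLOWED_IPS, and the Scale storage GUI variables are exported):
+//
+//	go test ./tests -run TestRunScaleDefault -timeout 8h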
+
// TestMain is the entry point for all tests
func TestMain(m *testing.M) {
// Load LSF version configuration
- productFileName, err := lsf_tests.GetLSFVersionConfig()
+ lsfProductFileName, err := lsf_tests.GetLSFVersionConfig()
+ if err != nil {
+ log.Fatalf("❌ Failed to get LSF version config: %v", err)
+ }
+
+ // Load and validate configuration
+ lsfConfigFilePath, err := filepath.Abs("data/" + lsfProductFileName)
+ if err != nil {
+ log.Fatalf("❌ Failed to resolve config path: %v", err)
+ }
+
+ if _, err := os.Stat(lsfConfigFilePath); err != nil {
+ log.Fatalf("❌ Config file not accessible: %v", err)
+ }
+
+ if _, err := deploy.GetLSFConfigFromYAML(lsfConfigFilePath); err != nil {
+ log.Fatalf("❌ Config load failed: %v", err)
+ }
+	log.Printf("✅ LSF configuration loaded successfully from %s", filepath.Base(lsfConfigFilePath))
+
+ // Load Scale version configuration
+ scaleProductFileName, err := scale_tests.GetScaleVersionConfig()
if err != nil {
-		log.Fatalf("❌ Failed to get LSF version config: %v", err)
+		log.Fatalf("❌ Failed to get Scale version config: %v", err)
}
// Load and validate configuration
- configFilePath, err := filepath.Abs("data/" + productFileName)
+ scaleConfigFilePath, err := filepath.Abs("data/" + scaleProductFileName)
if err != nil {
log.Fatalf("❌ Failed to resolve config path: %v", err)
}
- if _, err := os.Stat(configFilePath); err != nil {
+ if _, err := os.Stat(scaleConfigFilePath); err != nil {
log.Fatalf("❌ Config file not accessible: %v", err)
}
- if _, err := deploy.GetConfigFromYAML(configFilePath); err != nil {
+ if _, err := deploy.GetScaleConfigFromYAML(scaleConfigFilePath); err != nil {
log.Fatalf("❌ Config load failed: %v", err)
}
- log.Printf("✅ Configuration loaded successfully from %s", filepath.Base(configFilePath))
+	log.Printf("✅ Scale configuration loaded successfully from %s", filepath.Base(scaleConfigFilePath))
// Execute tests
exitCode := m.Run()
diff --git a/tests/scale_tests/scale_resource_exemptions.go b/tests/scale_tests/scale_resource_exemptions.go
new file mode 100644
index 00000000..b12f889f
--- /dev/null
+++ b/tests/scale_tests/scale_resource_exemptions.go
@@ -0,0 +1,58 @@
+package tests
+
+// ResourceExemptions contains lists of resources to ignore during Terraform operations
+type ResourceExemptions struct {
+ Destroys []string // Resources to ignore during destroy operations
+ Updates []string // Resources to ignore during update operations
+}
+
+// SCALEIgnoreLists contains the standard resource exemptions for Scale cluster tests
+var SCALEIgnoreLists = ResourceExemptions{
+ Destroys: []string{
+ // Null resources used for provisioning checks
+ "module.landing_zone_vsi.module.hpc.module.check_cluster_status.null_resource.remote_exec[0]",
+ "module.landing_zone_vsi.module.hpc.module.check_node_status.null_resource.remote_exec[0]",
+ "module.landing_zone_vsi.module.hpc.module.check_node_status.null_resource.remote_exec[1]",
+ "module.landing_zone_vsi.module.hpc.module.check_node_status.null_resource.remote_exec[2]",
+ "module.check_node_status.null_resource.remote_exec[0]",
+ "module.check_node_status.null_resource.remote_exec[1]",
+ "module.check_node_status.null_resource.remote_exec[2]",
+ "module.check_cluster_status.null_resource.remote_exec[0]",
+ "module.scale.module.resource_provisioner.null_resource.tf_resource_provisioner[0]",
+
+ // Boot waiting resources
+ "module.landing_zone_vsi.module.wait_management_vsi_booted.null_resource.remote_exec[0]",
+ "module.landing_zone_vsi.module.wait_management_candidate_vsi_booted.null_resource.remote_exec[0]",
+ "module.landing_zone_vsi[0].module.wait_management_vsi_booted.null_resource.remote_exec[0]",
+ "module.landing_zone_vsi[0].module.wait_management_candidate_vsi_booted.null_resource.remote_exec[0]",
+ "module.landing_zone_vsi[0].module.wait_management_candidate_vsi_booted.null_resource.remote_exec[1]",
+ "module.landing_zone_vsi[0].module.wait_worker_vsi_booted[0].null_resource.remote_exec[0]",
+ "module.landing_zone_vsi[0].module.wait_worker_vsi_booted[0].null_resource.remote_exec[1]",
+
+ // Configuration resources
+ "module.landing_zone_vsi.module.do_management_vsi_configuration.null_resource.remote_exec_script_cp_files[0]",
+ "module.landing_zone_vsi.module.do_management_vsi_configuration.null_resource.remote_exec_script_cp_files[1]",
+ "module.landing_zone_vsi.module.do_management_vsi_configuration.null_resource.remote_exec_script_new_file[0]",
+ "module.landing_zone_vsi.module.do_management_candidate_vsi_configuration.null_resource.remote_exec_script_new_file[0]",
+ "module.landing_zone_vsi.module.do_management_candidate_vsi_configuration.null_resource.remote_exec_script_run[0]",
+ "module.landing_zone_vsi[0].module.do_management_vsi_configuration.null_resource.remote_exec_script_run[0]",
+
+ // Other temporary resources
+ "module.lsf.module.resource_provisioner.null_resource.tf_resource_provisioner[0]",
+ "module.landing_zone_vsi[0].module.lsf_entitlement[0].null_resource.remote_exec[0]",
+ "module.landing_zone_vsi.module.hpc.module.landing_zone_vsi.module.wait_management_candidate_vsi_booted.null_resource.remote_exec[0]",
+ "module.landing_zone_vsi.module.hpc.module.landing_zone_vsi.module.wait_management_vsi_booted.null_resource.remote_exec[0]",
+ "module.lsf.module.prepare_tf_input.local_sensitive_file.prepare_tf_input[0]",
+ "module.compute_playbook[0].null_resource.run_playbook[0]",
+ },
+
+ Updates: []string{
+ // File storage resources that can be updated without cluster impact
+ "module.file_storage.ibm_is_share.share[0]",
+ "module.file_storage.ibm_is_share.share[1]",
+ "module.file_storage.ibm_is_share.share[2]",
+ "module.file_storage.ibm_is_share.share[3]",
+ "module.file_storage.ibm_is_share.share[4]",
+ "module.lsf.module.prepare_tf_input.local_sensitive_file.prepare_tf_input[0]",
+ },
+}
diff --git a/tests/scale_tests/scale_setup.go b/tests/scale_tests/scale_setup.go
new file mode 100644
index 00000000..457cab29
--- /dev/null
+++ b/tests/scale_tests/scale_setup.go
@@ -0,0 +1,266 @@
+package tests
+
+import (
+ "fmt"
+ "os"
+ "reflect"
+ "strings"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/gruntwork-io/terratest/modules/terraform"
+ "github.com/stretchr/testify/require"
+ "github.com/terraform-ibm-modules/ibmcloud-terratest-wrapper/testhelper"
+
+ utils "github.com/terraform-ibm-modules/terraform-ibm-hpc/utilities"
+)
+
+// Constants for configuration
+const (
+ // Terraform solution directory
+ terraformDir = "solutions/scale"
+
+	// Solution (scheduler) under test
+ Solution = "scale"
+
+ // Configuration files for Scale version
+ defaultConfigFile = "scale_config.yml" // Use latest as default
+
+ // Log file suffixes
+ defaultLogFileSuffix = ".log"
+ defaultJSONLogFileSuffix = ".json"
+)
+
+// EnvVars represents all environment variables required for the test
+type EnvVars struct {
+ ScaleVersion string
+ IbmCustomerNumber string
+ Zones string `required:"true"`
+ RemoteAllowedIPs string `required:"true"`
+ ExistingResourceGroup string `required:"true"`
+ StorageType string `required:"true"`
+ SSHKeys string `required:"true"`
+ ScaleDeployerInstance string
+ ComputeGUIUsername string
+ ComputeGUIPassword string // pragma: allowlist secret
+ StorageGUIUsername string `required:"true"`
+ StorageGUIPassword string `required:"true"` // pragma: allowlist secret
+ ComputeInstances string
+ ClientInstances string
+ StorageInstances string
+ ScaleEncryptionEnabled string
+ ScaleEncryptionType string
+ ScaleObservabilityAtrackerEnable string
+ ScaleObservabilityAtrackerTargetType string
+ ScaleSCCWPEnable string
+ ScaleCSPMEnabled string
+ ScaleSCCWPServicePlan string
+ GKLMInstances string
+ ScaleEncryptionAdminPassword string // pragma: allowlist secret
+ ScaleFilesystemConfig string
+ ScaleFilesetsConfig string
+ ScaleDNSDomainNames string
+ ScaleEnableCOSIntegration string
+ ScaleEnableVPCFlowLogs string
+ AfmInstances string
+ ProtocolInstances string
+}
+
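+// GetEnvVars loads every Scale test setting from the environment and
+// validates, via the struct's `required` tags, that mandatory values are set.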
+func GetEnvVars() (*EnvVars, error) {
+ vars := &EnvVars{
+ ScaleVersion: os.Getenv("SCALE_VERSION"),
+ IbmCustomerNumber: os.Getenv("IBM_CUSTOMER_NUMBER"),
+ Zones: os.Getenv("ZONES"),
+ RemoteAllowedIPs: os.Getenv("REMOTE_ALLOWED_IPS"),
+ ExistingResourceGroup: os.Getenv("EXISTING_RESOURCE_GROUP"),
+ StorageType: os.Getenv("STORAGE_TYPE"),
+ SSHKeys: os.Getenv("SSH_KEYS"),
+ ScaleDeployerInstance: os.Getenv("SCALE_DEPLOYER_INSTANCE"),
+ ComputeGUIUsername: os.Getenv("COMPUTE_GUI_USERNAME"),
+ ComputeGUIPassword: os.Getenv("COMPUTE_GUI_PASSWORD"),
+ StorageGUIUsername: os.Getenv("STORAGE_GUI_USERNAME"),
+ StorageGUIPassword: os.Getenv("STORAGE_GUI_PASSWORD"),
+ ComputeInstances: os.Getenv("COMPUTE_INSTANCES"),
+ ClientInstances: os.Getenv("CLIENT_INSTANCES"),
+ StorageInstances: os.Getenv("STORAGE_INSTANCES"),
+ ScaleEncryptionEnabled: os.Getenv("SCALE_ENCRYPTION_ENABLED"),
+ ScaleEncryptionType: os.Getenv("SCALE_ENCRYPTION_TYPE"),
+ ScaleObservabilityAtrackerEnable: os.Getenv("SCALE_OBSERVABILITY_ATRACKER_ENABLE"),
+ ScaleObservabilityAtrackerTargetType: os.Getenv("SCALE_OBSERVABILITY_ATRACKER_TARGET_TYPE"),
+ ScaleSCCWPEnable: os.Getenv("SCALE_SCCWP_ENABLE"),
+ ScaleCSPMEnabled: os.Getenv("SCALE_CSPM_ENABLED"),
+ ScaleSCCWPServicePlan: os.Getenv("SCALE_SCCWP_SERVICE_PLAN"),
+ GKLMInstances: os.Getenv("GKLM_INSTANCES"),
+ ScaleEncryptionAdminPassword: os.Getenv("SCALE_ENCRYPTION_ADMIN_PASSWORD"),
+ ScaleFilesystemConfig: os.Getenv("SCALE_FILESYSTEM_CONFIG"),
+ ScaleFilesetsConfig: os.Getenv("SCALE_FILESETS_CONFIG"),
+ ScaleDNSDomainNames: os.Getenv("SCALE_DNS_DOMAIN_NAMES"),
+ ScaleEnableCOSIntegration: os.Getenv("SCALE_ENABLE_COS_INTEGRATION"),
+ ScaleEnableVPCFlowLogs: os.Getenv("SCALE_ENABLE_VPC_FLOW_LOGS"),
+ AfmInstances: os.Getenv("AFM_INSTANCES"),
+ ProtocolInstances: os.Getenv("PROTOCOL_INSTANCES"),
+ }
+
+ // Validate required fields
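+	// Reflection walks each struct field; any field tagged `required:"true"`
+	// must be non-empty, otherwise the field name is reported in the error.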
+ v := reflect.ValueOf(vars).Elem()
+ t := v.Type()
+ for i := 0; i < v.NumField(); i++ {
+ field := t.Field(i)
+ if tag, ok := field.Tag.Lookup("required"); ok && tag == "true" {
+ fieldValue := v.Field(i).String()
+ if fieldValue == "" {
+ return nil, fmt.Errorf("missing required environment variable: %s", field.Name)
+ }
+ }
+ }
+
+ return vars, nil
+}
+
+var (
+ // testLogger stores the logger instance for logging test messages.
+ testLogger *utils.AggregatedLogger
+
+ // once ensures that the test suite initialization logic (e.g., logger setup) runs only once,
+ // even when called concurrently by multiple test functions.
+ once sync.Once
+)
+
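+// setupTestSuite initializes the shared logger exactly once per test binary.
+// The log file name comes from LOG_FILE_NAME when set, otherwise a timestamp,
+// and LOG_FILE_NAME is re-exported with the matching JSON suffix.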
+func setupTestSuite(t *testing.T) {
+ once.Do(func() {
+ timestamp := time.Now().Format("2006-01-02_15-04-05")
+ var logFileName string
+
+ if validationLogFilePrefix, ok := os.LookupEnv("LOG_FILE_NAME"); ok {
+ fileName := strings.Split(validationLogFilePrefix, defaultJSONLogFileSuffix)[0]
+ logFileName = fmt.Sprintf("%s%s", fileName, defaultLogFileSuffix)
+ } else {
+ logFileName = fmt.Sprintf("%s%s", timestamp, defaultLogFileSuffix)
+ }
+
+ _ = os.Setenv("LOG_FILE_NAME", fmt.Sprintf("%s%s", strings.Split(logFileName, ".")[0], defaultJSONLogFileSuffix))
+
+ var err error
+ testLogger, err = utils.NewAggregatedLogger(logFileName)
+ if err != nil {
+ t.Fatalf("Error initializing logger: %v", err)
+ }
+ testLogger.Info(t, "Logger initialized successfully")
+ })
+}
+
+var upgradeOnce sync.Once
+
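+// UpgradeTerraformOnce runs `terraform init -upgrade=true` a single time so
+// provider versions are refreshed before any test in the suite applies changes.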
+func UpgradeTerraformOnce(t *testing.T, terraformOptions *terraform.Options) {
+ upgradeOnce.Do(func() {
+ testLogger.Info(t, "Running Terraform upgrade with `-upgrade=true`...")
+
+ output, err := terraform.RunTerraformCommandE(t, terraformOptions, "init", "-upgrade=true")
+ if err != nil {
+ testLogger.FAIL(t, fmt.Sprintf("Terraform upgrade failed: %v", err))
+ testLogger.FAIL(t, fmt.Sprintf("Terraform upgrade output:\n%s", output))
+ require.NoError(t, err, "Terraform upgrade failed")
+ }
+ testLogger.PASS(t, "Terraform upgrade completed successfully")
+ })
+}
+
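+// checkRequiredEnvVars fails fast when any environment variable that every
+// Scale test depends on is missing.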
+func checkRequiredEnvVars() error {
+ required := []string{"TF_VAR_ibmcloud_api_key", "ZONES", "REMOTE_ALLOWED_IPS", "SSH_KEYS"}
+
+ for _, envVar := range required {
+ if os.Getenv(envVar) == "" {
+ return fmt.Errorf("environment variable %s is not set", envVar)
+ }
+ }
+ return nil
+}
+
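+// setupOptions builds the testhelper options for a Scale deployment: it maps
+// environment values to Terraform variables, wires in the Scale ignore lists,
+// and strips empty string values so Terraform defaults apply instead.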
+func setupOptions(t *testing.T, clusterNamePrefix, terraformDir, existingResourceGroup string) (*testhelper.TestOptions, error) {
+ if err := checkRequiredEnvVars(); err != nil {
+ return nil, err
+ }
+
+ envVars, err := GetEnvVars()
+ if err != nil {
+ return nil, fmt.Errorf("failed to get environment variables: %v", err)
+ }
+
+ terraformVars := map[string]interface{}{
+ "cluster_prefix": clusterNamePrefix,
+ "ibm_customer_number": envVars.IbmCustomerNumber,
+ "ssh_keys": utils.SplitAndTrim(envVars.SSHKeys, ","),
+ "zones": utils.SplitAndTrim(envVars.Zones, ","),
+ "remote_allowed_ips": utils.SplitAndTrim(envVars.RemoteAllowedIPs, ","),
+ "existing_resource_group": existingResourceGroup,
+ "storage_type": envVars.StorageType,
+ "deployer_instance": envVars.ScaleDeployerInstance,
+ "storage_gui_username": envVars.StorageGUIUsername,
+ "storage_gui_password": envVars.StorageGUIPassword, // # pragma: allowlist secret
+ "storage_instances": envVars.StorageInstances,
+ "enable_cos_integration": false,
+ "enable_vpc_flow_logs": false,
+ "observability_atracker_enable": false,
+ "colocate_protocol_instances": false,
+ "protocol_instances": envVars.ProtocolInstances,
+ }
+
+ options := &testhelper.TestOptions{
+ Testing: t,
+ TerraformDir: terraformDir,
+ IgnoreDestroys: testhelper.Exemptions{List: SCALEIgnoreLists.Destroys},
+ IgnoreUpdates: testhelper.Exemptions{List: SCALEIgnoreLists.Updates},
+ TerraformVars: terraformVars,
+ }
+
+ // Remove empty values from TerraformVars
+ for key, value := range options.TerraformVars {
+ if value == "" {
+ delete(options.TerraformVars, key)
+ }
+ }
+
+ return options, nil
+}
+
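+// GetScaleVersionConfig returns the name of the YAML file that pins the Scale
+// version under test; currently this is always scale_config.yml.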
+func GetScaleVersionConfig() (string, error) {
+ if defaultConfigFile == "" {
+ return "", fmt.Errorf("default config file path is empty")
+ }
+ return defaultConfigFile, nil
+}
+
+// DefaultTest runs the default test using the provided Terraform directory and existing resource group.
+// It provisions a cluster, waits for it to be ready, and then validates it.
+func DefaultTest(t *testing.T) {
+ setupTestSuite(t)
+ if testLogger == nil {
+ t.Fatal("Logger initialization failed")
+ }
+ testLogger.Info(t, fmt.Sprintf("Test %s starting execution", t.Name()))
+
+ clusterNamePrefix := utils.GenerateTimestampedClusterPrefix(utils.GenerateRandomString())
+ testLogger.Info(t, fmt.Sprintf("Generated cluster prefix: %s", clusterNamePrefix))
+
+ envVars, err := GetEnvVars()
+ if err != nil {
+ testLogger.Error(t, fmt.Sprintf("Environment config error: %v", err))
+ }
+ require.NoError(t, err, "Environment configuration failed")
+
+ options, err := setupOptions(t, clusterNamePrefix, terraformDir, envVars.ExistingResourceGroup)
+ if err != nil {
+ testLogger.Error(t, fmt.Sprintf("Test setup error: %v", err))
+ }
+ require.NoError(t, err, "Test options initialization failed")
+
+ output, err := options.RunTestConsistency()
+ if err != nil {
+ testLogger.FAIL(t, fmt.Sprintf("Provisioning failed: %v", err))
+ }
+ require.NoError(t, err, "Cluster provisioning failed with output: %v", output)
+ require.NotNil(t, output, "Received nil output from provisioning")
+
+ testLogger.PASS(t, fmt.Sprintf("Test %s completed successfully", t.Name()))
+}
diff --git a/tests/utilities/helpers.go b/tests/utilities/helpers.go
index e0572011..c63f5555 100644
--- a/tests/utilities/helpers.go
+++ b/tests/utilities/helpers.go
@@ -7,6 +7,7 @@ import (
"errors"
"fmt"
"math/rand"
+
"os"
"os/exec"
"path/filepath"
@@ -769,3 +770,22 @@ func GetBoolVar(vars map[string]interface{}, key string) (bool, error) {
return boolVal, nil
}
+
+// GeneratePassword generates a 10-character password: 8 random characters from
+// a mixed character set plus a fixed "1*" suffix, so the result always contains
+// a digit and a special character.
+func GeneratePassword() string {
+	// Character set containing uppercase and lowercase letters, digits, and special characters
+	const charset = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*"
+
+ b := make([]byte, 8)
+
+ // Loop through each index of the byte slice
+ for i := range b {
+ // Generate a random index within the length of the character set
+ randomIndex := rand.Intn(len(charset))
+
+ b[i] = charset[randomIndex]
+ }
+
+	// Convert to a string and append "1*" to guarantee a digit and a special character
+ return string(b) + "1*"
+}
diff --git a/variables.tf b/variables.tf
index 6cef5666..746569fc 100644
--- a/variables.tf
+++ b/variables.tf
@@ -20,7 +20,7 @@ variable "lsf_version" {
variable "scheduler" {
type = string
default = null
- description = "Select one of the scheduler (LSF/Symphony/Slurm/null)"
+  description = "Select one of the supported schedulers (Scale/LSF/Symphony/Slurm/null)."
}
variable "ibm_customer_number" {
@@ -133,7 +133,11 @@ variable "vpc_cluster_login_private_subnets_cidr_blocks" {
default = "10.241.16.0/28"
description = "Provide the CIDR block required for the creation of the login cluster's private subnet. Only one CIDR block is needed. If using a hybrid environment, modify the CIDR block to avoid conflicts with any on-premises CIDR blocks. Since the login subnet is used only for the creation of login virtual server instances, provide a CIDR range of /28."
validation {
- condition = tonumber(regex("^.*?/(\\d+)$", var.vpc_cluster_login_private_subnets_cidr_blocks)[0]) <= 28
+ condition = can(
+ regex(
+ "^((25[0-5]|2[0-4][0-9]|1[0-9]{2}|[1-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|1[0-9]{2}|[1-9]?[0-9])/(2[8-9]|3[0-2])$", trimspace(var.vpc_cluster_login_private_subnets_cidr_blocks)
+ )
+ )
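+    # Accepts a single IPv4 CIDR with a prefix length of /28 to /32, e.g. 10.241.16.0/28.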
error_message = "This subnet is used to create only a login virtual server instance. Providing a larger CIDR size will waste the usage of available IPs. A CIDR range of /28 is sufficient for the creation of the login subnet."
}
}
@@ -162,8 +166,8 @@ variable "deployer_instance" {
##############################################################################
# Compute Variables
##############################################################################
-variable "client_subnets" {
- type = list(string)
+variable "client_subnet_id" {
+ type = string
default = null
description = "Name of an existing subnets in which the cluster resources will be deployed. If no value is given, then new subnet(s) will be provisioned for the cluster. [Learn more](https://cloud.ibm.com/docs/vpc)"
}
@@ -185,12 +189,12 @@ variable "client_instances" {
default = [{
profile = "cx2-2x4"
count = 0
- image = "ibm-redhat-8-10-minimal-amd64-4"
+ image = "ibm-redhat-8-10-minimal-amd64-6"
}]
description = "Number of instances to be launched for client."
}
-variable "cluster_subnet_id" {
+variable "compute_subnet_id" {
type = string
default = null
description = "Name of an existing subnets in which the cluster resources will be deployed. If no value is given, then new subnet(s) will be provisioned for the cluster. [Learn more](https://cloud.ibm.com/docs/vpc)"
@@ -221,15 +225,17 @@ variable "management_instances" {
variable "static_compute_instances" {
type = list(
object({
- profile = string
- count = number
- image = string
+ profile = string
+ count = number
+ image = string
+ filesystem = optional(string)
})
)
default = [{
- profile = "cx2-2x4"
- count = 0
- image = "ibm-redhat-8-10-minimal-amd64-4"
+ profile = "cx2-2x4"
+ count = 0
+ image = "ibm-redhat-8-10-minimal-amd64-4"
+ filesystem = "/ibm/fs1"
}]
description = "Min Number of instances to be launched for compute cluster."
}
@@ -252,14 +258,14 @@ variable "dynamic_compute_instances" {
variable "compute_gui_username" {
type = string
- default = "admin"
+ default = ""
sensitive = true
description = "GUI user to perform system management and monitoring tasks on compute cluster."
}
variable "compute_gui_password" {
type = string
- default = "hpc@IBMCloud"
+ default = ""
sensitive = true
description = "Password for compute cluster GUI"
}
@@ -267,8 +273,8 @@ variable "compute_gui_password" {
##############################################################################
# Storage Variables
##############################################################################
-variable "storage_subnets" {
- type = list(string)
+variable "storage_subnet_id" {
+ type = string
default = null
description = "Name of an existing subnets in which the cluster resources will be deployed. If no value is given, then new subnet(s) will be provisioned for the cluster. [Learn more](https://cloud.ibm.com/docs/vpc)"
}
@@ -285,11 +291,11 @@ variable "storage_instances" {
profile = string
count = number
image = string
- filesystem = string
+ filesystem = optional(string)
})
)
default = [{
- profile = "bx2d-2x8"
+ profile = "bx2d-32x128"
count = 0
image = "ibm-redhat-8-10-minimal-amd64-4"
filesystem = "/ibm/fs1"
@@ -303,7 +309,7 @@ variable "storage_servers" {
profile = string
count = number
image = string
- filesystem = string
+ filesystem = optional(string)
})
)
default = [{
@@ -315,8 +321,20 @@ variable "storage_servers" {
description = "Number of BareMetal Servers to be launched for storage cluster."
}
-variable "protocol_subnets" {
- type = list(string)
+variable "tie_breaker_bm_server_profile" {
+ type = string
+ default = null
+ description = "Specify the bare metal server profile type name to be used for creating the bare metal Tie breaker node. If no value is provided, the storage bare metal server profile will be used as the default. For more information, see [bare metal server profiles](https://cloud.ibm.com/docs/vpc?topic=vpc-bare-metal-servers-profile&interface=ui). [Tie Breaker Node](https://www.ibm.com/docs/en/storage-scale/5.2.2?topic=quorum-node-tiebreaker-disks)"
+}
+
+variable "scale_management_vsi_profile" {
+ type = string
+ default = "bx2-8x32"
+ description = "The virtual server instance profile type name to be used to create the Management node. For more information, see [Instance Profiles](https://cloud.ibm.com/docs/vpc?topic=vpc-profiles&interface=ui)."
+}
+
+variable "protocol_subnet_id" {
+ type = string
default = null
description = "Name of an existing subnets in which the cluster resources will be deployed. If no value is given, then new subnet(s) will be provisioned for the cluster. [Learn more](https://cloud.ibm.com/docs/vpc)"
}
@@ -332,13 +350,11 @@ variable "protocol_instances" {
object({
profile = string
count = number
- image = string
})
)
default = [{
profile = "bx2-2x8"
count = 0
- image = "ibm-redhat-8-10-minimal-amd64-4"
}]
description = "Number of instances to be launched for protocol hosts."
}
@@ -351,14 +367,14 @@ variable "colocate_protocol_instances" {
variable "storage_gui_username" {
type = string
- default = "admin"
+ default = ""
sensitive = true
description = "GUI user to perform system management and monitoring tasks on storage cluster."
}
variable "storage_gui_password" {
type = string
- default = "hpc@IBMCloud"
+ default = ""
sensitive = true
description = "Password for storage cluster GUI"
}
@@ -480,6 +496,12 @@ variable "existing_kms_instance_guid" {
description = "The existing KMS instance guid."
}
+variable "key_protect_instance_id" {
+ type = string
+ default = null
+ description = "An existing Key Protect instance used for filesystem encryption"
+}
+
# variable "hpcs_instance_name" {
# type = string
# default = null
@@ -543,39 +565,33 @@ variable "filesystem_config" {
default_metadata_replica = number
max_data_replica = number
max_metadata_replica = number
- mount_point = string
})
)
default = null
description = "File system configurations."
}
-# variable "filesets_config" {
-# type = list(
-# object({
-# fileset = string
-# filesystem = string
-# junction_path = string
-# client_mount_path = string
-# quota = number
-# })
-# )
-# default = null
-# description = "Fileset configurations."
-# }
+variable "filesets_config" {
+ type = list(
+ object({
+ client_mount_path = string
+ quota = number
+ })
+ )
+ default = null
+ description = "Fileset configurations."
+}
variable "afm_instances" {
type = list(
object({
profile = string
count = number
- image = string
})
)
default = [{
- profile = "bx2-2x8"
+ profile = "bx2-32x128"
count = 0
- image = "ibm-redhat-8-10-minimal-amd64-4"
}]
description = "Number of instances to be launched for afm hosts."
}
@@ -603,19 +619,32 @@ variable "afm_cos_config" {
bucket_storage_class = "smart"
bucket_type = "region_location"
}]
- # default = [{
- # afm_fileset = "afm_fileset"
- # mode = "iw"
- # cos_instance = null
- # bucket_name = null
- # bucket_region = "us-south"
- # cos_service_cred_key = ""
- # bucket_storage_class = "smart"
- # bucket_type = "region_location"
- # }]
+ nullable = false
description = "AFM configurations."
}
+variable "scale_afm_bucket_config_details" {
+ description = "Scale AFM COS Bucket and Configuration Details"
+ type = list(object({
+ bucket = string
+ endpoint = string
+ fileset = string
+ filesystem = string
+ mode = string
+ }))
+ default = null
+}
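+
+# Example entry (hypothetical values, matching the object schema above):
+#   [{ bucket = "afm-bucket", endpoint = "s3.us-south.cloud-object-storage.appdomain.cloud",
+#      fileset = "afm_fileset", filesystem = "fs1", mode = "iw" }]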
+
+variable "scale_afm_cos_hmac_key_params" {
+ description = "Scale AFM COS HMAC Key Details"
+ type = list(object({
+ akey = string
+ bucket = string
+ skey = string
+ }))
+ default = null
+}
+
##############################################################################
# LSF specific Variables
##############################################################################
@@ -672,7 +701,7 @@ variable "app_center_gui_password" {
variable "observability_atracker_enable" {
type = bool
- default = true
+ default = false
description = "Activity Tracker Event Routing to configure how to route auditing events. While multiple Activity Tracker instances can be created, only one tracker is needed to capture all events. Creating additional trackers is unnecessary if an existing Activity Tracker is already integrated with a COS bucket. In such cases, set the value to false, as all events can be monitored and accessed through the existing Activity Tracker."
}
@@ -689,7 +718,7 @@ variable "observability_atracker_target_type" {
variable "observability_monitoring_enable" {
description = "Set false to disable IBM Cloud Monitoring integration. If enabled, infrastructure and LSF application metrics from Management Nodes will be ingested."
type = bool
- default = true
+ default = false
}
variable "observability_logs_enable_for_management" {
@@ -784,28 +813,16 @@ variable "cloud_metrics_data_bucket" {
description = "cloud metrics data bucket"
}
-# variable "scc_cos_bucket" {
-# type = string
-# default = null
-# description = "scc cos bucket"
-# }
-
-# variable "scc_cos_instance_crn" {
-# type = string
-# default = null
-# description = "scc cos instance crn"
-# }
-
#############################################################################
# VARIABLES TO BE CHECKED
##############################################################################
-
-
-
-
-
+variable "sccwp_enable" {
+ type = bool
+ default = false
+ description = "Flag to enable SCC instance creation. If true, an instance of SCC (Security and Compliance Center) will be created."
+}
#############################################################################
# LDAP variables
@@ -824,7 +841,7 @@ variable "ldap_basedns" {
variable "ldap_server" {
type = string
- default = ""
+ default = "null"
description = "Provide the IP address for the existing LDAP server. If no address is given, a new LDAP server will be created."
}
@@ -855,11 +872,11 @@ variable "ldap_user_password" {
description = "The LDAP user password should be 8 to 20 characters long, with a mix of at least three alphabetic characters, including one uppercase and one lowercase letter. It must also include two numerical digits and at least one special character from (~@_+:) are required.It is important to avoid including the username in the password for enhanced security.[This value is ignored for an existing LDAP server]."
}
-variable "ldap_instance_key_pair" {
- type = list(string)
- default = null
- description = "Name of the SSH key configured in your IBM Cloud account that is used to establish a connection to the LDAP Server. Make sure that the SSH key is present in the same resource group and region where the LDAP Servers are provisioned. If you do not have an SSH key in your IBM Cloud account, create one by using the [SSH keys](https://cloud.ibm.com/docs/vpc?topic=vpc-ssh-keys) instructions."
-}
+# variable "ldap_instance_key_pair" {
+# type = list(string)
+# default = null
+# description = "Name of the SSH key configured in your IBM Cloud account that is used to establish a connection to the LDAP Server. Make sure that the SSH key is present in the same resource group and region where the LDAP Servers are provisioned. If you do not have an SSH key in your IBM Cloud account, create one by using the [SSH keys](https://cloud.ibm.com/docs/vpc?topic=vpc-ssh-keys) instructions."
+# }
variable "ldap_instance" {
type = list(
@@ -890,12 +907,6 @@ variable "scale_encryption_type" {
description = "To enable filesystem encryption, specify either 'key_protect' or 'gklm'. If neither is specified, the default value will be 'null' and encryption is disabled"
}
-variable "gklm_instance_key_pair" {
- type = list(string)
- default = null
- description = "The key pair to use to launch the GKLM host."
-}
-
variable "gklm_instances" {
type = list(
object({
@@ -907,23 +918,11 @@ variable "gklm_instances" {
default = [{
profile = "bx2-2x8"
count = 2
- image = "ibm-redhat-8-10-minimal-amd64-4"
+ image = "hpcc-scale-gklm4202-v2-5-2"
}]
- description = "Number of instances to be launched for client."
+  description = "Number of GKLM instances to be launched for the Scale cluster."
}
-# variable "scale_encryption_admin_default_password" {
-# type = string
-# default = null
-# description = "The default administrator password used for resetting the admin password based on the user input. The password has to be updated which was configured during the GKLM installation."
-# }
-
-# variable "scale_encryption_admin_username" {
-# type = string
-# default = null
-# description = "The default Admin username for Security Key Lifecycle Manager(GKLM)."
-# }
-
variable "scale_encryption_admin_password" {
type = string
default = null
@@ -936,9 +935,15 @@ variable "scale_ansible_repo_clone_path" {
description = "Path to clone github.com/IBM/ibm-spectrum-scale-install-infra."
}
+variable "scale_config_path" {
+ type = string
+ default = "/opt/IBM/ibm-spectrumscale-cloud-deploy"
+  description = "Path where the IBM Storage Scale cloud deployment configuration is stored."
+}
+
variable "spectrumscale_rpms_path" {
type = string
- default = "/opt/ibm/gpfs_cloud_rpms"
+ default = "/opt/IBM/gpfs_cloud_rpms"
description = "Path that contains IBM Spectrum Scale product cloud rpms."
}
@@ -1007,12 +1012,6 @@ variable "bastion_fip" {
default = null
description = "bastion fip"
}
-
-variable "scale_compute_cluster_filesystem_mountpoint" {
- type = string
- default = "/gpfs/fs1"
- description = "Compute cluster (accessingCluster) Filesystem mount point."
-}
##############################################################################
# Dedicatedhost Variables
##############################################################################
@@ -1057,6 +1056,7 @@ variable "resource_group_ids" {
default = null
description = "Map describing resource groups to create or reference"
}
+
##############################################################################
# Login Variables
##############################################################################
@@ -1071,7 +1071,7 @@ variable "login_instance" {
profile = "bx2-2x8"
image = "hpcaas-lsf10-rhel810-compute-v8"
}]
- description = "Specify the list of login node configurations, including instance profile, image name. By default, login node is created using Fix Pack 15. If deploying with Fix Pack 14, set lsf_version to fixpack_14 and use the corresponding image hpc-lsf-fp14-compute-rhel810-v1. The selected image must align with the specified lsf_version, any mismatch may lead to deployment failures."
+  description = "Specify the login node configuration, including the instance profile and image name."
}
##############################################################################
@@ -1113,7 +1113,7 @@ variable "sccwp_service_plan" {
}
}
-variable "sccwp_enable" {
+variable "bms_boot_drive_encryption" {
type = bool
default = true
description = "Set this flag to true to create an instance of IBM Security and Compliance Center (SCC) Workload Protection. When enabled, it provides tools to discover and prioritize vulnerabilities, monitor for security threats, and enforce configuration, permission, and compliance policies across the full lifecycle of your workloads. To view the data on the dashboard, enable the cspm to create the app configuration and required trusted profile policies.[Learn more](https://cloud.ibm.com/docs/workload-protection?topic=workload-protection-about)."
@@ -1127,14 +1127,74 @@ variable "cspm_enabled" {
}
variable "app_config_plan" {
- description = "Specify the IBM service pricing plan for the app configuration. Allowed values are 'basic', 'lite', 'standardv2', 'enterprise'."
+  description = "Specify the IBM service pricing plan for the app configuration. Allowed values are 'basic', 'standardv2', 'enterprise'."
type = string
default = "basic"
validation {
- error_message = "Plan for App configuration can only be basic, lite, standardv2, enterprise.."
+    error_message = "Plan for App configuration can only be basic, standardv2, or enterprise."
condition = contains(
- ["basic", "lite", "standardv2", "enterprise"],
+ ["basic", "standardv2", "enterprise"],
var.app_config_plan
)
}
}
+
+variable "client_security_group_name" {
+ type = string
+ default = null
+ description = "Provide the security group name to provision the client nodes. If set to null, the solution will automatically create the necessary security group and rules. If you choose to use an existing security group, ensure it has the appropriate rules configured for the client nodes to function properly."
+ validation {
+ condition = anytrue([var.vpc_name != null && var.client_security_group_name != null, var.client_security_group_name == null])
+    error_message = "If client_security_group_name is provided, vpc_name must also be provided."
+ }
+}
+
+variable "compute_security_group_name" {
+ type = string
+ default = null
+ description = "Provide the security group name to provision the compute nodes. If set to null, the solution will automatically create the necessary security group and rules. If you choose to use an existing security group, ensure it has the appropriate rules configured for the compute nodes to function properly."
+ validation {
+ condition = anytrue([var.vpc_name != null && var.compute_security_group_name != null, var.compute_security_group_name == null])
+    error_message = "If compute_security_group_name is provided, vpc_name must also be provided."
+ }
+}
+
+variable "storage_security_group_name" {
+ type = string
+ default = null
+ description = "Provide the security group name to provision the storage node. If set to null, the solution will automatically create the necessary security group and rules. If you choose to use an existing security group, ensure it has the appropriate rules configured for the storage node to function properly."
+ validation {
+ condition = anytrue([var.vpc_name != null && var.storage_security_group_name != null, var.storage_security_group_name == null])
+    error_message = "If storage_security_group_name is provided, vpc_name must also be provided."
+ }
+}
+
+variable "ldap_security_group_name" {
+ type = string
+ default = null
+ description = "Provide the security group name to provision the ldap nodes. If set to null, the solution will automatically create the necessary security group and rules. If you choose to use an existing security group, ensure it has the appropriate rules configured for the ldap nodes to function properly."
+ validation {
+ condition = anytrue([var.vpc_name != null && var.ldap_security_group_name != null, var.ldap_security_group_name == null])
+    error_message = "If ldap_security_group_name is provided, vpc_name must also be provided."
+ }
+}
+
+variable "gklm_security_group_name" {
+ type = string
+ default = null
+ description = "Provide the security group name to provision the gklm nodes. If set to null, the solution will automatically create the necessary security group and rules. If you choose to use an existing security group, ensure it has the appropriate rules configured for the gklm nodes to function properly."
+ validation {
+ condition = anytrue([var.vpc_name != null && var.gklm_security_group_name != null, var.gklm_security_group_name == null])
+    error_message = "If gklm_security_group_name is provided, vpc_name must also be provided."
+ }
+}
+
+variable "login_security_group_name" {
+ type = string
+ default = null
+ description = "Provide the security group name to provision the bastion node. If set to null, the solution will automatically create the necessary security group and rules. If you choose to use an existing security group, ensure it has the appropriate rules configured for the bastion node to function properly."
+ validation {
+ condition = anytrue([var.vpc_name != null && var.login_security_group_name != null, var.login_security_group_name == null])
+    error_message = "If login_security_group_name is provided, vpc_name must also be provided."
+ }
+}
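+
+# Example (hypothetical values): when reusing existing security groups, the
+# matching vpc_name must also be set, e.g. in terraform.tfvars:
+#   vpc_name                  = "my-existing-vpc"
+#   login_security_group_name = "my-bastion-sg"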