From 8448f767c525c21e034aeacecac261b19ef99f15 Mon Sep 17 00:00:00 2001
From: Nupur Goyal
Date: Tue, 21 Oct 2025 15:07:19 +0530
Subject: [PATCH 1/2] Revert main back to 8b6254e: undo commits post that

---
 .catalog-onboard-pipeline.yaml | 20 -
 .secrets.baseline | 148 +--
 .../hpcaas-pipeline-git-pr-status.yaml | 2 +-
 .../listener-git-pr-status.yaml | 8 +-
 .../hpcaas-pipeline-git-trigger.yaml | 8 +-
 .../listener-git-trigger.yaml | 8 +-
 .../hpcaas_task/hpcaas-task-infra-rhel.yaml | 8 +-
 .../hpcaas_task/hpcaas-task-negative.yaml | 8 +-
 .../hpcaas_task/hpcaas-task-pr-rhel.yaml | 2 +-
 .../hpcaas_task/hpcaas-task-pr-ubuntu.yaml | 2 +-
 .../hpcaas_task/hpcaas-task-region.yaml | 8 +-
 .../hpcaas_task/hpcaas-test-infra-ubuntu.yaml | 8 +-
 .tekton/scripts/cos_data.py | 13 +-
 .tekton/scripts/ssh_create_delete.sh | 12 +-
 .tekton/task-clone.yaml | 2 +-
 .tekton/task-set-commit-status.yaml | 4 +-
 CHANGELOG.md | 2 +-
 LSF-DA-Architecture.drawio.svg | 4 -
 README.md | 2 +-
 Scale-DA-Architecture.drawio.svg | 4 -
 cra-config.yaml | 11 +-
 datasource.tf | 44 +-
 ibm_catalog.json | 915 +----------------
 locals.tf | 447 +++------
 main.tf | 438 +++-----
 .../tasks/mgmt-cloudmonitoring-configure.yml | 14 +-
 .../tasks/login_node_configuration.yml | 4 +-
 .../tasks/app_center_configure.yml | 3 +
 .../tasks/configure_management_nodes.yml | 2 +-
 .../tasks/hosts_file_update.yml | 4 -
 .../roles/lsf_mgmt_config/tasks/main.yml | 4 -
 .../lsf_mgmt_config/tasks/web_services.yml | 14 -
 .../lsf_mgmt_config/templates/user_data.sh | 17 +-
 .../tasks/cluster_validation.yml | 20 +-
 .../tasks/configure_shared_folders.yml | 34 +-
 .../lsf_post_config/tasks/reload_services.yml | 2 +-
 .../roles/lsf_post_config/vars/main.yml | 1 -
 .../tasks/disable_ansible_repo.yml | 4 -
 .../templates/fp14-inventory.j2 | 2 +-
 .../templates/fp15-inventory.j2 | 5 +-
 modules/baremetal/datasource.tf | 15 +-
 modules/baremetal/locals.tf | 51 +-
 modules/baremetal/main.tf | 44 +-
 modules/baremetal/outputs.tf | 16 +-
 modules/baremetal/template_files.tf | 10 +
 .../baremetal/templates/storage_user_data.tpl | 120 +++
 modules/baremetal/variables.tf | 60 +-
 modules/baremetal/version.tf | 4 +
 .../client_configuration.tf | 18 +-
 modules/common/client_configuration/locals.tf | 15 +-
 .../compute_configuration.tf | 20 +-
 .../common/compute_configuration/locals.tf | 30 +-
 .../common/compute_configuration/variables.tf | 5 -
 .../common/encryption_configuration/locals.tf | 13 -
 .../common/encryption_configuration/main.tf | 55 -
 .../encryption_configuration/outputs.tf | 0
 .../encryption_configuration/variables.tf | 78 --
 .../encryption_configuration/version.tf | 18 -
 .../key_protect_configuration/locals.tf | 7 -
 .../common/key_protect_configuration/main.tf | 24 -
 .../key_protect_configuration/outputs.tf | 0
 .../key_protect_configuration/variables.tf | 43 -
 .../key_protect_configuration/version.tf | 14 -
 .../ldap_configuration/ldap_configuration.tf | 46 +
 modules/common/ldap_configuration/locals.tf | 2 -
 modules/common/ldap_configuration/main.tf | 46 -
 .../common/ldap_configuration/variables.tf | 5 +
 .../network_playbook/network_playbook.tf | 14 -
 modules/common/network_playbook/outputs.tf | 1 -
 modules/common/network_playbook/variables.tf | 29 -
 modules/common/network_playbook/version.tf | 14 -
 .../remote_mount_configuration/outputs.tf | 2 +-
 .../remote_mount_configuration.tf | 8 +-
 modules/common/scripts/prepare_client_inv.py | 41 +-
 modules/common/scripts/prepare_ldap_inv.py | 16 +-
 .../scripts/prepare_remote_mount_inv.py | 24 +-
 .../common/scripts/prepare_scale_inv_ini.py | 228 +++--
 .../scripts/wait_for_ssh_availability.py | 12 +-
 .../common/storage_configuration/locals.tf | 46 +-
 .../storage_configuration.tf | 60 +-
 .../common/storage_configuration/variables.tf | 17 -
 modules/cos/cos.tf | 553 ++++++++++
 .../{common/network_playbook => cos}/main.tf | 0
 modules/cos/outputs.tf | 9 +
 modules/cos/variables.tf | 109 ++
 modules/{key_protect => cos}/version.tf | 18 +-
 modules/deployer/datasource.tf | 5 -
 modules/deployer/image_map.tf | 12 -
 modules/deployer/locals.tf | 12 +-
 modules/deployer/main.tf | 47 +-
 .../deployer/templates/deployer_user_data.tpl | 31 +-
 modules/deployer/variables.tf | 75 +-
 modules/file_storage/main.tf | 2 +-
 modules/host_resolution_add/locals.tf | 40 -
 modules/host_resolution_add/main.tf | 805 ---------------
 modules/host_resolution_add/outputs.tf | 0
 modules/host_resolution_add/variables.tf | 181 ----
 modules/host_resolution_add/version.tf | 18 -
 modules/host_resolution_remove/locals.tf | 4 -
 modules/host_resolution_remove/main.tf | 22 -
 modules/host_resolution_remove/outputs.tf | 0
 modules/host_resolution_remove/variables.tf | 14 -
 modules/host_resolution_remove/version.tf | 14 -
 modules/inventory/main.tf | 3 -
 modules/inventory/variables.tf | 10 +-
 modules/key_protect/datasource.tf | 4 -
 modules/key_protect/locals.tf | 5 -
 modules/key_protect/main.tf | 42 -
 modules/key_protect/outputs.tf | 0
 modules/key_protect/variables.tf | 29 -
 modules/landing_zone/datasource.tf | 205 ----
 modules/landing_zone/locals.tf | 203 +---
 modules/landing_zone/main.tf | 2 +-
 modules/landing_zone/outputs.tf | 39 +-
 modules/landing_zone/variables.tf | 134 +--
 modules/landing_zone_vsi/datasource.tf | 133 +--
 modules/landing_zone_vsi/image_map.tf | 42 -
 modules/landing_zone_vsi/locals.tf | 163 +--
 modules/landing_zone_vsi/main.tf | 280 +++---
 modules/landing_zone_vsi/outputs.tf | 41 -
 modules/landing_zone_vsi/template_files.tf | 136 +--
 .../templates/afm_bm_user_data.tpl | 32 -
 .../afm_bootdrive_user_data/cloud_init.yml | 123 ---
 .../templates/afm_user_data.tpl | 12 +-
 .../templates/client_user_data.tpl | 24 +-
 .../templates/gklm_user_data.tpl | 10 +-
 .../templates/ldap_user_data.tpl | 5 +-
 .../templates/protocol_bm_user_data.tpl | 32 -
 .../cloud_init.yml | 123 ---
 .../templates/protocol_user_data.tpl | 9 +-
 .../templates/scale_compute_user_data.tpl | 27 +-
 .../templates/storage_bm_user_data.tpl | 32 -
 .../templates/storage_bmtb_user_data.tpl | 32 -
 .../cloud_init.yml | 123 ---
 .../cloud_init.yml | 123 ---
 .../templates/storage_user_data.tpl | 25 +-
 modules/landing_zone_vsi/variables.tf | 89 +-
 modules/playbook/main.tf | 82 +-
 modules/playbook/variables.tf | 2 +-
 modules/prepare_tf_input/locals.tf | 23 +-
 modules/prepare_tf_input/main.tf | 31 +-
 modules/prepare_tf_input/variables.tf | 212 +---
 modules/resource_provisioner/locals.tf | 13 +-
 modules/resource_provisioner/main.tf | 15 +-
 modules/resource_provisioner/variables.tf | 2 +-
 modules/security/sccwp/variables.tf | 4 +-
 outputs.tf | 61 +-
 samples/configs/hpc_catalog_values.json | 14 +-
 samples/configs/hpc_schematics_values.json | 10 +-
 samples/configs/scale_catalog_values.json | 80 --
 samples/configs/scale_schematics_values.json | 599 -----------
 solutions/custom/variables.tf | 64 +-
 solutions/hpcaas/variables.tf | 2 +-
 solutions/lsf/datasource.tf | 8 +-
 solutions/lsf/input_validation.tf | 22 +-
 solutions/lsf/locals.tf | 4 +-
 solutions/lsf/main.tf | 2 +-
 solutions/lsf/override.json | 4 +
 solutions/lsf/variables.tf | 65 +-
 .../catalogValidationValues.json.template | 5 +-
 solutions/scale/datasource.tf | 29 -
 solutions/scale/input_validation.tf | 196 ----
 solutions/scale/locals.tf | 289 +++---
 solutions/scale/main.tf | 128 ++-
 solutions/scale/override.json | 7 +-
 solutions/scale/variables.tf | 949 ++++++------------
 solutions/slurm/variables.tf | 4 +-
 solutions/symphony/variables.tf | 4 +-
 tests/data/lsf_fp14_config.yml | 8 +-
 tests/data/lsf_fp15_config.yml | 8 +-
 tests/data/scale_config.yml | 128 ---
 tests/deployment/lsf_deployment.go | 4 +-
 tests/deployment/scale_deployment.go | 317 ------
 tests/go.mod | 110 +-
 tests/go.sum | 470 ++++++---
 tests/lsf/cluster_helpers.go | 43 +-
 tests/lsf/cluster_utils.go | 20 +-
 tests/lsf/cluster_validation.go | 29 +-
 tests/lsf_tests/lsf_constants.go | 6 +-
 tests/lsf_tests/lsf_e2e_test.go | 28 +-
 tests/lsf_tests/lsf_negative_test.go | 200 ++--
 ...e_exemptions.go => resource_exemptions.go} | 0
 tests/pr_test.go | 44 +-
 .../scale_tests/scale_resource_exemptions.go | 58 --
 tests/scale_tests/scale_setup.go | 266 -----
 tests/utilities/helpers.go | 20 -
 tools/access-management/README.md | 59 --
 tools/access-management/permissions.sh | 338 -------
 .../packer/hpcaas/compute/script.sh | 69 +-
 tools/image-builder/template_files.tf | 1 +
 .../templates/packer_user_data.tpl | 13 +
 tools/image-builder/variables.tf | 13 +-
 tools/image-builder/version.tf | 2 +-
 tools/minimal-demo-prod-scripts/README.md | 146 ---
 .../catalog_values_demo_deployment.json | 69 --
 .../catalog_values_minimal_deployment.json | 69 --
 .../catalog_values_production_deployment.json | 69 --
 tools/minimal-demo-prod-scripts/cp.sh | 47 -
 .../create_lsf_environment.sh | 97 --
 tools/minimal-demo-prod-scripts/destroy.sh | 47 -
 tools/minimal-demo-prod-scripts/jump.sh | 38 -
 tools/minimal-demo-prod-scripts/show.sh | 31 -
 tools/minimal-demo-prod-scripts/submit.sh | 14 -
 variables.tf | 278 ++--
 204 files changed, 3315 insertions(+), 10566 deletions(-)
 delete mode 100644 LSF-DA-Architecture.drawio.svg
 delete mode 100644 Scale-DA-Architecture.drawio.svg
 delete mode 100644 modules/ansible-roles/roles/lsf_mgmt_config/tasks/web_services.yml
 create mode 100644 modules/baremetal/template_files.tf
 create mode 100644 modules/baremetal/templates/storage_user_data.tpl
 delete mode 100644 modules/common/encryption_configuration/locals.tf
 delete mode 100644 modules/common/encryption_configuration/main.tf
 delete mode 100644 modules/common/encryption_configuration/outputs.tf
 delete mode 100644 modules/common/encryption_configuration/variables.tf
 delete mode 100644 modules/common/encryption_configuration/version.tf
 delete mode 100644 modules/common/key_protect_configuration/locals.tf
 delete mode 100644 modules/common/key_protect_configuration/main.tf
 delete mode 100644 modules/common/key_protect_configuration/outputs.tf
 delete mode 100644 modules/common/key_protect_configuration/variables.tf
 delete mode 100644 modules/common/key_protect_configuration/version.tf
 create mode 100644 modules/common/ldap_configuration/ldap_configuration.tf
 delete mode 100644 modules/common/network_playbook/network_playbook.tf
 delete mode 100644 modules/common/network_playbook/outputs.tf
 delete mode 100644 modules/common/network_playbook/variables.tf
 delete mode 100644 modules/common/network_playbook/version.tf
 create mode 100644 modules/cos/cos.tf
 rename modules/{common/network_playbook => cos}/main.tf (100%)
 create mode 100644 modules/cos/outputs.tf
 create mode 100644 modules/cos/variables.tf
 rename modules/{key_protect => cos}/version.tf (64%)
 delete mode 100644 modules/host_resolution_add/locals.tf
 delete mode 100644 modules/host_resolution_add/main.tf
 delete mode 100644 modules/host_resolution_add/outputs.tf
 delete mode 100644 modules/host_resolution_add/variables.tf
 delete mode 100644 modules/host_resolution_add/version.tf
 delete mode 100644 modules/host_resolution_remove/locals.tf
 delete mode 100644 modules/host_resolution_remove/main.tf
 delete mode 100644 modules/host_resolution_remove/outputs.tf
 delete mode 100644 modules/host_resolution_remove/variables.tf
 delete mode 100644 modules/host_resolution_remove/version.tf
 delete mode 100644 modules/key_protect/datasource.tf
 delete mode 100644 modules/key_protect/locals.tf
 delete mode 100644 modules/key_protect/main.tf
 delete mode 100644 modules/key_protect/outputs.tf
 delete mode 100644 modules/key_protect/variables.tf
 delete mode 100644 modules/landing_zone_vsi/templates/afm_bm_user_data.tpl
 delete mode 100644 modules/landing_zone_vsi/templates/afm_bootdrive_user_data/cloud_init.yml
 delete mode 100644 modules/landing_zone_vsi/templates/protocol_bm_user_data.tpl
 delete mode 100644 modules/landing_zone_vsi/templates/protocol_bootdrive_user_data/cloud_init.yml
 delete mode 100644 modules/landing_zone_vsi/templates/storage_bm_user_data.tpl
 delete mode 100644 modules/landing_zone_vsi/templates/storage_bmtb_user_data.tpl
 delete mode 100644 modules/landing_zone_vsi/templates/storage_bootdrive_user_data/cloud_init.yml
 delete mode 100644 modules/landing_zone_vsi/templates/storage_tb_bootdrive_user_data/cloud_init.yml
 delete mode 100644 samples/configs/scale_catalog_values.json
 delete mode 100644 samples/configs/scale_schematics_values.json
 delete mode 100644 solutions/scale/datasource.tf
 delete mode 100644 tests/data/scale_config.yml
 delete mode 100644 tests/deployment/scale_deployment.go
 rename tests/lsf_tests/{lsf_resource_exemptions.go => resource_exemptions.go} (100%)
 delete mode 100644 tests/scale_tests/scale_resource_exemptions.go
 delete mode 100644 tests/scale_tests/scale_setup.go
 delete mode 100644 tools/access-management/README.md
 delete mode 100755 tools/access-management/permissions.sh
 delete mode 100644 tools/minimal-demo-prod-scripts/README.md
 delete mode 100644 tools/minimal-demo-prod-scripts/catalog_values_demo_deployment.json
 delete mode 100644 tools/minimal-demo-prod-scripts/catalog_values_minimal_deployment.json
 delete mode 100644 tools/minimal-demo-prod-scripts/catalog_values_production_deployment.json
 delete mode 100755 tools/minimal-demo-prod-scripts/cp.sh
 delete mode 100755 tools/minimal-demo-prod-scripts/create_lsf_environment.sh
 delete mode 100755 tools/minimal-demo-prod-scripts/destroy.sh
 delete mode 100755 tools/minimal-demo-prod-scripts/jump.sh
 delete mode 100755 tools/minimal-demo-prod-scripts/show.sh
 delete mode 100755 tools/minimal-demo-prod-scripts/submit.sh

diff --git a/.catalog-onboard-pipeline.yaml b/.catalog-onboard-pipeline.yaml
index 78ac1103..4c9003c4 100644
--- a/.catalog-onboard-pipeline.yaml
+++ b/.catalog-onboard-pipeline.yaml
@@ -22,23 +22,3 @@ offerings:
         region: us-south
       # pre_validation: tests/scripts/pre-validation.sh # optionally run a command before validation runs
       # post_validation: tests/scripts/post-validation.sh # optionally run a command after validation completes
-- name: deploy-arch-ibm-storage-scale # must match the offering name in the ibm_catalog.json
-  kind: solution
-  catalog_id: 90717ada-be34-4b82-a0d9-0f225f8dbd76
-  offering_id: 33105573-84df-4279-9efa-48887456fa6d
-  # list all of the variations (flavors) you have included in the ibm_catalog.json
-  variations:
-    - name: Cluster-with-Scale
-      mark_ready: false # have pipeline mark as visible if validation passes
-      install_type: fullstack # ensure value matches what is in ibm_catalog.json (fullstack or extension)
-      destroy_resources_on_failure: false # defaults to false if not specified so resources can be inspected to debug failures during validation
-      destroy_workspace_on_failure: false # defaults to false if not specified so schematics workspace can be inspected to debug failures during validation
-      import_only: false # defaults to false - set to true if you do not want to do any validation, but be aware offering can't be publish if not validated
-      validation_rg: validation # the resource group in which to do validation in. Will be created if does not exist. If not specified, default value is 'validation'
-      # scc details needed if your offering is claiming any compliance controls
-      scc:
-        # must be an instance in the same account the validation is being done in
-        instance_id: 1c7d5f78-9262-44c3-b779-b28fe4d88c37
-        region: us-south
-      # pre_validation: tests/scripts/pre-validation.sh # optionally run a command before validation runs
-      # post_validation: tests/scripts/post-validation.sh # optionally run a command after validation completes
diff --git a/.secrets.baseline b/.secrets.baseline
index 31f31b7c..e72e0cdb 100644
--- a/.secrets.baseline
+++ b/.secrets.baseline
@@ -3,7 +3,7 @@
     "files": "^.secrets.baseline$",
     "lines": null
   },
-  "generated_at": "2025-09-29T12:05:07Z",
+  "generated_at": "2025-06-19T07:38:57Z",
   "plugins_used": [
     {
      "name": "AWSKeyDetector"
    }
@@ -102,152 +102,6 @@
         "type": "Secret Keyword",
         "verified_result": null
       }
-    ],
-    "samples/configs/scale_catalog_values.json": [
-      {
-        "hashed_secret": "2e7ec5f9611439242fd8e30e3e13bc36baff526c",
-        "is_secret": true,
-        "is_verified": false,
-        "line_number": 10,
-        "type": "Secret Keyword",
-        "verified_result": null
-      },
-      {
-        "hashed_secret": "1f5e25be9b575e9f5d39c82dfd1d9f4d73f1975c",
-        "is_secret": true,
-        "is_verified": false,
-        "line_number": 45,
-        "type": "Secret Keyword",
-        "verified_result": null
-      },
-      {
-        "hashed_secret": "b295b04949a98dc50ba65adcddd588077b93ab3c",
-        "is_secret": true,
-        "is_verified": false,
-        "line_number": 62,
-        "type": "Secret Keyword",
-        "verified_result": null
-      }
-    ],
-    "tools/minimal-demo-prod-scripts/README.md": [
-      {
-        "hashed_secret": "89a6cfe2a229151e8055abee107d45ed087bbb4f",
-        "is_secret": true,
-        "is_verified": false,
-        "line_number": 28,
-        "type": "Secret Keyword",
-        "verified_result": null
-      },
-      {
-        "hashed_secret": "365b78d42089abe4583503eded60fa7c1b3e1cd0",
-        "is_secret": true,
-        "is_verified": false,
-        "line_number": 47,
-        "type": "Secret Keyword",
-        "verified_result": null
-      }
-    ],
-    "tools/minimal-demo-prod-scripts/catalog_values_demo_deployment.json": [
-      {
-        "hashed_secret": "216168000275f83a7bc3599e708c5bafab959783",
-        "is_secret": true,
-        "is_verified": false,
-        "line_number": 2,
-        "type": "Secret Keyword",
-        "verified_result": null
-      },
-      {
-        "hashed_secret": "55aac310878e11ce14b29027f77e58cb7c4fe7a4",
-        "is_secret": true,
-        "is_verified": false,
-        "line_number": 8,
-        "type": "Secret Keyword",
-        "verified_result": null
-      },
-      {
-        "hashed_secret": "1f5e25be9b575e9f5d39c82dfd1d9f4d73f1975c",
-        "is_secret": true,
-        "is_verified": false,
-        "line_number": 56,
-        "type": "Secret Keyword",
-        "verified_result": null
-      },
-      {
-        "hashed_secret": "b295b04949a98dc50ba65adcddd588077b93ab3c",
-        "is_secret": true,
-        "is_verified": false,
-        "line_number": 65,
-        "type": "Secret Keyword",
-        "verified_result": null
-      }
-    ],
-    "tools/minimal-demo-prod-scripts/catalog_values_minimal_deployment.json": [
-      {
-        "hashed_secret": "216168000275f83a7bc3599e708c5bafab959783",
-        "is_secret": true,
-        "is_verified": false,
-        "line_number": 2,
-        "type": "Secret Keyword",
-        "verified_result": null
-      },
-      {
-        "hashed_secret": "55aac310878e11ce14b29027f77e58cb7c4fe7a4",
-        "is_secret": true,
-        "is_verified": false,
-        "line_number": 8,
-        "type": "Secret Keyword",
-        "verified_result": null
-      },
-      {
-        "hashed_secret": "1f5e25be9b575e9f5d39c82dfd1d9f4d73f1975c",
-        "is_secret": true,
-        "is_verified": false,
-        "line_number": 56,
-        "type": "Secret Keyword",
-        "verified_result": null
-      },
-      {
-        "hashed_secret": "b295b04949a98dc50ba65adcddd588077b93ab3c",
-        "is_secret": true,
-        "is_verified": false,
-        "line_number": 65,
-        "type": "Secret Keyword",
-        "verified_result": null
-      }
-    ],
-    "tools/minimal-demo-prod-scripts/catalog_values_production_deployment.json": [
-      {
-        "hashed_secret": "216168000275f83a7bc3599e708c5bafab959783",
-        "is_secret": true,
-        "is_verified": false,
-        "line_number": 2,
-        "type": "Secret Keyword",
-        "verified_result": null
-      },
-      {
-        "hashed_secret": "55aac310878e11ce14b29027f77e58cb7c4fe7a4",
-        "is_secret": true,
-        "is_verified": false,
-        "line_number": 8,
-        "type": "Secret Keyword",
-        "verified_result": null
-      },
-      {
-        "hashed_secret": "1f5e25be9b575e9f5d39c82dfd1d9f4d73f1975c",
-        "is_secret": true,
-        "is_verified": false,
-        "line_number": 56,
-        "type": "Secret Keyword",
-        "verified_result": null
-      },
-      {
-        "hashed_secret": "b295b04949a98dc50ba65adcddd588077b93ab3c",
-        "is_secret": true,
-        "is_verified": false,
-        "line_number": 65,
-        "type": "Secret Keyword",
-        "verified_result": null
-      }
     ]
   },
   "version": "0.13.1+ibm.62.dss",
diff --git a/.tekton/hpcaas/hpcaas-pr-pipeline/hpcaas-pipeline-git-pr-status.yaml b/.tekton/hpcaas/hpcaas-pr-pipeline/hpcaas-pipeline-git-pr-status.yaml
index c6b3d1b1..0bb19d65 100644
--- a/.tekton/hpcaas/hpcaas-pr-pipeline/hpcaas-pipeline-git-pr-status.yaml
+++ b/.tekton/hpcaas/hpcaas-pr-pipeline/hpcaas-pipeline-git-pr-status.yaml
@@ -61,7 +61,7 @@ spec:
       description: Name of the custom image that you want to use to create virtual server instances in your IBM Cloud account to deploy the IBM Cloud HPC cluster management nodes. By default, the solution uses a RHEL88 base image with additional software packages mentioned [here](https://cloud.ibm.com/docs/ibm-spectrum-lsf#create-custom-image). If you would like to include your application-specific binary files, follow the instructions in [ Planning for custom images ](https://cloud.ibm.com/docs/vpc?topic=vpc-planning-custom-images) to create your own custom image and use that to build the IBM Cloud HPC cluster through this offering.
       default: ""
     - name: cluster_name
-      description: Ensure that you have received the cluster ID from IBM technical sales. A unique identifier for HPC cluster used by IBM Cloud HPC to differentiate different HPC clusters within the same reservation. This can be up to 39 alphanumeric characters including the underscore (_), the hyphen (-), and the period (.) characters. You cannot change the cluster ID after deployment.
+      description: Ensure that you have received the cluster ID from IBM technical sales. A unique identifer for HPC cluster used by IBM Cloud HPC to differentiate different HPC clusters within the same reservation. This can be up to 39 alphanumeric characters including the underscore (_), the hyphen (-), and the period (.) characters. You cannot change the cluster ID after deployment.
default: "" - name: reservation_id description: Ensure that you have received the reservation ID from IBM technical sales. Reservation ID is a unique identifier to distinguish different IBM Cloud HPC service agreements. It must start with a letter and can only contain letters, numbers, hyphens (-), or underscores (_). diff --git a/.tekton/hpcaas/hpcaas-pr-pipeline/listener-git-pr-status.yaml b/.tekton/hpcaas/hpcaas-pr-pipeline/listener-git-pr-status.yaml index fae156ad..382702fb 100644 --- a/.tekton/hpcaas/hpcaas-pr-pipeline/listener-git-pr-status.yaml +++ b/.tekton/hpcaas/hpcaas-pr-pipeline/listener-git-pr-status.yaml @@ -56,7 +56,7 @@ spec: description: Name of the custom image that you want to use to create virtual server instances in your IBM Cloud account to deploy the IBM Cloud HPC cluster management nodes. By default, the solution uses a RHEL88 base image with additional software packages mentioned [here](https://cloud.ibm.com/docs/ibm-spectrum-lsf#create-custom-image). If you would like to include your application-specific binary files, follow the instructions in [ Planning for custom images ](https://cloud.ibm.com/docs/vpc?topic=vpc-planning-custom-images) to create your own custom image and use that to build the IBM Cloud HPC cluster through this offering. default: "" - name: cluster_name - description: Ensure that you have received the cluster ID from IBM technical sales. A unique identifier for HPC cluster used by IBM Cloud HPC to differentiate different HPC clusters within the same reservation. This can be up to 39 alphanumeric characters including the underscore (_), the hyphen (-), and the period (.) characters. You cannot change the cluster ID after deployment. + description: Ensure that you have received the cluster ID from IBM technical sales. A unique identifer for HPC cluster used by IBM Cloud HPC to differentiate different HPC clusters within the same reservation. This can be up to 39 alphanumeric characters including the underscore (_), the hyphen (-), and the period (.) characters. You cannot change the cluster ID after deployment. default: "" - name: reservation_id description: Ensure that you have received the reservation ID from IBM technical sales. Reservation ID is a unique identifier to distinguish different IBM Cloud HPC service agreements. It must start with a letter and can only contain letters, numbers, hyphens (-), or underscores (_). @@ -65,7 +65,7 @@ spec: default: "" description: The IBM Cloud zone name within the selected region where the IBM Cloud HPC cluster should be deployed and requires a single zone input value. Supported zones are eu-de-2 and eu-de-3 for eu-de, us-east-1 and us-east-3 for us-east, and us-south-1 for us-south. The management nodes, file storage shares, and compute nodes will be deployed in the same zone.[Learn more](https://cloud.ibm.com/docs/vpc?topic=vpc-creating-a-vpc-in-a-different-region#get-zones-using-the-cli). - name: us_east_cluster_id - description: Ensure that you have received the cluster ID from IBM technical sales. A unique identifier for HPC cluster used by IBM Cloud HPC to differentiate different HPC clusters within the same reservation. This can be up to 39 alphanumeric characters including the underscore (_), the hyphen (-), and the period (.) characters. You cannot change the cluster ID after deployment. + description: Ensure that you have received the cluster ID from IBM technical sales. 
A unique identifer for HPC cluster used by IBM Cloud HPC to differentiate different HPC clusters within the same reservation. This can be up to 39 alphanumeric characters including the underscore (_), the hyphen (-), and the period (.) characters. You cannot change the cluster ID after deployment. default: "" - name: us_east_reservation_id description: Ensure that you have received the reservation ID from IBM technical sales. Reservation ID is a unique identifier to distinguish different IBM Cloud HPC service agreements. It must start with a letter and can only contain letters, numbers, hyphens (-), or underscores (_). @@ -74,7 +74,7 @@ spec: default: "" description: The IBM Cloud zone name within the selected region where the IBM Cloud HPC cluster should be deployed and requires a single zone input value. Supported zones are eu-de-2 and eu-de-3 for eu-de, us-east-1 and us-east-3 for us-east, and us-south-1 for us-south. The management nodes, file storage shares, and compute nodes will be deployed in the same zone.[Learn more](https://cloud.ibm.com/docs/vpc?topic=vpc-creating-a-vpc-in-a-different-region#get-zones-using-the-cli). - name: eu_de_cluster_id - description: Ensure that you have received the cluster ID from IBM technical sales. A unique identifier for HPC cluster used by IBM Cloud HPC to differentiate different HPC clusters within the same reservation. This can be up to 39 alphanumeric characters including the underscore (_), the hyphen (-), and the period (.) characters. You cannot change the cluster ID after deployment. + description: Ensure that you have received the cluster ID from IBM technical sales. A unique identifer for HPC cluster used by IBM Cloud HPC to differentiate different HPC clusters within the same reservation. This can be up to 39 alphanumeric characters including the underscore (_), the hyphen (-), and the period (.) characters. You cannot change the cluster ID after deployment. default: "" - name: eu_de_reservation_id description: Ensure that you have received the reservation ID from IBM technical sales. Reservation ID is a unique identifier to distinguish different IBM Cloud HPC service agreements. It must start with a letter and can only contain letters, numbers, hyphens (-), or underscores (_). @@ -83,7 +83,7 @@ spec: default: "" description: The IBM Cloud zone name within the selected region where the IBM Cloud HPC cluster should be deployed and requires a single zone input value. Supported zones are eu-de-2 and eu-de-3 for eu-de, us-east-1 and us-east-3 for us-east, and us-south-1 for us-south. The management nodes, file storage shares, and compute nodes will be deployed in the same zone.[Learn more](https://cloud.ibm.com/docs/vpc?topic=vpc-creating-a-vpc-in-a-different-region#get-zones-using-the-cli). - name: us_south_cluster_id - description: Ensure that you have received the cluster ID from IBM technical sales. A unique identifier for HPC cluster used by IBM Cloud HPC to differentiate different HPC clusters within the same reservation. This can be up to 39 alphanumeric characters including the underscore (_), the hyphen (-), and the period (.) characters. You cannot change the cluster ID after deployment. + description: Ensure that you have received the cluster ID from IBM technical sales. A unique identifer for HPC cluster used by IBM Cloud HPC to differentiate different HPC clusters within the same reservation. This can be up to 39 alphanumeric characters including the underscore (_), the hyphen (-), and the period (.) characters. 
You cannot change the cluster ID after deployment. default: "" - name: us_south_reservation_id description: Ensure that you have received the reservation ID from IBM technical sales. Reservation ID is a unique identifier to distinguish different IBM Cloud HPC service agreements. It must start with a letter and can only contain letters, numbers, hyphens (-), or underscores (_). diff --git a/.tekton/hpcaas/hpcaas-regression-pipeline/hpcaas-pipeline-git-trigger.yaml b/.tekton/hpcaas/hpcaas-regression-pipeline/hpcaas-pipeline-git-trigger.yaml index 14fcb446..5118a2da 100644 --- a/.tekton/hpcaas/hpcaas-regression-pipeline/hpcaas-pipeline-git-trigger.yaml +++ b/.tekton/hpcaas/hpcaas-regression-pipeline/hpcaas-pipeline-git-trigger.yaml @@ -63,7 +63,7 @@ spec: description: Name of the custom image that you want to use to create virtual server instances in your IBM Cloud account to deploy the IBM Cloud HPC cluster management nodes. By default, the solution uses a RHEL88 base image with additional software packages mentioned [here](https://cloud.ibm.com/docs/ibm-spectrum-lsf#create-custom-image). If you would like to include your application-specific binary files, follow the instructions in [ Planning for custom images ](https://cloud.ibm.com/docs/vpc?topic=vpc-planning-custom-images) to create your own custom image and use that to build the IBM Cloud HPC cluster through this offering. default: "" - name: cluster_name - description: Ensure that you have received the cluster ID from IBM technical sales. A unique identifier for HPC cluster used by IBM Cloud HPC to differentiate different HPC clusters within the same reservation. This can be up to 39 alphanumeric characters including the underscore (_), the hyphen (-), and the period (.) characters. You cannot change the cluster ID after deployment. + description: Ensure that you have received the cluster ID from IBM technical sales. A unique identifer for HPC cluster used by IBM Cloud HPC to differentiate different HPC clusters within the same reservation. This can be up to 39 alphanumeric characters including the underscore (_), the hyphen (-), and the period (.) characters. You cannot change the cluster ID after deployment. default: "" - name: reservation_id description: Ensure that you have received the reservation ID from IBM technical sales. Reservation ID is a unique identifier to distinguish different IBM Cloud HPC service agreements. It must start with a letter and can only contain letters, numbers, hyphens (-), or underscores (_). @@ -72,7 +72,7 @@ spec: default: "" description: The IBM Cloud zone name within the selected region where the IBM Cloud HPC cluster should be deployed and requires a single zone input value. Supported zones are eu-de-2 and eu-de-3 for eu-de, us-east-1 and us-east-3 for us-east, and us-south-1 for us-south. The management nodes, file storage shares, and compute nodes will be deployed in the same zone.[Learn more](https://cloud.ibm.com/docs/vpc?topic=vpc-creating-a-vpc-in-a-different-region#get-zones-using-the-cli). - name: us_east_cluster_id - description: Ensure that you have received the cluster ID from IBM technical sales. A unique identifier for HPC cluster used by IBM Cloud HPC to differentiate different HPC clusters within the same reservation. This can be up to 39 alphanumeric characters including the underscore (_), the hyphen (-), and the period (.) characters. You cannot change the cluster ID after deployment. + description: Ensure that you have received the cluster ID from IBM technical sales. 
A unique identifer for HPC cluster used by IBM Cloud HPC to differentiate different HPC clusters within the same reservation. This can be up to 39 alphanumeric characters including the underscore (_), the hyphen (-), and the period (.) characters. You cannot change the cluster ID after deployment. default: "" - name: us_east_reservation_id description: Ensure that you have received the reservation ID from IBM technical sales. Reservation ID is a unique identifier to distinguish different IBM Cloud HPC service agreements. It must start with a letter and can only contain letters, numbers, hyphens (-), or underscores (_). @@ -81,7 +81,7 @@ spec: default: "" description: The IBM Cloud zone name within the selected region where the IBM Cloud HPC cluster should be deployed and requires a single zone input value. Supported zones are eu-de-2 and eu-de-3 for eu-de, us-east-1 and us-east-3 for us-east, and us-south-1 for us-south. The management nodes, file storage shares, and compute nodes will be deployed in the same zone.[Learn more](https://cloud.ibm.com/docs/vpc?topic=vpc-creating-a-vpc-in-a-different-region#get-zones-using-the-cli). - name: eu_de_cluster_id - description: Ensure that you have received the cluster ID from IBM technical sales. A unique identifier for HPC cluster used by IBM Cloud HPC to differentiate different HPC clusters within the same reservation. This can be up to 39 alphanumeric characters including the underscore (_), the hyphen (-), and the period (.) characters. You cannot change the cluster ID after deployment. + description: Ensure that you have received the cluster ID from IBM technical sales. A unique identifer for HPC cluster used by IBM Cloud HPC to differentiate different HPC clusters within the same reservation. This can be up to 39 alphanumeric characters including the underscore (_), the hyphen (-), and the period (.) characters. You cannot change the cluster ID after deployment. default: "" - name: eu_de_reservation_id description: Ensure that you have received the reservation ID from IBM technical sales. Reservation ID is a unique identifier to distinguish different IBM Cloud HPC service agreements. It must start with a letter and can only contain letters, numbers, hyphens (-), or underscores (_). @@ -90,7 +90,7 @@ spec: default: "" description: The IBM Cloud zone name within the selected region where the IBM Cloud HPC cluster should be deployed and requires a single zone input value. Supported zones are eu-de-2 and eu-de-3 for eu-de, us-east-1 and us-east-3 for us-east, and us-south-1 for us-south. The management nodes, file storage shares, and compute nodes will be deployed in the same zone.[Learn more](https://cloud.ibm.com/docs/vpc?topic=vpc-creating-a-vpc-in-a-different-region#get-zones-using-the-cli). - name: us_south_cluster_id - description: Ensure that you have received the cluster ID from IBM technical sales. A unique identifier for HPC cluster used by IBM Cloud HPC to differentiate different HPC clusters within the same reservation. This can be up to 39 alphanumeric characters including the underscore (_), the hyphen (-), and the period (.) characters. You cannot change the cluster ID after deployment. + description: Ensure that you have received the cluster ID from IBM technical sales. A unique identifer for HPC cluster used by IBM Cloud HPC to differentiate different HPC clusters within the same reservation. This can be up to 39 alphanumeric characters including the underscore (_), the hyphen (-), and the period (.) characters. 
You cannot change the cluster ID after deployment. default: "" - name: us_south_reservation_id description: Ensure that you have received the reservation ID from IBM technical sales. Reservation ID is a unique identifier to distinguish different IBM Cloud HPC service agreements. It must start with a letter and can only contain letters, numbers, hyphens (-), or underscores (_). diff --git a/.tekton/hpcaas/hpcaas-regression-pipeline/listener-git-trigger.yaml b/.tekton/hpcaas/hpcaas-regression-pipeline/listener-git-trigger.yaml index afe95a31..5b4ab0ad 100644 --- a/.tekton/hpcaas/hpcaas-regression-pipeline/listener-git-trigger.yaml +++ b/.tekton/hpcaas/hpcaas-regression-pipeline/listener-git-trigger.yaml @@ -63,7 +63,7 @@ spec: description: Name of the custom image that you want to use to create virtual server instances in your IBM Cloud account to deploy the IBM Cloud HPC cluster management nodes. By default, the solution uses a RHEL88 base image with additional software packages mentioned [here](https://cloud.ibm.com/docs/ibm-spectrum-lsf#create-custom-image). If you would like to include your application-specific binary files, follow the instructions in [ Planning for custom images ](https://cloud.ibm.com/docs/vpc?topic=vpc-planning-custom-images) to create your own custom image and use that to build the IBM Cloud HPC cluster through this offering. default: "" - name: cluster_name - description: Ensure that you have received the cluster ID from IBM technical sales. A unique identifier for HPC cluster used by IBM Cloud HPC to differentiate different HPC clusters within the same reservation. This can be up to 39 alphanumeric characters including the underscore (_), the hyphen (-), and the period (.) characters. You cannot change the cluster ID after deployment. + description: Ensure that you have received the cluster ID from IBM technical sales. A unique identifer for HPC cluster used by IBM Cloud HPC to differentiate different HPC clusters within the same reservation. This can be up to 39 alphanumeric characters including the underscore (_), the hyphen (-), and the period (.) characters. You cannot change the cluster ID after deployment. default: "" - name: reservation_id description: Ensure that you have received the reservation ID from IBM technical sales. Reservation ID is a unique identifier to distinguish different IBM Cloud HPC service agreements. It must start with a letter and can only contain letters, numbers, hyphens (-), or underscores (_). @@ -72,7 +72,7 @@ spec: default: "" description: The IBM Cloud zone name within the selected region where the IBM Cloud HPC cluster should be deployed and requires a single zone input value. Supported zones are eu-de-2 and eu-de-3 for eu-de, us-east-1 and us-east-3 for us-east, and us-south-1 for us-south. The management nodes, file storage shares, and compute nodes will be deployed in the same zone.[Learn more](https://cloud.ibm.com/docs/vpc?topic=vpc-creating-a-vpc-in-a-different-region#get-zones-using-the-cli). - name: us_east_cluster_id - description: Ensure that you have received the cluster ID from IBM technical sales. A unique identifier for HPC cluster used by IBM Cloud HPC to differentiate different HPC clusters within the same reservation. This can be up to 39 alphanumeric characters including the underscore (_), the hyphen (-), and the period (.) characters. You cannot change the cluster ID after deployment. + description: Ensure that you have received the cluster ID from IBM technical sales. 
A unique identifer for HPC cluster used by IBM Cloud HPC to differentiate different HPC clusters within the same reservation. This can be up to 39 alphanumeric characters including the underscore (_), the hyphen (-), and the period (.) characters. You cannot change the cluster ID after deployment. default: "" - name: us_east_reservation_id description: Ensure that you have received the reservation ID from IBM technical sales. Reservation ID is a unique identifier to distinguish different IBM Cloud HPC service agreements. It must start with a letter and can only contain letters, numbers, hyphens (-), or underscores (_). @@ -81,7 +81,7 @@ spec: default: "" description: The IBM Cloud zone name within the selected region where the IBM Cloud HPC cluster should be deployed and requires a single zone input value. Supported zones are eu-de-2 and eu-de-3 for eu-de, us-east-1 and us-east-3 for us-east, and us-south-1 for us-south. The management nodes, file storage shares, and compute nodes will be deployed in the same zone.[Learn more](https://cloud.ibm.com/docs/vpc?topic=vpc-creating-a-vpc-in-a-different-region#get-zones-using-the-cli). - name: eu_de_cluster_id - description: Ensure that you have received the cluster ID from IBM technical sales. A unique identifier for HPC cluster used by IBM Cloud HPC to differentiate different HPC clusters within the same reservation. This can be up to 39 alphanumeric characters including the underscore (_), the hyphen (-), and the period (.) characters. You cannot change the cluster ID after deployment. + description: Ensure that you have received the cluster ID from IBM technical sales. A unique identifer for HPC cluster used by IBM Cloud HPC to differentiate different HPC clusters within the same reservation. This can be up to 39 alphanumeric characters including the underscore (_), the hyphen (-), and the period (.) characters. You cannot change the cluster ID after deployment. default: "" - name: eu_de_reservation_id description: Ensure that you have received the reservation ID from IBM technical sales. Reservation ID is a unique identifier to distinguish different IBM Cloud HPC service agreements. It must start with a letter and can only contain letters, numbers, hyphens (-), or underscores (_). @@ -90,7 +90,7 @@ spec: default: "" description: The IBM Cloud zone name within the selected region where the IBM Cloud HPC cluster should be deployed and requires a single zone input value. Supported zones are eu-de-2 and eu-de-3 for eu-de, us-east-1 and us-east-3 for us-east, and us-south-1 for us-south. The management nodes, file storage shares, and compute nodes will be deployed in the same zone.[Learn more](https://cloud.ibm.com/docs/vpc?topic=vpc-creating-a-vpc-in-a-different-region#get-zones-using-the-cli). - name: us_south_cluster_id - description: Ensure that you have received the cluster ID from IBM technical sales. A unique identifier for HPC cluster used by IBM Cloud HPC to differentiate different HPC clusters within the same reservation. This can be up to 39 alphanumeric characters including the underscore (_), the hyphen (-), and the period (.) characters. You cannot change the cluster ID after deployment. + description: Ensure that you have received the cluster ID from IBM technical sales. A unique identifer for HPC cluster used by IBM Cloud HPC to differentiate different HPC clusters within the same reservation. This can be up to 39 alphanumeric characters including the underscore (_), the hyphen (-), and the period (.) characters. 
You cannot change the cluster ID after deployment. default: "" - name: us_south_reservation_id description: Ensure that you have received the reservation ID from IBM technical sales. Reservation ID is a unique identifier to distinguish different IBM Cloud HPC service agreements. It must start with a letter and can only contain letters, numbers, hyphens (-), or underscores (_). diff --git a/.tekton/hpcaas/hpcaas_task/hpcaas-task-infra-rhel.yaml b/.tekton/hpcaas/hpcaas_task/hpcaas-task-infra-rhel.yaml index 690d7992..68af49c8 100644 --- a/.tekton/hpcaas/hpcaas_task/hpcaas-task-infra-rhel.yaml +++ b/.tekton/hpcaas/hpcaas_task/hpcaas-task-infra-rhel.yaml @@ -54,7 +54,7 @@ spec: description: Name of the custom image that you want to use to create virtual server instances in your IBM Cloud account to deploy the IBM Cloud HPC cluster login node. By default, the solution uses a RHEL 8-6 OS image with additional software packages mentioned [here](https://cloud.ibm.com/docs/hpc-spectrum-LSF#create-custom-image). The solution also offers, Ubuntu 22-04 OS base image (hpcaas-lsf10-ubuntu2204-compute-v2). If you would like to include your application-specific binary files, follow the instructions in [ Planning for custom images ](https://cloud.ibm.com/docs/vpc?topic=vpc-planning-custom-images) to create your own custom image and use that to build the IBM Cloud HPC cluster through this offering. default: "" - name: cluster_name - description: Ensure that you have received the cluster ID from IBM technical sales. A unique identifier for HPC cluster used by IBM Cloud HPC to differentiate different HPC clusters within the same reservation. This can be up to 39 alphanumeric characters including the underscore (_), the hyphen (-), and the period (.) characters. You cannot change the cluster ID after deployment. + description: Ensure that you have received the cluster ID from IBM technical sales. A unique identifer for HPC cluster used by IBM Cloud HPC to differentiate different HPC clusters within the same reservation. This can be up to 39 alphanumeric characters including the underscore (_), the hyphen (-), and the period (.) characters. You cannot change the cluster ID after deployment. default: "" - name: reservation_id description: Ensure that you have received the reservation ID from IBM technical sales. Reservation ID is a unique identifier to distinguish different IBM Cloud HPC service agreements. It must start with a letter and can only contain letters, numbers, hyphens (-), or underscores (_). @@ -63,7 +63,7 @@ spec: default: "" description: The IBM Cloud zone name within the selected region where the IBM Cloud HPC cluster should be deployed and requires a single zone input value. Supported zones are eu-de-2 and eu-de-3 for eu-de, us-east-1 and us-east-3 for us-east, and us-south-1 for us-south. The management nodes, file storage shares, and compute nodes will be deployed in the same zone.[Learn more](https://cloud.ibm.com/docs/vpc?topic=vpc-creating-a-vpc-in-a-different-region#get-zones-using-the-cli). - name: us_east_cluster_id - description: Ensure that you have received the cluster ID from IBM technical sales. A unique identifier for HPC cluster used by IBM Cloud HPC to differentiate different HPC clusters within the same reservation. This can be up to 39 alphanumeric characters including the underscore (_), the hyphen (-), and the period (.) characters. You cannot change the cluster ID after deployment. + description: Ensure that you have received the cluster ID from IBM technical sales. 
A unique identifer for HPC cluster used by IBM Cloud HPC to differentiate different HPC clusters within the same reservation. This can be up to 39 alphanumeric characters including the underscore (_), the hyphen (-), and the period (.) characters. You cannot change the cluster ID after deployment. default: "" - name: us_east_reservation_id description: Ensure that you have received the reservation ID from IBM technical sales. Reservation ID is a unique identifier to distinguish different IBM Cloud HPC service agreements. It must start with a letter and can only contain letters, numbers, hyphens (-), or underscores (_). @@ -72,7 +72,7 @@ spec: default: "" description: The IBM Cloud zone name within the selected region where the IBM Cloud HPC cluster should be deployed and requires a single zone input value. Supported zones are eu-de-2 and eu-de-3 for eu-de, us-east-1 and us-east-3 for us-east, and us-south-1 for us-south. The management nodes, file storage shares, and compute nodes will be deployed in the same zone.[Learn more](https://cloud.ibm.com/docs/vpc?topic=vpc-creating-a-vpc-in-a-different-region#get-zones-using-the-cli). - name: eu_de_cluster_id - description: Ensure that you have received the cluster ID from IBM technical sales. A unique identifier for HPC cluster used by IBM Cloud HPC to differentiate different HPC clusters within the same reservation. This can be up to 39 alphanumeric characters including the underscore (_), the hyphen (-), and the period (.) characters. You cannot change the cluster ID after deployment. + description: Ensure that you have received the cluster ID from IBM technical sales. A unique identifer for HPC cluster used by IBM Cloud HPC to differentiate different HPC clusters within the same reservation. This can be up to 39 alphanumeric characters including the underscore (_), the hyphen (-), and the period (.) characters. You cannot change the cluster ID after deployment. default: "" - name: eu_de_reservation_id description: Ensure that you have received the reservation ID from IBM technical sales. Reservation ID is a unique identifier to distinguish different IBM Cloud HPC service agreements. It must start with a letter and can only contain letters, numbers, hyphens (-), or underscores (_). @@ -81,7 +81,7 @@ spec: default: "" description: The IBM Cloud zone name within the selected region where the IBM Cloud HPC cluster should be deployed and requires a single zone input value. Supported zones are eu-de-2 and eu-de-3 for eu-de, us-east-1 and us-east-3 for us-east, and us-south-1 for us-south. The management nodes, file storage shares, and compute nodes will be deployed in the same zone.[Learn more](https://cloud.ibm.com/docs/vpc?topic=vpc-creating-a-vpc-in-a-different-region#get-zones-using-the-cli). - name: us_south_cluster_id - description: Ensure that you have received the cluster ID from IBM technical sales. A unique identifier for HPC cluster used by IBM Cloud HPC to differentiate different HPC clusters within the same reservation. This can be up to 39 alphanumeric characters including the underscore (_), the hyphen (-), and the period (.) characters. You cannot change the cluster ID after deployment. + description: Ensure that you have received the cluster ID from IBM technical sales. A unique identifer for HPC cluster used by IBM Cloud HPC to differentiate different HPC clusters within the same reservation. This can be up to 39 alphanumeric characters including the underscore (_), the hyphen (-), and the period (.) characters. 
You cannot change the cluster ID after deployment. default: "" - name: us_south_reservation_id description: Ensure that you have received the reservation ID from IBM technical sales. Reservation ID is a unique identifier to distinguish different IBM Cloud HPC service agreements. It must start with a letter and can only contain letters, numbers, hyphens (-), or underscores (_). diff --git a/.tekton/hpcaas/hpcaas_task/hpcaas-task-negative.yaml b/.tekton/hpcaas/hpcaas_task/hpcaas-task-negative.yaml index 5a7e93e0..2bf4508d 100644 --- a/.tekton/hpcaas/hpcaas_task/hpcaas-task-negative.yaml +++ b/.tekton/hpcaas/hpcaas_task/hpcaas-task-negative.yaml @@ -54,7 +54,7 @@ spec: description: Name of the custom image that you want to use to create virtual server instances in your IBM Cloud account to deploy the IBM Cloud HPC cluster login node. By default, the solution uses a RHEL 8-6 OS image with additional software packages mentioned [here](https://cloud.ibm.com/docs/hpc-spectrum-LSF#create-custom-image). The solution also offers, Ubuntu 22-04 OS base image (hpcaas-lsf10-ubuntu2204-compute-v2). If you would like to include your application-specific binary files, follow the instructions in [ Planning for custom images ](https://cloud.ibm.com/docs/vpc?topic=vpc-planning-custom-images) to create your own custom image and use that to build the IBM Cloud HPC cluster through this offering. default: "" - name: cluster_name - description: Ensure that you have received the cluster ID from IBM technical sales. A unique identifier for HPC cluster used by IBM Cloud HPC to differentiate different HPC clusters within the same reservation. This can be up to 39 alphanumeric characters including the underscore (_), the hyphen (-), and the period (.) characters. You cannot change the cluster ID after deployment. + description: Ensure that you have received the cluster ID from IBM technical sales. A unique identifer for HPC cluster used by IBM Cloud HPC to differentiate different HPC clusters within the same reservation. This can be up to 39 alphanumeric characters including the underscore (_), the hyphen (-), and the period (.) characters. You cannot change the cluster ID after deployment. default: "" - name: reservation_id description: Ensure that you have received the reservation ID from IBM technical sales. Reservation ID is a unique identifier to distinguish different IBM Cloud HPC service agreements. It must start with a letter and can only contain letters, numbers, hyphens (-), or underscores (_). @@ -63,7 +63,7 @@ spec: default: "" description: The IBM Cloud zone name within the selected region where the IBM Cloud HPC cluster should be deployed and requires a single zone input value. Supported zones are eu-de-2 and eu-de-3 for eu-de, us-east-1 and us-east-3 for us-east, and us-south-1 for us-south. The management nodes, file storage shares, and compute nodes will be deployed in the same zone.[Learn more](https://cloud.ibm.com/docs/vpc?topic=vpc-creating-a-vpc-in-a-different-region#get-zones-using-the-cli). - name: us_east_cluster_id - description: Ensure that you have received the cluster ID from IBM technical sales. A unique identifier for HPC cluster used by IBM Cloud HPC to differentiate different HPC clusters within the same reservation. This can be up to 39 alphanumeric characters including the underscore (_), the hyphen (-), and the period (.) characters. You cannot change the cluster ID after deployment. + description: Ensure that you have received the cluster ID from IBM technical sales. 
A unique identifer for HPC cluster used by IBM Cloud HPC to differentiate different HPC clusters within the same reservation. This can be up to 39 alphanumeric characters including the underscore (_), the hyphen (-), and the period (.) characters. You cannot change the cluster ID after deployment. default: "" - name: us_east_reservation_id description: Ensure that you have received the reservation ID from IBM technical sales. Reservation ID is a unique identifier to distinguish different IBM Cloud HPC service agreements. It must start with a letter and can only contain letters, numbers, hyphens (-), or underscores (_). @@ -72,7 +72,7 @@ spec: default: "" description: The IBM Cloud zone name within the selected region where the IBM Cloud HPC cluster should be deployed and requires a single zone input value. Supported zones are eu-de-2 and eu-de-3 for eu-de, us-east-1 and us-east-3 for us-east, and us-south-1 for us-south. The management nodes, file storage shares, and compute nodes will be deployed in the same zone.[Learn more](https://cloud.ibm.com/docs/vpc?topic=vpc-creating-a-vpc-in-a-different-region#get-zones-using-the-cli). - name: eu_de_cluster_id - description: Ensure that you have received the cluster ID from IBM technical sales. A unique identifier for HPC cluster used by IBM Cloud HPC to differentiate different HPC clusters within the same reservation. This can be up to 39 alphanumeric characters including the underscore (_), the hyphen (-), and the period (.) characters. You cannot change the cluster ID after deployment. + description: Ensure that you have received the cluster ID from IBM technical sales. A unique identifer for HPC cluster used by IBM Cloud HPC to differentiate different HPC clusters within the same reservation. This can be up to 39 alphanumeric characters including the underscore (_), the hyphen (-), and the period (.) characters. You cannot change the cluster ID after deployment. default: "" - name: eu_de_reservation_id description: Ensure that you have received the reservation ID from IBM technical sales. Reservation ID is a unique identifier to distinguish different IBM Cloud HPC service agreements. It must start with a letter and can only contain letters, numbers, hyphens (-), or underscores (_). @@ -81,7 +81,7 @@ spec: default: "" description: The IBM Cloud zone name within the selected region where the IBM Cloud HPC cluster should be deployed and requires a single zone input value. Supported zones are eu-de-2 and eu-de-3 for eu-de, us-east-1 and us-east-3 for us-east, and us-south-1 for us-south. The management nodes, file storage shares, and compute nodes will be deployed in the same zone.[Learn more](https://cloud.ibm.com/docs/vpc?topic=vpc-creating-a-vpc-in-a-different-region#get-zones-using-the-cli). - name: us_south_cluster_id - description: Ensure that you have received the cluster ID from IBM technical sales. A unique identifier for HPC cluster used by IBM Cloud HPC to differentiate different HPC clusters within the same reservation. This can be up to 39 alphanumeric characters including the underscore (_), the hyphen (-), and the period (.) characters. You cannot change the cluster ID after deployment. + description: Ensure that you have received the cluster ID from IBM technical sales. A unique identifer for HPC cluster used by IBM Cloud HPC to differentiate different HPC clusters within the same reservation. This can be up to 39 alphanumeric characters including the underscore (_), the hyphen (-), and the period (.) characters. 
You cannot change the cluster ID after deployment. default: "" - name: us_south_reservation_id description: Ensure that you have received the reservation ID from IBM technical sales. Reservation ID is a unique identifier to distinguish different IBM Cloud HPC service agreements. It must start with a letter and can only contain letters, numbers, hyphens (-), or underscores (_). diff --git a/.tekton/hpcaas/hpcaas_task/hpcaas-task-pr-rhel.yaml b/.tekton/hpcaas/hpcaas_task/hpcaas-task-pr-rhel.yaml index 409108dc..e1f1eaed 100644 --- a/.tekton/hpcaas/hpcaas_task/hpcaas-task-pr-rhel.yaml +++ b/.tekton/hpcaas/hpcaas_task/hpcaas-task-pr-rhel.yaml @@ -52,7 +52,7 @@ spec: description: Name of the custom image that you want to use to create virtual server instances in your IBM Cloud account to deploy the IBM Cloud HPC cluster login node. By default, the solution uses a RHEL 8-6 OS image with additional software packages mentioned [here](https://cloud.ibm.com/docs/hpc-spectrum-LSF#create-custom-image). The solution also offers, Ubuntu 22-04 OS base image (hpcaas-lsf10-ubuntu2204-compute-v2). If you would like to include your application-specific binary files, follow the instructions in [ Planning for custom images ](https://cloud.ibm.com/docs/vpc?topic=vpc-planning-custom-images) to create your own custom image and use that to build the IBM Cloud HPC cluster through this offering. default: "" - name: cluster_name - description: Ensure that you have received the cluster ID from IBM technical sales. A unique identifier for HPC cluster used by IBM Cloud HPC to differentiate different HPC clusters within the same reservation. This can be up to 39 alphanumeric characters including the underscore (_), the hyphen (-), and the period (.) characters. You cannot change the cluster ID after deployment. + description: Ensure that you have received the cluster ID from IBM technical sales. A unique identifer for HPC cluster used by IBM Cloud HPC to differentiate different HPC clusters within the same reservation. This can be up to 39 alphanumeric characters including the underscore (_), the hyphen (-), and the period (.) characters. You cannot change the cluster ID after deployment. default: "" - name: reservation_id description: Ensure that you have received the reservation ID from IBM technical sales. Reservation ID is a unique identifier to distinguish different IBM Cloud HPC service agreements. It must start with a letter and can only contain letters, numbers, hyphens (-), or underscores (_). diff --git a/.tekton/hpcaas/hpcaas_task/hpcaas-task-pr-ubuntu.yaml b/.tekton/hpcaas/hpcaas_task/hpcaas-task-pr-ubuntu.yaml index 76cf702d..07d70594 100644 --- a/.tekton/hpcaas/hpcaas_task/hpcaas-task-pr-ubuntu.yaml +++ b/.tekton/hpcaas/hpcaas_task/hpcaas-task-pr-ubuntu.yaml @@ -52,7 +52,7 @@ spec: description: Name of the custom image that you want to use to create virtual server instances in your IBM Cloud account to deploy the IBM Cloud HPC cluster login node. By default, the solution uses a RHEL 8-6 OS image with additional software packages mentioned [here](https://cloud.ibm.com/docs/hpc-spectrum-LSF#create-custom-image). The solution also offers, Ubuntu 22-04 OS base image (hpcaas-lsf10-ubuntu2204-compute-v2). If you would like to include your application-specific binary files, follow the instructions in [ Planning for custom images ](https://cloud.ibm.com/docs/vpc?topic=vpc-planning-custom-images) to create your own custom image and use that to build the IBM Cloud HPC cluster through this offering. 
default: "" - name: cluster_name - description: Ensure that you have received the cluster ID from IBM technical sales. A unique identifier for HPC cluster used by IBM Cloud HPC to differentiate different HPC clusters within the same reservation. This can be up to 39 alphanumeric characters including the underscore (_), the hyphen (-), and the period (.) characters. You cannot change the cluster ID after deployment. + description: Ensure that you have received the cluster ID from IBM technical sales. A unique identifer for HPC cluster used by IBM Cloud HPC to differentiate different HPC clusters within the same reservation. This can be up to 39 alphanumeric characters including the underscore (_), the hyphen (-), and the period (.) characters. You cannot change the cluster ID after deployment. default: "" - name: reservation_id description: Ensure that you have received the reservation ID from IBM technical sales. Reservation ID is a unique identifier to distinguish different IBM Cloud HPC service agreements. It must start with a letter and can only contain letters, numbers, hyphens (-), or underscores (_). diff --git a/.tekton/hpcaas/hpcaas_task/hpcaas-task-region.yaml b/.tekton/hpcaas/hpcaas_task/hpcaas-task-region.yaml index 7249ac05..2512fee9 100644 --- a/.tekton/hpcaas/hpcaas_task/hpcaas-task-region.yaml +++ b/.tekton/hpcaas/hpcaas_task/hpcaas-task-region.yaml @@ -54,7 +54,7 @@ spec: description: Name of the custom image that you want to use to create virtual server instances in your IBM Cloud account to deploy the IBM Cloud HPC cluster login node. By default, the solution uses a RHEL 8-6 OS image with additional software packages mentioned [here](https://cloud.ibm.com/docs/hpc-spectrum-LSF#create-custom-image). The solution also offers, Ubuntu 22-04 OS base image (hpcaas-lsf10-ubuntu2204-compute-v2). If you would like to include your application-specific binary files, follow the instructions in [ Planning for custom images ](https://cloud.ibm.com/docs/vpc?topic=vpc-planning-custom-images) to create your own custom image and use that to build the IBM Cloud HPC cluster through this offering. default: "" - name: cluster_name - description: Ensure that you have received the cluster ID from IBM technical sales. A unique identifier for HPC cluster used by IBM Cloud HPC to differentiate different HPC clusters within the same reservation. This can be up to 39 alphanumeric characters including the underscore (_), the hyphen (-), and the period (.) characters. You cannot change the cluster ID after deployment. + description: Ensure that you have received the cluster ID from IBM technical sales. A unique identifer for HPC cluster used by IBM Cloud HPC to differentiate different HPC clusters within the same reservation. This can be up to 39 alphanumeric characters including the underscore (_), the hyphen (-), and the period (.) characters. You cannot change the cluster ID after deployment. default: "" - name: reservation_id description: Ensure that you have received the reservation ID from IBM technical sales. Reservation ID is a unique identifier to distinguish different IBM Cloud HPC service agreements. It must start with a letter and can only contain letters, numbers, hyphens (-), or underscores (_). @@ -63,7 +63,7 @@ spec: default: "" description: The IBM Cloud zone name within the selected region where the IBM Cloud HPC cluster should be deployed and requires a single zone input value. 
Supported zones are eu-de-2 and eu-de-3 for eu-de, us-east-1 and us-east-3 for us-east, and us-south-1 for us-south. The management nodes, file storage shares, and compute nodes will be deployed in the same zone.[Learn more](https://cloud.ibm.com/docs/vpc?topic=vpc-creating-a-vpc-in-a-different-region#get-zones-using-the-cli). - name: us_east_cluster_id - description: Ensure that you have received the cluster ID from IBM technical sales. A unique identifier for HPC cluster used by IBM Cloud HPC to differentiate different HPC clusters within the same reservation. This can be up to 39 alphanumeric characters including the underscore (_), the hyphen (-), and the period (.) characters. You cannot change the cluster ID after deployment. + description: Ensure that you have received the cluster ID from IBM technical sales. A unique identifier for HPC cluster used by IBM Cloud HPC to differentiate different HPC clusters within the same reservation. This can be up to 39 alphanumeric characters including the underscore (_), the hyphen (-), and the period (.) characters. You cannot change the cluster ID after deployment. default: "" - name: us_east_reservation_id description: Ensure that you have received the reservation ID from IBM technical sales. Reservation ID is a unique identifier to distinguish different IBM Cloud HPC service agreements. It must start with a letter and can only contain letters, numbers, hyphens (-), or underscores (_). @@ -72,7 +72,7 @@ spec: default: "" description: The IBM Cloud zone name within the selected region where the IBM Cloud HPC cluster should be deployed and requires a single zone input value. Supported zones are eu-de-2 and eu-de-3 for eu-de, us-east-1 and us-east-3 for us-east, and us-south-1 for us-south. The management nodes, file storage shares, and compute nodes will be deployed in the same zone.[Learn more](https://cloud.ibm.com/docs/vpc?topic=vpc-creating-a-vpc-in-a-different-region#get-zones-using-the-cli). - name: eu_de_cluster_id - description: Ensure that you have received the cluster ID from IBM technical sales. A unique identifier for HPC cluster used by IBM Cloud HPC to differentiate different HPC clusters within the same reservation. This can be up to 39 alphanumeric characters including the underscore (_), the hyphen (-), and the period (.) characters. You cannot change the cluster ID after deployment. + description: Ensure that you have received the cluster ID from IBM technical sales. A unique identifier for HPC cluster used by IBM Cloud HPC to differentiate different HPC clusters within the same reservation. This can be up to 39 alphanumeric characters including the underscore (_), the hyphen (-), and the period (.) characters. You cannot change the cluster ID after deployment. default: "" - name: eu_de_reservation_id description: Ensure that you have received the reservation ID from IBM technical sales. Reservation ID is a unique identifier to distinguish different IBM Cloud HPC service agreements. It must start with a letter and can only contain letters, numbers, hyphens (-), or underscores (_). @@ -81,7 +81,7 @@ spec: default: "" description: The IBM Cloud zone name within the selected region where the IBM Cloud HPC cluster should be deployed and requires a single zone input value. Supported zones are eu-de-2 and eu-de-3 for eu-de, us-east-1 and us-east-3 for us-east, and us-south-1 for us-south.
The management nodes, file storage shares, and compute nodes will be deployed in the same zone.[Learn more](https://cloud.ibm.com/docs/vpc?topic=vpc-creating-a-vpc-in-a-different-region#get-zones-using-the-cli). - name: us_south_cluster_id - description: Ensure that you have received the cluster ID from IBM technical sales. A unique identifier for HPC cluster used by IBM Cloud HPC to differentiate different HPC clusters within the same reservation. This can be up to 39 alphanumeric characters including the underscore (_), the hyphen (-), and the period (.) characters. You cannot change the cluster ID after deployment. + description: Ensure that you have received the cluster ID from IBM technical sales. A unique identifier for HPC cluster used by IBM Cloud HPC to differentiate different HPC clusters within the same reservation. This can be up to 39 alphanumeric characters including the underscore (_), the hyphen (-), and the period (.) characters. You cannot change the cluster ID after deployment. default: "" - name: us_south_reservation_id description: Ensure that you have received the reservation ID from IBM technical sales. Reservation ID is a unique identifier to distinguish different IBM Cloud HPC service agreements. It must start with a letter and can only contain letters, numbers, hyphens (-), or underscores (_). diff --git a/.tekton/hpcaas/hpcaas_task/hpcaas-test-infra-ubuntu.yaml b/.tekton/hpcaas/hpcaas_task/hpcaas-test-infra-ubuntu.yaml index 89ae6514..f5b3c333 100644 --- a/.tekton/hpcaas/hpcaas_task/hpcaas-test-infra-ubuntu.yaml +++ b/.tekton/hpcaas/hpcaas_task/hpcaas-test-infra-ubuntu.yaml @@ -54,7 +54,7 @@ spec: description: Name of the custom image that you want to use to create virtual server instances in your IBM Cloud account to deploy the IBM Cloud HPC cluster login node. By default, the solution uses a RHEL 8-6 OS image with additional software packages mentioned [here](https://cloud.ibm.com/docs/hpc-spectrum-LSF#create-custom-image). The solution also offers an Ubuntu 22-04 OS base image (hpcaas-lsf10-ubuntu2204-compute-v2). If you would like to include your application-specific binary files, follow the instructions in [ Planning for custom images ](https://cloud.ibm.com/docs/vpc?topic=vpc-planning-custom-images) to create your own custom image and use that to build the IBM Cloud HPC cluster through this offering. default: "" - name: cluster_name - description: Ensure that you have received the cluster ID from IBM technical sales. A unique identifier for HPC cluster used by IBM Cloud HPC to differentiate different HPC clusters within the same reservation. This can be up to 39 alphanumeric characters including the underscore (_), the hyphen (-), and the period (.) characters. You cannot change the cluster ID after deployment. + description: Ensure that you have received the cluster ID from IBM technical sales. A unique identifier for HPC cluster used by IBM Cloud HPC to differentiate different HPC clusters within the same reservation. This can be up to 39 alphanumeric characters including the underscore (_), the hyphen (-), and the period (.) characters. You cannot change the cluster ID after deployment. default: "" - name: reservation_id description: Ensure that you have received the reservation ID from IBM technical sales. Reservation ID is a unique identifier to distinguish different IBM Cloud HPC service agreements. It must start with a letter and can only contain letters, numbers, hyphens (-), or underscores (_).
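The cluster ID and reservation ID rules repeated in these task descriptions translate directly into two regular expressions. Below is a minimal pre-flight check as a sketch; the function and constant names are illustrative and not taken from this repository's scripts:

```python
import re

# Cluster ID: up to 39 characters - alphanumerics plus "_", "-", and "."
CLUSTER_ID_RE = re.compile(r"^[A-Za-z0-9_.-]{1,39}$")
# Reservation ID: starts with a letter; letters, digits, "-", "_" only
RESERVATION_ID_RE = re.compile(r"^[A-Za-z][A-Za-z0-9_-]*$")

def validate_hpc_inputs(cluster_id, reservation_id):
    """Return a list of validation errors (empty when both inputs are valid)."""
    errors = []
    if not CLUSTER_ID_RE.match(cluster_id):
        errors.append("invalid cluster ID: %r" % cluster_id)
    if not RESERVATION_ID_RE.match(reservation_id):
        errors.append("invalid reservation ID: %r" % reservation_id)
    return errors

print(validate_hpc_inputs("my.hpc-cluster_01", "Contract-HPC-2024"))  # []
```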
@@ -63,7 +63,7 @@ spec: default: "" description: The IBM Cloud zone name within the selected region where the IBM Cloud HPC cluster should be deployed and requires a single zone input value. Supported zones are eu-de-2 and eu-de-3 for eu-de, us-east-1 and us-east-3 for us-east, and us-south-1 for us-south. The management nodes, file storage shares, and compute nodes will be deployed in the same zone.[Learn more](https://cloud.ibm.com/docs/vpc?topic=vpc-creating-a-vpc-in-a-different-region#get-zones-using-the-cli). - name: us_east_cluster_id - description: Ensure that you have received the cluster ID from IBM technical sales. A unique identifier for HPC cluster used by IBM Cloud HPC to differentiate different HPC clusters within the same reservation. This can be up to 39 alphanumeric characters including the underscore (_), the hyphen (-), and the period (.) characters. You cannot change the cluster ID after deployment. + description: Ensure that you have received the cluster ID from IBM technical sales. A unique identifier for HPC cluster used by IBM Cloud HPC to differentiate different HPC clusters within the same reservation. This can be up to 39 alphanumeric characters including the underscore (_), the hyphen (-), and the period (.) characters. You cannot change the cluster ID after deployment. default: "" - name: us_east_reservation_id description: Ensure that you have received the reservation ID from IBM technical sales. Reservation ID is a unique identifier to distinguish different IBM Cloud HPC service agreements. It must start with a letter and can only contain letters, numbers, hyphens (-), or underscores (_). @@ -72,7 +72,7 @@ spec: default: "" description: The IBM Cloud zone name within the selected region where the IBM Cloud HPC cluster should be deployed and requires a single zone input value. Supported zones are eu-de-2 and eu-de-3 for eu-de, us-east-1 and us-east-3 for us-east, and us-south-1 for us-south. The management nodes, file storage shares, and compute nodes will be deployed in the same zone.[Learn more](https://cloud.ibm.com/docs/vpc?topic=vpc-creating-a-vpc-in-a-different-region#get-zones-using-the-cli). - name: eu_de_cluster_id - description: Ensure that you have received the cluster ID from IBM technical sales. A unique identifier for HPC cluster used by IBM Cloud HPC to differentiate different HPC clusters within the same reservation. This can be up to 39 alphanumeric characters including the underscore (_), the hyphen (-), and the period (.) characters. You cannot change the cluster ID after deployment. + description: Ensure that you have received the cluster ID from IBM technical sales. A unique identifier for HPC cluster used by IBM Cloud HPC to differentiate different HPC clusters within the same reservation. This can be up to 39 alphanumeric characters including the underscore (_), the hyphen (-), and the period (.) characters. You cannot change the cluster ID after deployment. default: "" - name: eu_de_reservation_id description: Ensure that you have received the reservation ID from IBM technical sales. Reservation ID is a unique identifier to distinguish different IBM Cloud HPC service agreements. It must start with a letter and can only contain letters, numbers, hyphens (-), or underscores (_). @@ -81,7 +81,7 @@ spec: default: "" description: The IBM Cloud zone name within the selected region where the IBM Cloud HPC cluster should be deployed and requires a single zone input value.
Supported zones are eu-de-2 and eu-de-3 for eu-de, us-east-1 and us-east-3 for us-east, and us-south-1 for us-south. The management nodes, file storage shares, and compute nodes will be deployed in the same zone.[Learn more](https://cloud.ibm.com/docs/vpc?topic=vpc-creating-a-vpc-in-a-different-region#get-zones-using-the-cli). - name: us_south_cluster_id - description: Ensure that you have received the cluster ID from IBM technical sales. A unique identifier for HPC cluster used by IBM Cloud HPC to differentiate different HPC clusters within the same reservation. This can be up to 39 alphanumeric characters including the underscore (_), the hyphen (-), and the period (.) characters. You cannot change the cluster ID after deployment. + description: Ensure that you have received the cluster ID from IBM technical sales. A unique identifier for HPC cluster used by IBM Cloud HPC to differentiate different HPC clusters within the same reservation. This can be up to 39 alphanumeric characters including the underscore (_), the hyphen (-), and the period (.) characters. You cannot change the cluster ID after deployment. default: "" - name: us_south_reservation_id description: Ensure that you have received the reservation ID from IBM technical sales. Reservation ID is a unique identifier to distinguish different IBM Cloud HPC service agreements. It must start with a letter and can only contain letters, numbers, hyphens (-), or underscores (_). diff --git a/.tekton/scripts/cos_data.py b/.tekton/scripts/cos_data.py index 2e2b5778..8bd063c0 100644 --- a/.tekton/scripts/cos_data.py +++ b/.tekton/scripts/cos_data.py @@ -7,6 +7,7 @@ class DownloadFromCOS: + def upload_file(self, bucket_name, file_path, filename): print(f"-- working on file {filename}") try: @@ -15,10 +16,10 @@ def upload_file(self, bucket_name, file_path, filename): ) print(f"--- {filename} successfully uploaded in {file_path}!") except ClientError as be: - print(f"[CLIENT ERROR]: {be}\n") + print("[CLIENT ERROR]: {0}\n".format(be)) self.return_code += 1 except Exception as e: - print(f"[CLIENT ERROR] Unable to upload file to COS: {e}") + print("[CLIENT ERROR] Unable to upload file to COS: {0}".format(e)) self.return_code += 1 def upload_multiple_files(self, FILE_NAME_FULLPATH, bucket_name, file_path): @@ -34,10 +35,10 @@ def download_file(self, bucket_name, filename): ) print(f"--- {filename} successfully downloaded!") except ClientError as be: - print(f"[CLIENT ERROR]: {be}\n") + print("[CLIENT ERROR]: {0}\n".format(be)) self.return_code += 1 except Exception as e: - print(f"[CLIENT ERROR] Unable to download file from COS: {e}") + print("[CLIENT ERROR] Unable to download file from COS: {0}".format(e)) self.return_code += 1 def delete_file(self, bucket_name, filename): @@ -46,10 +47,10 @@ try: self.client.delete_object(Bucket=bucket_name, Key=filename) print(f"--- {filename} successfully deleted!") except ClientError as be: - print(f"[CLIENT ERROR]: {be}\n") + print("[CLIENT ERROR]: {0}\n".format(be)) self.return_code += 1 except Exception as e: - print(f"[CLIENT ERROR] Unable to download file from COS: {e}") + print("[CLIENT ERROR] Unable to delete file from COS: {0}".format(e)) self.return_code += 1 def main(self): diff --git a/.tekton/scripts/ssh_create_delete.sh b/.tekton/scripts/ssh_create_delete.sh index 690b45ec..dea78aab 100644 --- a/.tekton/scripts/ssh_create_delete.sh +++ b/.tekton/scripts/ssh_create_delete.sh @@ -57,8 +57,8 @@ ssh_key_create() { for region in "${REGIONS[@]}"; do
disable_update_check=$(eval "ibmcloud config --check-version=false") echo "$disable_update_check" - authenticate=$(eval "ibmcloud login --apikey $API_KEY -r $region") - if [[ $authenticate = *OK* ]]; then + authenticate=$(eval "ibmcloud login --apikey $API_KEY -r $region") + if [[ $authenticate = *OK* ]]; then echo "************SSH-KEY creation process in $region ************" check_key=$(eval "ibmcloud is keys | grep $CICD_SSH_KEY | awk '{print $2}'") if [[ -z "$check_key" ]]; then @@ -89,7 +89,7 @@ ssh_key_create() { fi echo "************SSH-KEY create process in $region done ************" else - echo "Issue Login with IBMCLOUD $authenticate" + echo "Issue Login with IBMCLOUD $authenticate" exit 1 fi done @@ -102,8 +102,8 @@ ssh_key_delete() { for region in "${REGIONS[@]}"; do disable_update_check=$(eval "ibmcloud config --check-version=false") echo "$disable_update_check" - authenticate=$(eval "ibmcloud login --apikey $API_KEY -r $region") - if [[ $authenticate = *OK* ]]; then + authenticate=$(eval "ibmcloud login --apikey $API_KEY -r $region") + if [[ $authenticate = *OK* ]]; then echo "************SSH-KEY deletion process in $region ************" ssh_key_delete=$(eval "ibmcloud is key-delete $CICD_SSH_KEY -f") if [[ $ssh_key_delete = *deleted* ]]; then @@ -113,7 +113,7 @@ ssh_key_delete() { fi echo "************SSH-KEY delete process in $region done ************" else - echo "Issue Login with IBMCLOUD $authenticate" + echo "Issue Login with IBMCLOUD $authenticate" exit 1 fi done diff --git a/.tekton/task-clone.yaml b/.tekton/task-clone.yaml index 94bfb594..5579c73c 100644 --- a/.tekton/task-clone.yaml +++ b/.tekton/task-clone.yaml @@ -294,7 +294,7 @@ spec: echo "Clone was not successful. Code $_clone_code - Retrying shortly..." sleep 10 if [ $_retry_counter -eq $_max_retry ]; then - set -e # reset on the last attempt so we fail if all attempts fail + set -e # reset on the last attempt so we fail if all attempts fail fi eval "$_clone_command" _clone_code=$? diff --git a/.tekton/task-set-commit-status.yaml b/.tekton/task-set-commit-status.yaml index dd202b94..b6f6cbd8 100644 --- a/.tekton/task-set-commit-status.yaml +++ b/.tekton/task-set-commit-status.yaml @@ -226,7 +226,7 @@ spec: import urllib.request import urllib.parse - # extract the previously properties found in previous step + # extract the properties found in the previous step previous_step={} if os.environ["PIPELINE_DEBUG"] == "1": print("previous step properties:") @@ -257,7 +257,7 @@ spec: print("Looking for state in $(params.build-properties)") state = build["$(params.state-var)"] - # If state is one of PipelineRun Task execution status convert it to a generic state one + # If state is one of PipelineRun Task execution status convert it to a generic state one if state == "Succeeded": state = "success" elif state == "Failed": diff --git a/CHANGELOG.md b/CHANGELOG.md index 0e83f76c..2bc6b412 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -13,7 +13,7 @@ - Support for creation of New resource groups. ## **1.3.1** -- Bug Fixes for the support of ldap users to access Application centre URL. +- Bug Fixes for the support of ldap users to access Application centre URL. ## **1.3.0** - Support for dedicated LSF login client node to monitor/manage LSF cluster. diff --git a/LSF-DA-Architecture.drawio.svg b/LSF-DA-Architecture.drawio.svg deleted file mode 100644 index c66ff795..00000000 --- a/LSF-DA-Architecture.drawio.svg +++ /dev/null @@ -1,4 +0,0 @@ - - -
-[LSF-DA-Architecture.drawio.svg (removed diagram, text content only): login subnet with deployer, login, and bastion nodes; cluster subnet with LSF management nodes (10.1.0.14/10.1.0.15) and static/dynamic compute virtual servers; LDAP server, VPN gateway (optional), public gateway, VPC API endpoint, file storage, optional IBM Storage Scale; cloud services (Key Protect, Secrets Manager, IBM Cloud Logs, COS, IBM Cloud Monitoring, DNS Services, Security and Compliance Workload Protection, VPC Flow Logs); user access from the public internet over SSH via a floating IP and SSH tunnels]
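The task-set-commit-status.yaml hunk above converts a Tekton PipelineRun task state into a generic commit-status state before posting it to the git provider. A compact sketch of that conversion follows; only the "Succeeded" -> "success" branch is visible in the hunk, so the "Failed" target and the pass-through fallback are assumptions:

```python
# Sketch of the state conversion shown in task-set-commit-status.yaml.
# "Succeeded" -> "success" comes from the hunk above; the "Failed" target
# and the fallback are assumed, since the hunk is truncated in this patch.
PIPELINERUN_TO_COMMIT_STATE = {
    "Succeeded": "success",
    "Failed": "failure",
}

def to_commit_state(state):
    # States that are already generic (e.g. "pending") pass through unchanged.
    return PIPELINERUN_TO_COMMIT_STATE.get(state, state)

assert to_commit_state("Succeeded") == "success"
assert to_commit_state("pending") == "pending"
```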
\ No newline at end of file diff --git a/README.md b/README.md index e0903fac..a6dea173 100644 --- a/README.md +++ b/README.md @@ -115,7 +115,7 @@ ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o ServerAliveIn ``` * where `floating_IP_address` is the floating IP address for the bastion node and `LDAP_server_IP` is the IP address for the OpenLDAP node. -* Verify the LDAP service status: +* Verify the LDAP service status: ``` systemctl status slapd diff --git a/Scale-DA-Architecture.drawio.svg b/Scale-DA-Architecture.drawio.svg deleted file mode 100644 index a4e0e26e..00000000 --- a/Scale-DA-Architecture.drawio.svg +++ /dev/null @@ -1,4 +0,0 @@ - - -
-[Scale-DA-Architecture.drawio.svg (removed diagram, text content only): bastion, deployer, GKLM, LDAP, storage, compute, client, and protocol nodes, each in dedicated subnets (with ACLs) and security groups; NFS and GPFS mounts between tiers; cloud services (Object Storage (optional), Secrets Manager, IBM Cloud Monitoring (optional), IBM Cloud Logs (optional), Key Protect, Security and Compliance Workload Protection (optional), DNS Services, VPC Flow Logs); user access from the public internet over SSH via a floating IP and public gateway]
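The README.md hunk above documents checking the OpenLDAP service by hopping through the bastion node's floating IP. The same check can be scripted; a minimal sketch, in which the `ubuntu` login user and the ProxyJump-style hop are assumptions (substitute the real floating IP and LDAP server IP from the deployment outputs):

```python
import subprocess

def ldap_service_active(floating_ip, ldap_server_ip):
    """Run `systemctl is-active slapd` on the LDAP node via the bastion."""
    cmd = [
        "ssh",
        "-o", "StrictHostKeyChecking=no",
        "-o", "UserKnownHostsFile=/dev/null",
        "-J", "ubuntu@" + floating_ip,   # jump host: the bastion floating IP
        "ubuntu@" + ldap_server_ip,      # target: the OpenLDAP node
        "systemctl", "is-active", "--quiet", "slapd",
    ]
    # Exit code 0 means slapd is active on the remote node.
    return subprocess.run(cmd, check=False).returncode == 0
```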
\ No newline at end of file diff --git a/cra-config.yaml b/cra-config.yaml index 466f667f..3e617358 100644 --- a/cra-config.yaml +++ b/cra-config.yaml @@ -7,13 +7,4 @@ CRA_TARGETS: CRA_ENVIRONMENT_VARIABLES: TF_VAR_ssh_keys: "[\"geretain-hpc\"]" TF_VAR_remote_allowed_ips: "[\"49.207.216.50\"]" - TF_VAR_app_center_gui_password: "Craconfig@12345" #pragma: allowlist secret - - CRA_TARGET: "solutions/scale" - CRA_IGNORE_RULES_FILE: "cra-tf-validate-ignore-rules.json" - PROFILE_ID: "48279384-3d29-4089-8259-8ed354774b4a" # SCC profile ID (currently set to 'CIS IBM Cloud Foundations Benchmark v1.1.0' '1.1.0' profile). - CRA_ENVIRONMENT_VARIABLES: - TF_VAR_ssh_keys: "[\"geretain-hpc\"]" - TF_VAR_remote_allowed_ips: "[\"49.207.216.50\"]" - TF_VAR_storage_gui_username: "storagegui" - TF_VAR_storage_type: "evaluation" - TF_VAR_storage_gui_password: "Stor@ge1234" #pragma: allowlist secret + TF_VAR_app_center_gui_password: "Craconfig@123" #pragma: allowlist secret diff --git a/datasource.tf b/datasource.tf index 23de1886..7953cf00 100644 --- a/datasource.tf +++ b/datasource.tf @@ -33,25 +33,24 @@ data "ibm_is_subnet" "subnet" { # name = var.existing_resource_group # } -data "ibm_is_subnet" "existing_compute_subnets" { - count = var.vpc_name != null && var.compute_subnet_id != null ? 1 : 0 - identifier = var.compute_subnet_id +data "ibm_is_subnet" "existing_cluster_subnets" { + count = var.vpc_name != null && var.cluster_subnet_id != null ? 1 : 0 + identifier = var.cluster_subnet_id } - data "ibm_is_subnet" "existing_storage_subnets" { - count = var.vpc_name != null && var.storage_subnet_id != null ? 1 : 0 - identifier = var.storage_subnet_id + count = var.vpc_name != null && var.storage_subnets != null ? 1 : 0 + name = var.storage_subnets[count.index] } data "ibm_is_subnet" "existing_protocol_subnets" { - count = var.vpc_name != null && var.protocol_subnet_id != null ? 1 : 0 - identifier = var.protocol_subnet_id + count = var.vpc_name != null && var.protocol_subnets != null ? 1 : 0 + name = var.protocol_subnets[count.index] } data "ibm_is_subnet" "existing_client_subnets" { - count = var.vpc_name != null && var.client_subnet_id != null ? 1 : 0 - identifier = var.client_subnet_id + count = var.vpc_name != null && var.client_subnets != null ? 1 : 0 + name = var.client_subnets[count.index] } data "ibm_is_subnet" "existing_login_subnets" { @@ -65,8 +64,8 @@ data "ibm_is_ssh_key" "ssh_keys" { } data "ibm_is_subnet" "compute_subnet_crn" { - count = var.vpc_name != null && var.compute_subnet_id != null ? 1 : 0 - identifier = local.compute_subnet + count = var.vpc_name != null && var.cluster_subnet_id != null ? 1 : 0 + identifier = local.compute_subnet_id } data "ibm_is_instance_profile" "compute_profile" { @@ -78,7 +77,7 @@ data "ibm_is_instance_profile" "storage_profile" { } data "ibm_is_bare_metal_server_profile" "storage_bms_profile" { - count = var.scheduler == "Scale" && var.storage_type == "persistent" ? 1 : 0 + count = var.scheduler == "Scale" ? 1 : 0 name = local.storage_bms_profile[0] } @@ -91,27 +90,12 @@ data "ibm_is_instance_profile" "protocol_profile" { name = local.protocol_vsi_profile[0] } -data "ibm_is_bare_metal_server_profile" "protocol_bm_profile" { - count = local.ces_server_type == true && (local.scale_ces_enabled == true && var.colocate_protocol_instances == false) ? 1 : 0 - name = local.protocol_vsi_profile[0] -} - data "ibm_is_subnet_reserved_ips" "protocol_subnet_reserved_ips" { - count = var.enable_deployer == false && local.scale_ces_enabled == true ? 
1 : 0 - subnet = local.protocol_subnet + count = local.scale_ces_enabled == true ? 1 : 0 + subnet = local.protocol_subnet_id } data "ibm_is_instance_profile" "afm_server_profile" { count = local.afm_server_type == false ? 1 : 0 name = local.afm_vsi_profile[0] } - -data "ibm_is_bare_metal_server_profile" "afm_bm_profile" { - count = local.afm_server_type == true ? 1 : 0 - name = local.afm_vsi_profile[0] -} - -data "ibm_is_security_group" "login_security_group" { - count = var.login_security_group_name != null ? 1 : 0 - name = var.login_security_group_name -} diff --git a/ibm_catalog.json b/ibm_catalog.json index 0a36d3df..8bd5af3e 100644 --- a/ibm_catalog.json +++ b/ibm_catalog.json @@ -5,15 +5,16 @@ "label": "IBM Spectrum LSF", "product_kind": "solution", "tags": [ + "ibm_created", + "target_terraform", + "terraform", + "solution", "Deployable Architecture", "DA", "LSF", "Spectrum LSF", - "ibm_created", - "target_terraform", - "terraform", "reference_architecture", - "solution" + "converged_infra" ], "keywords": [ "LSF", @@ -47,7 +48,6 @@ "name": "Cluster-with-LSF", "install_type": "fullstack", "working_directory": "solutions/lsf", - "terraform_version": "1.10.5", "compliance": { "authority": "scc-v3", "profiles": [ @@ -216,7 +216,7 @@ ] }, { - "displayname": "Sydney 3", + "displayname": "Sydney 3", "value": [ "au-syd-3" ] }, @@ -256,42 +256,6 @@ "value": [ "br-sao-3" ] - }, - { - "displayname": "Montreal 1", - "value": [ - "ca-mon-1" - ] - }, - { - "displayname": "Montreal 2", - "value": [ - "ca-mon-2" - ] - }, - { - "displayname": "Montreal 3", - "value": [ - "ca-mon-3" - ] - }, - { - "displayname": "Madrid 1", - "value": [ - "eu-es-1" - ] - }, - { - "displayname": "Madrid 2", - "value": [ - "eu-es-2" - ] - }, - { - "displayname": "Madrid 3", - "value": [ - "eu-es-3" - ] } ] }, @@ -414,6 +378,10 @@ "displayname": "basic", "value": "basic" }, + { + "displayname": "lite", + "value": "lite" + }, { "displayname": "standardv2", "value": "standardv2" }, @@ -465,7 +433,7 @@ "key": "vpc_name" }, { - "key": "compute_subnet_id" + "key": "cluster_subnet_id" }, { "key": "login_subnet_id" @@ -603,7 +571,7 @@ { "key": "ldap_instance", "type": "array", - "default_value": "[\n {\n \"profile\": \"cx2-2x4\",\n \"image\": \"ibm-ubuntu-22-04-5-minimal-amd64-5\"\n }\n]", + "default_value": "[\n {\n \"profile\": \"cx2-2x4\",\n \"image\": \"ibm-ubuntu-22-04-5-minimal-amd64-3\"\n }\n]", "required": false, "custom_config": { "type": "json_editor", "grouping": "deployment", "original_grouping": "deployment", "config_constraints": { "type": "mixed" } } }, @@ -688,6 +656,12 @@ "crn:v1:bluemix:public:iam::::role:Administrator" ] }, + { + "service_name": "is.share", + "role_crns": [ + "crn:v1:bluemix:public:iam::::role:Editor" + ] + }, { "service_name": "All Identity and Access enabled services", "role_crns": [ @@ -712,10 +686,16 @@ "role_crns": [ "crn:v1:bluemix:public:iam::::role:Editor" ], - "service_name": "VPC Infrastructure Services" + "service_name": "is.vpc" }, { - "service_name": "Cloud Monitoring", + "service_name": "is.flow-log-collector", + "role_crns": [ + "crn:v1:bluemix:public:iam::::role:Editor" + ] + }, + { + "service_name": "sysdig-monitor", "role_crns": [ "crn:v1:bluemix:public:iam::::serviceRole:Manager", "crn:v1:bluemix:public:iam::::role:Administrator" ] }, @@ -762,7 +742,7 @@ { "diagram": { "caption": "IBM Spectrum LSF", - "url": "https://raw.githubusercontent.com/terraform-ibm-modules/terraform-ibm-hpc/main/LSF-DA-Architecture.drawio.svg", + "url": "https://raw.githubusercontent.com/terraform-ibm-modules/terraform-ibm-hpc/main/LSF_DA_New.drawio.svg", "type": "image/svg+xml" }, "description": "This deployable
architecture sets up a VPC on IBM Cloud to run HPC workloads within a single zone. A login node is deployed in a dedicated subnet and security group to facilitate secure access to the HPC environment. The HPC management nodes and static compute nodes reside in a separate subnet and security group.\nClusters of virtual server instances are provisioned with the IBM Spectrum LSF scheduler pre-installed for HPC workload job management. The LSF scheduler dynamically provisions compute nodes as needed and removes them once jobs are completed.\nThe solution supports either IBM Cloud File Storage for VPC or a dedicated clustered shared file system using IBM Storage Scale which is a high performance, highly available, clustered file system with advanced features like File Audit Logging for security and Active File Management for hybrid cloud connectivity. IBM Storage Scale provides more performance and scalability than standard file storage solutions." @@ -771,847 +751,6 @@ } } ] - }, - { - "name": "deploy-arch-ibm-storage-scale", - "label": "IBM Storage Scale", - "product_kind": "solution", - "tags": [ - "Deployable Architecture", - "DA", - "HPC", - "IBM Storage Scale", - "ibm_created", - "target_terraform", - "terraform", - "reference_architecture", - "solution" - ], - "keywords": [ - "scale", - "vpc", - "DA", - "Deployable Architecture", - "terraform", - "solution" - ], - "short_description": "Deploy your HPC cluster with IBM Storage Scale for high performance, highly available, clustered file system.", - "long_description": "**Before you begin deploying IBM Storage Scale, make sure that you meet the prerequisites listed in [the step-by-step guide](https://cloud.ibm.com/docs/allowlist/hpc-service?topic=hpc-service-before-you-begin-deploying).**\n\nIBM Storage Scale is a deployable architecture where you can deploy dedicated Storage Scale cluster. Storage Scale supports the configuration of both compute and storage nodes, allowing you to build a complete, end-to-end Storage cluster.", - "provider_name": "IBM", - "offering_docs_url": "https://cloud.ibm.com/docs/allowlist/hpc-service?topic=hpc-service-overview", - "features": [ - { - "title": "Cluster file system", - "description": "IBM Storage scale is clustered file system that provides concurrent access to a single file system or set of file systems from multiple nodes. It's designed for high-performance, scalable storage solutions, particularly for I/O-intensive applications and large-scale data environments." - }, - { - "title": "Data intensive workflows", - "description": "Create a cluster with IBM Storage Scale with its high-performance, distributed file system, IBM Storage Scale accelerates data-intensive workflows by providing fast, concurrent access to massive datasets." - }, - { - "title": "Unified storage for files and objects", - "description": "Unified storage allows simultaneous access to the same data as both files and objects, simplifying data management and enabling seamless workflows across traditional and cloud-native applications." 
- } - ], - "flavors": [ - { - "label": "IBM Storage Scale", - "name": "Cluster-with-Scale", - "install_type": "fullstack", - "working_directory": "solutions/scale", - "terraform_version": "1.10.5", - "compliance": { - "authority": "scc-v3", - "profiles": [ - { - "profile_name": "IBM Cloud Framework for Financial Services", - "profile_version": "1.5.0" - } - ] - }, - "release_notes_url": "https://cloud.ibm.com/docs/allowlist/hpc-service?topic=hpc-service-release-notes", - "configuration": [ - { - "key": "ibmcloud_api_key", - "required": true - }, - { - "key": "zones", - "required": true, - "default_value": [ - "us-east-1" - ], - "options": [ - { - "displayname": "Washington DC 1", - "value": [ - "us-east-1" - ] - }, - { - "displayname": "Washington DC 2", - "value": [ - "us-east-2" - ] - }, - { - "displayname": "Washington DC 3", - "value": [ - "us-east-3" - ] - }, - { - "displayname": "Frankfurt 1", - "value": [ - "eu-de-1" - ] - }, - { - "displayname": "Frankfurt 2", - "value": [ - "eu-de-2" - ] - }, - { - "displayname": "Frankfurt 3", - "value": [ - "eu-de-3" - ] - }, - { - "displayname": "Dallas 1", - "value": [ - "us-south-1" - ] - }, - { - "displayname": "Dallas 2", - "value": [ - "us-south-2" - ] - }, - { - "displayname": "Dallas 3", - "value": [ - "us-south-3" - ] - }, - { - "displayname": "Toronto 1", - "value": [ - "ca-tor-1" - ] - }, - { - "displayname": "Toronto 2", - "value": [ - "ca-tor-2" - ] - }, - { - "displayname": "Toronto 3", - "value": [ - "ca-tor-3" - ] - }, - { - "displayname": "Tokyo 1", - "value": [ - "jp-tok-1" - ] - }, - { - "displayname": "Tokyo 2", - "value": [ - "jp-tok-2" - ] - }, - { - "displayname": "Tokyo 3", - "value": [ - "jp-tok-3" - ] - }, - { - "displayname": "London 1", - "value": [ - "eu-gb-1" - ] - }, - { - "displayname": "London 2", - "value": [ - "eu-gb-2" - ] - }, - { - "displayname": "London 3", - "value": [ - "eu-gb-3" - ] - }, - { - "displayname": "Sydney 1", - "value": [ - "au-syd-1" - ] - }, - { - "displayname": "Sydney 2", - "value": [ - "au-syd-2" - ] - }, - { - "displayname": "Sydney 3", - "value": [ - "au-syd-3" - ] - }, - { - "displayname": "Osaka 1", - "value": [ - "jp-osa-1" - ] - }, - { - "displayname": "Osaka 2", - "value": [ - "jp-osa-2" - ] - }, - { - "displayname": "Osaka 3", - "value": [ - "jp-osa-3" - ] - }, - { - "displayname": "Sao Paulo 1", - "value": [ - "br-sao-1" - ] - }, - { - "displayname": "Sao Paulo 2", - "value": [ - "br-sao-2" - ] - }, - { - "displayname": "Sao Paulo 3", - "value": [ - "br-sao-3" - ] - }, - { - "displayname": "Montreal 1", - "value": [ - "ca-mon-1" - ] - }, - { - "displayname": "Montreal 2", - "value": [ - "ca-mon-2" - ] - }, - { - "displayname": "Montreal 3", - "value": [ - "ca-mon-3" - ] - }, - { - "displayname": "Madrid 1", - "value": [ - "eu-es-1" - ] - }, - { - "displayname": "Madrid 2", - "value": [ - "eu-es-2" - ] - }, - { - "displayname": "Madrid 3", - "value": [ - "eu-es-3" - ] - } - ] - }, - { - "key": "ssh_keys", - "type": "array", - "required": true, - "custom_config": { - "type": "vpc_ssh_key", - "grouping": "deployment", - "original_grouping": "deployment", - "config_constraints": { - "selection": "multi_select", - "valueType": "name" - } - } - }, - { - "key": "remote_allowed_ips", - "required": true - }, - { - "key": "storage_gui_username", - "required": true - }, - { - "key": "storage_gui_password", - "required": true - }, - { - "key": "existing_resource_group", - "required": true - }, - { - "key": "cluster_prefix" - }, - { - "key": "storage_type", - "default_value": "scratch", - 
"options": [ - { - "displayname": "scratch", - "value": "scratch" - }, - { - "displayname": "persistent", - "value": "persistent" - }, - { - "displayname": "evaluation", - "value": "evaluation" - } - ] - }, - { - "key": "ibm_customer_number" - }, - { - "key": "vpc_cidr" - }, - { - "key": "login_subnets_cidr" - }, - { - "key": "compute_subnets_cidr" - }, - { - "key": "storage_subnets_cidr" - }, - { - "key": "client_subnets_cidr" - }, - { - "key": "protocol_subnets_cidr" - }, - { - "key": "enable_vpc_flow_logs" - }, - { - "key": "skip_flowlogs_s2s_auth_policy" - }, - { - "key": "vpc_name" - }, - { - "key": "login_subnet_id" - }, - { - "key": "compute_subnet_id" - }, - { - "key": "storage_subnet_id" - }, - { - "key": "protocol_subnet_id" - }, - { - "key": "client_subnet_id" - }, - { - "key": "enable_sg_validation" - }, - { - "key": "login_security_group_name" - }, - { - "key": "storage_security_group_name" - }, - { - "key": "compute_security_group_name" - }, - { - "key": "client_security_group_name" - }, - { - "key": "gklm_security_group_name" - }, - { - "key": "ldap_security_group_name" - }, - { - "key": "dns_domain_names" - }, - { - "key": "dns_instance_id" - }, - { - "key": "dns_custom_resolver_id" - }, - { - "key": "scale_management_vsi_profile" - }, - { - "key": "bastion_instance", - "type": "object", - "default_value": "{\n \"image\": \"ibm-ubuntu-22-04-5-minimal-amd64-5\",\n \"profile\": \"cx2-4x8\"\n}", - "required": false, - "custom_config": { - "type": "json_editor", - "grouping": "deployment", - "original_grouping": "deployment", - "config_constraints": { - "type": "mixed" - } - } - }, - { - "key": "deployer_instance", - "type": "object", - "default_value": "{\n \"image\": \"hpcc-scale-deployer-v1\",\n \"profile\": \"bx2-8x32\"\n}", - "required": false, - "custom_config": { - "type": "json_editor", - "grouping": "deployment", - "original_grouping": "deployment", - "config_constraints": { - "type": "mixed" - } - } - }, - { - "key": "client_instances", - "type": "object", - "default_value": "[\n {\n \"count\": 2,\n \"profile\": \"cx2-2x4\",\n \"image\": \"ibm-redhat-8-10-minimal-amd64-6\"\n }\n]", - "required": false, - "custom_config": { - "type": "json_editor", - "grouping": "deployment", - "original_grouping": "deployment", - "config_constraints": { - "type": "mixed" - } - } - }, - { - "key": "compute_gui_username" - }, - { - "key": "compute_gui_password" - }, - { - "key": "compute_instances", - "type": "object", - "default_value": "[\n {\n \"count\": 0,\n \"profile\": \"bx2-2x8\",\n \"filesystem\": \"/gpfs/fs1\",\n \"image\": \"hpcc-scale5232-rhel810-v1\"\n }\n]", - "required": false, - "custom_config": { - "type": "json_editor", - "grouping": "deployment", - "original_grouping": "deployment", - "config_constraints": { - "type": "mixed" - } - } - }, - { - "key": "storage_instances", - "type": "object", - "default_value": "[\n {\n \"count\": 2,\n \"profile\": \"bx2d-32x128\",\n \"filesystem\": \"/gpfs/fs1\",\n \"image\": \"hpcc-scale5232-rhel810-v1\"\n }\n]", - "required": false, - "custom_config": { - "type": "json_editor", - "grouping": "deployment", - "original_grouping": "deployment", - "config_constraints": { - "type": "mixed" - } - } - }, - { - "key": "storage_baremetal_server", - "type": "object", - "default_value": "[\n {\n \"count\": 0,\n \"profile\": \"cx2d-metal-96x192\",\n \"filesystem\": \"/gpfs/fs1\",\n \"image\": \"hpcc-scale5232-rhel810-v1\"\n }\n]", - "required": false, - "custom_config": { - "type": "json_editor", - "grouping": "deployment", - 
"original_grouping": "deployment", - "config_constraints": { - "type": "mixed" - } - } - }, - { - "key": "bms_boot_drive_encryption" - }, - { - "key": "tie_breaker_baremetal_server_profile" - }, - { - "key": "afm_instances", - "type": "object", - "default_value": "[\n {\n \"count\": 0,\n \"profile\": \"bx2-32x128\"\n }\n]", - "required": false, - "custom_config": { - "type": "json_editor", - "grouping": "deployment", - "original_grouping": "deployment", - "config_constraints": { - "type": "mixed" - } - } - }, - { - "key": "afm_cos_config", - "type": "object", - "default_value": "[\n {\n \"afm_fileset\": \"afm_fileset\",\n \"mode\": \"iw\",\n \"cos_instance\": \"\",\n \"bucket_name\": \"\",\n \"bucket_region\": \"us-south\",\n \"cos_service_cred_key\": \"\",\n \"bucket_storage_class\": \"smart\",\n \"bucket_type\": \"region_location\"\n }\n]", - "required": false, - "custom_config": { - "type": "json_editor", - "grouping": "deployment", - "original_grouping": "deployment", - "config_constraints": { - "type": "mixed" - } - } - }, - { - "key": "protocol_instances", - "type": "object", - "default_value": "[\n {\n \"count\": 2,\n \"profile\": \"cx2-32x64\"\n }\n]", - "required": false, - "custom_config": { - "type": "json_editor", - "grouping": "deployment", - "original_grouping": "deployment", - "config_constraints": { - "type": "mixed" - } - } - }, - { - "key": "colocate_protocol_instances" - }, - { - "key": "scale_encryption_enabled" - }, - { - "key": "key_protect_instance_id" - }, - { - "key": "scale_encryption_type", - "default_value": "null", - "options": [ - { - "displayname": "null", - "value": "null" - }, - { - "displayname": "key_protect", - "value": "key_protect" - }, - { - "displayname": "gklm", - "value": "gklm" - } - ] - }, - { - "key": "gklm_instances", - "type": "object", - "default_value": "[\n {\n \"count\": 2,\n \"profile\": \"bx2-2x8\",\n \"image\": \"hpcc-scale-gklm4202-v2-5-3\"\n }\n]", - "required": false, - "custom_config": { - "type": "json_editor", - "grouping": "deployment", - "original_grouping": "deployment", - "config_constraints": { - "type": "mixed" - } - } - }, - { - "key": "scale_encryption_admin_password" - }, - { - "key": "filesystem_config", - "type": "object", - "default_value": "[\n {\n \"filesystem\": \"/gpfs/fs1\",\n \"block_size\": \"4M\",\n \"default_data_replica\": 2,\n \"default_metadata_replica\": 2,\n \"max_data_replica\": 3,\n \"max_metadata_replica\": 3 }\n]", - "required": false, - "custom_config": { - "type": "json_editor", - "grouping": "deployment", - "original_grouping": "deployment", - "config_constraints": { - "type": "mixed" - } - } - }, - { - "key": "filesets_config", - "type": "object", - "default_value": "[\n {\n \"client_mount_path\": \"/mnt/scale/tools\",\n \"quota\": 0\n },\n {\n \"client_mount_path\": \"/mnt/scale/data\",\n \"quota\": 0\n }\n]", - "required": false, - "custom_config": { - "type": "json_editor", - "grouping": "deployment", - "original_grouping": "deployment", - "config_constraints": { - "type": "mixed" - } - } - }, - { - "key": "enable_cos_integration" - }, - { - "key": "cos_instance_name" - }, - { - "key": "enable_ldap" - }, - { - "key": "ldap_basedns" - }, - { - "key": "ldap_server" - }, - { - "key": "ldap_admin_password" - }, - { - "key": "ldap_user_name" - }, - { - "key": "ldap_user_password" - }, - { - "key": "ldap_instance", - "type": "array", - "default_value": "[\n {\n \"profile\": \"cx2-2x4\",\n \"image\": \"ibm-ubuntu-22-04-5-minimal-amd64-5\"\n }\n]", - "required": false, - "custom_config": { - "type": 
"json_editor", - "grouping": "deployment", - "original_grouping": "deployment", - "config_constraints": { - "type": "mixed" - } - } - }, - { - "key": "ldap_server_cert" - }, - { - "key": "observability_atracker_enable" - }, - { - "key": "observability_atracker_target_type" - }, - { - "key": "sccwp_enable" - }, - { - "key": "sccwp_service_plan", - "default_value": "free-trial", - "options": [ - { - "displayname": "free-trial", - "value": "free-trial" - }, - { - "displayname": "graduated-tier", - "value": "graduated-tier" - } - ] - }, - { - "key": "cspm_enabled" - }, - { - "key": "app_config_plan", - "default_value": "basic", - "options": [ - { - "displayname": "basic", - "value": "basic" - }, - { - "displayname": "standardv2", - "value": "standardv2" - }, - { - "displayname": "enterprise", - "value": "enterprise" - } - ] - }, - { - "key": "existing_bastion_instance_name" - }, - { - "key": "existing_bastion_instance_public_ip" - }, - { - "key": "existing_bastion_security_group_id" - }, - { - "key": "existing_bastion_ssh_private_key", - "type": "multiline_secure_value", - "required": false, - "custom_config": { - "grouping": "deployment", - "original_grouping": "deployment", - "type": "multiline_secure_value" - } - }, - { - "hidden": true, - "key": "TF_VERSION" - }, - { - "hidden": true, - "key": "TF_PARALLELISM" - }, - { - "hidden": true, - "key": "override" - }, - { - "hidden": true, - "key": "override_json_string" - } - ], - "iam_permissions": [ - { - "role_crns": [ - "crn:v1:bluemix:public:iam::::serviceRole:Manager" - ], - "service_name": "cloud-object-storage" - }, - { - "role_crns": [ - "crn:v1:bluemix:public:iam::::serviceRole:Manager", - "crn:v1:bluemix:public:iam::::role:Editor" - ], - "service_name": "dns-svcs" - }, - { - "role_crns": [ - "crn:v1:bluemix:public:iam::::role:Administrator" - ], - "service_name": "iam-identity" - }, - { - "service_name": "kms", - "role_crns": [ - "crn:v1:bluemix:public:iam::::serviceRole:Manager", - "crn:v1:bluemix:public:iam::::role:ConfigReader" - ] - }, - { - "service_name": "Security and Compliance Center Workload Protection", - "role_crns": [ - "crn:v1:bluemix:public:iam::::role:Administrator" - ] - }, - { - "role_crns": [ - "crn:v1:bluemix:public:iam::::role:Administrator" - ], - "service_name": "VPC Infrastructure Services" - }, - { - "service_name": "All Identity and Access enabled services", - "role_crns": [ - "crn:v1:bluemix:public:iam::::serviceRole:Manager", - "crn:v1:bluemix:public:iam::::role:Administrator" - ] - }, - { - "service_name": "apprapp", - "role_crns": [ - "crn:v1:bluemix:public:iam::::serviceRole:Manager", - "crn:v1:bluemix:public:iam::::role:Administrator" - ] - }, - { - "service_name": "secrets-manager", - "role_crns": [ - "crn:v1:bluemix:public:iam::::serviceRole:Manager", - "crn:v1:bluemix:public:iam::::role:Administrator" - ] - } - ], - "architecture": { - "descriptions": "", - "features": [ - { - "title": "Separate VPC for HPC workloads", - "description": "Yes" - }, - { - "title": "Virtual Server Instances for every subnet", - "description": "Yes" - }, - { - "title": "Increases security with Key Protect", - "description": "Yes" - }, - { - "title": "Reduces failure events by using multizone regions", - "description": "No" - }, - { - "title": "Collects and stores Internet Protocol (IP) traffic information with Activity Tracker and Flow Logs", - "description": "Yes" - }, - { - "title": "Securely connects to multiple networks with a site-to-site virtual private network", - "description": "Yes" - }, - { - "title": 
"Simplifies risk management and demonstrates regulatory compliance with Financial Services", - "description": "Yes" - }, - { - "title": "Uses Floating IP address for access through the public internet", - "description": "No" - } - ], - "diagrams": [ - { - "diagram": { - "caption": "IBM Storage Scale", - "url": "https://raw.githubusercontent.com/terraform-ibm-modules/terraform-ibm-hpc/refs/heads/main/Scale-DA-Architecture.drawio.svg", - "type": "image/svg+xml" - }, - "description": "This deployable architecture provisions a VPC on IBM Cloud to run a clustered file system within a single availability zone. A Bastion node is deployed in a dedicated subnet and security group to enable secure access to the storage environment. The IBM Spectrum Scale (GPFS) storage nodes and compute nodes are hosted in a separate subnet and security group\n. Each GPFS storage node operates as part of the Spectrum Scale cluster, contributing to the system’s high-performance, distributed storage by adding capacity and enabling application access through the compute nodes. The NFS server, running on a protocol node, interacts with the underlying distributed file system to serve client requests. By separating storage management from protocol services, the NFS storage node acts as an intelligent gateway, translating NFS client requests into operations on the highly scalable and resilient Spectrum Scale backend file system." - } - ] - } - } - ] } ] } diff --git a/locals.tf b/locals.tf index 9da6bd0d..ce3bc585 100644 --- a/locals.tf +++ b/locals.tf @@ -3,17 +3,15 @@ locals { # Region and Zone calculations region = join("-", slice(split("-", var.zones[0]), 0, 2)) - # Scheduler name change in lower_case - scheduler_lowercase = var.scheduler != null ? lower(var.scheduler) : "null" - # SSH key calculations # Combining the common ssh keys with host specific ssh keys - # ldap_instance_key_pair = distinct(concat(coalesce(var.ldap_instance_key_pair, []), coalesce(var.ssh_keys, []))) - ssh_keys = distinct(coalesce(var.ssh_keys, [])) - key_management = var.key_management == "null" ? null : var.key_management - ldap_server = var.ldap_server == null ? "null" : var.ldap_server - ldap_admin_password = var.ldap_admin_password == null ? "" : var.ldap_admin_password - ldap_server_cert = var.ldap_server_cert == null ? "null" : var.ldap_server_cert + gklm_instance_key_pair = distinct(concat(coalesce(var.gklm_instance_key_pair, []), coalesce(var.ssh_keys, []))) + ldap_instance_key_pair = distinct(concat(coalesce(var.ldap_instance_key_pair, []), coalesce(var.ssh_keys, []))) + ssh_keys = distinct(coalesce(var.ssh_keys, [])) + key_management = var.key_management == "null" ? null : var.key_management + ldap_server = var.ldap_server == null ? "null" : var.ldap_server + ldap_admin_password = var.ldap_admin_password == null ? "" : var.ldap_admin_password + ldap_server_cert = var.ldap_server_cert == null ? "null" : var.ldap_server_cert } # locals needed for deployer @@ -23,76 +21,47 @@ locals { vpc_name = var.vpc_name == null ? one(module.landing_zone.vpc_name) : var.vpc_name kms_encryption_enabled = local.key_management != null ? true : false boot_volume_encryption_key = local.key_management != null && var.enable_deployer ? one(module.landing_zone.boot_volume_encryption_key)["crn"] : null - existing_kms_instance_guid = local.key_management != null || (var.scale_encryption_enabled && var.scale_encryption_type == "key_protect" && var.key_protect_instance_id == null) ? 
module.landing_zone.key_management_guid : null - - encryption_filesystem_mountpoint = jsonencode( - var.scale_encryption_type == "key_protect" ? ( - try(var.storage_instances[0].filesystem, "") != "" ? - element( - split("/", var.storage_instances[0].filesystem), - length(split("/", var.storage_instances[0].filesystem)) - 1 - ) : - try(var.storage_servers[0].filesystem, "") != "" ? - element( - split("/", var.storage_servers[0].filesystem), - length(split("/", var.storage_servers[0].filesystem)) - 1 - ) : - element( - split("/", var.filesystem_config[0].filesystem), - length(split("/", var.filesystem_config[0].filesystem)) - 1 - ) - ) : "" - ) - - filesystem_mountpoint = var.storage_type == "persistent" ? (var.storage_servers[0]["filesystem"] != "" && var.storage_servers[0]["filesystem"] != null ? var.storage_servers[0]["filesystem"] : var.filesystem_config[0]["filesystem"]) : (var.storage_instances[0]["filesystem"] != "" && var.storage_instances[0]["filesystem"] != null ? var.storage_instances[0]["filesystem"] : var.filesystem_config[0]["filesystem"]) - - cos_data = module.landing_zone.cos_buckets_data + existing_kms_instance_guid = local.key_management != null ? module.landing_zone.key_management_guid : null + cos_data = module.landing_zone.cos_buckets_data # Future use # When we implement the existing bastion concept we need the changes to be implemented like below, which is already there in our LSF DA # skip_iam_authorization_policy = true # skip_iam_authorization_policy = var.bastion_instance_name != null ? false : local.skip_iam_authorization_policy # Cluster node details: - compute_instances = var.enable_deployer ? [] : flatten([module.landing_zone_vsi[0].management_vsi_data, module.landing_zone_vsi[0].compute_vsi_data]) - comp_mgmt_instances = var.enable_deployer ? [] : flatten([module.landing_zone_vsi[0].compute_management_vsi_data]) - storage_instances = var.enable_deployer ? [] : flatten([module.landing_zone_vsi[0].storage_vsi_data]) - storage_servers = var.enable_deployer ? [] : flatten([module.landing_zone_vsi[0].storage_bms_data]) - storage_tie_brkr_bm = var.enable_deployer ? [] : flatten([module.landing_zone_vsi[0].storage_tie_breaker_bms_data]) - protocol_instances = var.enable_deployer ? [] : flatten([module.landing_zone_vsi[0].protocol_vsi_data]) - protocol_bm_instances = var.enable_deployer ? [] : flatten([module.landing_zone_vsi[0].protocol_bms_data]) - gklm_instances = var.enable_deployer ? [] : flatten([module.landing_zone_vsi[0].gklm_vsi_data]) - client_instances = var.enable_deployer ? [] : flatten([module.landing_zone_vsi[0].client_vsi_data]) - afm_instances = var.enable_deployer ? [] : flatten([module.landing_zone_vsi[0].afm_vsi_data]) - afm_bm_instances = var.enable_deployer ? [] : flatten([module.landing_zone_vsi[0].afm_bms_data]) - ldap_instances = var.enable_deployer ? [] : flatten([module.landing_zone_vsi[0].ldap_vsi_data]) - tie_brkr_instances = var.enable_deployer ? [] : flatten(module.landing_zone_vsi[0].storage_cluster_tie_breaker_vsi_data) - strg_mgmt_instances = var.enable_deployer ? [] : flatten([module.landing_zone_vsi[0].storage_cluster_management_vsi]) - login_instance = var.enable_deployer ? [] : flatten(module.landing_zone_vsi[0].login_vsi_data) - - storage_bm_name_with_vol_mapping = var.enable_deployer ? [] : flatten([module.landing_zone_vsi[0].storage_bm_name_with_vol_mapping]) - storage_tie_breaker_bms_name_with_vol_mapping = var.enable_deployer ?
[] : flatten([module.landing_zone_vsi[0].storage_tie_breaker_bms_name_with_vol_mapping]) + compute_instances = var.enable_deployer ? [] : flatten([module.landing_zone_vsi[0].management_vsi_data, module.landing_zone_vsi[0].compute_vsi_data]) + comp_mgmt_instances = var.enable_deployer ? [] : flatten([module.landing_zone_vsi[0].compute_management_vsi_data]) + storage_instances = var.enable_deployer ? [] : flatten([module.landing_zone_vsi[0].storage_vsi_data]) + storage_servers = var.enable_deployer ? [] : flatten([module.landing_zone_vsi[0].storage_bms_data]) + protocol_instances = var.enable_deployer ? [] : flatten([module.landing_zone_vsi[0].protocol_vsi_data]) + gklm_instances = var.enable_deployer ? [] : flatten([module.landing_zone_vsi[0].gklm_vsi_data]) + client_instances = var.enable_deployer ? [] : flatten([module.landing_zone_vsi[0].client_vsi_data]) + afm_instances = var.enable_deployer ? [] : flatten([module.landing_zone_vsi[0].afm_vsi_data]) + ldap_instances = var.enable_deployer ? [] : flatten([module.landing_zone_vsi[0].ldap_vsi_data]) + tie_brkr_instances = var.enable_deployer ? [] : flatten(module.landing_zone_vsi[0].storage_cluster_tie_breaker_vsi_data) + strg_mgmt_instances = var.enable_deployer ? [] : flatten([module.landing_zone_vsi[0].storage_cluster_management_vsi]) + login_instance = var.enable_deployer ? [] : flatten(module.landing_zone_vsi[0].login_vsi_data) management_instance_count = sum(var.management_instances[*]["count"]) storage_instance_count = var.storage_type == "persistent" ? sum(var.storage_servers[*]["count"]) : sum(var.storage_instances[*]["count"]) client_instance_count = sum(var.client_instances[*]["count"]) protocol_instance_count = sum(var.protocol_instances[*]["count"]) static_compute_instance_count = sum(var.static_compute_instances[*]["count"]) - afm_instance_count = sum(var.afm_instances[*]["count"]) + # afm_instance_count = sum(var.afm_instances[*]["count"]) } # locals needed for landing_zone_vsi locals { # dependency: landing_zone -> deployer -> landing_zone_vsi - login_security_group_name_id = var.login_security_group_name != null ? data.ibm_is_security_group.login_security_group[*].id : [] - bastion_security_group_id = var.login_security_group_name == null ? module.deployer.bastion_security_group_id : local.login_security_group_name_id[0] - bastion_public_key_content = module.deployer.bastion_public_key_content - bastion_private_key_content = module.deployer.bastion_private_key_content + bastion_security_group_id = module.deployer.bastion_security_group_id + bastion_public_key_content = module.deployer.bastion_public_key_content + bastion_private_key_content = module.deployer.bastion_private_key_content deployer_hostname = var.enable_deployer ? 
flatten(module.deployer.deployer_vsi_data[*].list)[0].name : "" deployer_ip = module.deployer.deployer_ip # Existing subnets details - existing_compute_subnets = [ - for subnet in data.ibm_is_subnet.existing_compute_subnets : + existing_cluster_subnets = [ + for subnet in data.ibm_is_subnet.existing_cluster_subnets : { cidr = subnet.ipv4_cidr_block id = subnet.id @@ -142,33 +111,18 @@ locals { ] # dependency: landing_zone -> landing_zone_vsi - use_existing_client_subnets = var.vpc_name != null && var.client_subnet_id != null - use_existing_compute_subnets = var.vpc_name != null && var.compute_subnet_id != null - use_existing_storage_subnets = var.vpc_name != null && var.storage_subnet_id != null - use_existing_protocol_subnets = var.vpc_name != null && var.protocol_subnet_id != null - use_existing_login_subnets = var.vpc_name != null && var.login_subnet_id != null - - client_subnets = (var.vpc_name == null ? module.landing_zone.client_subnets : - (local.use_existing_client_subnets ? local.existing_client_subnets : module.landing_zone.client_subnets)) - - compute_subnets = (var.vpc_name == null ? module.landing_zone.compute_subnets : - (local.use_existing_compute_subnets ? local.existing_compute_subnets : module.landing_zone.compute_subnets)) - - storage_subnets = (var.vpc_name == null ? module.landing_zone.storage_subnets : - (local.use_existing_storage_subnets ? local.existing_storage_subnets : module.landing_zone.storage_subnets)) - - protocol_subnets = (var.vpc_name == null ? module.landing_zone.protocol_subnets : - (local.use_existing_protocol_subnets ? local.existing_protocol_subnets : module.landing_zone.protocol_subnets)) - - login_subnets = (var.vpc_name == null ? module.landing_zone.bastion_subnets : - (local.use_existing_login_subnets ? local.existing_login_subnets : module.landing_zone.bastion_subnets)) - - # update the subnet_id - storage_subnet = length(local.storage_subnets) > 0 ? [for subnet in local.storage_subnets : subnet.id][0] : "" - protocol_subnet = length(local.protocol_subnets) > 0 ? [for subnet in local.protocol_subnets : subnet.id][0] : "" - compute_subnet = length(local.compute_subnets) > 0 ? [for subnet in local.compute_subnets : subnet.id][0] : "" - client_subnet = length(local.client_subnets) > 0 ? [for subnet in local.client_subnets : subnet.id][0] : "" - login_subnet = length(local.login_subnets) > 0 ? [for subnet in local.login_subnets : subnet.id][0] : "" + client_subnets = var.vpc_name != null && var.client_subnets != null ? local.existing_client_subnets : module.landing_zone.client_subnets + cluster_subnets = var.vpc_name != null && var.cluster_subnet_id != null ? local.existing_cluster_subnets : module.landing_zone.compute_subnets + storage_subnets = var.vpc_name != null && var.storage_subnets != null ? local.existing_storage_subnets : module.landing_zone.storage_subnets + protocol_subnets = var.vpc_name != null && var.protocol_subnets != null ? local.existing_protocol_subnets : module.landing_zone.protocol_subnets + login_subnets = var.vpc_name != null && var.login_subnet_id != null ? local.existing_login_subnets : module.landing_zone.bastion_subnets + + storage_subnet = [for subnet in local.storage_subnets : subnet.name] + protocol_subnet = [for subnet in local.protocol_subnets : subnet.name] + protocol_subnet_id = local.protocol_instance_count > 0 ? 
[for subnet in local.protocol_subnets : subnet.id][0] : "" + cluster_subnet = [for subnet in local.cluster_subnets : subnet.id][0] + client_subnet = [for subnet in local.client_subnets : subnet.name] + login_subnet = [for subnet in local.login_subnets : subnet.id][0] #boot_volume_encryption_key = local.key_management != null ? one(module.landing_zone.boot_volume_encryption_key)["crn"] : null #skip_iam_authorization_policy = true @@ -177,11 +131,9 @@ locals { # locals needed for file-storage locals { # dependency: landing_zone_vsi -> file-share - compute_subnet_id = (var.enable_deployer && var.vpc_name != null && var.compute_subnet_id != null) ? local.existing_compute_subnets[0].id : "" + compute_subnet_id = (var.vpc_name == null && var.cluster_subnet_id == null ? local.cluster_subnets[0].id : (var.vpc_name != null && var.cluster_subnet_id != null ? [for subnet in data.ibm_is_subnet.existing_cluster_subnets : subnet.id][0] : (var.vpc_name != null && var.cluster_subnet_id == null ? local.cluster_subnets[0].id : ""))) bastion_subnet_id = (var.enable_deployer && var.vpc_name != null && var.login_subnet_id != null) ? local.existing_login_subnets[0].id : "" - protocol_subnet_id = (var.enable_deployer && var.vpc_name != null && var.protocol_subnet_id != null) ? local.existing_protocol_subnets[0].id : "" - client_subnet_id = (var.enable_deployer && var.vpc_name != null && var.client_subnet_id != null) ? local.existing_client_subnets[0].id : "" - storage_subnet_id = (var.enable_deployer && var.vpc_name != null && var.storage_subnet_id != null) ? local.existing_storage_subnets[0].id : "" + subnet_id = (var.enable_deployer && var.vpc_name != null && var.cluster_subnet_id != null) ? local.existing_cluster_subnets[0].id : "" compute_security_group_id = var.enable_deployer ? [] : module.landing_zone_vsi[0].compute_sg_id nfs_shares_map = { @@ -190,10 +142,10 @@ locals { if share.nfs_share != "" && share.nfs_share != null } - fileset_size_map = try({ for details in var.filesets_config : details.client_mount_path => details.quota }, {}) + fileset_size_map = try({ for details in var.custom_file_shares : details.mount_path => details.size }, {}) # Original file share map from module - original_map = var.enable_deployer ? {} : var.scheduler == "LSF" ? module.file_storage[0].name_mount_path_map : {} + original_map = var.enable_deployer ? {} : module.file_storage[0].name_mount_path_map # Extract keyword-to-target mapping from file share names keyword_to_target_map = var.enable_deployer ? {} : { @@ -290,14 +242,14 @@ locals { vpc_crn = var.vpc_name == null ? one(module.landing_zone.vpc_crn) : one(data.ibm_is_vpc.existing_vpc[*].crn) # TODO: Fix existing subnet logic #subnets_crn = var.vpc_name == null ? 
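nfs_shares_map and fileset_size_map above are plain for-expression projections over var.custom_file_shares, with try() collapsing any shape mismatch to an empty map. A runnable sketch with hypothetical defaults:

variable "custom_file_shares" {
  type = list(object({
    mount_path = string
    size       = number
    nfs_share  = string
  }))
  default = [
    { mount_path = "/mnt/vpcstorage/tools", size = 100, nfs_share = "" },
    { mount_path = "/mnt/data", size = 0, nfs_share = "nfs-host:/export/data" },
  ]
}

locals {
  # Shares backed by an existing NFS export, keyed by mount path.
  nfs_shares_map = {
    for share in var.custom_file_shares :
    share.mount_path => share.nfs_share
    if share.nfs_share != "" && share.nfs_share != null
  }

  # Requested share sizes; try() degrades to {} if the shape differs.
  fileset_size_map = try({ for details in var.custom_file_shares : details.mount_path => details.size }, {})
}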
module.landing_zone.subnets_crn : ### - existing_compute_subnet_crns = [for subnet in data.ibm_is_subnet.existing_compute_subnets : subnet.crn] + existing_compute_subnet_crns = [for subnet in data.ibm_is_subnet.existing_cluster_subnets : subnet.crn] existing_storage_subnet_crns = [for subnet in data.ibm_is_subnet.existing_storage_subnets : subnet.crn] existing_protocol_subnet_crns = [for subnet in data.ibm_is_subnet.existing_protocol_subnets : subnet.crn] existing_client_subnet_crns = [for subnet in data.ibm_is_subnet.existing_client_subnets : subnet.crn] existing_bastion_subnet_crns = [for subnet in data.ibm_is_subnet.existing_login_subnets : subnet.crn] subnets_crn = concat(local.existing_compute_subnet_crns, local.existing_storage_subnet_crns, local.existing_protocol_subnet_crns, local.existing_client_subnet_crns, local.existing_bastion_subnet_crns) - # subnets_crn = var.vpc_name == null && var.compute_subnet_id == null ? module.landing_zone.subnets_crn : concat(local.existing_subnet_crns, module.landing_zone.subnets_crn) - # subnets = flatten([local.compute_subnets, local.storage_subnets, local.protocol_subnets]) + # subnets_crn = var.vpc_name == null && var.cluster_subnet_id == null ? module.landing_zone.subnets_crn : concat(local.existing_subnet_crns, module.landing_zone.subnets_crn) + # subnets = flatten([local.cluster_subnets, local.storage_subnets, local.protocol_subnets]) # subnets_crns = data.ibm_is_subnet.itself[*].crn # subnets_crn = module.landing_zone.subnets_crn # boot_volume_encryption_key = local.key_management != null ? one(module.landing_zone.boot_volume_encryption_key)["crn"] : null @@ -335,56 +287,6 @@ locals { } ] - raw_bm_storage_servers_dns_record_details = [ - for server in local.storage_servers : { - id = server.id - ipv4_address = server.ipv4_address - name = server.name - vni_id = server.bms_primary_vni_id - } - ] - - raw_bm_tie_breaker_dns_record_details = [ - for server in local.storage_tie_brkr_bm : { - id = server.id - ipv4_address = server.ipv4_address - name = server.name - vni_id = server.bms_primary_vni_id - } - ] - - raw_bm_protocol_dns_record_details = [ - for server in local.protocol_bm_instances : { - id = server.id - ipv4_address = server.ipv4_address - name = server.name - vni_id = server.bms_primary_vni_id - } - ] - - raw_bm_afm_dns_record_details = [ - for server in local.afm_bm_instances : { - id = server.id - ipv4_address = server.ipv4_address - name = server.name - vni_id = server.bms_primary_vni_id - } - ] - - raw_compute_sec_vnic_dns_record_details = local.enable_sec_interface_compute ? [ - for record in flatten([for details in local.compute_instances : details.secondary_network_interface_detail]) : { - ipv4_address = record.primary_ipv4_address - name = record.name - } - ] : [] - - raw_compute_mgmt_sec_vnic_dns_record_details = local.enable_sec_interface_compute ? 
[ - for record in flatten([for details in local.comp_mgmt_instances : details.secondary_network_interface_detail]) : { - ipv4_address = record.primary_ipv4_address - name = record.name - } - ] : [] - compute_dns_records = [ for instance in concat(local.compute_instances, local.comp_mgmt_instances, local.deployer_instances, local.login_instance) : { @@ -393,7 +295,7 @@ locals { } ] storage_dns_records = [ - for instance in concat(local.storage_instances, local.protocol_instances, local.raw_bm_protocol_dns_record_details, local.afm_instances, local.raw_bm_afm_dns_record_details, local.tie_brkr_instances, local.strg_mgmt_instances, local.raw_bm_storage_servers_dns_record_details, local.raw_bm_tie_breaker_dns_record_details, local.raw_compute_sec_vnic_dns_record_details, local.raw_compute_mgmt_sec_vnic_dns_record_details) : + for instance in concat(local.storage_instances, local.protocol_instances, local.afm_instances, local.tie_brkr_instances, local.strg_mgmt_instances, local.storage_servers) : { name = instance["name"] rdata = instance["ipv4_address"] @@ -420,26 +322,14 @@ locals { compute_hosts = try([for name in local.compute_instances[*]["name"] : "${name}.${var.dns_domain_names["compute"]}"], []) # storage_hosts = try([for name in local.storage_instances[*]["name"] : "${name}.${var.dns_domain_names["storage"]}"], []) ldap_hosts = try([for instance in local.ldap_instances : instance["ipv4_address"]], []) - client_hosts = try([for instance in local.client_instances : instance["ipv4_address"]], []) - afm_hosts = try([for instance in local.afm_instances : instance["ipv4_address"]], []) - gklm_hosts = try([for instance in local.gklm_instances : instance["ipv4_address"]], []) - storage_hosts = try([for instance in local.storage_instances : instance["ipv4_address"]], []) - strg_mgmt_hosts = try([for instance in local.strg_mgmt_instances : instance["ipv4_address"]], []) - all_storage_hosts = concat(local.storage_hosts, local.strg_mgmt_hosts) - protocol_hosts = try([for instance in local.protocol_instances : instance["ipv4_address"]], []) login_host_ip = try([for instance in local.login_instance : instance["ipv4_address"]], []) compute_inventory_path = var.enable_deployer ? "${path.root}/../../modules/ansible-roles/compute.ini" : "${path.root}/modules/ansible-roles/compute.ini" - compute_hosts_inventory_path = var.enable_deployer ? "${path.root}/../../solutions/${local.scheduler_lowercase}/compute_hosts.ini" : "${path.root}/solutions/${local.scheduler_lowercase}/compute_hosts.ini" - mgmt_hosts_inventory_path = var.enable_deployer ? "${path.root}/../../solutions/${local.scheduler_lowercase}/mgmt_hosts.ini" : "${path.root}/solutions/${local.scheduler_lowercase}/mgmt_hosts.ini" - bastion_hosts_inventory_path = var.enable_deployer ? "${path.root}/../../solutions/${local.scheduler_lowercase}/bastion_hosts.ini" : "${path.root}/solutions/${local.scheduler_lowercase}/bastion_hosts.ini" - deployer_hosts_inventory_path = var.enable_deployer ? "${path.root}/../../solutions/${local.scheduler_lowercase}/deployer_hosts.ini" : "${path.root}/solutions/${local.scheduler_lowercase}/deployer_hosts.ini" - ldap_hosts_inventory_path = var.enable_deployer ? "${path.root}/../../solutions/${local.scheduler_lowercase}/ldap_hosts.ini" : "${path.root}/solutions/${local.scheduler_lowercase}/ldap_hosts.ini" - client_hosts_inventory_path = var.enable_deployer ? 
"${path.root}/../../solutions/${local.scheduler_lowercase}/client_hosts.ini" : "${path.root}/solutions/${local.scheduler_lowercase}/client_hosts.ini" - storage_hosts_inventory_path = var.enable_deployer ? "${path.root}/../../solutions/${local.scheduler_lowercase}/storage_hosts.ini" : "${path.root}/solutions/${local.scheduler_lowercase}/storage_hosts.ini" - afm_hosts_inventory_path = var.enable_deployer ? "${path.root}/../../solutions/${local.scheduler_lowercase}/afm_hosts.ini" : "${path.root}/solutions/${local.scheduler_lowercase}/afm_hosts.ini" - gklm_hosts_inventory_path = var.enable_deployer ? "${path.root}/../../solutions/${local.scheduler_lowercase}/gklm_hosts.ini" : "${path.root}/solutions/${local.scheduler_lowercase}/gklm_hosts.ini" - protocol_hosts_inventory_path = var.enable_deployer ? "${path.root}/../../solutions/${local.scheduler_lowercase}/protocol_hosts.ini" : "${path.root}/solutions/${local.scheduler_lowercase}/protocol_hosts.ini" - login_host_inventory_path = var.enable_deployer ? "${path.root}/../../solutions/${local.scheduler_lowercase}/login_host.ini" : "${path.root}/solutions/${local.scheduler_lowercase}/login_host.ini" + compute_hosts_inventory_path = var.enable_deployer ? "${path.root}/../../solutions/lsf/compute_hosts.ini" : "${path.root}/solutions/lsf/compute_hosts.ini" + mgmt_hosts_inventory_path = var.enable_deployer ? "${path.root}/../../solutions/lsf/mgmt_hosts.ini" : "${path.root}/solutions/lsf/mgmt_hosts.ini" + bastion_hosts_inventory_path = var.enable_deployer ? "${path.root}/../../solutions/lsf/bastion_hosts.ini" : "${path.root}/solutions/lsf/bastion_hosts.ini" + deployer_hosts_inventory_path = var.enable_deployer ? "${path.root}/../../solutions/lsf/deployer_hosts.ini" : "${path.root}/solutions/lsf/deployer_hosts.ini" + ldap_hosts_inventory_path = var.enable_deployer ? "${path.root}/../../solutions/lsf/ldap_hosts.ini" : "${path.root}/solutions/lsf/ldap_hosts.ini" + login_host_inventory_path = var.enable_deployer ? "${path.root}/../../solutions/lsf/login_host.ini" : "${path.root}/solutions/lsf/login_host.ini" # storage_inventory_path = var.enable_deployer ? "${path.root}/../../modules/ansible-roles/storage.ini" : "${path.root}/modules/ansible-roles/storage.ini" } @@ -461,19 +351,17 @@ locals { # details needed for json file locals { - compute_instances_data = var.enable_deployer ? [] : flatten([module.landing_zone_vsi[0].compute_vsi_data]) - compute_hosts_ips = var.enable_deployer ? [] : local.compute_instances_data[*]["ipv4_address"] - compute_mgmt_instances_data = var.scheduler == "Scale" ? var.enable_deployer ? [] : flatten([module.landing_zone_vsi[0].compute_management_vsi_data]) : [] - compute_mgmt_hosts_ips = var.scheduler == "Scale" ? var.enable_deployer ? [] : local.compute_mgmt_instances_data[*]["ipv4_address"] : [] - all_compute_hosts = concat(local.compute_hosts_ips, local.compute_mgmt_hosts_ips) - bastion_hosts_ips = var.enable_deployer ? [module.deployer.bastion_fip] : [] - deployer_hosts_ips = var.enable_deployer ? [module.deployer.deployer_ip] : [] - mgmt_instances_data = var.scheduler == "LSF" ? var.enable_deployer ? [] : flatten([module.landing_zone_vsi[0].management_vsi_data]) : [] - mgmt_hosts_ips = var.scheduler == "LSF" ? var.enable_deployer ? [] : local.mgmt_instances_data[*]["ipv4_address"] : [] - ldap_hosts_ips = var.scheduler == "LSF" ? var.enable_deployer ? [] : (var.enable_ldap == true ? (var.ldap_server == "null" ? local.ldap_instances[*]["ipv4_address"] : [var.ldap_server]) : []) : [] - json_inventory_path = var.enable_deployer ? 
"${path.root}/../../modules/ansible-roles/all.json" : "${path.root}/modules/ansible-roles/all.json" - management_nodes = var.scheduler == "LSF" ? var.enable_deployer ? [] : (flatten([module.landing_zone_vsi[0].management_vsi_data]))[*]["name"] : [] - login_host = var.scheduler == "LSF" ? var.enable_deployer ? [] : try([for name in local.login_instance[*]["name"] : "${name}.${var.dns_domain_names["compute"]}"], []) : [] + compute_instances_data = var.scheduler == "LSF" ? var.enable_deployer ? [] : flatten([module.landing_zone_vsi[0].compute_vsi_data]) : [] + compute_hosts_ips = var.scheduler == "LSF" ? var.enable_deployer ? [] : local.compute_instances_data[*]["ipv4_address"] : [] + # bastion_instances_data = var.scheduler == "LSF" ? var.enable_deployer ? flatten([module.deployer.bastion_vsi_data]) : [] : [] + bastion_hosts_ips = var.scheduler == "LSF" ? var.enable_deployer ? [module.deployer.bastion_fip] : [] : [] + deployer_hosts_ips = var.scheduler == "LSF" ? var.enable_deployer ? [module.deployer.deployer_ip] : [] : [] + mgmt_instances_data = var.scheduler == "LSF" ? var.enable_deployer ? [] : flatten([module.landing_zone_vsi[0].management_vsi_data]) : [] + mgmt_hosts_ips = var.scheduler == "LSF" ? var.enable_deployer ? [] : local.mgmt_instances_data[*]["ipv4_address"] : [] + ldap_hosts_ips = var.scheduler == "LSF" ? var.enable_deployer ? [] : (var.enable_ldap == true ? (var.ldap_server == "null" ? local.ldap_instances[*]["ipv4_address"] : [var.ldap_server]) : []) : [] + json_inventory_path = var.enable_deployer ? "${path.root}/../../modules/ansible-roles/all.json" : "${path.root}/modules/ansible-roles/all.json" + management_nodes = var.scheduler == "LSF" ? var.enable_deployer ? [] : (flatten([module.landing_zone_vsi[0].management_vsi_data]))[*]["name"] : [] + login_host = var.scheduler == "LSF" ? var.enable_deployer ? [] : (flatten([module.landing_zone_vsi[0].login_vsi_data]))[*]["name"] : [] compute_nodes = var.scheduler == "LSF" ? ( var.enable_deployer ? [] : flatten([module.landing_zone_vsi[0].compute_vsi_data])[*]["name"] ) : [] @@ -482,7 +370,7 @@ locals { var.enable_deployer ? [] : ( length(local.compute_nodes) == 0 ? [] : distinct(flatten([ for prefix, nodes in { - for node in sort(local.compute_nodes) : + for node in local.compute_nodes : join("-", slice(split("-", node), 0, length(split("-", node)) - 1)) => node... } : length(nodes) > 1 ? [format( @@ -496,8 +384,8 @@ locals { ) : [] client_nodes = var.scheduler == "LSF" ? var.enable_deployer ? [] : (flatten([module.landing_zone_vsi[0].client_vsi_data]))[*]["name"] : [] - gui_hosts = var.scheduler == "LSF" ? (var.enable_deployer ? [] : [format("%s.%s", length(local.management_nodes) == 1 ? local.management_nodes[0] : local.management_nodes[1], var.dns_domain_names["compute"])]) : [] - db_hosts = var.scheduler == "LSF" ? (var.enable_deployer ? [] : (length(local.management_nodes) == 1 ? [local.management_nodes[0]] : [local.management_nodes[1]])) : [] + gui_hosts = var.scheduler == "LSF" ? var.enable_deployer ? [] : [local.management_nodes[0]] : [] # Without Pac HA + db_hosts = var.scheduler == "LSF" ? var.enable_deployer ? [] : [local.management_nodes[0]] : [] # Without Pac HA ha_shared_dir = var.scheduler == "LSF" ? "/mnt/lsf" : "" nfs_install_dir = var.scheduler == "LSF" ? "none" : "" enable_monitoring = var.scheduler == "LSF" ? 
false : false @@ -513,64 +401,24 @@ locals { bucket_crn = local.cloud_metrics_bucket.crn bucket_endpoint = local.cloud_metrics_bucket.s3_endpoint_direct } : null) + # scc_cos_bucket = length(module.landing_zone.cos_buckets_names) > 0 && var.scc_enable ? [for name in module.landing_zone.cos_buckets_names : name if strcontains(name, "scc-bucket")][0] : "" + # scc_cos_instance_crn = length(module.landing_zone.cos_instance_crns) > 0 && var.scc_enable ? module.landing_zone.cos_instance_crns[0] : "" - compute_subnet_crn = var.enable_deployer ? "" : (length(local.compute_subnets) > 0 ? data.ibm_is_subnet.compute_subnet_crn[0].crn : "") + compute_subnet_crn = var.enable_deployer ? "" : data.ibm_is_subnet.compute_subnet_crn[0].crn ssh_keys_ids = var.enable_deployer ? [] : [for name in var.ssh_keys : data.ibm_is_ssh_key.ssh_keys[name].id] - compute_public_key_content = var.enable_deployer ? "" : (local.static_compute_instance_count > 0 || local.management_instance_count > 0 ? jsonencode(base64encode(join("", flatten([module.landing_zone_vsi[0].compute_public_key_content])))) : "") - compute_private_key_content = var.enable_deployer ? "" : (local.static_compute_instance_count > 0 || local.management_instance_count > 0 ? jsonencode(base64encode(join("", flatten([module.landing_zone_vsi[0].compute_private_key_content])))) : "") + compute_public_key_content = var.enable_deployer ? "" : jsonencode(base64encode(join("", flatten([module.landing_zone_vsi[0].compute_public_key_content])))) + compute_private_key_content = var.enable_deployer ? "" : jsonencode(base64encode(join("", flatten([module.landing_zone_vsi[0].compute_private_key_content])))) - #For LSF mgmnt_host_entry = var.scheduler == "LSF" ? { for vsi in flatten([module.landing_zone_vsi[*].management_vsi_data]) : vsi.ipv4_address => vsi.name } : {} comp_host_entry = var.scheduler == "LSF" ? { for vsi in flatten([module.landing_zone_vsi[*].compute_vsi_data]) : vsi.ipv4_address => vsi.name } : {} login_host_entry = var.scheduler == "LSF" ? { for vsi in flatten([module.landing_zone_vsi[*].login_vsi_data]) : vsi.ipv4_address => vsi.name } : {} deployer_host_entry = var.scheduler == "LSF" ? { for inst in local.deployer_instances : inst.ipv4_address => inst.name if inst.ipv4_address != null } : {} - - - #For Scale - storage_host_entry = var.scheduler == "Scale" ? { for vsi in flatten([module.landing_zone_vsi[*].storage_vsi_data]) : vsi.ipv4_address => vsi.name } : {} - storage_mgmnt_host_entry = var.scheduler == "Scale" ? { for vsi in flatten([module.landing_zone_vsi[*].storage_cluster_management_vsi]) : vsi.ipv4_address => vsi.name } : {} - storage_tb_host_entry = var.scheduler == "Scale" ? { for vsi in flatten([module.landing_zone_vsi[*].storage_cluster_tie_breaker_vsi_data]) : vsi.ipv4_address => vsi.name } : {} - compute_host_entry = var.scheduler == "Scale" ? (local.enable_sec_interface_compute ? { for vsi in flatten([module.landing_zone_vsi[*].compute_vsi_data]) : vsi.secondary_ipv4_address => vsi.secondary_network_interface_detail.name } : { for vsi in flatten([module.landing_zone_vsi[*].compute_vsi_data]) : vsi.ipv4_address => vsi.name }) : {} - compute_mgmnt_host_entry = var.scheduler == "Scale" ? (local.enable_sec_interface_compute ? 
{ for vsi in flatten([module.landing_zone_vsi[*].compute_management_vsi_data]) : vsi.secondary_ipv4_address => vsi.secondary_network_interface_detail.name } : { for vsi in flatten([module.landing_zone_vsi[*].compute_management_vsi_data]) : vsi.ipv4_address => vsi.name }) : {} - client_host_entry = var.scheduler == "Scale" ? { for vsi in flatten([module.landing_zone_vsi[*].client_vsi_data]) : vsi.ipv4_address => vsi.name } : {} - protocol_host_entry = var.scheduler == "Scale" ? { for vsi in flatten([module.landing_zone_vsi[*].protocol_vsi_data]) : vsi.ipv4_address => vsi.name } : {} - gklm_host_entry = var.scheduler == "Scale" ? { for vsi in flatten([module.landing_zone_vsi[*].gklm_vsi_data]) : vsi.ipv4_address => vsi.name } : {} - afm_host_entry = var.scheduler == "Scale" ? { for vsi in flatten([module.landing_zone_vsi[*].afm_vsi_data]) : vsi.ipv4_address => vsi.name } : {} - - storage_bms_host_entry = var.scheduler == "Scale" ? { - for server in local.raw_bm_storage_servers_dns_record_details : server.ipv4_address => - { - name = server.name - id = server.id - } - } : {} - storage_tb_bms_host_entry = var.scheduler == "Scale" ? { - for server in local.raw_bm_tie_breaker_dns_record_details : server.ipv4_address => - { - name = server.name - id = server.id - } - } : {} - protocol_bms_host_entry = var.scheduler == "Scale" ? { - for server in local.raw_bm_protocol_dns_record_details : server.ipv4_address => - { - name = server.name - id = server.id - } - } : {} - afm_bms_host_entry = var.scheduler == "Scale" ? { - for server in local.raw_bm_afm_dns_record_details : server.ipv4_address => - { - name = server.name - id = server.id - } - } : {} } locals { - gpfs_base_rpm_path = var.scheduler == "Scale" ? (var.enable_deployer ? [] : fileset(var.spectrumscale_rpms_path, "gpfs.base-*")) : [] - scale_org_version = var.scheduler == "Scale" ? (var.enable_deployer ? "" : regex("gpfs.base-(.*).x86_64.rpm", tolist(local.gpfs_base_rpm_path)[0])[0]) : "" - scale_version = var.scheduler == "Scale" ? (var.enable_deployer ? "" : replace(local.scale_org_version, "-", ".")) : "" + # gpfs_base_rpm_path = fileset(var.spectrumscale_rpms_path, "gpfs.base-*") + # scale_org_version = regex("gpfs.base-(.*).x86_64.rpm", tolist(local.gpfs_base_rpm_path)[0])[0] + scale_version = "5.2.2.1" #replace(local.scale_org_version, "-", ".") compute_vsi_profile = var.static_compute_instances[*]["profile"] storage_vsi_profile = var.storage_instances[*]["profile"] @@ -581,22 +429,20 @@ locals { afm_server_type = strcontains(local.afm_vsi_profile[0], "metal") ces_server_type = strcontains(local.protocol_vsi_profile[0], "metal") - scale_ces_enabled = local.protocol_instance_count > 0 ? true : false - is_colocate_protocol_subset = local.scale_ces_enabled && var.colocate_protocol_instances ? local.protocol_instance_count < local.storage_instance_count ? true : false : false - enable_sec_interface_compute = local.scale_ces_enabled == false && data.ibm_is_instance_profile.compute_profile.bandwidth[0].value >= 64000 ? true : false - enable_sec_interface_storage = local.scale_ces_enabled == false && var.storage_type != "persistent" && data.ibm_is_instance_profile.storage_profile.bandwidth[0].value >= 64000 ? true : false - enable_mrot_conf = local.enable_sec_interface_compute && local.enable_sec_interface_storage ? true : false - enable_afm = local.afm_instance_count > 0 ? 
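The revert pins scale_version to a literal where the replaced lines derived it from the gpfs.base RPM shipped with the package set. For reference, that derivation reconstructed as a self-contained example (the filename is illustrative):

locals {
  # e.g. one entry of fileset(var.spectrumscale_rpms_path, "gpfs.base-*")
  gpfs_base_rpm     = "gpfs.base-5.2.2-1.x86_64.rpm"
  scale_org_version = regex("gpfs.base-(.*).x86_64.rpm", local.gpfs_base_rpm)[0] # "5.2.2-1"
  scale_version     = replace(local.scale_org_version, "-", ".")                 # "5.2.2.1"
}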
true : false - scale_afm_bucket_config_details = module.landing_zone.scale_afm_bucket_config_details - scale_afm_cos_hmac_key_params = module.landing_zone.scale_afm_cos_hmac_key_params + scale_ces_enabled = local.protocol_instance_count > 0 ? true : false + is_colocate_protocol_subset = local.scale_ces_enabled && var.colocate_protocol_instances ? local.protocol_instance_count < local.storage_instance_count ? true : false : false + enable_sec_interface_compute = local.scale_ces_enabled == false && data.ibm_is_instance_profile.compute_profile.bandwidth[0].value >= 64000 ? true : false + enable_sec_interface_storage = local.scale_ces_enabled == false && var.storage_type != "persistent" && data.ibm_is_instance_profile.storage_profile.bandwidth[0].value >= 64000 ? true : false + enable_mrot_conf = local.enable_sec_interface_compute && local.enable_sec_interface_storage ? true : false + enable_afm = sum(var.afm_instances[*]["count"]) > 0 ? true : false - compute_instance_private_ips = local.enable_sec_interface_compute ? flatten([for ip in local.compute_instances : ip.secondary_network_interface_detail[*]["primary_ipv4_address"]]) : flatten(local.compute_instances[*]["ipv4_address"]) - compute_instance_ids = local.enable_sec_interface_compute ? flatten([for id in local.compute_instances : id.secondary_network_interface_detail[*]["id"]]) : flatten(local.compute_instances[*]["id"]) - compute_instance_names = local.enable_sec_interface_compute ? flatten([for name in local.compute_instances : [for nic in name.secondary_network_interface_detail[*]["name"] : "${nic}.${var.dns_domain_names["storage"]}"]]) : try(tolist([for name_details in flatten(local.compute_instances[*]["name"]) : "${name_details}.${var.dns_domain_names["compute"]}"]), []) + compute_instance_private_ips = flatten(local.compute_instances[*]["ipv4_address"]) + compute_instance_ids = flatten(local.compute_instances[*]["id"]) + compute_instance_names = try(tolist([for name_details in flatten(local.compute_instances[*]["name"]) : "${name_details}.${var.dns_domain_names["compute"]}"]), []) - compute_mgmt_instance_private_ips = local.enable_sec_interface_compute ? flatten([for ip in local.comp_mgmt_instances : ip.secondary_network_interface_detail[*]["primary_ipv4_address"]]) : flatten(local.comp_mgmt_instances[*]["ipv4_address"]) - compute_mgmt_instance_ids = local.enable_sec_interface_compute ? flatten([for id in local.comp_mgmt_instances : id.secondary_network_interface_detail[*]["id"]]) : flatten(local.comp_mgmt_instances[*]["id"]) - compute_mgmt_instance_names = local.enable_sec_interface_compute ? 
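enable_sec_interface_* above keys off the instance profile's bandwidth, reported by the data source in Mbps, so the secondary virtual network interface is enabled only at 64 Gbps and above and only when CES protocol nodes are absent. A sketch with a hypothetical profile name:

data "ibm_is_instance_profile" "compute_profile" {
  name = "cx2-32x64" # hypothetical profile name
}

locals {
  scale_ces_enabled = false # stand-in for protocol_instance_count > 0

  # 64000 Mbps == 64 Gbps; below that a second VNI is not worthwhile.
  enable_sec_interface_compute = (
    local.scale_ces_enabled == false &&
    data.ibm_is_instance_profile.compute_profile.bandwidth[0].value >= 64000
  )
}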
flatten([for name in local.comp_mgmt_instances : [for nic in name.secondary_network_interface_detail[*]["name"] : "${nic}.${var.dns_domain_names["storage"]}"]]) : try(tolist([for name_details in flatten(local.comp_mgmt_instances[*]["name"]) : "${name_details}.${var.dns_domain_names["compute"]}"]), []) + compute_mgmt_instance_private_ips = flatten(local.comp_mgmt_instances[*]["ipv4_address"]) + compute_mgmt_instance_ids = flatten(local.comp_mgmt_instances[*]["id"]) + compute_mgmt_instance_names = try(tolist([for name_details in flatten(local.comp_mgmt_instances[*]["name"]) : "${name_details}.${var.dns_domain_names["compute"]}"]), []) strg_instance_private_ips = flatten(local.storage_instances[*]["ipv4_address"]) strg_instance_ids = flatten(local.storage_instances[*]["id"]) @@ -606,10 +452,6 @@ locals { strg_servers_ids = flatten(local.storage_servers[*]["id"]) strg_servers_names = try(tolist([for name_details in flatten(local.storage_servers[*]["name"]) : "${name_details}.${var.dns_domain_names["storage"]}"]), []) - bm_tie_breaker_private_ips = flatten(local.storage_tie_brkr_bm[*]["ipv4_address"]) - bm_tie_breaker_ids = flatten(local.storage_tie_brkr_bm[*]["id"]) - bm_tie_breaker_names = try(tolist([for name_details in flatten(local.storage_tie_brkr_bm[*]["name"]) : "${name_details}.${var.dns_domain_names["storage"]}"]), []) - strg_mgmt_instance_private_ips = flatten(local.strg_mgmt_instances[*]["ipv4_address"]) strg_mgmtt_instance_ids = flatten(local.strg_mgmt_instances[*]["id"]) strg_mgmt_instance_names = try(tolist([for name_details in flatten(local.strg_mgmt_instances[*]["name"]) : "${name_details}.${var.dns_domain_names["storage"]}"]), []) @@ -618,30 +460,14 @@ locals { strg_tie_breaker_instance_ids = flatten(local.tie_brkr_instances[*]["id"]) strg_tie_breaker_instance_names = try(tolist([for name_details in flatten(local.tie_brkr_instances[*]["name"]) : "${name_details}.${var.dns_domain_names["storage"]}"]), []) - # secondary_compute_instance_private_ips = flatten(local.compute_instances[*]["secondary_ipv4_address"]) + secondary_compute_instance_private_ips = flatten(local.compute_instances[*]["secondary_ipv4_address"]) # secondary_storage_instance_private_ips = flatten(local.storage_instances[*]["secondary_ipv4_address"]) protocol_instance_private_ips = flatten(local.protocol_instances[*]["ipv4_address"]) protocol_instance_ids = flatten(local.protocol_instances[*]["id"]) protocol_instance_names = try(tolist([for name_details in flatten(local.protocol_instances[*]["name"]) : "${name_details}.${var.dns_domain_names["storage"]}"]), []) - protocol_bm_instance_private_ips = flatten(local.protocol_bm_instances[*]["ipv4_address"]) - protocol_bm_instance_ids = flatten(local.protocol_bm_instances[*]["id"]) - protocol_bm_instance_names = try(tolist([for name_details in flatten(local.protocol_bm_instances[*]["name"]) : "${name_details}.${var.dns_domain_names["storage"]}"]), []) - - protocol_cluster_instance_names = var.enable_deployer ? [] : slice((concat(local.protocol_instance_names, local.protocol_bm_instance_names, (var.storage_type == "persistent" ? 
local.strg_servers_names : local.strg_instance_names))), 0, local.protocol_instance_count) - - afm_instance_private_ips = flatten(local.afm_instances[*]["ipv4_address"]) - afm_instance_ids = flatten(local.afm_instances[*]["id"]) - afm_instance_names = try(tolist([for name_details in flatten(local.afm_instances[*]["name"]) : "${name_details}.${var.dns_domain_names["storage"]}"]), []) - - afm_bm_private_ips = flatten(local.afm_bm_instances[*]["ipv4_address"]) - afm_bm_ids = flatten(local.afm_bm_instances[*]["id"]) - afm_bm_names = try(tolist([for name_details in flatten(local.afm_bm_instances[*]["name"]) : "${name_details}.${var.dns_domain_names["storage"]}"]), []) - - afm_private_ips_final = concat(local.afm_instance_private_ips, local.afm_bm_private_ips) - afm_ids_final = concat(local.afm_instance_ids, local.afm_bm_ids) - afm_names_final = concat(local.afm_instance_names, local.afm_bm_names) + protocol_cluster_instance_names = var.enable_deployer ? [] : slice((concat(local.protocol_instance_names, (var.storage_type == "persistent" ? [] : local.strg_instance_names))), 0, local.protocol_instance_count) # client_instance_private_ips = flatten(local.client_instances[*]["ipv4_address"]) # client_instance_ids = flatten(local.client_instances[*]["id"]) @@ -656,33 +482,49 @@ locals { # ldap_instance_names = flatten(local.ldap_instances[*]["name"]) } +locals { + afm_instance_private_ips = flatten(local.afm_instances[*]["ipv4_address"]) + afm_instance_ids = flatten(local.afm_instances[*]["id"]) + afm_instance_names = try(tolist([for name_details in flatten(local.afm_instances[*]["name"]) : "${name_details}.${var.dns_domain_names["storage"]}"]), []) + + new_instance_bucket_hmac = [for details in var.afm_cos_config : details if(details.cos_instance == "" && details.bucket_name == "" && details.cos_service_cred_key == "")] + exstng_instance_new_bucket_hmac = [for details in var.afm_cos_config : details if(details.cos_instance != "" && details.bucket_name == "" && details.cos_service_cred_key == "")] + exstng_instance_bucket_new_hmac = [for details in var.afm_cos_config : details if(details.cos_instance != "" && details.bucket_name != "" && details.cos_service_cred_key == "")] + exstng_instance_hmac_new_bucket = [for details in var.afm_cos_config : details if(details.cos_instance != "" && details.bucket_name == "" && details.cos_service_cred_key != "")] + exstng_instance_bucket_hmac = [for details in var.afm_cos_config : details if(details.cos_instance != "" && details.bucket_name != "" && details.cos_service_cred_key != "")] + + afm_cos_bucket_details = local.enable_afm == true ? flatten(module.cos[*].afm_cos_bucket_details) : [] + afm_cos_config = local.enable_afm == true ? flatten(module.cos[*].afm_config_details) : [] +} + + locals { - storage_instance_private_ips = var.storage_type != "persistent" ? local.enable_afm == true ? concat(local.strg_instance_private_ips, local.afm_private_ips_final) : local.strg_instance_private_ips : [] - storage_instance_ids = var.storage_type != "persistent" ? local.enable_afm == true ? concat(local.strg_instance_ids, local.afm_ids_final) : local.strg_instance_ids : [] - storage_instance_names = var.storage_type != "persistent" ? local.enable_afm == true ? concat(local.strg_instance_names, local.afm_names_final) : local.strg_instance_names : [] - storage_ips_with_vol_mapping = var.storage_type != "persistent" ? 
module.landing_zone_vsi[*].instance_ips_with_vol_mapping : local.storage_bm_name_with_vol_mapping + storage_instance_private_ips = var.storage_type != "persistent" ? local.enable_afm == true ? concat(local.strg_instance_private_ips, local.afm_instance_private_ips) : local.strg_instance_private_ips : [] + storage_instance_ids = var.storage_type != "persistent" ? local.enable_afm == true ? concat(local.strg_instance_ids, local.afm_instance_ids) : local.strg_instance_ids : [] + storage_instance_names = var.storage_type != "persistent" ? local.enable_afm == true ? concat(local.strg_instance_names, local.afm_instance_names) : local.strg_instance_names : [] + storage_ips_with_vol_mapping = module.landing_zone_vsi[*].instance_ips_with_vol_mapping storage_cluster_instance_private_ips = local.scale_ces_enabled == false ? local.storage_instance_private_ips : concat(local.storage_instance_private_ips, local.protocol_instance_private_ips) storage_cluster_instance_ids = local.scale_ces_enabled == false ? local.storage_instance_ids : concat(local.storage_instance_ids, local.protocol_instance_ids) storage_cluster_instance_names = local.scale_ces_enabled == false ? local.storage_instance_names : concat(local.storage_instance_names, local.protocol_instance_names) - baremetal_instance_private_ips = var.storage_type == "persistent" ? local.enable_afm == true ? concat(local.strg_servers_private_ips, local.afm_private_ips_final) : local.strg_servers_private_ips : [] - baremetal_instance_ids = var.storage_type == "persistent" ? local.enable_afm == true ? concat(local.strg_servers_ids, local.afm_ids_final) : local.strg_servers_ids : [] - baremetal_instance_names = var.storage_type == "persistent" ? local.enable_afm == true ? concat(local.strg_servers_names, local.afm_names_final) : local.strg_servers_names : [] + baremetal_instance_private_ips = var.storage_type == "persistent" ? local.enable_afm == true ? concat(local.strg_servers_private_ips, local.afm_instance_private_ips) : local.strg_servers_private_ips : [] + baremetal_instance_ids = var.storage_type == "persistent" ? local.enable_afm == true ? concat(local.strg_servers_ids, local.afm_instance_ids) : local.strg_servers_ids : [] + baremetal_instance_names = var.storage_type == "persistent" ? local.enable_afm == true ? concat(local.strg_servers_names, local.afm_instance_names) : local.strg_servers_names : [] - baremetal_cluster_instance_private_ips = var.storage_type == "persistent" && local.scale_ces_enabled == false ? local.baremetal_instance_private_ips : concat(local.baremetal_instance_private_ips, local.protocol_instance_private_ips, local.protocol_bm_instance_private_ips) - baremetal_cluster_instance_ids = var.storage_type == "persistent" && local.scale_ces_enabled == false ? local.baremetal_instance_ids : concat(local.baremetal_instance_ids, local.protocol_instance_ids, local.protocol_bm_instance_ids) - baremetal_cluster_instance_names = var.storage_type == "persistent" && local.scale_ces_enabled == false ? local.baremetal_instance_names : concat(local.baremetal_instance_names, local.protocol_instance_names, local.protocol_bm_instance_names) + baremetal_cluster_instance_private_ips = var.storage_type == "persistent" && local.scale_ces_enabled == false ? local.baremetal_instance_private_ips : concat(local.baremetal_instance_private_ips, local.protocol_instance_private_ips) + baremetal_cluster_instance_ids = var.storage_type == "persistent" && local.scale_ces_enabled == false ? 
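The five exstng_/new_ lists above route each var.afm_cos_config entry by which of its fields are blank, and each list then drives a different provisioning path inside the cos module. A worked example with two hypothetical entries, which land in the first and last lists respectively:

variable "afm_cos_config" {
  type = list(object({
    cos_instance         = string
    bucket_name          = string
    cos_service_cred_key = string
  }))
  default = [
    { cos_instance = "", bucket_name = "", cos_service_cred_key = "" },                      # create instance, bucket, and HMAC key
    { cos_instance = "cos-afm", bucket_name = "bkt-afm", cos_service_cred_key = "key-afm" }, # reuse all three
  ]
}

locals {
  new_instance_bucket_hmac    = [for d in var.afm_cos_config : d if d.cos_instance == "" && d.bucket_name == "" && d.cos_service_cred_key == ""]
  exstng_instance_bucket_hmac = [for d in var.afm_cos_config : d if d.cos_instance != "" && d.bucket_name != "" && d.cos_service_cred_key != ""]
  # With the defaults above, each list holds exactly one entry.
}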
local.baremetal_instance_ids : concat(local.baremetal_instance_ids, local.protocol_instance_ids) + baremetal_cluster_instance_names = var.storage_type == "persistent" && local.scale_ces_enabled == false ? local.baremetal_instance_names : concat(local.baremetal_instance_names, local.protocol_instance_names) - tie_breaker_storage_instance_private_ips = var.storage_type != "persistent" ? local.strg_tie_breaker_private_ips : local.bm_tie_breaker_private_ips - tie_breaker_storage_instance_ids = var.storage_type != "persistent" ? local.strg_tie_breaker_instance_ids : local.bm_tie_breaker_ids - tie_breaker_storage_instance_names = var.storage_type != "persistent" ? local.strg_tie_breaker_instance_names : local.bm_tie_breaker_names - tie_breaker_ips_with_vol_mapping = var.storage_type != "persistent" ? module.landing_zone_vsi[*].instance_ips_with_vol_mapping_tie_breaker : local.storage_tie_breaker_bms_name_with_vol_mapping + tie_breaker_storage_instance_private_ips = var.storage_type != "persistent" ? local.strg_tie_breaker_private_ips : local.baremetal_instance_private_ips + tie_breaker_storage_instance_ids = var.storage_type != "persistent" ? local.strg_tie_breaker_instance_ids : local.baremetal_instance_ids + tie_breaker_storage_instance_names = var.storage_type != "persistent" ? local.strg_tie_breaker_instance_names : local.baremetal_instance_names + tie_breaker_ips_with_vol_mapping = module.landing_zone_vsi[*].instance_ips_with_vol_mapping_tie_breaker - storage_subnet_cidr = local.storage_instance_count > 0 && var.storage_subnet_id != null ? jsonencode((data.ibm_is_subnet.existing_storage_subnets[*].ipv4_cidr_block)[0]) : "" - compute_subnet_cidr = local.static_compute_instance_count > 0 && var.compute_subnet_id != null ? jsonencode((data.ibm_is_subnet.existing_compute_subnets[*].ipv4_cidr_block)[0]) : "" - client_subnet_cidr = local.client_instance_count > 0 && var.client_subnet_id != null ? jsonencode((data.ibm_is_subnet.existing_client_subnets[*].ipv4_cidr_block)[0]) : "" + storage_subnet_cidr = var.enable_deployer ? "" : local.storage_instance_count > 0 ? jsonencode((data.ibm_is_subnet.existing_storage_subnets[*].ipv4_cidr_block)[0]) : "" + cluster_subnet_cidr = var.enable_deployer ? "" : jsonencode((data.ibm_is_subnet.existing_cluster_subnets[*].ipv4_cidr_block)[0]) + client_subnet_cidr = var.enable_deployer ? "" : local.client_instance_count > 0 ? jsonencode((data.ibm_is_subnet.existing_client_subnets[*].ipv4_cidr_block)[0]) : "" compute_memory = data.ibm_is_instance_profile.compute_profile.memory[0].value compute_vcpus_count = data.ibm_is_instance_profile.compute_profile.vcpu_count[0].value @@ -695,29 +537,25 @@ locals { storage_desc_bandwidth = data.ibm_is_instance_profile.storage_profile.bandwidth[0].value storage_memory = var.storage_type == "persistent" ? data.ibm_is_bare_metal_server_profile.storage_bms_profile[0].memory[0].value : data.ibm_is_instance_profile.storage_profile.memory[0].value storage_vcpus_count = var.storage_type == "persistent" ? data.ibm_is_bare_metal_server_profile.storage_bms_profile[0].cpu_core_count[0].value : data.ibm_is_instance_profile.storage_profile.vcpu_count[0].value - storage_bandwidth = var.storage_type == "persistent" ? local.sapphire_rapids_profile_check == true ? 200000 : 100000 : data.ibm_is_instance_profile.storage_profile.bandwidth[0].value - protocol_memory = (local.scale_ces_enabled == true && var.colocate_protocol_instances == false) ? local.ces_server_type == false ? 
data.ibm_is_instance_profile.protocol_profile[0].memory[0].value : data.ibm_is_bare_metal_server_profile.protocol_bm_profile[0].memory[0].value : jsonencode(0) - protocol_vcpus_count = (local.scale_ces_enabled == true && var.colocate_protocol_instances == false) ? local.ces_server_type == false ? data.ibm_is_instance_profile.protocol_profile[0].vcpu_count[0].value : data.ibm_is_bare_metal_server_profile.protocol_bm_profile[0].cpu_core_count[0].value : jsonencode(0) - protocol_bandwidth = (local.scale_ces_enabled == true && var.colocate_protocol_instances == false) ? local.ces_server_type == false ? data.ibm_is_instance_profile.protocol_profile[0].bandwidth[0].value : local.sapphire_rapids_profile_check == true ? 200000 : 100000 : jsonencode(0) + storage_bandwidth = var.storage_type == "persistent" ? data.ibm_is_bare_metal_server_profile.storage_bms_profile[0].bandwidth[0].value : data.ibm_is_instance_profile.storage_profile.bandwidth[0].value + protocol_memory = (local.scale_ces_enabled == true && var.colocate_protocol_instances == false) ? local.ces_server_type == false ? data.ibm_is_instance_profile.protocol_profile[0].memory[0].value : jsonencode(0) : jsonencode(0) + protocol_vcpus_count = (local.scale_ces_enabled == true && var.colocate_protocol_instances == false) ? local.ces_server_type == false ? data.ibm_is_instance_profile.protocol_profile[0].vcpu_count[0].value : jsonencode(0) : jsonencode(0) + protocol_bandwidth = (local.scale_ces_enabled == true && var.colocate_protocol_instances == false) ? local.ces_server_type == false ? data.ibm_is_instance_profile.protocol_profile[0].bandwidth[0].value : jsonencode(0) : jsonencode(0) storage_protocol_memory = var.storage_type == "persistent" ? data.ibm_is_bare_metal_server_profile.storage_bms_profile[0].memory[0].value : data.ibm_is_instance_profile.storage_profile.memory[0].value storage_protocol_vcpus_count = var.storage_type == "persistent" ? data.ibm_is_bare_metal_server_profile.storage_bms_profile[0].cpu_core_count[0].value : data.ibm_is_instance_profile.storage_profile.vcpu_count[0].value - storage_protocol_bandwidth = var.storage_type == "persistent" ? local.sapphire_rapids_profile_check == true ? 200000 : 100000 : data.ibm_is_instance_profile.storage_profile.bandwidth[0].value - afm_memory = local.afm_server_type == true ? data.ibm_is_bare_metal_server_profile.afm_bm_profile[0].memory[0].value : data.ibm_is_instance_profile.afm_server_profile[0].memory[0].value - afm_vcpus_count = local.afm_server_type == true ? data.ibm_is_bare_metal_server_profile.afm_bm_profile[0].cpu_core_count[0].value : data.ibm_is_instance_profile.afm_server_profile[0].vcpu_count[0].value - afm_bandwidth = local.afm_server_type == true ? local.sapphire_rapids_profile_check == true ? 200000 : 100000 : data.ibm_is_instance_profile.afm_server_profile[0].bandwidth[0].value + storage_protocol_bandwidth = var.storage_type == "persistent" ? data.ibm_is_bare_metal_server_profile.storage_bms_profile[0].bandwidth[0].value : data.ibm_is_instance_profile.storage_profile.bandwidth[0].value + afm_memory = local.afm_server_type == true ? jsonencode("") : data.ibm_is_instance_profile.afm_server_profile[0].memory[0].value + afm_vcpus_count = local.afm_server_type == true ? jsonencode("") : data.ibm_is_instance_profile.afm_server_profile[0].vcpu_count[0].value + afm_bandwidth = local.afm_server_type == true ? 
jsonencode("") : data.ibm_is_instance_profile.afm_server_profile[0].bandwidth[0].value protocol_reserved_name_ips_map = try({ for details in data.ibm_is_subnet_reserved_ips.protocol_subnet_reserved_ips[0].reserved_ips : details.name => details.address }, {}) - protocol_subnet_gateway_ip = var.enable_deployer ? "" : local.scale_ces_enabled == true ? local.protocol_reserved_name_ips_map.ibm-default-gateway : "" + protocol_subnet_gateway_ip = local.scale_ces_enabled == true ? local.protocol_reserved_name_ips_map.ibm-default-gateway : "" } # Existing bastion Variables locals { bastion_instance_public_ip = var.existing_bastion_instance_name != null ? var.existing_bastion_instance_public_ip : null bastion_ssh_private_key = var.existing_bastion_instance_name != null ? var.existing_bastion_ssh_private_key : null - sapphire_rapids_profile_check = [ - for server in var.storage_servers : - strcontains(server.profile, "3-metal") || strcontains(server.profile, "3d-metal") - ] } locals { @@ -727,24 +565,11 @@ locals { # locals needed for ssh connection locals { - ssh_forward_host = var.enable_deployer ? "" : var.scheduler == "LSF" ? (length(local.mgmt_hosts_ips) == 1 ? local.mgmt_hosts_ips[0] : local.mgmt_hosts_ips[1]) : "" - ssh_forwards = var.enable_deployer ? "" : var.scheduler == "LSF" ? "-L 8443:localhost:8443 -L 6080:localhost:6080 -L 8444:localhost:8444" : "" - ssh_jump_host = var.enable_deployer ? "" : var.scheduler == "LSF" ? local.bastion_instance_public_ip != null ? local.bastion_instance_public_ip : var.bastion_fip : "" - ssh_jump_option = var.enable_deployer ? "" : var.scheduler == "LSF" ? "-J ubuntu@${local.ssh_jump_host}" : "" - ssh_cmd = var.enable_deployer ? "" : var.scheduler == "LSF" ? "ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o ServerAliveInterval=5 -o ServerAliveCountMax=1 ${local.ssh_forwards} ${local.ssh_jump_option} lsfadmin@${local.ssh_forward_host}" : "" - webservice_ssh_forwards = var.enable_deployer ? "" : var.scheduler == "LSF" && var.lsf_version == "fixpack_15" ? "-L 8448:localhost:8448" : "" - webservice_ssh_cmd = var.enable_deployer ? "" : var.scheduler == "LSF" && var.lsf_version == "fixpack_15" ? "ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o ServerAliveInterval=5 -o ServerAliveCountMax=1 ${local.webservice_ssh_forwards} ${local.ssh_jump_option} lsfadmin@${local.ssh_forward_host}" : "" -} - -locals { - scale_encryption_admin_username = "SKLMAdmin" # pragma: allowlist secret - scale_encryption_admin_default_password = "SKLM@dmin123" # pragma: allowlist secret -} - -#For Baremetal Userdata -locals { - enable_protocol = local.storage_instance_count > 0 && local.protocol_instance_count > 0 - bms_interfaces = ["eth0", "eth1"] + ssh_forward_host = var.enable_deployer ? "" : local.mgmt_hosts_ips[0] + ssh_forwards = var.enable_deployer ? "" : "-L 8443:${local.ssh_forward_host}:8443 -L 6080:${local.ssh_forward_host}:6080 -L 8444:${local.ssh_forward_host}:8444" + ssh_jump_host = var.enable_deployer ? "" : local.bastion_instance_public_ip != null ? local.bastion_instance_public_ip : var.bastion_fip + ssh_jump_option = var.enable_deployer ? "" : "-J ubuntu@${local.ssh_jump_host}" + ssh_cmd = var.enable_deployer ? 
"" : "ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o ServerAliveInterval=5 -o ServerAliveCountMax=1 ${local.ssh_forwards} ${local.ssh_jump_option} lsfadmin@${join(",", local.login_host_ip)}" } #locals { diff --git a/main.tf b/main.tf index 64e9aa9e..6dbfd1a6 100644 --- a/main.tf +++ b/main.tf @@ -1,14 +1,10 @@ module "landing_zone" { source = "./modules/landing_zone" enable_landing_zone = var.enable_landing_zone - scheduler = var.scheduler vpc_cluster_private_subnets_cidr_blocks = [var.vpc_cluster_private_subnets_cidr_blocks] cos_instance_name = var.cos_instance_name bastion_subnet_id = local.bastion_subnet_id - compute_subnet_id = local.compute_subnet_id - protocol_subnet_id = local.protocol_subnet_id - client_subnet_id = local.client_subnet_id - storage_subnet_id = local.storage_subnet_id + compute_subnet_id = local.subnet_id enable_atracker = var.observability_atracker_enable && (var.observability_atracker_target_type == "cos") ? true : false enable_cos_integration = var.enable_cos_integration enable_vpc_flow_logs = var.enable_vpc_flow_logs @@ -37,12 +33,6 @@ module "landing_zone" { skip_flowlogs_s2s_auth_policy = var.skip_flowlogs_s2s_auth_policy skip_kms_s2s_auth_policy = var.skip_kms_s2s_auth_policy observability_logs_enable = var.observability_logs_enable_for_management || var.observability_logs_enable_for_compute || (var.observability_atracker_enable && var.observability_atracker_target_type == "cloudlogs") ? true : false - scale_encryption_type = var.scale_encryption_type - scale_encryption_enabled = var.scale_encryption_enabled - key_protect_instance_id = var.key_protect_instance_id - afm_instances = var.afm_instances - afm_cos_config = var.afm_cos_config - filesystem_config = var.filesystem_config # hpcs_instance_name = var.hpcs_instance_name # clusters = var.clusters } @@ -57,11 +47,8 @@ module "deployer" { cluster_cidr = local.cluster_cidr ext_login_subnet_id = var.login_subnet_id bastion_subnets = local.login_subnets - ext_compute_subnet_id = var.compute_subnet_id - compute_subnets = local.compute_subnets - client_subnets = local.client_subnets - storage_subnets = local.storage_subnets - protocol_subnets = local.protocol_subnets + ext_cluster_subnet_id = var.cluster_subnet_id + cluster_subnets = local.cluster_subnets bastion_instance = var.bastion_instance enable_deployer = var.enable_deployer deployer_instance = var.deployer_instance @@ -69,71 +56,61 @@ module "deployer" { allowed_cidr = var.remote_allowed_ips kms_encryption_enabled = local.kms_encryption_enabled boot_volume_encryption_key = local.boot_volume_encryption_key + existing_kms_instance_guid = local.existing_kms_instance_guid dns_domain_names = var.dns_domain_names skip_iam_authorization_policy = var.skip_iam_block_storage_authorization_policy ext_vpc_name = var.vpc_name bastion_instance_name = var.existing_bastion_instance_name bastion_instance_public_ip = local.bastion_instance_public_ip existing_bastion_security_group_id = var.existing_bastion_instance_name != null ? var.existing_bastion_security_group_id : null - ext_client_subnet_id = var.client_subnet_id - ext_storage_subnet_id = var.storage_subnet_id - ext_protocol_subnet_id = var.protocol_subnet_id - login_security_group_name = var.login_security_group_name - enable_sec_interface_compute = local.enable_sec_interface_compute } module "landing_zone_vsi" { - count = var.enable_deployer == false ? 
1 : 0 - source = "./modules/landing_zone_vsi" - resource_group = var.resource_group_ids["workload_rg"] - prefix = var.cluster_prefix - vpc_id = local.vpc_id - zones = var.zones - bastion_security_group_id = var.bastion_security_group_id - bastion_public_key_content = local.bastion_public_key_content - ssh_keys = var.ssh_keys - client_subnets = local.client_subnets - client_instances = var.client_instances - compute_subnet_id = local.compute_subnets - management_instances = var.management_instances - static_compute_instances = var.static_compute_instances - dynamic_compute_instances = var.dynamic_compute_instances - storage_subnets = local.storage_subnets - storage_instances = var.storage_instances - storage_servers = var.storage_servers - storage_type = var.storage_type - protocol_subnets = local.protocol_subnets - protocol_instances = var.protocol_instances - nsd_details = var.nsd_details - dns_domain_names = var.dns_domain_names - kms_encryption_enabled = local.kms_encryption_enabled - boot_volume_encryption_key = var.boot_volume_encryption_key - enable_deployer = var.enable_deployer - afm_instances = var.afm_instances - enable_dedicated_host = var.enable_dedicated_host - enable_ldap = var.enable_ldap - ldap_instances = var.ldap_instance - ldap_server = local.ldap_server - scale_encryption_enabled = var.scale_encryption_enabled - scale_encryption_type = var.scale_encryption_type - gklm_instances = var.gklm_instances - vpc_region = local.region - scheduler = var.scheduler - ibm_customer_number = var.ibm_customer_number - colocate_protocol_instances = var.colocate_protocol_instances - storage_security_group_id = var.storage_security_group_id - login_instance = var.login_instance - bastion_subnets = local.login_subnets - cluster_cidr = local.cluster_cidr - bms_boot_drive_encryption = var.bms_boot_drive_encryption - tie_breaker_bm_server_profile = var.tie_breaker_bm_server_profile - scale_management_vsi_profile = var.scale_management_vsi_profile - login_security_group_name = var.login_security_group_name - storage_security_group_name = var.storage_security_group_name - compute_security_group_name = var.compute_security_group_name - client_security_group_name = var.client_security_group_name - gklm_security_group_name = var.gklm_security_group_name - ldap_security_group_name = var.ldap_security_group_name + count = var.enable_deployer == false ? 
1 : 0 + source = "./modules/landing_zone_vsi" + resource_group = var.resource_group_ids["workload_rg"] + prefix = var.cluster_prefix + vpc_id = local.vpc_id + zones = var.zones + bastion_security_group_id = var.bastion_security_group_id + bastion_public_key_content = local.bastion_public_key_content + ssh_keys = var.ssh_keys + client_subnets = local.client_subnets + client_instances = var.client_instances + cluster_subnet_id = local.cluster_subnets + management_instances = var.management_instances + static_compute_instances = var.static_compute_instances + dynamic_compute_instances = var.dynamic_compute_instances + storage_subnets = local.storage_subnets + storage_instances = var.storage_instances + storage_servers = var.storage_servers + storage_type = var.storage_type + protocol_subnets = local.protocol_subnets + protocol_instances = var.protocol_instances + nsd_details = var.nsd_details + dns_domain_names = var.dns_domain_names + kms_encryption_enabled = local.kms_encryption_enabled + boot_volume_encryption_key = var.boot_volume_encryption_key + existing_kms_instance_guid = var.existing_kms_instance_guid + enable_deployer = var.enable_deployer + afm_instances = var.afm_instances + enable_dedicated_host = var.enable_dedicated_host + enable_ldap = var.enable_ldap + ldap_instances = var.ldap_instance + ldap_server = local.ldap_server + ldap_instance_key_pair = local.ldap_instance_key_pair + scale_encryption_enabled = var.scale_encryption_enabled + scale_encryption_type = var.scale_encryption_type + gklm_instance_key_pair = local.gklm_instance_key_pair + gklm_instances = var.gklm_instances + vpc_region = local.region + scheduler = var.scheduler + ibm_customer_number = var.ibm_customer_number + colocate_protocol_instances = var.colocate_protocol_instances + storage_security_group_id = var.storage_security_group_id + login_instance = var.login_instance + bastion_subnets = local.login_subnets + cluster_cidr = local.cluster_cidr } module "prepare_tf_input" { @@ -165,11 +142,11 @@ module "prepare_tf_input" { enable_dedicated_host = var.enable_dedicated_host remote_allowed_ips = var.remote_allowed_ips vpc_name = local.vpc_name - compute_subnet_id = local.compute_subnet + storage_subnets = local.storage_subnet + protocol_subnets = local.protocol_subnet + cluster_subnet_id = local.cluster_subnet + client_subnets = local.client_subnet login_subnet_id = local.login_subnet - client_subnet_id = local.client_subnet - storage_subnet_id = local.storage_subnet - protocol_subnet_id = local.protocol_subnet login_instance = var.login_instance dns_domain_names = var.dns_domain_names key_management = local.key_management @@ -201,21 +178,17 @@ module "prepare_tf_input" { ldap_basedns = var.ldap_basedns ldap_server_cert = local.ldap_server_cert ldap_admin_password = local.ldap_admin_password + ldap_instance_key_pair = local.ldap_instance_key_pair ldap_user_password = var.ldap_user_password ldap_user_name = var.ldap_user_name afm_instances = var.afm_instances afm_cos_config = var.afm_cos_config + gklm_instance_key_pair = local.gklm_instance_key_pair gklm_instances = var.gklm_instances scale_encryption_type = var.scale_encryption_type filesystem_config = var.filesystem_config - filesets_config = var.filesets_config - storage_gui_username = var.storage_gui_username - storage_gui_password = var.storage_gui_password - compute_gui_username = var.compute_gui_username - compute_gui_password = var.compute_gui_password scale_encryption_admin_password = var.scale_encryption_admin_password scale_encryption_enabled = 
var.scale_encryption_enabled - key_protect_instance_id = var.key_protect_instance_id storage_security_group_id = var.storage_security_group_id custom_file_shares = var.custom_file_shares existing_bastion_instance_name = var.existing_bastion_instance_name @@ -226,17 +199,6 @@ module "prepare_tf_input" { cspm_enabled = var.cspm_enabled app_config_plan = var.app_config_plan existing_resource_group = var.existing_resource_group - tie_breaker_bm_server_profile = var.tie_breaker_bm_server_profile - scale_management_vsi_profile = var.scale_management_vsi_profile - login_security_group_name = var.login_security_group_name - storage_security_group_name = var.storage_security_group_name - compute_security_group_name = var.compute_security_group_name - client_security_group_name = var.client_security_group_name - gklm_security_group_name = var.gklm_security_group_name - ldap_security_group_name = var.ldap_security_group_name - bms_boot_drive_encryption = var.bms_boot_drive_encryption - scale_afm_bucket_config_details = local.scale_afm_bucket_config_details - scale_afm_cos_hmac_key_params = local.scale_afm_cos_hmac_key_params depends_on = [module.deployer] } @@ -264,15 +226,33 @@ module "resource_provisioner" { depends_on = [module.deployer, module.prepare_tf_input, module.validate_ldap_server_connection] } +module "cos" { + count = var.scheduler == "Scale" && local.enable_afm == true ? 1 : 0 + source = "./modules/cos" + prefix = "${var.cluster_prefix}-" + resource_group_id = local.resource_group_ids["service_rg"] + cos_instance_plan = "standard" + cos_instance_location = "global" + cos_instance_service = "cloud-object-storage" + cos_hmac_role = "Manager" + new_instance_bucket_hmac = local.new_instance_bucket_hmac + exstng_instance_new_bucket_hmac = local.exstng_instance_new_bucket_hmac + exstng_instance_bucket_new_hmac = local.exstng_instance_bucket_new_hmac + exstng_instance_hmac_new_bucket = local.exstng_instance_hmac_new_bucket + exstng_instance_bucket_hmac = local.exstng_instance_bucket_hmac + filesystem = var.storage_instances[*]["filesystem"] != "" ? var.storage_instances[0]["filesystem"] : var.filesystem_config[0]["filesystem"] + depends_on = [module.landing_zone_vsi] +} + module "file_storage" { - count = var.enable_deployer == false && var.scheduler == "LSF" ? 1 : 0 + count = var.enable_deployer == false ? 1 : 0 source = "./modules/file_storage" zone = var.zones[0] # always the first zone resource_group_id = var.resource_group_ids["workload_rg"] file_shares = local.file_shares - encryption_key_crn = var.boot_volume_encryption_key + encryption_key_crn = local.boot_volume_encryption_key security_group_ids = local.compute_security_group_id - subnet_id = local.compute_subnet + subnet_id = local.compute_subnet_id existing_kms_instance_guid = var.existing_kms_instance_guid skip_iam_share_authorization_policy = var.skip_iam_share_authorization_policy kms_encryption_enabled = local.kms_encryption_enabled @@ -309,7 +289,7 @@ module "storage_dns_records" { } module "protocol_reserved_ip" { - count = var.scheduler == "Scale" && var.enable_deployer == false && var.protocol_subnet_id != null ? 1 : 0 + count = var.scheduler == "Scale" && var.enable_deployer == false && var.protocol_subnets != null ? 
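The cos module above is instantiated zero-or-one times through count, so every downstream reference splats over it (module.cos[*]) to stay well-defined when it is absent. A sketch of that pattern, assuming the local ./modules/cos path and its afm_cos_bucket_details output; it is not runnable without that module on disk:

variable "scheduler" {
  type    = string
  default = "Scale"
}

locals {
  enable_afm = true # stand-in for sum(var.afm_instances[*]["count"]) > 0
}

module "cos" {
  source = "./modules/cos" # assumed local module
  count  = var.scheduler == "Scale" && local.enable_afm ? 1 : 0
}

locals {
  # flatten() over the splat yields [] when count = 0.
  afm_cos_bucket_details = local.enable_afm ? flatten(module.cos[*].afm_cos_bucket_details) : []
}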
1 : 0 source = "./modules/protocol_reserved_ip" total_reserved_ips = local.protocol_instance_count subnet_id = [local.protocol_subnets[0].id] @@ -338,10 +318,9 @@ module "gklm_dns_records" { depends_on = [module.dns] } -resource "time_sleep" "wait_for_vsi_syncup" { - count = var.enable_deployer == false && var.scheduler == "Scale" && var.storage_type != "persistent" && (can(regex("^ibm-redhat-8-10-minimal-amd64-.*$", (var.storage_instances[*]["image"])[0])) || local.enable_sec_interface_compute || local.enable_sec_interface_storage) ? 1 : 0 - create_duration = local.enable_sec_interface_compute || local.enable_sec_interface_storage ? "180s" : "300s" - depends_on = [module.storage_dns_records, module.protocol_reserved_ip, module.compute_dns_records, module.landing_zone_vsi] +resource "time_sleep" "wait_60_seconds" { + create_duration = "60s" + depends_on = [module.storage_dns_records, module.protocol_reserved_ip, module.compute_dns_records] } module "write_compute_cluster_inventory" { @@ -378,7 +357,7 @@ module "write_compute_cluster_inventory" { compute_subnet_crn = local.compute_subnet_crn kms_encryption_enabled = local.kms_encryption_enabled boot_volume_encryption_key = var.boot_volume_encryption_key - depends_on = [time_sleep.wait_for_vsi_syncup, module.landing_zone_vsi] + depends_on = [time_sleep.wait_60_seconds, module.landing_zone_vsi] } module "write_compute_scale_cluster_inventory" { @@ -393,12 +372,12 @@ module "write_compute_scale_cluster_inventory" { vpc_region = jsonencode(local.region) vpc_availability_zones = var.zones scale_version = jsonencode(local.scale_version) - compute_cluster_filesystem_mountpoint = jsonencode((var.static_compute_instances[*].filesystem)[0]) + compute_cluster_filesystem_mountpoint = jsonencode(var.scale_compute_cluster_filesystem_mountpoint) storage_cluster_filesystem_mountpoint = jsonencode("None") filesystem_block_size = jsonencode("None") - compute_cluster_instance_private_ips = concat(local.compute_instance_private_ips, local.compute_mgmt_instance_private_ips) - compute_cluster_instance_ids = concat(local.compute_instance_ids, local.compute_mgmt_instance_ids) - compute_cluster_instance_names = concat(local.compute_instance_names, local.compute_mgmt_instance_names) + compute_cluster_instance_private_ips = concat((local.enable_sec_interface_compute ? local.secondary_compute_instance_private_ips : local.compute_instance_private_ips), local.compute_mgmt_instance_private_ips) + compute_cluster_instance_ids = concat((local.enable_sec_interface_compute ? local.secondary_compute_instance_private_ips : local.compute_instance_ids), local.compute_mgmt_instance_ids) + compute_cluster_instance_names = concat((local.enable_sec_interface_compute ? local.secondary_compute_instance_private_ips : local.compute_instance_names), local.compute_mgmt_instance_names) compute_cluster_instance_private_dns_ip_map = {} storage_cluster_instance_ids = [] storage_cluster_instance_private_ips = [] @@ -410,8 +389,8 @@ module "write_compute_scale_cluster_inventory" { storage_cluster_desc_instance_private_dns_ip_map = {} storage_cluster_instance_names = [] storage_subnet_cidr = local.enable_mrot_conf ? local.storage_subnet_cidr : jsonencode("") - compute_subnet_cidr = local.enable_mrot_conf ? local.compute_subnet_cidr : jsonencode("") - scale_remote_cluster_clustername = local.enable_mrot_conf ? jsonencode(format("%s.%s", var.cluster_prefix, var.dns_domain_names["storage"])) : jsonencode("") + compute_subnet_cidr = local.enable_mrot_conf ? 
local.cluster_subnet_cidr : jsonencode("") + scale_remote_cluster_clustername = local.enable_mrot_conf ? jsonencode(format("%s.%s", var.cluster_prefix, var.dns_domain_names["storage"])) : jsonencode("") protocol_cluster_instance_names = [] client_cluster_instance_names = [] protocol_cluster_reserved_names = "" @@ -423,12 +402,12 @@ module "write_compute_scale_cluster_inventory" { filesystem = jsonencode("") mountpoint = jsonencode("") protocol_gateway_ip = jsonencode("") filesets = local.fileset_size_map afm_cos_bucket_details = [] afm_config_details = [] afm_cluster_instance_names = [] - filesystem_mountpoint = local.encryption_filesystem_mountpoint - depends_on = [time_sleep.wait_for_vsi_syncup, module.landing_zone_vsi] + filesystem_mountpoint = var.scale_encryption_type == "key_protect" ? (var.storage_instances[*]["filesystem"] != "" ? var.storage_instances[*]["filesystem"] : jsonencode(var.filesystem_config[0]["filesystem"])) : jsonencode("") + depends_on = [time_sleep.wait_60_seconds] } module "write_storage_scale_cluster_inventory" { @@ -444,15 +423,15 @@ module "write_storage_scale_cluster_inventory" { vpc_availability_zones = var.zones scale_version = jsonencode(local.scale_version) compute_cluster_filesystem_mountpoint = jsonencode("None") - storage_cluster_filesystem_mountpoint = jsonencode(local.filesystem_mountpoint) + storage_cluster_filesystem_mountpoint = jsonencode(var.filesystem_config[0]["mount_point"]) filesystem_block_size = jsonencode(var.filesystem_config[0]["block_size"]) compute_cluster_instance_ids = [] compute_cluster_instance_private_ips = [] compute_cluster_instance_private_dns_ip_map = {} compute_cluster_instance_names = [] - storage_cluster_instance_ids = var.storage_type == "persistent" ? concat(local.baremetal_cluster_instance_ids, local.strg_mgmtt_instance_ids, local.bm_tie_breaker_ids) : concat(local.storage_cluster_instance_ids, local.strg_mgmtt_instance_ids, local.tie_breaker_storage_instance_ids) - storage_cluster_instance_private_ips = var.storage_type == "persistent" ? concat(local.baremetal_cluster_instance_private_ips, local.strg_mgmt_instance_private_ips, local.bm_tie_breaker_private_ips) : concat(local.storage_cluster_instance_private_ips, local.strg_mgmt_instance_private_ips, local.tie_breaker_storage_instance_private_ips) - storage_cluster_instance_names = var.storage_type == "persistent" ? concat(local.baremetal_cluster_instance_names, local.strg_mgmt_instance_names, local.bm_tie_breaker_names) : concat(local.storage_cluster_instance_names, local.strg_mgmt_instance_names, local.tie_breaker_storage_instance_names) + storage_cluster_instance_ids = var.storage_type == "persistent" ? concat(local.baremetal_cluster_instance_ids, local.strg_mgmtt_instance_ids, local.tie_breaker_storage_instance_ids) : concat(local.storage_cluster_instance_ids, local.strg_mgmtt_instance_ids, local.tie_breaker_storage_instance_ids) + storage_cluster_instance_private_ips = var.storage_type == "persistent" ? concat(local.baremetal_cluster_instance_private_ips, local.strg_mgmt_instance_private_ips, local.tie_breaker_storage_instance_private_ips) : concat(local.storage_cluster_instance_private_ips, local.strg_mgmt_instance_private_ips, local.tie_breaker_storage_instance_private_ips) + storage_cluster_instance_names = var.storage_type == "persistent" ?
concat(local.baremetal_cluster_instance_names, local.strg_mgmt_instance_names, local.tie_breaker_storage_instance_names) : concat(local.storage_cluster_instance_names, local.strg_mgmt_instance_names, local.tie_breaker_storage_instance_names) storage_cluster_with_data_volume_mapping = local.storage_ips_with_vol_mapping[0] storage_cluster_instance_private_dns_ip_map = {} storage_cluster_desc_instance_private_ips = local.strg_tie_breaker_private_ips @@ -460,7 +439,7 @@ module "write_storage_scale_cluster_inventory" { storage_cluster_desc_data_volume_mapping = local.tie_breaker_ips_with_vol_mapping[0] storage_cluster_desc_instance_private_dns_ip_map = {} storage_subnet_cidr = local.enable_mrot_conf ? local.storage_subnet_cidr : jsonencode("") - compute_subnet_cidr = local.enable_mrot_conf ? local.compute_subnet_cidr : local.scale_ces_enabled == true && local.client_instance_count > 0 ? local.client_subnet_cidr : jsonencode("") + compute_subnet_cidr = local.enable_mrot_conf ? local.cluster_subnet_cidr : local.scale_ces_enabled == true ? local.client_subnet_cidr : jsonencode("") scale_remote_cluster_clustername = local.enable_mrot_conf ? jsonencode(format("%s.%s", var.cluster_prefix, var.dns_domain_names["compute"])) : jsonencode("") protocol_cluster_instance_names = local.scale_ces_enabled == true ? local.protocol_cluster_instance_names : [] client_cluster_instance_names = [] @@ -471,14 +450,14 @@ module "write_storage_scale_cluster_inventory" { interface = [] export_ip_pool = local.scale_ces_enabled == true ? values(one(module.protocol_reserved_ip[*].instance_name_ip_map)) : [] filesystem = local.scale_ces_enabled == true ? jsonencode("cesSharedRoot") : jsonencode("") - mountpoint = local.scale_ces_enabled == true ? jsonencode(local.filesystem_mountpoint) : jsonencode("") + mountpoint = local.scale_ces_enabled == true ? jsonencode(var.filesystem_config[0]["mount_point"]) : jsonencode("") protocol_gateway_ip = jsonencode(local.protocol_subnet_gateway_ip) filesets = local.fileset_size_map - afm_config_details = var.scale_afm_bucket_config_details - afm_cos_bucket_details = var.scale_afm_cos_hmac_key_params - afm_cluster_instance_names = local.afm_names_final - filesystem_mountpoint = local.encryption_filesystem_mountpoint - depends_on = [time_sleep.wait_for_vsi_syncup, module.landing_zone_vsi] + afm_cos_bucket_details = local.enable_afm == true ? local.afm_cos_bucket_details : [] + afm_config_details = local.enable_afm == true ? local.afm_cos_config : [] + afm_cluster_instance_names = local.afm_instance_names + filesystem_mountpoint = var.scale_encryption_type == "key_protect" ? (var.storage_instances[*]["filesystem"] != "" ? var.storage_instances[*]["filesystem"] : jsonencode(var.filesystem_config[0]["filesystem"])) : jsonencode("") + depends_on = [time_sleep.wait_60_seconds] } module "write_client_scale_cluster_inventory" { @@ -498,7 +477,7 @@ module "write_client_scale_cluster_inventory" { compute_cluster_instance_ids = [] compute_cluster_instance_private_ips = [] compute_cluster_instance_private_dns_ip_map = {} - storage_cluster_filesystem_mountpoint = local.scale_ces_enabled == true ? jsonencode(local.filesystem_mountpoint) : jsonencode("") + storage_cluster_filesystem_mountpoint = local.scale_ces_enabled == true ? 
jsonencode(var.filesystem_config[0]["mount_point"]) : jsonencode("") storage_cluster_instance_ids = [] storage_cluster_instance_private_ips = [] storage_cluster_with_data_volume_mapping = {} @@ -528,70 +507,6 @@ module "write_client_scale_cluster_inventory" { afm_config_details = [] afm_cluster_instance_names = [] filesystem_mountpoint = jsonencode("") - depends_on = [time_sleep.wait_for_vsi_syncup, module.landing_zone_vsi] -} - -module "key_protect_scale" { - count = var.scale_encryption_enabled == true && var.scale_encryption_type == "key_protect" && var.enable_deployer == false ? 1 : 0 - source = "./modules/key_protect" - key_protect_instance_id = var.key_protect_instance_id != null ? var.key_protect_instance_id : var.existing_kms_instance_guid - resource_prefix = var.cluster_prefix - vpc_region = local.region - scale_config_path = format("%s/key_protect", var.scale_config_path) - vpc_storage_cluster_dns_domain = var.dns_domain_names["storage"] -} - -module "ldap_configuration" { - count = var.scheduler == "Scale" && var.enable_deployer == false && var.enable_ldap && var.ldap_server == "null" ? 1 : 0 - source = "./modules/common/ldap_configuration" - turn_on = var.enable_ldap - clone_path = var.scale_ansible_repo_clone_path - create_scale_cluster = var.create_scale_cluster - bastion_user = jsonencode(var.bastion_user) - write_inventory_complete = module.write_storage_scale_cluster_inventory[0].write_scale_inventory_complete - ldap_cluster_prefix = var.cluster_prefix - using_jumphost_connection = var.using_jumphost_connection - bastion_instance_public_ip = local.bastion_fip - bastion_ssh_private_key = local.bastion_ssh_private_key != null ? local.bastion_ssh_private_key : local.bastion_private_key_content - ldap_basedns = var.ldap_basedns - ldap_admin_password = var.ldap_admin_password - ldap_user_name = var.ldap_user_name - ldap_user_password = var.ldap_user_password - ldap_server = var.enable_ldap ? (var.ldap_server != "null" ? var.ldap_server : local.ldap_instance_private_ips[0]) : null - meta_private_key = module.landing_zone_vsi[0].storage_private_key_content - depends_on = [module.validate_ldap_server_connection, module.landing_zone_vsi] -} - -module "host_resolution_add" { - count = var.scheduler == "Scale" && var.enable_deployer == false ? 1 : 0 - source = "./modules/host_resolution_add" - scheduler = var.scheduler - clone_path = var.scale_ansible_repo_clone_path - storage_hosts = local.storage_host_entry - storage_mgmnt_hosts = local.storage_mgmnt_host_entry - storage_tb_hosts = local.storage_tb_host_entry - compute_hosts = local.compute_host_entry - compute_mgmnt_hosts = local.compute_mgmnt_host_entry - client_hosts = local.client_host_entry - protocol_hosts = local.protocol_host_entry - gklm_hosts = local.gklm_host_entry - afm_hosts = local.afm_host_entry - storage_bms_hosts = local.storage_bms_host_entry - storage_tb_bms_hosts = local.storage_tb_bms_host_entry - protocol_bms_hosts = local.protocol_bms_host_entry - afm_bms_hosts = local.afm_bms_host_entry - domain_names = var.dns_domain_names - storage_type = var.storage_type - storage_interface = local.bms_interfaces[0] - protocol_interface = local.bms_interfaces[1] - enable_protocol = local.enable_protocol && var.colocate_protocol_instances ? true : false - vpc_region = local.region - resource_group = var.resource_group_ids["service_rg"] - protocol_subnets = local.enable_protocol ? (length(local.protocol_subnets) > 0 ? 
local.protocol_subnets[0].id : "") : "" - bms_boot_drive_encryption = var.bms_boot_drive_encryption - ibmcloud_api_key = var.ibmcloud_api_key - scale_encryption_type = var.scale_encryption_type - depends_on = [module.landing_zone_vsi] } module "compute_cluster_configuration" { @@ -620,16 +535,15 @@ module "compute_cluster_configuration" { enable_mrot_conf = local.enable_mrot_conf enable_ces = false enable_afm = false - scale_encryption_type = var.scale_encryption_type != null ? var.scale_encryption_type : null scale_encryption_enabled = var.scale_encryption_enabled - scale_encryption_admin_password = var.scale_encryption_admin_password == null ? "null" : var.scale_encryption_admin_password + scale_encryption_admin_password = var.scale_encryption_admin_password scale_encryption_servers = var.scale_encryption_enabled && var.scale_encryption_type == "gklm" ? local.gklm_instance_private_ips : [] enable_ldap = var.enable_ldap ldap_basedns = var.ldap_basedns - ldap_server = var.enable_ldap ? (var.ldap_server != "null" ? var.ldap_server : local.ldap_instance_private_ips[0]) : null + ldap_server = var.enable_ldap ? local.ldap_instance_private_ips[0] : null ldap_admin_password = local.ldap_admin_password == "" ? jsonencode(null) : local.ldap_admin_password - enable_key_protect = var.scale_encryption_type == "key_protect" ? "True" : "False" - depends_on = [module.write_compute_scale_cluster_inventory, module.key_protect_scale, module.ldap_configuration, module.host_resolution_add] + enable_key_protect = var.scale_encryption_type + depends_on = [module.write_compute_scale_cluster_inventory] } module "storage_cluster_configuration" { @@ -641,7 +555,6 @@ module "storage_cluster_configuration" { inventory_format = var.inventory_format create_scale_cluster = var.create_scale_cluster clone_path = var.scale_ansible_repo_clone_path - scale_config_path = var.scale_config_path inventory_path = format("%s/storage_cluster_inventory.json", var.scale_ansible_repo_clone_path) using_packer_image = var.using_packer_image using_jumphost_connection = var.using_jumphost_connection @@ -650,7 +563,6 @@ module "storage_cluster_configuration" { storage_cluster_gui_password = var.storage_gui_password colocate_protocol_instances = var.colocate_protocol_instances is_colocate_protocol_subset = local.is_colocate_protocol_subset - bms_boot_drive_encryption = var.bms_boot_drive_encryption mgmt_memory = local.management_memory mgmt_vcpus_count = local.management_vcpus_count mgmt_bandwidth = local.management_bandwidth @@ -684,22 +596,21 @@ module "storage_cluster_configuration" { enable_afm = local.enable_afm scale_encryption_enabled = var.scale_encryption_enabled scale_encryption_type = var.scale_encryption_type != null ? var.scale_encryption_type : null - scale_encryption_admin_password = var.scale_encryption_admin_password == null ? "null" : var.scale_encryption_admin_password + scale_encryption_admin_password = var.scale_encryption_admin_password scale_encryption_servers = var.scale_encryption_enabled && var.scale_encryption_type == "gklm" ? local.gklm_instance_private_ips : [] enable_ldap = var.enable_ldap ldap_basedns = var.ldap_basedns - ldap_server = var.enable_ldap ? (var.ldap_server != "null" ? var.ldap_server : local.ldap_instance_private_ips[0]) : null + ldap_server = var.enable_ldap ? local.ldap_instance_private_ips[0] : null ldap_admin_password = local.ldap_admin_password == "" ? 
jsonencode(null) : local.ldap_admin_password ldap_server_cert = local.ldap_server_cert - enable_key_protect = var.scale_encryption_type == "key_protect" ? "True" : "False" - storage_type = var.storage_type - depends_on = [module.write_storage_scale_cluster_inventory, module.key_protect_scale, module.ldap_configuration, module.host_resolution_add] + enable_key_protect = var.scale_encryption_type + depends_on = [module.write_storage_scale_cluster_inventory] } module "client_configuration" { count = var.scheduler == "Scale" && var.enable_deployer == false ? 1 : 0 - source = "./modules/common/client_configuration" - turn_on = (local.client_instance_count > 0 && var.create_separate_namespaces == true && local.scale_ces_enabled == true) || (local.client_instance_count > 0 && var.create_separate_namespaces == true && var.colocate_protocol_instances) ? true : false + source = "./modules/common/client_configuration" + turn_on = (local.client_instance_count > 0 && var.create_separate_namespaces == true && local.scale_ces_enabled == true) ? true : false create_scale_cluster = var.create_scale_cluster storage_cluster_create_complete = module.storage_cluster_configuration[0].storage_cluster_create_complete clone_path = var.scale_ansible_repo_clone_path @@ -708,13 +619,13 @@ module "client_configuration" { bastion_user = jsonencode(var.bastion_user) bastion_instance_public_ip = jsonencode(local.bastion_fip) bastion_ssh_private_key = var.bastion_ssh_private_key - client_meta_private_key = module.landing_zone_vsi[0].client_private_key_content + client_meta_private_key = module.landing_zone_vsi[0].compute_private_key_content write_inventory_complete = module.write_storage_scale_cluster_inventory[0].write_scale_inventory_complete enable_ldap = var.enable_ldap ldap_basedns = var.ldap_basedns - ldap_server = var.enable_ldap ? (var.ldap_server != "null" ? var.ldap_server : local.ldap_instance_private_ips[0]) : null + ldap_server = var.enable_ldap ? jsonencode(local.ldap_instance_private_ips[0]) : jsonencode(null) ldap_admin_password = local.ldap_admin_password == "" ? jsonencode(null) : local.ldap_admin_password - depends_on = [module.storage_cluster_configuration, module.ldap_configuration, module.host_resolution_add] + depends_on = [module.compute_cluster_configuration, module.storage_cluster_configuration] } module "remote_mount_configuration" { @@ -741,73 +652,11 @@ module "remote_mount_configuration" { depends_on = [module.compute_cluster_configuration, module.storage_cluster_configuration] } -module "invoke_compute_network_playbook" { - count = var.scheduler == "Scale" ? 1 : 0 - source = "./modules/common/network_playbook" - turn_on = (var.create_separate_namespaces == true && local.static_compute_instance_count > 0) ? true : false - create_scale_cluster = var.create_scale_cluster - compute_cluster_create_complete = var.enable_deployer ? false : module.compute_cluster_configuration[0].compute_cluster_create_complete - storage_cluster_create_complete = var.enable_deployer ?
false : module.storage_cluster_configuration[0].storage_cluster_create_complete - inventory_path = format("%s/%s/compute_inventory.ini", var.scale_ansible_repo_clone_path, "ibm-spectrum-scale-install-infra") - network_playbook_path = format("%s/%s/collections/ansible_collections/ibm/spectrum_scale/samples/playbook_cloud_network_config.yaml", var.scale_ansible_repo_clone_path, "ibm-spectrum-scale-install-infra") - depends_on = [module.compute_cluster_configuration, module.storage_cluster_configuration] -} - -module "invoke_storage_network_playbook" { - count = var.scheduler == "Scale" ? 1 : 0 - source = "./modules/common/network_playbook" - turn_on = (var.create_separate_namespaces == true && local.storage_instance_count > 0) ? true : false - create_scale_cluster = var.create_scale_cluster - compute_cluster_create_complete = var.enable_deployer ? false : module.compute_cluster_configuration[0].compute_cluster_create_complete - storage_cluster_create_complete = var.enable_deployer ? false : module.storage_cluster_configuration[0].storage_cluster_create_complete - inventory_path = format("%s/%s/storage_inventory.ini", var.scale_ansible_repo_clone_path, "ibm-spectrum-scale-install-infra") - network_playbook_path = format("%s/%s/collections/ansible_collections/ibm/spectrum_scale/samples/playbook_cloud_network_config.yaml", var.scale_ansible_repo_clone_path, "ibm-spectrum-scale-install-infra") - depends_on = [module.compute_cluster_configuration, module.storage_cluster_configuration] -} - -module "encryption_configuration" { - source = "./modules/common/encryption_configuration" - count = var.scheduler == "Scale" && var.enable_deployer == false && var.scale_encryption_enabled && var.scale_encryption_type == "gklm" ? 1 : 0 - turn_on = (var.create_separate_namespaces == true && local.storage_instance_count > 0) ? true : false - clone_path = var.scale_ansible_repo_clone_path - create_scale_cluster = var.create_scale_cluster - meta_private_key = module.landing_zone_vsi[0].storage_private_key_content - scale_encryption_type = var.scale_encryption_type != null ? var.scale_encryption_type : null - scale_encryption_admin_password = var.scale_encryption_admin_password - scale_encryption_servers = var.scale_encryption_enabled && var.scale_encryption_type == "gklm" ? local.gklm_instance_private_ips : [] - scale_encryption_servers_dns = var.scale_encryption_type == "gklm" ? [for instance in local.gklm_instances : "${instance.name}.${var.dns_domain_names["gklm"]}"] : [] - scale_cluster_clustername = var.cluster_prefix - scale_encryption_admin_default_password = local.scale_encryption_admin_default_password - scale_encryption_admin_username = local.scale_encryption_admin_username - compute_cluster_create_complete = module.compute_cluster_configuration[0].compute_cluster_create_complete - storage_cluster_create_complete = module.storage_cluster_configuration[0].storage_cluster_create_complete - remote_mount_create_complete = module.remote_mount_configuration[0].remote_mount_create_complete - compute_cluster_encryption = (var.create_separate_namespaces == true && local.static_compute_instance_count > 0) ? true : false - storage_cluster_encryption = (var.create_separate_namespaces == true && local.storage_instance_count > 0) ? 
true : false - depends_on = [module.client_configuration, module.compute_cluster_configuration, module.storage_cluster_configuration] -} - -module "key_protect_encryption_configuration" { - source = "./modules/common/key_protect_configuration" - count = var.scheduler == "Scale" && var.enable_deployer == false && var.scale_encryption_enabled && var.scale_encryption_type == "key_protect" ? 1 : 0 - turn_on = (var.create_separate_namespaces == true && local.storage_instance_count > 0) ? true : false - clone_path = var.scale_ansible_repo_clone_path - create_scale_cluster = var.create_scale_cluster - scale_encryption_type = var.scale_encryption_type != null ? var.scale_encryption_type : null - compute_cluster_create_complete = module.compute_cluster_configuration[0].compute_cluster_create_complete - storage_cluster_create_complete = module.storage_cluster_configuration[0].storage_cluster_create_complete - remote_mount_create_complete = module.remote_mount_configuration[0].remote_mount_create_complete - compute_cluster_encryption = (var.create_separate_namespaces == true && local.static_compute_instance_count > 0) ? true : false - storage_cluster_encryption = (var.create_separate_namespaces == true && local.storage_instance_count > 0) ? true : false - depends_on = [module.client_configuration, module.compute_cluster_configuration, module.storage_cluster_configuration] -} - module "compute_inventory" { count = var.enable_deployer == false ? 1 : 0 source = "./modules/inventory" scheduler = var.scheduler hosts = local.compute_hosts - gui_hosts = local.gui_hosts login_host = local.login_host inventory_path = local.compute_inventory_path name_mount_path_map = local.fileshare_name_mount_path_map @@ -835,7 +684,7 @@ module "compute_inventory" { } module "ldap_inventory" { - count = var.enable_deployer == false && var.scheduler == "LSF" && var.enable_ldap && local.ldap_server == "null" ? 1 : 0 + count = var.enable_deployer == false && var.enable_ldap && local.ldap_server == "null" ? 1 : 0 source = "./modules/inventory" prefix = var.cluster_prefix name_mount_path_map = local.fileshare_name_mount_path_map @@ -851,7 +700,7 @@ module "ldap_inventory" { } module "mgmt_inventory_hosts" { - count = var.enable_deployer == false && var.scheduler == "LSF" ? 1 : 0 + count = var.enable_deployer == false ? 1 : 0 source = "./modules/inventory_hosts" hosts = local.mgmt_hosts_ips inventory_path = local.mgmt_hosts_inventory_path @@ -860,12 +709,12 @@ module "mgmt_inventory_hosts" { module "compute_inventory_hosts" { count = var.enable_deployer == false ? 1 : 0 source = "./modules/inventory_hosts" - hosts = var.scheduler == "Scale" ? local.all_compute_hosts : local.compute_hosts_ips + hosts = local.compute_hosts_ips inventory_path = local.compute_hosts_inventory_path } module "login_inventory_host" { - count = var.enable_deployer == false && var.scheduler == "LSF" ? 1 : 0 + count = var.enable_deployer == false ? 1 : 0 source = "./modules/inventory_hosts" hosts = local.login_host_ip inventory_path = local.login_host_inventory_path @@ -892,50 +741,6 @@ module "ldap_inventory_hosts" { inventory_path = local.ldap_hosts_inventory_path } -module "client_inventory_hosts" { - count = var.enable_deployer == false && var.scheduler == "Scale" ? 1 : 0 - source = "./modules/inventory_hosts" - hosts = local.client_hosts - inventory_path = local.client_hosts_inventory_path -} - -module "protocol_inventory_hosts" { - count = var.enable_deployer == false && var.scheduler == "Scale" ? 
1 : 0 - source = "./modules/inventory_hosts" - hosts = local.protocol_hosts - inventory_path = local.protocol_hosts_inventory_path -} - -module "afm_inventory_hosts" { - count = var.enable_deployer == false && var.scheduler == "Scale" ? 1 : 0 - source = "./modules/inventory_hosts" - hosts = local.afm_hosts - inventory_path = local.afm_hosts_inventory_path -} - -module "gklm_inventory_hosts" { - count = var.enable_deployer == false && var.scheduler == "Scale" ? 1 : 0 - source = "./modules/inventory_hosts" - hosts = local.gklm_hosts - inventory_path = local.gklm_hosts_inventory_path -} - -module "storage_inventory_hosts" { - count = var.enable_deployer == false && var.scheduler == "Scale" ? 1 : 0 - source = "./modules/inventory_hosts" - hosts = local.all_storage_hosts - inventory_path = local.storage_hosts_inventory_path -} - -module "host_resolution_remove" { - count = var.scheduler == "Scale" && var.enable_deployer == false ? 1 : 0 - turn_on = var.create_separate_namespaces - source = "./modules/host_resolution_remove" - create_scale_cluster = var.create_scale_cluster - clone_path = var.scale_ansible_repo_clone_path - depends_on = [module.compute_cluster_configuration, module.storage_cluster_configuration, module.remote_mount_configuration, module.encryption_configuration, module.invoke_compute_network_playbook, module.invoke_storage_network_playbook] -} - module "compute_playbook" { count = var.enable_deployer == false ? 1 : 0 source = "./modules/playbook" @@ -982,17 +787,16 @@ module "cloud_monitoring_instance_creation" { cloud_logs_as_atracker_target = var.observability_atracker_enable && (var.observability_atracker_target_type == "cloudlogs") ? true : false cloud_logs_data_bucket = var.cloud_logs_data_bucket cloud_metrics_data_bucket = var.cloud_metrics_data_bucket - tags = [local.scheduler_lowercase, var.cluster_prefix] + tags = ["lsf", var.cluster_prefix] } -# Code for SCC Instance module "scc_workload_protection" { source = "./modules/security/sccwp" resource_group_name = var.existing_resource_group != "null" ? 
var.existing_resource_group : "${var.cluster_prefix}-service-rg" prefix = var.cluster_prefix region = local.region sccwp_service_plan = var.sccwp_service_plan - resource_tags = [local.scheduler_lowercase, var.cluster_prefix] + resource_tags = ["lsf", var.cluster_prefix] enable_deployer = var.enable_deployer sccwp_enable = var.sccwp_enable cspm_enabled = var.cspm_enabled diff --git a/modules/ansible-roles/roles/cloudmonitoring/tasks/mgmt-cloudmonitoring-configure.yml b/modules/ansible-roles/roles/cloudmonitoring/tasks/mgmt-cloudmonitoring-configure.yml index c5456052..48f7b26b 100644 --- a/modules/ansible-roles/roles/cloudmonitoring/tasks/mgmt-cloudmonitoring-configure.yml +++ b/modules/ansible-roles/roles/cloudmonitoring/tasks/mgmt-cloudmonitoring-configure.yml @@ -211,13 +211,13 @@ when: - monitoring_enable_for_management | bool -# - name: Restart lsfd service to apply scheduler metric changes -# ansible.builtin.systemd: -# name: lsfd -# state: restarted -# enabled: yes -# when: -# - monitoring_enable_for_management | bool +- name: Restart lsfd service to apply scheduler metric changes + ansible.builtin.systemd: + name: lsfd + state: restarted + enabled: yes + when: + - monitoring_enable_for_management | bool - name: Reload systemd and start Prometheus Agent ansible.builtin.systemd: diff --git a/modules/ansible-roles/roles/lsf_login_config/tasks/login_node_configuration.yml b/modules/ansible-roles/roles/lsf_login_config/tasks/login_node_configuration.yml index 9afcc25a..6fa39b96 100644 --- a/modules/ansible-roles/roles/lsf_login_config/tasks/login_node_configuration.yml +++ b/modules/ansible-roles/roles/lsf_login_config/tasks/login_node_configuration.yml @@ -43,7 +43,7 @@ - name: Ensure login node entry exists in LSF hosts file lineinfile: path: /mnt/lsf/lsf/conf/hosts - line: "{{ ip_result.stdout }} {{ login_node_host }}" + line: "{{ ip_result.stdout }} {{ login_node_host }}.{{ dns_domain_names }}" state: present insertafter: EOF create: yes @@ -52,7 +52,7 @@ lineinfile: path: "{{ LSF_CLUSTER_FILE }}" insertafter: "^#prune" - line: "{{ login_node_host }} Intel_E5 X86_64 0 ()" + line: "{{ login_node_host }}.{{ dns_domain_names }} Intel_E5 X86_64 0 ()" state: present - name: Ensure LSF profile is sourced in root's .bashrc diff --git a/modules/ansible-roles/roles/lsf_mgmt_config/tasks/app_center_configure.yml b/modules/ansible-roles/roles/lsf_mgmt_config/tasks/app_center_configure.yml index 73a9f5f8..5f72237a 100644 --- a/modules/ansible-roles/roles/lsf_mgmt_config/tasks/app_center_configure.yml +++ b/modules/ansible-roles/roles/lsf_mgmt_config/tasks/app_center_configure.yml @@ -6,11 +6,13 @@ register: https_check changed_when: "'was already enabled' not in https_check.stdout" failed_when: false + run_once: true - name: PAC | Debug HTTPS status ansible.builtin.debug: msg: "HTTPS is already enabled" when: "'was already enabled' in https_check.stdout" + run_once: true - name: PAC | Configure HTTPS for AppCenter block: @@ -68,3 +70,4 @@ msg: "AppCenter HTTPS configuration block completed (success or failure)." 
when: "'was already enabled' not in https_check.stdout" + run_once: true diff --git a/modules/ansible-roles/roles/lsf_mgmt_config/tasks/configure_management_nodes.yml b/modules/ansible-roles/roles/lsf_mgmt_config/tasks/configure_management_nodes.yml index a57822b9..670fc5f3 100644 --- a/modules/ansible-roles/roles/lsf_mgmt_config/tasks/configure_management_nodes.yml +++ b/modules/ansible-roles/roles/lsf_mgmt_config/tasks/configure_management_nodes.yml @@ -32,7 +32,7 @@ # ANSIBLE MANAGED: QUEUE_NAME added Begin Queue QUEUE_NAME=das_q - DATA_TRANSFER=N + DATA_TRANSFER=Y RC_HOSTS=all HOSTS=all RES_REQ=type==any diff --git a/modules/ansible-roles/roles/lsf_mgmt_config/tasks/hosts_file_update.yml b/modules/ansible-roles/roles/lsf_mgmt_config/tasks/hosts_file_update.yml index e8199959..ca3c63fa 100644 --- a/modules/ansible-roles/roles/lsf_mgmt_config/tasks/hosts_file_update.yml +++ b/modules/ansible-roles/roles/lsf_mgmt_config/tasks/hosts_file_update.yml @@ -60,10 +60,6 @@ path: "{{ LSF_HOSTS_FILE }}" register: lsf_hosts_stat -- name: Management Config | Pause for 5 seconds - ansible.builtin.pause: - seconds: 5 - - name: Management Config | Restore LSF hosts file if missing copy: src: "{{ HA_shared_dir }}/lsf/conf/hosts" diff --git a/modules/ansible-roles/roles/lsf_mgmt_config/tasks/main.yml b/modules/ansible-roles/roles/lsf_mgmt_config/tasks/main.yml index 0e3c3ea6..1f745d31 100644 --- a/modules/ansible-roles/roles/lsf_mgmt_config/tasks/main.yml +++ b/modules/ansible-roles/roles/lsf_mgmt_config/tasks/main.yml @@ -7,9 +7,5 @@ - import_tasks: hosts_file_update.yml - import_tasks: app_center_configure.yml - when: inventory_hostname in groups['gui_hosts'] - -- import_tasks: web_services.yml - when: lsf_version == "fixpack_15" and inventory_hostname in groups['gui_hosts'] - import_tasks: configure_dynamic_nodes_templates.yml diff --git a/modules/ansible-roles/roles/lsf_mgmt_config/tasks/web_services.yml b/modules/ansible-roles/roles/lsf_mgmt_config/tasks/web_services.yml deleted file mode 100644 index 29de05d8..00000000 --- a/modules/ansible-roles/roles/lsf_mgmt_config/tasks/web_services.yml +++ /dev/null @@ -1,14 +0,0 @@ -# Enabling HTTPS for Web Services ---- - -- name: Web Services | Enable HTTPS with lwsadmin - ansible.builtin.shell: lwsadmin https enable --password {{ app_center_gui_password }} --validhosts {{ lsf_masters[0] }},localhost - register: https_enable_result - changed_when: "'already enabled' not in https_enable_result.stdout" - no_log: true - -- name: Web Services | Restart lwsd service if HTTPS was enabled - ansible.builtin.systemd: - name: lwsd - state: restarted - when: "'already enabled' not in https_enable_result.stdout" diff --git a/modules/ansible-roles/roles/lsf_mgmt_config/templates/user_data.sh b/modules/ansible-roles/roles/lsf_mgmt_config/templates/user_data.sh index 346ada6a..4d5336e1 100644 --- a/modules/ansible-roles/roles/lsf_mgmt_config/templates/user_data.sh +++ b/modules/ansible-roles/roles/lsf_mgmt_config/templates/user_data.sh @@ -209,21 +209,6 @@ LSF_GPU_AUTOCONFIG=Y LSB_GPU_NEW_SYNTAX=extend EOF -# Support rc_account resource to enable RC_ACCOUNT policy -sed -i '$ a LSF_LOCAL_RESOURCES=\"[resource icgen2host]\"' $LSF_CONF_FILE - -# shellcheck disable=SC2154 -sed -i "s/\(LSF_LOCAL_RESOURCES=.*\)\"/\1 [resourcemap ${rc_account}*rc_account]\"/" $LSF_CONF_FILE - -# Add additional local resources if needed -instance_id=$(dmidecode | grep Family | cut -d ' ' -f 2 |head -1) -if [ -n "$instance_id" ]; then - sed -i "s/\(LSF_LOCAL_RESOURCES=.*\)\"/\1 [resourcemap 
${instance_id}\*instanceID]\"/" "$LSF_CONF_FILE" - echo "Update LSF_LOCAL_RESOURCES in $LSF_CONF_FILE successfully, add [resourcemap ${instance_id}*instanceID]" >> "$logfile" -else - echo "Can not get instance ID" >> $logfile -fi - # source profile.lsf echo "source ${LSF_CONF}/profile.lsf" >>~/.bashrc echo "source ${LSF_CONF}/profile.lsf" >>"$LDAP_DIR"/.bashrc @@ -233,7 +218,7 @@ source "$LDAP_DIR/.bashrc" chown -R lsfadmin $LSF_TOP chown -R lsfadmin $LSF_WORK -# Restart the lsfd service +# Restart the lsfd servive service lsfd stop && sleep 2 && service lsfd start sleep 10 diff --git a/modules/ansible-roles/roles/lsf_post_config/tasks/cluster_validation.yml b/modules/ansible-roles/roles/lsf_post_config/tasks/cluster_validation.yml index 6e3cefe8..f16b9361 100644 --- a/modules/ansible-roles/roles/lsf_post_config/tasks/cluster_validation.yml +++ b/modules/ansible-roles/roles/lsf_post_config/tasks/cluster_validation.yml @@ -21,7 +21,7 @@ when: inventory_hostname == groups['management_nodes'][0] - name: Cluster Status | Fetch node status using bhosts - ansible.builtin.shell: su - lsfadmin -c 'bhosts -w' + ansible.builtin.shell: bhosts -w register: cluster_status_output changed_when: false when: inventory_hostname == groups['management_nodes'][0] @@ -31,11 +31,13 @@ msg: "{{ cluster_status_output.stdout }}" when: inventory_hostname == groups['management_nodes'][0] -- name: Duplicate Logs | Remove duplicate logs from LSF log directory if not deleted - shell: | - rm -rf /opt/ibm/lsflogs/* /opt/ibm/lsflogs/.* 2>/dev/null || true - args: - warn: false - ignore_errors: true - become: true - when: inventory_hostname in groups['management_nodes'] +- name: Cluster Health | Restart lsfd if any node is unreach or unavail + ansible.builtin.shell: | + if bhosts -w | grep -Eq 'unreach|unavail'; then + systemctl restart lsfd + sleep 5 + echo "lsfd restarted" + fi + register: lsfd_restart_result + changed_when: "'lsfd restarted' in lsfd_restart_result.stdout" + when: inventory_hostname == groups['management_nodes'][0] diff --git a/modules/ansible-roles/roles/lsf_post_config/tasks/configure_shared_folders.yml b/modules/ansible-roles/roles/lsf_post_config/tasks/configure_shared_folders.yml index 28e4013e..fde333e0 100644 --- a/modules/ansible-roles/roles/lsf_post_config/tasks/configure_shared_folders.yml +++ b/modules/ansible-roles/roles/lsf_post_config/tasks/configure_shared_folders.yml @@ -1,5 +1,11 @@ --- +# - name: Log directories | Remove duplicate logs +# ansible.builtin.shell: > +# find /opt/ibm/lsflogs -type f ! -name "*.{{ dns_domain_names }}" ! 
-name "ibmcloudgen2*" -delete +# become: true +# when: inventory_hostname in groups['management_nodes'] + - name: Log directories | Setup shared base directories file: path: "{{ item.path }}" @@ -108,31 +114,3 @@ group: root become: yes when: inventory_hostname == groups['login_node'][0] - -- name: Duplicate Logs | Remove duplicate logs from LSF log directory - shell: | - rm -rf /opt/ibm/lsflogs/* /opt/ibm/lsflogs/.* 2>/dev/null || true - args: - warn: false - ignore_errors: true - become: true - when: inventory_hostname in groups['management_nodes'] - -- name: Set permissions on LSF event file - ansible.builtin.file: - path: "{{ LSF_EVENT }}" - mode: '0644' - owner: lsfadmin - group: lsfadmin - state: file - ignore_errors: true - when: inventory_hostname in groups['management_nodes'] - -- name: Change ownership and permissions of PEM directory using shell - ansible.builtin.shell: | - chown lsfadmin:lsfadmin /tmp/.pem.{{ prefix }} && chmod 775 /tmp/.pem.{{ prefix }} - become: yes - args: - warn: false - ignore_errors: true - when: inventory_hostname in groups['management_nodes'] diff --git a/modules/ansible-roles/roles/lsf_post_config/tasks/reload_services.yml b/modules/ansible-roles/roles/lsf_post_config/tasks/reload_services.yml index a5200d1c..53d5712e 100644 --- a/modules/ansible-roles/roles/lsf_post_config/tasks/reload_services.yml +++ b/modules/ansible-roles/roles/lsf_post_config/tasks/reload_services.yml @@ -5,7 +5,7 @@ service: name: lsfd state: restarted - when: inventory_hostname in groups['mgmt_compute_nodes'] + when: inventory_hostname == groups['management_nodes'][0] # Restart the NetworkManager service on all nodes - name: Restart NetworkManager diff --git a/modules/ansible-roles/roles/lsf_post_config/vars/main.yml b/modules/ansible-roles/roles/lsf_post_config/vars/main.yml index eadb4acc..6e28bb54 100644 --- a/modules/ansible-roles/roles/lsf_post_config/vars/main.yml +++ b/modules/ansible-roles/roles/lsf_post_config/vars/main.yml @@ -4,4 +4,3 @@ SHARED_PATH: "/mnt/lsf" LSF_SUITE: "/opt/ibm/lsfsuite" LSF_TOP: "{{ LSF_SUITE }}/lsf" LSF_LOGS: "/opt/ibm/lsflogs" -LSF_EVENT: "{{ LSF_TOP }}/work/{{ prefix }}/logdir/lsb.ncb.events" diff --git a/modules/ansible-roles/roles/lsf_prereq_config/tasks/disable_ansible_repo.yml b/modules/ansible-roles/roles/lsf_prereq_config/tasks/disable_ansible_repo.yml index cd963628..2c87eaa3 100644 --- a/modules/ansible-roles/roles/lsf_prereq_config/tasks/disable_ansible_repo.yml +++ b/modules/ansible-roles/roles/lsf_prereq_config/tasks/disable_ansible_repo.yml @@ -10,7 +10,3 @@ - name: Ansible Repo | Disable Ansible repo command: subscription-manager repos --disable=ansible-2-for-rhel-8-x86_64-rpms when: ansible_repo_check.rc == 0 - register: disable_repo - retries: 3 - delay: 10 - until: disable_repo.rc == 0 diff --git a/modules/ansible-roles/roles/lsf_template_config/templates/fp14-inventory.j2 b/modules/ansible-roles/roles/lsf_template_config/templates/fp14-inventory.j2 index 82480067..b15eabfb 100644 --- a/modules/ansible-roles/roles/lsf_template_config/templates/fp14-inventory.j2 +++ b/modules/ansible-roles/roles/lsf_template_config/templates/fp14-inventory.j2 @@ -40,7 +40,7 @@ localhost # by using both its public IP address and the host # name reported by the hostname command and vice versa. 
[GUI_Hosts] -{% for host in db_hosts %} +{% for host in gui_hosts %} {{ host }} {% endfor %} diff --git a/modules/ansible-roles/roles/lsf_template_config/templates/fp15-inventory.j2 b/modules/ansible-roles/roles/lsf_template_config/templates/fp15-inventory.j2 index ba3ab48f..ed5a37bc 100644 --- a/modules/ansible-roles/roles/lsf_template_config/templates/fp15-inventory.j2 +++ b/modules/ansible-roles/roles/lsf_template_config/templates/fp15-inventory.j2 @@ -41,10 +41,11 @@ localhost # by using both its public IP address and the host # name reported by the hostname command and vice versa. [GUI_Hosts] -{% for host in db_hosts %} +{% for host in gui_hosts %} {{ host }} {% endfor %} + # DB_HOST is optional, and is the machine that hosts the database # used by the Application Center component in LSF Suite. # However, this database is not configured for High Availability (HA). @@ -59,6 +60,6 @@ localhost # LSF_WebService is an optional role for hosts LSF will use to run the # LSF Web Service. The default is for the primary host to be the LWS host. [LSF_WebService] -{% for host in db_hosts %} +{% for host in gui_hosts %} {{ host }} {% endfor %} diff --git a/modules/baremetal/datasource.tf b/modules/baremetal/datasource.tf index 358eae54..71e65999 100644 --- a/modules/baremetal/datasource.tf +++ b/modules/baremetal/datasource.tf @@ -1,13 +1,8 @@ -# data "ibm_resource_group" "existing_resource_group" { -# name = var.existing_resource_group -# } - -# data "ibm_is_image" "storage" { -# count = length(var.storage_servers) -# name = var.storage_servers[count.index]["image"] -# } +data "ibm_resource_group" "existing_resource_group" { + name = var.existing_resource_group +} -data "ibm_is_bare_metal_server_profile" "itself" { +data "ibm_is_image" "storage" { count = length(var.storage_servers) - name = var.storage_servers[count.index]["profile"] + name = var.storage_servers[count.index]["image"] } diff --git a/modules/baremetal/locals.tf b/modules/baremetal/locals.tf index 39150624..6fba7bc8 100644 --- a/modules/baremetal/locals.tf +++ b/modules/baremetal/locals.tf @@ -1,49 +1,14 @@ # define variables locals { - # storage_image_id = data.ibm_is_image.storage[*].id - # resource_group_id = data.ibm_resource_group.existing_resource_group.id - # bms_interfaces = ["ens1", "ens2"] - # bms_interfaces = ["eth0", "eth1"] - # storage_ssh_keys = [for name in var.storage_ssh_keys : data.ibm_is_ssh_key.storage[name].id] + prefix = var.prefix + storage_image_id = data.ibm_is_image.storage[*].id + storage_node_name = format("%s-%s", local.prefix, "strg") + resource_group_id = data.ibm_resource_group.existing_resource_group.id + bms_interfaces = ["ens1", "ens2"] + #storage_ssh_keys = [for name in var.storage_ssh_keys : data.ibm_is_ssh_key.storage[name].id] # TODO: explore (DA always keep it true) #skip_iam_authorization_policy = true - #storage_server_count = sum(var.storage_servers[*]["count"]) - #enable_storage = local.storage_server_count > 0 - - raw_bm_details = flatten([ - for module_instance in module.storage_baremetal : [ - for server_key, server_details in module_instance.baremetal_servers : - { - id = server_details.bms_server_id - name = server_details.bms_server_name - ipv4_address = try(server_details.bms_server_primary_ip, null) - bms_primary_vni_id = try(server_details.bms_primary_vni_id, null) - bms_server_secondary_ip = try(server_details.bms_server_secondary_ip, null) - bms_secondary_vni_id = try(server_details.bms_secondary_vni_id, null) - } - ] - ]) - - bm_server_name = 
flatten(local.raw_bm_details[*].name) - # bm_serve_ips = flatten([for server in local.raw_bm_details : server[*].ipv4_address]) - - disk0_interface_type = (data.ibm_is_bare_metal_server_profile.itself[*].disks[0].supported_interface_types[0].default)[0] - disk_count = (data.ibm_is_bare_metal_server_profile.itself[*].disks[1].quantity[0].value)[0] - - # Determine starting disk based on disk0 interface type - nvme_start_disk = local.disk0_interface_type == "sata" ? "0" : "1" - - # Generate NVMe device list up to 36 disks - all_disks = [ - "/dev/nvme0n1", "/dev/nvme1n1", "/dev/nvme2n1", "/dev/nvme3n1", "/dev/nvme4n1", "/dev/nvme5n1", - "/dev/nvme6n1", "/dev/nvme7n1", "/dev/nvme8n1", "/dev/nvme9n1", "/dev/nvme10n1", "/dev/nvme11n1", - "/dev/nvme12n1", "/dev/nvme13n1", "/dev/nvme14n1", "/dev/nvme15n1", "/dev/nvme16n1", "/dev/nvme17n1", - "/dev/nvme18n1", "/dev/nvme19n1", "/dev/nvme20n1", "/dev/nvme21n1", "/dev/nvme22n1", "/dev/nvme23n1", - "/dev/nvme24n1", "/dev/nvme25n1", "/dev/nvme26n1", "/dev/nvme27n1", "/dev/nvme28n1", "/dev/nvme29n1", - "/dev/nvme30n1", "/dev/nvme31n1", "/dev/nvme32n1", "/dev/nvme33n1", "/dev/nvme34n1", "/dev/nvme35n1" - ] - - # Select only the required number of disks - selected_disks = slice(local.all_disks, local.nvme_start_disk, local.disk_count + local.nvme_start_disk) + storage_server_count = sum(var.storage_servers[*]["count"]) + enable_storage = local.storage_server_count > 0 } diff --git a/modules/baremetal/main.tf b/modules/baremetal/main.tf index 6767d4a6..9f77bbb3 100644 --- a/modules/baremetal/main.tf +++ b/modules/baremetal/main.tf @@ -1,27 +1,23 @@ -# module "storage_key" { -# count = local.enable_storage ? 1 : 0 -# source = "./../key" -# } +module "storage_key" { + count = local.enable_storage ? 1 : 0 + source = "./../key" +} module "storage_baremetal" { - source = "terraform-ibm-modules/bare-metal-vpc/ibm" - version = "1.3.0" - count = length(var.storage_servers) - server_count = var.storage_servers[count.index]["count"] - prefix = var.prefix - profile = var.storage_servers[count.index]["profile"] - image_id = var.image_id - create_security_group = false - subnet_ids = var.storage_subnets - ssh_key_ids = var.storage_ssh_keys - bandwidth = var.sapphire_rapids_profile_check == true ? 200000 : 100000 - allowed_vlan_ids = var.allowed_vlan_ids - access_tags = null - resource_group_id = var.existing_resource_group - security_group_ids = var.security_group_ids - user_data = var.user_data - secondary_vni_enabled = var.secondary_vni_enabled - secondary_subnet_ids = length(var.protocol_subnets) == 0 ? [] : [var.protocol_subnets[0].id] - secondary_security_group_ids = var.secondary_security_group_ids - tpm_mode = "tpm_2" + source = "terraform-ibm-modules/bare-metal-vpc/ibm" + version = "1.1.0" + count = length(var.storage_servers) + server_count = var.storage_servers[count.index]["count"] + prefix = count.index == 0 ? 
local.storage_node_name : format("%s-%s", local.storage_node_name, count.index) + profile = var.storage_servers[count.index]["profile"] + image_id = local.storage_image_id[count.index] + create_security_group = false + subnet_ids = var.storage_subnets + ssh_key_ids = var.storage_ssh_keys + bandwidth = var.bandwidth + allowed_vlan_ids = var.allowed_vlan_ids + access_tags = null + resource_group_id = local.resource_group_id + security_group_ids = var.security_group_ids + user_data = data.template_file.storage_user_data.rendered } diff --git a/modules/baremetal/outputs.tf b/modules/baremetal/outputs.tf index b3446c89..1f429c38 100644 --- a/modules/baremetal/outputs.tf +++ b/modules/baremetal/outputs.tf @@ -4,19 +4,11 @@ output "list" { for module_instance in module.storage_baremetal : [ for server_key, server_details in module_instance.baremetal_servers : { - id = server_details.bms_server_id - name = server_details.bms_server_name - ipv4_address = try(server_details.bms_server_primary_ip, null) - bms_primary_vni_id = try(server_details.bms_primary_vni_id, null) - bms_server_secondary_ip = try(server_details.bms_server_secondary_ip, null) - bms_secondary_vni_id = try(server_details.bms_secondary_vni_id, null) + id = server_details.bms_server_id + name = server_details.bms_server_name + ipv4_address = try(server_details.bms_server_ip, "") + vni_id = server_details.bms_vni_id } ] ]) - depends_on = [module.storage_baremetal] -} - -output "instance_ips_with_vol_mapping" { - value = { for instance_details in local.bm_server_name : instance_details => local.selected_disks } - description = "Instance ips with vol mapping" } diff --git a/modules/baremetal/template_files.tf b/modules/baremetal/template_files.tf new file mode 100644 index 00000000..a7117b26 --- /dev/null +++ b/modules/baremetal/template_files.tf @@ -0,0 +1,10 @@ +data "template_file" "storage_user_data" { + template = file("${path.module}/templates/storage_user_data.tpl") + vars = { + bastion_public_key_content = var.bastion_public_key_content != null ? var.bastion_public_key_content : "" + storage_public_key_content = local.enable_storage ? module.storage_key[0].public_key_content : "" + storage_private_key_content = local.enable_storage ? module.storage_key[0].private_key_content : "" + storage_interfaces = local.bms_interfaces[0] + storage_dns_domain = var.dns_domain_names["storage"] + } +} diff --git a/modules/baremetal/templates/storage_user_data.tpl b/modules/baremetal/templates/storage_user_data.tpl new file mode 100644 index 00000000..31f15e6b --- /dev/null +++ b/modules/baremetal/templates/storage_user_data.tpl @@ -0,0 +1,120 @@ +#!/usr/bin/bash + +################################################### +# Copyright (C) IBM Corp. 2023 All Rights Reserved. 
+# Licensed under the Apache License v2.0 +################################################### + +#!/usr/bin/env bash +exec > >(tee /var/log/ibm_spectrumscale_user-data.log) + +if grep -E -q "CentOS|Red Hat" /etc/os-release +then + USER=vpcuser +elif grep -q "Ubuntu" /etc/os-release +then + USER=ubuntu +fi +sed -i -e "s/^/no-port-forwarding,no-agent-forwarding,no-X11-forwarding,command=\"echo \'Please login as the user \\\\\"$USER\\\\\" rather than the user \\\\\"root\\\\\".\';echo;sleep 5; exit 142\" /" /root/.ssh/authorized_keys + +# input parameters +echo "${bastion_public_key_content}" >> ~/.ssh/authorized_keys +echo "${storage_public_key_content}" >> ~/.ssh/authorized_keys +echo "StrictHostKeyChecking no" >> ~/.ssh/config +echo "${storage_private_key_content}" > ~/.ssh/id_rsa +chmod 600 ~/.ssh/id_rsa + +# if grep -q "Red Hat" /etc/os-release +if grep -E -q "CentOS|Red Hat" /etc/os-release +then + USER=vpcuser + REQ_PKG_INSTALLED=0 + if grep -q "platform:el9" /etc/os-release + then + PACKAGE_MGR=dnf + package_list="python3 kernel-devel-$(uname -r) kernel-headers-$(uname -r) firewalld numactl make gcc-c++ elfutils-libelf-devel bind-utils iptables-nft nfs-utils elfutils elfutils-devel python3-dnf-plugin-versionlock" + elif grep -q "platform:el8" /etc/os-release + then + PACKAGE_MGR=dnf + package_list="python38 kernel-devel-$(uname -r) kernel-headers-$(uname -r) firewalld numactl jq make gcc-c++ elfutils-libelf-devel bind-utils iptables nfs-utils elfutils elfutils-devel python3-dnf-plugin-versionlock" + else + PACKAGE_MGR=yum + package_list="python3 kernel-devel-$(uname -r) kernel-headers-$(uname -r) firewalld numactl make gcc-c++ elfutils-libelf-devel bind-utils iptables nfs-utils elfutils elfutils-devel yum-plugin-versionlock" + fi + + RETRY_LIMIT=5 + retry_count=0 + all_pkg_installed=1 + + while [[ $all_pkg_installed -ne 0 && $retry_count -lt $RETRY_LIMIT ]] + do + # Install all required packages + echo "INFO: Attempting to install packages" + $PACKAGE_MGR install -y $package_list + + # Check to ensure packages are installed + pkg_installed=0 + for pkg in $package_list + do + pkg_query=$($PACKAGE_MGR list installed $pkg) + pkg_installed=$(($? + $pkg_installed)) + done + if [[ $pkg_installed -ne 0 ]] + then + # The minimum required packages have not been installed. + echo "WARN: Required packages not installed. Sleeping for 60 seconds and retrying..." + touch /var/log/scale-rerun-package-install + echo "INFO: Cleaning and repopulating repository data" + $PACKAGE_MGR clean all + $PACKAGE_MGR makecache + sleep 60 + else + all_pkg_installed=0 + fi + retry_count=$(( $retry_count+1 )) + done + +elif grep -q "Ubuntu" /etc/os-release +then + USER=ubuntu +fi + +yum update --security -y +yum versionlock $package_list +yum versionlock list +echo 'export PATH=$PATH:/usr/lpp/mmfs/bin' >> /root/.bashrc + +echo "###########################################################################################" >> /etc/motd +echo "# You have logged in to Storage BareMetal Server.                                        #" >> /etc/motd +echo "# - Server storage is temporary storage that's available only while your Baremetal       #" >> /etc/motd +echo "#   server is running.                                                                    #" >> /etc/motd +echo "# - Data on the drive is unrecoverable after server shutdown, disruptive maintenance,    #" >> /etc/motd +echo "#   or hardware failure.
#" >> /etc/motd +echo "# #" >> /etc/motd +echo "# Refer: https://cloud.ibm.com/docs/vpc?topic=vpc-bare-metal-servers-storage #" >> /etc/motd +echo "###########################################################################################" >> /etc/motd + +echo "DOMAIN=${storage_dns_domain}" >> "/etc/sysconfig/network-scripts/ifcfg-${storage_interfaces}" +echo "MTU=9000" >> "/etc/sysconfig/network-scripts/ifcfg-${storage_interfaces}" +chage -I -1 -m 0 -M 99999 -E -1 -W 14 vpcuser +sleep 120 +systemctl restart NetworkManager + +systemctl stop firewalld +firewall-offline-cmd --zone=public --add-port=1191/tcp +firewall-offline-cmd --zone=public --add-port=4444/tcp +firewall-offline-cmd --zone=public --add-port=4444/udp +firewall-offline-cmd --zone=public --add-port=4739/udp +firewall-offline-cmd --zone=public --add-port=4739/tcp +firewall-offline-cmd --zone=public --add-port=9084/tcp +firewall-offline-cmd --zone=public --add-port=9085/tcp +firewall-offline-cmd --zone=public --add-service=http +firewall-offline-cmd --zone=public --add-service=https +firewall-offline-cmd --zone=public --add-port=2049/tcp +firewall-offline-cmd --zone=public --add-port=2049/udp +firewall-offline-cmd --zone=public --add-port=111/tcp +firewall-offline-cmd --zone=public --add-port=111/udp +firewall-offline-cmd --zone=public --add-port=30000-61000/tcp +firewall-offline-cmd --zone=public --add-port=30000-61000/udp +systemctl start firewalld +systemctl enable firewalld diff --git a/modules/baremetal/variables.tf b/modules/baremetal/variables.tf index a6c8241b..f24d57c6 100644 --- a/modules/baremetal/variables.tf +++ b/modules/baremetal/variables.tf @@ -18,10 +18,6 @@ variable "prefix" { } } -variable "image_id" { - description = "This is the image id required for baremetal" - type = string -} ############################################################################## # Scale Storage Variables ############################################################################## @@ -52,21 +48,23 @@ variable "storage_servers" { object({ profile = string count = number - filesystem = optional(string) + image = string + filesystem = string }) ) default = [{ profile = "cx2d-metal-96x192" count = 0 + image = "ibm-redhat-8-10-minimal-amd64-4" filesystem = "/gpfs/fs1" }] description = "Number of BareMetal Servers to be launched for storage cluster." } -variable "sapphire_rapids_profile_check" { - type = bool - default = false - description = "Check whether the profile uses Cascade Lake processors (x2) or Intel Sapphire Rapids processors (x3)." +variable "bandwidth" { + description = "The allocated bandwidth (in Mbps) for the bare metal server to manage network traffic. If unset, default values apply." + type = number + default = 100000 } variable "allowed_vlan_ids" { @@ -81,31 +79,31 @@ variable "security_group_ids" { default = [] } -variable "secondary_security_group_ids" { - description = "A list of secondary security group ID's" - type = list(string) - default = [] -} - -variable "secondary_vni_enabled" { - description = "Whether to enable a secondary virtual network interface" - type = bool - default = false -} +############################################################################## +# Access Variables +############################################################################## -variable "user_data" { - description = "User Data script path" +variable "bastion_public_key_content" { type = string + sensitive = true default = null + description = "Bastion security group id." 
} -variable "protocol_subnets" { - type = list(object({ - name = string - id = string - zone = string - cidr = string - })) - default = [] - description = "Subnets to launch the bastion host." +############################################################################## +# DNS Template Variables +############################################################################## + +variable "dns_domain_names" { + type = object({ + compute = string + storage = string + protocol = string + }) + default = { + compute = "comp.com" + storage = "strg.com" + protocol = "ces.com" + } + description = "IBM Cloud HPC DNS domain names." } diff --git a/modules/baremetal/version.tf b/modules/baremetal/version.tf index 886be456..b87bee94 100644 --- a/modules/baremetal/version.tf +++ b/modules/baremetal/version.tf @@ -10,5 +10,9 @@ terraform { source = "IBM-Cloud/ibm" version = ">= 1.68.1, < 2.0.0" } + template = { + source = "hashicorp/template" + version = "~> 2" + } } } diff --git a/modules/common/client_configuration/client_configuration.tf b/modules/common/client_configuration/client_configuration.tf index 7142341a..fe3ad0bb 100644 --- a/modules/common/client_configuration/client_configuration.tf +++ b/modules/common/client_configuration/client_configuration.tf @@ -5,23 +5,11 @@ resource "local_sensitive_file" "write_client_meta_private_key" { file_permission = "0600" } -resource "null_resource" "scale_host_play" { - count = (tobool(var.turn_on) == true && tobool(var.storage_cluster_create_complete) == true && tobool(var.create_scale_cluster) == true) ? 1 : 0 - provisioner "local-exec" { - interpreter = ["/bin/bash", "-c"] - command = "sudo ansible-playbook -f 50 -i ${local.scale_all_inventory} -l 'client' -e @${local.scale_cluster_hosts} -e @${local.domain_name_file} ${local.scale_hostentry_playbook_path}" - } - - triggers = { - build = timestamp() - } -} - resource "null_resource" "prepare_client_inventory_using_jumphost_connection" { count = (tobool(var.turn_on) == true && tobool(var.storage_cluster_create_complete) == true && tobool(var.using_jumphost_connection) == true && tobool(var.create_scale_cluster) == true) ? 1 : 0 provisioner "local-exec" { interpreter = ["/bin/bash", "-c"] - command = "python3 ${local.ansible_inv_script_path} --client_tf_inv_path ${var.client_inventory_path} --install_infra_path ${var.clone_path} --instance_private_key ${local.client_private_key} --bastion_user ${var.bastion_user} --bastion_ip ${var.bastion_instance_public_ip} --bastion_ssh_private_key ${var.bastion_ssh_private_key} --enable_ldap ${var.enable_ldap} --ldap_basedns ${var.ldap_basedns} --ldap_server ${local.ldap_server} --ldap_admin_password ${var.ldap_admin_password}" + command = "python3 ${local.ansible_inv_script_path} --client_tf_inv_path ${var.client_inventory_path} --install_infra_path ${var.clone_path} --instance_private_key ${local.client_private_key} --bastion_user ${var.bastion_user} --bastion_ip ${var.bastion_instance_public_ip} --bastion_ssh_private_key ${var.bastion_ssh_private_key} --enable_ldap ${var.enable_ldap} --ldap_basedns ${var.ldap_basedns} --ldap_server ${var.ldap_server} --ldap_admin_password ${var.ldap_admin_password}" } triggers = { build = timestamp() @@ -33,7 +21,7 @@ resource "null_resource" "prepare_client_inventory" { count = (tobool(var.turn_on) == true && tobool(var.storage_cluster_create_complete) == true && tobool(var.using_jumphost_connection) == false && tobool(var.create_scale_cluster) == true) ? 
1 : 0 provisioner "local-exec" { interpreter = ["/bin/bash", "-c"] - command = "python3 ${local.ansible_inv_script_path} --client_tf_inv_path ${var.client_inventory_path} --install_infra_path ${var.clone_path} --instance_private_key ${local.client_private_key} --enable_ldap ${var.enable_ldap} --ldap_basedns ${var.ldap_basedns} --ldap_server ${local.ldap_server} --ldap_admin_password ${var.ldap_admin_password}" + command = "python3 ${local.ansible_inv_script_path} --client_tf_inv_path ${var.client_inventory_path} --install_infra_path ${var.clone_path} --instance_private_key ${local.client_private_key} --enable_ldap ${var.enable_ldap} --ldap_basedns ${var.ldap_basedns} --ldap_server ${var.ldap_server} --ldap_admin_password ${var.ldap_admin_password}" } triggers = { build = timestamp() @@ -45,7 +33,7 @@ resource "null_resource" "perform_client_configuration" { count = (tobool(var.turn_on) == true && tobool(var.storage_cluster_create_complete) == true && tobool(var.create_scale_cluster) == true) ? 1 : 0 provisioner "local-exec" { interpreter = ["/bin/bash", "-c"] - command = "sudo ansible-playbook -i ${local.client_inventory_path} ${local.client_playbook}" + command = "ansible-playbook -i ${local.client_inventory_path} ${local.client_playbook}" } triggers = { build = timestamp() diff --git a/modules/common/client_configuration/locals.tf b/modules/common/client_configuration/locals.tf index e3bb1c64..58b2457d 100644 --- a/modules/common/client_configuration/locals.tf +++ b/modules/common/client_configuration/locals.tf @@ -1,13 +1,8 @@ locals { - client_inventory_path = format("%s/%s/client_inventory.ini", var.clone_path, "ibm-spectrum-scale-install-infra") - client_playbook = format("%s/%s/client_cloud_playbook.yaml", var.clone_path, "ibm-spectrum-scale-install-infra") - scripts_path = replace(path.module, "client_configuration", "scripts") - ansible_inv_script_path = format("%s/prepare_client_inv.py", local.scripts_path) - client_private_key = format("%s/client_key/id_rsa", var.clone_path) - ldap_server = jsonencode(var.ldap_server) - scale_hostentry_playbook_path = format("%s/%s/scale_host_entry_play.yml", var.clone_path, "ibm-spectrum-scale-install-infra") - scale_cluster_hosts = format("%s/%s/scale_cluster_hosts.yml", var.clone_path, "ibm-spectrum-scale-install-infra") - scale_all_inventory = format("%s/%s/scale_all_inventory.ini", var.clone_path, "ibm-spectrum-scale-install-infra") - domain_name_file = format("%s/%s/domain_names.yml", var.clone_path, "ibm-spectrum-scale-install-infra") + client_inventory_path = format("%s/%s/client_inventory.ini", var.clone_path, "ibm-spectrum-scale-install-infra") + client_playbook = format("%s/%s/client_cloud_playbook.yaml", var.clone_path, "ibm-spectrum-scale-install-infra") + scripts_path = replace(path.module, "client_configuration", "scripts") + ansible_inv_script_path = format("%s/prepare_client_inv.py", local.scripts_path) + client_private_key = format("%s/client_key/id_rsa", var.clone_path) } diff --git a/modules/common/compute_configuration/compute_configuration.tf b/modules/common/compute_configuration/compute_configuration.tf index d7923bf4..49a273a4 100644 --- a/modules/common/compute_configuration/compute_configuration.tf +++ b/modules/common/compute_configuration/compute_configuration.tf @@ -1,5 +1,5 @@ /* - Executes ansible playbook to install IBM Spectrum Scale compute cluster. + Excutes ansible playbook to install IBM Spectrum Scale compute cluster. 
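A pattern worth spelling out before the next several hunks: every null_resource in these configuration modules is switched by a `count` ternary over `tobool()`-coerced string flags, and the jumphost and direct inventory-prep resources are deliberately complementary, so at most one fires per apply. A small Python approximation of that gating for the client module follows; function and dictionary key names are illustrative, and the `KeyError` stands in for Terraform's error on non-boolean input.

```python
# Approximation of the count gates in client_configuration.tf.
def to_bool(value):
    if isinstance(value, bool):
        return value
    return {"true": True, "false": False}[str(value).lower()]


def client_inventory_resources(turn_on, storage_done, use_jumphost, create):
    """Return which inventory-prep resource gets count = 1."""
    base = to_bool(turn_on) and to_bool(storage_done) and to_bool(create)
    return {
        "prepare_client_inventory_using_jumphost_connection": base and to_bool(use_jumphost),
        "prepare_client_inventory": base and not to_bool(use_jumphost),
    }


# Direct connection: only the non-jumphost resource is created.
assert client_inventory_resources("true", "true", "false", "true") == {
    "prepare_client_inventory_using_jumphost_connection": False,
    "prepare_client_inventory": True,
}
```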
*/ resource "local_file" "create_compute_tuning_parameters" { @@ -26,18 +26,6 @@ resource "local_sensitive_file" "write_meta_private_key" { file_permission = "0600" } -resource "null_resource" "scale_host_play" { - count = (tobool(var.turn_on) == true && tobool(var.write_inventory_complete) == true && tobool(var.create_scale_cluster) == true) ? 1 : 0 - provisioner "local-exec" { - interpreter = ["/bin/bash", "-c"] - command = "sudo ansible-playbook -f 50 -i ${local.scale_all_inventory} -l 'compute' -e @${local.scale_cluster_hosts} -e @${local.domain_name_file} ${local.scale_hostentry_playbook_path}" - } - - triggers = { - build = timestamp() - } -} - resource "null_resource" "prepare_ansible_inventory_using_jumphost_connection" { count = (tobool(var.turn_on) == true && tobool(var.write_inventory_complete) == true && tobool(var.using_jumphost_connection) == true && tobool(var.scale_encryption_enabled) == false) ? 1 : 0 provisioner "local-exec" { @@ -54,7 +42,7 @@ resource "null_resource" "prepare_ansible_inventory_using_jumphost_connection_en count = (tobool(var.turn_on) == true && tobool(var.write_inventory_complete) == true && tobool(var.using_jumphost_connection) == true && tobool(var.scale_encryption_enabled) == true) ? 1 : 0 provisioner "local-exec" { interpreter = ["/bin/bash", "-c"] - command = "python3 ${local.ansible_inv_script_path} --tf_inv_path ${var.inventory_path} --install_infra_path ${var.clone_path} --instance_private_key ${local.compute_private_key} --bastion_user ${var.bastion_user} --bastion_ip ${var.bastion_instance_public_ip} --bastion_ssh_private_key ${var.bastion_ssh_private_key} --using_packer_image ${var.using_packer_image} --using_rest_initialization ${var.using_rest_initialization} --gui_username ${var.compute_cluster_gui_username} --gui_password ${var.compute_cluster_gui_password} --enable_mrot_conf ${local.enable_mrot_conf} --enable_ces ${local.enable_ces} --scale_encryption_enabled ${var.scale_encryption_enabled} --scale_encryption_servers ${local.scale_encryption_servers} --scale_encryption_admin_password ${var.scale_encryption_admin_password} --scale_encryption_type ${var.scale_encryption_type} --enable_ldap ${var.enable_ldap} --ldap_basedns ${var.ldap_basedns} --ldap_server ${local.ldap_server} --ldap_admin_password ${var.ldap_admin_password} --comp_memory ${var.comp_memory} --comp_vcpus_count ${var.comp_vcpus_count} --comp_bandwidth ${var.comp_bandwidth} --enable_afm ${local.enable_afm} --enable_key_protect ${local.enable_key_protect}" + command = "python3 ${local.ansible_inv_script_path} --tf_inv_path ${var.inventory_path} --install_infra_path ${var.clone_path} --instance_private_key ${local.compute_private_key} --bastion_user ${var.bastion_user} --bastion_ip ${var.bastion_instance_public_ip} --bastion_ssh_private_key ${var.bastion_ssh_private_key} --using_packer_image ${var.using_packer_image} --using_rest_initialization ${var.using_rest_initialization} --gui_username ${var.compute_cluster_gui_username} --gui_password ${var.compute_cluster_gui_password} --enable_mrot_conf ${local.enable_mrot_conf} --enable_ces ${local.enable_ces} --scale_encryption_enabled ${var.scale_encryption_enabled} --scale_encryption_servers ${local.scale_encryption_servers} --scale_encryption_admin_password ${var.scale_encryption_admin_password} --enable_ldap ${var.enable_ldap} --ldap_basedns ${var.ldap_basedns} --ldap_server ${local.ldap_server} --ldap_admin_password ${var.ldap_admin_password} --comp_memory ${var.comp_memory} --comp_vcpus_count ${var.comp_vcpus_count} 
--comp_bandwidth ${var.comp_bandwidth} --enable_afm ${local.enable_afm} --enable_key_protect ${local.enable_key_protect}" } depends_on = [local_file.create_compute_tuning_parameters, local_sensitive_file.write_meta_private_key] triggers = { @@ -78,7 +66,7 @@ resource "null_resource" "prepare_ansible_inventory_encryption" { count = (tobool(var.turn_on) == true && tobool(var.write_inventory_complete) == true && tobool(var.using_jumphost_connection) == false && tobool(var.scale_encryption_enabled) == true) ? 1 : 0 provisioner "local-exec" { interpreter = ["/bin/bash", "-c"] - command = "python3 ${local.ansible_inv_script_path} --tf_inv_path ${var.inventory_path} --install_infra_path ${var.clone_path} --instance_private_key ${local.compute_private_key} --using_packer_image ${var.using_packer_image} --using_rest_initialization ${var.using_rest_initialization} --gui_username ${var.compute_cluster_gui_username} --gui_password ${var.compute_cluster_gui_password} --enable_mrot_conf ${local.enable_mrot_conf} --enable_ces ${local.enable_ces} --scale_encryption_enabled ${var.scale_encryption_enabled} --scale_encryption_servers ${local.scale_encryption_servers} --scale_encryption_admin_password ${var.scale_encryption_admin_password} --scale_encryption_type ${var.scale_encryption_type} --enable_ldap ${var.enable_ldap} --ldap_basedns ${var.ldap_basedns} --ldap_server ${local.ldap_server} --ldap_admin_password ${var.ldap_admin_password} --comp_memory ${var.comp_memory} --comp_vcpus_count ${var.comp_vcpus_count} --comp_bandwidth ${var.comp_bandwidth} --enable_afm ${local.enable_afm} --enable_key_protect ${local.enable_key_protect}" + command = "python3 ${local.ansible_inv_script_path} --tf_inv_path ${var.inventory_path} --install_infra_path ${var.clone_path} --instance_private_key ${local.compute_private_key} --using_packer_image ${var.using_packer_image} --using_rest_initialization ${var.using_rest_initialization} --gui_username ${var.compute_cluster_gui_username} --gui_password ${var.compute_cluster_gui_password} --enable_mrot_conf ${local.enable_mrot_conf} --enable_ces ${local.enable_ces} --scale_encryption_enabled ${var.scale_encryption_enabled} --scale_encryption_servers ${local.scale_encryption_servers} --scale_encryption_admin_password ${var.scale_encryption_admin_password} --enable_ldap ${var.enable_ldap} --ldap_basedns ${var.ldap_basedns} --ldap_server ${local.ldap_server} --ldap_admin_password ${var.ldap_admin_password} --comp_memory ${var.comp_memory} --comp_vcpus_count ${var.comp_vcpus_count} --comp_bandwidth ${var.comp_bandwidth} --enable_afm ${local.enable_afm} --enable_key_protect ${local.enable_key_protect}" } depends_on = [local_file.create_compute_tuning_parameters, local_sensitive_file.write_meta_private_key] triggers = { @@ -108,7 +96,7 @@ resource "null_resource" "perform_scale_deployment" { count = (tobool(var.turn_on) == true && tobool(var.write_inventory_complete) == true && tobool(var.create_scale_cluster) == true) ? 
1 : 0 provisioner "local-exec" { interpreter = ["/bin/bash", "-c"] - command = "sudo ansible-playbook -f 32 -i ${local.compute_inventory_path} ${local.compute_playbook_path} --extra-vars \"scale_version=${var.scale_version}\" --extra-vars \"scale_install_directory_pkg_path=${var.spectrumscale_rpms_path}\"" + command = "ansible-playbook -f 32 -i ${local.compute_inventory_path} ${local.compute_playbook_path} --extra-vars \"scale_version=${var.scale_version}\" --extra-vars \"scale_install_directory_pkg_path=${var.spectrumscale_rpms_path}\"" } depends_on = [time_sleep.wait_60_seconds, null_resource.wait_for_ssh_availability, null_resource.prepare_ansible_inventory, null_resource.prepare_ansible_inventory_using_jumphost_connection, null_resource.prepare_ansible_inventory_encryption, null_resource.prepare_ansible_inventory_using_jumphost_connection_encryption] triggers = { diff --git a/modules/common/compute_configuration/locals.tf b/modules/common/compute_configuration/locals.tf index d70691b7..c58a6db3 100644 --- a/modules/common/compute_configuration/locals.tf +++ b/modules/common/compute_configuration/locals.tf @@ -1,19 +1,15 @@ locals { - scripts_path = replace(path.module, "compute_configuration", "scripts") - ansible_inv_script_path = var.inventory_format == "ini" ? format("%s/prepare_scale_inv_ini.py", local.scripts_path) : format("%s/prepare_scale_inv_json.py", local.scripts_path) - wait_for_ssh_script_path = format("%s/wait_for_ssh_availability.py", local.scripts_path) - scale_tuning_config_path = format("%s/%s", var.clone_path, "computesncparams.profile") - compute_private_key = format("%s/compute_key/id_rsa", var.clone_path) #tfsec:ignore:GEN002 - compute_inventory_path = format("%s/%s/compute_inventory.ini", var.clone_path, "ibm-spectrum-scale-install-infra") - compute_playbook_path = format("%s/%s/compute_cloud_playbook.yaml", var.clone_path, "ibm-spectrum-scale-install-infra") - scale_encryption_servers = jsonencode(var.scale_encryption_servers) - enable_mrot_conf = var.enable_mrot_conf ? "True" : "False" - enable_ces = var.enable_ces ? "True" : "False" - enable_afm = var.enable_afm ? "True" : "False" - enable_key_protect = var.scale_encryption_enabled && var.enable_key_protect == "True" ? "True" : "False" - ldap_server = jsonencode(var.ldap_server) - scale_hostentry_playbook_path = format("%s/%s/scale_host_entry_play.yml", var.clone_path, "ibm-spectrum-scale-install-infra") - scale_cluster_hosts = format("%s/%s/scale_cluster_hosts.yml", var.clone_path, "ibm-spectrum-scale-install-infra") - scale_all_inventory = format("%s/%s/scale_all_inventory.ini", var.clone_path, "ibm-spectrum-scale-install-infra") - domain_name_file = format("%s/%s/domain_names.yml", var.clone_path, "ibm-spectrum-scale-install-infra") + scripts_path = replace(path.module, "compute_configuration", "scripts") + ansible_inv_script_path = var.inventory_format == "ini" ? 
format("%s/prepare_scale_inv_ini.py", local.scripts_path) : format("%s/prepare_scale_inv_json.py", local.scripts_path) + wait_for_ssh_script_path = format("%s/wait_for_ssh_availability.py", local.scripts_path) + scale_tuning_config_path = format("%s/%s", var.clone_path, "computesncparams.profile") + compute_private_key = format("%s/compute_key/id_rsa", var.clone_path) #tfsec:ignore:GEN002 + compute_inventory_path = format("%s/%s/compute_inventory.ini", var.clone_path, "ibm-spectrum-scale-install-infra") + compute_playbook_path = format("%s/%s/compute_cloud_playbook.yaml", var.clone_path, "ibm-spectrum-scale-install-infra") + scale_encryption_servers = jsonencode(var.scale_encryption_servers) + enable_mrot_conf = var.enable_mrot_conf ? "True" : "False" + enable_ces = var.enable_ces ? "True" : "False" + enable_afm = var.enable_afm ? "True" : "False" + enable_key_protect = var.enable_key_protect == "key_protect" ? "True" : "False" + ldap_server = jsonencode(var.ldap_server) } diff --git a/modules/common/compute_configuration/variables.tf b/modules/common/compute_configuration/variables.tf index ea7abb58..13c9fd1e 100644 --- a/modules/common/compute_configuration/variables.tf +++ b/modules/common/compute_configuration/variables.tf @@ -110,11 +110,6 @@ variable "scale_encryption_enabled" { description = "To enable the encryption for the filesystem. Select true or false" } -variable "scale_encryption_type" { - type = string - description = "To enable filesystem encryption, specify either 'key_protect' or 'gklm'. If neither is specified, the default value will be 'null' and encryption is disabled" -} - variable "scale_encryption_admin_password" { type = string description = "Password that is used for performing administrative operations for the GKLM.The password must contain at least 8 characters and at most 20 characters. For a strong password, at least three alphabetic characters are required, with at least one uppercase and one lowercase letter. Two numbers, and at least one special character from this(~@_+:). Make sure that the password doesn't include the username. Visit this [page](https://www.ibm.com/docs/en/gklm/3.0.1?topic=roles-password-policy) to know more about password policy of GKLM. 
" diff --git a/modules/common/encryption_configuration/locals.tf b/modules/common/encryption_configuration/locals.tf deleted file mode 100644 index ad1d4bc2..00000000 --- a/modules/common/encryption_configuration/locals.tf +++ /dev/null @@ -1,13 +0,0 @@ -locals { - gklm_private_key = format("%s/gklm_key/id_rsa", var.clone_path) - scale_encryption_servers = jsonencode(var.scale_encryption_servers) - scale_encryption_servers_dns = jsonencode(var.scale_encryption_servers_dns) - compute_inventory_path = format("%s/%s/compute_inventory.ini", var.clone_path, "ibm-spectrum-scale-install-infra") - storage_inventory_path = format("%s/%s/storage_inventory.ini", var.clone_path, "ibm-spectrum-scale-install-infra") - encryption_gklm_playbook = format("%s/%s/encryption_gklm_playbook.yaml", var.clone_path, "ibm-spectrum-scale-install-infra") - encryption_cluster_playbook = format("%s/%s/encryption_cluster_playbook.yaml", var.clone_path, "ibm-spectrum-scale-install-infra") - scale_hostentry_playbook_path = format("%s/%s/scale_host_entry_play.yml", var.clone_path, "ibm-spectrum-scale-install-infra") - scale_cluster_hosts = format("%s/%s/scale_cluster_hosts.yml", var.clone_path, "ibm-spectrum-scale-install-infra") - scale_all_inventory = format("%s/%s/scale_all_inventory.ini", var.clone_path, "ibm-spectrum-scale-install-infra") - domain_name_file = format("%s/%s/domain_names.yml", var.clone_path, "ibm-spectrum-scale-install-infra") -} diff --git a/modules/common/encryption_configuration/main.tf b/modules/common/encryption_configuration/main.tf deleted file mode 100644 index cc042827..00000000 --- a/modules/common/encryption_configuration/main.tf +++ /dev/null @@ -1,55 +0,0 @@ - -resource "local_sensitive_file" "write_meta_private_key" { - count = (tobool(var.turn_on) == true && var.scale_encryption_type == "gklm") ? 1 : 0 - content = var.meta_private_key - filename = local.gklm_private_key - file_permission = "0600" -} - -resource "null_resource" "scale_host_play" { - count = (tobool(var.turn_on) == true && tobool(var.create_scale_cluster) == true && var.scale_encryption_type == "gklm") ? 1 : 0 - provisioner "local-exec" { - interpreter = ["/bin/bash", "-c"] - command = "sudo ansible-playbook -f 50 -i ${local.scale_all_inventory} -l 'gklm' -e @${local.scale_cluster_hosts} -e @${local.domain_name_file} ${local.scale_hostentry_playbook_path}" - } - - triggers = { - build = timestamp() - } -} - -resource "null_resource" "perform_encryption_prepare" { - count = (tobool(var.turn_on) == true && tobool(var.create_scale_cluster) == true && var.scale_encryption_type == "gklm") ? 
1 : 0 - provisioner "local-exec" { - interpreter = ["/bin/bash", "-c"] - command = "sudo /usr/local/bin/ansible-playbook -f 32 ${local.encryption_gklm_playbook} -e scale_cluster_clustername=${var.scale_cluster_clustername} -e ansible_ssh_private_key_file=${local.gklm_private_key} -e scale_encryption_admin_default_password=${var.scale_encryption_admin_default_password} -e scale_encryption_admin_password=${var.scale_encryption_admin_password} -e scale_encryption_admin_user=${var.scale_encryption_admin_username} -e '{\"scale_encryption_servers_list\": ${local.scale_encryption_servers}}'" - } - depends_on = [local_sensitive_file.write_meta_private_key] - triggers = { - build = timestamp() - } -} - -resource "null_resource" "perform_encryption_storage" { - count = (tobool(var.turn_on) == true && tobool(var.storage_cluster_encryption) == true && tobool(var.storage_cluster_create_complete) == true && tobool(var.remote_mount_create_complete) == true && tobool(var.create_scale_cluster) == true && var.scale_encryption_type == "gklm") ? 1 : 0 - provisioner "local-exec" { - interpreter = ["/bin/bash", "-c"] - command = "sudo /usr/local/bin/ansible-playbook -f 32 -i ${local.storage_inventory_path} ${local.encryption_cluster_playbook} -e '{\"scale_encryption_servers_dns\": ${local.scale_encryption_servers_dns}}'" - } - depends_on = [null_resource.perform_encryption_prepare] - triggers = { - build = timestamp() - } -} - -resource "null_resource" "perform_encryption_compute" { - count = (tobool(var.turn_on) == true && tobool(var.compute_cluster_encryption) == true && tobool(var.compute_cluster_create_complete) == true && tobool(var.remote_mount_create_complete) == true && tobool(var.create_scale_cluster) == true && var.scale_encryption_type == "gklm") ? 1 : 0 - provisioner "local-exec" { - interpreter = ["/bin/bash", "-c"] - command = "sudo /usr/local/bin/ansible-playbook -f 32 -i ${local.compute_inventory_path} ${local.encryption_cluster_playbook} -e '{\"scale_encryption_servers_dns\": ${local.scale_encryption_servers_dns}}'" - } - depends_on = [null_resource.perform_encryption_prepare, null_resource.perform_encryption_storage] - triggers = { - build = timestamp() - } -} diff --git a/modules/common/encryption_configuration/outputs.tf b/modules/common/encryption_configuration/outputs.tf deleted file mode 100644 index e69de29b..00000000 diff --git a/modules/common/encryption_configuration/variables.tf b/modules/common/encryption_configuration/variables.tf deleted file mode 100644 index 36223165..00000000 --- a/modules/common/encryption_configuration/variables.tf +++ /dev/null @@ -1,78 +0,0 @@ -variable "turn_on" { - type = string - description = "To turn on the null resources based on conditions." -} - -variable "clone_path" { - type = string - description = "Scale repo clone path" -} - -variable "create_scale_cluster" { - type = string - description = "Eenables scale cluster configuration." -} - -variable "meta_private_key" { - type = string - description = "Meta private key." -} - -variable "scale_cluster_clustername" { - type = string - description = "Name of the cluster." -} - -variable "scale_encryption_servers" { - type = list(string) - description = "GKLM encryption server IP's." -} - -variable "scale_encryption_servers_dns" { - type = list(string) - description = "GKLM encryption server hostnames." -} - -variable "scale_encryption_admin_default_password" { - type = string - description = "The default administrator password used for resetting the admin password based on the user input. 
The password has to be updated which was configured during the GKLM installation." -} - -variable "scale_encryption_admin_username" { - type = string - description = "The default Admin username for Security Key Lifecycle Manager(GKLM)." -} - -variable "scale_encryption_admin_password" { - type = string - description = "Password that is used for performing administrative operations for the GKLM.The password must contain at least 8 characters and at most 20 characters. For a strong password, at least three alphabetic characters are required, with at least one uppercase and one lowercase letter. Two numbers, and at least one special character from this(~@_+:). Make sure that the password doesn't include the username. Visit this [page](https://www.ibm.com/docs/en/gklm/3.0.1?topic=roles-password-policy) to know more about password policy of GKLM. " -} - -variable "scale_encryption_type" { - type = string - description = "To enable filesystem encryption, specify either 'key_protect' or 'gklm'. If neither is specified, the default value will be 'null' and encryption is disabled" -} - -variable "compute_cluster_create_complete" { - type = bool - description = "Status of the compute cluster complete" -} - -variable "storage_cluster_create_complete" { - type = bool - description = "Status of the compute cluster complete" -} -variable "remote_mount_create_complete" { - type = bool - description = "Status of the compute cluster complete" -} - -variable "compute_cluster_encryption" { - type = bool - description = "Status of the compute cluster complete" -} - -variable "storage_cluster_encryption" { - type = bool - description = "Status of the compute cluster complete" -} diff --git a/modules/common/encryption_configuration/version.tf b/modules/common/encryption_configuration/version.tf deleted file mode 100644 index 4ba00afc..00000000 --- a/modules/common/encryption_configuration/version.tf +++ /dev/null @@ -1,18 +0,0 @@ -############################################################################## -# Terraform Providers -############################################################################## - -terraform { - required_version = ">= 1.9.0" - # Use "greater than or equal to" range for root level modules - required_providers { - local = { - source = "hashicorp/local" - version = "~> 2" - } - null = { - source = "hashicorp/null" - version = ">= 3.0.0" - } - } -} diff --git a/modules/common/key_protect_configuration/locals.tf b/modules/common/key_protect_configuration/locals.tf deleted file mode 100644 index 695bb5a4..00000000 --- a/modules/common/key_protect_configuration/locals.tf +++ /dev/null @@ -1,7 +0,0 @@ -locals { - compute_inventory_path = format("%s/%s/compute_inventory.ini", var.clone_path, "ibm-spectrum-scale-install-infra") - storage_inventory_path = format("%s/%s/storage_inventory.ini", var.clone_path, "ibm-spectrum-scale-install-infra") - compute_kp_encryption_playbook = format("%s/%s/compute_kp_encryption_playbook.yaml", var.clone_path, "ibm-spectrum-scale-install-infra") - storage_kp_encryption_playbook = format("%s/%s/storage_kp_encryption_playbook.yaml", var.clone_path, "ibm-spectrum-scale-install-infra") - gpfs_restart_playbook_path = format("%s/%s/scale_gpfs_restart.yml", var.clone_path, "ibm-spectrum-scale-install-infra") -} diff --git a/modules/common/key_protect_configuration/main.tf b/modules/common/key_protect_configuration/main.tf deleted file mode 100644 index 81ecf4ed..00000000 --- a/modules/common/key_protect_configuration/main.tf +++ /dev/null @@ -1,24 +0,0 @@ 
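The encryption_configuration module deleted above serialized its three GKLM plays through `depends_on`: prepare the key servers first, then apply encryption on the storage cluster, then on the compute cluster. A compact sketch of that ordering, assuming the module's playbook and inventory filenames with clone-path prefixes dropped; `check=True` stands in for Terraform skipping dependent resources when a dependency fails.

```python
# Run order the removed module enforced via depends_on.
import subprocess


def run_play(playbook, inventory=None):
    cmd = ["ansible-playbook", "-f", "32"]
    if inventory:
        cmd += ["-i", inventory]
    cmd.append(playbook)
    # Raises CalledProcessError on failure, stopping the chain.
    subprocess.run(cmd, check=True)


run_play("encryption_gklm_playbook.yaml")                              # prepare GKLM servers
run_play("encryption_cluster_playbook.yaml", "storage_inventory.ini")  # storage cluster
run_play("encryption_cluster_playbook.yaml", "compute_inventory.ini")  # compute cluster, last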
-resource "null_resource" "perform_encryption_storage" { - count = (tobool(var.turn_on) == true && tobool(var.storage_cluster_encryption) == true && tobool(var.storage_cluster_create_complete) == true && tobool(var.remote_mount_create_complete) == true && tobool(var.create_scale_cluster) == true && var.scale_encryption_type == "key_protect") ? 1 : 0 - provisioner "local-exec" { - interpreter = ["/bin/bash", "-c"] - command = "sudo /usr/local/bin/ansible-playbook -f 32 -i ${local.storage_inventory_path} ${local.storage_kp_encryption_playbook}" - } -} - -resource "null_resource" "perform_encryption_compute" { - count = (tobool(var.turn_on) == true && tobool(var.compute_cluster_encryption) == true && tobool(var.compute_cluster_create_complete) == true && tobool(var.remote_mount_create_complete) == true && tobool(var.create_scale_cluster) == true && var.scale_encryption_type == "key_protect") ? 1 : 0 - provisioner "local-exec" { - interpreter = ["/bin/bash", "-c"] - command = "sudo /usr/local/bin/ansible-playbook -f 32 -i ${local.compute_inventory_path} ${local.compute_kp_encryption_playbook}" - } -} - -resource "null_resource" "perform_encryption_gpfs_restart" { - count = (tobool(var.turn_on) == true && tobool(var.compute_cluster_encryption) == true && tobool(var.compute_cluster_create_complete) == true && tobool(var.remote_mount_create_complete) == true && tobool(var.create_scale_cluster) == true && var.scale_encryption_type == "key_protect") ? 1 : 0 - provisioner "local-exec" { - interpreter = ["/bin/bash", "-c"] - command = "sudo /usr/local/bin/ansible-playbook -f 32 -i ${local.compute_inventory_path} ${local.gpfs_restart_playbook_path}" - } - depends_on = [null_resource.perform_encryption_compute] -} diff --git a/modules/common/key_protect_configuration/outputs.tf b/modules/common/key_protect_configuration/outputs.tf deleted file mode 100644 index e69de29b..00000000 diff --git a/modules/common/key_protect_configuration/variables.tf b/modules/common/key_protect_configuration/variables.tf deleted file mode 100644 index 16c4ed87..00000000 --- a/modules/common/key_protect_configuration/variables.tf +++ /dev/null @@ -1,43 +0,0 @@ -variable "turn_on" { - type = string - description = "To turn on the null resources based on conditions." -} - -variable "clone_path" { - type = string - description = "Scale repo clone path" -} - -variable "create_scale_cluster" { - type = string - description = "Eenables scale cluster configuration." -} - -variable "scale_encryption_type" { - type = string - description = "To enable filesystem encryption, specify either 'key_protect' or 'gklm'. 
If neither is specified, the default value will be 'null' and encryption is disabled" -} - -variable "compute_cluster_create_complete" { - type = bool - description = "Status of the compute cluster complete" -} - -variable "storage_cluster_create_complete" { - type = bool - description = "Status of the compute cluster complete" -} -variable "remote_mount_create_complete" { - type = bool - description = "Status of the compute cluster complete" -} - -variable "compute_cluster_encryption" { - type = bool - description = "Status of the compute cluster complete" -} - -variable "storage_cluster_encryption" { - type = bool - description = "Status of the compute cluster complete" -} diff --git a/modules/common/key_protect_configuration/version.tf b/modules/common/key_protect_configuration/version.tf deleted file mode 100644 index 8f7d5d9c..00000000 --- a/modules/common/key_protect_configuration/version.tf +++ /dev/null @@ -1,14 +0,0 @@ -############################################################################## -# Terraform Providers -############################################################################## - -terraform { - required_version = ">= 1.9.0" - # Use "greater than or equal to" range for root level modules - required_providers { - null = { - source = "hashicorp/null" - version = ">= 3.0.0" - } - } -} diff --git a/modules/common/ldap_configuration/ldap_configuration.tf b/modules/common/ldap_configuration/ldap_configuration.tf new file mode 100644 index 00000000..d315ca41 --- /dev/null +++ b/modules/common/ldap_configuration/ldap_configuration.tf @@ -0,0 +1,46 @@ +/* + Ansible playbook to enable scnryption using ldap. +*/ + +resource "local_sensitive_file" "write_meta_private_key" { + count = (tobool(var.turn_on) == true) ? 1 : 0 + content = var.meta_private_key + filename = local.ldap_private_key + file_permission = "0600" +} + +resource "null_resource" "prepare_ansible_inventory" { + count = (tobool(var.turn_on) == true && tobool(var.write_inventory_complete) == true && tobool(var.using_jumphost_connection) == false) ? 1 : 0 + provisioner "local-exec" { + interpreter = ["/bin/bash", "-c"] + command = "python3 ${var.script_path} --install_infra_path ${var.clone_path} --instance_private_key ${local.ldap_private_key} --ldap_nodes ${local.ldap_server} --ldap_basedns ${var.ldap_basedns} --ldap_admin_password ${var.ldap_admin_password} --ldap_user_name ${var.ldap_user_name} --ldap_user_password ${var.ldap_user_password} --resource_prefix ${var.ldap_cluster_prefix}" + } + depends_on = [local_sensitive_file.write_meta_private_key] + triggers = { + build = timestamp() + } +} + +resource "null_resource" "prepare_ansible_inventory_using_jumphost_connection" { + count = (tobool(var.turn_on) == true && tobool(var.write_inventory_complete) == true && tobool(var.using_jumphost_connection) == true) ? 
1 : 0 + provisioner "local-exec" { + interpreter = ["/bin/bash", "-c"] + command = "python3 ${var.script_path} --install_infra_path ${var.clone_path} --bastion_user ${var.bastion_user} --bastion_ip ${var.bastion_instance_public_ip} --bastion_ssh_private_key ${var.bastion_ssh_private_key} --instance_private_key ${local.ldap_private_key} --ldap_nodes ${local.ldap_server} --ldap_basedns ${var.ldap_basedns} --ldap_admin_password ${var.ldap_admin_password} --ldap_user_name ${var.ldap_user_name} --ldap_user_password ${var.ldap_user_password} --resource_prefix ${var.ldap_cluster_prefix}" + } + depends_on = [local_sensitive_file.write_meta_private_key] + triggers = { + build = timestamp() + } +} + +resource "null_resource" "perform_ldap_prepare" { + count = (tobool(var.turn_on) == true && tobool(var.create_scale_cluster) == true) ? 1 : 0 + provisioner "local-exec" { + interpreter = ["/bin/bash", "-c"] + command = "/usr/local/bin/ansible-playbook -f 32 -i ${local.ldap_inventory_path} ${local.ldap_configure_playbook} -e ldap_server=${local.ldap_server}" + } + depends_on = [local_sensitive_file.write_meta_private_key, null_resource.prepare_ansible_inventory, null_resource.prepare_ansible_inventory_using_jumphost_connection] + triggers = { + build = timestamp() + } +} diff --git a/modules/common/ldap_configuration/locals.tf b/modules/common/ldap_configuration/locals.tf index 8cf21d0a..2f78aafc 100644 --- a/modules/common/ldap_configuration/locals.tf +++ b/modules/common/ldap_configuration/locals.tf @@ -1,6 +1,4 @@ locals { - scripts_path = replace(path.module, "ldap_configuration", "scripts") - ansible_inv_script_path = format("%s/prepare_ldap_inv.py", local.scripts_path) ldap_private_key = format("%s/ldap_key/id_rsa", var.clone_path) ldap_server = jsonencode(var.ldap_server) ldap_inventory_path = format("%s/%s/ldap_inventory.ini", var.clone_path, "ibm-spectrum-scale-install-infra") diff --git a/modules/common/ldap_configuration/main.tf b/modules/common/ldap_configuration/main.tf index 2976a831..e69de29b 100644 --- a/modules/common/ldap_configuration/main.tf +++ b/modules/common/ldap_configuration/main.tf @@ -1,46 +0,0 @@ -/* - LDAP Configurations and Ansible Plays -*/ - -resource "local_sensitive_file" "write_meta_private_key" { - count = (tobool(var.turn_on) == true) ? 1 : 0 - content = var.meta_private_key - filename = local.ldap_private_key - file_permission = "0600" -} - -resource "null_resource" "prepare_ansible_inventory" { - count = (tobool(var.turn_on) == true && tobool(var.write_inventory_complete) == true && tobool(var.using_jumphost_connection) == false) ? 1 : 0 - provisioner "local-exec" { - interpreter = ["/bin/bash", "-c"] - command = "python3 ${local.ansible_inv_script_path} --install_infra_path ${var.clone_path} --instance_private_key ${local.ldap_private_key} --ldap_nodes ${local.ldap_server} --ldap_basedns ${var.ldap_basedns} --ldap_admin_password ${var.ldap_admin_password} --ldap_user_name ${var.ldap_user_name} --ldap_user_password ${var.ldap_user_password} --resource_prefix ${var.ldap_cluster_prefix}" - } - depends_on = [local_sensitive_file.write_meta_private_key] - triggers = { - build = timestamp() - } -} - -resource "null_resource" "prepare_ansible_inventory_using_jumphost_connection" { - count = (tobool(var.turn_on) == true && tobool(var.write_inventory_complete) == true && tobool(var.using_jumphost_connection) == true) ? 
1 : 0 - provisioner "local-exec" { - interpreter = ["/bin/bash", "-c"] - command = "python3 ${local.ansible_inv_script_path} --install_infra_path ${var.clone_path} --bastion_user ${var.bastion_user} --bastion_ip ${var.bastion_instance_public_ip} --bastion_ssh_private_key ${var.bastion_ssh_private_key} --instance_private_key ${local.ldap_private_key} --ldap_nodes ${local.ldap_server} --ldap_basedns ${var.ldap_basedns} --ldap_admin_password ${var.ldap_admin_password} --ldap_user_name ${var.ldap_user_name} --ldap_user_password ${var.ldap_user_password} --resource_prefix ${var.ldap_cluster_prefix}" - } - depends_on = [local_sensitive_file.write_meta_private_key] - triggers = { - build = timestamp() - } -} - -resource "null_resource" "perform_ldap_prepare" { - count = (tobool(var.turn_on) == true && tobool(var.create_scale_cluster) == true) ? 1 : 0 - provisioner "local-exec" { - interpreter = ["/bin/bash", "-c"] - command = "sudo /usr/local/bin/ansible-playbook -f 32 -i ${local.ldap_inventory_path} ${local.ldap_configure_playbook} -e ldap_server=${local.ldap_server}" - } - depends_on = [local_sensitive_file.write_meta_private_key, null_resource.prepare_ansible_inventory, null_resource.prepare_ansible_inventory_using_jumphost_connection] - triggers = { - build = timestamp() - } -} diff --git a/modules/common/ldap_configuration/variables.tf b/modules/common/ldap_configuration/variables.tf index 56c35601..89cb86d6 100644 --- a/modules/common/ldap_configuration/variables.tf +++ b/modules/common/ldap_configuration/variables.tf @@ -8,6 +8,11 @@ variable "clone_path" { description = "Scale repo clone path" } +variable "script_path" { + type = string + description = "Python script path" +} + variable "create_scale_cluster" { type = string description = "It enables scale cluster configuration." diff --git a/modules/common/network_playbook/network_playbook.tf b/modules/common/network_playbook/network_playbook.tf deleted file mode 100644 index 0a612f0c..00000000 --- a/modules/common/network_playbook/network_playbook.tf +++ /dev/null @@ -1,14 +0,0 @@ -/* - Executes network playbook. -*/ - -resource "null_resource" "perform_scale_deployment" { - count = (tobool(var.turn_on) == true && tobool(var.compute_cluster_create_complete) == true && tobool(var.storage_cluster_create_complete) == true && tobool(var.create_scale_cluster) == true) ? 1 : 0 - provisioner "local-exec" { - interpreter = ["/bin/bash", "-c"] - command = "sudo ansible-playbook -i ${var.inventory_path} ${var.network_playbook_path}" - } - triggers = { - build = timestamp() - } -} diff --git a/modules/common/network_playbook/outputs.tf b/modules/common/network_playbook/outputs.tf deleted file mode 100644 index 6809db99..00000000 --- a/modules/common/network_playbook/outputs.tf +++ /dev/null @@ -1 +0,0 @@ -#Place holder for output diff --git a/modules/common/network_playbook/variables.tf b/modules/common/network_playbook/variables.tf deleted file mode 100644 index dcde1211..00000000 --- a/modules/common/network_playbook/variables.tf +++ /dev/null @@ -1,29 +0,0 @@ -variable "compute_cluster_create_complete" { - type = bool - description = "Compute cluster creation completed." -} - -variable "storage_cluster_create_complete" { - type = bool - description = "Storage cluster creation completed." -} - -variable "network_playbook_path" { - type = string - description = "Path for network playbook." -} - -variable "turn_on" { - type = string - description = "It is used to turn on the null resources based on conditions." 
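One functional change hides in the new ldap_configuration.tf above: the inventory script location is now injected as `var.script_path` rather than derived from `path.module` (the corresponding locals were dropped), while the flag set the `local-exec` commands pass is unchanged. Below is a hypothetical, reduced argparse sketch of the CLI surface those commands assume; the real prepare_ldap_inv.py also accepts the bastion flags used by the jumphost variant and actually writes ldap_inventory.ini.

```python
# Hypothetical reduced CLI mirroring the flags passed by ldap_configuration.tf.
import argparse
import json

parser = argparse.ArgumentParser(description="Prepare LDAP inventory")
parser.add_argument("--install_infra_path", required=True)
parser.add_argument("--instance_private_key", required=True)
parser.add_argument("--ldap_nodes", required=True,
                    help='jsonencode()d list, e.g. \'["10.240.0.4"]\'')
parser.add_argument("--ldap_basedns", required=True)
parser.add_argument("--ldap_admin_password", required=True)
parser.add_argument("--ldap_user_name", required=True)
parser.add_argument("--ldap_user_password", required=True)
parser.add_argument("--resource_prefix", required=True)
args = parser.parse_args()

# locals.tf keeps ldap_server = jsonencode(var.ldap_server), so the value
# should arrive as a JSON document rather than a bare hostname.
ldap_nodes = json.loads(args.ldap_nodes)
print("preparing inventory for %d LDAP node(s)" % len(ldap_nodes))
```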
-} - -variable "create_scale_cluster" { - type = string - description = "It enables scale cluster configuration." -} - -variable "inventory_path" { - type = string - description = "Scale JSON inventory path" -} diff --git a/modules/common/network_playbook/version.tf b/modules/common/network_playbook/version.tf deleted file mode 100644 index cb7b1cd8..00000000 --- a/modules/common/network_playbook/version.tf +++ /dev/null @@ -1,14 +0,0 @@ -############################################################################## -# Terraform Providers -############################################################################## - -terraform { - required_version = ">= 1.3" - # Use "greater than or equal to" range for root level modules - required_providers { - null = { - source = "hashicorp/null" - version = ">= 3.0.0" - } - } -} diff --git a/modules/common/remote_mount_configuration/outputs.tf b/modules/common/remote_mount_configuration/outputs.tf index 9d4b898c..3e47c993 100644 --- a/modules/common/remote_mount_configuration/outputs.tf +++ b/modules/common/remote_mount_configuration/outputs.tf @@ -1,6 +1,6 @@ output "remote_mount_create_complete" { value = true - depends_on = [time_sleep.wait_for_gui_db_initialization, null_resource.prepare_remote_mnt_inventory, null_resource.prepare_remote_mnt_inventory_using_jumphost_connection, null_resource.perform_scale_deployment] + depends_on = [time_sleep.wait_for_gui_db_initializion, null_resource.prepare_remote_mnt_inventory, null_resource.prepare_remote_mnt_inventory_using_jumphost_connection, null_resource.perform_scale_deployment] description = "Remote mount create complete" } diff --git a/modules/common/remote_mount_configuration/remote_mount_configuration.tf b/modules/common/remote_mount_configuration/remote_mount_configuration.tf index a84cc3c6..7d242470 100644 --- a/modules/common/remote_mount_configuration/remote_mount_configuration.tf +++ b/modules/common/remote_mount_configuration/remote_mount_configuration.tf @@ -1,5 +1,5 @@ /* - Executes ansible playbook to configure remote mount between IBM Spectrum Scale compute and storage cluster. + Excutes ansible playbook to configure remote mount between IBM Spectrum Scale compute and storage cluster. */ resource "null_resource" "prepare_remote_mnt_inventory_using_jumphost_connection" { @@ -24,7 +24,7 @@ resource "null_resource" "prepare_remote_mnt_inventory" { } } -resource "time_sleep" "wait_for_gui_db_initialization" { +resource "time_sleep" "wait_for_gui_db_initializion" { count = (tobool(var.turn_on) == true && tobool(var.storage_cluster_create_complete) == true && tobool(var.create_scale_cluster) == true) ? 1 : 0 create_duration = "180s" depends_on = [null_resource.prepare_remote_mnt_inventory, null_resource.prepare_remote_mnt_inventory_using_jumphost_connection] @@ -34,9 +34,9 @@ resource "null_resource" "perform_scale_deployment" { count = (tobool(var.turn_on) == true && tobool(var.compute_cluster_create_complete) == true && tobool(var.storage_cluster_create_complete) == true && tobool(var.create_scale_cluster) == true) ? 
1 : 0 provisioner "local-exec" { interpreter = ["/bin/bash", "-c"] - command = "sudo ansible-playbook -i ${local.remote_mnt_inventory_path} ${local.remote_mnt_playbook_path}" + command = "ansible-playbook -i ${local.remote_mnt_inventory_path} ${local.remote_mnt_playbook_path}" } - depends_on = [time_sleep.wait_for_gui_db_initialization, null_resource.prepare_remote_mnt_inventory, null_resource.prepare_remote_mnt_inventory_using_jumphost_connection] + depends_on = [time_sleep.wait_for_gui_db_initializion, null_resource.prepare_remote_mnt_inventory, null_resource.prepare_remote_mnt_inventory_using_jumphost_connection] triggers = { build = timestamp() } diff --git a/modules/common/scripts/prepare_client_inv.py b/modules/common/scripts/prepare_client_inv.py index 3d11af74..e7521a2f 100755 --- a/modules/common/scripts/prepare_client_inv.py +++ b/modules/common/scripts/prepare_client_inv.py @@ -1,4 +1,5 @@ #!/usr/bin/env python3 +# -*- coding: utf-8 -*- """ Copyright IBM Corporation 2023 @@ -37,11 +38,12 @@ def read_json_file(json_path): tf_inv = json.load(json_handler) except json.decoder.JSONDecodeError: print( - f"Provided terraform inventory file ({json_path}) is not a valid json." + "Provided terraform inventory file (%s) is not a valid json." + % json_path ) sys.exit(1) except OSError: - print(f"Provided terraform inventory file ({json_path}) does not exist.") + print("Provided terraform inventory file (%s) does not exist." % json_path) sys.exit(1) return tf_inv @@ -55,7 +57,7 @@ def write_to_file(filepath, filecontent): def prepare_ansible_playbook_mount_fileset_client(hosts_config): """Write to playbook""" - content = f"""--- + content = """--- # Mounting mount filesets on client nodes - hosts: {hosts_config} collections: @@ -66,7 +68,9 @@ def prepare_ansible_playbook_mount_fileset_client(hosts_config): - nfs_client_prepare - nfs_client_configure - {{ role: auth_configure, when: enable_ldap }} -""" +""".format( + hosts_config=hosts_config + ) return content @@ -164,27 +168,24 @@ def initialize_node_details(client_cluster_instance_names, key_file): # Step-1: Read the inventory file STRG_TF = read_json_file(ARGUMENTS.client_tf_inv_path) if ARGUMENTS.verbose: - print(f"Parsed storage terraform output: {json.dumps(STRG_TF, indent=4)}") + print("Parsed storage terraform output: %s" % json.dumps(STRG_TF, indent=4)) # Step-2: Cleanup the Client Playbook file cleanup( - "{}/{}/{}_mount_cloud_playbook.yaml".format( - ARGUMENTS.install_infra_path, "ibm-spectrum-scale-install-infra", "client" - ) + "%s/%s/%s_mount_cloud_playbook.yaml" + % (ARGUMENTS.install_infra_path, "ibm-spectrum-scale-install-infra", "client") ) - # Step-3: Cleanup the Client inventory file + # Step-3: Cleanup the Clinet inventory file cleanup( - "{}/{}/{}_mount_inventory.ini".format( - ARGUMENTS.install_infra_path, "ibm-spectrum-scale-install-infra", "client" - ) + "%s/%s/%s_mount_inventory.ini" + % (ARGUMENTS.install_infra_path, "ibm-spectrum-scale-install-infra", "client") ) # Step-4: Create playbook playbook_content = prepare_ansible_playbook_mount_fileset_client("client_nodes") write_to_file( - "{}/{}/client_cloud_playbook.yaml".format( - ARGUMENTS.install_infra_path, "ibm-spectrum-scale-install-infra" - ), + "%s/%s/client_cloud_playbook.yaml" + % (ARGUMENTS.install_infra_path, "ibm-spectrum-scale-install-infra"), playbook_content, ) @@ -210,9 +211,8 @@ def initialize_node_details(client_cluster_instance_names, key_file): node_template = node_template + each_entry + "\n" with open( - "{}/{}/client_inventory.ini".format( - 
ARGUMENTS.install_infra_path, "ibm-spectrum-scale-install-infra" - ), + "%s/%s/client_inventory.ini" + % (ARGUMENTS.install_infra_path, "ibm-spectrum-scale-install-infra"), "w", ) as configfile: configfile.write("[client_nodes]" + "\n") @@ -228,9 +228,8 @@ def initialize_node_details(client_cluster_instance_names, key_file): ARGUMENTS.ldap_admin_password, ) with open( - "{}/{}/client_inventory.ini".format( - ARGUMENTS.install_infra_path, "ibm-spectrum-scale-install-infra" - ), + "%s/%s/client_inventory.ini" + % (ARGUMENTS.install_infra_path, "ibm-spectrum-scale-install-infra"), "w", ) as configfile: configfile.write("[client_nodes]" + "\n") diff --git a/modules/common/scripts/prepare_ldap_inv.py b/modules/common/scripts/prepare_ldap_inv.py index 17d93aef..6fdaae08 100644 --- a/modules/common/scripts/prepare_ldap_inv.py +++ b/modules/common/scripts/prepare_ldap_inv.py @@ -1,4 +1,5 @@ #!/usr/bin/env python3 +# -*- coding: utf-8 -*- """ Copyright IBM Corporation 2023 @@ -126,9 +127,8 @@ def initialize_node_details(ldap_instance_ips, key_file): if ARGUMENTS.ldap_basedns != "null": ldap_playbook_content = prepare_ansible_playbook_ldap_server("ldap_nodes") write_to_file( - "{}/{}/ldap_configure_playbook.yaml".format( - ARGUMENTS.install_infra_path, "ibm-spectrum-scale-install-infra" - ), + "%s/%s/ldap_configure_playbook.yaml" + % (ARGUMENTS.install_infra_path, "ibm-spectrum-scale-install-infra"), ldap_playbook_content, ) if ARGUMENTS.verbose: @@ -155,9 +155,8 @@ def initialize_node_details(ldap_instance_ips, key_file): node_template = node_template + each_entry + "\n" with open( - "{}/{}/ldap_inventory.ini".format( - ARGUMENTS.install_infra_path, "ibm-spectrum-scale-install-infra" - ), + "%s/%s/ldap_inventory.ini" + % (ARGUMENTS.install_infra_path, "ibm-spectrum-scale-install-infra"), "w", ) as configfile: configfile.write("[ldap_nodes]" + "\n") @@ -171,9 +170,8 @@ def initialize_node_details(ldap_instance_ips, key_file): ARGUMENTS.ldap_user_password, ) with open( - "{}/{}/ldap_inventory.ini".format( - ARGUMENTS.install_infra_path, "ibm-spectrum-scale-install-infra" - ), + "%s/%s/ldap_inventory.ini" + % (ARGUMENTS.install_infra_path, "ibm-spectrum-scale-install-infra"), "w", ) as configfile: configfile.write("[ldap_nodes]" + "\n") diff --git a/modules/common/scripts/prepare_remote_mount_inv.py b/modules/common/scripts/prepare_remote_mount_inv.py index 24036d70..4d6028f5 100755 --- a/modules/common/scripts/prepare_remote_mount_inv.py +++ b/modules/common/scripts/prepare_remote_mount_inv.py @@ -1,4 +1,5 @@ #!/usr/bin/env python3 +# -*- coding: utf-8 -*- """ Copyright IBM Corporation 2018 @@ -38,11 +39,12 @@ def read_json_file(json_path): tf_inv = json.load(json_handler) except json.decoder.JSONDecodeError: print( - f"Provided terraform inventory file ({json_path}) is not a valid json." + "Provided terraform inventory file (%s) is not a valid json." + % json_path ) sys.exit(1) except OSError: - print(f"Provided terraform inventory file ({json_path}) does not exist.") + print("Provided terraform inventory file (%s) does not exist." 
% json_path) sys.exit(1) return tf_inv @@ -189,18 +191,18 @@ def initialize_node_details(storage_gui_ip, user, key_file): # Step-1: Read the inventory file COMP_TF = read_json_file(ARGUMENTS.compute_tf_inv_path) if ARGUMENTS.verbose: - print(f"Parsed compute terraform output: {json.dumps(COMP_TF, indent=4)}") + print("Parsed compute terraform output: %s" % json.dumps(COMP_TF, indent=4)) STRG_TF = read_json_file(ARGUMENTS.storage_tf_inv_path) if ARGUMENTS.verbose: - print(f"Parsed storage terraform output: {json.dumps(STRG_TF, indent=4)}") + print("Parsed storage terraform output: %s" % json.dumps(STRG_TF, indent=4)) # Step-2: Read the GUI inventory file COMP_GUI = read_json_file(ARGUMENTS.compute_gui_inv_path) if ARGUMENTS.verbose: - print(f"Parsed compute terraform output: {json.dumps(COMP_GUI, indent=4)}") + print("Parsed compute terraform output: %s" % json.dumps(COMP_GUI, indent=4)) STRG_GUI = read_json_file(ARGUMENTS.storage_gui_inv_path) if ARGUMENTS.verbose: - print(f"Parsed storage terraform output: {json.dumps(STRG_GUI, indent=4)}") + print("Parsed storage terraform output: %s" % json.dumps(STRG_GUI, indent=4)) # Step-3: Create playbook remote_mount = {} @@ -220,9 +222,8 @@ def initialize_node_details(storage_gui_ip, user, key_file): playbook_content = prepare_remote_mount_playbook("scale_nodes", remote_mount) write_to_file( - "{}/{}/remote_mount_cloud_playbook.yaml".format( - ARGUMENTS.install_infra_path, "ibm-spectrum-scale-install-infra" - ), + "%s/%s/remote_mount_cloud_playbook.yaml" + % (ARGUMENTS.install_infra_path, "ibm-spectrum-scale-install-infra"), playbook_content, ) @@ -249,9 +250,8 @@ def initialize_node_details(storage_gui_ip, user, key_file): node_template = node_template + each_entry + "\n" with open( - "{}/{}/remote_mount_inventory.ini".format( - ARGUMENTS.install_infra_path, "ibm-spectrum-scale-install-infra" - ), + "%s/%s/remote_mount_inventory.ini" + % (ARGUMENTS.install_infra_path, "ibm-spectrum-scale-install-infra"), "w", ) as configfile: configfile.write("[scale_nodes]" + "\n") diff --git a/modules/common/scripts/prepare_scale_inv_ini.py b/modules/common/scripts/prepare_scale_inv_ini.py index bcb9c603..1057b62e 100755 --- a/modules/common/scripts/prepare_scale_inv_ini.py +++ b/modules/common/scripts/prepare_scale_inv_ini.py @@ -1,4 +1,5 @@ #!/usr/bin/env python3 +# -*- coding: utf-8 -*- """ Copyright IBM Corporation 2018 @@ -42,7 +43,7 @@ def calculate_pagepool(nodeclass, memory): else: pagepool_gb = min(int((memory * 0.25) // 1), 32) - return f"{pagepool_gb}G" + return "{}G".format(pagepool_gb) def calculate_maxStatCache(nodeclass, memory): @@ -133,12 +134,12 @@ def read_json_file(json_path): tf_inv = json.load(json_handler) except json.decoder.JSONDecodeError: print( - f"Provided terraform inventory file ({json_path}) is not a valid " - "json." + "Provided terraform inventory file (%s) is not a valid " + "json." % json_path ) sys.exit(1) except OSError: - print(f"Provided terraform inventory file ({json_path}) does not exist.") + print("Provided terraform inventory file (%s) does not exist." 
% json_path) sys.exit(1) return tf_inv @@ -158,9 +159,9 @@ def write_to_file(filepath, filecontent): def prepare_ansible_playbook(hosts_config, cluster_config, cluster_key_file): """Write to playbook""" - content = f"""--- + content = """--- # Ensure provisioned VMs are up and Passwordless SSH setup -# has been completed and operational +# has been compleated and operational - name: Check passwordless SSH connection is setup hosts: {hosts_config} any_errors_fatal: true @@ -240,13 +241,20 @@ def prepare_ansible_playbook(hosts_config, cluster_config, cluster_key_file): - {{ role: afm_cos_prepare, when: enable_afm }} - {{ role: afm_cos_install, when: "enable_afm and scale_packages_installed is false" }} - {{ role: afm_cos_configure, when: enable_afm }} -""" + - {{ role: kp_encryption_prepare, when: "enable_key_protect and scale_cluster_type == 'storage'" }} + - {{ role: kp_encryption_configure, when: enable_key_protect }} + - {{ role: kp_encryption_apply, when: "enable_key_protect and scale_cluster_type == 'storage'" }} +""".format( + hosts_config=hosts_config, + cluster_config=cluster_config, + cluster_key_file=cluster_key_file, + ) return content def prepare_packer_ansible_playbook(hosts_config, cluster_config): """Write to playbook""" - content = f"""--- + content = """--- # Install and config Spectrum Scale on nodes - hosts: {hosts_config} collections: @@ -260,13 +268,15 @@ def prepare_packer_ansible_playbook(hosts_config, cluster_config): - gui_verify - perfmon_configure - perfmon_verify -""" +""".format( + hosts_config=hosts_config, cluster_config=cluster_config + ) return content def prepare_nogui_ansible_playbook(hosts_config, cluster_config): """Write to playbook""" - content = f"""--- + content = """--- # Install and config Spectrum Scale on nodes - hosts: {hosts_config} collections: @@ -278,13 +288,15 @@ def prepare_nogui_ansible_playbook(hosts_config, cluster_config): - core_prepare - core_install - core_configure -""" +""".format( + hosts_config=hosts_config, cluster_config=cluster_config + ) return content def prepare_nogui_packer_ansible_playbook(hosts_config, cluster_config): """Write to playbook""" - content = f"""--- + content = """--- # Install and config Spectrum Scale on nodes - hosts: {hosts_config} collections: @@ -294,7 +306,9 @@ def prepare_nogui_packer_ansible_playbook(hosts_config, cluster_config): - include_vars: group_vars/{cluster_config} roles: - core_configure -""" +""".format( + hosts_config=hosts_config, cluster_config=cluster_config + ) return content @@ -328,26 +342,6 @@ def prepare_ansible_playbook_encryption_cluster(hosts_config): return content.format(hosts_config=hosts_config) -def prepare_ansible_playbook_key_protect_encryption(hosts_config, cluster_config): - # Write to playbook - content = f"""--- -# Install and config Spectrum Scale on nodes -- hosts: {hosts_config} - collections: - - ibm.spectrum_scale - any_errors_fatal: true - vars: - - scale_node_update_check: false - pre_tasks: - - include_vars: group_vars/{cluster_config} - roles: - - {{ role: kp_encryption_prepare, when: "enable_key_protect and scale_cluster_type == 'storage'" }} - - {{ role: kp_encryption_configure, when: enable_key_protect }} - - {{ role: kp_encryption_apply, when: "enable_key_protect and scale_cluster_type == 'storage'" }} -""" - return content - - def initialize_cluster_details( scale_version, cluster_name, @@ -482,7 +476,7 @@ def initialize_node_details( "scale_protocol_node": False, "scale_cluster_gateway": False, } - # Scale Management node definition + # Scale 
@@ -328,26 +342,6 @@ def prepare_ansible_playbook_encryption_cluster(hosts_config): return content.format(hosts_config=hosts_config) -def prepare_ansible_playbook_key_protect_encryption(hosts_config, cluster_config): - # Write to playbook - content = f"""--- -# Install and config Spectrum Scale on nodes -- hosts: {hosts_config} - collections: - - ibm.spectrum_scale - any_errors_fatal: true - vars: - - scale_node_update_check: false - pre_tasks: - - include_vars: group_vars/{cluster_config} - roles: - - {{ role: kp_encryption_prepare, when: "enable_key_protect and scale_cluster_type == 'storage'" }} - - {{ role: kp_encryption_configure, when: enable_key_protect }} - - {{ role: kp_encryption_apply, when: "enable_key_protect and scale_cluster_type == 'storage'" }} -""" - return content - - def initialize_cluster_details( scale_version, cluster_name, @@ -482,7 +476,7 @@ def initialize_node_details( "scale_protocol_node": False, "scale_cluster_gateway": False, } - # Scale Management node definition elif ( compute_cluster_instance_names.index(each_ip) == total_compute_node - 1 ): @@ -503,13 +497,14 @@ def initialize_node_details( } write_json_file( {"compute_cluster_gui_ip_address": each_ip}, - "{}/{}".format( + "%s/%s" + % ( str(pathlib.PurePath(ARGUMENTS.tf_inv_path).parent), "compute_cluster_gui_details.json", ), ) else: - # Non-quorum node definition + # Non-quorum node definition node = { "ip_addr": each_ip, "is_quorum": False, @@ -563,7 +558,7 @@ def initialize_node_details( "scale_protocol_node": is_protocol, "scale_cluster_gateway": is_afm, } - # Tie-breaker node definition elif ( storage_cluster_instance_names.index(each_ip) == total_storage_node - 1 ): @@ -582,7 +577,7 @@ def initialize_node_details( "scale_protocol_node": False, "scale_cluster_gateway": False, } - # Scale Management node definition elif ( storage_cluster_instance_names.index(each_ip) == total_storage_node - 2 ): @@ -603,13 +598,14 @@ def initialize_node_details( } write_json_file( {"storage_cluster_gui_ip_address": each_ip}, - "{}/{}".format( + "%s/%s" + % ( str(pathlib.PurePath(ARGUMENTS.tf_inv_path).parent), "storage_cluster_gui_details.json", ), ) else: - # Non-quorum node definition node = { "ip_addr": each_ip, "is_quorum": False, @@ -1132,7 +1128,7 @@ def initialize_scale_ces_details( # Step-1: Read the inventory file TF = read_json_file(ARGUMENTS.tf_inv_path) if ARGUMENTS.verbose: - print(f"Parsed terraform output: {json.dumps(TF, indent=4)}") + print("Parsed terraform output: %s" % json.dumps(TF, indent=4)) # Step-2: Identify the cluster type if ( @@ -1141,33 +1137,37 @@ ): cluster_type = "compute" cleanup( - "{}/{}/{}_inventory.ini".format( + "%s/%s/%s_inventory.ini" + % ( ARGUMENTS.install_infra_path, "ibm-spectrum-scale-install-infra", cluster_type, ) ) cleanup( - f"{str(pathlib.PurePath(ARGUMENTS.tf_inv_path).parent)}/{cluster_type}_cluster_gui_details.json" + "%s/%s_cluster_gui_details.json" + % (str(pathlib.PurePath(ARGUMENTS.tf_inv_path).parent), cluster_type) ) cleanup( - "/{}/{}/{}_cloud_playbook.yaml".format( + "/%s/%s/%s_cloud_playbook.yaml" + % ( ARGUMENTS.install_infra_path, "ibm-spectrum-scale-install-infra", cluster_type, ) ) cleanup( - "{}/{}/{}/{}".format( + "%s/%s/%s/%s" + % ( ARGUMENTS.install_infra_path, "ibm-spectrum-scale-install-infra", "group_vars", - f"{cluster_type}_cluster_config.yaml", + "%s_cluster_config.yaml" % cluster_type, ) ) gui_username = ARGUMENTS.gui_username gui_password = ARGUMENTS.gui_password - profile_path = f"{ARGUMENTS.install_infra_path}/computesncparams" + profile_path = "%s/computesncparams" % ARGUMENTS.install_infra_path replica_config = False computenodegrp = generate_nodeclass_config( @@ -1192,33 +1192,37 @@ # single az storage cluster cluster_type = "storage" cleanup( - "{}/{}/{}_inventory.ini".format( + "%s/%s/%s_inventory.ini" + % ( ARGUMENTS.install_infra_path, "ibm-spectrum-scale-install-infra", cluster_type, ) ) cleanup( - f"{str(pathlib.PurePath(ARGUMENTS.tf_inv_path).parent)}/{cluster_type}_cluster_gui_details.json" + "%s/%s_cluster_gui_details.json" + % (str(pathlib.PurePath(ARGUMENTS.tf_inv_path).parent), cluster_type) ) cleanup( - "/{}/{}/{}_cloud_playbook.yaml".format( + "/%s/%s/%s_cloud_playbook.yaml" + % ( ARGUMENTS.install_infra_path, "ibm-spectrum-scale-install-infra", cluster_type, ) ) cleanup( - "{}/{}/{}/{}".format( +
"%s/%s/%s/%s" + % ( ARGUMENTS.install_infra_path, "ibm-spectrum-scale-install-infra", "group_vars", - f"{cluster_type}_cluster_config.yaml", + "%s_cluster_config.yaml" % cluster_type, ) ) gui_username = ARGUMENTS.gui_username gui_password = ARGUMENTS.gui_password - profile_path = f"{ARGUMENTS.install_infra_path}/storagesncparams" + profile_path = "%s/storagesncparams" % ARGUMENTS.install_infra_path replica_config = bool(len(TF["vpc_availability_zones"]) > 1) managementnodegrp = generate_nodeclass_config( @@ -1283,33 +1287,37 @@ def initialize_scale_ces_details( # multi az storage cluster cluster_type = "storage" cleanup( - "{}/{}/{}_inventory.ini".format( + "%s/%s/%s_inventory.ini" + % ( ARGUMENTS.install_infra_path, "ibm-spectrum-scale-install-infra", cluster_type, ) ) cleanup( - f"{str(pathlib.PurePath(ARGUMENTS.tf_inv_path).parent)}/{cluster_type}_cluster_gui_details.json" + "%s/%s_cluster_gui_details.json" + % (str(pathlib.PurePath(ARGUMENTS.tf_inv_path).parent), cluster_type) ) cleanup( - "/{}/{}/{}_cloud_playbook.yaml".format( + "/%s/%s/%s_cloud_playbook.yaml" + % ( ARGUMENTS.install_infra_path, "ibm-spectrum-scale-install-infra", cluster_type, ) ) cleanup( - "{}/{}/{}/{}".format( + "%s/%s/%s/%s" + % ( ARGUMENTS.install_infra_path, "ibm-spectrum-scale-install-infra", "group_vars", - f"{cluster_type}_cluster_config.yaml", + "%s_cluster_config.yaml" % cluster_type, ) ) gui_username = ARGUMENTS.gui_username gui_password = ARGUMENTS.gui_password - profile_path = f"{ARGUMENTS.install_infra_path}/storagesncparams" + profile_path = "%s/storagesncparams" % ARGUMENTS.install_infra_path replica_config = bool(len(TF["vpc_availability_zones"]) > 1) managementnodegrp = generate_nodeclass_config( @@ -1368,30 +1376,33 @@ def initialize_scale_ces_details( else: cluster_type = "combined" cleanup( - "{}/{}/{}_inventory.ini".format( + "%s/%s/%s_inventory.ini" + % ( ARGUMENTS.install_infra_path, "ibm-spectrum-scale-install-infra", cluster_type, ) ) cleanup( - "/{}/{}/{}_cloud_playbook.yaml".format( + "/%s/%s/%s_cloud_playbook.yaml" + % ( ARGUMENTS.install_infra_path, "ibm-spectrum-scale-install-infra", cluster_type, ) ) cleanup( - "{}/{}/{}/{}".format( + "%s/%s/%s/%s" + % ( ARGUMENTS.install_infra_path, "ibm-spectrum-scale-install-infra", "group_vars", - f"{cluster_type}_cluster_config.yaml", + "%s_cluster_config.yaml" % cluster_type, ) ) gui_username = ARGUMENTS.gui_username gui_password = ARGUMENTS.gui_password - profile_path = f"{ARGUMENTS.install_infra_path}/scalesncparams" + profile_path = "%s/scalesncparams" % ARGUMENTS.install_infra_path replica_config = bool(len(TF["vpc_availability_zones"]) > 1) computenodegrp = generate_nodeclass_config( @@ -1469,7 +1480,7 @@ def initialize_scale_ces_details( nodeclassgrp.append(afmgatewaygrp) scale_config = initialize_scale_config_details(nodeclassgrp) - print(f"Identified cluster type: {cluster_type}") + print("Identified cluster type: %s" % cluster_type) # Step-3: Identify if tie breaker needs to be counted for storage if len(TF["vpc_availability_zones"]) > 1: @@ -1509,11 +1520,12 @@ def initialize_scale_ces_details( ): playbook_content = prepare_ansible_playbook( "scale_nodes", - f"{cluster_type}_cluster_config.yaml", + "%s_cluster_config.yaml" % cluster_type, ARGUMENTS.instance_private_key, ) write_to_file( - "/{}/{}/{}_cloud_playbook.yaml".format( + "/%s/%s/%s_cloud_playbook.yaml" + % ( ARGUMENTS.install_infra_path, "ibm-spectrum-scale-install-infra", cluster_type, @@ -1525,10 +1537,11 @@ def initialize_scale_ces_details( and 
ARGUMENTS.using_rest_initialization == "true" ): playbook_content = prepare_packer_ansible_playbook( - "scale_nodes", f"{cluster_type}_cluster_config.yaml" + "scale_nodes", "%s_cluster_config.yaml" % cluster_type ) write_to_file( - "/{}/{}/{}_cloud_playbook.yaml".format( + "/%s/%s/%s_cloud_playbook.yaml" + % ( ARGUMENTS.install_infra_path, "ibm-spectrum-scale-install-infra", cluster_type, @@ -1540,10 +1553,11 @@ def initialize_scale_ces_details( and ARGUMENTS.using_rest_initialization == "false" ): playbook_content = prepare_nogui_ansible_playbook( - "scale_nodes", f"{cluster_type}_cluster_config.yaml" + "scale_nodes", "%s_cluster_config.yaml" % cluster_type ) write_to_file( - "/{}/{}/{}_cloud_playbook.yaml".format( + "/%s/%s/%s_cloud_playbook.yaml" + % ( ARGUMENTS.install_infra_path, "ibm-spectrum-scale-install-infra", cluster_type, @@ -1555,10 +1569,11 @@ def initialize_scale_ces_details( and ARGUMENTS.using_rest_initialization == "false" ): playbook_content = prepare_nogui_packer_ansible_playbook( - "scale_nodes", f"{cluster_type}_cluster_config.yaml" + "scale_nodes", "%s_cluster_config.yaml" % cluster_type ) write_to_file( - "/{}/{}/{}_cloud_playbook.yaml".format( + "/%s/%s/%s_cloud_playbook.yaml" + % ( ARGUMENTS.install_infra_path, "ibm-spectrum-scale-install-infra", cluster_type, @@ -1575,18 +1590,16 @@ def initialize_scale_ces_details( ): encryption_playbook_content = prepare_ansible_playbook_encryption_gklm() write_to_file( - "{}/{}/encryption_gklm_playbook.yaml".format( - ARGUMENTS.install_infra_path, "ibm-spectrum-scale-install-infra" - ), + "%s/%s/encryption_gklm_playbook.yaml" + % (ARGUMENTS.install_infra_path, "ibm-spectrum-scale-install-infra"), encryption_playbook_content, ) encryption_playbook_content = prepare_ansible_playbook_encryption_cluster( "scale_nodes" ) write_to_file( - "{}/{}/encryption_cluster_playbook.yaml".format( - ARGUMENTS.install_infra_path, "ibm-spectrum-scale-install-infra" - ), + "%s/%s/encryption_cluster_playbook.yaml" + % (ARGUMENTS.install_infra_path, "ibm-spectrum-scale-install-infra"), encryption_playbook_content, ) if ARGUMENTS.verbose: @@ -1594,31 +1607,6 @@ def initialize_scale_ces_details( "Content of ansible playbook for encryption:\n", encryption_playbook_content ) - # Step-4.2: Create Key Protect Encryption playbook - if ( - ARGUMENTS.scale_encryption_enabled == "true" - and ARGUMENTS.scale_encryption_type == "key_protect" - and ARGUMENTS.enable_key_protect == "True" - ): - kp_encryption_playbook_content = ( - prepare_ansible_playbook_key_protect_encryption( - "scale_nodes", f"{cluster_type}_cluster_config.yaml" - ) - ) - write_to_file( - "/{}/{}/{}_kp_encryption_playbook.yaml".format( - ARGUMENTS.install_infra_path, - "ibm-spectrum-scale-install-infra", - cluster_type, - ), - kp_encryption_playbook_content, - ) - if ARGUMENTS.verbose: - print( - "Content of ansible playbook for key protect encryption:\n", - kp_encryption_playbook_content, - ) - # Step-5: Create hosts config = configparser.ConfigParser(allow_no_value=True) node_details = initialize_node_details( @@ -1654,7 +1642,7 @@ def initialize_scale_ces_details( if TF["resource_prefix"]: cluster_name = TF["resource_prefix"] else: - cluster_name = "{}.{}".format("spectrum-scale", cluster_type) + cluster_name = "%s.%s" % ("spectrum-scale", cluster_type) config["all:vars"] = initialize_cluster_details( TF["scale_version"], @@ -1686,7 +1674,8 @@ def initialize_scale_ces_details( TF["afm_config_details"], ) with open( - "{}/{}/{}_inventory.ini".format( + "%s/%s/%s_inventory.ini" + % ( 
ARGUMENTS.install_infra_path, "ibm-spectrum-scale-install-infra", cluster_type, @@ -1699,14 +1688,16 @@ def initialize_scale_ces_details( if ARGUMENTS.verbose: config.read( - "{}/{}/{}_inventory.ini".format( + "%s/%s/%s_inventory.ini" + % ( ARGUMENTS.install_infra_path, "ibm-spectrum-scale-install-infra", cluster_type, ) ) print( - "Content of {}/{}/{}_inventory.ini".format( + "Content of %s/%s/%s_inventory.ini" + % ( ARGUMENTS.install_infra_path, "ibm-spectrum-scale-install-infra", cluster_type, @@ -1716,11 +1707,12 @@ def initialize_scale_ces_details( print(node_template) print("[all:vars]") for each_key in config["all:vars"]: - print("{}: {}".format(each_key, config.get("all:vars", each_key))) + print("%s: %s" % (each_key, config.get("all:vars", each_key))) # Step-6: Create group_vars directory create_directory( - "{}/{}/{}".format( + "%s/%s/%s" + % ( ARGUMENTS.install_infra_path, "ibm-spectrum-scale-install-infra", "group_vars", @@ -1728,18 +1720,20 @@ ) # Step-7: Create group_vars with open( - "{}/{}/{}/{}".format( + "%s/%s/%s/%s" + % ( ARGUMENTS.install_infra_path, "ibm-spectrum-scale-install-infra", "group_vars", - f"{cluster_type}_cluster_config.yaml", + "%s_cluster_config.yaml" % cluster_type, ), "w", ) as groupvar: yaml.dump(scale_config, groupvar, default_flow_style=False) if ARGUMENTS.verbose: print( - f"group_vars content:\n{yaml.dump(scale_config, default_flow_style=False)}" + "group_vars content:\n%s" + % yaml.dump(scale_config, default_flow_style=False) ) if cluster_type in ["storage", "combined"]: @@ -1776,16 +1770,18 @@ "scale_storage": scale_storage["scale_storage"], } with open( - "{}/{}/{}/{}".format( + "%s/%s/%s/%s" + % ( ARGUMENTS.install_infra_path, "ibm-spectrum-scale-install-infra", "group_vars", - f"{cluster_type}_cluster_config.yaml", + "%s_cluster_config.yaml" % cluster_type, ), "a", ) as groupvar: yaml.dump(scale_storage_cluster, groupvar, default_flow_style=False) if ARGUMENTS.verbose: print( - f"group_vars content:\n{yaml.dump(scale_storage_cluster, default_flow_style=False)}" + "group_vars content:\n%s" + % yaml.dump(scale_storage_cluster, default_flow_style=False) )
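The inventory written in Steps 5 through 7 above leans on configparser with allow_no_value=True, so bare hostnames can sit under [scale_nodes] while key=value pairs land under [all:vars]. A minimal sketch of that convention (the IP addresses, version string and file name are made up):

    import configparser

    config = configparser.ConfigParser(allow_no_value=True)
    # Keys whose value is None are emitted as bare lines, i.e. plain hostnames.
    config["scale_nodes"] = {"10.241.0.4": None, "10.241.0.5": None}
    config["all:vars"] = {"scale_version": "5.2.1"}
    with open("storage_inventory.ini", "w") as configfile:
        config.write(configfile)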
Existing!") - print("{}: {} {}: {}".format("stdout", out, "stderr", err)) + print("%s: %s %s: %s" % ("stdout", out, "stderr", err)) sys.exit(1) @@ -97,7 +99,7 @@ def aws_ec2_wait_running(instance_ids, region): # Step-1: Read the inventory file TF = read_json_file(ARGUMENTS.tf_inv_path) if ARGUMENTS.verbose: - print(f"Parsed terraform output: {json.dumps(TF, indent=4)}") + print("Parsed terraform output: %s" % json.dumps(TF, indent=4)) # Step-2: Identify instance id's based cluster_type target_instance_ids = [] diff --git a/modules/common/storage_configuration/locals.tf b/modules/common/storage_configuration/locals.tf index 226ffb84..6780560f 100644 --- a/modules/common/storage_configuration/locals.tf +++ b/modules/common/storage_configuration/locals.tf @@ -1,29 +1,21 @@ locals { - scripts_path = replace(path.module, "storage_configuration", "scripts") - ansible_inv_script_path = var.inventory_format == "ini" ? format("%s/prepare_scale_inv_ini.py", local.scripts_path) : format("%s/prepare_scale_inv_json.py", local.scripts_path) - wait_for_ssh_script_path = format("%s/wait_for_ssh_availability.py", local.scripts_path) - scale_tuning_config_path = format("%s/%s", var.clone_path, "storagesncparams.profile") - storage_private_key = format("%s/storage_key/id_rsa", var.clone_path) #tfsec:ignore:GEN002 - default_metadata_replicas = var.default_metadata_replicas == null ? jsonencode("None") : jsonencode(var.default_metadata_replicas) - default_data_replicas = var.default_data_replicas == null ? jsonencode("None") : jsonencode(var.default_data_replicas) - storage_inventory_path = format("%s/%s/storage_inventory.ini", var.clone_path, "ibm-spectrum-scale-install-infra") - storage_playbook_path = format("%s/%s/storage_cloud_playbook.yaml", var.clone_path, "ibm-spectrum-scale-install-infra") - scale_encryption_servers = var.scale_encryption_enabled && var.scale_encryption_type == "gklm" ? jsonencode(var.scale_encryption_servers) : jsonencode("None") - scale_encryption_admin_password = var.scale_encryption_enabled ? var.scale_encryption_admin_password : "None" - ldap_server_cert_path = format("%s/ldap_key/ldap_cacert.pem", var.scale_config_path) - colocate_protocol_instances = var.colocate_protocol_instances ? "True" : "False" - is_colocate_protocol_subset = var.is_colocate_protocol_subset ? "True" : "False" - enable_mrot_conf = var.enable_mrot_conf ? "True" : "False" - enable_ces = var.enable_ces ? "True" : "False" - enable_afm = var.enable_afm ? "True" : "False" - enable_key_protect = var.scale_encryption_enabled && var.enable_key_protect == "True" ? 
"True" : "False" - ldap_server = jsonencode(var.ldap_server) - scale_baremetal_ssh_check_playbook_path = format("%s/%s/scale_baremetal_ssh_check_playbook.yml", var.clone_path, "ibm-spectrum-scale-install-infra") - scale_baremetal_bootdrive_playbook_path = format("%s/%s/scale_baremetal_bootdrive.yml", var.clone_path, "ibm-spectrum-scale-install-infra") - scale_baremetal_prerequisite_vars = format("%s/%s/scale_baremetal_vars.yml", var.clone_path, "ibm-spectrum-scale-install-infra") - scale_baremetal_prerequisite_playbook_path = format("%s/%s/scale_baremetal_prerequisite.yml", var.clone_path, "ibm-spectrum-scale-install-infra") - scale_hostentry_playbook_path = format("%s/%s/scale_host_entry_play.yml", var.clone_path, "ibm-spectrum-scale-install-infra") - scale_cluster_hosts = format("%s/%s/scale_cluster_hosts.yml", var.clone_path, "ibm-spectrum-scale-install-infra") - scale_all_inventory = format("%s/%s/scale_all_inventory.ini", var.clone_path, "ibm-spectrum-scale-install-infra") - domain_name_file = format("%s/%s/domain_names.yml", var.clone_path, "ibm-spectrum-scale-install-infra") + scripts_path = replace(path.module, "storage_configuration", "scripts") + ansible_inv_script_path = var.inventory_format == "ini" ? format("%s/prepare_scale_inv_ini.py", local.scripts_path) : format("%s/prepare_scale_inv_json.py", local.scripts_path) + wait_for_ssh_script_path = format("%s/wait_for_ssh_availability.py", local.scripts_path) + scale_tuning_config_path = format("%s/%s", var.clone_path, "storagesncparams.profile") + storage_private_key = format("%s/storage_key/id_rsa", var.clone_path) #tfsec:ignore:GEN002 + default_metadata_replicas = var.default_metadata_replicas == null ? jsonencode("None") : jsonencode(var.default_metadata_replicas) + default_data_replicas = var.default_data_replicas == null ? jsonencode("None") : jsonencode(var.default_data_replicas) + storage_inventory_path = format("%s/%s/storage_inventory.ini", var.clone_path, "ibm-spectrum-scale-install-infra") + storage_playbook_path = format("%s/%s/storage_cloud_playbook.yaml", var.clone_path, "ibm-spectrum-scale-install-infra") + scale_encryption_servers = var.scale_encryption_enabled && var.scale_encryption_type == "gklm" ? jsonencode(var.scale_encryption_servers) : jsonencode("None") + scale_encryption_admin_password = var.scale_encryption_enabled ? var.scale_encryption_admin_password : "None" + ldap_server_cert_path = format("%s/ldap_key/ldap_cacert.pem", var.clone_path) + colocate_protocol_instances = var.colocate_protocol_instances ? "True" : "False" + is_colocate_protocol_subset = var.is_colocate_protocol_subset ? "True" : "False" + enable_mrot_conf = var.enable_mrot_conf ? "True" : "False" + enable_ces = var.enable_ces ? "True" : "False" + enable_afm = var.enable_afm ? "True" : "False" + enable_key_protect = var.enable_key_protect == "key_protect" ? "True" : "False" + ldap_server = jsonencode(var.ldap_server) } diff --git a/modules/common/storage_configuration/storage_configuration.tf b/modules/common/storage_configuration/storage_configuration.tf index 383f2764..162d5d68 100644 --- a/modules/common/storage_configuration/storage_configuration.tf +++ b/modules/common/storage_configuration/storage_configuration.tf @@ -1,5 +1,5 @@ /* - Executes ansible playbook to install IBM Spectrum Scale storage cluster. + Excutes ansible playbook to install IBM Spectrum Scale storage cluster. 
*/ resource "local_file" "create_storage_tuning_parameters" { @@ -41,60 +41,6 @@ resource "local_sensitive_file" "write_existing_ldap_cert" { file_permission = "0600" } -resource "time_sleep" "wait_300_seconds" { - count = (tobool(var.turn_on) == true && tobool(var.write_inventory_complete) == true) && var.storage_type == "persistent" ? 1 : 0 - create_duration = "300s" - depends_on = [local_sensitive_file.write_meta_private_key] -} - -resource "null_resource" "scale_baremetal_ssh_check_play" { - count = (tobool(var.turn_on) == true && tobool(var.write_inventory_complete) == true && tobool(var.create_scale_cluster) == true) && var.storage_type == "persistent" ? 1 : 0 - provisioner "local-exec" { - interpreter = ["/bin/bash", "-c"] - command = "sudo ansible-playbook -f 50 -i ${local.scale_all_inventory} -l 'storage' -e @${local.scale_baremetal_prerequisite_vars} ${local.scale_baremetal_ssh_check_playbook_path}" - } - depends_on = [local_sensitive_file.write_meta_private_key, time_sleep.wait_300_seconds] - triggers = { - build = timestamp() - } -} - -resource "null_resource" "scale_host_play" { - count = (tobool(var.turn_on) == true && tobool(var.write_inventory_complete) == true && tobool(var.create_scale_cluster) == true) ? 1 : 0 - provisioner "local-exec" { - interpreter = ["/bin/bash", "-c"] - command = "sudo ansible-playbook -f 50 -i ${local.scale_all_inventory} -l 'storage' -e @${local.scale_cluster_hosts} -e @${local.domain_name_file} ${local.scale_hostentry_playbook_path}" - } - depends_on = [null_resource.scale_baremetal_ssh_check_play, time_sleep.wait_300_seconds] - triggers = { - build = timestamp() - } -} - -resource "null_resource" "scale_baremetal_bootdrive_play" { - count = (tobool(var.turn_on) == true && tobool(var.write_inventory_complete) == true && tobool(var.create_scale_cluster) == true) && var.storage_type == "persistent" && var.bms_boot_drive_encryption == true ? 1 : 0 - provisioner "local-exec" { - interpreter = ["/bin/bash", "-c"] - command = "sudo ansible-playbook -f 50 -i ${local.scale_all_inventory} -l 'storage' -e @${local.scale_baremetal_prerequisite_vars} ${local.scale_baremetal_bootdrive_playbook_path}" - } - depends_on = [null_resource.scale_baremetal_ssh_check_play, null_resource.scale_host_play] - triggers = { - build = timestamp() - } -} - -resource "null_resource" "scale_baremetal_prerequisite_play" { - count = (tobool(var.turn_on) == true && tobool(var.write_inventory_complete) == true && tobool(var.create_scale_cluster) == true) && var.storage_type == "persistent" ? 1 : 0 - provisioner "local-exec" { - interpreter = ["/bin/bash", "-c"] - command = "sudo ansible-playbook -f 50 -i ${local.scale_all_inventory} -l 'storage' -e @${local.scale_baremetal_prerequisite_vars} ${local.scale_baremetal_prerequisite_playbook_path}" - } - depends_on = [null_resource.scale_baremetal_ssh_check_play, null_resource.scale_host_play] - triggers = { - build = timestamp() - } -} - resource "null_resource" "prepare_ansible_inventory_using_jumphost_connection" { count = (tobool(var.turn_on) == true && tobool(var.write_inventory_complete) == true && tobool(var.using_jumphost_connection) == true && tobool(var.scale_encryption_enabled) == false) && var.bastion_instance_public_ip != null && var.bastion_ssh_private_key != null ? 1 : 0 provisioner "local-exec" { @@ -165,9 +111,9 @@ resource "null_resource" "perform_scale_deployment" { count = (tobool(var.turn_on) == true && tobool(var.write_inventory_complete) == true && tobool(var.create_scale_cluster) == true) ? 
1 : 0 provisioner "local-exec" { interpreter = ["/bin/bash", "-c"] - command = "sudo ansible-playbook -f 32 -i ${local.storage_inventory_path} ${local.storage_playbook_path} --extra-vars \"scale_version=${var.scale_version}\" --extra-vars \"scale_install_directory_pkg_path=${var.spectrumscale_rpms_path}\"" + command = "ansible-playbook -f 32 -i ${local.storage_inventory_path} ${local.storage_playbook_path} --extra-vars \"scale_version=${var.scale_version}\" --extra-vars \"scale_install_directory_pkg_path=${var.spectrumscale_rpms_path}\"" } - depends_on = [null_resource.scale_host_play, null_resource.scale_baremetal_prerequisite_play, null_resource.scale_baremetal_bootdrive_play, time_sleep.wait_60_seconds, null_resource.wait_for_ssh_availability, null_resource.prepare_ansible_inventory, null_resource.prepare_ansible_inventory_using_jumphost_connection, null_resource.prepare_ansible_inventory, null_resource.prepare_ansible_inventory_using_jumphost_connection] + depends_on = [time_sleep.wait_60_seconds, null_resource.wait_for_ssh_availability, null_resource.prepare_ansible_inventory, null_resource.prepare_ansible_inventory_using_jumphost_connection, null_resource.prepare_ansible_inventory, null_resource.prepare_ansible_inventory_using_jumphost_connection] triggers = { build = timestamp() } diff --git a/modules/common/storage_configuration/variables.tf b/modules/common/storage_configuration/variables.tf index a91e4639..953761e2 100644 --- a/modules/common/storage_configuration/variables.tf +++ b/modules/common/storage_configuration/variables.tf @@ -24,11 +24,6 @@ variable "inventory_path" { description = "Scale JSON inventory path" } -variable "scale_config_path" { - type = string - description = "Path to clone github.com/IBM/ibm-spectrum-scale-install-infra." -} - variable "inventory_format" { type = string description = "Scale inventory format" @@ -281,15 +276,3 @@ variable "afm_bandwidth" { type = string description = "AFM node bandwidth" } - -variable "storage_type" { - type = string - default = "scratch" - description = "Select the required storage type(scratch/persistent/eval)." -} - -variable "bms_boot_drive_encryption" { - type = bool - default = false - description = "To enable the encryption for the boot drive of bare metal server. Select true or false" -} diff --git a/modules/cos/cos.tf b/modules/cos/cos.tf new file mode 100644 index 00000000..8268f57c --- /dev/null +++ b/modules/cos/cos.tf @@ -0,0 +1,553 @@ +############################################################################################################# +# 1. 
It creates new COS instance, Bucket and Hmac Key +############################################################################################################# + +locals { + path_elements = split("/", var.filesystem) + filesystem = element(local.path_elements, length(local.path_elements) - 1) + + new_cos_instance = distinct([for instance in var.new_instance_bucket_hmac : instance.cos_instance]) + # New bucket single Site + new_bucket_single_site_region = [for region in var.new_instance_bucket_hmac : region.bucket_region if region.bucket_type == "single_site_location"] + storage_class_single_site = [for class in var.new_instance_bucket_hmac : class.bucket_storage_class if class.bucket_type == "single_site_location"] + mode_single_site = [for mode in var.new_instance_bucket_hmac : mode.mode if mode.bucket_type == "single_site_location"] + afm_fileset_single_site = [for fileset in var.new_instance_bucket_hmac : fileset.afm_fileset if fileset.bucket_type == "single_site_location"] + # New bucket regional + new_bucket_regional_region = [for region in var.new_instance_bucket_hmac : region.bucket_region if region.bucket_type == "region_location" || region.bucket_type == ""] + storage_class_regional = [for class in var.new_instance_bucket_hmac : class.bucket_storage_class if class.bucket_type == "region_location" || class.bucket_type == ""] + mode_regional = [for mode in var.new_instance_bucket_hmac : mode.mode if mode.bucket_type == "region_location" || mode.bucket_type == ""] + afm_fileset_regional = [for fileset in var.new_instance_bucket_hmac : fileset.afm_fileset if fileset.bucket_type == "region_location" || fileset.bucket_type == ""] + # New bucket cross region + new_bucket_cross_region = [for region in var.new_instance_bucket_hmac : region.bucket_region if region.bucket_type == "cross_region_location"] + storage_class_cross_regional = [for class in var.new_instance_bucket_hmac : class.bucket_storage_class if class.bucket_type == "cross_region_location"] + mode_cross_regional = [for mode in var.new_instance_bucket_hmac : mode.mode if mode.bucket_type == "cross_region_location"] + fileset_cross_regional = [for fileset in var.new_instance_bucket_hmac : fileset.afm_fileset if fileset.bucket_type == "cross_region_location"] +} + +resource "ibm_resource_instance" "cos_instance" { + for_each = { + for idx, count_number in range(1, length(local.new_cos_instance) + 1) : idx => { + sequence_string = tostring(count_number) + } + } + name = format("%s-%03s", "${var.prefix}instance", each.value.sequence_string) + resource_group_id = var.resource_group_id + plan = var.cos_instance_plan + location = var.cos_instance_location + service = var.cos_instance_service +} + +resource "ibm_cos_bucket" "cos_bucket_single_site" { + for_each = { + for idx, count_number in range(1, length(local.new_bucket_single_site_region) + 1) : idx => { + sequence_string = tostring(count_number) + cos_instance = element(flatten([for instance_id in ibm_resource_instance.cos_instance : instance_id[*].id]), idx) + region_location = element(local.new_bucket_single_site_region, idx) + storage_class = element(local.storage_class_single_site, idx) + } + } + bucket_name = format("%s-%03s", "${var.prefix}bucket-new", each.value.sequence_string) + resource_instance_id = each.value.cos_instance + single_site_location = each.value.region_location + storage_class = each.value.storage_class == "" ? 
"smart" : each.value.storage_class + depends_on = [ibm_resource_instance.cos_instance] +} + +resource "ibm_cos_bucket" "cos_bucket_regional" { + for_each = { + for idx, count_number in range(1, length(local.new_bucket_regional_region) + 1) : idx => { + sequence_string = tostring(count_number) + cos_instance = element(flatten([for instance_id in ibm_resource_instance.cos_instance : instance_id[*].id]), idx) + region_location = element(local.new_bucket_regional_region, idx) + storage_class = element(local.storage_class_regional, idx) + } + } + bucket_name = format("%s-%03s", "${var.prefix}bucket-new", (each.value.sequence_string + length(local.new_bucket_single_site_region))) + resource_instance_id = each.value.cos_instance + region_location = each.value.region_location + storage_class = each.value.storage_class == "" ? "smart" : each.value.storage_class + depends_on = [ibm_resource_instance.cos_instance] +} + +resource "ibm_cos_bucket" "cos_bucket_cross_region" { + for_each = { + for idx, count_number in range(1, length(local.new_bucket_cross_region) + 1) : idx => { + sequence_string = tostring(count_number) + cos_instance = element(flatten([for instance_id in ibm_resource_instance.cos_instance : instance_id[*].id]), idx) + region_location = element(local.new_bucket_cross_region, idx) + storage_class = element(local.storage_class_cross_regional, idx) + } + } + bucket_name = format("%s-%03s", "${var.prefix}bucket-new", (each.value.sequence_string + (length(local.new_bucket_single_site_region) + length(local.new_bucket_regional_region)))) + resource_instance_id = each.value.cos_instance + cross_region_location = each.value.region_location + storage_class = each.value.storage_class == "" ? "smart" : each.value.storage_class + depends_on = [ibm_resource_instance.cos_instance] +} + +resource "ibm_resource_key" "hmac_key" { + for_each = { + for idx, count_number in range(1, length(local.new_cos_instance) + 1) : idx => { + sequence_string = tostring(count_number) + cos_instance = element(flatten([for instance_id in ibm_resource_instance.cos_instance : instance_id[*].id]), idx) + } + } + name = format("%s-%03s", "${var.prefix}hmac-key-new", each.value.sequence_string) + resource_instance_id = each.value.cos_instance + parameters = { "HMAC" = true } + role = var.cos_hmac_role +} + +locals { + buckets = concat((flatten([for bucket in ibm_cos_bucket.cos_bucket_single_site : bucket[*].bucket_name])), (flatten([for bucket in ibm_cos_bucket.cos_bucket_regional : bucket[*].bucket_name])), (flatten([for bucket in ibm_cos_bucket.cos_bucket_cross_region : bucket[*].bucket_name]))) + endpoints = concat((flatten([for endpoint in ibm_cos_bucket.cos_bucket_single_site : endpoint[*].s3_endpoint_direct])), (flatten([for endpoint in ibm_cos_bucket.cos_bucket_regional : endpoint[*].s3_endpoint_direct])), (flatten([for endpoint in ibm_cos_bucket.cos_bucket_cross_region : endpoint[*].s3_endpoint_direct]))) + modes = concat(local.mode_single_site, local.mode_regional, local.mode_cross_regional) + filesets = concat(local.afm_fileset_single_site, local.afm_fileset_regional, local.fileset_cross_regional) + + + afm_cos_bucket_details_1 = [for idx, config in var.new_instance_bucket_hmac : { + akey = ibm_resource_key.hmac_key[0].credentials["cos_hmac_keys.access_key_id"] + bucket = (local.buckets)[idx] + skey = ibm_resource_key.hmac_key[0].credentials["cos_hmac_keys.secret_access_key"] + }] + + afm_config_details_1 = [for idx, config in var.new_instance_bucket_hmac : { + bucket = (local.buckets)[idx] + filesystem = 
local.filesystem + fileset = (local.filesets)[idx] + mode = (local.modes)[idx] + endpoint = "https://${(local.endpoints)[idx]}" + }] +} + +############################################################################################################# +# 2. It uses existing COS instance and creates new COS Bucket and Hmac Key in that instance. +############################################################################################################# + +locals { + exstng_instance_new_bkt_hmac = [for instance in var.exstng_instance_new_bucket_hmac : instance.cos_instance] + # New bucket single Site + exstng_instance_new_bkt_hmac_single_site = [for instance in var.exstng_instance_new_bucket_hmac : instance.cos_instance if instance.bucket_type == "single_site_location"] + exstng_instance_single_site_region = [for region in var.exstng_instance_new_bucket_hmac : region.bucket_region if region.bucket_type == "single_site_location"] + exstng_instance_storage_class_single_site = [for class in var.exstng_instance_new_bucket_hmac : class.bucket_storage_class if class.bucket_type == "single_site_location"] + exstng_instance_mode_single_site = [for mode in var.exstng_instance_new_bucket_hmac : mode.mode if mode.bucket_type == "single_site_location"] + exstng_instance_fileset_single_site = [for fileset in var.exstng_instance_new_bucket_hmac : fileset.afm_fileset if fileset.bucket_type == "single_site_location"] + # New bucket regional + exstng_instance_new_bkt_hmac_regional = [for instance in var.exstng_instance_new_bucket_hmac : instance.cos_instance if instance.bucket_type == "region_location" || instance.bucket_type == ""] + exstng_instance_regional_region = [for region in var.exstng_instance_new_bucket_hmac : region.bucket_region if region.bucket_type == "region_location" || region.bucket_type == ""] + exstng_instance_storage_class_regional = [for class in var.exstng_instance_new_bucket_hmac : class.bucket_storage_class if class.bucket_type == "region_location" || class.bucket_type == ""] + exstng_instance_mode_regional = [for mode in var.exstng_instance_new_bucket_hmac : mode.mode if mode.bucket_type == "region_location" || mode.bucket_type == ""] + exstng_instance_fileset_regional = [for fileset in var.exstng_instance_new_bucket_hmac : fileset.afm_fileset if fileset.bucket_type == "region_location" || fileset.bucket_type == ""] + # New bucket cross region + exstng_instance_new_bkt_hmac_cross_regional = [for instance in var.exstng_instance_new_bucket_hmac : instance.cos_instance if instance.bucket_type == "cross_region_location"] + exstng_instance_cross_regional = [for region in var.exstng_instance_new_bucket_hmac : region.bucket_region if region.bucket_type == "cross_region_location"] + exstng_instance_storage_class_cross_regional = [for class in var.exstng_instance_new_bucket_hmac : class.bucket_storage_class if class.bucket_type == "cross_region_location"] + exstng_instance_mode_cross_regional = [for mode in var.exstng_instance_new_bucket_hmac : mode.mode if mode.bucket_type == "cross_region_location"] + exstng_instance_fileset_cross_regional = [for fileset in var.exstng_instance_new_bucket_hmac : fileset.afm_fileset if fileset.bucket_type == "cross_region_location"] +} + +data "ibm_resource_instance" "existing_cos_instance_single_site" { + for_each = { + for idx, value in local.exstng_instance_new_bkt_hmac_single_site : idx => { + cos_instance = element(local.exstng_instance_new_bkt_hmac_single_site, idx) + } + } + name = each.value.cos_instance + service = var.cos_instance_service +} + 
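The bucket_name expressions in this file keep a single running -NNN suffix across the three bucket types by offsetting each type's sequence number with the lengths of the preceding lists. The arithmetic, sketched in Python with made-up counts (prefixbucket stands in for the rendered "${var.prefix}bucket" name):

    # Hypothetical counts per bucket type, mirroring the single-site,
    # regional and cross-region locals above.
    single_site, regional, cross_region = 1, 2, 1
    names = []
    for i in range(1, single_site + 1):
        names.append("prefixbucket-%03d" % i)
    for i in range(1, regional + 1):
        names.append("prefixbucket-%03d" % (i + single_site))
    for i in range(1, cross_region + 1):
        names.append("prefixbucket-%03d" % (i + single_site + regional))
    print(names)  # prefixbucket-001 ... prefixbucket-004, no collisions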
+resource "ibm_cos_bucket" "existing_instance_new_cos_bucket_single_site" { + for_each = { + for idx, count_number in range(1, length(local.exstng_instance_single_site_region) + 1) : idx => { + sequence_string = tostring(count_number) + cos_instance = element(flatten([for instance_id in data.ibm_resource_instance.existing_cos_instance_single_site : instance_id[*].id]), idx) + region_location = element(local.exstng_instance_single_site_region, idx) + storage_class = element(local.exstng_instance_storage_class_single_site, idx) + } + } + bucket_name = format("%s-%03s", "${var.prefix}bucket", each.value.sequence_string) + resource_instance_id = each.value.cos_instance + single_site_location = each.value.region_location + storage_class = each.value.storage_class == "" ? "smart" : each.value.storage_class + depends_on = [data.ibm_resource_instance.existing_cos_instance_single_site] +} + +data "ibm_resource_instance" "existing_cos_instance_bucket_regional" { + for_each = { + for idx, value in local.exstng_instance_new_bkt_hmac_regional : idx => { + cos_instance = element(local.exstng_instance_new_bkt_hmac_regional, idx) + } + } + name = each.value.cos_instance + service = var.cos_instance_service +} + +resource "ibm_cos_bucket" "existing_instance_new_cos_bucket_regional" { + for_each = { + for idx, count_number in range(1, length(local.exstng_instance_regional_region) + 1) : idx => { + sequence_string = tostring(count_number) + cos_instance = element(flatten([for instance_id in data.ibm_resource_instance.existing_cos_instance_bucket_regional : instance_id[*].id]), idx) + region_location = element(local.exstng_instance_regional_region, idx) + storage_class = element(local.exstng_instance_storage_class_regional, idx) + } + } + bucket_name = format("%s-%03s", "${var.prefix}bucket", (each.value.sequence_string + length(local.exstng_instance_single_site_region))) + resource_instance_id = each.value.cos_instance + region_location = each.value.region_location + storage_class = each.value.storage_class == "" ? "smart" : each.value.storage_class + depends_on = [data.ibm_resource_instance.existing_cos_instance_bucket_regional] +} + +data "ibm_resource_instance" "existing_cos_instancecross_regional" { + for_each = { + for idx, value in local.exstng_instance_new_bkt_hmac_cross_regional : idx => { + cos_instance = element(local.exstng_instance_new_bkt_hmac_cross_regional, idx) + } + } + name = each.value.cos_instance + service = var.cos_instance_service +} + +resource "ibm_cos_bucket" "existing_instance_new_cos_bucket_cross_regional" { + for_each = { + for idx, count_number in range(1, length(local.exstng_instance_cross_regional) + 1) : idx => { + sequence_string = tostring(count_number) + cos_instance = element(flatten([for instance_id in data.ibm_resource_instance.existing_cos_instancecross_regional : instance_id[*].id]), idx) + region_location = element(local.exstng_instance_cross_regional, idx) + storage_class = element(local.exstng_instance_storage_class_cross_regional, idx) + } + } + bucket_name = format("%s-%03s", "${var.prefix}bucket", (each.value.sequence_string + (length(local.exstng_instance_single_site_region) + length(local.exstng_instance_regional_region)))) + resource_instance_id = each.value.cos_instance + cross_region_location = each.value.region_location + storage_class = each.value.storage_class == "" ? 
"smart" : each.value.storage_class + depends_on = [data.ibm_resource_instance.existing_cos_instancecross_regional] +} + +data "ibm_resource_instance" "existing_cos_instance" { + for_each = { + for idx, value in local.exstng_instance_new_bkt_hmac : idx => { + cos_instance = element(local.exstng_instance_new_bkt_hmac, idx) + } + } + name = each.value.cos_instance + service = var.cos_instance_service +} + +resource "ibm_resource_key" "existing_instance_new_hmac_keys" { + for_each = { + for idx, count_number in range(1, length(local.exstng_instance_new_bkt_hmac) + 1) : idx => { + sequence_string = tostring(count_number) + cos_instance = element(flatten([for instance_id in data.ibm_resource_instance.existing_cos_instance : instance_id[*].id]), idx) + } + } + name = format("%s-%03s", "${var.prefix}hmac-key", each.value.sequence_string) + resource_instance_id = each.value.cos_instance + parameters = { "HMAC" = true } + role = var.cos_hmac_role + depends_on = [data.ibm_resource_instance.existing_cos_instance] +} + +locals { + exstng_instance_buckets = concat((flatten([for bucket in ibm_cos_bucket.existing_instance_new_cos_bucket_single_site : bucket[*].bucket_name])), (flatten([for bucket in ibm_cos_bucket.existing_instance_new_cos_bucket_regional : bucket[*].bucket_name])), (flatten([for bucket in ibm_cos_bucket.existing_instance_new_cos_bucket_cross_regional : bucket[*].bucket_name]))) + exstng_instance_endpoints = concat((flatten([for endpoint in ibm_cos_bucket.existing_instance_new_cos_bucket_single_site : endpoint[*].s3_endpoint_direct])), (flatten([for endpoint in ibm_cos_bucket.existing_instance_new_cos_bucket_regional : endpoint[*].s3_endpoint_direct])), (flatten([for endpoint in ibm_cos_bucket.existing_instance_new_cos_bucket_cross_regional : endpoint[*].s3_endpoint_direct]))) + exstng_instance_modes = concat(local.exstng_instance_mode_single_site, local.exstng_instance_mode_regional, local.exstng_instance_mode_cross_regional) + exstng_instance_filesets = concat(local.exstng_instance_fileset_single_site, local.exstng_instance_fileset_regional, local.exstng_instance_fileset_cross_regional) + + afm_cos_bucket_details_2 = [for idx, config in var.exstng_instance_new_bucket_hmac : { + akey = (flatten([for access_key in ibm_resource_key.existing_instance_new_hmac_keys : access_key[*].credentials["cos_hmac_keys.access_key_id"]]))[idx] + bucket = (local.exstng_instance_buckets)[idx] + skey = (flatten([for secret_access_key in ibm_resource_key.existing_instance_new_hmac_keys : secret_access_key[*].credentials["cos_hmac_keys.secret_access_key"]]))[idx] + }] + + afm_config_details_2 = [for idx, config in var.exstng_instance_new_bucket_hmac : { + bucket = (local.exstng_instance_buckets)[idx] + filesystem = local.filesystem + fileset = (local.exstng_instance_filesets)[idx] + mode = (local.exstng_instance_modes)[idx] + endpoint = "https://${(local.exstng_instance_endpoints)[idx]}" + }] +} + +############################################################################################################# +# 3. It uses existing COS instance and existing Bucket and creates new Hmac Key in that instance. 
+############################################################################################################# + +locals { + exstng_instance_bkt_new_hmac = [for instance in var.exstng_instance_bucket_new_hmac : instance.cos_instance] + exstng_instance_exstng_bucket = [for bucket in var.exstng_instance_bucket_new_hmac : bucket.bucket_name] + region_exstng_instance_bucket_new_hmac = [for region in var.exstng_instance_bucket_new_hmac : region.bucket_region] + exstng_instance_exstng_bucket_type = [for type in var.exstng_instance_bucket_new_hmac : type.bucket_type] +} + +data "ibm_resource_instance" "existing_cos_instance_bucket_new_hmac" { + for_each = { + for idx, value in var.exstng_instance_bucket_new_hmac : idx => { + cos_instance = element(local.exstng_instance_bkt_new_hmac, idx) + } + } + name = each.value.cos_instance + service = var.cos_instance_service +} + +data "ibm_cos_bucket" "existing_cos_instance_bucket" { + for_each = { + for idx, value in var.exstng_instance_bucket_new_hmac : idx => { + bucket_name = element(local.exstng_instance_exstng_bucket, idx) + resource_instance_id = element(flatten([for instance in data.ibm_resource_instance.existing_cos_instance_bucket_new_hmac : instance[*].id]), idx) + bucket_region = element(local.region_exstng_instance_bucket_new_hmac, idx) + bucket_type = element(local.exstng_instance_exstng_bucket_type, idx) + } + } + bucket_name = each.value.bucket_name + resource_instance_id = each.value.resource_instance_id + bucket_region = each.value.bucket_region + bucket_type = each.value.bucket_type + depends_on = [data.ibm_resource_instance.existing_cos_instance_bucket_new_hmac] +} + +resource "ibm_resource_key" "existing_instance_bkt_new_hmac_keys" { + for_each = { + for idx, count_number in range(1, length(var.exstng_instance_bucket_new_hmac) + 1) : idx => { + sequence_string = tostring(count_number) + cos_instance = element(flatten([for instance_id in data.ibm_resource_instance.existing_cos_instance_bucket_new_hmac : instance_id[*].id]), idx) + } + } + name = format("%s-%03s", "${var.prefix}new-hmac-key", each.value.sequence_string) + resource_instance_id = each.value.cos_instance + parameters = { "HMAC" = true } + role = var.cos_hmac_role + depends_on = [data.ibm_resource_instance.existing_cos_instance_bucket_new_hmac] +} + +locals { + afm_cos_bucket_details_3 = [for idx, config in var.exstng_instance_bucket_new_hmac : { + akey = (flatten([for access_key in ibm_resource_key.existing_instance_bkt_new_hmac_keys : access_key[*].credentials["cos_hmac_keys.access_key_id"]]))[idx] + bucket = (flatten([for bucket in data.ibm_cos_bucket.existing_cos_instance_bucket : bucket[*].bucket_name]))[idx] + skey = (flatten([for secret_access_key in ibm_resource_key.existing_instance_bkt_new_hmac_keys : secret_access_key[*].credentials["cos_hmac_keys.secret_access_key"]]))[idx] + }] + + afm_config_details_3 = [for idx, config in var.exstng_instance_bucket_new_hmac : { + bucket = (flatten([for bucket in data.ibm_cos_bucket.existing_cos_instance_bucket : bucket[*].bucket_name]))[idx] + filesystem = local.filesystem + fileset = ([for fileset in var.exstng_instance_bucket_new_hmac : fileset.afm_fileset])[idx] + mode = ([for mode in var.exstng_instance_bucket_new_hmac : mode.mode])[idx] + endpoint = "https://${(flatten([for endpoint in data.ibm_cos_bucket.existing_cos_instance_bucket : endpoint[*].s3_endpoint_direct]))[idx]}" + }] +} + +############################################################################################################# +# 4. 
It uses existing COS instance and existing Hmac Key and creates new Bucket in that instance. +############################################################################################################# + +locals { + exstng_instance_hmac_new_bkt = [for instance in var.exstng_instance_hmac_new_bucket : instance.cos_instance] + exstng_instance_exstng_hmac = [for hmac in var.exstng_instance_hmac_new_bucket : hmac.cos_service_cred_key] + + # New bucket single Site + exstng_instance_hmac_new_bkt_single_site = [for instance in var.exstng_instance_hmac_new_bucket : instance.cos_instance if instance.bucket_type == "single_site_location"] + exstng_instance_hmac_single_site_region = [for region in var.exstng_instance_hmac_new_bucket : region.bucket_region if region.bucket_type == "single_site_location"] + exstng_instance_hmac_storage_class_single_site = [for class in var.exstng_instance_hmac_new_bucket : class.bucket_storage_class if class.bucket_type == "single_site_location"] + exstng_instance_hmac_mode_single_site = [for mode in var.exstng_instance_hmac_new_bucket : mode.mode if mode.bucket_type == "single_site_location"] + exstng_instance_hmac_fileset_single_site = [for fileset in var.exstng_instance_hmac_new_bucket : fileset.afm_fileset if fileset.bucket_type == "single_site_location"] + # New bucket regional + exstng_instance_hmac_new_bkt_regional = [for instance in var.exstng_instance_hmac_new_bucket : instance.cos_instance if instance.bucket_type == "region_location" || instance.bucket_type == ""] + exstng_instance_hmac_regional_region = [for region in var.exstng_instance_hmac_new_bucket : region.bucket_region if region.bucket_type == "region_location" || region.bucket_type == ""] + exstng_instance_hmac_storage_class_regional = [for class in var.exstng_instance_hmac_new_bucket : class.bucket_storage_class if class.bucket_type == "region_location" || class.bucket_type == ""] + exstng_instance_hmac_mode_regional = [for mode in var.exstng_instance_hmac_new_bucket : mode.mode if mode.bucket_type == "region_location" || mode.bucket_type == ""] + exstng_instance_hmac_fileset_regional = [for fileset in var.exstng_instance_hmac_new_bucket : fileset.afm_fileset if fileset.bucket_type == "region_location" || fileset.bucket_type == ""] + # New bucket cross region + exstng_instance_hmac_new_bkt_cross_region = [for instance in var.exstng_instance_hmac_new_bucket : instance.cos_instance if instance.bucket_type == "cross_region_location"] + exstng_instance_hmac_cross_region = [for region in var.exstng_instance_hmac_new_bucket : region.bucket_region if region.bucket_type == "cross_region_location"] + exstng_instance_hmac_storage_class_cross_regional = [for class in var.exstng_instance_hmac_new_bucket : class.bucket_storage_class if class.bucket_type == "cross_region_location"] + exstng_instance_hmac_mode_cross_regional = [for mode in var.exstng_instance_hmac_new_bucket : mode.mode if mode.bucket_type == "cross_region_location"] + exstng_instance_hmac_fileset_cross_regional = [for fileset in var.exstng_instance_hmac_new_bucket : fileset.afm_fileset if fileset.bucket_type == "cross_region_location"] +} + +data "ibm_resource_instance" "exstng_cos_instance_hmac_new_bucket_single_site" { + for_each = length(local.exstng_instance_hmac_new_bkt_single_site) == 0 ? 
{} : { + for idx, value in local.exstng_instance_hmac_new_bkt_single_site : idx => { + cos_instance = element(local.exstng_instance_hmac_new_bkt_single_site, idx) + } + } + name = each.value.cos_instance + service = var.cos_instance_service +} + +resource "ibm_cos_bucket" "existing_cos_instance_hmac_new_cos_bucket_single_site" { + for_each = { + for idx, count_number in range(1, length(local.exstng_instance_hmac_single_site_region) + 1) : idx => { + sequence_string = tostring(count_number) + cos_instance = element(flatten([for instance_id in data.ibm_resource_instance.exstng_cos_instance_hmac_new_bucket_single_site : instance_id[*].id]), idx) + region_location = element(local.exstng_instance_hmac_single_site_region, idx) + storage_class = element(local.exstng_instance_hmac_storage_class_single_site, idx) + } + } + bucket_name = format("%s-%03s", "${var.prefix}new-bucket", each.value.sequence_string) + resource_instance_id = each.value.cos_instance + single_site_location = each.value.region_location + storage_class = each.value.storage_class == "" ? "smart" : each.value.storage_class + depends_on = [data.ibm_resource_instance.exstng_cos_instance_hmac_new_bucket_single_site] +} + +data "ibm_resource_instance" "exstng_cos_instance_hmac_new_bucket_regional" { + for_each = length(local.exstng_instance_hmac_new_bkt_regional) == 0 ? {} : { + for idx, value in local.exstng_instance_hmac_new_bkt_regional : idx => { + cos_instance = element(local.exstng_instance_hmac_new_bkt_regional, idx) + } + } + name = each.value.cos_instance + service = var.cos_instance_service +} + +resource "ibm_cos_bucket" "existing_cos_instance_hmac_new_cos_bucket_regional" { + for_each = { + for idx, count_number in range(1, length(local.exstng_instance_hmac_regional_region) + 1) : idx => { + sequence_string = tostring(count_number) + cos_instance = element(flatten([for instance_id in data.ibm_resource_instance.exstng_cos_instance_hmac_new_bucket_regional : instance_id[*].id]), idx) + region_location = element(local.exstng_instance_hmac_regional_region, idx) + storage_class = element(local.exstng_instance_hmac_storage_class_regional, idx) + } + } + bucket_name = format("%s-%03s", "${var.prefix}new-bucket", (each.value.sequence_string + length(local.exstng_instance_hmac_single_site_region))) + resource_instance_id = each.value.cos_instance + region_location = each.value.region_location + storage_class = each.value.storage_class == "" ? "smart" : each.value.storage_class + depends_on = [data.ibm_resource_instance.exstng_cos_instance_hmac_new_bucket_regional] +} + +data "ibm_resource_instance" "exstng_cos_instance_hmac_new_bucket_cross_region" { + for_each = length(local.exstng_instance_hmac_new_bkt_cross_region) == 0 ? 
{} : { + for idx, value in local.exstng_instance_hmac_new_bkt_cross_region : idx => { + cos_instance = element(local.exstng_instance_hmac_new_bkt_cross_region, idx) + } + } + name = each.value.cos_instance + service = var.cos_instance_service +} + +resource "ibm_cos_bucket" "existing_cos_instance_hmac_new_cos_bucket_cross_region" { + for_each = { + for idx, count_number in range(1, length(local.exstng_instance_hmac_cross_region) + 1) : idx => { + sequence_string = tostring(count_number) + cos_instance = element(flatten([for instance_id in data.ibm_resource_instance.exstng_cos_instance_hmac_new_bucket_cross_region : instance_id[*].id]), idx) + region_location = element(local.exstng_instance_hmac_cross_region, idx) + storage_class = element(local.exstng_instance_hmac_storage_class_cross_regional, idx) + } + } + bucket_name = format("%s-%03s", "${var.prefix}new-bucket", (each.value.sequence_string + (length(local.exstng_instance_hmac_single_site_region) + length(local.exstng_instance_hmac_regional_region)))) + resource_instance_id = each.value.cos_instance + cross_region_location = each.value.region_location + storage_class = each.value.storage_class == "" ? "smart" : each.value.storage_class + depends_on = [data.ibm_resource_instance.exstng_cos_instance_hmac_new_bucket_cross_region] +} + +data "ibm_resource_instance" "exstng_cos_instance_hmac_new_bucket" { + for_each = { + for idx, value in local.exstng_instance_hmac_new_bkt : idx => { + cos_instance = element(local.exstng_instance_hmac_new_bkt, idx) + } + } + name = each.value.cos_instance + service = var.cos_instance_service +} + +data "ibm_resource_key" "existing_hmac_key" { + for_each = { + for idx, value in local.exstng_instance_exstng_hmac : idx => { + hmac_key = element(local.exstng_instance_exstng_hmac, idx) + resource_instance_id = element(flatten([for instance in data.ibm_resource_instance.exstng_cos_instance_hmac_new_bucket : instance[*].id]), idx) + } + } + name = each.value.hmac_key + resource_instance_id = each.value.resource_instance_id + depends_on = [data.ibm_resource_instance.exstng_cos_instance_hmac_new_bucket] +} + +locals { + exstng_instance_hmac_buckets = concat((flatten([for bucket in ibm_cos_bucket.existing_cos_instance_hmac_new_cos_bucket_single_site : bucket[*].bucket_name])), (flatten([for bucket in ibm_cos_bucket.existing_cos_instance_hmac_new_cos_bucket_regional : bucket[*].bucket_name])), (flatten([for bucket in ibm_cos_bucket.existing_cos_instance_hmac_new_cos_bucket_cross_region : bucket[*].bucket_name]))) + exstng_instance_hmac_endpoints = concat((flatten([for endpoint in ibm_cos_bucket.existing_cos_instance_hmac_new_cos_bucket_single_site : endpoint[*].s3_endpoint_direct])), (flatten([for endpoint in ibm_cos_bucket.existing_cos_instance_hmac_new_cos_bucket_regional : endpoint[*].s3_endpoint_direct])), (flatten([for endpoint in ibm_cos_bucket.existing_cos_instance_hmac_new_cos_bucket_cross_region : endpoint[*].s3_endpoint_direct]))) + exstng_instance_hmac_modes = concat(local.exstng_instance_hmac_mode_single_site, local.exstng_instance_hmac_mode_regional, local.exstng_instance_hmac_mode_cross_regional) + exstng_instance_hmac_filesets = concat(local.exstng_instance_hmac_fileset_single_site, local.exstng_instance_hmac_fileset_regional, local.exstng_instance_hmac_fileset_cross_regional) + + afm_cos_bucket_details_4 = [for idx, config in var.exstng_instance_hmac_new_bucket : { + akey = (flatten([for access_key in data.ibm_resource_key.existing_hmac_key : 
access_key[*].credentials["cos_hmac_keys.access_key_id"]]))[idx] + bucket = (local.exstng_instance_hmac_buckets)[idx] + skey = (flatten([for secret_access_key in data.ibm_resource_key.existing_hmac_key : secret_access_key[*].credentials["cos_hmac_keys.secret_access_key"]]))[idx] + }] + + afm_config_details_4 = [for idx, config in var.exstng_instance_hmac_new_bucket : { + bucket = (local.exstng_instance_hmac_buckets)[idx] + filesystem = local.filesystem + fileset = (local.exstng_instance_hmac_filesets)[idx] + mode = (local.exstng_instance_hmac_modes)[idx] + endpoint = "https://${(local.exstng_instance_hmac_endpoints)[idx]}" + }] +} + +############################################################################################################# +# 5. It uses existing COS instance, Bucket and Hmac Key +############################################################################################################# + +locals { + exstng_instance_bkt_hmac = [for instance in var.exstng_instance_bucket_hmac : instance.cos_instance] + exstng_instance_exstng_bkt = [for bucket in var.exstng_instance_bucket_hmac : bucket.bucket_name] + exstng_instance_hmac_bkt = [for hmac in var.exstng_instance_bucket_hmac : hmac.cos_service_cred_key] + region_exstng_instance_bucket_hmac = [for region in var.exstng_instance_bucket_hmac : region.bucket_region] + exstng_instance_bkt_type = [for type in var.exstng_instance_bucket_hmac : type.bucket_type] +} + + +data "ibm_resource_instance" "exstng_cos_instance_bucket_hmac" { + for_each = { + for idx, value in var.exstng_instance_bucket_hmac : idx => { + cos_instance = element(local.exstng_instance_bkt_hmac, idx) + } + } + name = each.value.cos_instance + service = var.cos_instance_service +} + +data "ibm_cos_bucket" "exstng_cos_instance_bucket" { + for_each = { + for idx, value in var.exstng_instance_bucket_hmac : idx => { + bucket_name = element(local.exstng_instance_exstng_bkt, idx) + resource_instance_id = element(flatten([for instance in data.ibm_resource_instance.exstng_cos_instance_bucket_hmac : instance[*].id]), idx) + bucket_region = element(local.region_exstng_instance_bucket_hmac, idx) + bucket_type = element(local.exstng_instance_bkt_type, idx) + } + } + bucket_name = each.value.bucket_name + resource_instance_id = each.value.resource_instance_id + bucket_region = each.value.bucket_region + bucket_type = each.value.bucket_type + depends_on = [data.ibm_resource_instance.exstng_cos_instance_bucket_hmac] +} + +data "ibm_resource_key" "exstng_cos_instance_hmac" { + for_each = { + for idx, value in var.exstng_instance_bucket_hmac : idx => { + hmac_key = element(local.exstng_instance_hmac_bkt, idx) + resource_instance_id = element(flatten([for instance in data.ibm_resource_instance.exstng_cos_instance_bucket_hmac : instance[*].id]), idx) + } + } + name = each.value.hmac_key + resource_instance_id = each.value.resource_instance_id + depends_on = [data.ibm_resource_instance.exstng_cos_instance_bucket_hmac] +} + +locals { + afm_cos_bucket_details_5 = [for idx, config in var.exstng_instance_bucket_hmac : { + akey = (flatten([for access_key in data.ibm_resource_key.exstng_cos_instance_hmac : access_key[*].credentials["cos_hmac_keys.access_key_id"]]))[idx] + bucket = (flatten([for bucket in data.ibm_cos_bucket.exstng_cos_instance_bucket : bucket[*].bucket_name]))[idx] + skey = (flatten([for secret_access_key in data.ibm_resource_key.exstng_cos_instance_hmac : secret_access_key[*].credentials["cos_hmac_keys.secret_access_key"]]))[idx] + }] + + afm_config_details_5 = [for idx, 
diff --git a/modules/common/network_playbook/main.tf b/modules/cos/main.tf
similarity index 100%
rename from modules/common/network_playbook/main.tf
rename to modules/cos/main.tf
diff --git a/modules/cos/outputs.tf b/modules/cos/outputs.tf
new file mode 100644
index 00000000..44aa1d99
--- /dev/null
+++ b/modules/cos/outputs.tf
@@ -0,0 +1,9 @@
+output "afm_cos_bucket_details" {
+  value       = concat(local.afm_cos_bucket_details_1, local.afm_cos_bucket_details_2, local.afm_cos_bucket_details_3, local.afm_cos_bucket_details_4, local.afm_cos_bucket_details_5)
+  description = "AFM COS bucket details"
+}
+
+output "afm_config_details" {
+  value       = concat(local.afm_config_details_1, local.afm_config_details_2, local.afm_config_details_3, local.afm_config_details_4, local.afm_config_details_5)
+  description = "AFM configuration details"
+}
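A minimal consumption sketch for these outputs, assuming the module is instantiated as module "cos" in a parent module; the cfg.endpoint field name matches the objects concatenated above:

# Illustrative only: surface the endpoint of every AFM-enabled bucket.
output "afm_bucket_endpoints" {
  description = "Endpoints of all AFM-enabled COS buckets"
  value       = [for cfg in module.cos.afm_config_details : cfg.endpoint]
}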
diff --git a/modules/cos/variables.tf b/modules/cos/variables.tf
new file mode 100644
index 00000000..5008046d
--- /dev/null
+++ b/modules/cos/variables.tf
@@ -0,0 +1,109 @@
+variable "prefix" {
+  type        = string
+  description = "A unique identifier for resources. Must begin with a letter and end with a letter or number. This prefix will be prepended to any resources provisioned by this template. Prefixes must be 16 or fewer characters."
+}
+
+variable "resource_group_id" {
+  type        = string
+  description = "Resource group id."
+}
+
+variable "cos_instance_plan" {
+  type        = string
+  description = "COS instance plan."
+}
+variable "cos_instance_location" {
+  type        = string
+  description = "COS instance location."
+}
+
+variable "cos_instance_service" {
+  type        = string
+  description = "COS instance service."
+}
+
+variable "cos_hmac_role" {
+  type        = string
+  description = "HMAC key role."
+}
+
+variable "new_instance_bucket_hmac" {
+  type = list(
+    object({
+      afm_fileset          = string,
+      mode                 = string,
+      cos_instance         = string,
+      bucket_name          = string,
+      bucket_region        = string,
+      cos_service_cred_key = string,
+      bucket_type          = string,
+      bucket_storage_class = string
+    })
+  )
+  description = "Creates a new COS instance, bucket, and HMAC key."
+}
+variable "exstng_instance_new_bucket_hmac" {
+  type = list(
+    object({
+      afm_fileset          = string,
+      mode                 = string,
+      cos_instance         = string,
+      bucket_name          = string,
+      bucket_region        = string,
+      cos_service_cred_key = string,
+      bucket_type          = string,
+      bucket_storage_class = string
+    })
+  )
+  description = "Uses an existing COS instance and creates a new bucket and HMAC key."
+}
+variable "exstng_instance_bucket_new_hmac" {
+  type = list(
+    object({
+      afm_fileset          = string,
+      mode                 = string,
+      cos_instance         = string,
+      bucket_name          = string,
+      bucket_region        = string,
+      cos_service_cred_key = string,
+      bucket_type          = string,
+      bucket_storage_class = string
+    })
+  )
+  description = "Uses an existing COS instance and bucket and creates a new HMAC key."
+}
+variable "exstng_instance_hmac_new_bucket" {
+  type = list(
+    object({
+      afm_fileset          = string,
+      mode                 = string,
+      cos_instance         = string,
+      bucket_name          = string,
+      bucket_region        = string,
+      cos_service_cred_key = string,
+      bucket_type          = string,
+      bucket_storage_class = string
+    })
+  )
+  description = "Uses an existing COS instance and HMAC key and creates a new bucket."
+}
+variable "exstng_instance_bucket_hmac" {
+  type = list(
+    object({
+      afm_fileset          = string,
+      mode                 = string,
+      cos_instance         = string,
+      bucket_name          = string,
+      bucket_region        = string,
+      cos_service_cred_key = string,
+      bucket_type          = string,
+      bucket_storage_class = string
+    })
+  )
+  description = "Uses an existing COS instance, bucket, and HMAC key."
+}
+
+variable "filesystem" {
+  type        = string
+  description = "Storage filesystem name."
+}
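A minimal instantiation sketch for this module; the plan, service, and role values mirror those used for COS elsewhere in this repository, and everything else is illustrative:

module "cos" {
  source                          = "./modules/cos"
  prefix                          = "hpcc"                      # illustrative
  resource_group_id               = "assumed-resource-group-id" # illustrative
  cos_instance_plan               = "standard"
  cos_instance_location           = "global"
  cos_instance_service            = "cloud-object-storage"
  cos_hmac_role                   = "Manager"
  filesystem                      = "fs1"                       # illustrative
  new_instance_bucket_hmac        = []
  exstng_instance_new_bucket_hmac = []
  exstng_instance_bucket_new_hmac = []
  exstng_instance_hmac_new_bucket = []
  exstng_instance_bucket_hmac     = local.example_exstng_instance_bucket_hmac # e.g. the case-5 list from the sketch above
}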
diff --git a/modules/key_protect/version.tf b/modules/cos/version.tf
similarity index 64%
rename from modules/key_protect/version.tf
rename to modules/cos/version.tf
index 4edd14fc..913bf325 100644
--- a/modules/key_protect/version.tf
+++ b/modules/cos/version.tf
@@ -3,20 +3,20 @@
 ##############################################################################
 terraform {
-  required_version = ">= 1.9.0"
+  required_version = ">= 1.3"
   # Use "greater than or equal to" range for root level modules
   required_providers {
     ibm = {
       source  = "IBM-Cloud/ibm"
       version = ">= 1.68.1, < 2.0.0"
     }
-    local = {
-      source  = "hashicorp/local"
-      version = "~> 2"
-    }
-    null = {
-      source  = "hashicorp/null"
-      version = ">= 3.0.0"
-    }
+    # local = {
+    #   source  = "hashicorp/local"
+    #   version = "~> 2"
+    # }
+    # ansible = {
+    #   source  = "ansible/ansible"
+    #   version = "~> 1.3.0"
+    # }
   }
 }
diff --git a/modules/deployer/datasource.tf b/modules/deployer/datasource.tf
index a993c051..fc391e8a 100644
--- a/modules/deployer/datasource.tf
+++ b/modules/deployer/datasource.tf
@@ -26,8 +26,3 @@ data "ibm_is_instance" "bastion_instance_name" {
 data "ibm_is_public_gateways" "public_gateways" {
   count = var.ext_vpc_name != null ? 1 : 0
 }
-
-data "ibm_is_security_group" "login_security_group" {
-  count = var.login_security_group_name != null ? 1 : 0
-  name  = var.login_security_group_name
-}
diff --git a/modules/deployer/image_map.tf b/modules/deployer/image_map.tf
index d4d29018..292e2b49 100644
--- a/modules/deployer/image_map.tf
+++ b/modules/deployer/image_map.tf
@@ -25,18 +25,6 @@ locals {
       "br-sao"   = "r042-93c1a769-c138-4765-91d2-5796965b6a98"
       "ca-tor"   = "r038-9448213f-22ce-4a6a-b6b0-22dd6ed9fbb3"
       "ca-mon"   = "r058-b3211406-9eec-4148-aafb-d6ab7c26a6eb"
-    },
-    "hpcc-scale-deployer-v1" = {
-      "eu-es"    = "r050-9ae3a0b4-6353-4c8e-84eb-c3b1cd4255fa"
-      "eu-gb"    = "r018-73480732-827e-440f-82aa-9cd2221b71ee"
-      "eu-de"    = "r010-0a275d00-6d18-49b1-a961-3082e5376864"
-      "us-east"  = "r014-7304cbc7-61f8-43cf-9098-ebdef3287b81"
-      "us-south" = "r006-be93bb57-4226-49c1-8089-d6ed95df071d"
-      "jp-tok"   = "r022-e84d39a4-0726-467e-aa1c-d482665ecc6f"
-      "jp-osa"   = "r034-6900c41a-b3f8-4c57-ae1a-fc54de86668f"
-      "au-syd"   = "r026-0f3084e9-53eb-4a32-8b7f-2e222ce843cd"
-      "br-sao"   = "r042-a97879cb-1e8a-4b9a-ba80-adcf7b8b37e7"
-      "ca-tor"   = "r038-73dfb66d-b9c3-4fbf-a6fe-8cd7c81325c6"
     }
   }
 }
diff --git a/modules/deployer/locals.tf b/modules/deployer/locals.tf
index 312d185a..140240bc 100644
--- a/modules/deployer/locals.tf
+++ b/modules/deployer/locals.tf
@@ -75,8 +75,7 @@ locals {
   # resource_group_id = data.ibm_resource_group.existing_resource_group.id
 
   # Subnets
-  bastion_subnets              = var.bastion_subnets
-  login_security_group_name_id = var.login_security_group_name != null ? data.ibm_is_security_group.login_security_group[*].id : []
+  bastion_subnets = var.bastion_subnets
 }
 
 locals {
@@ -89,12 +88,3 @@ locals {
   public_gateways_list = var.ext_vpc_name != null ? data.ibm_is_public_gateways.public_gateways[0].public_gateways : []
   zone_1_pgw_ids       = var.ext_vpc_name != null ? [for gateway in local.public_gateways_list : gateway.id if gateway.vpc == var.vpc_id && gateway.zone == var.zones[0]] : []
 }
-
-locals {
-  storage_secondary_security_group = [
-    for i, subnet in var.compute_subnets : {
-      security_group_id = one(module.bastion_sg[*].security_group_id)
-      interface_name    = subnet.name
-    }
-  ]
-}
diff --git a/modules/deployer/main.tf b/modules/deployer/main.tf
index 18b8914e..adf15701 100644
--- a/modules/deployer/main.tf
+++ b/modules/deployer/main.tf
@@ -1,31 +1,13 @@
 resource "ibm_is_subnet_public_gateway_attachment" "zone_1_attachment" {
-  count          = (var.ext_vpc_name != null && var.ext_compute_subnet_id == null && length(var.compute_subnets) > 0 && length(local.zone_1_pgw_ids) > 0) ? 1 : 0
-  subnet         = var.compute_subnets[0].id
-  public_gateway = local.zone_1_pgw_ids[0]
+  count          = (var.ext_vpc_name != null && var.ext_cluster_subnet_id == null) ? 1 : 0
+  subnet         = var.cluster_subnets[0].id
+  public_gateway = length(local.zone_1_pgw_ids) > 0 ? local.zone_1_pgw_ids[0] : ""
 }
 
 resource "ibm_is_subnet_public_gateway_attachment" "bastion_attachment" {
-  count  = (var.ext_vpc_name != null && var.ext_login_subnet_id == null && length(var.bastion_subnets) > 0 && length(local.zone_1_pgw_ids) > 0) ? 1 : 0
+  count  = (var.ext_vpc_name != null && var.ext_login_subnet_id == null) ? 1 : 0
   subnet = local.bastion_subnets[0].id
-  public_gateway = local.zone_1_pgw_ids[0]
-}
-
-resource "ibm_is_subnet_public_gateway_attachment" "storage_attachment" {
-  count          = (var.ext_vpc_name != null && var.ext_storage_subnet_id == null && length(var.storage_subnets) > 0 && length(local.zone_1_pgw_ids) > 0) ? 1 : 0
-  subnet         = var.storage_subnets[0].id
-  public_gateway = local.zone_1_pgw_ids[0]
-}
-
-resource "ibm_is_subnet_public_gateway_attachment" "client_attachment" {
-  count          = (var.ext_vpc_name != null && var.ext_client_subnet_id == null && length(var.client_subnets) > 0 && length(local.zone_1_pgw_ids) > 0) ? 1 : 0
-  subnet         = var.client_subnets[0].id
-  public_gateway = local.zone_1_pgw_ids[0]
-}
-
-resource "ibm_is_subnet_public_gateway_attachment" "protocol_attachment" {
-  count          = (var.ext_vpc_name != null && var.ext_protocol_subnet_id == null && length(var.protocol_subnets) > 0 && length(local.zone_1_pgw_ids) > 0) ? 1 : 0
-  subnet         = var.protocol_subnets[0].id
-  public_gateway = local.zone_1_pgw_ids[0]
+  public_gateway = length(local.zone_1_pgw_ids) > 0 ? local.zone_1_pgw_ids[0] : ""
 }
 
 module "ssh_key" {
@@ -35,7 +17,7 @@ module "ssh_key" {
 }
 
 module "bastion_sg" {
-  count                        = var.enable_deployer && var.login_security_group_name == null ? 1 : 0
+  count                        = var.enable_deployer ? 1 : 0
   source                       = "terraform-ibm-modules/security-group/ibm"
   version                      = "2.6.2"
   add_ibm_cloud_internal_rules = true
@@ -48,7 +30,7 @@ module "bastion_vsi" {
   count                 = (var.enable_deployer && var.bastion_instance_name == null) ? 1 : 0
   source                = "terraform-ibm-modules/landing-zone-vsi/ibm"
-  version               = "5.4.16"
+  version               = "5.0.0"
   vsi_per_subnet        = 1
   create_security_group = false
   security_group        = null
@@ -57,24 +39,22 @@
   prefix                        = local.bastion_node_name
   resource_group_id             = var.resource_group
   enable_floating_ip            = true
-  security_group_ids            = var.login_security_group_name == null ? module.bastion_sg[*].security_group_id : local.login_security_group_name_id
+  security_group_ids            = module.bastion_sg[*].security_group_id
   ssh_key_ids                   = local.bastion_ssh_keys
-  subnets                       = var.scheduler == "Scale" && var.enable_sec_interface_compute ? var.storage_subnets : local.bastion_subnets
+  subnets                       = local.bastion_subnets
   tags                          = local.tags
   user_data                     = data.template_file.bastion_user_data.rendered
   vpc_id                        = var.vpc_id
   kms_encryption_enabled        = var.kms_encryption_enabled
   skip_iam_authorization_policy = true
   boot_volume_encryption_key    = var.boot_volume_encryption_key
-  secondary_security_groups     = var.scheduler == "Scale" && var.enable_sec_interface_compute ? local.storage_secondary_security_group : []
-  secondary_subnets             = var.scheduler == "Scale" && var.enable_sec_interface_compute ? var.compute_subnets : []
-  manage_reserved_ips           = var.scheduler == "Scale" && var.enable_sec_interface_compute ? true : false
+  existing_kms_instance_guid    = var.existing_kms_instance_guid
 }
 
 module "deployer_vsi" {
   count                 = local.enable_deployer ? 1 : 0
   source                = "terraform-ibm-modules/landing-zone-vsi/ibm"
-  version               = "5.4.6"
+  version               = "5.0.0"
   vsi_per_subnet        = 1
   create_security_group = false
   security_group        = null
@@ -83,13 +63,14 @@
   prefix                        = local.deployer_node_name
   resource_group_id             = var.resource_group
   enable_floating_ip            = false
-  security_group_ids            = var.login_security_group_name == null ? module.bastion_sg[*].security_group_id : local.login_security_group_name_id
+  security_group_ids            = module.bastion_sg[*].security_group_id
   ssh_key_ids                   = local.bastion_ssh_keys
-  subnets                       = var.scheduler == "Scale" && var.enable_sec_interface_compute ? var.storage_subnets : local.bastion_subnets
+  subnets                       = local.bastion_subnets
   tags                          = local.tags
   user_data                     = data.template_file.deployer_user_data.rendered
   vpc_id                        = var.vpc_id
   kms_encryption_enabled        = var.kms_encryption_enabled
   skip_iam_authorization_policy = var.skip_iam_authorization_policy
   boot_volume_encryption_key    = var.boot_volume_encryption_key
+  existing_kms_instance_guid    = var.existing_kms_instance_guid
 }
diff --git a/modules/deployer/templates/deployer_user_data.tpl b/modules/deployer/templates/deployer_user_data.tpl
index 24bc6895..ebaacb88 100644
--- a/modules/deployer/templates/deployer_user_data.tpl
+++ b/modules/deployer/templates/deployer_user_data.tpl
@@ -6,36 +6,13 @@
 ###################################################
 
 #!/usr/bin/env bash
-set -e
-
-# Detect OS and set user
-if grep -E -q "CentOS|Red Hat" /etc/os-release; then
+if grep -E -q "CentOS|Red Hat" /etc/os-release
+then
     USER=vpcuser
-    yum install -y nc curl unzip jq
-elif grep -q "Ubuntu" /etc/os-release; then
+elif grep -q "Ubuntu" /etc/os-release
+then
     USER=ubuntu
-    apt-get update -y
-    apt-get install -y netcat curl unzip jq
 fi
-
-# Install IBM Cloud CLI
-echo "Installing IBM Cloud CLI..."
-curl -fsSL https://clis.cloud.ibm.com/install/linux | sh
-
-# Add CLI to PATH for immediate use
-export PATH=$PATH:/usr/local/bin
-
-# Install infrastructure service plugin (is)
-echo "Installing IBM Cloud plugins..."
-ibmcloud plugin install infrastructure-service -f
-
-# Verify installation
-echo "Verifying installation..."
-ibmcloud --version
-ibmcloud plugin list | grep infrastructure-service || echo "plugin not found!"
-
-echo "IBM Cloud CLI and IS plugin installed successfully."
-
 sed -i -e "s/^/no-port-forwarding,no-agent-forwarding,no-X11-forwarding,command=\"echo \'Please login as the user \\\\\"$USER\\\\\" rather than the user \\\\\"root\\\\\".\';echo;sleep 5; exit 142\" /" /root/.ssh/authorized_keys
 echo "DOMAIN=${compute_dns_domain}" >> "/etc/sysconfig/network-scripts/ifcfg-${compute_interfaces}"
 echo "MTU=9000" >> "/etc/sysconfig/network-scripts/ifcfg-${compute_interfaces}"
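The rendered template above is fed through a template_file data source whose vars map supplies the compute_dns_domain and compute_interfaces placeholders. A minimal sketch of that wiring; the data source name and the values are illustrative, and the real definition elsewhere in the deployer module is not part of this hunk:

data "template_file" "deployer_user_data_example" {
  template = file("${path.module}/templates/deployer_user_data.tpl")
  vars = {
    compute_dns_domain = "comp.com" # assumed cluster DNS domain
    compute_interfaces = "eth0"     # assumed primary interface name
  }
}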
diff --git a/modules/deployer/variables.tf b/modules/deployer/variables.tf
index 21446863..e1c9fb2e 100644
--- a/modules/deployer/variables.tf
+++ b/modules/deployer/variables.tf
@@ -43,7 +43,7 @@ variable "cluster_cidr" {
   default     = "10.241.0.0/18"
 }
 
-variable "compute_subnets" {
+variable "cluster_subnets" {
   type = list(object({
     name = string
     id   = string
@@ -60,25 +60,7 @@ variable "ext_login_subnet_id" {
   description = "Name of an existing subnets in which the bastion and cluster resources will be deployed. If no value is given, then new subnet(s) will be provisioned for the cluster. [Learn more](https://cloud.ibm.com/docs/vpc)"
 }
 
-variable "ext_compute_subnet_id" {
-  type        = string
-  default     = null
-  description = "Name of an existing subnets in which the bastion and cluster resources will be deployed. If no value is given, then new subnet(s) will be provisioned for the cluster. [Learn more](https://cloud.ibm.com/docs/vpc)"
-}
-
-variable "ext_client_subnet_id" {
-  type        = string
-  default     = null
-  description = "Name of an existing subnets in which the bastion and cluster resources will be deployed. If no value is given, then new subnet(s) will be provisioned for the cluster. [Learn more](https://cloud.ibm.com/docs/vpc)"
-}
-
-variable "ext_storage_subnet_id" {
-  type        = string
-  default     = null
-  description = "Name of an existing subnets in which the bastion and cluster resources will be deployed. If no value is given, then new subnet(s) will be provisioned for the cluster. [Learn more](https://cloud.ibm.com/docs/vpc)"
-}
-
-variable "ext_protocol_subnet_id" {
+variable "ext_cluster_subnet_id" {
   type        = string
   default     = null
   description = "Name of an existing subnets in which the bastion and cluster resources will be deployed. If no value is given, then new subnet(s) will be provisioned for the cluster. [Learn more](https://cloud.ibm.com/docs/vpc)"
@@ -90,7 +72,7 @@
 variable "scheduler" {
   type        = string
   default     = null
-  description = "Select one of the scheduler (Scale/LSF/Symphony/Slurm/null)"
+  description = "Select one of the schedulers (LSF/Symphony/Slurm/null)"
 }
 
 ##############################################################################
@@ -165,6 +147,12 @@ variable "boot_volume_encryption_key" {
   description = "CRN of boot volume encryption key"
 }
 
+variable "existing_kms_instance_guid" {
+  type        = string
+  default     = null
+  description = "GUID of boot volume encryption key"
+}
+
 variable "skip_iam_authorization_policy" {
   type    = bool
   default = true
@@ -206,48 +194,3 @@ variable "zones" {
   description = "Region where VPC will be created. To find your VPC region, use `ibmcloud is regions` command to find available regions."
   type        = list(string)
 }
-
-variable "storage_subnets" {
-  type = list(object({
-    name = string
-    id   = string
-    zone = string
-    cidr = string
-  }))
-  default     = []
-  description = "Subnets to launch the storage host."
-}
-
-variable "client_subnets" {
-  type = list(object({
-    name = string
-    id   = string
-    zone = string
-    cidr = string
-  }))
-  default     = []
-  description = "Subnets to launch the client host."
-}
-
-variable "protocol_subnets" {
-  type = list(object({
-    name = string
-    id   = string
-    zone = string
-    cidr = string
-  }))
-  default     = []
-  description = "Subnets to launch the protocol host."
-}
-
-variable "login_security_group_name" {
-  type        = string
-  default     = null
-  description = "Provide the security group name to provision the bastion node. If set to null, the solution will automatically create the necessary security group and rules. If you choose to use an existing security group, ensure it has the appropriate rules configured for the bastion node to function properly."
-}
-
-variable "enable_sec_interface_compute" {
-  type        = bool
-  default     = false
-  description = "Specifies whether the secondary interface is enabled for the Scale compute cluster."
-}
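The subnet list variables in this file share the list(object({ name, id, zone, cidr })) shape. A minimal sketch of a cluster_subnets entry; every value below is illustrative:

cluster_subnets = [{
  name = "hpcc-compute-subnet-us-south-1"            # assumed naming
  id   = "0717-1a2b3c4d-0000-0000-0000-000000000000" # assumed subnet ID format
  zone = "us-south-1"
  cidr = "10.241.0.0/20"
}]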
diff --git a/modules/file_storage/main.tf b/modules/file_storage/main.tf
index c14ee31e..b91a0c07 100644
--- a/modules/file_storage/main.tf
+++ b/modules/file_storage/main.tf
@@ -43,6 +43,6 @@ resource "ibm_is_share_mount_target" "share_target_sg" {
     name            = format("%s-fs-vni", var.file_shares[count.index]["name"])
     security_groups = var.security_group_ids
   }
-  # TODO: update transit_encryption value conditionally; it fails with
+  # TODO: update transit_encryption value conditionaly; it fails with
   # transit_encryption = "user_managed"
 }
diff --git a/modules/host_resolution_add/locals.tf b/modules/host_resolution_add/locals.tf
deleted file mode 100644
index dd0c4c33..00000000
--- a/modules/host_resolution_add/locals.tf
+++ /dev/null
@@ -1,40 +0,0 @@
-locals {
-  deployer_hostentry_playbook_path           = format("%s/%s/deployer_host_entry_play.yml", var.clone_path, "ibm-spectrum-scale-install-infra")
-  scale_hostentry_playbook_path              = format("%s/%s/scale_host_entry_play.yml", var.clone_path, "ibm-spectrum-scale-install-infra")
-  scale_cluster_hosts                        = format("%s/%s/scale_cluster_hosts.yml", var.clone_path, "ibm-spectrum-scale-install-infra")
-  scale_baremetal_prerequisite_vars          = format("%s/%s/scale_baremetal_vars.yml", var.clone_path, "ibm-spectrum-scale-install-infra")
-  scale_all_inventory                        = format("%s/%s/scale_all_inventory.ini", var.clone_path, "ibm-spectrum-scale-install-infra")
-  remove_hostentry_playbooks_path            = format("%s/%s/remove_host_entry_play.yml", var.clone_path, "ibm-spectrum-scale-install-infra")
-  scale_baremetal_ssh_check_playbook_path    = format("%s/%s/scale_baremetal_ssh_check_playbook.yml", var.clone_path, "ibm-spectrum-scale-install-infra")
-  scale_baremetal_bootdrive_playbook_path    = format("%s/%s/scale_baremetal_bootdrive.yml", var.clone_path, "ibm-spectrum-scale-install-infra")
-  scale_baremetal_prerequisite_playbook_path = format("%s/%s/scale_baremetal_prerequisite.yml", var.clone_path, "ibm-spectrum-scale-install-infra")
-  gpfs_restart_playbook_path                 = format("%s/%s/scale_gpfs_restart.yml", var.clone_path, "ibm-spectrum-scale-install-infra")
-  domain_name_file                           = format("%s/%s/domain_names.yml", var.clone_path, "ibm-spectrum-scale-install-infra")
-  storage_domain                             = try(var.domain_names.storage, null)
-  protocol_domain                            = try(var.domain_names.protocol, null)
-  client_private_key                         = format("%s/client_key/id_rsa", var.clone_path) #tfsec:ignore:GEN002
-  compute_private_key                        = format("%s/compute_key/id_rsa", var.clone_path) #tfsec:ignore:GEN002
-  storage_private_key                        = format("%s/storage_key/id_rsa", var.clone_path) #tfsec:ignore:GEN002
-  gklm_private_key                           = format("%s/gklm_key/id_rsa", var.clone_path) #tfsec:ignore:GEN002
-}
-
-locals {
-  normalize_hosts = {
-    # groups with string values → wrap into {name=...}
-    compute_hosts       = { for k, v in try(var.compute_hosts, {}) : k => { name = v, id = null } }
-    compute_mgmnt_hosts = { for k, v in try(var.compute_mgmnt_hosts, {}) : k => { name = v, id = null } }
-    client_hosts        = { for k, v in try(var.client_hosts, {}) : k => { name = v, id = null } }
-    gklm_hosts          = { for k, v in try(var.gklm_hosts, {}) : k => { name = v, id = null } }
-    afm_hosts           = { for k, v in try(var.afm_hosts, {}) : k => { name = v, id = null } }
-    protocol_hosts      = { for k, v in try(var.protocol_hosts, {}) : k => { name = v, id = null } }
-    storage_hosts       = { for k, v in try(var.storage_hosts, {}) : k => { name = v, id = null } }
-    storage_tb_hosts    = { for k, v in try(var.storage_tb_hosts, {}) : k => { name = v, id = null } }
-    storage_mgmnt_hosts = { for k, v in
try(var.storage_mgmnt_hosts, {}) : k => { name = v, id = null } } - - # groups that already have {id,name} - storage_bms_hosts = try(var.storage_bms_hosts, {}) - storage_tb_bms_hosts = try(var.storage_tb_bms_hosts, {}) - afm_bms_hosts = try(var.afm_bms_hosts, {}) - protocol_bms_hosts = try(var.protocol_bms_hosts, {}) - } -} diff --git a/modules/host_resolution_add/main.tf b/modules/host_resolution_add/main.tf deleted file mode 100644 index b0df99e5..00000000 --- a/modules/host_resolution_add/main.tf +++ /dev/null @@ -1,805 +0,0 @@ -resource "local_file" "scale_cluster_hosts" { - filename = local.scale_cluster_hosts - content = yamlencode({ - storage_hosts = var.storage_hosts - storage_mgmnt_hosts = var.storage_mgmnt_hosts - storage_tb_hosts = var.storage_tb_hosts - compute_hosts = var.compute_hosts - compute_mgmnt_hosts = var.compute_mgmnt_hosts - client_hosts = var.client_hosts - protocol_hosts = var.protocol_hosts - gklm_hosts = var.gklm_hosts - afm_hosts = var.afm_hosts - storage_bms_hosts = var.storage_bms_hosts - storage_tb_bms_hosts = var.storage_tb_bms_hosts - protocol_bms_hosts = var.protocol_bms_hosts - afm_bms_hosts = var.afm_bms_hosts - }) -} - -resource "local_file" "domain_file" { - filename = local.domain_name_file - - content = yamlencode({ - domain_names = { - compute = try(var.domain_names.compute, null) - storage = try(var.domain_names.storage, null) - protocol = try(var.domain_names.protocol, null) - client = try(var.domain_names.client, null) - gklm = try(var.domain_names.gklm, null) - } - }) -} - -resource "local_file" "deployer_host_entry_playbook" { - count = var.scheduler == "Scale" ? 1 : 0 - content = < 0 ? [ - "[storage]", - join("\n", flatten([ - # Non-persistent storage hosts - [ - for host in flatten([ - values(local.normalize_hosts.storage_hosts), - values(local.normalize_hosts.storage_tb_hosts), - values(local.normalize_hosts.storage_mgmnt_hosts) - ]) : "${host.name} ansible_ssh_private_key_file=${local.storage_private_key} storage_type=scratch colocate_protocol_instances=${var.colocate_protocol_instances} scale_protocol_node=${var.enable_protocol}" - ], - # Persistent storage hosts - [ - for host in flatten([ - values(local.normalize_hosts.storage_bms_hosts), - values(local.normalize_hosts.storage_tb_bms_hosts) - ]) : "${host.name} id=${host.id} ansible_ssh_private_key_file=${local.storage_private_key} storage_type=persistent scale_protocol_node=${var.enable_protocol} colocate_protocol_instances=${var.colocate_protocol_instances} bms_boot_drive_encryption=${var.bms_boot_drive_encryption}" - ], - # AFM hosts - [ - for host in values(local.normalize_hosts.afm_hosts) : - "${host.name} ansible_ssh_private_key_file=${local.storage_private_key} storage_type=scratch scale_protocol_node=false" - ], - # AFM BMS hosts - [ - for host in values(local.normalize_hosts.afm_bms_hosts) : - "${host.name} id=${host.id} ansible_ssh_private_key_file=${local.storage_private_key} storage_type=persistent scale_protocol_node=false bms_boot_drive_encryption=${var.bms_boot_drive_encryption}" - ], - # Protocol hosts - [ - for host in values(local.normalize_hosts.protocol_hosts) : - "${host.name} ansible_ssh_private_key_file=${local.storage_private_key} storage_type=scratch scale_protocol_node=true colocate_protocol_instances=false" - ], - # Protocol BMS hosts - [ - for host in values(local.normalize_hosts.protocol_bms_hosts) : - "${host.name} id=${host.id} ansible_ssh_private_key_file=${local.storage_private_key} storage_type=persistent scale_protocol_node=true 
colocate_protocol_instances=false bms_boot_drive_encryption=${var.bms_boot_drive_encryption}" - ] - ])), - "" - ] : [], - - # COMPUTE - length(flatten([ - values(local.normalize_hosts.compute_hosts), - values(local.normalize_hosts.compute_mgmnt_hosts) - ])) > 0 ? [ - "[compute]", - join("\n", [ - for host in flatten([ - values(local.normalize_hosts.compute_hosts), - values(local.normalize_hosts.compute_mgmnt_hosts) - ]) : "${host.name} ansible_ssh_private_key_file=${local.compute_private_key}" - ]), - "" - ] : [], - - # CLIENT - length(values(local.normalize_hosts.client_hosts)) > 0 ? [ - "[client]", - join("\n", [ - for host in values(local.normalize_hosts.client_hosts) : - "${host.name} ansible_ssh_private_key_file=${local.client_private_key}" - ]), - "" - ] : [], - - # GKLM - length(values(local.normalize_hosts.gklm_hosts)) > 0 ? [ - "[gklm]", - join("\n", [ - for host in values(local.normalize_hosts.gklm_hosts) : - "${host.name} ansible_ssh_private_key_file=${local.gklm_private_key}" - ]), - "" - ] : [] - - ]))) -} - -resource "local_file" "scale_host_entry_playbook" { - count = var.scheduler == "Scale" ? 1 : 0 - content = <- - -o ConnectTimeout=20 - -o StrictHostKeyChecking=accept-new - -o UserKnownHostsFile=/dev/null - -o ServerAliveInterval=15 - -o ServerAliveCountMax=3 - - tasks: - # Verify required variables are set - - name: Validate required variables - block: - - name: Check for IBM Cloud API key - ansible.builtin.fail: - msg: "ibmcloud_api_key is not defined" - when: ibmcloud_api_key is not defined - - - name: Check for resource group - ansible.builtin.fail: - msg: "resource_group is not defined" - when: resource_group is not defined - - - name: Check for VPC region - ansible.builtin.fail: - msg: "vpc_region is not defined" - when: vpc_region is not defined - - # Connection verification - - name: Attempt SSH connection - ansible.builtin.wait_for: - port: 22 - host: "{{ inventory_hostname }}" - timeout: 20 - delay: 5 - connect_timeout: 20 - register: ssh_check - until: ssh_check is success - retries: "{{ max_ssh_attempts }}" - delay: "{{ ssh_retry_delay }}" - ignore_errors: true - delegate_to: localhost - changed_when: false - - - name: Check SSH port status - ansible.builtin.shell: | - nc -zv -w 5 "{{ inventory_hostname }}" 22 && echo "OPEN" || echo "CLOSED" - register: port_check - ignore_errors: true - changed_when: false - delegate_to: localhost - when: ssh_check is failed - - - name: Debug connection status - ansible.builtin.debug: - msg: | - Server: {{ inventory_hostname }} - SSH Status: {{ ssh_check | default('undefined') }} - Port Status: {{ port_check.stdout | default('undefined') }} - Server ID: {{ id | default('undefined') }} - when: ssh_check is failed - - # Server recovery for unresponsive systems - - name: Recover unresponsive server (via IBM Cloud CLI) - block: - - name: Login to IBM Cloud (local) - ansible.builtin.shell: | - /usr/local/bin/ibmcloud logout || true - /usr/local/bin/ibmcloud login --apikey "{{ ibmcloud_api_key }}" -q - /usr/local/bin/ibmcloud target -g "{{ resource_group }}" -r "{{ vpc_region }}" - args: - executable: /bin/bash - delegate_to: localhost - changed_when: false - - - name: Get current server status (local) - ansible.builtin.shell: | - /usr/local/bin/ibmcloud is bm {{ id }} --output JSON | jq -r '.status' - args: - executable: /bin/bash - register: current_status - delegate_to: localhost - changed_when: false - - - name: Stop server if not already stopped (local) - ansible.builtin.shell: | - status=$(/usr/local/bin/ibmcloud is bm {{ id 
}} --output JSON | jq -r '.status') - if [ "$status" != "stopped" ]; then - /usr/local/bin/ibmcloud is bm-stop {{ id }} --type hard --force --quiet - fi - args: - executable: /bin/bash - async: 300 - poll: 0 - delegate_to: localhost - - - name: Wait for server to stop - ansible.builtin.shell: | - # Set timeout to 15 minutes (900 seconds) - end_time=$(( $(date +%s) + 900 )) - while [ $(date +%s) -lt $end_time ]; do - # Get status with full path and proper error handling - status=$(/usr/local/bin/ibmcloud is bm {{ id }} --output JSON 2>/dev/null | jq -r '.status' || echo "ERROR") - - # Exit immediately if stopped - if [ "$status" == "stopped" ]; then - exit 0 - fi - - # Log current status - echo "Current status: $status" - sleep 30 - done - - # If we get here, timeout was reached - echo "Timeout waiting for server to stop" - exit 1 - args: - executable: /bin/bash - register: stop_wait - delegate_to: localhost - changed_when: false - until: stop_wait.rc == 0 - retries: 10 - delay: 30 - - - name: Show stop wait debug info - ansible.builtin.debug: - var: stop_wait.stdout_lines - when: stop_wait is defined - - - name: Start server (local) - ansible.builtin.shell: | - /usr/local/bin/ibmcloud is bm-start {{ id }} --quiet - args: - executable: /bin/bash - async: 300 - poll: 0 - delegate_to: localhost - - - name: Wait for server to come online - ansible.builtin.wait_for: - port: 22 - host: "{{ inventory_hostname }}" - timeout: 900 - delay: 30 - connect_timeout: 30 - delegate_to: localhost - - when: - - ssh_check is failed - - port_check.stdout is defined - - "'CLOSED' in port_check.stdout" - - - name: Fail if still unresponsive - ansible.builtin.fail: - msg: | - Server {{ inventory_hostname }} remains unresponsive after recovery attempts - Last SSH Status: {{ ssh_check | default('undefined') }} - Last Port Status: {{ port_check.stdout | default('undefined') }} - Server Status: {{ current_status.stdout | default('undefined') }} - when: - - ssh_check is failed - - port_check.stdout is defined - - "'OPEN' in port_check.stdout" -EOT - filename = local.scale_baremetal_ssh_check_playbook_path -} - -resource "local_file" "bms_bootdrive_playbook" { - count = var.scheduler == "Scale" ? 1 : 0 - content = <- - -o ConnectTimeout=20 - -o StrictHostKeyChecking=accept-new - -o UserKnownHostsFile=/dev/null - -o ServerAliveInterval=15 - -o ServerAliveCountMax=3 - - tasks: - # Main boot drive encryption tasks - - name: Handle boot drive encryption for persistent storage - when: - - bms_boot_drive_encryption | default(false) - - storage_type | default("") == "persistent" - - "'mgmt' not in inventory_hostname" - block: - # Post-recovery verification - - name: Verify encryption setup - block: - - name: Check for encrypted drives - ansible.builtin.command: lsblk -o NAME,FSTYPE,MOUNTPOINT - register: lsblk_output - changed_when: false - - - name: Debug storage configuration - ansible.builtin.debug: - var: lsblk_output.stdout_lines - - - name: Restart NetworkManager - ansible.builtin.service: - name: NetworkManager - state: restarted - async: 60 - poll: 0 - - - name: Verify NetworkManager status - ansible.builtin.service: - name: NetworkManager - state: started - changed_when: false -EOT - filename = local.scale_baremetal_bootdrive_playbook_path -} - -resource "local_file" "scale_baremetal_prerequisite_playbook" { - count = var.scheduler == "Scale" && var.storage_type == "persistent" ? 1 : 0 - content = </dev/null || true - echo "Installing consumer RPM..." 
- rpm -Uvh "http://$${capsule}/pub/katello-ca-consumer-latest.noarch.rpm" || true - subscription-manager config --server.hostname="$${capsule}" || true - subscription-manager config --rhsm.baseurl="https://$${capsule}/pulp/repos" || true - if [ -f /etc/rhsm/facts/katello.facts ]; then - mv /etc/rhsm/facts/katello.facts "/etc/rhsm/facts/katello.facts.bak.$(date +%s)" - fi - echo "{\"network.hostname-override\":\"$${profileName}\"}" > /etc/rhsm/facts/katello.facts - echo "Registering system..." - subscription-manager register --org="$${organization}" --activationkey="$${activationKey}" --force - dest: /tmp/register_rhel.sh - mode: '0755' - when: - - ansible_os_family == "RedHat" - - subscription_status.rc != 0 or "not registered" in subscription_status.stderr - - - name: Execute subscription registration script - command: /bin/bash /tmp/register_rhel.sh - args: - warn: false - register: registration_result - failed_when: registration_result.rc != 0 and "This system is already registered" not in registration_result.stderr and "is already registered" not in registration_result.stderr - when: - - ansible_os_family == "RedHat" - - subscription_status.rc != 0 or "not registered" in subscription_status.stderr - - - name: Clean up registration script - file: - path: /tmp/register_rhel.sh - state: absent - when: ansible_os_family == "RedHat" - - # --- OS detection and package installation --- - - name: Gather OS facts - ansible.builtin.setup: - filter: "ansible_distribution*" - - - name: Set RHEL vars - set_fact: - package_mgr: "dnf" - package_list: >- - {% if 'RedHat' in ansible_distribution %} - {% if '9' in ansible_distribution_version %} - python3 kernel-devel-{{ ansible_kernel }} kernel-headers-{{ ansible_kernel }} firewalld numactl make gcc-c++ elfutils-libelf-devel bind-utils iptables-nft nfs-utils elfutils elfutils-devel python3-dnf-plugin-versionlock - {% else %} - python38 kernel-devel-{{ ansible_kernel }} kernel-headers-{{ ansible_kernel }} firewalld numactl jq make gcc-c++ elfutils-libelf-devel bind-utils iptables nfs-utils elfutils elfutils-devel python3-dnf-plugin-versionlock - {% endif %} - {% else %} - "" - {% endif %} - when: ansible_os_family == "RedHat" - - - name: Enable RHEL 9 supplementary repo - command: "subscription-manager repos --enable=rhel-9-for-x86_64-supplementary-eus-rpms" - ignore_errors: yes - when: ansible_distribution_major_version == "9" and ansible_os_family == "RedHat" - - - name: Install required packages - yum: - name: "{{ package_list.split() }}" - state: present - register: package_install - until: package_install is succeeded - retries: 3 - delay: 10 - when: package_list != "" - - - name: Security update - yum: - name: "*" - security: yes - state: latest - ignore_errors: yes - when: ansible_os_family == "RedHat" - - - name: Version lock packages - command: "yum versionlock add {{ package_list }}" - ignore_errors: yes - when: ansible_os_family == "RedHat" - - - name: Add GPFS bin path to root bashrc - lineinfile: - path: "/root/.bashrc" - line: "export PATH=$PATH:/usr/lpp/mmfs/bin" - - # --- Firewall --- - - name: Stop firewalld - service: - name: "firewalld" - state: stopped - - - name: Configure firewall ports and services (permanent) - firewalld: - port: "{{ item.port }}/{{ item.proto }}" - permanent: true - state: enabled - loop: - - { port: 1191, proto: tcp } - - { port: 4444, proto: tcp } - - { port: 4444, proto: udp } - - { port: 4739, proto: udp } - - { port: 4739, proto: tcp } - - { port: 9084, proto: tcp } - - { port: 9085, proto: tcp } - - { 
port: 2049, proto: tcp } - - { port: 2049, proto: udp } - - { port: 111, proto: tcp } - - { port: 111, proto: udp } - - { port: 30000-61000, proto: tcp } - - { port: 30000-61000, proto: udp } - - - name: Enable HTTP/HTTPS services (permanent) - firewalld: - service: "{{ item }}" - permanent: true - state: enabled - loop: - - "http" - - "https" - - - name: Start and enable firewalld - service: - name: "firewalld" - state: started - enabled: true - - when: - - storage_type | default("") == "persistent" - - "'mgmt' not in inventory_hostname" - - # Protocol-specific configuration - - block: - # --- Hostname --- - - name: Configure hostname with DNS domain - hostname: - name: "{{ ansible_hostname }}.{{ protocol_domain }}" - - - name: Remove existing eth1 connection - shell: | - sec_interface=$(nmcli -t con show --active | grep eth1 | cut -d ':' -f 1) - nmcli conn del "$sec_interface" - ignore_errors: yes - - - name: Add eth1 ethernet connection - command: nmcli con add type ethernet con-name eth1 ifname eth1 - - - name: Add DOMAIN to protocol interface config - lineinfile: - path: "/etc/sysconfig/network-scripts/ifcfg-{{ protocol_interface }}" - line: "DOMAIN={{ protocol_domain }}" - create: yes - - - name: Set MTU to 9000 for protocol interface - lineinfile: - path: "/etc/sysconfig/network-scripts/ifcfg-{{ protocol_interface }}" - line: "MTU=9000" - create: yes - - - name: Add IC_REGION to root bashrc - lineinfile: - path: "/root/.bashrc" - line: "export IC_REGION={{ vpc_region }}" - - - name: Add IC_SUBNET to root bashrc - lineinfile: - path: "/root/.bashrc" - line: "export IC_SUBNET={{ protocol_subnet }}" - - - name: Add IC_RG to root bashrc - lineinfile: - path: "/root/.bashrc" - line: "export IC_RG={{ resource_group }}" - when: - - storage_type | default("") == "persistent" - - scale_protocol_node | default(false) | bool -EOT - filename = local.scale_baremetal_prerequisite_playbook_path -} - -resource "local_file" "scale_gpfs_restart_playbook" { - count = var.scheduler == "Scale" && var.scale_encryption_type == "key_protect" ? 1 : 0 - content = < "${local.key_protect_path}/Key_Protect_Server.cert" - # Create a Key Protect Server Root and CA certs - [ -f "${local.key_protect_path}/Key_Protect_Server.cert" ] && awk '/-----BEGIN CERTIFICATE-----/,/-----END CERTIFICATE-----/' "${local.key_protect_path}/Key_Protect_Server.cert" > "${local.key_protect_path}/Key_Protect_Server_CA.cert" - [ -f "${local.key_protect_path}/Key_Protect_Server_CA.cert" ] && awk '/-----BEGIN CERTIFICATE-----/{x="${local.key_protect_path}/Key_Protect_Server.chain"i".cert"; i++} {print > x}' "${local.key_protect_path}/Key_Protect_Server_CA.cert" - [ -f "${local.key_protect_path}/Key_Protect_Server.chain.cert" ] && mv "${local.key_protect_path}/Key_Protect_Server.chain.cert" "${local.key_protect_path}/Key_Protect_Server.chain0.cert" - # Create a Self Signed Certificates - [ ! -f "${local.key_protect_path}/${var.resource_prefix}.key" ] && openssl genpkey -algorithm RSA -out "${local.key_protect_path}/${var.resource_prefix}.key" - [ ! -f "${local.key_protect_path}/${var.resource_prefix}.csr" ] && openssl req -new -key "${local.key_protect_path}/${var.resource_prefix}.key" -out "${local.key_protect_path}/${var.resource_prefix}.csr" -subj "/CN=${var.vpc_storage_cluster_dns_domain}" - [ ! 
-f "${local.key_protect_path}/${var.resource_prefix}.cert" ] && openssl x509 -req -days 3650 -in "${local.key_protect_path}/${var.resource_prefix}.csr" -signkey "${local.key_protect_path}/${var.resource_prefix}.key" -out "${local.key_protect_path}/${var.resource_prefix}.cert" - EOT - } -} - -resource "ibm_kms_key" "scale_key" { - instance_id = var.key_protect_instance_id - key_name = "key" - standard_key = false -} - -resource "ibm_kms_kmip_adapter" "sclae_kmip_adapter" { - instance_id = var.key_protect_instance_id - profile = "native_1.0" - profile_data = { - "crk_id" = ibm_kms_key.scale_key.key_id - } - description = "Key Protect adapter" - name = format("%s-kp-adapter", var.resource_prefix) -} - -resource "ibm_kms_kmip_client_cert" "mycert" { - instance_id = var.key_protect_instance_id - adapter_id = ibm_kms_kmip_adapter.sclae_kmip_adapter.adapter_id - certificate = data.local_file.kpclient_cert.content - name = format("%s-kp-cert", var.resource_prefix) - depends_on = [data.local_file.kpclient_cert] -} diff --git a/modules/key_protect/outputs.tf b/modules/key_protect/outputs.tf deleted file mode 100644 index e69de29b..00000000 diff --git a/modules/key_protect/variables.tf b/modules/key_protect/variables.tf deleted file mode 100644 index 884dbf37..00000000 --- a/modules/key_protect/variables.tf +++ /dev/null @@ -1,29 +0,0 @@ -variable "key_protect_instance_id" { - type = string - default = null - description = "An existing Key Protect instance used for filesystem encryption" -} - -variable "resource_prefix" { - type = string - default = "scale" - description = "A unique identifier for resources. Must begin with a letter and end with a letter or number. This prefix will be prepended to any resources provisioned by this template. Prefixes must be 16 or fewer characters." -} - -variable "vpc_region" { - type = string - default = null - description = "vpc region" -} - -variable "scale_config_path" { - type = string - default = "/opt/IBM/ibm-spectrumscale-cloud-deploy" - description = "Path to clone github.com/IBM/ibm-spectrum-scale-install-infra." -} - -variable "vpc_storage_cluster_dns_domain" { - type = string - default = "ldap.com" - description = "Base domain for the LDAP Server" -} diff --git a/modules/landing_zone/datasource.tf b/modules/landing_zone/datasource.tf index 29fd3e9f..028278ec 100644 --- a/modules/landing_zone/datasource.tf +++ b/modules/landing_zone/datasource.tf @@ -19,208 +19,3 @@ data "ibm_is_subnet" "subnet" { count = (var.vpc_name != null && length(var.compute_subnet_id) > 0) ? 1 : 0 identifier = var.compute_subnet_id } - -############################################################################################################# - -############################################################################################################# - -locals { - exstng_cos_instance_bkt_hmc_key = var.scheduler == "Scale" ? [for details in var.afm_cos_config : details.cos_instance if(details.cos_instance != "" && details.bucket_name != "" && details.cos_service_cred_key != "")] : [] - # exstng_cos_instance = var.scheduler == "Scale" ? 
[for details in var.afm_cos_config : details.cos_instance if(details.cos_instance != "")] : [] -} - -data "ibm_resource_instance" "afm_cos_instances" { - for_each = { - for idx, value in local.exstng_cos_instance_bkt_hmc_key : idx => { - total_cos_instance = element(local.exstng_cos_instance_bkt_hmc_key, idx) - } - } - name = each.value.total_cos_instance - service = "cloud-object-storage" -} - -locals { - instance_data = [for key, value in data.ibm_resource_instance.afm_cos_instances : value] - cos_instance_data = concat(flatten(module.landing_zone[*].cos_data), local.instance_data) - total_instance = [ - for item in local.cos_instance_data : { - name = item.resource_name - resource_instance_id = item.guid - } - ] -} - -# data "ibm_resource_instance" "exstng_cos_instances" { -# for_each = { -# for idx, value in local.exstng_cos_instance : idx => { -# total_cos_instance = element(local.exstng_cos_instance, idx) -# } -# } -# name = each.value.total_cos_instance -# service = "cloud-object-storage" -# } - -locals { - # existing_instance_data = [for key, value in data.ibm_resource_instance.exstng_cos_instances : value] - # total_existing_instances = setsubtract(([for item in local.cos_instance_data : item]), ([for item in local.existing_instance_data : item])) - - total_new_instance = [ - for item in local.cos_instance_data : { - name = item.resource_name - resource_instance_id = item.guid - } - ] - newly_created_instance_afm = [for instance in local.total_new_instance : instance.resource_instance_id if(join("-", slice(split("-", instance.name), 0, length(split("-", instance.name)) - 1))) != "${local.prefix}-hpc-cos"] - - config_details = flatten([ - for instance in local.total_instance : [ - for config in var.afm_cos_config : { - afm_fileset = config.afm_fileset - mode = config.mode - resource_instance_id = instance.resource_instance_id - } if config.cos_instance == instance.name - ] - ]) -} - -# Existing Bucket Data - -locals { - total_exstng_bucket_instance = var.scheduler == "Scale" ? [for bucket in var.afm_cos_config : bucket.cos_instance if(bucket.bucket_name != "")] : [] - - total_exstng_bucket_name = var.scheduler == "Scale" ? [for bucket in var.afm_cos_config : bucket.bucket_name if(bucket.bucket_name != "")] : [] - - total_exstng_bucket_region = var.scheduler == "Scale" ? [for bucket in var.afm_cos_config : bucket.bucket_region if(bucket.bucket_name != "")] : [] - - total_exstng_bucket_type = var.scheduler == "Scale" ? 
[for bucket in var.afm_cos_config : bucket.bucket_type if(bucket.bucket_name != "")] : [] -} - -data "ibm_resource_instance" "afm_exstng_bucket_cos_instance" { - for_each = { - for idx, value in local.total_exstng_bucket_instance : idx => { - total_cos_instance = element(local.total_exstng_bucket_instance, idx) - } - } - name = each.value.total_cos_instance - service = "cloud-object-storage" -} - -data "ibm_cos_bucket" "afm_exstng_cos_buckets" { - for_each = { - for idx, value in local.total_exstng_bucket_instance : idx => { - bucket_name = element(local.total_exstng_bucket_name, idx) - resource_instance_id = element(flatten([for instance in data.ibm_resource_instance.afm_exstng_bucket_cos_instance : instance[*].id]), idx) - bucket_region = element(local.total_exstng_bucket_region, idx) - bucket_type = element(local.total_exstng_bucket_type, idx) - } - } - bucket_name = each.value.bucket_name - resource_instance_id = each.value.resource_instance_id - bucket_region = each.value.bucket_region - bucket_type = each.value.bucket_type - depends_on = [data.ibm_resource_instance.afm_exstng_bucket_cos_instance] -} - -# Existing Hmac Key Data - -locals { - total_exstng_hmac_key_instance = var.scheduler == "Scale" ? [for key in var.afm_cos_config : key.cos_instance if(key.cos_service_cred_key != "")] : [] - total_exstng_hmac_key_name = var.scheduler == "Scale" ? [for key in var.afm_cos_config : key.cos_service_cred_key if(key.cos_service_cred_key != "")] : [] -} - -data "ibm_resource_instance" "afm_exstng_hmac_key_cos_instance" { - for_each = { - for idx, value in local.total_exstng_hmac_key_instance : idx => { - total_cos_instance = element(local.total_exstng_hmac_key_instance, idx) - } - } - name = each.value.total_cos_instance - service = "cloud-object-storage" -} - -data "ibm_resource_key" "afm_exstng_cos_hmac_keys" { - for_each = { - for idx, value in local.total_exstng_hmac_key_instance : idx => { - hmac_key = element(local.total_exstng_hmac_key_name, idx) - resource_instance_id = element(flatten([for instance in data.ibm_resource_instance.afm_exstng_hmac_key_cos_instance : instance[*].id]), idx) - } - } - name = each.value.hmac_key - resource_instance_id = each.value.resource_instance_id - depends_on = [data.ibm_resource_instance.afm_exstng_hmac_key_cos_instance] -} - -locals { - # Final Bucket Data - existing_buckets = [for num, bucket in data.ibm_cos_bucket.afm_exstng_cos_buckets : bucket] - total_buckets_data = concat(local.existing_buckets, flatten(module.landing_zone[*].cos_bucket_data)) - total_buckets = [ - for item in local.total_buckets_data : { - endpoint = item.s3_endpoint_direct - bucket = item.bucket_name - resource_instance_id = split(":", item.resource_instance_id)[7] - } - ] - - newly_created_instance_bucket = [ - for item in local.total_buckets : { - endpoint = item.endpoint - bucket = item.bucket - resource_instance_id = item.resource_instance_id - } if item.resource_instance_id == (var.enable_landing_zone && local.enable_afm ? 
local.newly_created_instance_afm[0] : "") - ] - - afm_config_details_0 = flatten([ - for bucket in local.total_buckets : [ - for config in local.config_details : { - bucket = bucket.bucket - fileset = config.afm_fileset - filesystem = local.filesystem - mode = config.mode - endpoint = "https://${bucket.endpoint}" - } if bucket.resource_instance_id == config.resource_instance_id - ] - ]) - - afm_config_details_1 = [ - for i in range(length(local.newly_created_instance_bucket)) : { - bucket = local.newly_created_instance_bucket[i].bucket - endpoint = "https://${local.newly_created_instance_bucket[i].endpoint}" - fileset = local.new_instance_bucket_hmac[i].afm_fileset - filesystem = local.filesystem - mode = local.new_instance_bucket_hmac[i].mode - } - ] - - scale_afm_bucket_config_details = concat(local.afm_config_details_0, local.afm_config_details_1) - - # Final Hmac Key Data - existing_hmac_keys = [ - for item in [for num, keys in([for key in [for num, keys in data.ibm_resource_key.afm_exstng_cos_hmac_keys : keys] : key]) : keys] : { - credentials = item.credentials - credentials_json = item.credentials_json - resource_instance_id = split(":", item.id)[7] - name = item.name - } - ] - - new_hmac_keys = [ - for item in [for num, keys in((var.enable_landing_zone ? [for key in flatten(module.landing_zone[*].cos_key_credentials_map)[0] : key] : [])) : keys] : { - credentials = item.credentials - credentials_json = item.credentials_json - resource_instance_id = split(":", item.id)[7] - name = item.name - } - ] - total_hmac_keys = concat(local.existing_hmac_keys, local.new_hmac_keys) - - scale_afm_cos_hmac_key_params = flatten([ - for key in local.total_hmac_keys : [ - for bucket in local.total_buckets : { - akey = key.credentials["cos_hmac_keys.access_key_id"] - bucket = bucket.bucket - skey = key.credentials["cos_hmac_keys.secret_access_key"] - } if key.resource_instance_id == bucket.resource_instance_id - ] - ]) -} diff --git a/modules/landing_zone/locals.tf b/modules/landing_zone/locals.tf index 74b6b29c..0c045ef9 100644 --- a/modules/landing_zone/locals.tf +++ b/modules/landing_zone/locals.tf @@ -1,6 +1,6 @@ locals { # Defined values - name = lower(var.scheduler) + name = "lsf" prefix = var.prefix tags = [local.prefix, local.name] @@ -62,7 +62,6 @@ locals { public_gateway = true no_addr_prefix = true } : null, - # Compute subnet is always created without any conditions { name = "compute-subnet-${zone}" acl_name = "hpc-acl" @@ -85,7 +84,7 @@ locals { no_addr_prefix = true } : null, zone == local.active_zones[0] ? { - name = "bastion-subnet-${zone}" + name = "bastion-subnet" acl_name = "hpc-acl" cidr = var.vpc_cluster_login_private_subnets_cidr_blocks public_gateway = true @@ -134,28 +133,16 @@ locals { vpcs = [ { existing_vpc_id = var.vpc_name == null ? null : data.ibm_is_vpc.existing_vpc[0].id - existing_subnets = var.vpc_name != null ? flatten([ - var.compute_subnet_id != "" && var.compute_subnet_id != null ? [{ + existing_subnets = (var.vpc_name != null && length(var.compute_subnet_id) > 0) ? [ + { id = var.compute_subnet_id public_gateway = false - }] : [], - var.bastion_subnet_id != "" && var.bastion_subnet_id != null ? [{ + }, + { id = var.bastion_subnet_id public_gateway = false - }] : [], - var.storage_subnet_id != "" && var.storage_subnet_id != null ? [{ - id = var.storage_subnet_id - public_gateway = false - }] : [], - var.protocol_subnet_id != "" && var.protocol_subnet_id != null ? 
[{ - id = var.protocol_subnet_id - public_gateway = false - }] : [], - var.client_subnet_id != "" && var.client_subnet_id != null ? [{ - id = var.client_subnet_id - public_gateway = false - }] : [] - ]) : null + } + ] : null prefix = local.name resource_group = var.existing_resource_group == "null" ? "${local.prefix}-workload-rg" : var.existing_resource_group clean_default_security_group = true @@ -200,86 +187,10 @@ locals { transit_gateway_resource_group = local.service_resource_group transit_gateway_connections = [var.vpc_name] - ############################################################################################################## - # AFM Related Calculation - ############################################################################################################## - - enable_afm = sum(var.afm_instances[*]["count"]) > 0 ? true : false - new_instance_bucket_hmac = var.scheduler == "Scale" ? [for details in var.afm_cos_config : details if(details.cos_instance == "" && details.bucket_name == "" && details.cos_service_cred_key == "")] : [] - exstng_instance_new_bucket_hmac = var.scheduler == "Scale" ? [for details in var.afm_cos_config : details if(details.cos_instance != "" && details.bucket_name == "" && details.cos_service_cred_key == "")] : [] - exstng_instance_bucket_new_hmac = var.scheduler == "Scale" ? [for details in var.afm_cos_config : details if(details.cos_instance != "" && details.bucket_name != "" && details.cos_service_cred_key == "")] : [] - exstng_instance_hmac_new_bucket = var.scheduler == "Scale" ? [for details in var.afm_cos_config : details if(details.cos_instance != "" && details.bucket_name == "" && details.cos_service_cred_key != "")] : [] - - path_elements = split("/", var.storage_instances[0]["filesystem"] != "" ? var.storage_instances[0]["filesystem"] : var.filesystem_config[0]["filesystem"]) - filesystem = element(local.path_elements, length(local.path_elements) - 1) - total = concat(local.exstng_instance_new_bucket_hmac, local.exstng_instance_bucket_new_hmac, local.exstng_instance_hmac_new_bucket) - - total_new_data = local.enable_afm && length(local.new_instance_bucket_hmac) > 0 ? [{ - name = "hpc-instance" - resource_group = local.service_resource_group - plan = "standard" - random_suffix = true - use_data = false - skip_flowlogs_s2s_auth_policy = var.skip_flowlogs_s2s_auth_policy - skip_kms_s2s_auth_policy = var.skip_kms_s2s_auth_policy - buckets = [ - for idx, all in local.new_instance_bucket_hmac : { - name = all.bucket_name == "" ? format("hpcc-bucket%d", idx) : all.bucket_name - storage_class = all.bucket_storage_class - endpoint_type = "public" - force_delete = true - kms_key = null - expire_rule = null - single_site_location = all.bucket_type == "single_site_location" ? all.bucket_region : null - region_location = all.bucket_type == "region_location" ? all.bucket_region : null - cross_region_location = all.bucket_type == "cross_region_location" ? all.bucket_region : null - } - ] - keys = [{ - name = "hpcc-key" - role = "Manager" - enable_HMAC = true - }] - } - ] : [] - - total_existing_data = [for idx, all in tolist(local.total) : { - name = all.cos_instance == "" ? format("hpcc-instance%d", idx) : all.cos_instance - resource_group = local.service_resource_group - plan = "standard" - random_suffix = true - use_data = all.cos_instance == "" ? false : true - skip_flowlogs_s2s_auth_policy = var.skip_flowlogs_s2s_auth_policy - skip_kms_s2s_auth_policy = var.skip_kms_s2s_auth_policy - buckets = all.bucket_name == "" ? 
[ - { - name = format("hpc-bucket%d", idx) - storage_class = all.bucket_storage_class - endpoint_type = "public" - force_delete = true - kms_key = null - expire_rule = null - single_site_location = all.bucket_type == "single_site_location" ? all.bucket_region : null - region_location = all.bucket_type == "region_location" ? all.bucket_region : null - cross_region_location = all.bucket_type == "cross_region_location" ? all.bucket_region : null - }, - ] : [] - keys = all.cos_service_cred_key == "" ? [{ - name = format("hpc-key%d", idx) - role = "Manager" - enable_HMAC = true - }] : [] - } if local.enable_afm && length(local.total) > 0 - ] - - ############################################################################################################## - - ############################################################################################################## - final_instance_bucket_hmac_creation = concat(local.total_new_data, local.total_existing_data) - - active_cos = concat(local.final_instance_bucket_hmac_creation, [ - - (var.enable_cos_integration || var.enable_vpc_flow_logs || var.enable_atracker || var.observability_logs_enable) ? { + active_cos = [ + ( + var.enable_cos_integration || var.enable_vpc_flow_logs || var.enable_atracker || var.observability_logs_enable + ) ? { name = var.cos_instance_name == null ? "hpc-cos" : var.cos_instance_name resource_group = local.service_resource_group plan = "standard" @@ -292,25 +203,19 @@ locals { # Extra bucket for solution specific object storage buckets = [ var.enable_cos_integration ? { - name = "hpc-bucket" - storage_class = "standard" - endpoint_type = "public" - force_delete = true - single_site_location = null - region_location = null - cross_region_location = null - kms_key = var.key_management == "key_protect" ? (var.kms_key_name == null ? format("%s-key", var.prefix) : var.kms_key_name) : null - expire_rule = null + name = "hpc-bucket" + storage_class = "standard" + endpoint_type = "public" + force_delete = true + kms_key = var.key_management == "key_protect" ? (var.kms_key_name == null ? format("%s-key", var.prefix) : var.kms_key_name) : null + expire_rule = null } : null, var.enable_vpc_flow_logs ? { - name = "vpc-flow-logs-bucket" - storage_class = "standard" - endpoint_type = "public" - force_delete = true - single_site_location = null - region_location = null - cross_region_location = null - kms_key = var.key_management == "key_protect" ? (var.kms_key_name == null ? format("%s-slz-key", var.prefix) : var.kms_key_name) : null + name = "vpc-flow-logs-bucket" + storage_class = "standard" + endpoint_type = "public" + force_delete = true + kms_key = var.key_management == "key_protect" ? (var.kms_key_name == null ? format("%s-slz-key", var.prefix) : var.kms_key_name) : null expire_rule = { days = 30 enable = true @@ -318,14 +223,11 @@ locals { } } : null, var.enable_atracker ? { - name = "atracker-bucket" - storage_class = "standard" - endpoint_type = "public" - force_delete = true - single_site_location = null - region_location = null - cross_region_location = null - kms_key = var.key_management == "key_protect" ? (var.kms_key_name == null ? format("%s-atracker-key", var.prefix) : var.kms_key_name) : null + name = "atracker-bucket" + storage_class = "standard" + endpoint_type = "public" + force_delete = true + kms_key = var.key_management == "key_protect" ? (var.kms_key_name == null ? 
format("%s-atracker-key", var.prefix) : var.kms_key_name) : null expire_rule = { days = 30 enable = true @@ -333,14 +235,11 @@ locals { } } : null, var.observability_logs_enable ? { - name = "logs-data-bucket" - storage_class = "standard" - endpoint_type = "public" - force_delete = true - single_site_location = null - region_location = null - cross_region_location = null - kms_key = var.key_management == "key_protect" ? (var.kms_key_name == null ? format("%s-logs-data-key", var.prefix) : var.kms_key_name) : null + name = "logs-data-bucket" + storage_class = "standard" + endpoint_type = "public" + force_delete = true + kms_key = var.key_management == "key_protect" ? (var.kms_key_name == null ? format("%s-logs-data-key", var.prefix) : var.kms_key_name) : null expire_rule = { days = 30 enable = true @@ -348,14 +247,11 @@ locals { } } : null, var.observability_logs_enable ? { - name = "metrics-data-bucket" - storage_class = "standard" - endpoint_type = "public" - force_delete = true - single_site_location = null - region_location = null - cross_region_location = null - kms_key = var.key_management == "key_protect" ? (var.kms_key_name == null ? format("%s-metrics-data-key", var.prefix) : var.kms_key_name) : null + name = "metrics-data-bucket" + storage_class = "standard" + endpoint_type = "public" + force_delete = true + kms_key = var.key_management == "key_protect" ? (var.kms_key_name == null ? format("%s-metrics-data-key", var.prefix) : var.kms_key_name) : null expire_rule = { days = 30 enable = true @@ -364,8 +260,7 @@ locals { } : null ] } : null - ] - ) + ] cos = [ for instance in local.active_cos : @@ -381,15 +276,12 @@ locals { buckets = [ for bucket in instance.buckets : { - name = bucket.name - storage_class = bucket.storage_class - endpoint_type = bucket.endpoint_type - force_delete = bucket.force_delete - kms_key = bucket.kms_key - expire_rule = bucket.expire_rule - single_site_location = bucket.single_site_location - region_location = bucket.region_location - cross_region_location = bucket.cross_region_location + name = bucket.name + storage_class = bucket.storage_class + endpoint_type = bucket.endpoint_type + force_delete = bucket.force_delete + kms_key = bucket.kms_key + expire_rule = bucket.expire_rule } if bucket != null ] @@ -423,11 +315,11 @@ locals { } ]) : null - key_management = var.key_management == "key_protect" || (var.scale_encryption_enabled && var.scale_encryption_type == "key_protect" && var.key_protect_instance_id == null) ? { + key_management = var.key_management == "key_protect" ? { name = var.kms_instance_name != null ? var.kms_instance_name : format("%s-kms", var.prefix) # var.key_management == "hs_crypto" ? var.hpcs_instance_name : format("%s-kms", var.prefix) resource_group = local.service_resource_group use_hs_crypto = false - keys = [for each in coalesce(local.active_keys, []) : each if each != null] + keys = [for each in local.active_keys : each if each != null] use_data = var.kms_instance_name != null ? true : false } : { name = null @@ -509,6 +401,5 @@ locals { f5_vsi = local.f5_vsi f5_template_data = local.f5_template_data skip_kms_block_storage_s2s_auth_policy = local.skip_kms_block_storage_s2s_auth_policy - } } diff --git a/modules/landing_zone/main.tf b/modules/landing_zone/main.tf index 54a15b7b..80688431 100644 --- a/modules/landing_zone/main.tf +++ b/modules/landing_zone/main.tf @@ -1,7 +1,7 @@ module "landing_zone" { count = var.enable_landing_zone ? 
1 : 0 source = "terraform-ibm-modules/landing-zone/ibm" - version = "8.4.3" + version = "8.2.0" prefix = local.prefix region = local.region tags = local.tags diff --git a/modules/landing_zone/outputs.tf b/modules/landing_zone/outputs.tf index 0f6afeb8..37d17a3b 100644 --- a/modules/landing_zone/outputs.tf +++ b/modules/landing_zone/outputs.tf @@ -30,7 +30,7 @@ output "bastion_subnets" { id = subnet["id"] zone = subnet["zone"] cidr = subnet["cidr"] - } if strcontains(subnet["name"], "-${local.name}-bastion-subnet") + } if strcontains(subnet["name"], "-lsf-bastion-subnet") ] } @@ -41,7 +41,7 @@ output "client_subnets" { id = subnet["id"] zone = subnet["zone"] cidr = subnet["cidr"] - } if strcontains(subnet["name"], "-${local.name}-client-subnet") + } if strcontains(subnet["name"], "-lsf-client-subnet") ] } @@ -52,7 +52,7 @@ output "compute_subnets" { id = subnet["id"] zone = subnet["zone"] cidr = subnet["cidr"] - } if strcontains(subnet["name"], "-${local.name}-compute-subnet-zone-") + } if strcontains(subnet["name"], "-lsf-compute-subnet-zone-") ] } @@ -63,7 +63,7 @@ output "storage_subnets" { id = subnet["id"] zone = subnet["zone"] cidr = subnet["cidr"] - } if strcontains(subnet["name"], "-${local.name}-storage-subnet-zone-") + } if strcontains(subnet["name"], "-lsf-storage-subnet-zone-") ] } @@ -74,7 +74,7 @@ output "protocol_subnets" { id = subnet["id"] zone = subnet["zone"] cidr = subnet["cidr"] - } if strcontains(subnet["name"], "-${local.name}-protocol-subnet-zone-") + } if strcontains(subnet["name"], "-lsf-protocol-subnet-zone-") ] } @@ -91,7 +91,7 @@ output "boot_volume_encryption_key" { output "key_management_guid" { description = "GUID for KMS instance" - value = var.enable_landing_zone ? var.key_management != null || (var.scale_encryption_enabled && var.scale_encryption_type == "key_protect" && var.key_protect_instance_id == null) ? module.landing_zone[0].key_management_guid : null : null + value = var.enable_landing_zone ? var.key_management != null ? module.landing_zone[0].key_management_guid : null : null } output "cos_buckets_data" { @@ -105,33 +105,8 @@ output "cos_instance_crns" { } output "cos_buckets_names" { - description = "List of names for COS buckets created" + description = "Name of the COS Bucket created for SCC Instance" value = flatten(module.landing_zone[*].cos_bucket_names) } -output "cos_data" { - description = "COS buckets data" - value = flatten(module.landing_zone[*].cos_data) -} - -output "hmac_key_data" { - description = "COS hmac data" - value = var.enable_landing_zone ? [for key in flatten(module.landing_zone[*].cos_key_credentials_map)[0] : key] : [] -} - -output "cos_names" { - description = "List of Cloud Object Storage instance names" - value = flatten(module.landing_zone[*].cos_names) -} - -output "scale_afm_bucket_config_details" { - description = "Scale AFM COS Bucket and Configuration Details" - value = local.scale_afm_bucket_config_details -} - -output "scale_afm_cos_hmac_key_params" { - description = "Scale AFM COS HMAC Key Details" - value = local.scale_afm_cos_hmac_key_params -} - # TODO: Observability data diff --git a/modules/landing_zone/variables.tf b/modules/landing_zone/variables.tf index 92ac981d..2cfc7a32 100644 --- a/modules/landing_zone/variables.tf +++ b/modules/landing_zone/variables.tf @@ -8,15 +8,6 @@ variable "enable_landing_zone" { description = "Run landing zone module." 
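  # How this flag is consumed downstream: a minimal sketch mirroring the
  # conditional count in the modules/landing_zone/main.tf hunk above:
  #
  #   module "landing_zone" {
  #     count  = var.enable_landing_zone ? 1 : 0
  #     source = "terraform-ibm-modules/landing-zone/ibm"
  #   }
  #
  # With count = 0 the module is never instantiated, which is why the outputs
  # above guard every reference with a module.landing_zone[*] splat or a
  # var.enable_landing_zone ternary.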
}

-##############################################################################
-# Offering Variations
-##############################################################################
-variable "scheduler" {
- type = string
- default = null
- description = "Select one of the scheduler (Scale/LSF/Symphony/Slurm/null)"
-}
-
##############################################################################
# Resource Groups Variables
##############################################################################
@@ -141,17 +132,13 @@ variable "management_instances" {
variable "compute_instances" {
  type = list(
    object({
-      profile = string
-      count = number
-      image = string
-      filesystem = optional(string)
+      profile = string
+      count = number
    })
  )
  default = [{
-    profile = "cx2-2x4"
-    count = 0
-    image = "ibm-redhat-8-10-minimal-amd64-4"
-    filesystem = "/ibm/fs1"
+    profile = "cx2-2x4"
+    count = 0
  }]
  description = "Min Number of instances to be launched for compute cluster."
}
@@ -175,17 +162,13 @@ variable "storage_subnets_cidr" {
variable "storage_instances" {
  type = list(
    object({
-      profile = string
-      count = number
-      image = string
-      filesystem = optional(string)
+      profile = string
+      count = number
    })
  )
  default = [{
-    profile = "bx2d-32x128"
-    count = 0
-    image = "ibm-redhat-8-10-minimal-amd64-4"
-    filesystem = "/ibm/fs1"
+    profile = "bx2-2x8"
+    count = 3
  }]
  description = "Number of instances to be launched for storage cluster."
}
@@ -193,19 +176,15 @@ variable "storage_instances" {
variable "storage_servers" {
  type = list(
    object({
-      profile = string
-      count = number
-      image = string
-      filesystem = optional(string)
+      profile = string
+      count = number
    })
  )
  default = [{
-    profile = "cx2d-metal-96x192"
-    count = 0
-    image = "ibm-redhat-8-10-minimal-amd64-4"
-    filesystem = "/ibm/fs1"
+    profile = "cx2d-metal-96x192"
+    count = 2
  }]
- description = "Number of BareMetal Servers to be launched for storage cluster."
+ description = "Number of bare metal servers to be launched for storage cluster."
}

variable "protocol_subnets_cidr" {
@@ -228,51 +207,6 @@ variable "protocol_instances" {
  description = "Number of instances to be launched for protocol hosts."
}

-variable "afm_instances" {
- type = list(
-   object({
-     profile = string
-     count = number
-   })
- )
- default = [{
-   profile = "bx2-32x128"
-   count = 1
- }]
- description = "Number of instances to be launched for afm hosts."
-}
-
-variable "filesystem_config" {
- type = list(
-   object({
-     filesystem = string
-     block_size = string
-     default_data_replica = number
-     default_metadata_replica = number
-     max_data_replica = number
-     max_metadata_replica = number
-   })
- )
- default = null
- description = "File system configurations."
-}
-
-variable "afm_cos_config" {
- type = list(
-   object({
-     afm_fileset = string,
-     mode = string,
-     cos_instance = string,
-     bucket_name = string,
-     bucket_region = string,
-     cos_service_cred_key = string,
-     bucket_type = string,
-     bucket_storage_class = string
-   })
- )
- nullable = false
- description = "AFM configurations."
-}
##############################################################################
# Observability Variables
##############################################################################
@@ -323,27 +257,6 @@ variable "kms_key_name" {
  description = "Provide the existing KMS encryption key name that you want to use for the IBM Cloud HPC cluster. (for example kms_key_name: my-encryption-key)."
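  # A minimal sketch of how this name is consumed by the bucket kms_key fields
  # in locals.tf above; coalesce() here is an assumed equivalent spelling of
  # the null-check ternary used in the patch:
  #
  #   kms_key = var.key_management == "key_protect" ? coalesce(var.kms_key_name, format("%s-key", var.prefix)) : null
  #
  # If no key name is supplied, a prefix-derived key name is generated instead.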
} - -##Scale Encryption Variables - -variable "scale_encryption_enabled" { - type = bool - default = false - description = "To enable the encryption for the filesystem. Select true or false" -} - -variable "scale_encryption_type" { - type = string - default = null - description = "To enable filesystem encryption, specify either 'key_protect' or 'gklm'. If neither is specified, the default value will be 'null' and encryption is disabled" -} - -variable "key_protect_instance_id" { - type = string - default = null - description = "An existing Key Protect instance used for filesystem encryption" -} - # variable "hpcs_instance_name" { # type = string # default = null @@ -377,27 +290,6 @@ variable "enable_vpn" { default = false description = "The solution supports multiple ways to connect to your HPC cluster for example, using bastion node, via VPN or direct connection. If connecting to the HPC cluster via VPN, set this value to true." } - -############################################################################## -# Subnet_id Variables -############################################################################## -variable "client_subnet_id" { - type = string - description = "Name of an existing subnet for protocol nodes. If no value is given, a new subnet will be created" - default = null -} - -variable "storage_subnet_id" { - type = string - description = "Name of an existing subnet for storage nodes. If no value is given, a new subnet will be created" - default = null -} - -variable "protocol_subnet_id" { - type = string - description = "Name of an existing subnet for protocol nodes. If no value is given, a new subnet will be created" - default = null -} ############################################################################## # Landing Zone Variables ############################################################################## diff --git a/modules/landing_zone_vsi/datasource.tf b/modules/landing_zone_vsi/datasource.tf index 6f4d52b9..55b62f4b 100644 --- a/modules/landing_zone_vsi/datasource.tf +++ b/modules/landing_zone_vsi/datasource.tf @@ -1,8 +1,22 @@ +# data "ibm_resource_group" "existing_resource_group" { +# name = var.existing_resource_group +# } + data "ibm_is_image" "management_stock_image" { count = local.image_mapping_entry_found ? 0 : length(var.management_instances) name = var.management_instances[count.index]["image"] } +# data "ibm_is_image" "management" { +# name = var.management_instances[0]["image"] +# count = local.image_mapping_entry_found ? 0 : 1 +# } + +# data "ibm_is_image" "compute" { +# name = var.static_compute_instances[0]["image"] +# count = local.compute_image_found_in_map ? 1 : 0 +# } + # TODO: Verify distinct profiles /* data "ibm_is_instance_profile" "management" { @@ -19,48 +33,25 @@ data "ibm_is_instance_profile" "protocol" { */ data "ibm_is_image" "client" { - count = var.scheduler == "Scale" ? length(var.client_instances) : 0 + count = length(var.client_instances) name = var.client_instances[count.index]["image"] } -# data "ibm_is_image" "compute_stock_image" { -# count = local.compute_image_found_in_map ? 0 : length(var.static_compute_instances) -# name = var.static_compute_instances[count.index]["image"] -# } - data "ibm_is_image" "compute_stock_image" { - count = var.scheduler == "LSF" && !local.compute_image_found_in_map ? length(var.static_compute_instances) : 0 + count = local.compute_image_found_in_map ? 
0 : length(var.static_compute_instances) name = var.static_compute_instances[count.index]["image"] } -data "ibm_is_image" "scale_compute_stock_image" { - count = ( - var.scheduler == "Scale" && - !local.scale_compute_image_found_in_map - ) ? length(var.static_compute_instances) : 0 - name = var.static_compute_instances[count.index]["image"] -} - -data "ibm_is_instance_profile" "compute_profile" { - count = length(var.static_compute_instances) - name = var.static_compute_instances[count.index]["profile"] +data "ibm_is_image" "storage" { + count = length(var.storage_instances) + name = var.storage_instances[count.index]["image"] } -data "ibm_is_image" "storage_vsi" { - count = ( - var.scheduler == "Scale" && - !local.scale_storage_image_found_in_map - ) ? length(var.storage_instances) : 0 - name = var.storage_instances[count.index]["image"] -} +# data "ibm_is_image" "protocol" { +# count = length(var.protocol_instances) +# name = var.protocol_instances[count.index]["image"] +# } -data "ibm_is_image" "baremetal_storage" { - count = ( - var.scheduler == "Scale" && - !local.storage_bare_metal_image_mapping_entry_found - ) ? length(var.storage_servers) : 0 - name = var.storage_servers[count.index]["image"] -} data "ibm_is_ssh_key" "ssh_keys" { for_each = toset(var.ssh_keys) @@ -77,82 +68,36 @@ data "ibm_is_instance_profile" "storage_tie_instance" { name = var.storage_instances[count.index]["profile"] } +data "ibm_is_ssh_key" "gklm" { + for_each = toset(var.gklm_instance_key_pair) + name = each.key +} + +data "ibm_is_ssh_key" "ldap" { + for_each = toset(var.ldap_instance_key_pair) + name = each.key +} + data "ibm_is_image" "ldap_vsi_image" { count = var.enable_ldap != null && var.ldap_server == "null" ? 1 : 0 name = var.ldap_instances[count.index]["image"] } -# data "ibm_is_image" "afm_vsi" { -# count = var.scheduler == "Scale" ? ( -# (!local.scale_storage_image_found_in_map) -# ? length(var.afm_instances) -# : 0 -# ) : 0 -# name = var.afm_instances[count.index]["image"] -# } - -# data "ibm_is_image" "baremetal_afm" { -# count = var.scheduler == "Scale" ? ( -# (!local.storage_bare_metal_image_mapping_entry_found) -# ? length(var.afm_instances) -# : 0 -# ) : 0 -# name = var.afm_instances[count.index]["image"] -# } - -# data "ibm_is_image" "protocol_vsi" { -# count = var.scheduler == "Scale" ? ( -# (!local.scale_storage_image_found_in_map) -# ? length(var.protocol_instances) -# : 0 -# ) : 0 -# name = var.protocol_instances[count.index]["image"] -# } - -# data "ibm_is_image" "baremetal_protocol" { -# count = var.scheduler == "Scale" ? ( -# (!local.storage_bare_metal_image_mapping_entry_found) -# ? length(var.protocol_instances) -# : 0 -# ) : 0 -# name = var.protocol_instances[count.index]["image"] -# } +data "ibm_is_image" "afm" { + count = length(var.afm_instances) + name = var.afm_instances[count.index]["image"] +} data "ibm_is_image" "gklm" { - count = var.scheduler == "Scale" ? (var.scale_encryption_enabled && var.scale_encryption_type == "gklm" && length(var.gklm_instances) > 0 && !local.scale_encryption_image_mapping_entry_found ? 1 : 0) : 0 + count = length(var.gklm_instances) name = var.gklm_instances[count.index]["image"] } data "ibm_is_image" "login_vsi_image" { - count = var.scheduler == "LSF" ? (local.login_image_found_in_map ? 0 : 1) : 0 + count = local.login_image_found_in_map ? 0 : 1 name = var.login_instance[count.index]["image"] } data "ibm_is_dedicated_host_profiles" "profiles" { count = var.enable_dedicated_host ? 
1 : 0 } - -data "ibm_is_security_group" "storage_security_group" { - count = var.storage_security_group_name != null ? 1 : 0 - name = var.storage_security_group_name -} - -data "ibm_is_security_group" "compute_security_group" { - count = var.compute_security_group_name != null ? 1 : 0 - name = var.compute_security_group_name -} - -data "ibm_is_security_group" "gklm_security_group" { - count = var.gklm_security_group_name != null ? 1 : 0 - name = var.gklm_security_group_name -} - -data "ibm_is_security_group" "ldap_security_group" { - count = var.ldap_security_group_name != null ? 1 : 0 - name = var.ldap_security_group_name -} - -data "ibm_is_security_group" "client_security_group" { - count = var.client_security_group_name != null ? 1 : 0 - name = var.client_security_group_name -} diff --git a/modules/landing_zone_vsi/image_map.tf b/modules/landing_zone_vsi/image_map.tf index 6783e351..f58ee9d7 100644 --- a/modules/landing_zone_vsi/image_map.tf +++ b/modules/landing_zone_vsi/image_map.tf @@ -47,48 +47,6 @@ locals { "au-syd" = "r026-11aee148-c938-4524-91e6-8e6da5933a42" "br-sao" = "r042-5cb62448-e771-4caf-a556-28fdf88acab9" "ca-tor" = "r038-fa815ec1-d52e-42b2-8221-5b8c2145a248" - }, - } - storage_image_region_map = { - "hpcc-scale5232-rhel810-v1" = { - "eu-es" = "r050-7f28959f-74a4-4ad7-be30-8107da85406f" - "eu-gb" = "r018-5286d07b-527f-49a2-b0a7-2c88278349e8" - "eu-de" = "r010-1e558d55-bc2e-4e96-9164-b4b1139ba06b" - "us-east" = "r014-8befe151-c36d-4056-9955-3480210adf98" - "us-south" = "r006-7ab41080-5af0-47e5-ad44-abc18589197a" - "jp-tok" = "r022-d60e9e5f-264d-4e37-9fc0-9ad6270a054e" - "jp-osa" = "r034-eac88b73-0978-4340-9188-e28e99aeae2a" - "au-syd" = "r026-221f1bb0-1ba3-40c3-a83f-59334a2fda4b" - "br-sao" = "r042-e3d377a0-69f6-4079-9cbe-021021fb4a84" - "ca-tor" = "r038-73809daf-d414-4319-bc46-1bdd26a8e85d" - } - } - evaluation_image_region_map = { - "hpcc-scale5232-dev-rhel810" = { - "eu-es" = "r050-eb14661e-8290-4c03-a198-3e65a1b17a6b" - "eu-gb" = "r018-46ec71d2-2137-48c1-b348-a2ff0a671d91" - "eu-de" = "r010-cf5e0560-cbbf-43a6-9ba7-39fb4d4e82ff" - "us-east" = "r014-27ceeecc-c5bc-461e-a687-11e5b843274d" - "us-south" = "r006-12668685-f580-4cc8-86c5-335f1a979278" - "jp-tok" = "r022-bfe30f3f-c68f-4f61-ba90-7fbaa1a29665" - "jp-osa" = "r034-320617e2-b565-4843-bd8d-9f4bd2dd4641" - "au-syd" = "r026-ad179ec6-37a0-4d0c-9816-d065768414cf" - "br-sao" = "r042-ed759187-cd74-4d13-b475-bd0ed443197b" - "ca-tor" = "r038-90ca620e-5bf9-494e-a6ba-7e5ee663a54b" - } - } - encryption_image_region_map = { - "hpcc-scale-gklm4202-v2-5-3" = { - "eu-es" = "r050-fda24f7a-f395-487f-8179-d3c505d7fa8b" - "eu-gb" = "r018-74d533de-03b6-43ea-9f3f-dcd0d76ebb94" - "eu-de" = "r010-a5ff7b80-8ccc-451d-b384-e14bc119200f" - "us-east" = "r014-23d9f6b8-5c3f-43c5-8953-6e4cbbc01b47" - "us-south" = "r006-e12a939e-cd76-4394-bc38-4166d4df5818" - "jp-tok" = "r022-e27ef40e-82b2-481c-86c6-53032d8bda38" - "jp-osa" = "r034-a42046c2-60c3-4a43-9234-c06edd27dd84" - "au-syd" = "r026-5f90526b-5da6-4fae-ad16-33bbb5448cfc" - "br-sao" = "r042-a9a29acf-6810-4749-9c4e-757c7abb7c59" - "ca-tor" = "r038-95be651c-35a4-4b41-a629-dc46efe38442" } } } diff --git a/modules/landing_zone_vsi/locals.tf b/modules/landing_zone_vsi/locals.tf index bd9888d7..99f1755d 100644 --- a/modules/landing_zone_vsi/locals.tf +++ b/modules/landing_zone_vsi/locals.tf @@ -1,12 +1,12 @@ # define variables locals { # Future use - name = lower(var.scheduler) + # products = "scale" + name = "lsf" prefix = var.prefix tags = [local.prefix, local.name] vsi_interfaces = ["eth0", "eth1"] 
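  # The image-map lookups just below all follow one contains()/index pattern;
  # a minimal sketch using the login image entry (names as in this file):
  #
  #   login_image_found_in_map = contains(keys(local.image_region_map), var.login_instance[0]["image"])
  #   new_login_image_id       = local.login_image_found_in_map ? local.image_region_map[var.login_instance[0]["image"]][local.region] : "Image not found with the given name"
  #
  # A known catalog image name resolves to its per-region image id; any other
  # value is assumed to already be a customer-provided image id.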
- bms_interfaces = ["eth0", "eth1"] - # bms_interfaces = ["ens1", "ens2"] + bms_interfaces = ["ens1", "ens2"] # TODO: explore (DA always keep it true) skip_iam_authorization_policy = true # Region and Zone calculations @@ -29,19 +29,6 @@ locals { # If not found, assume the name is the id already (customer provided image) new_login_image_id = local.login_image_found_in_map ? local.image_region_map[var.login_instance[0]["image"]][local.region] : "Image not found with the given name" - scale_storage_image_found_in_map = contains(keys(local.storage_image_region_map), var.storage_instances[0]["image"]) - evaluation_image_id = local.evaluation_image_region_map[one(keys(local.evaluation_image_region_map))][local.region] - new_storage_image_id = var.storage_type != "evaluation" ? (local.scale_storage_image_found_in_map ? local.storage_image_region_map[var.storage_instances[0]["image"]][local.region] : "Image not found with the given name") : local.evaluation_image_id - - storage_bare_metal_image_mapping_entry_found = contains(keys(local.storage_image_region_map), var.storage_servers[0]["image"]) - storage_bare_metal_image_id = local.storage_bare_metal_image_mapping_entry_found ? local.storage_image_region_map[var.storage_servers[0]["image"]][local.region] : "Image not found with the given name" - - scale_encryption_image_mapping_entry_found = contains(keys(local.encryption_image_region_map), var.gklm_instances[0]["image"]) - scale_encryption_image_id = (var.scale_encryption_enabled == true && var.scale_encryption_type == "gklm") ? (local.scale_encryption_image_mapping_entry_found ? local.encryption_image_region_map[var.gklm_instances[0]["image"]][local.region] : "Image not found with the given name") : "Either encryption is not enabled or encryption type is not gklm" - - scale_compute_image_found_in_map = contains(keys(local.storage_image_region_map), var.static_compute_instances[0]["image"]) - scale_compute_image_id = local.scale_compute_image_found_in_map ? local.storage_image_region_map[var.static_compute_instances[0]["image"]][local.region] : "Image not found with the given name" - products = var.scheduler == "Scale" ? "scale" : "lsf" block_storage_volumes = [for volume in coalesce(var.nsd_details, []) : { name = format("nsd-%s", index(var.nsd_details, volume) + 1) @@ -76,14 +63,12 @@ locals { storage_instance_count = var.storage_type == "persistent" ? sum(var.storage_servers[*]["count"]) : sum(var.storage_instances[*]["count"]) protocol_instance_count = sum(var.protocol_instances[*]["count"]) static_compute_instance_count = sum(var.static_compute_instances[*]["count"]) - afm_instances_count = sum(var.afm_instances[*]["count"]) enable_client = local.client_instance_count > 0 enable_management = local.management_instance_count > 0 enable_compute = local.management_instance_count > 0 || local.static_compute_instance_count > 0 enable_storage = local.storage_instance_count > 0 enable_protocol = local.storage_instance_count > 0 && local.protocol_instance_count > 0 - enable_afm = local.afm_instances_count > 0 # TODO: Fix the logic enable_block_storage = var.storage_type == "scratch" ? 
true : false
@@ -91,18 +76,17 @@ locals {
  # TODO: Fix the logic
  # enable_load_balancer = false

- client_node_name = format("%s-%s", local.prefix, "client")
- management_node_name = format("%s-%s", local.prefix, "mgmt")
- compute_node_name = format("%s-%s", local.prefix, "comp")
- storage_node_name = format("%s-%s", local.prefix, "strg")
- storage_tie_breaker_node_name = format("%s-%s", local.prefix, "strg-tie")
- protocol_node_name = format("%s-%s", local.prefix, "proto")
- storage_management_node_name = format("%s-%s", local.prefix, "strg-mgmt")
- ldap_node_name = format("%s-%s", local.prefix, "ldap")
- afm_node_name = format("%s-%s", local.prefix, "afm")
- gklm_node_name = format("%s-%s", local.prefix, "gklm")
- compute_management_node_name = format("%s-%s", local.prefix, "comp-mgmt")
- login_node_name = format("%s-%s", local.prefix, "login")
+ client_node_name = format("%s-%s", local.prefix, "client")
+ management_node_name = format("%s-%s", local.prefix, "mgmt")
+ compute_node_name = format("%s-%s", local.prefix, "comp")
+ storage_node_name = format("%s-%s", local.prefix, "strg")
+ protocol_node_name = format("%s-%s", local.prefix, "proto")
+ storage_management_node_name = format("%s-%s", local.prefix, "strg-mgmt")
+ ldap_node_name = format("%s-%s", local.prefix, "ldap")
+ afm_node_name = format("%s-%s", local.prefix, "afm")
+ gklm_node_name = format("%s-%s", local.prefix, "gklm")
+ compute_management_node_name = format("%s-%s", local.prefix, "comp-mgmt")
+ login_node_name = format("%s-%s", local.prefix, "login")

  # Future use
  /*
@@ -124,16 +108,16 @@ locals {
  protocol_image_name = var.storage_image_name
  */
- # client_image_id = data.ibm_is_image.client[*].id
- # storage_image_id = data.ibm_is_image.storage[*].id
- # protocol_image_id = data.ibm_is_image.storage[*].id
- ldap_image_id = data.ibm_is_image.ldap_vsi_image[*].id
- # afm_image_id = data.ibm_is_image.afm[*].id
- # gklm_image_id = data.ibm_is_image.gklm[*].id
+ client_image_id = data.ibm_is_image.client[*].id
+ storage_image_id = data.ibm_is_image.storage[*].id
+ protocol_image_id = data.ibm_is_image.storage[*].id
+ ldap_image_id = data.ibm_is_image.ldap_vsi_image[*].id
+ afm_image_id = data.ibm_is_image.afm[*].id
+ gklm_image_id = data.ibm_is_image.gklm[*].id

- ssh_keys = [for name in var.ssh_keys : data.ibm_is_ssh_key.ssh_keys[name].id]
- #ldap_ssh_keys = [for name in var.ldap_instance_key_pair : data.ibm_is_ssh_key.ldap[name].id]
- # gklm_ssh_keys = [for name in var.gklm_instance_key_pair : data.ibm_is_ssh_key.gklm[name].id]
+ ssh_keys = [for name in var.ssh_keys : data.ibm_is_ssh_key.ssh_keys[name].id]
+ ldap_ssh_keys = [for name in var.ldap_instance_key_pair : data.ibm_is_ssh_key.ldap[name].id]
+ gklm_ssh_keys = [for name in var.gklm_instance_key_pair : data.ibm_is_ssh_key.gklm[name].id]

  # Future use
  /*
@@ -168,66 +152,22 @@ locals {
  # TODO: Multi-zone multi-vNIC VSIs deployment support (bug #https://github.ibm.com/GoldenEye/issues/issues/5830)
  # Findings: Single zone multi-vNICs VSIs deployment & multi-zone single vNIC VSIs deployment are supported.
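  # The enable_* toggles above reduce to a count-over-splat check; a minimal
  # sketch for the protocol case (locals from this file):
  #
  #   protocol_instance_count = sum(var.protocol_instances[*]["count"])
  #   enable_protocol         = local.storage_instance_count > 0 && local.protocol_instance_count > 0
  #
  # sum() over the splat keeps each toggle correct when a role is spread
  # across several instance objects with independent counts.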
client_subnets = var.client_subnets - compute_subnet_id = var.compute_subnet_id + cluster_subnet_id = var.cluster_subnet_id storage_subnets = var.storage_subnets protocol_subnets = var.protocol_subnets compute_public_key_content = one(module.compute_key[*].public_key_content) compute_private_key_content = one(module.compute_key[*].private_key_content) - storage_public_key_content = one(module.storage_key[*].public_key_content) - storage_private_key_content = one(module.storage_key[*].private_key_content) - - client_public_key_content = one(module.client_key[*].public_key_content) - client_private_key_content = one(module.client_key[*].private_key_content) - - protocol_vsi_profile = var.protocol_instances[*]["profile"] - ces_server_type = strcontains(local.protocol_vsi_profile[0], "metal") - afm_vsi_profile = var.afm_instances[*]["profile"] - afm_server_type = strcontains(local.afm_vsi_profile[0], "metal") - - sapphire_rapids_profile_check = strcontains(local.protocol_vsi_profile[0], "3-metal") || strcontains(local.protocol_vsi_profile[0], "3d-metal") - - tie_breaker_bm_server = [{ - profile = var.tie_breaker_bm_server_profile == null ? (var.storage_servers[*]["profile"])[0] : var.tie_breaker_bm_server_profile - count = 1 - image = (var.storage_servers[*]["image"])[0] - filesystem = (var.storage_servers[*]["filesystem"])[0] - }] - - user_data_vars = { - dns_domain = var.dns_domain_names["storage"], - enable_protocol = local.enable_protocol, - protocol_domain = var.dns_domain_names["protocol"], - vpc_region = var.vpc_region, - protocol_subnet_id = length(var.protocol_subnets) == 0 ? "" : var.protocol_subnets[0].id, - resource_group_id = var.resource_group, - bastion_public_key_content = base64encode(var.bastion_public_key_content != null ? var.bastion_public_key_content : ""), - storage_private_key_content = var.scheduler == "Scale" ? base64encode(module.storage_key[0].private_key_content) : "", - storage_public_key_content = var.scheduler == "Scale" ? base64encode(module.storage_key[0].public_key_content) : "" - } - - enable_sec_interface_compute = local.enable_protocol == false && data.ibm_is_instance_profile.compute_profile[0].bandwidth[0].value >= 64000 ? true : false - enable_sec_interface_storage = local.enable_protocol == false && var.storage_type != "persistent" && data.ibm_is_instance_profile.storage[0].bandwidth[0].value >= 64000 ? true : false - # Security Groups - protocol_secondary_security_group = distinct(flatten([ + protocol_secondary_security_group = flatten([ for subnet_index, subnet in local.protocol_subnets : [ for i in range(var.protocol_instances[subnet_index]["count"]) : { - security_group_id = one(var.storage_security_group_name == null ? module.storage_sg[*].security_group_id : local.storage_security_group_name_id) - interface_name = subnet["name"] - } - ] - ])) - - storage_secondary_security_group = distinct(flatten([ - for subnet_index, subnet in local.storage_subnets : [ - for i in range(var.static_compute_instances[subnet_index]["count"]) : { security_group_id = one(module.storage_sg[*].security_group_id) - interface_name = subnet["name"] + interface_name = "${subnet["name"]}-${i}" } ] - ])) + ]) # ldap_instance_image_id = var.enable_ldap == true && var.ldap_server == "null" ? data.ibm_is_image.ldap_vsi_image[0].id : "null" } @@ -328,39 +268,32 @@ locals { bastion_security_group = var.bastion_security_group_id # Security group id - client_security_group = local.client_instance_count > 0 ? (local.enable_client && var.client_security_group_name == null ? 
module.client_sg[0].security_group_id_for_ref : local.client_security_group_name_id[0]) : "" - compute_security_group = local.static_compute_instance_count > 0 ? (local.enable_compute && var.compute_security_group_name == null ? module.compute_sg[0].security_group_id_for_ref : local.compute_security_group_name_id[0]) : "" - storage_security_group = local.storage_instance_count > 0 ? (local.enable_storage && var.storage_security_group_name == null ? module.storage_sg[0].security_group_id_for_ref : local.storage_security_group_name_id[0]) : "" + client_security_group = local.enable_client ? module.client_sg[0].security_group_id_for_ref : null + compute_security_group = local.enable_compute ? module.compute_sg[0].security_group_id_for_ref : null + storage_security_group = local.enable_storage ? module.storage_sg[0].security_group_id_for_ref : null client_security_group_rules = local.enable_client ? (local.enable_compute ? [ { name = "client-allow-bastionsg-inbound", direction = "inbound", remote = local.bastion_security_group }, { name = "client-allow-clientsg-inbound", direction = "inbound", remote = local.client_security_group }, { name = "client-allow-computesg-inbound", direction = "inbound", remote = local.compute_security_group }, - { name = "client-allow-network-inbound", direction = "inbound", remote = var.cluster_cidr }, - { name = "storage-allow-storagesg-inbound", direction = "inbound", remote = local.storage_security_group }, - { name = "client-allow-all-outbound", direction = "outbound", remote = "0.0.0.0/0" } + { name = "client-allow-network-inbound", direction = "inbound", remote = var.cluster_cidr } ] : [ { name = "client-allow-bastionsg-inbound", direction = "inbound", remote = local.bastion_security_group }, { name = "client-allow-clientsg-inbound", direction = "inbound", remote = local.client_security_group }, - { name = "client-allow-network-inbound", direction = "inbound", remote = var.cluster_cidr }, - { name = "storage-allow-storagesg-inbound", direction = "inbound", remote = local.storage_security_group }, - { name = "client-allow-all-outbound", direction = "outbound", remote = "0.0.0.0/0" } + { name = "client-allow-network-inbound", direction = "inbound", remote = var.cluster_cidr } ] ) : (local.enable_compute ? 
[ { name = "client-allow-bastionsg-inbound", direction = "inbound", remote = local.bastion_security_group }, { name = "client-allow-computesg-inbound", direction = "inbound", remote = local.compute_security_group }, - { name = "client-allow-network-inbound", direction = "inbound", remote = var.cluster_cidr }, - { name = "client-allow-all-outbound", direction = "outbound", remote = "0.0.0.0/0" } + { name = "client-allow-network-inbound", direction = "inbound", remote = var.cluster_cidr } ] : [ { name = "client-allow-bastionsg-inbound", direction = "inbound", remote = local.bastion_security_group }, - { name = "client-allow-network-inbound", direction = "inbound", remote = var.cluster_cidr }, - { name = "storage-allow-storagesg-inbound", direction = "inbound", remote = local.storage_security_group }, - { name = "client-allow-all-outbound", direction = "outbound", remote = "0.0.0.0/0" } + { name = "client-allow-network-inbound", direction = "inbound", remote = var.cluster_cidr } ] ) @@ -378,7 +311,7 @@ locals { { name = "compute-allow-clientsg-inbound", direction = "inbound", remote = local.client_security_group }, { name = "compute-allow-computesg-inbound", direction = "inbound", remote = local.compute_security_group }, { name = "client-allow-network-inbound", direction = "inbound", remote = var.cluster_cidr }, - { name = "compute-allow-all-outbound", direction = "outbound", remote = "0.0.0.0/0" }, + { name = "compute-allow-all-outbound", direction = "outbound", remote = "0.0.0.0/0" } ] ) : (local.enable_storage ? [ @@ -429,32 +362,22 @@ locals { { name = "storage-allow-bastionsg-inbound", direction = "inbound", remote = local.bastion_security_group }, { name = "storage-allow-computesg-inbound", direction = "inbound", remote = local.compute_security_group }, { name = "storage-allow-storagesg-inbound", direction = "inbound", remote = local.storage_security_group }, - { name = "client-allow-network-inbound", direction = "inbound", remote = var.cluster_cidr }, - { name = "client-allow-clientsg-inbound", direction = "inbound", remote = local.client_security_group }, - { name = "client-allow-all-outbound", direction = "outbound", remote = "0.0.0.0/0" } + { name = "client-allow-network-inbound", direction = "inbound", remote = var.cluster_cidr } ] : [ { name = "storage-allow-bastionsg-inbound", direction = "inbound", remote = local.bastion_security_group }, { name = "storage-allow-computesg-inbound", direction = "inbound", remote = local.compute_security_group }, - { name = "client-allow-network-inbound", direction = "inbound", remote = var.cluster_cidr }, - { name = "client-allow-all-outbound", direction = "outbound", remote = "0.0.0.0/0" }, - { name = "compute-allow-all-outbound", direction = "outbound", remote = "0.0.0.0/0" } - + { name = "client-allow-network-inbound", direction = "inbound", remote = var.cluster_cidr } ] ) : (local.enable_storage ? 
[ { name = "storage-allow-bastionsg-inbound", direction = "inbound", remote = local.bastion_security_group }, { name = "storage-allow-storagesg-inbound", direction = "inbound", remote = local.storage_security_group }, - { name = "client-allow-network-inbound", direction = "inbound", remote = var.cluster_cidr }, - { name = "client-allow-clientsg-inbound", direction = "inbound", remote = local.client_security_group }, - { name = "client-allow-all-outbound", direction = "outbound", remote = "0.0.0.0/0" } + { name = "client-allow-network-inbound", direction = "inbound", remote = var.cluster_cidr } ] : [ { name = "storage-allow-bastionsg-inbound", direction = "inbound", remote = local.bastion_security_group }, - { name = "client-allow-network-inbound", direction = "inbound", remote = var.cluster_cidr }, - { name = "client-allow-all-outbound", direction = "outbound", remote = "0.0.0.0/0" }, - { name = "compute-allow-all-outbound", direction = "outbound", remote = "0.0.0.0/0" } - + { name = "client-allow-network-inbound", direction = "inbound", remote = var.cluster_cidr } ] ) @@ -474,11 +397,3 @@ locals { { name = "bastion-allow-client-sg", direction = "inbound", remote = local.client_security_group }] : [] )) } - -locals { - storage_security_group_name_id = var.storage_security_group_name != null ? data.ibm_is_security_group.storage_security_group[*].id : [] - client_security_group_name_id = var.client_security_group_name != null ? data.ibm_is_security_group.client_security_group[*].id : [] - gklm_security_group_name_id = var.gklm_security_group_name != null ? data.ibm_is_security_group.gklm_security_group[*].id : [] - ldap_security_group_name_id = var.ldap_security_group_name != null ? data.ibm_is_security_group.ldap_security_group[*].id : [] - compute_security_group_name_id = var.compute_security_group_name != null ? data.ibm_is_security_group.compute_security_group[*].id : [] -} diff --git a/modules/landing_zone_vsi/main.tf b/modules/landing_zone_vsi/main.tf index 1e867ad2..e0b3930f 100644 --- a/modules/landing_zone_vsi/main.tf +++ b/modules/landing_zone_vsi/main.tf @@ -4,12 +4,6 @@ module "compute_key" { # private_key_path = "./../../modules/ansible-roles/compute_id_rsa" #checkov:skip=CKV_SECRET_6 } -module "client_key" { - count = local.enable_client ? 1 : 0 - source = "./../key" - # private_key_path = "./../../modules/ansible-roles/compute_id_rsa" #checkov:skip=CKV_SECRET_6 -} - resource "null_resource" "entitlement_check" { count = var.scheduler == "Scale" && var.storage_type != "evaluation" ? 1 : 0 provisioner "local-exec" { @@ -77,7 +71,7 @@ module "storage_key" { } module "client_sg" { - count = local.enable_client && var.client_security_group_name == null ? 1 : 0 + count = local.enable_client ? 1 : 0 source = "terraform-ibm-modules/security-group/ibm" version = "2.6.2" add_ibm_cloud_internal_rules = true @@ -88,7 +82,7 @@ module "client_sg" { } module "compute_sg" { - count = local.enable_compute && var.compute_security_group_name == null ? 1 : 0 + count = local.enable_compute ? 1 : 0 source = "terraform-ibm-modules/security-group/ibm" version = "2.6.2" add_ibm_cloud_internal_rules = true @@ -99,7 +93,6 @@ module "compute_sg" { } module "bastion_sg_existing" { - count = var.login_security_group_name == null ? 1 : 0 source = "terraform-ibm-modules/security-group/ibm" version = "2.6.2" resource_group = var.resource_group @@ -123,7 +116,7 @@ module "nfs_storage_sg" { } module "storage_sg" { - count = local.enable_storage && var.storage_security_group_name == null ? 
1 : 0 + count = local.enable_storage ? 1 : 0 source = "terraform-ibm-modules/security-group/ibm" version = "2.6.2" add_ibm_cloud_internal_rules = true @@ -136,7 +129,7 @@ module "storage_sg" { module "login_vsi" { count = var.scheduler == "LSF" ? 1 : 0 source = "terraform-ibm-modules/landing-zone-vsi/ibm" - version = "5.4.6" + version = "5.0.0" vsi_per_subnet = 1 create_security_group = false security_group = null @@ -154,6 +147,7 @@ module "login_vsi" { kms_encryption_enabled = var.kms_encryption_enabled skip_iam_authorization_policy = local.skip_iam_authorization_policy boot_volume_encryption_key = var.boot_volume_encryption_key + existing_kms_instance_guid = var.existing_kms_instance_guid placement_group_id = var.placement_group_ids #placement_group_id = var.placement_group_ids[(var.management_instances[count.index]["count"])%(length(var.placement_group_ids))] } @@ -161,7 +155,7 @@ module "login_vsi" { module "management_vsi" { count = length(var.management_instances) source = "terraform-ibm-modules/landing-zone-vsi/ibm" - version = "5.4.16" + version = "5.0.0" vsi_per_subnet = var.management_instances[count.index]["count"] create_security_group = false security_group = null @@ -172,117 +166,119 @@ module "management_vsi" { enable_floating_ip = false security_group_ids = module.compute_sg[*].security_group_id ssh_key_ids = local.ssh_keys - subnets = local.compute_subnet_id + subnets = local.cluster_subnet_id tags = local.tags user_data = data.template_file.management_user_data.rendered vpc_id = var.vpc_id kms_encryption_enabled = var.kms_encryption_enabled skip_iam_authorization_policy = local.skip_iam_authorization_policy boot_volume_encryption_key = var.boot_volume_encryption_key + existing_kms_instance_guid = var.existing_kms_instance_guid placement_group_id = var.placement_group_ids } module "compute_vsi" { count = length(var.static_compute_instances) source = "terraform-ibm-modules/landing-zone-vsi/ibm" - version = "5.4.16" + version = "5.0.0" vsi_per_subnet = var.static_compute_instances[count.index]["count"] create_security_group = false security_group = null - image_id = var.scheduler == "LSF" ? (local.compute_image_found_in_map ? local.new_compute_image_id : data.ibm_is_image.compute_stock_image[0].id) : (var.storage_type != "evaluation" ? (local.scale_compute_image_found_in_map ? local.scale_compute_image_id : data.ibm_is_image.scale_compute_stock_image[0].id) : local.evaluation_image_id) + image_id = local.compute_image_found_in_map ? local.new_compute_image_id : data.ibm_is_image.compute_stock_image[0].id machine_type = var.static_compute_instances[count.index]["profile"] prefix = format("%s-%s", local.compute_node_name, count.index + 1) resource_group_id = var.resource_group enable_floating_ip = false - security_group_ids = var.compute_security_group_name == null ? module.compute_sg[*].security_group_id : local.compute_security_group_name_id + security_group_ids = module.compute_sg[*].security_group_id ssh_key_ids = local.ssh_keys - subnets = local.compute_subnet_id + subnets = local.cluster_subnet_id tags = local.tags user_data = var.scheduler == "Scale" ? data.template_file.scale_compute_user_data.rendered : data.template_file.lsf_compute_user_data.rendered vpc_id = var.vpc_id kms_encryption_enabled = var.kms_encryption_enabled skip_iam_authorization_policy = local.skip_iam_authorization_policy boot_volume_encryption_key = var.boot_volume_encryption_key + existing_kms_instance_guid = var.existing_kms_instance_guid placement_group_id = var.enable_dedicated_host ? 
null : var.placement_group_ids
  enable_dedicated_host = var.enable_dedicated_host
  dedicated_host_id = var.enable_dedicated_host && length(var.static_compute_instances) > 0 ? local.dedicated_host_map[var.static_compute_instances[count.index]["profile"]] : null
- secondary_security_groups = local.enable_sec_interface_compute ? local.storage_secondary_security_group : []
- secondary_subnets = local.enable_sec_interface_compute ? local.storage_subnets : []
- manage_reserved_ips = local.enable_sec_interface_compute ? true : false
  depends_on = [module.dedicated_host, null_resource.dedicated_host_validation]
}

module "compute_cluster_management_vsi" {
  count = var.scheduler == "Scale" && local.enable_compute ? 1 : 0
  source = "terraform-ibm-modules/landing-zone-vsi/ibm"
- version = "5.4.16"
+ version = "5.0.0"
  vsi_per_subnet = 1
  create_security_group = false
  security_group = null
- image_id = var.storage_type != "evaluation" ? (local.scale_compute_image_found_in_map ? local.scale_compute_image_id : data.ibm_is_image.scale_compute_stock_image[0].id) : local.evaluation_image_id
- machine_type = var.scale_management_vsi_profile
- prefix = count.index == 0 ? local.compute_management_node_name : format("%s-%s", local.compute_management_node_name, count.index)
+ image_id = data.ibm_is_image.compute_stock_image[0].id
+ machine_type = var.static_compute_instances[count.index]["profile"]
+ prefix = count.index == 0 ? local.compute_management_node_name : format("%s-%s", local.compute_management_node_name, count.index)
  resource_group_id = var.resource_group
  enable_floating_ip = false
- security_group_ids = var.compute_security_group_name == null ? module.compute_sg[*].security_group_id : local.compute_security_group_name_id
+ security_group_ids = module.compute_sg[*].security_group_id
  ssh_key_ids = local.ssh_keys
- subnets = local.compute_subnet_id
+ subnets = local.cluster_subnet_id
  tags = local.tags
  user_data = data.template_file.scale_compute_user_data.rendered
  vpc_id = var.vpc_id
  kms_encryption_enabled = var.kms_encryption_enabled
  skip_iam_authorization_policy = local.skip_iam_authorization_policy
  boot_volume_encryption_key = var.boot_volume_encryption_key
+ existing_kms_instance_guid = var.existing_kms_instance_guid
  placement_group_id = var.placement_group_ids
- secondary_security_groups = local.enable_sec_interface_compute ? local.storage_secondary_security_group : []
- secondary_subnets = local.enable_sec_interface_compute ? local.storage_subnets : []
- manage_reserved_ips = local.enable_sec_interface_compute ? true : false
}

module "storage_vsi" {
- count = var.scheduler == "Scale" ? (length(var.storage_instances) > 0 && var.storage_type != "persistent" ? 1 : 0) : 0
- source = "terraform-ibm-modules/landing-zone-vsi/ibm"
- version = "5.4.16"
- vsi_per_subnet = var.storage_instances[count.index]["count"]
- create_security_group = false
- security_group = null
- image_id = var.storage_type != "evaluation" ? (local.scale_storage_image_found_in_map ? local.new_storage_image_id : data.ibm_is_image.storage_vsi[0].id) : local.evaluation_image_id
- machine_type = var.storage_instances[count.index]["profile"]
- prefix = count.index == 0 ? local.storage_node_name : format("%s-%s", local.storage_node_name, count.index)
- resource_group_id = var.resource_group
- enable_floating_ip = false
- security_group_ids = var.storage_security_group_name == null ?
module.storage_sg[*].security_group_id : local.storage_security_group_name_id - ssh_key_ids = local.ssh_keys - subnets = local.storage_subnets - tags = local.tags - user_data = data.template_file.storage_user_data.rendered - vpc_id = var.vpc_id - block_storage_volumes = local.enable_block_storage ? local.block_storage_volumes : [] - kms_encryption_enabled = var.kms_encryption_enabled - skip_iam_authorization_policy = local.skip_iam_authorization_policy - boot_volume_encryption_key = var.boot_volume_encryption_key - placement_group_id = var.placement_group_ids - secondary_allow_ip_spoofing = local.enable_protocol && var.colocate_protocol_instances ? true : false - secondary_security_groups = local.enable_sec_interface_storage ? local.storage_secondary_security_group : (local.enable_protocol && var.colocate_protocol_instances) ? local.protocol_secondary_security_group : [] - secondary_subnets = local.enable_sec_interface_storage ? local.storage_subnets : local.enable_protocol && var.colocate_protocol_instances ? local.protocol_subnets : [] - manage_reserved_ips = local.enable_sec_interface_storage || (local.enable_protocol && var.colocate_protocol_instances) ? true : false - depends_on = [resource.null_resource.entitlement_check] + count = length(var.storage_instances) > 0 && var.storage_type != "persistent" ? 1 : 0 + source = "terraform-ibm-modules/landing-zone-vsi/ibm" + version = "5.0.0" + vsi_per_subnet = var.storage_instances[count.index]["count"] + create_security_group = false + security_group = null + image_id = local.storage_image_id[count.index] + machine_type = var.storage_instances[count.index]["profile"] + prefix = count.index == 0 ? local.storage_node_name : format("%s-%s", local.storage_node_name, count.index) + resource_group_id = var.resource_group + enable_floating_ip = false + security_group_ids = module.storage_sg[*].security_group_id + ssh_key_ids = local.ssh_keys + subnets = local.storage_subnets + tags = local.tags + user_data = data.template_file.storage_user_data.rendered + vpc_id = var.vpc_id + block_storage_volumes = local.enable_block_storage ? local.block_storage_volumes : [] + kms_encryption_enabled = var.kms_encryption_enabled + skip_iam_authorization_policy = local.skip_iam_authorization_policy + boot_volume_encryption_key = var.boot_volume_encryption_key + existing_kms_instance_guid = var.existing_kms_instance_guid + placement_group_id = var.placement_group_ids + secondary_allow_ip_spoofing = local.enable_protocol && var.colocate_protocol_instances ? true : false + secondary_security_groups = local.protocol_secondary_security_group + secondary_subnets = local.enable_protocol && var.colocate_protocol_instances ? local.protocol_subnets : [] + manage_reserved_ips = local.enable_protocol && var.colocate_protocol_instances ? true : false + primary_vni_additional_ip_count = local.enable_protocol && var.colocate_protocol_instances ? var.protocol_instances[count.index]["count"] : 0 + depends_on = [resource.null_resource.entitlement_check] + # manage_reserved_ips = true + # primary_vni_additional_ip_count = var.storage_instances[count.index]["count"] + # placement_group_id = var.placement_group_ids[(var.storage_instances[count.index]["count"])%(length(var.placement_group_ids))] } module "storage_cluster_management_vsi" { - count = var.scheduler == "Scale" ? 
length(var.storage_instances) : 0 + count = length(var.storage_instances) source = "terraform-ibm-modules/landing-zone-vsi/ibm" - version = "5.4.16" + version = "5.0.0" vsi_per_subnet = 1 create_security_group = false security_group = null - image_id = var.storage_type != "evaluation" ? (local.scale_storage_image_found_in_map ? local.new_storage_image_id : data.ibm_is_image.storage_vsi[0].id) : local.evaluation_image_id - machine_type = var.scale_management_vsi_profile + image_id = local.storage_image_id[count.index] + machine_type = var.management_instances[count.index]["profile"] prefix = count.index == 0 ? local.storage_management_node_name : format("%s-%s", local.storage_management_node_name, count.index) resource_group_id = var.resource_group enable_floating_ip = false - security_group_ids = var.storage_security_group_name == null ? module.storage_sg[*].security_group_id : local.storage_security_group_name_id + security_group_ids = module.storage_sg[*].security_group_id ssh_key_ids = local.ssh_keys subnets = local.storage_subnets tags = local.tags @@ -292,27 +288,27 @@ module "storage_cluster_management_vsi" { kms_encryption_enabled = var.kms_encryption_enabled skip_iam_authorization_policy = local.skip_iam_authorization_policy boot_volume_encryption_key = var.boot_volume_encryption_key + existing_kms_instance_guid = var.existing_kms_instance_guid placement_group_id = var.placement_group_ids - secondary_security_groups = local.enable_sec_interface_storage ? local.storage_secondary_security_group : [] - secondary_subnets = local.enable_sec_interface_storage ? local.storage_subnets : [] - manage_reserved_ips = local.enable_sec_interface_storage ? true : false + depends_on = [resource.null_resource.entitlement_check] + #placement_group_id = var.placement_group_ids[(var.storage_instances[count.index]["count"])%(length(var.placement_group_ids))] } module "storage_cluster_tie_breaker_vsi" { - count = var.scheduler == "Scale" ? (var.storage_type != "persistent" ? 1 : 0) : 0 + count = var.storage_type != "persistent" ? 1 : 0 source = "terraform-ibm-modules/landing-zone-vsi/ibm" - version = "5.4.16" + version = "5.0.0" vsi_per_subnet = 1 create_security_group = false security_group = null - image_id = var.storage_type != "evaluation" ? (local.scale_storage_image_found_in_map ? local.new_storage_image_id : data.ibm_is_image.storage_vsi[0].id) : local.evaluation_image_id + image_id = local.storage_image_id[count.index] machine_type = var.storage_instances[count.index]["profile"] prefix = format("%s-strg-tie", local.prefix) resource_group_id = var.resource_group enable_floating_ip = false - security_group_ids = var.storage_security_group_name == null ? module.storage_sg[*].security_group_id : local.storage_security_group_name_id + security_group_ids = module.storage_sg[*].security_group_id ssh_key_ids = local.ssh_keys - subnets = local.storage_subnets + subnets = local.storage_subnets #[local.storage_subnets[0]] tags = local.tags user_data = data.template_file.storage_user_data.rendered vpc_id = var.vpc_id @@ -320,26 +316,26 @@ module "storage_cluster_tie_breaker_vsi" { kms_encryption_enabled = var.kms_encryption_enabled skip_iam_authorization_policy = local.skip_iam_authorization_policy boot_volume_encryption_key = var.boot_volume_encryption_key + existing_kms_instance_guid = var.existing_kms_instance_guid placement_group_id = var.placement_group_ids - secondary_security_groups = local.enable_sec_interface_storage ? 
local.storage_secondary_security_group : [] - secondary_subnets = local.enable_sec_interface_storage ? local.storage_subnets : [] - manage_reserved_ips = local.enable_sec_interface_storage ? true : false - depends_on = [resource.null_resource.entitlement_check] + # manage_reserved_ips = true + # primary_vni_additional_ip_count = var.storage_instances[count.index]["count"] + # placement_group_id = var.placement_group_ids[(var.storage_instances[count.index]["count"])%(length(var.placement_group_ids))] } module "client_vsi" { - count = var.scheduler == "Scale" ? length(var.client_instances) : 0 + count = length(var.client_instances) source = "terraform-ibm-modules/landing-zone-vsi/ibm" - version = "5.4.6" + version = "5.0.0" vsi_per_subnet = var.client_instances[count.index]["count"] create_security_group = false security_group = null - image_id = data.ibm_is_image.client[0].id + image_id = local.client_image_id[count.index] machine_type = var.client_instances[count.index]["profile"] prefix = count.index == 0 ? local.client_node_name : format("%s-%s", local.client_node_name, count.index) resource_group_id = var.resource_group enable_floating_ip = false - security_group_ids = var.client_security_group_name == null ? module.client_sg[*].security_group_id : local.client_security_group_name_id + security_group_ids = module.client_sg[*].security_group_id ssh_key_ids = local.ssh_keys subnets = local.client_subnets tags = local.tags @@ -348,21 +344,23 @@ module "client_vsi" { kms_encryption_enabled = var.kms_encryption_enabled skip_iam_authorization_policy = local.skip_iam_authorization_policy boot_volume_encryption_key = var.boot_volume_encryption_key + existing_kms_instance_guid = var.existing_kms_instance_guid + depends_on = [resource.null_resource.entitlement_check] } module "protocol_vsi" { - count = var.scheduler == "Scale" ? ((local.enable_protocol && var.colocate_protocol_instances == false && local.ces_server_type == false) ? 1 : 0) : 0 + count = var.colocate_protocol_instances == true ? 0 : length(var.protocol_instances) source = "terraform-ibm-modules/landing-zone-vsi/ibm" - version = "5.4.16" + version = "5.0.0" vsi_per_subnet = var.protocol_instances[count.index]["count"] create_security_group = false security_group = null - image_id = var.storage_type != "evaluation" ? (local.scale_storage_image_found_in_map ? local.new_storage_image_id : data.ibm_is_image.storage_vsi[0].id) : local.evaluation_image_id + image_id = local.protocol_image_id[count.index] machine_type = var.protocol_instances[count.index]["profile"] prefix = count.index == 0 ? local.protocol_node_name : format("%s-%s", local.protocol_node_name, count.index) resource_group_id = var.resource_group enable_floating_ip = false - security_group_ids = var.storage_security_group_name == null ? module.storage_sg[*].security_group_id : local.storage_security_group_name_id + security_group_ids = module.storage_sg[*].security_group_id ssh_key_ids = local.ssh_keys subnets = local.storage_subnets tags = local.tags @@ -371,28 +369,32 @@ module "protocol_vsi" { kms_encryption_enabled = var.kms_encryption_enabled skip_iam_authorization_policy = local.skip_iam_authorization_policy boot_volume_encryption_key = var.boot_volume_encryption_key + existing_kms_instance_guid = var.existing_kms_instance_guid # Bug: 5847 - LB profile & subnets are not configurable # load_balancers = local.enable_load_balancer ? 
local.load_balancers : [] - secondary_allow_ip_spoofing = true - secondary_security_groups = local.protocol_secondary_security_group - secondary_subnets = local.protocol_subnets - manage_reserved_ips = true - depends_on = [resource.null_resource.entitlement_check] + secondary_allow_ip_spoofing = true + secondary_security_groups = local.protocol_secondary_security_group + secondary_subnets = local.protocol_subnets + placement_group_id = var.placement_group_ids + manage_reserved_ips = true + primary_vni_additional_ip_count = var.protocol_instances[count.index]["count"] + depends_on = [resource.null_resource.entitlement_check] + # placement_group_id = var.placement_group_ids[(var.protocol_instances[count.index]["count"])%(length(var.placement_group_ids))] } module "afm_vsi" { - count = var.scheduler == "Scale" ? ((local.afm_server_type == false && local.enable_afm) ? 1 : 0) : 0 + count = length(var.afm_instances) source = "terraform-ibm-modules/landing-zone-vsi/ibm" - version = "5.4.6" + version = "5.0.0" vsi_per_subnet = var.afm_instances[count.index]["count"] create_security_group = false security_group = null - image_id = var.storage_type != "evaluation" ? (local.scale_storage_image_found_in_map ? local.new_storage_image_id : data.ibm_is_image.storage_vsi[0].id) : local.evaluation_image_id + image_id = local.afm_image_id[count.index] machine_type = var.afm_instances[count.index]["profile"] prefix = count.index == 0 ? local.afm_node_name : format("%s-%s", local.afm_node_name, count.index) resource_group_id = var.resource_group enable_floating_ip = false - security_group_ids = var.storage_security_group_name == null ? module.storage_sg[*].security_group_id : local.storage_security_group_name_id + security_group_ids = module.storage_sg[*].security_group_id ssh_key_ids = local.ssh_keys subnets = local.storage_subnets tags = local.tags @@ -401,23 +403,25 @@ module "afm_vsi" { kms_encryption_enabled = var.kms_encryption_enabled skip_iam_authorization_policy = local.skip_iam_authorization_policy boot_volume_encryption_key = var.boot_volume_encryption_key - depends_on = [resource.null_resource.entitlement_check] + existing_kms_instance_guid = var.existing_kms_instance_guid + # manage_reserved_ips = true + # primary_vni_additional_ip_count = var.afm_instances[count.index]["count"] } module "gklm_vsi" { - count = var.scheduler == "Scale" ? (var.scale_encryption_enabled == true && var.scale_encryption_type == "gklm" ? 1 : 0) : 0 + count = var.scale_encryption_enabled == true && var.scale_encryption_type == "gklm" ? 1 : 0 source = "terraform-ibm-modules/landing-zone-vsi/ibm" - version = "5.4.6" + version = "5.0.0" vsi_per_subnet = var.gklm_instances[count.index]["count"] create_security_group = false security_group = null - image_id = local.scale_encryption_image_mapping_entry_found ? local.scale_encryption_image_id : data.ibm_is_image.gklm[0].id + image_id = local.gklm_image_id[count.index] machine_type = var.gklm_instances[count.index]["profile"] prefix = count.index == 0 ? local.gklm_node_name : format("%s-%s", local.gklm_node_name, count.index) resource_group_id = var.resource_group enable_floating_ip = false - security_group_ids = var.gklm_security_group_name == null ? 
module.storage_sg[*].security_group_id : local.gklm_security_group_name_id - ssh_key_ids = local.ssh_keys + security_group_ids = module.storage_sg[*].security_group_id + ssh_key_ids = local.gklm_ssh_keys subnets = local.storage_subnets tags = local.tags user_data = data.template_file.gklm_user_data.rendered @@ -425,12 +429,13 @@ module "gklm_vsi" { kms_encryption_enabled = var.kms_encryption_enabled skip_iam_authorization_policy = local.skip_iam_authorization_policy boot_volume_encryption_key = var.boot_volume_encryption_key + existing_kms_instance_guid = var.existing_kms_instance_guid } module "ldap_vsi" { count = var.enable_ldap == true && var.ldap_server == "null" ? 1 : 0 source = "terraform-ibm-modules/landing-zone-vsi/ibm" - version = "5.4.6" + version = "5.0.0" vsi_per_subnet = 1 create_security_group = false security_group = null @@ -439,9 +444,9 @@ module "ldap_vsi" { prefix = local.ldap_node_name resource_group_id = var.resource_group enable_floating_ip = false - security_group_ids = local.products == "lsf" ? module.compute_sg[*].security_group_id : (var.ldap_security_group_name == null ? module.storage_sg[*].security_group_id : local.ldap_security_group_name_id) - ssh_key_ids = local.ssh_keys - subnets = local.products == "lsf" ? local.compute_subnet_id : [local.storage_subnets[0]] + security_group_ids = local.products == "lsf" ? module.compute_sg[*].security_group_id : module.storage_sg[*].security_group_id + ssh_key_ids = local.products == "lsf" ? local.ssh_keys : local.ldap_ssh_keys + subnets = local.products == "lsf" ? local.cluster_subnet_id : [local.storage_subnets[0]] tags = local.tags user_data = data.template_file.ldap_user_data.rendered vpc_id = var.vpc_id @@ -449,6 +454,7 @@ module "ldap_vsi" { kms_encryption_enabled = var.kms_encryption_enabled skip_iam_authorization_policy = local.skip_iam_authorization_policy boot_volume_encryption_key = var.boot_volume_encryption_key + existing_kms_instance_guid = var.existing_kms_instance_guid placement_group_id = var.placement_group_ids #placement_group_id = var.placement_group_ids[(var.storage_instances[count.index]["count"])%(length(var.placement_group_ids))] } @@ -474,70 +480,14 @@ module "dedicated_host" { ######################################################################## module "storage_baremetal" { - count = length(var.storage_servers) > 0 && var.storage_type == "persistent" ? 1 : 0 - source = "../baremetal" - existing_resource_group = var.resource_group - image_id = local.storage_bare_metal_image_mapping_entry_found ? local.storage_bare_metal_image_id : data.ibm_is_image.baremetal_storage[0].id - prefix = format("%s-%s", local.storage_node_name, substr(local.storage_subnets[count.index].id, length(local.storage_subnets[count.index].id) - 4, 4)) - storage_subnets = [for subnet in local.storage_subnets : subnet.id] - storage_ssh_keys = local.ssh_keys - storage_servers = var.storage_servers - security_group_ids = module.storage_sg[*].security_group_id - user_data = var.bms_boot_drive_encryption == false ? data.template_file.storage_bm_user_data.rendered : templatefile("${path.module}/templates/storage_bootdrive_user_data/cloud_init.yml", local.user_data_vars) - secondary_vni_enabled = local.enable_protocol && var.colocate_protocol_instances ? true : false - protocol_subnets = local.enable_protocol && var.colocate_protocol_instances ? local.protocol_subnets : [] - secondary_security_group_ids = local.enable_protocol && var.colocate_protocol_instances ? 
module.storage_sg[*].security_group_id : [] - # manage_reserved_ips = local.enable_protocol && var.colocate_protocol_instances ? true : false - sapphire_rapids_profile_check = local.sapphire_rapids_profile_check -} -module "storage_baremetal_tie_breaker" { - count = length(var.storage_servers) > 0 && var.storage_type == "persistent" ? 1 : 0 - source = "../baremetal" - existing_resource_group = var.resource_group - image_id = local.storage_bare_metal_image_mapping_entry_found ? local.storage_bare_metal_image_id : data.ibm_is_image.baremetal_storage[0].id - prefix = format("%s-%s", local.storage_tie_breaker_node_name, substr(local.storage_subnets[count.index].id, length(local.storage_subnets[count.index].id) - 4, 4)) - storage_subnets = [for subnet in local.storage_subnets : subnet.id] - storage_ssh_keys = local.ssh_keys - storage_servers = local.tie_breaker_bm_server - security_group_ids = module.storage_sg[*].security_group_id - user_data = var.bms_boot_drive_encryption == false ? data.template_file.storage_bmtb_user_data.rendered : templatefile("${path.module}/templates/storage_tb_bootdrive_user_data/cloud_init.yml", local.user_data_vars) - secondary_vni_enabled = false - protocol_subnets = local.protocol_subnets - secondary_security_group_ids = [] - sapphire_rapids_profile_check = local.sapphire_rapids_profile_check -} - -module "protocol_baremetal_server" { - count = (var.colocate_protocol_instances == false && local.ces_server_type == true && local.enable_protocol) ? 1 : 0 - source = "../baremetal" - existing_resource_group = var.resource_group - image_id = local.storage_bare_metal_image_mapping_entry_found ? local.storage_bare_metal_image_id : data.ibm_is_image.baremetal_storage[0].id - prefix = format("%s-%s", local.protocol_node_name, substr(local.protocol_subnets[count.index].id, length(local.protocol_subnets[count.index].id) - 4, 4)) - storage_subnets = [for subnet in local.storage_subnets : subnet.id] - storage_ssh_keys = local.ssh_keys - storage_servers = var.protocol_instances - security_group_ids = module.storage_sg[*].security_group_id - user_data = var.bms_boot_drive_encryption == false ? data.template_file.protocol_bm_user_data.rendered : templatefile("${path.module}/templates/protocol_bootdrive_user_data/cloud_init.yml", local.user_data_vars) - secondary_vni_enabled = true - protocol_subnets = local.protocol_subnets - secondary_security_group_ids = module.storage_sg[*].security_group_id - sapphire_rapids_profile_check = local.sapphire_rapids_profile_check -} - -module "afm_baremetal_server" { - count = (local.afm_server_type == true && local.enable_afm) ? 1 : 0 - source = "../baremetal" - existing_resource_group = var.resource_group - image_id = local.storage_bare_metal_image_mapping_entry_found ? local.storage_bare_metal_image_id : data.ibm_is_image.baremetal_storage[0].id - prefix = format("%s-%s", local.afm_node_name, substr(local.storage_subnets[count.index].id, length(local.storage_subnets[count.index].id) - 4, 4)) - storage_subnets = [for subnet in local.storage_subnets : subnet.id] - storage_ssh_keys = local.ssh_keys - storage_servers = var.afm_instances - security_group_ids = module.storage_sg[*].security_group_id - user_data = var.bms_boot_drive_encryption == false ? 
data.template_file.afm_bm_user_data.rendered : templatefile("${path.module}/templates/afm_bootdrive_user_data/cloud_init.yml", local.user_data_vars) - secondary_vni_enabled = false - protocol_subnets = local.protocol_subnets - secondary_security_group_ids = [] - sapphire_rapids_profile_check = local.sapphire_rapids_profile_check + count = length(var.storage_servers) > 0 && var.storage_type == "persistent" ? 1 : 0 + source = "../baremetal" + existing_resource_group = var.resource_group + prefix = var.prefix + storage_subnets = [for subnet in local.storage_subnets : subnet.id] + storage_ssh_keys = local.ssh_keys + storage_servers = var.storage_servers + security_group_ids = module.storage_sg[*].security_group_id + bastion_public_key_content = var.bastion_public_key_content } diff --git a/modules/landing_zone_vsi/outputs.tf b/modules/landing_zone_vsi/outputs.tf index 7d31ef84..c8d3523d 100644 --- a/modules/landing_zone_vsi/outputs.tf +++ b/modules/landing_zone_vsi/outputs.tf @@ -31,35 +31,6 @@ output "storage_vsi_data" { output "storage_bms_data" { description = "Storage BareMetal Server data" value = flatten(module.storage_baremetal[*].list) - depends_on = [module.storage_baremetal] -} - -output "storage_bm_name_with_vol_mapping" { - description = "Storage BareMetal Server data" - value = flatten(module.storage_baremetal[*].instance_ips_with_vol_mapping) -} - -output "storage_tie_breaker_bms_data" { - description = "Storage Tie- Breaker BareMetal Server data" - value = flatten(module.storage_baremetal_tie_breaker[*].list) - depends_on = [module.storage_baremetal_tie_breaker] -} - -output "storage_tie_breaker_bms_name_with_vol_mapping" { - description = "Storage BareMetal Server data" - value = flatten(module.storage_baremetal_tie_breaker[*].instance_ips_with_vol_mapping) -} - -output "protocol_bms_data" { - description = "Protocol BareMetal Server data" - value = flatten(module.protocol_baremetal_server[*].list) - depends_on = [module.protocol_baremetal_server] -} - -output "afm_bms_data" { - description = "AFM BareMetal Server data" - value = flatten(module.afm_baremetal_server[*].list) - depends_on = [module.afm_baremetal_server] } output "storage_cluster_management_vsi" { @@ -89,18 +60,6 @@ output "compute_private_key_content" { value = one(module.compute_key[*].private_key_content) } -output "client_public_key_content" { - description = "Client public key content" - sensitive = true - value = one(module.client_key[*].public_key_content) -} - -output "client_private_key_content" { - description = "Client private key content" - sensitive = true - value = one(module.client_key[*].private_key_content) -} - output "afm_vsi_data" { description = "AFM VSI data" value = module.afm_vsi[*]["list"] diff --git a/modules/landing_zone_vsi/template_files.tf b/modules/landing_zone_vsi/template_files.tf index 911f27c0..6b1cc54f 100644 --- a/modules/landing_zone_vsi/template_files.tf +++ b/modules/landing_zone_vsi/template_files.tf @@ -1,22 +1,11 @@ data "template_file" "ldap_user_data" { template = file("${path.module}/templates/ldap_user_data.tpl") vars = { - bastion_public_key_content = var.bastion_public_key_content != null ? var.bastion_public_key_content : "" - - cluster_public_key_content = ( - var.scheduler == "LSF" && local.enable_compute ? try(local.compute_public_key_content, "") : - var.scheduler == "Scale" && local.enable_storage ? try(local.storage_public_key_content, "") : - "" - ) - - cluster_private_key_content = ( - var.scheduler == "LSF" && local.enable_compute ? 
try(local.compute_private_key_content, "") : - var.scheduler == "Scale" && local.enable_storage ? try(local.storage_private_key_content, "") : - "" - ) - - compute_interfaces = var.storage_type == "scratch" ? local.vsi_interfaces[0] : local.bms_interfaces[0] - cluster_dns_domain = var.scheduler == "LSF" && local.enable_compute ? var.dns_domain_names["compute"] : "ldap.com" + bastion_public_key_content = var.bastion_public_key_content != null ? var.bastion_public_key_content : "" + compute_public_key_content = local.enable_compute ? local.compute_public_key_content != null ? local.compute_public_key_content : "" : "" + compute_private_key_content = local.enable_compute ? local.compute_private_key_content != null ? local.compute_private_key_content : "" : "" + compute_interfaces = var.storage_type == "scratch" ? local.vsi_interfaces[0] : local.bms_interfaces[0] + compute_dns_domain = var.dns_domain_names["compute"] } } @@ -24,10 +13,10 @@ data "template_file" "client_user_data" { template = file("${path.module}/templates/client_user_data.tpl") vars = { bastion_public_key_content = var.bastion_public_key_content != null ? var.bastion_public_key_content : "" - client_public_key_content = local.enable_client ? local.client_public_key_content != null ? local.client_public_key_content : "" : "" - client_private_key_content = local.enable_client ? local.client_private_key_content != null ? local.client_private_key_content : "" : "" - client_interfaces = local.vsi_interfaces[0] - client_dns_domain = local.enable_client ? var.dns_domain_names["client"] : "" + client_public_key_content = local.enable_client ? local.compute_public_key_content != null ? local.compute_public_key_content : "" : "" + client_private_key_content = local.enable_client ? local.compute_private_key_content != null ? local.compute_private_key_content : "" : "" + client_interfaces = var.storage_type == "scratch" ? local.vsi_interfaces[0] : local.bms_interfaces[0] + client_dns_domain = var.dns_domain_names["compute"] } } @@ -70,33 +59,28 @@ data "template_file" "login_user_data" { data "template_file" "scale_compute_user_data" { template = file("${path.module}/templates/scale_compute_user_data.tpl") vars = { - bastion_public_key_content = var.bastion_public_key_content != null ? var.bastion_public_key_content : "" - compute_public_key_content = local.enable_compute ? local.compute_public_key_content != null ? local.compute_public_key_content : "" : "" - compute_private_key_content = local.enable_compute ? local.compute_private_key_content != null ? local.compute_private_key_content : "" : "" - compute_interfaces = local.vsi_interfaces[0] - compute_dns_domain = local.enable_compute ? var.dns_domain_names["compute"] : "" - storage_dns_domain = local.enable_storage && local.enable_sec_interface_compute ? var.dns_domain_names["storage"] : "" - protocol_interfaces = local.vsi_interfaces[1] - enable_sec_interface_compute = local.enable_sec_interface_compute + bastion_public_key_content = var.bastion_public_key_content != null ? var.bastion_public_key_content : "" + compute_public_key_content = local.enable_compute ? local.compute_public_key_content != null ? local.compute_public_key_content : "" : "" + compute_private_key_content = local.enable_compute ? local.compute_private_key_content != null ? local.compute_private_key_content : "" : "" + compute_interfaces = var.storage_type == "scratch" ? 
local.vsi_interfaces[0] : local.bms_interfaces[0] + compute_dns_domain = var.dns_domain_names["compute"] } } data "template_file" "storage_user_data" { template = file("${path.module}/templates/storage_user_data.tpl") vars = { - bastion_public_key_content = var.bastion_public_key_content != null ? var.bastion_public_key_content : "" - storage_public_key_content = local.enable_storage ? module.storage_key[0].public_key_content : "" - storage_private_key_content = local.enable_storage ? module.storage_key[0].private_key_content : "" - storage_interfaces = local.vsi_interfaces[0] - protocol_interfaces = local.vsi_interfaces[1] - storage_dns_domain = local.enable_storage ? var.dns_domain_names["storage"] : "" - storage_disk_type = var.storage_type == "scratch" ? data.ibm_is_instance_profile.storage[0].disks[0].quantity[0].type : "" - protocol_dns_domain = local.enable_protocol && var.colocate_protocol_instances ? var.dns_domain_names["protocol"] : "" - enable_protocol = local.enable_protocol && var.colocate_protocol_instances ? true : false - vpc_region = local.enable_protocol && var.colocate_protocol_instances ? var.vpc_region : "" - resource_group_id = local.enable_protocol && var.colocate_protocol_instances ? var.resource_group : "" - protocol_subnets = local.enable_protocol && var.colocate_protocol_instances ? (length(local.protocol_subnets) > 0 ? local.protocol_subnets[0].id : "") : "" - enable_sec_interface_storage = local.enable_sec_interface_storage + bastion_public_key_content = var.bastion_public_key_content != null ? var.bastion_public_key_content : "" + storage_public_key_content = local.enable_storage ? module.storage_key[0].public_key_content : "" + storage_private_key_content = local.enable_storage ? module.storage_key[0].private_key_content : "" + storage_interfaces = var.storage_type == "scratch" ? local.vsi_interfaces[0] : local.bms_interfaces[0] + storage_dns_domain = local.enable_storage ? var.dns_domain_names["storage"] : "" + storage_disk_type = var.storage_type == "scratch" ? data.ibm_is_instance_profile.storage[0].disks[0].quantity[0].type : "" + protocol_dns_domain = local.enable_protocol ? var.dns_domain_names["protocol"] : "" + enable_protocol = local.enable_protocol + vpc_region = var.vpc_region + resource_group_id = var.resource_group + protocol_subnets = local.enable_protocol ? local.protocol_subnets[0].id : "" } } @@ -106,13 +90,13 @@ data "template_file" "protocol_user_data" { bastion_public_key_content = var.bastion_public_key_content != null ? var.bastion_public_key_content : "" storage_public_key_content = local.enable_protocol ? module.storage_key[0].public_key_content : "" storage_private_key_content = local.enable_protocol ? module.storage_key[0].private_key_content : "" - storage_interfaces = local.vsi_interfaces[0] - protocol_interfaces = local.vsi_interfaces[1] + storage_interfaces = var.storage_type == "scratch" ? local.vsi_interfaces[0] : local.bms_interfaces[0] + protocol_interfaces = var.storage_type == "scratch" ? local.vsi_interfaces[1] : local.bms_interfaces[1] storage_dns_domain = local.enable_storage ? var.dns_domain_names["storage"] : "" protocol_dns_domain = local.enable_protocol ? var.dns_domain_names["protocol"] : "" vpc_region = var.vpc_region resource_group_id = var.resource_group - protocol_subnets = local.enable_protocol ? (length(local.protocol_subnets) > 0 ? local.protocol_subnets[0].id : "") : "" + protocol_subnets = local.enable_protocol ? 
local.protocol_subnets[0].id : "" } } @@ -122,7 +106,7 @@ data "template_file" "afm_user_data" { bastion_public_key_content = var.bastion_public_key_content != null ? var.bastion_public_key_content : "" storage_public_key_content = local.enable_storage ? module.storage_key[0].public_key_content : "" storage_private_key_content = local.enable_storage ? module.storage_key[0].private_key_content : "" - storage_interfaces = local.vsi_interfaces[0] + storage_interfaces = var.storage_type == "scratch" ? local.vsi_interfaces[0] : local.bms_interfaces[0] storage_dns_domain = local.enable_storage ? var.dns_domain_names["storage"] : "" } } @@ -133,67 +117,5 @@ data "template_file" "gklm_user_data" { bastion_public_key_content = var.bastion_public_key_content != null ? var.bastion_public_key_content : "" storage_public_key_content = local.enable_storage ? module.storage_key[0].public_key_content : "" storage_private_key_content = local.enable_storage ? module.storage_key[0].private_key_content : "" - domain_name = local.enable_storage ? var.dns_domain_names["gklm"] : "" - } -} - -data "template_file" "storage_bm_user_data" { - template = file("${path.module}/templates/storage_bm_user_data.tpl") - vars = { - bastion_public_key_content = var.bastion_public_key_content != null ? var.bastion_public_key_content : "" - storage_public_key_content = local.enable_storage ? module.storage_key[0].public_key_content : "" - storage_private_key_content = local.enable_storage ? module.storage_key[0].private_key_content : "" - storage_interfaces = local.bms_interfaces[0] - protocol_interfaces = local.bms_interfaces[1] - storage_dns_domain = local.enable_storage ? var.dns_domain_names["storage"] : "" - protocol_dns_domain = local.enable_protocol && var.colocate_protocol_instances ? var.dns_domain_names["protocol"] : "" - enable_protocol = local.enable_protocol && var.colocate_protocol_instances ? true : false - vpc_region = local.enable_protocol && var.colocate_protocol_instances ? var.vpc_region : "" - resource_group_id = local.enable_protocol && var.colocate_protocol_instances ? var.resource_group : "" - protocol_subnets = local.enable_protocol && var.colocate_protocol_instances ? (length(local.protocol_subnets) > 0 ? local.protocol_subnets[0].id : "") : "" - } -} - -data "template_file" "storage_bmtb_user_data" { - template = file("${path.module}/templates/storage_bmtb_user_data.tpl") - vars = { - bastion_public_key_content = var.bastion_public_key_content != null ? var.bastion_public_key_content : "" - storage_public_key_content = local.enable_storage ? module.storage_key[0].public_key_content : "" - storage_private_key_content = local.enable_storage ? module.storage_key[0].private_key_content : "" - storage_interfaces = local.bms_interfaces[0] - protocol_interfaces = local.bms_interfaces[1] - storage_dns_domain = local.enable_storage ? var.dns_domain_names["storage"] : "" - protocol_dns_domain = local.enable_protocol && var.colocate_protocol_instances ? var.dns_domain_names["protocol"] : "" - enable_protocol = local.enable_protocol && var.colocate_protocol_instances ? true : false - vpc_region = local.enable_protocol && var.colocate_protocol_instances ? var.vpc_region : "" - resource_group_id = local.enable_protocol && var.colocate_protocol_instances ? var.resource_group : "" - protocol_subnets = local.enable_protocol && var.colocate_protocol_instances ? (length(local.protocol_subnets) > 0 ? 
local.protocol_subnets[0].id : "") : "" - } -} - -data "template_file" "protocol_bm_user_data" { - template = file("${path.module}/templates/protocol_bm_user_data.tpl") - vars = { - bastion_public_key_content = var.bastion_public_key_content != null ? var.bastion_public_key_content : "" - storage_public_key_content = local.enable_protocol ? module.storage_key[0].public_key_content : "" - storage_private_key_content = local.enable_protocol ? module.storage_key[0].private_key_content : "" - storage_interfaces = local.bms_interfaces[0] - protocol_interfaces = local.bms_interfaces[1] - storage_dns_domain = local.enable_storage ? var.dns_domain_names["storage"] : "" - protocol_dns_domain = local.enable_protocol ? var.dns_domain_names["protocol"] : "" - vpc_region = var.vpc_region - resource_group_id = var.resource_group - protocol_subnets = local.enable_protocol ? (length(local.protocol_subnets) > 0 ? local.protocol_subnets[0].id : "") : "" - } -} - -data "template_file" "afm_bm_user_data" { - template = file("${path.module}/templates/afm_bm_user_data.tpl") - vars = { - bastion_public_key_content = var.bastion_public_key_content != null ? var.bastion_public_key_content : "" - storage_public_key_content = local.enable_afm ? module.storage_key[0].public_key_content : "" - storage_private_key_content = local.enable_afm ? module.storage_key[0].private_key_content : "" - storage_interfaces = local.bms_interfaces[0] - storage_dns_domain = local.enable_afm ? var.dns_domain_names["storage"] : "" } } diff --git a/modules/landing_zone_vsi/templates/afm_bm_user_data.tpl b/modules/landing_zone_vsi/templates/afm_bm_user_data.tpl deleted file mode 100644 index cccf8fc2..00000000 --- a/modules/landing_zone_vsi/templates/afm_bm_user_data.tpl +++ /dev/null @@ -1,32 +0,0 @@ -#!/bin/bash - -################################################### -# Copyright (C) IBM Corp. 2023 All Rights Reserved. -# Licensed under the Apache License v2.0 -################################################### - -# Setup logging -exec > >(tee /var/log/ibm_spectrumscale_user-data.log) -exec 2>&1 -set -e - -# Configure SSH -mkdir -p ~/.ssh -chmod 700 ~/.ssh -echo "${storage_public_key_content}" >> ~/.ssh/authorized_keys -echo "${bastion_public_key_content}" >> ~/.ssh/authorized_keys -echo "StrictHostKeyChecking no" >> ~/.ssh/config -echo "${storage_private_key_content}" > ~/.ssh/id_rsa -chmod 600 ~/.ssh/id_rsa ~/.ssh/authorized_keys -ethtool -L eth0 combined 16 - -# Banner configuration -echo "###########################################################################################" >> /etc/motd -echo "# You have logged in to AFM BareMetal Server. 
#" >> /etc/motd -echo "# #" >> /etc/motd -echo "# Refer: https://cloud.ibm.com/docs/vpc?topic=vpc-bare-metal-servers-storage #" >> /etc/motd -echo "###########################################################################################" >> /etc/motd - -# Create completion marker -touch /var/user_data_complete -echo "User data script completed successfully at $(date)" diff --git a/modules/landing_zone_vsi/templates/afm_bootdrive_user_data/cloud_init.yml b/modules/landing_zone_vsi/templates/afm_bootdrive_user_data/cloud_init.yml deleted file mode 100644 index e6a44322..00000000 --- a/modules/landing_zone_vsi/templates/afm_bootdrive_user_data/cloud_init.yml +++ /dev/null @@ -1,123 +0,0 @@ -#cloud-config -growpart: - mode: off - devices: ['/'] -resize_rootfs: false -write_files: - - content: | - #!/usr/bin/env bash - # Setup logging - exec > >(tee /var/log/ibm_spectrumscale_user-data.log) - exec 2>&1 - set -e - - # Configure SSH - mkdir -p ~/.ssh - chmod 700 ~/.ssh - echo "${storage_public_key_content}" | base64 --decode >> /root/.ssh/authorized_keys - echo "${bastion_public_key_content}" | base64 --decode >> /root/.ssh/authorized_keys - echo "StrictHostKeyChecking no" >> ~/.ssh/config - echo "${storage_private_key_content}" | base64 --decode >> ~/.ssh/id_rsa - chmod 600 ~/.ssh/id_rsa ~/.ssh/authorized_keys - ethtool -L eth0 combined 16 - - # Banner configuration - echo "###########################################################################################" >> /etc/motd - echo "# You have logged in to Storage BareMetal Server. #" >> /etc/motd - echo "# #" >> /etc/motd - echo "# Refer: https://cloud.ibm.com/docs/vpc?topic=vpc-bare-metal-servers-storage #" >> /etc/motd - echo "###########################################################################################" >> /etc/motd - USER=vpcuser - PACKAGE_MGR=dnf - package_list="python38 kernel-devel-$(uname -r) kernel-headers-$(uname -r) firewalld numactl jq make gcc-c++ elfutils-libelf-devel bind-utils iptables nfs-utils elfutils elfutils-devel python3-dnf-plugin-versionlock cryptsetup clevis clevis-luks clevis-dracut tpm2-tools" - $PACKAGE_MGR install -y $package_list - yum update --security -y - yum versionlock add $package_list - yum versionlock list - echo 'export PATH=$PATH:/usr/lpp/mmfs/bin' >> /root/.bashrc - # Create completion marker - touch /var/user_data_complete - echo "User data script completed successfully at $(date)" - path: /usr/local/bin/scale_user_data.sh - permissions: '0755' - - content: | - #!/bin/bash - # This script encrypts the root partition of a Redhat 8/9 stock IBM Cloud - # image using the TPM to encrypt the LUKS keys. It assumes there is plenty - # of unpartition space on the drive, and leaves the current root partition - # for rescue boot (but this could be deleted on a subsequent boot). 
- # - # * Create a new partition on the drive using all free space - # * Encrypt the new partition using LUKS with a known passphrase - # * Use 'clevis' to create an additional LUKS passphrase that is bound to the TPM - # * Re-generate initramfs via dracut to ensure the root drive is auto-unlocked on boot - # * Copy the current root filesystem to the new drive - # * Update fstab and crypttab for auto-mounting - # * Update grub to boot using the newly encrypted root drive - # - echo "Encrypt my boot drive" - # Determine the boot device (minus partition name) - # Assumes 'sdaX' or 'nvmeXnYpZ' - device=$(mount | grep "on / type" | awk '{print $1}') - if [[ "$device" =~ "nvme" ]]; then - device=$${device%??} - else - device=$${device%?} - fi - echo $device - # Create a root partition filling up the rest of the drive - echo -e 'n\np\n\n\n\nw' | fdisk $${device} - partition=$(fdisk -l $device | grep $device | tail -1 | awk '{print $1}') - echo $partition - # Setup encryption on the drive with a well known passphrase, and format the filesystem - echo -n n0tsecret | cryptsetup luksFormat --type luks2 -q --force-password $partition - echo -n n0tsecret | cryptsetup open $partition root - mkfs.xfs /dev/mapper/root - # Add the TPM key to the LUKS encrypted drive. - # For additional security, you can bind it to specific TPM PCR banks, but this will cause the TPM unlock - # to fail when the bank changes (EG firmware is updated). If you want to bind it to a PCR: - # ,"pcr_bank":"sha256","pcr_ids":"7" - echo -n n0tsecret | clevis luks bind -y -k - -d $partition tpm2 '{"hash":"sha256","key":"rsa"}' - # Regenerate dracut initramfs to allow unlock on boot - dracut -fv --regenerate-all - # Copy the OS into the encrypted partition - mkdir /mnt/encryptedroot - mount /dev/mapper/root /mnt/encryptedroot - rsync -a --exclude='/proc/*' --exclude='/sys/*' --exclude='/boot' --exclude='/mnt/encryptedroot' / /mnt/encryptedroot - # Grab the UUID for the encrypted partition and setup the crypttab - uuid=$(lsblk -lfi -o NAME,FSTYPE,UUID | grep crypto_LUKS | awk '{print $3}') - echo "root UUID=$${uuid} none luks" > /mnt/encryptedroot/etc/crypttab - # Replace root with '/dev/mapper/root / xfs defaults 0 1' in fstab - sed -i "/\t\/\t/c/dev/mapper/root\t/\txfs\tdefaults\t0\t1" /mnt/encryptedroot/etc/fstab - # Setup grub - # Grab default cmdline args - args=$(grep CMDLINE_LINUX /etc/default/grub | sed 's/.*GRUB_CMDLINE_LINUX=//' | sed 's/\"//g') - # Update grub and set the new entry to be the default. - grubby --add-kernel="/boot/vmlinuz-$(uname -r)" \ - --title="Boot from encrypted root" \ - --initrd="/boot/initramfs-$(uname -r).img" \ - --args "$${args} root=/dev/mapper/root rd.luks.name=$${uuid}=root" \ - --make-default - # Since we use EFI, copy the grubenv over (note the \cp is not a typo, - # it ensures that the 'cp' alias isn't used.) - efidir=$(ls /boot/efi/EFI/ | grep -v BOOT) - \cp -f /boot/grub2/grubenv /boot/efi/EFI/$${efidir}/ - # We MUST have a separate /boot partition to host the kernel and initramfs unencrypted - # as these are needed to unlock the root drive. The IBM CLoud RHEL 9.x images have - # a separate boot partition, but 8.x do not. - # If we dont have a separate /boot partition, we'll use the current root partition - # as /boot. So copy the current /boot content into the root of the filesystem. - if ! 
lsblk -l | grep /boot$; then - rsync -a --exclude='/efi*' /boot/ / - # Current root device UUID - it will become boot device uuid - curr_root_uuid=$(lsblk -fl | grep /$ | awk '{print $4}') - # Add the new /boot partition to fstab for auto-mounting. - echo -e "UUID=$${curr_root_uuid}\t/boot\txfs\tdefaults\t0\t0" >> /mnt/encryptedroot/etc/fstab - fi - # Reboot the system - shutdown -r now - path: /usr/local/bin/boot_drive_encryption.sh - permissions: '0755' -runcmd: - - /usr/local/bin/scale_user_data.sh - - /usr/local/bin/boot_drive_encryption.sh diff --git a/modules/landing_zone_vsi/templates/afm_user_data.tpl b/modules/landing_zone_vsi/templates/afm_user_data.tpl index acb947fd..c2f936af 100644 --- a/modules/landing_zone_vsi/templates/afm_user_data.tpl +++ b/modules/landing_zone_vsi/templates/afm_user_data.tpl @@ -8,6 +8,13 @@ #!/usr/bin/env bash exec > >(tee /var/log/ibm_spectrumscale_user-data.log) +if grep -E -q "CentOS|Red Hat" /etc/os-release +then + USER=vpcuser +elif grep -q "Ubuntu" /etc/os-release +then + USER=ubuntu +fi sed -i -e "s/^/no-port-forwarding,no-agent-forwarding,no-X11-forwarding,command=\"echo \'Please client as the user \\\\\"$USER\\\\\" rather than the user \\\\\"root\\\\\".\';echo;sleep 5; exit 142\" /" /root/.ssh/authorized_keys # input parameters @@ -17,7 +24,8 @@ echo "StrictHostKeyChecking no" >> ~/.ssh/config echo "${storage_private_key_content}" > ~/.ssh/id_rsa chmod 600 ~/.ssh/id_rsa -if grep -q "Red Hat" /etc/os-release +# if grep -q "Red Hat" /etc/os-release +if grep -E -q "CentOS|Red Hat" /etc/os-release then USER=vpcuser REQ_PKG_INSTALLED=0 @@ -79,8 +87,8 @@ echo 'export PATH=$PATH:/usr/lpp/mmfs/bin' >> /root/.bashrc echo "DOMAIN=${storage_dns_domain}" >> "/etc/sysconfig/network-scripts/ifcfg-${storage_interfaces}" echo "MTU=9000" >> "/etc/sysconfig/network-scripts/ifcfg-${storage_interfaces}" chage -I -1 -m 0 -M 99999 -E -1 -W 14 vpcuser +sleep 120 systemctl restart NetworkManager -hostnamectl set-hostname "$(hostname).${storage_dns_domain}" systemctl stop firewalld firewall-offline-cmd --zone=public --add-port=1191/tcp diff --git a/modules/landing_zone_vsi/templates/client_user_data.tpl b/modules/landing_zone_vsi/templates/client_user_data.tpl index e85c082d..24abf3d3 100644 --- a/modules/landing_zone_vsi/templates/client_user_data.tpl +++ b/modules/landing_zone_vsi/templates/client_user_data.tpl @@ -9,14 +9,13 @@ exec > >(tee /var/log/ibm_spectrumscale_user-data.log) -sed -i -e "s/^/no-port-forwarding,no-agent-forwarding,no-X11-forwarding,command=\"echo \'Please client as the user \\\\\"$USER\\\\\" rather than the user \\\\\"root\\\\\".\';echo;sleep 5; exit 142\" /" /root/.ssh/authorized_keys - -# input parameters -echo "${bastion_public_key_content}" >> ~/.ssh/authorized_keys -echo "${client_public_key_content}" >> ~/.ssh/authorized_keys -echo "StrictHostKeyChecking no" >> ~/.ssh/config -echo "${client_private_key_content}" > ~/.ssh/id_rsa -chmod 600 ~/.ssh/id_rsa +if grep -E -q "CentOS|Red Hat" /etc/os-release +then + USER=vpcuser +elif grep -q "Ubuntu" /etc/os-release +then + USER=ubuntu +fi if grep -q "Red Hat" /etc/os-release then @@ -77,8 +76,15 @@ yum versionlock add $package_list yum versionlock list echo 'export PATH=$PATH:/usr/lpp/mmfs/bin' >> /root/.bashrc +sed -i -e "s/^/no-port-forwarding,no-agent-forwarding,no-X11-forwarding,command=\"echo \'Please login as the user \\\\\"$USER\\\\\" rather than the user \\\\\"root\\\\\".\';echo;sleep 10; exit 142\" /" ~/.ssh/authorized_keys +echo "${bastion_public_key_content}" >> ~/.ssh/authorized_keys
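The OS checks in these templates rely on regex alternation; note that plain grep treats "CentOS|Red Hat" as a literal string, so the pattern only matches with grep -E. A hedged alternative that avoids regexes altogether by sourcing /etc/os-release, assuming the supported images all provide that file:

    #!/bin/bash
    # Pick the expected login user from the distro ID instead of pattern
    # matching on free-form NAME strings.
    . /etc/os-release
    case "$ID" in
      rhel|centos) USER=vpcuser ;;
      ubuntu)      USER=ubuntu ;;
      *)           USER=vpcuser ;;   # assumption: other images are RHEL-family
    esac
    echo "login user for $NAME: $USER"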
+echo "${client_public_key_content}" >> ~/.ssh/authorized_keys +echo "StrictHostKeyChecking no" >> ~/.ssh/config +echo "${client_private_key_content}" > ~/.ssh/id_rsa +chmod 600 ~/.ssh/id_rsa + echo "DOMAIN=${client_dns_domain}" >> "/etc/sysconfig/network-scripts/ifcfg-${client_interfaces}" echo "MTU=9000" >> "/etc/sysconfig/network-scripts/ifcfg-${client_interfaces}" chage -I -1 -m 0 -M 99999 -E -1 -W 14 vpcuser +sleep 120 systemctl restart NetworkManager -hostnamectl set-hostname "$(hostname).${client_dns_domain}" diff --git a/modules/landing_zone_vsi/templates/gklm_user_data.tpl b/modules/landing_zone_vsi/templates/gklm_user_data.tpl index 124288d3..cb14c0eb 100644 --- a/modules/landing_zone_vsi/templates/gklm_user_data.tpl +++ b/modules/landing_zone_vsi/templates/gklm_user_data.tpl @@ -1,21 +1,17 @@ -#!/bin/bash - ################################################### # Copyright (C) IBM Corp. 2023 All Rights Reserved. # Licensed under the Apache License v2.0 ################################################### #!/bin/bash -echo "0 $(hostname).${domain_name} 0" > /home/klmdb42/sqllib/db2nodes.cfg +echo "0 $(hostname) 0" > /home/klmdb42/sqllib/db2nodes.cfg systemctl start db2c_klmdb42.service sleep 10 systemctl status db2c_klmdb42.service sleep 10 #Copying SSH for passwordless authentication -echo "${bastion_public_key_content}" >> ~/.ssh/authorized_keys -echo "${storage_public_key_content}" >> ~/.ssh/authorized_keys -echo "StrictHostKeyChecking no" >> ~/.ssh/config echo "${storage_private_key_content}" > ~/.ssh/id_rsa chmod 600 ~/.ssh/id_rsa -hostnamectl set-hostname "$(hostname).${domain_name}" +echo "${bastion_public_key_content}" >> ~/.ssh/authorized_keys +echo "StrictHostKeyChecking no" >> ~/.ssh/config reboot diff --git a/modules/landing_zone_vsi/templates/ldap_user_data.tpl b/modules/landing_zone_vsi/templates/ldap_user_data.tpl index 4e9cd337..1ffc145f 100644 --- a/modules/landing_zone_vsi/templates/ldap_user_data.tpl +++ b/modules/landing_zone_vsi/templates/ldap_user_data.tpl @@ -18,8 +18,7 @@ sed -i -e "s/^/no-port-forwarding,no-agent-forwarding,no-X11-forwarding,command= #input parameters # input parameters echo "${bastion_public_key_content}" >> ~/.ssh/authorized_keys -echo "${cluster_public_key_content}" >> ~/.ssh/authorized_keys +echo "${compute_public_key_content}" >> ~/.ssh/authorized_keys echo "StrictHostKeyChecking no" >> ~/.ssh/config -echo "${cluster_private_key_content}" > ~/.ssh/id_rsa -hostnamectl set-hostname "$(hostname).${cluster_dns_domain}" +echo "${compute_private_key_content}" > ~/.ssh/id_rsa chmod 600 ~/.ssh/id_rsa diff --git a/modules/landing_zone_vsi/templates/protocol_bm_user_data.tpl b/modules/landing_zone_vsi/templates/protocol_bm_user_data.tpl deleted file mode 100644 index 18ad5558..00000000 --- a/modules/landing_zone_vsi/templates/protocol_bm_user_data.tpl +++ /dev/null @@ -1,32 +0,0 @@ -#!/bin/bash - -################################################### -# Copyright (C) IBM Corp. 2023 All Rights Reserved. 
-# Licensed under the Apache License v2.0 -################################################### - -# Setup logging -exec > >(tee /var/log/ibm_spectrumscale_user-data.log) -exec 2>&1 -set -e - -# Configure SSH -mkdir -p ~/.ssh -chmod 700 ~/.ssh -echo "${storage_public_key_content}" >> ~/.ssh/authorized_keys -echo "${bastion_public_key_content}" >> ~/.ssh/authorized_keys -echo "StrictHostKeyChecking no" >> ~/.ssh/config -echo "${storage_private_key_content}" > ~/.ssh/id_rsa -chmod 600 ~/.ssh/id_rsa ~/.ssh/authorized_keys -ethtool -L eth0 combined 16 - -# Banner configuration -echo "###########################################################################################" >> /etc/motd -echo "# You have logged in to Protocol BareMetal Server. #" >> /etc/motd -echo "# #" >> /etc/motd -echo "# Refer: https://cloud.ibm.com/docs/vpc?topic=vpc-bare-metal-servers-storage #" >> /etc/motd -echo "###########################################################################################" >> /etc/motd - -# Create completion marker -touch /var/user_data_complete -echo "User data script completed successfully at $(date)" diff --git a/modules/landing_zone_vsi/templates/protocol_bootdrive_user_data/cloud_init.yml b/modules/landing_zone_vsi/templates/protocol_bootdrive_user_data/cloud_init.yml deleted file mode 100644 index e6a44322..00000000 --- a/modules/landing_zone_vsi/templates/protocol_bootdrive_user_data/cloud_init.yml +++ /dev/null @@ -1,123 +0,0 @@ -#cloud-config -growpart: - mode: off - devices: ['/'] -resize_rootfs: false -write_files: - - content: | - #!/usr/bin/env bash - # Setup logging - exec > >(tee /var/log/ibm_spectrumscale_user-data.log) - exec 2>&1 - set -e - - # Configure SSH - mkdir -p ~/.ssh - chmod 700 ~/.ssh - echo "${storage_public_key_content}" | base64 --decode >> /root/.ssh/authorized_keys - echo "${bastion_public_key_content}" | base64 --decode >> /root/.ssh/authorized_keys - echo "StrictHostKeyChecking no" >> ~/.ssh/config - echo "${storage_private_key_content}" | base64 --decode >> ~/.ssh/id_rsa - chmod 600 ~/.ssh/id_rsa ~/.ssh/authorized_keys - ethtool -L eth0 combined 16 - - # Banner configuration - echo "###########################################################################################" >> /etc/motd - echo "# You have logged in to Storage BareMetal Server. #" >> /etc/motd - echo "# #" >> /etc/motd - echo "# Refer: https://cloud.ibm.com/docs/vpc?topic=vpc-bare-metal-servers-storage #" >> /etc/motd - echo "###########################################################################################" >> /etc/motd - USER=vpcuser - PACKAGE_MGR=dnf - package_list="python38 kernel-devel-$(uname -r) kernel-headers-$(uname -r) firewalld numactl jq make gcc-c++ elfutils-libelf-devel bind-utils iptables nfs-utils elfutils elfutils-devel python3-dnf-plugin-versionlock cryptsetup clevis clevis-luks clevis-dracut tpm2-tools" - $PACKAGE_MGR install -y $package_list - yum update --security -y - yum versionlock add $package_list - yum versionlock list - echo 'export PATH=$PATH:/usr/lpp/mmfs/bin' >> /root/.bashrc - # Create completion marker - touch /var/user_data_complete - echo "User data script completed successfully at $(date)" - path: /usr/local/bin/scale_user_data.sh - permissions: '0755' - - content: | - #!/bin/bash - # This script encrypts the root partition of a Redhat 8/9 stock IBM Cloud - # image using the TPM to encrypt the LUKS keys. 
It assumes there is plenty - # of unpartition space on the drive, and leaves the current root partition - # for rescue boot (but this could be deleted on a subsequent boot). - # - # * Create a new partition on the drive using all free space - # * Encrypt the new partition using LUKS with a known passphrase - # * Use 'clevis' to create an additional LUKS passphrase that is bound to the TPM - # * Re-generate initramfs via dracut to ensure the root drive is auto-unlocked on boot - # * Copy the current root filesystem to the new drive - # * Update fstab and crypttab for auto-mounting - # * Update grub to boot using the newly encrypted root drive - # - echo "Encrypt my boot drive" - # Determine the boot device (minus partition name) - # Assumes 'sdaX' or 'nvmeXnYpZ' - device=$(mount | grep "on / type" | awk '{print $1}') - if [[ "$device" =~ "nvme" ]]; then - device=$${device%??} - else - device=$${device%?} - fi - echo $device - # Create a root partition filling up the rest of the drive - echo -e 'n\np\n\n\n\nw' | fdisk $${device} - partition=$(fdisk -l $device | grep $device | tail -1 | awk '{print $1}') - echo $partition - # Setup encryption on the drive with a well known passphrase, and format the filesystem - echo -n n0tsecret | cryptsetup luksFormat --type luks2 -q --force-password $partition - echo -n n0tsecret | cryptsetup open $partition root - mkfs.xfs /dev/mapper/root - # Add the TPM key to the LUKS encrypted drive. - # For additional security, you can bind it to specific TPM PCR banks, but this will cause the TPM unlock - # to fail when the bank changes (EG firmware is updated). If you want to bind it to a PCR: - # ,"pcr_bank":"sha256","pcr_ids":"7" - echo -n n0tsecret | clevis luks bind -y -k - -d $partition tpm2 '{"hash":"sha256","key":"rsa"}' - # Regenerate dracut initramfs to allow unlock on boot - dracut -fv --regenerate-all - # Copy the OS into the encrypted partition - mkdir /mnt/encryptedroot - mount /dev/mapper/root /mnt/encryptedroot - rsync -a --exclude='/proc/*' --exclude='/sys/*' --exclude='/boot' --exclude='/mnt/encryptedroot' / /mnt/encryptedroot - # Grab the UUID for the encrypted partition and setup the crypttab - uuid=$(lsblk -lfi -o NAME,FSTYPE,UUID | grep crypto_LUKS | awk '{print $3}') - echo "root UUID=$${uuid} none luks" > /mnt/encryptedroot/etc/crypttab - # Replace root with '/dev/mapper/root / xfs defaults 0 1' in fstab - sed -i "/\t\/\t/c/dev/mapper/root\t/\txfs\tdefaults\t0\t1" /mnt/encryptedroot/etc/fstab - # Setup grub - # Grab default cmdline args - args=$(grep CMDLINE_LINUX /etc/default/grub | sed 's/.*GRUB_CMDLINE_LINUX=//' | sed 's/\"//g') - # Update grub and set the new entry to be the default. - grubby --add-kernel="/boot/vmlinuz-$(uname -r)" \ - --title="Boot from encrypted root" \ - --initrd="/boot/initramfs-$(uname -r).img" \ - --args "$${args} root=/dev/mapper/root rd.luks.name=$${uuid}=root" \ - --make-default - # Since we use EFI, copy the grubenv over (note the \cp is not a typo, - # it ensures that the 'cp' alias isn't used.) - efidir=$(ls /boot/efi/EFI/ | grep -v BOOT) - \cp -f /boot/grub2/grubenv /boot/efi/EFI/$${efidir}/ - # We MUST have a separate /boot partition to host the kernel and initramfs unencrypted - # as these are needed to unlock the root drive. The IBM CLoud RHEL 9.x images have - # a separate boot partition, but 8.x do not. - # If we dont have a separate /boot partition, we'll use the current root partition - # as /boot. So copy the current /boot content into the root of the filesystem. - if ! 
lsblk -l | grep /boot$; then - rsync -a --exclude='/efi*' /boot/ / - # Current root device UUID - it will become boot device uuid - curr_root_uuid=$(lsblk -fl | grep /$ | awk '{print $4}') - # Add the new /boot partition to fstab for auto-mounting. - echo -e "UUID=$${curr_root_uuid}\t/boot\txfs\tdefaults\t0\t0" >> /mnt/encryptedroot/etc/fstab - fi - # Reboot the system - shutdown -r now - path: /usr/local/bin/boot_drive_encryption.sh - permissions: '0755' -runcmd: - - /usr/local/bin/scale_user_data.sh - - /usr/local/bin/boot_drive_encryption.sh diff --git a/modules/landing_zone_vsi/templates/protocol_user_data.tpl b/modules/landing_zone_vsi/templates/protocol_user_data.tpl index 952600a2..4fafedc1 100644 --- a/modules/landing_zone_vsi/templates/protocol_user_data.tpl +++ b/modules/landing_zone_vsi/templates/protocol_user_data.tpl @@ -8,6 +8,13 @@ #!/usr/bin/env bash exec > >(tee /var/log/ibm_spectrumscale_user-data.log) +if grep -E -q "CentOS|Red Hat" /etc/os-release +then + USER=vpcuser +elif grep -q "Ubuntu" /etc/os-release +then + USER=ubuntu +fi sed -i -e "s/^/no-port-forwarding,no-agent-forwarding,no-X11-forwarding,command=\"echo \'Please client as the user \\\\\"$USER\\\\\" rather than the user \\\\\"root\\\\\".\';echo;sleep 5; exit 142\" /" /root/.ssh/authorized_keys # input parameters @@ -80,8 +87,8 @@ echo 'export PATH=$PATH:/usr/lpp/mmfs/bin' >> /root/.bashrc echo "DOMAIN=${storage_dns_domain}" >> "/etc/sysconfig/network-scripts/ifcfg-${storage_interfaces}" echo "MTU=9000" >> "/etc/sysconfig/network-scripts/ifcfg-${storage_interfaces}" chage -I -1 -m 0 -M 99999 -E -1 -W 14 vpcuser +sleep 120 systemctl restart NetworkManager -hostnamectl set-hostname "$(hostname).${storage_dns_domain}" systemctl stop firewalld firewall-offline-cmd --zone=public --add-port=1191/tcp diff --git a/modules/landing_zone_vsi/templates/scale_compute_user_data.tpl b/modules/landing_zone_vsi/templates/scale_compute_user_data.tpl index 27cd26ac..605ea6f8 100644 --- a/modules/landing_zone_vsi/templates/scale_compute_user_data.tpl +++ b/modules/landing_zone_vsi/templates/scale_compute_user_data.tpl @@ -1,17 +1,25 @@ -#!/usr/bin/bash - ################################################### # Copyright (C) IBM Corp. 2023 All Rights Reserved. 
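The sed one-liners in these templates prepend forced-command options to every authorized_keys entry so that key-holders cannot open a root shell. A simplified, self-contained demo of the same pattern, with the message trimmed to avoid the nested quoting of the real template:

    #!/bin/bash
    # Show the effect of the forced-command rewrite on a scratch file.
    demo=/tmp/authorized_keys.demo
    echo 'ssh-ed25519 AAAAC3Example demo-key' > "$demo"
    sed -i -e 's|^|no-port-forwarding,no-agent-forwarding,no-X11-forwarding,command="echo Please login as vpcuser, not root.;sleep 5;exit 142" |' "$demo"
    cat "$demo"   # sshd now runs the echo/exit instead of a login shell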
# Licensed under the Apache License v2.0 ################################################### +################################################################################################################## +# Scale Compute Cluster User Data +################################################################################################################## + #!/usr/bin/env bash exec > >(tee /var/log/ibm_spectrumscale_user-data.log) -sed -i -e "s/^/no-port-forwarding,no-agent-forwarding,no-X11-forwarding,command=\"echo \'Please client as the user \\\\\"$USER\\\\\" rather than the user \\\\\"root\\\\\".\';echo;sleep 5; exit 142\" /" /root/.ssh/authorized_keys +if grep -E -q "CentOS|Red Hat" /etc/os-release +then + USER=vpcuser +elif grep -q "Ubuntu" /etc/os-release +then + USER=ubuntu +fi -# input parameters +sed -i -e "s/^/no-port-forwarding,no-agent-forwarding,no-X11-forwarding,command=\"echo \'Please login as the user \\\\\"$USER\\\\\" rather than the user \\\\\"root\\\\\".\';echo;sleep 10; exit 142\" /" ~/.ssh/authorized_keys echo "${bastion_public_key_content}" >> ~/.ssh/authorized_keys echo "${compute_public_key_content}" >> ~/.ssh/authorized_keys echo "StrictHostKeyChecking no" >> ~/.ssh/config @@ -80,8 +88,8 @@ echo 'export PATH=$PATH:/usr/lpp/mmfs/bin' >> /root/.bashrc echo "DOMAIN=${compute_dns_domain}" >> "/etc/sysconfig/network-scripts/ifcfg-${compute_interfaces}" echo "MTU=9000" >> "/etc/sysconfig/network-scripts/ifcfg-${compute_interfaces}" chage -I -1 -m 0 -M 99999 -E -1 -W 14 vpcuser +sleep 120 systemctl restart NetworkManager -hostnamectl set-hostname "$(hostname).${compute_dns_domain}" systemctl stop firewalld firewall-offline-cmd --zone=public --add-port=1191/tcp @@ -101,12 +109,3 @@ firewall-offline-cmd --zone=public --add-service=https systemctl start firewalld systemctl enable firewalld - -if [ "${enable_sec_interface_compute}" == true ]; then - sec_interface=$(nmcli -t con show --active | grep eth1 | cut -d ':' -f 1) - nmcli conn del "$sec_interface" - nmcli con add type ethernet con-name eth1 ifname eth1 - echo "DOMAIN=\"${storage_dns_domain}\"" >> "/etc/sysconfig/network-scripts/ifcfg-${protocol_interfaces}" - echo "MTU=9000" >> "/etc/sysconfig/network-scripts/ifcfg-${protocol_interfaces}" - systemctl restart NetworkManager -fi diff --git a/modules/landing_zone_vsi/templates/storage_bm_user_data.tpl b/modules/landing_zone_vsi/templates/storage_bm_user_data.tpl deleted file mode 100644 index bfdad10a..00000000 --- a/modules/landing_zone_vsi/templates/storage_bm_user_data.tpl +++ /dev/null @@ -1,32 +0,0 @@ -#!/bin/bash - -################################################### -# Copyright (C) IBM Corp. 2023 All Rights Reserved. -# Licensed under the Apache License v2.0 -################################################### - -# Setup logging -exec > >(tee /var/log/ibm_spectrumscale_user-data.log) -exec 2>&1 -set -e - -# Configure SSH -mkdir -p ~/.ssh -chmod 700 ~/.ssh -echo "${storage_public_key_content}" >> ~/.ssh/authorized_keys -echo "${bastion_public_key_content}" >> ~/.ssh/authorized_keys -echo "StrictHostKeyChecking no" >> ~/.ssh/config -echo "${storage_private_key_content}" > ~/.ssh/id_rsa -chmod 600 ~/.ssh/id_rsa ~/.ssh/authorized_keys -ethtool -L eth0 combined 16 - -# Banner configuration -echo "###########################################################################################" >> /etc/motd -echo "# You have logged in to Storage BareMetal Server.
#" >> /etc/motd -echo "# #" >> /etc/motd -echo "# Refer: https://cloud.ibm.com/docs/vpc?topic=vpc-bare-metal-servers-storage #" >> /etc/motd -echo "###########################################################################################" >> /etc/motd - -# Create completion marker -touch /var/user_data_complete -echo "User data script completed successfully at $(date)" diff --git a/modules/landing_zone_vsi/templates/storage_bmtb_user_data.tpl b/modules/landing_zone_vsi/templates/storage_bmtb_user_data.tpl deleted file mode 100644 index bfdad10a..00000000 --- a/modules/landing_zone_vsi/templates/storage_bmtb_user_data.tpl +++ /dev/null @@ -1,32 +0,0 @@ -#!/bin/bash - -################################################### -# Copyright (C) IBM Corp. 2023 All Rights Reserved. -# Licensed under the Apache License v2.0 -################################################### - -# Setup logging -exec > >(tee /var/log/ibm_spectrumscale_user-data.log) -exec 2>&1 -set -e - -# Configure SSH -mkdir -p ~/.ssh -chmod 700 ~/.ssh -echo "${storage_public_key_content}" >> ~/.ssh/authorized_keys -echo "${bastion_public_key_content}" >> ~/.ssh/authorized_keys -echo "StrictHostKeyChecking no" >> ~/.ssh/config -echo "${storage_private_key_content}" > ~/.ssh/id_rsa -chmod 600 ~/.ssh/id_rsa ~/.ssh/authorized_keys -ethtool -L eth0 combined 16 - -# Banner configuration -echo "###########################################################################################" >> /etc/motd -echo "# You have logged in to Storage BareMetal Server. #" >> /etc/motd -echo "# #" >> /etc/motd -echo "# Refer: https://cloud.ibm.com/docs/vpc?topic=vpc-bare-metal-servers-storage #" >> /etc/motd -echo "###########################################################################################" >> /etc/motd - -# Create completion marker -touch /var/user_data_complete -echo "User data script completed successfully at $(date)" diff --git a/modules/landing_zone_vsi/templates/storage_bootdrive_user_data/cloud_init.yml b/modules/landing_zone_vsi/templates/storage_bootdrive_user_data/cloud_init.yml deleted file mode 100644 index e6a44322..00000000 --- a/modules/landing_zone_vsi/templates/storage_bootdrive_user_data/cloud_init.yml +++ /dev/null @@ -1,123 +0,0 @@ -#cloud-config -growpart: - mode: off - devices: ['/'] -resize_rootfs: false -write_files: - - content: | - #!/usr/bin/env bash - # Setup logging - exec > >(tee /var/log/ibm_spectrumscale_user-data.log) - exec 2>&1 - set -e - - # Configure SSH - mkdir -p ~/.ssh - chmod 700 ~/.ssh - echo "${storage_public_key_content}" | base64 --decode >> /root/.ssh/authorized_keys - echo "${bastion_public_key_content}" | base64 --decode >> /root/.ssh/authorized_keys - echo "StrictHostKeyChecking no" >> ~/.ssh/config - echo "${storage_private_key_content}" | base64 --decode >> ~/.ssh/id_rsa - chmod 600 ~/.ssh/id_rsa ~/.ssh/authorized_keys - ethtool -L eth0 combined 16 - - # Banner configuration - echo "###########################################################################################" >> /etc/motd - echo "# You have logged in to Storage BareMetal Server. 
#" >> /etc/motd - echo "# #" >> /etc/motd - echo "# Refer: https://cloud.ibm.com/docs/vpc?topic=vpc-bare-metal-servers-storage #" >> /etc/motd - echo "###########################################################################################" >> /etc/motd - USER=vpcuser - PACKAGE_MGR=dnf - package_list="python38 kernel-devel-$(uname -r) kernel-headers-$(uname -r) firewalld numactl jq make gcc-c++ elfutils-libelf-devel bind-utils iptables nfs-utils elfutils elfutils-devel python3-dnf-plugin-versionlock cryptsetup clevis clevis-luks clevis-dracut tpm2-tools" - $PACKAGE_MGR install -y $package_list - yum update --security -y - yum versionlock add $package_list - yum versionlock list - echo 'export PATH=$PATH:/usr/lpp/mmfs/bin' >> /root/.bashrc - # Create completion marker - touch /var/user_data_complete - echo "User data script completed successfully at $(date)" - path: /usr/local/bin/scale_user_data.sh - permissions: '0755' - - content: | - #!/bin/bash - # This script encrypts the root partition of a Redhat 8/9 stock IBM Cloud - # image using the TPM to encrypt the LUKS keys. It assumes there is plenty - # of unpartition space on the drive, and leaves the current root partition - # for rescue boot (but this could be deleted on a subsequent boot). - # - # * Create a new partition on the drive using all free space - # * Encrypt the new partition using LUKS with a known passphrase - # * Use 'clevis' to create an additional LUKS passphrase that is bound to the TPM - # * Re-generate initramfs via dracut to ensure the root drive is auto-unlocked on boot - # * Copy the current root filesystem to the new drive - # * Update fstab and crypttab for auto-mounting - # * Update grub to boot using the newly encrypted root drive - # - echo "Encrypt my boot drive" - # Determine the boot device (minus partition name) - # Assumes 'sdaX' or 'nvmeXnYpZ' - device=$(mount | grep "on / type" | awk '{print $1}') - if [[ "$device" =~ "nvme" ]]; then - device=$${device%??} - else - device=$${device%?} - fi - echo $device - # Create a root partition filling up the rest of the drive - echo -e 'n\np\n\n\n\nw' | fdisk $${device} - partition=$(fdisk -l $device | grep $device | tail -1 | awk '{print $1}') - echo $partition - # Setup encryption on the drive with a well known passphrase, and format the filesystem - echo -n n0tsecret | cryptsetup luksFormat --type luks2 -q --force-password $partition - echo -n n0tsecret | cryptsetup open $partition root - mkfs.xfs /dev/mapper/root - # Add the TPM key to the LUKS encrypted drive. - # For additional security, you can bind it to specific TPM PCR banks, but this will cause the TPM unlock - # to fail when the bank changes (EG firmware is updated). 
If you want to bind it to a PCR: - # ,"pcr_bank":"sha256","pcr_ids":"7" - echo -n n0tsecret | clevis luks bind -y -k - -d $partition tpm2 '{"hash":"sha256","key":"rsa"}' - # Regenerate dracut initramfs to allow unlock on boot - dracut -fv --regenerate-all - # Copy the OS into the encrypted partition - mkdir /mnt/encryptedroot - mount /dev/mapper/root /mnt/encryptedroot - rsync -a --exclude='/proc/*' --exclude='/sys/*' --exclude='/boot' --exclude='/mnt/encryptedroot' / /mnt/encryptedroot - # Grab the UUID for the encrypted partition and setup the crypttab - uuid=$(lsblk -lfi -o NAME,FSTYPE,UUID | grep crypto_LUKS | awk '{print $3}') - echo "root UUID=$${uuid} none luks" > /mnt/encryptedroot/etc/crypttab - # Replace root with '/dev/mapper/root / xfs defaults 0 1' in fstab - sed -i "/\t\/\t/c/dev/mapper/root\t/\txfs\tdefaults\t0\t1" /mnt/encryptedroot/etc/fstab - # Setup grub - # Grab default cmdline args - args=$(grep CMDLINE_LINUX /etc/default/grub | sed 's/.*GRUB_CMDLINE_LINUX=//' | sed 's/\"//g') - # Update grub and set the new entry to be the default. - grubby --add-kernel="/boot/vmlinuz-$(uname -r)" \ - --title="Boot from encrypted root" \ - --initrd="/boot/initramfs-$(uname -r).img" \ - --args "$${args} root=/dev/mapper/root rd.luks.name=$${uuid}=root" \ - --make-default - # Since we use EFI, copy the grubenv over (note the \cp is not a typo, - # it ensures that the 'cp' alias isn't used.) - efidir=$(ls /boot/efi/EFI/ | grep -v BOOT) - \cp -f /boot/grub2/grubenv /boot/efi/EFI/$${efidir}/ - # We MUST have a separate /boot partition to host the kernel and initramfs unencrypted - # as these are needed to unlock the root drive. The IBM CLoud RHEL 9.x images have - # a separate boot partition, but 8.x do not. - # If we dont have a separate /boot partition, we'll use the current root partition - # as /boot. So copy the current /boot content into the root of the filesystem. - if ! lsblk -l | grep /boot$; then - rsync -a --exclude='/efi*' /boot/ / - # Current root device UUID - it will become boot device uuid - curr_root_uuid=$(lsblk -fl | grep /$ | awk '{print $4}') - # Add the new /boot partition to fstab for auto-mounting. 
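At this point the script has written the new crypttab and fstab under /mnt/encryptedroot; a quick sketch for checking the result before the reboot, with paths assuming the mounts set up by the script above:

    #!/bin/bash
    # Sanity-check the encrypted-root wiring before rebooting.
    cat /mnt/encryptedroot/etc/crypttab        # expect: root UUID=<luks-uuid> none luks
    grep '/dev/mapper/root' /mnt/encryptedroot/etc/fstab
    lsblk -o NAME,FSTYPE,MOUNTPOINT            # LUKS partition and mapper device visible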
- echo -e "UUID=$${curr_root_uuid}\t/boot\txfs\tdefaults\t0\t0" >> /mnt/encryptedroot/etc/fstab - fi - # Reboot the system - shutdown -r now - path: /usr/local/bin/boot_drive_encryption.sh - permissions: '0755' -runcmd: - - /usr/local/bin/scale_user_data.sh - - /usr/local/bin/boot_drive_encryption.sh diff --git a/modules/landing_zone_vsi/templates/storage_tb_bootdrive_user_data/cloud_init.yml b/modules/landing_zone_vsi/templates/storage_tb_bootdrive_user_data/cloud_init.yml deleted file mode 100644 index e6a44322..00000000 --- a/modules/landing_zone_vsi/templates/storage_tb_bootdrive_user_data/cloud_init.yml +++ /dev/null @@ -1,123 +0,0 @@ -#cloud-config -growpart: - mode: off - devices: ['/'] -resize_rootfs: false -write_files: - - content: | - #!/usr/bin/env bash - # Setup logging - exec > >(tee /var/log/ibm_spectrumscale_user-data.log) - exec 2>&1 - set -e - - # Configure SSH - mkdir -p ~/.ssh - chmod 700 ~/.ssh - echo "${storage_public_key_content}" | base64 --decode >> /root/.ssh/authorized_keys - echo "${bastion_public_key_content}" | base64 --decode >> /root/.ssh/authorized_keys - echo "StrictHostKeyChecking no" >> ~/.ssh/config - echo "${storage_private_key_content}" | base64 --decode >> ~/.ssh/id_rsa - chmod 600 ~/.ssh/id_rsa ~/.ssh/authorized_keys - ethtool -L eth0 combined 16 - - # Banner configuration - echo "###########################################################################################" >> /etc/motd - echo "# You have logged in to Storage BareMetal Server. #" >> /etc/motd - echo "# #" >> /etc/motd - echo "# Refer: https://cloud.ibm.com/docs/vpc?topic=vpc-bare-metal-servers-storage #" >> /etc/motd - echo "###########################################################################################" >> /etc/motd - USER=vpcuser - PACKAGE_MGR=dnf - package_list="python38 kernel-devel-$(uname -r) kernel-headers-$(uname -r) firewalld numactl jq make gcc-c++ elfutils-libelf-devel bind-utils iptables nfs-utils elfutils elfutils-devel python3-dnf-plugin-versionlock cryptsetup clevis clevis-luks clevis-dracut tpm2-tools" - $PACKAGE_MGR install -y $package_list - yum update --security -y - yum versionlock add $package_list - yum versionlock list - echo 'export PATH=$PATH:/usr/lpp/mmfs/bin' >> /root/.bashrc - # Create completion marker - touch /var/user_data_complete - echo "User data script completed successfully at $(date)" - path: /usr/local/bin/scale_user_data.sh - permissions: '0755' - - content: | - #!/bin/bash - # This script encrypts the root partition of a Redhat 8/9 stock IBM Cloud - # image using the TPM to encrypt the LUKS keys. It assumes there is plenty - # of unpartition space on the drive, and leaves the current root partition - # for rescue boot (but this could be deleted on a subsequent boot). 
- # - # * Create a new partition on the drive using all free space - # * Encrypt the new partition using LUKS with a known passphrase - # * Use 'clevis' to create an additional LUKS passphrase that is bound to the TPM - # * Re-generate initramfs via dracut to ensure the root drive is auto-unlocked on boot - # * Copy the current root filesystem to the new drive - # * Update fstab and crypttab for auto-mounting - # * Update grub to boot using the newly encrypted root drive - # - echo "Encrypt my boot drive" - # Determine the boot device (minus partition name) - # Assumes 'sdaX' or 'nvmeXnYpZ' - device=$(mount | grep "on / type" | awk '{print $1}') - if [[ "$device" =~ "nvme" ]]; then - device=$${device%??} - else - device=$${device%?} - fi - echo $device - # Create a root partition filling up the rest of the drive - echo -e 'n\np\n\n\n\nw' | fdisk $${device} - partition=$(fdisk -l $device | grep $device | tail -1 | awk '{print $1}') - echo $partition - # Setup encryption on the drive with a well known passphrase, and format the filesystem - echo -n n0tsecret | cryptsetup luksFormat --type luks2 -q --force-password $partition - echo -n n0tsecret | cryptsetup open $partition root - mkfs.xfs /dev/mapper/root - # Add the TPM key to the LUKS encrypted drive. - # For additional security, you can bind it to specific TPM PCR banks, but this will cause the TPM unlock - # to fail when the bank changes (e.g. firmware is updated). If you want to bind it to a PCR: - # ,"pcr_bank":"sha256","pcr_ids":"7" - echo -n n0tsecret | clevis luks bind -y -k - -d $partition tpm2 '{"hash":"sha256","key":"rsa"}' - # Regenerate dracut initramfs to allow unlock on boot - dracut -fv --regenerate-all - # Copy the OS into the encrypted partition - mkdir /mnt/encryptedroot - mount /dev/mapper/root /mnt/encryptedroot - rsync -a --exclude='/proc/*' --exclude='/sys/*' --exclude='/boot' --exclude='/mnt/encryptedroot' / /mnt/encryptedroot - # Grab the UUID for the encrypted partition and setup the crypttab - uuid=$(lsblk -lfi -o NAME,FSTYPE,UUID | grep crypto_LUKS | awk '{print $3}') - echo "root UUID=$${uuid} none luks" > /mnt/encryptedroot/etc/crypttab - # Replace root with '/dev/mapper/root / xfs defaults 0 1' in fstab - sed -i "/\t\/\t/c/dev/mapper/root\t/\txfs\tdefaults\t0\t1" /mnt/encryptedroot/etc/fstab - # Setup grub - # Grab default cmdline args - args=$(grep CMDLINE_LINUX /etc/default/grub | sed 's/.*GRUB_CMDLINE_LINUX=//' | sed 's/\"//g') - # Update grub and set the new entry to be the default. - grubby --add-kernel="/boot/vmlinuz-$(uname -r)" \ - --title="Boot from encrypted root" \ - --initrd="/boot/initramfs-$(uname -r).img" \ - --args "$${args} root=/dev/mapper/root rd.luks.name=$${uuid}=root" \ - --make-default - # Since we use EFI, copy the grubenv over (note the \cp is not a typo, - # it ensures that the 'cp' alias isn't used.) - efidir=$(ls /boot/efi/EFI/ | grep -v BOOT) - \cp -f /boot/grub2/grubenv /boot/efi/EFI/$${efidir}/ - # We MUST have a separate /boot partition to host the kernel and initramfs unencrypted - # as these are needed to unlock the root drive. The IBM Cloud RHEL 9.x images have - # a separate boot partition, but 8.x do not. - # If we don't have a separate /boot partition, we'll use the current root partition - # as /boot. So copy the current /boot content into the root of the filesystem. - if !
lsblk -l | grep /boot$; then - rsync -a --exclude='/efi*' /boot/ / - # Current root device UUID - it will become boot device uuid - curr_root_uuid=$(lsblk -fl | grep /$ | awk '{print $4}') - # Add the new /boot partition to fstab for auto-mounting. - echo -e "UUID=$${curr_root_uuid}\t/boot\txfs\tdefaults\t0\t0" >> /mnt/encryptedroot/etc/fstab - fi - # Reboot the system - shutdown -r now - path: /usr/local/bin/boot_drive_encryption.sh - permissions: '0755' -runcmd: - - /usr/local/bin/scale_user_data.sh - - /usr/local/bin/boot_drive_encryption.sh diff --git a/modules/landing_zone_vsi/templates/storage_user_data.tpl b/modules/landing_zone_vsi/templates/storage_user_data.tpl index 8dfcd250..3b33285e 100644 --- a/modules/landing_zone_vsi/templates/storage_user_data.tpl +++ b/modules/landing_zone_vsi/templates/storage_user_data.tpl @@ -8,6 +8,13 @@ #!/usr/bin/env bash exec > >(tee /var/log/ibm_spectrumscale_user-data.log) +if grep -E -q "CentOS|Red Hat" /etc/os-release +then + USER=vpcuser +elif grep -q "Ubuntu" /etc/os-release +then + USER=ubuntu +fi sed -i -e "s/^/no-port-forwarding,no-agent-forwarding,no-X11-forwarding,command=\"echo \'Please login as the user \\\\\"$USER\\\\\" rather than the user \\\\\"root\\\\\".\';echo;sleep 5; exit 142\" /" /root/.ssh/authorized_keys # input parameters @@ -17,7 +24,8 @@ echo "StrictHostKeyChecking no" >> ~/.ssh/config echo "${storage_private_key_content}" > ~/.ssh/id_rsa chmod 600 ~/.ssh/id_rsa -if grep -q "Red Hat" /etc/os-release +# if grep -q "Red Hat" /etc/os-release +if grep -E -q "CentOS|Red Hat" /etc/os-release then USER=vpcuser REQ_PKG_INSTALLED=0 @@ -92,8 +100,8 @@ fi echo "DOMAIN=${storage_dns_domain}" >> "/etc/sysconfig/network-scripts/ifcfg-${storage_interfaces}" echo "MTU=9000" >> "/etc/sysconfig/network-scripts/ifcfg-${storage_interfaces}" chage -I -1 -m 0 -M 99999 -E -1 -W 14 vpcuser +sleep 120 systemctl restart NetworkManager -hostnamectl set-hostname "$(hostname).${storage_dns_domain}" systemctl stop firewalld firewall-offline-cmd --zone=public --add-port=1191/tcp @@ -114,21 +122,12 @@ firewall-offline-cmd --zone=public --add-port=30000-61000/udp systemctl start firewalld systemctl enable firewalld -if [ "${enable_sec_interface_storage}" == true ]; then - sec_interface=$(nmcli -t con show --active | grep eth1 | cut -d ':' -f 1) - nmcli conn del "$sec_interface" - nmcli con add type ethernet con-name eth1 ifname eth1 - echo "DOMAIN=\"${storage_dns_domain}\"" >> "/etc/sysconfig/network-scripts/ifcfg-${protocol_interfaces}" - echo "MTU=9000" >> "/etc/sysconfig/network-scripts/ifcfg-${protocol_interfaces}" - systemctl restart NetworkManager -fi - if [ "${enable_protocol}" == true ]; then sec_interface=$(nmcli -t con show --active | grep eth1 | cut -d ':' -f 1) nmcli conn del "$sec_interface" nmcli con add type ethernet con-name eth1 ifname eth1 - echo "DOMAIN=${protocol_dns_domain}" >> "/etc/sysconfig/network-scripts/ifcfg-${protocol_interfaces}" - echo "MTU=9000" >> "/etc/sysconfig/network-scripts/ifcfg-${protocol_interfaces}" + echo "DOMAIN=\"${protocol_dns_domain}\"" >> "/etc/sysconfig/network-scripts/ifcfg-eth1" + echo "MTU=9000" >> "/etc/sysconfig/network-scripts/ifcfg-eth1" systemctl restart NetworkManager ###### TODO: Fix Me ###### echo 'export IC_REGION=${vpc_region}' >> /root/.bashrc diff --git a/modules/landing_zone_vsi/variables.tf b/modules/landing_zone_vsi/variables.tf index a2a52378..b22f7e97 100644 --- a/modules/landing_zone_vsi/variables.tf +++ b/modules/landing_zone_vsi/variables.tf @@ -111,7 +111,7 @@ variable
"client_instances" { description = "Number of instances to be launched for client." } -variable "compute_subnet_id" { +variable "cluster_subnet_id" { type = list(object({ name = string id = string @@ -215,8 +215,8 @@ variable "storage_instances" { }) ) default = [{ - profile = "bx2d-32x128" - count = 0 + profile = "bx2-2x8" + count = 2 image = "ibm-redhat-8-10-minimal-amd64-4" filesystem_name = "fs1" }] @@ -229,7 +229,7 @@ variable "storage_servers" { profile = string count = number image = string - filesystem = optional(string) + filesystem = string }) ) default = [{ @@ -241,17 +241,6 @@ variable "storage_servers" { description = "Number of BareMetal Servers to be launched for storage cluster." } -variable "tie_breaker_bm_server_profile" { - type = string - default = null - description = "Specify the bare metal server profile type name to be used for creating the bare metal Tie breaker node. If no value is provided, the storage bare metal server profile will be used as the default. For more information, see [bare metal server profiles](https://cloud.ibm.com/docs/vpc?topic=vpc-bare-metal-servers-profile&interface=ui). [Tie Breaker Node](https://www.ibm.com/docs/en/storage-scale/5.2.2?topic=quorum-node-tiebreaker-disks)" -} - -variable "scale_management_vsi_profile" { - type = string - description = "The virtual server instance profile type name to be used to create the Management node. For more information, see [Instance Profiles](https://cloud.ibm.com/docs/vpc?topic=vpc-profiles&interface=ui)." -} - variable "protocol_subnets" { type = list(object({ name = string @@ -268,11 +257,13 @@ variable "protocol_instances" { object({ profile = string count = number + image = string }) ) default = [{ profile = "bx2-2x8" count = 2 + image = "ibm-redhat-8-10-minimal-amd64-4" }] description = "Number of instances to be launched for protocol hosts." } @@ -334,6 +325,12 @@ variable "boot_volume_encryption_key" { description = "CRN of boot volume encryption key" } +variable "existing_kms_instance_guid" { + type = string + default = null + description = "The existing KMS instance guid." +} + ############################################################################## # TODO: Auth Server (LDAP/AD) Variables ############################################################################## @@ -373,11 +370,11 @@ variable "ldap_server" { description = "Provide the IP address for the existing LDAP server. If no address is given, a new LDAP server will be created." } -# variable "ldap_instance_key_pair" { -# type = list(string) -# default = null -# description = "Name of the SSH key configured in your IBM Cloud account that is used to establish a connection to the LDAP Server. Make sure that the SSH key is present in the same resource group and region where the LDAP Servers are provisioned. If you do not have an SSH key in your IBM Cloud account, create one by using the [SSH keys](https://cloud.ibm.com/docs/vpc?topic=vpc-ssh-keys) instructions." -# } +variable "ldap_instance_key_pair" { + type = list(string) + default = null + description = "Name of the SSH key configured in your IBM Cloud account that is used to establish a connection to the LDAP Server. Make sure that the SSH key is present in the same resource group and region where the LDAP Servers are provisioned. If you do not have an SSH key in your IBM Cloud account, create one by using the [SSH keys](https://cloud.ibm.com/docs/vpc?topic=vpc-ssh-keys) instructions." 
+} variable "ldap_instances" { type = list( @@ -398,11 +395,13 @@ variable "afm_instances" { object({ profile = string count = number + image = string }) ) default = [{ profile = "bx2-32x128" count = 1 + image = "ibm-redhat-8-10-minimal-amd64-4" }] description = "Number of instances to be launched for AFM hosts." } @@ -422,6 +421,12 @@ variable "scale_encryption_type" { description = "To enable filesystem encryption, specify either 'key_protect' or 'gklm'. If neither is specified, the default value will be 'null' and encryption is disabled" } +variable "gklm_instance_key_pair" { + type = list(string) + default = null + description = "The key pair to use to launch the GKLM host." +} + variable "gklm_instances" { type = list( object({ @@ -447,7 +452,7 @@ variable "vpc_region" { variable "scheduler" { type = string default = null - description = "Select one of the schedulers (Scale/LSF/Symphony/Slurm/null)" + description = "Select one of the schedulers (LSF/Symphony/Slurm/null)" } variable "ibm_customer_number" { @@ -494,45 +499,3 @@ variable "bastion_subnets" { default = [] description = "Subnets to launch the bastion host." } - -variable "bms_boot_drive_encryption" { - type = bool - default = false - description = "To enable encryption for the boot drive of the bare metal server. Select true or false." -} - -variable "login_security_group_name" { - type = string - default = null - description = "Provide the security group name to provision the bastion node. If set to null, the solution will automatically create the necessary security group and rules. If you choose to use an existing security group, ensure it has the appropriate rules configured for the bastion node to function properly." -} - -variable "storage_security_group_name" { - type = string - default = null - description = "Provide the security group name to provision the storage nodes. If set to null, the solution will automatically create the necessary security group and rules. If you choose to use an existing security group, ensure it has the appropriate rules configured for the storage nodes to function properly." -} - -variable "compute_security_group_name" { - type = string - default = null - description = "Provide the security group name to provision the compute nodes. If set to null, the solution will automatically create the necessary security group and rules. If you choose to use an existing security group, ensure it has the appropriate rules configured for the compute nodes to function properly." -} - -variable "client_security_group_name" { - type = string - default = null - description = "Provide the security group name to provision the client nodes. If set to null, the solution will automatically create the necessary security group and rules. If you choose to use an existing security group, ensure it has the appropriate rules configured for the client nodes to function properly." -} - -variable "gklm_security_group_name" { - type = string - default = null - description = "Provide the security group name to provision the gklm nodes. If set to null, the solution will automatically create the necessary security group and rules. If you choose to use an existing security group, ensure it has the appropriate rules configured for the gklm nodes to function properly." -} - -variable "ldap_security_group_name" { - type = string - default = null - description = "Provide the security group name to provision the ldap nodes. If set to null, the solution will automatically create the necessary security group and rules.
If you choose to use an existing security group, ensure it has the appropriate rules configured for the ldap nodes to function properly." -} diff --git a/modules/playbook/main.tf b/modules/playbook/main.tf index 191834a5..00f34915 100644 --- a/modules/playbook/main.tf +++ b/modules/playbook/main.tf @@ -11,7 +11,6 @@ locals { lsf_hostentry_playbook_path = format("%s/lsf_host_entry_play.yml", var.playbooks_path) remove_hostentry_playbooks_path = format("%s/remove_host_entry_play.yml", var.playbooks_path) lsf_prerequesite_playbook_path = format("%s/lsf_prerequesite_play.yml", var.playbooks_path) - lsfd_self_healing_playbook_path = format("%s/lsfd_self_healing_playbook_path.yml", var.playbooks_path) deployer_host = jsonencode(var.deployer_host) mgmnt_hosts = jsonencode(var.mgmnt_hosts) comp_hosts = jsonencode(var.comp_hosts) @@ -105,7 +104,7 @@ resource "null_resource" "deploy_host_playbook" { resource "local_file" "lsf_host_entry_playbook" { count = var.inventory_path != null && var.scheduler == "LSF" ? 1 : 0 content = <> "$LOGFILE" - sudo systemctl restart lsfd - crontab -l | grep -v "check_lsfd.sh" | crontab - - fi - owner: lsfadmin - group: lsfadmin - mode: '0755' - - - name: Schedule cron job for self-healing - ansible.builtin.cron: - name: "LSFD Self-Healing" - user: lsfadmin - minute: "*/2" - job: /home/lsfadmin/check_lsfd.sh -EOT - filename = local.lsfd_self_healing_playbook_path -} - -resource "null_resource" "execute_lsfd_self_healing" { - count = var.inventory_path != null && var.scheduler == "LSF" ? 1 : 0 - provisioner "local-exec" { - interpreter = ["/bin/bash", "-c"] - command = "sudo ansible-playbook -i ${var.inventory_path} ${local.lsfd_self_healing_playbook_path}" - } - triggers = { - build = timestamp() - } - depends_on = [ - local_file.lsfd_self_healing_playbook, - null_resource.remove_host_entry_play - ] + depends_on = [local_file.remove_host_entry_playbook, null_resource.run_playbook_for_mgmt_config, null_resource.run_ldap_client_playbooks] } diff --git a/modules/playbook/variables.tf b/modules/playbook/variables.tf index 6dbe4153..0407fe44 100644 --- a/modules/playbook/variables.tf +++ b/modules/playbook/variables.tf @@ -73,7 +73,7 @@ variable "cloudlogs_provision" { variable "scheduler" { default = null type = string - description = "Select one of the scheduler (Scale/LSF/Symphony/Slurm/null)" + description = "Select one of the scheduler (LSF/Symphony/Slurm/null)" } variable "mgmnt_hosts" { diff --git a/modules/prepare_tf_input/locals.tf b/modules/prepare_tf_input/locals.tf index 885d41be..9ae9dcce 100644 --- a/modules/prepare_tf_input/locals.tf +++ b/modules/prepare_tf_input/locals.tf @@ -7,17 +7,15 @@ locals { list_ssh_keys = jsonencode(var.ssh_keys) list_storage_instances = jsonencode(var.storage_instances) list_storage_servers = jsonencode(var.storage_servers) - list_tie_breaker_bm_server = jsonencode(var.tie_breaker_bm_server_profile) - list_scale_management_vsi_profile = jsonencode(var.scale_management_vsi_profile) list_management_instances = jsonencode(var.management_instances) list_protocol_instances = jsonencode(var.protocol_instances) list_compute_instances = jsonencode(var.static_compute_instances) list_client_instances = jsonencode(var.client_instances) remote_allowed_ips = jsonencode(var.remote_allowed_ips) - list_storage_subnet_id = jsonencode(length(var.storage_subnet_id) == 0 ? null : var.storage_subnet_id) - list_client_subnet_id = jsonencode(length(var.client_subnet_id) == 0 ? 
null : var.client_subnet_id) - list_protocol_subnet_id = jsonencode(length(var.protocol_subnet_id) == 0 ? null : var.protocol_subnet_id) - list_compute_subnet_id = jsonencode(length(var.compute_subnet_id) == 0 ? null : var.compute_subnet_id) + list_storage_subnets = jsonencode(length(var.storage_subnets) == 0 ? null : var.storage_subnets) + list_protocol_subnets = jsonencode(length(var.protocol_subnets) == 0 ? null : var.protocol_subnets) + list_cluster_subnet_id = jsonencode(length(var.cluster_subnet_id) == 0 ? null : var.cluster_subnet_id) + list_client_subnets = jsonencode(length(var.client_subnets) == 0 ? null : var.client_subnets) list_login_subnet_ids = jsonencode(length(var.login_subnet_id) == 0 ? null : var.login_subnet_id) dns_domain_names = jsonencode(var.dns_domain_names) dynamic_compute_instances = jsonencode(var.dynamic_compute_instances) @@ -31,25 +29,18 @@ locals { list_ldap_instances = jsonencode(var.ldap_instance) ldap_server = jsonencode(var.ldap_server) ldap_basedns = jsonencode(var.ldap_basedns) + list_ldap_ssh_keys = jsonencode(var.ldap_instance_key_pair) list_afm_instances = jsonencode(var.afm_instances) afm_cos_config_details = jsonencode(var.afm_cos_config) + list_gklm_ssh_keys = jsonencode(var.gklm_instance_key_pair) list_gklm_instances = jsonencode(var.gklm_instances) scale_encryption_type = jsonencode(var.scale_encryption_type) filesystem_config = jsonencode(var.filesystem_config) scale_encryption_admin_password = jsonencode(var.scale_encryption_admin_password) - key_protect_instance_id = jsonencode(var.key_protect_instance_id) custom_file_shares = jsonencode(var.custom_file_shares) resource_group_ids = jsonencode(var.resource_group_ids) existing_bastion_instance_name = jsonencode(var.existing_bastion_instance_name == null ? null : var.existing_bastion_instance_name) existing_bastion_security_group_id = jsonencode(var.existing_bastion_security_group_id == null ? null : var.existing_bastion_security_group_id) login_instance = jsonencode(var.login_instance) - filesets_config = jsonencode(var.filesets_config) - login_security_group_name = jsonencode(var.login_security_group_name == null ? null : var.login_security_group_name) - storage_security_group_name = jsonencode(var.storage_security_group_name == null ? null : var.storage_security_group_name) - compute_security_group_name = jsonencode(var.compute_security_group_name == null ? null : var.compute_security_group_name) - client_security_group_name = jsonencode(var.client_security_group_name == null ? null : var.client_security_group_name) - gklm_security_group_name = jsonencode(var.gklm_security_group_name == null ? null : var.gklm_security_group_name) - ldap_security_group_name = jsonencode(var.ldap_security_group_name == null ? 
null : var.ldap_security_group_name) - scale_afm_bucket_config_details = jsonencode(var.scale_afm_bucket_config_details) - scale_afm_cos_hmac_key_params = jsonencode(var.scale_afm_cos_hmac_key_params) + } diff --git a/modules/prepare_tf_input/main.tf b/modules/prepare_tf_input/main.tf index b3020f10..2f45d195 100644 --- a/modules/prepare_tf_input/main.tf +++ b/modules/prepare_tf_input/main.tf @@ -15,8 +15,6 @@ resource "local_sensitive_file" "prepare_tf_input" { "ssh_keys": ${local.list_ssh_keys}, "storage_instances": ${local.list_storage_instances}, "storage_servers": ${local.list_storage_servers}, - "tie_breaker_bm_server_profile": ${local.list_tie_breaker_bm_server}, - "scale_management_vsi_profile": ${local.list_scale_management_vsi_profile}, "storage_type": "${var.storage_type}", "management_instances": ${local.list_management_instances}, "protocol_instances": ${local.list_protocol_instances}, @@ -25,22 +23,16 @@ resource "local_sensitive_file" "prepare_tf_input" { "static_compute_instances": ${local.list_compute_instances}, "dynamic_compute_instances": ${local.dynamic_compute_instances}, "client_instances": ${local.list_client_instances}, - "login_security_group_name": ${local.login_security_group_name}, - "storage_security_group_name": ${local.storage_security_group_name}, - "compute_security_group_name": ${local.compute_security_group_name}, - "client_security_group_name": ${local.client_security_group_name}, - "gklm_security_group_name": ${local.gklm_security_group_name}, - "ldap_security_group_name": ${local.ldap_security_group_name}, "enable_cos_integration": ${var.enable_cos_integration}, "enable_atracker": ${var.enable_atracker}, "enable_vpc_flow_logs": ${var.enable_vpc_flow_logs}, "remote_allowed_ips": ${local.remote_allowed_ips}, "vpc_name": "${var.vpc_name}", - "compute_subnet_id": ${local.list_compute_subnet_id}, + "storage_subnets": ${local.list_storage_subnets}, + "protocol_subnets": ${local.list_protocol_subnets}, + "cluster_subnet_id": ${local.list_cluster_subnet_id}, + "client_subnets": ${local.list_client_subnets}, "login_subnet_id": ${local.list_login_subnet_ids}, - "protocol_subnet_id": ${local.list_protocol_subnet_id}, - "storage_subnet_id": ${local.list_storage_subnet_id}, - "client_subnet_id": ${local.list_client_subnet_id}, "dns_domain_names": ${local.dns_domain_names}, "key_management": ${local.key_management}, "kms_instance_name" : ${local.kms_instance_name}, @@ -59,6 +51,7 @@ resource "local_sensitive_file" "prepare_tf_input" { "enable_ldap": ${var.enable_ldap}, "ldap_server": ${local.ldap_server}, "ldap_basedns": ${local.ldap_basedns}, + "ldap_instance_key_pair": ${local.list_ldap_ssh_keys}, "ldap_admin_password": "${var.ldap_admin_password}", "ldap_user_name": "${var.ldap_user_name}", "ldap_user_password": "${var.ldap_user_password}", @@ -67,15 +60,10 @@ resource "local_sensitive_file" "prepare_tf_input" { "afm_cos_config": ${local.afm_cos_config_details}, "scale_encryption_enabled": ${var.scale_encryption_enabled}, "scale_encryption_type": ${local.scale_encryption_type}, + "gklm_instance_key_pair": ${local.list_gklm_ssh_keys}, "gklm_instances": ${local.list_gklm_instances}, - "scale_encryption_admin_password": ${local.scale_encryption_admin_password}, - "key_protect_instance_id": ${local.key_protect_instance_id}, + "scale_encryption_admin_password": "${local.scale_encryption_admin_password}", "filesystem_config": ${local.filesystem_config}, - "filesets_config": ${local.filesets_config}, - "storage_gui_username": "${var.storage_gui_username}", - 
"storage_gui_password": "${var.storage_gui_password}", - "compute_gui_username": "${var.compute_gui_username}", - "compute_gui_password": "${var.compute_gui_password}", "enable_hyperthreading": ${var.enable_hyperthreading}, "cloud_logs_data_bucket": ${var.cloud_logs_data_bucket}, "cloud_metrics_data_bucket": ${var.cloud_metrics_data_bucket}, @@ -94,14 +82,11 @@ resource "local_sensitive_file" "prepare_tf_input" { "custom_file_shares": ${local.custom_file_shares}, "login_instance": ${local.login_instance}, "vpc_cluster_private_subnets_cidr_blocks": "${var.vpc_cluster_private_subnets_cidr_blocks}", - "bms_boot_drive_encryption": ${var.bms_boot_drive_encryption}, "existing_resource_group": "${var.existing_resource_group}", "sccwp_service_plan": "${var.sccwp_service_plan}", "sccwp_enable": ${var.sccwp_enable}, "cspm_enabled": ${var.cspm_enabled}, - "app_config_plan": "${var.app_config_plan}", - "scale_afm_bucket_config_details": ${local.scale_afm_bucket_config_details}, - "scale_afm_cos_hmac_key_params": ${local.scale_afm_cos_hmac_key_params} + "app_config_plan": "${var.app_config_plan}" } EOT filename = local.schematics_inputs_path diff --git a/modules/prepare_tf_input/variables.tf b/modules/prepare_tf_input/variables.tf index d439db95..45b9b377 100644 --- a/modules/prepare_tf_input/variables.tf +++ b/modules/prepare_tf_input/variables.tf @@ -47,6 +47,11 @@ variable "resource_group_ids" { ############################################################################## # Compute Variables ############################################################################## +variable "client_subnets" { + type = list(string) + default = null + description = "Name of an existing subnets in which the cluster resources will be deployed. If no value is given, then new subnet(s) will be provisioned for the cluster. [Learn more](https://cloud.ibm.com/docs/vpc)" +} variable "ssh_keys" { type = list(string) @@ -65,7 +70,7 @@ variable "client_instances" { description = "Number of instances to be launched for client." } -variable "compute_subnet_id" { +variable "cluster_subnet_id" { type = string default = null description = "Name of an existing subnets in which the cluster resources will be deployed. If no value is given, then new subnet(s) will be provisioned for the cluster. [Learn more](https://cloud.ibm.com/docs/vpc)" @@ -85,10 +90,9 @@ variable "management_instances" { variable "static_compute_instances" { type = list( object({ - profile = string - count = number - image = string - filesystem = optional(string) + profile = string + count = number + image = string }) ) description = "Min Number of instances to be launched for compute cluster." @@ -117,6 +121,11 @@ variable "login_subnet_id" { ############################################################################## # Storage Variables ############################################################################## +variable "storage_subnets" { + type = list(string) + default = null + description = "Name of an existing subnets in which the cluster resources will be deployed. If no value is given, then new subnet(s) will be provisioned for the cluster. [Learn more](https://cloud.ibm.com/docs/vpc)" +} variable "storage_instances" { type = list( @@ -127,12 +136,6 @@ variable "storage_instances" { filesystem = optional(string) }) ) - default = [{ - profile = "bx2d-32x128" - count = 0 - image = "ibm-redhat-8-10-minimal-amd64-4" - filesystem_name = "fs1" - }] description = "Number of instances to be launched for storage cluster." 
} @@ -142,7 +145,7 @@ variable "storage_servers" { profile = string count = number image = string - filesystem = optional(string) + filesystem = string }) ) default = [{ @@ -154,57 +157,21 @@ variable "storage_servers" { description = "Number of BareMetal Servers to be launched for storage cluster." } -variable "tie_breaker_bm_server_profile" { - type = string - default = null - description = "Specify the bare metal server profile type name to be used for creating the bare metal Tie breaker node. If no value is provided, the storage bare metal server profile will be used as the default. For more information, see [bare metal server profiles](https://cloud.ibm.com/docs/vpc?topic=vpc-bare-metal-servers-profile&interface=ui). [Tie Breaker Node](https://www.ibm.com/docs/en/storage-scale/5.2.2?topic=quorum-node-tiebreaker-disks)" -} - -variable "scale_management_vsi_profile" { - type = string - description = "The virtual server instance profile type name to be used to create the Management node. For more information, see [Instance Profiles](https://cloud.ibm.com/docs/vpc?topic=vpc-profiles&interface=ui)." -} - variable "protocol_instances" { type = list( object({ profile = string count = number + image = string }) ) description = "Number of instances to be launched for protocol hosts." } -############################################################################## -# Scale GUI Variables -############################################################################## - -variable "storage_gui_username" { - type = string - default = "null" - sensitive = true - description = "GUI user to perform system management and monitoring tasks on storage cluster." -} - -variable "storage_gui_password" { - type = string - default = "null" - sensitive = true - description = "Password for storage cluster GUI" -} - -variable "compute_gui_username" { - type = string - default = "null" - sensitive = true - description = "GUI user to perform system management and monitoring tasks on compute cluster." -} - -variable "compute_gui_password" { - type = string - default = "null" - sensitive = true - description = "Password for compute cluster GUI" +variable "protocol_subnets" { + type = list(string) + default = null + description = "Names of existing subnets in which the cluster resources will be deployed. If no value is given, then new subnet(s) will be provisioned for the cluster. [Learn more](https://cloud.ibm.com/docs/vpc)" } ############################################################################## @@ -281,9 +248,6 @@ variable "enable_vpc_flow_logs" { description = "Enable VPC flow logs" } -############################################################################## -# SCC Variables -############################################################################## variable "enable_atracker" { type = bool default = false @@ -320,10 +284,6 @@ variable "bastion_fip" { description = "Bastion node floating IP (FIP)" } -############################################################################## -# SCC Variables -############################################################################## - variable "cloud_logs_data_bucket" { type = any default = null @@ -482,11 +442,11 @@ variable "ldap_user_password" { description = "The LDAP user password should be 8 to 20 characters long, with a mix of at least three alphabetic characters, including one uppercase and one lowercase letter.
It must also include two numerical digits and at least one special character from (~@_+:). It is important to avoid including the username in the password for enhanced security. [This value is ignored for an existing LDAP server]." } -# variable "ldap_instance_key_pair" { -# type = list(string) -# default = null -# description = "Name of the SSH key configured in your IBM Cloud account that is used to establish a connection to the LDAP Server. Make sure that the SSH key is present in the same resource group and region where the LDAP Servers are provisioned. If you do not have an SSH key in your IBM Cloud account, create one by using the [SSH keys](https://cloud.ibm.com/docs/vpc?topic=vpc-ssh-keys) instructions." -# } +variable "ldap_instance_key_pair" { + type = list(string) + default = null + description = "Name of the SSH key configured in your IBM Cloud account that is used to establish a connection to the LDAP Server. Make sure that the SSH key is present in the same resource group and region where the LDAP Servers are provisioned. If you do not have an SSH key in your IBM Cloud account, create one by using the [SSH keys](https://cloud.ibm.com/docs/vpc?topic=vpc-ssh-keys) instructions." +} variable "ldap_instance" { type = list( @@ -517,6 +477,12 @@ variable "scale_encryption_type" { description = "To enable filesystem encryption, specify either 'key_protect' or 'gklm'. If neither is specified, the default value will be 'null' and encryption is disabled" } +variable "gklm_instance_key_pair" { + type = list(string) + default = null + description = "The key pair to use to launch the GKLM host." +} + variable "gklm_instances" { type = list( object({ @@ -533,18 +499,24 @@ variable "gklm_instances" { description = "Number of instances to be launched for GKLM hosts." } +# variable "scale_encryption_admin_default_password" { +# type = string +# default = null +# description = "The default administrator password that was configured during the GKLM installation; it is used for resetting the admin password based on the user input." +# } + +# variable "scale_encryption_admin_username" { +# type = string +# default = null +# description = "The default Admin username for Security Key Lifecycle Manager (GKLM)." +# } + variable "scale_encryption_admin_password" { type = string default = null description = "Password that is used for performing administrative operations for the GKLM. The password must contain at least 8 characters and at most 20 characters. For a strong password, at least three alphabetic characters are required, with at least one uppercase and one lowercase letter, two numbers, and at least one special character from this set (~@_+:). Make sure that the password doesn't include the username. Visit this [page](https://www.ibm.com/docs/en/gklm/3.0.1?topic=roles-password-policy) to know more about the password policy of GKLM." } -variable "key_protect_instance_id" { - type = string - default = null - description = "An existing Key Protect instance used for filesystem encryption" -} - variable "storage_type" { type = string default = "scratch" @@ -562,6 +534,7 @@ variable "afm_instances" { object({ profile = string count = number + image = string }) ) description = "Number of instances to be launched for AFM hosts." @@ -580,32 +553,10 @@ variable "afm_cos_config" { bucket_storage_class = string }) ) - nullable = false + default = null description = "AFM configurations."
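For context, the afm_cos_config list above is typically supplied as a single-element list. A hypothetical value, written as an auto-loaded tfvars JSON file for illustration (field names follow the sample values files later in this patch; the bucket and instance names are placeholders):

    cat > afm.auto.tfvars.json <<'EOF'
    {
      "afm_cos_config": [{
        "afm_fileset": "afm_fileset",
        "mode": "iw",
        "cos_instance": "my-cos-instance",
        "bucket_name": "my-afm-bucket",
        "bucket_region": "us-south",
        "cos_service_cred_key": "",
        "bucket_storage_class": "smart",
        "bucket_type": "region_location"
      }]
    }
    EOF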
} -variable "scale_afm_bucket_config_details" { - description = "Scale AFM COS Bucket and Configuration Details" - type = list(object({ - bucket = string - endpoint = string - fileset = string - filesystem = string - mode = string - })) - default = null -} - -variable "scale_afm_cos_hmac_key_params" { - description = "Scale AFM COS HMAC Key Details" - type = list(object({ - akey = string - bucket = string - skey = string - })) - default = null -} - variable "filesystem_config" { type = list( object({ @@ -615,23 +566,13 @@ variable "filesystem_config" { default_metadata_replica = number max_data_replica = number max_metadata_replica = number + mount_point = string }) ) default = null description = "File system configurations." } -variable "filesets_config" { - type = list( - object({ - client_mount_path = string - quota = number - }) - ) - default = null - description = "Fileset configurations." -} - variable "scheduler" { type = string default = null @@ -708,13 +649,6 @@ variable "custom_file_shares" { description = "Provide details for customizing your shared file storage layout, including mount points, sizes (in GB), and IOPS ranges for up to five file shares if using VPC file storage as the storage option.If using IBM Storage Scale as an NFS mount, update the appropriate mount path and nfs_share values created from the Storage Scale cluster. Note that VPC file storage supports attachment to a maximum of 256 nodes. Exceeding this limit may result in mount point failures due to attachment restrictions.For more information, see [Storage options](https://test.cloud.ibm.com/docs/hpc-ibm-spectrumlsf?topic=hpc-ibm-spectrumlsf-integrating-scale#integrate-scale-and-hpc)." } - -variable "bms_boot_drive_encryption" { - type = bool - default = false - description = "To enable the encryption for the boot drive of bare metal server. Select true or false" -} - ########################################################################### # Existing Bastion Support variables ########################################################################### @@ -801,57 +735,3 @@ variable "app_config_plan" { ) } } - -variable "protocol_subnet_id" { - type = string - description = "Name of an existing subnet for protocol nodes. If no value is given, a new subnet will be created" - default = null -} - -variable "client_subnet_id" { - type = string - description = "Name of an existing subnet for client nodes. If no value is given, a new subnet will be created" - default = null -} - -variable "storage_subnet_id" { - type = string - description = "Name of an existing subnet for storage nodes. If no value is given, a new subnet will be created" - default = null -} - -variable "login_security_group_name" { - type = string - default = null - description = "Provide the security group name to provision the bastion node. If set to null, the solution will automatically create the necessary security group and rules. If you choose to use an existing security group, ensure it has the appropriate rules configured for the bastion node to function properly." -} - -variable "storage_security_group_name" { - type = string - default = null - description = "Provide the security group name to provision the storage nodes. If set to null, the solution will automatically create the necessary security group and rules. If you choose to use an existing security group, ensure it has the appropriate rules configured for the storage nodes to function properly." 
-} - -variable "compute_security_group_name" { - type = string - default = null - description = "Provide the security group name to provision the compute nodes. If set to null, the solution will automatically create the necessary security group and rules. If you choose to use an existing security group, ensure it has the appropriate rules configured for the compute nodes to function properly." -} - -variable "client_security_group_name" { - type = string - default = null - description = "Provide the security group name to provision the gklm nodes. If set to null, the solution will automatically create the necessary security group and rules. If you choose to use an existing security group, ensure it has the appropriate rules configured for the gklm nodes to function properly." -} - -variable "gklm_security_group_name" { - type = string - default = null - description = "Provide the security group name to provision the gklm nodes. If set to null, the solution will automatically create the necessary security group and rules. If you choose to use an existing security group, ensure it has the appropriate rules configured for the gklm nodes to function properly." -} - -variable "ldap_security_group_name" { - type = string - default = null - description = "Provide the security group name to provision the ldap nodes. If set to null, the solution will automatically create the necessary security group and rules. If you choose to use an existing security group, ensure it has the appropriate rules configured for the ldap nodes to function properly." -} diff --git a/modules/resource_provisioner/locals.tf b/modules/resource_provisioner/locals.tf index 5b09c239..c7a75cc5 100644 --- a/modules/resource_provisioner/locals.tf +++ b/modules/resource_provisioner/locals.tf @@ -1,15 +1,14 @@ locals { - schematics_inputs_path = format("/tmp/.schematics/%s/solution_terraform.auto.tfvars.json", var.cluster_prefix) - remote_inputs_path = format("%s/terraform.tfvars.json", "/tmp") - deployer_path = "/opt/ibm" - remote_terraform_path = format("%s/terraform-ibm-hpc", local.deployer_path) - # da_hpc_repo_url = "github.ibm.com/workload-eng-services/HPCaaS.git" + schematics_inputs_path = format("/tmp/.schematics/%s/solution_terraform.auto.tfvars.json", var.cluster_prefix) + remote_inputs_path = format("%s/terraform.tfvars.json", "/tmp") + deployer_path = "/opt/ibm" + remote_terraform_path = format("%s/terraform-ibm-hpc", local.deployer_path) da_hpc_repo_url = "github.com/terraform-ibm-modules/terraform-ibm-hpc.git" - da_hpc_repo_tag = "main" ###### change it to main in future + da_hpc_repo_tag = "main" remote_ansible_path = format("%s/ibm-spectrumscale-cloud-deploy", local.deployer_path) scale_cloud_infra_repo_url = "https://github.com/jayeshh123/ibm-spectrum-scale-install-infra" scale_cloud_infra_repo_name = "ibm-spectrum-scale-install-infra" - scale_cloud_infra_repo_tag = "jay_new_scale_da_infra" + scale_cloud_infra_repo_tag = "jay_scale_da_api" products = var.scheduler == "Scale" ? "scale" : "lsf" ssh_key_file = "${path.root}/../../solutions/${local.products}/bastion_id_rsa" bastion_public_key_content = var.existing_bastion_instance_name != null ? 
var.bastion_public_key_content : "" diff --git a/modules/resource_provisioner/main.tf b/modules/resource_provisioner/main.tf index 087dc3d9..67a491cd 100644 --- a/modules/resource_provisioner/main.tf +++ b/modules/resource_provisioner/main.tf @@ -18,11 +18,13 @@ resource "null_resource" "tf_resource_provisioner" { provisioner "remote-exec" { inline = [ - # Conditionally clone "terraform-ibm-hpc" repository from TIM - "if [ -f ${local.remote_terraform_path} ]; then sudo rm -f ${local.remote_terraform_path}; fi && if [ ! -d ${local.remote_terraform_path} ]; then echo 'Cloning repository with tag: ${local.da_hpc_repo_tag}' && sudo git clone -b ${local.da_hpc_repo_tag} https://${local.da_hpc_repo_url} ${local.remote_terraform_path}; fi", + # Remove and re-clone the remote terraform path repo + # "if [ -d ${local.remote_terraform_path} ]; then echo 'Removing existing repository at ${local.remote_terraform_path}' && sudo rm -rf ${local.remote_terraform_path}; fi", + # "echo 'Cloning repository with tag: ${local.da_hpc_repo_tag}' && sudo git clone -b ${local.da_hpc_repo_tag} https://${var.github_token}@${local.da_hpc_repo_url} ${local.remote_terraform_path}", + "if [ ! -d ${local.remote_terraform_path} ]; then echo 'Cloning repository with tag: ${local.da_hpc_repo_tag}' && sudo git clone -b ${local.da_hpc_repo_tag} https://${local.da_hpc_repo_url} ${local.remote_terraform_path}; fi", # Clone Spectrum Scale collection if it doesn't exist - "if [ \"${var.scheduler}\" = \"Scale\" ]; then if [ ! -d ${local.remote_ansible_path}/${local.scale_cloud_infra_repo_name}/collections/ansible_collections/ibm/spectrum_scale ]; then sudo git clone -b ${local.scale_cloud_infra_repo_tag} ${local.scale_cloud_infra_repo_url} ${local.remote_ansible_path}/${local.scale_cloud_infra_repo_name}/collections/ansible_collections/ibm/spectrum_scale; fi; fi", + "if [ ! -d ${local.remote_ansible_path}/${local.scale_cloud_infra_repo_name}/collections/ansible_collections/ibm/spectrum_scale ]; then sudo git clone -b ${local.scale_cloud_infra_repo_tag} ${local.scale_cloud_infra_repo_url} ${local.remote_ansible_path}/${local.scale_cloud_infra_repo_name}/collections/ansible_collections/ibm/spectrum_scale; fi", # Ensure ansible-playbook is available "sudo ln -fs /usr/local/bin/ansible-playbook /usr/bin/ansible-playbook", @@ -60,7 +62,7 @@ resource "null_resource" "ext_bastion_access" { } resource "null_resource" "fetch_host_details_from_deployer" { - count = var.enable_deployer == true ? 1 : 0 + count = var.enable_deployer == true && var.scheduler == "LSF" ? 1 : 0 provisioner "local-exec" { command = < 0) ? "ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o ServerAliveInterval=5 -o ServerAliveCountMax=1 -J ubuntu@${var.bastion_fip} ubuntu@${local.ldap_hosts_ips[0]}" : null +} output "cloud_monitoring_url" { - value = var.scheduler == "LSF" && var.observability_monitoring_enable && (var.enable_deployer == false) ? module.cloud_monitoring_instance_creation[0].cloud_monitoring_url : null + value = var.observability_monitoring_enable && (var.enable_deployer == false) ? module.cloud_monitoring_instance_creation[0].cloud_monitoring_url : null description = "IBM Cloud Monitoring URL" } output "cloud_logs_url" { - value = var.scheduler == "LSF" && (var.enable_deployer == false) && (var.observability_logs_enable_for_management || var.observability_logs_enable_for_compute) ? 
module.cloud_monitoring_instance_creation[0].cloud_logs_url : null + value = (var.enable_deployer == false) && (var.observability_logs_enable_for_management || var.observability_logs_enable_for_compute) ? module.cloud_monitoring_instance_creation[0].cloud_logs_url : null description = "IBM Cloud Logs URL" } output "application_center_tunnel" { description = "Available if IBM Spectrum LSF Application Center GUI is installed" - value = var.scheduler == "LSF" && var.enable_deployer == false ? local.ssh_cmd : null + value = var.enable_deployer ? "" : local.ssh_cmd } output "application_center_url" { description = "Available if IBM Spectrum LSF Application Center GUI is installed" - value = var.scheduler == "LSF" ? "https://localhost:8443" : null -} - -output "web_service_tunnel" { - description = "SSH command to connect to the LSF WebServices" - value = var.scheduler == "LSF" && var.lsf_version == "fixpack_15" && var.enable_deployer == false ? local.webservice_ssh_cmd : null -} - -############################################# -### Scale Outputs ### -############################################# - -output "scale_version" { - description = "Version of Scale" - value = var.scheduler == "Scale" ? local.scale_version : null -} - -############################################# -### Common Outputa ### -############################################# - -output "ssh_to_deployer" { - description = "SSH command to connect to the deployer" - value = (var.enable_deployer == false) ? "ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -J ubuntu@${var.bastion_fip} vpcuser@${var.deployer_ip}" : null -} - -output "ssh_to_ldap_node" { - description = "SSH command to connect to LDAP node" - value = (var.enable_deployer == false && var.enable_ldap && length(local.ldap_hosts_ips) > 0) ? 
"ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o ServerAliveInterval=5 -o ServerAliveCountMax=1 -J ubuntu@${var.bastion_fip} ubuntu@${local.ldap_hosts_ips[0]}" : null + value = "https://localhost:8443" } diff --git a/samples/configs/hpc_catalog_values.json b/samples/configs/hpc_catalog_values.json index 54e19b10..f9ad780b 100644 --- a/samples/configs/hpc_catalog_values.json +++ b/samples/configs/hpc_catalog_values.json @@ -6,14 +6,14 @@ "ssh_keys": "[\"Please fill here\"]", "remote_allowed_ips": "[\"Please fill here\"]", "app_center_gui_password": "Please fill here", - "lsf_version": "fixpack_15", + "lsf_version": "Fixpack_15", "vpc_name": "__NULL__", - "cluster_subnet_id": "__NULL__", + "cluster_subnet_id": "[]", "login_subnet_id": "__NULL__", "vpc_cidr": "10.241.0.0/18", "vpc_cluster_private_subnets_cidr_blocks": "10.241.0.0/20", "vpc_cluster_login_private_subnets_cidr_blocks": "10.241.16.0/28", - "dns_domain_name": "{compute = \"hpc.local\"}", + "dns_domain_name": "{compute = \"lsf.com\"}", "dns_instance_id": "__NULL__", "dns_custom_resolver_id": "__NULL__", "bastion_instance": "{ image = \"ibm-ubuntu-22-04-5-minimal-amd64-3\", profile = \"cx2-4x8\" }", @@ -29,9 +29,9 @@ "kms_key_name": "__NULL__", "enable_vpc_flow_logs": "false", "enable_ldap": "false", - "ldap_basedns": "hpc.local", - "ldap_server": "__NULL__", - "ldap_server_cert": "__NULL__", + "ldap_basedns": "lsf.com", + "ldap_server": "null", + "ldap_server_cert": "null", "ldap_admin_password": "", "ldap_user_name": "", "ldap_user_password": "", @@ -51,7 +51,7 @@ "observability_logs_enable_for_compute": "false", "observability_enable_platform_logs": "false", "observability_enable_metrics_routing": "false", - "observability_logs_retention_period": "7", + "observability_logs_retention_period": "false", "observability_monitoring_on_compute_nodes_enable": "false", "observability_monitoring_plan": "graduated-tier", "existing_bastion_instance_name": "__NULL__", diff --git a/samples/configs/hpc_schematics_values.json b/samples/configs/hpc_schematics_values.json index fd6a9245..89273f7c 100644 --- a/samples/configs/hpc_schematics_values.json +++ b/samples/configs/hpc_schematics_values.json @@ -87,7 +87,7 @@ "value": "Please fill here", "type": "string", "secure": true, - "description": "Password required to access the IBM Spectrum LSF Application Center (App Center) GUI, which is enabled by default in both Fix Pack 15 and Fix Pack 14 with HTTPS. This is a mandatory value and omitting it will result in deployment failure. The password must meet the following requirements, at least 15 characters in length, and must include one uppercase letter, one lowercase letter, one number, and one special character. Spaces are not allowed." + "description": "Password required to access the IBM Spectrum LSF Application Center (App Center) GUI, which is enabled by default in both Fix Pack 15 and Fix Pack 14 with HTTPS. This is a mandatory value and omitting it will result in deployment failure. The password must meet the following requirements, at least 8 characters in length, and must include one uppercase letter, one lowercase letter, one number, and one special character." }, { "name": "remote_allowed_ips", @@ -147,7 +147,7 @@ }, { "name": "dns_domain_name", - "value": "{compute= \"hpc.local\"}", + "value": "{compute= \"lsf.com\"}", "type": "object({compute = string})", "secure": false, "description": "IBM Cloud DNS Services domain name to be used for the IBM Cloud HPC cluster." 
@@ -199,7 +199,7 @@ "value": "[{ profile = \"bx2-16x64\", count = 2, image = \"hpc-lsf-fp15-rhel810-v1\" }]", "type": "list(object({ profile = string, count = number, image = string }))", "secure": false, - "description": "Specify the list of management node configurations, including instance profile, image name, and count. By default, all management nodes are created using Fix Pack 15. If deploying with Fix Pack 14, set lsf_version to fixpack_14 and use the corresponding image hpc-lsf-fp14-rhel810-v1. The selected image must align with the specified lsf_version; any mismatch may lead to deployment failures. The solution allows customization of instance profiles and counts, but mixing custom images and IBM stock images across instances is not supported. If using IBM stock images, only Red Hat-based images are allowed. Management nodes must have a minimum of 9 GB RAM. Select a profile with 9 GB or higher." + "description": "Specify the list of management node configurations, including instance profile, image name, and count. By default, all management nodes are created using Fix Pack 15. If deploying with Fix Pack 14, set lsf_version to fixpack_14 and use the corresponding image hpc-lsf-fp14-rhel810-v1. The selected image must align with the specified lsf_version; any mismatch may lead to deployment failures. The solution allows customization of instance profiles and counts, but mixing custom images and IBM stock images across instances is not supported. If using IBM stock images, only Red Hat-based images are allowed." }, { "name": "static_compute_instances", @@ -287,7 +287,7 @@ }, { "name": "ldap_basedns", - "value": "hpc.local", + "value": "lsf.com", "type": "string", "secure": false, "description": "The DNS domain name used for configuring the LDAP server. If an LDAP server already exists, ensure you provide the associated DNS domain name." }, @@ -311,7 +311,7 @@ "value": "", "type": "string", "secure": true, - "description": "The LDAP admin password must be 15 to 32 characters long and include at least two alphabetic characters (with one uppercase and one lowercase), one number, and one special character from the set (!@#$%^&*()_+=-). The password must not contain the username or any spaces. [This value is ignored for an existing LDAP server]." + "description": "The LDAP administrative password should be 8 to 20 characters long, with a mix of at least three alphabetic characters, including one uppercase and one lowercase letter. It must also include two numerical digits and at least one special character from (~@_+:). It is important to avoid including the username in the password for enhanced security. [This value is ignored for an existing LDAP server]."
}, { "name": "ldap_user_name", diff --git a/samples/configs/scale_catalog_values.json b/samples/configs/scale_catalog_values.json deleted file mode 100644 index 92c1a45c..00000000 --- a/samples/configs/scale_catalog_values.json +++ /dev/null @@ -1,80 +0,0 @@ -{ - "ibmcloud_api_key": "Fill the value here", - "ibm_customer_number": "Fill the value here", - "existing_resource_group": "Default", - "zones": "[\"us-east-1\"]", - "cluster_prefix": "scale", - "ssh_keys": "[\"Fill the value here\"]", - "remote_allowed_ips": "[\"Fill the value here\"]", - "storage_gui_username": "Fill the value here", - "storage_gui_password": "Fill the value here", - "vpc_name": "__NULL__", - "vpc_cidr": "10.241.0.0/18", - "login_subnets_cidr": "10.241.16.0/28", - "compute_subnets_cidr": "10.241.0.0/20", - "storage_subnets_cidr": "10.241.30.0/24", - "protocol_subnets_cidr": "10.241.40.0/24", - "client_subnets_cidr": "10.241.50.0/24", - "compute_gui_username": "", - "compute_gui_password": "", - "bastion_instance": "{ image = \"ibm-ubuntu-22-04-5-minimal-amd64-3\", profile = \"cx2-4x8\" }", - "deployer_instance": "{ image = \"hpcc-scale-deployer-v1\", profile = \"mx2-4x32\" }", - "compute_instances": "[{ profile = \"cx2-2x4\", count = 0, image = \"hpcc-scale5232-rhel810-v1\", filesystem = \"/gpfs/fs1\" }]", - "client_instances": "[{ profile = \"cx2-2x4\", count = 0, image = \"ibm-redhat-8-10-minimal-amd64-6\" }]", - "storage_instances": "[{ profile = \"bx2d-32x128\", count =2, image = \"ibm-redhat-8-10-minimal-amd64-6\", filesystem = \"/gpfs/fs1\" }]", - "storage_baremetal_server": "[{ profile = \"cx2d-metal-96x192\", count = 2, image = \"hpcc-scale5232-rhel810-v1\", filesystem = \"/gpfs/fs1\" }]", - "tie_breaker_baremetal_server_profile": "__NULL__", - "scale_management_vsi_profile": "\"bx2-8x32\"", - "afm_instances": "[{ profile = \"bx2d-32x128\", count = 0, image = \"hpcc-scale5232-rhel810-v1\" }]", - "protocol_instances": "[{ profile = \"cx2-32x64\", count = 2, image = \"hpcc-scale5232-rhel810-v1\" }]", - "colocate_protocol_instances": "true", - "filesystem_config": "[{ filesystem = \"/gpfs/fs1\", block_size = \"4M\", default_data_replica = 2, default_metadata_replica = 2, max_data_replica = 3, max_metadata_replica = 3 }]", - "filesets_config": "[{ client_mount_path = \"/mnt/scale/tools\", quota = 0 } , { client_mount_path = \"/mnt/scale/data\", quota = 0 }]", - "afm_cos_config": "[{ afm_fileset = \"afm_fileset\", mode = \"iw\", cos_instance = \"\", bucket_name = \"\", bucket_region = \"us-south\", cos_service_cred_key = \"\", bucket_storage_class = \"smart\", bucket_type = \"region_location\" }]", - "dns_domain_name": "{ compute = \"comp.com\", storage = \"strg.com\", protocol = \"ces.com\", client = \"clnt.com\", gklm = \"gklm.com\"}", - "dns_instance_id": "__NULL__", - "dns_custom_resolver_id": "__NULL__", - "enable_vpc_flow_logs": "true", - "skip_flowlogs_s2s_auth_policy": "false", - "enable_ldap": "false", - "ldap_basedns": "ldapscale.com", - "ldap_server": "null", - "ldap_server_cert": "null", - "ldap_admin_password": "", - "ldap_user_name": "", - "ldap_user_password": "", - "ldap_instance": "[{ profile = \"cx2-2x4\", image = \"ibm-ubuntu-22-04-5-minimal-amd64-5\" }]", - "scale_encryption_enabled": "false", - "scale_encryption_type": "null", - "gklm_instances": "[{ profile = \"bx2-2x8\", count = 2, image = \"hpcc-scale-gklm4202-v2-5-3\" }]", - "scale_encryption_admin_password": "__NULL__", - "key_protect_instance_id": "__NULL__", - "storage_type": "scratch", - "observability_atracker_enable": "true", - 
"observability_atracker_target_type": "cloudlogs", - "sccwp_enable": "true", - "app_config_plan": "basic", - "cspm_enabled": "true", - "sccwp_service_plan": "free-trial", - "existing_bastion_instance_name": "__NULL__", - "existing_bastion_instance_public_ip": "__NULL__", - "existing_bastion_security_group_id": "__NULL__", - "existing_bastion_ssh_private_key": "__NULL__", - "enable_cos_integration": "false", - "cos_instance_name": "__NULL__", - "bms_boot_drive_encryption": "false", - "enable_sg_validation": "true", - "login_security_group_name": "__NULL__", - "storage_security_group_name": "__NULL__", - "compute_security_group_name": "__NULL__", - "client_security_group_name": "__NULL__", - "gklm_security_group_name": "__NULL__", - "login_subnet_id": "__NULL__", - "compute_subnet_id": "__NULL__", - "storage_subnet_id": "__NULL__", - "protocol_subnet_id": "__NULL__", - "client_subnet_id": "__NULL__", - "ldap_security_group_name": "__NULL__", - "TF_VERSION": "1.9", - "TF_PARALLELISM": "250" -} diff --git a/samples/configs/scale_schematics_values.json b/samples/configs/scale_schematics_values.json deleted file mode 100644 index 86ac1263..00000000 --- a/samples/configs/scale_schematics_values.json +++ /dev/null @@ -1,599 +0,0 @@ -{ - "name": "scale-test", - "type": [ - "terraform_v1.9" - ], - "location": "eu-de", - "resource_group": "Default", - "description": "", - "tags": [], - "template_repo": { - "url": "https://github.com/terraform-ibm-modules/terraform-ibm-hpc", - "branch": "11-aug-scale" - }, - "template_data": [ - { - "folder": "solutions/scale", - "type": "terraform_v1.9", - "env_values": [ - { - "TF_CLI_ARGS_apply": "-parallelism=250" - }, - { - "TF_CLI_ARGS_plan": "-parallelism=250" - }, - { - "TF_CLI_ARGS_destroy": "-parallelism=100" - }, - { - "VAR1": "" - }, - { - "VAR2": "" - } - ], - "variablestore": [ - { - "name": "ibm_customer_number", - "value": "Please fill here", - "type": "string", - "secure": true, - "description": "IBM Customer Number (ICN) used for Bring Your Own License (BYOL) entitlement check and not required if storage_type is evaluation, but must be provided if storage_type is scratch or persistent. Failing to provide an ICN will cause the deployment to fail to decrypt the packages. For more information on how to find your ICN, see [What is my IBM Customer Number (ICN)?](https://www.ibm.com/support/pages/what-my-ibm-customer-number-icn)." - }, - { - "name": "ibmcloud_api_key", - "value": "Please fill here", - "type": "string", - "secure": true, - "description": "Provide the IBM Cloud API key for the account where the IBM Storage Scale cluster will be deployed, this is a required value that must be provided as it is used to authenticate and authorize access during the deployment. For instructions on creating an API key, see [Managing user API keys](https://cloud.ibm.com/docs/account?topic=account-userapikey&interface=ui)." - }, - { - "name": "zones", - "value": "[\"us-east-1\"]", - "type": "list(string)", - "secure": false, - "description": "Specify the IBM Cloud zone within the chosen region where the IBM Storage scale cluster will be deployed. A single zone input is required, (for example, [\"us-east-1\"]) all the cluster nodes will all be provisioned in this zone.[Learn more](https://cloud.ibm.com/docs/vpc?topic=vpc-creating-a-vpc-in-a-different-region#get-zones-using-the-cli)." 
- {
- "name": "ssh_keys",
- "value": "[\"Please fill here\"]",
- "type": "list(string)",
- "secure": false,
- "description": "Provide the names of the SSH keys already configured in your IBM Cloud account to enable access to the Storage Scale nodes. The solution does not create new SSH keys, so ensure you provide existing ones. These keys must reside in the same resource group and region as the cluster being provisioned. To provide multiple SSH keys, use a comma-separated list in the format: [\"key-name-1\", \"key-name-2\"]. If you do not have an SSH key in your IBM Cloud account, you can create one by following the instructions in [SSH Keys](https://cloud.ibm.com/docs/vpc?topic=vpc-ssh-keys)."
- },
- {
- "name": "remote_allowed_ips",
- "value": "[\"Please fill here\"]",
- "type": "list(string)",
- "secure": false,
- "description": "To ensure secure access to the IBM Storage Scale cluster via SSH, you must specify the public IP addresses of the devices that are permitted to connect. These IPs will be used to configure access restrictions and protect the environment from unauthorized connections. To allow access from multiple devices, provide the IP addresses as a comma-separated list in the format: [\"169.45.117.34\", \"203.0.113.25\"]. To identify your current public IP address, visit https://ipv4.icanhazip.com."
- },
- {
- "name": "cluster_prefix",
- "value": "scale",
- "type": "string",
- "secure": false,
- "description": "Prefix that is used to name the IBM Cloud resources that are provisioned to build the Storage Scale cluster. Make sure that the prefix is unique, since you cannot create multiple resources with the same name. The maximum supported length is 64 characters. The prefix must begin with a letter and end with a letter or number."
- },
- {
- "name": "existing_resource_group",
- "value": "Default",
- "type": "string",
- "secure": false,
- "description": "Specify the name of the existing resource group in your IBM Cloud account where cluster resources will be deployed. By default, the resource group is set to 'Default'. In some older accounts, it may be 'default', so please verify the resource group name before proceeding. If the value is set to \"null\", the automation will create two separate resource groups: 'workload-rg' and 'service-rg'. For more details, see [Managing resource groups](https://cloud.ibm.com/docs/account?topic=account-rgs&interface=ui)."
- },
- {
- "name": "vpc_name",
- "value": "__NULL__",
- "type": "string",
- "secure": false,
- "description": "Provide the name of an existing VPC in which the cluster resources will be deployed. If no value is given, the solution provisions a new VPC. [Learn more](https://cloud.ibm.com/docs/vpc). You can also choose to use existing subnets under this VPC or let the solution create new subnets as part of the deployment. If a custom DNS resolver is already configured for your VPC, specify its ID under the dns_custom_resolver_id input value."
- },
- {
- "name": "vpc_cidr",
- "value": "10.241.0.0/18",
- "type": "string",
- "secure": false,
- "description": "Provide an address prefix to create a new VPC when the vpc_name variable is set to null. The VPC will be created using this address prefix, and subnets can then be defined within it using the specified subnet CIDR blocks. For more information on address prefixes, see [Setting IP ranges](https://cloud.ibm.com/docs/vpc?topic=vpc-vpc-addressing-plan-design)."
- },
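The cluster_prefix constraints above (at most 64 characters, begins with a letter, ends with a letter or number) can be captured in a single expression. A minimal sketch in Python; the assumption that hyphens are permitted in the middle of the prefix is illustrative, not confirmed by the description:

    import re

    # <= 64 chars, starts with a letter, ends with a letter or digit;
    # letters, digits, and hyphens are assumed valid in between.
    PREFIX_RE = re.compile(r"^[A-Za-z](?:[A-Za-z0-9-]{0,62}[A-Za-z0-9])?$")

    def valid_cluster_prefix(prefix: str) -> bool:
        return bool(PREFIX_RE.match(prefix))

    assert valid_cluster_prefix("scale")
    assert not valid_cluster_prefix("1scale")  # must begin with a letter
    assert not valid_cluster_prefix("scale-")  # must end with a letter or number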
- {
- "name": "bastion_instance",
- "value": "{image = \"ibm-ubuntu-22-04-5-minimal-amd64-5\", profile = \"cx2-4x8\"}",
- "type": "object({ image = string, profile = string })",
- "secure": false,
- "description": "The bastion node functions as a jump server to enable secure SSH access to cluster nodes, ensuring controlled connectivity within the private network. Specify the configuration details for the bastion node, including the image and instance profile. Only Ubuntu 22.04 stock images are supported."
- },
- {
- "name": "deployer_instance",
- "value": "{image = \"hpcc-scale-deployer-v1\", profile = \"mx2-4x32\"}",
- "type": "object({ image = string, profile = string })",
- "secure": false,
- "description": "A deployer node is a dedicated virtual machine or server instance used to automate the deployment and configuration of infrastructure and applications for HPC cluster components. Specify the configuration for the deployer node, including the custom image and virtual server instance profile."
- },
- {
- "name": "login_subnets_cidr",
- "value": "10.241.16.0/28",
- "type": "string",
- "secure": false,
- "description": "Provide the CIDR block required for the creation of the login cluster private subnet. A single CIDR block is required. If using a hybrid environment, modify the CIDR block to avoid conflicts with any on-premises CIDR blocks. Since the login subnet is used only for the creation of login virtual server instances, provide a CIDR range of /28."
- },
- {
- "name": "compute_subnets_cidr",
- "value": "10.241.0.0/20",
- "type": "string",
- "secure": false,
- "description": "Provide the CIDR block required for the creation of the compute private subnet. A single CIDR block is required. If using a hybrid environment, modify the CIDR block to avoid conflicts with any on-premises CIDR blocks. Ensure the selected CIDR block size can accommodate the maximum number of Scale compute nodes expected in your cluster. For more information on CIDR block size selection, see [Choosing IP ranges for your VPC](https://cloud.ibm.com/docs/vpc?topic=vpc-choosing-ip-ranges-for-your-vpc)."
- },
- {
- "name": "storage_subnets_cidr",
- "value": "10.241.30.0/24",
- "type": "string",
- "secure": false,
- "description": "Provide the CIDR block required for the creation of the storage private subnet. A single CIDR block is required. If using a hybrid environment, modify the CIDR block to avoid conflicts with any on-premises CIDR blocks. Ensure the selected CIDR block size can accommodate the maximum number of Scale storage nodes expected in your cluster. For more information on CIDR block size selection, see [Choosing IP ranges for your VPC](https://cloud.ibm.com/docs/vpc?topic=vpc-choosing-ip-ranges-for-your-vpc)."
- },
- {
- "name": "protocol_subnets_cidr",
- "value": "10.241.40.0/24",
- "type": "string",
- "secure": false,
- "description": "Provide the CIDR block required for the creation of the protocol private subnet. A single CIDR block is required. If using a hybrid environment, modify the CIDR block to avoid conflicts with any on-premises CIDR blocks. Ensure the selected CIDR block size can accommodate the maximum number of protocol nodes expected in your cluster. For more information on CIDR block size selection, see [Choosing IP ranges for your VPC](https://cloud.ibm.com/docs/vpc?topic=vpc-choosing-ip-ranges-for-your-vpc)."
- },
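Since every subnet CIDR above must fall inside the vpc_cidr address prefix, a quick containment and sizing check is straightforward. A minimal sketch using Python's standard ipaddress module with the default values from this file; the figure of 5 reserved addresses per subnet is an assumption for illustration only:

    import ipaddress

    vpc = ipaddress.ip_network("10.241.0.0/18")
    subnets = {
        "login":    ipaddress.ip_network("10.241.16.0/28"),
        "compute":  ipaddress.ip_network("10.241.0.0/20"),
        "storage":  ipaddress.ip_network("10.241.30.0/24"),
        "protocol": ipaddress.ip_network("10.241.40.0/24"),
    }

    for name, net in subnets.items():
        # Each subnet must sit inside the VPC address prefix.
        assert net.subnet_of(vpc), f"{name} CIDR is outside the VPC address prefix"
        # Assuming ~5 addresses are reserved per subnet, a /28 leaves
        # roughly 11 usable IPs -- adequate only for the small login tier.
        print(f"{name}: {net.num_addresses} addresses, ~{net.num_addresses - 5} usable")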
- {
- "name": "client_subnets_cidr",
- "value": "10.241.50.0/24",
- "type": "string",
- "secure": false,
- "description": "Provide the CIDR block required for the creation of the client private subnet. A single CIDR block is required. If using a hybrid environment, modify the CIDR block to avoid conflicts with any on-premises CIDR blocks. Ensure the selected CIDR block size can accommodate the maximum number of Scale client nodes expected in your cluster. For more information on CIDR block size selection, see [Choosing IP ranges for your VPC](https://cloud.ibm.com/docs/vpc?topic=vpc-choosing-ip-ranges-for-your-vpc)."
- },
- {
- "name": "compute_gui_username",
- "value": "",
- "type": "string",
- "secure": true,
- "description": "GUI username to perform system management and monitoring tasks on the compute cluster. The username must be at least 4 characters long (any combination of lowercase and uppercase letters)."
- },
- {
- "name": "compute_gui_password",
- "value": "",
- "type": "string",
- "secure": true,
- "description": "Password for logging in to the compute cluster GUI. Must be at least 8 characters long and include a combination of uppercase and lowercase letters, a number, and a special character. It must not contain the username or start with a special character."
- },
- {
- "name": "compute_instances",
- "value": "[{ profile = \"cx2-2x4\", count = 0, image = \"hpcc-scale5232-rhel810-v1\", filesystem = \"/gpfs/fs1\" }]",
- "type": "list(object({ profile = string, count = number, image = string, filesystem = optional(string) }))",
- "secure": false,
- "description": "Specify the list of virtual server instances to be provisioned as compute nodes in the cluster. Each object includes the instance profile (machine type), the number of instances (count), the OS image to use, and an optional filesystem mount path. This configuration allows customization of the compute tier to suit specific performance and workload requirements. The profile must match a valid IBM Cloud VPC Gen2 instance profile format. A minimum of 3 compute nodes is required to form a cluster, and a maximum of 64 nodes is supported. For more details, refer to [Instance Profiles](https://cloud.ibm.com/docs/vpc?topic=vpc-profiles&interface=ui)."
- },
- {
- "name": "client_instances",
- "value": "[{ profile = \"cx2-2x4\", count = 0, image = \"ibm-redhat-8-10-minimal-amd64-6\" }]",
- "type": "list(object({ profile = string, count = number, image = string }))",
- "secure": false,
- "description": "Specify the list of virtual server instances to be provisioned as client nodes in the cluster. Each object includes the instance profile (machine type), the number of instances (count), and the OS image to use. This configuration allows customization of the client tier to suit specific performance and workload requirements. The profile must match a valid IBM Cloud VPC Gen2 instance profile format. For more details, refer to [Instance Profiles](https://cloud.ibm.com/docs/vpc?topic=vpc-profiles&interface=ui)."
- },
- {
- "name": "storage_instances",
- "value": "[{ profile = \"bx2d-32x128\", count = 2, image = \"hpcc-scale5232-rhel810-v1\", filesystem = \"/gpfs/fs1\" }]",
- "type": "list(object({ profile = string, count = number, image = string, filesystem = optional(string) }))",
- "secure": false,
- "description": "Specify the list of virtual server instances to be provisioned as storage nodes in the cluster.
Each object includes the instance profile (machine type), the number of instances (count), the OS image to use, and an optional filesystem mount path. This configuration allows customization of the storage tier to suit the specific performance requirements of the storage cluster. The profile must match a valid IBM Cloud VPC Gen2 instance profile format. A minimum of 2 storage nodes is required to form a cluster, and a maximum of 64 nodes is supported. For more details, refer to [Instance Profiles](https://cloud.ibm.com/docs/vpc?topic=vpc-profiles&interface=ui)."
- },
- {
- "name": "storage_baremetal_server",
- "value": "[{ profile = \"cx2d-metal-96x192\", count = 2, image = \"hpcc-scale5232-rhel810-v1\", filesystem = \"/gpfs/fs1\" }]",
- "type": "list(object({ profile = string, count = number, image = string, filesystem = optional(string) }))",
- "secure": false,
- "description": "Specify the list of bare metal servers to be provisioned for the storage cluster. Each object in the list specifies the server profile (hardware configuration), the count (number of servers), the image (OS image to use), and an optional filesystem mount path. This configuration allows flexibility in scaling and customizing the storage cluster based on performance and capacity requirements. Only valid bare metal profiles supported in IBM Cloud VPC should be used. A minimum of 2 bare metal storage nodes is required to form a cluster, and a maximum of 64 nodes is supported. For available bare metal profiles, refer to the [Baremetal Profiles](https://cloud.ibm.com/docs/vpc?topic=vpc-bare-metal-servers-profile&interface=ui)."
- },
- {
- "name": "tie_breaker_baremetal_server_profile",
- "value": "__NULL__",
- "type": "string",
- "secure": false,
- "description": "Specify the bare metal server profile type name to be used for creating the bare metal Tie breaker node. If no value is provided, the storage bare metal server profile will be used as the default. For more information, see [bare metal server profiles](https://cloud.ibm.com/docs/vpc?topic=vpc-bare-metal-servers-profile&interface=ui) and [Tie Breaker Node](https://www.ibm.com/docs/en/storage-scale/5.2.2?topic=quorum-node-tiebreaker-disks)."
- },
- {
- "name": "scale_management_vsi_profile",
- "value": "bx2-8x32",
- "type": "string",
- "secure": false,
- "description": "The virtual server instance profile type name to be used to create the Management node. For more information, see [Instance Profiles](https://cloud.ibm.com/docs/vpc?topic=vpc-profiles&interface=ui)."
- },
- {
- "name": "afm_instances",
- "value": "[{ profile = \"bx2d-32x128\", count = 0}]",
- "type": "list(object({ profile = string, count = number}))",
- "secure": false,
- "description": "Specify the list of virtual server instances to be provisioned as AFM nodes in the cluster. Each object in the list includes the instance profile (machine type) and the count (number of instances). This configuration allows you to access remote data for high-performance computing needs. This input can be used to provision virtual server instances (VSI). If persistent, high-throughput storage is required, consider using bare metal instances instead. Ensure you provide valid instance profiles. A maximum of 16 AFM nodes is supported. For more details, refer to [Instance Profiles](https://cloud.ibm.com/docs/vpc?topic=vpc-profiles&interface=ui)."
- },
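The node-count bounds quoted in the descriptions above (3 to 64 compute nodes, 2 to 64 storage or bare metal storage nodes, up to 16 AFM nodes) lend themselves to a simple pre-flight check. A minimal sketch in Python; the bounds table and the treatment of a count of 0 as "tier not provisioned" are taken from the descriptions, everything else is illustrative:

    # Per-tier (min, max) node counts as stated in the variable descriptions.
    BOUNDS = {"compute": (3, 64), "storage": (2, 64), "afm": (0, 16)}

    def check_counts(kind: str, instances: list) -> None:
        total = sum(inst["count"] for inst in instances)
        if total == 0:
            return  # a count of 0 simply skips the tier
        lo, hi = BOUNDS[kind]
        if not lo <= total <= hi:
            raise ValueError(f"{kind}: {total} nodes outside supported range {lo}-{hi}")

    check_counts("storage", [{"profile": "bx2d-32x128", "count": 2}])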
- {
- "name": "protocol_instances",
- "value": "[{ profile = \"cx2-32x64\", count = 2}]",
- "type": "list(object({ profile = string, count = number}))",
- "secure": false,
- "description": "Specify the list of virtual server instances to be provisioned as protocol nodes in the cluster. Each object in the list includes the instance profile (machine type) and the count (number of instances). This configuration allows for a unified data management solution, enabling different clients to access the same data using the NFS protocol. This input can be used to provision virtual server instances (VSI). If persistent, high-throughput storage is required, consider using bare metal instances instead. Ensure you provide valid instance profiles. A maximum of 32 VSI or bare metal nodes is supported. For more details, refer to [Instance Profiles](https://cloud.ibm.com/docs/vpc?topic=vpc-profiles&interface=ui)."
- },
- {
- "name": "colocate_protocol_instances",
- "value": "true",
- "type": "bool",
- "secure": false,
- "description": "Enable this option to colocate protocol services on the same virtual server instances used for storage. When set to true, the storage nodes will also act as protocol nodes, reducing the need for separate infrastructure. This can optimize resource usage and simplify the cluster setup, especially for smaller environments or cost-sensitive deployments. For larger or performance-intensive workloads, consider deploying dedicated protocol instances instead."
- },
- {
- "name": "storage_gui_username",
- "value": "",
- "type": "string",
- "secure": true,
- "description": "GUI username to perform system management and monitoring tasks on the storage cluster. Note: The username must be at least 4 characters long (any combination of lowercase and uppercase letters)."
- },
- {
- "name": "storage_gui_password",
- "value": "",
- "type": "string",
- "secure": true,
- "description": "The storage cluster GUI password is used for logging in to the storage cluster through the GUI. The password must contain a minimum of 8 characters. For a strong password, use a combination of uppercase and lowercase letters, one number, and a special character. Make sure that the password does not contain the username and does not start with a special character."
- },
- {
- "name": "filesystem_config",
- "value": "[{ filesystem = \"/gpfs/fs1\", block_size = \"4M\", default_data_replica = 2, default_metadata_replica = 2, max_data_replica = 3, max_metadata_replica = 3 }]",
- "type": "list(object({ filesystem = string, block_size = string, default_data_replica = number, default_metadata_replica = number, max_data_replica = number, max_metadata_replica = number }))",
- "secure": false,
- "description": "Specify the configuration parameters for one or more IBM Storage Scale (GPFS) filesystems. Each object in the list includes the filesystem mount point, block size, and replica settings for both data and metadata. These settings determine how data is distributed and replicated across the cluster for performance and fault tolerance."
- },
- {
- "name": "filesets_config",
- "value": "[{ client_mount_path = \"/mnt/scale/tools\", quota = 0 } , { client_mount_path = \"/mnt/scale/data\", quota = 0 }]",
- "type": "list(object({ client_mount_path = string, quota = number}))",
- "secure": false,
- "description": "Specify a list of filesets with client mount paths and optional storage quotas (0 means no quota) to be created within the IBM Storage Scale filesystem."
- },
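The GUI username and password rules above are stated essentially identically for the compute and storage clusters, so one checker can cover both. A minimal sketch in Python mirroring the documented rules (at least 8 characters, mixed case, a number, a special character, no username substring, no leading special character); the special-character set is an assumption borrowed from the LDAP password descriptions later in this file:

    import re

    SPECIALS = "!@#$%^&*()_+=-"

    def valid_gui_password(password: str, username: str) -> bool:
        return (
            len(password) >= 8
            and re.search(r"[A-Z]", password) is not None
            and re.search(r"[a-z]", password) is not None
            and re.search(r"[0-9]", password) is not None
            and any(ch in SPECIALS for ch in password)
            and username.lower() not in password.lower()  # must not contain the username
            and password[0] not in SPECIALS               # must not start with a special char
        )

    assert valid_gui_password("Gpfs-Admin7", "scaleadmin")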
- {
- "name": "afm_cos_config",
- "value": "[{ afm_fileset = \"afm_fileset\", mode = \"iw\", cos_instance = \"\", bucket_name = \"\", bucket_region = \"us-south\", cos_service_cred_key = \"\", bucket_storage_class = \"smart\", bucket_type = \"region_location\" }]",
- "type": "list(object({afm_fileset = string, mode = string, cos_instance = string, bucket_name = string, bucket_region = string, cos_service_cred_key = string, bucket_type = string, bucket_storage_class = string }))",
- "secure": false,
- "description": "Provide details for the Cloud Object Storage (COS) instance, including information about the COS bucket, service credentials (HMAC key), AFM fileset, mode (such as Read-only (RO), Single writer (SW), Local updates (LU), and Independent writer (IW)), storage class (standard, vault, cold, or smart), and bucket type (single_site_location, region_location, cross_region_location). Note: The 'afm_cos_config' can contain up to 5 entries. For further details on COS bucket locations, refer to the relevant documentation: https://cloud.ibm.com/docs/cloud-object-storage/basics?topic=cloud-object-storage-endpoints."
- },
- {
- "name": "dns_instance_id",
- "value": "__NULL__",
- "type": "string",
- "secure": false,
- "description": "Specify the ID of an existing IBM Cloud DNS service instance. When provided, domain names are created within the specified instance. If set to null, a new DNS service instance is created, and the required DNS zones are associated with it."
- },
- {
- "name": "dns_custom_resolver_id",
- "value": "__NULL__",
- "type": "string",
- "secure": false,
- "description": "Specify the ID of an existing IBM Cloud DNS custom resolver to avoid creating a new one. If set to null, a new custom resolver will be created and associated with the VPC. Note: A VPC can be associated with only one custom resolver. When using an existing VPC, if a custom resolver is already associated and this ID is not provided, the deployment will fail."
- },
- {
- "name": "dns_domain_names",
- "value": "{ compute = \"comp.com\", storage = \"strg.com\", protocol = \"ces.com\", client = \"clnt.com\", gklm = \"gklm.com\"}",
- "type": "object({ compute = string, storage = string, protocol = string, client = string, gklm = string })",
- "secure": false,
- "description": "DNS domain names are user-friendly addresses that map to systems within a network, making them easier to identify and access. Provide the DNS domain names for IBM Cloud HPC components: compute, storage, protocol, client, and GKLM. These domains will be assigned to the respective nodes that are part of the Scale cluster."
- },
- {
- "name": "enable_cos_integration",
- "value": "true",
- "type": "bool",
- "secure": false,
- "description": "Set to true to create an extra COS bucket to integrate with the Scale cluster deployment."
- },
- {
- "name": "cos_instance_name",
- "value": "__NULL__",
- "type": "string",
- "secure": false,
- "description": "Provide the name of the existing COS instance where the logs for the enabled functionalities will be stored."
- },
- {
- "name": "enable_vpc_flow_logs",
- "value": "true",
- "type": "bool",
- "secure": false,
- "description": "This flag determines whether VPC flow logs are enabled. When set to true, a flow log collector will be created to capture and monitor network traffic data within the VPC. Enabling flow logs provides valuable insights for troubleshooting, performance monitoring, and security auditing by recording information about the traffic passing through your VPC.
Consider enabling this feature to enhance visibility and maintain robust network management practices."
- },
- {
- "name": "override",
- "value": "false",
- "type": "bool",
- "secure": false,
- "description": "Override default values with a custom JSON template. This uses the file `override.json` to allow users to create a fully customized environment."
- },
- {
- "name": "override_json_string",
- "value": "__NULL__",
- "type": "string",
- "secure": false,
- "description": "Override default values with a JSON object. Any JSON other than an empty string overrides other configuration changes."
- },
- {
- "name": "enable_ldap",
- "value": "false",
- "type": "bool",
- "secure": false,
- "description": "Set this option to true to enable LDAP for IBM Spectrum Scale (GPFS), with the default value set to false."
- },
- {
- "name": "ldap_basedns",
- "value": "ldapscale.com",
- "type": "string",
- "secure": false,
- "description": "The DNS domain name used for configuring the LDAP server. If an LDAP server already exists, ensure you provide its associated DNS domain name."
- },
- {
- "name": "ldap_server",
- "value": "__NULL__",
- "type": "string",
- "secure": false,
- "description": "Provide the IP address for the existing LDAP server. If no address is given, a new LDAP server will be created."
- },
- {
- "name": "ldap_server_cert",
- "value": "__NULL__",
- "type": "string",
- "secure": true,
- "description": "Provide the existing LDAP server certificate. This value is required if the 'ldap_server' variable is not set to null. If the certificate is not provided or is invalid, the LDAP configuration may fail. For more information on how to create or obtain the certificate, refer to [existing LDAP server certificate](https://cloud.ibm.com/docs/allowlist/hpc-service?topic=hpc-service-integrating-openldap)."
- },
- {
- "name": "ldap_admin_password",
- "value": "__NULL__",
- "type": "string",
- "secure": true,
- "description": "The LDAP admin password must be 8 to 20 characters long and include at least two alphabetic characters (with one uppercase and one lowercase), one number, and one special character from the set (!@#$%^&*()_+=-). The password must not contain the username or any spaces. [This value is ignored for an existing LDAP server]."
- },
- {
- "name": "ldap_user_name",
- "value": "",
- "type": "string",
- "secure": false,
- "description": "Custom LDAP user for performing cluster operations. Note: The username must be between 4 and 32 characters (any combination of lowercase and uppercase letters). [This value is ignored for an existing LDAP server]."
- },
- {
- "name": "ldap_user_password",
- "value": "",
- "type": "string",
- "secure": true,
- "description": "The LDAP user password must be 8 to 20 characters long and include at least two alphabetic characters (with one uppercase and one lowercase), one numeric digit, and at least one special character from the set (!@#$%^&*()_+=-). Spaces are not allowed. The password must not contain the username for enhanced security. [This value is ignored for an existing LDAP server]."
- },
- {
- "name": "ldap_instance",
- "value": "[{ profile = \"cx2-2x4\", image = \"ibm-ubuntu-22-04-5-minimal-amd64-1\" }]",
- "type": "list(object({ profile = string, image = string }))",
- "secure": false,
- "description": "Specify the list of virtual server instances to be provisioned as LDAP nodes in the cluster. Each object in the list defines the instance profile (machine type) and the image (OS image to use).
This configuration allows you to customize the server used for setting up the LDAP server. The profile must match a valid IBM Cloud VPC Gen2 instance profile format. For more details, refer to [Instance Profiles](https://cloud.ibm.com/docs/vpc?topic=vpc-profiles&interface=ui)."
- },
- {
- "name": "scale_encryption_enabled",
- "value": "false",
- "type": "bool",
- "secure": false,
- "description": "Encryption ensures that data stored in the filesystem is protected from unauthorized access and secures sensitive information at rest. Set to true to enable encryption for the filesystem."
- },
- {
- "name": "scale_encryption_type",
- "value": "null",
- "type": "string",
- "secure": false,
- "description": "To enable filesystem encryption, specify either 'key_protect' or 'gklm'. If neither is specified, the default value will be 'null' and encryption is disabled."
- },
- {
- "name": "gklm_instances",
- "value": "[{ profile = \"bx2-2x8\", count = 2, image = \"hpcc-scale-gklm4202-v2-5-3\" }]",
- "type": "list(object({ profile = string, count = number, image = string }))",
- "secure": false,
- "description": "Specify the list of virtual server instances to be provisioned as GKLM (Guardium Key Lifecycle Manager) nodes in the cluster. Each object in the list includes the instance profile (machine type), the count (number of instances), and the image (OS image to use). This configuration allows you to manage and securely store encryption keys used across the cluster components. The profile must match a valid IBM Cloud VPC Gen2 instance profile format. A minimum of 2 and a maximum of 5 GKLM nodes are supported. For more details, refer to [Instance Profiles](https://cloud.ibm.com/docs/vpc?topic=vpc-profiles&interface=ui)."
- },
- {
- "name": "scale_encryption_admin_password",
- "value": "__NULL__",
- "type": "string",
- "secure": true,
- "description": "Specifies the administrator password for GKLM-based encryption. This is required when encryption is enabled for IBM Spectrum Scale (GPFS) and the encryption type is set to 'gklm'. The password is used to authenticate administrative access to the Guardium Key Lifecycle Manager (GKLM) for managing encryption keys. Ensure the password meets your organization's security standards."
- },
- {
- "name": "key_protect_instance_id",
- "value": "__NULL__",
- "type": "string",
- "secure": false,
- "description": "Provide the ID of an existing IBM Key Protect instance to be used for filesystem encryption in IBM Storage Scale. If this value is provided, the automation will use the existing Key Protect instance and create a new encryption key within it. If not provided, a new Key Protect instance will be created automatically during deployment."
- },
- {
- "name": "storage_type",
- "value": "scratch",
- "type": "string",
- "secure": false,
- "description": "Select the Storage Scale file system deployment method. Note: The Storage Scale scratch and evaluation types deploy the Storage Scale file system on virtual server instances, and the persistent type deploys the Storage Scale file system on bare metal servers."
- },
- {
- "name": "observability_atracker_enable",
- "value": "false",
- "type": "bool",
- "secure": false,
- "description": "Activity Tracker Event Routing to configure how to route auditing events. While multiple Activity Tracker instances can be created, only one tracker is needed to capture all events. Creating additional trackers is unnecessary if an existing Activity Tracker is already integrated with a COS bucket.
In such cases, set the value to false, as all events can be monitored and accessed through the existing Activity Tracker."
- },
- {
- "name": "observability_atracker_target_type",
- "value": "cloudlogs",
- "type": "string",
- "secure": false,
- "description": "All events will be stored in either a COS bucket or Cloud Logs, based on user input, so customers can retrieve or ingest them into their own systems."
- },
- {
- "name": "sccwp_service_plan",
- "value": "free-trial",
- "type": "string",
- "secure": false,
- "description": "Specify the plan type for the Security and Compliance Center (SCC) Workload Protection instance. Valid values are free-trial and graduated-tier only."
- },
- {
- "name": "sccwp_enable",
- "value": "false",
- "type": "bool",
- "secure": false,
- "description": "Set this flag to true to create an instance of IBM Security and Compliance Center (SCC) Workload Protection. When enabled, it provides tools to discover and prioritize vulnerabilities, monitor for security threats, and enforce configuration, permission, and compliance policies across the full lifecycle of your workloads. To view the data on the dashboard, enable CSPM to create the app configuration and the required trusted profile policies. [Learn more](https://cloud.ibm.com/docs/workload-protection?topic=workload-protection-about)."
- },
- {
- "name": "cspm_enabled",
- "value": "true",
- "type": "bool",
- "secure": false,
- "description": "CSPM (Cloud Security Posture Management) is a set of tools and practices that continuously monitor and secure cloud infrastructure. When enabled, it creates a trusted profile with viewer access to the App Configuration and Enterprise services for the SCC Workload Protection instance. Make sure the required IAM permissions are in place, as missing permissions will cause deployment to fail. If CSPM is disabled, dashboard data will not be available. [Learn more](https://cloud.ibm.com/docs/workload-protection?topic=workload-protection-about)."
- },
- {
- "name": "app_config_plan",
- "value": "basic",
- "type": "string",
- "secure": false,
- "description": "Specify the IBM service pricing plan for the app configuration. Allowed values are 'basic', 'lite', 'standardv2', 'enterprise'."
- },
- {
- "name": "skip_flowlogs_s2s_auth_policy",
- "value": "false",
- "type": "bool",
- "secure": false,
- "description": "Skip the auth policy between the flow logs service and the COS instance; set to true if this policy is already in place on the account."
- },
- {
- "name": "existing_bastion_instance_name",
- "value": "__NULL__",
- "type": "string",
- "secure": false,
- "description": "Provide the name of the bastion instance. If none is given, a new bastion will be created."
- },
- {
- "name": "existing_bastion_instance_public_ip",
- "value": "__NULL__",
- "type": "string",
- "secure": false,
- "description": "Provide the public IP address of the bastion instance to establish the remote connection."
- },
- {
- "name": "existing_bastion_security_group_id",
- "value": "__NULL__",
- "type": "string",
- "secure": false,
- "description": "Specify the security group ID for the bastion server. This ID will be added as an allowlist rule on the HPC cluster nodes to facilitate secure SSH connections through the bastion node. By restricting access through a bastion server, this setup enhances security by controlling and monitoring entry points into the cluster environment.
Ensure that the specified security group is correctly configured to permit only authorized traffic for secure and efficient management of cluster resources."
- },
- {
- "name": "existing_bastion_ssh_private_key",
- "value": "__NULL__",
- "type": "string",
- "secure": true,
- "description": "Provide the private SSH key (named id_rsa) used during the creation and configuration of the bastion server to securely authenticate and connect to the bastion server. This allows access to internal network resources from a secure entry point. Note: The corresponding public SSH key (named id_rsa.pub) must already be available in the ~/.ssh/authorized_keys file on the bastion host to establish authentication."
- },
- {
- "name": "bms_boot_drive_encryption",
- "value": "false",
- "type": "bool",
- "secure": false,
- "description": "Enable or disable encryption for the boot drive of bare metal servers. When set to true, the boot drive will be encrypted to enhance data security, protecting the operating system and any sensitive information stored on the root volume. This is especially recommended for workloads with strict compliance or security requirements. Set to false to disable boot drive encryption."
- },
- {
- "name": "enable_sg_validation",
- "value": "true",
- "type": "bool",
- "secure": false,
- "description": "Enable or disable security group validation. Security group validation ensures that the specified security groups are properly assigned."
- },
- {
- "name": "login_security_group_name",
- "value": "__NULL__",
- "type": "string",
- "secure": false,
- "description": "Provide the existing security group name to provision the bastion node. If set to null, the solution will automatically create the necessary security group and rules. If you choose to use an existing security group, ensure it has the appropriate rules configured for the bastion node to function properly."
- },
- {
- "name": "storage_security_group_name",
- "value": "__NULL__",
- "type": "string",
- "secure": false,
- "description": "Provide the security group name to provision the storage nodes. If set to null, the solution will automatically create the necessary security group and rules. If you choose to use an existing security group, ensure it has the appropriate rules configured for the storage nodes to function properly. When using existing security groups, you must provide the corresponding group names for all other associated components as well."
- },
- {
- "name": "compute_security_group_name",
- "value": "__NULL__",
- "type": "string",
- "secure": false,
- "description": "Provide the security group name to provision the compute nodes. If set to null, the solution will automatically create the necessary security group and rules. If you choose to use an existing security group, ensure it has the appropriate rules configured for the compute nodes to function properly. When using existing security groups, you must provide the corresponding group names for all other associated components as well."
- },
- {
- "name": "client_security_group_name",
- "value": "__NULL__",
- "type": "string",
- "secure": false,
- "description": "Provide the security group name to provision the client nodes. If set to null, the solution will automatically create the necessary security group and rules. If you choose to use an existing security group, ensure it has the appropriate rules configured for the client nodes to function properly.
When using existing security groups, you must provide the corresponding group names for all other associated components as well."
- },
- {
- "name": "gklm_security_group_name",
- "value": "__NULL__",
- "type": "string",
- "secure": false,
- "description": "Provide the security group name to provision the GKLM nodes. If set to null, the solution will automatically create the necessary security group and rules. If you choose to use an existing security group, ensure it has the appropriate rules configured for the GKLM nodes to function properly. When using existing security groups, you must provide the corresponding group names for all other associated components as well."
- },
- {
- "name": "ldap_security_group_name",
- "value": "__NULL__",
- "type": "string",
- "secure": false,
- "description": "Provide the security group name to provision the LDAP nodes. If set to null, the solution will automatically create the necessary security group and rules. If you choose to use an existing security group, ensure it has the appropriate rules configured for the LDAP nodes to function properly. When using existing security groups, you must provide the corresponding group names for all other associated components as well."
- },
- {
- "name": "login_subnet_id",
- "value": "__NULL__",
- "type": "string",
- "secure": false,
- "description": "Provide the ID of an existing subnet to be used for provisioning the bastion/deployer node. This is required only when deploying into an existing VPC (i.e., when a value is provided for `vpc_name`). When specifying an existing subnet, ensure that a public gateway is attached to the subnet to enable outbound internet access if required. Additionally, if this subnet ID is provided, you must also provide subnet IDs for all other applicable components (e.g., storage, compute, client, protocol, gklm) to maintain consistency across the deployment."
- },
- {
- "name": "compute_subnet_id",
- "value": "__NULL__",
- "type": "string",
- "secure": false,
- "description": "Provide the ID of an existing subnet to be used for provisioning compute nodes. This is required only when deploying into an existing VPC (i.e., when a value is provided for `vpc_name`). When specifying an existing subnet, ensure that a public gateway is attached to the subnet to enable outbound internet access if required. Additionally, if this subnet ID is provided, you must also provide subnet IDs for all other applicable components (e.g., storage, protocol, client, login, gklm) to maintain consistency across the deployment."
- },
- {
- "name": "storage_subnet_id",
- "value": "__NULL__",
- "type": "string",
- "secure": false,
- "description": "Provide the ID of an existing subnet to be used for storage nodes. This is required only when deploying into an existing VPC (i.e., when a value is provided for `vpc_name`). When specifying an existing subnet, ensure that a public gateway is attached to the subnet to enable outbound internet access if required. Additionally, if this subnet ID is provided, you must also provide subnet IDs for all other applicable components (e.g., compute, protocol, client, login, gklm) to maintain consistency across the deployment."
- },
- {
- "name": "protocol_subnet_id",
- "value": "__NULL__",
- "type": "string",
- "secure": false,
- "description": "Provide the ID of an existing subnet to be used for protocol nodes. This is required only when deploying into an existing VPC (i.e., when a value is provided for `vpc_name`).
When specifying an existing subnet, ensure that a public gateway is attached to the subnet to enable outbound internet access if required. Additionally, if this subnet ID is provided, you must also provide subnet IDs for all other applicable components (e.g., storage, compute, client, login, gklm) to maintain consistency across the deployment."
- },
- {
- "name": "client_subnet_id",
- "value": "__NULL__",
- "type": "string",
- "secure": false,
- "description": "Provide the ID of an existing subnet to be used for client nodes. This is required only when deploying into an existing VPC (i.e., when a value is provided for `vpc_name`). When specifying an existing subnet, ensure that a public gateway is attached to the subnet to enable outbound internet access if required. Additionally, if this subnet ID is provided, you must also provide subnet IDs for all other applicable components (e.g., storage, compute, protocol, login, gklm) to maintain consistency across the deployment."
- },
- {
- "name": "TF_VERSION",
- "value": "1.9",
- "type": "string",
- "secure": false,
- "description": "The version of the Terraform engine that's used in the Schematics workspace."
- },
- {
- "name": "TF_PARALLELISM",
- "value": "250",
- "type": "string",
- "secure": false,
- "description": "Parallelism (concurrent operations) limit. Valid values are between 1 and 256, inclusive. [Learn more](https://www.terraform.io/docs/internals/graph.html#walking-the-graph)."
- }
- ]
- }
- ]
-}
diff --git a/solutions/custom/variables.tf b/solutions/custom/variables.tf
index 149845ac..a9bb616f 100644
--- a/solutions/custom/variables.tf
+++ b/solutions/custom/variables.tf
@@ -4,7 +4,7 @@ variable "scheduler" {
 type = string
 default = "LSF"
- description = "Select one of the scheduler (Scale/LSF/Symphony/Slurm/null)"
+ description = "Select one of the scheduler (LSF/Symphony/Slurm/null)"
 }
 variable "ibm_customer_number" {
@@ -247,7 +247,7 @@ variable "storage_instances" {
 profile = string
 count = number
 image = string
- filesystem = optional(string)
+ filesystem = string
 })
 )
 default = [{
@@ -480,6 +480,66 @@ variable "enable_vpc_flow_logs" {
 description = "Enable Activity tracker"
 }
+##############################################################################
+# Scale specific Variables
+##############################################################################
+# variable "filesystem_config" {
+#   type = list(object({
+#     filesystem               = string
+#     block_size               = string
+#     default_data_replica     = number
+#     default_metadata_replica = number
+#     max_data_replica         = number
+#     max_metadata_replica     = number
+#     mount_point              = string
+#   }))
+#   default     = null
+#   description = "File system configurations."
+# }
+
+# variable "filesets_config" {
+#   type = list(object({
+#     fileset           = string
+#     filesystem        = string
+#     junction_path     = string
+#     client_mount_path = string
+#     quota             = number
+#   }))
+#   default     = null
+#   description = "Fileset configurations."
+# }
+
+# variable "afm_instances" {
+#   type = list(
+#     object({
+#       profile = string
+#       count   = number
+#       image   = string
+#     })
+#   )
+#   default = [{
+#     profile = "bx2-2x8"
+#     count   = 0
+#     image   = "ibm-redhat-8-10-minimal-amd64-2"
+#   }]
+#   description = "Number of instances to be launched for afm hosts."
+# } + +# variable "afm_cos_config" { +# type = list(object({ +# afm_fileset = string, +# mode = string, +# cos_instance = string, +# bucket_name = string, +# bucket_region = string, +# cos_service_cred_key = string, +# bucket_type = string, +# bucket_storage_class = string +# })) +# default = null +# description = "AFM configurations." +# } + ############################################################################## # LSF specific Variables ############################################################################## diff --git a/solutions/hpcaas/variables.tf b/solutions/hpcaas/variables.tf index 015f07fb..eededab6 100644 --- a/solutions/hpcaas/variables.tf +++ b/solutions/hpcaas/variables.tf @@ -240,7 +240,7 @@ variable "storage_instances" { profile = string count = number image = string - filesystem = optional(string) + filesystem = string }) ) default = [{ diff --git a/solutions/lsf/datasource.tf b/solutions/lsf/datasource.tf index cac16716..afdf5435 100644 --- a/solutions/lsf/datasource.tf +++ b/solutions/lsf/datasource.tf @@ -8,11 +8,11 @@ data "ibm_is_vpc" "existing_vpc" { name = var.vpc_name } -data "ibm_is_subnet" "existing_compute_subnets" { - count = var.vpc_name != null && var.compute_subnet_id != null ? 1 : 0 - identifier = var.compute_subnet_id +data "ibm_is_subnet" "existing_cluster_subnets" { + count = var.vpc_name != null && var.cluster_subnet_id != null ? 1 : 0 + identifier = var.cluster_subnet_id } data "ibm_is_public_gateways" "public_gateways" { - count = var.vpc_name != null && var.compute_subnet_id == null && var.login_subnet_id == null ? 1 : 0 + count = var.vpc_name != null && var.cluster_subnet_id == null && var.login_subnet_id == null ? 1 : 0 } diff --git a/solutions/lsf/input_validation.tf b/solutions/lsf/input_validation.tf index 5c8ddeae..64e6f141 100644 --- a/solutions/lsf/input_validation.tf +++ b/solutions/lsf/input_validation.tf @@ -22,36 +22,36 @@ locals { (local.validate_login_subnet_id_vpc ? local.validate_login_subnet_id_vpc_msg : "")) # Validate existing subnet public gateways - validate_subnet_name_pg_msg = "Provided existing compute_subnet_id should have public gateway attached." - validate_subnet_name_pg = anytrue([var.compute_subnet_id == null, var.compute_subnet_id != null && var.vpc_name != null ? (data.ibm_is_subnet.existing_compute_subnets[0].public_gateway != "") : false]) + validate_subnet_name_pg_msg = "Provided existing cluster_subnet_id should have public gateway attached." + validate_subnet_name_pg = anytrue([var.cluster_subnet_id == null, var.cluster_subnet_id != null && var.vpc_name != null ? (data.ibm_is_subnet.existing_cluster_subnets[0].public_gateway != "") : false]) # tflint-ignore: terraform_unused_declarations validate_subnet_name_pg_chk = regex("^${local.validate_subnet_name_pg_msg}$", (local.validate_subnet_name_pg ? local.validate_subnet_name_pg_msg : "")) # Validate existing cluster subnet should be in the appropriate zone. validate_subnet_id_zone_msg = "Provided cluster subnets should be in appropriate zone." - validate_subnet_id_zone = anytrue([var.compute_subnet_id == null, var.compute_subnet_id != null && var.vpc_name != null ? alltrue([data.ibm_is_subnet.existing_compute_subnets[0].zone == var.zones[0]]) : false]) + validate_subnet_id_zone = anytrue([var.cluster_subnet_id == null, var.cluster_subnet_id != null && var.vpc_name != null ? 
alltrue([data.ibm_is_subnet.existing_cluster_subnets[0].zone == var.zones[0]]) : false]) # tflint-ignore: terraform_unused_declarations validate_subnet_id_zone_chk = regex("^${local.validate_subnet_id_zone_msg}$", (local.validate_subnet_id_zone ? local.validate_subnet_id_zone_msg : "")) # Validate existing cluster subnet should be the subset of vpc_name entered - validate_compute_subnet_id_vpc_msg = "Provided cluster subnet should be within the vpc entered." - validate_compute_subnet_id_vpc = anytrue([var.compute_subnet_id == null, var.compute_subnet_id != null && var.vpc_name != null ? alltrue([for subnet_id in [var.compute_subnet_id] : contains(data.ibm_is_vpc.existing_vpc[0].subnets[*].id, subnet_id)]) : false]) + validate_cluster_subnet_id_vpc_msg = "Provided cluster subnet should be within the vpc entered." + validate_cluster_subnet_id_vpc = anytrue([var.cluster_subnet_id == null, var.cluster_subnet_id != null && var.vpc_name != null ? alltrue([for subnet_id in [var.cluster_subnet_id] : contains(data.ibm_is_vpc.existing_vpc[0].subnets[*].id, subnet_id)]) : false]) # tflint-ignore: terraform_unused_declarations - validate_subnet_id_vpc_chk = regex("^${local.validate_compute_subnet_id_vpc_msg}$", - (local.validate_compute_subnet_id_vpc ? local.validate_compute_subnet_id_vpc_msg : "")) + validate_subnet_id_vpc_chk = regex("^${local.validate_cluster_subnet_id_vpc_msg}$", + (local.validate_cluster_subnet_id_vpc ? local.validate_cluster_subnet_id_vpc_msg : "")) # Validate existing vpc public gateways validate_existing_vpc_pgw_msg = "Provided existing vpc should have the public gateways created in the provided zones." - validate_existing_vpc_pgw = anytrue([(var.vpc_name == null), alltrue([var.vpc_name != null, var.compute_subnet_id != null]), alltrue([var.vpc_name != null, var.compute_subnet_id == null, var.login_subnet_id == null, length(local.zone_1_pgw_ids) > 0])]) + validate_existing_vpc_pgw = anytrue([(var.vpc_name == null), alltrue([var.vpc_name != null, var.cluster_subnet_id != null]), alltrue([var.vpc_name != null, var.cluster_subnet_id == null, var.login_subnet_id == null, length(local.zone_1_pgw_ids) > 0])]) # tflint-ignore: terraform_unused_declarations validate_existing_vpc_pgw_chk = regex("^${local.validate_existing_vpc_pgw_msg}$", (local.validate_existing_vpc_pgw ? local.validate_existing_vpc_pgw_msg : "")) } locals { - vpc_id = var.vpc_name != null && var.compute_subnet_id == null && var.login_subnet_id == null ? data.ibm_is_vpc.existing_vpc[0].id : null - public_gateways_list = var.vpc_name != null && var.compute_subnet_id == null && var.login_subnet_id == null ? data.ibm_is_public_gateways.public_gateways[0].public_gateways : [] - zone_1_pgw_ids = var.vpc_name != null && var.compute_subnet_id == null && var.login_subnet_id == null ? [for gateway in local.public_gateways_list : gateway.id if gateway.vpc == local.vpc_id && gateway.zone == var.zones[0]] : [] + vpc_id = var.vpc_name != null && var.cluster_subnet_id == null && var.login_subnet_id == null ? data.ibm_is_vpc.existing_vpc[0].id : null + public_gateways_list = var.vpc_name != null && var.cluster_subnet_id == null && var.login_subnet_id == null ? data.ibm_is_public_gateways.public_gateways[0].public_gateways : [] + zone_1_pgw_ids = var.vpc_name != null && var.cluster_subnet_id == null && var.login_subnet_id == null ? 
[for gateway in local.public_gateways_list : gateway.id if gateway.vpc == local.vpc_id && gateway.zone == var.zones[0]] : [] } diff --git a/solutions/lsf/locals.tf b/solutions/lsf/locals.tf index 7a85747e..ce4d1c41 100644 --- a/solutions/lsf/locals.tf +++ b/solutions/lsf/locals.tf @@ -23,7 +23,7 @@ locals { ssh_keys = var.ssh_keys vpc_cluster_login_private_subnets_cidr_blocks = var.vpc_cluster_login_private_subnets_cidr_blocks vpc_cluster_private_subnets_cidr_blocks = var.vpc_cluster_private_subnets_cidr_blocks - compute_subnet_id = var.compute_subnet_id + cluster_subnet_id = var.cluster_subnet_id cos_instance_name = var.cos_instance_name dns_custom_resolver_id = var.dns_custom_resolver_id dns_instance_id = var.dns_instance_id @@ -95,7 +95,7 @@ locals { vpc_cluster_login_private_subnets_cidr_blocks = lookup(local.override[local.override_type], "vpc_cluster_login_private_subnets_cidr_blocks", local.config.vpc_cluster_login_private_subnets_cidr_blocks) login_subnet_id = lookup(local.override[local.override_type], "login_subnet_id", local.config.login_subnet_id) vpc_cluster_private_subnets_cidr_blocks = lookup(local.override[local.override_type], "vpc_cluster_private_subnets_cidr_blocks", local.config.vpc_cluster_private_subnets_cidr_blocks) - compute_subnet_id = lookup(local.override[local.override_type], "compute_subnet_id", local.config.compute_subnet_id) + cluster_subnet_id = lookup(local.override[local.override_type], "cluster_subnet_id", local.config.cluster_subnet_id) cos_instance_name = lookup(local.override[local.override_type], "cos_instance_name", local.config.cos_instance_name) dns_custom_resolver_id = lookup(local.override[local.override_type], "dns_custom_resolver_id", local.config.dns_custom_resolver_id) dns_instance_id = lookup(local.override[local.override_type], "dns_instance_id", local.config.dns_instance_id) diff --git a/solutions/lsf/main.tf b/solutions/lsf/main.tf index c711c07c..774e2dec 100644 --- a/solutions/lsf/main.tf +++ b/solutions/lsf/main.tf @@ -9,7 +9,7 @@ module "lsf" { vpc_cluster_login_private_subnets_cidr_blocks = local.env.vpc_cluster_login_private_subnets_cidr_blocks login_subnet_id = local.env.login_subnet_id vpc_cluster_private_subnets_cidr_blocks = local.env.vpc_cluster_private_subnets_cidr_blocks - compute_subnet_id = local.env.compute_subnet_id + cluster_subnet_id = local.env.cluster_subnet_id cos_instance_name = local.env.cos_instance_name dns_custom_resolver_id = local.env.dns_custom_resolver_id dns_instance_id = local.env.dns_instance_id diff --git a/solutions/lsf/override.json b/solutions/lsf/override.json index f20df220..ddfc48ca 100644 --- a/solutions/lsf/override.json +++ b/solutions/lsf/override.json @@ -98,6 +98,10 @@ "observability_logs_retention_period": 7, "observability_monitoring_on_compute_nodes_enable": false, "observability_monitoring_plan": "graduated-tier", + "scc_enable": true, + "scc_profile": "CIS IBM Cloud Foundations Benchmark v1.1.0", + "scc_location": "us-south", + "scc_event_notification_plan": "lite", "skip_flowlogs_s2s_auth_policy": false, "skip_kms_s2s_auth_policy": false, "skip_iam_authorization_policy": false diff --git a/solutions/lsf/variables.tf b/solutions/lsf/variables.tf index 2abcde56..59d750e2 100644 --- a/solutions/lsf/variables.tf +++ b/solutions/lsf/variables.tf @@ -26,18 +26,18 @@ variable "app_center_gui_password" { type = string default = "" sensitive = true - description = "Password required to access the IBM Spectrum LSF Application Center (App Center) GUI, which is enabled by default in both Fix Pack 
15 and Fix Pack 14 with HTTPS. This is a mandatory value and omitting it will result in deployment failure. The password must meet the following requirements, at least 15 characters in length, and must include one uppercase letter, one lowercase letter, one number, and one special character. Spaces are not allowed."
+ description = "Password required to access the IBM Spectrum LSF Application Center (App Center) GUI, which is enabled by default in both Fix Pack 15 and Fix Pack 14 with HTTPS. This is a mandatory value and omitting it will result in deployment failure. The password must meet the following requirements: at least 8 characters in length, including one uppercase letter, one lowercase letter, one number, and one special character. Spaces are not allowed."
 validation {
 condition = (
- can(regex("^.{15,}$", var.app_center_gui_password)) &&
+ can(regex("^.{8,}$", var.app_center_gui_password)) &&
 can(regex("[0-9]", var.app_center_gui_password)) &&
 can(regex("[a-z]", var.app_center_gui_password)) &&
 can(regex("[A-Z]", var.app_center_gui_password)) &&
 can(regex("[!@#$%^&*()_+=-]", var.app_center_gui_password)) &&
 !can(regex(".*\\s.*", var.app_center_gui_password))
 )
- error_message = "The password must be at least 15 characters long and include at least one lowercase letter, one uppercase letter, one number, and one special character (!@#$%^&*()_+=-). Spaces are not allowed."
+ error_message = "The password must be at least 8 characters long and include at least one lowercase letter, one uppercase letter, one number, and one special character (!@#$%^&*()_+=-). Spaces are not allowed."
 }
 }
@@ -146,18 +146,18 @@ variable "login_subnet_id" {
 default = null
 description = "Provide the ID of an existing subnet to deploy cluster resources; this is used only for provisioning bastion, deployer, and login nodes. If not provided, a new subnet will be created. When providing an existing subnet ID, make sure that the subnet has an associated public gateway. [Learn more](https://cloud.ibm.com/docs/vpc)."
 validation {
- condition = (var.compute_subnet_id == null && var.login_subnet_id == null) || (var.compute_subnet_id != null && var.login_subnet_id != null)
- error_message = "In case of existing subnets, provide both login_subnet_id and compute_subnet_id."
+ condition = (var.cluster_subnet_id == null && var.login_subnet_id == null) || (var.cluster_subnet_id != null && var.login_subnet_id != null)
+ error_message = "In case of existing subnets, provide both login_subnet_id and cluster_subnet_id."
 }
 }
-variable "compute_subnet_id" {
+variable "cluster_subnet_id" {
 type = string
 default = null
 description = "Provide the ID of an existing subnet to deploy cluster resources; this is used only for provisioning VPC file storage shares, management, and compute nodes. If not provided, a new subnet will be created. Ensure that a public gateway is attached to enable VPC API communication. [Learn more](https://cloud.ibm.com/docs/vpc)."
 validation {
- condition = anytrue([var.vpc_name != null && var.compute_subnet_id != null, var.compute_subnet_id == null])
- error_message = "If the compute_subnet_id are provided, the user should also provide the vpc_name."
+ condition = anytrue([var.vpc_name != null && var.cluster_subnet_id != null, var.cluster_subnet_id == null])
+ error_message = "If the cluster_subnet_id is provided, the user should also provide the vpc_name."
} } ############################################################################## @@ -262,7 +262,7 @@ variable "management_instances" { count = 2 image = "hpc-lsf-fp15-rhel810-v1" }] - description = "Specify the list of management node configurations, including instance profile, image name, and count. By default, all management nodes are created using Fix Pack 15. If deploying with Fix Pack 14, set lsf_version to fixpack_14 and use the corresponding image hpc-lsf-fp14-rhel810-v1. The selected image must align with the specified lsf_version, any mismatch may lead to deployment failures. The solution allows customization of instance profiles and counts, but mixing custom images and IBM stock images across instances is not supported. If using IBM stock images, only Red Hat-based images are allowed. Management nodes must have a minimum of 9 GB RAM. Select a profile with 9 GB or higher." + description = "Specify the list of management node configurations, including instance profile, image name, and count. By default, all management nodes are created using Fix Pack 15. If deploying with Fix Pack 14, set lsf_version to fixpack_14 and use the corresponding image hpc-lsf-fp14-rhel810-v1. The selected image must align with the specified lsf_version, any mismatch may lead to deployment failures. The solution allows customization of instance profiles and counts, but mixing custom images and IBM stock images across instances is not supported. If using IBM stock images, only Red Hat-based images are allowed." validation { condition = alltrue([for inst in var.management_instances : !contains([for i in var.management_instances : can(regex("^ibm", i.image))], true) || can(regex("^ibm-redhat", inst.image))]) error_message = "When defining management_instances, all instances must either use custom images or IBM stock images exclusively — mixing the two is not supported. If stock images are used, only Red Hat-based IBM images (e.g., ibm-redhat-*) are allowed." @@ -286,13 +286,6 @@ variable "management_instances" { ]) error_message = "Mismatch between management_instances image and lsf_version. Use an image with 'fp14' only when lsf_version is fixpack_14, and 'fp15' only with fixpack_15." } - validation { - condition = alltrue([ - for inst in var.management_instances : - tonumber(regex("\\d+$", inst.profile)) >= 9 - ]) - error_message = "Management node memory requirement not met. Minimum: 9 GB RAM. Please select a profile with 9 GB or higher." - } } variable "static_compute_instances" { @@ -442,12 +435,12 @@ variable "dns_domain_name" { compute = string }) default = { - compute = "hpc.local" + compute = "lsf.com" } description = "IBM Cloud DNS Services domain name to be used for the IBM Spectrum LSF cluster." validation { - condition = can(regex("^[a-zA-Z0-9]([a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(\\.[a-zA-Z]{2,})+$", var.dns_domain_name.compute)) - error_message = "The compute domain name must be a valid FQDN. It may include letters, digits, hyphens, and must start and end with an alphanumeric character." + condition = can(regex("^[a-zA-Z0-9]([a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?\\.com$", var.dns_domain_name.compute)) + error_message = "The compute domain name must be a valid FQDN ending in '.com'. It may include letters, digits, hyphens, and must start and end with an alphanumeric character." } } @@ -506,7 +499,7 @@ variable "enable_ldap" { variable "ldap_basedns" { type = string - default = "hpc.local" + default = "lsf.com" description = "The dns domain name is used for configuring the LDAP server. 
If an LDAP server is already in existence, ensure to provide the associated DNS domain name." validation { condition = var.enable_ldap == false || (var.ldap_basedns != null ? (length(trimspace(var.ldap_basedns)) > 0 && var.ldap_basedns != "null") : false) @@ -520,7 +513,7 @@ variable "ldap_server" { description = "Provide the IP address for the existing LDAP server. If no address is given, a new LDAP server will be created." validation { condition = var.enable_ldap == false || var.ldap_server == null || (var.ldap_server != null ? (length(trimspace(var.ldap_server)) > 0 && var.ldap_server != "null") : true) - error_message = "If LDAP is enabled and you choose to use an existing server, you must provide a valid LDAP server IP address." + error_message = "If LDAP is enabled, an existing LDAP server IP should be provided." } } @@ -539,24 +532,10 @@ variable "ldap_admin_password" { type = string sensitive = true default = null - description = "The LDAP admin password must be 15 to 32 characters long and include at least two alphabetic characters (with one uppercase and one lowercase), one number, and one special character from the set (!@#$%^&*()_+=-). The password must not contain the username or any spaces. [This value is ignored for an existing LDAP server]." + description = "The LDAP admin password must be 8 to 20 characters long and include at least two alphabetic characters (with one uppercase and one lowercase), one number, and one special character from the set (!@#$%^&*()_+=-). The password must not contain the username or any spaces. [This value is ignored for an existing LDAP server]." validation { - condition = ( - var.enable_ldap ? ( - var.ldap_server == null ? ( - var.ldap_admin_password != null ? ( - try(length(var.ldap_admin_password)) >= 15 && - try(length(var.ldap_admin_password)) <= 32 && - try(can(regex(".*[0-9].*", var.ldap_admin_password)), false) && - try(can(regex(".*[A-Z].*", var.ldap_admin_password)), false) && - try(can(regex(".*[a-z].*", var.ldap_admin_password)), false) && - try(can(regex(".*[!@#$%^&*()_+=-].*", var.ldap_admin_password)), false) && - !try(can(regex(".*\\s.*", var.ldap_admin_password)), false) - ) : false - ) : true - ) : true - ) - error_message = "The LDAP admin password must be 15 to 32 characters long and include at least two alphabetic characters (with one uppercase and one lowercase), one number, and one special character from the set (!@#$%^&*()_+=-). The password must not contain any spaces." + condition = (!var.enable_ldap || var.ldap_server != null || can(var.ldap_admin_password != null && length(var.ldap_admin_password) >= 8 && length(var.ldap_admin_password) <= 20 && regex(".*[0-9].*", var.ldap_admin_password) != "" && regex(".*[A-Z].*", var.ldap_admin_password) != "" && regex(".*[a-z].*", var.ldap_admin_password) != "" && regex(".*[!@#$%^&*()_+=-].*", var.ldap_admin_password) != "" && !can(regex(".*\\s.*", var.ldap_admin_password)))) + error_message = "The LDAP admin password must be 8 to 20 characters long and include at least two alphabetic characters (with one uppercase and one lowercase), one number, and one special character from the set (!@#$%^&*()_+=-). The password must not contain the username or any spaces." 
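# The single-line condition above packs the whole rule into one expression;
# it reads as the following equivalent sketch (same logic, reformatted):
#   condition = (
#     !var.enable_ldap                # LDAP disabled: nothing to check
#     || var.ldap_server != null      # existing server: password is ignored
#     || can(                         # otherwise the new admin password must:
#       var.ldap_admin_password != null
#       && length(var.ldap_admin_password) >= 8                          # be 8-20 characters
#       && length(var.ldap_admin_password) <= 20
#       && regex(".*[0-9].*", var.ldap_admin_password) != ""             # contain a digit
#       && regex(".*[A-Z].*", var.ldap_admin_password) != ""             # an uppercase letter
#       && regex(".*[a-z].*", var.ldap_admin_password) != ""             # a lowercase letter
#       && regex(".*[!@#$%^&*()_+=-].*", var.ldap_admin_password) != ""  # a special character
#       && !can(regex(".*\\s.*", var.ldap_admin_password))               # and no whitespace
#     )
#   )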
} } @@ -574,10 +553,10 @@ variable "ldap_user_password" { type = string sensitive = true default = "" - description = "The LDAP user password must be 15 to 32 characters long and include at least two alphabetic characters (with one uppercase and one lowercase), one numeric digit, and at least one special character from the set (!@#$%^&*()_+=-). Spaces are not allowed. The password must not contain the username for enhanced security. [This value is ignored for an existing LDAP server]." + description = "The LDAP user password must be 8 to 20 characters long and include at least two alphabetic characters (with one uppercase and one lowercase), one numeric digit, and at least one special character from the set (!@#$%^&*()_+=-). Spaces are not allowed. The password must not contain the username for enhanced security. [This value is ignored for an existing LDAP server]." validation { - condition = !var.enable_ldap || var.ldap_server != null || ((replace(lower(var.ldap_user_password), lower(var.ldap_user_name), "") == lower(var.ldap_user_password)) && length(var.ldap_user_password) >= 15 && length(var.ldap_user_password) <= 32 && can(regex("^(.*[0-9]){1}.*$", var.ldap_user_password))) && can(regex("^(.*[A-Z]){1}.*$", var.ldap_user_password)) && can(regex("^(.*[a-z]){1}.*$", var.ldap_user_password)) && can(regex("^.*[!@#$%^&*()_+=-].*$", var.ldap_user_password)) && !can(regex(".*\\s.*", var.ldap_user_password)) - error_message = "The LDAP user password must be 15 to 32 characters long and include at least two alphabetic characters (with one uppercase and one lowercase), one number, and one special character from the set (!@#$%^&*()_+=-). The password must not contain the username or any spaces." + condition = !var.enable_ldap || var.ldap_server != null || ((replace(lower(var.ldap_user_password), lower(var.ldap_user_name), "") == lower(var.ldap_user_password)) && length(var.ldap_user_password) >= 8 && length(var.ldap_user_password) <= 20 && can(regex("^(.*[0-9]){1}.*$", var.ldap_user_password))) && can(regex("^(.*[A-Z]){1}.*$", var.ldap_user_password)) && can(regex("^(.*[a-z]){1}.*$", var.ldap_user_password)) && can(regex("^.*[!@#$%^&*()_+=-].*$", var.ldap_user_password)) && !can(regex(".*\\s.*", var.ldap_user_password)) + error_message = "The LDAP user password must be 8 to 20 characters long and include at least two alphabetic characters (with one uppercase and one lowercase), one number, and one special character from the set (!@#$%^&*()_+=-). The password must not contain the username or any spaces." } } @@ -852,9 +831,9 @@ variable "app_config_plan" { type = string default = "basic" validation { - error_message = "Plan for App configuration can only be basic, standardv2, enterprise.." + error_message = "Plan for App configuration can only be basic, lite, standardv2, or enterprise."
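# With "lite" restored to the allowed set, for example:
#   app_config_plan = "lite"      # accepted by the contains() check below
#   app_config_plan = "premium"   # rejected with the message above (hypothetical value)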
condition = contains( - ["basic", "standardv2", "enterprise"], + ["basic", "lite", "standardv2", "enterprise"], var.app_config_plan ) } diff --git a/solutions/scale/catalogValidationValues.json.template b/solutions/scale/catalogValidationValues.json.template index f4ed6d44..bb5298d4 100644 --- a/solutions/scale/catalogValidationValues.json.template +++ b/solutions/scale/catalogValidationValues.json.template @@ -3,8 +3,5 @@ "cluster_prefix": $PREFIX, "zones": "[\"ca-tor-1\"]", "existing_resource_group": "geretain-hpc-rg", - "ssh_keys": "[\"geretain-hpc-ssh-key\"]", - "ibm_customer_number": $HPC_IBM_CUSTOMER_NUMBER, - "storage_gui_username": "storagegui", - "storage_gui_password": $RAND_PASSWORD + "ssh_keys": "[\"geretain-hpc-ssh-key\"]" } diff --git a/solutions/scale/datasource.tf b/solutions/scale/datasource.tf deleted file mode 100644 index f320a653..00000000 --- a/solutions/scale/datasource.tf +++ /dev/null @@ -1,29 +0,0 @@ -data "ibm_is_security_group" "storage_security_group" { - count = var.vpc_name != null && var.storage_security_group_name != null ? 1 : 0 - name = var.storage_security_group_name -} - -data "ibm_is_security_group" "compute_security_group" { - count = var.vpc_name != null && var.compute_security_group_name != null ? 1 : 0 - name = var.compute_security_group_name -} - -data "ibm_is_security_group" "gklm_security_group" { - count = var.vpc_name != null && var.gklm_security_group_name != null ? 1 : 0 - name = var.gklm_security_group_name -} - -data "ibm_is_security_group" "ldap_security_group" { - count = var.vpc_name != null && var.ldap_security_group_name != null ? 1 : 0 - name = var.ldap_security_group_name -} - -data "ibm_is_security_group" "client_security_group" { - count = var.vpc_name != null && var.client_security_group_name != null ? 1 : 0 - name = var.client_security_group_name -} - -data "ibm_is_security_group" "login_security_group" { - count = var.vpc_name != null && var.login_security_group_name != null ? 1 : 0 - name = var.login_security_group_name -} diff --git a/solutions/scale/input_validation.tf b/solutions/scale/input_validation.tf index 64a141f4..066abd17 100644 --- a/solutions/scale/input_validation.tf +++ b/solutions/scale/input_validation.tf @@ -11,199 +11,3 @@ locals { # tflint-ignore: terraform_unused_declarations icn_chk = regex("^${local.icn_msg}$", (local.icn_cnd ? local.icn_msg : "")) } - -locals { - total_compute_instance_count = sum(var.compute_instances[*]["count"]) - total_storage_instance_count = var.storage_type == "persistent" ? 
sum(var.storage_baremetal_server[*]["count"]) : sum(var.storage_instances[*]["count"]) - total_client_instance_count = sum(var.client_instances[*]["count"]) - total_gklm_instance_count = sum(var.gklm_instances[*]["count"]) - total_protocol_instance_count = sum(var.protocol_instances[*]["count"]) - - storage_sg_rules = flatten([for remote in data.ibm_is_security_group.storage_security_group[*].rules[*] : remote[*].remote]) - compute_sg_rules = flatten([for remote in data.ibm_is_security_group.compute_security_group[*].rules[*] : remote[*].remote]) - gklm_sg_rules = flatten([for remote in data.ibm_is_security_group.gklm_security_group[*].rules[*] : remote[*].remote]) - ldap_sg_rules = flatten([for remote in data.ibm_is_security_group.ldap_security_group[*].rules[*] : remote[*].remote]) - client_sg_rules = flatten([for remote in data.ibm_is_security_group.client_security_group[*].rules[*] : remote[*].remote]) - # bastion_sg_rules = flatten([for remote in data.ibm_is_security_group.login_security_group[*].rules[*] : remote[*].remote]) - - gklm_condition = var.enable_sg_validation == true && local.total_gklm_instance_count > 0 && var.scale_encryption_enabled == true && var.scale_encryption_type == "gklm" && var.gklm_security_group_name != null - strg_condition = var.enable_sg_validation == true && local.total_storage_instance_count > 0 && var.storage_security_group_name != null - clnt_condition = var.enable_sg_validation == true && local.total_client_instance_count > 0 && var.client_security_group_name != null - comp_condition = var.enable_sg_validation == true && local.total_compute_instance_count > 0 && var.compute_security_group_name != null - ldap_condition = var.enable_sg_validation == true && var.enable_ldap == true && var.ldap_security_group_name != null - bastion_condition = var.enable_sg_validation == true && var.login_security_group_name != null - - # Storage Security group validation - validate_strg_sg_in_strg_sg = local.strg_condition ? contains(local.storage_sg_rules, tolist(data.ibm_is_security_group.storage_security_group[*].id)[0]) : true - strg_sg_in_strg_sg_msg = "The Storage security group does not include the storage security group as a rule." - # tflint-ignore: terraform_unused_declarations - validate_strg_sg_in_strg_sg_chk = var.storage_security_group_name != null ? regex("^${local.strg_sg_in_strg_sg_msg}$", (local.validate_strg_sg_in_strg_sg ? local.strg_sg_in_strg_sg_msg : "")) : true - - validate_comp_sg_in_strg_sg = local.comp_condition ? contains(local.storage_sg_rules, tolist(data.ibm_is_security_group.compute_security_group[*].id)[0]) : true - comp_sg_in_strg_sg_msg = "The Storage security group does not include the compute security group as a rule." - # tflint-ignore: terraform_unused_declarations - validate_comp_sg_in_strg_sg_chk = var.storage_security_group_name != null ? regex("^${local.comp_sg_in_strg_sg_msg}$", (local.validate_comp_sg_in_strg_sg ? local.comp_sg_in_strg_sg_msg : "")) : true - - validate_client_sg_in_strg_sg = local.clnt_condition ? contains(local.storage_sg_rules, tolist(data.ibm_is_security_group.client_security_group[*].id)[0]) : true - client_sg_in_strg_sg_msg = "The Storage security group does not include the client security group as a rule." - # tflint-ignore: terraform_unused_declarations - validate_client_sg_in_strg_sg_chk = var.storage_security_group_name != null ? regex("^${local.client_sg_in_strg_sg_msg}$", (local.validate_client_sg_in_strg_sg ? 
local.client_sg_in_strg_sg_msg : "")) : true - - validate_gklm_sg_in_strg_sg = local.gklm_condition ? contains(local.storage_sg_rules, tolist(data.ibm_is_security_group.gklm_security_group[*].id)[0]) : true - gklm_sg_in_strg_sg_msg = "The Storage security group does not include the gklm security group as a rule." - # tflint-ignore: terraform_unused_declarations - validate_gklm_sg_in_strg_sg_chk = var.storage_security_group_name != null ? regex("^${local.gklm_sg_in_strg_sg_msg}$", (local.validate_gklm_sg_in_strg_sg ? local.gklm_sg_in_strg_sg_msg : "")) : true - - validate_ldap_sg_in_strg_sg = local.ldap_condition ? contains(local.storage_sg_rules, tolist(data.ibm_is_security_group.ldap_security_group[*].id)[0]) : true - ldap_sg_in_strg_sg_msg = "The Storage security group does not include the ldap security group as a rule." - # tflint-ignore: terraform_unused_declarations - validate_ldap_sg_in_strg_sg_chk = var.storage_security_group_name != null ? regex("^${local.ldap_sg_in_strg_sg_msg}$", (local.validate_ldap_sg_in_strg_sg ? local.ldap_sg_in_strg_sg_msg : "")) : true - - validate_bastion_in_strg_sg = local.bastion_condition ? contains(local.storage_sg_rules, tolist(data.ibm_is_security_group.login_security_group[*].id)[0]) : true - bastion_sg_in_strg_sg_msg = "The Storage security group does not include the bastion security group as a rule." - # tflint-ignore: terraform_unused_declarations - validate_bastion_in_strg_sg_chk = var.storage_security_group_name != null ? regex("^${local.bastion_sg_in_strg_sg_msg}$", (local.validate_bastion_in_strg_sg ? local.bastion_sg_in_strg_sg_msg : "")) : true - - - # Compute Security group validation - validate_strg_sg_in_comp_sg = local.strg_condition ? contains(local.compute_sg_rules, tolist(data.ibm_is_security_group.storage_security_group[*].id)[0]) : true - strg_sg_in_comp_sg_msg = "The Compute security group does not include the storage security group as a rule." - # tflint-ignore: terraform_unused_declarations - validate_strg_sg_in_comp_sg_chk = var.compute_security_group_name != null ? regex("^${local.strg_sg_in_comp_sg_msg}$", (local.validate_strg_sg_in_comp_sg ? local.strg_sg_in_comp_sg_msg : "")) : true - - validate_comp_sg_in_comp_sg = local.comp_condition ? contains(local.compute_sg_rules, tolist(data.ibm_is_security_group.compute_security_group[*].id)[0]) : true - comp_sg_in_comp_sg_msg = "The Compute security group does not include the compute security group as a rule." - # tflint-ignore: terraform_unused_declarations - validate_comp_sg_in_comp_sg_chk = var.compute_security_group_name != null ? regex("^${local.comp_sg_in_comp_sg_msg}$", (local.validate_comp_sg_in_comp_sg ? local.comp_sg_in_comp_sg_msg : "")) : true - - validate_client_sg_in_comp_sg = local.clnt_condition ? contains(local.compute_sg_rules, tolist(data.ibm_is_security_group.client_security_group[*].id)[0]) : true - client_sg_in_comp_sg_msg = "The Compute security group does not include the client security group as a rule." - # tflint-ignore: terraform_unused_declarations - validate_client_sg_in_comp_sg_chk = var.compute_security_group_name != null ? regex("^${local.client_sg_in_comp_sg_msg}$", (local.validate_client_sg_in_comp_sg ? local.client_sg_in_comp_sg_msg : "")) : true - - validate_gklm_sg_in_comp_sg = local.gklm_condition ? contains(local.compute_sg_rules, tolist(data.ibm_is_security_group.gklm_security_group[*].id)[0]) : true - gklm_sg_in_comp_sg_msg = "The Compute security group does not include the gklm security group as a rule." 
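# Every *_chk local in this removed block relies on the same Terraform idiom:
# regex("^${msg}$", ok ? msg : "") matches when the check passes and raises a
# plan-time error surfacing msg when it fails. A minimal sketch of the idiom,
# with hypothetical names:
#   locals {
#     ok  = var.vpc_name != null
#     msg = "vpc_name must be provided."
#     # tflint-ignore: terraform_unused_declarations
#     chk = regex("^${local.msg}$", local.ok ? local.msg : "")  # errors when ok is false
#   }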
- # tflint-ignore: terraform_unused_declarations - validate_gklm_sg_in_comp_sg_chk = var.compute_security_group_name != null ? regex("^${local.gklm_sg_in_comp_sg_msg}$", (local.validate_gklm_sg_in_comp_sg ? local.gklm_sg_in_comp_sg_msg : "")) : true - - validate_ldap_sg_in_comp_sg = local.ldap_condition ? contains(local.compute_sg_rules, tolist(data.ibm_is_security_group.ldap_security_group[*].id)[0]) : true - ldap_sg_in_comp_sg_msg = "The Compute security group does not include the ldap security group as a rule." - # tflint-ignore: terraform_unused_declarations - validate_ldap_sg_in_comp_sg_chk = var.compute_security_group_name != null ? regex("^${local.ldap_sg_in_comp_sg_msg}$", (local.validate_ldap_sg_in_comp_sg ? local.ldap_sg_in_comp_sg_msg : "")) : true - - validate_bastion_sg_in_comp_sg = local.bastion_condition ? contains(local.compute_sg_rules, tolist(data.ibm_is_security_group.login_security_group[*].id)[0]) : true - bastion_sg_in_comp_sg_msg = "The Compute security group does not include the bastion security group as a rule." - # tflint-ignore: terraform_unused_declarations - validate_bastion_sg_in_comp_sg_chk = var.compute_security_group_name != null ? regex("^${local.bastion_sg_in_comp_sg_msg}$", (local.validate_bastion_sg_in_comp_sg ? local.bastion_sg_in_comp_sg_msg : "")) : true - - - # GKLM Security group validation - validate_strg_sg_in_gklm_sg = local.strg_condition ? contains(local.gklm_sg_rules, tolist(data.ibm_is_security_group.storage_security_group[*].id)[0]) : true - strg_sg_in_gklm_sg_msg = "The GKLM security group does not include the storage security group as a rule." - # tflint-ignore: terraform_unused_declarations - validate_strg_sg_in_gklm_sg_chk = var.gklm_security_group_name != null ? regex("^${local.strg_sg_in_gklm_sg_msg}$", (local.validate_strg_sg_in_gklm_sg ? local.strg_sg_in_gklm_sg_msg : "")) : true - - validate_comp_sg_in_gklm_sg = local.comp_condition ? contains(local.gklm_sg_rules, tolist(data.ibm_is_security_group.compute_security_group[*].id)[0]) : true - comp_sg_in_gklm_sg_msg = "The GKLM security group does not include the compute security group as a rule." - # tflint-ignore: terraform_unused_declarations - validate_comp_sg_in_gklm_sg_chk = var.gklm_security_group_name != null ? regex("^${local.comp_sg_in_gklm_sg_msg}$", (local.validate_comp_sg_in_gklm_sg ? local.comp_sg_in_gklm_sg_msg : "")) : true - - validate_gklm_sg_in_gklm_sg = local.gklm_condition ? contains(local.gklm_sg_rules, tolist(data.ibm_is_security_group.gklm_security_group[*].id)[0]) : true - gklm_sg_in_gklm_sg_msg = "The GKLM security group does not include the GKLM security group as a rule." - # tflint-ignore: terraform_unused_declarations - validate_gklm_sg_in_gklm_sg_chk = var.gklm_security_group_name != null ? regex("^${local.gklm_sg_in_gklm_sg_msg}$", (local.validate_gklm_sg_in_gklm_sg ? local.gklm_sg_in_gklm_sg_msg : "")) : true - - validate_client_sg_in_gklm_sg = local.clnt_condition ? contains(local.gklm_sg_rules, tolist(data.ibm_is_security_group.client_security_group[*].id)[0]) : true - client_sg_in_gklm_sg_msg = "The GKLM security group does not include the client security group as a rule." - # tflint-ignore: terraform_unused_declarations - validate_client_sg_in_gklm_sg_chk = var.gklm_security_group_name != null ? regex("^${local.client_sg_in_gklm_sg_msg}$", (local.validate_client_sg_in_gklm_sg ? local.client_sg_in_gklm_sg_msg : "")) : true - - validate_ldap_sg_in_gklm_sg = local.ldap_condition ? 
contains(local.gklm_sg_rules, tolist(data.ibm_is_security_group.ldap_security_group[*].id)[0]) : true - ldap_sg_in_gklm_sg_msg = "The GKLM security group does not include the ldap security group as a rule." - # tflint-ignore: terraform_unused_declarations - validate_ldap_sg_in_gklm_sg_chk = var.gklm_security_group_name != null ? regex("^${local.ldap_sg_in_gklm_sg_msg}$", (local.validate_ldap_sg_in_gklm_sg ? local.ldap_sg_in_gklm_sg_msg : "")) : true - - validate_bastion_sg_in_gklm_sg = local.bastion_condition ? contains(local.gklm_sg_rules, tolist(data.ibm_is_security_group.login_security_group[*].id)[0]) : true - bastion_sg_in_gklm_sg_msg = "The GKLM security group does not include the bastion security group as a rule." - # tflint-ignore: terraform_unused_declarations - validate_bastion_sg_in_gklm_sg_chk = var.gklm_security_group_name != null ? regex("^${local.bastion_sg_in_gklm_sg_msg}$", (local.validate_bastion_sg_in_gklm_sg ? local.bastion_sg_in_gklm_sg_msg : "")) : true - - - # LDAP Security group validation - validate_strg_sg_in_ldap_sg = local.strg_condition ? contains(local.ldap_sg_rules, tolist(data.ibm_is_security_group.storage_security_group[*].id)[0]) : true - strg_sg_in_ldap_sg_msg = "The LDAP security group does not include the storage security group as a rule." - # tflint-ignore: terraform_unused_declarations - validate_strg_sg_in_ldap_sg_chk = var.ldap_security_group_name != null ? regex("^${local.strg_sg_in_ldap_sg_msg}$", (local.validate_strg_sg_in_ldap_sg ? local.strg_sg_in_ldap_sg_msg : "")) : true - - validate_comp_sg_in_ldap_sg = local.comp_condition ? contains(local.ldap_sg_rules, tolist(data.ibm_is_security_group.compute_security_group[*].id)[0]) : true - comp_sg_in_ldap_sg_msg = "The LDAP security group does not include the compute security group as a rule." - # tflint-ignore: terraform_unused_declarations - validate_comp_sg_in_ldap_sg_chk = var.ldap_security_group_name != null ? regex("^${local.comp_sg_in_ldap_sg_msg}$", (local.validate_comp_sg_in_ldap_sg ? local.comp_sg_in_ldap_sg_msg : "")) : true - - validate_ldap_sg_in_ldap_sg = local.ldap_condition ? contains(local.ldap_sg_rules, tolist(data.ibm_is_security_group.ldap_security_group[*].id)[0]) : true - ldap_sg_in_ldap_sg_msg = "The LDAP security group does not include the LDAP security group as a rule." - # tflint-ignore: terraform_unused_declarations - validate_ldap_sg_in_ldap_sg_chk = var.ldap_security_group_name != null ? regex("^${local.ldap_sg_in_ldap_sg_msg}$", (local.validate_ldap_sg_in_ldap_sg ? local.ldap_sg_in_ldap_sg_msg : "")) : true - - validate_gklm_sg_in_ldap_sg = local.gklm_condition ? contains(local.ldap_sg_rules, tolist(data.ibm_is_security_group.gklm_security_group[*].id)[0]) : true - gklm_sg_in_ldap_sg_msg = "The LDAP security group does not include the GKLM security group as a rule." - # tflint-ignore: terraform_unused_declarations - validate_gklm_sg_in_ldap_sg_chk = var.ldap_security_group_name != null ? regex("^${local.gklm_sg_in_ldap_sg_msg}$", (local.validate_gklm_sg_in_ldap_sg ? local.gklm_sg_in_ldap_sg_msg : "")) : true - - validate_client_sg_in_ldap_sg = local.clnt_condition ? contains(local.ldap_sg_rules, tolist(data.ibm_is_security_group.client_security_group[*].id)[0]) : true - client_sg_in_ldap_sg_msg = "The LDAP security group does not include the client security group as a rule." - # tflint-ignore: terraform_unused_declarations - validate_client_sg_in_ldap_sg_chk = var.ldap_security_group_name != null ? 
regex("^${local.client_sg_in_ldap_sg_msg}$", (local.validate_client_sg_in_ldap_sg ? local.client_sg_in_ldap_sg_msg : "")) : true - - validate_bastion_sg_in_ldap_sg = local.bastion_condition ? contains(local.ldap_sg_rules, tolist(data.ibm_is_security_group.login_security_group[*].id)[0]) : true - bastion_sg_in_ldap_sg_msg = "The LDAP security group does not include the bastion security group as a rule." - # tflint-ignore: terraform_unused_declarations - validate_bastion_sg_in_ldap_sg_chk = var.ldap_security_group_name != null ? regex("^${local.bastion_sg_in_ldap_sg_msg}$", (local.validate_bastion_sg_in_ldap_sg ? local.bastion_sg_in_ldap_sg_msg : "")) : true - - # Client Security group validation - validate_strg_sg_in_client_sg = local.strg_condition ? contains(local.client_sg_rules, tolist(data.ibm_is_security_group.storage_security_group[*].id)[0]) : true - strg_sg_in_client_sg_msg = "The Client security group does not include the storage security group as a rule." - # tflint-ignore: terraform_unused_declarations - validate_strg_sg_in_client_sg_chk = var.client_security_group_name != null ? regex("^${local.strg_sg_in_client_sg_msg}$", (local.validate_strg_sg_in_client_sg ? local.strg_sg_in_client_sg_msg : "")) : true - - validate_comp_sg_in_client_sg = local.comp_condition ? contains(local.client_sg_rules, tolist(data.ibm_is_security_group.compute_security_group[*].id)[0]) : true - comp_sg_in_client_sg_msg = "The Client security group does not include the compute security group as a rule." - # tflint-ignore: terraform_unused_declarations - validate_comp_sg_in_client_sg_chk = var.client_security_group_name != null ? regex("^${local.comp_sg_in_client_sg_msg}$", (local.validate_comp_sg_in_client_sg ? local.comp_sg_in_client_sg_msg : "")) : true - - validate_ldap_sg_in_client_sg = local.ldap_condition ? contains(local.client_sg_rules, tolist(data.ibm_is_security_group.ldap_security_group[*].id)[0]) : true - ldap_sg_in_client_sg_msg = "The Client security group does not include the LDAP security group as a rule." - # tflint-ignore: terraform_unused_declarations - validate_ldap_sg_in_client_sg_chk = var.client_security_group_name != null ? regex("^${local.ldap_sg_in_client_sg_msg}$", (local.validate_ldap_sg_in_client_sg ? local.ldap_sg_in_client_sg_msg : "")) : true - - validate_gklm_sg_in_client_sg = local.gklm_condition ? contains(local.client_sg_rules, tolist(data.ibm_is_security_group.gklm_security_group[*].id)[0]) : true - gklm_sg_in_client_sg_msg = "The Client security group does not include the GKLM security group as a rule." - # tflint-ignore: terraform_unused_declarations - validate_gklm_sg_in_client_sg_chk = var.client_security_group_name != null ? regex("^${local.gklm_sg_in_client_sg_msg}$", (local.validate_gklm_sg_in_client_sg ? local.gklm_sg_in_client_sg_msg : "")) : true - - validate_client_sg_in_client_sg = local.clnt_condition ? contains(local.client_sg_rules, tolist(data.ibm_is_security_group.client_security_group[*].id)[0]) : true - client_sg_in_client_sg_msg = "The Client security group does not include the client security group as a rule." - # tflint-ignore: terraform_unused_declarations - validate_client_sg_in_client_sg_chk = var.client_security_group_name != null ? regex("^${local.client_sg_in_client_sg_msg}$", (local.validate_client_sg_in_client_sg ? local.client_sg_in_client_sg_msg : "")) : true - - validate_bastion_sg_in_client_sg = local.bastion_condition ? 
contains(local.client_sg_rules, tolist(data.ibm_is_security_group.login_security_group[*].id)[0]) : true - bastion_sg_in_client_sg_msg = "The Client security group does not include the bastion security group as a rule." - # tflint-ignore: terraform_unused_declarations - validate_bastion_sg_in_client_sg_chk = var.client_security_group_name != null ? regex("^${local.bastion_sg_in_client_sg_msg}$", (local.validate_bastion_sg_in_client_sg ? local.bastion_sg_in_client_sg_msg : "")) : true -} - -locals { - # Subnet ID validation for existing VPC with instances count greater than 0 - validate_subnet_id_ext_vpc_msg = "When 'subnet_id' is passed and any of the 'instance_count' values are greater than 0, you must provide the respective 'subnet_id' or set 'instance_count' to 0." - validate_subnet_id_ext_vpc = alltrue([ - var.vpc_name != null && (var.storage_subnet_id != null || var.compute_subnet_id != null || var.protocol_subnet_id != null || var.client_subnet_id != null || var.login_subnet_id != null) ? - ((local.total_storage_instance_count > 0 && var.storage_subnet_id != null) ? true : ((local.total_storage_instance_count == 0 && var.storage_subnet_id == null) ? true : false)) && - ((local.total_client_instance_count > 0 && var.client_subnet_id != null) ? true : ((local.total_client_instance_count == 0 && var.client_subnet_id == null) ? true : false)) && - ((local.total_protocol_instance_count > 0 && var.protocol_subnet_id != null) ? true : ((local.total_protocol_instance_count == 0 && var.protocol_subnet_id == null) ? true : false)) && - ((local.total_compute_instance_count > 0 && var.compute_subnet_id != null) ? true : ((local.total_compute_instance_count == 0 && var.compute_subnet_id == null) ? true : false)) && - ((var.login_subnet_id != null) ? true : false) - : true]) - # tflint-ignore: terraform_unused_declarations - validate_subnet_id_ext_vpc_chk = regex("^${local.validate_subnet_id_ext_vpc_msg}$", - (local.validate_subnet_id_ext_vpc ? 
local.validate_subnet_id_ext_vpc_msg : "")) -} diff --git a/solutions/scale/locals.tf b/solutions/scale/locals.tf index 82d4bb65..90f43d9c 100644 --- a/solutions/scale/locals.tf +++ b/solutions/scale/locals.tf @@ -18,158 +18,153 @@ locals { locals { config = { - existing_resource_group = var.existing_resource_group - remote_allowed_ips = var.remote_allowed_ips - ssh_keys = var.ssh_keys - login_subnets_cidr = var.login_subnets_cidr - compute_gui_password = var.compute_gui_password - compute_gui_username = var.compute_gui_username - compute_subnets_cidr = var.compute_subnets_cidr - cos_instance_name = var.cos_instance_name - dns_custom_resolver_id = var.dns_custom_resolver_id - dns_instance_id = var.dns_instance_id - dns_domain_names = var.dns_domain_names - bastion_instance = var.bastion_instance - deployer_instance = var.deployer_instance - enable_cos_integration = var.enable_cos_integration - enable_vpc_flow_logs = var.enable_vpc_flow_logs - client_instances = var.client_instances - client_subnets_cidr = var.client_subnets_cidr - vpc_cidr = var.vpc_cidr - cluster_prefix = var.cluster_prefix - protocol_instances = var.protocol_instances - protocol_subnets_cidr = var.protocol_subnets_cidr - compute_instances = var.compute_instances - storage_gui_password = var.storage_gui_password - storage_gui_username = var.storage_gui_username - storage_instances = var.storage_instances - storage_baremetal_server = var.storage_baremetal_server - storage_subnets_cidr = var.storage_subnets_cidr - vpc_name = var.vpc_name - observability_atracker_enable = var.observability_atracker_enable - observability_atracker_target_type = var.observability_atracker_target_type - sccwp_service_plan = var.sccwp_service_plan - sccwp_enable = var.sccwp_enable - cspm_enabled = var.cspm_enabled - app_config_plan = var.app_config_plan - skip_flowlogs_s2s_auth_policy = var.skip_flowlogs_s2s_auth_policy - ibmcloud_api_key = var.ibmcloud_api_key - afm_instances = var.afm_instances - afm_cos_config = var.afm_cos_config - enable_ldap = var.enable_ldap - ldap_basedns = var.ldap_basedns - ldap_admin_password = var.ldap_admin_password - ldap_user_name = var.ldap_user_name - ldap_user_password = var.ldap_user_password - ldap_server = var.ldap_server - ldap_server_cert = var.ldap_server_cert - ldap_instance = var.ldap_instance - scale_encryption_enabled = var.scale_encryption_enabled - scale_encryption_type = var.scale_encryption_type - gklm_instances = var.gklm_instances - storage_type = var.storage_type - colocate_protocol_instances = var.colocate_protocol_instances - scale_encryption_admin_password = var.scale_encryption_admin_password - key_protect_instance_id = var.key_protect_instance_id - filesystem_config = var.filesystem_config - existing_bastion_instance_name = var.existing_bastion_instance_name - existing_bastion_instance_public_ip = var.existing_bastion_instance_public_ip - existing_bastion_security_group_id = var.existing_bastion_security_group_id - existing_bastion_ssh_private_key = var.existing_bastion_ssh_private_key - bms_boot_drive_encryption = var.bms_boot_drive_encryption - tie_breaker_baremetal_server_profile = var.tie_breaker_baremetal_server_profile - filesets_config = var.filesets_config - login_security_group_name = var.login_security_group_name - storage_security_group_name = var.storage_security_group_name - compute_security_group_name = var.compute_security_group_name - client_security_group_name = var.client_security_group_name - gklm_security_group_name = var.gklm_security_group_name - 
ldap_security_group_name = var.ldap_security_group_name - login_subnet_id = var.login_subnet_id - compute_subnet_id = var.compute_subnet_id - storage_subnet_id = var.storage_subnet_id - protocol_subnet_id = var.protocol_subnet_id - client_subnet_id = var.client_subnet_id - scale_management_vsi_profile = var.scale_management_vsi_profile + existing_resource_group = var.existing_resource_group + remote_allowed_ips = var.remote_allowed_ips + ssh_keys = var.ssh_keys + vpc_cluster_login_private_subnets_cidr_blocks = var.vpc_cluster_login_private_subnets_cidr_blocks + compute_gui_password = var.compute_gui_password + compute_gui_username = var.compute_gui_username + vpc_cluster_private_subnets_cidr_blocks = var.vpc_cluster_private_subnets_cidr_blocks + cos_instance_name = var.cos_instance_name + dns_custom_resolver_id = var.dns_custom_resolver_id + dns_instance_id = var.dns_instance_id + dns_domain_names = var.dns_domain_names + enable_atracker = var.enable_atracker + # enable_bastion = var.enable_bastion + bastion_instance = var.bastion_instance + deployer_instance = var.deployer_instance + enable_cos_integration = var.enable_cos_integration + enable_vpc_flow_logs = var.enable_vpc_flow_logs + hpcs_instance_name = var.hpcs_instance_name + key_management = var.key_management + client_instances = var.client_instances + client_subnets_cidr = var.client_subnets_cidr + vpc_cidr = var.vpc_cidr + placement_strategy = var.placement_strategy + cluster_prefix = var.cluster_prefix + protocol_instances = var.protocol_instances + protocol_subnets_cidr = var.protocol_subnets_cidr + compute_instances = var.compute_instances + storage_gui_password = var.storage_gui_password + storage_gui_username = var.storage_gui_username + storage_instances = var.storage_instances + storage_servers = var.storage_servers + storage_subnets_cidr = var.storage_subnets_cidr + vpc_name = var.vpc_name + observability_atracker_enable = var.observability_atracker_enable + observability_atracker_target_type = var.observability_atracker_target_type + observability_monitoring_enable = var.observability_monitoring_enable + observability_logs_enable_for_management = var.observability_logs_enable_for_management + observability_logs_enable_for_compute = var.observability_logs_enable_for_compute + observability_enable_platform_logs = var.observability_enable_platform_logs + observability_enable_metrics_routing = var.observability_enable_metrics_routing + observability_logs_retention_period = var.observability_logs_retention_period + observability_monitoring_on_compute_nodes_enable = var.observability_monitoring_on_compute_nodes_enable + observability_monitoring_plan = var.observability_monitoring_plan + skip_flowlogs_s2s_auth_policy = var.skip_flowlogs_s2s_auth_policy + skip_kms_s2s_auth_policy = var.skip_kms_s2s_auth_policy + skip_iam_block_storage_authorization_policy = var.skip_iam_block_storage_authorization_policy + ibmcloud_api_key = var.ibmcloud_api_key + afm_instances = var.afm_instances + afm_cos_config = var.afm_cos_config + enable_ldap = var.enable_ldap + ldap_basedns = var.ldap_basedns + ldap_admin_password = var.ldap_admin_password + ldap_user_name = var.ldap_user_name + ldap_user_password = var.ldap_user_password + ldap_server = var.ldap_server + ldap_server_cert = var.ldap_server_cert + ldap_instance = var.ldap_instance + scale_encryption_enabled = var.scale_encryption_enabled + scale_encryption_type = var.scale_encryption_type + gklm_instance_key_pair = var.gklm_instance_key_pair + gklm_instances = var.gklm_instances + 
storage_type = var.storage_type + colocate_protocol_instances = var.colocate_protocol_instances + scale_encryption_admin_default_password = var.scale_encryption_admin_default_password + scale_encryption_admin_password = var.scale_encryption_admin_password + scale_encryption_admin_username = var.scale_encryption_admin_username + filesystem_config = var.filesystem_config + existing_bastion_instance_name = var.existing_bastion_instance_name + existing_bastion_instance_public_ip = var.existing_bastion_instance_public_ip + existing_bastion_security_group_id = var.existing_bastion_security_group_id + existing_bastion_ssh_private_key = var.existing_bastion_ssh_private_key } } - # Compile Environment for Config output locals { env = { - existing_resource_group = lookup(local.override[local.override_type], "existing_resource_group", local.config.existing_resource_group) - remote_allowed_ips = lookup(local.override[local.override_type], "remote_allowed_ips", local.config.remote_allowed_ips) - ssh_keys = lookup(local.override[local.override_type], "ssh_keys", local.config.ssh_keys) - login_subnets_cidr = lookup(local.override[local.override_type], "login_subnets_cidr", local.config.login_subnets_cidr) - compute_gui_password = lookup(local.override[local.override_type], "compute_gui_password", local.config.compute_gui_password) - compute_gui_username = lookup(local.override[local.override_type], "compute_gui_username", local.config.compute_gui_username) - compute_subnets_cidr = lookup(local.override[local.override_type], "compute_subnets_cidr", local.config.compute_subnets_cidr) - cos_instance_name = lookup(local.override[local.override_type], "cos_instance_name", local.config.cos_instance_name) - dns_custom_resolver_id = lookup(local.override[local.override_type], "dns_custom_resolver_id", local.config.dns_custom_resolver_id) - dns_instance_id = lookup(local.override[local.override_type], "dns_instance_id", local.config.dns_instance_id) - dns_domain_names = lookup(local.override[local.override_type], "dns_domain_names", local.config.dns_domain_names) - bastion_instance = lookup(local.override[local.override_type], "bastion_instance", local.config.bastion_instance) - deployer_instance = lookup(local.override[local.override_type], "deployer_instance", local.config.deployer_instance) - enable_cos_integration = lookup(local.override[local.override_type], "enable_cos_integration", local.config.enable_cos_integration) - enable_vpc_flow_logs = lookup(local.override[local.override_type], "enable_vpc_flow_logs", local.config.enable_vpc_flow_logs) - client_instances = lookup(local.override[local.override_type], "client_instances", local.config.client_instances) - client_subnets_cidr = lookup(local.override[local.override_type], "client_subnets_cidr", local.config.client_subnets_cidr) - vpc_cidr = lookup(local.override[local.override_type], "vpc_cidr", local.config.vpc_cidr) - cluster_prefix = lookup(local.override[local.override_type], "cluster_prefix", local.config.cluster_prefix) - protocol_instances = lookup(local.override[local.override_type], "protocol_instances", local.config.protocol_instances) - protocol_subnets_cidr = lookup(local.override[local.override_type], "protocol_subnets_cidr", local.config.protocol_subnets_cidr) - compute_instances = lookup(local.override[local.override_type], "compute_instances", local.config.compute_instances) - storage_gui_password = lookup(local.override[local.override_type], "storage_gui_password", local.config.storage_gui_password) - storage_gui_username = 
lookup(local.override[local.override_type], "storage_gui_username", local.config.storage_gui_username) - storage_instances = lookup(local.override[local.override_type], "storage_instances", local.config.storage_instances) - storage_baremetal_server = lookup(local.override[local.override_type], "storage_baremetal_server", local.config.storage_baremetal_server) - storage_subnets_cidr = lookup(local.override[local.override_type], "storage_subnets_cidr", local.config.storage_subnets_cidr) - vpc_name = lookup(local.override[local.override_type], "vpc_name", local.config.vpc_name) - observability_atracker_enable = lookup(local.override[local.override_type], "observability_atracker_enable", local.config.observability_atracker_enable) - observability_atracker_target_type = lookup(local.override[local.override_type], "observability_atracker_target_type", local.config.observability_atracker_target_type) - sccwp_enable = lookup(local.override[local.override_type], "scc_wp_enable", local.config.sccwp_enable) - cspm_enable = lookup(local.override[local.override_type], "cspm_enable", local.config.cspm_enabled) - sccwp_service_plan = lookup(local.override[local.override_type], "scc_wp_service_plan", local.config.sccwp_service_plan) - app_config_plan = lookup(local.override[local.override_type], "app_config_plan", local.config.app_config_plan) - skip_flowlogs_s2s_auth_policy = lookup(local.override[local.override_type], "skip_flowlogs_s2s_auth_policy", local.config.skip_flowlogs_s2s_auth_policy) - ibmcloud_api_key = lookup(local.override[local.override_type], "ibmcloud_api_key", local.config.ibmcloud_api_key) - afm_instances = lookup(local.override[local.override_type], "afm_instances", local.config.afm_instances) - afm_cos_config = lookup(local.override[local.override_type], "afm_cos_config", local.config.afm_cos_config) - enable_ldap = lookup(local.override[local.override_type], "enable_ldap", local.config.enable_ldap) - ldap_basedns = lookup(local.override[local.override_type], "ldap_basedns", local.config.ldap_basedns) - ldap_admin_password = lookup(local.override[local.override_type], "ldap_admin_password", local.config.ldap_admin_password) - ldap_user_name = lookup(local.override[local.override_type], "ldap_user_name", local.config.ldap_user_name) - ldap_user_password = lookup(local.override[local.override_type], "ldap_user_password", local.config.ldap_user_password) - ldap_server = lookup(local.override[local.override_type], "ldap_server", local.config.ldap_server) - ldap_server_cert = lookup(local.override[local.override_type], "ldap_server_cert", local.config.ldap_server_cert) - ldap_instance = lookup(local.override[local.override_type], "ldap_instance", local.config.ldap_instance) - scale_encryption_enabled = lookup(local.override[local.override_type], "scale_encryption_enabled", local.config.scale_encryption_enabled) - scale_encryption_type = lookup(local.override[local.override_type], "scale_encryption_type", local.config.scale_encryption_type) - gklm_instances = lookup(local.override[local.override_type], "gklm_instances", local.config.gklm_instances) - key_protect_instance_id = lookup(local.override[local.override_type], "key_protect_instance_id", local.config.key_protect_instance_id) - storage_type = lookup(local.override[local.override_type], "storage_type", local.config.storage_type) - colocate_protocol_instances = lookup(local.override[local.override_type], "colocate_protocol_instances", local.config.colocate_protocol_instances) - scale_encryption_admin_password = 
lookup(local.override[local.override_type], "scale_encryption_admin_password", local.config.scale_encryption_admin_password) - filesystem_config = lookup(local.override[local.override_type], "filesystem_config", local.config.filesystem_config) - existing_bastion_instance_name = lookup(local.override[local.override_type], "existing_bastion_instance_name", local.config.existing_bastion_instance_name) - existing_bastion_instance_public_ip = lookup(local.override[local.override_type], "existing_bastion_instance_public_ip", local.config.existing_bastion_instance_public_ip) - existing_bastion_security_group_id = lookup(local.override[local.override_type], "existing_bastion_security_group_id", local.config.existing_bastion_security_group_id) - existing_bastion_ssh_private_key = lookup(local.override[local.override_type], "existing_bastion_ssh_private_key", local.config.existing_bastion_ssh_private_key) - bms_boot_drive_encryption = lookup(local.override[local.override_type], "bms_boot_drive_encryption", local.config.bms_boot_drive_encryption) - tie_breaker_baremetal_server_profile = lookup(local.override[local.override_type], "tie_breaker_baremetal_server_profile", local.config.tie_breaker_baremetal_server_profile) - filesets_config = lookup(local.override[local.override_type], "filesets_config", local.config.filesets_config) - login_security_group_name = lookup(local.override[local.override_type], "login_security_group_name", local.config.login_security_group_name) - storage_security_group_name = lookup(local.override[local.override_type], "storage_security_group_name", local.config.storage_security_group_name) - compute_security_group_name = lookup(local.override[local.override_type], "compute_security_group_name", local.config.compute_security_group_name) - client_security_group_name = lookup(local.override[local.override_type], "client_security_group_name", local.config.client_security_group_name) - gklm_security_group_name = lookup(local.override[local.override_type], "gklm_security_group_name", local.config.gklm_security_group_name) - ldap_security_group_name = lookup(local.override[local.override_type], "ldap_security_group_name", local.config.ldap_security_group_name) - login_subnet_id = lookup(local.override[local.override_type], "login_subnet_id", local.config.login_subnet_id) - compute_subnet_id = lookup(local.override[local.override_type], "compute_subnet_id", local.config.compute_subnet_id) - storage_subnet_id = lookup(local.override[local.override_type], "storage_subnet_id", local.config.storage_subnet_id) - protocol_subnet_id = lookup(local.override[local.override_type], "protocol_subnet_id", local.config.protocol_subnet_id) - client_subnet_id = lookup(local.override[local.override_type], "client_subnet_id", local.config.client_subnet_id) - scale_management_vsi_profile = lookup(local.override[local.override_type], "scale_management_vsi_profile", local.config.scale_management_vsi_profile) + existing_resource_group = lookup(local.override[local.override_type], "existing_resource_group", local.config.existing_resource_group) + remote_allowed_ips = lookup(local.override[local.override_type], "remote_allowed_ips", local.config.remote_allowed_ips) + ssh_keys = lookup(local.override[local.override_type], "ssh_keys", local.config.ssh_keys) + vpc_cluster_login_private_subnets_cidr_blocks = lookup(local.override[local.override_type], "vpc_cluster_login_private_subnets_cidr_blocks", local.config.vpc_cluster_login_private_subnets_cidr_blocks) + compute_gui_password = 
lookup(local.override[local.override_type], "compute_gui_password", local.config.compute_gui_password) + compute_gui_username = lookup(local.override[local.override_type], "compute_gui_username", local.config.compute_gui_username) + vpc_cluster_private_subnets_cidr_blocks = lookup(local.override[local.override_type], "vpc_cluster_private_subnets_cidr_blocks", local.config.vpc_cluster_private_subnets_cidr_blocks) + cos_instance_name = lookup(local.override[local.override_type], "cos_instance_name", local.config.cos_instance_name) + dns_custom_resolver_id = lookup(local.override[local.override_type], "dns_custom_resolver_id", local.config.dns_custom_resolver_id) + dns_instance_id = lookup(local.override[local.override_type], "dns_instance_id", local.config.dns_instance_id) + dns_domain_names = lookup(local.override[local.override_type], "dns_domain_names", local.config.dns_domain_names) + enable_atracker = lookup(local.override[local.override_type], "enable_atracker", local.config.enable_atracker) + # enable_bastion = lookup(local.override[local.override_type], "enable_bastion", local.config.enable_bastion) + bastion_instance = lookup(local.override[local.override_type], "bastion_instance", local.config.bastion_instance) + deployer_instance = lookup(local.override[local.override_type], "deployer_instance", local.config.deployer_instance) + enable_cos_integration = lookup(local.override[local.override_type], "enable_cos_integration", local.config.enable_cos_integration) + enable_vpc_flow_logs = lookup(local.override[local.override_type], "enable_vpc_flow_logs", local.config.enable_vpc_flow_logs) + hpcs_instance_name = lookup(local.override[local.override_type], "hpcs_instance_name", local.config.hpcs_instance_name) + key_management = lookup(local.override[local.override_type], "key_management", local.config.key_management) + client_instances = lookup(local.override[local.override_type], "client_instances", local.config.client_instances) + client_subnets_cidr = lookup(local.override[local.override_type], "client_subnets_cidr", local.config.client_subnets_cidr) + vpc_cidr = lookup(local.override[local.override_type], "vpc_cidr", local.config.vpc_cidr) + placement_strategy = lookup(local.override[local.override_type], "placement_strategy", local.config.placement_strategy) + cluster_prefix = lookup(local.override[local.override_type], "cluster_prefix", local.config.cluster_prefix) + protocol_instances = lookup(local.override[local.override_type], "protocol_instances", local.config.protocol_instances) + protocol_subnets_cidr = lookup(local.override[local.override_type], "protocol_subnets_cidr", local.config.protocol_subnets_cidr) + compute_instances = lookup(local.override[local.override_type], "compute_instances", local.config.compute_instances) + storage_gui_password = lookup(local.override[local.override_type], "storage_gui_password", local.config.storage_gui_password) + storage_gui_username = lookup(local.override[local.override_type], "storage_gui_username", local.config.storage_gui_username) + storage_instances = lookup(local.override[local.override_type], "storage_instances", local.config.storage_instances) + storage_servers = lookup(local.override[local.override_type], "storage_servers", local.config.storage_servers) + storage_subnets_cidr = lookup(local.override[local.override_type], "storage_subnets_cidr", local.config.storage_subnets_cidr) + vpc_name = lookup(local.override[local.override_type], "vpc_name", local.config.vpc_name) + observability_atracker_enable = 
lookup(local.override[local.override_type], "observability_atracker_enable", local.config.observability_atracker_enable) + observability_atracker_target_type = lookup(local.override[local.override_type], "observability_atracker_target_type", local.config.observability_atracker_target_type) + observability_monitoring_enable = lookup(local.override[local.override_type], "observability_monitoring_enable", local.config.observability_monitoring_enable) + observability_logs_enable_for_management = lookup(local.override[local.override_type], "observability_logs_enable_for_management", local.config.observability_logs_enable_for_management) + observability_logs_enable_for_compute = lookup(local.override[local.override_type], "observability_logs_enable_for_compute", local.config.observability_logs_enable_for_compute) + observability_enable_platform_logs = lookup(local.override[local.override_type], "observability_enable_platform_logs", local.config.observability_enable_platform_logs) + observability_enable_metrics_routing = lookup(local.override[local.override_type], "observability_enable_metrics_routing", local.config.observability_enable_metrics_routing) + observability_logs_retention_period = lookup(local.override[local.override_type], "observability_logs_retention_period", local.config.observability_logs_retention_period) + observability_monitoring_on_compute_nodes_enable = lookup(local.override[local.override_type], "observability_monitoring_on_compute_nodes_enable", local.config.observability_monitoring_on_compute_nodes_enable) + observability_monitoring_plan = lookup(local.override[local.override_type], "observability_monitoring_plan", local.config.observability_monitoring_plan) + skip_flowlogs_s2s_auth_policy = lookup(local.override[local.override_type], "skip_flowlogs_s2s_auth_policy", local.config.skip_flowlogs_s2s_auth_policy) + skip_kms_s2s_auth_policy = lookup(local.override[local.override_type], "skip_kms_s2s_auth_policy", local.config.skip_kms_s2s_auth_policy) + skip_iam_block_storage_authorization_policy = lookup(local.override[local.override_type], "skip_iam_block_storage_authorization_policy", local.config.skip_iam_block_storage_authorization_policy) + ibmcloud_api_key = lookup(local.override[local.override_type], "ibmcloud_api_key", local.config.ibmcloud_api_key) + afm_instances = lookup(local.override[local.override_type], "afm_instances", local.config.afm_instances) + afm_cos_config = lookup(local.override[local.override_type], "afm_cos_config", local.config.afm_cos_config) + enable_ldap = lookup(local.override[local.override_type], "enable_ldap", local.config.enable_ldap) + ldap_basedns = lookup(local.override[local.override_type], "ldap_basedns", local.config.ldap_basedns) + ldap_admin_password = lookup(local.override[local.override_type], "ldap_admin_password", local.config.ldap_admin_password) + ldap_user_name = lookup(local.override[local.override_type], "ldap_user_name", local.config.ldap_user_name) + ldap_user_password = lookup(local.override[local.override_type], "ldap_user_password", local.config.ldap_user_password) + ldap_server = lookup(local.override[local.override_type], "ldap_server", local.config.ldap_server) + ldap_server_cert = lookup(local.override[local.override_type], "ldap_server_cert", local.config.ldap_server_cert) + ldap_instance = lookup(local.override[local.override_type], "ldap_instance", local.config.ldap_instance) + scale_encryption_enabled = lookup(local.override[local.override_type], "scale_encryption_enabled", 
local.config.scale_encryption_enabled) + scale_encryption_type = lookup(local.override[local.override_type], "scale_encryption_type", local.config.scale_encryption_type) + gklm_instance_key_pair = lookup(local.override[local.override_type], "gklm_instance_key_pair", local.config.gklm_instance_key_pair) + gklm_instances = lookup(local.override[local.override_type], "gklm_instances", local.config.gklm_instances) + storage_type = lookup(local.override[local.override_type], "storage_type", local.config.storage_type) + colocate_protocol_instances = lookup(local.override[local.override_type], "colocate_protocol_instances", local.config.colocate_protocol_instances) + scale_encryption_admin_default_password = lookup(local.override[local.override_type], "scale_encryption_admin_default_password", local.config.scale_encryption_admin_default_password) + scale_encryption_admin_password = lookup(local.override[local.override_type], "scale_encryption_admin_password", local.config.scale_encryption_admin_password) + scale_encryption_admin_username = lookup(local.override[local.override_type], "scale_encryption_admin_username", local.config.scale_encryption_admin_username) + filesystem_config = lookup(local.override[local.override_type], "filesystem_config", local.config.filesystem_config) + existing_bastion_instance_name = lookup(local.override[local.override_type], "existing_bastion_instance_name", local.config.existing_bastion_instance_name) + existing_bastion_instance_public_ip = lookup(local.override[local.override_type], "existing_bastion_instance_public_ip", local.config.existing_bastion_instance_public_ip) + existing_bastion_security_group_id = lookup(local.override[local.override_type], "existing_bastion_security_group_id", local.config.existing_bastion_security_group_id) + existing_bastion_ssh_private_key = lookup(local.override[local.override_type], "existing_bastion_ssh_private_key", local.config.existing_bastion_ssh_private_key) } } diff --git a/solutions/scale/main.tf b/solutions/scale/main.tf index a4981057..5c86c9ed 100644 --- a/solutions/scale/main.tf +++ b/solutions/scale/main.tf @@ -7,73 +7,71 @@ module "scale" { cluster_prefix = local.env.cluster_prefix ssh_keys = local.env.ssh_keys existing_resource_group = local.env.existing_resource_group - vpc_cluster_login_private_subnets_cidr_blocks = local.env.login_subnets_cidr - vpc_cluster_private_subnets_cidr_blocks = local.env.compute_subnets_cidr + vpc_cluster_login_private_subnets_cidr_blocks = local.env.vpc_cluster_login_private_subnets_cidr_blocks + vpc_cluster_private_subnets_cidr_blocks = local.env.vpc_cluster_private_subnets_cidr_blocks cos_instance_name = local.env.cos_instance_name dns_custom_resolver_id = local.env.dns_custom_resolver_id dns_instance_id = local.env.dns_instance_id dns_domain_names = local.env.dns_domain_names - bastion_instance = local.env.bastion_instance - deployer_instance = local.env.deployer_instance - enable_cos_integration = local.env.enable_cos_integration - enable_vpc_flow_logs = local.env.enable_vpc_flow_logs - client_instances = local.env.client_instances - vpc_cidr = local.env.vpc_cidr - protocol_instances = local.env.protocol_instances - protocol_subnets_cidr = [local.env.protocol_subnets_cidr] - colocate_protocol_instances = local.env.colocate_protocol_instances - static_compute_instances = local.env.compute_instances - storage_instances = local.env.storage_instances - storage_servers = local.env.storage_baremetal_server - storage_subnets_cidr = [local.env.storage_subnets_cidr] - vpc_name = 
local.env.vpc_name - compute_gui_password = local.env.compute_gui_password - compute_gui_username = local.env.compute_gui_username - storage_gui_password = local.env.storage_gui_password - storage_gui_username = local.env.storage_gui_username - observability_atracker_enable = local.env.observability_atracker_enable - observability_atracker_target_type = local.env.observability_atracker_target_type - sccwp_enable = local.env.sccwp_enable - sccwp_service_plan = local.env.sccwp_service_plan - cspm_enabled = local.env.cspm_enable - app_config_plan = local.env.app_config_plan - skip_flowlogs_s2s_auth_policy = local.env.skip_flowlogs_s2s_auth_policy - ibmcloud_api_key = local.env.ibmcloud_api_key - afm_instances = local.env.afm_instances - afm_cos_config = local.env.afm_cos_config - enable_ldap = local.env.enable_ldap - ldap_basedns = local.env.ldap_basedns - ldap_admin_password = local.env.ldap_admin_password - ldap_user_name = local.env.ldap_user_name - ldap_user_password = local.env.ldap_user_password - ldap_server = local.env.ldap_server - ldap_server_cert = local.env.ldap_server_cert - ldap_instance = local.env.ldap_instance - scale_encryption_enabled = local.env.scale_encryption_enabled - scale_encryption_type = local.env.scale_encryption_type - gklm_instances = local.env.gklm_instances - storage_type = local.env.storage_type - scale_encryption_admin_password = local.env.scale_encryption_admin_password - key_protect_instance_id = local.env.key_protect_instance_id - filesystem_config = local.env.filesystem_config - existing_bastion_instance_name = local.env.existing_bastion_instance_name - existing_bastion_instance_public_ip = local.env.existing_bastion_instance_public_ip - existing_bastion_security_group_id = local.env.existing_bastion_security_group_id - existing_bastion_ssh_private_key = local.env.existing_bastion_ssh_private_key - client_subnets_cidr = [local.env.client_subnets_cidr] - bms_boot_drive_encryption = local.env.bms_boot_drive_encryption - tie_breaker_bm_server_profile = local.env.tie_breaker_baremetal_server_profile - filesets_config = local.env.filesets_config - login_security_group_name = local.env.login_security_group_name - storage_security_group_name = local.env.storage_security_group_name - compute_security_group_name = local.env.compute_security_group_name - client_security_group_name = local.env.client_security_group_name - gklm_security_group_name = local.env.gklm_security_group_name - ldap_security_group_name = local.env.ldap_security_group_name - login_subnet_id = local.env.login_subnet_id - compute_subnet_id = local.env.compute_subnet_id - storage_subnet_id = local.env.storage_subnet_id - protocol_subnet_id = local.env.protocol_subnet_id - client_subnet_id = local.env.client_subnet_id - scale_management_vsi_profile = local.env.scale_management_vsi_profile + enable_atracker = local.env.enable_atracker + # enable_bastion = local.env.enable_bastion + bastion_instance = local.env.bastion_instance + deployer_instance = local.env.deployer_instance + enable_cos_integration = local.env.enable_cos_integration + enable_vpc_flow_logs = local.env.enable_vpc_flow_logs + key_management = local.env.key_management + client_instances = local.env.client_instances + vpc_cidr = local.env.vpc_cidr + placement_strategy = local.env.placement_strategy + protocol_instances = local.env.protocol_instances + protocol_subnets_cidr = [local.env.protocol_subnets_cidr] + colocate_protocol_instances = local.env.colocate_protocol_instances + static_compute_instances = local.env.compute_instances 
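# Every argument in this module block is resolved through local.env, which lets
# override.json take precedence over the raw inputs. The lookup pattern,
# sketched for one key:
#   vpc_name = lookup(local.override[local.override_type], "vpc_name", local.config.vpc_name)
#   # the override file's value wins when the key is present; otherwise the
#   # variable captured in local.config is used.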
+ storage_instances = local.env.storage_instances + storage_servers = local.env.storage_servers + storage_subnets_cidr = [local.env.storage_subnets_cidr] + vpc_name = local.env.vpc_name + compute_gui_password = local.env.compute_gui_password + compute_gui_username = local.env.compute_gui_username + storage_gui_password = local.env.storage_gui_password + storage_gui_username = local.env.storage_gui_username + observability_atracker_enable = local.env.observability_atracker_enable + observability_atracker_target_type = local.env.observability_atracker_target_type + observability_monitoring_enable = local.env.observability_monitoring_enable + observability_logs_enable_for_management = local.env.observability_logs_enable_for_management + observability_logs_enable_for_compute = local.env.observability_logs_enable_for_compute + observability_enable_platform_logs = local.env.observability_enable_platform_logs + observability_enable_metrics_routing = local.env.observability_enable_metrics_routing + observability_logs_retention_period = local.env.observability_logs_retention_period + observability_monitoring_on_compute_nodes_enable = local.env.observability_monitoring_on_compute_nodes_enable + observability_monitoring_plan = local.env.observability_monitoring_plan + skip_flowlogs_s2s_auth_policy = local.env.skip_flowlogs_s2s_auth_policy + skip_kms_s2s_auth_policy = local.env.skip_kms_s2s_auth_policy + skip_iam_block_storage_authorization_policy = local.env.skip_iam_block_storage_authorization_policy + ibmcloud_api_key = local.env.ibmcloud_api_key + afm_instances = local.env.afm_instances + afm_cos_config = local.env.afm_cos_config + enable_ldap = local.env.enable_ldap + ldap_basedns = local.env.ldap_basedns + ldap_admin_password = local.env.ldap_admin_password + ldap_user_name = local.env.ldap_user_name + ldap_user_password = local.env.ldap_user_password + ldap_server = local.env.ldap_server + ldap_server_cert = local.env.ldap_server_cert + ldap_instance = local.env.ldap_instance + scale_encryption_enabled = local.env.scale_encryption_enabled + scale_encryption_type = local.env.scale_encryption_type + gklm_instance_key_pair = local.env.gklm_instance_key_pair + gklm_instances = local.env.gklm_instances + storage_type = local.env.storage_type + scale_encryption_admin_password = local.env.scale_encryption_admin_password + filesystem_config = local.env.filesystem_config + existing_bastion_instance_name = local.env.existing_bastion_instance_name + existing_bastion_instance_public_ip = local.env.existing_bastion_instance_public_ip + existing_bastion_security_group_id = local.env.existing_bastion_security_group_id + existing_bastion_ssh_private_key = local.env.existing_bastion_ssh_private_key + client_subnets_cidr = [local.env.client_subnets_cidr] + # hpcs_instance_name = local.env.hpcs_instance_name + # scale_encryption_admin_username = local.env.scale_encryption_admin_username + # scale_encryption_admin_default_password = local.env.scale_encryption_admin_default_password } diff --git a/solutions/scale/override.json b/solutions/scale/override.json index 8d5d5398..894e99b2 100644 --- a/solutions/scale/override.json +++ b/solutions/scale/override.json @@ -3,11 +3,12 @@ "existing_resource_group": "Default", "vpc_name": null, "vpc_cidr": "10.0.0.0/8", + "placement_strategy": null, "ssh_keys": null, "enable_bastion": true, "enable_deployer": true, "deployer_instance_profile": "mx2-4x32", - "login_subnets_cidr": "10.0.0.0/24", + "vpc_cluster_login_private_subnets_cidr_blocks": "10.0.0.0/24", 
"client_subnets_cidr": "10.10.10.0/24", "client_instances": [ { @@ -16,7 +17,7 @@ "image": "ibm-redhat-8-10-minimal-amd64-2" } ], - "compute_subnets_cidr": "10.10.20.0/24", + "vpc_cluster_private_subnets_cidr_blocks": "10.10.20.0/24", "compute_instances": [ { "profile": "cx2-2x4", @@ -115,5 +116,7 @@ "cos_instance_name": null, "enable_atracker": true, "enable_vpc_flow_logs": true, + "key_management": "key_protect", + "hpcs_instance_name": null, "clusters": null } diff --git a/solutions/scale/variables.tf b/solutions/scale/variables.tf index 9c310b8b..dfa93ff8 100644 --- a/solutions/scale/variables.tf +++ b/solutions/scale/variables.tf @@ -1,66 +1,51 @@ ############################################################################## # Offering Variations ############################################################################## - variable "ibm_customer_number" { type = string sensitive = true default = null - description = "IBM Customer Number (ICN) used for Bring Your Own License (BYOL) entitlement check and not required if storage_type is evaluation, but must be provided if storage_type is scratch or persistent. Failing to provide an ICN will cause the deployment to fail to decrypt the packages. For more information on how to find your ICN, see [What is my IBM Customer Number (ICN)?](https://www.ibm.com/support/pages/what-my-ibm-customer-number-icn)." - # Format validation - Only if value is not null + description = "Comma-separated list of the IBM Customer Number(s) (ICN) that is used for the Bring Your Own License (BYOL) entitlement check. For more information on how to find your ICN, see [What is my IBM Customer Number (ICN)?](https://www.ibm.com/support/pages/what-my-ibm-customer-number-icn)." validation { condition = ( var.ibm_customer_number == null || - can(regex("^[0-9]+(,[0-9]+)*$", var.ibm_customer_number)) + can(regex("^[0-9A-Za-z]+(,[0-9A-Za-z]+)*$", var.ibm_customer_number)) ) - error_message = "The IBM customer number must be a comma-separated list of numeric values with no alphabets and special characters." - } - - # Presence validation - Must be set when storage_type is not evaluation - validation { - condition = ( - var.storage_type == "evaluation" || var.ibm_customer_number != null - ) - error_message = "The IBM customer number cannot be null when storage_type is 'scratch' or 'persistent'." + error_message = "The IBM customer number input value cannot have special characters." } } - ############################################################################## # Account Variables ############################################################################## variable "ibmcloud_api_key" { type = string sensitive = true - description = "Provide the IBM Cloud API key for the account where the IBM Storage Scale cluster will be deployed, this is a required value that must be provided as it is used to authenticate and authorize access during the deployment. For instructions on creating an API key, see [Managing user API keys](https://cloud.ibm.com/docs/account?topic=account-userapikey&interface=ui)." + description = "IBM Cloud API Key that will be used for authentication in scripts run in this module. Only required if certain options are required." } ############################################################################## # Cluster Level Variables ############################################################################## variable "zones" { - description = "Specify the IBM Cloud zone within the chosen region where the IBM Storage scale cluster will be deployed. 
A single zone input is required, (for example, [\"us-east-1\"]) all the cluster nodes will all be provisioned in this zone.[Learn more](https://cloud.ibm.com/docs/vpc?topic=vpc-creating-a-vpc-in-a-different-region#get-zones-using-the-cli)." + description = "Specify the IBM Cloud zone within the chosen region where the IBM Storage Scale cluster will be deployed. A single zone input is required, and all the cluster nodes will be provisioned in this zone. [Learn more](https://cloud.ibm.com/docs/vpc?topic=vpc-creating-a-vpc-in-a-different-region#get-zones-using-the-cli)." type = list(string) default = ["us-east-1"] validation { condition = length(var.zones) == 1 - error_message = "Provide a value for a single zone from the supported regions." - } - validation { - condition = can(regex("^[a-z]{2}-[a-z]+-[1-3]$", var.zones[0])) - error_message = "Provide a value from the supported regions." + error_message = "HPC product deployment supports only a single zone. Provide a value for a single zone from the supported regions: eu-de-2 or eu-de-3 for eu-de, us-east-1 or us-east-3 for us-east, and us-south-1 for us-south." } } variable "ssh_keys" { type = list(string) default = null - description = "Provide the names of the SSH keys already configured in your IBM Cloud account to enable access to the Storage Scale nodes. The solution does not create new SSH keys, so ensure you provide existing ones. These keys must reside in the same resource group and region as the cluster being provisioned.To provide multiple SSH keys, use a comma-separated list in the format: [\"key-name-1\", \"key-name-2\"]. If you do not have an SSH key in your IBM Cloud account, you can create one by following the instructions [SSH Keys](https://cloud.ibm.com/docs/vpc?topic=vpc-ssh-keys)." + description = "The key pair to use to access the HPC cluster." } variable "remote_allowed_ips" { type = list(string) - description = "To ensure secure access to the IBM Storage Scale cluster via SSH, you must specify the public IP addresses of the devices that are permitted to connect. These IPs will be used to configure access restrictions and protect the environment from unauthorized connections. To allow access from multiple devices, provide the IP addresses as a comma-separated list in the format: [\"169.45.117.34\", \"203.0.113.25\"]. Identify your current public IP address, you can visit: https://ipv4.icanhazip.com." + description = "Comma-separated list of IP addresses that can access the IBM Storage Scale cluster instance through an SSH interface. For security purposes, provide the public IP addresses assigned to the devices that are authorized to establish SSH connections (for example, [\"169.45.117.34\"]). To fetch the IP address of the device, use [https://ipv4.icanhazip.com/](https://ipv4.icanhazip.com/)." 
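# Illustrative values (made up) against the restored remote_allowed_ips rules below: the regex
# accepts a plain IPv4 address or an IPv4 CIDR with a /0-/32 suffix, and a separate validation
# rejects the open ranges "0.0.0.0" and "0.0.0.0/0".
#   remote_allowed_ips = ["169.45.117.34", "203.0.113.0/24"]   # both accepted
#   remote_allowed_ips = ["0.0.0.0/0"]                         # rejected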
validation { condition = alltrue([ for o in var.remote_allowed_ips : !contains(["0.0.0.0/0", "0.0.0.0"], o) @@ -69,8 +54,7 @@ variable "remote_allowed_ips" { } validation { condition = alltrue([ - for a in var.remote_allowed_ips : can(regex("^((25[0-5]|2[0-4][0-9]|1[0-9]{2}|[1-9][0-9]|[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|1[0-9]{2}|[1-9][0-9]|[0-9])(/(3[0-2]|2[0-9]|1[0-9]|[0-9]))?$", a)) - + for a in var.remote_allowed_ips : can(regex("^(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)(/(3[0-2]|2[0-9]|1[0-9]|[0-9]))?$", a)) ]) error_message = "The provided IP address format is not valid. Check if the IP address contains a comma instead of a dot, and ensure there are double quotation marks between each IP address range if using multiple IP ranges. For multiple IP address, use the format [\"169.45.117.34\",\"128.122.144.145\"]." } @@ -79,14 +63,14 @@ variable "cluster_prefix" { type = string default = "scale" - description = "Prefix that is used to name the IBM Cloud resources that are provisioned to build the Storage Scale cluster. Make sure that the prefix is unique, since you cannot create multiple resources with the same name. The maximum length of supported characters is 64. Preifx must begin with a letter and end with a letter or number." + description = "A unique identifier for resources. Must begin with a letter and end with a letter or number. This cluster_prefix will be prepended to any resources provisioned by this template. Prefixes must be 16 or fewer characters." validation { - error_message = "Prefix must begin with a lower case letter, should not end with '-' and contain only lower case letters, numbers, and '-' characters." - condition = can(regex("^[a-z](?:[a-z0-9]*(-[a-z0-9]+)*)?$", var.cluster_prefix)) + error_message = "Prefix must begin with a lowercase letter, end with a letter or number, and contain only lowercase letters, numbers, and - characters." + condition = can(regex("^([a-z]|[a-z][-a-z0-9]*[a-z0-9])$", var.cluster_prefix)) } validation { - condition = length(trimspace(var.cluster_prefix)) > 0 && length(var.cluster_prefix) <= 16 - error_message = "The cluster_prefix must be 16 characters or fewer. No spaces allowed. " + condition = length(var.cluster_prefix) <= 16 + error_message = "The cluster_prefix must be 16 characters or fewer." } } @@ -96,11 +80,8 @@ variable "cluster_prefix" { variable "existing_resource_group" { type = string default = "Default" - description = "Specify the name of the existing resource group in your IBM Cloud account where cluster resources will be deployed. By default, the resource group is set to 'Default.' In some older accounts, it may be 'default,' so please verify the resource group name before proceeding. If the value is set to \"null\", the automation will create two separate resource groups: 'workload-rg' and 'service-rg.' For more details, see [Managing resource groups](https://cloud.ibm.com/docs/account?topic=account-rgs&interface=ui)." 
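# Illustrative usage (not part of the patch) based on the removed description above:
#   existing_resource_group = "Default"   # reuse an existing group; older accounts may spell it "default"
#   existing_resource_group = null        # per the removed text, creates 'workload-rg' and 'service-rg'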
- validation { - condition = var.existing_resource_group != null && length(trimspace(var.existing_resource_group)) > 0 && var.existing_resource_group == trimspace(var.existing_resource_group) - error_message = "If you want to provide null for resource_group variable, it should be within double quotes and must not be null, empty, or contain leading/trailing spaces" - } + description = "String describing resource groups to create or reference" + } ############################################################################## @@ -109,39 +90,52 @@ variable "existing_resource_group" { variable "vpc_name" { type = string default = null - description = "Provide the name of an existing VPC in which the cluster resources will be deployed. If no value is given, the solution provisions a new VPC. [Learn more](https://cloud.ibm.com/docs/vpc). You can also choose to use existing subnets under this VPC or let the solution create new subnets as part of the deployment. If a custom DNS resolver is already configured for your VPC, specify its ID under the dns_custom_resolver_id input value." + description = "Name of an existing VPC in which the cluster resources will be deployed. If no value is given, then a new VPC will be provisioned for the cluster. [Learn more](https://cloud.ibm.com/docs/vpc)" } variable "vpc_cidr" { type = string default = "10.241.0.0/18" - description = "Provide an address prefix to create a new VPC when the vpc_name variable is set to null. VPC will be created using this address prefix, and subnets can then be defined within it using the specified subnet CIDR blocks. For more information on address prefix, see [Setting IP ranges](https://cloud.ibm.com/docs/vpc?topic=vpc-vpc-addressing-plan-design)." + description = "Network CIDR for the VPC. This is used to manage network ACL rules for cluster provisioning." +} + +variable "placement_strategy" { + type = string + default = null + description = "VPC placement groups to create (null / host_spread / power_spread)" } ############################################################################## # Access Variables ############################################################################## +# variable "enable_bastion" { +# type = bool +# default = true +# description = "The solution supports multiple ways to connect to your HPC cluster for example, using bastion node, via VPN or direct connection. If connecting to the HPC cluster via VPN or direct connection, set this value to false." +# } + variable "bastion_instance" { type = object({ image = string profile = string }) default = { - image = "ibm-ubuntu-22-04-5-minimal-amd64-5" + image = "ibm-ubuntu-22-04-5-minimal-amd64-3" profile = "cx2-4x8" } + description = "Configuration for the Bastion node, including the image and instance profile. Only Ubuntu stock images are supported." +} + +variable "vpc_cluster_login_private_subnets_cidr_blocks" { + type = string + default = "10.241.16.0/28" + description = "Provide the CIDR block required for the creation of the login cluster's private subnet. Only one CIDR block is needed. If using a hybrid environment, modify the CIDR block to avoid conflicts with any on-premises CIDR blocks. Since the login subnet is used only for the creation of login virtual server instances, provide a CIDR range of /28." validation { - condition = can(regex("^ibm-ubuntu", var.bastion_instance.image)) - error_message = "Only IBM Ubuntu stock images are supported for the Bastion node." 
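# Illustrative check (made-up values) for the /28 rule enforced by the restored condition below:
#   "10.241.16.0/28" -> prefix 28 <= 28, accepted
#   "10.241.16.0/24" -> prefix 24 <= 28, accepted, though it wastes addresses per the description
#   "10.241.16.0/29" -> prefix 29 > 28, rejected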
- } - validation { - condition = can(regex("^[^\\s]+-[0-9]+x[0-9]+", var.bastion_instance.profile)) - error_message = "The profile must be a valid virtual server instance profile." + condition = tonumber(regex("^.*?/(\\d+)$", var.vpc_cluster_login_private_subnets_cidr_blocks)[0]) <= 28 + error_message = "This subnet is used to create only a login virtual server instance. Providing a larger CIDR size will waste the usage of available IPs. A CIDR range of /28 is sufficient for the creation of the login subnet." } - description = "Bastion node functions as a jump server to enable secure SSH access to cluster nodes, ensuring controlled connectivity within the private network. Specify the configuration details for the bastion node, including the image and instance profile. Only Ubuntu 22.04 stock images are supported." } - ############################################################################## # Deployer Variables ############################################################################## @@ -152,158 +146,82 @@ variable "deployer_instance" { profile = string }) default = { - image = "hpcc-scale-deployer-v1" - profile = "bx2-8x32" - } - validation { - condition = can(regex("^(b|c|m)x[0-9]+d?-[0-9]+x[0-9]+$", var.deployer_instance.profile)) - error_message = "The profile must be a valid virtual server instance profile and must be from the Balanced, Compute, Memory Categories" + image = "jay-lsf-new-image" + profile = "mx2-4x32" } - description = "A deployer node is a dedicated virtual machine or server instance used to automate the deployment and configuration of infrastructure and applications for HPC cluster components. Specify the configuration for the deployer node, including the custom image and virtual server instance profile." + description = "Configuration for the deployer node, including the custom image and instance profile. By default, uses fixpack_15 image and a bx2-8x32 profile." } ############################################################################## # Compute Variables ############################################################################## -variable "login_subnets_cidr" { - type = string - default = "10.241.16.0/28" - description = "Provide the CIDR block required for the creation of the login cluster private subnet. Single CIDR block is required. If using a hybrid environment, modify the CIDR block to avoid conflicts with any on-premises CIDR blocks. Since the login subnet is used only for the creation of login virtual server instances, provide a CIDR range of /28." - validation { - condition = ( - can( - regex( - "^((25[0-5]|2[0-4][0-9]|1[0-9]{2}|[1-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|1[0-9]{2}|[1-9]?[0-9])/(3[0-2]|[12]?[0-9])$", trimspace(var.login_subnets_cidr) - ) - ) - ) - error_message = "login_node_cidr must be a valid IPv4 CIDR (e.g., 192.168.1.0/28)." - } - - validation { - condition = can( - regex( - "^((25[0-5]|2[0-4][0-9]|1[0-9]{2}|[1-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|1[0-9]{2}|[1-9]?[0-9])/(2[8-9]|3[0-2])$", trimspace(var.login_subnets_cidr) - ) - ) - error_message = "This subnet is used to create only a login virtual server instance. Providing a larger CIDR size will waste the usage of available IPs. A CIDR range of /28 is sufficient for the creation of the login subnet." - } -} - -variable "compute_subnets_cidr" { - type = string - default = "10.241.0.0/20" - description = "Provide the CIDR block required for the creation of the compute private subnet. Single CIDR block is required. 
If using a hybrid environment, modify the CIDR block to avoid conflicts with any on-premises CIDR blocks. Ensure the selected CIDR block size can accommodate the maximum number of scale compute nodes expected in your cluster. For more information on CIDR block size selection, refer to the documentation, see [Choosing IP ranges for your VPC](https://cloud.ibm.com/docs/vpc?topic=vpc-choosing-ip-ranges-for-your-vpc)." -} - -variable "storage_subnets_cidr" { - type = string - default = "10.241.30.0/24" - description = "Provide the CIDR block required for the creation of the storage private subnet. Single CIDR block is required. If using a hybrid environment, modify the CIDR block to avoid conflicts with any on-premises CIDR blocks. Ensure the selected CIDR block size can accommodate the maximum number of scale storage nodes expected in your cluster. For more information on CIDR block size selection, refer to the documentation, see [Choosing IP ranges for your VPC](https://cloud.ibm.com/docs/vpc?topic=vpc-choosing-ip-ranges-for-your-vpc)." -} - -variable "protocol_subnets_cidr" { - type = string - default = "10.241.40.0/24" - description = "Provide the CIDR block required for the creation of the protocol private subnet. Single CIDR block is required. If using a hybrid environment, modify the CIDR block to avoid conflicts with any on-premises CIDR blocks. Ensure the selected CIDR block size can accommodate the maximum number of protocol nodes expected in your cluster. For more information on CIDR block size selection, refer to the documentation, see [Choosing IP ranges for your VPC](https://cloud.ibm.com/docs/vpc?topic=vpc-choosing-ip-ranges-for-your-vpc)." -} - variable "client_subnets_cidr" { type = string default = "10.241.50.0/24" - description = "Provide the CIDR block required for the creation of the client private subnet. Single CIDR block is required. If using a hybrid environment, modify the CIDR block to avoid conflicts with any on-premises CIDR blocks. Ensure the selected CIDR block size can accommodate the maximum number of scale client nodes expected in your cluster. For more information on CIDR block size selection, refer to the documentation, see [Choosing IP ranges for your VPC](https://cloud.ibm.com/docs/vpc?topic=vpc-choosing-ip-ranges-for-your-vpc)." + description = "Subnet CIDR block to launch the client host." } -variable "compute_gui_username" { - type = string - default = "" - sensitive = true - description = "GUI username to perform system management and monitoring tasks on the compute cluster. The Username should be at least 4 characters, (any combination of lowercase and uppercase letters)." - validation { - condition = sum([for inst in var.compute_instances : inst.count]) == 0 || (length(var.compute_gui_username) >= 4 && length(var.compute_gui_username) <= 30 && trimspace(var.compute_gui_username) != "") - error_message = "Specified input for \"compute_gui_username\" is not valid. Username should be greater or equal to 4 letters and less than equal to 30." - } - validation { - # Structural check - condition = sum([for inst in var.compute_instances : inst.count]) == 0 || can(regex("^[A-Za-z0-9]([._]?[A-Za-z0-9])*$", var.compute_gui_username)) - - error_message = "Specified input for \"compute_gui_username\" is not valid. Username should only have alphanumerics, dot(.) and underscore(_). 
No consecutive dots or underscores" - } +variable "client_instances" { + type = list( + object({ + profile = string + count = number + image = string + }) + ) + default = [{ + profile = "cx2-2x4" + count = 2 + image = "ibm-redhat-8-10-minimal-amd64-4" + }] + description = "Number of instances to be launched for client." } -variable "compute_gui_password" { +variable "vpc_cluster_private_subnets_cidr_blocks" { type = string - default = "" - sensitive = true - description = "Password for logging in to the compute cluster GUI. Must be at least 8 characters long and include a combination of uppercase and lowercase letters, a number, and a special character. It must not contain the username or start with a special character." - validation { - condition = ( - sum([for inst in var.compute_instances : inst.count]) == 0 || can(regex("^.{8,}$", var.compute_gui_password) != "") && can(regex("[0-9]{1,}", var.compute_gui_password) != "") && can(regex("[a-z]{1,}", var.compute_gui_password) != "") && can(regex("[A-Z]{1,}", var.compute_gui_password) != "") && can(regex("[!@#$%^&*()_+=-]{1,}", var.compute_gui_password) != "") && trimspace(var.compute_gui_password) != "" && can(regex("^[!@#$%^&*()_+=-]", var.compute_gui_password)) == false && (replace(lower(var.compute_gui_password), lower(var.compute_gui_username), "") == lower(var.compute_gui_password)) - ) - error_message = "If compute instances are used, the GUI password must be at least 8 characters long, include upper/lowercase letters, a number, a special character, must not start with a special character, and must not contain the username." - } + default = "10.241.0.0/20" + description = "Provide the CIDR block required for the creation of the compute cluster's private subnet. One CIDR block is required. If using a hybrid environment, modify the CIDR block to avoid conflicts with any on-premises CIDR blocks. Ensure the selected CIDR block size can accommodate the maximum number of management and dynamic compute nodes expected in your cluster. For more information on CIDR block size selection, refer to the documentation, see [Choosing IP ranges for your VPC](https://cloud.ibm.com/docs/vpc?topic=vpc-choosing-ip-ranges-for-your-vpc)." } -############################################################################## -# Storage Scale Variables -############################################################################## variable "compute_instances" { type = list( object({ profile = string count = number image = string - filesystem = optional(string) + filesystem = string }) ) default = [{ - profile = "bx2-2x8" - count = 0 - image = "hpcc-scale5232-rhel810-v1" - filesystem = "/gpfs/fs1" + profile = "cx2-2x4" + count = 3 + image = "ibm-redhat-8-10-minimal-amd64-4" + filesystem = "/ibm/fs1" }] - validation { - condition = alltrue([ - for inst in var.compute_instances : can(regex("^(b|c|m)x[0-9]+d?-[0-9]+x[0-9]+$", inst.profile)) - ]) - error_message = "Specified profile must be a valid IBM Cloud VPC GEN2 profile name and must be from the Balanced, Compute, Memory Categories [Learn more](https://cloud.ibm.com/docs/vpc?topic=vpc-profiles)." - } - validation { - condition = alltrue([ - for inst in var.compute_instances : inst.count == 0 || (inst.count >= 3 && inst.count <= 64) - ]) - error_message = "Specified count must be 0 or in range 3 to 64" - } - description = "Specify the list of virtual server instances to be provisioned as compute nodes in the cluster. 
Each object includes the instance profile (machine type), number of instances (count), OS image to use, and an optional filesystem mount path. This configuration allows customization of the compute tier to suit specific performance and workload requirements. The profile must match a valid IBM Cloud VPC Gen2 instance profile format. A minimum of 3 compute nodes is required to form a cluster, and a maximum of 64 nodes is supported. For more details, refer[Instance Profiles](https://cloud.ibm.com/docs/vpc?topic=vpc-profiles&interface=ui)." + description = "Total Number of instances to be launched for compute cluster." } -variable "client_instances" { - type = list( - object({ - profile = string - count = number - image = string - }) - ) - default = [{ - profile = "cx2-2x4" - count = 0 - image = "ibm-redhat-8-10-minimal-amd64-6" - }] - validation { - condition = alltrue([ - for inst in var.client_instances : can(regex("^(b|c|m)x[0-9]+d?-[0-9]+x[0-9]+$", inst.profile)) - ]) - error_message = "Specified profile must be a valid IBM Cloud VPC GEN2 profile name and must be from the Balanced, Compute, Memory Categories (e.g., bx2-4x16, cx2d-16x64). [Learn more](https://cloud.ibm.com/docs/vpc?topic=vpc-profiles)" - } - validation { - condition = alltrue([ - for inst in var.client_instances : inst.count >= 0 && inst.count <= 2000 - ]) - error_message = "client_instances 'count' value must be between 0 and 2000." - } +variable "compute_gui_username" { + type = string + default = "admin" + sensitive = true + description = "GUI user to perform system management and monitoring tasks on compute cluster." +} - description = "Specify the list of virtual server instances to be provisioned as client nodes in the cluster. Each object includes the instance profile (machine type), number of instances (count), OS image to use. This configuration allows customization of the compute tier to suit specific performance and workload requirements. The profile must match a valid IBM Cloud VPC Gen2 instance profile format. For more details, refer [Instance Profiles](https://cloud.ibm.com/docs/vpc?topic=vpc-profiles&interface=ui)." +variable "compute_gui_password" { + type = string + default = "hpc@IBMCloud" + sensitive = true + description = "Password for compute cluster GUI" +} + +############################################################################## +# Storage Scale Variables +############################################################################## +variable "storage_subnets_cidr" { + type = string + default = "10.241.30.0/24" + description = "Subnet CIDR block to launch the storage cluster host." } variable "storage_instances" { @@ -316,33 +234,15 @@ variable "storage_instances" { }) ) default = [{ - profile = "bx2d-32x128" - count = 2 - image = "hpcc-scale5232-rhel810-v1" - filesystem = "/gpfs/fs1" + profile = "bx2-2x8" + count = 0 + image = "ibm-redhat-8-10-minimal-amd64-4" + filesystem = "/ibm/fs1" }] - validation { - condition = var.storage_type != "persistent" ? alltrue([ - for inst in var.storage_instances : can(regex("^(b|c|m)x[0-9]+d-[0-9]+x[0-9]+$", inst.profile)) - ]) : true - error_message = "Specified profile must be a valid IBM Cloud VPC GEN2 profile name and must be from the Balanced, Compute, Memory Categories (e.g., bx2d-4x16, cx2d-16x64). [Learn more](https://cloud.ibm.com/docs/vpc?topic=vpc-profiles)" - } - validation { - condition = alltrue([ - for inst in var.storage_instances : inst.count % 2 == 0 - ]) - error_message = "Storage count should always be an even number." 
- } - validation { - condition = var.storage_type != "persistent" ? alltrue([ - for inst in var.storage_instances : inst.count >= 2 && inst.count <= 64 - ]) : true - error_message = "storage_instances 'count' value must be in range 2 to 64." - } - description = "Specify the list of virtual server instances to be provisioned as storage nodes in the cluster. Each object includes the instance profile (machine type), number of instances (count), OS image to use, and an optional filesystem mount path. This configuration allows customization of the storage tier to suit specific storage performance cluster. The profile must match a valid IBM Cloud VPC Gen2 instance profile format. A minimum of 2 storage nodes is required to form a cluster, and a maximum of 64 nodes is supported. For more details, refer[Instance Profiles](https://cloud.ibm.com/docs/vpc?topic=vpc-profiles&interface=ui)." + description = "Number of instances to be launched for storage cluster." } -variable "storage_baremetal_server" { +variable "storage_servers" { type = list( object({ profile = string @@ -351,136 +251,55 @@ variable "storage_baremetal_server" { filesystem = string }) ) - default = [{ profile = "cx2d-metal-96x192" - count = 2 - image = "hpcc-scale5232-rhel810-v1" + count = 0 + image = "ibm-redhat-8-10-minimal-amd64-4" filesystem = "/gpfs/fs1" }] - - validation { - condition = var.storage_type == "persistent" ? alltrue([ - for inst in var.storage_baremetal_server : can(regex("^[b|c|m]x[0-9]+d?-[a-z]+-[0-9]+x[0-9]+", inst.profile)) - ]) : true - error_message = "Specified profile must be a valid IBM Cloud VPC GEN2 profile name [Learn more](https://cloud.ibm.com/docs/vpc?topic=vpc-profiles)." - } - - validation { - condition = var.storage_type == "persistent" ? alltrue([ - for inst in var.storage_baremetal_server : inst.count >= 2 && inst.count <= 64 - ]) : true - error_message = "Each storage_baremetal_server 'count' value must be between 2 and 64." - } - - description = "Specify the list of bare metal servers to be provisioned for the storage cluster. Each object in the list specifies the server profile (hardware configuration), the count (number of servers), the image (OS image to use), and an optional filesystem mount path. This configuration allows flexibility in scaling and customizing the storage cluster based on performance and capacity requirements. Only valid bare metal profiles supported in IBM Cloud VPC should be used. A minimum of 2 baremetal storage nodes is required to form a cluster, and a maximum of 64 nodes is supported For available bare metal profiles, refer to the [Baremetal Profiles](https://cloud.ibm.com/docs/vpc?topic=vpc-bare-metal-servers-profile&interface=ui)." + description = "Number of BareMetal Servers to be launched for storage cluster." } -variable "tie_breaker_baremetal_server_profile" { - type = string - default = null - description = "Specify the bare metal server profile type name to be used for creating the bare metal Tie breaker node. If no value is provided, the storage bare metal server profile will be used as the default. For more information, see [bare metal server profiles](https://cloud.ibm.com/docs/vpc?topic=vpc-bare-metal-servers-profile&interface=ui). 
[Tie Breaker Node](https://www.ibm.com/docs/en/storage-scale/5.2.2?topic=quorum-node-tiebreaker-disks)" -} - -variable "scale_management_vsi_profile" { +variable "protocol_subnets_cidr" { type = string - default = "bx2-8x32" - description = "The virtual server instance profile type name to be used to create the Management node. For more information, see [Instance Profiles](https://cloud.ibm.com/docs/vpc?topic=vpc-profiles&interface=ui)." - validation { - condition = can(regex("^[b|c|m]x[0-9]+d?-[0-9]+x[0-9]+", var.scale_management_vsi_profile)) - error_message = "Specified profile must be a valid IBM Cloud VPC GEN2 Instance Storage profile name [Learn more](https://cloud.ibm.com/docs/vpc?topic=vpc-profiles)." - } -} - -variable "afm_instances" { - type = list( - object({ - profile = string - count = number - }) - ) - default = [{ - profile = "bx2-32x128" - count = 0 - }] - validation { - condition = alltrue([ - for inst in var.afm_instances : can(regex("^[bcm]x[0-9]+d?(-[a-z]+)?-[0-9]+x[0-9]+$", inst.profile)) - ]) - error_message = "Specified profile must be a valid IBM Cloud VPC GEN2 profile name and must be from the Balanced, Compute, Memory Categories [Learn more](https://cloud.ibm.com/docs/vpc?topic=vpc-profiles)." - } - validation { - condition = alltrue([ - for inst in var.afm_instances : inst.count >= 0 && inst.count <= 16 - ]) - error_message = "afm_instances 'count' value must be between 0 and 16." - } - description = "Specify the list of virtual server instances to be provisioned as AFM nodes in the cluster. Each object in the list includes the instance profile (machine type), the count (number of instances), the image (OS image to use). This configuration allows you to access remote data and high-performance computing needs.This input can be used to provision virtual server instances (VSI). If persistent, high-throughput storage is required, consider using bare metal instances instead. Ensure you provide valid instance profiles. Maximum of 16 afm nodes is supported. For more details, refer to [Instance Profiles](https://cloud.ibm.com/docs/vpc?topic=vpc-profiles&interface=ui)." + default = "10.241.40.0/24" + description = "Subnet CIDR block to launch the protocol hosts." } - variable "protocol_instances" { type = list( object({ profile = string count = number + image = string }) ) default = [{ - profile = "cx2-32x64" + profile = "bx2-2x8" count = 2 + image = "ibm-redhat-8-10-minimal-amd64-4" }] - validation { - condition = alltrue([ - for inst in var.protocol_instances : can(regex("^[bcm]x[0-9]+d?(-[a-z]+)?-[0-9]+x[0-9]+$", inst.profile)) - ]) - error_message = "Specified profile must be a valid IBM Cloud VPC GEN2 profile name [Learn more](https://cloud.ibm.com/docs/vpc?topic=vpc-profiles)." - } - validation { - condition = alltrue([ - for inst in var.protocol_instances : inst.count >= 0 && inst.count <= 32 - ]) - error_message = "protocol_instances 'count' value must be between 0 and 32." - } - description = "Specify the list of virtual server instances to be provisioned as protocol nodes in the cluster. Each object in the list includes the instance profile (machine type), the count (number of instances), the image (OS image to use). This configuration allows allows for a unified data management solution, enabling different clients to access the same data using NFS protocol.This input can be used to provision virtual server instances (VSI). If persistent, high-throughput storage is required, consider using bare metal instances instead. 
Ensure you provide valid instance profiles. Maximum of 32 VSI or baremetal nodes are supported. For more details, refer to [Instance Profiles](https://cloud.ibm.com/docs/vpc?topic=vpc-profiles&interface=ui)." + description = "Number of instances to be launched for protocol hosts." } variable "colocate_protocol_instances" { type = bool default = true - description = "Enable this option to colocate protocol services on the same virtual server instances used for storage. When set to true, the storage nodes will also act as protocol nodes for reducing the need for separate infrastructure. This can optimize resource usage and simplify the cluster setup, especially for smaller environments or cost-sensitive deployments. For larger or performance-intensive workloads, consider deploying dedicated protocol instances instead." - validation { - condition = anytrue([var.colocate_protocol_instances == true && var.storage_type != "persistent" && sum(var.protocol_instances[*]["count"]) <= sum(var.storage_instances[*]["count"]), var.colocate_protocol_instances == true && var.storage_type == "persistent" && sum(var.protocol_instances[*]["count"]) <= sum(var.storage_baremetal_server[*]["count"]), var.colocate_protocol_instances == false]) - error_message = "When colocation is true, protocol instance count should always be less than or equal to storage instance count" - } + description = "Enable this option to use the storage instances as protocol instances." } variable "storage_gui_username" { type = string - default = "" + default = "admin" sensitive = true - description = "GUI username to perform system management and monitoring tasks on the storage cluster. Note: Username should be at least 4 characters, (any combination of lowercase and uppercase letters)." - validation { - condition = length(var.storage_gui_username) >= 4 && length(var.storage_gui_username) <= 30 && trimspace(var.storage_gui_username) != "" - error_message = "Specified input for \"storage_gui_username\" is not valid. Username should be greater or equal to 4 letters and less than equal to 30." - } - validation { - # Structural check - condition = can(regex("^[A-Za-z0-9]([._]?[A-Za-z0-9])*$", var.storage_gui_username)) - - error_message = "Specified input for \"storage_gui_username\" is not valid. Username should only have alphanumerics, dot(.) and underscore(_). No consecutive dots or underscores" - } + description = "GUI user to perform system management and monitoring tasks on storage cluster." } variable "storage_gui_password" { type = string - default = "" + default = "hpc@IBMCloud" sensitive = true - description = "The storage cluster GUI password is used for logging in to the storage cluster through the GUI. The password should contain a minimum of 8 characters. For a strong password, use a combination of uppercase and lowercase letters, one number, and a special character. Make sure that the password doesn't contain the username and it should not start with a special character." 
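# Illustrative only (made-up credentials): a pair that also satisfies the stricter removed rules
# above (8+ characters, upper/lower case, a digit, a special character, no username substring):
#   storage_gui_username = "scaleadmin"
#   storage_gui_password = "Gpfs@2024x"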
- validation { - condition = can(regex("^.{8,20}$", var.storage_gui_password) != "") && can(regex("[0-9]", var.storage_gui_password) != "") && can(regex("[a-z]", var.storage_gui_password) != "") && can(regex("[A-Z]", var.storage_gui_password) != "") && can(regex("[!@#$%^&*()_+=-]", var.storage_gui_password) != "") && trimspace(var.storage_gui_password) != "" && can(regex("^[!@#$%^&*()_+=-]", var.storage_gui_password)) == false && can(regex(lower(var.storage_gui_username), lower(var.storage_gui_password))) == false - error_message = "The Storage GUI password must be 8 to 20 characters long and include at least two alphabetic characters (with one uppercase and one lowercase), one number, and one special character from the set (!@#$%^&*()_+=-). The password must not contain the username or any spaces." - } + description = "Password for storage cluster GUI" } variable "filesystem_config" { @@ -491,34 +310,52 @@ variable "filesystem_config" { default_metadata_replica = number max_data_replica = number max_metadata_replica = number + mount_point = string })) default = [{ - filesystem = "/gpfs/fs1" + filesystem = "fs1" block_size = "4M" default_data_replica = 2 default_metadata_replica = 2 max_data_replica = 3 max_metadata_replica = 3 + mount_point = "/ibm/fs1" }] - description = "Specify the configuration parameters for one or more IBM Storage Scale (GPFS) filesystems. Each object in the list includes the filesystem mount point, block size, and replica settings for both data and metadata. These settings determine how data is distributed and replicated across the cluster for performance and fault tolerance." -} + description = "File system configurations." +} + +# variable "filesets_config" { +# type = list(object({ +# fileset = string +# filesystem = string +# junction_path = string +# client_mount_path = string +# quota = number +# })) +# default = [{ +# fileset = "fileset1" +# filesystem = "fs1" +# junction_path = "/ibm/fs1/fileset1" +# client_mount_path = "/mnt" +# quota = 100 +# }] +# description = "Fileset configurations." +# } -variable "filesets_config" { - type = list(object({ - client_mount_path = string - quota = number - })) - default = [ - { - client_mount_path = "/mnt/scale/tools" - quota = 0 - }, - { - client_mount_path = "/mnt/scale/data" - quota = 0 - } - ] - description = "Specify a list of filesets with client mount paths and optional storage quotas (0 means no quota) to be created within the IBM Storage Scale filesystem.." +variable "afm_instances" { + type = list( + object({ + profile = string + count = number + image = string + }) + ) + default = [{ + profile = "bx2-2x8" + count = 0 + image = "ibm-redhat-8-10-minimal-amd64-4" + }] + description = "Number of instances to be launched for afm hosts." } variable "afm_cos_config" { @@ -542,48 +379,17 @@ variable "afm_cos_config" { bucket_storage_class = "smart" bucket_type = "region_location" }] - nullable = false - description = "Please provide details for the Cloud Object Storage (COS) instance, including information about the COS bucket, service credentials (HMAC key), AFM fileset, mode (such as Read-only (RO), Single writer (SW), Local updates (LU), and Independent writer (IW)), storage class (standard, vault, cold, or smart), and bucket type (single_site_location, region_location, cross_region_location). Note : The 'afm_cos_config' can contain up to 5 entries. 
For further details on COS bucket locations, refer to the relevant documentation https://cloud.ibm.com/docs/cloud-object-storage/basics?topic=cloud-object-storage-endpoints." - validation { - condition = length([for item in var.afm_cos_config : item]) >= 1 && length([for item in var.afm_cos_config : item]) <= 5 - error_message = "The length of \"afm_cos_config\" must be greater than or equal to 1 and less than or equal to 5." - } - validation { - condition = alltrue([for item in var.afm_cos_config : trimspace(item.mode) != "" && item.mode != null]) - error_message = "The \"mode\" field must not be empty or null." - } - validation { - condition = length(distinct([for item in var.afm_cos_config : item.afm_fileset])) == length(var.afm_cos_config) - error_message = "The \"afm_fileset\" name should be unique for each AFM COS bucket relation." - } - validation { - condition = alltrue([for item in var.afm_cos_config : trimspace(item.afm_fileset) != "" && item.afm_fileset != null]) - error_message = "The \"afm_fileset\" field must not be empty or null." - } - validation { - condition = alltrue([for config in var.afm_cos_config : !(config.bucket_type == "single_site_location") || contains(["ams03", "che01", "mil01", "mon01", "par01", "sjc04", "sng01"], config.bucket_region)]) - error_message = "When 'bucket_type' is 'single_site_location', 'bucket_region' must be one of ['ams03', 'che01', 'mil01', 'mon01', 'par01', 'sjc04', 'sng01']." - } - validation { - condition = alltrue([for config in var.afm_cos_config : !(config.bucket_type == "cross_region_location") || contains(["us", "eu", "ap"], config.bucket_region)]) - error_message = "When 'bucket_type' is 'cross_region_location', 'bucket_region' must be one of ['us', 'eu', 'ap']." - } - validation { - condition = alltrue([for config in var.afm_cos_config : !(config.bucket_type == "region_location") || contains(["us-south", "us-east", "eu-gb", "eu-de", "jp-tok", "au-syd", "jp-osa", "ca-tor", "br-sao", "eu-es"], config.bucket_region)]) - error_message = "When 'bucket_type' is 'region_location', 'bucket_region' must be one of ['us-south', 'us-east', 'eu-gb', 'eu-de', 'jp-tok', 'au-syd', 'jp-osa', 'ca-tor', 'br-sao', 'eu-es']." - } - validation { - condition = alltrue([for item in var.afm_cos_config : (item.bucket_type == "" || contains(["cross_region_location", "single_site_location", "region_location"], item.bucket_type))]) - error_message = "Each 'bucket_type' must be either empty or one of 'region_location', 'single_site_location', 'cross_region_location'." - } - validation { - condition = alltrue([for item in var.afm_cos_config : (item.bucket_storage_class == "" || (can(regex("^[a-z]+$", item.bucket_storage_class)) && contains(["smart", "standard", "cold", "vault"], item.bucket_storage_class)))]) - error_message = "Each 'bucket_storage_class' must be either empty or one of 'smart', 'standard', 'cold', or 'vault', and all in lowercase." - } - validation { - condition = alltrue([for item in var.afm_cos_config : trimspace(item.bucket_region) != "" && item.bucket_region != null]) - error_message = "The \"bucket_region\" field must not be empty or null." - } + # default = [{ + # afm_fileset = "afm_fileset" + # mode = "iw" + # cos_instance = null + # bucket_name = null + # bucket_region = "us-south" + # cos_service_cred_key = "" + # bucket_storage_class = "smart" + # bucket_type = "region_location" + # }] + description = "AFM configurations." 
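# Illustrative entry (bucket and instance names are made up) matching the removed constraints
# above: mode is one of ro/sw/lu/iw, bucket_type is one of region_location, single_site_location,
# or cross_region_location, and a region_location bucket takes a VPC region such as us-south:
#   afm_cos_config = [{
#     afm_fileset = "afm_fileset", mode = "iw", cos_instance = null, bucket_name = null,
#     bucket_region = "us-south", cos_service_cred_key = "", bucket_storage_class = "smart",
#     bucket_type = "region_location"
#   }]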
} ############################################################################## @@ -593,17 +399,13 @@ variable "afm_cos_config" { variable "dns_instance_id" { type = string default = null - description = "Specify the ID of an existing IBM Cloud DNS service instance. When provided, domain names are created within the specified instance. If set to null, a new DNS service instance is created, and the required DNS zones are associated with it." + description = "IBM Cloud HPC DNS service instance id." } variable "dns_custom_resolver_id" { type = string default = null - description = "Specify the ID of an existing IBM Cloud DNS custom resolver to avoid creating a new one. If set to null, a new custom resolver will be created and associated with the VPC. Note: A VPC can be associated with only one custom resolver. When using an existing VPC, if a custom resolver is already associated and this ID is not provided, the deployment will fail." - validation { - condition = var.vpc_name != null || var.dns_custom_resolver_id == null - error_message = "If this is a new VPC deployment (vpc_name is null), do not provide dns_custom_resolver_id, as it may impact name resolution." - } + description = "IBM Cloud DNS custom resolver id." } variable "dns_domain_names" { @@ -621,7 +423,26 @@ variable "dns_domain_names" { client = "clnt.com" gklm = "gklm.com" } - description = "DNS domain names are user-friendly addresses that map to systems within a network, making them easier to identify and access. Provide the DNS domain names for IBM Cloud HPC components: compute, storage, protocol, client, and GKLM. These domains will be assigned to the respective nodes that are part of the scale cluster." + description = "IBM Cloud HPC DNS domain names." +} + +############################################################################## +# Encryption Variables +############################################################################## +variable "key_management" { + type = string + default = "key_protect" + description = "Set the value as key_protect to enable customer managed encryption for boot volume and file share. If the key_management is set as null, IBM Cloud resources will always be encrypted through provider managed keys." + validation { + condition = var.key_management == "null" || var.key_management == null || var.key_management == "key_protect" + error_message = "key_management must be either 'null' or 'key_protect'." + } +} + +variable "hpcs_instance_name" { + type = string + default = null + description = "Hyper Protect Crypto Service instance" } ############################################################################## @@ -630,19 +451,25 @@ variable "dns_domain_names" { variable "enable_cos_integration" { type = bool default = true - description = "Set to true to create an extra cos bucket to integrate with scale cluster deployment." + description = "Integrate COS with HPC solution" } variable "cos_instance_name" { type = string default = null - description = "Provide the name of the existing COS instance where the logs for the enabled functionalities will be stored." + description = "Existing COS instance name" +} + +variable "enable_atracker" { + type = bool + default = true + description = "Enable Activity tracker" } variable "enable_vpc_flow_logs" { type = bool default = true - description = "This flag determines whether VPC flow logs are enabled. When set to true, a flow log collector will be created to capture and monitor network traffic data within the VPC. 
Enabling flow logs provides valuable insights for troubleshooting, performance monitoring, and security auditing by recording information about the traffic passing through your VPC. Consider enabling this feature to enhance visibility and maintain robust network management practices." + description = "Enable VPC flow logs" } ############################################################################## @@ -667,84 +494,54 @@ variable "override_json_string" { variable "enable_ldap" { type = bool default = false - description = "Set this option to true to enable LDAP for IBM Spectrum Scale (GPFS), with the default value set to false." + description = "Set this option to true to enable LDAP for IBM Cloud HPC, with the default value set to false." } variable "ldap_basedns" { type = string default = "ldapscale.com" description = "The dns domain name is used for configuring the LDAP server. If an LDAP server is already in existence, ensure to provide the associated DNS domain name." - validation { - condition = var.enable_ldap == false || (var.ldap_basedns != null ? (length(trimspace(var.ldap_basedns)) > 3 && length(var.ldap_basedns) <= 253 && var.ldap_basedns != "null" && !startswith(trimspace(var.ldap_basedns), "www.") && can(regex("^[a-zA-Z0-9]+([a-zA-Z0-9.]*[a-zA-Z0-9]+)*([a-zA-Z0-9]+[a-zA-Z0-9-]*[a-zA-Z0-9]+)*\\.[a-zA-Z]{2,63}$", trimspace(var.ldap_basedns)))) : false) - error_message = "If LDAP is enabled, then the base DNS should not be empty or null. Furthermore, DNS provided should be a properly formatted domain name (without www. prefix), between 4-253 characters, and matches standard DNS naming rules." - } } variable "ldap_server" { type = string default = null description = "Provide the IP address for the existing LDAP server. If no address is given, a new LDAP server will be created." - validation { - condition = var.enable_ldap == false || var.ldap_server == null || (var.ldap_server != null ? (length(trimspace(var.ldap_server)) > 0 && var.ldap_server != "null") : true) - error_message = "If LDAP is enabled, an existing LDAP server IP should be provided." - } } variable "ldap_server_cert" { type = string sensitive = true default = null - description = "Provide the existing LDAP server certificate. This value is required if the 'ldap_server' variable is not set to null. If the certificate is not provided or is invalid, the LDAP configuration may fail. For more information on how to create or obtain the certificate, please refer [existing LDAP server certificate](https://cloud.ibm.com/docs/allowlist/hpc-service?topic=hpc-service-integrating-openldap)." - validation { - condition = var.enable_ldap == true && var.ldap_server != null ? var.ldap_server_cert != null : true - error_message = "Provide the current LDAP server certificate. This is required if 'ldap_server' is set; otherwise, the LDAP configuration will not succeed." - } + description = "Provide the existing LDAP server certificate. This value is required if the 'ldap_server' variable is not set to null. If the certificate is not provided or is invalid, the LDAP configuration may fail." } variable "ldap_admin_password" { type = string sensitive = true default = null - description = "The LDAP admin password must be 8 to 20 characters long and include at least two alphabetic characters (with one uppercase and one lowercase), one number, and one special character from the set (!@#$%^&*()_+=-). The password must not contain the username or any spaces. [This value is ignored for an existing LDAP server]." 
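# Illustrative LDAP wiring (addresses are made up): either reference an existing server and
# supply its certificate, or leave ldap_server null so a new server is created, as the
# descriptions above state. The two ldap_server lines below are mutually exclusive alternatives.
#   enable_ldap  = true
#   ldap_basedns = "ldapscale.com"
#   ldap_server  = "10.241.0.50"   # existing server: ldap_server_cert must then be provided
#   ldap_server  = null            # or: a new LDAP server is provisioned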
- validation { - condition = ( - var.enable_ldap ? ( - var.ldap_admin_password != null ? ( - try(length(var.ldap_admin_password)) >= 8 && - try(length(var.ldap_admin_password)) <= 20 && - try(can(regex(".*[0-9].*", var.ldap_admin_password)), false) && - try(can(regex(".*[A-Z].*", var.ldap_admin_password)), false) && - try(can(regex(".*[a-z].*", var.ldap_admin_password)), false) && - try(can(regex(".*[!@#$%^&*()_+=-].*", var.ldap_admin_password)), false) && - !try(can(regex(".*\\s.*", var.ldap_admin_password)), false) - ) : false - ) : true - ) - error_message = "LDAP admin password must be provided whenever LDAP is enabled. The LDAP admin password must be 8 to 20 characters long and include at least two alphabetic characters (with one uppercase and one lowercase), one number, and one special character from the set (!@#$%^&*()_+=-). The password must not contain any spaces." - } + description = "The LDAP administrative password should be 8 to 20 characters long, with a mix of at least three alphabetic characters, including one uppercase and one lowercase letter. It must also include two numerical digits and at least one special character from (~@_+:). It is important to avoid including the username in the password for enhanced security." } variable "ldap_user_name" { type = string default = "" description = "Custom LDAP User for performing cluster operations. Note: Username should be between 4 to 32 characters, (any combination of lowercase and uppercase letters).[This value is ignored for an existing LDAP server]" - validation { - condition = var.enable_ldap == false || var.ldap_server != null || (length(var.ldap_user_name) >= 4 && length(var.ldap_user_name) <= 32 && var.ldap_user_name != "" && can(regex("^[a-zA-Z0-9_-]*$", var.ldap_user_name)) && trimspace(var.ldap_user_name) != "") - error_message = "LDAP username must be between 4-32 characters long and can only contain letters, numbers, hyphens, and underscores. Spaces are not permitted." - } } variable "ldap_user_password" { type = string sensitive = true default = "" - description = "The LDAP user password must be 8 to 20 characters long and include at least two alphabetic characters (with one uppercase and one lowercase), one numeric digit, and at least one special character from the set (!@#$%^&*()_+=-). Spaces are not allowed. The password must not contain the username for enhanced security. [This value is ignored for an existing LDAP server]." - validation { - condition = !var.enable_ldap || var.ldap_server != null || ((replace(lower(var.ldap_user_password), lower(var.ldap_user_name), "") == lower(var.ldap_user_password)) && length(var.ldap_user_password) >= 8 && length(var.ldap_user_password) <= 20 && can(regex("^(.*[0-9]){1}.*$", var.ldap_user_password))) && can(regex("^(.*[A-Z]){1}.*$", var.ldap_user_password)) && can(regex("^(.*[a-z]){1}.*$", var.ldap_user_password)) && can(regex("^.*[!@#$%^&*()_+=-].*$", var.ldap_user_password)) && !can(regex(".*\\s.*", var.ldap_user_password)) - error_message = "The LDAP user password must be 8 to 20 characters long and include at least two alphabetic characters (with one uppercase and one lowercase), one number, and one special character from the set (!@#$%^&*()_+=-). The password must not contain the username or any spaces." - } + description = "The LDAP user password should be 8 to 20 characters long, with a mix of at least three alphabetic characters, including one uppercase and one lowercase letter. 
It must also include two numerical digits and at least one special character from the set (~@_+:). Avoid including the username in the password for enhanced security. [This value is ignored for an existing LDAP server]." } +# variable "ldap_instance_key_pair" { +# type = list(string) +# default = null +# description = "Name of the SSH key configured in your IBM Cloud account that is used to establish a connection to the LDAP Server. Make sure that the SSH key is present in the same resource group and region where the LDAP Servers are provisioned. If you do not have an SSH key in your IBM Cloud account, create one by using the [SSH keys](https://cloud.ibm.com/docs/vpc?topic=vpc-ssh-keys) instructions." +# } + variable "ldap_instance" { type = list( object({ @@ -754,47 +551,30 @@ variable "ldap_instance" { ) default = [{ profile = "cx2-2x4" - image = "ibm-ubuntu-22-04-5-minimal-amd64-5" + image = "ibm-ubuntu-22-04-5-minimal-amd64-1" }] - description = "Specify the list of virtual server instances to be provisioned as ldap nodes in the cluster. Each object in the list defines the instance profile (machine type), the count (number of instances), the image (OS image to use). This configuration allows you to customize the server for setting up ldap server. The profile must match a valid IBM Cloud VPC Gen2 instance profile format. For more details, refer [Instance Profiles](https://cloud.ibm.com/docs/vpc?topic=vpc-profiles&interface=ui)." - validation { - condition = alltrue([ - for inst in var.ldap_instance : can(regex("^(b|c|m)x[0-9]+d?-[0-9]+x[0-9]+$", inst.profile)) - ]) - error_message = "Specified profile must be a valid IBM Cloud VPC GEN2 profile name [Learn more](https://cloud.ibm.com/docs/vpc?topic=vpc-profiles)." - } - validation { - condition = alltrue([ - for inst in var.ldap_instance : can(regex("^ibm-ubuntu", inst.image)) - ]) - error_message = "Specified image should necessarily be an IBM Ubuntu image [Learn more](https://cloud.ibm.com/docs/vpc?group=stock-images)." - } + description = "Profile and image name to be used for provisioning the LDAP instances. Note: Only Debian-based OS images are supported for the LDAP feature." } - ############################################################################## # GKLM variables ############################################################################## variable "scale_encryption_enabled" { type = bool default = false - description = "Encryption ensures that data stored in the filesystem is protected from unauthorized access and secures sensitive information at rest. To enable the encryption for the filesystem. Select true or false" + description = "Specifies whether to enable encryption for the filesystem. Select true or false." } variable "scale_encryption_type" { type = string - default = "null" + default = null description = "To enable filesystem encryption, specify either 'key_protect' or 'gklm'. If neither is specified, the default value will be 'null' and encryption is disabled" +} - validation { - condition = can(regex("^(key_protect|gklm|null)$", var.scale_encryption_type)) && (var.scale_encryption_type == "null" || var.scale_encryption_enabled) && (!var.scale_encryption_enabled || var.scale_encryption_type != "null") - error_message = <= 2 && inst.count <= 5 - ]) - ) - error_message = "Specified profile must be a valid IBM Cloud VPC GEN2 profile name [Learn more](https://cloud.ibm.com/docs/vpc?topic=vpc-profiles)."
- } - validation { - condition = (var.scale_encryption_type != "gklm" || (sum([for inst in var.gklm_instances : inst.count]) >= 2 && sum([for inst in var.gklm_instances : inst.count]) <= 5)) - #condition = (sum([for inst in var.gklm_instances : inst.count]) == 0 || (sum([for inst in var.gklm_instances : inst.count]) >= 2 && sum([for inst in var.gklm_instances : inst.count]) <= 5)) - error_message = "For High availability the GKLM instance type should be greater than 2 or less than 5" - } - description = "Specify the list of virtual server instances to be provisioned as GKLM (Guardium Key Lifecycle Manager) nodes in the cluster. Each object in the list includes the instance profile (machine type), the count (number of instances), and the image (OS image to use). This configuration allows you to manage and securely store encryption keys used across the cluster components. The profile must match a valid IBM Cloud VPC Gen2 instance profile format. A minimum of 2 and maximum of 5 gklm nodes are supported. For more details, refer[Instance Profiles](https://cloud.ibm.com/docs/vpc?topic=vpc-profiles&interface=ui)." + description = "Number of instances to be launched for GKLM." } -variable "scale_encryption_admin_password" { +variable "scale_encryption_admin_default_password" { type = string - sensitive = true default = null - description = "Specifies the administrator password for GKLM-based encryption. This is required when encryption is enabled for IBM Spectrum Scale (GPFS) and the encryption type is set to 'gklm'. The password is used to authenticate administrative access to the Guardium Key Lifecycle Manager (GKLM) for managing encryption keys. Ensure the password meets your organization's security standards." - - validation { - condition = ( - var.scale_encryption_enabled && var.scale_encryption_type == "gklm" - ? ( - var.scale_encryption_admin_password != null && - length(var.scale_encryption_admin_password) >= 8 && - length(var.scale_encryption_admin_password) <= 20 && - can(regex(".*[0-9].*", var.scale_encryption_admin_password)) && - can(regex(".*[A-Z].*", var.scale_encryption_admin_password)) && - can(regex(".*[a-z].*", var.scale_encryption_admin_password)) && - can(regex(".*[!@#$%^&*()_+=-].*", var.scale_encryption_admin_password)) && - !can(regex(".*\\s.*", var.scale_encryption_admin_password)) # no spaces - ) - : true - ) - - error_message = "You must provide scale_encryption_admin_password when scale_encryption_enabled is true and scale_encryption_type is 'gklm'. The scale encryption admin password must be 8 to 20 characters long and include at least two alphabetic characters (with one uppercase and one lowercase), one number, and one special character from the set (!@#$%^&*()_+=-). The password must not contain any spaces." - } + description = "The default administrator password used for resetting the admin password based on the user input. This is the password that was configured during the GKLM installation and has to be updated." } -# Existing Key Protect Instance Details +variable "scale_encryption_admin_username" { + type = string + default = "SKLMAdmin" + description = "The default Admin username for Security Key Lifecycle Manager (GKLM)." +} -variable "key_protect_instance_id" { +variable "scale_encryption_admin_password" { type = string default = null - description = "Provide the ID of an existing IBM Key Protect instance to be used for filesystem encryption in IBM Storage Scale.
If this value is provided, the automation will use the existing Key Protect instance and create a new encryption key within it. If not provided, a new Key Protect instance will be created automatically during deployment." + description = "Password that is used for performing administrative operations for the GKLM. The password must contain at least 8 characters and at most 20 characters. For a strong password, at least three alphabetic characters are required, with at least one uppercase and one lowercase letter, two numbers, and at least one special character from this set (~@_+:). Make sure that the password doesn't include the username. Visit this [page](https://www.ibm.com/docs/en/gklm/3.0.1?topic=roles-password-policy) to learn more about the GKLM password policy." } variable "storage_type" { type = string default = "scratch" - description = "Select the Storage Scale file system deployment method. Note: The Storage Scale scratch and evaluation type deploys the Storage Scale file system on virtual server instances, and the persistent type deploys the Storage Scale file system on bare metal servers." - validation { - condition = can(regex("^(scratch|persistent|evaluation)$", lower(var.storage_type))) - error_message = "The solution only support scratch, evaluation, and persistent; provide any one of the value." - } - validation { - condition = var.storage_type == "persistent" ? contains(["us-south-1", "us-south-2", "us-south-3", "us-east-1", "us-east-2", "eu-de-1", "eu-de-2", "eu-de-3", "eu-gb-1", "eu-es-3", "eu-es-1", "jp-tok-2", "jp-tok-3", "ca-tor-2", "ca-tor-3"], join(",", var.zones)) : true - error_message = "The solution supports bare metal server creation in only given availability zones i.e. us-south-1, us-south-3, us-south-2, eu-de-1, eu-de-2, eu-de-3, jp-tok-2, eu-gb-1, us-east-1, us-east-2, eu-es-3, eu-es-1, jp-tok-3, jp-tok-2, ca-tor-2 and ca-tor-3. To deploy persistent storage provide any one of the supported availability zones." - } -} + description = "Select the required storage type (scratch/persistent/evaluation)." +} + +# variable "custom_file_shares" { +# type = list( +# object({ +# mount_path = string, +# size = number, +# iops = number +# }) +# ) +# default = [{ +# mount_path = "/mnt/binaries" +# size = 100 +# iops = 1000 +# }, { +# mount_path = "/mnt/data" +# size = 100 +# iops = 1000 +# }] +# description = "Custom file shares to access shared storage" +# } ############################################################################## # Observability Variables @@ -882,7 +643,7 @@ variable "storage_type" { variable "observability_atracker_enable" { type = bool - default = false + default = true description = "Activity Tracker Event Routing to configure how to route auditing events. While multiple Activity Tracker instances can be created, only one tracker is needed to capture all events. Creating additional trackers is unnecessary if an existing Activity Tracker is already integrated with a COS bucket. In such cases, set the value to false, as all events can be monitored and accessed through the existing Activity Tracker." } @@ -896,231 +657,105 @@ variable "observability_atracker_target_type" { } } -############################################################################## -# SCC Workload Protection Variables -############################################################################## -variable "sccwp_service_plan" { - description = "Specify the plan type for the Security and Compliance Center (SCC) Workload Protection instance.
Valid values are free-trial and graduated-tier only." - type = string - default = "free-trial" - validation { - error_message = "Plan for SCC Workload Protection instances can only be `free-trial` or `graduated-tier`." - condition = contains( - ["free-trial", "graduated-tier"], - var.sccwp_service_plan - ) - } -} - -variable "sccwp_enable" { - type = bool - default = false - description = "Set this flag to true to create an instance of IBM Security and Compliance Center (SCC) Workload Protection. When enabled, it provides tools to discover and prioritize vulnerabilities, monitor for security threats, and enforce configuration, permission, and compliance policies across the full lifecycle of your workloads. To view the data on the dashboard, enable the cspm to create the app configuration and required trusted profile policies.[Learn more](https://cloud.ibm.com/docs/workload-protection?topic=workload-protection-about)." -} - -variable "cspm_enabled" { - description = "CSPM (Cloud Security Posture Management) is a set of tools and practices that continuously monitor and secure cloud infrastructure. When enabled, it creates a trusted profile with viewer access to the App Configuration and Enterprise services for the SCC Workload Protection instance. Make sure the required IAM permissions are in place, as missing permissions will cause deployment to fail. If CSPM is disabled, dashboard data will not be available.[Learn more](https://cloud.ibm.com/docs/workload-protection?topic=workload-protection-about)." +variable "observability_monitoring_enable" { + description = "Set false to disable IBM Cloud Monitoring integration. If enabled, infrastructure and LSF application metrics from Management Nodes will be ingested." type = bool default = true - nullable = false } -variable "app_config_plan" { - description = "Specify the IBM service pricing plan for the app configuration. Allowed values are 'basic', 'lite', 'standardv2', 'enterprise'." - type = string - default = "basic" - validation { - error_message = "Plan for App configuration can only be basic, standardv2, enterprise.." - condition = contains( - ["basic", "standardv2", "enterprise"], - var.app_config_plan - ) - } -} - -variable "skip_flowlogs_s2s_auth_policy" { +variable "observability_logs_enable_for_management" { + description = "Set false to disable IBM Cloud Logs integration. If enabled, infrastructure and LSF application logs from Management Nodes will be ingested." type = bool default = false - description = "Skip auth policy between flow logs service and COS instance, set to true if this policy is already in place on account." -} - -########################################################################### -# Existing Bastion Support variables -########################################################################### - -variable "existing_bastion_instance_name" { - type = string - default = null - description = "Provide the name of the bastion instance. If none given then new bastion will be created." -} - -variable "existing_bastion_instance_public_ip" { - type = string - default = null - description = "Provide the public ip address of the bastion instance to establish the remote connection." -} - -variable "existing_bastion_security_group_id" { - type = string - default = null - description = "Specify the security group ID for the bastion server. This ID will be added as an allowlist rule on the HPC cluster nodes to facilitate secure SSH connections through the bastion node. 
By restricting access through a bastion server, this setup enhances security by controlling and monitoring entry points into the cluster environment. Ensure that the specified security group is correctly configured to permit only authorized traffic for secure and efficient management of cluster resources." } -variable "existing_bastion_ssh_private_key" { - type = string - sensitive = true - default = null - description = "Provide the private SSH key (named id_rsa) used during the creation and configuration of the bastion server to securely authenticate and connect to the bastion server. This allows access to internal network resources from a secure entry point. Note: The corresponding public SSH key (named id_rsa.pub) must already be available in the ~/.ssh/authorized_keys file on the bastion host to establish authentication." +variable "observability_logs_enable_for_compute" { + description = "Set false to disable IBM Cloud Logs integration. If enabled, infrastructure and LSF application logs from Compute Nodes will be ingested." + type = bool + default = false } -variable "bms_boot_drive_encryption" { +variable "observability_enable_platform_logs" { + description = "Setting this to true will create a tenant in the same region that the Cloud Logs instance is provisioned to enable platform logs for that region. NOTE: You can only have 1 tenant per region in an account." type = bool default = false - description = "Enable or disable encryption for the boot drive of bare metal servers. When set to true, the boot drive will be encrypted to enhance data security, protecting the operating system and any sensitive information stored on the root volume. This is especially recommended for workloads with strict compliance or security requirements. Set to false to disable boot drive encryption." } -############################################################################## -# Existing VPC Storage Security Variables -############################################################################## -variable "enable_sg_validation" { +variable "observability_enable_metrics_routing" { + description = "Enable metrics routing to manage metrics at the account-level by configuring targets and routes that define where data points are routed." type = bool - default = true - description = "Enable or disable security group validation. Security group validation ensures that the specified security groups are properly assigned" + default = false } -variable "login_security_group_name" { - type = string - default = null - description = "Provide the existing security group name to provision the bastion node. If set to null, the solution will automatically create the necessary security group and rules. If you choose to use an existing security group, ensure it has the appropriate rules configured for the bastion node to function properly." +variable "observability_logs_retention_period" { + description = "The number of days IBM Cloud Logs will retain the logs data in Priority insights. Allowed values: 7, 14, 30, 60, 90." + type = number + default = 7 validation { - condition = anytrue([var.vpc_name != null && var.login_security_group_name != null, var.login_security_group_name == null]) - error_message = "If the login_security_group_name are provided, the user should also provide the vpc_name." + condition = contains([7, 14, 30, 60, 90], var.observability_logs_retention_period) + error_message = "Allowed values for cloud logs retention period is 7, 14, 30, 60, 90." 
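For illustration only, a hypothetical tfvars fragment wiring the observability toggles from this hunk together (example values, not recommendations):

    observability_monitoring_enable          = true
    observability_logs_enable_for_management = true
    observability_logs_enable_for_compute    = false
    observability_enable_platform_logs       = false   # only one tenant per region is allowed per account
    observability_logs_retention_period      = 30      # must be one of 7, 14, 30, 60, 90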
} } -variable "storage_security_group_name" { - type = string - default = null - description = "Provide the security group name to provision the storage nodes. If set to null, the solution will automatically create the necessary security group and rules. If you choose to use an existing security group, ensure it has the appropriate rules configured for the storage nodes to function properly. When using existing security groups, you must provide the corresponding group names for all other associated components as well." - validation { - condition = anytrue([var.vpc_name != null && var.storage_security_group_name != null, var.storage_security_group_name == null]) - error_message = "If the storage_security_group_name are provided, the user should also provide the vpc_name." - } +variable "observability_monitoring_on_compute_nodes_enable" { + description = "Set false to disable IBM Cloud Monitoring integration. If enabled, infrastructure metrics from Compute Nodes will be ingested." + type = bool + default = false } -variable "compute_security_group_name" { +variable "observability_monitoring_plan" { + description = "Type of service plan for IBM Cloud Monitoring instance. You can choose one of the following: lite, graduated-tier. For all details visit [IBM Cloud Monitoring Service Plans](https://cloud.ibm.com/docs/monitoring?topic=monitoring-service_plans)." type = string - default = null - description = "Provide the security group name to provision the compute nodes. If set to null, the solution will automatically create the necessary security group and rules. If you choose to use an existing security group, ensure it has the appropriate rules configured for the storage nodes to function properly. When using existing security groups, you must provide the corresponding group names for all other associated components as well" + default = "graduated-tier" validation { - condition = anytrue([var.vpc_name != null && var.compute_security_group_name != null, var.compute_security_group_name == null]) - error_message = "If the compute_security_group_name are provided, the user should also provide the vpc_name." + condition = can(regex("lite|graduated-tier", var.observability_monitoring_plan)) + error_message = "Please enter a valid plan for IBM Cloud Monitoring, for all details visit https://cloud.ibm.com/docs/monitoring?topic=monitoring-service_plans." } } -variable "client_security_group_name" { - type = string - default = null - description = "Provide the security group name to provision the client nodes. If set to null, the solution will automatically create the necessary security group and rules. If you choose to use an existing security group, ensure it has the appropriate rules configured for the storage nodes to function properly. When using existing security groups, you must provide the corresponding group names for all other associated components as well" - validation { - condition = anytrue([var.vpc_name != null && var.client_security_group_name != null, var.client_security_group_name == null]) - error_message = "If the client_security_group_name are provided, the user should also provide the vpc_name." - } +variable "skip_flowlogs_s2s_auth_policy" { + type = bool + default = false + description = "Skip auth policy between flow logs service and COS instance, set to true if this policy is already in place on account." } -variable "gklm_security_group_name" { - type = string - default = null - description = "Provide the security group name to provision the gklm nodes. 
If set to null, the solution will automatically create the necessary security group and rules. If you choose to use an existing security group, ensure it has the appropriate rules configured for the storage nodes to function properly. When using existing security groups, you must provide the corresponding group names for all other associated components as well" - validation { - condition = anytrue([var.vpc_name != null && var.gklm_security_group_name != null, var.gklm_security_group_name == null]) - error_message = "If the gklm_security_group_name are provided, the user should also provide the vpc_name." - } - validation { - condition = anytrue([var.vpc_name != null && var.storage_security_group_name != null && sum(var.gklm_instances[*]["count"]) >= 2 ? (var.gklm_security_group_name != null ? true : false) : true]) - error_message = "If the storage_security_group_name are provided with gklm_instances count more than or equal to 2, the user should also provide the gklm_security_group_name along with vpc_name. Note: Pass the value for gklm_security_group_name as storage_security_group_name." - } +variable "skip_kms_s2s_auth_policy" { + type = bool + default = false + description = "Skip auth policy between KMS service and COS instance, set to true if this policy is already in place on account." } -variable "ldap_security_group_name" { - type = string - default = null - description = "Provide the security group name to provision the ldap nodes. If set to null, the solution will automatically create the necessary security group and rules. If you choose to use an existing security group, ensure it has the appropriate rules configured for the storage nodes to function properly. When using existing security groups, you must provide the corresponding group names for all other associated components as well" - validation { - condition = anytrue([var.vpc_name != null && var.ldap_security_group_name != null, var.ldap_security_group_name == null]) - error_message = "If the ldap_security_group_name are provided, the user should also provide the vpc_name." - } - validation { - condition = anytrue([var.vpc_name != null && var.storage_security_group_name != null && var.enable_ldap ? (var.ldap_security_group_name != null ? true : false) : true]) - error_message = "If the storage_security_group_name are provided with enable_ldap as true, the user should also provide the ldap_security_group_name along with vpc_name. Note: Pass the value for ldap_security_group_name as storage_security_group_name." - } +variable "skip_iam_block_storage_authorization_policy" { + type = bool + default = false + description = "When using an existing KMS instance name, set this value to true if authorization is already enabled between KMS instance and the block storage volume. Otherwise, default is set to false. Ensuring proper authorization avoids access issues during deployment.For more information on how to create authorization policy manually, see [creating authorization policies for block storage volume](https://cloud.ibm.com/docs/vpc?topic=vpc-block-s2s-auth&interface=ui)." } -variable "login_subnet_id" { - type = string - default = null - description = "Provide ID of an existing subnet to be used for provisioning bastion/deployer node. This is required only when deploying into an existing VPC (i.e., when a value is provided for `vpc_name`). When specifying an existing subnet, ensure that a public gateway is attached to the subnet to enable outbound internet access if required. 
Additionally, if this subnet ID is provided, you must also provide subnet IDs for all other applicable components (e.g., storage , compute, client, protocol, gklm) to maintain consistency across the deployment." - validation { - condition = anytrue([var.vpc_name != null && var.login_subnet_id != null, var.login_subnet_id == null]) - error_message = "If the login_subnet_id are provided, the user should also provide the vpc_name." - } -} +########################################################################### +# Existing Bastion Support variables +########################################################################### -variable "compute_subnet_id" { +variable "existing_bastion_instance_name" { type = string default = null - description = "Provide ID of an existing subnet to be used for provisioning compute nodes. This is required only when deploying into an existing VPC (i.e., when a value is provided for `vpc_name`). When specifying an existing subnet, ensure that a public gateway is attached to the subnet to enable outbound internet access if required. Additionally, if this subnet ID is provided, you must also provide subnet IDs for all other applicable components (e.g., storage , protocol, client, login, gklm) to maintain consistency across the deployment." - validation { - condition = anytrue([var.vpc_name != null && var.compute_subnet_id != null, var.compute_subnet_id == null]) - error_message = "If the compute_subnet_id are provided, the user should also provide the vpc_name." - } + description = "Provide the name of the bastion instance. If none given then new bastion will be created." } -variable "storage_subnet_id" { +variable "existing_bastion_instance_public_ip" { type = string - description = "Provide ID of an existing subnet to be used for storage nodes. This is required only when deploying into an existing VPC (i.e., when a value is provided for `vpc_name`). When specifying an existing subnet, ensure that a public gateway is attached to the subnet to enable outbound internet access if required. Additionally, if this subnet ID is provided, you must also provide subnet IDs for all other applicable components (e.g., compute , protocol, client, login, gklm) to maintain consistency across the deployment." default = null - validation { - condition = anytrue([var.vpc_name != null && var.storage_subnet_id != null, var.storage_subnet_id == null]) - error_message = "If the storage_subnet_id are provided, the user should also provide the vpc_name." - } + description = "Provide the public ip address of the bastion instance to establish the remote connection." } -variable "protocol_subnet_id" { +variable "existing_bastion_security_group_id" { type = string - description = "Provide ID of an existing subnet to be used for protocol nodes. This is required only when deploying into an existing VPC (i.e., when a value is provided for `vpc_name`). When specifying an existing subnet, ensure that a public gateway is attached to the subnet to enable outbound internet access if required. Additionally, if this subnet ID is provided, you must also provide subnet IDs for all other applicable components (e.g., storage , compute, client, login, gklm) to maintain consistency across the deployment." default = null - validation { - condition = anytrue([var.vpc_name != null && var.protocol_subnet_id != null, var.protocol_subnet_id == null]) - error_message = "If the protocol_subnet_id are provided, the user should also provide the vpc_name." 
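As the removed validations here indicate, each subnet ID is only meaningful when paired with vpc_name; a hypothetical fragment for an existing-VPC deployment (all IDs below are invented placeholders):

    vpc_name          = "my-existing-vpc"
    login_subnet_id   = "0717-aaaaaaaa-0000-0000-0000-000000000001"   # placeholder
    compute_subnet_id = "0717-aaaaaaaa-0000-0000-0000-000000000002"   # placeholder
    storage_subnet_id = "0717-aaaaaaaa-0000-0000-0000-000000000003"   # placeholder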
- } + description = "Specify the security group ID for the bastion server. This ID will be added as an allowlist rule on the HPC cluster nodes to facilitate secure SSH connections through the bastion node. By restricting access through a bastion server, this setup enhances security by controlling and monitoring entry points into the cluster environment. Ensure that the specified security group is correctly configured to permit only authorized traffic for secure and efficient management of cluster resources." } -variable "client_subnet_id" { +variable "existing_bastion_ssh_private_key" { type = string - description = "Provide ID of an existing subnet to be used for client nodes. This is required only when deploying into an existing VPC (i.e., when a value is provided for `vpc_name`). When specifying an existing subnet, ensure that a public gateway is attached to the subnet to enable outbound internet access if required. Additionally, if this subnet ID is provided, you must also provide subnet IDs for all other applicable components (e.g., storage , compute, protocol, login, gklm) to maintain consistency across the deployment." + sensitive = true default = null - validation { - condition = anytrue([var.vpc_name != null && var.client_subnet_id != null, var.client_subnet_id == null]) - error_message = "If the client_subnet_id are provided, the user should also provide the vpc_name." - } -} - -# tflint-ignore: all -variable "TF_VERSION" { - type = string - default = "1.9" - description = "The version of the Terraform engine that's used in the Schematics workspace." -} - -# tflint-ignore: all -variable "TF_PARALLELISM" { - type = string - default = "250" - description = "Parallelism/ concurrent operations limit. Valid values are between 1 and 256, both inclusive. [Learn more](https://www.terraform.io/docs/internals/graph.html#walking-the-graph)." - validation { - condition = 1 <= var.TF_PARALLELISM && var.TF_PARALLELISM <= 256 - error_message = "Input \"TF_PARALLELISM\" must be greater than or equal to 1 and less than or equal to 256." - } + description = "Provide the private SSH key (named id_rsa) used during the creation and configuration of the bastion server to securely authenticate and connect to the bastion server. This allows access to internal network resources from a secure entry point. Note: The corresponding public SSH key (named id_rsa.pub) must already be available in the ~/.ssh/authorized_keys file on the bastion host to establish authentication." 
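A minimal sketch of how the existing-bastion inputs above fit together; all values are placeholders, and loading the key with Terraform's file() function is an assumption rather than a requirement of this module:

    existing_bastion_instance_name      = "my-bastion"
    existing_bastion_instance_public_ip = "192.0.2.10"                                  # placeholder documentation address
    existing_bastion_security_group_id  = "r010-00000000-0000-0000-0000-000000000000"   # placeholder
    existing_bastion_ssh_private_key    = file("~/.ssh/id_rsa")                         # private half of the key already authorized on the bastion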
} diff --git a/solutions/slurm/variables.tf b/solutions/slurm/variables.tf index cef4ee57..852efaaa 100644 --- a/solutions/slurm/variables.tf +++ b/solutions/slurm/variables.tf @@ -240,11 +240,11 @@ variable "storage_instances" { profile = string count = number image = string - filesystem = optional(string) + filesystem = string }) ) default = [{ - profile = "bx2d-32x128" + profile = "bx2-2x8" count = 0 image = "ibm-redhat-8-10-minimal-amd64-4" filesystem = "/ibm/fs1" diff --git a/solutions/symphony/variables.tf b/solutions/symphony/variables.tf index cef4ee57..852efaaa 100644 --- a/solutions/symphony/variables.tf +++ b/solutions/symphony/variables.tf @@ -240,11 +240,11 @@ variable "storage_instances" { profile = string count = number image = string - filesystem = optional(string) + filesystem = string }) ) default = [{ - profile = "bx2d-32x128" + profile = "bx2-2x8" count = 0 image = "ibm-redhat-8-10-minimal-amd64-4" filesystem = "/ibm/fs1" diff --git a/tests/data/lsf_fp14_config.yml b/tests/data/lsf_fp14_config.yml index 3aa52277..83ea99b4 100644 --- a/tests/data/lsf_fp14_config.yml +++ b/tests/data/lsf_fp14_config.yml @@ -28,7 +28,7 @@ login_instance: - profile: bx2-2x8 image: hpc-lsf-fp14-compute-rhel810-v1 management_instances: - - profile: bx2-4x16 + - profile: bx2d-4x16 count: 2 image: hpc-lsf-fp14-rhel810-v1 static_compute_instances: @@ -42,7 +42,7 @@ dynamic_compute_instances: placement_strategy: spread kms_instance_name: cicd-lsf-dnd-kms-instance kms_key_name: cicd-lsf-dnd-kms-key -app_center_gui_password: Password@123456 # pragma: allowlist secret +app_center_gui_password: Pass@1234 # pragma: allowlist secret observability_atracker_enable: true observability_atracker_target_type: cloudlogs observability_monitoring_enable: true @@ -60,9 +60,9 @@ app_config_plan: standardv2 enable_hyperthreading: true enable_ldap: true ldap_basedns: cicdldap.com -ldap_admin_password: Password@123456 # pragma: allowlist secret +ldap_admin_password: Pass@123 # pragma: allowlist secret ldap_user_name: tester -ldap_user_password: Password@123456 # pragma: allowlist secret +ldap_user_password: Pass@123 # pragma: allowlist secret ldap_instance: - profile: cx2-2x4 image: ibm-ubuntu-22-04-5-minimal-amd64-1 diff --git a/tests/data/lsf_fp15_config.yml b/tests/data/lsf_fp15_config.yml index e6bd9585..e0b4085b 100644 --- a/tests/data/lsf_fp15_config.yml +++ b/tests/data/lsf_fp15_config.yml @@ -28,7 +28,7 @@ login_instance: - profile: bx2-2x8 image: hpc-lsf-fp15-compute-rhel810-v1 management_instances: - - profile: bx2-4x16 + - profile: bx2d-4x16 count: 2 image: hpc-lsf-fp15-rhel810-v1 static_compute_instances: @@ -42,7 +42,7 @@ dynamic_compute_instances: placement_strategy: spread kms_instance_name: cicd-lsf-dnd-kms-instance kms_key_name: cicd-lsf-dnd-kms-key -app_center_gui_password: Password@123456 # pragma: allowlist secret +app_center_gui_password: Pass@1234 # pragma: allowlist secret observability_atracker_enable: true observability_atracker_target_type: cloudlogs observability_monitoring_enable: true @@ -60,9 +60,9 @@ app_config_plan: standardv2 enable_hyperthreading: true enable_ldap: true ldap_basedns: cicdldap.com -ldap_admin_password: Password@123456 # pragma: allowlist secret +ldap_admin_password: Pass@123 # pragma: allowlist secret ldap_user_name: tester -ldap_user_password: Password@123456 # pragma: allowlist secret +ldap_user_password: Pass@123 # pragma: allowlist secret ldap_instance: - profile: cx2-2x4 image: ibm-ubuntu-22-04-5-minimal-amd64-1 diff --git a/tests/data/scale_config.yml 
b/tests/data/scale_config.yml deleted file mode 100644 index a638297f..00000000 --- a/tests/data/scale_config.yml +++ /dev/null @@ -1,128 +0,0 @@ -# IBM Storage Scale Configuration -scale_version: 5.2.2 -zones: ["jp-tok-1"] -remote_allowed_ips: -existing_resource_group: "Default" -vpc_name: null -ibm_customer_number: 051700 - -# Storage Type -storage_type: "scratch" - -# SSH Configuration -ssh_keys: geretain-hpc -ssh_file_path: /artifacts/.ssh/id_rsa - -# Bastion Configuration -bastion_instance: - image: "ibm-ubuntu-22-04-5-minimal-amd64-6" - profile: "cx2-4x8" - -# Deployer Configuration -deployer_instance: - profile: "mx2-4x32" - image: "hpcc-scale-deployer-v1" - - -# Compute Configuration -compute_gui_username: "computeUsername" -compute_gui_password: "Pass@1234" # pragma: allowlist secret - -# Storage Configuration -storage_gui_username: "storageUsername" -storage_gui_password: "Pass@1234" # pragma: allowlist secret - -# Instance Configurations -compute_instances: - - profile: "cx2-2x4" - count: 3 - image: "hpcc-scale5232-rhel810-v1" - filesystem: "/comp/fs1" - -client_instances: - - profile: "cx2-2x4" - count: 2 - image: "ibm-redhat-8-10-minimal-amd64-5" - -storage_instances: - - profile: "bx2d-16x64" - count: 2 - image: "hpcc-scale5232-rhel810-v1" - filesystem: "/storage/fs1" - -storage_servers: - - profile: "cx2d-metal-96x192" - count: 0 - image: "hpcc-scale5232-rhel810-v1" - filesystem: "/gpfs/fs1" - -protocol_instances: - - profile: "bx2d-16x64" - count: 0 - image: "hpcc-scale5232-rhel810-v1" - -# AFM -afm_instances: - - profile: "bx2d-32x128" - count: 1 - image: "hpcc-scale5232-rhel810-v1" - -# Filesystem Configuration -filesystem_config: - - filesystem: "/ibm/fs1" - block_size: "4M" - default_data_replica: 2 - default_metadata_replica: 2 - max_data_replica: 3 - max_metadata_replica: 3 - -filesets_config: - - client_mount_path: "/mnt/scale/tools" - quota: 100 - - client_mount_path: "/mnt/scale/data" - quota: 100 - -# DNS Configuration -dns_domain_names: - compute: "comp.com" - storage: "strg.com" - protocol: "ces.com" - client: "clnt.com" - gklm: "gklm.com" - -# Observability -enable_cos_integration: false -enable_vpc_flow_logs: false - -# LDAP Configuration -enable_ldap: false -ldap_basedns: "ldapscale.com" -ldap_server: null -ldap_admin_password: "Pass@123" # pragma: allowlist secret -ldap_user_name: "tester" -ldap_user_password: "Pass@123" # pragma: allowlist secret -ldap_instance: - - profile: "cx2-2x4" - image: "ibm-ubuntu-22-04-5-minimal-amd64-1" - -# Encryption Configuration -scale_encryption_enabled: true -scale_encryption_type: "key_protect" -gklm_instances: - - profile: "bx2-2x8" - count: 2 - image: "hpcc-scale-gklm4202-v2-5-3" -scale_encryption_admin_password: "Pass@1234" # pragma: allowlist secret - - - - -# Security and Compliance -sccwp_enable: false -cspm_enabled: false -sccwp_service_plan: "graduated-tier" -app_config_plan: "basic" - -# Observability Configuration -observability_atracker_enable: false -observability_atracker_target_type: "cloudlogs" diff --git a/tests/deployment/lsf_deployment.go b/tests/deployment/lsf_deployment.go index e2478084..046ae0de 100644 --- a/tests/deployment/lsf_deployment.go +++ b/tests/deployment/lsf_deployment.go @@ -135,8 +135,8 @@ type Config struct { AttrackerTestZone string `yaml:"attracker_test_zone"` } -// GetLSFConfigFromYAML reads a YAML file and populates the Config struct. -func GetLSFConfigFromYAML(filePath string) (*Config, error) { +// GetConfigFromYAML reads a YAML file and populates the Config struct. 
+func GetConfigFromYAML(filePath string) (*Config, error) { file, err := os.Open(filePath) if err != nil { return nil, fmt.Errorf("failed to open YAML file %s: %w", filePath, err) diff --git a/tests/deployment/scale_deployment.go b/tests/deployment/scale_deployment.go deleted file mode 100644 index 58a44f8d..00000000 --- a/tests/deployment/scale_deployment.go +++ /dev/null @@ -1,317 +0,0 @@ -package tests - -import ( - "encoding/json" - "fmt" - "log" - "os" - "strconv" - "strings" - - "github.com/terraform-ibm-modules/ibmcloud-terratest-wrapper/common" - utils "github.com/terraform-ibm-modules/terraform-ibm-hpc/utilities" - "gopkg.in/yaml.v3" -) - -var scaleGlobalIP string -var IbmCustomerNumberValue string - -const yamlLocation = "../common-dev-assets/common-go-assets/common-permanent-resources.yaml" - -type ClientInstance struct { - Profile string `yaml:"profile" json:"profile"` - Count int `yaml:"count" json:"count"` - Image string `yaml:"image" json:"image"` -} - -type ProtocolInstance struct { - Profile string `yaml:"profile" json:"profile"` - Count int `yaml:"count" json:"count"` - Image string `yaml:"image" json:"image"` -} - -type ComputeInstance struct { - Profile string `yaml:"profile" json:"profile"` - Count int `yaml:"count" json:"count"` - Image string `yaml:"image" json:"image"` - Filesystem string `yaml:"filesystem" json:"filesystem"` -} - -type StorageInstance struct { - Profile string `yaml:"profile" json:"profile"` - Count int `yaml:"count" json:"count"` - Image string `yaml:"image" json:"image"` - Filesystem string `yaml:"filesystem" json:"filesystem"` -} - -type ScaleDeployerInstance struct { - Profile string `yaml:"profile" json:"profile"` - Image string `yaml:"image" json:"image"` -} - -// GKLMInstance represents GKLM node configuration -type GKLMInstance struct { - Profile string `yaml:"profile" json:"profile"` - Count int `yaml:"count" json:"count"` - Image string `yaml:"image" json:"image"` -} - -// FilesystemConfig represents filesystem configuration -type FilesystemConfig struct { - Filesystem string `yaml:"filesystem" json:"filesystem"` - BlockSize string `yaml:"block_size" json:"block_size"` - DefaultDataReplica int `yaml:"default_data_replica" json:"default_data_replica"` - DefaultMetadataReplica int `yaml:"default_metadata_replica" json:"default_metadata_replica"` - MaxDataReplica int `yaml:"max_data_replica" json:"max_data_replica"` - MaxMetadataReplica int `yaml:"max_metadata_replica" json:"max_metadata_replica"` -} - -// FilesetConfig represents fileset configuration -type FilesetConfig struct { - ClientMountPath string `yaml:"client_mount_path" json:"client_mount_path"` - Quota int `yaml:"quota" json:"quota"` -} - -// DNSDomainNames represents DNS configuration -type DNSDomainNames struct { - Compute string `yaml:"compute" json:"compute"` - Storage string `yaml:"storage" json:"storage"` - Protocol string `yaml:"protocol" json:"protocol"` - Client string `yaml:"client" json:"client"` - GKLM string `yaml:"gklm" json:"gklm"` -} - -type AfmInstance struct { - Profile string `yaml:"profile" json:"profile"` - Count int `yaml:"count" json:"count"` - Image string `yaml:"image" json:"image"` -} - -type ScaleConfig struct { - ScaleVersion string `yaml:"scale_version" json:"scale_version"` - IbmCustomerNumber string `yaml:"ibm_customer_number" json:"ibm_customer_number"` - Zones []string `yaml:"zones" json:"zones"` - RemoteAllowedIPs []string `yaml:"remote_allowed_ips" json:"remote_allowed_ips"` - ExistingResourceGroup string `yaml:"existing_resource_group" 
json:"existing_resource_group"` - StorageType string `yaml:"storage_type" json:"storage_type"` - SSHKeys string `yaml:"ssh_keys" json:"ssh_keys"` - ScaleDeployerInstance ScaleDeployerInstance `yaml:"deployer_instance" json:"deployer_instance"` - ComputeGUIUsername string `yaml:"compute_gui_username" json:"compute_gui_username"` - ComputeGUIPassword string `yaml:"compute_gui_password" json:"compute_gui_password"` - StorageGUIUsername string `yaml:"storage_gui_username" json:"storage_gui_username"` - StorageGUIPassword string `yaml:"storage_gui_password" json:"storage_gui_password"` - ComputeInstances []ComputeInstance `yaml:"compute_instances" json:"compute_instances"` - ClientInstances []ClientInstance `yaml:"client_instances" json:"client_instances"` - StorageInstances []StorageInstance `yaml:"storage_instances" json:"storage_instances"` - ScaleEncryptionEnabled bool `yaml:"scale_encryption_enabled" json:"scale_encryption_enabled"` - ScaleEncryptionType string `yaml:"scale_encryption_type" json:"scale_encryption_type"` - ScaleObservabilityAtrackerEnable bool `yaml:"observability_atracker_enable" json:"observability_atracker_enable"` - ScaleObservabilityAtrackerTargetType string `yaml:"observability_atracker_target_type" json:"observability_atracker_target_type"` - ScaleSCCWPEnable bool `yaml:"sccwp_enable" json:"sccwp_enable"` - ScaleCSPMEnabled bool `yaml:"cspm_enabled" json:"cspm_enabled"` - ScaleSCCWPServicePlan string `yaml:"sccwp_service_plan" json:"sccwp_service_plan"` - GKLMInstances []GKLMInstance `yaml:"gklm_instances" json:"gklm_instances"` - ScaleEncryptionAdminPassword string `yaml:"scale_encryption_admin_password" json:"scale_encryption_admin_password"` // pragma: allowlist secret - ScaleFilesystemConfig []FilesystemConfig `yaml:"filesystem_config" json:"filesystem_config"` - ScaleFilesetsConfig []FilesetConfig `yaml:"filesets_config" json:"filesets_config"` - ScaleDNSDomainNames DNSDomainNames `yaml:"dns_domain_names" json:"dns_domain_names"` - ScaleEnableCOSIntegration bool `yaml:"enable_cos_integration" json:"enable_cos_integration"` - ScaleEnableVPCFlowLogs bool `yaml:"enable_vpc_flow_logs" json:"enable_vpc_flow_logs"` - AfmInstances []AfmInstance `yaml:"afm_instances" json:"afm_instances"` - ProtocolInstances []ProtocolInstance `yaml:"protocol_instances" json:"protocol_instances"` -} - -func GetScaleConfigFromYAML(filePath string) (*ScaleConfig, error) { - file, err := os.Open(filePath) - if err != nil { - return nil, fmt.Errorf("failed to open YAML file: %w", err) - } - - defer func() { - if closeErr := file.Close(); closeErr != nil { - log.Printf("Warning: failed to close file %s: %v", filePath, closeErr) - } - }() - - var config ScaleConfig - if err := yaml.NewDecoder(file).Decode(&config); err != nil { - return nil, fmt.Errorf("failed to decode YAML: %w", err) - } - - scaleGlobalIP, err = utils.GetPublicIP() - if err != nil { - return nil, fmt.Errorf("failed to get public IP: %w", err) - } - - // Load permanent resources from YAML - permanentResources, err := common.LoadMapFromYaml(yamlLocation) - if err != nil { - return nil, fmt.Errorf("failed to load permanent resources from YAML: %v", err) - } - - // Retrieve ibmCustomerNumberSecretID from Secrets Manager // pragma: allowlist secret - ibmCustomerNumberSecretID, ok := permanentResources["hpc_ibm_customer_number_secret_id"].(string) - if !ok { - fmt.Println("Invalid type or nil value for hpc_ibm_customer_number_secret_id") - } else { - ibmCustomerNumberValue, err := utils.GetSecretsManagerKey( - 
permanentResources["secretsManagerGuid"].(string), - permanentResources["secretsManagerRegion"].(string), - ibmCustomerNumberSecretID, // Safely extracted value - ) - - if err != nil { - fmt.Printf("WARN : Retrieving ibmCustomerNumberSecretID from Secrets Manager") // pragma: allowlist secret - } else if ibmCustomerNumberValue != nil { - IbmCustomerNumberValue = *ibmCustomerNumberValue - } - } - - if err := scaleSetEnvFromConfig(&config); err != nil { - return nil, fmt.Errorf("failed to set environment variables: %w", err) - } - - return &config, nil -} - -func scaleSetEnvFromConfig(config *ScaleConfig) error { - envVars := map[string]interface{}{ - "SCALE_VERSION": config.ScaleVersion, - "IBM_CUSTOMER_NUMBER": config.IbmCustomerNumber, - "ZONES": strings.Join(config.Zones, ","), - "REMOTE_ALLOWED_IPS": strings.Join(config.RemoteAllowedIPs, ","), - "EXISTING_RESOURCE_GROUP": config.ExistingResourceGroup, - "STORAGE_TYPE": config.StorageType, - "SSH_KEYS": config.SSHKeys, - "SCALE_DEPLOYER_INSTANCE": config.ScaleDeployerInstance, - "COMPUTE_GUI_USERNAME": config.ComputeGUIUsername, - "COMPUTE_GUI_PASSWORD": config.ComputeGUIPassword, // # pragma: allowlist secret - "STORAGE_GUI_USERNAME": config.StorageGUIUsername, - "STORAGE_GUI_PASSWORD": config.StorageGUIPassword, // # pragma: allowlist secret - "COMPUTE_INSTANCES": config.ComputeInstances, - "CLIENT_INSTANCES": config.ClientInstances, - "STORAGE_INSTANCES": config.StorageInstances, - "SCALE_ENCRYPTION_ENABLED": config.ScaleEncryptionEnabled, - "SCALE_ENCRYPTION_TYPE": config.ScaleEncryptionType, - "SCALE_OBSERVABILITY_ATRACKER_ENABLE": config.ScaleObservabilityAtrackerEnable, - "SCALE_OBSERVABILITY_ATRACKER_TARGET_TYPE": config.ScaleObservabilityAtrackerTargetType, - "SCALE_SCCWP_ENABLE": config.ScaleSCCWPEnable, - "SCALE_CSPM_ENABLED": config.ScaleCSPMEnabled, - "SCALE_SCCWP_SERVICE_PLAN": config.ScaleSCCWPServicePlan, - "GKLM_INSTANCES": config.GKLMInstances, - "SCALE_ENCRYPTION_ADMIN_PASSWORD": config.ScaleEncryptionAdminPassword, // # pragma: allowlist secret - "SCALE_FILESYSTEM_CONFIG": config.ScaleFilesystemConfig, - "SCALE_FILESETS_CONFIG": config.ScaleFilesetsConfig, - "SCALE_DNS_DOMAIN_NAMES": config.ScaleDNSDomainNames, - "SCALE_ENABLE_COS_INTEGRATION": config.ScaleEnableCOSIntegration, - "SCALE_ENABLE_VPC_FLOW_LOGS": config.ScaleEnableVPCFlowLogs, - "AFM_INSTANCES": config.AfmInstances, - "PROTOCOL_INSTANCES": config.ProtocolInstances, - } - - if config.ScaleEncryptionType == "null" { - delete(envVars, "SCALE_ENCRYPTION_TYPE") - } - - if err := processScaleSliceConfigs(config, envVars); err != nil { - return fmt.Errorf("error processing slice configurations: %w", err) - } - - for key, value := range envVars { - if err := scaleSetEnvironmentVariable(key, value); err != nil { - return fmt.Errorf("failed to set %s: %w", key, err) - } - } - - return nil -} - -func processScaleSliceConfigs(config *ScaleConfig, envVars map[string]interface{}) error { - sliceProcessors := []struct { - name string - instances interface{} - }{ - {"COMPUTE_INSTANCES", config.ComputeInstances}, - {"CLIENT_INSTANCES", config.ClientInstances}, - {"STORAGE_INSTANCES", config.StorageInstances}, - {"AFM_INSTANCES", config.AfmInstances}, - {"PROTOCOL_INSTANCES", config.ProtocolInstances}, - } - - for _, processor := range sliceProcessors { - if err := scaleMarshalToEnv(processor.name, processor.instances, envVars); err != nil { - return err - } - } - - return nil -} - -func scaleMarshalToEnv(key string, data interface{}, envVars map[string]interface{}) error 
{ - jsonBytes, err := json.Marshal(data) - if err != nil { - return fmt.Errorf("failed to marshal %s: %w", key, err) - } - envVars[key] = string(jsonBytes) - return nil -} - -func scaleSetEnvironmentVariable(key string, value interface{}) error { - if value == nil { - return nil - } - - if existing := os.Getenv(key); existing != "" { - log.Printf("Environment variable %s is already set. Skipping overwrite.", key) - return nil - } - - if key == "REMOTE_ALLOWED_IPS" { - return scaleHandleRemoteAllowedIPs(value) - } - - if key == "IBM_CUSTOMER_NUMBER" && IbmCustomerNumberValue != "" { - return os.Setenv(key, IbmCustomerNumberValue) - } - - switch v := value.(type) { - case string: - if v != "" { - return os.Setenv(key, v) - } - case bool: - return os.Setenv(key, strconv.FormatBool(v)) - case int: - return os.Setenv(key, strconv.Itoa(v)) - case float64: - return os.Setenv(key, strconv.FormatFloat(v, 'f', -1, 64)) - case []string: - if len(v) > 0 { - return os.Setenv(key, strings.Join(v, ",")) - } - default: - jsonBytes, err := json.Marshal(value) - if err != nil { - return fmt.Errorf("failed to marshal %s: %w", key, err) - } - return os.Setenv(key, string(jsonBytes)) - } - - return nil -} - -func scaleHandleRemoteAllowedIPs(value interface{}) error { - cidr, ok := value.(string) - if !ok { - return fmt.Errorf("remote_allowed_ips must be a string") - } - - if cidr == "" || cidr == "0.0.0.0/0" { - if scaleGlobalIP == "" { - return fmt.Errorf("scaleGlobalIP is empty, cannot set REMOTE_ALLOWED_IPS") - } - return os.Setenv("REMOTE_ALLOWED_IPS", scaleGlobalIP) - } - - return os.Setenv("REMOTE_ALLOWED_IPS", cidr) -} diff --git a/tests/go.mod b/tests/go.mod index 6101355b..eb935b78 100644 --- a/tests/go.mod +++ b/tests/go.mod @@ -5,26 +5,26 @@ go 1.24.2 toolchain go1.24.3 require ( - github.com/IBM/go-sdk-core/v5 v5.21.0 - github.com/IBM/secrets-manager-go-sdk/v2 v2.0.14 - github.com/gruntwork-io/terratest v0.50.0 + github.com/IBM/go-sdk-core/v5 v5.20.0 + github.com/IBM/secrets-manager-go-sdk/v2 v2.0.11 + github.com/gruntwork-io/terratest v0.49.0 github.com/stretchr/testify v1.10.0 - github.com/terraform-ibm-modules/ibmcloud-terratest-wrapper v1.58.12 - golang.org/x/crypto v0.41.0 + github.com/terraform-ibm-modules/ibmcloud-terratest-wrapper v1.50.1 + golang.org/x/crypto v0.39.0 gopkg.in/yaml.v3 v3.0.1 ) require ( - dario.cat/mergo v1.0.2 // indirect - github.com/IBM-Cloud/bluemix-go v0.0.0-20250818082648-8ebc393b4b26 // indirect - github.com/IBM-Cloud/power-go-client v1.12.0 // indirect - github.com/IBM/cloud-databases-go-sdk v0.8.0 // indirect - github.com/IBM/platform-services-go-sdk v0.85.1 // indirect + dario.cat/mergo v1.0.0 // indirect + github.com/IBM-Cloud/bluemix-go v0.0.0-20240719075425-078fcb3a55be // indirect + github.com/IBM-Cloud/power-go-client v1.11.0 // indirect + github.com/IBM/cloud-databases-go-sdk v0.7.1 // indirect + github.com/IBM/platform-services-go-sdk v0.81.1 // indirect github.com/IBM/project-go-sdk v0.3.6 // indirect github.com/IBM/schematics-go-sdk v0.4.0 // indirect - github.com/IBM/vpc-go-sdk v0.70.1 // indirect + github.com/IBM/vpc-go-sdk v0.68.0 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect - github.com/ProtonMail/go-crypto v1.3.0 // indirect + github.com/ProtonMail/go-crypto v1.1.6 // indirect github.com/agext/levenshtein v1.2.3 // indirect github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect @@ -33,28 +33,26 @@ require ( github.com/cyphar/filepath-securejoin 
v0.4.1 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/emirpasic/gods v1.18.1 // indirect - github.com/fxamacker/cbor/v2 v2.9.0 // indirect - github.com/gabriel-vasile/mimetype v1.4.9 // indirect + github.com/gabriel-vasile/mimetype v1.4.8 // indirect github.com/ghodss/yaml v1.0.0 // indirect github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect github.com/go-git/go-billy/v5 v5.6.2 // indirect - github.com/go-git/go-git/v5 v5.16.2 // indirect - github.com/go-logr/logr v1.4.3 // indirect + github.com/go-git/go-git/v5 v5.16.0 // indirect + github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect - github.com/go-openapi/analysis v0.23.0 // indirect - github.com/go-openapi/errors v0.22.2 // indirect - github.com/go-openapi/jsonpointer v0.21.2 // indirect - github.com/go-openapi/jsonreference v0.21.0 // indirect - github.com/go-openapi/loads v0.22.0 // indirect - github.com/go-openapi/runtime v0.28.0 // indirect - github.com/go-openapi/spec v0.21.0 // indirect + github.com/go-openapi/analysis v0.21.5 // indirect + github.com/go-openapi/errors v0.22.1 // indirect + github.com/go-openapi/jsonpointer v0.20.1 // indirect + github.com/go-openapi/jsonreference v0.20.3 // indirect + github.com/go-openapi/loads v0.21.3 // indirect + github.com/go-openapi/runtime v0.26.0 // indirect + github.com/go-openapi/spec v0.20.12 // indirect github.com/go-openapi/strfmt v0.23.0 // indirect - github.com/go-openapi/swag v0.23.1 // indirect - github.com/go-openapi/validate v0.24.0 // indirect + github.com/go-openapi/swag v0.22.5 // indirect + github.com/go-openapi/validate v0.22.4 // indirect github.com/go-playground/locales v0.14.1 // indirect github.com/go-playground/universal-translator v0.18.1 // indirect - github.com/go-playground/validator/v10 v10.27.0 // indirect - github.com/gogo/protobuf v1.3.2 // indirect + github.com/go-playground/validator/v10 v10.26.0 // indirect github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect github.com/google/go-cmp v0.7.0 // indirect github.com/google/uuid v1.6.0 // indirect @@ -62,56 +60,44 @@ require ( github.com/hashicorp/go-cleanhttp v0.5.2 // indirect github.com/hashicorp/go-getter/v2 v2.2.3 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect - github.com/hashicorp/go-retryablehttp v0.7.8 // indirect + github.com/hashicorp/go-retryablehttp v0.7.7 // indirect github.com/hashicorp/go-safetemp v1.0.0 // indirect github.com/hashicorp/go-version v1.7.0 // indirect - github.com/hashicorp/hcl/v2 v2.24.0 // indirect - github.com/hashicorp/terraform-json v0.26.0 // indirect + github.com/hashicorp/hcl/v2 v2.22.0 // indirect + github.com/hashicorp/terraform-json v0.25.0 // indirect github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect github.com/jinzhu/copier v0.4.0 // indirect github.com/josharian/intern v1.0.0 // indirect - github.com/json-iterator/go v1.1.12 // indirect - github.com/kevinburke/ssh_config v1.4.0 // indirect - github.com/klauspost/compress v1.18.0 // indirect + github.com/kevinburke/ssh_config v1.2.0 // indirect + github.com/klauspost/compress v1.16.7 // indirect github.com/leodido/go-urn v1.4.0 // indirect - github.com/mailru/easyjson v0.9.0 // indirect - github.com/mattn/go-zglob v0.0.6 // indirect + github.com/mailru/easyjson v0.7.7 // indirect + github.com/mattn/go-zglob v0.0.4 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect github.com/mitchellh/go-testing-interface v1.14.1 // indirect github.com/mitchellh/go-wordwrap 
v1.0.1 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect - github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect - github.com/modern-go/reflect2 v1.0.2 // indirect github.com/oklog/ulid v1.3.1 // indirect github.com/opentracing/opentracing-go v1.2.0 // indirect - github.com/pjbgf/sha1cd v0.4.0 // indirect + github.com/pjbgf/sha1cd v0.3.2 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/sergi/go-diff v1.4.0 // indirect + github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 // indirect github.com/skeema/knownhosts v1.3.1 // indirect - github.com/stretchr/objx v0.5.2 // indirect - github.com/tmccombs/hcl2json v0.6.7 // indirect - github.com/ulikunitz/xz v0.5.12 // indirect - github.com/x448/float16 v0.8.4 // indirect + github.com/tmccombs/hcl2json v0.6.4 // indirect + github.com/ulikunitz/xz v0.5.11 // indirect github.com/xanzy/ssh-agent v0.3.3 // indirect - github.com/zclconf/go-cty v1.16.3 // indirect - go.mongodb.org/mongo-driver v1.17.4 // indirect - go.opentelemetry.io/auto/sdk v1.1.0 // indirect - go.opentelemetry.io/otel v1.37.0 // indirect - go.opentelemetry.io/otel/metric v1.37.0 // indirect - go.opentelemetry.io/otel/trace v1.37.0 // indirect - go.yaml.in/yaml/v2 v2.4.2 // indirect - golang.org/x/mod v0.27.0 // indirect - golang.org/x/net v0.43.0 // indirect - golang.org/x/sync v0.16.0 // indirect - golang.org/x/sys v0.35.0 // indirect - golang.org/x/text v0.28.0 // indirect - golang.org/x/tools v0.36.0 // indirect + github.com/zclconf/go-cty v1.16.2 // indirect + go.mongodb.org/mongo-driver v1.17.3 // indirect + go.opentelemetry.io/otel v1.29.0 // indirect + go.opentelemetry.io/otel/metric v1.29.0 // indirect + go.opentelemetry.io/otel/trace v1.29.0 // indirect + golang.org/x/mod v0.25.0 // indirect + golang.org/x/net v0.40.0 // indirect + golang.org/x/sync v0.15.0 // indirect + golang.org/x/sys v0.33.0 // indirect + golang.org/x/text v0.26.0 // indirect + golang.org/x/tools v0.33.0 // indirect gopkg.in/warnings.v0 v0.1.2 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect - k8s.io/apimachinery v0.33.4 // indirect - k8s.io/client-go v0.33.4 // indirect - k8s.io/klog/v2 v2.130.1 // indirect - sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.7.0 // indirect - sigs.k8s.io/yaml v1.6.0 // indirect + sigs.k8s.io/yaml v1.4.0 // indirect ) diff --git a/tests/go.sum b/tests/go.sum index c90c82f8..80416288 100644 --- a/tests/go.sum +++ b/tests/go.sum @@ -1,28 +1,28 @@ -dario.cat/mergo v1.0.2 h1:85+piFYR1tMbRrLcDwR18y4UKJ3aH1Tbzi24VRW1TK8= -dario.cat/mergo v1.0.2/go.mod h1:E/hbnu0NxMFBjpMIE34DRGLWqDy0g5FuKDhCb31ngxA= -github.com/IBM-Cloud/bluemix-go v0.0.0-20250818082648-8ebc393b4b26 h1:Gauwtw47rvv79uAgjah63G0zwmB4uzEEAHqthcqITnU= -github.com/IBM-Cloud/bluemix-go v0.0.0-20250818082648-8ebc393b4b26/go.mod h1:PVD407jrZx0i/TW5GaTRI12ouzUfrFlZshbnjs9aQvg= -github.com/IBM-Cloud/power-go-client v1.12.0 h1:tF9Mq5GLYHebpzQT6IYB89lIxEST1E9teuchjxSAaw0= -github.com/IBM-Cloud/power-go-client v1.12.0/go.mod h1:SpTK1ttW8bfMNUVQS8qOEuWn2KOkzaCLyzfze8MG1JE= -github.com/IBM/cloud-databases-go-sdk v0.8.0 h1:uMFqhnc/roVTzfCaUsJ23eaHKjChhGpM1F7Mpxik0bo= -github.com/IBM/cloud-databases-go-sdk v0.8.0/go.mod h1:JYucI1PdwqbAd8XGdDAchxzxRP7bxOh1zUnseovHKsc= -github.com/IBM/go-sdk-core/v5 v5.21.0 h1:DUnYhvC4SoC8T84rx5omnhY3+xcQg/Whyoa3mDPIMkk= -github.com/IBM/go-sdk-core/v5 v5.21.0/go.mod h1:Q3BYO6iDA2zweQPDGbNTtqft5tDcEpm6RTuqMlPcvbw= -github.com/IBM/platform-services-go-sdk v0.85.1 
-github.com/IBM/platform-services-go-sdk v0.85.1/go.mod h1:aGD045m6I8pfcB77wft8w2cHqWOJjcM3YSSV55BX0Js=
+dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk=
+dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
+github.com/IBM-Cloud/bluemix-go v0.0.0-20240719075425-078fcb3a55be h1:USOcBHkYQ4o/ccoEvoHinrba8NQthLJpFXnAoBY+MI4=
+github.com/IBM-Cloud/bluemix-go v0.0.0-20240719075425-078fcb3a55be/go.mod h1:/7hMjdZA6fEpd/dQAOEABxKEwN0t72P3PlpEDu0Y7bE=
+github.com/IBM-Cloud/power-go-client v1.11.0 h1:4xlYXF2+S3s6Crb0D2+d5c1kb6gUE7eowMXLB7Q6cWY=
+github.com/IBM-Cloud/power-go-client v1.11.0/go.mod h1:UDyXeIKEp6r7yWUXYu3r0ZnFSlNZ2YeQTHwM2Tmlgv0=
+github.com/IBM/cloud-databases-go-sdk v0.7.1 h1:5kK4/3NUsGxZzmuUe+1ftajpOQbeDVh5VeemrPgROP4=
+github.com/IBM/cloud-databases-go-sdk v0.7.1/go.mod h1:JYucI1PdwqbAd8XGdDAchxzxRP7bxOh1zUnseovHKsc=
+github.com/IBM/go-sdk-core/v5 v5.20.0 h1:rG1fn5GmJfFzVtpDKndsk6MgcarluG8YIWf89rVqLP8=
+github.com/IBM/go-sdk-core/v5 v5.20.0/go.mod h1:Q3BYO6iDA2zweQPDGbNTtqft5tDcEpm6RTuqMlPcvbw=
+github.com/IBM/platform-services-go-sdk v0.81.1 h1:Ch9wUIigyA3HzW7MQnA1WTHAw+QA6W4bSP3ThgzDpx0=
+github.com/IBM/platform-services-go-sdk v0.81.1/go.mod h1:XOowH+JnIih3FA7uilLVM/9VH7XgCmJ4T/i6eZi7gkw=
 github.com/IBM/project-go-sdk v0.3.6 h1:DRiANKnAePevFsIKSvR89SUaMa2xsd7YKK71Ka1eqKI=
 github.com/IBM/project-go-sdk v0.3.6/go.mod h1:FOJM9ihQV3EEAY6YigcWiTNfVCThtdY8bLC/nhQHFvo=
 github.com/IBM/schematics-go-sdk v0.4.0 h1:x01f/tPquYJYLQzJLGuxWfCbV/EdSMXRikOceNy/JLM=
 github.com/IBM/schematics-go-sdk v0.4.0/go.mod h1:Xe7R7xgwmXBHu09w2CbBe8lkWZaYxNQo19bS4dpLrUA=
-github.com/IBM/secrets-manager-go-sdk/v2 v2.0.14 h1:xKcplIoyh6UknnZSM+xUZVmmAqJckN4CdLT6c6VxoXc=
-github.com/IBM/secrets-manager-go-sdk/v2 v2.0.14/go.mod h1:B1RtnQGpMt9uU1GimIPv1f0amUSM0WAx8UKLOzKCt/c=
-github.com/IBM/vpc-go-sdk v0.70.1 h1:6NsbRkiA5gDNxe7cjNx8Pi1j9s0PlhwNQj29wsKZxAo=
-github.com/IBM/vpc-go-sdk v0.70.1/go.mod h1:K3vVlje72PYE3ZRt1iouE+jSIq+vCyYzT1HiFC06hUA=
+github.com/IBM/secrets-manager-go-sdk/v2 v2.0.11 h1:RG/hnKvKSMrG3X5Jm/P/itg+y/FGPY7+B5N3XYQDbmQ=
+github.com/IBM/secrets-manager-go-sdk/v2 v2.0.11/go.mod h1:7r0LOxg+K/y2fVbh2Uopu5r+VE76p1VTk/3gHAs5MQk=
+github.com/IBM/vpc-go-sdk v0.68.0 h1:Zs65PWeWBG5IwafAJV0RdPVsi3hCjIkhFZkqr1sLt5g=
+github.com/IBM/vpc-go-sdk v0.68.0/go.mod h1:VL7sy61ybg6tvA60SepoQx7TFe20m7JyNUt+se2tHP4=
 github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY=
 github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
 github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
-github.com/ProtonMail/go-crypto v1.3.0 h1:ILq8+Sf5If5DCpHQp4PbZdS1J7HDFRXz/+xKBiRGFrw=
-github.com/ProtonMail/go-crypto v1.3.0/go.mod h1:9whxjD8Rbs29b4XWbB8irEcE8KHMqaR2e7GWU1R+/PE=
+github.com/ProtonMail/go-crypto v1.1.6 h1:ZcV+Ropw6Qn0AX9brlQLAUXfqLBc7Bl+f/DmNxpLfdw=
+github.com/ProtonMail/go-crypto v1.1.6/go.mod h1:rA3QumHc/FZ8pAHreoekgiAbzpNsfQAosU5td4SnOrE=
 github.com/agext/levenshtein v1.2.3 h1:YB2fHEn0UJagG8T1rrWknE3ZQzWM06O8AMAatNn7lmo=
 github.com/agext/levenshtein v1.2.3/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558=
 github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8=
@@ -37,8 +37,12 @@ github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3d
 github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
 github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d h1:xDfNPAt8lFiC1UJrqV3uuy861HCTo708pDMbjHHdCas=
 github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d/go.mod h1:6QX/PXZ00z/TKoufEY6K/a0k6AhaJrQKdFe6OfVXsa4=
+github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
+github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
+github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
 github.com/cloudflare/circl v1.6.1 h1:zqIqSPIndyBh1bjLVVDHMPpVKqp8Su/V+6MeDzzQBQ0=
 github.com/cloudflare/circl v1.6.1/go.mod h1:uddAzsPgqdMAYatqJ0lsjX1oECcQLIlRpzZh3pJrofs=
+github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
 github.com/cyphar/filepath-securejoin v0.4.1 h1:JyxxyPEaktOD+GAnqIqTf9A8tHyAG22rowi7HkoSU1s=
 github.com/cyphar/filepath-securejoin v0.4.1/go.mod h1:Sdj7gXlvMcPZsbhwhQ33GguGLDGQL7h7bg04C/+u9jI=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -46,18 +50,16 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/elazarl/goproxy v1.7.2 h1:Y2o6urb7Eule09PjlhQRGNsqRfPmYI3KKQLFpCAV3+o=
 github.com/elazarl/goproxy v1.7.2/go.mod h1:82vkLNir0ALaW14Rc399OTTjyNREgmdL2cVoIbS6XaE=
-github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g=
-github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
 github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc=
 github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ=
 github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM=
 github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE=
+github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
+github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
 github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/8M=
 github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
-github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM=
-github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ=
-github.com/gabriel-vasile/mimetype v1.4.9 h1:5k+WDwEsD9eTLL8Tz3L0VnmVh9QxGjRmjBvAG7U/oYY=
-github.com/gabriel-vasile/mimetype v1.4.9/go.mod h1:WnSQhFKJuBlRyLiKohA/2DtIlPFAbguNaG7QCHcyGok=
+github.com/gabriel-vasile/mimetype v1.4.8 h1:FfZ3gj38NjllZIeJAmMhr+qKL8Wu+nOoI3GqacKw1NM=
+github.com/gabriel-vasile/mimetype v1.4.8/go.mod h1:ByKUIKGjh1ODkGM1asKUbQZOLGrPjydw3hYPU2YU9t8=
 github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
 github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
 github.com/gliderlabs/ssh v0.3.8 h1:a4YXD1V7xMF9g5nTkdfnja3Sxy1PVDCj1Zg4Wb8vY6c=
@@ -68,57 +70,79 @@ github.com/go-git/go-billy/v5 v5.6.2 h1:6Q86EsPXMa7c3YZ3aLAQsMA0VlWmy43r6FHqa/UN
 github.com/go-git/go-billy/v5 v5.6.2/go.mod h1:rcFC2rAsp/erv7CMz9GczHcuD0D32fWzH+MJAU+jaUU=
 github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399 h1:eMje31YglSBqCdIqdhKBW8lokaMrL3uTkpGYlE2OOT4=
 github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399/go.mod h1:1OCfN199q1Jm3HZlxleg+Dw/mwps2Wbk9frAWm+4FII=
-github.com/go-git/go-git/v5 v5.16.2 h1:fT6ZIOjE5iEnkzKyxTHK1W4HGAsPhqEqiSAssSO77hM=
-github.com/go-git/go-git/v5 v5.16.2/go.mod h1:4Ge4alE/5gPs30F2H1esi2gPd69R0C39lolkucHBOp8=
+github.com/go-git/go-git/v5 v5.16.0 h1:k3kuOEpkc0DeY7xlL6NaaNg39xdgQbtH5mwCafHO9AQ=
+github.com/go-git/go-git/v5 v5.16.0/go.mod h1:4Ge4alE/5gPs30F2H1esi2gPd69R0C39lolkucHBOp8=
 github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
-github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
+github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
 github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
 github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
-github.com/go-openapi/analysis v0.23.0 h1:aGday7OWupfMs+LbmLZG4k0MYXIANxcuBTYUC03zFCU=
-github.com/go-openapi/analysis v0.23.0/go.mod h1:9mz9ZWaSlV8TvjQHLl2mUW2PbZtemkE8yA5v22ohupo=
-github.com/go-openapi/errors v0.22.2 h1:rdxhzcBUazEcGccKqbY1Y7NS8FDcMyIRr0934jrYnZg=
-github.com/go-openapi/errors v0.22.2/go.mod h1:+n/5UdIqdVnLIJ6Q9Se8HNGUXYaY6CN8ImWzfi/Gzp0=
-github.com/go-openapi/jsonpointer v0.21.2 h1:AqQaNADVwq/VnkCmQg6ogE+M3FOsKTytwges0JdwVuA=
-github.com/go-openapi/jsonpointer v0.21.2/go.mod h1:50I1STOfbY1ycR8jGz8DaMeLCdXiI6aDteEdRNNzpdk=
-github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ=
-github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4=
-github.com/go-openapi/loads v0.22.0 h1:ECPGd4jX1U6NApCGG1We+uEozOAvXvJSF4nnwHZ8Aco=
-github.com/go-openapi/loads v0.22.0/go.mod h1:yLsaTCS92mnSAZX5WWoxszLj0u+Ojl+Zs5Stn1oF+rs=
-github.com/go-openapi/runtime v0.28.0 h1:gpPPmWSNGo214l6n8hzdXYhPuJcGtziTOgUpvsFWGIQ=
-github.com/go-openapi/runtime v0.28.0/go.mod h1:QN7OzcS+XuYmkQLw05akXk0jRH/eZ3kb18+1KwW9gyc=
-github.com/go-openapi/spec v0.21.0 h1:LTVzPc3p/RzRnkQqLRndbAzjY0d0BCL72A6j3CdL9ZY=
-github.com/go-openapi/spec v0.21.0/go.mod h1:78u6VdPw81XU44qEWGhtr982gJ5BWg2c0I5XwVMotYk=
+github.com/go-openapi/analysis v0.21.5 h1:3tHfEBh6Ia8eKc4M7khOGjPOAlWKJ10d877Cr9teujI=
+github.com/go-openapi/analysis v0.21.5/go.mod h1:25YcZosX9Lwz2wBsrFrrsL8bmjjXdlyP6zsr2AMy29M=
+github.com/go-openapi/errors v0.20.3/go.mod h1:Z3FlZ4I8jEGxjUK+bugx3on2mIAk4txuAOhlsB1FSgk=
+github.com/go-openapi/errors v0.22.1 h1:kslMRRnK7NCb/CvR1q1VWuEQCEIsBGn5GgKD9e+HYhU=
+github.com/go-openapi/errors v0.22.1/go.mod h1:+n/5UdIqdVnLIJ6Q9Se8HNGUXYaY6CN8ImWzfi/Gzp0=
+github.com/go-openapi/jsonpointer v0.20.1 h1:MkK4VEIEZMj4wT9PmjaUmGflVBr9nvud4Q4UVFbDoBE=
+github.com/go-openapi/jsonpointer v0.20.1/go.mod h1:bHen+N0u1KEO3YlmqOjTT9Adn1RfD91Ar825/PuiRVs=
+github.com/go-openapi/jsonreference v0.20.3 h1:EjGcjTW8pD1mRis6+w/gmoBdqv5+RbE9B85D1NgDOVQ=
+github.com/go-openapi/jsonreference v0.20.3/go.mod h1:FviDZ46i9ivh810gqzFLl5NttD5q3tSlMLqLr6okedM=
+github.com/go-openapi/loads v0.21.3 h1:8sSH2FIm/SnbDUGv572md4YqVMFne/a9Eubvcd3anew=
+github.com/go-openapi/loads v0.21.3/go.mod h1:Y3aMR24iHbKHppOj91nQ/SHc0cuPbAr4ndY4a02xydc=
+github.com/go-openapi/runtime v0.26.0 h1:HYOFtG00FM1UvqrcxbEJg/SwvDRvYLQKGhw2zaQjTcc=
+github.com/go-openapi/runtime v0.26.0/go.mod h1:QgRGeZwrUcSHdeh4Ka9Glvo0ug1LC5WyE+EV88plZrQ=
+github.com/go-openapi/spec v0.20.12 h1:cgSLbrsmziAP2iais+Vz7kSazwZ8rsUZd6TUzdDgkVI=
+github.com/go-openapi/spec v0.20.12/go.mod h1:iSCgnBcwbMW9SfzJb8iYynXvcY6C/QFrI7otzF7xGM4=
+github.com/go-openapi/strfmt v0.21.7/go.mod h1:adeGTkxE44sPyLk0JV235VQAO/ZXUr8KAzYjclFs3ew=
 github.com/go-openapi/strfmt v0.23.0 h1:nlUS6BCqcnAk0pyhi9Y+kdDVZdZMHfEKQiS4HaMgO/c=
 github.com/go-openapi/strfmt v0.23.0/go.mod h1:NrtIpfKtWIygRkKVsxh7XQMDQW5HKQl6S5ik2elW+K4=
-github.com/go-openapi/swag v0.23.1 h1:lpsStH0n2ittzTnbaSloVZLuB5+fvSY/+hnagBjSNZU=
-github.com/go-openapi/swag v0.23.1/go.mod h1:STZs8TbRvEQQKUA+JZNAm3EWlgaOBGpyFDqQnDHMef0=
-github.com/go-openapi/validate v0.24.0 h1:LdfDKwNbpB6Vn40xhTdNZAnfLECL81w+VX3BumrGD58=
-github.com/go-openapi/validate v0.24.0/go.mod h1:iyeX1sEufmv3nPbBdX3ieNviWnOZaJ1+zquzJEf2BAQ=
+github.com/go-openapi/swag v0.22.5 h1:fVS63IE3M0lsuWRzuom3RLwUMVI2peDH01s6M70ugys=
+github.com/go-openapi/swag v0.22.5/go.mod h1:Gl91UqO+btAM0plGGxHqJcQZ1ZTy6jbmridBTsDy8A0=
+github.com/go-openapi/validate v0.22.4 h1:5v3jmMyIPKTR8Lv9syBAIRxG6lY0RqeBPB1LKEijzk8=
+github.com/go-openapi/validate v0.22.4/go.mod h1:qm6O8ZIcPVdSY5219468Jv7kBdGvkiZLPOmqnqTUZ2A=
 github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s=
 github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
 github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA=
 github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY=
 github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY=
 github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY=
-github.com/go-playground/validator/v10 v10.27.0 h1:w8+XrWVMhGkxOaaowyKH35gFydVHOvC0/uWoy2Fzwn4=
-github.com/go-playground/validator/v10 v10.27.0/go.mod h1:I5QpIEbmr8On7W0TktmJAumgzX4CA1XNl4ZmDuVHKKo=
+github.com/go-playground/validator/v10 v10.26.0 h1:SP05Nqhjcvz81uJaRfEV0YBSSSGMc/iMaVtFbr3Sw2k=
+github.com/go-playground/validator/v10 v10.26.0/go.mod h1:I5QpIEbmr8On7W0TktmJAumgzX4CA1XNl4ZmDuVHKKo=
+github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
+github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=
 github.com/go-test/deep v1.0.7 h1:/VSMRlnY/JSyqxQUzQLKVMAskpY/NZKFA5j2P+0pP2M=
 github.com/go-test/deep v1.0.7/go.mod h1:QV8Hv/iy04NyLBxAdO9njL0iVPN1S4d/A3NVv1V36o8=
-github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
-github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
 github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 h1:f+oWsMOmNPc8JmEHVZIycC7hBoQxHH9pNKQORJNozsQ=
 github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8/go.mod h1:wcDNUvekVysuuOpQKo3191zZyTpiI6se1N1ULghS0sw=
-github.com/google/gnostic-models v0.6.9 h1:MU/8wDLif2qCXZmzncUQ/BOfxWfthHi63KqpoNbWqVw=
-github.com/google/gnostic-models v0.6.9/go.mod h1:CiWsm0s6BSQd1hRn8/QmxqB6BesYcbSZxsz9b0KuDBw=
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
+github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
+github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
+github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
+github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
+github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
+github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
 github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
 github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
 github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
-github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
 github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/gruntwork-io/terratest v0.50.0 h1:AbBJ7IRCpLZ9H4HBrjeoWESITv8nLjN6/f1riMNcAsw=
-github.com/gruntwork-io/terratest v0.50.0/go.mod h1:see0lbKvAqz6rvzvN2wyfuFQQG4PWcAb2yHulF6B2q4=
+github.com/gruntwork-io/terratest v0.49.0 h1:GurfpHEOEr8vntB77QcxDh+P7aiQRUgPFdgb6q9PuWI=
+github.com/gruntwork-io/terratest v0.49.0/go.mod h1:/+dfGio9NqUpvvukuPo29B8zy6U5FYJn9PdmvwztK4A=
 github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
 github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
 github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
@@ -130,30 +154,29 @@ github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB1
 github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M=
 github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
 github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
-github.com/hashicorp/go-retryablehttp v0.7.8 h1:ylXZWnqa7Lhqpk0L1P1LzDtGcCR0rPVUrx/c8Unxc48=
-github.com/hashicorp/go-retryablehttp v0.7.8/go.mod h1:rjiScheydd+CxvumBsIrFKlx3iS0jrZ7LvzFGFmuKbw=
+github.com/hashicorp/go-retryablehttp v0.7.7 h1:C8hUCYzor8PIfXHa4UrZkU4VvK8o9ISHxT2Q8+VepXU=
+github.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk=
 github.com/hashicorp/go-safetemp v1.0.0 h1:2HR189eFNrjHQyENnQMMpCiBAsRxzbTMIgBhEyExpmo=
 github.com/hashicorp/go-safetemp v1.0.0/go.mod h1:oaerMy3BhqiTbVye6QuFhFtIceqFoDHxNAB65b+Rj1I=
 github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY=
 github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
-github.com/hashicorp/hcl/v2 v2.24.0 h1:2QJdZ454DSsYGoaE6QheQZjtKZSUs9Nh2izTWiwQxvE=
-github.com/hashicorp/hcl/v2 v2.24.0/go.mod h1:oGoO1FIQYfn/AgyOhlg9qLC6/nOJPX3qGbkZpYAcqfM=
-github.com/hashicorp/terraform-json v0.26.0 h1:+BnJavhRH+oyNWPnfzrfQwVWCZBFMvjdiH2Vi38Udz4=
-github.com/hashicorp/terraform-json v0.26.0/go.mod h1:eyWCeC3nrZamyrKLFnrvwpc3LQPIJsx8hWHQ/nu2/v4=
+github.com/hashicorp/hcl/v2 v2.22.0 h1:hkZ3nCtqeJsDhPRFz5EA9iwcG1hNWGePOTw6oyul12M=
+github.com/hashicorp/hcl/v2 v2.22.0/go.mod h1:62ZYHrXgPoX8xBnzl8QzbWq4dyDsDtfCRgIq1rbJEvA=
+github.com/hashicorp/terraform-json v0.25.0 h1:rmNqc/CIfcWawGiwXmRuiXJKEiJu1ntGoxseG1hLhoQ=
+github.com/hashicorp/terraform-json v0.25.0/go.mod h1:sMKS8fiRDX4rVlR6EJUMudg1WcanxCMoWwTLkgZP/vc=
+github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
+github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
 github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A=
 github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo=
 github.com/jinzhu/copier v0.4.0 h1:w3ciUoD19shMCRargcpm0cm91ytaBhDvuRpz1ODO/U8=
 github.com/jinzhu/copier v0.4.0/go.mod h1:DfbEm0FYsaqBcKcFuvmOZb218JkPGtvSHsKg8S8hyyg=
 github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
 github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
-github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
-github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
-github.com/kevinburke/ssh_config v1.4.0 h1:6xxtP5bZ2E4NF5tuQulISpTO2z8XbtH8cg1PWkxoFkQ=
-github.com/kevinburke/ssh_config v1.4.0/go.mod h1:q2RIzfka+BXARoNexmF9gkxEX7DmvbW9P4hIVx2Kg4M=
-github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
-github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
-github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
+github.com/kevinburke/ssh_config v1.2.0 h1:x584FjTGwHzMwvHx18PXxbBVzfnxogHaAReU4gf13a4=
+github.com/kevinburke/ssh_config v1.2.0/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM=
+github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
+github.com/klauspost/compress v1.16.7 h1:2mk3MPGNzKyxErAw8YaohYh69+pa4sIQSC0fPGCFR9I=
+github.com/klauspost/compress v1.16.7/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
 github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
 github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
 github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
@@ -163,14 +186,14 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
 github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
 github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ=
 github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI=
-github.com/mailru/easyjson v0.9.0 h1:PrnmzHw7262yW8sTBwxi1PdJA3Iw/EKBa8psRf7d9a4=
-github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU=
+github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
+github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
 github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
 github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
 github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
 github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
-github.com/mattn/go-zglob v0.0.6 h1:mP8RnmCgho4oaUYDIDn6GNxYk+qJGUs8fJLn+twYj2A=
-github.com/mattn/go-zglob v0.0.6/go.mod h1:MxxjyoXXnMxfIpxTK2GAkw1w8glPsQILx3N5wrKakiY=
+github.com/mattn/go-zglob v0.0.4 h1:LQi2iOm0/fGgu80AioIJ/1j9w9Oh+9DZ39J4VAGzHQM=
+github.com/mattn/go-zglob v0.0.4/go.mod h1:MxxjyoXXnMxfIpxTK2GAkw1w8glPsQILx3N5wrKakiY=
 github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
 github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
 github.com/mitchellh/go-testing-interface v1.14.1 h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU=
@@ -179,161 +202,298 @@ github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQ
 github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0=
 github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
 github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
-github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
-github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
-github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
-github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
-github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
-github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
-github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
+github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc=
+github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
+github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
+github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
 github.com/nxadm/tail v1.4.11 h1:8feyoE3OzPrcshW5/MJ4sGESc5cqmGkGCWlco4l0bqY=
 github.com/nxadm/tail v1.4.11/go.mod h1:OTaG3NK980DZzxbRq6lEuzgU+mug70nY11sMd4JXXHc=
 github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4=
 github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
+github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
+github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
 github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
 github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
-github.com/onsi/gomega v1.38.0 h1:c/WX+w8SLAinvuKKQFh77WEucCnPk4j2OTUr7lt7BeY=
-github.com/onsi/gomega v1.38.0/go.mod h1:OcXcwId0b9QsE7Y49u+BTrL4IdKOBOKnD6VQNTJEB6o=
+github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c=
+github.com/onsi/ginkgo/v2 v2.1.4/go.mod h1:um6tUpWM/cxCK3/FK8BXqEiUMUwRgSM4JXG47RKZmLU=
+github.com/onsi/ginkgo/v2 v2.1.6/go.mod h1:MEH45j8TBi6u9BMogfbp0stKC5cdGjumZj5Y7AG4VIk=
+github.com/onsi/ginkgo/v2 v2.3.0/go.mod h1:Eew0uilEqZmIEZr8JrvYlvOM7Rr6xzTmMV8AyFNU9d0=
+github.com/onsi/ginkgo/v2 v2.4.0/go.mod h1:iHkDK1fKGcBoEHT5W7YBq4RFWaQulw+caOMkAt4OrFo=
+github.com/onsi/ginkgo/v2 v2.5.0/go.mod h1:Luc4sArBICYCS8THh8v3i3i5CuSZO+RaQRaJoeNwomw=
+github.com/onsi/ginkgo/v2 v2.7.0/go.mod h1:yjiuMwPokqY1XauOgju45q3sJt6VzQ/Fict1LFVcsAo=
+github.com/onsi/ginkgo/v2 v2.8.1/go.mod h1:N1/NbDngAFcSLdyZ+/aYTYGSlq9qMCS/cNKGJjy+csc=
+github.com/onsi/ginkgo/v2 v2.9.0/go.mod h1:4xkjoL/tZv4SMWeww56BU5kAt19mVB47gTWxmrTcxyk=
+github.com/onsi/ginkgo/v2 v2.9.1/go.mod h1:FEcmzVcCHl+4o9bQZVab+4dC9+j+91t2FHSzmGAPfuo=
+github.com/onsi/ginkgo/v2 v2.9.2/go.mod h1:WHcJJG2dIlcCqVfBAwUCrJxSPFb6v4azBwgxeMeDuts=
+github.com/onsi/ginkgo/v2 v2.9.5/go.mod h1:tvAoo1QUJwNEU2ITftXTpR7R1RbCzoZUOs3RonqW57k=
+github.com/onsi/ginkgo/v2 v2.9.7/go.mod h1:cxrmXWykAwTwhQsJOPfdIDiJ+l2RYq7U8hFU+M/1uw0=
+github.com/onsi/ginkgo/v2 v2.11.0/go.mod h1:ZhrRA5XmEE3x3rhlzamx/JJvujdZoJ2uvgI7kR0iZvM=
+github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o=
+github.com/onsi/ginkgo/v2 v2.15.0/go.mod h1:HlxMHtYF57y6Dpf+mc5529KKmSq9h2FpCF+/ZkwUxKM=
+github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
+github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
+github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
+github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro=
+github.com/onsi/gomega v1.20.1/go.mod h1:DtrZpjmvpn2mPm4YWQa0/ALMDj9v4YxLgojwPeREyVo=
+github.com/onsi/gomega v1.21.1/go.mod h1:iYAIXgPSaDHak0LCMA+AWBpIKBr8WZicMxnE8luStNc=
+github.com/onsi/gomega v1.22.1/go.mod h1:x6n7VNe4hw0vkyYUM4mjIXx3JbLiPaBPNgB7PRQ1tuM=
+github.com/onsi/gomega v1.24.0/go.mod h1:Z/NWtiqwBrwUt4/2loMmHL63EDLnYHmVbuBpDr2vQAg=
+github.com/onsi/gomega v1.24.1/go.mod h1:3AOiACssS3/MajrniINInwbfOOtfZvplPzuRSmvt1jM=
+github.com/onsi/gomega v1.26.0/go.mod h1:r+zV744Re+DiYCIPRlYOTxn0YkOLcAnW8k1xXdMPGhM=
+github.com/onsi/gomega v1.27.1/go.mod h1:aHX5xOykVYzWOV4WqQy0sy8BQptgukenXpCXfadcIAw=
+github.com/onsi/gomega v1.27.3/go.mod h1:5vG284IBtfDAmDyrK+eGyZmUgUlmi+Wngqo557cZ6Gw=
+github.com/onsi/gomega v1.27.4/go.mod h1:riYq/GJKh8hhoM01HN6Vmuy93AarCXCBGpvFDK3q3fQ=
+github.com/onsi/gomega v1.27.6/go.mod h1:PIQNjfQwkP3aQAH7lf7j87O/5FiNr+ZR8+ipb+qQlhg=
+github.com/onsi/gomega v1.27.7/go.mod h1:1p8OOlwo2iUUDsHnOrjE5UKYJ+e3W8eQ3qSlRahPmr4=
+github.com/onsi/gomega v1.27.8/go.mod h1:2J8vzI/s+2shY9XHRApDkdgPo1TKT7P2u6fXeJKFnNQ=
+github.com/onsi/gomega v1.27.10/go.mod h1:RsS8tutOdbdgzbPtzzATp12yT7kM5I5aElG3evPbQ0M=
+github.com/onsi/gomega v1.30.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ=
+github.com/onsi/gomega v1.31.1/go.mod h1:y40C95dwAD1Nz36SsEnxvfFe8FFfNxzI5eJ0EYGyAy0=
+github.com/onsi/gomega v1.37.0 h1:CdEG8g0S133B4OswTDC/5XPSzE1OeP29QOioj2PID2Y=
+github.com/onsi/gomega v1.37.0/go.mod h1:8D9+Txp43QWKhM24yyOBEdpkzN8FvJyAwecBgsU4KU0=
 github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs=
 github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc=
-github.com/pjbgf/sha1cd v0.4.0 h1:NXzbL1RvjTUi6kgYZCX3fPwwl27Q1LJndxtUDVfJGRY=
-github.com/pjbgf/sha1cd v0.4.0/go.mod h1:zQWigSxVmsHEZow5qaLtPYxpcKMMQpa09ixqBxuCS6A=
+github.com/pjbgf/sha1cd v0.3.2 h1:a9wb0bp1oC2TGwStyn0Umc/IGKQnEgF0vVaZ8QF8eo4=
+github.com/pjbgf/sha1cd v0.3.2/go.mod h1:zQWigSxVmsHEZow5qaLtPYxpcKMMQpa09ixqBxuCS6A=
 github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
 github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
 github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
-github.com/sergi/go-diff v1.4.0 h1:n/SP9D5ad1fORl+llWyN+D6qoUETXNZARKjyY2/KVCw=
-github.com/sergi/go-diff v1.4.0/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4=
+github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 h1:n661drycOFuPLCN3Uc8sB6B/s6Z4t2xvBgU1htSHuq8=
+github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4=
 github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
 github.com/skeema/knownhosts v1.3.1 h1:X2osQ+RAjK76shCbvhHHHVl3ZlgDm8apHEHFqRjnBY8=
 github.com/skeema/knownhosts v1.3.1/go.mod h1:r7KTdC8l4uxWRyK2TpQZ/1o5HaSzh06ePQNxPwTcfiY=
 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
+github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
 github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
 github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
 github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
 github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
 github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
+github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
+github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
+github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
 github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
 github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
-github.com/terraform-ibm-modules/ibmcloud-terratest-wrapper v1.58.12 h1:c6/my1qhlnD7twSjZ66/1xsKQHu2OC9EF4rRQmsDKMU=
-github.com/terraform-ibm-modules/ibmcloud-terratest-wrapper v1.58.12/go.mod h1:6Wz8vnBelmRZxD5qjm5K4MpvPPWpoCWRPzG76j0B36g=
-github.com/tmccombs/hcl2json v0.6.7 h1:RYKTs4kd/gzRsEiv7J3M2WQ7TYRYZVc+0H0pZdERkxA=
-github.com/tmccombs/hcl2json v0.6.7/go.mod h1:lJgBOOGDpbhjvdG2dLaWsqB4KBzul2HytfDTS3H465o=
-github.com/ulikunitz/xz v0.5.12 h1:37Nm15o69RwBkXM0J6A5OlE67RZTfzUxTj8fB3dfcsc=
-github.com/ulikunitz/xz v0.5.12/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
-github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
-github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
+github.com/terraform-ibm-modules/ibmcloud-terratest-wrapper v1.50.1 h1:5t2x8tkTeEeLrVy141bLVTWfd8zC9pvidByXJxUH6k8=
+github.com/terraform-ibm-modules/ibmcloud-terratest-wrapper v1.50.1/go.mod h1:DPxpxzMr8GCuuUzNlNWdAFAHfHRv1mETuEs2G47+7+M=
+github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
+github.com/tmccombs/hcl2json v0.6.4 h1:/FWnzS9JCuyZ4MNwrG4vMrFrzRgsWEOVi+1AyYUVLGw=
+github.com/tmccombs/hcl2json v0.6.4/go.mod h1:+ppKlIW3H5nsAsZddXPy2iMyvld3SHxyjswOZhavRDk=
+github.com/ulikunitz/xz v0.5.11 h1:kpFauv27b6ynzBNT/Xy+1k+fK4WswhN/6PN5WhFAGw8=
+github.com/ulikunitz/xz v0.5.11/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
 github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM=
 github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw=
-github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI=
+github.com/xdg-go/scram v1.1.1/go.mod h1:RaEWvsqvNKKvBPvcKeFjrG2cJqOkHTiyTpzz23ni57g=
+github.com/xdg-go/stringprep v1.0.3/go.mod h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgklLGvcBnW8=
+github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA=
 github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-github.com/zclconf/go-cty v1.16.3 h1:osr++gw2T61A8KVYHoQiFbFd1Lh3JOCXc/jFLJXKTxk=
-github.com/zclconf/go-cty v1.16.3/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE=
+github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
+github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
+github.com/zclconf/go-cty v1.16.2 h1:LAJSwc3v81IRBZyUVQDUdZ7hs3SYs9jv0eZJDWHD/70=
+github.com/zclconf/go-cty v1.16.2/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE=
 github.com/zclconf/go-cty-debug v0.0.0-20240509010212-0d6042c53940 h1:4r45xpDWB6ZMSMNJFMOjqrGHynW3DIBuR2H9j0ug+Mo=
 github.com/zclconf/go-cty-debug v0.0.0-20240509010212-0d6042c53940/go.mod h1:CmBdvvj3nqzfzJ6nTCIwDTPZ56aVGvDrmztiO5g3qrM=
-go.mongodb.org/mongo-driver v1.17.4 h1:jUorfmVzljjr0FLzYQsGP8cgN/qzzxlY9Vh0C9KFXVw=
-go.mongodb.org/mongo-driver v1.17.4/go.mod h1:Hy04i7O2kC4RS06ZrhPRqj/u4DTYkFDAAccj+rVKqgQ=
-go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
-go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
-go.opentelemetry.io/otel v1.37.0 h1:9zhNfelUvx0KBfu/gb+ZgeAfAgtWrfHJZcAqFC228wQ=
-go.opentelemetry.io/otel v1.37.0/go.mod h1:ehE/umFRLnuLa/vSccNq9oS1ErUlkkK71gMcN34UG8I=
-go.opentelemetry.io/otel/metric v1.37.0 h1:mvwbQS5m0tbmqML4NqK+e3aDiO02vsf/WgbsdpcPoZE=
-go.opentelemetry.io/otel/metric v1.37.0/go.mod h1:04wGrZurHYKOc+RKeye86GwKiTb9FKm1WHtO+4EVr2E=
+go.mongodb.org/mongo-driver v1.11.3/go.mod h1:PTSz5yu21bkT/wXpkS7WR5f0ddqw5quethTUn9WM+2g=
+go.mongodb.org/mongo-driver v1.17.3 h1:TQyXhnsWfWtgAhMtOgtYHMTkZIfBTpMTsMnd9ZBeHxQ=
+go.mongodb.org/mongo-driver v1.17.3/go.mod h1:Hy04i7O2kC4RS06ZrhPRqj/u4DTYkFDAAccj+rVKqgQ=
+go.opentelemetry.io/otel v1.29.0 h1:PdomN/Al4q/lN6iBJEN3AwPvUiHPMlt93c8bqTG5Llw=
+go.opentelemetry.io/otel v1.29.0/go.mod h1:N/WtXPs1CNCUEx+Agz5uouwCba+i+bJGFicT8SR4NP8=
+go.opentelemetry.io/otel/metric v1.29.0 h1:vPf/HFWTNkPu1aYeIsc98l4ktOQaL6LeSoeV2g+8YLc=
+go.opentelemetry.io/otel/metric v1.29.0/go.mod h1:auu/QWieFVWx+DmQOUMgj0F8LHWdgalxXqvp7BII/W8=
 go.opentelemetry.io/otel/sdk v1.29.0 h1:vkqKjk7gwhS8VaWb0POZKmIEDimRCMsopNYnriHyryo=
 go.opentelemetry.io/otel/sdk v1.29.0/go.mod h1:pM8Dx5WKnvxLCb+8lG1PRNIDxu9g9b9g59Qr7hfAAok=
-go.opentelemetry.io/otel/trace v1.37.0 h1:HLdcFNbRQBE2imdSEgm/kwqmQj1Or1l/7bW6mxVK7z4=
-go.opentelemetry.io/otel/trace v1.37.0/go.mod h1:TlgrlQ+PtQO5XFerSPUYG0JSgGyryXewPGyayAWSBS0=
-go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI=
-go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU=
-go.yaml.in/yaml/v3 v3.0.3 h1:bXOww4E/J3f66rav3pX3m8w6jDE4knZjGOw8b5Y6iNE=
-go.yaml.in/yaml/v3 v3.0.3/go.mod h1:tBHosrYAkRZjRAOREWbDnBXUf08JOwYq++0QNwQiWzI=
+go.opentelemetry.io/otel/trace v1.29.0 h1:J/8ZNK4XgR7a21DZUAsbF8pZ5Jcw1VhACmnYt39JTi4=
+go.opentelemetry.io/otel/trace v1.29.0/go.mod h1:eHl3w0sp3paPkYstJOmAimxhiFXPg+MMTlEh3nsQgWQ=
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
 golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
 golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
-golang.org/x/crypto v0.41.0 h1:WKYxWedPGCTVVl5+WHSSrOBT0O8lx32+zxmHxijgXp4=
-golang.org/x/crypto v0.41.0/go.mod h1:pO5AFd7FA68rFak7rOAGVuygIISepHftHnr8dr6+sUc=
-golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
+golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio=
+golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw=
+golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
+golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4=
+golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
+golang.org/x/crypto v0.39.0 h1:SHs+kF4LP+f+p14esP5jAoDpHU8Gu/v9lFRK6IT5imM=
+golang.org/x/crypto v0.39.0/go.mod h1:L+Xg3Wf6HoL4Bn4238Z6ft6KfEpN0tJGo53AAPC632U=
 golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.27.0 h1:kb+q2PyFnEADO2IEF935ehFUXlWiNjJWtRNgBLSfbxQ=
-golang.org/x/mod v0.27.0/go.mod h1:rWI627Fq0DEoudcK+MBkNkCe0EetEaDSwJJkCcjpazc=
+golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
+golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
+golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI=
+golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
+golang.org/x/mod v0.25.0 h1:n7a+ZbQKQA/Ysbyb0/6IbB1H/X41mKgbhfv7AfG/44w=
+golang.org/x/mod v0.25.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww=
+golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
 golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
 golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
+golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
 golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.43.0 h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE=
-golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg=
+golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
+golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
+golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
+golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws=
+golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
+golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
+golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
+golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
+golang.org/x/net v0.12.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA=
+golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI=
+golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
+golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
+golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U=
+golang.org/x/net v0.40.0 h1:79Xs7wF06Gbdcg4kdCCIQArK11Z1hr5POQ6+fIYHNuY=
+golang.org/x/net v0.40.0/go.mod h1:y0hY0exeL2Pku80/zKK7tpntoX23cqL3Oa6njdgRtds=
+golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw=
-golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
+golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
+golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.15.0 h1:KWH3jNZsfyT6xfAfKiz6MRNmd46ByHDYaZ7KSkCtdW8=
+golang.org/x/sync v0.15.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
+golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI=
-golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
+golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw=
+golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
-golang.org/x/term v0.34.0 h1:O/2T7POpk0ZZ7MAzMeWFSg6S5IpWd/RXDlM9hgM3DR4=
-golang.org/x/term v0.34.0/go.mod h1:5jC53AEywhIVebHgPVeg0mj8OD3VO9OzclacVrqpaAw=
+golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
+golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA=
+golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ=
+golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
+golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
+golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
+golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o=
+golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU=
+golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU=
+golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U=
+golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0=
+golang.org/x/term v0.32.0 h1:DR4lr0TjUs3epypdhTOkMmuF5CDFJ/8pOnbzMZPQ7bg=
+golang.org/x/term v0.32.0/go.mod h1:uZG1FhGx848Sqfsq4/DlJr3xGGsYMu/L5GW4abiaEPQ=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng=
-golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU=
+golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
+golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
+golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
+golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
+golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
+golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
+golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
+golang.org/x/text v0.26.0 h1:P42AVeLghgTYr4+xUnTRKDMqpar+PtX7KWuNQL21L8M=
+golang.org/x/text v0.26.0/go.mod h1:QK15LZJUUQVJxhz7wXgxSy/CJaTFjd0G+YLonydOVQA=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.36.0 h1:kWS0uv/zsvHEle1LbV5LE8QujrxB3wfQyxHfhOk0Qkg=
-golang.org/x/tools v0.36.0/go.mod h1:WBDiHKJK8YgLHlcQPYQzNCkUxUypCaa5ZegCVutKm+s=
+golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E=
+golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
+golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA=
+golang.org/x/tools v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ=
+golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
+golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s=
+golang.org/x/tools v0.9.1/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc=
+golang.org/x/tools v0.9.3/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc=
+golang.org/x/tools v0.12.0/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM=
+golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=
+golang.org/x/tools v0.16.1/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0=
+golang.org/x/tools v0.33.0 h1:4qz2S3zmRxbGIhDIAgjxvFutSvH5EfnsYrRBj0UI0bc=
+golang.org/x/tools v0.33.0/go.mod h1:CIJMaWEY88juyUfo7UbgPqbC8rU2OqfAV1h2Qp0oMYI=
 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
-google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
+google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
+google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
+google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
+google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
+google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
+google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
+google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
+google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
+google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA=
+google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
-gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
-gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
+gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
 gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
 gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
 gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME=
 gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI=
 gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
 gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
 gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
 gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-k8s.io/apimachinery v0.33.4 h1:SOf/JW33TP0eppJMkIgQ+L6atlDiP/090oaX0y9pd9s=
-k8s.io/apimachinery v0.33.4/go.mod h1:BHW0YOu7n22fFv/JkYOEfkUYNRN0fj0BlvMFWA7b+SM=
-k8s.io/client-go v0.33.4 h1:TNH+CSu8EmXfitntjUPwaKVPN0AYMbc9F1bBS8/ABpw=
-k8s.io/client-go v0.33.4/go.mod h1:LsA0+hBG2DPwovjd931L/AoaezMPX9CmBgyVyBZmbCY=
-k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
-k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
 k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff h1:/usPimJzUKKu+m+TE36gUyGcf03XZEP0ZIKgKj35LS4=
 k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff/go.mod h1:5jIi+8yX4RIb8wk3XwBo5Pq2ccx4FP10ohkbSKCZoK8=
 k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro=
 k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
-sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 h1:IpInykpT6ceI+QxKBbEflcR5EXP7sU1kvOlxwZh5txg=
-sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg=
 sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
 sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU=
 sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
-sigs.k8s.io/structured-merge-diff/v4 v4.7.0 h1:qPeWmscJcXP0snki5IYF79Z8xrl8ETFxgMd7wez1XkI=
-sigs.k8s.io/structured-merge-diff/v4 v4.7.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps=
+sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
 sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
-sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs=
-sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4=
diff --git a/tests/lsf/cluster_helpers.go b/tests/lsf/cluster_helpers.go
index 8d242f5d..2dbd1639 100644
--- a/tests/lsf/cluster_helpers.go
+++ b/tests/lsf/cluster_helpers.go
@@ -6,7 +6,6 @@ import (
 	"strings"
 	"testing"
 
-	"github.com/stretchr/testify/require"
 	"github.com/terraform-ibm-modules/ibmcloud-terratest-wrapper/testhelper"
 	utils "github.com/terraform-ibm-modules/terraform-ibm-hpc/utilities"
 	"golang.org/x/crypto/ssh"
@@ -177,47 +176,17 @@ func VerifyComputeNodeConfig(
 
 }
 
-// VerifyAPPCenterConfig verifies the configuration of the Application Center by performing various checks.
-// If more than one management node exists, validation runs on node 2; otherwise on node 1.
+// VerifyAPPCenterConfig verifies the configuration of the application center by performing various checks.
 func VerifyAPPCenterConfig(
 	t *testing.T,
 	sshMgmtClient *ssh.Client,
-	publicHostIP, publicHostName, privateHostName string,
-	managementNodeIPs []string,
 	logger *utils.AggregatedLogger,
 ) {
-	var targetSSHClient *ssh.Client
-	var nodeLabel string
-
-	if len(managementNodeIPs) > 1 {
-		// Connect to management node 2
-		appCenterSSHClient, err := utils.ConnectToHost(publicHostName, publicHostIP, privateHostName, managementNodeIPs[1])
-		if err != nil {
-			msg := fmt.Sprintf(
-				"Failed to SSH to management node 2 via bastion (%s) -> private IP (%s): %v",
-				publicHostIP, managementNodeIPs[1], err,
-			)
-			logger.FAIL(t, msg)
-			require.FailNow(t, msg)
-		}
-		defer func() {
-			if cerr := appCenterSSHClient.Close(); cerr != nil {
-				logger.Warn(t, fmt.Sprintf("Failed to close SSH connection: %v", cerr))
-			}
-		}()
-		targetSSHClient = appCenterSSHClient
-		nodeLabel = "Application Center (mgmt node 2)"
-	} else {
-		// Use the provided SSH client (mgmt node 1)
-		targetSSHClient = sshMgmtClient
-		nodeLabel = "Application Center (mgmt node 1)"
-	}
-
-	// Run App Center validation
-	appCenterErr := LSFAPPCenterConfiguration(t, targetSSHClient, logger)
-	utils.LogVerificationResult(t, appCenterErr, nodeLabel, logger)
+	// Verify application center
+	appCenterErr := LSFAPPCenterConfiguration(t, sshMgmtClient, logger)
+	utils.LogVerificationResult(t, appCenterErr, "check Application center", logger)
 
-	logger.Info(t, fmt.Sprintf("Completed %s validation.", nodeLabel))
 }
 
 // VerifyLoginNodeConfig validates the configuration of a login node by performing multiple checks.
@@ -327,10 +296,10 @@ func VerifyJobs(t *testing.T, sshClient *ssh.Client, jobCommand string, logger * func VerifyFileShareEncryption(t *testing.T, sshMgmtClient *ssh.Client, apiKey, region, resourceGroup, clusterPrefix, keyManagement string, managementNodeIPList []string, logger *utils.AggregatedLogger) { // Validate encryption encryptErr := VerifyEncryption(t, apiKey, region, resourceGroup, clusterPrefix, keyManagement, logger) - utils.LogVerificationResult(t, encryptErr, "File share encryption validation", logger) + utils.LogVerificationResult(t, encryptErr, "File share encryption validation failed", logger) encryptCRNErr := VerifyEncryptionCRN(t, sshMgmtClient, keyManagement, managementNodeIPList, logger) - utils.LogVerificationResult(t, encryptCRNErr, "CRN encryption validation", logger) + utils.LogVerificationResult(t, encryptCRNErr, "CRN encryption validation failed", logger) } // VerifyManagementNodeLDAPConfig performs various checks on a management node's LDAP configuration. diff --git a/tests/lsf/cluster_utils.go b/tests/lsf/cluster_utils.go index e8841c3d..4d3aa2f8 100644 --- a/tests/lsf/cluster_utils.go +++ b/tests/lsf/cluster_utils.go @@ -1387,7 +1387,7 @@ func VerifyEncryption(t *testing.T, apiKey, region, resourceGroup, clusterPrefix } // // Retrieve the list of file shares (retry once after 2s if it fails) - fileSharesOutput, err := utils.RunCommandWithRetry(fileSharesCmd, 3, 90*time.Second) + fileSharesOutput, err := utils.RunCommandWithRetry(fileSharesCmd, 3, 60*time.Second) if err != nil { return fmt.Errorf("failed to retrieve file shares: %w", err) } @@ -1403,24 +1403,18 @@ func VerifyEncryption(t *testing.T, apiKey, region, resourceGroup, clusterPrefix return fmt.Errorf("failed to retrieve file share details for '%s': %w", fileShareName, err) } - outStr := string(output) - - if utils.VerifyDataContains(t, strings.ToLower(keyManagement), "key_protect", logger) { - // With KMS → expect user_managed + Encryption key present - if !utils.VerifyDataContains(t, outStr, "Encryption user_managed", logger) || - !utils.VerifyDataContains(t, outStr, "Encryption key", logger) { - return fmt.Errorf("expected user-managed encryption with an encryption key for file share '%s'", fileShareName) + if !utils.VerifyDataContains(t, strings.ToLower(keyManagement), "key_protect", logger) { + if !utils.VerifyDataContains(t, string(output), "provider_managed", logger) { + return fmt.Errorf("encryption-in-transit is unexpectedly enabled for the file shares") } } else { - // Without KMS → expect provider_managed + no Encryption key - if !utils.VerifyDataContains(t, outStr, "Encryption provider_managed", logger) || - utils.VerifyDataContains(t, outStr, "Encryption key", logger) { - return fmt.Errorf("expected provider-managed encryption without an encryption key for file share '%s'", fileShareName) + if !utils.VerifyDataContains(t, string(output), "user_managed", logger) { + return fmt.Errorf("encryption-in-transit is unexpectedly disabled for the file shares") } } } - logger.Info(t, "Encryption settings match the expected configuration") + logger.Info(t, "Encryption set as expected") return nil } diff --git a/tests/lsf/cluster_validation.go b/tests/lsf/cluster_validation.go index db308867..55b99220 100644 --- a/tests/lsf/cluster_validation.go +++ b/tests/lsf/cluster_validation.go @@ -67,6 +67,9 @@ func runClusterValidationsOnManagementNode(t *testing.T, sshClient *ssh.Client, // Run job VerifyJobs(t, sshClient, jobCmd, logger) + // Verify application center configuration + 
VerifyAPPCenterConfig(t, sshClient, logger) + // Verify noVNC configuration //VerifyNoVNCConfig(t, sshClient, logger) @@ -77,7 +80,7 @@ func runClusterValidationsOnManagementNode(t *testing.T, sshClient *ssh.Client, VerifyLSFDNS(t, sshClient, managementNodeIPs, expected.DnsDomainName, logger) // Perform failover and failback - //FailoverAndFailback(t, sshClient, jobCmd, logger) + FailoverAndFailback(t, sshClient, jobCmd, logger) // Restart LSF daemon RestartLsfDaemon(t, sshClient, logger) @@ -85,9 +88,6 @@ func runClusterValidationsOnManagementNode(t *testing.T, sshClient *ssh.Client, // Reboot instance RebootInstance(t, sshClient, bastionIP, LSF_PUBLIC_HOST_NAME, LSF_PRIVATE_HOST_NAME, managementNodeIPs[0], logger) - // Verify application center configuration - VerifyAPPCenterConfig(t, sshClient, bastionIP, LSF_PUBLIC_HOST_NAME, LSF_PRIVATE_HOST_NAME, managementNodeIPs, logger) - logger.Info(t, "Management node and App Center validations completed.") } @@ -293,17 +293,10 @@ func ValidateClusterConfigurationWithPACHA(t *testing.T, options *testhelper.Tes runClusterValidationsOnManagementNode(t, sshClient, bastionIP, managementNodeIPs, expected, jobCommandMed, logger) - var managementNodeIP string - if len(managementNodeIPs) == 1 { - managementNodeIP = managementNodeIPs[0] - } else { - managementNodeIP = managementNodeIPs[1] - } - // Reconnect to the management node after reboot - sshClient, connectionErr = utils.ConnectToHost(LSF_PUBLIC_HOST_NAME, bastionIP, LSF_PRIVATE_HOST_NAME, managementNodeIP) + sshClient, connectionErr = utils.ConnectToHost(LSF_PUBLIC_HOST_NAME, bastionIP, LSF_PRIVATE_HOST_NAME, managementNodeIPs[0]) if connectionErr != nil { - msg := fmt.Sprintf("SSH connection to master node via bastion (%s) -> private IP (%s) failed after reboot: %v", bastionIP, managementNodeIP, connectionErr) + msg := fmt.Sprintf("SSH connection to master node via bastion (%s) -> private IP (%s) failed after reboot: %v", bastionIP, managementNodeIPs[0], connectionErr) logger.FAIL(t, msg) require.FailNow(t, msg) } @@ -1117,9 +1110,6 @@ func ValidateBasicClusterConfigurationWithSCCWPAndCSPM(t *testing.T, options *te } }() - // Verify application center configuration - VerifyAPPCenterConfig(t, sshClient, bastionIP, LSF_PUBLIC_HOST_NAME, LSF_PRIVATE_HOST_NAME, managementNodeIPs, logger) - // Run job to verify job execution on the cluster VerifyJobs(t, sshClient, jobCommandLow, logger) @@ -1513,7 +1503,7 @@ func ValidateClusterConfigurationWithMultipleKeys(t *testing.T, options *testhel require.NoError(t, getClusterIPErr, "Failed to get cluster IPs from Terraform outputs - check network configuration") // Set job commands for low and medium memory tasks, ignoring high memory command - jobCommandLow, _, _ := GenerateLSFJobCommandsForMemoryTypes() + jobCommandLow, jobCommandMed, _ := GenerateLSFJobCommandsForMemoryTypes() // Log validation start logger.Info(t, t.Name()+" Validation started ......") @@ -1546,7 +1536,7 @@ func ValidateClusterConfigurationWithMultipleKeys(t *testing.T, options *testhel VerifySSHKey(t, sshClientOne, bastionIP, LSF_PUBLIC_HOST_NAME, LSF_PRIVATE_HOST_NAME, "management", managementNodeIPs, expected.NumOfKeys, logger) // Perform failover and failback - //FailoverAndFailback(t, sshClientOne, jobCommandMed, logger) + FailoverAndFailback(t, sshClientOne, jobCommandMed, logger) // Restart LSF daemon RestartLsfDaemon(t, sshClientOne, logger) @@ -1657,9 +1647,6 @@ func ValidateBasicClusterConfigurationForMultiProfileStaticAndDynamic(t *testing } }() - // Verify application center 
configuration - VerifyAPPCenterConfig(t, sshClient, bastionIP, LSF_PUBLIC_HOST_NAME, LSF_PRIVATE_HOST_NAME, managementNodeIPs, logger) - // Run job to trigger dynamic node behavior VerifyJobs(t, sshClient, jobCommandHigh, logger) diff --git a/tests/lsf_tests/lsf_constants.go b/tests/lsf_tests/lsf_constants.go index 83846e60..a7047cbd 100644 --- a/tests/lsf_tests/lsf_constants.go +++ b/tests/lsf_tests/lsf_constants.go @@ -13,10 +13,10 @@ const ( CLUSTER_ONE_VPC_CIDR = "10.241.0.0/18" CLUSTER_ONE_VPC_CLUSTER_PRIVATE_SUBNETS_CIDR_BLOCKS = "10.241.0.0/20" CLUSTER_ONE_VPC_CLUSTER_LOGIN_PRIVATE_SUBNETS_CIDR_BLOCKS = "10.241.16.0/28" - CLUSTER_TWO_VPC_CLUSTER_PRIVATE_SUBNETS_CIDR_BLOCKS = "10.241.61.0/24" - CLUSTER_TWO_VPC_CLUSTER_LOGIN_PRIVATE_SUBNETS_CIDR_BLOCKS = "10.241.16.16/28" + CLUSTER_TWO_VPC_CLUSTER_PRIVATE_SUBNETS_CIDR_BLOCKS = "10.241.17.0/24" + CLUSTER_TWO_VPC_CLUSTER_LOGIN_PRIVATE_SUBNETS_CIDR_BLOCKS = "10.241.18.0/24" CLUSTER_TWO_DNS_DOMAIN_NAME = "clustertwo.com" KMS_KEY_INSTANCE_NAME = "cicd-key-instance" KMS_KEY_NAME = "cicd-key-name" - APP_CENTER_GUI_PASSWORD = "Password@123456" // pragma: allowlist secret + APP_CENTER_GUI_PASSWORD = "Admin@1234" // pragma: allowlist secret ) diff --git a/tests/lsf_tests/lsf_e2e_test.go b/tests/lsf_tests/lsf_e2e_test.go index bbb1acc3..a99f07d6 100644 --- a/tests/lsf_tests/lsf_e2e_test.go +++ b/tests/lsf_tests/lsf_e2e_test.go @@ -42,7 +42,7 @@ func TestMain(m *testing.M) { log.Fatalf("❌ Config file not accessible: %v", err) } - if _, err := deploy.GetLSFConfigFromYAML(configFilePath); err != nil { + if _, err := deploy.GetConfigFromYAML(configFilePath); err != nil { log.Fatalf("❌ Config load failed: %v", err) } log.Printf("✅ Configuration loaded successfully from %s", filepath.Base(configFilePath)) @@ -254,16 +254,6 @@ func TestRunSCCWPAndCSPMEnabledClusterValidation(t *testing.T) { options, err := setupOptions(t, clusterNamePrefix, terraformDir, envVars.DefaultExistingResourceGroup) require.NoError(t, err, "Failed to initialize test options") - // Define multiple management instances - options.TerraformVars["management_instances"] = []map[string]interface{}{ - - { - "profile": "bx2-4x16", - "count": 1, - "image": envVars.ManagementInstancesImage, - }, - } - // SCCWP Specific Configuration options.TerraformVars["sccwp_enable"] = envVars.SccWPEnabled options.TerraformVars["cspm_enabled"] = envVars.CspmEnabled @@ -1813,12 +1803,12 @@ func TestRunMultiProfileStaticAndDynamic(t *testing.T) { options.TerraformVars["management_instances"] = []map[string]interface{}{ { - "profile": "bx2d-4x16", + "profile": "bx2d-16x64", "count": 1, "image": envVars.ManagementInstancesImage, }, { - "profile": "bx2-4x16", + "profile": "bx2-2x8", "count": 1, "image": envVars.ManagementInstancesImage, }, @@ -1970,7 +1960,7 @@ func RunCreateClusterWithExistingVpcCIDRs(t *testing.T, vpcName string) { testLogger.Info(t, fmt.Sprintf("Finished execution: %s", t.Name())) } -// RunCreateClusterWithExistingVpcSubnetsNoDns with compute and login subnet id. Both custom_resolver and dns_instance null +// RunCreateClusterWithExistingVpcSubnetsNoDns with compute and login subnet id. 
Both custom_resolver and dns_instance null func RunCreateClusterWithExistingVpcSubnetsNoDns(t *testing.T, vpcName string, bastionsubnetId string, computesubnetIds string) { // Set up the test suite and prepare the testing environment @@ -1991,7 +1981,7 @@ func RunCreateClusterWithExistingVpcSubnetsNoDns(t *testing.T, vpcName string, b options, err := setupOptions(t, clusterNamePrefix, terraformDir, envVars.DefaultExistingResourceGroup) options.TerraformVars["vpc_name"] = vpcName options.TerraformVars["login_subnet_id"] = bastionsubnetId - options.TerraformVars["compute_subnet_id"] = computesubnetIds + options.TerraformVars["cluster_subnet_id"] = computesubnetIds require.NoError(t, err, "Error setting up test options: %v", err) // Skip test teardown for further inspection @@ -2032,7 +2022,7 @@ func TestRunCreateVpcWithCustomDns(t *testing.T) { // Set up the test options with the relevant parameters, including environment variables and resource group, set up test environment options, err := setupOptionsVPC(t, clusterNamePrefix, createVpcTerraformDir, envVars.DefaultExistingResourceGroup) options.TerraformVars["enable_hub"] = true - options.TerraformVars["dns_zone_name"] = "hpc.local" + options.TerraformVars["dns_zone_name"] = "lsf.com" require.NoError(t, err, "Error setting up test options: %v", err) @@ -2085,7 +2075,7 @@ func RunCreateClusterWithDnsAndResolver(t *testing.T, vpcName string, bastionsub options, err := setupOptions(t, clusterNamePrefix, terraformDir, envVars.DefaultExistingResourceGroup) options.TerraformVars["vpc_name"] = vpcName options.TerraformVars["login_subnet_id"] = bastionsubnetId - options.TerraformVars["compute_subnet_id"] = computesubnetIds + options.TerraformVars["cluster_subnet_id"] = computesubnetIds options.TerraformVars["dns_instance_id"] = instanceId options.TerraformVars["dns_custom_resolver_id"] = customResolverId @@ -2129,7 +2119,7 @@ func RunCreateClusterWithOnlyResolver(t *testing.T, vpcName string, bastionsubne options, err := setupOptions(t, clusterNamePrefix, terraformDir, envVars.DefaultExistingResourceGroup) options.TerraformVars["vpc_name"] = vpcName options.TerraformVars["login_subnet_id"] = bastionsubnetId - options.TerraformVars["compute_subnet_id"] = computesubnetIds + options.TerraformVars["cluster_subnet_id"] = computesubnetIds options.TerraformVars["dns_custom_resolver_id"] = customResolverId require.NoError(t, err, "Error setting up test options: %v", err) @@ -2172,7 +2162,7 @@ func TestRunCreateVpcWithCustomDnsOnlyDNS(t *testing.T) { // Set up the test options with the relevant parameters, including environment variables and resource group, set up test environment options, err := setupOptionsVPC(t, clusterNamePrefix, createVpcTerraformDir, envVars.DefaultExistingResourceGroup) options.TerraformVars["enable_hub"] = true - options.TerraformVars["dns_zone_name"] = "hpc.local" + options.TerraformVars["dns_zone_name"] = "lsf.com" require.NoError(t, err, "Error setting up test options: %v", err) diff --git a/tests/lsf_tests/lsf_negative_test.go b/tests/lsf_tests/lsf_negative_test.go index 09fcaf8b..5b57aa33 100644 --- a/tests/lsf_tests/lsf_negative_test.go +++ b/tests/lsf_tests/lsf_negative_test.go @@ -127,14 +127,14 @@ func TestInvalidAppCenterPassword(t *testing.T) { t.Parallel() invalidPasswords := []string{ - "weak", // Too short (<15) - "PasswoRD123", // Too short, no special char - "password123", // Lowercase only - "Password@", // Missing numbers - "Password123", // Missing special char - "password@123456789012345678901234567890123", // Too 
long (>32) - "ValidPass123\\", // Backslash not in allowed set - "Pass word@1", // Contains space + "weak", // Too short + "PasswoRD123", // Contains dictionary word // pragma: allowlist secret + "password123", // All lowercase // pragma: allowlist secret + "Password@", // Missing numbers // pragma: allowlist secret + "Password123", // Common password pattern // pragma: allowlist secret + "password@12345678901234567890", // Too long // pragma: allowlist secret + "ValidPass123\\", //Backslash not in allowed special chars // pragma: allowlist secret + "Pass word@1", //Contains space // pragma: allowlist secret } setupTestSuite(t) @@ -160,9 +160,9 @@ func TestInvalidAppCenterPassword(t *testing.T) { _, err := terraform.PlanE(t, terraformOptions) require.Error(t, err, "Expected an error during plan") - validationPassed := utils.VerifyDataContains(t, err.Error(), "The password must be at least 15 characters long", testLogger) // pragma: allowlist secret - assert.True(t, validationPassed, "Should fail with invalid password error") // pragma: allowlist secret - testLogger.LogValidationResult(t, validationPassed, "Invalid App Center password validation") // pragma: allowlist secret + validationPassed := utils.VerifyDataContains(t, err.Error(), "The password must be at least 8 characters long", testLogger) // pragma: allowlist secret + assert.True(t, validationPassed, "Should fail with invalid password error") // pragma: allowlist secret + testLogger.LogValidationResult(t, validationPassed, "Invalid App Center password validation") // pragma: allowlist secret }) } } @@ -618,8 +618,8 @@ func TestInvalidLdapConfigurations(t *testing.T) { config: map[string]interface{}{ "enable_ldap": true, "ldap_user_name": "invalid user", - "ldap_user_password": "ValidPassword123!", // >=15 ✅ // pragma: allowlist secret - "ldap_admin_password": "AdminPassword123!", // >=15 ✅ // pragma: allowlist secret + "ldap_user_password": "ValidPass123!", // pragma: allowlist secret + "ldap_admin_password": "AdminPass123!", // pragma: allowlist secret }, expectedErrors: []string{ "LDAP username must be between 4-32 characters", @@ -633,11 +633,12 @@ func TestInvalidLdapConfigurations(t *testing.T) { config: map[string]interface{}{ "enable_ldap": true, "ldap_user_name": "usr", - "ldap_user_password": "ValidPassword123!", // >=15 ✅ // pragma: allowlist secret - "ldap_admin_password": "AdminPassword123!", // >=15 ✅ // pragma: allowlist secret + "ldap_user_password": "ValidPass123!", // pragma: allowlist secret + "ldap_admin_password": "AdminPass123!", // pragma: allowlist secret }, expectedErrors: []string{ "LDAP username must be between 4-32 characters long and can only contain", + "letters, numbers, hyphens, and underscores", }, description: "Username shorter than 4 characters should fail", }, @@ -646,11 +647,12 @@ func TestInvalidLdapConfigurations(t *testing.T) { config: map[string]interface{}{ "enable_ldap": true, "ldap_user_name": "thisusernameiswaytoolongandshouldfailvalidation", - "ldap_user_password": "ValidPassword123!", // >=15 ✅ // pragma: allowlist secret - "ldap_admin_password": "AdminPassword123!", // >=15 ✅ // pragma: allowlist secret + "ldap_user_password": "ValidPass123!", // pragma: allowlist secret + "ldap_admin_password": "AdminPass123!", // pragma: allowlist secret }, expectedErrors: []string{ "LDAP username must be between 4-32 characters long and can only contain", + "letters, numbers, hyphens, and underscores", }, description: "Username longer than 32 characters should fail", }, @@ -659,8 +661,8 @@ func 
TestInvalidLdapConfigurations(t *testing.T) { config: map[string]interface{}{ "enable_ldap": true, "ldap_user_name": "user@name#", - "ldap_user_password": "ValidPassword123!", // >=15 ✅ // pragma: allowlist secret - "ldap_admin_password": "AdminPassword123!", // >=15 ✅ // pragma: allowlist secret + "ldap_user_password": "ValidPass123!", // pragma: allowlist secret + "ldap_admin_password": "AdminPass123!", // pragma: allowlist secret }, expectedErrors: []string{ "LDAP username must be between 4-32 characters long and can only contain", @@ -675,47 +677,34 @@ func TestInvalidLdapConfigurations(t *testing.T) { config: map[string]interface{}{ "enable_ldap": true, "ldap_user_name": "validuser", - "ldap_user_password": "Short1!", // ❌ <15 // pragma: allowlist secret - "ldap_admin_password": "AdminPassword123!", // >=15 ✅ // pragma: allowlist secret + "ldap_user_password": "Short1!", // pragma: allowlist secret + "ldap_admin_password": "AdminPass123!", // pragma: allowlist secret }, expectedErrors: []string{ - "must be 15 to 32 characters long", + "must be 8 to 20 characters long", }, - description: "Password shorter than 15 characters should fail", + description: "Password shorter than 8 characters should fail", }, { - name: "UserPasswordTooLong", + name: "PasswordTooLong", config: map[string]interface{}{ "enable_ldap": true, "ldap_user_name": "validuser", - "ldap_user_password": "ThisPasswordIsWayTooLong1234567890!", // ❌ >32 // pragma: allowlist secret - "ldap_admin_password": "AdminPassword123!", // >=15 ✅ // pragma: allowlist secret + "ldap_user_password": "ThisPasswordIsWayTooLong123!", // pragma: allowlist secret + "ldap_admin_password": "AdminPass123!", // pragma: allowlist secret }, expectedErrors: []string{ - "must be 15 to 32 characters long", + "must be 8 to 20 characters long", }, - description: "Password longer than 32 characters should fail", + description: "Password longer than 20 characters should fail", }, { - name: "AdminUserPasswordTooLong", + name: "PasswordMissingUppercase", config: map[string]interface{}{ "enable_ldap": true, "ldap_user_name": "validuser", - "ldap_user_password": "AdminPassword123!", // >=15 ✅ // pragma: allowlist secret - "ldap_admin_password": "ThisPasswordIsWayTooLong1234567890!", // ❌ >32 // pragma: allowlist secret - }, - expectedErrors: []string{ - "must be 15 to 32 characters long", - }, - description: "Password longer than 32 characters should fail", - }, - { - name: "UserPasswordMissingUppercase", - config: map[string]interface{}{ - "enable_ldap": true, - "ldap_user_name": "validuser", - "ldap_user_password": "missinglowercase123!", // ❌ no uppercase // pragma: allowlist secret - "ldap_admin_password": "AdminPassword123!", // ✅ // pragma: allowlist secret + "ldap_user_password": "missingupper1!", // pragma: allowlist secret + "ldap_admin_password": "AdminPass123!", // pragma: allowlist secret }, expectedErrors: []string{ "two alphabetic characters (with one uppercase and one lowercase)", @@ -723,38 +712,12 @@ func TestInvalidLdapConfigurations(t *testing.T) { description: "Password missing uppercase letter should fail", }, { - name: "AdminPasswordMissingUppercase", + name: "PasswordMissingLowercase", config: map[string]interface{}{ "enable_ldap": true, "ldap_user_name": "validuser", - "ldap_user_password": "AdminPassword123!", // ✅ // pragma: allowlist secret - "ldap_admin_password": "missinglowercase123!", // ❌ no uppercase // pragma: allowlist secret - }, - expectedErrors: []string{ - "two alphabetic characters (with one uppercase and one 
lowercase)", - }, - description: "Password missing uppercase letter should fail", - }, - { - name: "UserPasswordMissingLowercase", - config: map[string]interface{}{ - "enable_ldap": true, - "ldap_user_name": "validuser", - "ldap_user_password": "MISSINGUPPERCASE123!", // ❌ no lowercase // pragma: allowlist secret - "ldap_admin_password": "AdminPassword123!", // ✅ // pragma: allowlist secret - }, - expectedErrors: []string{ - "two alphabetic characters (with one uppercase and one lowercase)", - }, - description: "Password missing lowercase letter should fail", - }, - { - name: "AdminPasswordMissingLowercase", - config: map[string]interface{}{ - "enable_ldap": true, - "ldap_user_name": "validuser", - "ldap_user_password": "AdminPassword123!", // ✅ // pragma: allowlist secret - "ldap_admin_password": "MISSINGUPPERCASE123!", // ❌ no lowercase // pragma: allowlist secret + "ldap_user_password": "MISSINGLOWER1!", // pragma: allowlist secret + "ldap_admin_password": "AdminPass123!", // pragma: allowlist secret }, expectedErrors: []string{ "two alphabetic characters (with one uppercase and one lowercase)", @@ -762,98 +725,52 @@ func TestInvalidLdapConfigurations(t *testing.T) { description: "Password missing lowercase letter should fail", }, { - name: "UserPasswordMissingNumber", - config: map[string]interface{}{ - "enable_ldap": true, - "ldap_user_name": "validuser", - "ldap_user_password": "MissingNumber!!!", // ❌ no digit // pragma: allowlist secret - "ldap_admin_password": "AdminPassword123!", // ✅ // pragma: allowlist secret - }, - expectedErrors: []string{ - "The LDAP user password must be 15 to 32 characters long and include at", - "least two alphabetic characters (with one uppercase and one lowercase), one", - "number, and one special character from the set", - "password must not contain the username or any spaces.", - }, - description: "Password missing number should fail", - }, - { - name: "AdminPasswordMissingNumber", + name: "PasswordMissingNumber", config: map[string]interface{}{ "enable_ldap": true, "ldap_user_name": "validuser", - "ldap_user_password": "AdminPassword123!", // ✅ // pragma: allowlist secret - "ldap_admin_password": "MissingNumber!!!", // ❌ no digit // pragma: allowlist secret + "ldap_user_password": "MissingNumber!", // pragma: allowlist secret + "ldap_admin_password": "AdminPass123!", // pragma: allowlist secret }, expectedErrors: []string{ - "The LDAP user password must be 15 to 32 characters long and include at", - "least two alphabetic characters (with one uppercase and one lowercase), one", - "number, and one special character from the set", + "two alphabetic characters (with one uppercase and one lowercase), one", + "number, and one special character", }, description: "Password missing number should fail", }, { - name: "UserPasswordMissingSpecialChar", + name: "PasswordMissingSpecialChar", config: map[string]interface{}{ "enable_ldap": true, "ldap_user_name": "validuser", - "ldap_user_password": "MissingSpecial123", // ❌ no special char // pragma: allowlist secret - "ldap_admin_password": "AdminPassword123!", // ✅ // pragma: allowlist secret + "ldap_user_password": "MissingSpecial1", // pragma: allowlist secret + "ldap_admin_password": "AdminPass123!", // pragma: allowlist secret }, expectedErrors: []string{ "one special character", }, description: "Password missing special character should fail", }, - { - name: "AdminPasswordMissingSpecialChar", + name: "PasswordWithSpace", config: map[string]interface{}{ "enable_ldap": true, "ldap_user_name": "validuser", - 
"ldap_user_password": "AdminPassword123!", // ✅ // pragma: allowlist secret - "ldap_admin_password": "MissingSpecial123", // ❌ no special char // pragma: allowlist secret - }, - expectedErrors: []string{ - "one special character", - }, - description: "Password missing special character should fail", - }, - { - name: "UserPasswordWithSpace", - config: map[string]interface{}{ - "enable_ldap": true, - "ldap_user_name": "validuser", - "ldap_user_password": "Invalid Pass123!", // ❌ contains space // pragma: allowlist secret - "ldap_admin_password": "AdminPassword123!", // ✅ // pragma: allowlist secret + "ldap_user_password": "Invalid Pass123!", // pragma: allowlist secret + "ldap_admin_password": "AdminPass123!", // pragma: allowlist secret }, expectedErrors: []string{ "password must not contain the username or any spaces", }, description: "Password containing space should fail", }, - { - name: "AdminPasswordWithSpace", - config: map[string]interface{}{ - "enable_ldap": true, - "ldap_user_name": "validuser", - "ldap_user_password": "AdminPassword123!", // ✅ // pragma: allowlist secret - "ldap_admin_password": "Invalid Pass123!", // ❌ contains space // pragma: allowlist secret - }, - expectedErrors: []string{ - "The LDAP admin password must be 15 to 32 characters long and include at", - "least two alphabetic characters (with one uppercase and one lowercase), one", - "number, and one special character from the set", - }, - description: "Password containing space should fail", - }, { name: "PasswordContainsUsername", config: map[string]interface{}{ "enable_ldap": true, - "ldap_user_name": "validuser", - "ldap_user_password": "validuser123!", // ❌ contains username // pragma: allowlist secret - "ldap_admin_password": "AdminPassword123!", // ✅ // pragma: allowlist secret + "ldap_user_name": "Validuser", + "ldap_user_password": "Validuser123!", // pragma: allowlist secret + "ldap_admin_password": "AdminPass123!", // pragma: allowlist secret }, expectedErrors: []string{ "password must not contain the username or any spaces", @@ -867,11 +784,12 @@ func TestInvalidLdapConfigurations(t *testing.T) { config: map[string]interface{}{ "enable_ldap": true, "ldap_user_name": "validuser", - "ldap_user_password": "ValidPassword123!", // ✅ // pragma: allowlist secret - "ldap_admin_password": "", // ❌ missing // pragma: allowlist secret + "ldap_user_password": "ValidPass123!", // pragma: allowlist secret + "ldap_admin_password": "", // pragma: allowlist secret }, expectedErrors: []string{ - "The LDAP admin password must be 15 to 32 characters long and include", + "The LDAP admin password must be 8 to 20 characters long and include", + "least two alphabetic characters (with one uppercase and one lowercase)", }, description: "Missing admin password should fail", }, @@ -880,11 +798,11 @@ func TestInvalidLdapConfigurations(t *testing.T) { config: map[string]interface{}{ "enable_ldap": true, "ldap_user_name": "validuser", - "ldap_user_password": "ValidPassword123!", // ✅ // pragma: allowlist secret - "ldap_admin_password": "Short1!", // ❌ <15 // pragma: allowlist secret + "ldap_user_password": "ValidPass123!", // pragma: allowlist secret + "ldap_admin_password": "Short1!", // pragma: allowlist secret }, expectedErrors: []string{ - "must be 15 to 32 characters long", + "must be 8 to 20 characters long", }, description: "Admin password too short should fail", }, @@ -896,8 +814,8 @@ func TestInvalidLdapConfigurations(t *testing.T) { "enable_ldap": true, "ldap_basedns": "", "ldap_user_name": "validuser", - 
"ldap_user_password": "ValidPassword123!", // ✅ // pragma: allowlist secret - "ldap_admin_password": "AdminPassword123!", // ✅ // pragma: allowlist secret + "ldap_user_password": "ValidPass123!", // pragma: allowlist secret + "ldap_admin_password": "AdminPass123!", // pragma: allowlist secret }, expectedErrors: []string{ "If LDAP is enabled, then the base DNS should not be empty or null.", diff --git a/tests/lsf_tests/lsf_resource_exemptions.go b/tests/lsf_tests/resource_exemptions.go similarity index 100% rename from tests/lsf_tests/lsf_resource_exemptions.go rename to tests/lsf_tests/resource_exemptions.go diff --git a/tests/pr_test.go b/tests/pr_test.go index 32ff30b5..e1f1780e 100644 --- a/tests/pr_test.go +++ b/tests/pr_test.go @@ -9,11 +9,10 @@ import ( "github.com/stretchr/testify/require" deploy "github.com/terraform-ibm-modules/terraform-ibm-hpc/deployment" lsf_tests "github.com/terraform-ibm-modules/terraform-ibm-hpc/lsf_tests" - scale_tests "github.com/terraform-ibm-modules/terraform-ibm-hpc/scale_tests" utils "github.com/terraform-ibm-modules/terraform-ibm-hpc/utilities" ) -func TestRunLSFDefault(t *testing.T) { +func TestRunDefault(t *testing.T) { t.Parallel() require.NoError(t, os.Setenv("ZONES", "us-east-3"), "Failed to set ZONES env variable") @@ -23,60 +22,29 @@ func TestRunLSFDefault(t *testing.T) { lsf_tests.DefaultTest(t) } -func TestRunScaleDefault(t *testing.T) { - t.Parallel() - - require.NoError(t, os.Setenv("ZONES", "us-east-3"), "Failed to set ZONES env variable") - require.NoError(t, os.Setenv("DEFAULT_EXISTING_RESOURCE_GROUP", "Default"), "Failed to set DEFAULT_EXISTING_RESOURCE_GROUP") - - t.Log("Running default LSF cluster test for region us-east-3") - scale_tests.DefaultTest(t) -} - // TestMain is the entry point for all tests func TestMain(m *testing.M) { // Load LSF version configuration - lsfProductFileName, err := lsf_tests.GetLSFVersionConfig() - if err != nil { - log.Fatalf("❌ Failed to get LSF version config: %v", err) - } - - // Load and validate configuration - lsfConfigFilePath, err := filepath.Abs("data/" + lsfProductFileName) - if err != nil { - log.Fatalf("❌ Failed to resolve config path: %v", err) - } - - if _, err := os.Stat(lsfConfigFilePath); err != nil { - log.Fatalf("❌ Config file not accessible: %v", err) - } - - if _, err := deploy.GetLSFConfigFromYAML(lsfConfigFilePath); err != nil { - log.Fatalf("❌ Config load failed: %v", err) - } - log.Printf("✅ lsf Configuration loaded successfully from %s", filepath.Base(lsfConfigFilePath)) - - // Load Scale version configuration - scaleProductFileName, err := scale_tests.GetScaleVersionConfig() + productFileName, err := lsf_tests.GetLSFVersionConfig() if err != nil { log.Fatalf("❌ Failed to get LSF version config: %v", err) } // Load and validate configuration - scaleConfigFilePath, err := filepath.Abs("data/" + scaleProductFileName) + configFilePath, err := filepath.Abs("data/" + productFileName) if err != nil { log.Fatalf("❌ Failed to resolve config path: %v", err) } - if _, err := os.Stat(scaleConfigFilePath); err != nil { + if _, err := os.Stat(configFilePath); err != nil { log.Fatalf("❌ Config file not accessible: %v", err) } - if _, err := deploy.GetScaleConfigFromYAML(scaleConfigFilePath); err != nil { + if _, err := deploy.GetConfigFromYAML(configFilePath); err != nil { log.Fatalf("❌ Config load failed: %v", err) } - log.Printf("✅ Scale Configuration loaded successfully from %s", filepath.Base(scaleConfigFilePath)) + log.Printf("✅ Configuration loaded successfully from %s", 
filepath.Base(configFilePath)) // Execute tests exitCode := m.Run() diff --git a/tests/scale_tests/scale_resource_exemptions.go b/tests/scale_tests/scale_resource_exemptions.go deleted file mode 100644 index b12f889f..00000000 --- a/tests/scale_tests/scale_resource_exemptions.go +++ /dev/null @@ -1,58 +0,0 @@ -package tests - -// ResourceExemptions contains lists of resources to ignore during Terraform operations -type ResourceExemptions struct { - Destroys []string // Resources to ignore during destroy operations - Updates []string // Resources to ignore during update operations -} - -// LSFIgnoreLists contains the standard resource exemptions for LSF cluster tests -var SCALEIgnoreLists = ResourceExemptions{ - Destroys: []string{ - // Null resources used for provisioning checks - "module.landing_zone_vsi.module.hpc.module.check_cluster_status.null_resource.remote_exec[0]", - "module.landing_zone_vsi.module.hpc.module.check_node_status.null_resource.remote_exec[0]", - "module.landing_zone_vsi.module.hpc.module.check_node_status.null_resource.remote_exec[1]", - "module.landing_zone_vsi.module.hpc.module.check_node_status.null_resource.remote_exec[2]", - "module.check_node_status.null_resource.remote_exec[0]", - "module.check_node_status.null_resource.remote_exec[1]", - "module.check_node_status.null_resource.remote_exec[2]", - "module.check_cluster_status.null_resource.remote_exec[0]", - "module.scale.module.resource_provisioner.null_resource.tf_resource_provisioner[0]", - - // Boot waiting resources - "module.landing_zone_vsi.module.wait_management_vsi_booted.null_resource.remote_exec[0]", - "module.landing_zone_vsi.module.wait_management_candidate_vsi_booted.null_resource.remote_exec[0]", - "module.landing_zone_vsi[0].module.wait_management_vsi_booted.null_resource.remote_exec[0]", - "module.landing_zone_vsi[0].module.wait_management_candidate_vsi_booted.null_resource.remote_exec[0]", - "module.landing_zone_vsi[0].module.wait_management_candidate_vsi_booted.null_resource.remote_exec[1]", - "module.landing_zone_vsi[0].module.wait_worker_vsi_booted[0].null_resource.remote_exec[0]", - "module.landing_zone_vsi[0].module.wait_worker_vsi_booted[0].null_resource.remote_exec[1]", - - // Configuration resources - "module.landing_zone_vsi.module.do_management_vsi_configuration.null_resource.remote_exec_script_cp_files[0]", - "module.landing_zone_vsi.module.do_management_vsi_configuration.null_resource.remote_exec_script_cp_files[1]", - "module.landing_zone_vsi.module.do_management_vsi_configuration.null_resource.remote_exec_script_new_file[0]", - "module.landing_zone_vsi.module.do_management_candidate_vsi_configuration.null_resource.remote_exec_script_new_file[0]", - "module.landing_zone_vsi.module.do_management_candidate_vsi_configuration.null_resource.remote_exec_script_run[0]", - "module.landing_zone_vsi[0].module.do_management_vsi_configuration.null_resource.remote_exec_script_run[0]", - - // Other temporary resources - "module.lsf.module.resource_provisioner.null_resource.tf_resource_provisioner[0]", - "module.landing_zone_vsi[0].module.lsf_entitlement[0].null_resource.remote_exec[0]", - "module.landing_zone_vsi.module.hpc.module.landing_zone_vsi.module.wait_management_candidate_vsi_booted.null_resource.remote_exec[0]", - "module.landing_zone_vsi.module.hpc.module.landing_zone_vsi.module.wait_management_vsi_booted.null_resource.remote_exec[0]", - "module.lsf.module.prepare_tf_input.local_sensitive_file.prepare_tf_input[0]", - "module.compute_playbook[0].null_resource.run_playbook[0]", - }, - - 
Updates: []string{ - // File storage resources that can be updated without cluster impact - "module.file_storage.ibm_is_share.share[0]", - "module.file_storage.ibm_is_share.share[1]", - "module.file_storage.ibm_is_share.share[2]", - "module.file_storage.ibm_is_share.share[3]", - "module.file_storage.ibm_is_share.share[4]", - "module.lsf.module.prepare_tf_input.local_sensitive_file.prepare_tf_input[0]", - }, -} diff --git a/tests/scale_tests/scale_setup.go b/tests/scale_tests/scale_setup.go deleted file mode 100644 index 457cab29..00000000 --- a/tests/scale_tests/scale_setup.go +++ /dev/null @@ -1,266 +0,0 @@ -package tests - -import ( - "fmt" - "os" - "reflect" - "strings" - "sync" - "testing" - "time" - - "github.com/gruntwork-io/terratest/modules/terraform" - "github.com/stretchr/testify/require" - "github.com/terraform-ibm-modules/ibmcloud-terratest-wrapper/testhelper" - - utils "github.com/terraform-ibm-modules/terraform-ibm-hpc/utilities" -) - -// Constants for configuration -const ( - // Terraform solution directory - terraformDir = "solutions/scale" - - // Default scheduler - Solution = "scale" - - // Configuration files for Scale version - defaultConfigFile = "scale_config.yml" // Use latest as default - - // Log file suffixes - defaultLogFileSuffix = ".log" - defaultJSONLogFileSuffix = ".json" -) - -// EnvVars represents all environment variables required for the test -type EnvVars struct { - ScaleVersion string - IbmCustomerNumber string - Zones string `required:"true"` - RemoteAllowedIPs string `required:"true"` - ExistingResourceGroup string `required:"true"` - StorageType string `required:"true"` - SSHKeys string `required:"true"` - ScaleDeployerInstance string - ComputeGUIUsername string - ComputeGUIPassword string // pragma: allowlist secret - StorageGUIUsername string `required:"true"` - StorageGUIPassword string `required:"true"` // pragma: allowlist secret - ComputeInstances string - ClientInstances string - StorageInstances string - ScaleEncryptionEnabled string - ScaleEncryptionType string - ScaleObservabilityAtrackerEnable string - ScaleObservabilityAtrackerTargetType string - ScaleSCCWPEnable string - ScaleCSPMEnabled string - ScaleSCCWPServicePlan string - GKLMInstances string - ScaleEncryptionAdminPassword string // pragma: allowlist secret - ScaleFilesystemConfig string - ScaleFilesetsConfig string - ScaleDNSDomainNames string - ScaleEnableCOSIntegration string - ScaleEnableVPCFlowLogs string - AfmInstances string - ProtocolInstances string -} - -func GetEnvVars() (*EnvVars, error) { - vars := &EnvVars{ - ScaleVersion: os.Getenv("SCALE_VERSION"), - IbmCustomerNumber: os.Getenv("IBM_CUSTOMER_NUMBER"), - Zones: os.Getenv("ZONES"), - RemoteAllowedIPs: os.Getenv("REMOTE_ALLOWED_IPS"), - ExistingResourceGroup: os.Getenv("EXISTING_RESOURCE_GROUP"), - StorageType: os.Getenv("STORAGE_TYPE"), - SSHKeys: os.Getenv("SSH_KEYS"), - ScaleDeployerInstance: os.Getenv("SCALE_DEPLOYER_INSTANCE"), - ComputeGUIUsername: os.Getenv("COMPUTE_GUI_USERNAME"), - ComputeGUIPassword: os.Getenv("COMPUTE_GUI_PASSWORD"), - StorageGUIUsername: os.Getenv("STORAGE_GUI_USERNAME"), - StorageGUIPassword: os.Getenv("STORAGE_GUI_PASSWORD"), - ComputeInstances: os.Getenv("COMPUTE_INSTANCES"), - ClientInstances: os.Getenv("CLIENT_INSTANCES"), - StorageInstances: os.Getenv("STORAGE_INSTANCES"), - ScaleEncryptionEnabled: os.Getenv("SCALE_ENCRYPTION_ENABLED"), - ScaleEncryptionType: os.Getenv("SCALE_ENCRYPTION_TYPE"), - ScaleObservabilityAtrackerEnable: os.Getenv("SCALE_OBSERVABILITY_ATRACKER_ENABLE"), - 
ScaleObservabilityAtrackerTargetType: os.Getenv("SCALE_OBSERVABILITY_ATRACKER_TARGET_TYPE"), - ScaleSCCWPEnable: os.Getenv("SCALE_SCCWP_ENABLE"), - ScaleCSPMEnabled: os.Getenv("SCALE_CSPM_ENABLED"), - ScaleSCCWPServicePlan: os.Getenv("SCALE_SCCWP_SERVICE_PLAN"), - GKLMInstances: os.Getenv("GKLM_INSTANCES"), - ScaleEncryptionAdminPassword: os.Getenv("SCALE_ENCRYPTION_ADMIN_PASSWORD"), - ScaleFilesystemConfig: os.Getenv("SCALE_FILESYSTEM_CONFIG"), - ScaleFilesetsConfig: os.Getenv("SCALE_FILESETS_CONFIG"), - ScaleDNSDomainNames: os.Getenv("SCALE_DNS_DOMAIN_NAMES"), - ScaleEnableCOSIntegration: os.Getenv("SCALE_ENABLE_COS_INTEGRATION"), - ScaleEnableVPCFlowLogs: os.Getenv("SCALE_ENABLE_VPC_FLOW_LOGS"), - AfmInstances: os.Getenv("AFM_INSTANCES"), - ProtocolInstances: os.Getenv("PROTOCOL_INSTANCES"), - } - - // Validate required fields - v := reflect.ValueOf(vars).Elem() - t := v.Type() - for i := 0; i < v.NumField(); i++ { - field := t.Field(i) - if tag, ok := field.Tag.Lookup("required"); ok && tag == "true" { - fieldValue := v.Field(i).String() - if fieldValue == "" { - return nil, fmt.Errorf("missing required environment variable: %s", field.Name) - } - } - } - - return vars, nil -} - -var ( - // testLogger stores the logger instance for logging test messages. - testLogger *utils.AggregatedLogger - - // once ensures that the test suite initialization logic (e.g., logger setup) runs only once, - // even when called concurrently by multiple test functions. - once sync.Once -) - -func setupTestSuite(t *testing.T) { - once.Do(func() { - timestamp := time.Now().Format("2006-01-02_15-04-05") - var logFileName string - - if validationLogFilePrefix, ok := os.LookupEnv("LOG_FILE_NAME"); ok { - fileName := strings.Split(validationLogFilePrefix, defaultJSONLogFileSuffix)[0] - logFileName = fmt.Sprintf("%s%s", fileName, defaultLogFileSuffix) - } else { - logFileName = fmt.Sprintf("%s%s", timestamp, defaultLogFileSuffix) - } - - _ = os.Setenv("LOG_FILE_NAME", fmt.Sprintf("%s%s", strings.Split(logFileName, ".")[0], defaultJSONLogFileSuffix)) - - var err error - testLogger, err = utils.NewAggregatedLogger(logFileName) - if err != nil { - t.Fatalf("Error initializing logger: %v", err) - } - testLogger.Info(t, "Logger initialized successfully") - }) -} - -var upgradeOnce sync.Once - -func UpgradeTerraformOnce(t *testing.T, terraformOptions *terraform.Options) { - upgradeOnce.Do(func() { - testLogger.Info(t, "Running Terraform upgrade with `-upgrade=true`...") - - output, err := terraform.RunTerraformCommandE(t, terraformOptions, "init", "-upgrade=true") - if err != nil { - testLogger.FAIL(t, fmt.Sprintf("Terraform upgrade failed: %v", err)) - testLogger.FAIL(t, fmt.Sprintf("Terraform upgrade output:\n%s", output)) - require.NoError(t, err, "Terraform upgrade failed") - } - testLogger.PASS(t, "Terraform upgrade completed successfully") - }) -} - -func checkRequiredEnvVars() error { - required := []string{"TF_VAR_ibmcloud_api_key", "ZONES", "REMOTE_ALLOWED_IPS", "SSH_KEYS"} - - for _, envVar := range required { - if os.Getenv(envVar) == "" { - return fmt.Errorf("environment variable %s is not set", envVar) - } - } - return nil -} - -func setupOptions(t *testing.T, clusterNamePrefix, terraformDir, existingResourceGroup string) (*testhelper.TestOptions, error) { - if err := checkRequiredEnvVars(); err != nil { - return nil, err - } - - envVars, err := GetEnvVars() - if err != nil { - return nil, fmt.Errorf("failed to get environment variables: %v", err) - } - - terraformVars := map[string]interface{}{ - 
"cluster_prefix": clusterNamePrefix, - "ibm_customer_number": envVars.IbmCustomerNumber, - "ssh_keys": utils.SplitAndTrim(envVars.SSHKeys, ","), - "zones": utils.SplitAndTrim(envVars.Zones, ","), - "remote_allowed_ips": utils.SplitAndTrim(envVars.RemoteAllowedIPs, ","), - "existing_resource_group": existingResourceGroup, - "storage_type": envVars.StorageType, - "deployer_instance": envVars.ScaleDeployerInstance, - "storage_gui_username": envVars.StorageGUIUsername, - "storage_gui_password": envVars.StorageGUIPassword, // # pragma: allowlist secret - "storage_instances": envVars.StorageInstances, - "enable_cos_integration": false, - "enable_vpc_flow_logs": false, - "observability_atracker_enable": false, - "colocate_protocol_instances": false, - "protocol_instances": envVars.ProtocolInstances, - } - - options := &testhelper.TestOptions{ - Testing: t, - TerraformDir: terraformDir, - IgnoreDestroys: testhelper.Exemptions{List: SCALEIgnoreLists.Destroys}, - IgnoreUpdates: testhelper.Exemptions{List: SCALEIgnoreLists.Updates}, - TerraformVars: terraformVars, - } - - // Remove empty values from TerraformVars - for key, value := range options.TerraformVars { - if value == "" { - delete(options.TerraformVars, key) - } - } - - return options, nil -} - -func GetScaleVersionConfig() (string, error) { - if defaultConfigFile == "" { - return "", fmt.Errorf("default config file path is empty") - } - return defaultConfigFile, nil -} - -// DefaultTest runs the default test using the provided Terraform directory and existing resource group. -// It provisions a cluster, waits for it to be ready, and then validates it. -func DefaultTest(t *testing.T) { - setupTestSuite(t) - if testLogger == nil { - t.Fatal("Logger initialization failed") - } - testLogger.Info(t, fmt.Sprintf("Test %s starting execution", t.Name())) - - clusterNamePrefix := utils.GenerateTimestampedClusterPrefix(utils.GenerateRandomString()) - testLogger.Info(t, fmt.Sprintf("Generated cluster prefix: %s", clusterNamePrefix)) - - envVars, err := GetEnvVars() - if err != nil { - testLogger.Error(t, fmt.Sprintf("Environment config error: %v", err)) - } - require.NoError(t, err, "Environment configuration failed") - - options, err := setupOptions(t, clusterNamePrefix, terraformDir, envVars.ExistingResourceGroup) - if err != nil { - testLogger.Error(t, fmt.Sprintf("Test setup error: %v", err)) - } - require.NoError(t, err, "Test options initialization failed") - - output, err := options.RunTestConsistency() - if err != nil { - testLogger.FAIL(t, fmt.Sprintf("Provisioning failed: %v", err)) - } - require.NoError(t, err, "Cluster provisioning failed with output: %v", output) - require.NotNil(t, output, "Received nil output from provisioning") - - testLogger.PASS(t, fmt.Sprintf("Test %s completed successfully", t.Name())) -} diff --git a/tests/utilities/helpers.go b/tests/utilities/helpers.go index c63f5555..e0572011 100644 --- a/tests/utilities/helpers.go +++ b/tests/utilities/helpers.go @@ -7,7 +7,6 @@ import ( "errors" "fmt" "math/rand" - "os" "os/exec" "path/filepath" @@ -770,22 +769,3 @@ func GetBoolVar(vars map[string]interface{}, key string) (bool, error) { return boolVal, nil } - -// GeneratePassword generates a random string of length 8 using lowercase characters -func GeneratePassword() string { - // Define the character set containing lowercase letters - const charset = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*" - - b := make([]byte, 8) - - // Loop through each index of the byte slice - for i := range b { - // 
Generate a random index within the length of the character set - randomIndex := rand.Intn(len(charset)) - - b[i] = charset[randomIndex] - } - - // Convert the byte slice to a string and return it - return string(b) + "1*" -} diff --git a/tools/access-management/README.md b/tools/access-management/README.md deleted file mode 100644 index f5db94af..00000000 --- a/tools/access-management/README.md +++ /dev/null @@ -1,59 +0,0 @@ -# IAM Permissions Assignment for LSF Deployment - -#### Before deploying an IBM Spectrum LSF cluster, specific IAM permissions must be assigned to either a user or an access group. The automation script enables this process. - -User has the flexibility to run the specific scripts to gain the required IAM permissions to perform the LSF deployment. The automation ensures that if the user has a certain permissions, then the script will omit them and add only the required permissions to perform the deployment. - -For example, for the App configuration service, the user requires Administrator and Manager permissions. If the user already has the Administrator permission, then the script will omit this and provide only Manager permission. - -### Benefits of the scripts: - -#### Interactive input collection - The script prompts for the IBMid (admin email), Account ID, and target (User or Access Group). - -#### Permission check - The script verifies that the admin has account-level Administrator rights which is required to assign policies. - -#### Assigns required permissions for LSF deployment - This script grants the appropriate permissions across IBM Cloud services that LSF depends upon (for example, VPC, COS, DNS services, KMS, Secrets Manager, and Sysdig Monitoring). - -#### Avoids duplicates - The script skips the assignment if a matching policy already exists. - -You can get the scripts by performing gitclone on the branch: - -``` -git clone -b main https://github.com/terraform-ibm-modules/terraform-ibm-hpc.git -``` - -1. Navigate to cd tools/access-management, you will get the permissions.sh file. - -2. Login to the IBM Cloud with your API key. Run the following command: - -``` -ibmcloud login --apikey -g -chmod +x permissions.sh -./permissions.sh -``` - -3. Enter the admin email or IBMid. - -4. Enter the Account ID. - -For the Account ID, login to the IBM Cloud account by using your unique credentials. Go to Manage > Account > Account settings. You will find the Account ID. - -5. You will be asked to assign the roles: - -``` -Access Group - Select this option, if you want to assign the access to the entire access group. -User - Select this option, if you want to assign the access to an individual user. -Select the required option. -``` - -6. Enter the target user email, if you select the option 2. - -7. User policy is successfully created. - -If the user skips to enter the ACCOUNT_ID, then script displays the error message: - -``` -:x: ACCOUNT_ID is required. -``` - -This script ensures the user or access group has all the required IAM permissions to successfully deploy an LSF environment. diff --git a/tools/access-management/permissions.sh b/tools/access-management/permissions.sh deleted file mode 100755 index 6c09bdcc..00000000 --- a/tools/access-management/permissions.sh +++ /dev/null @@ -1,338 +0,0 @@ -#!/usr/bin/env bash -set -e - -##################################### -# 1. 
Prompt for required inputs -##################################### -echo "🔧 IBM Cloud Permissions Assignment Script (Interactive Mode)" - -read -rp "Enter admin email (your IBMid): " ADMIN_EMAIL -if [ -z "$ADMIN_EMAIL" ]; then - echo "❌ ADMIN_EMAIL is required." - exit 1 -fi - -read -rp "Enter Account ID: " ACCOUNT_ID -if [ -z "$ACCOUNT_ID" ]; then - echo "❌ ACCOUNT_ID is required." - exit 1 -fi - -echo "Do you want to assign roles to an Access Group or a User?" -select target_type in "Access Group" "User"; do - case $target_type in - "Access Group") - read -rp "Enter Access Group Name: " ACCESS_GROUP - break - ;; - "User") - read -rp "Enter target User Email: " USER_EMAIL - break - ;; - *) - echo "❗ Invalid selection. Choose 1 or 2." - ;; - esac -done - -##################################### -# 2. Check IAM Administrator rights -##################################### -echo "🔍 Checking if $ADMIN_EMAIL can assign IAM permissions..." -has_permission=false - -check_policies() { - local policies="$1" - local scope="$2" - - # Check Administrator role for serviceType=service - local has_admin - has_admin=$(echo "$policies" | jq -e ' - .[] | - select(.roles? != null) | - select(any(.roles[]?.display_name; . == "Administrator")) | - select(any(.resources[].attributes[]?; .name == "accountId")) | - select(any(.resources[].attributes[]?; .name == "serviceType" and .value == "service")) - ' >/dev/null 2>&1 && echo "true" || echo "false") - - # Check role for serviceType=platform_service (Viewer, Editor, or Administrator) - local has_platform_role - has_platform_role=$(echo "$policies" | jq -e ' - .[] | - select(.roles? != null) | - select(any(.roles[]?.display_name; . == "Viewer" or . == "Editor" or . == "Administrator")) | - select(any(.resources[].attributes[]?; .name == "accountId")) | - select(any(.resources[].attributes[]?; .name == "serviceType" and .value == "platform_service")) - ' >/dev/null 2>&1 && echo "true" || echo "false") - - # Check role for IAM Identity service (Administrator) - local has_identity_role - has_identity_role=$(echo "$policies" | jq -e ' - .[] | - select(.roles? != null) | - select(any(.roles[]?.display_name; . 
== "Administrator")) | - select(any(.resources[].attributes[]?; .name == "accountId")) | - select(any(.resources[].attributes[]?; .name == "serviceName" and .value == "iam-identity")) - ' >/dev/null 2>&1 && echo "true" || echo "false") - - # Debug printing - if [ "$has_admin" = "true" ]; then - echo "✅ At $scope policy level: Has Administrator for All Identity and Access enabled service" - else - echo "❌ At $scope policy level: Missing Administrator for All Identity and Access enabled service" - fi - - if [ "$has_identity_role" = "true" ]; then - echo "✅ At $scope policy level: Has Administrator for IAM Identity services" - else - echo "❌ At $scope policy level: Missing Administrator for IAM Identity service" - fi - - if [ "$has_platform_role" = "true" ]; then - echo "✅ At $scope policy level: Has Viewer/Editor/Administrator for All Account Management services" - else - echo "❌ At $scope policy level: Missing Viewer/Editor/Administrator for All Account Management services" - fi - - [[ "$has_admin" == "true" && "$has_platform_role" == "true" && "$has_identity_role" == "true" ]] -} - -USER_POLICIES=$(ibmcloud iam user-policies "$ADMIN_EMAIL" --output json 2>/dev/null || echo "[]") -if echo "$USER_POLICIES" | jq empty 2>/dev/null; then - if check_policies "$USER_POLICIES" "User"; then - has_permission=true - fi -fi - -if [ "$has_permission" != true ]; then - ACCESS_GROUPS_FOR_ADMIN=$(ibmcloud iam access-groups -u "$ADMIN_EMAIL" --output json 2>/dev/null || echo "[]") - - ALL_GROUP_POLICIES="[]" - while IFS= read -r GROUP_NAME; do - GROUP_POLICIES=$(ibmcloud iam access-group-policies "$GROUP_NAME" --output json 2>/dev/null || echo "[]") - ALL_GROUP_POLICIES=$(echo "$ALL_GROUP_POLICIES $GROUP_POLICIES" | jq -s 'add') - done < <(echo "$ACCESS_GROUPS_FOR_ADMIN" | jq -r '.[].name // empty') - # echo $ALL_GROUP_POLICIES - if check_policies "$ALL_GROUP_POLICIES" "Access Group"; then - has_permission=true - fi -fi - -if [ "$has_permission" != true ]; then - echo "❌ $ADMIN_EMAIL lacks required Administrator rights (checked User & Access Group policies) — cannot assign permissions." - exit 1 -fi - -echo "✅ $ADMIN_EMAIL has Administrator rights (verified from User & Access Group policies) — proceeding with permission assignment." - -##################################### -# 3. Role assignment definitions -##################################### -PERMISSIONS_LIST="apprapp|Administrator|Manager -cloud-object-storage|Service Configuration Reader|Writer -dns-svcs|Editor|Manager -sysdig-monitor|Administrator|Manager -kms|Service Configuration Reader|Manager -secrets-manager|Administrator|Manager -sysdig-secure|Administrator| -is|Editor| -iam-identity|Administrator| -atracker|Administrator| -logs-router|Administrator| -metrics-router|Administrator|" - -FRIENDLY_NAMES="apprapp|App Configuration -cloud-object-storage|Cloud Object Storage -dns-svcs|DNS Services -sysdig-monitor|Cloud Monitoring -kms|Key Protect -secrets-manager|Secrets Manager -sysdig-secure|Security and Compliance Center Workload Protection -is|VPC Infrastructure Services -iam-identity|IAM Identity -atracker|Activity tracker event routing -logs-router|Cloud logs routing -metrics-router|Metrics routing" - -get_friendly_name() { - local service="$1" - echo "$FRIENDLY_NAMES" | while IFS='|' read -r svc fname; do - if [ "$svc" = "$service" ]; then - echo "$fname" - return - fi - done -} - -##################################### -# 4. 
Role normalization helper -##################################### -normalize_roles() { - echo "$1" | tr ',' '\n' | sed 's/^ *//;s/ *$//' | sort -u | paste -sd, - -} - -##################################### -# 5. Main logic: Assign roles -##################################### -if [ -n "$ACCESS_GROUP" ] && [ -z "$USER_EMAIL" ]; then - echo "🔐 Assigning roles to access group: $ACCESS_GROUP" - echo "$PERMISSIONS_LIST" | while IFS='|' read -r SERVICE_NAME PLATFORM_ROLE SERVICE_ROLE; do - [ -n "$SERVICE_ROLE" ] && ROLES="$PLATFORM_ROLE,$SERVICE_ROLE" || ROLES="$PLATFORM_ROLE" - fname=$(get_friendly_name "$SERVICE_NAME") - [ -n "$fname" ] && DISPLAY_NAME="$SERVICE_NAME ($fname)" || DISPLAY_NAME="$SERVICE_NAME" - - existing_policies=$(ibmcloud iam access-group-policies "$ACCESS_GROUP" --output json 2>/dev/null || echo "[]") - - POLICY_ID=$(echo "$existing_policies" | jq -r \ - --arg service "$SERVICE_NAME" ' - .[] | select(any(.resources[].attributes[]?; - .name == "serviceName" and .value == $service)) | - .id' | head -n1) - - if [ -n "$POLICY_ID" ] && [ "$POLICY_ID" != "null" ]; then - EXISTING_ROLES=$(echo "$existing_policies" | jq -r --arg id "$POLICY_ID" ' - .[] | select(.id == $id) | [.roles[].display_name] | join(",")') - - EXISTING_SORTED=$(normalize_roles "$EXISTING_ROLES") - MERGED_SORTED=$(normalize_roles "$EXISTING_ROLES,$ROLES") - - if [ "$MERGED_SORTED" = "$EXISTING_SORTED" ]; then - echo "✅ Policy for $DISPLAY_NAME already includes required roles: $EXISTING_SORTED" - else - NEW_ROLES=$(comm -13 \ - <(echo "$EXISTING_SORTED" | tr ',' '\n' | sort) \ - <(echo "$MERGED_SORTED" | tr ',' '\n' | sort) | paste -sd, -) - - echo "🔄 Updating existing policy $POLICY_ID for $DISPLAY_NAME" - echo " • Current roles : $EXISTING_SORTED" - echo " • Adding roles : $NEW_ROLES" - - ibmcloud iam access-group-policy-update "$ACCESS_GROUP" "$POLICY_ID" \ - --roles "$MERGED_SORTED" \ - --service-name "$SERVICE_NAME" || echo "⚠️ Failed to update roles for $DISPLAY_NAME" - fi - else - echo "➕ Creating new policy for $DISPLAY_NAME" - ibmcloud iam access-group-policy-create "$ACCESS_GROUP" \ - --roles "$ROLES" \ - --service-name "$SERVICE_NAME" || echo "⚠️ Failed to assign $ROLES for $DISPLAY_NAME" - fi - done - - echo "🔍 Checking global Administrator/Manager policy for access group: $ACCESS_GROUP" - existing_policies=$(ibmcloud iam access-group-policies "$ACCESS_GROUP" --output json 2>/dev/null || echo "[]") - POLICY_ID=$(echo "$existing_policies" | jq -r ' - .[] | - select(any(.resources[].attributes[]?; .name == "serviceType" and .value == "service")) | - .id' | head -n1) - - if [ -n "$POLICY_ID" ] && [ "$POLICY_ID" != "null" ]; then - EXISTING_ROLES=$(echo "$existing_policies" | jq -r --arg id "$POLICY_ID" ' - .[] | select(.id == $id) | [.roles[].display_name] | join(",")') - - EXISTING_SORTED=$(normalize_roles "$EXISTING_ROLES") - MERGED_SORTED=$(normalize_roles "$EXISTING_ROLES,Administrator,Manager") - - if [ "$MERGED_SORTED" = "$EXISTING_SORTED" ]; then - echo "✅ Global Administrator/Manager policy already present with required roles for access group: $ACCESS_GROUP" - else - NEW_ROLES=$(comm -13 \ - <(echo "$EXISTING_SORTED" | tr ',' '\n' | sort) \ - <(echo "$MERGED_SORTED" | tr ',' '\n' | sort) | paste -sd, -) - - echo "🔄 Updating global policy $POLICY_ID for access group: $ACCESS_GROUP" - echo " • Current roles : $EXISTING_SORTED" - echo " • Adding roles : $NEW_ROLES" - - ibmcloud iam access-group-policy-update "$ACCESS_GROUP" "$POLICY_ID" \ - --roles "$MERGED_SORTED" || echo "⚠️ Failed to update 
Administrator,Manager roles for All Identity and Access enabled services to access group: $ACCESS_GROUP" - fi - else - echo "➕ Creating new global Administrator/Manager policy for access group: $ACCESS_GROUP" - ibmcloud iam access-group-policy-create "$ACCESS_GROUP" \ - --roles "Administrator,Manager" || echo "⚠️ Failed to assign Administrator,Manager roles for All Identity and Access enabled services to access group: $ACCESS_GROUP" - fi - -elif [ -z "$ACCESS_GROUP" ] && [ -n "$USER_EMAIL" ]; then - echo "👤 Assigning roles to user: $USER_EMAIL" - echo "$PERMISSIONS_LIST" | while IFS='|' read -r SERVICE_NAME PLATFORM_ROLE SERVICE_ROLE; do - [ -n "$SERVICE_ROLE" ] && ROLES="$PLATFORM_ROLE,$SERVICE_ROLE" || ROLES="$PLATFORM_ROLE" - fname=$(get_friendly_name "$SERVICE_NAME") - [ -n "$fname" ] && DISPLAY_NAME="$SERVICE_NAME ($fname)" || DISPLAY_NAME="$SERVICE_NAME" - - existing_policies=$(ibmcloud iam user-policies "$USER_EMAIL" --output json 2>/dev/null || echo "[]") - - POLICY_ID=$(echo "$existing_policies" | jq -r \ - --arg service "$SERVICE_NAME" ' - .[] - | select(any(.resources[].attributes[]?; - .name == "serviceName" and .value == $service)) - | .id' | head -n1) - - if [ -n "$POLICY_ID" ] && [ "$POLICY_ID" != "null" ]; then - EXISTING_ROLES=$(echo "$existing_policies" | jq -r --arg id "$POLICY_ID" ' - .[] | select(.id == $id) | [.roles[].display_name] | join(",")') - - EXISTING_SORTED=$(normalize_roles "$EXISTING_ROLES") - MERGED_SORTED=$(normalize_roles "$EXISTING_ROLES,$ROLES") - - if [ "$MERGED_SORTED" = "$EXISTING_SORTED" ]; then - echo "✅ Policy for $DISPLAY_NAME already includes required roles: $EXISTING_SORTED" - else - NEW_ROLES=$(comm -13 \ - <(echo "$EXISTING_SORTED" | tr ',' '\n' | sort) \ - <(echo "$MERGED_SORTED" | tr ',' '\n' | sort) | paste -sd, -) - - echo "🔄 Updating existing policy $POLICY_ID for $DISPLAY_NAME" - echo " • Current roles : $EXISTING_SORTED" - echo " • Adding roles : $NEW_ROLES" - - ibmcloud iam user-policy-update "$USER_EMAIL" "$POLICY_ID" \ - --roles "$MERGED_SORTED" \ - --service-name "$SERVICE_NAME" || echo "⚠️ Failed to update roles for $DISPLAY_NAME" - fi - else - echo "➕ Creating new policy for $DISPLAY_NAME" - ibmcloud iam user-policy-create "$USER_EMAIL" \ - --roles "$ROLES" \ - --service-name "$SERVICE_NAME" || echo "⚠️ Failed to assign $ROLES for $DISPLAY_NAME" - fi - done - - echo "🔍 Checking global Administrator/Manager policy for $USER_EMAIL" - existing_policies=$(ibmcloud iam user-policies "$USER_EMAIL" --output json 2>/dev/null || echo "[]") - POLICY_ID=$(echo "$existing_policies" | jq -r ' - .[] | - select(any(.resources[].attributes[]?; .name == "serviceType" and .value == "service")) | - .id' | head -n1) - if [ -n "$POLICY_ID" ] && [ "$POLICY_ID" != "null" ]; then - EXISTING_ROLES=$(echo "$existing_policies" | jq -r --arg id "$POLICY_ID" ' - .[] | select(.id == $id) | [.roles[].display_name] | join(",")') - - EXISTING_SORTED=$(normalize_roles "$EXISTING_ROLES") - MERGED_SORTED=$(normalize_roles "$EXISTING_ROLES,Administrator,Manager") - - if [ "$MERGED_SORTED" = "$EXISTING_SORTED" ]; then - echo "✅ Global Administrator/Manager policy already present with required roles for $USER_EMAIL" - else - NEW_ROLES=$(comm -13 \ - <(echo "$EXISTING_SORTED" | tr ',' '\n' | sort) \ - <(echo "$MERGED_SORTED" | tr ',' '\n' | sort) | paste -sd, -) - - echo "🔄 Updating global policy $POLICY_ID for $USER_EMAIL" - echo " • Current roles : $EXISTING_SORTED" - echo " • Adding roles : $NEW_ROLES" - - ibmcloud iam user-policy-update "$USER_EMAIL" "$POLICY_ID" 
\
-      --roles "$MERGED_SORTED" || echo "⚠️ Failed to update Administrator,Manager roles for All Identity and Access enabled services to user: $USER_EMAIL"
-    fi
-  else
-    echo "➕ Creating new global Administrator/Manager policy for $USER_EMAIL"
-    ibmcloud iam user-policy-create "$USER_EMAIL" \
-      --roles "Administrator,Manager" || echo "⚠️ Failed to assign Administrator,Manager roles for All Identity and Access enabled services to user: $USER_EMAIL"
-  fi
-
-else
-  echo "❗ Please choose either Access Group or User."
-  exit 1
-fi
diff --git a/tools/image-builder/packer/hpcaas/compute/script.sh b/tools/image-builder/packer/hpcaas/compute/script.sh
index dbf6fc79..c85dcc3d 100644
--- a/tools/image-builder/packer/hpcaas/compute/script.sh
+++ b/tools/image-builder/packer/hpcaas/compute/script.sh
@@ -83,26 +83,57 @@ curl -fsSL https://clis.cloud.ibm.com/install/linux | sh
 pip3 install ibm-vpc==0.10.0
 pip3 install ibm-cloud-networking-services ibm-cloud-sdk-core selinux
 ibmcloud plugin install vpc-infrastructure DNS
+echo 'LS_Standard 10.1 () () () () 18b1928f13939bd17bf25e09a2dd8459f238028f' > ${LSF_PACKAGES_PATH}/ls.entitlement
+echo 'LSF_Standard 10.1 () () () pa 3f08e215230ffe4608213630cd5ef1d8c9b4dfea' > ${LSF_PACKAGES_PATH}/lsf.entitlement
 echo "======================Installation of IBMCloud Plugins completed====================="
-hostnamectl
-hostnamectl set-hostname lsfservers
+hostname lsfservers
 # Installation of LSF base packages on compute node
 cd "${LSF_PACKAGES_PATH}" || exit
-yum install -y --nogpgcheck "${LSF_PACKAGES_PATH}"/lsf-conf-10.1.0.15-25050119.noarch.rpm
-yum install -y --nogpgcheck "${LSF_PACKAGES_PATH}"/lsf-man-pages-10.1.0.15-25050119.noarch.rpm
-yum install -y --nogpgcheck "${LSF_PACKAGES_PATH}"/lsf-client-10.1.0.15-25050119.x86_64.rpm
-yum install -y --nogpgcheck "${LSF_PACKAGES_PATH}"/lsf-server-10.1.0.15-25050119.x86_64.rpm
-yum install -y --nogpgcheck "${LSF_PACKAGES_PATH}"/lsf-integrations-10.1.0.15-25050118.x86_64.rpm
-yum install -y --nogpgcheck "${LSF_PACKAGES_PATH}"/lsf-ego-server-10.1.0.15-25050118.x86_64.rpm
-yum install -y --nogpgcheck "${LSF_PACKAGES_PATH}"/lsf-devel-10.1.0.15-25050119.x86_64.rpm
-yum install -y --nogpgcheck "${LSF_PACKAGES_PATH}"/lsf-data-mgr-10.1.0.15-25050119.x86_64.rpm
-yum install -y --nogpgcheck "${LSF_PACKAGES_PATH}"/lsf-ls-client-10.1.0.15-25050119.x86_64.rpm
-yum install -y --nogpgcheck "${LSF_PACKAGES_PATH}"/ibm-jre-1.8.0-25041010.x86_64.rpm
-yum install -y --nogpgcheck "${LSF_PACKAGES_PATH}"/lsf-pm-client-10.2.0.15-25050118.x86_64.rpm
+zcat lsf*lsfinstall_linux_x86_64.tar.Z | tar xvf -
+cd lsf*_lsfinstall || exit
+sed -e '/show_copyright/ s/^#*/#/' -i lsfinstall
+cat <<EOT >> install.config
+LSF_TOP="/opt/ibm/lsf"
+LSF_ADMINS="lsfadmin"
+LSF_CLUSTER_NAME="HPCCluster"
+LSF_MASTER_LIST="lsfservers"
+LSF_ENTITLEMENT_FILE="${LSF_PACKAGES_PATH}/lsf.entitlement"
+CONFIGURATION_TEMPLATE="DEFAULT"
+ENABLE_DYNAMIC_HOSTS="Y"
+ENABLE_EGO="N"
+ACCEPT_LICENSE="Y"
+SILENT_INSTALL="Y"
+LSF_SILENT_INSTALL_TARLIST="ALL"
+EOT
+bash lsfinstall -f install.config
+echo $?
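+# The exit status echoed above is informational only; nothing in this hunk
+# acts on it, so a failed lsfinstall does not stop the image build here.
+# A stricter fail-fast variant (a sketch, not the behavior of this script)
+# would be:
+#   bash lsfinstall -f install.config || { cat Install.log; exit 1; }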
+cat Install.log
 echo "========================LSF 10.1 installation completed====================="
+
+hostname lsfservers
+# Installation of Resource connector configuration on compute nodes
+cd "${LSF_PACKAGES_PATH}" || exit
+cd lsf*_lsfinstall || exit
+cat <<EOT >> server.config
+LSF_TOP="/opt/ibm/lsf_worker"
+LSF_ADMINS="lsfadmin"
+LSF_ENTITLEMENT_FILE="${LSF_PACKAGES_PATH}/lsf.entitlement"
+LSF_SERVER_HOSTS="lsfservers"
+LSF_LOCAL_RESOURCES="[resource cloudhpchost]"
+ACCEPT_LICENSE="Y"
+SILENT_INSTALL="Y"
+EOT
+bash lsfinstall -s -f server.config
+echo $?
+cat Install.log
+rm -rf /opt/ibm/lsf_worker/10.1
+ln -s /opt/ibm/lsf/10.1 /opt/ibm/lsf_worker
+echo "==================LSF 10.1 Resource connector installation completed==============="
+
+
 # Installation Of OpenMPI
 cd "${LSF_PACKAGES_PATH}" || exit
 wget https://download.open-mpi.org/release/open-mpi/v4.1/openmpi-4.1.0.tar.gz
@@ -163,18 +194,6 @@ else
 echo "INSTALL_SYDIG is set as false and the sysdig agent is not installed on compute node image"
 fi
 
-#Cloud Log Agent Installation
-echo "Cloud logs agent installation started"
-pwd
-wget https://logs-router-agent-install-packages.s3.us.cloud-object-storage.appdomain.cloud/logs-router-agent-rhel8-1.3.1.rpm.sha256
-wget https://logs-router-agent-install-packages.s3.us.cloud-object-storage.appdomain.cloud/logs-router-agent-rhel8-1.3.1.rpm
-sha256sum -c logs-router-agent-rhel8-1.3.1.rpm.sha256
-rpm -ivh logs-router-agent-rhel8-1.3.1.rpm
-rpm -qa | grep logs-router-agent
-wget -O /root/post-config.sh https://logs-router-agent-config.s3.us.cloud-object-storage.appdomain.cloud/post-config.sh
-ls -a /root
-echo "Cloud logs agent installated"
-
 # Security approach to delete unwanted ssh keys and host file entries
 rm -rf "${LSF_PACKAGES_PATH}"
 if grep -q 'ID="rhel"' /etc/os-release || grep -q 'ID="rocky"' /etc/os-release; then
diff --git a/tools/image-builder/template_files.tf b/tools/image-builder/template_files.tf
index 7c950a12..c112055c 100644
--- a/tools/image-builder/template_files.tf
+++ b/tools/image-builder/template_files.tf
@@ -14,6 +14,7 @@ data "template_file" "packer_user_data" {
     target_dir               = "/var"
     prefix                   = var.prefix
     cluster_name             = var.cluster_name
+    reservation_id           = var.reservation_id
     catalog_validate_ssh_key = var.ssh_keys[0]
     zones                    = join(",", var.zones)
     existing_resource_group  = var.existing_resource_group
diff --git a/tools/image-builder/templates/packer_user_data.tpl b/tools/image-builder/templates/packer_user_data.tpl
index e0b4fc64..cd0e8bd0 100644
--- a/tools/image-builder/templates/packer_user_data.tpl
+++ b/tools/image-builder/templates/packer_user_data.tpl
@@ -125,3 +125,16 @@ if [ !
-d "$(pwd)/go" ]; then echo "export GOROOT=$(pwd)/go" >> ~/.bashrc source ~/.bashrc fi + +echo "========== Executing Go function to validate the image through HPC deployment =========" +export TF_VAR_ibmcloud_api_key=${ibm_api_key} + +if [ "${private_catalog_id}" ]; then + PREFIX=${prefix} CLUSTER_NAME=${cluster_name} RESERVATION_ID=${reservation_id} SSH_FILE_PATH="/HPCaaS/artifacts/.ssh/id_rsa" REMOTE_ALLOWED_IPS=$PACKER_FIP SSH_KEYS=$CICD_SSH_KEY CATALOG_VALIDATE_SSH_KEY=${catalog_validate_ssh_key} ZONES=${zones} EXISTING_RESOURCE_GROUP=${existing_resource_group} COMPUTE_IMAGE_NAME=${image_name} PRIVATE_CATALOG_ID=${private_catalog_id} VPC_ID=${vpc_id} SUBNET_ID=${vpc_subnet_id} SOURCE_IMAGE_NAME=${source_image_name} go test -v -timeout 900m -parallel 4 -run "TestRunHpcDeploymentForCustomImageBuilder" | tee hpc_log_$(date +%d-%m-%Y-%H-%M-%S).log +else + PREFIX=${prefix} CLUSTER_NAME=${cluster_name} RESERVATION_ID=${reservation_id} SSH_FILE_PATH="/HPCaaS/artifacts/.ssh/id_rsa" REMOTE_ALLOWED_IPS=$PACKER_FIP SSH_KEYS=$CICD_SSH_KEY ZONES=${zones} EXISTING_RESOURCE_GROUP=${existing_resource_group} COMPUTE_IMAGE_NAME=${image_name} SOURCE_IMAGE_NAME=${source_image_name} go test -v -timeout 900m -parallel 4 -run "TestRunHpcDeploymentForCustomImageBuilder" | tee hpc_log_$(date +%d-%m-%Y-%H-%M-%S).log +fi + +echo "========== Deleting the SSH key =========" + +ibmcloud is key-delete $CICD_SSH_KEY -f diff --git a/tools/image-builder/variables.tf b/tools/image-builder/variables.tf index d8d14505..e044ede0 100644 --- a/tools/image-builder/variables.tf +++ b/tools/image-builder/variables.tf @@ -21,7 +21,7 @@ variable "existing_resource_group" { type = string default = "Default" validation { - condition = var.existing_resource_group != null + condition = var.resource_group != null error_message = "If you want to provide null for resource_group variable, it should be within double quotes." } } @@ -197,6 +197,17 @@ variable "cluster_name" { } } +# tflint-ignore: terraform_unused_declarations +variable "reservation_id" { + type = string + sensitive = true + description = "Ensure that you have received the reservation ID from IBM technical sales. Reservation ID is a unique identifier to distinguish different IBM Cloud HPC service agreements. It must start with a letter and can only contain letters, numbers, hyphens (-), or underscores (_)." + validation { + condition = can(regex("^[a-zA-Z][a-zA-Z0-9-_]*$", var.reservation_id)) + error_message = "Reservation ID must start with a letter and can only contain letters, numbers, hyphens (-), or underscores (_)." + } +} + # tflint-ignore: terraform_unused_declarations variable "private_catalog_id" { type = string diff --git a/tools/image-builder/version.tf b/tools/image-builder/version.tf index cfef57e6..0fa51187 100644 --- a/tools/image-builder/version.tf +++ b/tools/image-builder/version.tf @@ -3,7 +3,7 @@ terraform { required_providers { ibm = { source = "IBM-Cloud/ibm" - version = ">= 1.77.0, < 2.0.0" + version = "1.69.2" } null = { source = "hashicorp/null" diff --git a/tools/minimal-demo-prod-scripts/README.md b/tools/minimal-demo-prod-scripts/README.md deleted file mode 100644 index 830125de..00000000 --- a/tools/minimal-demo-prod-scripts/README.md +++ /dev/null @@ -1,146 +0,0 @@ -# Deploying and Connecting to LSF Environment via CLI - -The current LSF setup is designed for production grade deployments. This approach is high-priced for trying before-you-buy option and demonstration use cases. 
As a solution, users can now select a deployment size from three t-shirt sizes: Small, Medium, and Large. This makes it possible to deploy a smaller, less expensive environment on IBM Cloud to try out the capability or run a demonstration.
-
-## Deployment Types:
-
-You can choose from these three deployment size options:
-
-### Small (Minimal):
-This deploys the smallest possible environment (a single management instance) for the fastest setup. All optional services like observability, logging, SCC, Atracker, and LDAP are disabled.
-
-### Medium (Demo):
-This demonstrates the full set of capabilities. All optional services like observability, logging, and SCC are enabled. Deployment takes longer than the minimal option.
-
-### Large (Production):
-This option allows customization for production-grade deployments. The optional services like observability, logging, and SCC are enabled by default but can be changed as required.
-
-All the JSON files are customizable (users can make configuration changes as needed).
-
-#### Note: The .env file is mandatory because it contains all the variables required to render the selected template, regardless of deployment type.
-
-### Step 1: Create the .env file
-
-The following inputs are required in the .env file.
-
-```
-# IBM Cloud API key
-API_KEY="YOUR_API_KEY"
-
-# Account and resource details
-ACCOUNT_GUID="ACCOUNT_GUID"
-ZONES="ZONES"
-RESOURCE_GROUP="RESOURCE_GROUP"
-
-# SSH key name
-SSH_KEY="SSH_KEY"
-
-# Template JSON file (choose as per your deployment type)
-TEMPLATE_FILE="catalog_values_minimal_deployment.json"
-
-# LSF tile version locator
-LSF_TILE_VERSION="1082e7d2-5e2f-0a11-a3bc-f88a8e1931fc.92fba4af-b0dd-4b22-9415-9a5465ee9795-global"
-
-# App Center GUI password
-# Rules: Minimum 15 characters, at least 1 uppercase, 1 lowercase, 1 number,
-# and 1 special character (!@#$%^&*()_+=-). No spaces allowed.
-APP_CENTER_GUI_PASSWORD="APP_CENTER_GUI_PASSWORD"
-```
-
-The parameters in the snippet above are described below:
-
-API_KEY - This key is used to authenticate your deployment and grant the necessary access to create and manage resources in your IBM Cloud environment.
-
-ACCOUNT_GUID - Log in to your IBM Cloud account and go to Manage > Account > Account settings to find the Account ID (see the CLI tip below).
-
-ZONES - Provide the IBM Cloud zone.
-
-RESOURCE_GROUP - The existing resource group of your IBM Cloud account where VPC resources will be deployed.
-
-SSH_KEY - A list of SSH key names that are already configured in your IBM Cloud account to establish a connection to the Spectrum LSF nodes.
-
-TEMPLATE_FILE - All the .json files are uploaded in https://github.ibm.com/workload-eng-services/HPCaaS/tree/sml/tools/minimal-demo-prod-scripts.
-
-catalog_values_minimal_deployment.json - choose this file for small deployments.
-catalog_values_demo_deployment.json - choose this file for medium deployments.
-catalog_values_production_deployment.json - choose this file for large deployments.
-
-LSF_TILE_VERSION - Log in to the IBM Cloud catalog by using your unique credentials. Click Review deployment options. In the Deployment options section, select Create from the CLI, copy the version_locator_value, and save this value.
-Note: The version_locator_value changes based on the selected tile version.
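-CLI tip: once the IBM Cloud CLI is logged in, the ACCOUNT_GUID described above can also be read from the active session. This is the same lookup the helper scripts below perform (it assumes jq is installed, which those scripts also require):
-
-```
-ibmcloud target --output json | jq -r '.account.guid'
-```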
-
-APP_CENTER_GUI_PASSWORD - This is the password that is required to access the IBM Spectrum LSF Application Center (App Center) GUI, which is enabled by default in both Fix Pack 15 and Fix Pack 14 with HTTPS. This is a mandatory value, and omitting it will result in deployment failure.
-
-### Step 2: Deploy the LSF environment
-
-You can get the scripts by cloning the branch:
-
-```
-git clone -b main https://github.com/terraform-ibm-modules/terraform-ibm-hpc.git
-```
-
-1. Navigate to minimal-demo-prod-scripts to get all the required files.
-
-2. Run chmod +x *.sh to make all the scripts executable, then start the deployment:
-
-```
-chmod +x create_lsf_environment.sh
-./create_lsf_environment.sh <cluster_prefix>
-```
-
-create_lsf_environment.sh - This script automates the end-to-end deployment of an IBM Cloud LSF environment. It installs required plugins, generates configuration files from your .env, triggers the Schematics workspace deployment, and finally prints the access details (bastion, login, and management IPs) with next steps for connecting and submitting jobs.
-
-### Step 3: Connect to the LSF cluster and run the jobs
-
-Now that your environment is set up, you can connect to the LSF cluster and perform operations such as submitting jobs, monitoring workloads, and viewing infrastructure details.
-
-### Using Utility Scripts
-
-#### 1. Run the following command to view the infra details:
-
-```
-chmod +x show.sh
-./show.sh <cluster_prefix>
-```
-
-show.sh - This script retrieves details of the Schematics workspace for a given LSF cluster prefix. It ensures you are logged into the correct account and region, locates the workspace, and then displays its full configuration and state.
-
-#### 2. Copy the job submission script to the cluster by using the command:
-
-```
-chmod +x cp.sh
-./cp.sh <cluster_prefix> submit.sh
-```
-
-cp.sh - This script copies the submit.sh file into your LSF cluster. It validates the account and region, fetches the bastion, login, and management IPs, and then securely transfers the submit.sh file either to the login node (default) or the management node (if mgmt is passed as the third argument).
-
-submit.sh - This script demonstrates how to submit a sample job to the LSF scheduler. It provides a simple command (sleep 30) wrapped in an LSF job submission request (bsub). By default, it requests 8 CPU cores for the job. Users can update:
-
-Job options (for example, change -n 8 to request a different number of cores).
-
-Command (for example, replace sleep 30 with their own workload).
-
-This serves as a template for testing job submission and can be adapted for real workloads.
-
-#### 3. Run the following command to jump to the LSF environment:
-
-```
-chmod +x jump.sh
-./jump.sh <cluster_prefix>
-```
-
-jump.sh - This script connects you directly to the LSF login node. It ensures you are targeting the right IBM Cloud account/region, fetches the bastion, login, and management IPs, and then uses SSH (with the bastion as a jump host) to securely log into the LSF login node.
-
-#### 4. Run the following commands to submit the jobs:
-
-```
-sh submit.sh
-bjobs
-lshosts -w
-```
-
-#### 5. 
Run the following command to destroy the created infrastructure - -``` -chmod +x destroy.sh -./destroy.sh -``` diff --git a/tools/minimal-demo-prod-scripts/catalog_values_demo_deployment.json b/tools/minimal-demo-prod-scripts/catalog_values_demo_deployment.json deleted file mode 100644 index b76993c4..00000000 --- a/tools/minimal-demo-prod-scripts/catalog_values_demo_deployment.json +++ /dev/null @@ -1,69 +0,0 @@ -{ - "ibmcloud_api_key": "XX_API_KEY_XX", - "existing_resource_group": "XX_RESOURCE_GROUP_XX", - "zones": "[\"XX_ZONES_XX\"]", - "ssh_keys": "[\"XX_SSH_KEY_XX\"]", - "cluster_prefix": "XX_PREFIX_XX", - "remote_allowed_ips": "[\"XX_REMOTE_IP_XX\"]", - "app_center_gui_password": "XX_APP_CENTER_GUI_PASSWORD_XX", - - "enable_hyperthreading": true, - "vpn_enabled": false, - "TF_VERSION": "1.9", - "TF_PARALLELISM": "250", - "key_management": "key_protect", - "kms_instance_name": "__NULL__", - "kms_key_name": "__NULL__", - "enable_vpc_flow_logs": true, - "sccwp_enable": false, - "app_config_plan": "basic", - "sccwp_service_plan": "graduated-tier", - "cspm_enabled": "true", - "observability_atracker_enable": "true", - "observability_atracker_target_type": "cloudlogs", - "observability_monitoring_enable": "true", - "observability_logs_enable_for_management": "true", - "observability_logs_enable_for_compute": "true", - "observability_enable_platform_logs": "true", - "observability_enable_metrics_routing": "true", - "observability_logs_retention_period": "7", - "observability_monitoring_on_compute_nodes_enable": "true", - "observability_monitoring_plan": "graduated-tier", - "lsf_version": "fixpack_15", - "vpc_name": "__NULL__", - "compute_subnet_id": "__NULL__", - "login_subnet_id": "__NULL__", - "vpc_cidr": "10.241.0.0/18", - "vpc_cluster_private_subnets_cidr_blocks": "10.241.0.0/20", - "vpc_cluster_login_private_subnets_cidr_blocks": "10.241.16.0/28", - "dns_domain_name": "{compute = \"hpc.local\"}", - "dns_instance_id": "__NULL__", - "dns_custom_resolver_id": "__NULL__", - "bastion_instance": "{ image = \"ibm-ubuntu-22-04-5-minimal-amd64-3\", profile = \"cx2-4x8\" }", - "deployer_instance": "{ image = \"hpc-lsf-fp15-deployer-rhel810-v1\", profile = \"bx2-8x32\" }", - "login_instance": "[{ profile = \"bx2-2x8\", image = \"hpc-lsf-fp15-compute-rhel810-v1\" }]", - "management_instances": "[{ profile = \"bx2-16x64\", count = 2, image = \"hpc-lsf-fp15-rhel810-v1\" }]", - "static_compute_instances": "[{ profile = \"bx2-4x16\", count = 0, image = \"hpc-lsf-fp15-compute-rhel810-v1\" }]", - "dynamic_compute_instances": "[{ profile = \"bx2-4x16\", count = 500, image = \"hpc-lsf-fp15-compute-rhel810-v1\" }]", - "custom_file_shares": "[{mount_path = \"/mnt/vpcstorage/tools\", size = 100, iops = 2000 }, { mount_path = \"/mnt/vpcstorage/data\", size = 100, iops = 6000 }, { mount_path = \"/mnt/scale/tools\", nfs_share = \"\" }]", - "storage_security_group_id": "__NULL__", - "enable_ldap": "false", - "ldap_basedns": "hpc.local", - "ldap_server": "__NULL__", - "ldap_server_cert": "__NULL__", - "ldap_admin_password": "", - "ldap_user_name": "", - "ldap_user_password": "", - "ldap_instance": "[{ profile = \"cx2-2x4\", image = \"ibm-ubuntu-22-04-5-minimal-amd64-1\" }]", - "skip_iam_share_authorization_policy": "false", - "skip_flowlogs_s2s_auth_policy": "false", - "skip_kms_s2s_auth_policy": "false", - "skip_iam_block_storage_authorization_policy": "false", - "existing_bastion_instance_name": "__NULL__", - "existing_bastion_instance_public_ip": "__NULL__", - "existing_bastion_security_group_id": "__NULL__", 
- "existing_bastion_ssh_private_key": "__NULL__", - "enable_dedicated_host": "false", - "enable_cos_integration": "false", - "cos_instance_name": "__NULL__" -} diff --git a/tools/minimal-demo-prod-scripts/catalog_values_minimal_deployment.json b/tools/minimal-demo-prod-scripts/catalog_values_minimal_deployment.json deleted file mode 100644 index 682386ca..00000000 --- a/tools/minimal-demo-prod-scripts/catalog_values_minimal_deployment.json +++ /dev/null @@ -1,69 +0,0 @@ -{ - "ibmcloud_api_key": "XX_API_KEY_XX", - "existing_resource_group": "XX_RESOURCE_GROUP_XX", - "zones": "[\"XX_ZONES_XX\"]", - "ssh_keys": "[\"XX_SSH_KEY_XX\"]", - "cluster_prefix": "XX_PREFIX_XX", - "remote_allowed_ips": "[\"XX_REMOTE_IP_XX\"]", - "app_center_gui_password": "XX_APP_CENTER_GUI_PASSWORD_XX", - - "enable_hyperthreading": true, - "vpn_enabled": false, - "TF_VERSION": "1.9", - "TF_PARALLELISM": "250", - "key_management": "key_protect", - "kms_instance_name": "__NULL__", - "kms_key_name": "__NULL__", - "enable_vpc_flow_logs": false, - "sccwp_enable": false, - "app_config_plan": "basic", - "sccwp_service_plan": "graduated-tier", - "cspm_enabled": "true", - "observability_atracker_enable": "false", - "observability_atracker_target_type": "cloudlogs", - "observability_monitoring_enable": "false", - "observability_logs_enable_for_management": "false", - "observability_logs_enable_for_compute": "false", - "observability_enable_platform_logs": "false", - "observability_enable_metrics_routing": "false", - "observability_logs_retention_period": "7", - "observability_monitoring_on_compute_nodes_enable": "false", - "observability_monitoring_plan": "graduated-tier", - "lsf_version": "fixpack_15", - "vpc_name": "__NULL__", - "compute_subnet_id": "__NULL__", - "login_subnet_id": "__NULL__", - "vpc_cidr": "10.241.0.0/18", - "vpc_cluster_private_subnets_cidr_blocks": "10.241.0.0/20", - "vpc_cluster_login_private_subnets_cidr_blocks": "10.241.16.0/28", - "dns_domain_name": "{compute = \"hpc.local\"}", - "dns_instance_id": "__NULL__", - "dns_custom_resolver_id": "__NULL__", - "bastion_instance": "{ image = \"ibm-ubuntu-22-04-5-minimal-amd64-3\", profile = \"cx2-4x8\" }", - "deployer_instance": "{ image = \"hpc-lsf-fp15-deployer-rhel810-v1\", profile = \"bx2-8x32\" }", - "login_instance": "[{ profile = \"bx2-2x8\", image = \"hpc-lsf-fp15-compute-rhel810-v1\" }]", - "management_instances": "[{ profile = \"bx2-4x16\", count = 1, image = \"hpc-lsf-fp15-rhel810-v1\" }]", - "static_compute_instances": "[{ profile = \"bx2-4x16\", count = 0, image = \"hpc-lsf-fp15-compute-rhel810-v1\" }]", - "dynamic_compute_instances": "[{ profile = \"bx2-4x16\", count = 5, image = \"hpc-lsf-fp15-compute-rhel810-v1\" }]", - "custom_file_shares": "[{mount_path = \"/mnt/vpcstorage/tools\", size = 10, iops = 1000 }, { mount_path = \"/mnt/vpcstorage/data\", size = 10, iops = 1000 }, { mount_path = \"/mnt/scale/tools\", nfs_share = \"\" }]", - "storage_security_group_id": "__NULL__", - "enable_ldap": "false", - "ldap_basedns": "hpc.local", - "ldap_server": "__NULL__", - "ldap_server_cert": "__NULL__", - "ldap_admin_password": "", - "ldap_user_name": "", - "ldap_user_password": "", - "ldap_instance": "[{ profile = \"cx2-2x4\", image = \"ibm-ubuntu-22-04-5-minimal-amd64-1\" }]", - "skip_iam_share_authorization_policy": "false", - "skip_flowlogs_s2s_auth_policy": "false", - "skip_kms_s2s_auth_policy": "false", - "skip_iam_block_storage_authorization_policy": "false", - "existing_bastion_instance_name": "__NULL__", - "existing_bastion_instance_public_ip": 
"__NULL__", - "existing_bastion_security_group_id": "__NULL__", - "existing_bastion_ssh_private_key": "__NULL__", - "enable_dedicated_host": "false", - "enable_cos_integration": "false", - "cos_instance_name": "__NULL__" -} diff --git a/tools/minimal-demo-prod-scripts/catalog_values_production_deployment.json b/tools/minimal-demo-prod-scripts/catalog_values_production_deployment.json deleted file mode 100644 index 3b1d37ca..00000000 --- a/tools/minimal-demo-prod-scripts/catalog_values_production_deployment.json +++ /dev/null @@ -1,69 +0,0 @@ -{ - "ibmcloud_api_key": "XX_API_KEY_XX", - "existing_resource_group": "XX_RESOURCE_GROUP_XX", - "zones": "[\"XX_ZONES_XX\"]", - "ssh_keys": "[\"XX_SSH_KEY_XX\"]", - "cluster_prefix": "XX_PREFIX_XX", - "remote_allowed_ips": "[\"XX_REMOTE_IP_XX\"]", - "app_center_gui_password": "XX_APP_CENTER_GUI_PASSWORD_XX", - - "enable_hyperthreading": true, - "vpn_enabled": false, - "TF_VERSION": "1.9", - "TF_PARALLELISM": "250", - "key_management": "key_protect", - "kms_instance_name": "__NULL__", - "kms_key_name": "__NULL__", - "enable_vpc_flow_logs": true, - "sccwp_enable": true, - "app_config_plan": "standardv2", - "sccwp_service_plan": "graduated-tier", - "cspm_enabled": "true", - "observability_atracker_enable": "true", - "observability_atracker_target_type": "cloudlogs", - "observability_monitoring_enable": "true", - "observability_logs_enable_for_management": "true", - "observability_logs_enable_for_compute": "true", - "observability_enable_platform_logs": "true", - "observability_enable_metrics_routing": "true", - "observability_logs_retention_period": "7", - "observability_monitoring_on_compute_nodes_enable": "true", - "observability_monitoring_plan": "graduated-tier", - "lsf_version": "fixpack_15", - "vpc_name": "__NULL__", - "compute_subnet_id": "__NULL__", - "login_subnet_id": "__NULL__", - "vpc_cidr": "10.241.0.0/18", - "vpc_cluster_private_subnets_cidr_blocks": "10.241.0.0/20", - "vpc_cluster_login_private_subnets_cidr_blocks": "10.241.16.0/28", - "dns_domain_name": "{compute = \"hpc.local\"}", - "dns_instance_id": "__NULL__", - "dns_custom_resolver_id": "__NULL__", - "bastion_instance": "{ image = \"ibm-ubuntu-22-04-5-minimal-amd64-3\", profile = \"cx2-4x8\" }", - "deployer_instance": "{ image = \"hpc-lsf-fp15-deployer-rhel810-v1\", profile = \"bx2-8x32\" }", - "login_instance": "[{ profile = \"bx2-2x8\", image = \"hpc-lsf-fp15-compute-rhel810-v1\" }]", - "management_instances": "[{ profile = \"bx2-16x64\", count = 2, image = \"hpc-lsf-fp15-rhel810-v1\" }]", - "static_compute_instances": "[{ profile = \"bx2-4x16\", count = 5, image = \"hpc-lsf-fp15-compute-rhel810-v1\" }]", - "dynamic_compute_instances": "[{ profile = \"bx2-4x16\", count = 500, image = \"hpc-lsf-fp15-compute-rhel810-v1\" }]", - "custom_file_shares": "[{mount_path = \"/mnt/vpcstorage/tools\", size = 100, iops = 2000 }, { mount_path = \"/mnt/vpcstorage/data\", size = 100, iops = 6000 }, { mount_path = \"/mnt/scale/tools\", nfs_share = \"\" }]", - "storage_security_group_id": "__NULL__", - "enable_ldap": "false", - "ldap_basedns": "hpc.local", - "ldap_server": "__NULL__", - "ldap_server_cert": "__NULL__", - "ldap_admin_password": "", - "ldap_user_name": "", - "ldap_user_password": "", - "ldap_instance": "[{ profile = \"cx2-2x4\", image = \"ibm-ubuntu-22-04-5-minimal-amd64-1\" }]", - "skip_iam_share_authorization_policy": "false", - "skip_flowlogs_s2s_auth_policy": "false", - "skip_kms_s2s_auth_policy": "false", - "skip_iam_block_storage_authorization_policy": "false", - 
"existing_bastion_instance_name": "__NULL__", - "existing_bastion_instance_public_ip": "__NULL__", - "existing_bastion_security_group_id": "__NULL__", - "existing_bastion_ssh_private_key": "__NULL__", - "enable_dedicated_host": "false", - "enable_cos_integration": "true", - "cos_instance_name": "__NULL__" -} diff --git a/tools/minimal-demo-prod-scripts/cp.sh b/tools/minimal-demo-prod-scripts/cp.sh deleted file mode 100755 index e71b6f08..00000000 --- a/tools/minimal-demo-prod-scripts/cp.sh +++ /dev/null @@ -1,47 +0,0 @@ -#!/usr/bin/env bash - -source .env - -REGION=$(echo "$ZONES" | cut -d'-' -f1-2) - -if [ $# -eq 0 ] - then - echo "Pls provide cluster_prefix, i.e. cp.sh " - exit 1 -fi - -CURRENT_ACCOUNT_GUID=$(ibmcloud target --output json | jq -r '.account.guid') -if [ "$CURRENT_ACCOUNT_GUID" != "$ACCOUNT_GUID" ] - then - ibmcloud login -a cloud.ibm.com --apikey "$API_KEY" -r "$REGION" -g "$RESOURCE_GROUP" -fi - -echo "target account $CURRENT_ACCOUNT_GUID" - -CURRENT_REGION=$(ibmcloud target --output json | jq -r '.region.name') -if [ "$CURRENT_REGION" != "$REGION" ] - then - ibmcloud target -r "$REGION" -fi -echo "target region $REGION" - -# ibmcloud is ip $1-bastion-001-fip --output json | jq -r '. | .address' -BASTION_IP=$(ibmcloud is ips | grep "$1"-bastion | grep 001-fip | awk '{print $2}') -echo "Bastion IP: $BASTION_IP" - -# ibmcloud is instance $1-login-001 --output json | jq -r '. | .network_interfaces[0].primary_ip.address' -LOGIN_IP=$(ibmcloud is instances | grep "$1"-login | grep 001 | awk '{print $4}') -echo "Login IP: $LOGIN_IP" - -# ibmcloud is instance $1-mgmt-1-001 --output json | jq -r '. | .network_interfaces[0].primary_ip.address' -LSF_IP=$(ibmcloud is instances | grep "$1"-mgmt-1 | grep 001 | awk '{print $4}') -echo "LSF IP: $LSF_IP" - -if [ "$3" == "mgmt" ] - then - echo "copying $2 to LSF /mnt/vpcstorage/tools..." - scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o IdentitiesOnly=yes -J ubuntu@"$BASTION_IP" "$2" "lsfadmin@$LSF_IP:/mnt/vpcstorage/tools/$(basename "$2")" -else - echo "copying $2 to LOGIN /home/lsfadmin..." - scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o IdentitiesOnly=yes -J ubuntu@"$BASTION_IP" "$2" "lsfadmin@$LOGIN_IP:/home/lsfadmin/$(basename "$2")" -fi diff --git a/tools/minimal-demo-prod-scripts/create_lsf_environment.sh b/tools/minimal-demo-prod-scripts/create_lsf_environment.sh deleted file mode 100755 index 6b7af2c4..00000000 --- a/tools/minimal-demo-prod-scripts/create_lsf_environment.sh +++ /dev/null @@ -1,97 +0,0 @@ -#!/usr/bin/env bash - -# Get installed plugins once -INSTALLED_PLUGINS=$(ibmcloud plugin list | awk 'NR>3 {print $1}') - -ensure_plugin() { - local plugin="$1" - if echo "$INSTALLED_PLUGINS" | grep -qw "$plugin"; then - echo "IBM Cloud $plugin plugin already installed." - else - echo "Installing IBM Cloud $plugin plugin..." - ibmcloud plugin install "$plugin" -f - fi -} - -# Ensure required plugins -ensure_plugin "catalogs-management" -ensure_plugin "schematics" -ensure_plugin "vpc-infrastructure" - -source .env - -REGION=$(echo "$ZONES" | cut -d'-' -f1-2) - -if [ $# -eq 0 ]; then - echo "Pls provide cluster_prefix, i.e. 
create_lsf_environment.sh " - exit 1 -fi - -REMOTE_IP=$(curl -s https://ipv4.icanhazip.com/) - -sed s/XX_PREFIX_XX/"$1"/g "$TEMPLATE_FILE" | \ -sed s/XX_API_KEY_XX/"$API_KEY"/g | \ -sed s/XX_RESOURCE_GROUP_XX/"$RESOURCE_GROUP"/g | \ -sed s/XX_SSH_KEY_XX/"$SSH_KEY"/g | \ -sed s/XX_ZONES_XX/"$ZONES"/g | \ -sed s/XX_REMOTE_IP_XX/"$REMOTE_IP"/g | \ -sed s/XX_APP_CENTER_GUI_PASSWORD_XX/"$APP_CENTER_GUI_PASSWORD"/g > environment_values_"$1".json - -ibmcloud login -a cloud.ibm.com --apikey "$API_KEY" -r "$REGION" -g "$RESOURCE_GROUP" -ibmcloud target -r "$REGION" - -# Run install and capture output -INSTALL_LOG=$(mktemp) -if ! ibmcloud catalog install --timeout 3600 \ - --vl "$LSF_TILE_VERSION" \ - --override-values environment_values_"$1".json \ - --workspace-region "$REGION" \ - --workspace-rg-id "$RESOURCE_GROUP" \ - --workspace-name "$1" \ - --workspace-tf-version 1.9 | tee "$INSTALL_LOG"; then - echo "Install command failed to start" - exit 1 -fi - -# Extract WORKSPACE_ID -WORKSPACE_ID=$(ibmcloud schematics workspace list --output json \ - | jq -r '.workspaces[] | select(.name=="'"$1"'") | .id') - -# Check if FAILED appears in install output -if grep -q "FAILED" "$INSTALL_LOG"; then - echo "❌ Installation failed for workspace: $WORKSPACE_ID" - echo "Last 100 lines of logs for quick reference:" - ibmcloud schematics logs --id "$WORKSPACE_ID" | tail -n 100 - echo "For full logs, check schematics workspace at: https://cloud.ibm.com/schematics/workspaces/$WORKSPACE_ID/jobs?region=$REGION" - exit 1 -fi - -# If success, print the Output IPs -BASTION_IP=$(ibmcloud is ips | grep "$1"-bastion | grep 001-fip | awk '{print $2}') -echo "Bastion IP: $BASTION_IP" - -LOGIN_IP=$(ibmcloud is instances | grep "$1"-login | grep 001 | awk '{print $4}') -echo "Login IP: $LOGIN_IP" - -LSF_IP=$(ibmcloud is instances | grep "$1"-mgmt-1 | grep 001 | awk '{print $4}') -echo "LSF IP: $LSF_IP" - -echo "" -echo "✅ LSF environment setup completed!" -echo "" -echo "Next Steps:" -echo "----------------------------------------" -echo "Copy the job submission script to the cluster:" -echo " ./cp.sh $1 submit.sh" -echo "" -echo "To get the details of workspace Environment:" -echo " ./show.sh $1" -echo "" -echo "Jump to the LSF Environment:" -echo " ./jump.sh $1" -echo "" -echo "Submit jobs:" -echo " ./submit.sh" -echo " bjobs ..." -echo " bhosts ..." -echo "----------------------------------------" diff --git a/tools/minimal-demo-prod-scripts/destroy.sh b/tools/minimal-demo-prod-scripts/destroy.sh deleted file mode 100755 index 9d69d01e..00000000 --- a/tools/minimal-demo-prod-scripts/destroy.sh +++ /dev/null @@ -1,47 +0,0 @@ -#!/usr/bin/env bash - -source .env - -REGION=$(echo "$ZONES" | cut -d'-' -f1-2) - -if [ $# -eq 0 ] - then - echo "Pls provide cluster_prefix, i.e. destroy.sh " - exit 1 -fi - -CURRENT_ACCOUNT_GUID=$(ibmcloud target --output json | jq -r '.account.guid') -if [ "$CURRENT_ACCOUNT_GUID" != "$ACCOUNT_GUID" ] - then - ibmcloud login -a cloud.ibm.com --apikey "$API_KEY" -r "$REGION" -g "$RESOURCE_GROUP" -fi - -echo "target account $CURRENT_ACCOUNT_GUID" - -CURRENT_REGION=$(ibmcloud target --output json | jq -r '.region.name') -if [ "$CURRENT_REGION" != "$REGION" ] - then - ibmcloud target -r "$REGION" -fi -echo "target region $REGION" - -echo "finding schematics workspace..." 
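-# The jq filter below selects the workspace whose name matches the cluster
-# prefix exactly (show.sh uses a looser grep-based match instead).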
-# Extract WORKSPACE_ID
-WORKSPACE_ID=$(ibmcloud schematics workspace list --output json \
-  | jq -r '.workspaces[] | select(.name=="'"$1"'") | .id')
-
-ibmcloud schematics workspace get --id "$WORKSPACE_ID"
-
-read -r -p "Do you want to destroy? (yes/no) " yn
-
-case $yn in
-  yes ) echo ok, we will proceed;;
-  no ) echo exiting...;
-    exit;;
-  * ) echo invalid response;
-    exit 1;;
-esac
-
-rm environment_values_"$1".json
-
-ibmcloud schematics destroy --id "$WORKSPACE_ID" -f
diff --git a/tools/minimal-demo-prod-scripts/jump.sh b/tools/minimal-demo-prod-scripts/jump.sh
deleted file mode 100755
index 335878fe..00000000
--- a/tools/minimal-demo-prod-scripts/jump.sh
+++ /dev/null
@@ -1,38 +0,0 @@
-#!/usr/bin/env bash
-
-source .env
-
-REGION=$(echo "$ZONES" | cut -d'-' -f1-2)
-
-if [ $# -eq 0 ]
-  then
-    echo "Pls provide cluster_prefix, i.e. jump.sh <cluster_prefix>"
-    exit 1
-fi
-
-CURRENT_ACCOUNT_GUID=$(ibmcloud target --output json | jq -r '.account.guid')
-if [ "$CURRENT_ACCOUNT_GUID" != "$ACCOUNT_GUID" ]
-  then
-    ibmcloud login -a cloud.ibm.com --apikey "$API_KEY" -r "$REGION" -g "$RESOURCE_GROUP"
-fi
-
-echo "target account $CURRENT_ACCOUNT_GUID"
-
-CURRENT_REGION=$(ibmcloud target --output json | jq -r '.region.name')
-if [ "$CURRENT_REGION" != "$REGION" ]
-  then
-    ibmcloud target -r "$REGION"
-fi
-echo "target region $REGION"
-
-BASTION_IP=$(ibmcloud is ips | grep "$1"-bastion | grep 001-fip | awk '{print $2}')
-echo "Bastion IP: $BASTION_IP"
-
-LOGIN_IP=$(ibmcloud is instances | grep "$1"-login | grep 001 | awk '{print $4}')
-echo "Login IP: $LOGIN_IP"
-
-LSF_IP=$(ibmcloud is instances | grep "$1"-mgmt-1 | grep 001 | awk '{print $4}')
-echo "LSF IP: $LSF_IP"
-
-echo "Jumping to Login Node..."
-ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -J ubuntu@"$BASTION_IP" lsfadmin@"$LOGIN_IP"
diff --git a/tools/minimal-demo-prod-scripts/show.sh b/tools/minimal-demo-prod-scripts/show.sh
deleted file mode 100755
index 69c5e3b7..00000000
--- a/tools/minimal-demo-prod-scripts/show.sh
+++ /dev/null
@@ -1,31 +0,0 @@
-#!/usr/bin/env bash
-
-source .env
-
-REGION=$(echo "$ZONES" | cut -d'-' -f1-2)
-
-if [ $# -eq 0 ]
-  then
-    echo "Pls provide cluster_prefix, i.e. show.sh <cluster_prefix>"
-    exit 1
-fi
-
-CURRENT_ACCOUNT_GUID=$(ibmcloud target --output json | jq -r '.account.guid')
-if [ "$CURRENT_ACCOUNT_GUID" != "$ACCOUNT_GUID" ]
-  then
-    ibmcloud login -a cloud.ibm.com --apikey "$API_KEY" -r "$REGION" -g "$RESOURCE_GROUP"
-fi
-
-echo "target account $CURRENT_ACCOUNT_GUID"
-
-CURRENT_REGION=$(ibmcloud target --output json | jq -r '.region.name')
-if [ "$CURRENT_REGION" != "$REGION" ]
-  then
-    ibmcloud target -r "$REGION"
-fi
-echo "target region $REGION"
-
-echo "finding schematics workspace..."
-WORKSPACE_ID=$(ibmcloud schematics workspace list | grep "$1" | awk '{ print $2 }')
-
-ibmcloud schematics workspace get --id "$WORKSPACE_ID"
diff --git a/tools/minimal-demo-prod-scripts/submit.sh b/tools/minimal-demo-prod-scripts/submit.sh
deleted file mode 100755
index e79a5a70..00000000
--- a/tools/minimal-demo-prod-scripts/submit.sh
+++ /dev/null
@@ -1,14 +0,0 @@
-#!/usr/bin/env bash
-
-# Sample Job Submission
-# ---------------------
-# This example submits a simple LSF job that:
-#   - Requests 8 cores (-n 8)
-#   - Runs the command: sleep 30
-#
-# You can modify the command (e.g., sleep 30) or job options (-n 8)
-# to suit your own workload requirements.
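-#
-# An illustrative variation (hypothetical workload and filename): request 4
-# cores and write the job output to a file, where -o names the output file
-# and %J expands to the LSF job ID:
-#   bsub -n 4 -o job_output.%J ./my_workload.sh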
- -CMD="bsub -n 8 sleep 30" -echo "Running: $CMD" -$CMD diff --git a/variables.tf b/variables.tf index 746569fc..6cef5666 100644 --- a/variables.tf +++ b/variables.tf @@ -20,7 +20,7 @@ variable "lsf_version" { variable "scheduler" { type = string default = null - description = "Select one of the scheduler (Scale/LSF/Symphony/Slurm/null)" + description = "Select one of the scheduler (LSF/Symphony/Slurm/null)" } variable "ibm_customer_number" { @@ -133,11 +133,7 @@ variable "vpc_cluster_login_private_subnets_cidr_blocks" { default = "10.241.16.0/28" description = "Provide the CIDR block required for the creation of the login cluster's private subnet. Only one CIDR block is needed. If using a hybrid environment, modify the CIDR block to avoid conflicts with any on-premises CIDR blocks. Since the login subnet is used only for the creation of login virtual server instances, provide a CIDR range of /28." validation { - condition = can( - regex( - "^((25[0-5]|2[0-4][0-9]|1[0-9]{2}|[1-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|1[0-9]{2}|[1-9]?[0-9])/(2[8-9]|3[0-2])$", trimspace(var.vpc_cluster_login_private_subnets_cidr_blocks) - ) - ) + condition = tonumber(regex("^.*?/(\\d+)$", var.vpc_cluster_login_private_subnets_cidr_blocks)[0]) <= 28 error_message = "This subnet is used to create only a login virtual server instance. Providing a larger CIDR size will waste the usage of available IPs. A CIDR range of /28 is sufficient for the creation of the login subnet." } } @@ -166,8 +162,8 @@ variable "deployer_instance" { ############################################################################## # Compute Variables ############################################################################## -variable "client_subnet_id" { - type = string +variable "client_subnets" { + type = list(string) default = null description = "Name of an existing subnets in which the cluster resources will be deployed. If no value is given, then new subnet(s) will be provisioned for the cluster. [Learn more](https://cloud.ibm.com/docs/vpc)" } @@ -189,12 +185,12 @@ variable "client_instances" { default = [{ profile = "cx2-2x4" count = 0 - image = "ibm-redhat-8-10-minimal-amd64-6" + image = "ibm-redhat-8-10-minimal-amd64-4" }] description = "Number of instances to be launched for client." } -variable "compute_subnet_id" { +variable "cluster_subnet_id" { type = string default = null description = "Name of an existing subnets in which the cluster resources will be deployed. If no value is given, then new subnet(s) will be provisioned for the cluster. [Learn more](https://cloud.ibm.com/docs/vpc)" @@ -225,17 +221,15 @@ variable "management_instances" { variable "static_compute_instances" { type = list( object({ - profile = string - count = number - image = string - filesystem = optional(string) + profile = string + count = number + image = string }) ) default = [{ - profile = "cx2-2x4" - count = 0 - image = "ibm-redhat-8-10-minimal-amd64-4" - filesystem = "/ibm/fs1" + profile = "cx2-2x4" + count = 0 + image = "ibm-redhat-8-10-minimal-amd64-4" }] description = "Min Number of instances to be launched for compute cluster." } @@ -258,14 +252,14 @@ variable "dynamic_compute_instances" { variable "compute_gui_username" { type = string - default = "" + default = "admin" sensitive = true description = "GUI user to perform system management and monitoring tasks on compute cluster." 
} variable "compute_gui_password" { type = string - default = "" + default = "hpc@IBMCloud" sensitive = true description = "Password for compute cluster GUI" } @@ -273,8 +267,8 @@ variable "compute_gui_password" { ############################################################################## # Storage Variables ############################################################################## -variable "storage_subnet_id" { - type = string +variable "storage_subnets" { + type = list(string) default = null description = "Name of an existing subnets in which the cluster resources will be deployed. If no value is given, then new subnet(s) will be provisioned for the cluster. [Learn more](https://cloud.ibm.com/docs/vpc)" } @@ -291,11 +285,11 @@ variable "storage_instances" { profile = string count = number image = string - filesystem = optional(string) + filesystem = string }) ) default = [{ - profile = "bx2d-32x128" + profile = "bx2d-2x8" count = 0 image = "ibm-redhat-8-10-minimal-amd64-4" filesystem = "/ibm/fs1" @@ -309,7 +303,7 @@ variable "storage_servers" { profile = string count = number image = string - filesystem = optional(string) + filesystem = string }) ) default = [{ @@ -321,20 +315,8 @@ variable "storage_servers" { description = "Number of BareMetal Servers to be launched for storage cluster." } -variable "tie_breaker_bm_server_profile" { - type = string - default = null - description = "Specify the bare metal server profile type name to be used for creating the bare metal Tie breaker node. If no value is provided, the storage bare metal server profile will be used as the default. For more information, see [bare metal server profiles](https://cloud.ibm.com/docs/vpc?topic=vpc-bare-metal-servers-profile&interface=ui). [Tie Breaker Node](https://www.ibm.com/docs/en/storage-scale/5.2.2?topic=quorum-node-tiebreaker-disks)" -} - -variable "scale_management_vsi_profile" { - type = string - default = "bx2-8x32" - description = "The virtual server instance profile type name to be used to create the Management node. For more information, see [Instance Profiles](https://cloud.ibm.com/docs/vpc?topic=vpc-profiles&interface=ui)." -} - -variable "protocol_subnet_id" { - type = string +variable "protocol_subnets" { + type = list(string) default = null description = "Name of an existing subnets in which the cluster resources will be deployed. If no value is given, then new subnet(s) will be provisioned for the cluster. [Learn more](https://cloud.ibm.com/docs/vpc)" } @@ -350,11 +332,13 @@ variable "protocol_instances" { object({ profile = string count = number + image = string }) ) default = [{ profile = "bx2-2x8" count = 0 + image = "ibm-redhat-8-10-minimal-amd64-4" }] description = "Number of instances to be launched for protocol hosts." } @@ -367,14 +351,14 @@ variable "colocate_protocol_instances" { variable "storage_gui_username" { type = string - default = "" + default = "admin" sensitive = true description = "GUI user to perform system management and monitoring tasks on storage cluster." } variable "storage_gui_password" { type = string - default = "" + default = "hpc@IBMCloud" sensitive = true description = "Password for storage cluster GUI" } @@ -496,12 +480,6 @@ variable "existing_kms_instance_guid" { description = "The existing KMS instance guid." 
} -variable "key_protect_instance_id" { - type = string - default = null - description = "An existing Key Protect instance used for filesystem encryption" -} - # variable "hpcs_instance_name" { # type = string # default = null @@ -565,33 +543,39 @@ variable "filesystem_config" { default_metadata_replica = number max_data_replica = number max_metadata_replica = number + mount_point = string }) ) default = null description = "File system configurations." } -variable "filesets_config" { - type = list( - object({ - client_mount_path = string - quota = number - }) - ) - default = null - description = "Fileset configurations." -} +# variable "filesets_config" { +# type = list( +# object({ +# fileset = string +# filesystem = string +# junction_path = string +# client_mount_path = string +# quota = number +# }) +# ) +# default = null +# description = "Fileset configurations." +# } variable "afm_instances" { type = list( object({ profile = string count = number + image = string }) ) default = [{ - profile = "bx2-32x128" + profile = "bx2-2x8" count = 0 + image = "ibm-redhat-8-10-minimal-amd64-4" }] description = "Number of instances to be launched for afm hosts." } @@ -619,32 +603,19 @@ variable "afm_cos_config" { bucket_storage_class = "smart" bucket_type = "region_location" }] - nullable = false + # default = [{ + # afm_fileset = "afm_fileset" + # mode = "iw" + # cos_instance = null + # bucket_name = null + # bucket_region = "us-south" + # cos_service_cred_key = "" + # bucket_storage_class = "smart" + # bucket_type = "region_location" + # }] description = "AFM configurations." } -variable "scale_afm_bucket_config_details" { - description = "Scale AFM COS Bucket and Configuration Details" - type = list(object({ - bucket = string - endpoint = string - fileset = string - filesystem = string - mode = string - })) - default = null -} - -variable "scale_afm_cos_hmac_key_params" { - description = "Scale AFM COS HMAC Key Details" - type = list(object({ - akey = string - bucket = string - skey = string - })) - default = null -} - ############################################################################## # LSF specific Variables ############################################################################## @@ -701,7 +672,7 @@ variable "app_center_gui_password" { variable "observability_atracker_enable" { type = bool - default = false + default = true description = "Activity Tracker Event Routing to configure how to route auditing events. While multiple Activity Tracker instances can be created, only one tracker is needed to capture all events. Creating additional trackers is unnecessary if an existing Activity Tracker is already integrated with a COS bucket. In such cases, set the value to false, as all events can be monitored and accessed through the existing Activity Tracker." } @@ -718,7 +689,7 @@ variable "observability_atracker_target_type" { variable "observability_monitoring_enable" { description = "Set false to disable IBM Cloud Monitoring integration. If enabled, infrastructure and LSF application metrics from Management Nodes will be ingested." 
type = bool - default = false + default = true } variable "observability_logs_enable_for_management" { @@ -813,16 +784,28 @@ variable "cloud_metrics_data_bucket" { description = "cloud metrics data bucket" } +# variable "scc_cos_bucket" { +# type = string +# default = null +# description = "scc cos bucket" +# } + +# variable "scc_cos_instance_crn" { +# type = string +# default = null +# description = "scc cos instance crn" +# } + ############################################################################# # VARIABLES TO BE CHECKED ############################################################################## -variable "sccwp_enable" { - type = bool - default = false - description = "Flag to enable SCC instance creation. If true, an instance of SCC (Security and Compliance Center) will be created." -} + + + + + ############################################################################# # LDAP variables @@ -841,7 +824,7 @@ variable "ldap_basedns" { variable "ldap_server" { type = string - default = "null" + default = "" description = "Provide the IP address for the existing LDAP server. If no address is given, a new LDAP server will be created." } @@ -872,11 +855,11 @@ variable "ldap_user_password" { description = "The LDAP user password should be 8 to 20 characters long, with a mix of at least three alphabetic characters, including one uppercase and one lowercase letter. It must also include two numerical digits and at least one special character from (~@_+:) are required.It is important to avoid including the username in the password for enhanced security.[This value is ignored for an existing LDAP server]." } -# variable "ldap_instance_key_pair" { -# type = list(string) -# default = null -# description = "Name of the SSH key configured in your IBM Cloud account that is used to establish a connection to the LDAP Server. Make sure that the SSH key is present in the same resource group and region where the LDAP Servers are provisioned. If you do not have an SSH key in your IBM Cloud account, create one by using the [SSH keys](https://cloud.ibm.com/docs/vpc?topic=vpc-ssh-keys) instructions." -# } +variable "ldap_instance_key_pair" { + type = list(string) + default = null + description = "Name of the SSH key configured in your IBM Cloud account that is used to establish a connection to the LDAP Server. Make sure that the SSH key is present in the same resource group and region where the LDAP Servers are provisioned. If you do not have an SSH key in your IBM Cloud account, create one by using the [SSH keys](https://cloud.ibm.com/docs/vpc?topic=vpc-ssh-keys) instructions." +} variable "ldap_instance" { type = list( @@ -907,6 +890,12 @@ variable "scale_encryption_type" { description = "To enable filesystem encryption, specify either 'key_protect' or 'gklm'. If neither is specified, the default value will be 'null' and encryption is disabled" } +variable "gklm_instance_key_pair" { + type = list(string) + default = null + description = "The key pair to use to launch the GKLM host." +} + variable "gklm_instances" { type = list( object({ @@ -918,11 +907,23 @@ variable "gklm_instances" { default = [{ profile = "bx2-2x8" count = 2 - image = "hpcc-scale-gklm4202-v2-5-2" + image = "ibm-redhat-8-10-minimal-amd64-4" }] - description = "Number of GKLM instances to be launched for scale cluster." + description = "Number of instances to be launched for client." 
} +# variable "scale_encryption_admin_default_password" { +# type = string +# default = null +# description = "The default administrator password used for resetting the admin password based on the user input. The password has to be updated which was configured during the GKLM installation." +# } + +# variable "scale_encryption_admin_username" { +# type = string +# default = null +# description = "The default Admin username for Security Key Lifecycle Manager(GKLM)." +# } + variable "scale_encryption_admin_password" { type = string default = null @@ -935,15 +936,9 @@ variable "scale_ansible_repo_clone_path" { description = "Path to clone github.com/IBM/ibm-spectrum-scale-install-infra." } -variable "scale_config_path" { - type = string - default = "/opt/IBM/ibm-spectrumscale-cloud-deploy" - description = "Path to clone github.com/IBM/ibm-spectrum-scale-install-infra." -} - variable "spectrumscale_rpms_path" { type = string - default = "/opt/IBM/gpfs_cloud_rpms" + default = "/opt/ibm/gpfs_cloud_rpms" description = "Path that contains IBM Spectrum Scale product cloud rpms." } @@ -1012,6 +1007,12 @@ variable "bastion_fip" { default = null description = "bastion fip" } + +variable "scale_compute_cluster_filesystem_mountpoint" { + type = string + default = "/gpfs/fs1" + description = "Compute cluster (accessingCluster) Filesystem mount point." +} ############################################################################## # Dedicatedhost Variables ############################################################################## @@ -1056,7 +1057,6 @@ variable "resource_group_ids" { default = null description = "Map describing resource groups to create or reference" } - ############################################################################## # Login Variables ############################################################################## @@ -1071,7 +1071,7 @@ variable "login_instance" { profile = "bx2-2x8" image = "hpcaas-lsf10-rhel810-compute-v8" }] - description = "Number of instances to be launched for login node." + description = "Specify the list of login node configurations, including instance profile, image name. By default, login node is created using Fix Pack 15. If deploying with Fix Pack 14, set lsf_version to fixpack_14 and use the corresponding image hpc-lsf-fp14-compute-rhel810-v1. The selected image must align with the specified lsf_version, any mismatch may lead to deployment failures." } ############################################################################## @@ -1113,7 +1113,7 @@ variable "sccwp_service_plan" { } } -variable "bms_boot_drive_encryption" { +variable "sccwp_enable" { type = bool default = true description = "Set this flag to true to create an instance of IBM Security and Compliance Center (SCC) Workload Protection. When enabled, it provides tools to discover and prioritize vulnerabilities, monitor for security threats, and enforce configuration, permission, and compliance policies across the full lifecycle of your workloads. To view the data on the dashboard, enable the cspm to create the app configuration and required trusted profile policies.[Learn more](https://cloud.ibm.com/docs/workload-protection?topic=workload-protection-about)." @@ -1127,74 +1127,14 @@ variable "cspm_enabled" { } variable "app_config_plan" { - description = "To enable the encryption for the boot drive of bare metal server. Select true or false" + description = "Specify the IBM service pricing plan for the app configuration. 
Allowed values are 'basic', 'lite', 'standardv2', 'enterprise'." type = string default = "basic" validation { - error_message = "Plan for App configuration can only be basic, standardv2, enterprise.." + error_message = "Plan for App configuration can only be basic, lite, standardv2, enterprise.." condition = contains( - ["basic", "standardv2", "enterprise"], + ["basic", "lite", "standardv2", "enterprise"], var.app_config_plan ) } } - -variable "client_security_group_name" { - type = string - default = null - description = "Provide the security group name to provision the client nodes. If set to null, the solution will automatically create the necessary security group and rules. If you choose to use an existing security group, ensure it has the appropriate rules configured for the client nodes to function properly." - validation { - condition = anytrue([var.vpc_name != null && var.client_security_group_name != null, var.client_security_group_name == null]) - error_message = "If the client_security_group_name are provided, the user should also provide the vpc_name." - } -} - -variable "compute_security_group_name" { - type = string - default = null - description = "Provide the security group name to provision the compute nodes. If set to null, the solution will automatically create the necessary security group and rules. If you choose to use an existing security group, ensure it has the appropriate rules configured for the compute nodes to function properly." - validation { - condition = anytrue([var.vpc_name != null && var.compute_security_group_name != null, var.compute_security_group_name == null]) - error_message = "If the compute_security_group_name are provided, the user should also provide the vpc_name." - } -} - -variable "storage_security_group_name" { - type = string - default = null - description = "Provide the security group name to provision the storage node. If set to null, the solution will automatically create the necessary security group and rules. If you choose to use an existing security group, ensure it has the appropriate rules configured for the storage node to function properly." - validation { - condition = anytrue([var.vpc_name != null && var.storage_security_group_name != null, var.storage_security_group_name == null]) - error_message = "If the storage_security_group_name are provided, the user should also provide the vpc_name." - } -} - -variable "ldap_security_group_name" { - type = string - default = null - description = "Provide the security group name to provision the ldap nodes. If set to null, the solution will automatically create the necessary security group and rules. If you choose to use an existing security group, ensure it has the appropriate rules configured for the ldap nodes to function properly." - validation { - condition = anytrue([var.vpc_name != null && var.ldap_security_group_name != null, var.ldap_security_group_name == null]) - error_message = "If the ldap_security_group_name are provided, the user should also provide the vpc_name." - } -} - -variable "gklm_security_group_name" { - type = string - default = null - description = "Provide the security group name to provision the gklm nodes. If set to null, the solution will automatically create the necessary security group and rules. If you choose to use an existing security group, ensure it has the appropriate rules configured for the gklm nodes to function properly." 
- validation { - condition = anytrue([var.vpc_name != null && var.gklm_security_group_name != null, var.gklm_security_group_name == null]) - error_message = "If the gklm_security_group_name are provided, the user should also provide the vpc_name." - } -} - -variable "login_security_group_name" { - type = string - default = null - description = "Provide the security group name to provision the bastion node. If set to null, the solution will automatically create the necessary security group and rules. If you choose to use an existing security group, ensure it has the appropriate rules configured for the bastion node to function properly." - validation { - condition = anytrue([var.vpc_name != null && var.login_security_group_name != null, var.login_security_group_name == null]) - error_message = "If the login_security_group_name are provided, the user should also provide the vpc_name." - } -} From 602f8d3047f3e6feb361a4ca2aec0c49e47b734d Mon Sep 17 00:00:00 2001 From: Nupur Goyal Date: Tue, 21 Oct 2025 16:09:41 +0530 Subject: [PATCH 2/2] fixing pre-commit --- .tekton/scripts/cos_data.py | 13 +- ibm_catalog.json | 1 + modules/common/scripts/prepare_client_inv.py | 39 ++-- modules/common/scripts/prepare_ldap_inv.py | 16 +- .../scripts/prepare_remote_mount_inv.py | 24 +-- .../common/scripts/prepare_scale_inv_ini.py | 168 +++++++----------- .../scripts/wait_for_ssh_availability.py | 12 +- modules/resource_provisioner/locals.tf | 2 +- 8 files changed, 119 insertions(+), 156 deletions(-) diff --git a/.tekton/scripts/cos_data.py b/.tekton/scripts/cos_data.py index 8bd063c0..2e2b5778 100644 --- a/.tekton/scripts/cos_data.py +++ b/.tekton/scripts/cos_data.py @@ -7,7 +7,6 @@ class DownloadFromCOS: - def upload_file(self, bucket_name, file_path, filename): print(f"-- working on file {filename}") try: @@ -16,10 +15,10 @@ def upload_file(self, bucket_name, file_path, filename): ) print(f"--- {filename} successfully uploaded in {file_path}!") except ClientError as be: - print("[CLIENT ERROR]: {0}\n".format(be)) + print(f"[CLIENT ERROR]: {be}\n") self.return_code += 1 except Exception as e: - print("[CLIENT ERROR] Unable to upload file to COS: {0}".format(e)) + print(f"[CLIENT ERROR] Unable to upload file to COS: {e}") self.return_code += 1 def upload_multiple_files(self, FILE_NAME_FULLPATH, bucket_name, file_path): @@ -35,10 +34,10 @@ def download_file(self, bucket_name, filename): ) print(f"--- {filename} successfully downloaded!") except ClientError as be: - print("[CLIENT ERROR]: {0}\n".format(be)) + print(f"[CLIENT ERROR]: {be}\n") self.return_code += 1 except Exception as e: - print("[CLIENT ERROR] Unable to download file from COS: {0}".format(e)) + print(f"[CLIENT ERROR] Unable to download file from COS: {e}") self.return_code += 1 def delete_file(self, bucket_name, filename): @@ -47,10 +46,10 @@ def delete_file(self, bucket_name, filename): self.client.delete_object(Bucket=bucket_name, Key=filename) print(f"--- {filename} successfully deleted!") except ClientError as be: - print("[CLIENT ERROR]: {0}\n".format(be)) + print(f"[CLIENT ERROR]: {be}\n") self.return_code += 1 except Exception as e: - print("[CLIENT ERROR] Unable to download file from COS: {0}".format(e)) + print(f"[CLIENT ERROR] Unable to delete file from COS: {e}") self.return_code += 1 def main(self): diff --git a/ibm_catalog.json b/ibm_catalog.json index 8bd5af3e..adb3b716 100644 --- a/ibm_catalog.json +++ b/ibm_catalog.json @@ -48,6 +48,7 @@ "name": "Cluster-with-LSF", "install_type": "fullstack", "working_directory": 
"solutions/lsf", + "terraform_version": "1.10.5", "compliance": { "authority": "scc-v3", "profiles": [ diff --git a/modules/common/scripts/prepare_client_inv.py b/modules/common/scripts/prepare_client_inv.py index e7521a2f..8e1ef914 100755 --- a/modules/common/scripts/prepare_client_inv.py +++ b/modules/common/scripts/prepare_client_inv.py @@ -1,5 +1,4 @@ #!/usr/bin/env python3 -# -*- coding: utf-8 -*- """ Copyright IBM Corporation 2023 @@ -38,12 +37,11 @@ def read_json_file(json_path): tf_inv = json.load(json_handler) except json.decoder.JSONDecodeError: print( - "Provided terraform inventory file (%s) is not a valid json." - % json_path + f"Provided terraform inventory file ({json_path}) is not a valid json." ) sys.exit(1) except OSError: - print("Provided terraform inventory file (%s) does not exist." % json_path) + print(f"Provided terraform inventory file ({json_path}) does not exist.") sys.exit(1) return tf_inv @@ -57,7 +55,7 @@ def write_to_file(filepath, filecontent): def prepare_ansible_playbook_mount_fileset_client(hosts_config): """Write to playbook""" - content = """--- + content = f"""--- # Mounting mount filesets on client nodes - hosts: {hosts_config} collections: @@ -68,9 +66,7 @@ def prepare_ansible_playbook_mount_fileset_client(hosts_config): - nfs_client_prepare - nfs_client_configure - {{ role: auth_configure, when: enable_ldap }} -""".format( - hosts_config=hosts_config - ) +""" return content @@ -168,24 +164,27 @@ def initialize_node_details(client_cluster_instance_names, key_file): # Step-1: Read the inventory file STRG_TF = read_json_file(ARGUMENTS.client_tf_inv_path) if ARGUMENTS.verbose: - print("Parsed storage terraform output: %s" % json.dumps(STRG_TF, indent=4)) + print(f"Parsed storage terraform output: {json.dumps(STRG_TF, indent=4)}") # Step-2: Cleanup the Client Playbook file cleanup( - "%s/%s/%s_mount_cloud_playbook.yaml" - % (ARGUMENTS.install_infra_path, "ibm-spectrum-scale-install-infra", "client") + "{}/{}/{}_mount_cloud_playbook.yaml".format( + ARGUMENTS.install_infra_path, "ibm-spectrum-scale-install-infra", "client" + ) ) # Step-3: Cleanup the Clinet inventory file cleanup( - "%s/%s/%s_mount_inventory.ini" - % (ARGUMENTS.install_infra_path, "ibm-spectrum-scale-install-infra", "client") + "{}/{}/{}_mount_inventory.ini".format( + ARGUMENTS.install_infra_path, "ibm-spectrum-scale-install-infra", "client" + ) ) # Step-4: Create playbook playbook_content = prepare_ansible_playbook_mount_fileset_client("client_nodes") write_to_file( - "%s/%s/client_cloud_playbook.yaml" - % (ARGUMENTS.install_infra_path, "ibm-spectrum-scale-install-infra"), + "{}/{}/client_cloud_playbook.yaml".format( + ARGUMENTS.install_infra_path, "ibm-spectrum-scale-install-infra" + ), playbook_content, ) @@ -211,8 +210,9 @@ def initialize_node_details(client_cluster_instance_names, key_file): node_template = node_template + each_entry + "\n" with open( - "%s/%s/client_inventory.ini" - % (ARGUMENTS.install_infra_path, "ibm-spectrum-scale-install-infra"), + "{}/{}/client_inventory.ini".format( + ARGUMENTS.install_infra_path, "ibm-spectrum-scale-install-infra" + ), "w", ) as configfile: configfile.write("[client_nodes]" + "\n") @@ -228,8 +228,9 @@ def initialize_node_details(client_cluster_instance_names, key_file): ARGUMENTS.ldap_admin_password, ) with open( - "%s/%s/client_inventory.ini" - % (ARGUMENTS.install_infra_path, "ibm-spectrum-scale-install-infra"), + "{}/{}/client_inventory.ini".format( + ARGUMENTS.install_infra_path, "ibm-spectrum-scale-install-infra" + ), "w", ) as 
configfile: configfile.write("[client_nodes]" + "\n") diff --git a/modules/common/scripts/prepare_ldap_inv.py b/modules/common/scripts/prepare_ldap_inv.py index 6fdaae08..17d93aef 100644 --- a/modules/common/scripts/prepare_ldap_inv.py +++ b/modules/common/scripts/prepare_ldap_inv.py @@ -1,5 +1,4 @@ #!/usr/bin/env python3 -# -*- coding: utf-8 -*- """ Copyright IBM Corporation 2023 @@ -127,8 +126,9 @@ def initialize_node_details(ldap_instance_ips, key_file): if ARGUMENTS.ldap_basedns != "null": ldap_playbook_content = prepare_ansible_playbook_ldap_server("ldap_nodes") write_to_file( - "%s/%s/ldap_configure_playbook.yaml" - % (ARGUMENTS.install_infra_path, "ibm-spectrum-scale-install-infra"), + "{}/{}/ldap_configure_playbook.yaml".format( + ARGUMENTS.install_infra_path, "ibm-spectrum-scale-install-infra" + ), ldap_playbook_content, ) if ARGUMENTS.verbose: @@ -155,8 +155,9 @@ def initialize_node_details(ldap_instance_ips, key_file): node_template = node_template + each_entry + "\n" with open( - "%s/%s/ldap_inventory.ini" - % (ARGUMENTS.install_infra_path, "ibm-spectrum-scale-install-infra"), + "{}/{}/ldap_inventory.ini".format( + ARGUMENTS.install_infra_path, "ibm-spectrum-scale-install-infra" + ), "w", ) as configfile: configfile.write("[ldap_nodes]" + "\n") @@ -170,8 +171,9 @@ def initialize_node_details(ldap_instance_ips, key_file): ARGUMENTS.ldap_user_password, ) with open( - "%s/%s/ldap_inventory.ini" - % (ARGUMENTS.install_infra_path, "ibm-spectrum-scale-install-infra"), + "{}/{}/ldap_inventory.ini".format( + ARGUMENTS.install_infra_path, "ibm-spectrum-scale-install-infra" + ), "w", ) as configfile: configfile.write("[ldap_nodes]" + "\n") diff --git a/modules/common/scripts/prepare_remote_mount_inv.py b/modules/common/scripts/prepare_remote_mount_inv.py index 4d6028f5..24036d70 100755 --- a/modules/common/scripts/prepare_remote_mount_inv.py +++ b/modules/common/scripts/prepare_remote_mount_inv.py @@ -1,5 +1,4 @@ #!/usr/bin/env python3 -# -*- coding: utf-8 -*- """ Copyright IBM Corporation 2018 @@ -39,12 +38,11 @@ def read_json_file(json_path): tf_inv = json.load(json_handler) except json.decoder.JSONDecodeError: print( - "Provided terraform inventory file (%s) is not a valid json." - % json_path + f"Provided terraform inventory file ({json_path}) is not a valid json." ) sys.exit(1) except OSError: - print("Provided terraform inventory file (%s) does not exist." 
% json_path) + print(f"Provided terraform inventory file ({json_path}) does not exist.") sys.exit(1) return tf_inv @@ -191,18 +189,18 @@ def initialize_node_details(storage_gui_ip, user, key_file): # Step-1: Read the inventory file COMP_TF = read_json_file(ARGUMENTS.compute_tf_inv_path) if ARGUMENTS.verbose: - print("Parsed compute terraform output: %s" % json.dumps(COMP_TF, indent=4)) + print(f"Parsed compute terraform output: {json.dumps(COMP_TF, indent=4)}") STRG_TF = read_json_file(ARGUMENTS.storage_tf_inv_path) if ARGUMENTS.verbose: - print("Parsed storage terraform output: %s" % json.dumps(STRG_TF, indent=4)) + print(f"Parsed storage terraform output: {json.dumps(STRG_TF, indent=4)}") # Step-2: Read the GUI inventory file COMP_GUI = read_json_file(ARGUMENTS.compute_gui_inv_path) if ARGUMENTS.verbose: - print("Parsed compute terraform output: %s" % json.dumps(COMP_GUI, indent=4)) + print(f"Parsed compute GUI inventory: {json.dumps(COMP_GUI, indent=4)}") STRG_GUI = read_json_file(ARGUMENTS.storage_gui_inv_path) if ARGUMENTS.verbose: - print("Parsed storage terraform output: %s" % json.dumps(STRG_GUI, indent=4)) + print(f"Parsed storage GUI inventory: {json.dumps(STRG_GUI, indent=4)}") # Step-3: Create playbook remote_mount = {} @@ -222,8 +220,9 @@ def initialize_node_details(storage_gui_ip, user, key_file): playbook_content = prepare_remote_mount_playbook("scale_nodes", remote_mount) write_to_file( - "%s/%s/remote_mount_cloud_playbook.yaml" - % (ARGUMENTS.install_infra_path, "ibm-spectrum-scale-install-infra"), + "{}/{}/remote_mount_cloud_playbook.yaml".format( + ARGUMENTS.install_infra_path, "ibm-spectrum-scale-install-infra" + ), playbook_content, ) @@ -250,8 +249,9 @@ def initialize_node_details(storage_gui_ip, user, key_file): node_template = node_template + each_entry + "\n" with open( - "%s/%s/remote_mount_inventory.ini" - % (ARGUMENTS.install_infra_path, "ibm-spectrum-scale-install-infra"), + "{}/{}/remote_mount_inventory.ini".format( + ARGUMENTS.install_infra_path, "ibm-spectrum-scale-install-infra" + ), "w", ) as configfile: configfile.write("[scale_nodes]" + "\n") diff --git a/modules/common/scripts/prepare_scale_inv_ini.py b/modules/common/scripts/prepare_scale_inv_ini.py index 1057b62e..752d5489 100755 --- a/modules/common/scripts/prepare_scale_inv_ini.py +++ b/modules/common/scripts/prepare_scale_inv_ini.py @@ -1,5 +1,4 @@ #!/usr/bin/env python3 -# -*- coding: utf-8 -*- """ Copyright IBM Corporation 2018 @@ -43,7 +42,7 @@ def calculate_pagepool(nodeclass, memory): else: pagepool_gb = min(int((memory * 0.25) // 1), 32) - return "{}G".format(pagepool_gb) + return f"{pagepool_gb}G" def calculate_maxStatCache(nodeclass, memory): @@ -134,12 +133,12 @@ def read_json_file(json_path): tf_inv = json.load(json_handler) except json.decoder.JSONDecodeError: print( - "Provided terraform inventory file (%s) is not a valid " - "json." % json_path + f"Provided terraform inventory file ({json_path}) is not a valid " + "json." ) sys.exit(1) except OSError: - print("Provided terraform inventory file (%s) does not exist." 
% json_path) + print(f"Provided terraform inventory file ({json_path}) does not exist.") sys.exit(1) return tf_inv @@ -159,7 +158,7 @@ def write_to_file(filepath, filecontent): def prepare_ansible_playbook(hosts_config, cluster_config, cluster_key_file): """Write to playbook""" - content = """--- + content = f"""--- # Ensure provisioned VMs are up and Passwordless SSH setup # has been compleated and operational - name: Check passwordless SSH connection is setup @@ -244,17 +243,13 @@ def prepare_ansible_playbook(hosts_config, cluster_config, cluster_key_file): - {{ role: kp_encryption_prepare, when: "enable_key_protect and scale_cluster_type == 'storage'" }} - {{ role: kp_encryption_configure, when: enable_key_protect }} - {{ role: kp_encryption_apply, when: "enable_key_protect and scale_cluster_type == 'storage'" }} -""".format( - hosts_config=hosts_config, - cluster_config=cluster_config, - cluster_key_file=cluster_key_file, - ) +""" return content def prepare_packer_ansible_playbook(hosts_config, cluster_config): """Write to playbook""" - content = """--- + content = f"""--- # Install and config Spectrum Scale on nodes - hosts: {hosts_config} collections: @@ -268,15 +263,13 @@ def prepare_packer_ansible_playbook(hosts_config, cluster_config): - gui_verify - perfmon_configure - perfmon_verify -""".format( - hosts_config=hosts_config, cluster_config=cluster_config - ) +""" return content def prepare_nogui_ansible_playbook(hosts_config, cluster_config): """Write to playbook""" - content = """--- + content = f"""--- # Install and config Spectrum Scale on nodes - hosts: {hosts_config} collections: @@ -288,15 +281,13 @@ def prepare_nogui_ansible_playbook(hosts_config, cluster_config): - core_prepare - core_install - core_configure -""".format( - hosts_config=hosts_config, cluster_config=cluster_config - ) +""" return content def prepare_nogui_packer_ansible_playbook(hosts_config, cluster_config): """Write to playbook""" - content = """--- + content = f"""--- # Install and config Spectrum Scale on nodes - hosts: {hosts_config} collections: @@ -306,9 +297,7 @@ def prepare_nogui_packer_ansible_playbook(hosts_config, cluster_config): - include_vars: group_vars/{cluster_config} roles: - core_configure -""".format( - hosts_config=hosts_config, cluster_config=cluster_config - ) +""" return content @@ -497,8 +486,7 @@ def initialize_node_details( } write_json_file( {"compute_cluster_gui_ip_address": each_ip}, - "%s/%s" - % ( + "{}/{}".format( str(pathlib.PurePath(ARGUMENTS.tf_inv_path).parent), "compute_cluster_gui_details.json", ), @@ -598,8 +586,7 @@ def initialize_node_details( } write_json_file( {"storage_cluster_gui_ip_address": each_ip}, - "%s/%s" - % ( + "{}/{}".format( str(pathlib.PurePath(ARGUMENTS.tf_inv_path).parent), "storage_cluster_gui_details.json", ), @@ -1128,7 +1115,7 @@ def initialize_scale_ces_details( # Step-1: Read the inventory file TF = read_json_file(ARGUMENTS.tf_inv_path) if ARGUMENTS.verbose: - print("Parsed terraform output: %s" % json.dumps(TF, indent=4)) + print(f"Parsed terraform output: {json.dumps(TF, indent=4)}") # Step-2: Identify the cluster type if ( @@ -1137,37 +1124,33 @@ def initialize_scale_ces_details( ): cluster_type = "compute" cleanup( - "%s/%s/%s_inventory.ini" - % ( + "{}/{}/{}_inventory.ini".format( ARGUMENTS.install_infra_path, "ibm-spectrum-scale-install-infra", cluster_type, ) ) cleanup( - "%s/%s_cluster_gui_details.json" - % (str(pathlib.PurePath(ARGUMENTS.tf_inv_path).parent), cluster_type) + 
f"{str(pathlib.PurePath(ARGUMENTS.tf_inv_path).parent)}/{cluster_type}_cluster_gui_details.json" ) cleanup( - "/%s/%s/%s_cloud_playbook.yaml" - % ( + "/{}/{}/{}_cloud_playbook.yaml".format( ARGUMENTS.install_infra_path, "ibm-spectrum-scale-install-infra", cluster_type, ) ) cleanup( - "%s/%s/%s/%s" - % ( + "{}/{}/{}/{}".format( ARGUMENTS.install_infra_path, "ibm-spectrum-scale-install-infra", "group_vars", - "%s_cluster_config.yaml" % cluster_type, + f"{cluster_type}_cluster_config.yaml", ) ) gui_username = ARGUMENTS.gui_username gui_password = ARGUMENTS.gui_password - profile_path = "%s/computesncparams" % ARGUMENTS.install_infra_path + profile_path = f"{ARGUMENTS.install_infra_path}/computesncparams" replica_config = False computenodegrp = generate_nodeclass_config( "computenodegrp", @@ -1192,37 +1175,33 @@ def initialize_scale_ces_details( # single az storage cluster cluster_type = "storage" cleanup( - "%s/%s/%s_inventory.ini" - % ( + "{}/{}/{}_inventory.ini".format( ARGUMENTS.install_infra_path, "ibm-spectrum-scale-install-infra", cluster_type, ) ) cleanup( - "%s/%s_cluster_gui_details.json" - % (str(pathlib.PurePath(ARGUMENTS.tf_inv_path).parent), cluster_type) + f"{str(pathlib.PurePath(ARGUMENTS.tf_inv_path).parent)}/{cluster_type}_cluster_gui_details.json" ) cleanup( - "/%s/%s/%s_cloud_playbook.yaml" - % ( + "/{}/{}/{}_cloud_playbook.yaml".format( ARGUMENTS.install_infra_path, "ibm-spectrum-scale-install-infra", cluster_type, ) ) cleanup( - "%s/%s/%s/%s" - % ( + "{}/{}/{}/{}".format( ARGUMENTS.install_infra_path, "ibm-spectrum-scale-install-infra", "group_vars", - "%s_cluster_config.yaml" % cluster_type, + f"{cluster_type}_cluster_config.yaml", ) ) gui_username = ARGUMENTS.gui_username gui_password = ARGUMENTS.gui_password - profile_path = "%s/storagesncparams" % ARGUMENTS.install_infra_path + profile_path = f"{ARGUMENTS.install_infra_path}/storagesncparams" replica_config = bool(len(TF["vpc_availability_zones"]) > 1) managementnodegrp = generate_nodeclass_config( @@ -1287,37 +1266,33 @@ def initialize_scale_ces_details( # multi az storage cluster cluster_type = "storage" cleanup( - "%s/%s/%s_inventory.ini" - % ( + "{}/{}/{}_inventory.ini".format( ARGUMENTS.install_infra_path, "ibm-spectrum-scale-install-infra", cluster_type, ) ) cleanup( - "%s/%s_cluster_gui_details.json" - % (str(pathlib.PurePath(ARGUMENTS.tf_inv_path).parent), cluster_type) + f"{str(pathlib.PurePath(ARGUMENTS.tf_inv_path).parent)}/{cluster_type}_cluster_gui_details.json" ) cleanup( - "/%s/%s/%s_cloud_playbook.yaml" - % ( + "/{}/{}/{}_cloud_playbook.yaml".format( ARGUMENTS.install_infra_path, "ibm-spectrum-scale-install-infra", cluster_type, ) ) cleanup( - "%s/%s/%s/%s" - % ( + "{}/{}/{}/{}".format( ARGUMENTS.install_infra_path, "ibm-spectrum-scale-install-infra", "group_vars", - "%s_cluster_config.yaml" % cluster_type, + f"{cluster_type}_cluster_config.yaml", ) ) gui_username = ARGUMENTS.gui_username gui_password = ARGUMENTS.gui_password - profile_path = "%s/storagesncparams" % ARGUMENTS.install_infra_path + profile_path = f"{ARGUMENTS.install_infra_path}/storagesncparams" replica_config = bool(len(TF["vpc_availability_zones"]) > 1) managementnodegrp = generate_nodeclass_config( @@ -1376,33 +1351,30 @@ def initialize_scale_ces_details( else: cluster_type = "combined" cleanup( - "%s/%s/%s_inventory.ini" - % ( + "{}/{}/{}_inventory.ini".format( ARGUMENTS.install_infra_path, "ibm-spectrum-scale-install-infra", cluster_type, ) ) cleanup( - "/%s/%s/%s_cloud_playbook.yaml" - % ( + "/{}/{}/{}_cloud_playbook.yaml".format( 
ARGUMENTS.install_infra_path, "ibm-spectrum-scale-install-infra", cluster_type, ) ) cleanup( - "%s/%s/%s/%s" - % ( + "{}/{}/{}/{}".format( ARGUMENTS.install_infra_path, "ibm-spectrum-scale-install-infra", "group_vars", - "%s_cluster_config.yaml" % cluster_type, + f"{cluster_type}_cluster_config.yaml", ) ) gui_username = ARGUMENTS.gui_username gui_password = ARGUMENTS.gui_password - profile_path = "%s/scalesncparams" % ARGUMENTS.install_infra_path + profile_path = f"{ARGUMENTS.install_infra_path}/scalesncparams" replica_config = bool(len(TF["vpc_availability_zones"]) > 1) computenodegrp = generate_nodeclass_config( @@ -1480,7 +1452,7 @@ def initialize_scale_ces_details( nodeclassgrp.append(afmgatewaygrp) scale_config = initialize_scale_config_details(nodeclassgrp) - print("Identified cluster type: %s" % cluster_type) + print(f"Identified cluster type: {cluster_type}") # Step-3: Identify if tie breaker needs to be counted for storage if len(TF["vpc_availability_zones"]) > 1: @@ -1520,12 +1492,11 @@ def initialize_scale_ces_details( ): playbook_content = prepare_ansible_playbook( "scale_nodes", - "%s_cluster_config.yaml" % cluster_type, + f"{cluster_type}_cluster_config.yaml", ARGUMENTS.instance_private_key, ) write_to_file( - "/%s/%s/%s_cloud_playbook.yaml" - % ( + "/{}/{}/{}_cloud_playbook.yaml".format( ARGUMENTS.install_infra_path, "ibm-spectrum-scale-install-infra", cluster_type, @@ -1537,11 +1508,10 @@ def initialize_scale_ces_details( and ARGUMENTS.using_rest_initialization == "true" ): playbook_content = prepare_packer_ansible_playbook( - "scale_nodes", "%s_cluster_config.yaml" % cluster_type + "scale_nodes", f"{cluster_type}_cluster_config.yaml" ) write_to_file( - "/%s/%s/%s_cloud_playbook.yaml" - % ( + "/{}/{}/{}_cloud_playbook.yaml".format( ARGUMENTS.install_infra_path, "ibm-spectrum-scale-install-infra", cluster_type, @@ -1553,11 +1523,10 @@ def initialize_scale_ces_details( and ARGUMENTS.using_rest_initialization == "false" ): playbook_content = prepare_nogui_ansible_playbook( - "scale_nodes", "%s_cluster_config.yaml" % cluster_type + "scale_nodes", f"{cluster_type}_cluster_config.yaml" ) write_to_file( - "/%s/%s/%s_cloud_playbook.yaml" - % ( + "/{}/{}/{}_cloud_playbook.yaml".format( ARGUMENTS.install_infra_path, "ibm-spectrum-scale-install-infra", cluster_type, @@ -1569,11 +1538,10 @@ def initialize_scale_ces_details( and ARGUMENTS.using_rest_initialization == "false" ): playbook_content = prepare_nogui_packer_ansible_playbook( - "scale_nodes", "%s_cluster_config.yaml" % cluster_type + "scale_nodes", f"{cluster_type}_cluster_config.yaml" ) write_to_file( - "/%s/%s/%s_cloud_playbook.yaml" - % ( + "/{}/{}/{}_cloud_playbook.yaml".format( ARGUMENTS.install_infra_path, "ibm-spectrum-scale-install-infra", cluster_type, @@ -1590,16 +1558,18 @@ def initialize_scale_ces_details( ): encryption_playbook_content = prepare_ansible_playbook_encryption_gklm() write_to_file( - "%s/%s/encryption_gklm_playbook.yaml" - % (ARGUMENTS.install_infra_path, "ibm-spectrum-scale-install-infra"), + "{}/{}/encryption_gklm_playbook.yaml".format( + ARGUMENTS.install_infra_path, "ibm-spectrum-scale-install-infra" + ), encryption_playbook_content, ) encryption_playbook_content = prepare_ansible_playbook_encryption_cluster( "scale_nodes" ) write_to_file( - "%s/%s/encryption_cluster_playbook.yaml" - % (ARGUMENTS.install_infra_path, "ibm-spectrum-scale-install-infra"), + "{}/{}/encryption_cluster_playbook.yaml".format( + ARGUMENTS.install_infra_path, "ibm-spectrum-scale-install-infra" + ), 
encryption_playbook_content, ) if ARGUMENTS.verbose: @@ -1642,7 +1612,7 @@ def initialize_scale_ces_details( if TF["resource_prefix"]: cluster_name = TF["resource_prefix"] else: - cluster_name = "%s.%s" % ("spectrum-scale", cluster_type) + cluster_name = "{}.{}".format("spectrum-scale", cluster_type) config["all:vars"] = initialize_cluster_details( TF["scale_version"], @@ -1674,8 +1644,7 @@ def initialize_scale_ces_details( TF["afm_config_details"], ) with open( - "%s/%s/%s_inventory.ini" - % ( + "{}/{}/{}_inventory.ini".format( ARGUMENTS.install_infra_path, "ibm-spectrum-scale-install-infra", cluster_type, @@ -1688,16 +1657,14 @@ def initialize_scale_ces_details( if ARGUMENTS.verbose: config.read( - "%s/%s/%s_inventory.ini" - % ( + "{}/{}/{}_inventory.ini".format( ARGUMENTS.install_infra_path, "ibm-spectrum-scale-install-infra", cluster_type, ) ) print( - "Content of %s/%s/%s_inventory.ini" - % ( + "Content of {}/{}/{}_inventory.ini".format( ARGUMENTS.install_infra_path, "ibm-spectrum-scale-install-infra", cluster_type, @@ -1707,12 +1674,11 @@ def initialize_scale_ces_details( print(node_template) print("[all:vars]") for each_key in config["all:vars"]: - print("%s: %s" % (each_key, config.get("all:vars", each_key))) + print("{}: {}".format(each_key, config.get("all:vars", each_key))) # Step-6: Create group_vars directory create_directory( - "%s/%s/%s" - % ( + "{}/{}/{}".format( ARGUMENTS.install_infra_path, "ibm-spectrum-scale-install-infra", "group_vars", @@ -1720,20 +1686,18 @@ def initialize_scale_ces_details( ) # Step-7: Create group_vars with open( - "%s/%s/%s/%s" - % ( + "{}/{}/{}/{}".format( ARGUMENTS.install_infra_path, "ibm-spectrum-scale-install-infra", "group_vars", - "%s_cluster_config.yaml" % cluster_type, + f"{cluster_type}_cluster_config.yaml", ), "w", ) as groupvar: yaml.dump(scale_config, groupvar, default_flow_style=False) if ARGUMENTS.verbose: print( - "group_vars content:\n%s" - % yaml.dump(scale_config, default_flow_style=False) + f"group_vars content:\n{yaml.dump(scale_config, default_flow_style=False)}" ) if cluster_type in ["storage", "combined"]: @@ -1770,18 +1734,16 @@ def initialize_scale_ces_details( "scale_storage": scale_storage["scale_storage"], } with open( - "%s/%s/%s/%s" - % ( + "{}/{}/{}/{}".format( ARGUMENTS.install_infra_path, "ibm-spectrum-scale-install-infra", "group_vars", - "%s_cluster_config.yaml" % cluster_type, + f"{cluster_type}_cluster_config.yaml", ), "a", ) as groupvar: yaml.dump(scale_storage_cluster, groupvar, default_flow_style=False) if ARGUMENTS.verbose: print( - "group_vars content:\n%s" - % yaml.dump(scale_storage_cluster, default_flow_style=False) + f"group_vars content:\n{yaml.dump(scale_storage_cluster, default_flow_style=False)}" ) diff --git a/modules/common/scripts/wait_for_ssh_availability.py b/modules/common/scripts/wait_for_ssh_availability.py index ada2d58a..e02ca35c 100755 --- a/modules/common/scripts/wait_for_ssh_availability.py +++ b/modules/common/scripts/wait_for_ssh_availability.py @@ -1,5 +1,4 @@ #!/usr/bin/env python3 -# -*- coding: utf-8 -*- """ Copyright IBM Corporation 2018 @@ -31,12 +30,11 @@ def read_json_file(json_path): tf_inv = json.load(json_handler) except json.decoder.JSONDecodeError: print( - "Provided terraform inventory file (%s) is not a valid json." - % json_path + f"Provided terraform inventory file ({json_path}) is not a valid json." ) sys.exit(1) except OSError: - print("Provided terraform inventory file (%s) does not exist." 
% json_path) + print(f"Provided terraform inventory file ({json_path}) does not exist.") sys.exit(1) return tf_inv @@ -63,7 +61,7 @@ def aws_ec2_wait_running(instance_ids, region): Wait for EC2 instances to obtain running-ok state. :args: region(string), instance_ids(list) """ - print("Waiting for instance's (%s) to obtain running-ok state." % instance_ids) + print(f"Waiting for instances ({instance_ids}) to obtain running-ok state.") command = [ "aws", "ec2", @@ -77,7 +75,7 @@ def aws_ec2_wait_running(instance_ids, region): if code: print("Instance's did not obtain running-ok state. Existing!") - print("%s: %s %s: %s" % ("stdout", out, "stderr", err)) + print("{}: {} {}: {}".format("stdout", out, "stderr", err)) sys.exit(1) @@ -99,7 +97,7 @@ def aws_ec2_wait_running(instance_ids, region): # Step-1: Read the inventory file TF = read_json_file(ARGUMENTS.tf_inv_path) if ARGUMENTS.verbose: - print("Parsed terraform output: %s" % json.dumps(TF, indent=4)) + print(f"Parsed terraform output: {json.dumps(TF, indent=4)}") # Step-2: Identify instance id's based cluster_type target_instance_ids = [] diff --git a/modules/resource_provisioner/locals.tf b/modules/resource_provisioner/locals.tf index c7a75cc5..74cd132a 100644 --- a/modules/resource_provisioner/locals.tf +++ b/modules/resource_provisioner/locals.tf @@ -4,7 +4,7 @@ locals { deployer_path = "/opt/ibm" remote_terraform_path = format("%s/terraform-ibm-hpc", local.deployer_path) da_hpc_repo_url = "github.com/terraform-ibm-modules/terraform-ibm-hpc.git" - da_hpc_repo_tag = "main" + da_hpc_repo_tag = "revert-to-8b6254e" remote_ansible_path = format("%s/ibm-spectrumscale-cloud-deploy", local.deployer_path) scale_cloud_infra_repo_url = "https://github.com/jayeshh123/ibm-spectrum-scale-install-infra" scale_cloud_infra_repo_name = "ibm-spectrum-scale-install-infra"
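
A note for reviewers on PATCH 2/2: the Python changes are a mechanical migration from %-interpolation and str.format() to f-strings, the kind of rewrite typically enforced by a pre-commit hook such as pyupgrade (an assumption; the hook configuration itself is not part of this patch). A minimal, self-contained sketch of the equivalence being relied on, with hypothetical names:

    # All three render the same text; the patch switches single-line cases to
    # f-strings and keeps str.format() where the call spans multiple lines.
    json_path = "/tmp/inventory.json"
    old_percent = "Provided terraform inventory file (%s) does not exist." % json_path
    old_format = "Provided terraform inventory file ({}) does not exist.".format(json_path)
    new_fstring = f"Provided terraform inventory file ({json_path}) does not exist."
    assert old_percent == old_format == new_fstring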
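
The calculate_pagepool hunk in prepare_scale_inv_ini.py touches only the return statement, but the visible else branch documents the sizing rule: 25% of node memory, floored to a whole GiB and capped at 32G. A standalone sketch of just that branch (the nodeclass-specific branch sits outside the hunk and is omitted here):

    def pagepool_for(memory_gb):
        # Mirrors the else branch shown in the hunk: 25% of memory, capped at 32 GiB.
        return f"{min(int((memory_gb * 0.25) // 1), 32)}G"

    assert pagepool_for(64) == "16G"   # 25% of 64 GiB
    assert pagepool_for(256) == "32G"  # capped at 32 GiB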
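
The inventory-writing hunks build an Ansible INI file with a host section ([scale_nodes], [client_nodes], ...) followed by an [all:vars] block, which the verbose path later reads back through configparser. A rough sketch of that file shape, with hypothetical hosts and variables rather than the script's exact mechanics:

    import configparser

    config = configparser.ConfigParser(allow_no_value=True)
    config["scale_nodes"] = {"192.0.2.10": None, "192.0.2.11": None}  # bare host lines
    config["all:vars"] = {
        "scale_version": "5.2.3.1",  # hypothetical value
        "scale_cluster_clustername": "spectrum-scale.storage",
    }

    with open("storage_inventory.ini", "w") as configfile:
        config.write(configfile)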