Deployer Node Changes Specific to LSF Cluster Creation #199
Changes from all commits: ca0c984, eb794a1, e1c599c, ef5d250, 70d06d8, 300fbc1, 35f677b, 5495e6c, bc0de4f, b6f2140, 38a31f7
main.tf

```diff
@@ -1,5 +1,6 @@
 module "landing_zone" {
   source               = "./modules/landing_zone"
   enable_landing_zone  = var.enable_landing_zone
   allowed_cidr         = var.allowed_cidr
   compute_subnets_cidr = var.compute_subnets_cidr
   clusters             = var.clusters

@@ -55,16 +56,23 @@ module "deployer" {
   boot_volume_encryption_key    = local.boot_volume_encryption_key
   existing_kms_instance_guid    = local.existing_kms_instance_guid
   skip_iam_authorization_policy = var.skip_iam_authorization_policy
   static_compute_instances      = var.static_compute_instances
   management_instances          = var.management_instances
   dns_domain_names              = var.dns_domain_names
 }

 module "landing_zone_vsi" {
   count                       = var.enable_deployer == false ? 1 : 0
   source                      = "./modules/landing_zone_vsi"
   resource_group              = var.resource_group
   prefix                      = var.prefix
   zones                       = var.zones
   vpc_id                      = local.vpc_id
-  bastion_security_group_id   = local.bastion_security_group_id
+  bastion_security_group_id   = var.bastion_security_group_id
   bastion_public_key_content  = local.bastion_public_key_content
   compute_public_key_content  = var.compute_public_key_content
   compute_private_key_content = var.compute_private_key_content
   client_subnets              = local.client_subnets
   client_ssh_keys             = local.client_ssh_keys
   client_instances            = var.client_instances

@@ -82,22 +90,137 @@ module "landing_zone_vsi" {
   dns_domain_names           = var.dns_domain_names
   kms_encryption_enabled     = local.kms_encryption_enabled
   boot_volume_encryption_key = local.boot_volume_encryption_key
   enable_bastion             = var.enable_bastion
 }

 resource "local_sensitive_file" "prepare_tf_input" {
   count   = var.enable_deployer == true ? 1 : 0
   content = <<EOT
 {
   "ibmcloud_api_key": "${var.ibmcloud_api_key}",
   "resource_group": "${var.resource_group}",
   "prefix": "${var.prefix}",
   "zones": ${local.zones},
   "enable_landing_zone": false,
   "enable_deployer": false,
   "enable_bastion": false,
   "bastion_fip": "${local.bastion_fip}",
   "compute_ssh_keys": ${local.list_compute_ssh_keys},
   "storage_ssh_keys": ${local.list_storage_ssh_keys},
   "storage_instances": ${local.list_storage_instances},
   "management_instances": ${local.list_management_instances},
   "protocol_instances": ${local.list_protocol_instances},
   "ibm_customer_number": "${var.ibm_customer_number}",
   "static_compute_instances": ${local.list_compute_instances},
   "client_instances": ${local.list_client_instances},
   "enable_cos_integration": ${var.enable_cos_integration},
   "enable_atracker": ${var.enable_atracker},
   "enable_vpc_flow_logs": ${var.enable_vpc_flow_logs},
   "allowed_cidr": ${local.allowed_cidr},
   "vpc_id": "${local.vpc_id}",
   "vpc": "${local.vpc}",
```
Review comment: @jayeshh123 This needs to be re-checked; remove this value.
| "storage_subnets": ${local.list_storage_subnets}, | ||
| "protocol_subnets": ${local.list_protocol_subnets}, | ||
| "compute_subnets": ${local.list_compute_subnets}, | ||
| "client_subnets": ${local.list_client_subnets}, | ||
| "bastion_subnets": ${local.list_bastion_subnets}, | ||
| "dns_domain_names": ${local.dns_domain_names}, | ||
| "compute_public_key_content": ${local.compute_public_key_content}, | ||
| "compute_private_key_content": ${local.compute_private_key_content}, | ||
| "bastion_security_group_id": "${local.bastion_security_group_id}", | ||
| "deployer_hostname": "${local.deployer_hostname}", | ||
| "deployer_ip": "${local.deployer_ip}" | ||
| } | ||
| EOT | ||
| filename = local.schematics_inputs_path | ||
| } | ||
|
|
||
| resource "null_resource" "tf_resource_provisioner" { | ||
|
Review comment: @jayeshh123 You cannot have any individual resources created from the main.tf file; this should be called from a module instead.
```diff
   count = var.enable_deployer == true ? 1 : 0

   connection {
     type                = "ssh"
     host                = flatten(module.deployer.deployer_vsi_data[*].list)[0].ipv4_address
     user                = "vpcuser"
     private_key         = local.bastion_private_key_content
     bastion_host        = local.bastion_fip
     bastion_user        = "ubuntu"
     bastion_private_key = local.bastion_private_key_content
     timeout             = "60m"
   }

   provisioner "file" {
     source      = local.schematics_inputs_path
     destination = local.remote_inputs_path
   }

   provisioner "remote-exec" {
     inline = [
       "if [ ! -d ${local.remote_terraform_path} ]; then sudo git clone -b ${local.da_hpc_repo_tag} ${local.da_hpc_repo_url} ${local.remote_terraform_path}; fi",
       "sudo ln -fs /usr/local/bin/ansible-playbook /usr/bin/ansible-playbook",
       "sudo cp ${local.remote_inputs_path} ${local.remote_terraform_path}",
       "export TF_LOG=${var.TF_LOG} && sudo -E terraform -chdir=${local.remote_terraform_path} init && sudo -E terraform -chdir=${local.remote_terraform_path} apply -parallelism=${var.TF_PARALLELISM} -auto-approve"
     ]
   }

   triggers = {
     always_run = timestamp()
   }

   depends_on = [
     module.deployer,
     local_sensitive_file.prepare_tf_input
   ]
 }

 resource "null_resource" "cluster_destroyer" {
```
Review comment: @jayeshh123 You cannot have any individual resources created from the main.tf file; this should be called from a module instead.
```diff
   count = var.enable_deployer == true ? 1 : 0

   triggers = {
     conn_host                  = flatten(module.deployer.deployer_vsi_data[*].list)[0].ipv4_address
     conn_private_key           = local.bastion_private_key_content
     conn_bastion_host          = local.bastion_fip
     conn_bastion_private_key   = local.bastion_private_key_content
     conn_ibmcloud_api_key      = var.ibmcloud_api_key
     conn_remote_terraform_path = local.remote_terraform_path
     conn_terraform_log_level   = var.TF_LOG
   }

   connection {
     type                = "ssh"
     host                = self.triggers.conn_host
     user                = "vpcuser"
     private_key         = self.triggers.conn_private_key
     bastion_host        = self.triggers.conn_bastion_host
     bastion_user        = "ubuntu"
     bastion_private_key = self.triggers.conn_bastion_private_key
     timeout             = "60m"
   }

   provisioner "remote-exec" {
     when       = destroy
     on_failure = fail
     inline = [
       "export TF_LOG=${self.triggers.conn_terraform_log_level} && sudo -E terraform -chdir=${self.triggers.conn_remote_terraform_path} destroy -auto-approve"
     ]
   }
 }
```
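The two review comments above ask for the provisioner and destroyer resources to be invoked through a module rather than declared in main.tf. A minimal sketch of that refactor follows; the module path `./modules/remote_provisioner`, its variable names, and the abridged provisioner commands are illustrative assumptions, not code from this PR.

```hcl
# Hypothetical ./modules/remote_provisioner/main.tf -- names are illustrative only.
# It wraps the SSH-based remote "terraform apply" that main.tf currently declares
# inline; the destroy-time resource would move here the same way, keeping its
# triggers so the destroy provisioner can still reference them via self.triggers.

variable "enable_deployer"        { type = bool }
variable "deployer_ip"            { type = string }
variable "bastion_fip"            { type = string }
variable "schematics_inputs_path" { type = string }
variable "remote_inputs_path"     { type = string }
variable "remote_terraform_path"  { type = string }

variable "bastion_private_key_content" {
  type      = string
  sensitive = true
}

resource "null_resource" "tf_resource_provisioner" {
  count = var.enable_deployer ? 1 : 0

  connection {
    type                = "ssh"
    host                = var.deployer_ip
    user                = "vpcuser"
    private_key         = var.bastion_private_key_content
    bastion_host        = var.bastion_fip
    bastion_user        = "ubuntu"
    bastion_private_key = var.bastion_private_key_content
    timeout             = "60m"
  }

  # Copy the generated inputs file to the deployer, then run the remote apply
  # (the command list is abridged relative to the PR's inline version).
  provisioner "file" {
    source      = var.schematics_inputs_path
    destination = var.remote_inputs_path
  }

  provisioner "remote-exec" {
    inline = [
      "sudo cp ${var.remote_inputs_path} ${var.remote_terraform_path}",
      "sudo -E terraform -chdir=${var.remote_terraform_path} init",
      "sudo -E terraform -chdir=${var.remote_terraform_path} apply -auto-approve"
    ]
  }
}
```

main.tf would then carry only the module call, for example:

```hcl
# Sketch of the call site in main.tf (not the PR's actual code).
module "remote_provisioner" {
  source                      = "./modules/remote_provisioner"
  enable_deployer             = var.enable_deployer
  deployer_ip                 = local.deployer_ip
  bastion_fip                 = local.bastion_fip
  bastion_private_key_content = local.bastion_private_key_content
  schematics_inputs_path      = local.schematics_inputs_path
  remote_inputs_path          = local.remote_inputs_path
  remote_terraform_path       = local.remote_terraform_path

  depends_on = [module.deployer, local_sensitive_file.prepare_tf_input]
}
```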
| module "file_storage" { | ||
| count = var.enable_deployer == false ? 1 : 0 | ||
| source = "./modules/file_storage" | ||
| zone = var.zones[0] # always the first zone | ||
| resource_group_id = local.resource_group_id | ||
| resource_group_id = local.resource_group_ids["service_rg"] | ||
| file_shares = local.file_shares | ||
| encryption_key_crn = local.boot_volume_encryption_key | ||
| security_group_ids = local.compute_security_group_id | ||
| subnet_id = local.compute_subnet_id | ||
| } | ||
|
|
||
| module "dns" { | ||
| count = var.enable_deployer == false ? 1 : 0 | ||
| source = "./modules/dns" | ||
| prefix = var.prefix | ||
| resource_group_id = local.resource_group_id | ||
| resource_group_id = local.resource_group_ids["service_rg"] | ||
| vpc_crn = local.vpc_crn | ||
| subnets_crn = local.subnets_crn | ||
| dns_instance_id = var.dns_instance_id | ||
|
|
@@ -106,74 +229,120 @@ module "dns" { | |
| } | ||
|
|
||
| module "compute_dns_records" { | ||
| count = var.enable_deployer == false ? 1 : 0 | ||
| source = "./modules/dns_record" | ||
| dns_instance_id = local.dns_instance_id | ||
| dns_zone_id = local.compute_dns_zone_id | ||
| dns_records = local.compute_dns_records | ||
| depends_on = [ module.dns ] | ||
| } | ||
|
|
||
| module "storage_dns_records" { | ||
| count = var.enable_deployer == false ? 1 : 0 | ||
| source = "./modules/dns_record" | ||
| dns_instance_id = local.dns_instance_id | ||
| dns_zone_id = local.storage_dns_zone_id | ||
| dns_records = local.storage_dns_records | ||
| depends_on = [ module.dns ] | ||
| } | ||
|
|
||
| module "protocol_dns_records" { | ||
| count = var.enable_deployer == false ? 1 : 0 | ||
| source = "./modules/dns_record" | ||
| dns_instance_id = local.dns_instance_id | ||
| dns_zone_id = local.protocol_dns_zone_id | ||
| dns_records = local.protocol_dns_records | ||
| depends_on = [ module.dns ] | ||
| } | ||
|
|
||
| resource "time_sleep" "wait_60_seconds" { | ||
| create_duration = "60s" | ||
| depends_on = [ module.storage_dns_records, module.protocol_dns_records, module.compute_dns_records ] | ||
| } | ||
|
|
||
| module "write_compute_cluster_inventory" { | ||
| count = var.enable_deployer == false ? 1 : 0 | ||
| source = "./modules/write_inventory" | ||
| json_inventory_path = local.json_inventory_path | ||
| lsf_masters = local.management_nodes | ||
| lsf_servers = local.compute_nodes | ||
| lsf_clients = local.client_nodes | ||
| gui_hosts = local.gui_hosts | ||
| db_hosts = local.db_hosts | ||
| my_cluster_name = var.prefix | ||
| ha_shared_dir = local.ha_shared_dir | ||
| nfs_install_dir = local.nfs_install_dir | ||
| Enable_Monitoring = local.Enable_Monitoring | ||
| lsf_deployer_hostname = local.lsf_deployer_hostname | ||
| depends_on = [ time_sleep.wait_60_seconds ] | ||
| } | ||
|
|
||
| module "write_storage_cluster_inventory" { | ||
| count = var.enable_deployer == false ? 1 : 0 | ||
| source = "./modules/write_inventory" | ||
| json_inventory_path = local.json_inventory_path | ||
| lsf_masters = local.management_nodes | ||
| lsf_servers = local.compute_nodes | ||
| lsf_clients = local.client_nodes | ||
| gui_hosts = local.gui_hosts | ||
| db_hosts = local.db_hosts | ||
| my_cluster_name = var.prefix | ||
| ha_shared_dir = local.ha_shared_dir | ||
| nfs_install_dir = local.nfs_install_dir | ||
| Enable_Monitoring = local.Enable_Monitoring | ||
| lsf_deployer_hostname = local.lsf_deployer_hostname | ||
| depends_on = [ time_sleep.wait_60_seconds ] | ||
| } | ||
|
|
||
| module "compute_inventory" { | ||
| count = var.enable_deployer == false ? 1 : 0 | ||
| source = "./modules/inventory" | ||
| hosts = local.compute_hosts | ||
| inventory_path = local.compute_inventory_path | ||
| name_mount_path_map = local.fileshare_name_mount_path_map | ||
| depends_on = [ time_sleep.wait_60_seconds ] | ||
| depends_on = [ module.write_compute_cluster_inventory ] | ||
| } | ||
|
|
||
| module "storage_inventory" { | ||
| count = var.enable_deployer == false ? 1 : 0 | ||
| source = "./modules/inventory" | ||
| hosts = local.storage_hosts | ||
| inventory_path = local.storage_inventory_path | ||
| name_mount_path_map = local.fileshare_name_mount_path_map | ||
| depends_on = [ time_sleep.wait_60_seconds ] | ||
| depends_on = [ module.write_storage_cluster_inventory ] | ||
| } | ||
|
|
||
| module "compute_playbook" { | ||
| count = var.enable_deployer == false ? 1 : 0 | ||
| source = "./modules/playbook" | ||
| bastion_fip = local.bastion_fip | ||
| private_key_path = local.compute_private_key_path | ||
| inventory_path = local.compute_inventory_path | ||
| playbook_path = local.compute_playbook_path | ||
| enable_bastion = var.enable_bastion | ||
| depends_on = [ module.compute_inventory ] | ||
| } | ||
|
|
||
| module "storage_playbook" { | ||
| source = "./modules/playbook" | ||
| bastion_fip = local.bastion_fip | ||
| private_key_path = local.storage_private_key_path | ||
| inventory_path = local.storage_inventory_path | ||
| playbook_path = local.storage_playbook_path | ||
| depends_on = [ module.storage_inventory ] | ||
| } | ||
| # module "storage_playbook" { | ||
| # count = var.enable_deployer == false ? 1 : 0 | ||
| # source = "./modules/playbook" | ||
| # bastion_fip = local.bastion_fip | ||
| # private_key_path = local.storage_private_key_path | ||
| # inventory_path = local.storage_inventory_path | ||
| # playbook_path = local.storage_playbook_path | ||
| # enable_bastion = var.enable_bastion | ||
| # depends_on = [ module.storage_inventory ] | ||
| # } | ||
|
|
||
| ################################################### | ||
| # Observability Modules | ||
| ################################################### | ||
|
|
||
| module "cloud_monitoring_instance_creation" { | ||
| source = "./modules/observability_instance" | ||
| enable_deployer = var.enable_deployer | ||
| location = local.region | ||
| rg = local.resource_group_id | ||
| rg = local.resource_group_ids["service_rg"] | ||
| cloud_monitoring_provision = var.observability_monitoring_enable | ||
| observability_monitoring_plan = var.observability_monitoring_plan | ||
| enable_metrics_routing = var.observability_enable_metrics_routing | ||
|
|
@@ -191,10 +360,10 @@ module "cloud_monitoring_instance_creation" { | |
|
|
||
| # Code for SCC Instance | ||
| module "scc_instance_and_profile" { | ||
| count = var.scc_enable ? 1 : 0 | ||
| count = var.enable_deployer == true && var.scc_enable ? 1 : 0 | ||
|
Review comment: @jayeshh123 Let's talk to Nupur and PC about this and update the logic accordingly.
| source = "./modules/security/scc" | ||
| location = var.scc_location != "" ? var.scc_location : "us-south" | ||
| rg = local.resource_group_id | ||
| rg = local.resource_group_ids["service_rg"] | ||
| scc_profile = var.scc_enable ? var.scc_profile : "" | ||
| # scc_profile_version = var.scc_profile != "" && var.scc_profile != null ? var.scc_profile_version : "" | ||
| event_notification_plan = var.scc_event_notification_plan | ||
|
|
||
New file (Ansible tasks, 39 lines added):

Review comment: @jayeshh123 As discussed, if this file is not used, please remove it.

```yaml
---
- name: Run LSF Config Test
  ansible.builtin.shell: >
    ansible-playbook -f 32 -i lsf-inventory lsf-config-test.yml > config-test.log 2>&1
  args:
    chdir: "{{ playbook_path }}"
  register: lsf_config_test_result
  changed_when: false
  failed_when: lsf_config_test_result.rc != 0
  ignore_errors: no
  delegate_to: "{{ lsf_deployer_hostname }}.comp.com"
  async: 3600
  poll: 5

- name: Run LSF Predeploy Test
  ansible.builtin.shell: >
    ansible-playbook -f 32 -i lsf-inventory lsf-predeploy-test.yml >> predeploy.log 2>&1
  args:
    chdir: "{{ playbook_path }}"
  register: lsf_predeploy_test_result
  changed_when: false
  failed_when: lsf_predeploy_test_result.rc != 0
  ignore_errors: no
  delegate_to: "{{ lsf_deployer_hostname }}.comp.com"
  async: 3600
  poll: 5

- name: Run LSF Deploy
  ansible.builtin.shell: >
    ansible-playbook -f 32 -i lsf-inventory lsf-deploy.yml >> deploy.log 2>&1
  args:
    chdir: "{{ playbook_path }}"
  register: lsf_deploy_test_result
  changed_when: false
  failed_when: lsf_deploy_test_result.rc != 0
  ignore_errors: no
  delegate_to: "{{ lsf_deployer_hostname }}.comp.com"
  async: 3600
  poll: 5
```
Review comment: @jayeshh123 When we support the existing bastion, we may need to tweak this logic. Just FYI.